fix(agent/agentcontainers): reduce need to recreate sub agents (#18402)

Mathias Fredriksson
2025-06-17 18:53:41 +03:00
committed by GitHub
parent 7e9a9e098c
commit 7fa1ad8923
5 changed files with 199 additions and 106 deletions

View File

```diff
@@ -2080,6 +2080,10 @@ func TestAgent_DevcontainerAutostart(t *testing.T) {
 	subAgentConnected := make(chan subAgentRequestPayload, 1)
 	subAgentReady := make(chan struct{}, 1)
 	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if r.Method == http.MethodGet && strings.HasPrefix(r.URL.Path, "/api/v2/workspaceagents/me/") {
+			return
+		}
 		t.Logf("Sub-agent request received: %s %s", r.Method, r.URL.Path)
 		if r.Method != http.MethodPost {
@@ -2226,11 +2230,22 @@ func TestAgent_DevcontainerAutostart(t *testing.T) {
 	// Ensure the container update routine runs.
 	tickerFuncTrap.MustWait(ctx).MustRelease(ctx)
 	tickerFuncTrap.Close()

-	_, next := mClock.AdvanceNext()
-	next.MustWait(ctx)
-	// Verify that a subagent was created.
-	subAgents := agentClient.GetSubAgents()
+	// Since the agent does RefreshContainers, and the ticker function
+	// is set to skip instead of queue, we must advance the clock
+	// multiple times to ensure that the sub-agent is created.
+	var subAgents []*proto.SubAgent
+	for {
+		_, next := mClock.AdvanceNext()
+		next.MustWait(ctx)
+
+		// Verify that a subagent was created.
+		subAgents = agentClient.GetSubAgents()
+		if len(subAgents) > 0 {
+			t.Logf("Found sub-agents: %d", len(subAgents))
+			break
+		}
+	}
 	require.Len(t, subAgents, 1, "expected one sub agent")
 	subAgent := subAgents[0]
```
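The retry loop added above exists because the ticker is registered with a skip (rather than queue) policy: a tick that fires while a refresh is still in flight is dropped, so a single clock advance may produce no visible side effect. A minimal sketch of the pattern, assuming github.com/coder/quartz as used in the hunk above (`advanceUntil` and the `done` predicate are hypothetical test helpers, not part of this commit):

```go
package example

import (
	"context"
	"testing"

	"github.com/coder/quartz"
)

// advanceUntil fires mock timers one at a time until done reports true.
// Each AdvanceNext unblocks the next scheduled tick; MustWait blocks
// until that tick's handler has actually finished, so side effects
// (like sub-agent creation) are visible before done is re-checked.
func advanceUntil(ctx context.Context, t *testing.T, mClock *quartz.Mock, done func() bool) {
	t.Helper()
	for !done() {
		_, next := mClock.AdvanceNext()
		next.MustWait(ctx)
	}
}
```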

View File

```diff
@@ -671,9 +671,9 @@ func (api *API) getContainers() (codersdk.WorkspaceAgentListContainersResponse,
 	if len(api.knownDevcontainers) > 0 {
 		devcontainers = make([]codersdk.WorkspaceAgentDevcontainer, 0, len(api.knownDevcontainers))
 		for _, dc := range api.knownDevcontainers {
-			// Include the agent if it's been created (we're iterating over
+			// Include the agent if it's running (we're iterating over
 			// copies, so mutating is fine).
-			if proc := api.injectedSubAgentProcs[dc.WorkspaceFolder]; proc.agent.ID != uuid.Nil && dc.Container != nil && proc.containerID == dc.Container.ID {
+			if proc := api.injectedSubAgentProcs[dc.WorkspaceFolder]; proc.agent.ID != uuid.Nil {
 				dc.Agent = &codersdk.WorkspaceAgentDevcontainerAgent{
 					ID:   proc.agent.ID,
 					Name: proc.agent.Name,
@@ -977,7 +977,7 @@ func (api *API) maybeInjectSubAgentIntoContainerLocked(ctx context.Context, dc c
 	)

 	// Check if subagent already exists for this devcontainer.
-	recreateSubAgent := false
+	maybeRecreateSubAgent := false
 	proc, injected := api.injectedSubAgentProcs[dc.WorkspaceFolder]
 	if injected {
 		if proc.containerID == container.ID && proc.ctx.Err() == nil {
@@ -992,12 +992,15 @@ func (api *API) maybeInjectSubAgentIntoContainerLocked(ctx context.Context, dc c
 			logger.Debug(ctx, "container ID changed, injecting subagent into new container",
 				slog.F("old_container_id", proc.containerID),
 			)
-			recreateSubAgent = true
+			maybeRecreateSubAgent = proc.agent.ID != uuid.Nil
 		}
 		// Container ID changed or the subagent process is not running,
 		// stop the existing subagent context to replace it.
 		proc.stop()
+	} else {
+		// Set SubAgent defaults.
+		proc.agent.OperatingSystem = "linux" // Assuming Linux for devcontainers.
 	}

 	// Prepare the subAgentProcess to be used when running the subagent.
@@ -1090,36 +1093,29 @@ func (api *API) maybeInjectSubAgentIntoContainerLocked(ctx context.Context, dc c
 	// 	logger.Warn(ctx, "set CAP_NET_ADMIN on agent binary failed", slog.Error(err))
 	// }

-	// Detect workspace folder by executing `pwd` in the container.
-	// NOTE(mafredri): This is a quick and dirty way to detect the
-	// workspace folder inside the container. In the future we will
-	// rely more on `devcontainer read-configuration`.
-	var pwdBuf bytes.Buffer
-	err = api.dccli.Exec(ctx, dc.WorkspaceFolder, dc.ConfigPath, "pwd", []string{},
-		WithExecOutput(&pwdBuf, io.Discard),
-		WithExecContainerID(container.ID),
-	)
-	if err != nil {
-		return xerrors.Errorf("check workspace folder in container: %w", err)
-	}
-	directory := strings.TrimSpace(pwdBuf.String())
-	if directory == "" {
-		logger.Warn(ctx, "detected workspace folder is empty, using default workspace folder",
-			slog.F("default_workspace_folder", DevcontainerDefaultContainerWorkspaceFolder),
-		)
-		directory = DevcontainerDefaultContainerWorkspaceFolder
-	}
-
-	if proc.agent.ID != uuid.Nil && recreateSubAgent {
-		logger.Debug(ctx, "deleting existing subagent for recreation", slog.F("agent_id", proc.agent.ID))
-		client := *api.subAgentClient.Load()
-		err = client.Delete(ctx, proc.agent.ID)
-		if err != nil {
-			return xerrors.Errorf("delete existing subagent failed: %w", err)
-		}
-		proc.agent = SubAgent{}
-	}
-	if proc.agent.ID == uuid.Nil {
+	subAgentConfig := proc.agent.CloneConfig(dc)
+	if proc.agent.ID == uuid.Nil || maybeRecreateSubAgent {
+		// Detect workspace folder by executing `pwd` in the container.
+		// NOTE(mafredri): This is a quick and dirty way to detect the
+		// workspace folder inside the container. In the future we will
+		// rely more on `devcontainer read-configuration`.
+		var pwdBuf bytes.Buffer
+		err = api.dccli.Exec(ctx, dc.WorkspaceFolder, dc.ConfigPath, "pwd", []string{},
+			WithExecOutput(&pwdBuf, io.Discard),
+			WithExecContainerID(container.ID),
+		)
+		if err != nil {
+			return xerrors.Errorf("check workspace folder in container: %w", err)
+		}
+		directory := strings.TrimSpace(pwdBuf.String())
+		if directory == "" {
+			logger.Warn(ctx, "detected workspace folder is empty, using default workspace folder",
+				slog.F("default_workspace_folder", DevcontainerDefaultContainerWorkspaceFolder),
+			)
+			directory = DevcontainerDefaultContainerWorkspaceFolder
+		}
+		subAgentConfig.Directory = directory

 		displayAppsMap := map[codersdk.DisplayApp]bool{
 			// NOTE(DanielleMaywood):
 			// We use the same defaults here as set in terraform-provider-coder.
@@ -1138,6 +1134,13 @@ func (api *API) maybeInjectSubAgentIntoContainerLocked(ctx context.Context, dc c
 		for _, customization := range coderCustomization {
 			for app, enabled := range customization.DisplayApps {
+				if _, ok := displayAppsMap[app]; !ok {
+					logger.Warn(ctx, "unknown display app in devcontainer customization, ignoring",
+						slog.F("app", app),
+						slog.F("enabled", enabled),
+					)
+					continue
+				}
 				displayAppsMap[app] = enabled
 			}
 		}
@@ -1149,26 +1152,41 @@ func (api *API) maybeInjectSubAgentIntoContainerLocked(ctx context.Context, dc c
 				displayApps = append(displayApps, app)
 			}
 		}
+		slices.Sort(displayApps)
+
+		subAgentConfig.DisplayApps = displayApps
+	}
+
+	deleteSubAgent := proc.agent.ID != uuid.Nil && maybeRecreateSubAgent && !proc.agent.EqualConfig(subAgentConfig)
+	if deleteSubAgent {
+		logger.Debug(ctx, "deleting existing subagent for recreation", slog.F("agent_id", proc.agent.ID))
+		client := *api.subAgentClient.Load()
+		err = client.Delete(ctx, proc.agent.ID)
+		if err != nil {
+			return xerrors.Errorf("delete existing subagent failed: %w", err)
+		}
+		proc.agent = SubAgent{} // Clear agent to signal that we need to create a new one.
+	}
+
+	if proc.agent.ID == uuid.Nil {
 		logger.Debug(ctx, "creating new subagent",
-			slog.F("directory", directory),
-			slog.F("display_apps", displayApps),
+			slog.F("directory", subAgentConfig.Directory),
+			slog.F("display_apps", subAgentConfig.DisplayApps),
 		)

 		// Create new subagent record in the database to receive the auth token.
 		client := *api.subAgentClient.Load()
-		proc.agent, err = client.Create(ctx, SubAgent{
-			Name:            dc.Name,
-			Directory:       directory,
-			OperatingSystem: "linux", // Assuming Linux for devcontainers.
-			Architecture:    arch,
-			DisplayApps:     displayApps,
-		})
+		newSubAgent, err := client.Create(ctx, subAgentConfig)
 		if err != nil {
 			return xerrors.Errorf("create subagent failed: %w", err)
 		}
+		proc.agent = newSubAgent

 		logger.Info(ctx, "created new subagent", slog.F("agent_id", proc.agent.ID))
+	} else {
+		logger.Debug(ctx, "subagent already exists, skipping recreation",
+			slog.F("agent_id", proc.agent.ID),
+		)
 	}

 	api.mu.Lock() // Re-lock to update the agent.
```
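Condensed, the new create-or-reuse flow above looks roughly like this. This is a sketch over simplified types and function values, not the actual method; `del` and `create` stand in for the SubAgentClient calls:

```go
package example

import "github.com/google/uuid"

// SubAgent is a simplified stand-in for the real struct
// (Name, Directory, OperatingSystem, Architecture, DisplayApps, ...).
type SubAgent struct {
	ID uuid.UUID
}

// reconcile sketches the decision flow: only "maybe" recreate when the
// container changed, delete only when the configuration actually drifted,
// and otherwise keep the existing agent (and its auth token).
func reconcile(
	current, desired SubAgent, containerChanged bool,
	equal func(a, b SubAgent) bool,
	del func(uuid.UUID) error,
	create func(SubAgent) (SubAgent, error),
) (SubAgent, error) {
	maybeRecreate := containerChanged && current.ID != uuid.Nil
	if maybeRecreate && !equal(current, desired) {
		if err := del(current.ID); err != nil {
			return SubAgent{}, err
		}
		current = SubAgent{} // Clear to signal that we need to create a new one.
	}
	if current.ID == uuid.Nil {
		return create(desired) // New, or config changed: create a fresh agent.
	}
	return current, nil // Config unchanged: reuse, no delete/create round trip.
}
```

The net effect is that a container restart with an unchanged devcontainer configuration reuses the existing sub agent instead of deleting and recreating it, which is the behavior the commit title describes.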

View File

```diff
@@ -212,6 +212,7 @@ func (w *fakeWatcher) sendEventWaitNextCalled(ctx context.Context, event fsnotif
 // fakeSubAgentClient implements SubAgentClient for testing purposes.
 type fakeSubAgentClient struct {
+	logger slog.Logger
 	agents map[uuid.UUID]agentcontainers.SubAgent

 	listErrC chan error // If set, send to return error, close to return nil.
@@ -240,6 +241,7 @@ func (m *fakeSubAgentClient) List(ctx context.Context) ([]agentcontainers.SubAge
 }

 func (m *fakeSubAgentClient) Create(ctx context.Context, agent agentcontainers.SubAgent) (agentcontainers.SubAgent, error) {
+	m.logger.Debug(ctx, "creating sub agent", slog.F("agent", agent))
 	if m.createErrC != nil {
 		select {
 		case <-ctx.Done():
@@ -261,6 +263,7 @@ func (m *fakeSubAgentClient) Create(ctx context.Context, agent agentcontainers.S
 }

 func (m *fakeSubAgentClient) Delete(ctx context.Context, id uuid.UUID) error {
+	m.logger.Debug(ctx, "deleting sub agent", slog.F("id", id.String()))
 	if m.deleteErrC != nil {
 		select {
 		case <-ctx.Done():
@@ -1245,6 +1248,7 @@ func TestAPI(t *testing.T) {
 			mClock  = quartz.NewMock(t)
 			mCCLI   = acmock.NewMockContainerCLI(gomock.NewController(t))
 			fakeSAC = &fakeSubAgentClient{
+				logger:     logger.Named("fakeSubAgentClient"),
 				createErrC: make(chan error, 1),
 				deleteErrC: make(chan error, 1),
 			}
@@ -1270,7 +1274,7 @@ func TestAPI(t *testing.T) {
 		mCCLI.EXPECT().List(gomock.Any()).Return(codersdk.WorkspaceAgentListContainersResponse{
 			Containers: []codersdk.WorkspaceAgentContainer{testContainer},
-		}, nil).Times(1 + 3) // 1 initial call + 3 updates.
+		}, nil).Times(3) // 1 initial call + 2 updates.
 		gomock.InOrder(
 			mCCLI.EXPECT().DetectArchitecture(gomock.Any(), "test-container-id").Return(runtime.GOARCH, nil),
 			mCCLI.EXPECT().ExecAs(gomock.Any(), "test-container-id", "root", "mkdir", "-p", "/.coder-agent").Return(nil, nil),
@@ -1315,19 +1319,20 @@ func TestAPI(t *testing.T) {
 		tickerTrap.MustWait(ctx).MustRelease(ctx)
 		tickerTrap.Close()

-		// Ensure we only inject the agent once.
-		for i := range 3 {
-			_, aw := mClock.AdvanceNext()
-			aw.MustWait(ctx)
-			t.Logf("Iteration %d: agents created: %d", i+1, len(fakeSAC.created))
+		// Refresh twice to ensure idempotency of agent creation.
+		err = api.RefreshContainers(ctx)
+		require.NoError(t, err, "refresh containers should not fail")
+		t.Logf("Agents created: %d, deleted: %d", len(fakeSAC.created), len(fakeSAC.deleted))
+		err = api.RefreshContainers(ctx)
+		require.NoError(t, err, "refresh containers should not fail")
+		t.Logf("Agents created: %d, deleted: %d", len(fakeSAC.created), len(fakeSAC.deleted))

-			// Verify agent was created.
-			require.Len(t, fakeSAC.created, 1)
-			assert.Equal(t, "test-container", fakeSAC.created[0].Name)
-			assert.Equal(t, "/workspaces", fakeSAC.created[0].Directory)
-			assert.Len(t, fakeSAC.deleted, 0)
-		}
+		// Verify agent was created.
+		require.Len(t, fakeSAC.created, 1)
+		assert.Equal(t, "test-container", fakeSAC.created[0].Name)
+		assert.Equal(t, "/workspaces", fakeSAC.created[0].Directory)
+		assert.Len(t, fakeSAC.deleted, 0)

 		t.Log("Agent injected successfully, now testing reinjection into the same container...")
@@ -1342,14 +1347,15 @@ func TestAPI(t *testing.T) {
 			}
 			return errTestTermination
 		})
-		<-terminated
+		select {
+		case <-ctx.Done():
+			t.Fatal("timeout waiting for agent termination")
+		case <-terminated:
+		}

 		t.Log("Waiting for agent reinjection...")

 		// Expect the agent to be reinjected.
-		mCCLI.EXPECT().List(gomock.Any()).Return(codersdk.WorkspaceAgentListContainersResponse{
-			Containers: []codersdk.WorkspaceAgentContainer{testContainer},
-		}, nil).Times(3) // 3 updates.
 		gomock.InOrder(
 			mCCLI.EXPECT().DetectArchitecture(gomock.Any(), "test-container-id").Return(runtime.GOARCH, nil),
 			mCCLI.EXPECT().ExecAs(gomock.Any(), "test-container-id", "root", "mkdir", "-p", "/.coder-agent").Return(nil, nil),
@@ -1357,41 +1363,9 @@ func TestAPI(t *testing.T) {
 			mCCLI.EXPECT().ExecAs(gomock.Any(), "test-container-id", "root", "chmod", "0755", "/.coder-agent", "/.coder-agent/coder").Return(nil, nil),
 		)

-		// Allow agent reinjection to succeed.
-		testutil.RequireSend(ctx, t, fakeDCCLI.execErrC, func(cmd string, args ...string) error {
-			assert.Equal(t, "pwd", cmd)
-			assert.Empty(t, args)
-			return nil
-		}) // Exec pwd.
-
-		// Ensure we only inject the agent once.
-		for i := range 3 {
-			_, aw := mClock.AdvanceNext()
-			aw.MustWait(ctx)
-			t.Logf("Iteration %d: agents created: %d", i+1, len(fakeSAC.created))
-
-			// Verify that the agent was reused.
-			require.Len(t, fakeSAC.created, 1)
-			assert.Len(t, fakeSAC.deleted, 0)
-		}
-
-		t.Log("Agent reinjected successfully, now testing agent deletion and recreation...")
-
-		// New container ID means the agent will be recreated.
-		testContainer.ID = "new-test-container-id" // Simulate a new container ID after recreation.
-		// Expect the agent to be injected.
-		mCCLI.EXPECT().List(gomock.Any()).Return(codersdk.WorkspaceAgentListContainersResponse{
-			Containers: []codersdk.WorkspaceAgentContainer{testContainer},
-		}, nil).Times(3) // 3 updates.
-		gomock.InOrder(
-			mCCLI.EXPECT().DetectArchitecture(gomock.Any(), "new-test-container-id").Return(runtime.GOARCH, nil),
-			mCCLI.EXPECT().ExecAs(gomock.Any(), "new-test-container-id", "root", "mkdir", "-p", "/.coder-agent").Return(nil, nil),
-			mCCLI.EXPECT().Copy(gomock.Any(), "new-test-container-id", coderBin, "/.coder-agent/coder").Return(nil),
-			mCCLI.EXPECT().ExecAs(gomock.Any(), "new-test-container-id", "root", "chmod", "0755", "/.coder-agent", "/.coder-agent/coder").Return(nil, nil),
-		)
-
-		// Terminate the agent and verify it can be reinjected.
+		// Verify that the agent has started.
+		agentStarted := make(chan struct{})
+		continueTerminate := make(chan struct{})
 		terminated = make(chan struct{})
 		testutil.RequireSend(ctx, t, fakeDCCLI.execErrC, func(_ string, args ...string) error {
 			defer close(terminated)
@@ -1400,11 +1374,77 @@ func TestAPI(t *testing.T) {
 			} else {
 				assert.Fail(t, `want "agent" command argument`)
 			}
+			close(agentStarted)
+			select {
+			case <-ctx.Done():
+				t.Error("timeout waiting for agent continueTerminate")
+			case <-continueTerminate:
+			}
 			return errTestTermination
 		})
-		<-terminated

-		// Simulate the agent deletion.
+	WaitStartLoop:
+		for {
+			// Agent reinjection will succeed and we will not re-create the
+			// agent, nor re-probe pwd.
+			mCCLI.EXPECT().List(gomock.Any()).Return(codersdk.WorkspaceAgentListContainersResponse{
+				Containers: []codersdk.WorkspaceAgentContainer{testContainer},
+			}, nil).Times(1) // 1 update.
+			err = api.RefreshContainers(ctx)
+			require.NoError(t, err, "refresh containers should not fail")
+			t.Logf("Agents created: %d, deleted: %d", len(fakeSAC.created), len(fakeSAC.deleted))
+
+			select {
+			case <-agentStarted:
+				break WaitStartLoop
+			case <-ctx.Done():
+				t.Fatal("timeout waiting for agent to start")
+			default:
+			}
+		}
+
+		// Verify that the agent was reused.
+		require.Len(t, fakeSAC.created, 1)
+		assert.Len(t, fakeSAC.deleted, 0)
+
+		t.Log("Agent reinjected successfully, now testing agent deletion and recreation...")
+
+		// New container ID means the agent will be recreated.
+		testContainer.ID = "new-test-container-id" // Simulate a new container ID after recreation.
+		// Expect the agent to be injected.
+		mCCLI.EXPECT().List(gomock.Any()).Return(codersdk.WorkspaceAgentListContainersResponse{
+			Containers: []codersdk.WorkspaceAgentContainer{testContainer},
+		}, nil).Times(1) // 1 update.
+		gomock.InOrder(
+			mCCLI.EXPECT().DetectArchitecture(gomock.Any(), "new-test-container-id").Return(runtime.GOARCH, nil),
+			mCCLI.EXPECT().ExecAs(gomock.Any(), "new-test-container-id", "root", "mkdir", "-p", "/.coder-agent").Return(nil, nil),
+			mCCLI.EXPECT().Copy(gomock.Any(), "new-test-container-id", coderBin, "/.coder-agent/coder").Return(nil),
+			mCCLI.EXPECT().ExecAs(gomock.Any(), "new-test-container-id", "root", "chmod", "0755", "/.coder-agent", "/.coder-agent/coder").Return(nil, nil),
+		)
+
+		fakeDCCLI.readConfig.MergedConfiguration.Customizations.Coder = []agentcontainers.CoderCustomization{
+			{
+				DisplayApps: map[codersdk.DisplayApp]bool{
+					codersdk.DisplayAppSSH:            true,
+					codersdk.DisplayAppWebTerminal:    true,
+					codersdk.DisplayAppVSCodeDesktop:  true,
+					codersdk.DisplayAppVSCodeInsiders: true,
+					codersdk.DisplayAppPortForward:    true,
+				},
+			},
+		}
+
+		// Terminate the running agent.
+		close(continueTerminate)
+		select {
+		case <-ctx.Done():
+			t.Fatal("timeout waiting for agent termination")
+		case <-terminated:
+		}
+
+		// Simulate the agent deletion (this happens because the
+		// devcontainer configuration changed).
 		testutil.RequireSend(ctx, t, fakeSAC.deleteErrC, nil)
 		// Expect the agent to be recreated.
 		testutil.RequireSend(ctx, t, fakeSAC.createErrC, nil)
```
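The `WaitStartLoop` above relies on Go's labeled break: inside a `select`, a bare `break` would only leave the `select`, not the surrounding `for`. A minimal standalone illustration of the pattern, where `poll` is a stand-in for the `api.RefreshContainers` call in the test:

```go
package example

import "context"

// waitStarted polls until started is closed, the context expires,
// or a poll attempt fails.
func waitStarted(ctx context.Context, started <-chan struct{}, poll func(context.Context) error) error {
WaitLoop:
	for {
		if err := poll(ctx); err != nil {
			return err
		}
		select {
		case <-started:
			break WaitLoop // A bare break would only exit the select.
		case <-ctx.Done():
			return ctx.Err()
		default:
			// Not started yet; fall through and poll again.
		}
	}
	return nil
}
```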
```diff
@@ -1414,13 +1454,9 @@ func TestAPI(t *testing.T) {
 			return nil
 		}) // Exec pwd.

-		// Advance the clock to run updaterLoop.
-		for i := range 3 {
-			_, aw := mClock.AdvanceNext()
-			aw.MustWait(ctx)
-			t.Logf("Iteration %d: agents created: %d, deleted: %d", i+1, len(fakeSAC.created), len(fakeSAC.deleted))
-		}
+		err = api.RefreshContainers(ctx)
+		require.NoError(t, err, "refresh containers should not fail")
+		t.Logf("Agents created: %d, deleted: %d", len(fakeSAC.created), len(fakeSAC.deleted))

 		// Verify the agent was deleted and recreated.
 		require.Len(t, fakeSAC.deleted, 1, "there should be one deleted agent after recreation")
@@ -1453,6 +1489,7 @@ func TestAPI(t *testing.T) {
 			mClock  = quartz.NewMock(t)
 			mCCLI   = acmock.NewMockContainerCLI(gomock.NewController(t))
 			fakeSAC = &fakeSubAgentClient{
+				logger: logger.Named("fakeSubAgentClient"),
 				agents: map[uuid.UUID]agentcontainers.SubAgent{
 					existingAgentID: existingAgent,
 				},
@@ -1577,7 +1614,10 @@ func TestAPI(t *testing.T) {
 			logger = testutil.Logger(t)
 			mClock = quartz.NewMock(t)
 			mCCLI  = acmock.NewMockContainerCLI(gomock.NewController(t))
-			fSAC   = &fakeSubAgentClient{createErrC: make(chan error, 1)}
+			fSAC   = &fakeSubAgentClient{
+				logger:     logger.Named("fakeSubAgentClient"),
+				createErrC: make(chan error, 1),
+			}
 			fDCCLI = &fakeDevcontainerCLI{
 				readConfig: agentcontainers.DevcontainerConfig{
 					MergedConfiguration: agentcontainers.DevcontainerConfiguration{
```
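Beyond the new logger, the fakeSubAgentClient's `listErrC`/`createErrC`/`deleteErrC` channels follow a "send to fail, close to succeed" convention (per the struct comment above) that lets the test gate exactly when a call returns. A self-contained sketch of that convention; `fakeClient` here is illustrative, not the type from the diff:

```go
package example

import (
	"context"

	"golang.org/x/xerrors"
)

type fakeClient struct {
	createErrC chan error // If set, send to return error, close to return nil.
	created    []string
}

// Create blocks until the test either sends an error (the call fails)
// or closes createErrC (the call succeeds). A nil channel disables gating.
func (f *fakeClient) Create(ctx context.Context, name string) error {
	if f.createErrC != nil {
		select {
		case <-ctx.Done():
			return xerrors.New("test timed out waiting on createErrC")
		case err := <-f.createErrC: // A closed channel yields a nil error.
			if err != nil {
				return err
			}
		}
	}
	f.created = append(f.created, name)
	return nil
}
```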

View File

```diff
@@ -2,6 +2,7 @@ package agentcontainers

 import (
 	"context"
+	"slices"

 	"github.com/google/uuid"
 	"golang.org/x/xerrors"
@@ -23,6 +24,26 @@ type SubAgent struct {
 	DisplayApps []codersdk.DisplayApp
 }

+// CloneConfig makes a copy of SubAgent without ID and AuthToken. The
+// name is inherited from the devcontainer.
+func (s SubAgent) CloneConfig(dc codersdk.WorkspaceAgentDevcontainer) SubAgent {
+	return SubAgent{
+		Name:            dc.Name,
+		Directory:       s.Directory,
+		Architecture:    s.Architecture,
+		OperatingSystem: s.OperatingSystem,
+		DisplayApps:     slices.Clone(s.DisplayApps),
+	}
+}
+
+func (s SubAgent) EqualConfig(other SubAgent) bool {
+	return s.Name == other.Name &&
+		s.Directory == other.Directory &&
+		s.Architecture == other.Architecture &&
+		s.OperatingSystem == other.OperatingSystem &&
+		slices.Equal(s.DisplayApps, other.DisplayApps)
+}
+
 // SubAgentClient is an interface for managing sub agents and allows
 // changing the implementation without having to deal with the
 // agentproto package directly.
```
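A hypothetical use of the two helpers above, mirroring how api.go decides between reuse and recreation; the package paths and concrete values are assumptions for illustration. Note that EqualConfig compares DisplayApps with slices.Equal, which is order-sensitive, hence the `slices.Sort(displayApps)` before the comparison in api.go:

```go
package example

import (
	"github.com/google/uuid"

	"github.com/coder/coder/v2/agent/agentcontainers"
	"github.com/coder/coder/v2/codersdk"
)

// needsRecreate reports whether an existing sub agent's configuration
// drifted from the desired one. CloneConfig copies everything except
// ID and AuthToken (taking the name from the devcontainer), so
// EqualConfig compares pure configuration and ignores identity.
func needsRecreate(
	current agentcontainers.SubAgent,
	dc codersdk.WorkspaceAgentDevcontainer,
	directory string,
	displayApps []codersdk.DisplayApp,
) bool {
	desired := current.CloneConfig(dc)
	desired.Directory = directory
	desired.DisplayApps = displayApps // Must be sorted for a stable comparison.
	return current.ID != uuid.Nil && !current.EqualConfig(desired)
}
```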

View File

```diff
@@ -116,7 +116,6 @@ export const AgentDevcontainerCard: FC<AgentDevcontainerCardProps> = ({
 			if (dc.id === devcontainer.id) {
 				return {
 					...dc,
-					agent: null,
 					container: null,
 					status: "starting",
 				};
```