feat(agent): send devcontainer CLI logs during recreate (#17845)

We need a way to surface what's happening to the user. Since autostart
already sends logs here, it's natural that we do so during recreate as well.

Updates #16424
Mathias Fredriksson
2025-05-15 16:06:56 +03:00
committed by GitHub
parent 6e1ba75b06
commit 3de0003e4b
7 changed files with 342 additions and 25 deletions
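
In short, the recreate handler now routes the devcontainer CLI output through the agent's script logging pipeline, keyed by the log source ID of the devcontainer's associated script. A condensed sketch of that flow, assembled from the handler changes below (error handling and the deferred flush omitted):

    logSourceID := api.devcontainerLogSourceIDs[workspaceFolder]
    if logSourceID == uuid.Nil {
        // No matching script for this devcontainer; fall back to the external log source.
        logSourceID = agentsdk.ExternalLogSourceID
    }
    scriptLogger := api.scriptLogger(logSourceID)
    infoW := agentsdk.LogsWriter(ctx, scriptLogger.Send, logSourceID, codersdk.LogLevelInfo)
    errW := agentsdk.LogsWriter(ctx, scriptLogger.Send, logSourceID, codersdk.LogLevelError)
    _, err = api.dccli.Up(ctx, workspaceFolder, configPath, WithOutput(infoW, errW), WithRemoveExistingContainer())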

View File

@@ -1935,8 +1935,6 @@ func TestAgent_ReconnectingPTYContainer(t *testing.T) {
t.Skip("Set CODER_TEST_USE_DOCKER=1 to run this test")
}
- ctx := testutil.Context(t, testutil.WaitLong)
pool, err := dockertest.NewPool("")
require.NoError(t, err, "Could not connect to docker")
ct, err := pool.RunWithOptions(&dockertest.RunOptions{
@@ -1948,10 +1946,10 @@ func TestAgent_ReconnectingPTYContainer(t *testing.T) {
config.RestartPolicy = docker.RestartPolicy{Name: "no"}
})
require.NoError(t, err, "Could not start container")
- t.Cleanup(func() {
+ defer func() {
err := pool.Purge(ct)
require.NoError(t, err, "Could not stop container")
- })
+ }()
// Wait for container to start
require.Eventually(t, func() bool {
ct, ok := pool.ContainerByName(ct.Container.Name)
@@ -1962,6 +1960,7 @@ func TestAgent_ReconnectingPTYContainer(t *testing.T) {
conn, _, _, _, _ := setupAgent(t, agentsdk.Manifest{}, 0, func(_ *agenttest.Client, o *agent.Options) {
o.ExperimentalDevcontainersEnabled = true
})
+ ctx := testutil.Context(t, testutil.WaitLong)
ac, err := conn.ReconnectingPTY(ctx, uuid.New(), 80, 80, "/bin/sh", func(arp *workspacesdk.AgentReconnectingPTYInit) {
arp.Container = ct.Container.ID
})
@@ -2005,9 +2004,6 @@ func TestAgent_DevcontainerAutostart(t *testing.T) {
t.Skip("Set CODER_TEST_USE_DOCKER=1 to run this test")
}
- ctx := testutil.Context(t, testutil.WaitLong)
- // Connect to Docker
pool, err := dockertest.NewPool("")
require.NoError(t, err, "Could not connect to docker")
@@ -2051,7 +2047,7 @@ func TestAgent_DevcontainerAutostart(t *testing.T) {
},
},
}
- // nolint: dogsled
+ //nolint:dogsled
conn, _, _, _, _ := setupAgent(t, manifest, 0, func(_ *agenttest.Client, o *agent.Options) {
o.ExperimentalDevcontainersEnabled = true
})
@@ -2079,8 +2075,7 @@ func TestAgent_DevcontainerAutostart(t *testing.T) {
return false
}, testutil.WaitSuperLong, testutil.IntervalMedium, "no container with workspace folder label found")
- t.Cleanup(func() {
+ defer func() {
// We can't rely on pool here because the container is not
// managed by it (it is managed by @devcontainer/cli).
err := pool.Client.RemoveContainer(docker.RemoveContainerOptions{
@@ -2089,13 +2084,15 @@ func TestAgent_DevcontainerAutostart(t *testing.T) {
Force: true,
})
assert.NoError(t, err, "remove container")
- })
+ }()
containerInfo, err := pool.Client.InspectContainer(container.ID)
require.NoError(t, err, "inspect container")
t.Logf("Container state: status: %v", containerInfo.State.Status)
require.True(t, containerInfo.State.Running, "container should be running")
+ ctx := testutil.Context(t, testutil.WaitLong)
ac, err := conn.ReconnectingPTY(ctx, uuid.New(), 80, 80, "", func(opts *workspacesdk.AgentReconnectingPTYInit) {
opts.Container = container.ID
})
@@ -2124,6 +2121,173 @@ func TestAgent_DevcontainerAutostart(t *testing.T) {
require.NoError(t, err, "file should exist outside devcontainer")
}
// TestAgent_DevcontainerRecreate tests that RecreateDevcontainer
// recreates a devcontainer and emits logs.
//
// This tests end-to-end functionality of auto-starting a devcontainer.
// It runs "devcontainer up" which creates a real Docker container. As
// such, it does not run by default in CI.
//
// You can run it manually as follows:
//
// CODER_TEST_USE_DOCKER=1 go test -count=1 ./agent -run TestAgent_DevcontainerRecreate
func TestAgent_DevcontainerRecreate(t *testing.T) {
if os.Getenv("CODER_TEST_USE_DOCKER") != "1" {
t.Skip("Set CODER_TEST_USE_DOCKER=1 to run this test")
}
t.Parallel()
pool, err := dockertest.NewPool("")
require.NoError(t, err, "Could not connect to docker")
// Prepare temporary devcontainer for test (mywork).
devcontainerID := uuid.New()
devcontainerLogSourceID := uuid.New()
workspaceFolder := filepath.Join(t.TempDir(), "mywork")
t.Logf("Workspace folder: %s", workspaceFolder)
devcontainerPath := filepath.Join(workspaceFolder, ".devcontainer")
err = os.MkdirAll(devcontainerPath, 0o755)
require.NoError(t, err, "create devcontainer directory")
devcontainerFile := filepath.Join(devcontainerPath, "devcontainer.json")
err = os.WriteFile(devcontainerFile, []byte(`{
"name": "mywork",
"image": "busybox:latest",
"cmd": ["sleep", "infinity"]
}`), 0o600)
require.NoError(t, err, "write devcontainer.json")
manifest := agentsdk.Manifest{
// Set up pre-conditions for auto-starting a devcontainer, the
// script is used to extract the log source ID.
Devcontainers: []codersdk.WorkspaceAgentDevcontainer{
{
ID: devcontainerID,
Name: "test",
WorkspaceFolder: workspaceFolder,
},
},
Scripts: []codersdk.WorkspaceAgentScript{
{
ID: devcontainerID,
LogSourceID: devcontainerLogSourceID,
},
},
}
//nolint:dogsled
conn, client, _, _, _ := setupAgent(t, manifest, 0, func(_ *agenttest.Client, o *agent.Options) {
o.ExperimentalDevcontainersEnabled = true
})
ctx := testutil.Context(t, testutil.WaitLong)
// We enabled autostart for the devcontainer, so ready is a good
// indication that the devcontainer is up and running. Importantly,
// this also means that the devcontainer startup is no longer
// producing logs that may interfere with the recreate logs.
testutil.Eventually(ctx, t, func(context.Context) bool {
states := client.GetLifecycleStates()
return slices.Contains(states, codersdk.WorkspaceAgentLifecycleReady)
}, testutil.IntervalMedium, "devcontainer not ready")
t.Logf("Looking for container with label: devcontainer.local_folder=%s", workspaceFolder)
var container docker.APIContainers
testutil.Eventually(ctx, t, func(context.Context) bool {
containers, err := pool.Client.ListContainers(docker.ListContainersOptions{All: true})
if err != nil {
t.Logf("Error listing containers: %v", err)
return false
}
for _, c := range containers {
t.Logf("Found container: %s with labels: %v", c.ID[:12], c.Labels)
if v, ok := c.Labels["devcontainer.local_folder"]; ok && v == workspaceFolder {
t.Logf("Found matching container: %s", c.ID[:12])
container = c
return true
}
}
return false
}, testutil.IntervalMedium, "no container with workspace folder label found")
defer func(container docker.APIContainers) {
// We can't rely on pool here because the container is not
// managed by it (it is managed by @devcontainer/cli).
err := pool.Client.RemoveContainer(docker.RemoveContainerOptions{
ID: container.ID,
RemoveVolumes: true,
Force: true,
})
assert.Error(t, err, "container should be removed by recreate")
}(container)
ctx = testutil.Context(t, testutil.WaitLong) // Reset context.
// Capture logs via ScriptLogger.
logsCh := make(chan *proto.BatchCreateLogsRequest, 1)
client.SetLogsChannel(logsCh)
// Invoke recreate to trigger the destruction and recreation of the
// devcontainer, we do it in a goroutine so we can process logs
// concurrently.
go func(container docker.APIContainers) {
err := conn.RecreateDevcontainer(ctx, container.ID)
assert.NoError(t, err, "recreate devcontainer should succeed")
}(container)
t.Logf("Checking recreate logs for outcome...")
// Wait for the logs to be emitted, the @devcontainer/cli up command
// will emit a log with the outcome at the end suggesting we did
// receive all the logs.
waitForOutcomeLoop:
for {
batch := testutil.RequireReceive(ctx, t, logsCh)
if bytes.Equal(batch.LogSourceId, devcontainerLogSourceID[:]) {
for _, log := range batch.Logs {
t.Logf("Received log: %s", log.Output)
if strings.Contains(log.Output, "\"outcome\"") {
break waitForOutcomeLoop
}
}
}
}
t.Logf("Checking there's a new container with label: devcontainer.local_folder=%s", workspaceFolder)
// Make sure the container exists and isn't the same as the old one.
testutil.Eventually(ctx, t, func(context.Context) bool {
containers, err := pool.Client.ListContainers(docker.ListContainersOptions{All: true})
if err != nil {
t.Logf("Error listing containers: %v", err)
return false
}
for _, c := range containers {
t.Logf("Found container: %s with labels: %v", c.ID[:12], c.Labels)
if v, ok := c.Labels["devcontainer.local_folder"]; ok && v == workspaceFolder {
if c.ID == container.ID {
t.Logf("Found same container: %s", c.ID[:12])
return false
}
t.Logf("Found new container: %s", c.ID[:12])
container = c
return true
}
}
return false
}, testutil.IntervalMedium, "new devcontainer not found")
defer func(container docker.APIContainers) {
// We can't rely on pool here because the container is not
// managed by it (it is managed by @devcontainer/cli).
err := pool.Client.RemoveContainer(docker.RemoveContainerOptions{
ID: container.ID,
RemoveVolumes: true,
Force: true,
})
assert.NoError(t, err, "remove container")
}(container)
}
func TestAgent_Dial(t *testing.T) {
t.Parallel()

View File

@@ -20,6 +20,7 @@ import (
"github.com/coder/coder/v2/agent/agentexec"
"github.com/coder/coder/v2/coderd/httpapi"
"github.com/coder/coder/v2/codersdk"
+ "github.com/coder/coder/v2/codersdk/agentsdk"
"github.com/coder/quartz"
)
@@ -43,6 +44,7 @@ type API struct {
cl Lister
dccli DevcontainerCLI
clock quartz.Clock
+ scriptLogger func(logSourceID uuid.UUID) ScriptLogger
// lockCh protects the below fields. We use a channel instead of a
// mutex so we can handle cancellation properly.
@@ -52,6 +54,8 @@ type API struct {
devcontainerNames map[string]struct{} // Track devcontainer names to avoid duplicates.
knownDevcontainers []codersdk.WorkspaceAgentDevcontainer // Track predefined and runtime-detected devcontainers.
configFileModifiedTimes map[string]time.Time // Track when config files were last modified.
+ devcontainerLogSourceIDs map[string]uuid.UUID // Track devcontainer log source IDs.
}
// Option is a functional option for API.
@@ -91,13 +95,30 @@ func WithDevcontainerCLI(dccli DevcontainerCLI) Option {
// WithDevcontainers sets the known devcontainers for the API. This
// allows the API to be aware of devcontainers defined in the workspace
// agent manifest.
- func WithDevcontainers(devcontainers []codersdk.WorkspaceAgentDevcontainer) Option {
+ func WithDevcontainers(devcontainers []codersdk.WorkspaceAgentDevcontainer, scripts []codersdk.WorkspaceAgentScript) Option {
return func(api *API) {
- if len(devcontainers) > 0 {
- api.knownDevcontainers = slices.Clone(devcontainers)
- api.devcontainerNames = make(map[string]struct{}, len(devcontainers))
- for _, devcontainer := range devcontainers {
- api.devcontainerNames[devcontainer.Name] = struct{}{}
+ if len(devcontainers) == 0 {
+ return
+ }
+ api.knownDevcontainers = slices.Clone(devcontainers)
+ api.devcontainerNames = make(map[string]struct{}, len(devcontainers))
+ api.devcontainerLogSourceIDs = make(map[string]uuid.UUID)
+ for _, devcontainer := range devcontainers {
+ api.devcontainerNames[devcontainer.Name] = struct{}{}
+ for _, script := range scripts {
+ // The devcontainer scripts match the devcontainer ID for
+ // identification.
+ if script.ID == devcontainer.ID {
+ api.devcontainerLogSourceIDs[devcontainer.WorkspaceFolder] = script.LogSourceID
+ break
+ }
+ }
+ if api.devcontainerLogSourceIDs[devcontainer.WorkspaceFolder] == uuid.Nil {
+ api.logger.Error(api.ctx, "devcontainer log source ID not found for devcontainer",
+ slog.F("devcontainer", devcontainer.Name),
+ slog.F("workspace_folder", devcontainer.WorkspaceFolder),
+ slog.F("config_path", devcontainer.ConfigPath),
+ )
+ }
}
}
}
@@ -112,6 +133,27 @@ func WithWatcher(w watcher.Watcher) Option {
}
}
// ScriptLogger is an interface for sending devcontainer logs to the
// controlplane.
type ScriptLogger interface {
Send(ctx context.Context, log ...agentsdk.Log) error
Flush(ctx context.Context) error
}
// noopScriptLogger is a no-op implementation of the ScriptLogger
// interface.
type noopScriptLogger struct{}
func (noopScriptLogger) Send(context.Context, ...agentsdk.Log) error { return nil }
func (noopScriptLogger) Flush(context.Context) error { return nil }
// WithScriptLogger sets the script logger provider for devcontainer operations.
func WithScriptLogger(scriptLogger func(logSourceID uuid.UUID) ScriptLogger) Option {
return func(api *API) {
api.scriptLogger = scriptLogger
}
}
// NewAPI returns a new API with the given options applied.
func NewAPI(logger slog.Logger, options ...Option) *API {
ctx, cancel := context.WithCancel(context.Background())
@@ -127,7 +169,10 @@ func NewAPI(logger slog.Logger, options ...Option) *API {
devcontainerNames: make(map[string]struct{}),
knownDevcontainers: []codersdk.WorkspaceAgentDevcontainer{},
configFileModifiedTimes: make(map[string]time.Time),
+ scriptLogger: func(uuid.UUID) ScriptLogger { return noopScriptLogger{} },
}
// The ctx and logger must be set before applying options to avoid
// nil pointer dereference.
for _, opt := range options {
opt(api)
}
@@ -426,7 +471,26 @@ func (api *API) handleDevcontainerRecreate(w http.ResponseWriter, r *http.Reques
return
}
- _, err = api.dccli.Up(ctx, workspaceFolder, configPath, WithRemoveExistingContainer())
+ // Send logs via agent logging facilities.
logSourceID := api.devcontainerLogSourceIDs[workspaceFolder]
if logSourceID == uuid.Nil {
// Fallback to the external log source ID if not found.
logSourceID = agentsdk.ExternalLogSourceID
}
scriptLogger := api.scriptLogger(logSourceID)
defer func() {
flushCtx, cancel := context.WithTimeout(api.ctx, 5*time.Second)
defer cancel()
if err := scriptLogger.Flush(flushCtx); err != nil {
api.logger.Error(flushCtx, "flush devcontainer logs failed", slog.Error(err))
}
}()
infoW := agentsdk.LogsWriter(ctx, scriptLogger.Send, logSourceID, codersdk.LogLevelInfo)
defer infoW.Close()
errW := agentsdk.LogsWriter(ctx, scriptLogger.Send, logSourceID, codersdk.LogLevelError)
defer errW.Close()
_, err = api.dccli.Up(ctx, workspaceFolder, configPath, WithOutput(infoW, errW), WithRemoveExistingContainer())
if err != nil {
httpapi.Write(ctx, w, http.StatusInternalServerError, codersdk.Response{
Message: "Could not recreate devcontainer",
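
Note on ScriptLogger: the only real implementation in this change is the agent's log sender, wired up later in this diff via a.logSender.GetScriptLogger(logSourceID). For experimenting outside the agent, a stand-in is easy to sketch; the type below is hypothetical (not part of this commit) and assumes agentsdk.Log exposes Output and Level fields:

    // stderrScriptLogger is a hypothetical ScriptLogger that prints each
    // log line to stderr instead of sending it to the control plane.
    type stderrScriptLogger struct{}

    func (stderrScriptLogger) Send(_ context.Context, logs ...agentsdk.Log) error {
        for _, l := range logs {
            fmt.Fprintf(os.Stderr, "[%s] %s\n", l.Level, l.Output)
        }
        return nil
    }

    func (stderrScriptLogger) Flush(context.Context) error { return nil }

It would be plugged in the same way as the real logger, via agentcontainers.WithScriptLogger(func(uuid.UUID) agentcontainers.ScriptLogger { return stderrScriptLogger{} }).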

View File

@@ -563,8 +563,17 @@ func TestAPI(t *testing.T) {
agentcontainers.WithWatcher(watcher.NewNoop()),
}
// Generate matching scripts for the known devcontainers
// (required to extract log source ID).
var scripts []codersdk.WorkspaceAgentScript
for i := range tt.knownDevcontainers {
scripts = append(scripts, codersdk.WorkspaceAgentScript{
ID: tt.knownDevcontainers[i].ID,
LogSourceID: uuid.New(),
})
}
if len(tt.knownDevcontainers) > 0 {
- apiOptions = append(apiOptions, agentcontainers.WithDevcontainers(tt.knownDevcontainers))
+ apiOptions = append(apiOptions, agentcontainers.WithDevcontainers(tt.knownDevcontainers, scripts))
}
api := agentcontainers.NewAPI(logger, apiOptions...)

View File

@@ -31,8 +31,18 @@ func WithRemoveExistingContainer() DevcontainerCLIUpOptions {
}
}
// WithOutput sets stdout and stderr writers for Up command logs.
func WithOutput(stdout, stderr io.Writer) DevcontainerCLIUpOptions {
return func(o *devcontainerCLIUpConfig) {
o.stdout = stdout
o.stderr = stderr
}
}
type devcontainerCLIUpConfig struct {
removeExistingContainer bool
stdout io.Writer
stderr io.Writer
}
func applyDevcontainerCLIUpOptions(opts []DevcontainerCLIUpOptions) devcontainerCLIUpConfig {
@@ -78,18 +88,28 @@ func (d *devcontainerCLI) Up(ctx context.Context, workspaceFolder, configPath st
}
cmd := d.execer.CommandContext(ctx, "devcontainer", args...)
- var stdout bytes.Buffer
- cmd.Stdout = io.MultiWriter(&stdout, &devcontainerCLILogWriter{ctx: ctx, logger: logger.With(slog.F("stdout", true))})
- cmd.Stderr = &devcontainerCLILogWriter{ctx: ctx, logger: logger.With(slog.F("stderr", true))}
+ // Capture stdout for parsing and stream logs for both default and provided writers.
+ var stdoutBuf bytes.Buffer
+ stdoutWriters := []io.Writer{&stdoutBuf, &devcontainerCLILogWriter{ctx: ctx, logger: logger.With(slog.F("stdout", true))}}
if conf.stdout != nil {
stdoutWriters = append(stdoutWriters, conf.stdout)
}
cmd.Stdout = io.MultiWriter(stdoutWriters...)
// Stream stderr logs and provided writer if any.
stderrWriters := []io.Writer{&devcontainerCLILogWriter{ctx: ctx, logger: logger.With(slog.F("stderr", true))}}
if conf.stderr != nil {
stderrWriters = append(stderrWriters, conf.stderr)
}
cmd.Stderr = io.MultiWriter(stderrWriters...)
if err := cmd.Run(); err != nil {
- if _, err2 := parseDevcontainerCLILastLine(ctx, logger, stdout.Bytes()); err2 != nil {
+ if _, err2 := parseDevcontainerCLILastLine(ctx, logger, stdoutBuf.Bytes()); err2 != nil {
err = errors.Join(err, err2)
}
return "", err
}
- result, err := parseDevcontainerCLILastLine(ctx, logger, stdout.Bytes())
+ result, err := parseDevcontainerCLILastLine(ctx, logger, stdoutBuf.Bytes())
if err != nil {
return "", err
}

View File

@@ -128,6 +128,45 @@ func TestDevcontainerCLI_ArgsAndParsing(t *testing.T) {
})
}
// TestDevcontainerCLI_WithOutput tests that WithOutput captures CLI
// logs to provided writers.
func TestDevcontainerCLI_WithOutput(t *testing.T) {
t.Parallel()
// Prepare test executable and logger.
testExePath, err := os.Executable()
require.NoError(t, err, "get test executable path")
logger := slogtest.Make(t, &slogtest.Options{IgnoreErrors: true}).Leveled(slog.LevelDebug)
ctx := testutil.Context(t, testutil.WaitMedium)
// Buffers to capture stdout and stderr.
outBuf := &bytes.Buffer{}
errBuf := &bytes.Buffer{}
// Simulate CLI execution with a standard up.log file.
wantArgs := "up --log-format json --workspace-folder /test/workspace"
testExecer := &testDevcontainerExecer{
testExePath: testExePath,
wantArgs: wantArgs,
wantError: false,
logFile: filepath.Join("testdata", "devcontainercli", "parse", "up.log"),
}
dccli := agentcontainers.NewDevcontainerCLI(logger, testExecer)
// Call Up with WithOutput to capture CLI logs.
containerID, err := dccli.Up(ctx, "/test/workspace", "", agentcontainers.WithOutput(outBuf, errBuf))
require.NoError(t, err, "Up should succeed")
require.NotEmpty(t, containerID, "expected non-empty container ID")
// Read expected log content.
expLog, err := os.ReadFile(filepath.Join("testdata", "devcontainercli", "parse", "up.log"))
require.NoError(t, err, "reading expected log file")
// Verify stdout buffer contains the CLI logs and stderr is empty.
assert.Equal(t, string(expLog), outBuf.String(), "stdout buffer should match CLI logs")
assert.Empty(t, errBuf.String(), "stderr buffer should be empty on success")
}
// testDevcontainerExecer implements the agentexec.Execer interface for testing.
type testDevcontainerExecer struct {
testExePath string

View File

@@ -7,6 +7,8 @@ import (
"github.com/go-chi/chi/v5"
+ "github.com/google/uuid"
"github.com/coder/coder/v2/agent/agentcontainers"
"github.com/coder/coder/v2/coderd/httpapi"
"github.com/coder/coder/v2/codersdk"
@@ -40,12 +42,15 @@ func (a *agent) apiHandler() (http.Handler, func() error) {
if a.experimentalDevcontainersEnabled {
containerAPIOpts := []agentcontainers.Option{
agentcontainers.WithExecer(a.execer),
agentcontainers.WithScriptLogger(func(logSourceID uuid.UUID) agentcontainers.ScriptLogger {
return a.logSender.GetScriptLogger(logSourceID)
}),
}
manifest := a.manifest.Load()
if manifest != nil && len(manifest.Devcontainers) > 0 {
containerAPIOpts = append(
containerAPIOpts,
- agentcontainers.WithDevcontainers(manifest.Devcontainers),
+ agentcontainers.WithDevcontainers(manifest.Devcontainers, manifest.Scripts),
)
}

View File

@@ -387,6 +387,22 @@ func (c *AgentConn) ListContainers(ctx context.Context) (codersdk.WorkspaceAgent
return resp, json.NewDecoder(res.Body).Decode(&resp)
}
// RecreateDevcontainer recreates a devcontainer with the given container.
// This is a blocking call and will wait for the container to be recreated.
func (c *AgentConn) RecreateDevcontainer(ctx context.Context, containerIDOrName string) error {
ctx, span := tracing.StartSpan(ctx)
defer span.End()
res, err := c.apiRequest(ctx, http.MethodPost, "/api/v0/containers/devcontainers/container/"+containerIDOrName+"/recreate", nil)
if err != nil {
return xerrors.Errorf("do request: %w", err)
}
defer res.Body.Close()
if res.StatusCode != http.StatusNoContent {
return codersdk.ReadBodyAsError(res)
}
return nil
}
// apiRequest makes a request to the workspace agent's HTTP API server.
func (c *AgentConn) apiRequest(ctx context.Context, method, path string, body io.Reader) (*http.Response, error) {
ctx, span := tracing.StartSpan(ctx)
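
For reference, a caller-side sketch of the new method, assuming an already-dialed AgentConn (conn) and a known container ID; the call blocks until the devcontainer CLI finishes, while progress logs are delivered through the script logger described above:

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
    defer cancel()
    if err := conn.RecreateDevcontainer(ctx, containerID); err != nil {
        return xerrors.Errorf("recreate devcontainer: %w", err)
    }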