fix(agent): start devcontainers through agentcontainers package (#18471)

Fixes https://github.com/coder/internal/issues/706

Context for the implementation is here:
https://github.com/coder/internal/issues/706#issuecomment-2990490282

Synchronously starts dev containers defined in Terraform with our
`DevcontainerCLI` abstraction, instead of piggybacking off of our
`agentscripts` package. This gives us more control over logs and
removes the reliance on packages that may or may not exist in the
user-provided image.
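
At a high level, the new flow looks roughly like the sketch below (a condensed, illustrative summary of the agent diff that follows; `logger`, `execer`, `ctx`, and `manifest` stand in for the agent's fields, and error reporting is trimmed):

	// Sketch: the agent now drives devcontainer startup itself.
	// 1. Construct the container API at agent init; no background loops yet.
	containerAPI := agentcontainers.NewAPI(logger.Named("containers"),
		agentcontainers.WithExecer(execer),
	)

	// 2. When the manifest arrives, apply manifest-derived options and start
	//    the watcher/updater loops.
	containerAPI.Init(
		agentcontainers.WithManifestInfo(manifest.OwnerName, manifest.WorkspaceName),
		agentcontainers.WithDevcontainers(manifest.Devcontainers, manifest.Scripts),
	)

	// 3. After the start scripts, create each devcontainer synchronously; the
	//    result is reported as a START-stage timing via ScriptCompleted.
	for _, dc := range manifest.Devcontainers {
		if err := containerAPI.CreateDevcontainer(dc.WorkspaceFolder, dc.ConfigPath); err != nil {
			logger.Warn(ctx, "create devcontainer failed", slog.Error(err))
		}
	}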
Author: Danielle Maywood
Date: 2025-06-25 11:52:50 +01:00 (committed by GitHub)
Parent: f6d9765daf
Commit: c4e4fe85f9
12 changed files with 304 additions and 480 deletions

View File

@@ -91,6 +91,7 @@ type Options struct {
 	Execer                 agentexec.Execer
 	Devcontainers          bool
 	DevcontainerAPIOptions []agentcontainers.Option // Enable Devcontainers for these to be effective.
+	Clock                  quartz.Clock
 }
 
 type Client interface {
@@ -144,6 +145,9 @@ func New(options Options) Agent {
 	if options.PortCacheDuration == 0 {
 		options.PortCacheDuration = 1 * time.Second
 	}
+	if options.Clock == nil {
+		options.Clock = quartz.NewReal()
+	}
 
 	prometheusRegistry := options.PrometheusRegistry
 	if prometheusRegistry == nil {
@@ -157,6 +161,7 @@ func New(options Options) Agent {
 	hardCtx, hardCancel := context.WithCancel(context.Background())
 	gracefulCtx, gracefulCancel := context.WithCancel(hardCtx)
 	a := &agent{
+		clock:                  options.Clock,
 		tailnetListenPort:      options.TailnetListenPort,
 		reconnectingPTYTimeout: options.ReconnectingPTYTimeout,
 		logger:                 options.Logger,
@@ -204,6 +209,7 @@ func New(options Options) Agent {
 }
 
 type agent struct {
+	clock         quartz.Clock
 	logger        slog.Logger
 	client        Client
 	exchangeToken func(ctx context.Context) (string, error)
@@ -273,7 +279,7 @@ type agent struct {
 	devcontainers       bool
 	containerAPIOptions []agentcontainers.Option
-	containerAPI        atomic.Pointer[agentcontainers.API] // Set by apiHandler.
+	containerAPI        *agentcontainers.API
 }
 
 func (a *agent) TailnetConn() *tailnet.Conn {
@@ -330,6 +336,19 @@ func (a *agent) init() {
 	// will not report anywhere.
 	a.scriptRunner.RegisterMetrics(a.prometheusRegistry)
 
+	if a.devcontainers {
+		containerAPIOpts := []agentcontainers.Option{
+			agentcontainers.WithExecer(a.execer),
+			agentcontainers.WithCommandEnv(a.sshServer.CommandEnv),
+			agentcontainers.WithScriptLogger(func(logSourceID uuid.UUID) agentcontainers.ScriptLogger {
+				return a.logSender.GetScriptLogger(logSourceID)
+			}),
+		}
+		containerAPIOpts = append(containerAPIOpts, a.containerAPIOptions...)
+
+		a.containerAPI = agentcontainers.NewAPI(a.logger.Named("containers"), containerAPIOpts...)
+	}
+
 	a.reconnectingPTYServer = reconnectingpty.NewServer(
 		a.logger.Named("reconnecting-pty"),
 		a.sshServer,
@@ -1141,15 +1160,18 @@ func (a *agent) handleManifest(manifestOK *checkpoint) func(ctx context.Context,
 		}
 
 		var (
 			scripts             = manifest.Scripts
 			scriptRunnerOpts    []agentscripts.InitOption
+			devcontainerScripts map[uuid.UUID]codersdk.WorkspaceAgentScript
 		)
 		if a.devcontainers {
-			var dcScripts []codersdk.WorkspaceAgentScript
-			scripts, dcScripts = agentcontainers.ExtractAndInitializeDevcontainerScripts(manifest.Devcontainers, scripts)
-			// See ExtractAndInitializeDevcontainerScripts for motivation
-			// behind running dcScripts as post start scripts.
-			scriptRunnerOpts = append(scriptRunnerOpts, agentscripts.WithPostStartScripts(dcScripts...))
+			a.containerAPI.Init(
+				agentcontainers.WithManifestInfo(manifest.OwnerName, manifest.WorkspaceName),
+				agentcontainers.WithDevcontainers(manifest.Devcontainers, scripts),
+				agentcontainers.WithSubAgentClient(agentcontainers.NewSubAgentClientFromAPI(a.logger, aAPI)),
+			)
+
+			scripts, devcontainerScripts = agentcontainers.ExtractDevcontainerScripts(manifest.Devcontainers, scripts)
 		}
 		err = a.scriptRunner.Init(scripts, aAPI.ScriptCompleted, scriptRunnerOpts...)
 		if err != nil {
@@ -1168,7 +1190,12 @@ func (a *agent) handleManifest(manifestOK *checkpoint) func(ctx context.Context,
 			// finished (both start and post start). For instance, an
 			// autostarted devcontainer will be included in this time.
 			err := a.scriptRunner.Execute(a.gracefulCtx, agentscripts.ExecuteStartScripts)
-			err = errors.Join(err, a.scriptRunner.Execute(a.gracefulCtx, agentscripts.ExecutePostStartScripts))
+
+			for _, dc := range manifest.Devcontainers {
+				cErr := a.createDevcontainer(ctx, aAPI, dc, devcontainerScripts[dc.ID])
+				err = errors.Join(err, cErr)
+			}
+
 			dur := time.Since(start).Seconds()
 			if err != nil {
 				a.logger.Warn(ctx, "startup script(s) failed", slog.Error(err))
@@ -1187,14 +1214,6 @@ func (a *agent) handleManifest(manifestOK *checkpoint) func(ctx context.Context,
 			}
 			a.metrics.startupScriptSeconds.WithLabelValues(label).Set(dur)
 			a.scriptRunner.StartCron()
-
-			// If the container API is enabled, trigger an immediate refresh
-			// for quick sub agent injection.
-			if cAPI := a.containerAPI.Load(); cAPI != nil {
-				if err := cAPI.RefreshContainers(ctx); err != nil {
-					a.logger.Error(ctx, "failed to refresh containers", slog.Error(err))
-				}
-			}
 		})
 		if err != nil {
 			return xerrors.Errorf("track conn goroutine: %w", err)
@@ -1204,6 +1223,38 @@ func (a *agent) handleManifest(manifestOK *checkpoint) func(ctx context.Context,
 	}
 }
 
+func (a *agent) createDevcontainer(
+	ctx context.Context,
+	aAPI proto.DRPCAgentClient26,
+	dc codersdk.WorkspaceAgentDevcontainer,
+	script codersdk.WorkspaceAgentScript,
+) (err error) {
+	var (
+		exitCode  = int32(0)
+		startTime = a.clock.Now()
+		status    = proto.Timing_OK
+	)
+	if err = a.containerAPI.CreateDevcontainer(dc.WorkspaceFolder, dc.ConfigPath); err != nil {
+		exitCode = 1
+		status = proto.Timing_EXIT_FAILURE
+	}
+	endTime := a.clock.Now()
+
+	if _, scriptErr := aAPI.ScriptCompleted(ctx, &proto.WorkspaceAgentScriptCompletedRequest{
+		Timing: &proto.Timing{
+			ScriptId: script.ID[:],
+			Start:    timestamppb.New(startTime),
+			End:      timestamppb.New(endTime),
+			ExitCode: exitCode,
+			Stage:    proto.Timing_START,
+			Status:   status,
+		},
+	}); scriptErr != nil {
+		a.logger.Warn(ctx, "reporting script completed failed", slog.Error(scriptErr))
+	}
+	return err
+}
+
 // createOrUpdateNetwork waits for the manifest to be set using manifestOK, then creates or updates
 // the tailnet using the information in the manifest
 func (a *agent) createOrUpdateNetwork(manifestOK, networkOK *checkpoint) func(context.Context, proto.DRPCAgentClient26) error {
@@ -1227,7 +1278,6 @@ func (a *agent) createOrUpdateNetwork(manifestOK, networkOK *checkpoint) func(co
 			// agent API.
 			network, err = a.createTailnet(
 				a.gracefulCtx,
-				aAPI,
 				manifest.AgentID,
 				manifest.DERPMap,
 				manifest.DERPForceWebSockets,
@@ -1262,9 +1312,9 @@ func (a *agent) createOrUpdateNetwork(manifestOK, networkOK *checkpoint) func(co
 			network.SetBlockEndpoints(manifest.DisableDirectConnections)
 
 			// Update the subagent client if the container API is available.
-			if cAPI := a.containerAPI.Load(); cAPI != nil {
+			if a.containerAPI != nil {
 				client := agentcontainers.NewSubAgentClientFromAPI(a.logger, aAPI)
-				cAPI.UpdateSubAgentClient(client)
+				a.containerAPI.UpdateSubAgentClient(client)
 			}
 		}
 		return nil
@@ -1382,7 +1432,6 @@ func (a *agent) trackGoroutine(fn func()) error {
 func (a *agent) createTailnet(
 	ctx context.Context,
-	aAPI proto.DRPCAgentClient26,
 	agentID uuid.UUID,
 	derpMap *tailcfg.DERPMap,
 	derpForceWebSockets, disableDirectConnections bool,
@@ -1515,10 +1564,7 @@ func (a *agent) createTailnet(
 	}()
 	if err = a.trackGoroutine(func() {
 		defer apiListener.Close()
-		apiHandler, closeAPIHAndler := a.apiHandler(aAPI)
-		defer func() {
-			_ = closeAPIHAndler()
-		}()
+		apiHandler := a.apiHandler()
 		server := &http.Server{
 			BaseContext: func(net.Listener) context.Context { return ctx },
 			Handler:     apiHandler,
@@ -1532,7 +1578,6 @@
 			case <-ctx.Done():
 			case <-a.hardCtx.Done():
 			}
-			_ = closeAPIHAndler()
 			_ = server.Close()
 		}()
 
@@ -1871,6 +1916,12 @@ func (a *agent) Close() error {
 		a.logger.Error(a.hardCtx, "script runner close", slog.Error(err))
 	}
 
+	if a.containerAPI != nil {
+		if err := a.containerAPI.Close(); err != nil {
+			a.logger.Error(a.hardCtx, "container API close", slog.Error(err))
+		}
+	}
+
 	// Wait for the graceful shutdown to complete, but don't wait forever so
 	// that we don't break user expectations.
 	go func() {

View File

@@ -207,6 +207,10 @@ func WithDevcontainers(devcontainers []codersdk.WorkspaceAgentDevcontainer, scri
 		api.devcontainerNames = make(map[string]bool, len(devcontainers))
 		api.devcontainerLogSourceIDs = make(map[string]uuid.UUID)
 		for _, dc := range devcontainers {
+			if dc.Status == "" {
+				dc.Status = codersdk.WorkspaceAgentDevcontainerStatusStarting
+			}
+
 			api.knownDevcontainers[dc.WorkspaceFolder] = dc
 			api.devcontainerNames[dc.Name] = true
 			for _, script := range scripts {
@@ -265,8 +269,6 @@ func NewAPI(logger slog.Logger, options ...Option) *API {
 	api := &API{
 		ctx:               ctx,
 		cancel:            cancel,
-		watcherDone:       make(chan struct{}),
-		updaterDone:       make(chan struct{}),
 		initialUpdateDone: make(chan struct{}),
 		updateTrigger:     make(chan chan error),
 		updateInterval:    defaultUpdateInterval,
@@ -315,10 +317,28 @@ func NewAPI(logger slog.Logger, options ...Option) *API {
 		api.subAgentClient.Store(&c)
 	}
 
+	return api
+}
+
+// Init applies a final set of options to the API and then
+// begins the watcherLoop and updaterLoop. This function
+// must only be called once.
+func (api *API) Init(opts ...Option) {
+	api.mu.Lock()
+	defer api.mu.Unlock()
+	if api.closed {
+		return
+	}
+
+	for _, opt := range opts {
+		opt(api)
+	}
+
+	api.watcherDone = make(chan struct{})
+	api.updaterDone = make(chan struct{})
+
 	go api.watcherLoop()
 	go api.updaterLoop()
-
-	return api
 }
 
 func (api *API) watcherLoop() {
@@ -909,8 +929,9 @@ func (api *API) handleDevcontainerRecreate(w http.ResponseWriter, r *http.Reques
 	dc.Status = codersdk.WorkspaceAgentDevcontainerStatusStarting
 	dc.Container = nil
 	api.knownDevcontainers[dc.WorkspaceFolder] = dc
-	api.asyncWg.Add(1)
-	go api.recreateDevcontainer(dc, configPath)
+	go func() {
+		_ = api.CreateDevcontainer(dc.WorkspaceFolder, configPath, WithRemoveExistingContainer())
+	}()
 
 	api.mu.Unlock()
@@ -920,15 +941,29 @@ func (api *API) handleDevcontainerRecreate(w http.ResponseWriter, r *http.Reques
 	})
 }
 
-// recreateDevcontainer should run in its own goroutine and is responsible for
+// createDevcontainer should run in its own goroutine and is responsible for
 // recreating a devcontainer based on the provided devcontainer configuration.
 // It updates the devcontainer status and logs the process. The configPath is
 // passed as a parameter for the odd chance that the container being recreated
 // has a different config file than the one stored in the devcontainer state.
-// The devcontainer state must be set to starting and the asyncWg must be
-// incremented before calling this function.
-func (api *API) recreateDevcontainer(dc codersdk.WorkspaceAgentDevcontainer, configPath string) {
+func (api *API) CreateDevcontainer(workspaceFolder, configPath string, opts ...DevcontainerCLIUpOptions) error {
+	api.mu.Lock()
+	if api.closed {
+		api.mu.Unlock()
+		return nil
+	}
+
+	dc, found := api.knownDevcontainers[workspaceFolder]
+	if !found {
+		api.mu.Unlock()
+		return xerrors.Errorf("devcontainer not found")
+	}
+
+	api.asyncWg.Add(1)
 	defer api.asyncWg.Done()
+	api.mu.Unlock()
 
 	var (
 		err error
@@ -969,12 +1004,15 @@ func (api *API) recreateDevcontainer(dc codersdk.WorkspaceAgentDevcontainer, con
 	logger.Debug(ctx, "starting devcontainer recreation")
 
-	_, err = api.dccli.Up(ctx, dc.WorkspaceFolder, configPath, WithUpOutput(infoW, errW), WithRemoveExistingContainer())
+	upOptions := []DevcontainerCLIUpOptions{WithUpOutput(infoW, errW)}
+	upOptions = append(upOptions, opts...)
+
+	_, err = api.dccli.Up(ctx, dc.WorkspaceFolder, configPath, upOptions...)
 	if err != nil {
 		// No need to log if the API is closing (context canceled), as this
 		// is expected behavior when the API is shutting down.
 		if !errors.Is(err, context.Canceled) {
-			logger.Error(ctx, "devcontainer recreation failed", slog.Error(err))
+			logger.Error(ctx, "devcontainer creation failed", slog.Error(err))
 		}
 
 		api.mu.Lock()
@@ -983,10 +1021,11 @@ func (api *API) recreateDevcontainer(dc codersdk.WorkspaceAgentDevcontainer, con
 		api.knownDevcontainers[dc.WorkspaceFolder] = dc
 		api.recreateErrorTimes[dc.WorkspaceFolder] = api.clock.Now("agentcontainers", "recreate", "errorTimes")
 		api.mu.Unlock()
-		return
+
+		return xerrors.Errorf("start devcontainer: %w", err)
 	}
 
-	logger.Info(ctx, "devcontainer recreated successfully")
+	logger.Info(ctx, "devcontainer created successfully")
 
 	api.mu.Lock()
 	dc = api.knownDevcontainers[dc.WorkspaceFolder]
@@ -1009,8 +1048,11 @@ func (api *API) recreateDevcontainer(dc codersdk.WorkspaceAgentDevcontainer, con
 	// Ensure an immediate refresh to accurately reflect the
 	// devcontainer state after recreation.
 	if err := api.RefreshContainers(ctx); err != nil {
-		logger.Error(ctx, "failed to trigger immediate refresh after devcontainer recreation", slog.Error(err))
+		logger.Error(ctx, "failed to trigger immediate refresh after devcontainer creation", slog.Error(err))
+		return xerrors.Errorf("refresh containers: %w", err)
 	}
+
+	return nil
 }
 
 // markDevcontainerDirty finds the devcontainer with the given config file path
@@ -1609,8 +1651,12 @@ func (api *API) Close() error {
 	err := api.watcher.Close()
 
 	// Wait for loops to finish.
-	<-api.watcherDone
-	<-api.updaterDone
+	if api.watcherDone != nil {
+		<-api.watcherDone
+	}
+	if api.updaterDone != nil {
+		<-api.updaterDone
+	}
 
 	// Wait for all async tasks to complete.
 	api.asyncWg.Wait()

View File

@@ -437,6 +437,7 @@ func TestAPI(t *testing.T) {
 			agentcontainers.WithContainerCLI(mLister),
 			agentcontainers.WithContainerLabelIncludeFilter("this.label.does.not.exist.ignore.devcontainers", "true"),
 		)
+		api.Init()
 		defer api.Close()
 		r.Mount("/", api.Routes())
@@ -614,6 +615,7 @@ func TestAPI(t *testing.T) {
 				agentcontainers.WithDevcontainerCLI(tt.devcontainerCLI),
 				agentcontainers.WithWatcher(watcher.NewNoop()),
 			)
+			api.Init()
 			defer api.Close()
 			r.Mount("/", api.Routes())
@@ -1010,6 +1012,7 @@ func TestAPI(t *testing.T) {
 			apiOptions := []agentcontainers.Option{
 				agentcontainers.WithClock(mClock),
 				agentcontainers.WithContainerCLI(tt.lister),
+				agentcontainers.WithDevcontainerCLI(&fakeDevcontainerCLI{}),
 				agentcontainers.WithWatcher(watcher.NewNoop()),
 			}
@@ -1027,6 +1030,7 @@ func TestAPI(t *testing.T) {
 			}
 
 			api := agentcontainers.NewAPI(logger, apiOptions...)
+			api.Init()
 			defer api.Close()
 			r.Mount("/", api.Routes())
@@ -1038,6 +1042,11 @@ func TestAPI(t *testing.T) {
 			tickerTrap.MustWait(ctx).MustRelease(ctx)
 			tickerTrap.Close()
 
+			for _, dc := range tt.knownDevcontainers {
+				err := api.CreateDevcontainer(dc.WorkspaceFolder, dc.ConfigPath)
+				require.NoError(t, err)
+			}
+
 			// Advance the clock to run the updater loop.
 			_, aw := mClock.AdvanceNext()
 			aw.MustWait(ctx)
@@ -1111,6 +1120,7 @@ func TestAPI(t *testing.T) {
 				[]codersdk.WorkspaceAgentScript{{LogSourceID: uuid.New(), ID: dc.ID}},
 			),
 		)
+		api.Init()
 		defer api.Close()
 
 		// Make sure the ticker function has been registered
@@ -1206,6 +1216,7 @@ func TestAPI(t *testing.T) {
 			agentcontainers.WithWatcher(fWatcher),
 			agentcontainers.WithClock(mClock),
 		)
+		api.Init()
 		defer api.Close()
 
 		r := chi.NewRouter()
@@ -1358,6 +1369,7 @@ func TestAPI(t *testing.T) {
 			agentcontainers.WithDevcontainerCLI(fakeDCCLI),
 			agentcontainers.WithManifestInfo("test-user", "test-workspace"),
 		)
+		api.Init()
 		apiClose := func() {
 			closeOnce.Do(func() {
 				// Close before api.Close() defer to avoid deadlock after test.
@@ -1578,6 +1590,7 @@ func TestAPI(t *testing.T) {
 			agentcontainers.WithSubAgentClient(fakeSAC),
 			agentcontainers.WithDevcontainerCLI(&fakeDevcontainerCLI{}),
 		)
+		api.Init()
 		defer api.Close()
 
 		tickerTrap.MustWait(ctx).MustRelease(ctx)
@@ -1899,6 +1912,7 @@ func TestAPI(t *testing.T) {
 			agentcontainers.WithSubAgentURL("test-subagent-url"),
 			agentcontainers.WithWatcher(watcher.NewNoop()),
 		)
+		api.Init()
 		defer api.Close()
 
 		// Close before api.Close() defer to avoid deadlock after test.
@@ -1991,6 +2005,7 @@ func TestAPI(t *testing.T) {
 			agentcontainers.WithSubAgentURL("test-subagent-url"),
 			agentcontainers.WithWatcher(watcher.NewNoop()),
 		)
+		api.Init()
 		defer api.Close()
 
 		// Close before api.Close() defer to avoid deadlock after test.
@@ -2045,6 +2060,7 @@ func TestAPI(t *testing.T) {
 			agentcontainers.WithExecer(fakeExec),
 			agentcontainers.WithCommandEnv(commandEnv),
 		)
+		api.Init()
 		defer api.Close()
 
 		// Call RefreshContainers directly to trigger CommandEnv usage.
@@ -2134,6 +2150,7 @@ func TestAPI(t *testing.T) {
 			agentcontainers.WithWatcher(fWatcher),
 			agentcontainers.WithClock(mClock),
 		)
+		api.Init()
 		defer func() {
 			close(fakeSAC.createErrC)
 			close(fakeSAC.deleteErrC)
@@ -2334,6 +2351,7 @@ func TestSubAgentCreationWithNameRetry(t *testing.T) {
 				agentcontainers.WithSubAgentClient(fSAC),
 				agentcontainers.WithWatcher(watcher.NewNoop()),
 			)
+			api.Init()
 			defer api.Close()
 
 			tickerTrap.MustWait(ctx).MustRelease(ctx)

View File

@@ -2,10 +2,10 @@ package agentcontainers
 import (
 	"context"
-	"fmt"
 	"os"
 	"path/filepath"
-	"strings"
+
+	"github.com/google/uuid"
 
 	"cdr.dev/slog"
 	"github.com/coder/coder/v2/codersdk"
@@ -22,33 +22,18 @@
 	DevcontainerDefaultContainerWorkspaceFolder = "/workspaces"
 )
 
-const devcontainerUpScriptTemplate = `
-if ! which devcontainer > /dev/null 2>&1; then
-echo "ERROR: Unable to start devcontainer, @devcontainers/cli is not installed or not found in \$PATH." 1>&2
-echo "Please install @devcontainers/cli by running \"npm install -g @devcontainers/cli\" or by using the \"devcontainers-cli\" Coder module." 1>&2
-exit 1
-fi
-devcontainer up %s
-`
-
-// ExtractAndInitializeDevcontainerScripts extracts devcontainer scripts from
-// the given scripts and devcontainers. The devcontainer scripts are removed
-// from the returned scripts so that they can be run separately.
-//
-// Dev Containers have an inherent dependency on start scripts, since they
-// initialize the workspace (e.g. git clone, npm install, etc). This is
-// important if e.g. a Coder module to install @devcontainer/cli is used.
-func ExtractAndInitializeDevcontainerScripts(
+func ExtractDevcontainerScripts(
 	devcontainers []codersdk.WorkspaceAgentDevcontainer,
 	scripts []codersdk.WorkspaceAgentScript,
-) (filteredScripts []codersdk.WorkspaceAgentScript, devcontainerScripts []codersdk.WorkspaceAgentScript) {
+) (filteredScripts []codersdk.WorkspaceAgentScript, devcontainerScripts map[uuid.UUID]codersdk.WorkspaceAgentScript) {
+	devcontainerScripts = make(map[uuid.UUID]codersdk.WorkspaceAgentScript)
 ScriptLoop:
 	for _, script := range scripts {
 		for _, dc := range devcontainers {
 			// The devcontainer scripts match the devcontainer ID for
 			// identification.
 			if script.ID == dc.ID {
-				devcontainerScripts = append(devcontainerScripts, devcontainerStartupScript(dc, script))
+				devcontainerScripts[dc.ID] = script
 				continue ScriptLoop
 			}
 		}
@@ -59,24 +44,6 @@ ScriptLoop:
 	return filteredScripts, devcontainerScripts
 }
 
-func devcontainerStartupScript(dc codersdk.WorkspaceAgentDevcontainer, script codersdk.WorkspaceAgentScript) codersdk.WorkspaceAgentScript {
-	args := []string{
-		"--log-format json",
-		fmt.Sprintf("--workspace-folder %q", dc.WorkspaceFolder),
-	}
-	if dc.ConfigPath != "" {
-		args = append(args, fmt.Sprintf("--config %q", dc.ConfigPath))
-	}
-	cmd := fmt.Sprintf(devcontainerUpScriptTemplate, strings.Join(args, " "))
-	// Force the script to run in /bin/sh, since some shells (e.g. fish)
-	// don't support the script.
-	script.Script = fmt.Sprintf("/bin/sh -c '%s'", cmd)
-	// Disable RunOnStart, scripts have this set so that when devcontainers
-	// have not been enabled, a warning will be surfaced in the agent logs.
-	script.RunOnStart = false
-	return script
-}
-
 // ExpandAllDevcontainerPaths expands all devcontainer paths in the given
 // devcontainers. This is required by the devcontainer CLI, which requires
 // absolute paths for the workspace folder and config path.

View File

@@ -1,274 +0,0 @@
package agentcontainers_test
import (
"path/filepath"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
"cdr.dev/slog/sloggers/slogtest"
"github.com/coder/coder/v2/agent/agentcontainers"
"github.com/coder/coder/v2/codersdk"
)
func TestExtractAndInitializeDevcontainerScripts(t *testing.T) {
t.Parallel()
scriptIDs := []uuid.UUID{uuid.New(), uuid.New()}
devcontainerIDs := []uuid.UUID{uuid.New(), uuid.New()}
type args struct {
expandPath func(string) (string, error)
devcontainers []codersdk.WorkspaceAgentDevcontainer
scripts []codersdk.WorkspaceAgentScript
}
tests := []struct {
name string
args args
wantFilteredScripts []codersdk.WorkspaceAgentScript
wantDevcontainerScripts []codersdk.WorkspaceAgentScript
skipOnWindowsDueToPathSeparator bool
}{
{
name: "no scripts",
args: args{
expandPath: nil,
devcontainers: nil,
scripts: nil,
},
wantFilteredScripts: nil,
wantDevcontainerScripts: nil,
},
{
name: "no devcontainers",
args: args{
expandPath: nil,
devcontainers: nil,
scripts: []codersdk.WorkspaceAgentScript{
{ID: scriptIDs[0]},
{ID: scriptIDs[1]},
},
},
wantFilteredScripts: []codersdk.WorkspaceAgentScript{
{ID: scriptIDs[0]},
{ID: scriptIDs[1]},
},
wantDevcontainerScripts: nil,
},
{
name: "no scripts match devcontainers",
args: args{
expandPath: nil,
devcontainers: []codersdk.WorkspaceAgentDevcontainer{
{ID: devcontainerIDs[0]},
{ID: devcontainerIDs[1]},
},
scripts: []codersdk.WorkspaceAgentScript{
{ID: scriptIDs[0]},
{ID: scriptIDs[1]},
},
},
wantFilteredScripts: []codersdk.WorkspaceAgentScript{
{ID: scriptIDs[0]},
{ID: scriptIDs[1]},
},
wantDevcontainerScripts: nil,
},
{
name: "scripts match devcontainers and sets RunOnStart=false",
args: args{
expandPath: nil,
devcontainers: []codersdk.WorkspaceAgentDevcontainer{
{ID: devcontainerIDs[0], WorkspaceFolder: "workspace1"},
{ID: devcontainerIDs[1], WorkspaceFolder: "workspace2"},
},
scripts: []codersdk.WorkspaceAgentScript{
{ID: scriptIDs[0], RunOnStart: true},
{ID: scriptIDs[1], RunOnStart: true},
{ID: devcontainerIDs[0], RunOnStart: true},
{ID: devcontainerIDs[1], RunOnStart: true},
},
},
wantFilteredScripts: []codersdk.WorkspaceAgentScript{
{ID: scriptIDs[0], RunOnStart: true},
{ID: scriptIDs[1], RunOnStart: true},
},
wantDevcontainerScripts: []codersdk.WorkspaceAgentScript{
{
ID: devcontainerIDs[0],
Script: "devcontainer up --log-format json --workspace-folder \"workspace1\"",
RunOnStart: false,
},
{
ID: devcontainerIDs[1],
Script: "devcontainer up --log-format json --workspace-folder \"workspace2\"",
RunOnStart: false,
},
},
},
{
name: "scripts match devcontainers with config path",
args: args{
expandPath: nil,
devcontainers: []codersdk.WorkspaceAgentDevcontainer{
{
ID: devcontainerIDs[0],
WorkspaceFolder: "workspace1",
ConfigPath: "config1",
},
{
ID: devcontainerIDs[1],
WorkspaceFolder: "workspace2",
ConfigPath: "config2",
},
},
scripts: []codersdk.WorkspaceAgentScript{
{ID: devcontainerIDs[0]},
{ID: devcontainerIDs[1]},
},
},
wantFilteredScripts: []codersdk.WorkspaceAgentScript{},
wantDevcontainerScripts: []codersdk.WorkspaceAgentScript{
{
ID: devcontainerIDs[0],
Script: "devcontainer up --log-format json --workspace-folder \"workspace1\" --config \"workspace1/config1\"",
RunOnStart: false,
},
{
ID: devcontainerIDs[1],
Script: "devcontainer up --log-format json --workspace-folder \"workspace2\" --config \"workspace2/config2\"",
RunOnStart: false,
},
},
skipOnWindowsDueToPathSeparator: true,
},
{
name: "scripts match devcontainers with expand path",
args: args{
expandPath: func(s string) (string, error) {
return "/home/" + s, nil
},
devcontainers: []codersdk.WorkspaceAgentDevcontainer{
{
ID: devcontainerIDs[0],
WorkspaceFolder: "workspace1",
ConfigPath: "config1",
},
{
ID: devcontainerIDs[1],
WorkspaceFolder: "workspace2",
ConfigPath: "config2",
},
},
scripts: []codersdk.WorkspaceAgentScript{
{ID: devcontainerIDs[0], RunOnStart: true},
{ID: devcontainerIDs[1], RunOnStart: true},
},
},
wantFilteredScripts: []codersdk.WorkspaceAgentScript{},
wantDevcontainerScripts: []codersdk.WorkspaceAgentScript{
{
ID: devcontainerIDs[0],
Script: "devcontainer up --log-format json --workspace-folder \"/home/workspace1\" --config \"/home/workspace1/config1\"",
RunOnStart: false,
},
{
ID: devcontainerIDs[1],
Script: "devcontainer up --log-format json --workspace-folder \"/home/workspace2\" --config \"/home/workspace2/config2\"",
RunOnStart: false,
},
},
skipOnWindowsDueToPathSeparator: true,
},
{
name: "expand config path when ~",
args: args{
expandPath: func(s string) (string, error) {
s = strings.Replace(s, "~/", "", 1)
if filepath.IsAbs(s) {
return s, nil
}
return "/home/" + s, nil
},
devcontainers: []codersdk.WorkspaceAgentDevcontainer{
{
ID: devcontainerIDs[0],
WorkspaceFolder: "workspace1",
ConfigPath: "~/config1",
},
{
ID: devcontainerIDs[1],
WorkspaceFolder: "workspace2",
ConfigPath: "/config2",
},
},
scripts: []codersdk.WorkspaceAgentScript{
{ID: devcontainerIDs[0], RunOnStart: true},
{ID: devcontainerIDs[1], RunOnStart: true},
},
},
wantFilteredScripts: []codersdk.WorkspaceAgentScript{},
wantDevcontainerScripts: []codersdk.WorkspaceAgentScript{
{
ID: devcontainerIDs[0],
Script: "devcontainer up --log-format json --workspace-folder \"/home/workspace1\" --config \"/home/config1\"",
RunOnStart: false,
},
{
ID: devcontainerIDs[1],
Script: "devcontainer up --log-format json --workspace-folder \"/home/workspace2\" --config \"/config2\"",
RunOnStart: false,
},
},
skipOnWindowsDueToPathSeparator: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
if tt.skipOnWindowsDueToPathSeparator && filepath.Separator == '\\' {
t.Skip("Skipping test on Windows due to path separator difference.")
}
logger := slogtest.Make(t, nil)
if tt.args.expandPath == nil {
tt.args.expandPath = func(s string) (string, error) {
return s, nil
}
}
gotFilteredScripts, gotDevcontainerScripts := agentcontainers.ExtractAndInitializeDevcontainerScripts(
agentcontainers.ExpandAllDevcontainerPaths(logger, tt.args.expandPath, tt.args.devcontainers),
tt.args.scripts,
)
if diff := cmp.Diff(tt.wantFilteredScripts, gotFilteredScripts, cmpopts.EquateEmpty()); diff != "" {
t.Errorf("ExtractAndInitializeDevcontainerScripts() gotFilteredScripts mismatch (-want +got):\n%s", diff)
}
// Preprocess the devcontainer scripts to remove scripting part.
for i := range gotDevcontainerScripts {
gotDevcontainerScripts[i].Script = textGrep("devcontainer up", gotDevcontainerScripts[i].Script)
require.NotEmpty(t, gotDevcontainerScripts[i].Script, "devcontainer up script not found")
}
if diff := cmp.Diff(tt.wantDevcontainerScripts, gotDevcontainerScripts); diff != "" {
t.Errorf("ExtractAndInitializeDevcontainerScripts() gotDevcontainerScripts mismatch (-want +got):\n%s", diff)
}
})
}
}
// textGrep returns matching lines from multiline string.
func textGrep(want, got string) (filtered string) {
var lines []string
for _, line := range strings.Split(got, "\n") {
if strings.Contains(line, want) {
lines = append(lines, line)
}
}
return strings.Join(lines, "\n")
}

View File

@@ -140,7 +140,7 @@ func WithReadConfigOutput(stdout, stderr io.Writer) DevcontainerCLIReadConfigOpt
 }
 
 func applyDevcontainerCLIUpOptions(opts []DevcontainerCLIUpOptions) devcontainerCLIUpConfig {
-	conf := devcontainerCLIUpConfig{}
+	conf := devcontainerCLIUpConfig{stdout: io.Discard, stderr: io.Discard}
 	for _, opt := range opts {
 		if opt != nil {
 			opt(&conf)
@@ -150,7 +150,7 @@ func applyDevcontainerCLIUpOptions(opts []DevcontainerCLIUpOptions) devcontainer
 }
 
 func applyDevcontainerCLIExecOptions(opts []DevcontainerCLIExecOptions) devcontainerCLIExecConfig {
-	conf := devcontainerCLIExecConfig{}
+	conf := devcontainerCLIExecConfig{stdout: io.Discard, stderr: io.Discard}
 	for _, opt := range opts {
 		if opt != nil {
 			opt(&conf)
@@ -160,7 +160,7 @@ func applyDevcontainerCLIExecOptions(opts []DevcontainerCLIExecOptions) devconta
 }
 
 func applyDevcontainerCLIReadConfigOptions(opts []DevcontainerCLIReadConfigOptions) devcontainerCLIReadConfigConfig {
-	conf := devcontainerCLIReadConfigConfig{}
+	conf := devcontainerCLIReadConfigConfig{stdout: io.Discard, stderr: io.Discard}
 	for _, opt := range opts {
 		if opt != nil {
 			opt(&conf)
@@ -200,17 +200,20 @@ func (d *devcontainerCLI) Up(ctx context.Context, workspaceFolder, configPath st
 	// Capture stdout for parsing and stream logs for both default and provided writers.
 	var stdoutBuf bytes.Buffer
-	stdoutWriters := []io.Writer{&stdoutBuf, &devcontainerCLILogWriter{ctx: ctx, logger: logger.With(slog.F("stdout", true))}}
-	if conf.stdout != nil {
-		stdoutWriters = append(stdoutWriters, conf.stdout)
-	}
-	cmd.Stdout = io.MultiWriter(stdoutWriters...)
+	cmd.Stdout = io.MultiWriter(
+		&stdoutBuf,
+		&devcontainerCLILogWriter{
+			ctx:    ctx,
+			logger: logger.With(slog.F("stdout", true)),
+			writer: conf.stdout,
+		},
+	)
 	// Stream stderr logs and provided writer if any.
-	stderrWriters := []io.Writer{&devcontainerCLILogWriter{ctx: ctx, logger: logger.With(slog.F("stderr", true))}}
-	if conf.stderr != nil {
-		stderrWriters = append(stderrWriters, conf.stderr)
-	}
-	cmd.Stderr = io.MultiWriter(stderrWriters...)
+	cmd.Stderr = &devcontainerCLILogWriter{
+		ctx:    ctx,
+		logger: logger.With(slog.F("stderr", true)),
+		writer: conf.stderr,
+	}
 
 	if err := cmd.Run(); err != nil {
 		_, err2 := parseDevcontainerCLILastLine[devcontainerCLIResult](ctx, logger, stdoutBuf.Bytes())
@@ -249,16 +252,16 @@ func (d *devcontainerCLI) Exec(ctx context.Context, workspaceFolder, configPath
 	args = append(args, cmdArgs...)
 	c := d.execer.CommandContext(ctx, "devcontainer", args...)
 
-	stdoutWriters := []io.Writer{&devcontainerCLILogWriter{ctx: ctx, logger: logger.With(slog.F("stdout", true))}}
-	if conf.stdout != nil {
-		stdoutWriters = append(stdoutWriters, conf.stdout)
-	}
-	c.Stdout = io.MultiWriter(stdoutWriters...)
-	stderrWriters := []io.Writer{&devcontainerCLILogWriter{ctx: ctx, logger: logger.With(slog.F("stderr", true))}}
-	if conf.stderr != nil {
-		stderrWriters = append(stderrWriters, conf.stderr)
-	}
-	c.Stderr = io.MultiWriter(stderrWriters...)
+	c.Stdout = io.MultiWriter(conf.stdout, &devcontainerCLILogWriter{
+		ctx:    ctx,
+		logger: logger.With(slog.F("stdout", true)),
+		writer: io.Discard,
+	})
+	c.Stderr = io.MultiWriter(conf.stderr, &devcontainerCLILogWriter{
+		ctx:    ctx,
+		logger: logger.With(slog.F("stderr", true)),
+		writer: io.Discard,
+	})
 
 	if err := c.Run(); err != nil {
 		return xerrors.Errorf("devcontainer exec failed: %w", err)
@@ -283,16 +286,19 @@ func (d *devcontainerCLI) ReadConfig(ctx context.Context, workspaceFolder, confi
 	c.Env = append(c.Env, env...)
 
 	var stdoutBuf bytes.Buffer
-	stdoutWriters := []io.Writer{&stdoutBuf, &devcontainerCLILogWriter{ctx: ctx, logger: logger.With(slog.F("stdout", true))}}
-	if conf.stdout != nil {
-		stdoutWriters = append(stdoutWriters, conf.stdout)
-	}
-	c.Stdout = io.MultiWriter(stdoutWriters...)
-	stderrWriters := []io.Writer{&devcontainerCLILogWriter{ctx: ctx, logger: logger.With(slog.F("stderr", true))}}
-	if conf.stderr != nil {
-		stderrWriters = append(stderrWriters, conf.stderr)
-	}
-	c.Stderr = io.MultiWriter(stderrWriters...)
+	c.Stdout = io.MultiWriter(
+		&stdoutBuf,
+		&devcontainerCLILogWriter{
+			ctx:    ctx,
+			logger: logger.With(slog.F("stdout", true)),
+			writer: conf.stdout,
+		},
+	)
+	c.Stderr = &devcontainerCLILogWriter{
+		ctx:    ctx,
+		logger: logger.With(slog.F("stderr", true)),
+		writer: conf.stderr,
+	}
 
 	if err := c.Run(); err != nil {
 		return DevcontainerConfig{}, xerrors.Errorf("devcontainer read-configuration failed: %w", err)
@@ -385,6 +391,7 @@ type devcontainerCLIJSONLogLine struct {
 type devcontainerCLILogWriter struct {
 	ctx    context.Context
 	logger slog.Logger
+	writer io.Writer
 }
@@ -405,8 +412,20 @@ func (l *devcontainerCLILogWriter) Write(p []byte) (n int, err error) {
 		}
 		if logLine.Level >= 3 {
 			l.logger.Info(l.ctx, "@devcontainer/cli", slog.F("line", string(line)))
+			_, _ = l.writer.Write([]byte(logLine.Text + "\n"))
 			continue
 		}
+		// If we've successfully parsed the final log line, it will successfully parse
+		// but will not fill out any of the fields for `logLine`. In this scenario we
+		// assume it is the final log line, unmarshal it as that, and check if the
+		// outcome is a non-empty string.
+		if logLine.Level == 0 {
+			var lastLine devcontainerCLIResult
+			if err := json.Unmarshal(line, &lastLine); err == nil && lastLine.Outcome != "" {
+				_, _ = l.writer.Write(line)
+				_, _ = l.writer.Write([]byte{'\n'})
+			}
+		}
+
 		l.logger.Debug(l.ctx, "@devcontainer/cli", slog.F("line", string(line)))
 	}
 	if err := s.Err(); err != nil {

View File

@@ -363,7 +363,7 @@ func TestDevcontainerCLI_WithOutput(t *testing.T) {
 	require.NotEmpty(t, containerID, "expected non-empty container ID")
 
 	// Read expected log content.
-	expLog, err := os.ReadFile(filepath.Join("testdata", "devcontainercli", "parse", "up.log"))
+	expLog, err := os.ReadFile(filepath.Join("testdata", "devcontainercli", "parse", "up.golden"))
 	require.NoError(t, err, "reading expected log file")
 
 	// Verify stdout buffer contains the CLI logs and stderr is empty.

View File

@@ -0,0 +1,77 @@
@devcontainers/cli 0.75.0. Node.js v23.9.0. darwin 24.4.0 arm64.
Resolving Feature dependencies for 'ghcr.io/devcontainers/features/docker-in-docker:2'...
Soft-dependency 'ghcr.io/devcontainers/features/common-utils' is not required. Removing from installation order...
Files to omit: ''
Run: docker buildx build --load --build-context dev_containers_feature_content_source=/var/folders/1y/cm8mblxd7_x9cljwl_jvfprh0000gn/T/devcontainercli/container-features/0.75.0-1744102171193 --build-arg _DEV_CONTAINERS_BASE_IMAGE=mcr.microsoft.com/devcontainers/javascript-node:1-18-bullseye --build-arg _DEV_CONTAINERS_IMAGE_USER=root --build-arg _DEV_CONTAINERS_FEATURE_CONTENT_SOURCE=dev_container_feature_content_temp --target dev_containers_target_stage -f /var/folders/1y/cm8mblxd7_x9cljwl_jvfprh0000gn/T/devcontainercli/container-features/0.75.0-1744102171193/Dockerfile.extended -t vsc-devcontainers-template-starter-81d8f17e32abef6d434cbb5a37fe05e5c8a6f8ccede47a61197f002dcbf60566-features /var/folders/1y/cm8mblxd7_x9cljwl_jvfprh0000gn/T/devcontainercli/empty-folder
#0 building with "orbstack" instance using docker driver
#1 [internal] load build definition from Dockerfile.extended
#1 transferring dockerfile: 3.09kB done
#1 DONE 0.0s
#2 resolve image config for docker-image://docker.io/docker/dockerfile:1.4
#2 DONE 1.3s
#3 docker-image://docker.io/docker/dockerfile:1.4@sha256:9ba7531bd80fb0a858632727cf7a112fbfd19b17e94c4e84ced81e24ef1a0dbc
#3 CACHED
#4 [internal] load .dockerignore
#4 transferring context: 2B done
#4 DONE 0.0s
#5 [internal] load metadata for mcr.microsoft.com/devcontainers/javascript-node:1-18-bullseye
#5 DONE 0.0s
#6 [context dev_containers_feature_content_source] load .dockerignore
#6 transferring dev_containers_feature_content_source: 2B done
#6 DONE 0.0s
#7 [dev_containers_feature_content_normalize 1/3] FROM mcr.microsoft.com/devcontainers/javascript-node:1-18-bullseye
#7 DONE 0.0s
#8 [context dev_containers_feature_content_source] load from client
#8 transferring dev_containers_feature_content_source: 82.11kB 0.0s done
#8 DONE 0.0s
#9 [dev_containers_feature_content_normalize 2/3] COPY --from=dev_containers_feature_content_source devcontainer-features.builtin.env /tmp/build-features/
#9 CACHED
#10 [dev_containers_target_stage 2/5] RUN mkdir -p /tmp/dev-container-features
#10 CACHED
#11 [dev_containers_target_stage 3/5] COPY --from=dev_containers_feature_content_normalize /tmp/build-features/ /tmp/dev-container-features
#11 CACHED
#12 [dev_containers_target_stage 4/5] RUN echo "_CONTAINER_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'root' || grep -E '^root|^[^:]*:[^:]*:root:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env && echo "_REMOTE_USER_HOME=$( (command -v getent >/dev/null 2>&1 && getent passwd 'node' || grep -E '^node|^[^:]*:[^:]*:node:' /etc/passwd || true) | cut -d: -f6)" >> /tmp/dev-container-features/devcontainer-features.builtin.env
#12 CACHED
#13 [dev_containers_feature_content_normalize 3/3] RUN chmod -R 0755 /tmp/build-features/
#13 CACHED
#14 [dev_containers_target_stage 5/5] RUN --mount=type=bind,from=dev_containers_feature_content_source,source=docker-in-docker_0,target=/tmp/build-features-src/docker-in-docker_0 cp -ar /tmp/build-features-src/docker-in-docker_0 /tmp/dev-container-features && chmod -R 0755 /tmp/dev-container-features/docker-in-docker_0 && cd /tmp/dev-container-features/docker-in-docker_0 && chmod +x ./devcontainer-features-install.sh && ./devcontainer-features-install.sh && rm -rf /tmp/dev-container-features/docker-in-docker_0
#14 CACHED
#15 exporting to image
#15 exporting layers done
#15 writing image sha256:275dc193c905d448ef3945e3fc86220cc315fe0cb41013988d6ff9f8d6ef2357 done
#15 naming to docker.io/library/vsc-devcontainers-template-starter-81d8f17e32abef6d434cbb5a37fe05e5c8a6f8ccede47a61197f002dcbf60566-features done
#15 DONE 0.0s
Run: docker buildx build --load --build-context dev_containers_feature_content_source=/var/folders/1y/cm8mblxd7_x9cljwl_jvfprh0000gn/T/devcontainercli/container-features/0.75.0-1744102171193 --build-arg _DEV_CONTAINERS_BASE_IMAGE=mcr.microsoft.com/devcontainers/javascript-node:1-18-bullseye --build-arg _DEV_CONTAINERS_IMAGE_USER=root --build-arg _DEV_CONTAINERS_FEATURE_CONTENT_SOURCE=dev_container_feature_content_temp --target dev_containers_target_stage -f /var/folders/1y/cm8mblxd7_x9cljwl_jvfprh0000gn/T/devcontainercli/container-features/0.75.0-1744102171193/Dockerfile.extended -t vsc-devcontainers-template-starter-81d8f17e32abef6d434cbb5a37fe05e5c8a6f8ccede47a61197f002dcbf60566-features /var/folders/1y/cm8mblxd7_x9cljwl_jvfprh0000gn/T/devcontainercli/empty-folder
Run: docker run --sig-proxy=false -a STDOUT -a STDERR --mount type=bind,source=/code/devcontainers-template-starter,target=/workspaces/devcontainers-template-starter,consistency=cached --mount type=volume,src=dind-var-lib-docker-0pctifo8bbg3pd06g3j5s9ae8j7lp5qfcd67m25kuahurel7v7jm,dst=/var/lib/docker -l devcontainer.local_folder=/code/devcontainers-template-starter -l devcontainer.config_file=/code/devcontainers-template-starter/.devcontainer/devcontainer.json --privileged --entrypoint /bin/sh vsc-devcontainers-template-starter-81d8f17e32abef6d434cbb5a37fe05e5c8a6f8ccede47a61197f002dcbf60566-features -c echo Container started
Container started
Not setting dockerd DNS manually.
Running the postCreateCommand from devcontainer.json...
added 1 package in 784ms
{"outcome":"success","containerId":"bc72db8d0c4c4e941bd9ffc341aee64a18d3397fd45b87cd93d4746150967ba8","remoteUser":"node","remoteWorkspaceFolder":"/workspaces/devcontainers-template-starter"}

View File

@@ -79,21 +79,6 @@ func New(opts Options) *Runner {
 type ScriptCompletedFunc func(context.Context, *proto.WorkspaceAgentScriptCompletedRequest) (*proto.WorkspaceAgentScriptCompletedResponse, error)
 
-type runnerScript struct {
-	runOnPostStart bool
-	codersdk.WorkspaceAgentScript
-}
-
-func toRunnerScript(scripts ...codersdk.WorkspaceAgentScript) []runnerScript {
-	var rs []runnerScript
-	for _, s := range scripts {
-		rs = append(rs, runnerScript{
-			WorkspaceAgentScript: s,
-		})
-	}
-	return rs
-}
-
 type Runner struct {
 	Options
@@ -103,7 +88,7 @@ type Runner struct {
 	closed          chan struct{}
 	closeMutex      sync.Mutex
 	cron            *cron.Cron
-	scripts         []runnerScript
+	scripts         []codersdk.WorkspaceAgentScript
 	dataDir         string
 	scriptCompleted ScriptCompletedFunc
@@ -138,19 +123,6 @@ func (r *Runner) RegisterMetrics(reg prometheus.Registerer) {
 // InitOption describes an option for the runner initialization.
 type InitOption func(*Runner)
 
-// WithPostStartScripts adds scripts that should be run after the workspace
-// start scripts but before the workspace is marked as started.
-func WithPostStartScripts(scripts ...codersdk.WorkspaceAgentScript) InitOption {
-	return func(r *Runner) {
-		for _, s := range scripts {
-			r.scripts = append(r.scripts, runnerScript{
-				runOnPostStart:       true,
-				WorkspaceAgentScript: s,
-			})
-		}
-	}
-}
-
 // Init initializes the runner with the provided scripts.
 // It also schedules any scripts that have a schedule.
 // This function must be called before Execute.
@@ -161,7 +133,7 @@ func (r *Runner) Init(scripts []codersdk.WorkspaceAgentScript, scriptCompleted S
 		return xerrors.New("init: already initialized")
 	}
 	r.initialized = true
-	r.scripts = toRunnerScript(scripts...)
+	r.scripts = scripts
 	r.scriptCompleted = scriptCompleted
 	for _, opt := range opts {
 		opt(r)
@@ -179,7 +151,7 @@
 		}
 		script := script
 		_, err := r.cron.AddFunc(script.Cron, func() {
-			err := r.trackRun(r.cronCtx, script.WorkspaceAgentScript, ExecuteCronScripts)
+			err := r.trackRun(r.cronCtx, script, ExecuteCronScripts)
 			if err != nil {
 				r.Logger.Warn(context.Background(), "run agent script on schedule", slog.Error(err))
 			}
@@ -223,7 +195,6 @@ type ExecuteOption int
 const (
 	ExecuteAllScripts ExecuteOption = iota
 	ExecuteStartScripts
-	ExecutePostStartScripts
 	ExecuteStopScripts
 	ExecuteCronScripts
 )
@@ -246,7 +217,6 @@ func (r *Runner) Execute(ctx context.Context, option ExecuteOption) error {
 	for _, script := range r.scripts {
 		runScript := (option == ExecuteStartScripts && script.RunOnStart) ||
 			(option == ExecuteStopScripts && script.RunOnStop) ||
-			(option == ExecutePostStartScripts && script.runOnPostStart) ||
 			(option == ExecuteCronScripts && script.Cron != "") ||
 			option == ExecuteAllScripts
@@ -256,7 +226,7 @@
 		script := script
 		eg.Go(func() error {
-			err := r.trackRun(ctx, script.WorkspaceAgentScript, option)
+			err := r.trackRun(ctx, script, option)
 			if err != nil {
 				return xerrors.Errorf("run agent script %q: %w", script.LogSourceID, err)
 			}

View File

@@ -4,7 +4,6 @@ import (
 	"context"
 	"path/filepath"
 	"runtime"
-	"slices"
 	"sync"
 	"testing"
 	"time"
@@ -177,11 +176,6 @@ func TestExecuteOptions(t *testing.T) {
 		Script:    "echo stop",
 		RunOnStop: true,
 	}
-	postStartScript := codersdk.WorkspaceAgentScript{
-		ID:          uuid.New(),
-		LogSourceID: uuid.New(),
-		Script:      "echo poststart",
-	}
 	regularScript := codersdk.WorkspaceAgentScript{
 		ID:          uuid.New(),
 		LogSourceID: uuid.New(),
@@ -193,10 +187,9 @@
 		stopScript,
 		regularScript,
 	}
-	allScripts := append(slices.Clone(scripts), postStartScript)
 
 	scriptByID := func(t *testing.T, id uuid.UUID) codersdk.WorkspaceAgentScript {
-		for _, script := range allScripts {
+		for _, script := range scripts {
 			if script.ID == id {
 				return script
 			}
@@ -206,10 +199,9 @@
 	}
 
 	wantOutput := map[uuid.UUID]string{
 		startScript.ID:   "start",
 		stopScript.ID:    "stop",
-		postStartScript.ID: "poststart",
-		regularScript.ID:   "regular",
+		regularScript.ID: "regular",
 	}
 
 	testCases := []struct {
@@ -220,18 +212,13 @@
 		{
 			name:    "ExecuteAllScripts",
 			option:  agentscripts.ExecuteAllScripts,
-			wantRun: []uuid.UUID{startScript.ID, stopScript.ID, regularScript.ID, postStartScript.ID},
+			wantRun: []uuid.UUID{startScript.ID, stopScript.ID, regularScript.ID},
 		},
 		{
 			name:    "ExecuteStartScripts",
 			option:  agentscripts.ExecuteStartScripts,
 			wantRun: []uuid.UUID{startScript.ID},
 		},
-		{
-			name:    "ExecutePostStartScripts",
-			option:  agentscripts.ExecutePostStartScripts,
-			wantRun: []uuid.UUID{postStartScript.ID},
-		},
 		{
 			name:   "ExecuteStopScripts",
 			option: agentscripts.ExecuteStopScripts,
@@ -260,7 +247,6 @@
 			err := runner.Init(
 				scripts,
 				aAPI.ScriptCompleted,
-				agentscripts.WithPostStartScripts(postStartScript),
 			)
 			require.NoError(t, err)
@@ -274,7 +260,7 @@
 					"script %s should have run when using filter %s", scriptByID(t, id).Script, tc.name)
 			}
 
-			for _, script := range allScripts {
+			for _, script := range scripts {
 				if _, ok := gotRun[script.ID]; ok {
 					continue
 				}

View File

@@ -7,15 +7,11 @@ import (
 	"github.com/go-chi/chi/v5"
-	"github.com/google/uuid"
 
-	"github.com/coder/coder/v2/agent/agentcontainers"
-	"github.com/coder/coder/v2/agent/proto"
 	"github.com/coder/coder/v2/coderd/httpapi"
 	"github.com/coder/coder/v2/codersdk"
 )
 
-func (a *agent) apiHandler(aAPI proto.DRPCAgentClient26) (http.Handler, func() error) {
+func (a *agent) apiHandler() http.Handler {
 	r := chi.NewRouter()
 	r.Get("/", func(rw http.ResponseWriter, r *http.Request) {
 		httpapi.Write(r.Context(), rw, http.StatusOK, codersdk.Response{
@@ -41,34 +37,7 @@ func (a *agent) apiHandler(aAPI proto.DRPCAgentClient26) (http.Handler, func() e
 	}
 
 	if a.devcontainers {
-		containerAPIOpts := []agentcontainers.Option{
-			agentcontainers.WithExecer(a.execer),
-			agentcontainers.WithCommandEnv(a.sshServer.CommandEnv),
-			agentcontainers.WithScriptLogger(func(logSourceID uuid.UUID) agentcontainers.ScriptLogger {
-				return a.logSender.GetScriptLogger(logSourceID)
-			}),
-			agentcontainers.WithSubAgentClient(agentcontainers.NewSubAgentClientFromAPI(a.logger, aAPI)),
-		}
-		manifest := a.manifest.Load()
-		if manifest != nil {
-			containerAPIOpts = append(containerAPIOpts,
-				agentcontainers.WithManifestInfo(manifest.OwnerName, manifest.WorkspaceName),
-			)
-
-			if len(manifest.Devcontainers) > 0 {
-				containerAPIOpts = append(
-					containerAPIOpts,
-					agentcontainers.WithDevcontainers(manifest.Devcontainers, manifest.Scripts),
-				)
-			}
-		}
-
-		// Append after to allow the agent options to override the default options.
-		containerAPIOpts = append(containerAPIOpts, a.containerAPIOptions...)
-
-		containerAPI := agentcontainers.NewAPI(a.logger.Named("containers"), containerAPIOpts...)
-		r.Mount("/api/v0/containers", containerAPI.Routes())
-		a.containerAPI.Store(containerAPI)
+		r.Mount("/api/v0/containers", a.containerAPI.Routes())
 	} else {
 		r.HandleFunc("/api/v0/containers", func(w http.ResponseWriter, r *http.Request) {
 			httpapi.Write(r.Context(), w, http.StatusForbidden, codersdk.Response{
@@ -89,12 +58,7 @@ func (a *agent) apiHandler(aAPI proto.DRPCAgentClient26) (http.Handler, func() e
 	r.Get("/debug/manifest", a.HandleHTTPDebugManifest)
 	r.Get("/debug/prometheus", promHandler.ServeHTTP)
 
-	return r, func() error {
-		if containerAPI := a.containerAPI.Load(); containerAPI != nil {
-			return containerAPI.Close()
-		}
-		return nil
-	}
+	return r
 }
 
 type listeningPortsHandler struct {

View File

@@ -1432,7 +1432,7 @@ func TestWorkspaceAgentRecreateDevcontainer(t *testing.T) {
 				}, nil).AnyTimes()
 				// DetectArchitecture always returns "<none>" for this test to disable agent injection.
 				mccli.EXPECT().DetectArchitecture(gomock.Any(), devContainer.ID).Return("<none>", nil).AnyTimes()
-				mdccli.EXPECT().ReadConfig(gomock.Any(), workspaceFolder, configFile, gomock.Any()).Return(agentcontainers.DevcontainerConfig{}, nil).Times(1)
+				mdccli.EXPECT().ReadConfig(gomock.Any(), workspaceFolder, configFile, gomock.Any()).Return(agentcontainers.DevcontainerConfig{}, nil).AnyTimes()
 				mdccli.EXPECT().Up(gomock.Any(), workspaceFolder, configFile, gomock.Any()).Return("someid", nil).Times(1)
 				return 0
 			},