Merge branch 'main' of github.com:/coder/coder into dk/prebuilds
Signed-off-by: Danny Kopping <danny@coder.com>
@@ -17,10 +17,12 @@ import (

    "cdr.dev/slog"
    agentproto "github.com/coder/coder/v2/agent/proto"
    "github.com/coder/coder/v2/coderd/agentapi/resourcesmonitor"
    "github.com/coder/coder/v2/coderd/appearance"
    "github.com/coder/coder/v2/coderd/database"
    "github.com/coder/coder/v2/coderd/database/pubsub"
    "github.com/coder/coder/v2/coderd/externalauth"
    "github.com/coder/coder/v2/coderd/notifications"
    "github.com/coder/coder/v2/coderd/prometheusmetrics"
    "github.com/coder/coder/v2/coderd/tracing"
    "github.com/coder/coder/v2/coderd/workspacestats"
@@ -29,6 +31,7 @@ import (
    "github.com/coder/coder/v2/codersdk/agentsdk"
    "github.com/coder/coder/v2/tailnet"
    tailnetproto "github.com/coder/coder/v2/tailnet/proto"
    "github.com/coder/quartz"
)

// API implements the DRPC agent API interface from agent/proto. This struct is
@@ -59,7 +62,9 @@ type Options struct {

    Ctx                   context.Context
    Log                   slog.Logger
    Clock                 quartz.Clock
    Database              database.Store
    NotificationsEnqueuer notifications.Enqueuer
    Pubsub                pubsub.Pubsub
    DerpMapFn             func() *tailcfg.DERPMap
    TailnetCoordinator    *atomic.Pointer[tailnet.Coordinator]
@@ -82,6 +87,10 @@ type Options struct {
}

func New(opts Options) *API {
    if opts.Clock == nil {
        opts.Clock = quartz.NewReal()
    }

    api := &API{
        opts: opts,
        mu:   sync.Mutex{},
@@ -106,9 +115,22 @@ func New(opts Options) *API {
    }

    api.ResourcesMonitoringAPI = &ResourcesMonitoringAPI{
        Log: opts.Log,
-       AgentID:  opts.AgentID,
-       Database: opts.Database,
+       AgentID:               opts.AgentID,
+       WorkspaceID:           opts.WorkspaceID,
+       Clock:                 opts.Clock,
+       Database:              opts.Database,
+       NotificationsEnqueuer: opts.NotificationsEnqueuer,
+       Debounce:              5 * time.Minute,
+
+       Config: resourcesmonitor.Config{
+           NumDatapoints:      20,
+           CollectionInterval: 10 * time.Second,
+
+           Alert: resourcesmonitor.AlertConfig{
+               MinimumNOKsPercent:     20,
+               ConsecutiveNOKsPercent: 50,
+           },
+       },
    }

    api.StatsAPI = &StatsAPI{
@@ -4,20 +4,35 @@ import (
    "context"
    "database/sql"
    "errors"
    "fmt"
    "time"

    "golang.org/x/xerrors"

-   "cdr.dev/slog"

    "github.com/google/uuid"

+   "cdr.dev/slog"
    "github.com/coder/coder/v2/agent/proto"
    "github.com/coder/coder/v2/coderd/agentapi/resourcesmonitor"
    "github.com/coder/coder/v2/coderd/database"
    "github.com/coder/coder/v2/coderd/database/dbauthz"
    "github.com/coder/coder/v2/coderd/database/dbtime"
    "github.com/coder/coder/v2/coderd/notifications"
    "github.com/coder/quartz"
)

type ResourcesMonitoringAPI struct {
-   AgentID  uuid.UUID
-   Database database.Store
-   Log      slog.Logger
+   AgentID     uuid.UUID
+   WorkspaceID uuid.UUID
+
+   Log                   slog.Logger
+   Clock                 quartz.Clock
+   Database              database.Store
+   NotificationsEnqueuer notifications.Enqueuer
+
+   Debounce time.Duration
+   Config   resourcesmonitor.Config
}

func (a *ResourcesMonitoringAPI) GetResourcesMonitoringConfiguration(ctx context.Context, _ *proto.GetResourcesMonitoringConfigurationRequest) (*proto.GetResourcesMonitoringConfigurationResponse, error) {
@@ -33,8 +48,8 @@ func (a *ResourcesMonitoringAPI) GetResourcesMonitoringConfiguration(ctx context

    return &proto.GetResourcesMonitoringConfigurationResponse{
        Config: &proto.GetResourcesMonitoringConfigurationResponse_Config{
-           CollectionIntervalSeconds: 10,
-           NumDatapoints:             20,
+           CollectionIntervalSeconds: int32(a.Config.CollectionInterval.Seconds()),
+           NumDatapoints:             a.Config.NumDatapoints,
        },
        Memory: func() *proto.GetResourcesMonitoringConfigurationResponse_Memory {
            if memoryErr != nil {
@@ -60,8 +75,182 @@ func (a *ResourcesMonitoringAPI) GetResourcesMonitoringConfiguration(ctx context

}

func (a *ResourcesMonitoringAPI) PushResourcesMonitoringUsage(ctx context.Context, req *proto.PushResourcesMonitoringUsageRequest) (*proto.PushResourcesMonitoringUsageResponse, error) {
-   a.Log.Info(ctx, "resources monitoring usage received",
-       slog.F("request", req))
+   var err error

-   return &proto.PushResourcesMonitoringUsageResponse{}, nil
+   if memoryErr := a.monitorMemory(ctx, req.Datapoints); memoryErr != nil {
+       err = errors.Join(err, xerrors.Errorf("monitor memory: %w", memoryErr))
+   }
+
+   if volumeErr := a.monitorVolumes(ctx, req.Datapoints); volumeErr != nil {
+       err = errors.Join(err, xerrors.Errorf("monitor volume: %w", volumeErr))
+   }
+
+   return &proto.PushResourcesMonitoringUsageResponse{}, err
}

func (a *ResourcesMonitoringAPI) monitorMemory(ctx context.Context, datapoints []*proto.PushResourcesMonitoringUsageRequest_Datapoint) error {
    monitor, err := a.Database.FetchMemoryResourceMonitorsByAgentID(ctx, a.AgentID)
    if err != nil {
        // It is valid for an agent to not have a memory monitor, so we
        // do not want to treat it as an error.
        if errors.Is(err, sql.ErrNoRows) {
            return nil
        }

        return xerrors.Errorf("fetch memory resource monitor: %w", err)
    }

    if !monitor.Enabled {
        return nil
    }

    usageDatapoints := make([]*proto.PushResourcesMonitoringUsageRequest_Datapoint_MemoryUsage, 0, len(datapoints))
    for _, datapoint := range datapoints {
        usageDatapoints = append(usageDatapoints, datapoint.Memory)
    }

    usageStates := resourcesmonitor.CalculateMemoryUsageStates(monitor, usageDatapoints)

    oldState := monitor.State
    newState := resourcesmonitor.NextState(a.Config, oldState, usageStates)

    debouncedUntil, shouldNotify := monitor.Debounce(a.Debounce, a.Clock.Now(), oldState, newState)

    //nolint:gocritic // We need to be able to update the resource monitor here.
    err = a.Database.UpdateMemoryResourceMonitor(dbauthz.AsResourceMonitor(ctx), database.UpdateMemoryResourceMonitorParams{
        AgentID:        a.AgentID,
        State:          newState,
        UpdatedAt:      dbtime.Time(a.Clock.Now()),
        DebouncedUntil: dbtime.Time(debouncedUntil),
    })
    if err != nil {
        return xerrors.Errorf("update workspace monitor: %w", err)
    }

    if !shouldNotify {
        return nil
    }

    workspace, err := a.Database.GetWorkspaceByID(ctx, a.WorkspaceID)
    if err != nil {
        return xerrors.Errorf("get workspace by id: %w", err)
    }

    _, err = a.NotificationsEnqueuer.EnqueueWithData(
        // nolint:gocritic // We need to be able to send the notification.
        dbauthz.AsNotifier(ctx),
        workspace.OwnerID,
        notifications.TemplateWorkspaceOutOfMemory,
        map[string]string{
            "workspace": workspace.Name,
            "threshold": fmt.Sprintf("%d%%", monitor.Threshold),
        },
        map[string]any{
            // NOTE(DanielleMaywood):
            // When notifications are enqueued, they are checked to be
            // unique within a single day. This means that if we attempt
            // to send two OOM notifications for the same workspace on
            // the same day, the enqueuer will prevent us from sending
            // a second one. We inject a timestamp to make the
            // notifications appear different enough to circumvent this
            // deduplication logic.
            "timestamp": a.Clock.Now(),
        },
"workspace-monitor-memory",
|
||||
)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("notify workspace OOM: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *ResourcesMonitoringAPI) monitorVolumes(ctx context.Context, datapoints []*proto.PushResourcesMonitoringUsageRequest_Datapoint) error {
|
||||
volumeMonitors, err := a.Database.FetchVolumesResourceMonitorsByAgentID(ctx, a.AgentID)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("get or insert volume monitor: %w", err)
|
||||
}
|
||||
|
||||
outOfDiskVolumes := make([]map[string]any, 0)
|
||||
|
||||
for _, monitor := range volumeMonitors {
|
||||
if !monitor.Enabled {
|
||||
continue
|
||||
}
|
||||
|
||||
usageDatapoints := make([]*proto.PushResourcesMonitoringUsageRequest_Datapoint_VolumeUsage, 0, len(datapoints))
|
||||
for _, datapoint := range datapoints {
|
||||
var usage *proto.PushResourcesMonitoringUsageRequest_Datapoint_VolumeUsage
|
||||
|
||||
for _, volume := range datapoint.Volumes {
|
||||
if volume.Volume == monitor.Path {
|
||||
usage = volume
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
usageDatapoints = append(usageDatapoints, usage)
|
||||
}
|
||||
|
||||
usageStates := resourcesmonitor.CalculateVolumeUsageStates(monitor, usageDatapoints)
|
||||
|
||||
oldState := monitor.State
|
||||
newState := resourcesmonitor.NextState(a.Config, oldState, usageStates)
|
||||
|
||||
debouncedUntil, shouldNotify := monitor.Debounce(a.Debounce, a.Clock.Now(), oldState, newState)
|
||||
|
||||
if shouldNotify {
|
||||
outOfDiskVolumes = append(outOfDiskVolumes, map[string]any{
|
||||
"path": monitor.Path,
|
||||
"threshold": fmt.Sprintf("%d%%", monitor.Threshold),
|
||||
})
|
||||
}
|
||||
|
||||
//nolint:gocritic // We need to be able to update the resource monitor here.
|
||||
if err := a.Database.UpdateVolumeResourceMonitor(dbauthz.AsResourceMonitor(ctx), database.UpdateVolumeResourceMonitorParams{
|
||||
AgentID: a.AgentID,
|
||||
Path: monitor.Path,
|
||||
State: newState,
|
||||
UpdatedAt: dbtime.Time(a.Clock.Now()),
|
||||
DebouncedUntil: dbtime.Time(debouncedUntil),
|
||||
}); err != nil {
|
||||
return xerrors.Errorf("update workspace monitor: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(outOfDiskVolumes) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
workspace, err := a.Database.GetWorkspaceByID(ctx, a.WorkspaceID)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("get workspace by id: %w", err)
|
||||
}
|
||||
|
||||
if _, err := a.NotificationsEnqueuer.EnqueueWithData(
|
||||
// nolint:gocritic // We need to be able to send the notification.
|
||||
dbauthz.AsNotifier(ctx),
|
||||
workspace.OwnerID,
|
||||
notifications.TemplateWorkspaceOutOfDisk,
|
||||
map[string]string{
|
||||
"workspace": workspace.Name,
|
||||
},
|
||||
        map[string]any{
            "volumes": outOfDiskVolumes,
            // NOTE(DanielleMaywood):
            // When notifications are enqueued, they are checked to be
            // unique within a single day. This means that if we attempt
            // to send two OOM notifications for the same workspace on
            // the same day, the enqueuer will prevent us from sending
            // a second one. We inject a timestamp to make the
            // notifications appear different enough to circumvent this
            // deduplication logic.
            "timestamp": a.Clock.Now(),
        },
"workspace-monitor-volumes",
|
||||
); err != nil {
|
||||
return xerrors.Errorf("notify workspace OOD: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
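The NOTE blocks above explain that the enqueuer deduplicates notifications within a single day, and that an ever-changing `"timestamp"` label defeats that. A minimal sketch of why, assuming a dedup key derived from user, template, labels, and calendar day (the key construction here is hypothetical, not the enqueuer's actual implementation):

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"sort"
	"time"

	"github.com/google/uuid"
)

// dedupKey is illustrative only: if equal labels hash to equal keys, two
// same-day OOM alerts with identical payloads collapse into one. Injecting
// a distinct "timestamp" label makes every key unique.
func dedupKey(userID, templateID uuid.UUID, labels map[string]any, day time.Time) string {
	keys := make([]string, 0, len(labels))
	for k := range labels {
		keys = append(keys, k)
	}
	sort.Strings(keys) // stable ordering so equal label sets hash equally

	h := sha256.New()
	fmt.Fprintf(h, "%s|%s|%s", userID, templateID, day.Format("2006-01-02"))
	for _, k := range keys {
		fmt.Fprintf(h, "|%s=%v", k, labels[k])
	}
	return hex.EncodeToString(h.Sum(nil))
}

func main() {
	user, tmpl := uuid.New(), uuid.New()
	day := time.Now()

	a := dedupKey(user, tmpl, map[string]any{"timestamp": time.Now()}, day)
	b := dedupKey(user, tmpl, map[string]any{"timestamp": time.Now().Add(time.Nanosecond)}, day)
	fmt.Println(a == b) // false: distinct timestamps defeat same-day dedup
}
```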
coderd/agentapi/resources_monitoring_test.go (new file, 944 lines)
@@ -0,0 +1,944 @@
package agentapi_test

import (
    "context"
    "testing"
    "time"

    "github.com/google/uuid"
    "github.com/stretchr/testify/require"
    "google.golang.org/protobuf/types/known/timestamppb"

    agentproto "github.com/coder/coder/v2/agent/proto"
    "github.com/coder/coder/v2/coderd/agentapi"
    "github.com/coder/coder/v2/coderd/agentapi/resourcesmonitor"
    "github.com/coder/coder/v2/coderd/database"
    "github.com/coder/coder/v2/coderd/database/dbgen"
    "github.com/coder/coder/v2/coderd/database/dbtestutil"
    "github.com/coder/coder/v2/coderd/notifications"
    "github.com/coder/coder/v2/coderd/notifications/notificationstest"
    "github.com/coder/quartz"
)

func resourceMonitorAPI(t *testing.T) (*agentapi.ResourcesMonitoringAPI, database.User, *quartz.Mock, *notificationstest.FakeEnqueuer) {
    t.Helper()

    db, _ := dbtestutil.NewDB(t)
    user := dbgen.User(t, db, database.User{})
    org := dbgen.Organization(t, db, database.Organization{})
    template := dbgen.Template(t, db, database.Template{
        OrganizationID: org.ID,
        CreatedBy:      user.ID,
    })
    templateVersion := dbgen.TemplateVersion(t, db, database.TemplateVersion{
        TemplateID:     uuid.NullUUID{Valid: true, UUID: template.ID},
        OrganizationID: org.ID,
        CreatedBy:      user.ID,
    })
    workspace := dbgen.Workspace(t, db, database.WorkspaceTable{
        OrganizationID: org.ID,
        TemplateID:     template.ID,
        OwnerID:        user.ID,
    })
    job := dbgen.ProvisionerJob(t, db, nil, database.ProvisionerJob{
        Type: database.ProvisionerJobTypeWorkspaceBuild,
    })
    build := dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{
        JobID:             job.ID,
        WorkspaceID:       workspace.ID,
        TemplateVersionID: templateVersion.ID,
    })
    resource := dbgen.WorkspaceResource(t, db, database.WorkspaceResource{
        JobID: build.JobID,
    })
    agent := dbgen.WorkspaceAgent(t, db, database.WorkspaceAgent{
        ResourceID: resource.ID,
    })

    notifyEnq := &notificationstest.FakeEnqueuer{}
    clock := quartz.NewMock(t)

    return &agentapi.ResourcesMonitoringAPI{
        AgentID:               agent.ID,
        WorkspaceID:           workspace.ID,
        Clock:                 clock,
        Database:              db,
        NotificationsEnqueuer: notifyEnq,
        Config: resourcesmonitor.Config{
            NumDatapoints:      20,
            CollectionInterval: 10 * time.Second,

            Alert: resourcesmonitor.AlertConfig{
                MinimumNOKsPercent:     20,
                ConsecutiveNOKsPercent: 50,
            },
        },
        Debounce: 1 * time.Minute,
    }, user, clock, notifyEnq
}

func TestMemoryResourceMonitorDebounce(t *testing.T) {
    t.Parallel()

    // This test is a bit of a long one. We're testing that
    // when a monitor goes into an alert state, it doesn't
    // allow another notification to occur until after the
    // debounce period.
    //
    // 1. OK -> NOK  |> sends a notification
    // 2. NOK -> OK  |> does nothing
    // 3. OK -> NOK  |> does nothing due to debounce period
    // 4. NOK -> OK  |> does nothing
    // 5. OK -> NOK  |> sends a notification as debounce period exceeded

    api, user, clock, notifyEnq := resourceMonitorAPI(t)
    api.Config.Alert.ConsecutiveNOKsPercent = 100

    // Given: A monitor in an OK state
    dbgen.WorkspaceAgentMemoryResourceMonitor(t, api.Database, database.WorkspaceAgentMemoryResourceMonitor{
        AgentID:   api.AgentID,
        State:     database.WorkspaceAgentMonitorStateOK,
        Threshold: 80,
    })

    // When: The monitor is given a state that will trigger NOK
    _, err := api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
        Datapoints: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint{
            {
                CollectedAt: timestamppb.New(clock.Now()),
                Memory: &agentproto.PushResourcesMonitoringUsageRequest_Datapoint_MemoryUsage{
                    Used:  10,
                    Total: 10,
                },
            },
        },
    })
    require.NoError(t, err)

    // Then: We expect there to be a notification sent
    sent := notifyEnq.Sent(notificationstest.WithTemplateID(notifications.TemplateWorkspaceOutOfMemory))
    require.Len(t, sent, 1)
    require.Equal(t, user.ID, sent[0].UserID)
    notifyEnq.Clear()

    // When: The monitor moves to an OK state from NOK
    clock.Advance(api.Debounce / 4)
    _, err = api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
        Datapoints: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint{
            {
                CollectedAt: timestamppb.New(clock.Now()),
                Memory: &agentproto.PushResourcesMonitoringUsageRequest_Datapoint_MemoryUsage{
                    Used:  1,
                    Total: 10,
                },
            },
        },
    })
    require.NoError(t, err)

    // Then: We expect no new notifications
    sent = notifyEnq.Sent(notificationstest.WithTemplateID(notifications.TemplateWorkspaceOutOfMemory))
    require.Len(t, sent, 0)
    notifyEnq.Clear()

    // When: The monitor moves back to a NOK state before the debounced time.
    clock.Advance(api.Debounce / 4)
    _, err = api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
        Datapoints: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint{
            {
                CollectedAt: timestamppb.New(clock.Now()),
                Memory: &agentproto.PushResourcesMonitoringUsageRequest_Datapoint_MemoryUsage{
                    Used:  10,
                    Total: 10,
                },
            },
        },
    })
    require.NoError(t, err)

    // Then: We expect no new notifications (showing the debouncer working)
    sent = notifyEnq.Sent(notificationstest.WithTemplateID(notifications.TemplateWorkspaceOutOfMemory))
    require.Len(t, sent, 0)
    notifyEnq.Clear()

    // When: The monitor moves back to an OK state from NOK
    clock.Advance(api.Debounce / 4)
    _, err = api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
        Datapoints: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint{
            {
                CollectedAt: timestamppb.New(clock.Now()),
                Memory: &agentproto.PushResourcesMonitoringUsageRequest_Datapoint_MemoryUsage{
                    Used:  1,
                    Total: 10,
                },
            },
        },
    })
    require.NoError(t, err)

    // Then: We still expect no new notifications
    sent = notifyEnq.Sent(notificationstest.WithTemplateID(notifications.TemplateWorkspaceOutOfMemory))
    require.Len(t, sent, 0)
    notifyEnq.Clear()

    // When: The monitor moves back to a NOK state after the debounce period.
    clock.Advance(api.Debounce/4 + 1*time.Second)
    _, err = api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
        Datapoints: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint{
            {
                CollectedAt: timestamppb.New(clock.Now()),
                Memory: &agentproto.PushResourcesMonitoringUsageRequest_Datapoint_MemoryUsage{
                    Used:  10,
                    Total: 10,
                },
            },
        },
    })
    require.NoError(t, err)

    // Then: We expect a notification
    sent = notifyEnq.Sent(notificationstest.WithTemplateID(notifications.TemplateWorkspaceOutOfMemory))
    require.Len(t, sent, 1)
    require.Equal(t, user.ID, sent[0].UserID)
}

func TestMemoryResourceMonitor(t *testing.T) {
    t.Parallel()

    tests := []struct {
        name          string
        memoryUsage   []int64
        memoryTotal   int64
        previousState database.WorkspaceAgentMonitorState
        expectState   database.WorkspaceAgentMonitorState
        shouldNotify  bool
    }{
        {
            name:          "WhenOK/NeverExceedsThreshold",
            memoryUsage:   []int64{2, 3, 2, 4, 2, 3, 2, 1, 2, 3, 4, 4, 1, 2, 3, 1, 2},
            memoryTotal:   10,
            previousState: database.WorkspaceAgentMonitorStateOK,
            expectState:   database.WorkspaceAgentMonitorStateOK,
            shouldNotify:  false,
        },
        {
            name:          "WhenOK/ShouldStayInOK",
            memoryUsage:   []int64{9, 3, 2, 4, 2, 3, 2, 1, 2, 3, 4, 4, 1, 2, 3, 1, 2},
            memoryTotal:   10,
            previousState: database.WorkspaceAgentMonitorStateOK,
            expectState:   database.WorkspaceAgentMonitorStateOK,
            shouldNotify:  false,
        },
        {
            name:          "WhenOK/ConsecutiveExceedsThreshold",
            memoryUsage:   []int64{2, 3, 2, 4, 2, 3, 2, 1, 2, 3, 4, 4, 1, 8, 9, 8, 9},
            memoryTotal:   10,
            previousState: database.WorkspaceAgentMonitorStateOK,
            expectState:   database.WorkspaceAgentMonitorStateNOK,
            shouldNotify:  true,
        },
        {
            name:          "WhenOK/MinimumExceedsThreshold",
            memoryUsage:   []int64{2, 8, 2, 9, 2, 8, 2, 9, 2, 8, 4, 9, 1, 8, 2, 8, 9},
            memoryTotal:   10,
            previousState: database.WorkspaceAgentMonitorStateOK,
            expectState:   database.WorkspaceAgentMonitorStateNOK,
            shouldNotify:  true,
        },
        {
            name:          "WhenNOK/NeverExceedsThreshold",
            memoryUsage:   []int64{2, 3, 2, 4, 2, 3, 2, 1, 2, 3, 4, 4, 1, 2, 3, 1, 2},
            memoryTotal:   10,
            previousState: database.WorkspaceAgentMonitorStateNOK,
            expectState:   database.WorkspaceAgentMonitorStateOK,
            shouldNotify:  false,
        },
        {
            name:          "WhenNOK/ShouldStayInNOK",
            memoryUsage:   []int64{9, 3, 2, 4, 2, 3, 2, 1, 2, 3, 4, 4, 1, 2, 3, 1, 2},
            memoryTotal:   10,
            previousState: database.WorkspaceAgentMonitorStateNOK,
            expectState:   database.WorkspaceAgentMonitorStateNOK,
            shouldNotify:  false,
        },
        {
            name:          "WhenNOK/ConsecutiveExceedsThreshold",
            memoryUsage:   []int64{2, 3, 2, 4, 2, 3, 2, 1, 2, 3, 4, 4, 1, 8, 9, 8, 9},
            memoryTotal:   10,
            previousState: database.WorkspaceAgentMonitorStateNOK,
            expectState:   database.WorkspaceAgentMonitorStateNOK,
            shouldNotify:  false,
        },
        {
            name:          "WhenNOK/MinimumExceedsThreshold",
            memoryUsage:   []int64{2, 8, 2, 9, 2, 8, 2, 9, 2, 8, 4, 9, 1, 8, 2, 8, 9},
            memoryTotal:   10,
            previousState: database.WorkspaceAgentMonitorStateNOK,
            expectState:   database.WorkspaceAgentMonitorStateNOK,
            shouldNotify:  false,
        },
    }

    for _, tt := range tests {
        tt := tt

        t.Run(tt.name, func(t *testing.T) {
            t.Parallel()

            api, user, clock, notifyEnq := resourceMonitorAPI(t)

            datapoints := make([]*agentproto.PushResourcesMonitoringUsageRequest_Datapoint, 0, len(tt.memoryUsage))
            collectedAt := clock.Now()
            for _, usage := range tt.memoryUsage {
                collectedAt = collectedAt.Add(15 * time.Second)
                datapoints = append(datapoints, &agentproto.PushResourcesMonitoringUsageRequest_Datapoint{
                    CollectedAt: timestamppb.New(collectedAt),
                    Memory: &agentproto.PushResourcesMonitoringUsageRequest_Datapoint_MemoryUsage{
                        Used:  usage,
                        Total: tt.memoryTotal,
                    },
                })
            }

            dbgen.WorkspaceAgentMemoryResourceMonitor(t, api.Database, database.WorkspaceAgentMemoryResourceMonitor{
                AgentID:   api.AgentID,
                State:     tt.previousState,
                Threshold: 80,
            })

            clock.Set(collectedAt)
            _, err := api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
                Datapoints: datapoints,
            })
            require.NoError(t, err)

            sent := notifyEnq.Sent(notificationstest.WithTemplateID(notifications.TemplateWorkspaceOutOfMemory))
            if tt.shouldNotify {
                require.Len(t, sent, 1)
                require.Equal(t, user.ID, sent[0].UserID)
            } else {
                require.Len(t, sent, 0)
            }
        })
    }
}

func TestMemoryResourceMonitorMissingData(t *testing.T) {
    t.Parallel()

    t.Run("UnknownPreventsMovingIntoAlertState", func(t *testing.T) {
        t.Parallel()

        api, _, clock, notifyEnq := resourceMonitorAPI(t)
        api.Config.Alert.ConsecutiveNOKsPercent = 50
        api.Config.Alert.MinimumNOKsPercent = 100

        // Given: A monitor in an OK state.
        dbgen.WorkspaceAgentMemoryResourceMonitor(t, api.Database, database.WorkspaceAgentMemoryResourceMonitor{
            AgentID:   api.AgentID,
            State:     database.WorkspaceAgentMonitorStateOK,
            Threshold: 80,
        })

        // When: A datapoint is missing, surrounded by two NOK datapoints.
        _, err := api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
            Datapoints: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint{
                {
                    CollectedAt: timestamppb.New(clock.Now()),
                    Memory: &agentproto.PushResourcesMonitoringUsageRequest_Datapoint_MemoryUsage{
                        Used:  10,
                        Total: 10,
                    },
                },
                {
                    CollectedAt: timestamppb.New(clock.Now().Add(10 * time.Second)),
                    Memory:      nil,
                },
                {
                    CollectedAt: timestamppb.New(clock.Now().Add(20 * time.Second)),
                    Memory: &agentproto.PushResourcesMonitoringUsageRequest_Datapoint_MemoryUsage{
                        Used:  10,
                        Total: 10,
                    },
                },
            },
        })
        require.NoError(t, err)

        // Then: We expect no notifications, as this unknown prevents us knowing we should alert.
        sent := notifyEnq.Sent(notificationstest.WithTemplateID(notifications.TemplateWorkspaceOutOfMemory))
        require.Len(t, sent, 0)

        // Then: We expect the monitor to still be in an OK state.
        monitor, err := api.Database.FetchMemoryResourceMonitorsByAgentID(context.Background(), api.AgentID)
        require.NoError(t, err)
        require.Equal(t, database.WorkspaceAgentMonitorStateOK, monitor.State)
    })

    t.Run("UnknownPreventsMovingOutOfAlertState", func(t *testing.T) {
        t.Parallel()

        api, _, clock, _ := resourceMonitorAPI(t)
        api.Config.Alert.ConsecutiveNOKsPercent = 50
        api.Config.Alert.MinimumNOKsPercent = 100

        // Given: A monitor in a NOK state.
        dbgen.WorkspaceAgentMemoryResourceMonitor(t, api.Database, database.WorkspaceAgentMemoryResourceMonitor{
            AgentID:   api.AgentID,
            State:     database.WorkspaceAgentMonitorStateNOK,
            Threshold: 80,
        })

        // When: A datapoint is missing, surrounded by two OK datapoints.
        _, err := api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
            Datapoints: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint{
                {
                    CollectedAt: timestamppb.New(clock.Now()),
                    Memory: &agentproto.PushResourcesMonitoringUsageRequest_Datapoint_MemoryUsage{
                        Used:  1,
                        Total: 10,
                    },
                },
                {
                    CollectedAt: timestamppb.New(clock.Now().Add(10 * time.Second)),
                    Memory:      nil,
                },
                {
                    CollectedAt: timestamppb.New(clock.Now().Add(20 * time.Second)),
                    Memory: &agentproto.PushResourcesMonitoringUsageRequest_Datapoint_MemoryUsage{
                        Used:  1,
                        Total: 10,
                    },
                },
            },
        })
        require.NoError(t, err)

        // Then: We expect the monitor to still be in a NOK state.
        monitor, err := api.Database.FetchMemoryResourceMonitorsByAgentID(context.Background(), api.AgentID)
        require.NoError(t, err)
        require.Equal(t, database.WorkspaceAgentMonitorStateNOK, monitor.State)
    })
}

func TestVolumeResourceMonitorDebounce(t *testing.T) {
    t.Parallel()

    // This test is an even longer one. We're testing
    // that the debounce logic is independent per
    // volume monitor. We interleave the triggering
    // of each monitor to ensure the debounce logic
    // is monitor independent.
    //
    // First Monitor:
    // 1. OK -> NOK  |> sends a notification
    // 2. NOK -> OK  |> does nothing
    // 3. OK -> NOK  |> does nothing due to debounce period
    // 4. NOK -> OK  |> does nothing
    // 5. OK -> NOK  |> sends a notification as debounce period exceeded
    // 6. NOK -> OK  |> does nothing
    //
    // Second Monitor:
    // 1. OK -> OK   |> does nothing
    // 2. OK -> NOK  |> sends a notification
    // 3. NOK -> OK  |> does nothing
    // 4. OK -> NOK  |> does nothing due to debounce period
    // 5. NOK -> OK  |> does nothing
    // 6. OK -> NOK  |> sends a notification as debounce period exceeded
    //

    firstVolumePath := "/home/coder"
    secondVolumePath := "/dev/coder"

    api, _, clock, notifyEnq := resourceMonitorAPI(t)

    // Given:
    // - First monitor in an OK state
    // - Second monitor in an OK state
    dbgen.WorkspaceAgentVolumeResourceMonitor(t, api.Database, database.WorkspaceAgentVolumeResourceMonitor{
        AgentID:   api.AgentID,
        Path:      firstVolumePath,
        State:     database.WorkspaceAgentMonitorStateOK,
        Threshold: 80,
    })
    dbgen.WorkspaceAgentVolumeResourceMonitor(t, api.Database, database.WorkspaceAgentVolumeResourceMonitor{
        AgentID:   api.AgentID,
        Path:      secondVolumePath,
        State:     database.WorkspaceAgentMonitorStateNOK,
        Threshold: 80,
    })

    // When:
    // - First monitor is in a NOK state
    // - Second monitor is in an OK state
    _, err := api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
        Datapoints: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint{
            {
                CollectedAt: timestamppb.New(clock.Now()),
                Volumes: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint_VolumeUsage{
                    {Volume: firstVolumePath, Used: 10, Total: 10},
                    {Volume: secondVolumePath, Used: 1, Total: 10},
                },
            },
        },
    })
    require.NoError(t, err)

    // Then:
    // - We expect a notification from only the first monitor
    sent := notifyEnq.Sent(notificationstest.WithTemplateID(notifications.TemplateWorkspaceOutOfDisk))
    require.Len(t, sent, 1)
    volumes := requireVolumeData(t, sent[0])
    require.Len(t, volumes, 1)
    require.Equal(t, firstVolumePath, volumes[0]["path"])
    notifyEnq.Clear()

    // When:
    // - First monitor moves back to OK
    // - Second monitor moves to NOK
    clock.Advance(api.Debounce / 4)
    _, err = api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
        Datapoints: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint{
            {
                CollectedAt: timestamppb.New(clock.Now()),
                Volumes: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint_VolumeUsage{
                    {Volume: firstVolumePath, Used: 1, Total: 10},
                    {Volume: secondVolumePath, Used: 10, Total: 10},
                },
            },
        },
    })
    require.NoError(t, err)

    // Then:
    // - We expect a notification from only the second monitor
    sent = notifyEnq.Sent(notificationstest.WithTemplateID(notifications.TemplateWorkspaceOutOfDisk))
    require.Len(t, sent, 1)
    volumes = requireVolumeData(t, sent[0])
    require.Len(t, volumes, 1)
    require.Equal(t, secondVolumePath, volumes[0]["path"])
    notifyEnq.Clear()

    // When:
    // - First monitor moves back to NOK before debounce period has ended
    // - Second monitor moves back to OK
    clock.Advance(api.Debounce / 4)
    _, err = api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
        Datapoints: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint{
            {
                CollectedAt: timestamppb.New(clock.Now()),
                Volumes: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint_VolumeUsage{
                    {Volume: firstVolumePath, Used: 10, Total: 10},
                    {Volume: secondVolumePath, Used: 1, Total: 10},
                },
            },
        },
    })
    require.NoError(t, err)

    // Then:
    // - We expect no new notifications
    sent = notifyEnq.Sent(notificationstest.WithTemplateID(notifications.TemplateWorkspaceOutOfDisk))
    require.Len(t, sent, 0)
    notifyEnq.Clear()

    // When:
    // - First monitor moves back to OK
    // - Second monitor moves back to NOK
    clock.Advance(api.Debounce / 4)
    _, err = api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
        Datapoints: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint{
            {
                CollectedAt: timestamppb.New(clock.Now()),
                Volumes: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint_VolumeUsage{
                    {Volume: firstVolumePath, Used: 1, Total: 10},
                    {Volume: secondVolumePath, Used: 10, Total: 10},
                },
            },
        },
    })
    require.NoError(t, err)

    // Then:
    // - We expect no new notifications.
    sent = notifyEnq.Sent(notificationstest.WithTemplateID(notifications.TemplateWorkspaceOutOfDisk))
    require.Len(t, sent, 0)
    notifyEnq.Clear()

    // When:
    // - First monitor moves back to a NOK state after the debounce period
    // - Second monitor moves back to OK
    clock.Advance(api.Debounce/4 + 1*time.Second)
    _, err = api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
        Datapoints: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint{
            {
                CollectedAt: timestamppb.New(clock.Now()),
                Volumes: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint_VolumeUsage{
                    {Volume: firstVolumePath, Used: 10, Total: 10},
                    {Volume: secondVolumePath, Used: 1, Total: 10},
                },
            },
        },
    })
    require.NoError(t, err)

    // Then:
    // - We expect a notification from only the first monitor
    sent = notifyEnq.Sent(notificationstest.WithTemplateID(notifications.TemplateWorkspaceOutOfDisk))
    require.Len(t, sent, 1)
    volumes = requireVolumeData(t, sent[0])
    require.Len(t, volumes, 1)
    require.Equal(t, firstVolumePath, volumes[0]["path"])
    notifyEnq.Clear()

    // When:
    // - First monitor moves back to OK
    // - Second monitor moves back to NOK after the debounce period
    clock.Advance(api.Debounce/4 + 1*time.Second)
    _, err = api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
        Datapoints: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint{
            {
                CollectedAt: timestamppb.New(clock.Now()),
                Volumes: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint_VolumeUsage{
                    {Volume: firstVolumePath, Used: 1, Total: 10},
                    {Volume: secondVolumePath, Used: 10, Total: 10},
                },
            },
        },
    })
    require.NoError(t, err)

    // Then:
    // - We expect a notification from only the second monitor
    sent = notifyEnq.Sent(notificationstest.WithTemplateID(notifications.TemplateWorkspaceOutOfDisk))
    require.Len(t, sent, 1)
    volumes = requireVolumeData(t, sent[0])
    require.Len(t, volumes, 1)
    require.Equal(t, secondVolumePath, volumes[0]["path"])
}

func TestVolumeResourceMonitor(t *testing.T) {
    t.Parallel()

    tests := []struct {
        name             string
        volumePath       string
        volumeUsage      []int64
        volumeTotal      int64
        thresholdPercent int32
        previousState    database.WorkspaceAgentMonitorState
        expectState      database.WorkspaceAgentMonitorState
        shouldNotify     bool
    }{
        {
            name:             "WhenOK/NeverExceedsThreshold",
            volumePath:       "/home/coder",
            volumeUsage:      []int64{2, 3, 2, 4, 2, 3, 2, 1, 2, 3, 4, 4, 1, 2, 3, 1, 2},
            volumeTotal:      10,
            thresholdPercent: 80,
            previousState:    database.WorkspaceAgentMonitorStateOK,
            expectState:      database.WorkspaceAgentMonitorStateOK,
            shouldNotify:     false,
        },
        {
            name:             "WhenOK/ShouldStayInOK",
            volumePath:       "/home/coder",
            volumeUsage:      []int64{9, 3, 2, 4, 2, 3, 2, 1, 2, 3, 4, 4, 1, 2, 3, 1, 2},
            volumeTotal:      10,
            thresholdPercent: 80,
            previousState:    database.WorkspaceAgentMonitorStateOK,
            expectState:     database.WorkspaceAgentMonitorStateOK,
            shouldNotify:     false,
        },
        {
            name:             "WhenOK/ConsecutiveExceedsThreshold",
            volumePath:       "/home/coder",
            volumeUsage:      []int64{2, 3, 2, 4, 2, 3, 2, 1, 2, 3, 4, 4, 1, 8, 9, 8, 9},
            volumeTotal:      10,
            thresholdPercent: 80,
            previousState:    database.WorkspaceAgentMonitorStateOK,
            expectState:      database.WorkspaceAgentMonitorStateNOK,
            shouldNotify:     true,
        },
        {
            name:             "WhenOK/MinimumExceedsThreshold",
            volumePath:       "/home/coder",
            volumeUsage:      []int64{2, 8, 2, 9, 2, 8, 2, 9, 2, 8, 4, 9, 1, 8, 2, 8, 9},
            volumeTotal:      10,
            thresholdPercent: 80,
            previousState:    database.WorkspaceAgentMonitorStateOK,
            expectState:      database.WorkspaceAgentMonitorStateNOK,
            shouldNotify:     true,
        },
        {
            name:             "WhenNOK/NeverExceedsThreshold",
            volumePath:       "/home/coder",
            volumeUsage:      []int64{2, 3, 2, 4, 2, 3, 2, 1, 2, 3, 4, 4, 1, 2, 3, 1, 2},
            volumeTotal:      10,
            thresholdPercent: 80,
            previousState:    database.WorkspaceAgentMonitorStateNOK,
            expectState:      database.WorkspaceAgentMonitorStateOK,
            shouldNotify:     false,
        },
        {
            name:             "WhenNOK/ShouldStayInNOK",
            volumePath:       "/home/coder",
            volumeUsage:      []int64{9, 3, 2, 4, 2, 3, 2, 1, 2, 3, 4, 4, 1, 2, 3, 1, 2},
            volumeTotal:      10,
            thresholdPercent: 80,
            previousState:    database.WorkspaceAgentMonitorStateNOK,
            expectState:      database.WorkspaceAgentMonitorStateNOK,
            shouldNotify:     false,
        },
        {
            name:             "WhenNOK/ConsecutiveExceedsThreshold",
            volumePath:       "/home/coder",
            volumeUsage:      []int64{2, 3, 2, 4, 2, 3, 2, 1, 2, 3, 4, 4, 1, 8, 9, 8, 9},
            volumeTotal:      10,
            thresholdPercent: 80,
            previousState:    database.WorkspaceAgentMonitorStateNOK,
            expectState:      database.WorkspaceAgentMonitorStateNOK,
            shouldNotify:     false,
        },
        {
            name:             "WhenNOK/MinimumExceedsThreshold",
            volumePath:       "/home/coder",
            volumeUsage:      []int64{2, 8, 2, 9, 2, 8, 2, 9, 2, 8, 4, 9, 1, 8, 2, 8, 9},
            volumeTotal:      10,
            thresholdPercent: 80,
            previousState:    database.WorkspaceAgentMonitorStateNOK,
            expectState:      database.WorkspaceAgentMonitorStateNOK,
            shouldNotify:     false,
        },
    }

    for _, tt := range tests {
        tt := tt

        t.Run(tt.name, func(t *testing.T) {
            t.Parallel()

            api, user, clock, notifyEnq := resourceMonitorAPI(t)

            datapoints := make([]*agentproto.PushResourcesMonitoringUsageRequest_Datapoint, 0, len(tt.volumeUsage))
            collectedAt := clock.Now()
            for _, volumeUsage := range tt.volumeUsage {
                collectedAt = collectedAt.Add(15 * time.Second)

                volumeDatapoints := []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint_VolumeUsage{
                    {
                        Volume: tt.volumePath,
                        Used:   volumeUsage,
                        Total:  tt.volumeTotal,
                    },
                }

                datapoints = append(datapoints, &agentproto.PushResourcesMonitoringUsageRequest_Datapoint{
                    CollectedAt: timestamppb.New(collectedAt),
                    Volumes:     volumeDatapoints,
                })
            }

            dbgen.WorkspaceAgentVolumeResourceMonitor(t, api.Database, database.WorkspaceAgentVolumeResourceMonitor{
                AgentID:   api.AgentID,
                Path:      tt.volumePath,
                State:     tt.previousState,
                Threshold: tt.thresholdPercent,
            })

            clock.Set(collectedAt)
            _, err := api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
                Datapoints: datapoints,
            })
            require.NoError(t, err)

            sent := notifyEnq.Sent(notificationstest.WithTemplateID(notifications.TemplateWorkspaceOutOfDisk))
            if tt.shouldNotify {
                require.Len(t, sent, 1)
                require.Equal(t, user.ID, sent[0].UserID)
            } else {
                require.Len(t, sent, 0)
            }
        })
    }
}

func TestVolumeResourceMonitorMultiple(t *testing.T) {
    t.Parallel()

    api, _, clock, notifyEnq := resourceMonitorAPI(t)
    api.Config.Alert.ConsecutiveNOKsPercent = 100

    // Given: two different volume resource monitors
    dbgen.WorkspaceAgentVolumeResourceMonitor(t, api.Database, database.WorkspaceAgentVolumeResourceMonitor{
        AgentID:   api.AgentID,
        Path:      "/home/coder",
        State:     database.WorkspaceAgentMonitorStateOK,
        Threshold: 80,
    })

    dbgen.WorkspaceAgentVolumeResourceMonitor(t, api.Database, database.WorkspaceAgentVolumeResourceMonitor{
        AgentID:   api.AgentID,
        Path:      "/dev/coder",
        State:     database.WorkspaceAgentMonitorStateOK,
        Threshold: 80,
    })

    // When: both of them move to a NOK state
    _, err := api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
        Datapoints: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint{
            {
                CollectedAt: timestamppb.New(clock.Now()),
                Volumes: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint_VolumeUsage{
                    {
                        Volume: "/home/coder",
                        Used:   10,
                        Total:  10,
                    },
                    {
                        Volume: "/dev/coder",
                        Used:   10,
                        Total:  10,
                    },
                },
            },
        },
    })
    require.NoError(t, err)

    // Then: We expect a notification to alert with information about both
    sent := notifyEnq.Sent(notificationstest.WithTemplateID(notifications.TemplateWorkspaceOutOfDisk))
    require.Len(t, sent, 1)

    volumes := requireVolumeData(t, sent[0])
    require.Len(t, volumes, 2)
    require.Equal(t, "/home/coder", volumes[0]["path"])
    require.Equal(t, "/dev/coder", volumes[1]["path"])
}

func TestVolumeResourceMonitorMissingData(t *testing.T) {
    t.Parallel()

    t.Run("UnknownPreventsMovingIntoAlertState", func(t *testing.T) {
        t.Parallel()

        volumePath := "/home/coder"

        api, _, clock, notifyEnq := resourceMonitorAPI(t)
        api.Config.Alert.ConsecutiveNOKsPercent = 50
        api.Config.Alert.MinimumNOKsPercent = 100

        // Given: A monitor in an OK state.
        dbgen.WorkspaceAgentVolumeResourceMonitor(t, api.Database, database.WorkspaceAgentVolumeResourceMonitor{
            AgentID:   api.AgentID,
            Path:      volumePath,
            State:     database.WorkspaceAgentMonitorStateOK,
            Threshold: 80,
        })

        // When: A datapoint is missing, surrounded by two NOK datapoints.
        _, err := api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
            Datapoints: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint{
                {
                    CollectedAt: timestamppb.New(clock.Now()),
                    Volumes: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint_VolumeUsage{
                        {
                            Volume: volumePath,
                            Used:   10,
                            Total:  10,
                        },
                    },
                },
                {
                    CollectedAt: timestamppb.New(clock.Now().Add(10 * time.Second)),
                    Volumes:     []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint_VolumeUsage{},
                },
                {
                    CollectedAt: timestamppb.New(clock.Now().Add(20 * time.Second)),
                    Volumes: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint_VolumeUsage{
                        {
                            Volume: volumePath,
                            Used:   10,
                            Total:  10,
                        },
                    },
                },
            },
        })
        require.NoError(t, err)

        // Then: We expect no notifications, as this unknown prevents us knowing we should alert.
        sent := notifyEnq.Sent(notificationstest.WithTemplateID(notifications.TemplateWorkspaceOutOfDisk))
        require.Len(t, sent, 0)

        // Then: We expect the monitor to still be in an OK state.
        monitors, err := api.Database.FetchVolumesResourceMonitorsByAgentID(context.Background(), api.AgentID)
        require.NoError(t, err)
        require.Len(t, monitors, 1)
        require.Equal(t, database.WorkspaceAgentMonitorStateOK, monitors[0].State)
    })

    t.Run("UnknownPreventsMovingOutOfAlertState", func(t *testing.T) {
        t.Parallel()

        volumePath := "/home/coder"

        api, _, clock, _ := resourceMonitorAPI(t)
        api.Config.Alert.ConsecutiveNOKsPercent = 50
        api.Config.Alert.MinimumNOKsPercent = 100

        // Given: A monitor in a NOK state.
        dbgen.WorkspaceAgentVolumeResourceMonitor(t, api.Database, database.WorkspaceAgentVolumeResourceMonitor{
            AgentID:   api.AgentID,
            Path:      volumePath,
            State:     database.WorkspaceAgentMonitorStateNOK,
            Threshold: 80,
        })

        // When: A datapoint is missing, surrounded by two OK datapoints.
        _, err := api.PushResourcesMonitoringUsage(context.Background(), &agentproto.PushResourcesMonitoringUsageRequest{
            Datapoints: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint{
                {
                    CollectedAt: timestamppb.New(clock.Now()),
                    Volumes: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint_VolumeUsage{
                        {
                            Volume: volumePath,
                            Used:   1,
                            Total:  10,
                        },
                    },
                },
                {
                    CollectedAt: timestamppb.New(clock.Now().Add(10 * time.Second)),
                    Volumes:     []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint_VolumeUsage{},
                },
                {
                    CollectedAt: timestamppb.New(clock.Now().Add(20 * time.Second)),
                    Volumes: []*agentproto.PushResourcesMonitoringUsageRequest_Datapoint_VolumeUsage{
                        {
                            Volume: volumePath,
                            Used:   1,
                            Total:  10,
                        },
                    },
                },
            },
        })
        require.NoError(t, err)

        // Then: We expect the monitor to still be in a NOK state.
        monitors, err := api.Database.FetchVolumesResourceMonitorsByAgentID(context.Background(), api.AgentID)
        require.NoError(t, err)
        require.Len(t, monitors, 1)
        require.Equal(t, database.WorkspaceAgentMonitorStateNOK, monitors[0].State)
    })
}

func requireVolumeData(t *testing.T, notif *notificationstest.FakeNotification) []map[string]any {
    t.Helper()

    volumesData := notif.Data["volumes"]
    require.IsType(t, []map[string]any{}, volumesData)

    return volumesData.([]map[string]any)
}
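The debounce tests above pin down the intended behaviour: only an OK -> NOK transition notifies, and only once per debounce window. A minimal, hypothetical standalone sketch of that decision (the real `Debounce` method lives on the database monitor models and may differ in detail):

```go
package main

import (
	"fmt"
	"time"
)

type state string

const (
	stateOK  state = "OK"
	stateNOK state = "NOK"
)

// debounce mirrors the behaviour the tests exercise: notify only on an
// OK -> NOK transition that lands after the previously stored window end.
func debounce(window time.Duration, now, storedUntil time.Time, oldState, newState state) (time.Time, bool) {
	if oldState == stateOK && newState == stateNOK && now.After(storedUntil) {
		return now.Add(window), true // open a new window and notify
	}
	return storedUntil, false // keep the old window; stay silent
}

func main() {
	var until time.Time
	now := time.Now()

	until, notify := debounce(time.Minute, now, until, stateOK, stateNOK)
	fmt.Println(notify) // true: first alert

	_, notify = debounce(time.Minute, now.Add(30*time.Second), until, stateOK, stateNOK)
	fmt.Println(notify) // false: still inside the window

	_, notify = debounce(time.Minute, now.Add(61*time.Second), until, stateOK, stateNOK)
	fmt.Println(notify) // true: window has elapsed
}
```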
coderd/agentapi/resourcesmonitor/resources_monitor.go (new file, 129 lines)
@@ -0,0 +1,129 @@
package resourcesmonitor

import (
    "math"
    "time"

    "github.com/coder/coder/v2/agent/proto"
    "github.com/coder/coder/v2/coderd/database"
    "github.com/coder/coder/v2/coderd/util/slice"
)

type State int

const (
    StateOK State = iota
    StateNOK
    StateUnknown
)

type AlertConfig struct {
    // What percentage of datapoints in a row are
    // required to put the monitor in an alert state.
    ConsecutiveNOKsPercent int

    // What percentage of datapoints in a window are
    // required to put the monitor in an alert state.
    MinimumNOKsPercent int
}

type Config struct {
    // How many datapoints should the agent send.
    NumDatapoints int32

    // How long each interval between datapoint
    // collections should be.
    CollectionInterval time.Duration

    Alert AlertConfig
}

func CalculateMemoryUsageStates(
    monitor database.WorkspaceAgentMemoryResourceMonitor,
    datapoints []*proto.PushResourcesMonitoringUsageRequest_Datapoint_MemoryUsage,
) []State {
    states := make([]State, 0, len(datapoints))

    for _, datapoint := range datapoints {
        state := StateUnknown

        if datapoint != nil {
            percent := int32(float64(datapoint.Used) / float64(datapoint.Total) * 100)

            if percent < monitor.Threshold {
                state = StateOK
            } else {
                state = StateNOK
            }
        }

        states = append(states, state)
    }

    return states
}

func CalculateVolumeUsageStates(
    monitor database.WorkspaceAgentVolumeResourceMonitor,
    datapoints []*proto.PushResourcesMonitoringUsageRequest_Datapoint_VolumeUsage,
) []State {
    states := make([]State, 0, len(datapoints))

    for _, datapoint := range datapoints {
        state := StateUnknown

        if datapoint != nil {
            percent := int32(float64(datapoint.Used) / float64(datapoint.Total) * 100)

            if percent < monitor.Threshold {
                state = StateOK
            } else {
                state = StateNOK
            }
        }

        states = append(states, state)
    }

    return states
}

func NextState(c Config, oldState database.WorkspaceAgentMonitorState, states []State) database.WorkspaceAgentMonitorState {
    // If there are enough consecutive NOK states, we should be in an
    // alert state.
    consecutiveNOKs := slice.CountConsecutive(StateNOK, states...)
    if percent(consecutiveNOKs, len(states)) >= c.Alert.ConsecutiveNOKsPercent {
        return database.WorkspaceAgentMonitorStateNOK
    }

    // We do not explicitly handle StateUnknown because it could have
    // been either StateOK or StateNOK if collection didn't fail. As
    // it could be either, our best bet is to ignore it.
    nokCount, okCount := 0, 0
    for _, state := range states {
        switch state {
        case StateOK:
            okCount++
        case StateNOK:
            nokCount++
        }
    }

    // If there are enough NOK datapoints, we should be in an alert state.
    if percent(nokCount, len(states)) >= c.Alert.MinimumNOKsPercent {
        return database.WorkspaceAgentMonitorStateNOK
    }

    // If all datapoints are OK, we should be in an OK state
    if okCount == len(states) {
        return database.WorkspaceAgentMonitorStateOK
    }

    // Otherwise we stay in the same state as last.
    return oldState
}

func percent[T int](numerator, denominator T) int {
    percent := float64(numerator*100) / float64(denominator)
    return int(math.Round(percent))
}
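To make the interplay of the two alert thresholds concrete, here is an illustrative test-style walkthrough (assuming `slice.CountConsecutive` returns the longest run of the given value): with 4 of 10 datapoints NOK but a longest run of only 2, the 50% consecutive threshold is not met, yet the 20% minimum threshold is, so the monitor still transitions to NOK.

```go
package resourcesmonitor_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/coder/coder/v2/coderd/agentapi/resourcesmonitor"
	"github.com/coder/coder/v2/coderd/database"
)

func TestNextStateMinimumThreshold(t *testing.T) {
	cfg := resourcesmonitor.Config{
		Alert: resourcesmonitor.AlertConfig{
			ConsecutiveNOKsPercent: 50,
			MinimumNOKsPercent:     20,
		},
	}

	// 4/10 NOK (40%), longest run of 2 (20%): below the consecutive
	// threshold, at or above the minimum threshold.
	ok, nok := resourcesmonitor.StateOK, resourcesmonitor.StateNOK
	states := []resourcesmonitor.State{ok, nok, nok, ok, ok, nok, ok, nok, ok, ok}

	next := resourcesmonitor.NextState(cfg, database.WorkspaceAgentMonitorStateOK, states)
	require.Equal(t, database.WorkspaceAgentMonitorStateNOK, next)
}
```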
coderd/apidoc/docs.go (generated, 44 lines changed)
@@ -1787,6 +1787,25 @@ const docTemplate = `{
            }
        }
    },
+   "/notifications/test": {
+       "post": {
+           "security": [
+               {
+                   "CoderSessionToken": []
+               }
+           ],
+           "tags": [
+               "Notifications"
+           ],
+           "summary": "Send a test notification",
+           "operationId": "send-a-test-notification",
+           "responses": {
+               "200": {
+                   "description": "OK"
+               }
+           }
+       }
+   },
    "/oauth2-provider/apps": {
        "get": {
            "security": [
@@ -10151,7 +10170,11 @@ const docTemplate = `{
        "login",
        "logout",
        "register",
-       "request_password_reset"
+       "request_password_reset",
+       "connect",
+       "disconnect",
+       "open",
+       "close"
    ],
    "x-enum-varnames": [
        "AuditActionCreate",
@@ -10162,7 +10185,11 @@ const docTemplate = `{
        "AuditActionLogin",
        "AuditActionLogout",
        "AuditActionRegister",
-       "AuditActionRequestPasswordReset"
+       "AuditActionRequestPasswordReset",
+       "AuditActionConnect",
+       "AuditActionDisconnect",
+       "AuditActionOpen",
+       "AuditActionClose"
    ]
},
"codersdk.AuditDiff": {
@@ -10823,6 +10850,10 @@ const docTemplate = `{
        "type": "string",
        "format": "uuid"
    },
+   "request_id": {
+       "type": "string",
+       "format": "uuid"
+   },
    "resource_id": {
        "type": "string",
        "format": "uuid"
@@ -11798,6 +11829,7 @@ const docTemplate = `{
        "format": "date-time"
    },
    "public_key": {
+       "description": "PublicKey is the SSH public key in OpenSSH format.\nExample: \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAID3OmYJvT7q1cF1azbybYy0OZ9yrXfA+M6Lr4vzX5zlp\\n\"\nNote: The key includes a trailing newline (\\n).",
        "type": "string"
    },
    "updated_at": {
@@ -13927,7 +13959,9 @@ const docTemplate = `{
        "notification_template",
        "idp_sync_settings_organization",
        "idp_sync_settings_group",
-       "idp_sync_settings_role"
+       "idp_sync_settings_role",
+       "workspace_agent",
+       "workspace_app"
    ],
    "x-enum-varnames": [
        "ResourceTypeTemplate",
@@ -13951,7 +13985,9 @@ const docTemplate = `{
        "ResourceTypeNotificationTemplate",
        "ResourceTypeIdpSyncSettingsOrganization",
        "ResourceTypeIdpSyncSettingsGroup",
-       "ResourceTypeIdpSyncSettingsRole"
+       "ResourceTypeIdpSyncSettingsRole",
+       "ResourceTypeWorkspaceAgent",
+       "ResourceTypeWorkspaceApp"
    ]
},
"codersdk.Response": {
coderd/apidoc/swagger.json (generated, 42 lines changed)
@@ -1554,6 +1554,23 @@
            }
        }
    },
+   "/notifications/test": {
+       "post": {
+           "security": [
+               {
+                   "CoderSessionToken": []
+               }
+           ],
+           "tags": ["Notifications"],
+           "summary": "Send a test notification",
+           "operationId": "send-a-test-notification",
+           "responses": {
+               "200": {
+                   "description": "OK"
+               }
+           }
+       }
+   },
    "/oauth2-provider/apps": {
        "get": {
            "security": [
@@ -9015,7 +9032,11 @@
        "login",
        "logout",
        "register",
-       "request_password_reset"
+       "request_password_reset",
+       "connect",
+       "disconnect",
+       "open",
+       "close"
    ],
    "x-enum-varnames": [
        "AuditActionCreate",
@@ -9026,7 +9047,11 @@
        "AuditActionLogin",
        "AuditActionLogout",
        "AuditActionRegister",
-       "AuditActionRequestPasswordReset"
+       "AuditActionRequestPasswordReset",
+       "AuditActionConnect",
+       "AuditActionDisconnect",
+       "AuditActionOpen",
+       "AuditActionClose"
    ]
},
"codersdk.AuditDiff": {
@@ -9636,6 +9661,10 @@
        "type": "string",
        "format": "uuid"
    },
+   "request_id": {
+       "type": "string",
+       "format": "uuid"
+   },
    "resource_id": {
        "type": "string",
        "format": "uuid"
@@ -10589,6 +10618,7 @@
        "format": "date-time"
    },
    "public_key": {
+       "description": "PublicKey is the SSH public key in OpenSSH format.\nExample: \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAID3OmYJvT7q1cF1azbybYy0OZ9yrXfA+M6Lr4vzX5zlp\\n\"\nNote: The key includes a trailing newline (\\n).",
        "type": "string"
    },
    "updated_at": {
@@ -12612,7 +12642,9 @@
        "notification_template",
        "idp_sync_settings_organization",
        "idp_sync_settings_group",
-       "idp_sync_settings_role"
+       "idp_sync_settings_role",
+       "workspace_agent",
+       "workspace_app"
    ],
    "x-enum-varnames": [
        "ResourceTypeTemplate",
@@ -12636,7 +12668,9 @@
        "ResourceTypeNotificationTemplate",
        "ResourceTypeIdpSyncSettingsOrganization",
        "ResourceTypeIdpSyncSettingsGroup",
-       "ResourceTypeIdpSyncSettingsRole"
+       "ResourceTypeIdpSyncSettingsRole",
+       "ResourceTypeWorkspaceAgent",
+       "ResourceTypeWorkspaceApp"
    ]
},
"codersdk.Response": {
@@ -159,7 +159,7 @@ func (api *API) generateFakeAuditLog(rw http.ResponseWriter, r *http.Request) {
        Diff:             diff,
        StatusCode:       http.StatusOK,
        AdditionalFields: params.AdditionalFields,
-       RequestID:        uuid.Nil, // no request ID to attach this to
+       RequestID:        params.RequestID,
        ResourceIcon:     "",
        OrganizationID:   params.OrganizationID,
    })
@@ -30,7 +30,9 @@ type Auditable interface {
        database.NotificationTemplate |
        idpsync.OrganizationSyncSettings |
        idpsync.GroupSyncSettings |
-       idpsync.RoleSyncSettings
+       idpsync.RoleSyncSettings |
+       database.WorkspaceAgent |
+       database.WorkspaceApp
}

// Map is a map of changed fields in an audited resource. It maps field names to
@ -128,6 +128,10 @@ func ResourceTarget[T Auditable](tgt T) string {
|
||||
return "Organization Group Sync"
|
||||
case idpsync.RoleSyncSettings:
|
||||
return "Organization Role Sync"
|
||||
case database.WorkspaceAgent:
|
||||
return typed.Name
|
||||
case database.WorkspaceApp:
|
||||
return typed.Slug
|
||||
default:
|
||||
panic(fmt.Sprintf("unknown resource %T for ResourceTarget", tgt))
|
||||
}
|
||||
@ -187,6 +191,10 @@ func ResourceID[T Auditable](tgt T) uuid.UUID {
|
||||
return noID // Org field on audit log has org id
|
||||
case idpsync.RoleSyncSettings:
|
||||
return noID // Org field on audit log has org id
|
||||
case database.WorkspaceAgent:
|
||||
return typed.ID
|
||||
case database.WorkspaceApp:
|
||||
return typed.ID
|
||||
default:
|
||||
panic(fmt.Sprintf("unknown resource %T for ResourceID", tgt))
|
||||
}
|
||||
@ -238,6 +246,10 @@ func ResourceType[T Auditable](tgt T) database.ResourceType {
|
||||
return database.ResourceTypeIdpSyncSettingsRole
|
||||
case idpsync.GroupSyncSettings:
|
||||
return database.ResourceTypeIdpSyncSettingsGroup
|
||||
case database.WorkspaceAgent:
|
||||
return database.ResourceTypeWorkspaceAgent
|
||||
case database.WorkspaceApp:
|
||||
return database.ResourceTypeWorkspaceApp
|
||||
default:
|
||||
panic(fmt.Sprintf("unknown resource %T for ResourceType", typed))
|
||||
}
|
||||
@ -291,6 +303,10 @@ func ResourceRequiresOrgID[T Auditable]() bool {
|
||||
return true
|
||||
case idpsync.RoleSyncSettings:
|
||||
return true
|
||||
case database.WorkspaceAgent:
|
||||
return true
|
||||
case database.WorkspaceApp:
|
||||
return true
|
||||
default:
|
||||
panic(fmt.Sprintf("unknown resource %T for ResourceRequiresOrgID", tgt))
|
||||
}
|
||||
|
@ -17,6 +17,8 @@ import (
    "github.com/coder/coder/v2/coderd/database"
    "github.com/coder/coder/v2/coderd/rbac"
    "github.com/coder/coder/v2/codersdk"
    "github.com/coder/coder/v2/provisioner/echo"
    "github.com/coder/coder/v2/provisionersdk/proto"
)

func TestAuditLogs(t *testing.T) {
@ -30,7 +32,8 @@ func TestAuditLogs(t *testing.T) {
    user := coderdtest.CreateFirstUser(t, client)

    err := client.CreateTestAuditLog(ctx, codersdk.CreateTestAuditLogRequest{
        ResourceID: user.UserID,
        ResourceID: user.UserID,
        OrganizationID: user.OrganizationID,
    })
    require.NoError(t, err)

@ -54,7 +57,8 @@ func TestAuditLogs(t *testing.T) {
    client2, user2 := coderdtest.CreateAnotherUser(t, client, user.OrganizationID, rbac.RoleOwner())

    err := client2.CreateTestAuditLog(ctx, codersdk.CreateTestAuditLogRequest{
        ResourceID: user2.ID,
        ResourceID: user2.ID,
        OrganizationID: user.OrganizationID,
    })
    require.NoError(t, err)

@ -123,6 +127,7 @@ func TestAuditLogs(t *testing.T) {
        ResourceType: codersdk.ResourceTypeWorkspaceBuild,
        ResourceID: workspace.LatestBuild.ID,
        AdditionalFields: wriBytes,
        OrganizationID: user.OrganizationID,
    })
    require.NoError(t, err)

@ -158,7 +163,8 @@ func TestAuditLogs(t *testing.T) {

    // Add an extra audit log in another organization
    err = client.CreateTestAuditLog(ctx, codersdk.CreateTestAuditLogRequest{
        ResourceID: owner.UserID,
        ResourceID: owner.UserID,
        OrganizationID: uuid.New(),
    })
    require.NoError(t, err)

@ -229,53 +235,102 @@ func TestAuditLogsFilter(t *testing.T) {
    ctx = context.Background()
    client = coderdtest.New(t, &coderdtest.Options{IncludeProvisionerDaemon: true})
    user = coderdtest.CreateFirstUser(t, client)
    version = coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, nil)
    version = coderdtest.CreateTemplateVersion(t, client, user.OrganizationID, completeWithAgentAndApp())
    template = coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
)

coderdtest.AwaitTemplateVersionJobCompleted(t, client, version.ID)
workspace := coderdtest.CreateWorkspace(t, client, template.ID)
workspace.LatestBuild = coderdtest.AwaitWorkspaceBuildJobCompleted(t, client, workspace.LatestBuild.ID)

// Create two logs with "Create"
err := client.CreateTestAuditLog(ctx, codersdk.CreateTestAuditLogRequest{
    Action: codersdk.AuditActionCreate,
    ResourceType: codersdk.ResourceTypeTemplate,
    ResourceID: template.ID,
    Time: time.Date(2022, 8, 15, 14, 30, 45, 100, time.UTC), // 2022-8-15 14:30:45
    OrganizationID: user.OrganizationID,
    Action: codersdk.AuditActionCreate,
    ResourceType: codersdk.ResourceTypeTemplate,
    ResourceID: template.ID,
    Time: time.Date(2022, 8, 15, 14, 30, 45, 100, time.UTC), // 2022-8-15 14:30:45
})
require.NoError(t, err)
err = client.CreateTestAuditLog(ctx, codersdk.CreateTestAuditLogRequest{
    Action: codersdk.AuditActionCreate,
    ResourceType: codersdk.ResourceTypeUser,
    ResourceID: user.UserID,
    Time: time.Date(2022, 8, 16, 14, 30, 45, 100, time.UTC), // 2022-8-16 14:30:45
    OrganizationID: user.OrganizationID,
    Action: codersdk.AuditActionCreate,
    ResourceType: codersdk.ResourceTypeUser,
    ResourceID: user.UserID,
    Time: time.Date(2022, 8, 16, 14, 30, 45, 100, time.UTC), // 2022-8-16 14:30:45
})
require.NoError(t, err)

// Create one log with "Delete"
err = client.CreateTestAuditLog(ctx, codersdk.CreateTestAuditLogRequest{
    Action: codersdk.AuditActionDelete,
    ResourceType: codersdk.ResourceTypeUser,
    ResourceID: user.UserID,
    Time: time.Date(2022, 8, 15, 14, 30, 45, 100, time.UTC), // 2022-8-15 14:30:45
    OrganizationID: user.OrganizationID,
    Action: codersdk.AuditActionDelete,
    ResourceType: codersdk.ResourceTypeUser,
    ResourceID: user.UserID,
    Time: time.Date(2022, 8, 15, 14, 30, 45, 100, time.UTC), // 2022-8-15 14:30:45
})
require.NoError(t, err)

// Create one log with "Start"
err = client.CreateTestAuditLog(ctx, codersdk.CreateTestAuditLogRequest{
    Action: codersdk.AuditActionStart,
    ResourceType: codersdk.ResourceTypeWorkspaceBuild,
    ResourceID: workspace.LatestBuild.ID,
    Time: time.Date(2022, 8, 15, 14, 30, 45, 100, time.UTC), // 2022-8-15 14:30:45
    OrganizationID: user.OrganizationID,
    Action: codersdk.AuditActionStart,
    ResourceType: codersdk.ResourceTypeWorkspaceBuild,
    ResourceID: workspace.LatestBuild.ID,
    Time: time.Date(2022, 8, 15, 14, 30, 45, 100, time.UTC), // 2022-8-15 14:30:45
})
require.NoError(t, err)

// Create one log with "Stop"
err = client.CreateTestAuditLog(ctx, codersdk.CreateTestAuditLogRequest{
    Action: codersdk.AuditActionStop,
    ResourceType: codersdk.ResourceTypeWorkspaceBuild,
    ResourceID: workspace.LatestBuild.ID,
    Time: time.Date(2022, 8, 15, 14, 30, 45, 100, time.UTC), // 2022-8-15 14:30:45
    OrganizationID: user.OrganizationID,
    Action: codersdk.AuditActionStop,
    ResourceType: codersdk.ResourceTypeWorkspaceBuild,
    ResourceID: workspace.LatestBuild.ID,
    Time: time.Date(2022, 8, 15, 14, 30, 45, 100, time.UTC), // 2022-8-15 14:30:45
})
require.NoError(t, err)

// Create one log with "Connect" and "Disconnect".
connectRequestID := uuid.New()
err = client.CreateTestAuditLog(ctx, codersdk.CreateTestAuditLogRequest{
    OrganizationID: user.OrganizationID,
    Action: codersdk.AuditActionConnect,
    RequestID: connectRequestID,
    ResourceType: codersdk.ResourceTypeWorkspaceAgent,
    ResourceID: workspace.LatestBuild.Resources[0].Agents[0].ID,
    Time: time.Date(2022, 8, 15, 14, 30, 45, 100, time.UTC), // 2022-8-15 14:30:45
})
require.NoError(t, err)

err = client.CreateTestAuditLog(ctx, codersdk.CreateTestAuditLogRequest{
    OrganizationID: user.OrganizationID,
    Action: codersdk.AuditActionDisconnect,
    RequestID: connectRequestID,
    ResourceType: codersdk.ResourceTypeWorkspaceAgent,
    ResourceID: workspace.LatestBuild.Resources[0].Agents[0].ID,
    Time: time.Date(2022, 8, 15, 14, 35, 0o0, 100, time.UTC), // 2022-8-15 14:35:00
})
require.NoError(t, err)

// Create one log with "Open" and "Close".
openRequestID := uuid.New()
err = client.CreateTestAuditLog(ctx, codersdk.CreateTestAuditLogRequest{
    OrganizationID: user.OrganizationID,
    Action: codersdk.AuditActionOpen,
    RequestID: openRequestID,
    ResourceType: codersdk.ResourceTypeWorkspaceApp,
    ResourceID: workspace.LatestBuild.Resources[0].Agents[0].Apps[0].ID,
    Time: time.Date(2022, 8, 15, 14, 30, 45, 100, time.UTC), // 2022-8-15 14:30:45
})
require.NoError(t, err)
err = client.CreateTestAuditLog(ctx, codersdk.CreateTestAuditLogRequest{
    OrganizationID: user.OrganizationID,
    Action: codersdk.AuditActionClose,
    RequestID: openRequestID,
    ResourceType: codersdk.ResourceTypeWorkspaceApp,
    ResourceID: workspace.LatestBuild.Resources[0].Agents[0].Apps[0].ID,
    Time: time.Date(2022, 8, 15, 14, 35, 0o0, 100, time.UTC), // 2022-8-15 14:35:00
})
require.NoError(t, err)

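Each connect/disconnect and open/close pair above deliberately shares one request ID, so both halves of an audited session can be correlated later via the `request_id:` search key (exercised by the filter test cases in the next hunk). A minimal sketch of how a caller might build that filter string; the helper name is illustrative, not part of codersdk:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

// agentSessionQuery is a hypothetical helper: it builds the audit-log search
// string that matches both the connect and the disconnect event of a single
// audited workspace-agent session.
func agentSessionQuery(requestID uuid.UUID) string {
	return "resource_type:workspace_agent request_id:" + requestID.String()
}

func main() {
	fmt.Println(agentSessionQuery(uuid.New()))
}
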
@ -309,12 +364,12 @@ func TestAuditLogsFilter(t *testing.T) {
{
    Name: "FilterByEmail",
    SearchQuery: "email:" + coderdtest.FirstUserParams.Email,
    ExpectedResult: 5,
    ExpectedResult: 9,
},
{
    Name: "FilterByUsername",
    SearchQuery: "username:" + coderdtest.FirstUserParams.Username,
    ExpectedResult: 5,
    ExpectedResult: 9,
},
{
    Name: "FilterByResourceID",
@ -366,6 +421,36 @@ func TestAuditLogsFilter(t *testing.T) {
    SearchQuery: "resource_type:workspace_build action:start build_reason:initiator",
    ExpectedResult: 1,
},
{
    Name: "FilterOnWorkspaceAgentConnect",
    SearchQuery: "resource_type:workspace_agent action:connect",
    ExpectedResult: 1,
},
{
    Name: "FilterOnWorkspaceAgentDisconnect",
    SearchQuery: "resource_type:workspace_agent action:disconnect",
    ExpectedResult: 1,
},
{
    Name: "FilterOnWorkspaceAgentConnectionRequestID",
    SearchQuery: "resource_type:workspace_agent request_id:" + connectRequestID.String(),
    ExpectedResult: 2,
},
{
    Name: "FilterOnWorkspaceAppOpen",
    SearchQuery: "resource_type:workspace_app action:open",
    ExpectedResult: 1,
},
{
    Name: "FilterOnWorkspaceAppClose",
    SearchQuery: "resource_type:workspace_app action:close",
    ExpectedResult: 1,
},
{
    Name: "FilterOnWorkspaceAppOpenRequestID",
    SearchQuery: "resource_type:workspace_app request_id:" + openRequestID.String(),
    ExpectedResult: 2,
},
}

for _, testCase := range testCases {
@ -387,3 +472,63 @@ func TestAuditLogsFilter(t *testing.T) {
        }
    })
}

func completeWithAgentAndApp() *echo.Responses {
    return &echo.Responses{
        Parse: echo.ParseComplete,
        ProvisionPlan: []*proto.Response{
            {
                Type: &proto.Response_Plan{
                    Plan: &proto.PlanComplete{
                        Resources: []*proto.Resource{
                            {
                                Type: "compute",
                                Name: "main",
                                Agents: []*proto.Agent{
                                    {
                                        Name: "smith",
                                        OperatingSystem: "linux",
                                        Architecture: "i386",
                                        Apps: []*proto.App{
                                            {
                                                Slug: "app",
                                                DisplayName: "App",
                                            },
                                        },
                                    },
                                },
                            },
                        },
                    },
                },
            },
        },
        ProvisionApply: []*proto.Response{
            {
                Type: &proto.Response_Apply{
                    Apply: &proto.ApplyComplete{
                        Resources: []*proto.Resource{
                            {
                                Type: "compute",
                                Name: "main",
                                Agents: []*proto.Agent{
                                    {
                                        Name: "smith",
                                        OperatingSystem: "linux",
                                        Architecture: "i386",
                                        Apps: []*proto.App{
                                            {
                                                Slug: "app",
                                                DisplayName: "App",
                                            },
                                        },
                                    },
                                },
                            },
                        },
                    },
                },
            },
        },
    }
}
@ -1374,6 +1374,7 @@ func New(options *Options) *API {
    r.Get("/system", api.systemNotificationTemplates)
})
r.Get("/dispatch-methods", api.notificationDispatchMethods)
r.Post("/test", api.postTestNotification)
})
r.Route("/tailnet", func(r chi.Router) {
    r.Use(apiKeyMiddleware)
@ -289,6 +289,24 @@ var (
    Scope: rbac.ScopeAll,
}.WithCachedASTValue()

subjectResourceMonitor = rbac.Subject{
    FriendlyName: "Resource Monitor",
    ID: uuid.Nil.String(),
    Roles: rbac.Roles([]rbac.Role{
        {
            Identifier: rbac.RoleIdentifier{Name: "resourcemonitor"},
            DisplayName: "Resource Monitor",
            Site: rbac.Permissions(map[string][]policy.Action{
                // The workspace monitor needs to be able to update monitors
                rbac.ResourceWorkspaceAgentResourceMonitor.Type: {policy.ActionUpdate},
            }),
            Org: map[string][]rbac.Permission{},
            User: []rbac.Permission{},
        },
    }),
    Scope: rbac.ScopeAll,
}.WithCachedASTValue()

subjectSystemRestricted = rbac.Subject{
    FriendlyName: "System",
    ID: uuid.Nil.String(),
@ -376,6 +394,12 @@ func AsNotifier(ctx context.Context) context.Context {
    return context.WithValue(ctx, authContextKey{}, subjectNotifier)
}

// AsResourceMonitor returns a context with an actor that has permissions required for
// updating resource monitors.
func AsResourceMonitor(ctx context.Context) context.Context {
    return context.WithValue(ctx, authContextKey{}, subjectResourceMonitor)
}

// AsSystemRestricted returns a context with an actor that has permissions
// required for various system operations (login, logout, metrics cache).
func AsSystemRestricted(ctx context.Context) context.Context {
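AsResourceMonitor follows the same shape as AsNotifier and AsSystemRestricted: the authorization subject travels in the request context under an unexported key. A minimal, self-contained sketch of that pattern, with simplified stand-in types (the real rbac.Subject carries roles and a scope):

package main

import (
	"context"
	"fmt"
)

// subject is a stand-in for rbac.Subject in this sketch.
type subject struct{ FriendlyName string }

// authContextKey mirrors the unexported key type used by dbauthz; an
// unexported struct key prevents collisions with other packages' values.
type authContextKey struct{}

func asActor(ctx context.Context, s subject) context.Context {
	return context.WithValue(ctx, authContextKey{}, s)
}

func actorFromContext(ctx context.Context) (subject, bool) {
	s, ok := ctx.Value(authContextKey{}).(subject)
	return s, ok
}

func main() {
	ctx := asActor(context.Background(), subject{FriendlyName: "Resource Monitor"})
	if s, ok := actorFromContext(ctx); ok {
		fmt.Println("acting as:", s.FriendlyName) // acting as: Resource Monitor
	}
}
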
@ -3701,6 +3725,14 @@ func (q *querier) UpdateMemberRoles(ctx context.Context, arg database.UpdateMemb
    return q.db.UpdateMemberRoles(ctx, arg)
}

func (q *querier) UpdateMemoryResourceMonitor(ctx context.Context, arg database.UpdateMemoryResourceMonitorParams) error {
    if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceWorkspaceAgentResourceMonitor); err != nil {
        return err
    }

    return q.db.UpdateMemoryResourceMonitor(ctx, arg)
}

func (q *querier) UpdateNotificationTemplateMethodByID(ctx context.Context, arg database.UpdateNotificationTemplateMethodByIDParams) (database.NotificationTemplate, error) {
    if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceNotificationTemplate); err != nil {
        return database.NotificationTemplate{}, err
@ -4097,6 +4129,14 @@ func (q *querier) UpdateUserStatus(ctx context.Context, arg database.UpdateUserS
    return updateWithReturn(q.log, q.auth, fetch, q.db.UpdateUserStatus)(ctx, arg)
}

func (q *querier) UpdateVolumeResourceMonitor(ctx context.Context, arg database.UpdateVolumeResourceMonitorParams) error {
    if err := q.authorizeContext(ctx, policy.ActionUpdate, rbac.ResourceWorkspaceAgentResourceMonitor); err != nil {
        return err
    }

    return q.db.UpdateVolumeResourceMonitor(ctx, arg)
}

func (q *querier) UpdateWorkspace(ctx context.Context, arg database.UpdateWorkspaceParams) (database.WorkspaceTable, error) {
    fetch := func(ctx context.Context, arg database.UpdateWorkspaceParams) (database.WorkspaceTable, error) {
        w, err := q.db.GetWorkspaceByID(ctx, arg.ID)
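Both new querier methods are instances of the same guard shape used throughout dbauthz: authorize against a single RBAC object, then delegate to the wrapped store only on success. A reduced sketch of that shape, with interfaces invented purely for illustration:

package main

import (
	"context"
	"errors"
	"fmt"
)

// store and authorizer are illustrative stand-ins for database.Store and the
// dbauthz authorizer; only the authorize-then-delegate shape matters here.
type store interface {
	UpdateMonitor(ctx context.Context, agentID string) error
}

type authorizer func(ctx context.Context, action, object string) error

type guardedStore struct {
	db   store
	auth authorizer
}

// UpdateMonitor authorizes first and delegates only on success, so the
// wrapped store never sees unauthorized calls.
func (g guardedStore) UpdateMonitor(ctx context.Context, agentID string) error {
	if err := g.auth(ctx, "update", "workspace_agent_resource_monitor"); err != nil {
		return err
	}
	return g.db.UpdateMonitor(ctx, agentID)
}

func main() {
	deny := func(context.Context, string, string) error { return errors.New("forbidden") }
	g := guardedStore{db: nil, auth: deny} // deny short-circuits before db is touched
	fmt.Println(g.UpdateMonitor(context.Background(), "agent-1")) // forbidden
}
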
@ -3354,11 +3354,11 @@ func (s *MethodTestSuite) TestExtraMethods() {
    dbgen.WorkspaceBuild(s.T(), db, database.WorkspaceBuild{ID: wbID, WorkspaceID: w.ID, TemplateVersionID: tv.ID, JobID: j2.ID})

    ds, err := db.GetProvisionerJobsByOrganizationAndStatusWithQueuePositionAndProvisioner(context.Background(), database.GetProvisionerJobsByOrganizationAndStatusWithQueuePositionAndProvisionerParams{
        OrganizationID: uuid.NullUUID{Valid: true, UUID: org.ID},
        OrganizationID: org.ID,
    })
    s.NoError(err, "get provisioner jobs by org")
    check.Args(database.GetProvisionerJobsByOrganizationAndStatusWithQueuePositionAndProvisionerParams{
        OrganizationID: uuid.NullUUID{Valid: true, UUID: org.ID},
        OrganizationID: org.ID,
    }).Asserts(j1, policy.ActionRead, j2, policy.ActionRead).Returns(ds)
}))
}
@ -4725,43 +4725,78 @@ func (s *MethodTestSuite) TestOAuth2ProviderAppTokens() {
}

func (s *MethodTestSuite) TestResourcesMonitor() {
s.Run("InsertMemoryResourceMonitor", s.Subtest(func(db database.Store, check *expects) {
    dbtestutil.DisableForeignKeysAndTriggers(s.T(), db)
    check.Args(database.InsertMemoryResourceMonitorParams{}).Asserts(rbac.ResourceWorkspaceAgentResourceMonitor, policy.ActionCreate)
}))
createAgent := func(t *testing.T, db database.Store) (database.WorkspaceAgent, database.WorkspaceTable) {
    t.Helper()

s.Run("InsertVolumeResourceMonitor", s.Subtest(func(db database.Store, check *expects) {
    dbtestutil.DisableForeignKeysAndTriggers(s.T(), db)
    check.Args(database.InsertVolumeResourceMonitorParams{}).Asserts(rbac.ResourceWorkspaceAgentResourceMonitor, policy.ActionCreate)
}))

s.Run("FetchMemoryResourceMonitorsByAgentID", s.Subtest(func(db database.Store, check *expects) {
    u := dbgen.User(s.T(), db, database.User{})
    o := dbgen.Organization(s.T(), db, database.Organization{})
    tpl := dbgen.Template(s.T(), db, database.Template{
    u := dbgen.User(t, db, database.User{})
    o := dbgen.Organization(t, db, database.Organization{})
    tpl := dbgen.Template(t, db, database.Template{
        OrganizationID: o.ID,
        CreatedBy: u.ID,
    })
    tv := dbgen.TemplateVersion(s.T(), db, database.TemplateVersion{
    tv := dbgen.TemplateVersion(t, db, database.TemplateVersion{
        TemplateID: uuid.NullUUID{UUID: tpl.ID, Valid: true},
        OrganizationID: o.ID,
        CreatedBy: u.ID,
    })
    w := dbgen.Workspace(s.T(), db, database.WorkspaceTable{
    w := dbgen.Workspace(t, db, database.WorkspaceTable{
        TemplateID: tpl.ID,
        OrganizationID: o.ID,
        OwnerID: u.ID,
    })
    j := dbgen.ProvisionerJob(s.T(), db, nil, database.ProvisionerJob{
    j := dbgen.ProvisionerJob(t, db, nil, database.ProvisionerJob{
        Type: database.ProvisionerJobTypeWorkspaceBuild,
    })
    b := dbgen.WorkspaceBuild(s.T(), db, database.WorkspaceBuild{
    b := dbgen.WorkspaceBuild(t, db, database.WorkspaceBuild{
        JobID: j.ID,
        WorkspaceID: w.ID,
        TemplateVersionID: tv.ID,
    })
    res := dbgen.WorkspaceResource(s.T(), db, database.WorkspaceResource{JobID: b.JobID})
    agt := dbgen.WorkspaceAgent(s.T(), db, database.WorkspaceAgent{ResourceID: res.ID})
    res := dbgen.WorkspaceResource(t, db, database.WorkspaceResource{JobID: b.JobID})
    agt := dbgen.WorkspaceAgent(t, db, database.WorkspaceAgent{ResourceID: res.ID})

    return agt, w
}

s.Run("InsertMemoryResourceMonitor", s.Subtest(func(db database.Store, check *expects) {
    agt, _ := createAgent(s.T(), db)

    check.Args(database.InsertMemoryResourceMonitorParams{
        AgentID: agt.ID,
        State: database.WorkspaceAgentMonitorStateOK,
    }).Asserts(rbac.ResourceWorkspaceAgentResourceMonitor, policy.ActionCreate)
}))

s.Run("InsertVolumeResourceMonitor", s.Subtest(func(db database.Store, check *expects) {
    agt, _ := createAgent(s.T(), db)

    check.Args(database.InsertVolumeResourceMonitorParams{
        AgentID: agt.ID,
        State: database.WorkspaceAgentMonitorStateOK,
    }).Asserts(rbac.ResourceWorkspaceAgentResourceMonitor, policy.ActionCreate)
}))

s.Run("UpdateMemoryResourceMonitor", s.Subtest(func(db database.Store, check *expects) {
    agt, _ := createAgent(s.T(), db)

    check.Args(database.UpdateMemoryResourceMonitorParams{
        AgentID: agt.ID,
        State: database.WorkspaceAgentMonitorStateOK,
    }).Asserts(rbac.ResourceWorkspaceAgentResourceMonitor, policy.ActionUpdate)
}))

s.Run("UpdateVolumeResourceMonitor", s.Subtest(func(db database.Store, check *expects) {
    agt, _ := createAgent(s.T(), db)

    check.Args(database.UpdateVolumeResourceMonitorParams{
        AgentID: agt.ID,
        State: database.WorkspaceAgentMonitorStateOK,
    }).Asserts(rbac.ResourceWorkspaceAgentResourceMonitor, policy.ActionUpdate)
}))

s.Run("FetchMemoryResourceMonitorsByAgentID", s.Subtest(func(db database.Store, check *expects) {
    agt, w := createAgent(s.T(), db)

    dbgen.WorkspaceAgentMemoryResourceMonitor(s.T(), db, database.WorkspaceAgentMemoryResourceMonitor{
        AgentID: agt.ID,
        Enabled: true,
@ -4776,32 +4811,8 @@ func (s *MethodTestSuite) TestResourcesMonitor() {
}))

s.Run("FetchVolumesResourceMonitorsByAgentID", s.Subtest(func(db database.Store, check *expects) {
    u := dbgen.User(s.T(), db, database.User{})
    o := dbgen.Organization(s.T(), db, database.Organization{})
    tpl := dbgen.Template(s.T(), db, database.Template{
        OrganizationID: o.ID,
        CreatedBy: u.ID,
    })
    tv := dbgen.TemplateVersion(s.T(), db, database.TemplateVersion{
        TemplateID: uuid.NullUUID{UUID: tpl.ID, Valid: true},
        OrganizationID: o.ID,
        CreatedBy: u.ID,
    })
    w := dbgen.Workspace(s.T(), db, database.WorkspaceTable{
        TemplateID: tpl.ID,
        OrganizationID: o.ID,
        OwnerID: u.ID,
    })
    j := dbgen.ProvisionerJob(s.T(), db, nil, database.ProvisionerJob{
        Type: database.ProvisionerJobTypeWorkspaceBuild,
    })
    b := dbgen.WorkspaceBuild(s.T(), db, database.WorkspaceBuild{
        JobID: j.ID,
        WorkspaceID: w.ID,
        TemplateVersionID: tv.ID,
    })
    res := dbgen.WorkspaceResource(s.T(), db, database.WorkspaceResource{JobID: b.JobID})
    agt := dbgen.WorkspaceAgent(s.T(), db, database.WorkspaceAgent{ResourceID: res.ID})
    agt, w := createAgent(s.T(), db)

    dbgen.WorkspaceAgentVolumeResourceMonitor(s.T(), db, database.WorkspaceAgentVolumeResourceMonitor{
        AgentID: agt.ID,
        Path: "/var/lib",
@ -1038,10 +1038,13 @@ func OAuth2ProviderAppToken(t testing.TB, db database.Store, seed database.OAuth

func WorkspaceAgentMemoryResourceMonitor(t testing.TB, db database.Store, seed database.WorkspaceAgentMemoryResourceMonitor) database.WorkspaceAgentMemoryResourceMonitor {
    monitor, err := db.InsertMemoryResourceMonitor(genCtx, database.InsertMemoryResourceMonitorParams{
        AgentID: takeFirst(seed.AgentID, uuid.New()),
        Enabled: takeFirst(seed.Enabled, true),
        Threshold: takeFirst(seed.Threshold, 100),
        CreatedAt: takeFirst(seed.CreatedAt, dbtime.Now()),
        AgentID: takeFirst(seed.AgentID, uuid.New()),
        Enabled: takeFirst(seed.Enabled, true),
        State: takeFirst(seed.State, database.WorkspaceAgentMonitorStateOK),
        Threshold: takeFirst(seed.Threshold, 100),
        CreatedAt: takeFirst(seed.CreatedAt, dbtime.Now()),
        UpdatedAt: takeFirst(seed.UpdatedAt, dbtime.Now()),
        DebouncedUntil: takeFirst(seed.DebouncedUntil, time.Time{}),
    })
    require.NoError(t, err, "insert workspace agent memory resource monitor")
    return monitor
@ -1049,11 +1052,14 @@ func WorkspaceAgentMemoryResourceMonitor(t testing.TB, db database.Store, seed d

func WorkspaceAgentVolumeResourceMonitor(t testing.TB, db database.Store, seed database.WorkspaceAgentVolumeResourceMonitor) database.WorkspaceAgentVolumeResourceMonitor {
    monitor, err := db.InsertVolumeResourceMonitor(genCtx, database.InsertVolumeResourceMonitorParams{
        AgentID: takeFirst(seed.AgentID, uuid.New()),
        Path: takeFirst(seed.Path, "/"),
        Enabled: takeFirst(seed.Enabled, true),
        Threshold: takeFirst(seed.Threshold, 100),
        CreatedAt: takeFirst(seed.CreatedAt, dbtime.Now()),
        AgentID: takeFirst(seed.AgentID, uuid.New()),
        Path: takeFirst(seed.Path, "/"),
        Enabled: takeFirst(seed.Enabled, true),
        State: takeFirst(seed.State, database.WorkspaceAgentMonitorStateOK),
        Threshold: takeFirst(seed.Threshold, 100),
        CreatedAt: takeFirst(seed.CreatedAt, dbtime.Now()),
        UpdatedAt: takeFirst(seed.UpdatedAt, dbtime.Now()),
        DebouncedUntil: takeFirst(seed.DebouncedUntil, time.Time{}),
    })
    require.NoError(t, err, "insert workspace agent volume resource monitor")
    return monitor
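These seeding helpers lean on takeFirst, which keeps a caller-supplied field and otherwise falls back to a default. A self-contained sketch of that pattern (the real helper lives in dbgen and may differ in signature; this variadic generic version captures the idea only):

package main

import "fmt"

// takeFirst returns the first non-zero value, so explicit seed fields win
// over generated defaults. This mirrors the dbgen pattern in spirit only.
func takeFirst[T comparable](values ...T) T {
	var zero T
	for _, v := range values {
		if v != zero {
			return v
		}
	}
	return zero
}

func main() {
	fmt.Println(takeFirst(0, 100))  // 100: seed unset, default wins
	fmt.Println(takeFirst(42, 100)) // 42: seed set, default ignored
	fmt.Println(takeFirst("", "/")) // "/": empty path falls back to root
}
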
@ -4225,7 +4225,7 @@ func (q *FakeQuerier) GetProvisionerJobsByOrganizationAndStatusWithQueuePosition
for _, rowQP := range rowsWithQueuePosition {
    job := rowQP.ProvisionerJob

    if arg.OrganizationID.Valid && job.OrganizationID != arg.OrganizationID.UUID {
    if job.OrganizationID != arg.OrganizationID {
        continue
    }
    if len(arg.Status) > 0 && !slices.Contains(arg.Status, job.JobStatus) {
@ -7997,7 +7997,16 @@ func (q *FakeQuerier) InsertMemoryResourceMonitor(_ context.Context, arg databas
q.mutex.Lock()
defer q.mutex.Unlock()

monitor := database.WorkspaceAgentMemoryResourceMonitor(arg)
//nolint:unconvert // The struct's field order differs, so this conversion is needed.
monitor := database.WorkspaceAgentMemoryResourceMonitor(database.WorkspaceAgentMemoryResourceMonitor{
    AgentID: arg.AgentID,
    Enabled: arg.Enabled,
    State: arg.State,
    Threshold: arg.Threshold,
    CreatedAt: arg.CreatedAt,
    UpdatedAt: arg.UpdatedAt,
    DebouncedUntil: arg.DebouncedUntil,
})

q.workspaceAgentMemoryResourceMonitors = append(q.workspaceAgentMemoryResourceMonitors, monitor)
return monitor, nil
@ -8693,11 +8702,14 @@ func (q *FakeQuerier) InsertVolumeResourceMonitor(_ context.Context, arg databas
defer q.mutex.Unlock()

monitor := database.WorkspaceAgentVolumeResourceMonitor{
    AgentID: arg.AgentID,
    Path: arg.Path,
    Enabled: arg.Enabled,
    Threshold: arg.Threshold,
    CreatedAt: arg.CreatedAt,
    AgentID: arg.AgentID,
    Path: arg.Path,
    Enabled: arg.Enabled,
    State: arg.State,
    Threshold: arg.Threshold,
    CreatedAt: arg.CreatedAt,
    UpdatedAt: arg.UpdatedAt,
    DebouncedUntil: arg.DebouncedUntil,
}

q.workspaceAgentVolumeResourceMonitors = append(q.workspaceAgentVolumeResourceMonitors, monitor)
@ -9708,6 +9720,30 @@ func (q *FakeQuerier) UpdateMemberRoles(_ context.Context, arg database.UpdateMe
return database.OrganizationMember{}, sql.ErrNoRows
}

func (q *FakeQuerier) UpdateMemoryResourceMonitor(_ context.Context, arg database.UpdateMemoryResourceMonitorParams) error {
    err := validateDatabaseType(arg)
    if err != nil {
        return err
    }

    q.mutex.Lock()
    defer q.mutex.Unlock()

    for i, monitor := range q.workspaceAgentMemoryResourceMonitors {
        if monitor.AgentID != arg.AgentID {
            continue
        }

        monitor.State = arg.State
        monitor.UpdatedAt = arg.UpdatedAt
        monitor.DebouncedUntil = arg.DebouncedUntil
        q.workspaceAgentMemoryResourceMonitors[i] = monitor
        return nil
    }

    return nil
}

func (*FakeQuerier) UpdateNotificationTemplateMethodByID(_ context.Context, _ database.UpdateNotificationTemplateMethodByIDParams) (database.NotificationTemplate, error) {
    // Not implementing this function because it relies on state in the database which is created with migrations.
    // We could consider using code-generation to align the database state and dbmem, but it's not worth it right now.
@ -10486,6 +10522,30 @@ func (q *FakeQuerier) UpdateUserStatus(_ context.Context, arg database.UpdateUse
return database.User{}, sql.ErrNoRows
}

func (q *FakeQuerier) UpdateVolumeResourceMonitor(_ context.Context, arg database.UpdateVolumeResourceMonitorParams) error {
    err := validateDatabaseType(arg)
    if err != nil {
        return err
    }

    q.mutex.Lock()
    defer q.mutex.Unlock()

    for i, monitor := range q.workspaceAgentVolumeResourceMonitors {
        if monitor.AgentID != arg.AgentID || monitor.Path != arg.Path {
            continue
        }

        monitor.State = arg.State
        monitor.UpdatedAt = arg.UpdatedAt
        monitor.DebouncedUntil = arg.DebouncedUntil
        q.workspaceAgentVolumeResourceMonitors[i] = monitor
        return nil
    }

    return nil
}

func (q *FakeQuerier) UpdateWorkspace(_ context.Context, arg database.UpdateWorkspaceParams) (database.WorkspaceTable, error) {
    if err := validateDatabaseType(arg); err != nil {
        return database.WorkspaceTable{}, err
@ -12450,10 +12510,13 @@ func (q *FakeQuerier) GetAuthorizedAuditLogsOffset(ctx context.Context, arg data
    arg.OffsetOpt--
    continue
}
if arg.RequestID != uuid.Nil && arg.RequestID != alog.RequestID {
    continue
}
if arg.OrganizationID != uuid.Nil && arg.OrganizationID != alog.OrganizationID {
    continue
}
if arg.Action != "" && !strings.Contains(string(alog.Action), arg.Action) {
if arg.Action != "" && string(alog.Action) != arg.Action {
    continue
}
if arg.ResourceType != "" && !strings.Contains(string(alog.ResourceType), arg.ResourceType) {
@ -2352,6 +2352,13 @@ func (m queryMetricsStore) UpdateMemberRoles(ctx context.Context, arg database.U
    return member, err
}

func (m queryMetricsStore) UpdateMemoryResourceMonitor(ctx context.Context, arg database.UpdateMemoryResourceMonitorParams) error {
    start := time.Now()
    r0 := m.s.UpdateMemoryResourceMonitor(ctx, arg)
    m.queryLatencies.WithLabelValues("UpdateMemoryResourceMonitor").Observe(time.Since(start).Seconds())
    return r0
}

func (m queryMetricsStore) UpdateNotificationTemplateMethodByID(ctx context.Context, arg database.UpdateNotificationTemplateMethodByIDParams) (database.NotificationTemplate, error) {
    start := time.Now()
    r0, r1 := m.s.UpdateNotificationTemplateMethodByID(ctx, arg)
@ -2590,6 +2597,13 @@ func (m queryMetricsStore) UpdateUserStatus(ctx context.Context, arg database.Up
    return user, err
}

func (m queryMetricsStore) UpdateVolumeResourceMonitor(ctx context.Context, arg database.UpdateVolumeResourceMonitorParams) error {
    start := time.Now()
    r0 := m.s.UpdateVolumeResourceMonitor(ctx, arg)
    m.queryLatencies.WithLabelValues("UpdateVolumeResourceMonitor").Observe(time.Since(start).Seconds())
    return r0
}

func (m queryMetricsStore) UpdateWorkspace(ctx context.Context, arg database.UpdateWorkspaceParams) (database.WorkspaceTable, error) {
    start := time.Now()
    workspace, err := m.s.UpdateWorkspace(ctx, arg)
@ -5010,6 +5010,20 @@ func (mr *MockStoreMockRecorder) UpdateMemberRoles(ctx, arg any) *gomock.Call {
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateMemberRoles", reflect.TypeOf((*MockStore)(nil).UpdateMemberRoles), ctx, arg)
}

// UpdateMemoryResourceMonitor mocks base method.
func (m *MockStore) UpdateMemoryResourceMonitor(ctx context.Context, arg database.UpdateMemoryResourceMonitorParams) error {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "UpdateMemoryResourceMonitor", ctx, arg)
    ret0, _ := ret[0].(error)
    return ret0
}

// UpdateMemoryResourceMonitor indicates an expected call of UpdateMemoryResourceMonitor.
func (mr *MockStoreMockRecorder) UpdateMemoryResourceMonitor(ctx, arg any) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateMemoryResourceMonitor", reflect.TypeOf((*MockStore)(nil).UpdateMemoryResourceMonitor), ctx, arg)
}

// UpdateNotificationTemplateMethodByID mocks base method.
func (m *MockStore) UpdateNotificationTemplateMethodByID(ctx context.Context, arg database.UpdateNotificationTemplateMethodByIDParams) (database.NotificationTemplate, error) {
    m.ctrl.T.Helper()
@ -5501,6 +5515,20 @@ func (mr *MockStoreMockRecorder) UpdateUserStatus(ctx, arg any) *gomock.Call {
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateUserStatus", reflect.TypeOf((*MockStore)(nil).UpdateUserStatus), ctx, arg)
}

// UpdateVolumeResourceMonitor mocks base method.
func (m *MockStore) UpdateVolumeResourceMonitor(ctx context.Context, arg database.UpdateVolumeResourceMonitorParams) error {
    m.ctrl.T.Helper()
    ret := m.ctrl.Call(m, "UpdateVolumeResourceMonitor", ctx, arg)
    ret0, _ := ret[0].(error)
    return ret0
}

// UpdateVolumeResourceMonitor indicates an expected call of UpdateVolumeResourceMonitor.
func (mr *MockStoreMockRecorder) UpdateVolumeResourceMonitor(ctx, arg any) *gomock.Call {
    mr.mock.ctrl.T.Helper()
    return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateVolumeResourceMonitor", reflect.TypeOf((*MockStore)(nil).UpdateVolumeResourceMonitor), ctx, arg)
}

// UpdateWorkspace mocks base method.
func (m *MockStore) UpdateWorkspace(ctx context.Context, arg database.UpdateWorkspaceParams) (database.WorkspaceTable, error) {
    m.ctrl.T.Helper()
25 coderd/database/dump.sql generated
@ -25,7 +25,11 @@ CREATE TYPE audit_action AS ENUM (
    'login',
    'logout',
    'register',
    'request_password_reset'
    'request_password_reset',
    'connect',
    'disconnect',
    'open',
    'close'
);

CREATE TYPE automatic_updates AS ENUM (
@ -201,7 +205,9 @@ CREATE TYPE resource_type AS ENUM (
    'notification_template',
    'idp_sync_settings_organization',
    'idp_sync_settings_group',
    'idp_sync_settings_role'
    'idp_sync_settings_role',
    'workspace_agent',
    'workspace_app'
);

CREATE TYPE startup_script_behavior AS ENUM (
@ -238,6 +244,11 @@ CREATE TYPE workspace_agent_lifecycle_state AS ENUM (
    'off'
);

CREATE TYPE workspace_agent_monitor_state AS ENUM (
    'OK',
    'NOK'
);

CREATE TYPE workspace_agent_script_timing_stage AS ENUM (
    'start',
    'stop',
@ -1522,7 +1533,10 @@ CREATE TABLE workspace_agent_memory_resource_monitors (
    agent_id uuid NOT NULL,
    enabled boolean NOT NULL,
    threshold integer NOT NULL,
    created_at timestamp with time zone NOT NULL
    created_at timestamp with time zone NOT NULL,
    updated_at timestamp with time zone DEFAULT CURRENT_TIMESTAMP NOT NULL,
    state workspace_agent_monitor_state DEFAULT 'OK'::workspace_agent_monitor_state NOT NULL,
    debounced_until timestamp with time zone DEFAULT '0001-01-01 00:00:00+00'::timestamp with time zone NOT NULL
);

CREATE UNLOGGED TABLE workspace_agent_metadata (
@ -1607,7 +1621,10 @@ CREATE TABLE workspace_agent_volume_resource_monitors (
    enabled boolean NOT NULL,
    threshold integer NOT NULL,
    path text NOT NULL,
    created_at timestamp with time zone NOT NULL
    created_at timestamp with time zone NOT NULL,
    updated_at timestamp with time zone DEFAULT CURRENT_TIMESTAMP NOT NULL,
    state workspace_agent_monitor_state DEFAULT 'OK'::workspace_agent_monitor_state NOT NULL,
    debounced_until timestamp with time zone DEFAULT '0001-01-01 00:00:00+00'::timestamp with time zone NOT NULL
);

CREATE TABLE workspace_agents (
@ -0,0 +1 @@
-- No-op, enum values can't be dropped.
@ -0,0 +1,13 @@
-- Add new audit types for connect and open actions.
ALTER TYPE audit_action
    ADD VALUE IF NOT EXISTS 'connect';
ALTER TYPE audit_action
    ADD VALUE IF NOT EXISTS 'disconnect';
ALTER TYPE resource_type
    ADD VALUE IF NOT EXISTS 'workspace_agent';
ALTER TYPE audit_action
    ADD VALUE IF NOT EXISTS 'open';
ALTER TYPE audit_action
    ADD VALUE IF NOT EXISTS 'close';
ALTER TYPE resource_type
    ADD VALUE IF NOT EXISTS 'workspace_app';
@ -0,0 +1,11 @@
ALTER TABLE workspace_agent_volume_resource_monitors
    DROP COLUMN updated_at,
    DROP COLUMN state,
    DROP COLUMN debounced_until;

ALTER TABLE workspace_agent_memory_resource_monitors
    DROP COLUMN updated_at,
    DROP COLUMN state,
    DROP COLUMN debounced_until;

DROP TYPE workspace_agent_monitor_state;
@ -0,0 +1,14 @@
CREATE TYPE workspace_agent_monitor_state AS ENUM (
    'OK',
    'NOK'
);

ALTER TABLE workspace_agent_memory_resource_monitors
    ADD COLUMN updated_at timestamp with time zone NOT NULL DEFAULT CURRENT_TIMESTAMP,
    ADD COLUMN state workspace_agent_monitor_state NOT NULL DEFAULT 'OK',
    ADD COLUMN debounced_until timestamp with time zone NOT NULL DEFAULT '0001-01-01 00:00:00'::timestamptz;

ALTER TABLE workspace_agent_volume_resource_monitors
    ADD COLUMN updated_at timestamp with time zone NOT NULL DEFAULT CURRENT_TIMESTAMP,
    ADD COLUMN state workspace_agent_monitor_state NOT NULL DEFAULT 'OK',
    ADD COLUMN debounced_until timestamp with time zone NOT NULL DEFAULT '0001-01-01 00:00:00'::timestamptz;
@ -0,0 +1 @@
DELETE FROM notification_templates WHERE id = 'c425f63e-716a-4bf4-ae24-78348f706c3f';
16 coderd/database/migrations/000295_test_notification.up.sql Normal file
@ -0,0 +1,16 @@
INSERT INTO notification_templates
    (id, name, title_template, body_template, "group", actions)
VALUES (
    'c425f63e-716a-4bf4-ae24-78348f706c3f',
    'Test Notification',
    E'A test notification',
    E'Hi {{.UserName}},\n\n'||
    E'This is a test notification.',
    'Notification Events',
    '[
        {
            "label": "View notification settings",
            "url": "{{base_url}}/deployment/notifications?tab=settings"
        }
    ]'::jsonb
);
@ -527,3 +527,31 @@ func (k CryptoKey) CanVerify(now time.Time) bool {
func (r GetProvisionerJobsByOrganizationAndStatusWithQueuePositionAndProvisionerRow) RBACObject() rbac.Object {
    return r.ProvisionerJob.RBACObject()
}

func (m WorkspaceAgentMemoryResourceMonitor) Debounce(
    by time.Duration,
    now time.Time,
    oldState, newState WorkspaceAgentMonitorState,
) (time.Time, bool) {
    if now.After(m.DebouncedUntil) &&
        oldState == WorkspaceAgentMonitorStateOK &&
        newState == WorkspaceAgentMonitorStateNOK {
        return now.Add(by), true
    }

    return m.DebouncedUntil, false
}

func (m WorkspaceAgentVolumeResourceMonitor) Debounce(
    by time.Duration,
    now time.Time,
    oldState, newState WorkspaceAgentMonitorState,
) (debouncedUntil time.Time, shouldNotify bool) {
    if now.After(m.DebouncedUntil) &&
        oldState == WorkspaceAgentMonitorStateOK &&
        newState == WorkspaceAgentMonitorStateNOK {
        return now.Add(by), true
    }

    return m.DebouncedUntil, false
}
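Both Debounce methods implement the same rule: a notification is requested only on a healthy-to-unhealthy (OK to NOK) transition, and only once per debounce window. A runnable sketch of that rule in isolation (the five-minute window here is an arbitrary example value, not a claim about the configured default):

package main

import (
	"fmt"
	"time"
)

type monitorState string

const (
	stateOK  monitorState = "OK"
	stateNOK monitorState = "NOK"
)

// debounce mirrors the Debounce methods above: an OK -> NOK edge outside the
// current window schedules the next window and asks for a notification;
// anything else leaves the window untouched.
func debounce(debouncedUntil time.Time, by time.Duration, now time.Time, oldState, newState monitorState) (time.Time, bool) {
	if now.After(debouncedUntil) && oldState == stateOK && newState == stateNOK {
		return now.Add(by), true
	}
	return debouncedUntil, false
}

func main() {
	now := time.Now()
	until, notify := debounce(time.Time{}, 5*time.Minute, now, stateOK, stateNOK)
	fmt.Println(notify) // true: first OK -> NOK edge fires

	_, notify = debounce(until, 5*time.Minute, now.Add(time.Minute), stateOK, stateNOK)
	fmt.Println(notify) // false: still inside the debounce window
}
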
@ -468,6 +468,7 @@ func (q *sqlQuerier) GetAuthorizedAuditLogsOffset(ctx context.Context, arg GetAu
    arg.DateFrom,
    arg.DateTo,
    arg.BuildReason,
    arg.RequestID,
    arg.OffsetOpt,
    arg.LimitOpt,
)
@ -147,6 +147,10 @@ const (
    AuditActionLogout AuditAction = "logout"
    AuditActionRegister AuditAction = "register"
    AuditActionRequestPasswordReset AuditAction = "request_password_reset"
    AuditActionConnect AuditAction = "connect"
    AuditActionDisconnect AuditAction = "disconnect"
    AuditActionOpen AuditAction = "open"
    AuditActionClose AuditAction = "close"
)

func (e *AuditAction) Scan(src interface{}) error {
@ -194,7 +198,11 @@ func (e AuditAction) Valid() bool {
    AuditActionLogin,
    AuditActionLogout,
    AuditActionRegister,
    AuditActionRequestPasswordReset:
    AuditActionRequestPasswordReset,
    AuditActionConnect,
    AuditActionDisconnect,
    AuditActionOpen,
    AuditActionClose:
        return true
    }
    return false
@ -211,6 +219,10 @@ func AllAuditActionValues() []AuditAction {
    AuditActionLogout,
    AuditActionRegister,
    AuditActionRequestPasswordReset,
    AuditActionConnect,
    AuditActionDisconnect,
    AuditActionOpen,
    AuditActionClose,
    }
}

@ -1608,6 +1620,8 @@ const (
    ResourceTypeIdpSyncSettingsOrganization ResourceType = "idp_sync_settings_organization"
    ResourceTypeIdpSyncSettingsGroup ResourceType = "idp_sync_settings_group"
    ResourceTypeIdpSyncSettingsRole ResourceType = "idp_sync_settings_role"
    ResourceTypeWorkspaceAgent ResourceType = "workspace_agent"
    ResourceTypeWorkspaceApp ResourceType = "workspace_app"
)

func (e *ResourceType) Scan(src interface{}) error {
@ -1668,7 +1682,9 @@ func (e ResourceType) Valid() bool {
    ResourceTypeNotificationTemplate,
    ResourceTypeIdpSyncSettingsOrganization,
    ResourceTypeIdpSyncSettingsGroup,
    ResourceTypeIdpSyncSettingsRole:
    ResourceTypeIdpSyncSettingsRole,
    ResourceTypeWorkspaceAgent,
    ResourceTypeWorkspaceApp:
        return true
    }
    return false
@ -1698,6 +1714,8 @@ func AllResourceTypeValues() []ResourceType {
    ResourceTypeIdpSyncSettingsOrganization,
    ResourceTypeIdpSyncSettingsGroup,
    ResourceTypeIdpSyncSettingsRole,
    ResourceTypeWorkspaceAgent,
    ResourceTypeWorkspaceApp,
    }
}

@ -1958,6 +1976,64 @@ func AllWorkspaceAgentLifecycleStateValues() []WorkspaceAgentLifecycleState {
    }
}

type WorkspaceAgentMonitorState string

const (
    WorkspaceAgentMonitorStateOK WorkspaceAgentMonitorState = "OK"
    WorkspaceAgentMonitorStateNOK WorkspaceAgentMonitorState = "NOK"
)

func (e *WorkspaceAgentMonitorState) Scan(src interface{}) error {
    switch s := src.(type) {
    case []byte:
        *e = WorkspaceAgentMonitorState(s)
    case string:
        *e = WorkspaceAgentMonitorState(s)
    default:
        return fmt.Errorf("unsupported scan type for WorkspaceAgentMonitorState: %T", src)
    }
    return nil
}

type NullWorkspaceAgentMonitorState struct {
    WorkspaceAgentMonitorState WorkspaceAgentMonitorState `json:"workspace_agent_monitor_state"`
    Valid bool `json:"valid"` // Valid is true if WorkspaceAgentMonitorState is not NULL
}

// Scan implements the Scanner interface.
func (ns *NullWorkspaceAgentMonitorState) Scan(value interface{}) error {
    if value == nil {
        ns.WorkspaceAgentMonitorState, ns.Valid = "", false
        return nil
    }
    ns.Valid = true
    return ns.WorkspaceAgentMonitorState.Scan(value)
}

// Value implements the driver Valuer interface.
func (ns NullWorkspaceAgentMonitorState) Value() (driver.Value, error) {
    if !ns.Valid {
        return nil, nil
    }
    return string(ns.WorkspaceAgentMonitorState), nil
}

func (e WorkspaceAgentMonitorState) Valid() bool {
    switch e {
    case WorkspaceAgentMonitorStateOK,
        WorkspaceAgentMonitorStateNOK:
        return true
    }
    return false
}

func AllWorkspaceAgentMonitorStateValues() []WorkspaceAgentMonitorState {
    return []WorkspaceAgentMonitorState{
        WorkspaceAgentMonitorStateOK,
        WorkspaceAgentMonitorStateNOK,
    }
}

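NullWorkspaceAgentMonitorState follows the standard database/sql pattern: Scan maps SQL NULL to Valid == false, and Value maps Valid == false back to NULL. A minimal sketch exercising both directions directly, with a reduced stand-in type:

package main

import (
	"database/sql/driver"
	"fmt"
)

// nullState is a reduced copy of the NullWorkspaceAgentMonitorState shape.
type nullState struct {
	State string
	Valid bool
}

// Scan maps a NULL column to Valid == false instead of a zero value.
func (ns *nullState) Scan(value interface{}) error {
	if value == nil {
		ns.State, ns.Valid = "", false
		return nil
	}
	ns.Valid = true
	ns.State = fmt.Sprint(value)
	return nil
}

// Value serializes Valid == false back to SQL NULL.
func (ns nullState) Value() (driver.Value, error) {
	if !ns.Valid {
		return nil, nil
	}
	return ns.State, nil
}

func main() {
	var ns nullState
	_ = ns.Scan(nil)
	fmt.Println(ns.Valid) // false: NULL stays distinguishable from ""

	_ = ns.Scan("NOK")
	v, _ := ns.Value()
	fmt.Println(v) // NOK
}
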
// What stage the script was run in.
type WorkspaceAgentScriptTimingStage string

@ -3184,10 +3260,13 @@ type WorkspaceAgentLogSource struct {
}

type WorkspaceAgentMemoryResourceMonitor struct {
    AgentID uuid.UUID `db:"agent_id" json:"agent_id"`
    Enabled bool `db:"enabled" json:"enabled"`
    Threshold int32 `db:"threshold" json:"threshold"`
    CreatedAt time.Time `db:"created_at" json:"created_at"`
    AgentID uuid.UUID `db:"agent_id" json:"agent_id"`
    Enabled bool `db:"enabled" json:"enabled"`
    Threshold int32 `db:"threshold" json:"threshold"`
    CreatedAt time.Time `db:"created_at" json:"created_at"`
    UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
    State WorkspaceAgentMonitorState `db:"state" json:"state"`
    DebouncedUntil time.Time `db:"debounced_until" json:"debounced_until"`
}

type WorkspaceAgentMetadatum struct {
@ -3258,11 +3337,14 @@ type WorkspaceAgentStat struct {
}

type WorkspaceAgentVolumeResourceMonitor struct {
    AgentID uuid.UUID `db:"agent_id" json:"agent_id"`
    Enabled bool `db:"enabled" json:"enabled"`
    Threshold int32 `db:"threshold" json:"threshold"`
    Path string `db:"path" json:"path"`
    CreatedAt time.Time `db:"created_at" json:"created_at"`
    AgentID uuid.UUID `db:"agent_id" json:"agent_id"`
    Enabled bool `db:"enabled" json:"enabled"`
    Threshold int32 `db:"threshold" json:"threshold"`
    Path string `db:"path" json:"path"`
    CreatedAt time.Time `db:"created_at" json:"created_at"`
    UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
    State WorkspaceAgentMonitorState `db:"state" json:"state"`
    DebouncedUntil time.Time `db:"debounced_until" json:"debounced_until"`
}

type WorkspaceApp struct {
@ -484,6 +484,7 @@ type sqlcQuerier interface {
    UpdateGroupByID(ctx context.Context, arg UpdateGroupByIDParams) (Group, error)
    UpdateInactiveUsersToDormant(ctx context.Context, arg UpdateInactiveUsersToDormantParams) ([]UpdateInactiveUsersToDormantRow, error)
    UpdateMemberRoles(ctx context.Context, arg UpdateMemberRolesParams) (OrganizationMember, error)
    UpdateMemoryResourceMonitor(ctx context.Context, arg UpdateMemoryResourceMonitorParams) error
    UpdateNotificationTemplateMethodByID(ctx context.Context, arg UpdateNotificationTemplateMethodByIDParams) (NotificationTemplate, error)
    UpdateOAuth2ProviderAppByID(ctx context.Context, arg UpdateOAuth2ProviderAppByIDParams) (OAuth2ProviderApp, error)
    UpdateOAuth2ProviderAppSecretByID(ctx context.Context, arg UpdateOAuth2ProviderAppSecretByIDParams) (OAuth2ProviderAppSecret, error)
@ -518,6 +519,7 @@ type sqlcQuerier interface {
    UpdateUserQuietHoursSchedule(ctx context.Context, arg UpdateUserQuietHoursScheduleParams) (User, error)
    UpdateUserRoles(ctx context.Context, arg UpdateUserRolesParams) (User, error)
    UpdateUserStatus(ctx context.Context, arg UpdateUserStatusParams) (User, error)
    UpdateVolumeResourceMonitor(ctx context.Context, arg UpdateVolumeResourceMonitorParams) error
    UpdateWorkspace(ctx context.Context, arg UpdateWorkspaceParams) (WorkspaceTable, error)
    UpdateWorkspaceAgentConnectionByID(ctx context.Context, arg UpdateWorkspaceAgentConnectionByIDParams) error
    UpdateWorkspaceAgentLifecycleStateByID(ctx context.Context, arg UpdateWorkspaceAgentLifecycleStateByIDParams) error
@ -558,6 +558,12 @@ WHERE
        workspace_builds.reason::text = $11
    ELSE true
    END
    -- Filter request_id
    AND CASE
        WHEN $12 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
            audit_logs.request_id = $12
        ELSE true
    END

-- Authorize Filter clause will be injected below in GetAuthorizedAuditLogsOffset
-- @authorize_filter
@ -567,9 +573,9 @@ LIMIT
    -- a limit of 0 means "no limit". The audit log table is unbounded
    -- in size, and is expected to be quite large. Implement a default
    -- limit of 100 to prevent accidental excessively large queries.
    COALESCE(NULLIF($13 :: int, 0), 100)
    COALESCE(NULLIF($14 :: int, 0), 100)
OFFSET
    $12
    $13
`

type GetAuditLogsOffsetParams struct {
@ -584,6 +590,7 @@ type GetAuditLogsOffsetParams struct {
    DateFrom time.Time `db:"date_from" json:"date_from"`
    DateTo time.Time `db:"date_to" json:"date_to"`
    BuildReason string `db:"build_reason" json:"build_reason"`
    RequestID uuid.UUID `db:"request_id" json:"request_id"`
    OffsetOpt int32 `db:"offset_opt" json:"offset_opt"`
    LimitOpt int32 `db:"limit_opt" json:"limit_opt"`
}
@ -624,6 +631,7 @@ func (q *sqlQuerier) GetAuditLogsOffset(ctx context.Context, arg GetAuditLogsOff
    arg.DateFrom,
    arg.DateTo,
    arg.BuildReason,
    arg.RequestID,
    arg.OffsetOpt,
    arg.LimitOpt,
)
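The new request_id filter uses the all-zero UUID as an "unset" sentinel: the SQL CASE only applies the predicate when $12 differs from the zero UUID, and the dbmem fake mirrors this with its `arg.RequestID != uuid.Nil` check. A small sketch of the same convention:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

// matchesRequestID mirrors the filter convention: uuid.Nil means "no filter",
// any other value must match exactly.
func matchesRequestID(filter, logRequestID uuid.UUID) bool {
	return filter == uuid.Nil || filter == logRequestID
}

func main() {
	id := uuid.New()
	fmt.Println(matchesRequestID(uuid.Nil, id))   // true: unset filter passes everything
	fmt.Println(matchesRequestID(id, id))         // true: exact match
	fmt.Println(matchesRequestID(uuid.New(), id)) // false: mismatch filtered out
}
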
@ -5981,6 +5989,7 @@ JOIN
LEFT JOIN
    provisioner_jobs current_job ON (
        current_job.worker_id = pd.id
        AND current_job.organization_id = pd.organization_id
        AND current_job.completed_at IS NULL
    )
LEFT JOIN
@ -5992,26 +6001,40 @@ LEFT JOIN
            provisioner_jobs
        WHERE
            worker_id = pd.id
            AND organization_id = pd.organization_id
            AND completed_at IS NOT NULL
        ORDER BY
            completed_at DESC
        LIMIT 1
    )
    AND previous_job.organization_id = pd.organization_id
)
LEFT JOIN
    workspace_builds current_build ON current_build.id = CASE WHEN current_job.input ? 'workspace_build_id' THEN (current_job.input->>'workspace_build_id')::uuid END
LEFT JOIN
    -- We should always have a template version, either explicitly or implicitly via workspace build.
    template_versions current_version ON current_version.id = CASE WHEN current_job.input ? 'template_version_id' THEN (current_job.input->>'template_version_id')::uuid ELSE current_build.template_version_id END
    template_versions current_version ON (
        current_version.id = CASE WHEN current_job.input ? 'template_version_id' THEN (current_job.input->>'template_version_id')::uuid ELSE current_build.template_version_id END
        AND current_version.organization_id = pd.organization_id
    )
LEFT JOIN
    templates current_template ON current_template.id = current_version.template_id
    templates current_template ON (
        current_template.id = current_version.template_id
        AND current_template.organization_id = pd.organization_id
    )
LEFT JOIN
    workspace_builds previous_build ON previous_build.id = CASE WHEN previous_job.input ? 'workspace_build_id' THEN (previous_job.input->>'workspace_build_id')::uuid END
LEFT JOIN
    -- We should always have a template version, either explicitly or implicitly via workspace build.
    template_versions previous_version ON previous_version.id = CASE WHEN previous_job.input ? 'template_version_id' THEN (previous_job.input->>'template_version_id')::uuid ELSE previous_build.template_version_id END
    template_versions previous_version ON (
        previous_version.id = CASE WHEN previous_job.input ? 'template_version_id' THEN (previous_job.input->>'template_version_id')::uuid ELSE previous_build.template_version_id END
        AND previous_version.organization_id = pd.organization_id
    )
LEFT JOIN
    templates previous_template ON previous_template.id = previous_version.template_id
    templates previous_template ON (
        previous_template.id = previous_version.template_id
        AND previous_template.organization_id = pd.organization_id
    )
WHERE
    pd.organization_id = $2::uuid
    AND (COALESCE(array_length($3::uuid[], 1), 0) = 0 OR pd.id = ANY($3::uuid[]))
@ -6712,14 +6735,23 @@ LEFT JOIN
LEFT JOIN
    workspace_builds wb ON wb.id = CASE WHEN pj.input ? 'workspace_build_id' THEN (pj.input->>'workspace_build_id')::uuid END
LEFT JOIN
    workspaces w ON wb.workspace_id = w.id
    workspaces w ON (
        w.id = wb.workspace_id
        AND w.organization_id = pj.organization_id
    )
LEFT JOIN
    -- We should always have a template version, either explicitly or implicitly via workspace build.
    template_versions tv ON tv.id = CASE WHEN pj.input ? 'template_version_id' THEN (pj.input->>'template_version_id')::uuid ELSE wb.template_version_id END
    template_versions tv ON (
        tv.id = CASE WHEN pj.input ? 'template_version_id' THEN (pj.input->>'template_version_id')::uuid ELSE wb.template_version_id END
        AND tv.organization_id = pj.organization_id
    )
LEFT JOIN
    templates t ON tv.template_id = t.id
    templates t ON (
        t.id = tv.template_id
        AND t.organization_id = pj.organization_id
    )
WHERE
    ($1::uuid IS NULL OR pj.organization_id = $1)
    pj.organization_id = $1::uuid
    AND (COALESCE(array_length($2::uuid[], 1), 0) = 0 OR pj.id = ANY($2::uuid[]))
    AND (COALESCE(array_length($3::provisioner_job_status[], 1), 0) = 0 OR pj.job_status = ANY($3::provisioner_job_status[]))
    AND ($4::tagset = 'null'::tagset OR provisioner_tagset_contains(pj.tags::tagset, $4::tagset))
@ -6741,7 +6773,7 @@ LIMIT
`

type GetProvisionerJobsByOrganizationAndStatusWithQueuePositionAndProvisionerParams struct {
    OrganizationID uuid.NullUUID `db:"organization_id" json:"organization_id"`
    OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
    IDs []uuid.UUID `db:"ids" json:"ids"`
    Status []ProvisionerJobStatus `db:"status" json:"status"`
    Tags StringMap `db:"tags" json:"tags"`
@@ -12255,7 +12287,7 @@ func (q *sqlQuerier) UpsertWorkspaceAgentPortShare(ctx context.Context, arg Upse

 const fetchMemoryResourceMonitorsByAgentID = `-- name: FetchMemoryResourceMonitorsByAgentID :one
 SELECT
-	agent_id, enabled, threshold, created_at
+	agent_id, enabled, threshold, created_at, updated_at, state, debounced_until
 FROM
 	workspace_agent_memory_resource_monitors
 WHERE
@@ -12270,13 +12302,16 @@ func (q *sqlQuerier) FetchMemoryResourceMonitorsByAgentID(ctx context.Context, a
 		&i.Enabled,
 		&i.Threshold,
 		&i.CreatedAt,
+		&i.UpdatedAt,
+		&i.State,
+		&i.DebouncedUntil,
 	)
 	return i, err
 }

 const fetchVolumesResourceMonitorsByAgentID = `-- name: FetchVolumesResourceMonitorsByAgentID :many
 SELECT
-	agent_id, enabled, threshold, path, created_at
+	agent_id, enabled, threshold, path, created_at, updated_at, state, debounced_until
 FROM
 	workspace_agent_volume_resource_monitors
 WHERE
@@ -12298,6 +12333,9 @@ func (q *sqlQuerier) FetchVolumesResourceMonitorsByAgentID(ctx context.Context,
 			&i.Threshold,
 			&i.Path,
 			&i.CreatedAt,
+			&i.UpdatedAt,
+			&i.State,
+			&i.DebouncedUntil,
 		); err != nil {
 			return nil, err
 		}
@@ -12317,26 +12355,35 @@ INSERT INTO
 	workspace_agent_memory_resource_monitors (
 		agent_id,
 		enabled,
+		state,
 		threshold,
-		created_at
+		created_at,
+		updated_at,
+		debounced_until
 	)
 VALUES
-	($1, $2, $3, $4) RETURNING agent_id, enabled, threshold, created_at
+	($1, $2, $3, $4, $5, $6, $7) RETURNING agent_id, enabled, threshold, created_at, updated_at, state, debounced_until
 `

 type InsertMemoryResourceMonitorParams struct {
-	AgentID   uuid.UUID `db:"agent_id" json:"agent_id"`
-	Enabled   bool      `db:"enabled" json:"enabled"`
-	Threshold int32     `db:"threshold" json:"threshold"`
-	CreatedAt time.Time `db:"created_at" json:"created_at"`
+	AgentID        uuid.UUID                  `db:"agent_id" json:"agent_id"`
+	Enabled        bool                       `db:"enabled" json:"enabled"`
+	State          WorkspaceAgentMonitorState `db:"state" json:"state"`
+	Threshold      int32                      `db:"threshold" json:"threshold"`
+	CreatedAt      time.Time                  `db:"created_at" json:"created_at"`
+	UpdatedAt      time.Time                  `db:"updated_at" json:"updated_at"`
+	DebouncedUntil time.Time                  `db:"debounced_until" json:"debounced_until"`
 }

 func (q *sqlQuerier) InsertMemoryResourceMonitor(ctx context.Context, arg InsertMemoryResourceMonitorParams) (WorkspaceAgentMemoryResourceMonitor, error) {
 	row := q.db.QueryRowContext(ctx, insertMemoryResourceMonitor,
 		arg.AgentID,
 		arg.Enabled,
+		arg.State,
 		arg.Threshold,
 		arg.CreatedAt,
+		arg.UpdatedAt,
+		arg.DebouncedUntil,
 	)
 	var i WorkspaceAgentMemoryResourceMonitor
 	err := row.Scan(
@@ -12344,6 +12391,9 @@ func (q *sqlQuerier) InsertMemoryResourceMonitor(ctx context.Context, arg Insert
 		&i.Enabled,
 		&i.Threshold,
 		&i.CreatedAt,
+		&i.UpdatedAt,
+		&i.State,
+		&i.DebouncedUntil,
 	)
 	return i, err
 }
@@ -12354,19 +12404,25 @@ INSERT INTO
 		agent_id,
 		path,
 		enabled,
+		state,
 		threshold,
-		created_at
+		created_at,
+		updated_at,
+		debounced_until
 	)
 VALUES
-	($1, $2, $3, $4, $5) RETURNING agent_id, enabled, threshold, path, created_at
+	($1, $2, $3, $4, $5, $6, $7, $8) RETURNING agent_id, enabled, threshold, path, created_at, updated_at, state, debounced_until
 `

 type InsertVolumeResourceMonitorParams struct {
-	AgentID   uuid.UUID `db:"agent_id" json:"agent_id"`
-	Path      string    `db:"path" json:"path"`
-	Enabled   bool      `db:"enabled" json:"enabled"`
-	Threshold int32     `db:"threshold" json:"threshold"`
-	CreatedAt time.Time `db:"created_at" json:"created_at"`
+	AgentID        uuid.UUID                  `db:"agent_id" json:"agent_id"`
+	Path           string                     `db:"path" json:"path"`
+	Enabled        bool                       `db:"enabled" json:"enabled"`
+	State          WorkspaceAgentMonitorState `db:"state" json:"state"`
+	Threshold      int32                      `db:"threshold" json:"threshold"`
+	CreatedAt      time.Time                  `db:"created_at" json:"created_at"`
+	UpdatedAt      time.Time                  `db:"updated_at" json:"updated_at"`
+	DebouncedUntil time.Time                  `db:"debounced_until" json:"debounced_until"`
 }

 func (q *sqlQuerier) InsertVolumeResourceMonitor(ctx context.Context, arg InsertVolumeResourceMonitorParams) (WorkspaceAgentVolumeResourceMonitor, error) {
@@ -12374,8 +12430,11 @@ func (q *sqlQuerier) InsertVolumeResourceMonitor(ctx context.Context, arg Insert
 		arg.AgentID,
 		arg.Path,
 		arg.Enabled,
+		arg.State,
 		arg.Threshold,
 		arg.CreatedAt,
+		arg.UpdatedAt,
+		arg.DebouncedUntil,
 	)
 	var i WorkspaceAgentVolumeResourceMonitor
 	err := row.Scan(
@@ -12384,10 +12443,69 @@ func (q *sqlQuerier) InsertVolumeResourceMonitor(ctx context.Context, arg Insert
 		&i.Threshold,
 		&i.Path,
 		&i.CreatedAt,
+		&i.UpdatedAt,
+		&i.State,
+		&i.DebouncedUntil,
 	)
 	return i, err
 }
+
+const updateMemoryResourceMonitor = `-- name: UpdateMemoryResourceMonitor :exec
+UPDATE workspace_agent_memory_resource_monitors
+SET
+	updated_at = $2,
+	state = $3,
+	debounced_until = $4
+WHERE
+	agent_id = $1
+`
+
+type UpdateMemoryResourceMonitorParams struct {
+	AgentID        uuid.UUID                  `db:"agent_id" json:"agent_id"`
+	UpdatedAt      time.Time                  `db:"updated_at" json:"updated_at"`
+	State          WorkspaceAgentMonitorState `db:"state" json:"state"`
+	DebouncedUntil time.Time                  `db:"debounced_until" json:"debounced_until"`
+}
+
+func (q *sqlQuerier) UpdateMemoryResourceMonitor(ctx context.Context, arg UpdateMemoryResourceMonitorParams) error {
+	_, err := q.db.ExecContext(ctx, updateMemoryResourceMonitor,
+		arg.AgentID,
+		arg.UpdatedAt,
+		arg.State,
+		arg.DebouncedUntil,
+	)
+	return err
+}
+
+const updateVolumeResourceMonitor = `-- name: UpdateVolumeResourceMonitor :exec
+UPDATE workspace_agent_volume_resource_monitors
+SET
+	updated_at = $3,
+	state = $4,
+	debounced_until = $5
+WHERE
+	agent_id = $1 AND path = $2
+`
+
+type UpdateVolumeResourceMonitorParams struct {
+	AgentID        uuid.UUID                  `db:"agent_id" json:"agent_id"`
+	Path           string                     `db:"path" json:"path"`
+	UpdatedAt      time.Time                  `db:"updated_at" json:"updated_at"`
+	State          WorkspaceAgentMonitorState `db:"state" json:"state"`
+	DebouncedUntil time.Time                  `db:"debounced_until" json:"debounced_until"`
+}
+
+func (q *sqlQuerier) UpdateVolumeResourceMonitor(ctx context.Context, arg UpdateVolumeResourceMonitorParams) error {
+	_, err := q.db.ExecContext(ctx, updateVolumeResourceMonitor,
+		arg.AgentID,
+		arg.Path,
+		arg.UpdatedAt,
+		arg.State,
+		arg.DebouncedUntil,
+	)
+	return err
+}

 const deleteOldWorkspaceAgentLogs = `-- name: DeleteOldWorkspaceAgentLogs :exec
 WITH
 	latest_builds AS (
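Together, updated_at, state, and debounced_until let the agent API record each monitor's last evaluation and rate-limit repeat alerts. The following is a minimal sketch of the intended flow, not the repository's actual monitoring loop; the breached input, the enqueue step, and the WorkspaceAgentMonitorStateNOK constant are assumptions here:

	func updateMemoryMonitorState(ctx context.Context, db database.Store, agentID uuid.UUID, breached bool, now time.Time, debounce time.Duration) error {
		monitor, err := db.FetchMemoryResourceMonitorsByAgentID(ctx, agentID)
		if err != nil {
			return err
		}

		state := database.WorkspaceAgentMonitorStateOK
		debouncedUntil := monitor.DebouncedUntil
		if breached {
			state = database.WorkspaceAgentMonitorStateNOK // assumed companion constant to ...StateOK
			// Alert at most once per debounce window: only when the previous
			// window has expired do we notify and open a new one.
			if now.After(monitor.DebouncedUntil) {
				// ... enqueue the "memory usage over threshold" notification here ...
				debouncedUntil = now.Add(debounce)
			}
		}

		return db.UpdateMemoryResourceMonitor(ctx, database.UpdateMemoryResourceMonitorParams{
			AgentID:        agentID,
			UpdatedAt:      now,
			State:          state,
			DebouncedUntil: debouncedUntil,
		})
	}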
@@ -117,6 +117,12 @@ WHERE
 			workspace_builds.reason::text = @build_reason
 		ELSE true
 	END
+	-- Filter request_id
+	AND CASE
+		WHEN @request_id :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
+			audit_logs.request_id = @request_id
+		ELSE true
+	END

 -- Authorize Filter clause will be injected below in GetAuthorizedAuditLogsOffset
 -- @authorize_filter

@@ -58,6 +58,7 @@ JOIN
 LEFT JOIN
 	provisioner_jobs current_job ON (
 		current_job.worker_id = pd.id
+		AND current_job.organization_id = pd.organization_id
 		AND current_job.completed_at IS NULL
 	)
 LEFT JOIN
@@ -69,28 +70,42 @@ LEFT JOIN
 			provisioner_jobs
 		WHERE
 			worker_id = pd.id
+			AND organization_id = pd.organization_id
 			AND completed_at IS NOT NULL
 		ORDER BY
 			completed_at DESC
 		LIMIT 1
 	)
+	AND previous_job.organization_id = pd.organization_id
 )
 -- Current job information.
 LEFT JOIN
 	workspace_builds current_build ON current_build.id = CASE WHEN current_job.input ? 'workspace_build_id' THEN (current_job.input->>'workspace_build_id')::uuid END
 LEFT JOIN
 	-- We should always have a template version, either explicitly or implicitly via workspace build.
-	template_versions current_version ON current_version.id = CASE WHEN current_job.input ? 'template_version_id' THEN (current_job.input->>'template_version_id')::uuid ELSE current_build.template_version_id END
+	template_versions current_version ON (
+		current_version.id = CASE WHEN current_job.input ? 'template_version_id' THEN (current_job.input->>'template_version_id')::uuid ELSE current_build.template_version_id END
+		AND current_version.organization_id = pd.organization_id
+	)
 LEFT JOIN
-	templates current_template ON current_template.id = current_version.template_id
+	templates current_template ON (
+		current_template.id = current_version.template_id
+		AND current_template.organization_id = pd.organization_id
+	)
 -- Previous job information.
 LEFT JOIN
 	workspace_builds previous_build ON previous_build.id = CASE WHEN previous_job.input ? 'workspace_build_id' THEN (previous_job.input->>'workspace_build_id')::uuid END
 LEFT JOIN
 	-- We should always have a template version, either explicitly or implicitly via workspace build.
-	template_versions previous_version ON previous_version.id = CASE WHEN previous_job.input ? 'template_version_id' THEN (previous_job.input->>'template_version_id')::uuid ELSE previous_build.template_version_id END
+	template_versions previous_version ON (
+		previous_version.id = CASE WHEN previous_job.input ? 'template_version_id' THEN (previous_job.input->>'template_version_id')::uuid ELSE previous_build.template_version_id END
+		AND previous_version.organization_id = pd.organization_id
+	)
 LEFT JOIN
-	templates previous_template ON previous_template.id = previous_version.template_id
+	templates previous_template ON (
+		previous_template.id = previous_version.template_id
+		AND previous_template.organization_id = pd.organization_id
+	)
 WHERE
 	pd.organization_id = @organization_id::uuid
 	AND (COALESCE(array_length(@ids::uuid[], 1), 0) = 0 OR pd.id = ANY(@ids::uuid[]))

@@ -148,14 +148,23 @@ LEFT JOIN
 LEFT JOIN
 	workspace_builds wb ON wb.id = CASE WHEN pj.input ? 'workspace_build_id' THEN (pj.input->>'workspace_build_id')::uuid END
 LEFT JOIN
-	workspaces w ON wb.workspace_id = w.id
+	workspaces w ON (
+		w.id = wb.workspace_id
+		AND w.organization_id = pj.organization_id
+	)
 LEFT JOIN
 	-- We should always have a template version, either explicitly or implicitly via workspace build.
-	template_versions tv ON tv.id = CASE WHEN pj.input ? 'template_version_id' THEN (pj.input->>'template_version_id')::uuid ELSE wb.template_version_id END
+	template_versions tv ON (
+		tv.id = CASE WHEN pj.input ? 'template_version_id' THEN (pj.input->>'template_version_id')::uuid ELSE wb.template_version_id END
+		AND tv.organization_id = pj.organization_id
+	)
 LEFT JOIN
-	templates t ON tv.template_id = t.id
+	templates t ON (
+		t.id = tv.template_id
+		AND t.organization_id = pj.organization_id
+	)
 WHERE
-	(sqlc.narg('organization_id')::uuid IS NULL OR pj.organization_id = @organization_id)
+	pj.organization_id = @organization_id::uuid
 	AND (COALESCE(array_length(@ids::uuid[], 1), 0) = 0 OR pj.id = ANY(@ids::uuid[]))
 	AND (COALESCE(array_length(@status::provisioner_job_status[], 1), 0) = 0 OR pj.job_status = ANY(@status::provisioner_job_status[]))
 	AND (@tags::tagset = 'null'::tagset OR provisioner_tagset_contains(pj.tags::tagset, @tags::tagset))
@@ -19,11 +19,14 @@ INSERT INTO
 	workspace_agent_memory_resource_monitors (
 		agent_id,
 		enabled,
+		state,
 		threshold,
-		created_at
+		created_at,
+		updated_at,
+		debounced_until
 	)
 VALUES
-	($1, $2, $3, $4) RETURNING *;
+	($1, $2, $3, $4, $5, $6, $7) RETURNING *;

 -- name: InsertVolumeResourceMonitor :one
 INSERT INTO
@@ -31,8 +34,29 @@ INSERT INTO
 		agent_id,
 		path,
 		enabled,
+		state,
 		threshold,
-		created_at
+		created_at,
+		updated_at,
+		debounced_until
 	)
 VALUES
-	($1, $2, $3, $4, $5) RETURNING *;
+	($1, $2, $3, $4, $5, $6, $7, $8) RETURNING *;
+
+-- name: UpdateMemoryResourceMonitor :exec
+UPDATE workspace_agent_memory_resource_monitors
+SET
+	updated_at = $2,
+	state = $3,
+	debounced_until = $4
+WHERE
+	agent_id = $1;
+
+-- name: UpdateVolumeResourceMonitor :exec
+UPDATE workspace_agent_volume_resource_monitors
+SET
+	updated_at = $3,
+	state = $4,
+	debounced_until = $5
+WHERE
+	agent_id = $1 AND path = $2;
@@ -2,6 +2,7 @@ package httpapi

 import (
 	"database/sql"
+	"encoding/json"
 	"errors"
 	"fmt"
 	"net/url"
@@ -257,6 +258,23 @@ func (p *QueryParamParser) Strings(vals url.Values, def []string, queryParam str
 	})
 }

+func (p *QueryParamParser) JSONStringMap(vals url.Values, def map[string]string, queryParam string) map[string]string {
+	v, err := parseQueryParam(p, vals, func(v string) (map[string]string, error) {
+		var m map[string]string
+		if err := json.NewDecoder(strings.NewReader(v)).Decode(&m); err != nil {
+			return nil, err
+		}
+		return m, nil
+	}, def, queryParam)
+	if err != nil {
+		p.Errors = append(p.Errors, codersdk.ValidationError{
+			Field:  queryParam,
+			Detail: fmt.Sprintf("Query param %q must be a valid JSON object: %s", queryParam, err.Error()),
+		})
+	}
+	return v
+}
+
 // ValidEnum represents an enum that can be parsed and validated.
 type ValidEnum interface {
 	// Add more types as needed (avoid importing large dependency trees).
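JSONStringMap gives handlers a single step for accepting a whole JSON object as one query parameter, with failures accumulated on the parser like the other typed helpers. A sketch of typical use inside a handler (the rw/r/ctx wiring is assumed):

	p := httpapi.NewQueryParamParser()
	tags := p.JSONStringMap(r.URL.Query(), map[string]string{}, "tags")
	p.ErrorExcessParams(r.URL.Query())
	if len(p.Errors) > 0 {
		httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
			Message:     "Invalid query params.",
			Validations: p.Errors,
		})
		return
	}
	_ = tags // e.g. convert with database.StringMap(tags)

Note that the client must URL-encode the JSON, e.g. ?tags=%7B%22env%22%3A%22prod%22%7D for {"env":"prod"}.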
@@ -473,6 +473,70 @@ func TestParseQueryParams(t *testing.T) {
 		testQueryParams(t, expParams, parser, parser.UUIDs)
 	})

+	t.Run("JSONStringMap", func(t *testing.T) {
+		t.Parallel()
+
+		expParams := []queryParamTestCase[map[string]string]{
+			{
+				QueryParam: "valid_map",
+				Value:      `{"key1": "value1", "key2": "value2"}`,
+				Expected: map[string]string{
+					"key1": "value1",
+					"key2": "value2",
+				},
+			},
+			{
+				QueryParam: "empty",
+				Value:      "{}",
+				Default:    map[string]string{},
+				Expected:   map[string]string{},
+			},
+			{
+				QueryParam: "no_value",
+				NoSet:      true,
+				Default:    map[string]string{},
+				Expected:   map[string]string{},
+			},
+			{
+				QueryParam: "default",
+				NoSet:      true,
+				Default:    map[string]string{"key": "value"},
+				Expected:   map[string]string{"key": "value"},
+			},
+			{
+				QueryParam: "null",
+				Value:      "null",
+				Expected:   map[string]string(nil),
+			},
+			{
+				QueryParam: "undefined",
+				Value:      "undefined",
+				Expected:   map[string]string(nil),
+			},
+			{
+				QueryParam:            "invalid_map",
+				Value:                 `{"key1": "value1", "key2": "value2"`, // missing closing brace
+				Expected:              map[string]string(nil),
+				Default:               map[string]string{},
+				ExpectedErrorContains: `Query param "invalid_map" must be a valid JSON object: unexpected EOF`,
+			},
+			{
+				QueryParam:            "incorrect_type",
+				Value:                 `{"key1": 1, "key2": true}`,
+				Expected:              map[string]string(nil),
+				ExpectedErrorContains: `Query param "incorrect_type" must be a valid JSON object: json: cannot unmarshal number into Go value of type string`,
+			},
+			{
+				QueryParam:            "multiple_keys",
+				Values:                []string{`{"key1": "value1"}`, `{"key2": "value2"}`},
+				Expected:              map[string]string(nil),
+				ExpectedErrorContains: `Query param "multiple_keys" provided more than once, found 2 times.`,
+			},
+		}
+		parser := httpapi.NewQueryParamParser()
+		testQueryParams(t, expParams, parser, parser.JSONStringMap)
+	})
+
 	t.Run("Required", func(t *testing.T) {
 		t.Parallel()

@@ -11,9 +11,12 @@ import (

 	"github.com/coder/coder/v2/coderd/audit"
 	"github.com/coder/coder/v2/coderd/database"
+	"github.com/coder/coder/v2/coderd/database/dbauthz"
 	"github.com/coder/coder/v2/coderd/httpapi"
 	"github.com/coder/coder/v2/coderd/httpmw"
+	"github.com/coder/coder/v2/coderd/notifications"
 	"github.com/coder/coder/v2/coderd/rbac"
+	"github.com/coder/coder/v2/coderd/rbac/policy"
 	"github.com/coder/coder/v2/codersdk"
 )

@@ -163,6 +166,53 @@ func (api *API) notificationDispatchMethods(rw http.ResponseWriter, r *http.Requ
 	})
 }

+// @Summary Send a test notification
+// @ID send-a-test-notification
+// @Security CoderSessionToken
+// @Tags Notifications
+// @Success 200
+// @Router /notifications/test [post]
+func (api *API) postTestNotification(rw http.ResponseWriter, r *http.Request) {
+	var (
+		ctx = r.Context()
+		key = httpmw.APIKey(r)
+	)
+
+	if !api.Authorize(r, policy.ActionUpdate, rbac.ResourceDeploymentConfig) {
+		httpapi.Forbidden(rw)
+		return
+	}
+
+	if _, err := api.NotificationsEnqueuer.EnqueueWithData(
+		//nolint:gocritic // We need to be notifier to send the notification.
+		dbauthz.AsNotifier(ctx),
+		key.UserID,
+		notifications.TemplateTestNotification,
+		map[string]string{},
+		map[string]any{
+			// NOTE(DanielleMaywood):
+			// When notifications are enqueued, they are checked to be
+			// unique within a single day. This means that if we attempt
+			// to send two test notifications to the same user on
+			// the same day, the enqueuer will prevent us from sending
+			// a second one. We are injecting a timestamp to make the
+			// notifications appear different enough to circumvent this
+			// deduplication logic.
+			"timestamp": api.Clock.Now(),
+		},
+		"send-test-notification",
+	); err != nil {
+		api.Logger.Error(ctx, "send notification", slog.Error(err))
+		httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
+			Message: "Failed to send test notification",
+			Detail:  err.Error(),
+		})
+		return
+	}
+
+	httpapi.Write(ctx, rw, http.StatusOK, nil)
+}
+
 // @Summary Get user notification preferences
 // @ID get-user-notification-preferences
 // @Security CoderSessionToken
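Client-side, the route is reached through the SDK method the tests later in this diff rely on. A minimal sketch (serverURL and sessionToken are placeholders):

	client := codersdk.New(serverURL) // *url.URL of the deployment
	client.SetSessionToken(sessionToken)
	if err := client.PostTestNotification(ctx); err != nil {
		// the server responds 403 unless the caller may update deployment config
	}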
@@ -39,3 +39,8 @@ var (

 	TemplateWorkspaceBuildsFailedReport = uuid.MustParse("34a20db2-e9cc-4a93-b0e4-8569699d7a00")
 )
+
+// Notification-related events.
+var (
+	TemplateTestNotification = uuid.MustParse("c425f63e-716a-4bf4-ae24-78348f706c3f")
+)

@@ -1125,6 +1125,16 @@ func TestNotificationTemplates_Golden(t *testing.T) {
 			},
 		},
 	},
+	{
+		name: "TemplateTestNotification",
+		id:   notifications.TemplateTestNotification,
+		payload: types.MessagePayload{
+			UserName:     "Bobby",
+			UserEmail:    "bobby@coder.com",
+			UserUsername: "bobby",
+			Labels:       map[string]string{},
+		},
+	},
 }

 // We must have a test case for every notification_template. This is enforced below:
coderd/notifications/testdata/rendered-templates/smtp/TemplateTestNotification.html.golden (vendored, new file: 79 lines)
@@ -0,0 +1,79 @@
+From: system@coder.com
+To: bobby@coder.com
+Subject: A test notification
+Message-Id: 02ee4935-73be-4fa1-a290-ff9999026b13@blush-whale-48
+Date: Fri, 11 Oct 2024 09:03:06 +0000
+Content-Type: multipart/alternative; boundary=bbe61b741255b6098bb6b3c1f41b885773df633cb18d2a3002b68e4bc9c4
+MIME-Version: 1.0
+
+--bbe61b741255b6098bb6b3c1f41b885773df633cb18d2a3002b68e4bc9c4
+Content-Transfer-Encoding: quoted-printable
+Content-Type: text/plain; charset=UTF-8
+
+Hi Bobby,
+
+This is a test notification.
+
+
+View notification settings: http://test.com/deployment/notifications?tab=3D=
+settings
+
+--bbe61b741255b6098bb6b3c1f41b885773df633cb18d2a3002b68e4bc9c4
+Content-Transfer-Encoding: quoted-printable
+Content-Type: text/html; charset=UTF-8
+
+<!doctype html>
+<html lang=3D"en">
+  <head>
+    <meta charset=3D"UTF-8" />
+    <meta name=3D"viewport" content=3D"width=3Ddevice-width, initial-scale=
+=3D1.0" />
+    <title>A test notification</title>
+  </head>
+  <body style=3D"margin: 0; padding: 0; font-family: -apple-system, system-=
+ui, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', 'Ubuntu', 'Cantarel=
+l', 'Fira Sans', 'Droid Sans', 'Helvetica Neue', sans-serif; color: #020617=
+; background: #f8fafc;">
+    <div style=3D"max-width: 600px; margin: 20px auto; padding: 60px; borde=
+r: 1px solid #e2e8f0; border-radius: 8px; background-color: #fff; text-alig=
+n: left; font-size: 14px; line-height: 1.5;">
+      <div style=3D"text-align: center;">
+        <img src=3D"https://coder.com/coder-logo-horizontal.png" alt=3D"Cod=
+er Logo" style=3D"height: 40px;" />
+      </div>
+      <h1 style=3D"text-align: center; font-size: 24px; font-weight: 400; m=
+argin: 8px 0 32px; line-height: 1.5;">
+        A test notification
+      </h1>
+      <div style=3D"line-height: 1.5;">
+        <p>Hi Bobby,</p>
+
+        <p>This is a test notification.</p>
+      </div>
+      <div style=3D"text-align: center; margin-top: 32px;">
+        =20
+        <a href=3D"http://test.com/deployment/notifications?tab=3Dsettings"=
+ style=3D"display: inline-block; padding: 13px 24px; background-color: #020=
+617; color: #f8fafc; text-decoration: none; border-radius: 8px; margin: 0 4=
+px;">
+          View notification settings
+        </a>
+        =20
+      </div>
+      <div style=3D"border-top: 1px solid #e2e8f0; color: #475569; font-siz=
+e: 12px; margin-top: 64px; padding-top: 24px; line-height: 1.6;">
+        <p>&copy; 2024 Coder. All rights reserved - <a =
+href=3D"http://test.com" style=3D"color: #2563eb; text-decoration: none;">h=
+ttp://test.com</a></p>
+        <p><a href=3D"http://test.com/settings/notifications" style=3D"colo=
+r: #2563eb; text-decoration: none;">Click here to manage your notification =
+settings</a></p>
+        <p><a href=3D"http://test.com/settings/notifications?disabled=3Dc42=
+5f63e-716a-4bf4-ae24-78348f706c3f" style=3D"color: #2563eb; text-decoration=
+: none;">Stop receiving emails like this</a></p>
+      </div>
+    </div>
+  </body>
+</html>
+
+--bbe61b741255b6098bb6b3c1f41b885773df633cb18d2a3002b68e4bc9c4--
coderd/notifications/testdata/rendered-templates/webhook/TemplateTestNotification.json.golden (vendored, new file: 25 lines)
@@ -0,0 +1,25 @@
+{
+  "_version": "1.1",
+  "msg_id": "00000000-0000-0000-0000-000000000000",
+  "payload": {
+    "_version": "1.1",
+    "notification_name": "Test Notification",
+    "notification_template_id": "00000000-0000-0000-0000-000000000000",
+    "user_id": "00000000-0000-0000-0000-000000000000",
+    "user_email": "bobby@coder.com",
+    "user_name": "Bobby",
+    "user_username": "bobby",
+    "actions": [
+      {
+        "label": "View notification settings",
+        "url": "http://test.com/deployment/notifications?tab=settings"
+      }
+    ],
+    "labels": {},
+    "data": null
+  },
+  "title": "A test notification",
+  "title_markdown": "A test notification",
+  "body": "Hi Bobby,\n\nThis is a test notification.",
+  "body_markdown": "Hi Bobby,\n\nThis is a test notification."
+}
@@ -12,6 +12,7 @@ import (
 	"github.com/coder/coder/v2/coderd/coderdtest"
 	"github.com/coder/coder/v2/coderd/database"
 	"github.com/coder/coder/v2/coderd/notifications"
+	"github.com/coder/coder/v2/coderd/notifications/notificationstest"
 	"github.com/coder/coder/v2/codersdk"
 	"github.com/coder/coder/v2/testutil"
 )
@@ -317,3 +318,58 @@ func TestNotificationDispatchMethods(t *testing.T) {
 		})
 	}
 }
+
+func TestNotificationTest(t *testing.T) {
+	t.Parallel()
+
+	t.Run("OwnerCanSendTestNotification", func(t *testing.T) {
+		t.Parallel()
+
+		ctx := testutil.Context(t, testutil.WaitShort)
+
+		notifyEnq := &notificationstest.FakeEnqueuer{}
+		ownerClient := coderdtest.New(t, &coderdtest.Options{
+			DeploymentValues:      coderdtest.DeploymentValues(t),
+			NotificationsEnqueuer: notifyEnq,
+		})
+
+		// Given: A user with owner permissions.
+		_ = coderdtest.CreateFirstUser(t, ownerClient)
+
+		// When: They attempt to send a test notification.
+		err := ownerClient.PostTestNotification(ctx)
+		require.NoError(t, err)
+
+		// Then: We expect a notification to have been sent.
+		sent := notifyEnq.Sent(notificationstest.WithTemplateID(notifications.TemplateTestNotification))
+		require.Len(t, sent, 1)
+	})
+
+	t.Run("MemberCannotSendTestNotification", func(t *testing.T) {
+		t.Parallel()
+
+		ctx := testutil.Context(t, testutil.WaitShort)
+
+		notifyEnq := &notificationstest.FakeEnqueuer{}
+		ownerClient := coderdtest.New(t, &coderdtest.Options{
+			DeploymentValues:      coderdtest.DeploymentValues(t),
+			NotificationsEnqueuer: notifyEnq,
+		})
+
+		// Given: A user without owner permissions.
+		ownerUser := coderdtest.CreateFirstUser(t, ownerClient)
+		memberClient, _ := coderdtest.CreateAnotherUser(t, ownerClient, ownerUser.OrganizationID)
+
+		// When: They attempt to send a test notification.
+		err := memberClient.PostTestNotification(ctx)
+
+		// Then: We expect a forbidden error with no notifications sent.
+		var sdkError *codersdk.Error
+		require.Error(t, err)
+		require.ErrorAsf(t, err, &sdkError, "error should be of type *codersdk.Error")
+		require.Equal(t, http.StatusForbidden, sdkError.StatusCode())
+
+		sent := notifyEnq.Sent(notificationstest.WithTemplateID(notifications.TemplateTestNotification))
+		require.Len(t, sent, 0)
+	})
+}

@@ -9,6 +9,8 @@ import (
 	"github.com/coder/coder/v2/coderd/httpapi"
 	"github.com/coder/coder/v2/coderd/httpmw"
+	"github.com/coder/coder/v2/coderd/provisionerdserver"
 	"github.com/coder/coder/v2/coderd/rbac"
+	"github.com/coder/coder/v2/coderd/rbac/policy"
 	"github.com/coder/coder/v2/coderd/util/ptr"
 	"github.com/coder/coder/v2/codersdk"
 )
@@ -31,11 +33,18 @@ func (api *API) provisionerDaemons(rw http.ResponseWriter, r *http.Request) {
 		org = httpmw.OrganizationParam(r)
 	)

+	// This endpoint returns information about provisioner jobs.
+	// For now, only owners and template admins can access provisioner jobs.
+	if !api.Authorize(r, policy.ActionRead, rbac.ResourceProvisionerJobs.InOrg(org.ID)) {
+		httpapi.ResourceNotFound(rw)
+		return
+	}
+
 	qp := r.URL.Query()
 	p := httpapi.NewQueryParamParser()
 	limit := p.PositiveInt32(qp, 50, "limit")
 	ids := p.UUIDs(qp, nil, "ids")
-	tagsRaw := p.String(qp, "", "tags")
+	tags := p.JSONStringMap(qp, database.StringMap{}, "tags")
 	p.ErrorExcessParams(qp)
 	if len(p.Errors) > 0 {
 		httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
@@ -45,17 +54,6 @@ func (api *API) provisionerDaemons(rw http.ResponseWriter, r *http.Request) {
 		return
 	}

-	tags := database.StringMap{}
-	if tagsRaw != "" {
-		if err := tags.Scan([]byte(tagsRaw)); err != nil {
-			httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
-				Message: "Invalid tags query parameter",
-				Detail:  err.Error(),
-			})
-			return
-		}
-	}
-
 	daemons, err := api.Database.GetProvisionerDaemonsWithStatusByOrganization(
 		ctx,
 		database.GetProvisionerDaemonsWithStatusByOrganizationParams{
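Because tags is now parsed as a JSON object, callers must URL-encode the object into the query string. An illustrative sketch of building such a request path (the route shape is an assumption based on coderd's organization-scoped router; confirm against the API docs):

	q := url.Values{}
	q.Set("tags", `{"scope":"organization"}`) // Encode() percent-encodes the JSON
	endpoint := "/api/v2/organizations/" + orgID.String() + "/provisionerdaemons?" + q.Encode()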
@@ -241,11 +241,14 @@ func TestProvisionerDaemons(t *testing.T) {
 		require.Nil(t, daemons[0].PreviousJob)
 	})

-	t.Run("MemberAllowed", func(t *testing.T) {
+	// For now, this is not allowed even though the member has created a
+	// workspace. Once member-level permissions for jobs are supported
+	// by RBAC, this test should be updated.
+	t.Run("MemberDenied", func(t *testing.T) {
 		t.Parallel()
 		ctx := testutil.Context(t, testutil.WaitMedium)
 		daemons, err := memberClient.OrganizationProvisionerDaemons(ctx, owner.OrganizationID, nil)
-		require.NoError(t, err)
-		require.Len(t, daemons, 50)
+		require.Error(t, err)
+		require.Len(t, daemons, 0)
 	})
 }
@@ -2022,10 +2022,13 @@ func InsertWorkspaceResource(ctx context.Context, db database.Store, jobID uuid.
 	if prAgent.ResourcesMonitoring != nil {
 		if prAgent.ResourcesMonitoring.Memory != nil {
 			_, err = db.InsertMemoryResourceMonitor(ctx, database.InsertMemoryResourceMonitorParams{
-				AgentID:   agentID,
-				Enabled:   prAgent.ResourcesMonitoring.Memory.Enabled,
-				Threshold: prAgent.ResourcesMonitoring.Memory.Threshold,
-				CreatedAt: dbtime.Now(),
+				AgentID:        agentID,
+				Enabled:        prAgent.ResourcesMonitoring.Memory.Enabled,
+				Threshold:      prAgent.ResourcesMonitoring.Memory.Threshold,
+				State:          database.WorkspaceAgentMonitorStateOK,
+				CreatedAt:      dbtime.Now(),
+				UpdatedAt:      dbtime.Now(),
+				DebouncedUntil: time.Time{},
 			})
 			if err != nil {
 				return xerrors.Errorf("failed to insert agent memory resource monitor into db: %w", err)
@@ -2033,11 +2036,14 @@ func InsertWorkspaceResource(ctx context.Context, db database.Store, jobID uuid.
 		}
 		for _, volume := range prAgent.ResourcesMonitoring.Volumes {
 			_, err = db.InsertVolumeResourceMonitor(ctx, database.InsertVolumeResourceMonitorParams{
-				AgentID:   agentID,
-				Path:      volume.Path,
-				Enabled:   volume.Enabled,
-				Threshold: volume.Threshold,
-				CreatedAt: dbtime.Now(),
+				AgentID:        agentID,
+				Path:           volume.Path,
+				Enabled:        volume.Enabled,
+				Threshold:      volume.Threshold,
+				State:          database.WorkspaceAgentMonitorStateOK,
+				CreatedAt:      dbtime.Now(),
+				UpdatedAt:      dbtime.Now(),
+				DebouncedUntil: time.Time{},
 			})
 			if err != nil {
 				return xerrors.Errorf("failed to insert agent volume resource monitor into db: %w", err)
@@ -108,7 +108,7 @@ func (api *API) handleAuthAndFetchProvisionerJobs(rw http.ResponseWriter, r *htt
 	if ids == nil {
 		ids = p.UUIDs(qp, nil, "ids")
 	}
-	tagsRaw := p.String(qp, "", "tags")
+	tags := p.JSONStringMap(qp, database.StringMap{}, "tags")
 	p.ErrorExcessParams(qp)
 	if len(p.Errors) > 0 {
 		httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
@@ -118,19 +118,8 @@ func (api *API) handleAuthAndFetchProvisionerJobs(rw http.ResponseWriter, r *htt
 		return nil, false
 	}

-	tags := database.StringMap{}
-	if tagsRaw != "" {
-		if err := tags.Scan([]byte(tagsRaw)); err != nil {
-			httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{
-				Message: "Invalid tags query parameter",
-				Detail:  err.Error(),
-			})
-			return nil, false
-		}
-	}
-
 	jobs, err := api.Database.GetProvisionerJobsByOrganizationAndStatusWithQueuePositionAndProvisioner(ctx, database.GetProvisionerJobsByOrganizationAndStatusWithQueuePositionAndProvisionerParams{
-		OrganizationID: uuid.NullUUID{UUID: org.ID, Valid: true},
+		OrganizationID: org.ID,
 		Status:         slice.StringEnums[database.ProvisionerJobStatus](status),
 		Limit:          sql.NullInt32{Int32: limit, Valid: limit > 0},
 		IDs:            ids,
@@ -299,6 +299,7 @@ var (
 	// Valid Actions
 	//  - "ActionCreate" :: create workspace agent resource monitor
 	//  - "ActionRead" :: read workspace agent resource monitor
+	//  - "ActionUpdate" :: update workspace agent resource monitor
 	ResourceWorkspaceAgentResourceMonitor = Object{
 		Type: "workspace_agent_resource_monitor",
 	}

@@ -306,6 +306,7 @@ var RBACPermissions = map[string]PermissionDefinition{
 		Actions: map[Action]ActionDefinition{
 			ActionRead:   actDef("read workspace agent resource monitor"),
 			ActionCreate: actDef("create workspace agent resource monitor"),
+			ActionUpdate: actDef("update workspace agent resource monitor"),
 		},
 	},
 }

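The new action slots into the usual authorizer call. A sketch of the check this permission now allows (the authorizer and subject are assumed from the caller's context):

	err := authorizer.Authorize(ctx, subject, policy.ActionUpdate, rbac.ResourceWorkspaceAgentResourceMonitor)
	if err != nil {
		// subject is not permitted to update resource monitors
	}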
@@ -779,7 +779,7 @@ func TestRolePermissions(t *testing.T) {
 		},
 		{
 			Name:     "ResourceMonitor",
-			Actions:  []policy.Action{policy.ActionRead, policy.ActionCreate},
+			Actions:  []policy.Action{policy.ActionRead, policy.ActionCreate, policy.ActionUpdate},
 			Resource: rbac.ResourceWorkspaceAgentResourceMonitor,
 			AuthorizeMap: map[bool][]hasAuthSubjects{
 				true:  {owner},
@@ -19,6 +19,20 @@ import (

 // AuditLogs requires the database to fetch an organization by name
 // to convert to organization uuid.
+//
+// Supported query parameters:
+//
+//   - request_id: UUID (can be used to search for associated audits e.g. connect/disconnect or open/close)
+//   - resource_id: UUID
+//   - resource_target: string
+//   - username: string
+//   - email: string
+//   - date_from: string (date in format "2006-01-02")
+//   - date_to: string (date in format "2006-01-02")
+//   - organization: string (organization UUID or name)
+//   - resource_type: string (enum)
+//   - action: string (enum)
+//   - build_reason: string (enum)
 func AuditLogs(ctx context.Context, db database.Store, query string) (database.GetAuditLogsOffsetParams, []codersdk.ValidationError) {
 	// Always lowercase for all searches.
 	query = strings.ToLower(query)
@@ -33,6 +47,7 @@ func AuditLogs(ctx context.Context, db database.Store, query string) (database.G
 	const dateLayout = "2006-01-02"
 	parser := httpapi.NewQueryParamParser()
 	filter := database.GetAuditLogsOffsetParams{
+		RequestID:      parser.UUID(values, uuid.Nil, "request_id"),
 		ResourceID:     parser.UUID(values, uuid.Nil, "resource_id"),
 		ResourceTarget: parser.String(values, "", "resource_target"),
 		Username:       parser.String(values, "", "username"),
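In the search language this surfaces as a request_id:<uuid> term. A hedged sketch of the round trip, assuming the function lives in coderd's searchquery package (the UUID is a placeholder):

	filter, errs := searchquery.AuditLogs(ctx, db, "request_id:bb9a2eba-27c9-4ffe-9bbd-984f3e0d0f9b")
	if len(errs) > 0 {
		// reject the query
	}
	logs, err := db.GetAuditLogsOffset(ctx, filter)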
@@ -344,6 +344,11 @@ func TestSearchAudit(t *testing.T) {
 				ResourceTarget: "foo",
 			},
 		},
+		{
+			Name:                  "RequestID",
+			Query:                 "request_id:foo",
+			ExpectedErrorContains: "valid uuid",
+		},
 	}

 	for _, c := range testCases {
@@ -177,3 +177,19 @@ func DifferenceFunc[T any](a []T, b []T, equal func(a, b T) bool) []T {
 	}
 	return tmp
 }
+
+func CountConsecutive[T comparable](needle T, haystack ...T) int {
+	maxLength := 0
+	curLength := 0
+
+	for _, v := range haystack {
+		if v == needle {
+			curLength++
+		} else {
+			maxLength = max(maxLength, curLength)
+			curLength = 0
+		}
+	}
+
+	return max(maxLength, curLength)
+}
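CountConsecutive returns the length of the longest unbroken run of needle in haystack, which is what a monitor needs to detect sustained NOK streaks in a window of datapoints. A worked example (illustrative only; the package path is assumed to be coderd/util/slice, alongside DifferenceFunc):

	states := []bool{true, false, true, true, true, false, true}
	longest := slice.CountConsecutive(true, states...) // runs are 1, 3, 1 -> returns 3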
@@ -143,7 +143,9 @@ func (api *API) workspaceAgentRPC(rw http.ResponseWriter, r *http.Request) {

 		Ctx:                   api.ctx,
 		Log:                   logger,
+		Clock:                 api.Clock,
 		Database:              api.Database,
+		NotificationsEnqueuer: api.NotificationsEnqueuer,
 		Pubsub:                api.Pubsub,
 		DerpMapFn:             api.DERPMap,
 		TailnetCoordinator:    &api.TailnetCoordinator,