feat: add connection statistics for workspace agents (#6469)

* fix: don't make session counts cumulative

Cumulative counts made for some weird tracking; we want the
point-in-time session counts, not a running total (see the sketch
after the commit metadata below).

* Add databasefake query for getting agent stats

* Add deployment stats endpoint

* The query... works?!?

* Fix aggregation query

* Select from multiple tables instead

* Fix continuous stats

* Increase period of stat refreshes

* Add workspace counts to deployment stats

* fmt

* Add a slight bit of responsiveness

* Fix template version editor overflow

* Add refresh button

* Fix font family on button

* Fix latest stat being reported

* Revert agent conn stats

* Fix linting error

* Fix tests

* Fix gen

* Fix migrations

* Block on sending stat updates

* Add test fixtures

* Fix response structure

* make gen
Kyle Carberry (committed by GitHub)
2023-03-08 21:05:45 -06:00
parent 9d40d2ffdc
commit 5304b4e483
43 changed files with 1790 additions and 174 deletions
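
A minimal sketch of the cumulative-vs-point-in-time distinction from the first bullet (hypothetical report values, not part of this diff):

package main

import "fmt"

func main() {
	// Hypothetical: one agent reports session_count_ssh = 1 in two
	// consecutive reporting intervals for the same open SSH session.
	reports := []int64{1, 1}

	var cumulative, pointInTime int64
	for _, c := range reports {
		cumulative += c // 2: double-counts the single session
		pointInTime = c // 1: the latest report is the real count
	}
	fmt.Println(cumulative, pointInTime) // prints: 2 1
}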


@@ -201,6 +201,14 @@ func (q *querier) DeleteOldWorkspaceAgentStats(ctx context.Context) error {
return q.db.DeleteOldWorkspaceAgentStats(ctx)
}
func (q *querier) GetDeploymentWorkspaceAgentStats(ctx context.Context, createdAfter time.Time) (database.GetDeploymentWorkspaceAgentStatsRow, error) {
return q.db.GetDeploymentWorkspaceAgentStats(ctx, createdAfter)
}
func (q *querier) GetDeploymentWorkspaceStats(ctx context.Context) (database.GetDeploymentWorkspaceStatsRow, error) {
return q.db.GetDeploymentWorkspaceStats(ctx)
}
func (q *querier) GetParameterSchemasCreatedAfter(ctx context.Context, createdAt time.Time) ([]database.ParameterSchema, error) {
return q.db.GetParameterSchemasCreatedAfter(ctx, createdAt)
}


@@ -312,6 +312,53 @@ func (*fakeQuerier) DeleteOldWorkspaceAgentStats(_ context.Context) error {
return nil
}
func (q *fakeQuerier) GetDeploymentWorkspaceAgentStats(_ context.Context, createdAfter time.Time) (database.GetDeploymentWorkspaceAgentStatsRow, error) {
q.mutex.RLock()
defer q.mutex.RUnlock()
agentStatsCreatedAfter := make([]database.WorkspaceAgentStat, 0)
for _, agentStat := range q.workspaceAgentStats {
if agentStat.CreatedAt.After(createdAfter) {
agentStatsCreatedAfter = append(agentStatsCreatedAfter, agentStat)
}
}
latestAgentStats := map[uuid.UUID]database.WorkspaceAgentStat{}
for _, agentStat := range q.workspaceAgentStats {
if agentStat.CreatedAt.After(createdAfter) {
latestAgentStats[agentStat.AgentID] = agentStat
}
}
stat := database.GetDeploymentWorkspaceAgentStatsRow{}
for _, agentStat := range latestAgentStats {
stat.SessionCountVSCode += agentStat.SessionCountVSCode
stat.SessionCountJetBrains += agentStat.SessionCountJetBrains
stat.SessionCountReconnectingPTY += agentStat.SessionCountReconnectingPTY
stat.SessionCountSSH += agentStat.SessionCountSSH
}
latencies := make([]float64, 0)
for _, agentStat := range agentStatsCreatedAfter {
stat.WorkspaceRxBytes += agentStat.RxBytes
stat.WorkspaceTxBytes += agentStat.TxBytes
latencies = append(latencies, agentStat.ConnectionMedianLatencyMS)
}
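// tryPercentile picks a nearest-rank value as an approximation; the
// real SQL query uses PERCENTILE_CONT, which interpolates (see the
// sketch after this function).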
tryPercentile := func(fs []float64, p float64) float64 {
if len(fs) == 0 {
return -1
}
sort.Float64s(fs)
return fs[int(float64(len(fs))*p/100)]
}
stat.WorkspaceConnectionLatency50 = tryPercentile(latencies, 50)
stat.WorkspaceConnectionLatency95 = tryPercentile(latencies, 95)
return stat, nil
}
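
For contrast with tryPercentile above, a minimal sketch of the interpolating behavior of PostgreSQL's PERCENTILE_CONT, which the SQL path uses (an illustration under that assumption, not code from this change); for the inputs [1, 2] it yields the 1.5 and 1.95 values the database tests below expect:

package main

import (
	"fmt"
	"math"
	"sort"
)

// percentileCont linearly interpolates between the two nearest ranks,
// matching the continuous-percentile definition.
func percentileCont(fs []float64, p float64) float64 {
	if len(fs) == 0 {
		return -1
	}
	sort.Float64s(fs)
	rank := p / 100 * float64(len(fs)-1)
	lo := int(math.Floor(rank))
	hi := int(math.Ceil(rank))
	return fs[lo] + (rank-float64(lo))*(fs[hi]-fs[lo])
}

func main() {
	fmt.Println(percentileCont([]float64{1, 2}, 50)) // 1.5
	fmt.Println(percentileCont([]float64{1, 2}, 95)) // 1.95
}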
func (q *fakeQuerier) InsertWorkspaceAgentStat(_ context.Context, p database.InsertWorkspaceAgentStatParams) (database.WorkspaceAgentStat, error) {
if err := validateDatabaseType(p); err != nil {
return database.WorkspaceAgentStat{}, err
@@ -1031,7 +1078,7 @@ func (q *fakeQuerier) GetAuthorizedWorkspaces(ctx context.Context, arg database.
}
if arg.Status != "" {
- build, err := q.GetLatestWorkspaceBuildByWorkspaceID(ctx, workspace.ID)
+ build, err := q.getLatestWorkspaceBuildByWorkspaceIDNoLock(ctx, workspace.ID)
if err != nil {
return nil, xerrors.Errorf("get latest build: %w", err)
}
@@ -1120,7 +1167,7 @@ func (q *fakeQuerier) GetAuthorizedWorkspaces(ctx context.Context, arg database.
}
if arg.HasAgent != "" {
- build, err := q.GetLatestWorkspaceBuildByWorkspaceID(ctx, workspace.ID)
+ build, err := q.getLatestWorkspaceBuildByWorkspaceIDNoLock(ctx, workspace.ID)
if err != nil {
return nil, xerrors.Errorf("get latest build: %w", err)
}
@@ -1426,10 +1473,14 @@ func (q *fakeQuerier) GetWorkspaceBuildByJobID(_ context.Context, jobID uuid.UUI
return database.WorkspaceBuild{}, sql.ErrNoRows
}
- func (q *fakeQuerier) GetLatestWorkspaceBuildByWorkspaceID(_ context.Context, workspaceID uuid.UUID) (database.WorkspaceBuild, error) {
+ func (q *fakeQuerier) GetLatestWorkspaceBuildByWorkspaceID(ctx context.Context, workspaceID uuid.UUID) (database.WorkspaceBuild, error) {
q.mutex.RLock()
defer q.mutex.RUnlock()
return q.getLatestWorkspaceBuildByWorkspaceIDNoLock(ctx, workspaceID)
}
func (q *fakeQuerier) getLatestWorkspaceBuildByWorkspaceIDNoLock(_ context.Context, workspaceID uuid.UUID) (database.WorkspaceBuild, error) {
var row database.WorkspaceBuild
var buildNum int32 = -1
for _, workspaceBuild := range q.workspaceBuilds {
@@ -3609,6 +3660,50 @@ func (q *fakeQuerier) UpdateWorkspaceLastUsedAt(_ context.Context, arg database.
return sql.ErrNoRows
}
func (q *fakeQuerier) GetDeploymentWorkspaceStats(ctx context.Context) (database.GetDeploymentWorkspaceStatsRow, error) {
q.mutex.RLock()
defer q.mutex.RUnlock()
stat := database.GetDeploymentWorkspaceStatsRow{}
for _, workspace := range q.workspaces {
build, err := q.getLatestWorkspaceBuildByWorkspaceIDNoLock(ctx, workspace.ID)
if err != nil {
return stat, err
}
job, err := q.getProvisionerJobByIDNoLock(ctx, build.JobID)
if err != nil {
return stat, err
}
if !job.StartedAt.Valid {
stat.PendingWorkspaces++
continue
}
if job.StartedAt.Valid &&
!job.CanceledAt.Valid &&
time.Since(job.UpdatedAt) <= 30*time.Second &&
!job.CompletedAt.Valid {
stat.BuildingWorkspaces++
continue
}
if job.CompletedAt.Valid &&
!job.CanceledAt.Valid &&
!job.Error.Valid {
if build.Transition == database.WorkspaceTransitionStart {
stat.RunningWorkspaces++
}
if build.Transition == database.WorkspaceTransitionStop {
stat.StoppedWorkspaces++
}
continue
}
if job.CanceledAt.Valid || job.Error.Valid {
stat.FailedWorkspaces++
continue
}
}
return stat, nil
}
func (q *fakeQuerier) UpdateWorkspaceTTLToBeWithinTemplateMax(_ context.Context, arg database.UpdateWorkspaceTTLToBeWithinTemplateMaxParams) error {
if err := validateDatabaseType(arg); err != nil {
return err


@@ -5,6 +5,7 @@ import (
"crypto/sha256"
"database/sql"
"encoding/hex"
"encoding/json"
"fmt"
"net"
"testing"
@@ -437,3 +438,30 @@ func ParameterValue(t testing.TB, db database.Store, seed database.ParameterValu
require.NoError(t, err, "insert parameter value")
return scheme
}
func WorkspaceAgentStat(t testing.TB, db database.Store, orig database.WorkspaceAgentStat) database.WorkspaceAgentStat {
if orig.ConnectionsByProto == nil {
orig.ConnectionsByProto = json.RawMessage([]byte("{}"))
}
scheme, err := db.InsertWorkspaceAgentStat(context.Background(), database.InsertWorkspaceAgentStatParams{
ID: takeFirst(orig.ID, uuid.New()),
CreatedAt: takeFirst(orig.CreatedAt, database.Now()),
UserID: takeFirst(orig.UserID, uuid.New()),
TemplateID: takeFirst(orig.TemplateID, uuid.New()),
WorkspaceID: takeFirst(orig.WorkspaceID, uuid.New()),
AgentID: takeFirst(orig.AgentID, uuid.New()),
ConnectionsByProto: orig.ConnectionsByProto,
ConnectionCount: takeFirst(orig.ConnectionCount, 0),
RxPackets: takeFirst(orig.RxPackets, 0),
RxBytes: takeFirst(orig.RxBytes, 0),
TxPackets: takeFirst(orig.TxPackets, 0),
TxBytes: takeFirst(orig.TxBytes, 0),
SessionCountVSCode: takeFirst(orig.SessionCountVSCode, 0),
SessionCountJetBrains: takeFirst(orig.SessionCountJetBrains, 0),
SessionCountReconnectingPTY: takeFirst(orig.SessionCountReconnectingPTY, 0),
SessionCountSSH: takeFirst(orig.SessionCountSSH, 0),
ConnectionMedianLatencyMS: takeFirst(orig.ConnectionMedianLatencyMS, 0),
})
require.NoError(t, err, "insert workspace agent stat")
return scheme
}
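
The fixture relies on the package's takeFirst helper to prefer caller-supplied values over generated defaults; a hypothetical standalone sketch of that pattern (the real helper's signature may differ):

package main

import "fmt"

// takeFirst returns the first non-zero value, so a test can seed only
// the fields it cares about and let everything else default.
func takeFirst[T comparable](values ...T) T {
	var zero T
	for _, v := range values {
		if v != zero {
			return v
		}
	}
	return zero
}

func main() {
	fmt.Println(takeFirst(int64(0), 5, 7)) // 5
	fmt.Println(takeFirst("", "fallback")) // fallback
}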


@@ -485,7 +485,7 @@ CREATE TABLE workspace_agent_stats (
rx_bytes bigint DEFAULT 0 NOT NULL,
tx_packets bigint DEFAULT 0 NOT NULL,
tx_bytes bigint DEFAULT 0 NOT NULL,
- connection_median_latency_ms bigint DEFAULT '-1'::integer NOT NULL,
+ connection_median_latency_ms double precision DEFAULT '-1'::integer NOT NULL,
session_count_vscode bigint DEFAULT 0 NOT NULL,
session_count_jetbrains bigint DEFAULT 0 NOT NULL,
session_count_reconnecting_pty bigint DEFAULT 0 NOT NULL,


@@ -18,7 +18,7 @@ SCRIPT_DIR=$(dirname "${BASH_SOURCE[0]}")
# Dump the updated schema (use make to utilize caching).
make -C ../.. --no-print-directory coderd/database/dump.sql
# The logic below depends on the exact version being correct :(
- go run github.com/kyleconroy/sqlc/cmd/sqlc@v1.16.0 generate
+ sqlc generate
first=true
for fi in queries/*.sql.go; do


@@ -0,0 +1 @@
ALTER TABLE workspace_agent_stats ALTER COLUMN connection_median_latency_ms TYPE bigint;


@@ -0,0 +1 @@
ALTER TABLE workspace_agent_stats ALTER COLUMN connection_median_latency_ms TYPE FLOAT;
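
PostgreSQL treats FLOAT with no precision as an alias for double precision, which matches the dump.sql change above. A sketch (hypothetical DSN) of verifying the column type after the up migration runs:

package main

import (
	"database/sql"
	"fmt"

	_ "github.com/lib/pq"
)

func main() {
	// Hypothetical local connection string.
	db, err := sql.Open("postgres", "postgres://localhost/coder?sslmode=disable")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	var dataType string
	err = db.QueryRow(`SELECT data_type FROM information_schema.columns
		WHERE table_name = 'workspace_agent_stats'
		AND column_name = 'connection_median_latency_ms'`).Scan(&dataType)
	if err != nil {
		panic(err)
	}
	fmt.Println(dataType) // "double precision" once the up migration has run
}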


@@ -0,0 +1,17 @@
INSERT INTO workspace_agent_stats (
id,
created_at,
user_id,
agent_id,
workspace_id,
template_id,
connection_median_latency_ms
) VALUES (
gen_random_uuid(),
NOW(),
gen_random_uuid(),
gen_random_uuid(),
gen_random_uuid(),
gen_random_uuid(),
1::bigint
);


@@ -0,0 +1,17 @@
INSERT INTO workspace_agent_stats (
id,
created_at,
user_id,
agent_id,
workspace_id,
template_id,
connection_median_latency_ms
) VALUES (
gen_random_uuid(),
NOW(),
gen_random_uuid(),
gen_random_uuid(),
gen_random_uuid(),
gen_random_uuid(),
0.5::float
);


@@ -1582,7 +1582,7 @@ type WorkspaceAgentStat struct {
RxBytes int64 `db:"rx_bytes" json:"rx_bytes"`
TxPackets int64 `db:"tx_packets" json:"tx_packets"`
TxBytes int64 `db:"tx_bytes" json:"tx_bytes"`
- ConnectionMedianLatencyMS int64 `db:"connection_median_latency_ms" json:"connection_median_latency_ms"`
+ ConnectionMedianLatencyMS float64 `db:"connection_median_latency_ms" json:"connection_median_latency_ms"`
SessionCountVSCode int64 `db:"session_count_vscode" json:"session_count_vscode"`
SessionCountJetBrains int64 `db:"session_count_jetbrains" json:"session_count_jetbrains"`
SessionCountReconnectingPTY int64 `db:"session_count_reconnecting_pty" json:"session_count_reconnecting_pty"`


@@ -53,6 +53,8 @@ type sqlcQuerier interface {
GetDERPMeshKey(ctx context.Context) (string, error)
GetDeploymentDAUs(ctx context.Context) ([]GetDeploymentDAUsRow, error)
GetDeploymentID(ctx context.Context) (string, error)
GetDeploymentWorkspaceAgentStats(ctx context.Context, createdAt time.Time) (GetDeploymentWorkspaceAgentStatsRow, error)
GetDeploymentWorkspaceStats(ctx context.Context) (GetDeploymentWorkspaceStatsRow, error)
GetFileByHashAndCreator(ctx context.Context, arg GetFileByHashAndCreatorParams) (File, error)
GetFileByID(ctx context.Context, id uuid.UUID) (File, error)
// This will never count deleted users.


@@ -0,0 +1,88 @@
//go:build linux

package database_test
import (
"context"
"testing"
"time"
"github.com/google/uuid"
"github.com/stretchr/testify/require"
"github.com/coder/coder/coderd/database"
"github.com/coder/coder/coderd/database/dbgen"
"github.com/coder/coder/coderd/database/migrations"
)
func TestGetDeploymentWorkspaceAgentStats(t *testing.T) {
t.Parallel()
if testing.Short() {
t.SkipNow()
}
t.Run("Aggregates", func(t *testing.T) {
t.Parallel()
sqlDB := testSQLDB(t)
err := migrations.Up(sqlDB)
require.NoError(t, err)
db := database.New(sqlDB)
ctx := context.Background()
dbgen.WorkspaceAgentStat(t, db, database.WorkspaceAgentStat{
TxBytes: 1,
RxBytes: 1,
ConnectionMedianLatencyMS: 1,
SessionCountVSCode: 1,
})
dbgen.WorkspaceAgentStat(t, db, database.WorkspaceAgentStat{
TxBytes: 1,
RxBytes: 1,
ConnectionMedianLatencyMS: 2,
SessionCountVSCode: 1,
})
stats, err := db.GetDeploymentWorkspaceAgentStats(ctx, database.Now().Add(-time.Hour))
require.NoError(t, err)
require.Equal(t, int64(2), stats.WorkspaceTxBytes)
require.Equal(t, int64(2), stats.WorkspaceRxBytes)
require.Equal(t, 1.5, stats.WorkspaceConnectionLatency50)
require.Equal(t, 1.95, stats.WorkspaceConnectionLatency95)
require.Equal(t, int64(2), stats.SessionCountVSCode)
})
t.Run("GroupsByAgentID", func(t *testing.T) {
t.Parallel()
sqlDB := testSQLDB(t)
err := migrations.Up(sqlDB)
require.NoError(t, err)
db := database.New(sqlDB)
ctx := context.Background()
agentID := uuid.New()
insertTime := database.Now()
dbgen.WorkspaceAgentStat(t, db, database.WorkspaceAgentStat{
CreatedAt: insertTime.Add(-time.Second),
AgentID: agentID,
TxBytes: 1,
RxBytes: 1,
ConnectionMedianLatencyMS: 1,
SessionCountVSCode: 1,
})
dbgen.WorkspaceAgentStat(t, db, database.WorkspaceAgentStat{
// Ensure this stat is newer!
CreatedAt: insertTime,
AgentID: agentID,
TxBytes: 1,
RxBytes: 1,
ConnectionMedianLatencyMS: 2,
SessionCountVSCode: 1,
})
stats, err := db.GetDeploymentWorkspaceAgentStats(ctx, database.Now().Add(-time.Hour))
require.NoError(t, err)
require.Equal(t, int64(2), stats.WorkspaceTxBytes)
require.Equal(t, int64(2), stats.WorkspaceRxBytes)
require.Equal(t, 1.5, stats.WorkspaceConnectionLatency50)
require.Equal(t, 1.95, stats.WorkspaceConnectionLatency95)
require.Equal(t, int64(1), stats.SessionCountVSCode)
})
}


@@ -5544,6 +5544,56 @@ func (q *sqlQuerier) GetDeploymentDAUs(ctx context.Context) ([]GetDeploymentDAUs
return items, nil
}
const getDeploymentWorkspaceAgentStats = `-- name: GetDeploymentWorkspaceAgentStats :one
WITH agent_stats AS (
SELECT
coalesce(SUM(rx_bytes), 0)::bigint AS workspace_rx_bytes,
coalesce(SUM(tx_bytes), 0)::bigint AS workspace_tx_bytes,
coalesce((PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY connection_median_latency_ms)), -1)::FLOAT AS workspace_connection_latency_50,
coalesce((PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY connection_median_latency_ms)), -1)::FLOAT AS workspace_connection_latency_95
FROM workspace_agent_stats
WHERE workspace_agent_stats.created_at > $1
), latest_agent_stats AS (
SELECT
coalesce(SUM(session_count_vscode), 0)::bigint AS session_count_vscode,
coalesce(SUM(session_count_ssh), 0)::bigint AS session_count_ssh,
coalesce(SUM(session_count_jetbrains), 0)::bigint AS session_count_jetbrains,
coalesce(SUM(session_count_reconnecting_pty), 0)::bigint AS session_count_reconnecting_pty
FROM (
SELECT id, created_at, user_id, agent_id, workspace_id, template_id, connections_by_proto, connection_count, rx_packets, rx_bytes, tx_packets, tx_bytes, connection_median_latency_ms, session_count_vscode, session_count_jetbrains, session_count_reconnecting_pty, session_count_ssh, ROW_NUMBER() OVER(PARTITION BY agent_id ORDER BY created_at DESC) AS rn
FROM workspace_agent_stats
) AS a WHERE a.rn = 1
)
SELECT workspace_rx_bytes, workspace_tx_bytes, workspace_connection_latency_50, workspace_connection_latency_95, session_count_vscode, session_count_ssh, session_count_jetbrains, session_count_reconnecting_pty FROM agent_stats, latest_agent_stats
`
type GetDeploymentWorkspaceAgentStatsRow struct {
WorkspaceRxBytes int64 `db:"workspace_rx_bytes" json:"workspace_rx_bytes"`
WorkspaceTxBytes int64 `db:"workspace_tx_bytes" json:"workspace_tx_bytes"`
WorkspaceConnectionLatency50 float64 `db:"workspace_connection_latency_50" json:"workspace_connection_latency_50"`
WorkspaceConnectionLatency95 float64 `db:"workspace_connection_latency_95" json:"workspace_connection_latency_95"`
SessionCountVSCode int64 `db:"session_count_vscode" json:"session_count_vscode"`
SessionCountSSH int64 `db:"session_count_ssh" json:"session_count_ssh"`
SessionCountJetBrains int64 `db:"session_count_jetbrains" json:"session_count_jetbrains"`
SessionCountReconnectingPTY int64 `db:"session_count_reconnecting_pty" json:"session_count_reconnecting_pty"`
}
func (q *sqlQuerier) GetDeploymentWorkspaceAgentStats(ctx context.Context, createdAt time.Time) (GetDeploymentWorkspaceAgentStatsRow, error) {
row := q.db.QueryRowContext(ctx, getDeploymentWorkspaceAgentStats, createdAt)
var i GetDeploymentWorkspaceAgentStatsRow
err := row.Scan(
&i.WorkspaceRxBytes,
&i.WorkspaceTxBytes,
&i.WorkspaceConnectionLatency50,
&i.WorkspaceConnectionLatency95,
&i.SessionCountVSCode,
&i.SessionCountSSH,
&i.SessionCountJetBrains,
&i.SessionCountReconnectingPTY,
)
return i, err
}
const getTemplateDAUs = `-- name: GetTemplateDAUs :many
SELECT
(created_at at TIME ZONE 'UTC')::date as date,
@@ -5629,7 +5679,7 @@ type InsertWorkspaceAgentStatParams struct {
SessionCountJetBrains int64 `db:"session_count_jetbrains" json:"session_count_jetbrains"`
SessionCountReconnectingPTY int64 `db:"session_count_reconnecting_pty" json:"session_count_reconnecting_pty"`
SessionCountSSH int64 `db:"session_count_ssh" json:"session_count_ssh"`
- ConnectionMedianLatencyMS int64 `db:"connection_median_latency_ms" json:"connection_median_latency_ms"`
+ ConnectionMedianLatencyMS float64 `db:"connection_median_latency_ms" json:"connection_median_latency_ms"`
}
func (q *sqlQuerier) InsertWorkspaceAgentStat(ctx context.Context, arg InsertWorkspaceAgentStatParams) (WorkspaceAgentStat, error) {
@@ -6843,6 +6893,90 @@ func (q *sqlQuerier) InsertWorkspaceResourceMetadata(ctx context.Context, arg In
return items, nil
}
const getDeploymentWorkspaceStats = `-- name: GetDeploymentWorkspaceStats :one
WITH workspaces_with_jobs AS (
SELECT
latest_build.transition, latest_build.provisioner_job_id, latest_build.started_at, latest_build.updated_at, latest_build.canceled_at, latest_build.completed_at, latest_build.error FROM workspaces
LEFT JOIN LATERAL (
SELECT
workspace_builds.transition,
provisioner_jobs.id AS provisioner_job_id,
provisioner_jobs.started_at,
provisioner_jobs.updated_at,
provisioner_jobs.canceled_at,
provisioner_jobs.completed_at,
provisioner_jobs.error
FROM
workspace_builds
LEFT JOIN
provisioner_jobs
ON
provisioner_jobs.id = workspace_builds.job_id
WHERE
workspace_builds.workspace_id = workspaces.id
ORDER BY
build_number DESC
LIMIT
1
) latest_build ON TRUE
), pending_workspaces AS (
SELECT COUNT(*) AS count FROM workspaces_with_jobs WHERE
started_at IS NULL
), building_workspaces AS (
SELECT COUNT(*) AS count FROM workspaces_with_jobs WHERE
started_at IS NOT NULL AND
canceled_at IS NULL AND
updated_at - INTERVAL '30 seconds' < NOW() AND
completed_at IS NULL
), running_workspaces AS (
SELECT COUNT(*) AS count FROM workspaces_with_jobs WHERE
completed_at IS NOT NULL AND
canceled_at IS NULL AND
error IS NULL AND
transition = 'start'::workspace_transition
), failed_workspaces AS (
SELECT COUNT(*) AS count FROM workspaces_with_jobs WHERE
(canceled_at IS NOT NULL AND
error IS NOT NULL) OR
(completed_at IS NOT NULL AND
error IS NOT NULL)
), stopped_workspaces AS (
SELECT COUNT(*) AS count FROM workspaces_with_jobs WHERE
completed_at IS NOT NULL AND
canceled_at IS NULL AND
error IS NULL AND
transition = 'stop'::workspace_transition
)
SELECT
pending_workspaces.count AS pending_workspaces,
building_workspaces.count AS building_workspaces,
running_workspaces.count AS running_workspaces,
failed_workspaces.count AS failed_workspaces,
stopped_workspaces.count AS stopped_workspaces
FROM pending_workspaces, building_workspaces, running_workspaces, failed_workspaces, stopped_workspaces
`
type GetDeploymentWorkspaceStatsRow struct {
PendingWorkspaces int64 `db:"pending_workspaces" json:"pending_workspaces"`
BuildingWorkspaces int64 `db:"building_workspaces" json:"building_workspaces"`
RunningWorkspaces int64 `db:"running_workspaces" json:"running_workspaces"`
FailedWorkspaces int64 `db:"failed_workspaces" json:"failed_workspaces"`
StoppedWorkspaces int64 `db:"stopped_workspaces" json:"stopped_workspaces"`
}
func (q *sqlQuerier) GetDeploymentWorkspaceStats(ctx context.Context) (GetDeploymentWorkspaceStatsRow, error) {
row := q.db.QueryRowContext(ctx, getDeploymentWorkspaceStats)
var i GetDeploymentWorkspaceStatsRow
err := row.Scan(
&i.PendingWorkspaces,
&i.BuildingWorkspaces,
&i.RunningWorkspaces,
&i.FailedWorkspaces,
&i.StoppedWorkspaces,
)
return i, err
}
const getWorkspaceByAgentID = `-- name: GetWorkspaceByAgentID :one
SELECT
id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at


@@ -51,3 +51,25 @@ ORDER BY
-- name: DeleteOldWorkspaceAgentStats :exec
DELETE FROM workspace_agent_stats WHERE created_at < NOW() - INTERVAL '30 days';
-- name: GetDeploymentWorkspaceAgentStats :one
WITH agent_stats AS (
SELECT
coalesce(SUM(rx_bytes), 0)::bigint AS workspace_rx_bytes,
coalesce(SUM(tx_bytes), 0)::bigint AS workspace_tx_bytes,
coalesce((PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY connection_median_latency_ms)), -1)::FLOAT AS workspace_connection_latency_50,
coalesce((PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY connection_median_latency_ms)), -1)::FLOAT AS workspace_connection_latency_95
FROM workspace_agent_stats
WHERE workspace_agent_stats.created_at > $1
), latest_agent_stats AS (
SELECT
coalesce(SUM(session_count_vscode), 0)::bigint AS session_count_vscode,
coalesce(SUM(session_count_ssh), 0)::bigint AS session_count_ssh,
coalesce(SUM(session_count_jetbrains), 0)::bigint AS session_count_jetbrains,
coalesce(SUM(session_count_reconnecting_pty), 0)::bigint AS session_count_reconnecting_pty
FROM (
SELECT *, ROW_NUMBER() OVER(PARTITION BY agent_id ORDER BY created_at DESC) AS rn
FROM workspace_agent_stats
) AS a WHERE a.rn = 1
)
SELECT * FROM agent_stats, latest_agent_stats;
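
Both CTEs aggregate down to a single row, so the bare cross join in the final SELECT always returns exactly one combined row. The ROW_NUMBER() OVER (PARTITION BY agent_id ORDER BY created_at DESC) subquery keeps only each agent's newest report (rn = 1); a minimal in-memory sketch of that selection (hypothetical types, mirroring the fake querier above):

package main

import (
	"fmt"
	"time"
)

type stat struct {
	agentID   string
	createdAt time.Time
	sshCount  int64
}

func main() {
	now := time.Now()
	stats := []stat{
		{"a", now.Add(-time.Minute), 1},
		{"a", now, 1}, // newer row for agent "a" supersedes the one above
		{"b", now, 2},
	}

	// Equivalent of keeping rn = 1 per agent_id: newest row per agent.
	latest := map[string]stat{}
	for _, s := range stats {
		if cur, ok := latest[s.agentID]; !ok || s.createdAt.After(cur.createdAt) {
			latest[s.agentID] = s
		}
	}

	var sessionCountSSH int64
	for _, s := range latest {
		sessionCountSSH += s.sshCount
	}
	fmt.Println(sessionCountSSH) // 3, not the cumulative 4
}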


@@ -330,3 +330,65 @@ WHERE
-- During build time, the template max TTL will still be used if the
-- workspace TTL is NULL.
AND ttl IS NOT NULL;
-- name: GetDeploymentWorkspaceStats :one
WITH workspaces_with_jobs AS (
SELECT
latest_build.* FROM workspaces
LEFT JOIN LATERAL (
SELECT
workspace_builds.transition,
provisioner_jobs.id AS provisioner_job_id,
provisioner_jobs.started_at,
provisioner_jobs.updated_at,
provisioner_jobs.canceled_at,
provisioner_jobs.completed_at,
provisioner_jobs.error
FROM
workspace_builds
LEFT JOIN
provisioner_jobs
ON
provisioner_jobs.id = workspace_builds.job_id
WHERE
workspace_builds.workspace_id = workspaces.id
ORDER BY
build_number DESC
LIMIT
1
) latest_build ON TRUE
), pending_workspaces AS (
SELECT COUNT(*) AS count FROM workspaces_with_jobs WHERE
started_at IS NULL
), building_workspaces AS (
SELECT COUNT(*) AS count FROM workspaces_with_jobs WHERE
started_at IS NOT NULL AND
canceled_at IS NULL AND
updated_at - INTERVAL '30 seconds' < NOW() AND
completed_at IS NULL
), running_workspaces AS (
SELECT COUNT(*) AS count FROM workspaces_with_jobs WHERE
completed_at IS NOT NULL AND
canceled_at IS NULL AND
error IS NULL AND
transition = 'start'::workspace_transition
), failed_workspaces AS (
SELECT COUNT(*) AS count FROM workspaces_with_jobs WHERE
(canceled_at IS NOT NULL AND
error IS NOT NULL) OR
(completed_at IS NOT NULL AND
error IS NOT NULL)
), stopped_workspaces AS (
SELECT COUNT(*) AS count FROM workspaces_with_jobs WHERE
completed_at IS NOT NULL AND
canceled_at IS NULL AND
error IS NULL AND
transition = 'stop'::workspace_transition
)
SELECT
pending_workspaces.count AS pending_workspaces,
building_workspaces.count AS building_workspaces,
running_workspaces.count AS running_workspaces,
failed_workspaces.count AS failed_workspaces,
stopped_workspaces.count AS stopped_workspaces
FROM pending_workspaces, building_workspaces, running_workspaces, failed_workspaces, stopped_workspaces;
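
The five CTEs bucket each workspace by the provisioner-job fields of its latest build, mirroring the fake querier earlier in this commit; a condensed sketch of the same decision ladder (hypothetical field layout, not the committed code):

package main

import (
	"fmt"
	"time"
)

type latestJob struct {
	startedAt, canceledAt, completedAt *time.Time
	updatedAt                          time.Time
	jobError                           *string
}

// classify mirrors the pending/building/failed/running/stopped buckets.
func classify(j latestJob, transition string) string {
	switch {
	case j.startedAt == nil:
		return "pending"
	case j.canceledAt == nil && j.completedAt == nil &&
		time.Since(j.updatedAt) <= 30*time.Second:
		return "building"
	case j.canceledAt != nil || j.jobError != nil:
		return "failed"
	case j.completedAt != nil && transition == "start":
		return "running"
	case j.completedAt != nil && transition == "stop":
		return "stopped"
	default:
		return "unknown"
	}
}

func main() {
	now := time.Now()
	fmt.Println(classify(latestJob{startedAt: &now, updatedAt: now}, "start")) // building
}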