// Mirror of https://github.com/coder/coder.git
// Synced 2025-07-03 16:13:58 +00:00 (20018 lines, 607 KiB, Go)
// Code generated by sqlc. DO NOT EDIT.
|
|
// versions:
|
|
// sqlc v1.27.0
|
|
|
|
package database
|
|
|
|
import (
|
|
"context"
|
|
"database/sql"
|
|
"encoding/json"
|
|
"time"
|
|
|
|
"github.com/google/uuid"
|
|
"github.com/lib/pq"
|
|
"github.com/sqlc-dev/pqtype"
|
|
)
|
|
|
|
const activityBumpWorkspace = `-- name: ActivityBumpWorkspace :exec
WITH latest AS (
	SELECT
		workspace_builds.id::uuid AS build_id,
		workspace_builds.deadline::timestamp with time zone AS build_deadline,
		workspace_builds.max_deadline::timestamp with time zone AS build_max_deadline,
		workspace_builds.transition AS build_transition,
		provisioner_jobs.completed_at::timestamp with time zone AS job_completed_at,
		templates.activity_bump AS activity_bump,
		(
			CASE
				-- If the extension would push us over the next_autostart
				-- interval, then extend the deadline by the full TTL (NOT
				-- activity bump) from the autostart time. This will essentially
				-- be as if the workspace auto started at the given time and the
				-- original TTL was applied.
				--
				-- Sadly we can't define ` + "`" + `activity_bump_interval` + "`" + ` above since
				-- it won't be available for this CASE statement, so we have to
				-- copy the cast twice.
				WHEN NOW() + (templates.activity_bump / 1000 / 1000 / 1000 || ' seconds')::interval > $1 :: timestamptz
					-- If the autostart is behind now(), then the
					-- autostart schedule is either the 0 time and not provided,
					-- or it was the autostart in the past, which is no longer
					-- relevant. If autostart is > 0 and in the past, then
					-- that is a mistake by the caller.
					AND $1 > NOW()
				THEN
					-- Extend to the autostart, then add the activity bump
					(($1 :: timestamptz) - NOW()) + CASE
						WHEN templates.allow_user_autostop
						THEN (workspaces.ttl / 1000 / 1000 / 1000 || ' seconds')::interval
						ELSE (templates.default_ttl / 1000 / 1000 / 1000 || ' seconds')::interval
					END

				-- Default to the activity bump duration.
				ELSE
					(templates.activity_bump / 1000 / 1000 / 1000 || ' seconds')::interval
			END
		) AS ttl_interval
	FROM workspace_builds
	JOIN provisioner_jobs
		ON provisioner_jobs.id = workspace_builds.job_id
	JOIN workspaces
		ON workspaces.id = workspace_builds.workspace_id
	JOIN templates
		ON templates.id = workspaces.template_id
	WHERE workspace_builds.workspace_id = $2::uuid
	ORDER BY workspace_builds.build_number DESC
	LIMIT 1
)
UPDATE
	workspace_builds wb
SET
	updated_at = NOW(),
	deadline = CASE
		WHEN l.build_max_deadline = '0001-01-01 00:00:00+00'
		-- Never reduce the deadline from activity.
		THEN GREATEST(wb.deadline, NOW() + l.ttl_interval)
		ELSE LEAST(GREATEST(wb.deadline, NOW() + l.ttl_interval), l.build_max_deadline)
	END
FROM latest l
WHERE wb.id = l.build_id
	AND l.job_completed_at IS NOT NULL
	AND l.activity_bump > 0
	AND l.build_transition = 'start'
	AND l.ttl_interval > '0 seconds'::interval
	AND l.build_deadline != '0001-01-01 00:00:00+00'
	AND l.build_deadline - (l.ttl_interval * 0.95) < NOW()
`

// ActivityBumpWorkspaceParams carries the arguments for ActivityBumpWorkspace.
// NextAutostart is the workspace's next scheduled autostart time (zero time if
// none); WorkspaceID identifies the workspace whose latest build is bumped.
type ActivityBumpWorkspaceParams struct {
	NextAutostart time.Time `db:"next_autostart" json:"next_autostart"`
	WorkspaceID   uuid.UUID `db:"workspace_id" json:"workspace_id"`
}

// Bumps the workspace deadline by the template's configured "activity_bump"
// duration (default 1h). If the workspace bump will cross an autostart
// threshold, then the bump is autostart + TTL. This is the deadline behavior if
// the workspace was to autostart from a stopped state.
//
// Max deadline is respected, and the deadline will never be bumped past it.
// The deadline will never decrease.
// We only bump if the template has an activity bump duration set.
// We only bump if the raw interval is positive and non-zero.
// We only bump if workspace shutdown is manual.
// We only bump when 5% of the deadline has elapsed.
func (q *sqlQuerier) ActivityBumpWorkspace(ctx context.Context, arg ActivityBumpWorkspaceParams) error {
	_, err := q.db.ExecContext(ctx, activityBumpWorkspace, arg.NextAutostart, arg.WorkspaceID)
	return err
}
|
|
|
|
const deleteAPIKeyByID = `-- name: DeleteAPIKeyByID :exec
DELETE FROM
	api_keys
WHERE
	id = $1
`

// DeleteAPIKeyByID permanently deletes the API key with the given ID.
// Deleting a nonexistent ID is not an error (no rows affected).
func (q *sqlQuerier) DeleteAPIKeyByID(ctx context.Context, id string) error {
	_, err := q.db.ExecContext(ctx, deleteAPIKeyByID, id)
	return err
}
|
|
|
|
const deleteAPIKeysByUserID = `-- name: DeleteAPIKeysByUserID :exec
DELETE FROM
	api_keys
WHERE
	user_id = $1
`

// DeleteAPIKeysByUserID deletes every API key belonging to the given user.
func (q *sqlQuerier) DeleteAPIKeysByUserID(ctx context.Context, userID uuid.UUID) error {
	_, err := q.db.ExecContext(ctx, deleteAPIKeysByUserID, userID)
	return err
}
|
|
|
|
const deleteApplicationConnectAPIKeysByUserID = `-- name: DeleteApplicationConnectAPIKeysByUserID :exec
DELETE FROM
	api_keys
WHERE
	user_id = $1 AND
	scope = 'application_connect'::api_key_scope
`

// DeleteApplicationConnectAPIKeysByUserID deletes only the user's API keys
// scoped to 'application_connect'; keys with other scopes are untouched.
func (q *sqlQuerier) DeleteApplicationConnectAPIKeysByUserID(ctx context.Context, userID uuid.UUID) error {
	_, err := q.db.ExecContext(ctx, deleteApplicationConnectAPIKeysByUserID, userID)
	return err
}
|
|
|
|
const getAPIKeyByID = `-- name: GetAPIKeyByID :one
SELECT
	id, hashed_secret, user_id, last_used, expires_at, created_at, updated_at, login_type, lifetime_seconds, ip_address, scope, token_name
FROM
	api_keys
WHERE
	id = $1
LIMIT
	1
`

// GetAPIKeyByID fetches a single API key by its primary-key ID.
// Returns sql.ErrNoRows (via row.Scan) if no key matches.
func (q *sqlQuerier) GetAPIKeyByID(ctx context.Context, id string) (APIKey, error) {
	row := q.db.QueryRowContext(ctx, getAPIKeyByID, id)
	var i APIKey
	// Scan order must match the SELECT column list above exactly.
	err := row.Scan(
		&i.ID,
		&i.HashedSecret,
		&i.UserID,
		&i.LastUsed,
		&i.ExpiresAt,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.LoginType,
		&i.LifetimeSeconds,
		&i.IPAddress,
		&i.Scope,
		&i.TokenName,
	)
	return i, err
}
|
|
|
|
const getAPIKeyByName = `-- name: GetAPIKeyByName :one
SELECT
	id, hashed_secret, user_id, last_used, expires_at, created_at, updated_at, login_type, lifetime_seconds, ip_address, scope, token_name
FROM
	api_keys
WHERE
	user_id = $1 AND
	token_name = $2 AND
	token_name != ''
LIMIT
	1
`

// GetAPIKeyByNameParams carries the lookup arguments for GetAPIKeyByName.
type GetAPIKeyByNameParams struct {
	UserID    uuid.UUID `db:"user_id" json:"user_id"`
	TokenName string    `db:"token_name" json:"token_name"`
}

// there is no unique constraint on empty token names
func (q *sqlQuerier) GetAPIKeyByName(ctx context.Context, arg GetAPIKeyByNameParams) (APIKey, error) {
	row := q.db.QueryRowContext(ctx, getAPIKeyByName, arg.UserID, arg.TokenName)
	var i APIKey
	// Scan order must match the SELECT column list above exactly.
	err := row.Scan(
		&i.ID,
		&i.HashedSecret,
		&i.UserID,
		&i.LastUsed,
		&i.ExpiresAt,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.LoginType,
		&i.LifetimeSeconds,
		&i.IPAddress,
		&i.Scope,
		&i.TokenName,
	)
	return i, err
}
|
|
|
|
const getAPIKeysByLoginType = `-- name: GetAPIKeysByLoginType :many
SELECT id, hashed_secret, user_id, last_used, expires_at, created_at, updated_at, login_type, lifetime_seconds, ip_address, scope, token_name FROM api_keys WHERE login_type = $1
`

// GetAPIKeysByLoginType returns all API keys created via the given login type
// (e.g. password, OIDC). Returns a nil slice when nothing matches.
func (q *sqlQuerier) GetAPIKeysByLoginType(ctx context.Context, loginType LoginType) ([]APIKey, error) {
	rows, err := q.db.QueryContext(ctx, getAPIKeysByLoginType, loginType)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []APIKey
	for rows.Next() {
		var i APIKey
		// Scan order must match the SELECT column list above exactly.
		if err := rows.Scan(
			&i.ID,
			&i.HashedSecret,
			&i.UserID,
			&i.LastUsed,
			&i.ExpiresAt,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.LoginType,
			&i.LifetimeSeconds,
			&i.IPAddress,
			&i.Scope,
			&i.TokenName,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err surfaces any close-time failure to the caller.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getAPIKeysByUserID = `-- name: GetAPIKeysByUserID :many
SELECT id, hashed_secret, user_id, last_used, expires_at, created_at, updated_at, login_type, lifetime_seconds, ip_address, scope, token_name FROM api_keys WHERE login_type = $1 AND user_id = $2
`

// GetAPIKeysByUserIDParams filters keys by both login type and owner.
type GetAPIKeysByUserIDParams struct {
	LoginType LoginType `db:"login_type" json:"login_type"`
	UserID    uuid.UUID `db:"user_id" json:"user_id"`
}

// GetAPIKeysByUserID returns the user's API keys restricted to one login type.
func (q *sqlQuerier) GetAPIKeysByUserID(ctx context.Context, arg GetAPIKeysByUserIDParams) ([]APIKey, error) {
	rows, err := q.db.QueryContext(ctx, getAPIKeysByUserID, arg.LoginType, arg.UserID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []APIKey
	for rows.Next() {
		var i APIKey
		// Scan order must match the SELECT column list above exactly.
		if err := rows.Scan(
			&i.ID,
			&i.HashedSecret,
			&i.UserID,
			&i.LastUsed,
			&i.ExpiresAt,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.LoginType,
			&i.LifetimeSeconds,
			&i.IPAddress,
			&i.Scope,
			&i.TokenName,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getAPIKeysLastUsedAfter = `-- name: GetAPIKeysLastUsedAfter :many
SELECT id, hashed_secret, user_id, last_used, expires_at, created_at, updated_at, login_type, lifetime_seconds, ip_address, scope, token_name FROM api_keys WHERE last_used > $1
`

// GetAPIKeysLastUsedAfter returns all API keys whose last_used timestamp is
// strictly after the given time (the boundary itself is excluded).
func (q *sqlQuerier) GetAPIKeysLastUsedAfter(ctx context.Context, lastUsed time.Time) ([]APIKey, error) {
	rows, err := q.db.QueryContext(ctx, getAPIKeysLastUsedAfter, lastUsed)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []APIKey
	for rows.Next() {
		var i APIKey
		// Scan order must match the SELECT column list above exactly.
		if err := rows.Scan(
			&i.ID,
			&i.HashedSecret,
			&i.UserID,
			&i.LastUsed,
			&i.ExpiresAt,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.LoginType,
			&i.LifetimeSeconds,
			&i.IPAddress,
			&i.Scope,
			&i.TokenName,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const insertAPIKey = `-- name: InsertAPIKey :one
INSERT INTO
	api_keys (
		id,
		lifetime_seconds,
		hashed_secret,
		ip_address,
		user_id,
		last_used,
		expires_at,
		created_at,
		updated_at,
		login_type,
		scope,
		token_name
	)
VALUES
	($1,
	-- If the lifetime is set to 0, default to 24hrs
	CASE $2::bigint
	WHEN 0 THEN 86400
	ELSE $2::bigint
	END
	, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12) RETURNING id, hashed_secret, user_id, last_used, expires_at, created_at, updated_at, login_type, lifetime_seconds, ip_address, scope, token_name
`

// InsertAPIKeyParams is the full column set for a new API key row.
// A LifetimeSeconds of 0 is replaced with 86400 (24h) by the query itself.
type InsertAPIKeyParams struct {
	ID              string      `db:"id" json:"id"`
	LifetimeSeconds int64       `db:"lifetime_seconds" json:"lifetime_seconds"`
	HashedSecret    []byte      `db:"hashed_secret" json:"hashed_secret"`
	IPAddress       pqtype.Inet `db:"ip_address" json:"ip_address"`
	UserID          uuid.UUID   `db:"user_id" json:"user_id"`
	LastUsed        time.Time   `db:"last_used" json:"last_used"`
	ExpiresAt       time.Time   `db:"expires_at" json:"expires_at"`
	CreatedAt       time.Time   `db:"created_at" json:"created_at"`
	UpdatedAt       time.Time   `db:"updated_at" json:"updated_at"`
	LoginType       LoginType   `db:"login_type" json:"login_type"`
	Scope           APIKeyScope `db:"scope" json:"scope"`
	TokenName       string      `db:"token_name" json:"token_name"`
}

// InsertAPIKey inserts a new API key and returns the row as stored, which may
// differ from the input (lifetime_seconds defaulting happens in SQL).
func (q *sqlQuerier) InsertAPIKey(ctx context.Context, arg InsertAPIKeyParams) (APIKey, error) {
	row := q.db.QueryRowContext(ctx, insertAPIKey,
		arg.ID,
		arg.LifetimeSeconds,
		arg.HashedSecret,
		arg.IPAddress,
		arg.UserID,
		arg.LastUsed,
		arg.ExpiresAt,
		arg.CreatedAt,
		arg.UpdatedAt,
		arg.LoginType,
		arg.Scope,
		arg.TokenName,
	)
	var i APIKey
	// Scan order must match the RETURNING column list above exactly.
	err := row.Scan(
		&i.ID,
		&i.HashedSecret,
		&i.UserID,
		&i.LastUsed,
		&i.ExpiresAt,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.LoginType,
		&i.LifetimeSeconds,
		&i.IPAddress,
		&i.Scope,
		&i.TokenName,
	)
	return i, err
}
|
|
|
|
const updateAPIKeyByID = `-- name: UpdateAPIKeyByID :exec
UPDATE
	api_keys
SET
	last_used = $2,
	expires_at = $3,
	ip_address = $4
WHERE
	id = $1
`

// UpdateAPIKeyByIDParams carries the mutable fields of an API key row.
type UpdateAPIKeyByIDParams struct {
	ID        string      `db:"id" json:"id"`
	LastUsed  time.Time   `db:"last_used" json:"last_used"`
	ExpiresAt time.Time   `db:"expires_at" json:"expires_at"`
	IPAddress pqtype.Inet `db:"ip_address" json:"ip_address"`
}

// UpdateAPIKeyByID overwrites last_used, expires_at, and ip_address for the
// key with the given ID. Updating a nonexistent ID is not an error.
func (q *sqlQuerier) UpdateAPIKeyByID(ctx context.Context, arg UpdateAPIKeyByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateAPIKeyByID,
		arg.ID,
		arg.LastUsed,
		arg.ExpiresAt,
		arg.IPAddress,
	)
	return err
}
|
|
|
|
const getAuditLogsOffset = `-- name: GetAuditLogsOffset :many
SELECT
	audit_logs.id, audit_logs.time, audit_logs.user_id, audit_logs.organization_id, audit_logs.ip, audit_logs.user_agent, audit_logs.resource_type, audit_logs.resource_id, audit_logs.resource_target, audit_logs.action, audit_logs.diff, audit_logs.status_code, audit_logs.additional_fields, audit_logs.request_id, audit_logs.resource_icon,
	-- sqlc.embed(users) would be nice but it does not seem to play well with
	-- left joins.
	users.username AS user_username,
	users.name AS user_name,
	users.email AS user_email,
	users.created_at AS user_created_at,
	users.updated_at AS user_updated_at,
	users.last_seen_at AS user_last_seen_at,
	users.status AS user_status,
	users.login_type AS user_login_type,
	users.rbac_roles AS user_roles,
	users.avatar_url AS user_avatar_url,
	users.deleted AS user_deleted,
	users.quiet_hours_schedule AS user_quiet_hours_schedule,
	COALESCE(organizations.name, '') AS organization_name,
	COALESCE(organizations.display_name, '') AS organization_display_name,
	COALESCE(organizations.icon, '') AS organization_icon,
	COUNT(audit_logs.*) OVER () AS count
FROM
	audit_logs
	LEFT JOIN users ON audit_logs.user_id = users.id
	LEFT JOIN
		-- First join on workspaces to get the initial workspace create
		-- to workspace build 1 id. This is because the first create is
		-- is a different audit log than subsequent starts.
		workspaces ON
		audit_logs.resource_type = 'workspace' AND
		audit_logs.resource_id = workspaces.id
	LEFT JOIN
		workspace_builds ON
		-- Get the reason from the build if the resource type
		-- is a workspace_build
		(
			audit_logs.resource_type = 'workspace_build'
			AND audit_logs.resource_id = workspace_builds.id
		)
		OR
		-- Get the reason from the build #1 if this is the first
		-- workspace create.
		(
			audit_logs.resource_type = 'workspace' AND
			audit_logs.action = 'create' AND
			workspaces.id = workspace_builds.workspace_id AND
			workspace_builds.build_number = 1
		)
	LEFT JOIN organizations ON audit_logs.organization_id = organizations.id
WHERE
	-- Filter resource_type
	CASE
		WHEN $1 :: text != '' THEN
			resource_type = $1 :: resource_type
		ELSE true
	END
	-- Filter resource_id
	AND CASE
		WHEN $2 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
			resource_id = $2
		ELSE true
	END
	-- Filter organization_id
	AND CASE
		WHEN $3 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
			audit_logs.organization_id = $3
		ELSE true
	END
	-- Filter by resource_target
	AND CASE
		WHEN $4 :: text != '' THEN
			resource_target = $4
		ELSE true
	END
	-- Filter action
	AND CASE
		WHEN $5 :: text != '' THEN
			action = $5 :: audit_action
		ELSE true
	END
	-- Filter by user_id
	AND CASE
		WHEN $6 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
			user_id = $6
		ELSE true
	END
	-- Filter by username
	AND CASE
		WHEN $7 :: text != '' THEN
			user_id = (SELECT id FROM users WHERE lower(username) = lower($7) AND deleted = false)
		ELSE true
	END
	-- Filter by user_email
	AND CASE
		WHEN $8 :: text != '' THEN
			users.email = $8
		ELSE true
	END
	-- Filter by date_from
	AND CASE
		WHEN $9 :: timestamp with time zone != '0001-01-01 00:00:00Z' THEN
			"time" >= $9
		ELSE true
	END
	-- Filter by date_to
	AND CASE
		WHEN $10 :: timestamp with time zone != '0001-01-01 00:00:00Z' THEN
			"time" <= $10
		ELSE true
	END
	-- Filter by build_reason
	AND CASE
		WHEN $11::text != '' THEN
			workspace_builds.reason::text = $11
		ELSE true
	END
	-- Filter request_id
	AND CASE
		WHEN $12 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
			audit_logs.request_id = $12
		ELSE true
	END

	-- Authorize Filter clause will be injected below in GetAuthorizedAuditLogsOffset
	-- @authorize_filter
ORDER BY
	"time" DESC
LIMIT
	-- a limit of 0 means "no limit". The audit log table is unbounded
	-- in size, and is expected to be quite large. Implement a default
	-- limit of 100 to prevent accidental excessively large queries.
	COALESCE(NULLIF($14 :: int, 0), 100)
OFFSET
	$13
`

// GetAuditLogsOffsetParams holds the optional filters for GetAuditLogsOffset.
// Zero values (empty string, zero UUID, zero time, 0 limit) disable the
// corresponding filter in SQL; LimitOpt 0 falls back to 100.
type GetAuditLogsOffsetParams struct {
	ResourceType   string    `db:"resource_type" json:"resource_type"`
	ResourceID     uuid.UUID `db:"resource_id" json:"resource_id"`
	OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
	ResourceTarget string    `db:"resource_target" json:"resource_target"`
	Action         string    `db:"action" json:"action"`
	UserID         uuid.UUID `db:"user_id" json:"user_id"`
	Username       string    `db:"username" json:"username"`
	Email          string    `db:"email" json:"email"`
	DateFrom       time.Time `db:"date_from" json:"date_from"`
	DateTo         time.Time `db:"date_to" json:"date_to"`
	BuildReason    string    `db:"build_reason" json:"build_reason"`
	RequestID      uuid.UUID `db:"request_id" json:"request_id"`
	OffsetOpt      int32     `db:"offset_opt" json:"offset_opt"`
	LimitOpt       int32     `db:"limit_opt" json:"limit_opt"`
}

// GetAuditLogsOffsetRow is one audit log joined with its (possibly absent)
// user and organization. User fields are Null* types because of the LEFT JOIN;
// Count is the total match count (window function) repeated on every row.
type GetAuditLogsOffsetRow struct {
	AuditLog                AuditLog       `db:"audit_log" json:"audit_log"`
	UserUsername            sql.NullString `db:"user_username" json:"user_username"`
	UserName                sql.NullString `db:"user_name" json:"user_name"`
	UserEmail               sql.NullString `db:"user_email" json:"user_email"`
	UserCreatedAt           sql.NullTime   `db:"user_created_at" json:"user_created_at"`
	UserUpdatedAt           sql.NullTime   `db:"user_updated_at" json:"user_updated_at"`
	UserLastSeenAt          sql.NullTime   `db:"user_last_seen_at" json:"user_last_seen_at"`
	UserStatus              NullUserStatus `db:"user_status" json:"user_status"`
	UserLoginType           NullLoginType  `db:"user_login_type" json:"user_login_type"`
	UserRoles               pq.StringArray `db:"user_roles" json:"user_roles"`
	UserAvatarUrl           sql.NullString `db:"user_avatar_url" json:"user_avatar_url"`
	UserDeleted             sql.NullBool   `db:"user_deleted" json:"user_deleted"`
	UserQuietHoursSchedule  sql.NullString `db:"user_quiet_hours_schedule" json:"user_quiet_hours_schedule"`
	OrganizationName        string         `db:"organization_name" json:"organization_name"`
	OrganizationDisplayName string         `db:"organization_display_name" json:"organization_display_name"`
	OrganizationIcon        string         `db:"organization_icon" json:"organization_icon"`
	Count                   int64          `db:"count" json:"count"`
}

// GetAuditLogsBefore retrieves `row_limit` number of audit logs before the provided
// ID.
func (q *sqlQuerier) GetAuditLogsOffset(ctx context.Context, arg GetAuditLogsOffsetParams) ([]GetAuditLogsOffsetRow, error) {
	// Argument order must match the $1..$14 placeholders in the query.
	rows, err := q.db.QueryContext(ctx, getAuditLogsOffset,
		arg.ResourceType,
		arg.ResourceID,
		arg.OrganizationID,
		arg.ResourceTarget,
		arg.Action,
		arg.UserID,
		arg.Username,
		arg.Email,
		arg.DateFrom,
		arg.DateTo,
		arg.BuildReason,
		arg.RequestID,
		arg.OffsetOpt,
		arg.LimitOpt,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetAuditLogsOffsetRow
	for rows.Next() {
		var i GetAuditLogsOffsetRow
		// Scan order must match the SELECT column list above exactly.
		if err := rows.Scan(
			&i.AuditLog.ID,
			&i.AuditLog.Time,
			&i.AuditLog.UserID,
			&i.AuditLog.OrganizationID,
			&i.AuditLog.Ip,
			&i.AuditLog.UserAgent,
			&i.AuditLog.ResourceType,
			&i.AuditLog.ResourceID,
			&i.AuditLog.ResourceTarget,
			&i.AuditLog.Action,
			&i.AuditLog.Diff,
			&i.AuditLog.StatusCode,
			&i.AuditLog.AdditionalFields,
			&i.AuditLog.RequestID,
			&i.AuditLog.ResourceIcon,
			&i.UserUsername,
			&i.UserName,
			&i.UserEmail,
			&i.UserCreatedAt,
			&i.UserUpdatedAt,
			&i.UserLastSeenAt,
			&i.UserStatus,
			&i.UserLoginType,
			&i.UserRoles,
			&i.UserAvatarUrl,
			&i.UserDeleted,
			&i.UserQuietHoursSchedule,
			&i.OrganizationName,
			&i.OrganizationDisplayName,
			&i.OrganizationIcon,
			&i.Count,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const insertAuditLog = `-- name: InsertAuditLog :one
INSERT INTO
	audit_logs (
		id,
		"time",
		user_id,
		organization_id,
		ip,
		user_agent,
		resource_type,
		resource_id,
		resource_target,
		action,
		diff,
		status_code,
		additional_fields,
		request_id,
		resource_icon
	)
VALUES
	($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15) RETURNING id, time, user_id, organization_id, ip, user_agent, resource_type, resource_id, resource_target, action, diff, status_code, additional_fields, request_id, resource_icon
`

// InsertAuditLogParams is the full column set for a new audit log entry.
type InsertAuditLogParams struct {
	ID               uuid.UUID       `db:"id" json:"id"`
	Time             time.Time       `db:"time" json:"time"`
	UserID           uuid.UUID       `db:"user_id" json:"user_id"`
	OrganizationID   uuid.UUID       `db:"organization_id" json:"organization_id"`
	Ip               pqtype.Inet     `db:"ip" json:"ip"`
	UserAgent        sql.NullString  `db:"user_agent" json:"user_agent"`
	ResourceType     ResourceType    `db:"resource_type" json:"resource_type"`
	ResourceID       uuid.UUID       `db:"resource_id" json:"resource_id"`
	ResourceTarget   string          `db:"resource_target" json:"resource_target"`
	Action           AuditAction     `db:"action" json:"action"`
	Diff             json.RawMessage `db:"diff" json:"diff"`
	StatusCode       int32           `db:"status_code" json:"status_code"`
	AdditionalFields json.RawMessage `db:"additional_fields" json:"additional_fields"`
	RequestID        uuid.UUID       `db:"request_id" json:"request_id"`
	ResourceIcon     string          `db:"resource_icon" json:"resource_icon"`
}

// InsertAuditLog inserts one audit log row and returns it as stored.
func (q *sqlQuerier) InsertAuditLog(ctx context.Context, arg InsertAuditLogParams) (AuditLog, error) {
	row := q.db.QueryRowContext(ctx, insertAuditLog,
		arg.ID,
		arg.Time,
		arg.UserID,
		arg.OrganizationID,
		arg.Ip,
		arg.UserAgent,
		arg.ResourceType,
		arg.ResourceID,
		arg.ResourceTarget,
		arg.Action,
		arg.Diff,
		arg.StatusCode,
		arg.AdditionalFields,
		arg.RequestID,
		arg.ResourceIcon,
	)
	var i AuditLog
	// Scan order must match the RETURNING column list above exactly.
	err := row.Scan(
		&i.ID,
		&i.Time,
		&i.UserID,
		&i.OrganizationID,
		&i.Ip,
		&i.UserAgent,
		&i.ResourceType,
		&i.ResourceID,
		&i.ResourceTarget,
		&i.Action,
		&i.Diff,
		&i.StatusCode,
		&i.AdditionalFields,
		&i.RequestID,
		&i.ResourceIcon,
	)
	return i, err
}
|
|
|
|
const deleteChat = `-- name: DeleteChat :exec
DELETE FROM chats WHERE id = $1
`

// DeleteChat deletes the chat with the given ID.
// Deleting a nonexistent ID is not an error (no rows affected).
func (q *sqlQuerier) DeleteChat(ctx context.Context, id uuid.UUID) error {
	_, err := q.db.ExecContext(ctx, deleteChat, id)
	return err
}
|
|
|
|
const getChatByID = `-- name: GetChatByID :one
SELECT id, owner_id, created_at, updated_at, title FROM chats
WHERE id = $1
`

// GetChatByID fetches a single chat by ID; sql.ErrNoRows if absent.
func (q *sqlQuerier) GetChatByID(ctx context.Context, id uuid.UUID) (Chat, error) {
	row := q.db.QueryRowContext(ctx, getChatByID, id)
	var i Chat
	// Scan order must match the SELECT column list above exactly.
	err := row.Scan(
		&i.ID,
		&i.OwnerID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Title,
	)
	return i, err
}
|
|
|
|
const getChatMessagesByChatID = `-- name: GetChatMessagesByChatID :many
SELECT id, chat_id, created_at, model, provider, content FROM chat_messages
WHERE chat_id = $1
ORDER BY created_at ASC
`

// GetChatMessagesByChatID returns all messages in a chat, oldest first.
func (q *sqlQuerier) GetChatMessagesByChatID(ctx context.Context, chatID uuid.UUID) ([]ChatMessage, error) {
	rows, err := q.db.QueryContext(ctx, getChatMessagesByChatID, chatID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []ChatMessage
	for rows.Next() {
		var i ChatMessage
		// Scan order must match the SELECT column list above exactly.
		if err := rows.Scan(
			&i.ID,
			&i.ChatID,
			&i.CreatedAt,
			&i.Model,
			&i.Provider,
			&i.Content,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getChatsByOwnerID = `-- name: GetChatsByOwnerID :many
SELECT id, owner_id, created_at, updated_at, title FROM chats
WHERE owner_id = $1
ORDER BY created_at DESC
`

// GetChatsByOwnerID returns the owner's chats, newest first.
func (q *sqlQuerier) GetChatsByOwnerID(ctx context.Context, ownerID uuid.UUID) ([]Chat, error) {
	rows, err := q.db.QueryContext(ctx, getChatsByOwnerID, ownerID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []Chat
	for rows.Next() {
		var i Chat
		// Scan order must match the SELECT column list above exactly.
		if err := rows.Scan(
			&i.ID,
			&i.OwnerID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.Title,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const insertChat = `-- name: InsertChat :one
INSERT INTO chats (owner_id, created_at, updated_at, title)
VALUES ($1, $2, $3, $4)
RETURNING id, owner_id, created_at, updated_at, title
`

// InsertChatParams carries the column values for a new chat; the row's ID is
// generated by the database and returned by InsertChat.
type InsertChatParams struct {
	OwnerID   uuid.UUID `db:"owner_id" json:"owner_id"`
	CreatedAt time.Time `db:"created_at" json:"created_at"`
	UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
	Title     string    `db:"title" json:"title"`
}

// InsertChat inserts a new chat and returns the stored row (including ID).
func (q *sqlQuerier) InsertChat(ctx context.Context, arg InsertChatParams) (Chat, error) {
	row := q.db.QueryRowContext(ctx, insertChat,
		arg.OwnerID,
		arg.CreatedAt,
		arg.UpdatedAt,
		arg.Title,
	)
	var i Chat
	// Scan order must match the RETURNING column list above exactly.
	err := row.Scan(
		&i.ID,
		&i.OwnerID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Title,
	)
	return i, err
}
|
|
|
|
const insertChatMessages = `-- name: InsertChatMessages :many
INSERT INTO chat_messages (chat_id, created_at, model, provider, content)
SELECT
	$1 :: uuid AS chat_id,
	$2 :: timestamptz AS created_at,
	$3 :: VARCHAR(127) AS model,
	$4 :: VARCHAR(127) AS provider,
	jsonb_array_elements($5 :: jsonb) AS content
RETURNING chat_messages.id, chat_messages.chat_id, chat_messages.created_at, chat_messages.model, chat_messages.provider, chat_messages.content
`

// InsertChatMessagesParams inserts a batch of messages in one statement:
// Content must be a JSON array; each element becomes one row (via
// jsonb_array_elements) sharing the same chat_id/created_at/model/provider.
type InsertChatMessagesParams struct {
	ChatID    uuid.UUID       `db:"chat_id" json:"chat_id"`
	CreatedAt time.Time       `db:"created_at" json:"created_at"`
	Model     string          `db:"model" json:"model"`
	Provider  string          `db:"provider" json:"provider"`
	Content   json.RawMessage `db:"content" json:"content"`
}

// InsertChatMessages inserts one row per element of arg.Content and returns
// the inserted rows.
func (q *sqlQuerier) InsertChatMessages(ctx context.Context, arg InsertChatMessagesParams) ([]ChatMessage, error) {
	rows, err := q.db.QueryContext(ctx, insertChatMessages,
		arg.ChatID,
		arg.CreatedAt,
		arg.Model,
		arg.Provider,
		arg.Content,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []ChatMessage
	for rows.Next() {
		var i ChatMessage
		// Scan order must match the RETURNING column list above exactly.
		if err := rows.Scan(
			&i.ID,
			&i.ChatID,
			&i.CreatedAt,
			&i.Model,
			&i.Provider,
			&i.Content,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const updateChatByID = `-- name: UpdateChatByID :exec
UPDATE chats
SET title = $2, updated_at = $3
WHERE id = $1
`

// UpdateChatByIDParams carries the mutable fields of a chat row.
type UpdateChatByIDParams struct {
	ID        uuid.UUID `db:"id" json:"id"`
	Title     string    `db:"title" json:"title"`
	UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
}

// UpdateChatByID sets a chat's title and updated_at timestamp.
func (q *sqlQuerier) UpdateChatByID(ctx context.Context, arg UpdateChatByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateChatByID, arg.ID, arg.Title, arg.UpdatedAt)
	return err
}
|
|
|
|
const deleteCryptoKey = `-- name: DeleteCryptoKey :one
UPDATE crypto_keys
SET secret = NULL, secret_key_id = NULL
WHERE feature = $1 AND sequence = $2 RETURNING feature, sequence, secret, secret_key_id, starts_at, deletes_at
`

// DeleteCryptoKeyParams identifies a crypto key by its (feature, sequence)
// composite key.
type DeleteCryptoKeyParams struct {
	Feature  CryptoKeyFeature `db:"feature" json:"feature"`
	Sequence int32            `db:"sequence" json:"sequence"`
}

// DeleteCryptoKey "deletes" a key by NULLing its secret material — the row
// itself is kept (soft delete) and returned with secret/secret_key_id cleared.
func (q *sqlQuerier) DeleteCryptoKey(ctx context.Context, arg DeleteCryptoKeyParams) (CryptoKey, error) {
	row := q.db.QueryRowContext(ctx, deleteCryptoKey, arg.Feature, arg.Sequence)
	var i CryptoKey
	// Scan order must match the RETURNING column list above exactly.
	err := row.Scan(
		&i.Feature,
		&i.Sequence,
		&i.Secret,
		&i.SecretKeyID,
		&i.StartsAt,
		&i.DeletesAt,
	)
	return i, err
}
|
|
|
|
const getCryptoKeyByFeatureAndSequence = `-- name: GetCryptoKeyByFeatureAndSequence :one
SELECT feature, sequence, secret, secret_key_id, starts_at, deletes_at
FROM crypto_keys
WHERE feature = $1
	AND sequence = $2
	AND secret IS NOT NULL
`

// GetCryptoKeyByFeatureAndSequenceParams identifies a key by (feature, sequence).
type GetCryptoKeyByFeatureAndSequenceParams struct {
	Feature  CryptoKeyFeature `db:"feature" json:"feature"`
	Sequence int32            `db:"sequence" json:"sequence"`
}

// GetCryptoKeyByFeatureAndSequence fetches one key; soft-deleted keys
// (secret IS NULL) are excluded, so they surface as sql.ErrNoRows.
func (q *sqlQuerier) GetCryptoKeyByFeatureAndSequence(ctx context.Context, arg GetCryptoKeyByFeatureAndSequenceParams) (CryptoKey, error) {
	row := q.db.QueryRowContext(ctx, getCryptoKeyByFeatureAndSequence, arg.Feature, arg.Sequence)
	var i CryptoKey
	// Scan order must match the SELECT column list above exactly.
	err := row.Scan(
		&i.Feature,
		&i.Sequence,
		&i.Secret,
		&i.SecretKeyID,
		&i.StartsAt,
		&i.DeletesAt,
	)
	return i, err
}
|
|
|
|
const getCryptoKeys = `-- name: GetCryptoKeys :many
SELECT feature, sequence, secret, secret_key_id, starts_at, deletes_at
FROM crypto_keys
WHERE secret IS NOT NULL
`

// GetCryptoKeys returns all keys that still have secret material
// (soft-deleted keys, whose secret is NULL, are excluded).
func (q *sqlQuerier) GetCryptoKeys(ctx context.Context) ([]CryptoKey, error) {
	rows, err := q.db.QueryContext(ctx, getCryptoKeys)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []CryptoKey
	for rows.Next() {
		var i CryptoKey
		// Scan order must match the SELECT column list above exactly.
		if err := rows.Scan(
			&i.Feature,
			&i.Sequence,
			&i.Secret,
			&i.SecretKeyID,
			&i.StartsAt,
			&i.DeletesAt,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getCryptoKeysByFeature = `-- name: GetCryptoKeysByFeature :many
SELECT feature, sequence, secret, secret_key_id, starts_at, deletes_at
FROM crypto_keys
WHERE feature = $1
	AND secret IS NOT NULL
ORDER BY sequence DESC
`

// GetCryptoKeysByFeature returns all non-soft-deleted keys for one feature,
// highest sequence first.
func (q *sqlQuerier) GetCryptoKeysByFeature(ctx context.Context, feature CryptoKeyFeature) ([]CryptoKey, error) {
	rows, err := q.db.QueryContext(ctx, getCryptoKeysByFeature, feature)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []CryptoKey
	for rows.Next() {
		var i CryptoKey
		// Scan order must match the SELECT column list above exactly.
		if err := rows.Scan(
			&i.Feature,
			&i.Sequence,
			&i.Secret,
			&i.SecretKeyID,
			&i.StartsAt,
			&i.DeletesAt,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getLatestCryptoKeyByFeature = `-- name: GetLatestCryptoKeyByFeature :one
|
|
SELECT feature, sequence, secret, secret_key_id, starts_at, deletes_at
|
|
FROM crypto_keys
|
|
WHERE feature = $1
|
|
ORDER BY sequence DESC
|
|
LIMIT 1
|
|
`
|
|
|
|
func (q *sqlQuerier) GetLatestCryptoKeyByFeature(ctx context.Context, feature CryptoKeyFeature) (CryptoKey, error) {
|
|
row := q.db.QueryRowContext(ctx, getLatestCryptoKeyByFeature, feature)
|
|
var i CryptoKey
|
|
err := row.Scan(
|
|
&i.Feature,
|
|
&i.Sequence,
|
|
&i.Secret,
|
|
&i.SecretKeyID,
|
|
&i.StartsAt,
|
|
&i.DeletesAt,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const insertCryptoKey = `-- name: InsertCryptoKey :one
|
|
INSERT INTO crypto_keys (
|
|
feature,
|
|
sequence,
|
|
secret,
|
|
starts_at,
|
|
secret_key_id
|
|
) VALUES (
|
|
$1,
|
|
$2,
|
|
$3,
|
|
$4,
|
|
$5
|
|
) RETURNING feature, sequence, secret, secret_key_id, starts_at, deletes_at
|
|
`
|
|
|
|
type InsertCryptoKeyParams struct {
|
|
Feature CryptoKeyFeature `db:"feature" json:"feature"`
|
|
Sequence int32 `db:"sequence" json:"sequence"`
|
|
Secret sql.NullString `db:"secret" json:"secret"`
|
|
StartsAt time.Time `db:"starts_at" json:"starts_at"`
|
|
SecretKeyID sql.NullString `db:"secret_key_id" json:"secret_key_id"`
|
|
}
|
|
|
|
func (q *sqlQuerier) InsertCryptoKey(ctx context.Context, arg InsertCryptoKeyParams) (CryptoKey, error) {
|
|
row := q.db.QueryRowContext(ctx, insertCryptoKey,
|
|
arg.Feature,
|
|
arg.Sequence,
|
|
arg.Secret,
|
|
arg.StartsAt,
|
|
arg.SecretKeyID,
|
|
)
|
|
var i CryptoKey
|
|
err := row.Scan(
|
|
&i.Feature,
|
|
&i.Sequence,
|
|
&i.Secret,
|
|
&i.SecretKeyID,
|
|
&i.StartsAt,
|
|
&i.DeletesAt,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const updateCryptoKeyDeletesAt = `-- name: UpdateCryptoKeyDeletesAt :one
|
|
UPDATE crypto_keys
|
|
SET deletes_at = $3
|
|
WHERE feature = $1 AND sequence = $2 RETURNING feature, sequence, secret, secret_key_id, starts_at, deletes_at
|
|
`
|
|
|
|
type UpdateCryptoKeyDeletesAtParams struct {
|
|
Feature CryptoKeyFeature `db:"feature" json:"feature"`
|
|
Sequence int32 `db:"sequence" json:"sequence"`
|
|
DeletesAt sql.NullTime `db:"deletes_at" json:"deletes_at"`
|
|
}
|
|
|
|
func (q *sqlQuerier) UpdateCryptoKeyDeletesAt(ctx context.Context, arg UpdateCryptoKeyDeletesAtParams) (CryptoKey, error) {
|
|
row := q.db.QueryRowContext(ctx, updateCryptoKeyDeletesAt, arg.Feature, arg.Sequence, arg.DeletesAt)
|
|
var i CryptoKey
|
|
err := row.Scan(
|
|
&i.Feature,
|
|
&i.Sequence,
|
|
&i.Secret,
|
|
&i.SecretKeyID,
|
|
&i.StartsAt,
|
|
&i.DeletesAt,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const getDBCryptKeys = `-- name: GetDBCryptKeys :many
|
|
SELECT number, active_key_digest, revoked_key_digest, created_at, revoked_at, test FROM dbcrypt_keys ORDER BY number ASC
|
|
`
|
|
|
|
func (q *sqlQuerier) GetDBCryptKeys(ctx context.Context) ([]DBCryptKey, error) {
|
|
rows, err := q.db.QueryContext(ctx, getDBCryptKeys)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []DBCryptKey
|
|
for rows.Next() {
|
|
var i DBCryptKey
|
|
if err := rows.Scan(
|
|
&i.Number,
|
|
&i.ActiveKeyDigest,
|
|
&i.RevokedKeyDigest,
|
|
&i.CreatedAt,
|
|
&i.RevokedAt,
|
|
&i.Test,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const insertDBCryptKey = `-- name: InsertDBCryptKey :exec
|
|
INSERT INTO dbcrypt_keys
|
|
(number, active_key_digest, created_at, test)
|
|
VALUES ($1::int, $2::text, CURRENT_TIMESTAMP, $3::text)
|
|
`
|
|
|
|
type InsertDBCryptKeyParams struct {
|
|
Number int32 `db:"number" json:"number"`
|
|
ActiveKeyDigest string `db:"active_key_digest" json:"active_key_digest"`
|
|
Test string `db:"test" json:"test"`
|
|
}
|
|
|
|
func (q *sqlQuerier) InsertDBCryptKey(ctx context.Context, arg InsertDBCryptKeyParams) error {
|
|
_, err := q.db.ExecContext(ctx, insertDBCryptKey, arg.Number, arg.ActiveKeyDigest, arg.Test)
|
|
return err
|
|
}
|
|
|
|
const revokeDBCryptKey = `-- name: RevokeDBCryptKey :exec
|
|
UPDATE dbcrypt_keys
|
|
SET
|
|
revoked_key_digest = active_key_digest,
|
|
active_key_digest = revoked_key_digest,
|
|
revoked_at = CURRENT_TIMESTAMP
|
|
WHERE
|
|
active_key_digest = $1::text
|
|
AND
|
|
revoked_key_digest IS NULL
|
|
`
|
|
|
|
func (q *sqlQuerier) RevokeDBCryptKey(ctx context.Context, activeKeyDigest string) error {
|
|
_, err := q.db.ExecContext(ctx, revokeDBCryptKey, activeKeyDigest)
|
|
return err
|
|
}
|
|
|
|
const deleteExternalAuthLink = `-- name: DeleteExternalAuthLink :exec
|
|
DELETE FROM external_auth_links WHERE provider_id = $1 AND user_id = $2
|
|
`
|
|
|
|
type DeleteExternalAuthLinkParams struct {
|
|
ProviderID string `db:"provider_id" json:"provider_id"`
|
|
UserID uuid.UUID `db:"user_id" json:"user_id"`
|
|
}
|
|
|
|
func (q *sqlQuerier) DeleteExternalAuthLink(ctx context.Context, arg DeleteExternalAuthLinkParams) error {
|
|
_, err := q.db.ExecContext(ctx, deleteExternalAuthLink, arg.ProviderID, arg.UserID)
|
|
return err
|
|
}
|
|
|
|
const getExternalAuthLink = `-- name: GetExternalAuthLink :one
|
|
SELECT provider_id, user_id, created_at, updated_at, oauth_access_token, oauth_refresh_token, oauth_expiry, oauth_access_token_key_id, oauth_refresh_token_key_id, oauth_extra FROM external_auth_links WHERE provider_id = $1 AND user_id = $2
|
|
`
|
|
|
|
type GetExternalAuthLinkParams struct {
|
|
ProviderID string `db:"provider_id" json:"provider_id"`
|
|
UserID uuid.UUID `db:"user_id" json:"user_id"`
|
|
}
|
|
|
|
func (q *sqlQuerier) GetExternalAuthLink(ctx context.Context, arg GetExternalAuthLinkParams) (ExternalAuthLink, error) {
|
|
row := q.db.QueryRowContext(ctx, getExternalAuthLink, arg.ProviderID, arg.UserID)
|
|
var i ExternalAuthLink
|
|
err := row.Scan(
|
|
&i.ProviderID,
|
|
&i.UserID,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.OAuthAccessToken,
|
|
&i.OAuthRefreshToken,
|
|
&i.OAuthExpiry,
|
|
&i.OAuthAccessTokenKeyID,
|
|
&i.OAuthRefreshTokenKeyID,
|
|
&i.OAuthExtra,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const getExternalAuthLinksByUserID = `-- name: GetExternalAuthLinksByUserID :many
|
|
SELECT provider_id, user_id, created_at, updated_at, oauth_access_token, oauth_refresh_token, oauth_expiry, oauth_access_token_key_id, oauth_refresh_token_key_id, oauth_extra FROM external_auth_links WHERE user_id = $1
|
|
`
|
|
|
|
func (q *sqlQuerier) GetExternalAuthLinksByUserID(ctx context.Context, userID uuid.UUID) ([]ExternalAuthLink, error) {
|
|
rows, err := q.db.QueryContext(ctx, getExternalAuthLinksByUserID, userID)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []ExternalAuthLink
|
|
for rows.Next() {
|
|
var i ExternalAuthLink
|
|
if err := rows.Scan(
|
|
&i.ProviderID,
|
|
&i.UserID,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.OAuthAccessToken,
|
|
&i.OAuthRefreshToken,
|
|
&i.OAuthExpiry,
|
|
&i.OAuthAccessTokenKeyID,
|
|
&i.OAuthRefreshTokenKeyID,
|
|
&i.OAuthExtra,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const insertExternalAuthLink = `-- name: InsertExternalAuthLink :one
|
|
INSERT INTO external_auth_links (
|
|
provider_id,
|
|
user_id,
|
|
created_at,
|
|
updated_at,
|
|
oauth_access_token,
|
|
oauth_access_token_key_id,
|
|
oauth_refresh_token,
|
|
oauth_refresh_token_key_id,
|
|
oauth_expiry,
|
|
oauth_extra
|
|
) VALUES (
|
|
$1,
|
|
$2,
|
|
$3,
|
|
$4,
|
|
$5,
|
|
$6,
|
|
$7,
|
|
$8,
|
|
$9,
|
|
$10
|
|
) RETURNING provider_id, user_id, created_at, updated_at, oauth_access_token, oauth_refresh_token, oauth_expiry, oauth_access_token_key_id, oauth_refresh_token_key_id, oauth_extra
|
|
`
|
|
|
|
type InsertExternalAuthLinkParams struct {
|
|
ProviderID string `db:"provider_id" json:"provider_id"`
|
|
UserID uuid.UUID `db:"user_id" json:"user_id"`
|
|
CreatedAt time.Time `db:"created_at" json:"created_at"`
|
|
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
|
|
OAuthAccessToken string `db:"oauth_access_token" json:"oauth_access_token"`
|
|
OAuthAccessTokenKeyID sql.NullString `db:"oauth_access_token_key_id" json:"oauth_access_token_key_id"`
|
|
OAuthRefreshToken string `db:"oauth_refresh_token" json:"oauth_refresh_token"`
|
|
OAuthRefreshTokenKeyID sql.NullString `db:"oauth_refresh_token_key_id" json:"oauth_refresh_token_key_id"`
|
|
OAuthExpiry time.Time `db:"oauth_expiry" json:"oauth_expiry"`
|
|
OAuthExtra pqtype.NullRawMessage `db:"oauth_extra" json:"oauth_extra"`
|
|
}
|
|
|
|
func (q *sqlQuerier) InsertExternalAuthLink(ctx context.Context, arg InsertExternalAuthLinkParams) (ExternalAuthLink, error) {
|
|
row := q.db.QueryRowContext(ctx, insertExternalAuthLink,
|
|
arg.ProviderID,
|
|
arg.UserID,
|
|
arg.CreatedAt,
|
|
arg.UpdatedAt,
|
|
arg.OAuthAccessToken,
|
|
arg.OAuthAccessTokenKeyID,
|
|
arg.OAuthRefreshToken,
|
|
arg.OAuthRefreshTokenKeyID,
|
|
arg.OAuthExpiry,
|
|
arg.OAuthExtra,
|
|
)
|
|
var i ExternalAuthLink
|
|
err := row.Scan(
|
|
&i.ProviderID,
|
|
&i.UserID,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.OAuthAccessToken,
|
|
&i.OAuthRefreshToken,
|
|
&i.OAuthExpiry,
|
|
&i.OAuthAccessTokenKeyID,
|
|
&i.OAuthRefreshTokenKeyID,
|
|
&i.OAuthExtra,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const updateExternalAuthLink = `-- name: UpdateExternalAuthLink :one
|
|
UPDATE external_auth_links SET
|
|
updated_at = $3,
|
|
oauth_access_token = $4,
|
|
oauth_access_token_key_id = $5,
|
|
oauth_refresh_token = $6,
|
|
oauth_refresh_token_key_id = $7,
|
|
oauth_expiry = $8,
|
|
oauth_extra = $9
|
|
WHERE provider_id = $1 AND user_id = $2 RETURNING provider_id, user_id, created_at, updated_at, oauth_access_token, oauth_refresh_token, oauth_expiry, oauth_access_token_key_id, oauth_refresh_token_key_id, oauth_extra
|
|
`
|
|
|
|
type UpdateExternalAuthLinkParams struct {
|
|
ProviderID string `db:"provider_id" json:"provider_id"`
|
|
UserID uuid.UUID `db:"user_id" json:"user_id"`
|
|
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
|
|
OAuthAccessToken string `db:"oauth_access_token" json:"oauth_access_token"`
|
|
OAuthAccessTokenKeyID sql.NullString `db:"oauth_access_token_key_id" json:"oauth_access_token_key_id"`
|
|
OAuthRefreshToken string `db:"oauth_refresh_token" json:"oauth_refresh_token"`
|
|
OAuthRefreshTokenKeyID sql.NullString `db:"oauth_refresh_token_key_id" json:"oauth_refresh_token_key_id"`
|
|
OAuthExpiry time.Time `db:"oauth_expiry" json:"oauth_expiry"`
|
|
OAuthExtra pqtype.NullRawMessage `db:"oauth_extra" json:"oauth_extra"`
|
|
}
|
|
|
|
func (q *sqlQuerier) UpdateExternalAuthLink(ctx context.Context, arg UpdateExternalAuthLinkParams) (ExternalAuthLink, error) {
|
|
row := q.db.QueryRowContext(ctx, updateExternalAuthLink,
|
|
arg.ProviderID,
|
|
arg.UserID,
|
|
arg.UpdatedAt,
|
|
arg.OAuthAccessToken,
|
|
arg.OAuthAccessTokenKeyID,
|
|
arg.OAuthRefreshToken,
|
|
arg.OAuthRefreshTokenKeyID,
|
|
arg.OAuthExpiry,
|
|
arg.OAuthExtra,
|
|
)
|
|
var i ExternalAuthLink
|
|
err := row.Scan(
|
|
&i.ProviderID,
|
|
&i.UserID,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.OAuthAccessToken,
|
|
&i.OAuthRefreshToken,
|
|
&i.OAuthExpiry,
|
|
&i.OAuthAccessTokenKeyID,
|
|
&i.OAuthRefreshTokenKeyID,
|
|
&i.OAuthExtra,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const updateExternalAuthLinkRefreshToken = `-- name: UpdateExternalAuthLinkRefreshToken :exec
|
|
UPDATE
|
|
external_auth_links
|
|
SET
|
|
oauth_refresh_token = $1,
|
|
updated_at = $2
|
|
WHERE
|
|
provider_id = $3
|
|
AND
|
|
user_id = $4
|
|
AND
|
|
-- Required for sqlc to generate a parameter for the oauth_refresh_token_key_id
|
|
$5 :: text = $5 :: text
|
|
`
|
|
|
|
type UpdateExternalAuthLinkRefreshTokenParams struct {
|
|
OAuthRefreshToken string `db:"oauth_refresh_token" json:"oauth_refresh_token"`
|
|
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
|
|
ProviderID string `db:"provider_id" json:"provider_id"`
|
|
UserID uuid.UUID `db:"user_id" json:"user_id"`
|
|
OAuthRefreshTokenKeyID string `db:"oauth_refresh_token_key_id" json:"oauth_refresh_token_key_id"`
|
|
}
|
|
|
|
func (q *sqlQuerier) UpdateExternalAuthLinkRefreshToken(ctx context.Context, arg UpdateExternalAuthLinkRefreshTokenParams) error {
|
|
_, err := q.db.ExecContext(ctx, updateExternalAuthLinkRefreshToken,
|
|
arg.OAuthRefreshToken,
|
|
arg.UpdatedAt,
|
|
arg.ProviderID,
|
|
arg.UserID,
|
|
arg.OAuthRefreshTokenKeyID,
|
|
)
|
|
return err
|
|
}
|
|
|
|
const getFileByHashAndCreator = `-- name: GetFileByHashAndCreator :one
|
|
SELECT
|
|
hash, created_at, created_by, mimetype, data, id
|
|
FROM
|
|
files
|
|
WHERE
|
|
hash = $1
|
|
AND
|
|
created_by = $2
|
|
LIMIT
|
|
1
|
|
`
|
|
|
|
type GetFileByHashAndCreatorParams struct {
|
|
Hash string `db:"hash" json:"hash"`
|
|
CreatedBy uuid.UUID `db:"created_by" json:"created_by"`
|
|
}
|
|
|
|
func (q *sqlQuerier) GetFileByHashAndCreator(ctx context.Context, arg GetFileByHashAndCreatorParams) (File, error) {
|
|
row := q.db.QueryRowContext(ctx, getFileByHashAndCreator, arg.Hash, arg.CreatedBy)
|
|
var i File
|
|
err := row.Scan(
|
|
&i.Hash,
|
|
&i.CreatedAt,
|
|
&i.CreatedBy,
|
|
&i.Mimetype,
|
|
&i.Data,
|
|
&i.ID,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const getFileByID = `-- name: GetFileByID :one
|
|
SELECT
|
|
hash, created_at, created_by, mimetype, data, id
|
|
FROM
|
|
files
|
|
WHERE
|
|
id = $1
|
|
LIMIT
|
|
1
|
|
`
|
|
|
|
func (q *sqlQuerier) GetFileByID(ctx context.Context, id uuid.UUID) (File, error) {
|
|
row := q.db.QueryRowContext(ctx, getFileByID, id)
|
|
var i File
|
|
err := row.Scan(
|
|
&i.Hash,
|
|
&i.CreatedAt,
|
|
&i.CreatedBy,
|
|
&i.Mimetype,
|
|
&i.Data,
|
|
&i.ID,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const getFileIDByTemplateVersionID = `-- name: GetFileIDByTemplateVersionID :one
|
|
SELECT
|
|
files.id
|
|
FROM
|
|
files
|
|
JOIN
|
|
provisioner_jobs ON
|
|
provisioner_jobs.storage_method = 'file'
|
|
AND provisioner_jobs.file_id = files.id
|
|
JOIN
|
|
template_versions ON template_versions.job_id = provisioner_jobs.id
|
|
WHERE
|
|
template_versions.id = $1
|
|
LIMIT
|
|
1
|
|
`
|
|
|
|
func (q *sqlQuerier) GetFileIDByTemplateVersionID(ctx context.Context, templateVersionID uuid.UUID) (uuid.UUID, error) {
|
|
row := q.db.QueryRowContext(ctx, getFileIDByTemplateVersionID, templateVersionID)
|
|
var id uuid.UUID
|
|
err := row.Scan(&id)
|
|
return id, err
|
|
}
|
|
|
|
const getFileTemplates = `-- name: GetFileTemplates :many
|
|
SELECT
|
|
files.id AS file_id,
|
|
files.created_by AS file_created_by,
|
|
templates.id AS template_id,
|
|
templates.organization_id AS template_organization_id,
|
|
templates.created_by AS template_created_by,
|
|
templates.user_acl,
|
|
templates.group_acl
|
|
FROM
|
|
templates
|
|
INNER JOIN
|
|
template_versions
|
|
ON templates.id = template_versions.template_id
|
|
INNER JOIN
|
|
provisioner_jobs
|
|
ON job_id = provisioner_jobs.id
|
|
INNER JOIN
|
|
files
|
|
ON files.id = provisioner_jobs.file_id
|
|
WHERE
|
|
-- Only fetch template version associated files.
|
|
storage_method = 'file'
|
|
AND provisioner_jobs.type = 'template_version_import'
|
|
AND file_id = $1
|
|
`
|
|
|
|
type GetFileTemplatesRow struct {
|
|
FileID uuid.UUID `db:"file_id" json:"file_id"`
|
|
FileCreatedBy uuid.UUID `db:"file_created_by" json:"file_created_by"`
|
|
TemplateID uuid.UUID `db:"template_id" json:"template_id"`
|
|
TemplateOrganizationID uuid.UUID `db:"template_organization_id" json:"template_organization_id"`
|
|
TemplateCreatedBy uuid.UUID `db:"template_created_by" json:"template_created_by"`
|
|
UserACL TemplateACL `db:"user_acl" json:"user_acl"`
|
|
GroupACL TemplateACL `db:"group_acl" json:"group_acl"`
|
|
}
|
|
|
|
// Get all templates that use a file.
|
|
func (q *sqlQuerier) GetFileTemplates(ctx context.Context, fileID uuid.UUID) ([]GetFileTemplatesRow, error) {
|
|
rows, err := q.db.QueryContext(ctx, getFileTemplates, fileID)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []GetFileTemplatesRow
|
|
for rows.Next() {
|
|
var i GetFileTemplatesRow
|
|
if err := rows.Scan(
|
|
&i.FileID,
|
|
&i.FileCreatedBy,
|
|
&i.TemplateID,
|
|
&i.TemplateOrganizationID,
|
|
&i.TemplateCreatedBy,
|
|
&i.UserACL,
|
|
&i.GroupACL,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const insertFile = `-- name: InsertFile :one
|
|
INSERT INTO
|
|
files (id, hash, created_at, created_by, mimetype, "data")
|
|
VALUES
|
|
($1, $2, $3, $4, $5, $6) RETURNING hash, created_at, created_by, mimetype, data, id
|
|
`
|
|
|
|
type InsertFileParams struct {
|
|
ID uuid.UUID `db:"id" json:"id"`
|
|
Hash string `db:"hash" json:"hash"`
|
|
CreatedAt time.Time `db:"created_at" json:"created_at"`
|
|
CreatedBy uuid.UUID `db:"created_by" json:"created_by"`
|
|
Mimetype string `db:"mimetype" json:"mimetype"`
|
|
Data []byte `db:"data" json:"data"`
|
|
}
|
|
|
|
func (q *sqlQuerier) InsertFile(ctx context.Context, arg InsertFileParams) (File, error) {
|
|
row := q.db.QueryRowContext(ctx, insertFile,
|
|
arg.ID,
|
|
arg.Hash,
|
|
arg.CreatedAt,
|
|
arg.CreatedBy,
|
|
arg.Mimetype,
|
|
arg.Data,
|
|
)
|
|
var i File
|
|
err := row.Scan(
|
|
&i.Hash,
|
|
&i.CreatedAt,
|
|
&i.CreatedBy,
|
|
&i.Mimetype,
|
|
&i.Data,
|
|
&i.ID,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const deleteGitSSHKey = `-- name: DeleteGitSSHKey :exec
|
|
DELETE FROM
|
|
gitsshkeys
|
|
WHERE
|
|
user_id = $1
|
|
`
|
|
|
|
func (q *sqlQuerier) DeleteGitSSHKey(ctx context.Context, userID uuid.UUID) error {
|
|
_, err := q.db.ExecContext(ctx, deleteGitSSHKey, userID)
|
|
return err
|
|
}
|
|
|
|
const getGitSSHKey = `-- name: GetGitSSHKey :one
|
|
SELECT
|
|
user_id, created_at, updated_at, private_key, public_key
|
|
FROM
|
|
gitsshkeys
|
|
WHERE
|
|
user_id = $1
|
|
`
|
|
|
|
func (q *sqlQuerier) GetGitSSHKey(ctx context.Context, userID uuid.UUID) (GitSSHKey, error) {
|
|
row := q.db.QueryRowContext(ctx, getGitSSHKey, userID)
|
|
var i GitSSHKey
|
|
err := row.Scan(
|
|
&i.UserID,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.PrivateKey,
|
|
&i.PublicKey,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const insertGitSSHKey = `-- name: InsertGitSSHKey :one
|
|
INSERT INTO
|
|
gitsshkeys (
|
|
user_id,
|
|
created_at,
|
|
updated_at,
|
|
private_key,
|
|
public_key
|
|
)
|
|
VALUES
|
|
($1, $2, $3, $4, $5) RETURNING user_id, created_at, updated_at, private_key, public_key
|
|
`
|
|
|
|
type InsertGitSSHKeyParams struct {
|
|
UserID uuid.UUID `db:"user_id" json:"user_id"`
|
|
CreatedAt time.Time `db:"created_at" json:"created_at"`
|
|
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
|
|
PrivateKey string `db:"private_key" json:"private_key"`
|
|
PublicKey string `db:"public_key" json:"public_key"`
|
|
}
|
|
|
|
func (q *sqlQuerier) InsertGitSSHKey(ctx context.Context, arg InsertGitSSHKeyParams) (GitSSHKey, error) {
|
|
row := q.db.QueryRowContext(ctx, insertGitSSHKey,
|
|
arg.UserID,
|
|
arg.CreatedAt,
|
|
arg.UpdatedAt,
|
|
arg.PrivateKey,
|
|
arg.PublicKey,
|
|
)
|
|
var i GitSSHKey
|
|
err := row.Scan(
|
|
&i.UserID,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.PrivateKey,
|
|
&i.PublicKey,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const updateGitSSHKey = `-- name: UpdateGitSSHKey :one
|
|
UPDATE
|
|
gitsshkeys
|
|
SET
|
|
updated_at = $2,
|
|
private_key = $3,
|
|
public_key = $4
|
|
WHERE
|
|
user_id = $1
|
|
RETURNING
|
|
user_id, created_at, updated_at, private_key, public_key
|
|
`
|
|
|
|
type UpdateGitSSHKeyParams struct {
|
|
UserID uuid.UUID `db:"user_id" json:"user_id"`
|
|
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
|
|
PrivateKey string `db:"private_key" json:"private_key"`
|
|
PublicKey string `db:"public_key" json:"public_key"`
|
|
}
|
|
|
|
func (q *sqlQuerier) UpdateGitSSHKey(ctx context.Context, arg UpdateGitSSHKeyParams) (GitSSHKey, error) {
|
|
row := q.db.QueryRowContext(ctx, updateGitSSHKey,
|
|
arg.UserID,
|
|
arg.UpdatedAt,
|
|
arg.PrivateKey,
|
|
arg.PublicKey,
|
|
)
|
|
var i GitSSHKey
|
|
err := row.Scan(
|
|
&i.UserID,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.PrivateKey,
|
|
&i.PublicKey,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const deleteGroupMemberFromGroup = `-- name: DeleteGroupMemberFromGroup :exec
|
|
DELETE FROM
|
|
group_members
|
|
WHERE
|
|
user_id = $1 AND
|
|
group_id = $2
|
|
`
|
|
|
|
type DeleteGroupMemberFromGroupParams struct {
|
|
UserID uuid.UUID `db:"user_id" json:"user_id"`
|
|
GroupID uuid.UUID `db:"group_id" json:"group_id"`
|
|
}
|
|
|
|
func (q *sqlQuerier) DeleteGroupMemberFromGroup(ctx context.Context, arg DeleteGroupMemberFromGroupParams) error {
|
|
_, err := q.db.ExecContext(ctx, deleteGroupMemberFromGroup, arg.UserID, arg.GroupID)
|
|
return err
|
|
}
|
|
|
|
const getGroupMembers = `-- name: GetGroupMembers :many
|
|
SELECT user_id, user_email, user_username, user_hashed_password, user_created_at, user_updated_at, user_status, user_rbac_roles, user_login_type, user_avatar_url, user_deleted, user_last_seen_at, user_quiet_hours_schedule, user_name, user_github_com_user_id, user_is_system, organization_id, group_name, group_id FROM group_members_expanded
|
|
WHERE CASE
|
|
WHEN $1::bool THEN TRUE
|
|
ELSE
|
|
user_is_system = false
|
|
END
|
|
`
|
|
|
|
func (q *sqlQuerier) GetGroupMembers(ctx context.Context, includeSystem bool) ([]GroupMember, error) {
|
|
rows, err := q.db.QueryContext(ctx, getGroupMembers, includeSystem)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []GroupMember
|
|
for rows.Next() {
|
|
var i GroupMember
|
|
if err := rows.Scan(
|
|
&i.UserID,
|
|
&i.UserEmail,
|
|
&i.UserUsername,
|
|
&i.UserHashedPassword,
|
|
&i.UserCreatedAt,
|
|
&i.UserUpdatedAt,
|
|
&i.UserStatus,
|
|
pq.Array(&i.UserRbacRoles),
|
|
&i.UserLoginType,
|
|
&i.UserAvatarUrl,
|
|
&i.UserDeleted,
|
|
&i.UserLastSeenAt,
|
|
&i.UserQuietHoursSchedule,
|
|
&i.UserName,
|
|
&i.UserGithubComUserID,
|
|
&i.UserIsSystem,
|
|
&i.OrganizationID,
|
|
&i.GroupName,
|
|
&i.GroupID,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const getGroupMembersByGroupID = `-- name: GetGroupMembersByGroupID :many
|
|
SELECT user_id, user_email, user_username, user_hashed_password, user_created_at, user_updated_at, user_status, user_rbac_roles, user_login_type, user_avatar_url, user_deleted, user_last_seen_at, user_quiet_hours_schedule, user_name, user_github_com_user_id, user_is_system, organization_id, group_name, group_id
|
|
FROM group_members_expanded
|
|
WHERE group_id = $1
|
|
-- Filter by system type
|
|
AND CASE
|
|
WHEN $2::bool THEN TRUE
|
|
ELSE
|
|
user_is_system = false
|
|
END
|
|
`
|
|
|
|
type GetGroupMembersByGroupIDParams struct {
|
|
GroupID uuid.UUID `db:"group_id" json:"group_id"`
|
|
IncludeSystem bool `db:"include_system" json:"include_system"`
|
|
}
|
|
|
|
func (q *sqlQuerier) GetGroupMembersByGroupID(ctx context.Context, arg GetGroupMembersByGroupIDParams) ([]GroupMember, error) {
|
|
rows, err := q.db.QueryContext(ctx, getGroupMembersByGroupID, arg.GroupID, arg.IncludeSystem)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []GroupMember
|
|
for rows.Next() {
|
|
var i GroupMember
|
|
if err := rows.Scan(
|
|
&i.UserID,
|
|
&i.UserEmail,
|
|
&i.UserUsername,
|
|
&i.UserHashedPassword,
|
|
&i.UserCreatedAt,
|
|
&i.UserUpdatedAt,
|
|
&i.UserStatus,
|
|
pq.Array(&i.UserRbacRoles),
|
|
&i.UserLoginType,
|
|
&i.UserAvatarUrl,
|
|
&i.UserDeleted,
|
|
&i.UserLastSeenAt,
|
|
&i.UserQuietHoursSchedule,
|
|
&i.UserName,
|
|
&i.UserGithubComUserID,
|
|
&i.UserIsSystem,
|
|
&i.OrganizationID,
|
|
&i.GroupName,
|
|
&i.GroupID,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const getGroupMembersCountByGroupID = `-- name: GetGroupMembersCountByGroupID :one
|
|
SELECT COUNT(*)
|
|
FROM group_members_expanded
|
|
WHERE group_id = $1
|
|
-- Filter by system type
|
|
AND CASE
|
|
WHEN $2::bool THEN TRUE
|
|
ELSE
|
|
user_is_system = false
|
|
END
|
|
`
|
|
|
|
type GetGroupMembersCountByGroupIDParams struct {
|
|
GroupID uuid.UUID `db:"group_id" json:"group_id"`
|
|
IncludeSystem bool `db:"include_system" json:"include_system"`
|
|
}
|
|
|
|
// Returns the total count of members in a group. Shows the total
|
|
// count even if the caller does not have read access to ResourceGroupMember.
|
|
// They only need ResourceGroup read access.
|
|
func (q *sqlQuerier) GetGroupMembersCountByGroupID(ctx context.Context, arg GetGroupMembersCountByGroupIDParams) (int64, error) {
|
|
row := q.db.QueryRowContext(ctx, getGroupMembersCountByGroupID, arg.GroupID, arg.IncludeSystem)
|
|
var count int64
|
|
err := row.Scan(&count)
|
|
return count, err
|
|
}
|
|
|
|
const insertGroupMember = `-- name: InsertGroupMember :exec
|
|
INSERT INTO
|
|
group_members (user_id, group_id)
|
|
VALUES
|
|
($1, $2)
|
|
`
|
|
|
|
type InsertGroupMemberParams struct {
|
|
UserID uuid.UUID `db:"user_id" json:"user_id"`
|
|
GroupID uuid.UUID `db:"group_id" json:"group_id"`
|
|
}
|
|
|
|
func (q *sqlQuerier) InsertGroupMember(ctx context.Context, arg InsertGroupMemberParams) error {
|
|
_, err := q.db.ExecContext(ctx, insertGroupMember, arg.UserID, arg.GroupID)
|
|
return err
|
|
}
|
|
|
|
const insertUserGroupsByID = `-- name: InsertUserGroupsByID :many
|
|
WITH groups AS (
|
|
SELECT
|
|
id
|
|
FROM
|
|
groups
|
|
WHERE
|
|
groups.id = ANY($2 :: uuid [])
|
|
)
|
|
INSERT INTO
|
|
group_members (user_id, group_id)
|
|
SELECT
|
|
$1,
|
|
groups.id
|
|
FROM
|
|
groups
|
|
ON CONFLICT DO NOTHING
|
|
RETURNING group_id
|
|
`
|
|
|
|
type InsertUserGroupsByIDParams struct {
|
|
UserID uuid.UUID `db:"user_id" json:"user_id"`
|
|
GroupIds []uuid.UUID `db:"group_ids" json:"group_ids"`
|
|
}
|
|
|
|
// InsertUserGroupsByID adds a user to all provided groups, if they exist.
|
|
// If there is a conflict, the user is already a member
|
|
func (q *sqlQuerier) InsertUserGroupsByID(ctx context.Context, arg InsertUserGroupsByIDParams) ([]uuid.UUID, error) {
|
|
rows, err := q.db.QueryContext(ctx, insertUserGroupsByID, arg.UserID, pq.Array(arg.GroupIds))
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []uuid.UUID
|
|
for rows.Next() {
|
|
var group_id uuid.UUID
|
|
if err := rows.Scan(&group_id); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, group_id)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const insertUserGroupsByName = `-- name: InsertUserGroupsByName :exec
|
|
WITH groups AS (
|
|
SELECT
|
|
id
|
|
FROM
|
|
groups
|
|
WHERE
|
|
groups.organization_id = $2 AND
|
|
groups.name = ANY($3 :: text [])
|
|
)
|
|
INSERT INTO
|
|
group_members (user_id, group_id)
|
|
SELECT
|
|
$1,
|
|
groups.id
|
|
FROM
|
|
groups
|
|
`
|
|
|
|
type InsertUserGroupsByNameParams struct {
|
|
UserID uuid.UUID `db:"user_id" json:"user_id"`
|
|
OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
|
|
GroupNames []string `db:"group_names" json:"group_names"`
|
|
}
|
|
|
|
// InsertUserGroupsByName adds a user to all provided groups, if they exist.
|
|
func (q *sqlQuerier) InsertUserGroupsByName(ctx context.Context, arg InsertUserGroupsByNameParams) error {
|
|
_, err := q.db.ExecContext(ctx, insertUserGroupsByName, arg.UserID, arg.OrganizationID, pq.Array(arg.GroupNames))
|
|
return err
|
|
}
|
|
|
|
const removeUserFromAllGroups = `-- name: RemoveUserFromAllGroups :exec
|
|
DELETE FROM
|
|
group_members
|
|
WHERE
|
|
user_id = $1
|
|
`
|
|
|
|
func (q *sqlQuerier) RemoveUserFromAllGroups(ctx context.Context, userID uuid.UUID) error {
|
|
_, err := q.db.ExecContext(ctx, removeUserFromAllGroups, userID)
|
|
return err
|
|
}
|
|
|
|
const removeUserFromGroups = `-- name: RemoveUserFromGroups :many
|
|
DELETE FROM
|
|
group_members
|
|
WHERE
|
|
user_id = $1 AND
|
|
group_id = ANY($2 :: uuid [])
|
|
RETURNING group_id
|
|
`
|
|
|
|
type RemoveUserFromGroupsParams struct {
|
|
UserID uuid.UUID `db:"user_id" json:"user_id"`
|
|
GroupIds []uuid.UUID `db:"group_ids" json:"group_ids"`
|
|
}
|
|
|
|
func (q *sqlQuerier) RemoveUserFromGroups(ctx context.Context, arg RemoveUserFromGroupsParams) ([]uuid.UUID, error) {
|
|
rows, err := q.db.QueryContext(ctx, removeUserFromGroups, arg.UserID, pq.Array(arg.GroupIds))
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []uuid.UUID
|
|
for rows.Next() {
|
|
var group_id uuid.UUID
|
|
if err := rows.Scan(&group_id); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, group_id)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const deleteGroupByID = `-- name: DeleteGroupByID :exec
|
|
DELETE FROM
|
|
groups
|
|
WHERE
|
|
id = $1
|
|
`
|
|
|
|
func (q *sqlQuerier) DeleteGroupByID(ctx context.Context, id uuid.UUID) error {
|
|
_, err := q.db.ExecContext(ctx, deleteGroupByID, id)
|
|
return err
|
|
}
|
|
|
|
const getGroupByID = `-- name: GetGroupByID :one
|
|
SELECT
|
|
id, name, organization_id, avatar_url, quota_allowance, display_name, source
|
|
FROM
|
|
groups
|
|
WHERE
|
|
id = $1
|
|
LIMIT
|
|
1
|
|
`
|
|
|
|
func (q *sqlQuerier) GetGroupByID(ctx context.Context, id uuid.UUID) (Group, error) {
|
|
row := q.db.QueryRowContext(ctx, getGroupByID, id)
|
|
var i Group
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.Name,
|
|
&i.OrganizationID,
|
|
&i.AvatarURL,
|
|
&i.QuotaAllowance,
|
|
&i.DisplayName,
|
|
&i.Source,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const getGroupByOrgAndName = `-- name: GetGroupByOrgAndName :one
|
|
SELECT
|
|
id, name, organization_id, avatar_url, quota_allowance, display_name, source
|
|
FROM
|
|
groups
|
|
WHERE
|
|
organization_id = $1
|
|
AND
|
|
name = $2
|
|
LIMIT
|
|
1
|
|
`
|
|
|
|
type GetGroupByOrgAndNameParams struct {
|
|
OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
|
|
Name string `db:"name" json:"name"`
|
|
}
|
|
|
|
func (q *sqlQuerier) GetGroupByOrgAndName(ctx context.Context, arg GetGroupByOrgAndNameParams) (Group, error) {
|
|
row := q.db.QueryRowContext(ctx, getGroupByOrgAndName, arg.OrganizationID, arg.Name)
|
|
var i Group
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.Name,
|
|
&i.OrganizationID,
|
|
&i.AvatarURL,
|
|
&i.QuotaAllowance,
|
|
&i.DisplayName,
|
|
&i.Source,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const getGroups = `-- name: GetGroups :many
SELECT
	groups.id, groups.name, groups.organization_id, groups.avatar_url, groups.quota_allowance, groups.display_name, groups.source,
	organizations.name AS organization_name,
	organizations.display_name AS organization_display_name
FROM
	groups
INNER JOIN
	organizations ON groups.organization_id = organizations.id
WHERE
	true
	AND CASE
		WHEN $1:: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
			groups.organization_id = $1
		ELSE true
	END
	AND CASE
		-- Filter to only include groups a user is a member of
		WHEN $2::uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
			EXISTS (
				SELECT
					1
				FROM
					-- this view handles the 'everyone' group in orgs.
					group_members_expanded
				WHERE
					group_members_expanded.group_id = groups.id
				AND
					group_members_expanded.user_id = $2
			)
		ELSE true
	END
	AND CASE WHEN array_length($3 :: text[], 1) > 0 THEN
		groups.name = ANY($3)
	ELSE true
	END
	AND CASE WHEN array_length($4 :: uuid[], 1) > 0 THEN
		groups.id = ANY($4)
	ELSE true
	END
`

// GetGroupsParams filters GetGroups. Every field is optional: a zero
// UUID (for OrganizationID / HasMemberID) or an empty slice (for
// GroupNames / GroupIds) disables the corresponding filter, per the
// CASE expressions in the query above.
type GetGroupsParams struct {
	OrganizationID uuid.UUID   `db:"organization_id" json:"organization_id"`
	HasMemberID    uuid.UUID   `db:"has_member_id" json:"has_member_id"`
	GroupNames     []string    `db:"group_names" json:"group_names"`
	GroupIds       []uuid.UUID `db:"group_ids" json:"group_ids"`
}

// GetGroupsRow pairs each group with its organization's name and
// display name from the INNER JOIN on organizations.
type GetGroupsRow struct {
	Group                   Group  `db:"group" json:"group"`
	OrganizationName        string `db:"organization_name" json:"organization_name"`
	OrganizationDisplayName string `db:"organization_display_name" json:"organization_display_name"`
}

// GetGroups returns groups joined with their organization, optionally
// filtered by organization, membership, name, and/or ID (see
// GetGroupsParams). A nil slice is returned when nothing matches.
func (q *sqlQuerier) GetGroups(ctx context.Context, arg GetGroupsParams) ([]GetGroupsRow, error) {
	rows, err := q.db.QueryContext(ctx, getGroups,
		arg.OrganizationID,
		arg.HasMemberID,
		pq.Array(arg.GroupNames),
		pq.Array(arg.GroupIds),
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetGroupsRow
	for rows.Next() {
		var i GetGroupsRow
		if err := rows.Scan(
			&i.Group.ID,
			&i.Group.Name,
			&i.Group.OrganizationID,
			&i.Group.AvatarURL,
			&i.Group.QuotaAllowance,
			&i.Group.DisplayName,
			&i.Group.Source,
			&i.OrganizationName,
			&i.OrganizationDisplayName,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const insertAllUsersGroup = `-- name: InsertAllUsersGroup :one
INSERT INTO groups (
	id,
	name,
	organization_id
)
VALUES
	($1, 'Everyone', $1) RETURNING id, name, organization_id, avatar_url, quota_allowance, display_name, source
`

// InsertAllUsersGroup creates the implicit 'Everyone' group for an
// organization and returns the inserted row.
//
// We use the organization_id as the id
// for simplicity since all users is
// every member of the org.
func (q *sqlQuerier) InsertAllUsersGroup(ctx context.Context, organizationID uuid.UUID) (Group, error) {
	row := q.db.QueryRowContext(ctx, insertAllUsersGroup, organizationID)
	var i Group
	err := row.Scan(
		&i.ID,
		&i.Name,
		&i.OrganizationID,
		&i.AvatarURL,
		&i.QuotaAllowance,
		&i.DisplayName,
		&i.Source,
	)
	return i, err
}
|
|
|
|
const insertGroup = `-- name: InsertGroup :one
INSERT INTO groups (
	id,
	name,
	display_name,
	organization_id,
	avatar_url,
	quota_allowance
)
VALUES
	($1, $2, $3, $4, $5, $6) RETURNING id, name, organization_id, avatar_url, quota_allowance, display_name, source
`

// InsertGroupParams carries the caller-supplied columns for InsertGroup.
// Note: the group's source column is not set here; it takes the table
// default.
type InsertGroupParams struct {
	ID             uuid.UUID `db:"id" json:"id"`
	Name           string    `db:"name" json:"name"`
	DisplayName    string    `db:"display_name" json:"display_name"`
	OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
	AvatarURL      string    `db:"avatar_url" json:"avatar_url"`
	QuotaAllowance int32     `db:"quota_allowance" json:"quota_allowance"`
}

// InsertGroup creates a new group and returns the inserted row
// (including the defaulted source column).
func (q *sqlQuerier) InsertGroup(ctx context.Context, arg InsertGroupParams) (Group, error) {
	row := q.db.QueryRowContext(ctx, insertGroup,
		arg.ID,
		arg.Name,
		arg.DisplayName,
		arg.OrganizationID,
		arg.AvatarURL,
		arg.QuotaAllowance,
	)
	var i Group
	err := row.Scan(
		&i.ID,
		&i.Name,
		&i.OrganizationID,
		&i.AvatarURL,
		&i.QuotaAllowance,
		&i.DisplayName,
		&i.Source,
	)
	return i, err
}
|
|
|
|
const insertMissingGroups = `-- name: InsertMissingGroups :many
INSERT INTO groups (
	id,
	name,
	organization_id,
	source
)
SELECT
	gen_random_uuid(),
	group_name,
	$1,
	$2
FROM
	UNNEST($3 :: text[]) AS group_name
ON CONFLICT DO NOTHING
RETURNING id, name, organization_id, avatar_url, quota_allowance, display_name, source
`

// InsertMissingGroupsParams supplies the target organization, the
// source tag applied to every created group, and the candidate names.
type InsertMissingGroupsParams struct {
	OrganizationID uuid.UUID   `db:"organization_id" json:"organization_id"`
	Source         GroupSource `db:"source" json:"source"`
	GroupNames     []string    `db:"group_names" json:"group_names"`
}

// Inserts any group by name that does not exist. All new groups are given
// a random uuid, are inserted into the same organization. They have the default
// values for avatar, display name, and quota allowance (all zero values).
// If the name conflicts, do nothing.
//
// Only the rows actually inserted are returned (conflicting names are
// skipped by ON CONFLICT DO NOTHING and thus absent from the result).
func (q *sqlQuerier) InsertMissingGroups(ctx context.Context, arg InsertMissingGroupsParams) ([]Group, error) {
	rows, err := q.db.QueryContext(ctx, insertMissingGroups, arg.OrganizationID, arg.Source, pq.Array(arg.GroupNames))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []Group
	for rows.Next() {
		var i Group
		if err := rows.Scan(
			&i.ID,
			&i.Name,
			&i.OrganizationID,
			&i.AvatarURL,
			&i.QuotaAllowance,
			&i.DisplayName,
			&i.Source,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const updateGroupByID = `-- name: UpdateGroupByID :one
UPDATE
	groups
SET
	name = $1,
	display_name = $2,
	avatar_url = $3,
	quota_allowance = $4
WHERE
	id = $5
RETURNING id, name, organization_id, avatar_url, quota_allowance, display_name, source
`

// UpdateGroupByIDParams holds the new column values for UpdateGroupByID.
// All four settable columns are overwritten unconditionally; the group's
// organization_id and source are not updatable through this query.
type UpdateGroupByIDParams struct {
	Name           string    `db:"name" json:"name"`
	DisplayName    string    `db:"display_name" json:"display_name"`
	AvatarURL      string    `db:"avatar_url" json:"avatar_url"`
	QuotaAllowance int32     `db:"quota_allowance" json:"quota_allowance"`
	ID             uuid.UUID `db:"id" json:"id"`
}

// UpdateGroupByID overwrites a group's mutable columns and returns the
// updated row. Scan yields sql.ErrNoRows if the ID does not exist.
func (q *sqlQuerier) UpdateGroupByID(ctx context.Context, arg UpdateGroupByIDParams) (Group, error) {
	row := q.db.QueryRowContext(ctx, updateGroupByID,
		arg.Name,
		arg.DisplayName,
		arg.AvatarURL,
		arg.QuotaAllowance,
		arg.ID,
	)
	var i Group
	err := row.Scan(
		&i.ID,
		&i.Name,
		&i.OrganizationID,
		&i.AvatarURL,
		&i.QuotaAllowance,
		&i.DisplayName,
		&i.Source,
	)
	return i, err
}
|
|
|
|
const getTemplateAppInsights = `-- name: GetTemplateAppInsights :many
WITH
	-- Create a list of all unique apps by template, this is used to
	-- filter out irrelevant template usage stats.
	apps AS (
		SELECT DISTINCT ON (ws.template_id, app.slug)
			ws.template_id,
			app.slug,
			app.display_name,
			app.icon
		FROM
			workspaces ws
		JOIN
			workspace_builds AS build
		ON
			build.workspace_id = ws.id
		JOIN
			workspace_resources AS resource
		ON
			resource.job_id = build.job_id
		JOIN
			workspace_agents AS agent
		ON
			agent.resource_id = resource.id
		JOIN
			workspace_apps AS app
		ON
			app.agent_id = agent.id
		WHERE
			-- Partial query parameter filter.
			CASE WHEN COALESCE(array_length($1::uuid[], 1), 0) > 0 THEN ws.template_id = ANY($1::uuid[]) ELSE TRUE END
		ORDER BY
			ws.template_id, app.slug, app.created_at DESC
	),
	-- Join apps and template usage stats to filter out irrelevant rows.
	-- Note that this way of joining will eliminate all data-points that
	-- aren't for "real" apps. That means ports are ignored (even though
	-- they're part of the dataset), as well as are "[terminal]" entries
	-- which are alternate datapoints for reconnecting pty usage.
	template_usage_stats_with_apps AS (
		SELECT
			tus.start_time,
			tus.template_id,
			tus.user_id,
			apps.slug,
			apps.display_name,
			apps.icon,
			(tus.app_usage_mins -> apps.slug)::smallint AS usage_mins
		FROM
			apps
		JOIN
			template_usage_stats AS tus
		ON
			-- Query parameter filter.
			tus.start_time >= $2::timestamptz
			AND tus.end_time <= $3::timestamptz
			AND CASE WHEN COALESCE(array_length($1::uuid[], 1), 0) > 0 THEN tus.template_id = ANY($1::uuid[]) ELSE TRUE END
			-- Primary join condition.
			AND tus.template_id = apps.template_id
			AND tus.app_usage_mins ? apps.slug -- Key exists in object.
	),
	-- Group the app insights by interval, user and unique app. This
	-- allows us to deduplicate a user using the same app across
	-- multiple templates.
	app_insights AS (
		SELECT
			user_id,
			slug,
			display_name,
			icon,
			-- See motivation in GetTemplateInsights for LEAST(SUM(n), 30).
			LEAST(SUM(usage_mins), 30) AS usage_mins
		FROM
			template_usage_stats_with_apps
		GROUP BY
			start_time, user_id, slug, display_name, icon
	),
	-- Analyze the users unique app usage across all templates. Count
	-- usage across consecutive intervals as continuous usage.
	times_used AS (
		SELECT DISTINCT ON (user_id, slug, display_name, icon, uniq)
			slug,
			display_name,
			icon,
			-- Turn start_time into a unique identifier that identifies a users
			-- continuous app usage. The value of uniq is otherwise garbage.
			--
			-- Since we're aggregating per user app usage across templates,
			-- there can be duplicate start_times. To handle this, we use the
			-- dense_rank() function, otherwise row_number() would suffice.
			start_time - (
				dense_rank() OVER (
					PARTITION BY
						user_id, slug, display_name, icon
					ORDER BY
						start_time
				) * '30 minutes'::interval
			) AS uniq
		FROM
			template_usage_stats_with_apps
	),
	-- Even though we allow identical apps to be aggregated across
	-- templates, we still want to be able to report which templates
	-- the data comes from.
	templates AS (
		SELECT
			slug,
			display_name,
			icon,
			array_agg(DISTINCT template_id)::uuid[] AS template_ids
		FROM
			template_usage_stats_with_apps
		GROUP BY
			slug, display_name, icon
	)

SELECT
	t.template_ids,
	COUNT(DISTINCT ai.user_id) AS active_users,
	ai.slug,
	ai.display_name,
	ai.icon,
	(SUM(ai.usage_mins) * 60)::bigint AS usage_seconds,
	COALESCE((
		SELECT
			COUNT(*)
		FROM
			times_used
		WHERE
			times_used.slug = ai.slug
			AND times_used.display_name = ai.display_name
			AND times_used.icon = ai.icon
	), 0)::bigint AS times_used
FROM
	app_insights AS ai
JOIN
	templates AS t
ON
	t.slug = ai.slug
	AND t.display_name = ai.display_name
	AND t.icon = ai.icon
GROUP BY
	t.template_ids, ai.slug, ai.display_name, ai.icon
`

// GetTemplateAppInsightsParams bounds the query by time window and an
// optional template filter (empty slice = all templates).
type GetTemplateAppInsightsParams struct {
	TemplateIDs []uuid.UUID `db:"template_ids" json:"template_ids"`
	StartTime   time.Time   `db:"start_time" json:"start_time"`
	EndTime     time.Time   `db:"end_time" json:"end_time"`
}

// GetTemplateAppInsightsRow is one aggregate per unique app
// (slug/display_name/icon), with the templates it appeared in.
type GetTemplateAppInsightsRow struct {
	TemplateIDs  []uuid.UUID `db:"template_ids" json:"template_ids"`
	ActiveUsers  int64       `db:"active_users" json:"active_users"`
	Slug         string      `db:"slug" json:"slug"`
	DisplayName  string      `db:"display_name" json:"display_name"`
	Icon         string      `db:"icon" json:"icon"`
	UsageSeconds int64       `db:"usage_seconds" json:"usage_seconds"`
	TimesUsed    int64       `db:"times_used" json:"times_used"`
}

// GetTemplateAppInsights returns the aggregate usage of each app in a given
// timeframe. The result can be filtered on template_ids, meaning only user data
// from workspaces based on those templates will be included.
func (q *sqlQuerier) GetTemplateAppInsights(ctx context.Context, arg GetTemplateAppInsightsParams) ([]GetTemplateAppInsightsRow, error) {
	rows, err := q.db.QueryContext(ctx, getTemplateAppInsights, pq.Array(arg.TemplateIDs), arg.StartTime, arg.EndTime)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetTemplateAppInsightsRow
	for rows.Next() {
		var i GetTemplateAppInsightsRow
		if err := rows.Scan(
			pq.Array(&i.TemplateIDs),
			&i.ActiveUsers,
			&i.Slug,
			&i.DisplayName,
			&i.Icon,
			&i.UsageSeconds,
			&i.TimesUsed,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getTemplateAppInsightsByTemplate = `-- name: GetTemplateAppInsightsByTemplate :many
WITH
	-- This CTE is used to explode app usage into minute buckets, then
	-- flatten the users app usage within the template so that usage in
	-- multiple workspaces under one template is only counted once for
	-- every minute.
	app_insights AS (
		SELECT
			w.template_id,
			was.user_id,
			-- Both app stats and agent stats track web terminal usage, but
			-- by different means. The app stats value should be more
			-- accurate so we don't want to discard it just yet.
			CASE
				WHEN was.access_method = 'terminal'
				THEN '[terminal]' -- Unique name, app names can't contain brackets.
				ELSE was.slug_or_port
			END::text AS app_name,
			COALESCE(wa.display_name, '') AS display_name,
			(wa.slug IS NOT NULL)::boolean AS is_app,
			COUNT(DISTINCT s.minute_bucket) AS app_minutes
		FROM
			workspace_app_stats AS was
		JOIN
			workspaces AS w
		ON
			w.id = was.workspace_id
		-- We do a left join here because we want to include user IDs that have used
		-- e.g. ports when counting active users.
		LEFT JOIN
			workspace_apps wa
		ON
			wa.agent_id = was.agent_id
			AND wa.slug = was.slug_or_port
		-- Generate a series of minute buckets for each session for computing the
		-- mintes/bucket.
		CROSS JOIN
			generate_series(
				date_trunc('minute', was.session_started_at),
				-- Subtract 1 μs to avoid creating an extra series.
				date_trunc('minute', was.session_ended_at - '1 microsecond'::interval),
				'1 minute'::interval
			) AS s(minute_bucket)
		WHERE
			s.minute_bucket >= $1::timestamptz
			AND s.minute_bucket < $2::timestamptz
		GROUP BY
			w.template_id, was.user_id, was.access_method, was.slug_or_port, wa.display_name, wa.slug
	)

SELECT
	template_id,
	app_name AS slug_or_port,
	display_name AS display_name,
	COUNT(DISTINCT user_id)::bigint AS active_users,
	(SUM(app_minutes) * 60)::bigint AS usage_seconds
FROM
	app_insights
WHERE
	is_app IS TRUE
GROUP BY
	template_id, slug_or_port, display_name
`

// GetTemplateAppInsightsByTemplateParams is the half-open time window
// [StartTime, EndTime) for the minute buckets.
type GetTemplateAppInsightsByTemplateParams struct {
	StartTime time.Time `db:"start_time" json:"start_time"`
	EndTime   time.Time `db:"end_time" json:"end_time"`
}

// GetTemplateAppInsightsByTemplateRow is one aggregate per
// (template, app slug, display name) combination.
type GetTemplateAppInsightsByTemplateRow struct {
	TemplateID   uuid.UUID `db:"template_id" json:"template_id"`
	SlugOrPort   string    `db:"slug_or_port" json:"slug_or_port"`
	DisplayName  string    `db:"display_name" json:"display_name"`
	ActiveUsers  int64     `db:"active_users" json:"active_users"`
	UsageSeconds int64     `db:"usage_seconds" json:"usage_seconds"`
}

// GetTemplateAppInsightsByTemplate is used for Prometheus metrics. Keep
// in sync with GetTemplateAppInsights and UpsertTemplateUsageStats.
func (q *sqlQuerier) GetTemplateAppInsightsByTemplate(ctx context.Context, arg GetTemplateAppInsightsByTemplateParams) ([]GetTemplateAppInsightsByTemplateRow, error) {
	rows, err := q.db.QueryContext(ctx, getTemplateAppInsightsByTemplate, arg.StartTime, arg.EndTime)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetTemplateAppInsightsByTemplateRow
	for rows.Next() {
		var i GetTemplateAppInsightsByTemplateRow
		if err := rows.Scan(
			&i.TemplateID,
			&i.SlugOrPort,
			&i.DisplayName,
			&i.ActiveUsers,
			&i.UsageSeconds,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getTemplateInsights = `-- name: GetTemplateInsights :one
WITH
	insights AS (
		SELECT
			user_id,
			-- See motivation in GetTemplateInsights for LEAST(SUM(n), 30).
			LEAST(SUM(usage_mins), 30) AS usage_mins,
			LEAST(SUM(ssh_mins), 30) AS ssh_mins,
			LEAST(SUM(sftp_mins), 30) AS sftp_mins,
			LEAST(SUM(reconnecting_pty_mins), 30) AS reconnecting_pty_mins,
			LEAST(SUM(vscode_mins), 30) AS vscode_mins,
			LEAST(SUM(jetbrains_mins), 30) AS jetbrains_mins
		FROM
			template_usage_stats
		WHERE
			start_time >= $1::timestamptz
			AND end_time <= $2::timestamptz
			AND CASE WHEN COALESCE(array_length($3::uuid[], 1), 0) > 0 THEN template_id = ANY($3::uuid[]) ELSE TRUE END
		GROUP BY
			start_time, user_id
	),
	templates AS (
		SELECT
			array_agg(DISTINCT template_id) AS template_ids,
			array_agg(DISTINCT template_id) FILTER (WHERE ssh_mins > 0) AS ssh_template_ids,
			array_agg(DISTINCT template_id) FILTER (WHERE sftp_mins > 0) AS sftp_template_ids,
			array_agg(DISTINCT template_id) FILTER (WHERE reconnecting_pty_mins > 0) AS reconnecting_pty_template_ids,
			array_agg(DISTINCT template_id) FILTER (WHERE vscode_mins > 0) AS vscode_template_ids,
			array_agg(DISTINCT template_id) FILTER (WHERE jetbrains_mins > 0) AS jetbrains_template_ids
		FROM
			template_usage_stats
		WHERE
			start_time >= $1::timestamptz
			AND end_time <= $2::timestamptz
			AND CASE WHEN COALESCE(array_length($3::uuid[], 1), 0) > 0 THEN template_id = ANY($3::uuid[]) ELSE TRUE END
	)

SELECT
	COALESCE((SELECT template_ids FROM templates), '{}')::uuid[] AS template_ids, -- Includes app usage.
	COALESCE((SELECT ssh_template_ids FROM templates), '{}')::uuid[] AS ssh_template_ids,
	COALESCE((SELECT sftp_template_ids FROM templates), '{}')::uuid[] AS sftp_template_ids,
	COALESCE((SELECT reconnecting_pty_template_ids FROM templates), '{}')::uuid[] AS reconnecting_pty_template_ids,
	COALESCE((SELECT vscode_template_ids FROM templates), '{}')::uuid[] AS vscode_template_ids,
	COALESCE((SELECT jetbrains_template_ids FROM templates), '{}')::uuid[] AS jetbrains_template_ids,
	COALESCE(COUNT(DISTINCT user_id), 0)::bigint AS active_users, -- Includes app usage.
	COALESCE(SUM(usage_mins) * 60, 0)::bigint AS usage_total_seconds, -- Includes app usage.
	COALESCE(SUM(ssh_mins) * 60, 0)::bigint AS usage_ssh_seconds,
	COALESCE(SUM(sftp_mins) * 60, 0)::bigint AS usage_sftp_seconds,
	COALESCE(SUM(reconnecting_pty_mins) * 60, 0)::bigint AS usage_reconnecting_pty_seconds,
	COALESCE(SUM(vscode_mins) * 60, 0)::bigint AS usage_vscode_seconds,
	COALESCE(SUM(jetbrains_mins) * 60, 0)::bigint AS usage_jetbrains_seconds
FROM
	insights
`

// GetTemplateInsightsParams bounds the query by time window and an
// optional template filter (empty slice = all templates).
type GetTemplateInsightsParams struct {
	StartTime   time.Time   `db:"start_time" json:"start_time"`
	EndTime     time.Time   `db:"end_time" json:"end_time"`
	TemplateIDs []uuid.UUID `db:"template_ids" json:"template_ids"`
}

// GetTemplateInsightsRow is the single aggregate row returned by
// GetTemplateInsights; the per-connection-type template ID slices are
// empty (not nil) when no usage matched, due to the COALESCE defaults.
type GetTemplateInsightsRow struct {
	TemplateIDs                 []uuid.UUID `db:"template_ids" json:"template_ids"`
	SshTemplateIds              []uuid.UUID `db:"ssh_template_ids" json:"ssh_template_ids"`
	SftpTemplateIds             []uuid.UUID `db:"sftp_template_ids" json:"sftp_template_ids"`
	ReconnectingPtyTemplateIds  []uuid.UUID `db:"reconnecting_pty_template_ids" json:"reconnecting_pty_template_ids"`
	VscodeTemplateIds           []uuid.UUID `db:"vscode_template_ids" json:"vscode_template_ids"`
	JetbrainsTemplateIds        []uuid.UUID `db:"jetbrains_template_ids" json:"jetbrains_template_ids"`
	ActiveUsers                 int64       `db:"active_users" json:"active_users"`
	UsageTotalSeconds           int64       `db:"usage_total_seconds" json:"usage_total_seconds"`
	UsageSshSeconds             int64       `db:"usage_ssh_seconds" json:"usage_ssh_seconds"`
	UsageSftpSeconds            int64       `db:"usage_sftp_seconds" json:"usage_sftp_seconds"`
	UsageReconnectingPtySeconds int64       `db:"usage_reconnecting_pty_seconds" json:"usage_reconnecting_pty_seconds"`
	UsageVscodeSeconds          int64       `db:"usage_vscode_seconds" json:"usage_vscode_seconds"`
	UsageJetbrainsSeconds       int64       `db:"usage_jetbrains_seconds" json:"usage_jetbrains_seconds"`
}

// GetTemplateInsights returns the aggregate user-produced usage of all
// workspaces in a given timeframe. The template IDs, active users, and
// usage_seconds all reflect any usage in the template, including apps.
//
// When combining data from multiple templates, we must make a guess at
// how the user behaved for the 30 minute interval. In this case we make
// the assumption that if the user used two workspaces for 15 minutes,
// they did so sequentially, thus we sum the usage up to a maximum of
// 30 minutes with LEAST(SUM(n), 30).
func (q *sqlQuerier) GetTemplateInsights(ctx context.Context, arg GetTemplateInsightsParams) (GetTemplateInsightsRow, error) {
	row := q.db.QueryRowContext(ctx, getTemplateInsights, arg.StartTime, arg.EndTime, pq.Array(arg.TemplateIDs))
	var i GetTemplateInsightsRow
	err := row.Scan(
		pq.Array(&i.TemplateIDs),
		pq.Array(&i.SshTemplateIds),
		pq.Array(&i.SftpTemplateIds),
		pq.Array(&i.ReconnectingPtyTemplateIds),
		pq.Array(&i.VscodeTemplateIds),
		pq.Array(&i.JetbrainsTemplateIds),
		&i.ActiveUsers,
		&i.UsageTotalSeconds,
		&i.UsageSshSeconds,
		&i.UsageSftpSeconds,
		&i.UsageReconnectingPtySeconds,
		&i.UsageVscodeSeconds,
		&i.UsageJetbrainsSeconds,
	)
	return i, err
}
|
|
|
|
const getTemplateInsightsByInterval = `-- name: GetTemplateInsightsByInterval :many
WITH
	ts AS (
		SELECT
			d::timestamptz AS from_,
			LEAST(
				(d::timestamptz + ($2::int || ' day')::interval)::timestamptz,
				$3::timestamptz
			)::timestamptz AS to_
		FROM
			generate_series(
				$4::timestamptz,
				-- Subtract 1 μs to avoid creating an extra series.
				($3::timestamptz) - '1 microsecond'::interval,
				($2::int || ' day')::interval
			) AS d
	)

SELECT
	ts.from_ AS start_time,
	ts.to_ AS end_time,
	array_remove(array_agg(DISTINCT tus.template_id), NULL)::uuid[] AS template_ids,
	COUNT(DISTINCT tus.user_id) AS active_users
FROM
	ts
LEFT JOIN
	template_usage_stats AS tus
ON
	tus.start_time >= ts.from_
	AND tus.start_time < ts.to_ -- End time exclusion criteria optimization for index.
	AND tus.end_time <= ts.to_
	AND CASE WHEN COALESCE(array_length($1::uuid[], 1), 0) > 0 THEN tus.template_id = ANY($1::uuid[]) ELSE TRUE END
GROUP BY
	ts.from_, ts.to_
`

// GetTemplateInsightsByIntervalParams: TemplateIDs is an optional filter
// (empty slice = all templates); IntervalDays is the bucket width used to
// split [StartTime, EndTime) into intervals.
type GetTemplateInsightsByIntervalParams struct {
	TemplateIDs  []uuid.UUID `db:"template_ids" json:"template_ids"`
	IntervalDays int32       `db:"interval_days" json:"interval_days"`
	EndTime      time.Time   `db:"end_time" json:"end_time"`
	StartTime    time.Time   `db:"start_time" json:"start_time"`
}

// GetTemplateInsightsByIntervalRow is one interval bucket with the
// templates used and distinct active user count in that bucket.
type GetTemplateInsightsByIntervalRow struct {
	StartTime   time.Time   `db:"start_time" json:"start_time"`
	EndTime     time.Time   `db:"end_time" json:"end_time"`
	TemplateIDs []uuid.UUID `db:"template_ids" json:"template_ids"`
	ActiveUsers int64       `db:"active_users" json:"active_users"`
}

// GetTemplateInsightsByInterval returns all intervals between start and end
// time, if end time is a partial interval, it will be included in the results and
// that interval will be shorter than a full one. If there is no data for a selected
// interval/template, it will be included in the results with 0 active users.
func (q *sqlQuerier) GetTemplateInsightsByInterval(ctx context.Context, arg GetTemplateInsightsByIntervalParams) ([]GetTemplateInsightsByIntervalRow, error) {
	rows, err := q.db.QueryContext(ctx, getTemplateInsightsByInterval,
		pq.Array(arg.TemplateIDs),
		arg.IntervalDays,
		arg.EndTime,
		arg.StartTime,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetTemplateInsightsByIntervalRow
	for rows.Next() {
		var i GetTemplateInsightsByIntervalRow
		if err := rows.Scan(
			&i.StartTime,
			&i.EndTime,
			pq.Array(&i.TemplateIDs),
			&i.ActiveUsers,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getTemplateInsightsByTemplate = `-- name: GetTemplateInsightsByTemplate :many
WITH
	-- This CTE is used to truncate agent usage into minute buckets, then
	-- flatten the users agent usage within the template so that usage in
	-- multiple workspaces under one template is only counted once for
	-- every minute (per user).
	insights AS (
		SELECT
			template_id,
			user_id,
			COUNT(DISTINCT CASE WHEN session_count_ssh > 0 THEN date_trunc('minute', created_at) ELSE NULL END) AS ssh_mins,
			-- TODO(mafredri): Enable when we have the column.
			-- COUNT(DISTINCT CASE WHEN session_count_sftp > 0 THEN date_trunc('minute', created_at) ELSE NULL END) AS sftp_mins,
			COUNT(DISTINCT CASE WHEN session_count_reconnecting_pty > 0 THEN date_trunc('minute', created_at) ELSE NULL END) AS reconnecting_pty_mins,
			COUNT(DISTINCT CASE WHEN session_count_vscode > 0 THEN date_trunc('minute', created_at) ELSE NULL END) AS vscode_mins,
			COUNT(DISTINCT CASE WHEN session_count_jetbrains > 0 THEN date_trunc('minute', created_at) ELSE NULL END) AS jetbrains_mins,
			-- NOTE(mafredri): The agent stats are currently very unreliable, and
			-- sometimes the connections are missing, even during active sessions.
			-- Since we can't fully rely on this, we check for "any connection
			-- within this bucket". A better solution here would be preferable.
			MAX(connection_count) > 0 AS has_connection
		FROM
			workspace_agent_stats
		WHERE
			created_at >= $1::timestamptz
			AND created_at < $2::timestamptz
			-- Inclusion criteria to filter out empty results.
			AND (
				session_count_ssh > 0
				-- TODO(mafredri): Enable when we have the column.
				-- OR session_count_sftp > 0
				OR session_count_reconnecting_pty > 0
				OR session_count_vscode > 0
				OR session_count_jetbrains > 0
			)
		GROUP BY
			template_id, user_id
	)

SELECT
	template_id,
	COUNT(DISTINCT user_id)::bigint AS active_users,
	(SUM(vscode_mins) * 60)::bigint AS usage_vscode_seconds,
	(SUM(jetbrains_mins) * 60)::bigint AS usage_jetbrains_seconds,
	(SUM(reconnecting_pty_mins) * 60)::bigint AS usage_reconnecting_pty_seconds,
	(SUM(ssh_mins) * 60)::bigint AS usage_ssh_seconds
FROM
	insights
WHERE
	has_connection
GROUP BY
	template_id
`

// GetTemplateInsightsByTemplateParams is the half-open time window
// [StartTime, EndTime) over workspace_agent_stats.created_at.
type GetTemplateInsightsByTemplateParams struct {
	StartTime time.Time `db:"start_time" json:"start_time"`
	EndTime   time.Time `db:"end_time" json:"end_time"`
}

// GetTemplateInsightsByTemplateRow is one aggregate per template with
// per-connection-type usage seconds.
type GetTemplateInsightsByTemplateRow struct {
	TemplateID                  uuid.UUID `db:"template_id" json:"template_id"`
	ActiveUsers                 int64     `db:"active_users" json:"active_users"`
	UsageVscodeSeconds          int64     `db:"usage_vscode_seconds" json:"usage_vscode_seconds"`
	UsageJetbrainsSeconds       int64     `db:"usage_jetbrains_seconds" json:"usage_jetbrains_seconds"`
	UsageReconnectingPtySeconds int64     `db:"usage_reconnecting_pty_seconds" json:"usage_reconnecting_pty_seconds"`
	UsageSshSeconds             int64     `db:"usage_ssh_seconds" json:"usage_ssh_seconds"`
}

// GetTemplateInsightsByTemplate is used for Prometheus metrics. Keep
// in sync with GetTemplateInsights and UpsertTemplateUsageStats.
func (q *sqlQuerier) GetTemplateInsightsByTemplate(ctx context.Context, arg GetTemplateInsightsByTemplateParams) ([]GetTemplateInsightsByTemplateRow, error) {
	rows, err := q.db.QueryContext(ctx, getTemplateInsightsByTemplate, arg.StartTime, arg.EndTime)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetTemplateInsightsByTemplateRow
	for rows.Next() {
		var i GetTemplateInsightsByTemplateRow
		if err := rows.Scan(
			&i.TemplateID,
			&i.ActiveUsers,
			&i.UsageVscodeSeconds,
			&i.UsageJetbrainsSeconds,
			&i.UsageReconnectingPtySeconds,
			&i.UsageSshSeconds,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getTemplateParameterInsights = `-- name: GetTemplateParameterInsights :many
|
|
WITH latest_workspace_builds AS (
|
|
SELECT
|
|
wb.id,
|
|
wbmax.template_id,
|
|
wb.template_version_id
|
|
FROM (
|
|
SELECT
|
|
tv.template_id, wbmax.workspace_id, MAX(wbmax.build_number) as max_build_number
|
|
FROM workspace_builds wbmax
|
|
JOIN template_versions tv ON (tv.id = wbmax.template_version_id)
|
|
WHERE
|
|
wbmax.created_at >= $1::timestamptz
|
|
AND wbmax.created_at < $2::timestamptz
|
|
AND CASE WHEN COALESCE(array_length($3::uuid[], 1), 0) > 0 THEN tv.template_id = ANY($3::uuid[]) ELSE TRUE END
|
|
GROUP BY tv.template_id, wbmax.workspace_id
|
|
) wbmax
|
|
JOIN workspace_builds wb ON (
|
|
wb.workspace_id = wbmax.workspace_id
|
|
AND wb.build_number = wbmax.max_build_number
|
|
)
|
|
), unique_template_params AS (
|
|
SELECT
|
|
ROW_NUMBER() OVER () AS num,
|
|
array_agg(DISTINCT wb.template_id)::uuid[] AS template_ids,
|
|
array_agg(wb.id)::uuid[] AS workspace_build_ids,
|
|
tvp.name,
|
|
tvp.type,
|
|
tvp.display_name,
|
|
tvp.description,
|
|
tvp.options
|
|
FROM latest_workspace_builds wb
|
|
JOIN template_version_parameters tvp ON (tvp.template_version_id = wb.template_version_id)
|
|
GROUP BY tvp.name, tvp.type, tvp.display_name, tvp.description, tvp.options
|
|
)
|
|
|
|
SELECT
|
|
utp.num,
|
|
utp.template_ids,
|
|
utp.name,
|
|
utp.type,
|
|
utp.display_name,
|
|
utp.description,
|
|
utp.options,
|
|
wbp.value,
|
|
COUNT(wbp.value) AS count
|
|
FROM unique_template_params utp
|
|
JOIN workspace_build_parameters wbp ON (utp.workspace_build_ids @> ARRAY[wbp.workspace_build_id] AND utp.name = wbp.name)
|
|
GROUP BY utp.num, utp.template_ids, utp.name, utp.type, utp.display_name, utp.description, utp.options, wbp.value
|
|
`
|
|
|
|
type GetTemplateParameterInsightsParams struct {
|
|
StartTime time.Time `db:"start_time" json:"start_time"`
|
|
EndTime time.Time `db:"end_time" json:"end_time"`
|
|
TemplateIDs []uuid.UUID `db:"template_ids" json:"template_ids"`
|
|
}
|
|
|
|
type GetTemplateParameterInsightsRow struct {
|
|
Num int64 `db:"num" json:"num"`
|
|
TemplateIDs []uuid.UUID `db:"template_ids" json:"template_ids"`
|
|
Name string `db:"name" json:"name"`
|
|
Type string `db:"type" json:"type"`
|
|
DisplayName string `db:"display_name" json:"display_name"`
|
|
Description string `db:"description" json:"description"`
|
|
Options json.RawMessage `db:"options" json:"options"`
|
|
Value string `db:"value" json:"value"`
|
|
Count int64 `db:"count" json:"count"`
|
|
}
|
|
|
|
// GetTemplateParameterInsights does for each template in a given timeframe,
// look for the latest workspace build (for every workspace) that has been
// created in the timeframe and return the aggregate usage counts of parameter
// values.
func (q *sqlQuerier) GetTemplateParameterInsights(ctx context.Context, arg GetTemplateParameterInsightsParams) ([]GetTemplateParameterInsightsRow, error) {
	// UUID slices must be wrapped with pq.Array so lib/pq encodes them as
	// Postgres arrays for the $3::uuid[] parameter.
	rows, err := q.db.QueryContext(ctx, getTemplateParameterInsights, arg.StartTime, arg.EndTime, pq.Array(arg.TemplateIDs))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetTemplateParameterInsightsRow
	for rows.Next() {
		var i GetTemplateParameterInsightsRow
		// NOTE: the Scan destinations must stay in the exact order of the
		// SELECT columns in getTemplateParameterInsights.
		if err := rows.Scan(
			&i.Num,
			pq.Array(&i.TemplateIDs),
			&i.Name,
			&i.Type,
			&i.DisplayName,
			&i.Description,
			&i.Options,
			&i.Value,
			&i.Count,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close (in addition to the defer) surfaces any error from
	// draining the result set; the deferred Close then becomes a no-op.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getTemplateUsageStats selects raw template_usage_stats rows whose window
// falls inside [start_time, end_time]. An empty $3 array disables template
// filtering.
const getTemplateUsageStats = `-- name: GetTemplateUsageStats :many
SELECT
	start_time, end_time, template_id, user_id, median_latency_ms, usage_mins, ssh_mins, sftp_mins, reconnecting_pty_mins, vscode_mins, jetbrains_mins, app_usage_mins
FROM
	template_usage_stats
WHERE
	start_time >= $1::timestamptz
	AND end_time <= $2::timestamptz
	AND CASE WHEN COALESCE(array_length($3::uuid[], 1), 0) > 0 THEN template_id = ANY($3::uuid[]) ELSE TRUE END
`

// GetTemplateUsageStatsParams holds the arguments for GetTemplateUsageStats.
// An empty TemplateIDs slice disables template filtering.
type GetTemplateUsageStatsParams struct {
	StartTime   time.Time   `db:"start_time" json:"start_time"`
	EndTime     time.Time   `db:"end_time" json:"end_time"`
	TemplateIDs []uuid.UUID `db:"template_ids" json:"template_ids"`
}

// GetTemplateUsageStats returns the template_usage_stats rows contained in the
// given time range, optionally filtered to a set of templates.
func (q *sqlQuerier) GetTemplateUsageStats(ctx context.Context, arg GetTemplateUsageStatsParams) ([]TemplateUsageStat, error) {
	rows, err := q.db.QueryContext(ctx, getTemplateUsageStats, arg.StartTime, arg.EndTime, pq.Array(arg.TemplateIDs))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []TemplateUsageStat
	for rows.Next() {
		var i TemplateUsageStat
		// Scan order mirrors the SELECT column list above.
		if err := rows.Scan(
			&i.StartTime,
			&i.EndTime,
			&i.TemplateID,
			&i.UserID,
			&i.MedianLatencyMs,
			&i.UsageMins,
			&i.SshMins,
			&i.SftpMins,
			&i.ReconnectingPtyMins,
			&i.VscodeMins,
			&i.JetbrainsMins,
			&i.AppUsageMins,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getUserActivityInsights aggregates per-user usage minutes from
// template_usage_stats (capped at 30 mins per start_time bucket) and returns
// total usage seconds plus the set of templates each user touched.
const getUserActivityInsights = `-- name: GetUserActivityInsights :many
WITH
	deployment_stats AS (
		SELECT
			start_time,
			user_id,
			array_agg(template_id) AS template_ids,
			-- See motivation in GetTemplateInsights for LEAST(SUM(n), 30).
			LEAST(SUM(usage_mins), 30) AS usage_mins
		FROM
			template_usage_stats
		WHERE
			start_time >= $1::timestamptz
			AND end_time <= $2::timestamptz
			AND CASE WHEN COALESCE(array_length($3::uuid[], 1), 0) > 0 THEN template_id = ANY($3::uuid[]) ELSE TRUE END
		GROUP BY
			start_time, user_id
	),
	template_ids AS (
		SELECT
			user_id,
			array_agg(DISTINCT template_id) AS ids
		FROM
			deployment_stats, unnest(template_ids) template_id
		GROUP BY
			user_id
	)

SELECT
	ds.user_id,
	u.username,
	u.avatar_url,
	t.ids::uuid[] AS template_ids,
	(SUM(ds.usage_mins) * 60)::bigint AS usage_seconds
FROM
	deployment_stats ds
JOIN
	users u
ON
	u.id = ds.user_id
JOIN
	template_ids t
ON
	ds.user_id = t.user_id
GROUP BY
	ds.user_id, u.username, u.avatar_url, t.ids
ORDER BY
	ds.user_id ASC
`

// GetUserActivityInsightsParams holds the arguments for
// GetUserActivityInsights. An empty TemplateIDs slice disables template
// filtering.
type GetUserActivityInsightsParams struct {
	StartTime   time.Time   `db:"start_time" json:"start_time"`
	EndTime     time.Time   `db:"end_time" json:"end_time"`
	TemplateIDs []uuid.UUID `db:"template_ids" json:"template_ids"`
}

// GetUserActivityInsightsRow is one user's aggregate activity: the templates
// they used in the window and their total usage in seconds.
type GetUserActivityInsightsRow struct {
	UserID       uuid.UUID   `db:"user_id" json:"user_id"`
	Username     string      `db:"username" json:"username"`
	AvatarURL    string      `db:"avatar_url" json:"avatar_url"`
	TemplateIDs  []uuid.UUID `db:"template_ids" json:"template_ids"`
	UsageSeconds int64       `db:"usage_seconds" json:"usage_seconds"`
}

// GetUserActivityInsights returns the ranking with top active users.
// The result can be filtered on template_ids, meaning only user data
// from workspaces based on those templates will be included.
// Note: The usage_seconds and usage_seconds_cumulative differ only when
// requesting deployment-wide (or multiple template) data. Cumulative
// produces a bloated value if a user has used multiple templates
// simultaneously.
func (q *sqlQuerier) GetUserActivityInsights(ctx context.Context, arg GetUserActivityInsightsParams) ([]GetUserActivityInsightsRow, error) {
	rows, err := q.db.QueryContext(ctx, getUserActivityInsights, arg.StartTime, arg.EndTime, pq.Array(arg.TemplateIDs))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetUserActivityInsightsRow
	for rows.Next() {
		var i GetUserActivityInsightsRow
		if err := rows.Scan(
			&i.UserID,
			&i.Username,
			&i.AvatarURL,
			pq.Array(&i.TemplateIDs),
			&i.UsageSeconds,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getUserLatencyInsights computes per-user p50/p95 connection latency over
// template_usage_stats.median_latency_ms; COALESCE(..., -1) marks users with
// no latency data.
const getUserLatencyInsights = `-- name: GetUserLatencyInsights :many
SELECT
	tus.user_id,
	u.username,
	u.avatar_url,
	array_agg(DISTINCT tus.template_id)::uuid[] AS template_ids,
	COALESCE((PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY tus.median_latency_ms)), -1)::float AS workspace_connection_latency_50,
	COALESCE((PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY tus.median_latency_ms)), -1)::float AS workspace_connection_latency_95
FROM
	template_usage_stats tus
JOIN
	users u
ON
	u.id = tus.user_id
WHERE
	tus.start_time >= $1::timestamptz
	AND tus.end_time <= $2::timestamptz
	AND CASE WHEN COALESCE(array_length($3::uuid[], 1), 0) > 0 THEN tus.template_id = ANY($3::uuid[]) ELSE TRUE END
GROUP BY
	tus.user_id, u.username, u.avatar_url
ORDER BY
	tus.user_id ASC
`

// GetUserLatencyInsightsParams holds the arguments for GetUserLatencyInsights.
// An empty TemplateIDs slice disables template filtering.
type GetUserLatencyInsightsParams struct {
	StartTime   time.Time   `db:"start_time" json:"start_time"`
	EndTime     time.Time   `db:"end_time" json:"end_time"`
	TemplateIDs []uuid.UUID `db:"template_ids" json:"template_ids"`
}

// GetUserLatencyInsightsRow is one user's latency percentiles; the latency
// fields are -1 when no data was available (see COALESCE in the query).
type GetUserLatencyInsightsRow struct {
	UserID                       uuid.UUID   `db:"user_id" json:"user_id"`
	Username                     string      `db:"username" json:"username"`
	AvatarURL                    string      `db:"avatar_url" json:"avatar_url"`
	TemplateIDs                  []uuid.UUID `db:"template_ids" json:"template_ids"`
	WorkspaceConnectionLatency50 float64     `db:"workspace_connection_latency_50" json:"workspace_connection_latency_50"`
	WorkspaceConnectionLatency95 float64     `db:"workspace_connection_latency_95" json:"workspace_connection_latency_95"`
}

// GetUserLatencyInsights returns the median and 95th percentile connection
// latency that users have experienced. The result can be filtered on
// template_ids, meaning only user data from workspaces based on those templates
// will be included.
func (q *sqlQuerier) GetUserLatencyInsights(ctx context.Context, arg GetUserLatencyInsightsParams) ([]GetUserLatencyInsightsRow, error) {
	rows, err := q.db.QueryContext(ctx, getUserLatencyInsights, arg.StartTime, arg.EndTime, pq.Array(arg.TemplateIDs))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetUserLatencyInsightsRow
	for rows.Next() {
		var i GetUserLatencyInsightsRow
		if err := rows.Scan(
			&i.UserID,
			&i.Username,
			&i.AvatarURL,
			pq.Array(&i.TemplateIDs),
			&i.WorkspaceConnectionLatency50,
			&i.WorkspaceConnectionLatency95,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getUserStatusCounts builds a time series (step $3 seconds, default 1 day
// when $3 <= 0) of how many non-deleted users were in each status at each
// point between $1 and $2. See the CTE comments below for the approach.
const getUserStatusCounts = `-- name: GetUserStatusCounts :many
WITH
-- dates_of_interest defines all points in time that are relevant to the query.
-- It includes the start_time, all status changes, all deletions, and the end_time.
dates_of_interest AS (
	SELECT date FROM generate_series(
		$1::timestamptz,
		$2::timestamptz,
		(CASE WHEN $3::int <= 0 THEN 3600 * 24 ELSE $3::int END || ' seconds')::interval
	) AS date
),
-- latest_status_before_range defines the status of each user before the start_time.
-- We do not include users who were deleted before the start_time. We use this to ensure that
-- we correctly count users prior to the start_time for a complete graph.
latest_status_before_range AS (
	SELECT
		DISTINCT usc.user_id,
		usc.new_status,
		usc.changed_at,
		ud.deleted
	FROM user_status_changes usc
	LEFT JOIN LATERAL (
		SELECT COUNT(*) > 0 AS deleted
		FROM user_deleted ud
		WHERE ud.user_id = usc.user_id AND (ud.deleted_at < usc.changed_at OR ud.deleted_at < $1)
	) AS ud ON true
	WHERE usc.changed_at < $1::timestamptz
	ORDER BY usc.user_id, usc.changed_at DESC
),
-- status_changes_during_range defines the status of each user during the start_time and end_time.
-- If a user is deleted during the time range, we count status changes between the start_time and the deletion date.
-- Theoretically, it should probably not be possible to update the status of a deleted user, but we
-- need to ensure that this is enforced, so that a change in business logic later does not break this graph.
status_changes_during_range AS (
	SELECT
		usc.user_id,
		usc.new_status,
		usc.changed_at,
		ud.deleted
	FROM user_status_changes usc
	LEFT JOIN LATERAL (
		SELECT COUNT(*) > 0 AS deleted
		FROM user_deleted ud
		WHERE ud.user_id = usc.user_id AND ud.deleted_at < usc.changed_at
	) AS ud ON true
	WHERE usc.changed_at >= $1::timestamptz
		AND usc.changed_at <= $2::timestamptz
),
-- relevant_status_changes defines the status of each user at any point in time.
-- It includes the status of each user before the start_time, and the status of each user during the start_time and end_time.
relevant_status_changes AS (
	SELECT
		user_id,
		new_status,
		changed_at
	FROM latest_status_before_range
	WHERE NOT deleted

	UNION ALL

	SELECT
		user_id,
		new_status,
		changed_at
	FROM status_changes_during_range
	WHERE NOT deleted
),
-- statuses defines all the distinct statuses that were present just before and during the time range.
-- This is used to ensure that we have a series for every relevant status.
statuses AS (
	SELECT DISTINCT new_status FROM relevant_status_changes
),
-- We only want to count the latest status change for each user on each date and then filter them by the relevant status.
-- We use the row_number function to ensure that we only count the latest status change for each user on each date.
-- We then filter the status changes by the relevant status in the final select statement below.
ranked_status_change_per_user_per_date AS (
	SELECT
		d.date,
		rsc1.user_id,
		ROW_NUMBER() OVER (PARTITION BY d.date, rsc1.user_id ORDER BY rsc1.changed_at DESC) AS rn,
		rsc1.new_status
	FROM dates_of_interest d
	LEFT JOIN relevant_status_changes rsc1 ON rsc1.changed_at <= d.date
)
SELECT
	rscpupd.date::timestamptz AS date,
	statuses.new_status AS status,
	COUNT(rscpupd.user_id) FILTER (
		WHERE rscpupd.rn = 1
		AND (
			rscpupd.new_status = statuses.new_status
			AND (
				-- Include users who haven't been deleted
				NOT EXISTS (SELECT 1 FROM user_deleted WHERE user_id = rscpupd.user_id)
				OR
				-- Or users whose deletion date is after the current date we're looking at
				rscpupd.date < (SELECT deleted_at FROM user_deleted WHERE user_id = rscpupd.user_id)
			)
		)
	) AS count
FROM ranked_status_change_per_user_per_date rscpupd
CROSS JOIN statuses
GROUP BY rscpupd.date, statuses.new_status
ORDER BY rscpupd.date
`

// GetUserStatusCountsParams holds the arguments for GetUserStatusCounts.
type GetUserStatusCountsParams struct {
	StartTime time.Time `db:"start_time" json:"start_time"`
	EndTime   time.Time `db:"end_time" json:"end_time"`
	// Interval is the series step in seconds; values <= 0 fall back to
	// 86400 (one day) — see the CASE in the dates_of_interest CTE.
	Interval int32 `db:"interval" json:"interval"`
}

// GetUserStatusCountsRow is one (timestamp, status) point of the series with
// the number of users in that status at that time.
type GetUserStatusCountsRow struct {
	Date   time.Time  `db:"date" json:"date"`
	Status UserStatus `db:"status" json:"status"`
	Count  int64      `db:"count" json:"count"`
}

// GetUserStatusCounts returns the count of users in each status over time.
// The time range is inclusively defined by the start_time and end_time parameters.
//
// Bucketing:
// Between the start_time and end_time, we include each timestamp where a user's status changed or they were deleted.
// We do not bucket these results by day or some other time unit. This is because such bucketing would hide potentially
// important patterns. If a user was active for 23 hours and 59 minutes, and then suspended, a daily bucket would hide this.
// A daily bucket would also have required us to carefully manage the timezone of the bucket based on the timezone of the user.
//
// Accumulation:
// We do not start counting from 0 at the start_time. We check the last status change before the start_time for each user. As such,
// the result shows the total number of users in each status on any particular day.
func (q *sqlQuerier) GetUserStatusCounts(ctx context.Context, arg GetUserStatusCountsParams) ([]GetUserStatusCountsRow, error) {
	rows, err := q.db.QueryContext(ctx, getUserStatusCounts, arg.StartTime, arg.EndTime, arg.Interval)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetUserStatusCountsRow
	for rows.Next() {
		var i GetUserStatusCountsRow
		if err := rows.Scan(&i.Date, &i.Status, &i.Count); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// upsertTemplateUsageStats rolls up workspace_app_stats and
// workspace_agent_stats into half-hour buckets keyed by
// (start_time, template_id, user_id) and upserts them into
// template_usage_stats. The lookback window starts one hour before the most
// recent existing bucket (see the latest_start CTE) so recent buckets are
// recomputed and updated via ON CONFLICT.
const upsertTemplateUsageStats = `-- name: UpsertTemplateUsageStats :exec
WITH
	latest_start AS (
		SELECT
			-- Truncate to hour so that we always look at even ranges of data.
			date_trunc('hour', COALESCE(
				MAX(start_time) - '1 hour'::interval,
				-- Fallback when there are no template usage stats yet.
				-- App stats can exist before this, but not agent stats,
				-- limit the lookback to avoid inconsistency.
				(SELECT MIN(created_at) FROM workspace_agent_stats)
			)) AS t
		FROM
			template_usage_stats
	),
	workspace_app_stat_buckets AS (
		SELECT
			-- Truncate the minute to the nearest half hour, this is the bucket size
			-- for the data.
			date_trunc('hour', s.minute_bucket) + trunc(date_part('minute', s.minute_bucket) / 30) * 30 * '1 minute'::interval AS time_bucket,
			w.template_id,
			was.user_id,
			-- Both app stats and agent stats track web terminal usage, but
			-- by different means. The app stats value should be more
			-- accurate so we don't want to discard it just yet.
			CASE
				WHEN was.access_method = 'terminal'
				THEN '[terminal]' -- Unique name, app names can't contain brackets.
				ELSE was.slug_or_port
			END AS app_name,
			COUNT(DISTINCT s.minute_bucket) AS app_minutes,
			-- Store each unique minute bucket for later merge between datasets.
			array_agg(DISTINCT s.minute_bucket) AS minute_buckets
		FROM
			workspace_app_stats AS was
		JOIN
			workspaces AS w
		ON
			w.id = was.workspace_id
		-- Generate a series of minute buckets for each session for computing the
		-- mintes/bucket.
		CROSS JOIN
			generate_series(
				date_trunc('minute', was.session_started_at),
				-- Subtract 1 μs to avoid creating an extra series.
				date_trunc('minute', was.session_ended_at - '1 microsecond'::interval),
				'1 minute'::interval
			) AS s(minute_bucket)
		WHERE
			-- s.minute_bucket >= @start_time::timestamptz
			-- AND s.minute_bucket < @end_time::timestamptz
			s.minute_bucket >= (SELECT t FROM latest_start)
			AND s.minute_bucket < NOW()
		GROUP BY
			time_bucket, w.template_id, was.user_id, was.access_method, was.slug_or_port
	),
	agent_stats_buckets AS (
		SELECT
			-- Truncate the minute to the nearest half hour, this is the bucket size
			-- for the data.
			date_trunc('hour', created_at) + trunc(date_part('minute', created_at) / 30) * 30 * '1 minute'::interval AS time_bucket,
			template_id,
			user_id,
			-- Store each unique minute bucket for later merge between datasets.
			array_agg(
				DISTINCT CASE
				WHEN
					session_count_ssh > 0
					-- TODO(mafredri): Enable when we have the column.
					-- OR session_count_sftp > 0
					OR session_count_reconnecting_pty > 0
					OR session_count_vscode > 0
					OR session_count_jetbrains > 0
				THEN
					date_trunc('minute', created_at)
				ELSE
					NULL
				END
			) AS minute_buckets,
			COUNT(DISTINCT CASE WHEN session_count_ssh > 0 THEN date_trunc('minute', created_at) ELSE NULL END) AS ssh_mins,
			-- TODO(mafredri): Enable when we have the column.
			-- COUNT(DISTINCT CASE WHEN session_count_sftp > 0 THEN date_trunc('minute', created_at) ELSE NULL END) AS sftp_mins,
			COUNT(DISTINCT CASE WHEN session_count_reconnecting_pty > 0 THEN date_trunc('minute', created_at) ELSE NULL END) AS reconnecting_pty_mins,
			COUNT(DISTINCT CASE WHEN session_count_vscode > 0 THEN date_trunc('minute', created_at) ELSE NULL END) AS vscode_mins,
			COUNT(DISTINCT CASE WHEN session_count_jetbrains > 0 THEN date_trunc('minute', created_at) ELSE NULL END) AS jetbrains_mins,
			-- NOTE(mafredri): The agent stats are currently very unreliable, and
			-- sometimes the connections are missing, even during active sessions.
			-- Since we can't fully rely on this, we check for "any connection
			-- during this half-hour". A better solution here would be preferable.
			MAX(connection_count) > 0 AS has_connection
		FROM
			workspace_agent_stats
		WHERE
			-- created_at >= @start_time::timestamptz
			-- AND created_at < @end_time::timestamptz
			created_at >= (SELECT t FROM latest_start)
			AND created_at < NOW()
			-- Inclusion criteria to filter out empty results.
			AND (
				session_count_ssh > 0
				-- TODO(mafredri): Enable when we have the column.
				-- OR session_count_sftp > 0
				OR session_count_reconnecting_pty > 0
				OR session_count_vscode > 0
				OR session_count_jetbrains > 0
			)
		GROUP BY
			time_bucket, template_id, user_id
	),
	stats AS (
		SELECT
			stats.time_bucket AS start_time,
			stats.time_bucket + '30 minutes'::interval AS end_time,
			stats.template_id,
			stats.user_id,
			-- Sum/distinct to handle zero/duplicate values due union and to unnest.
			COUNT(DISTINCT minute_bucket) AS usage_mins,
			array_agg(DISTINCT minute_bucket) AS minute_buckets,
			SUM(DISTINCT stats.ssh_mins) AS ssh_mins,
			SUM(DISTINCT stats.sftp_mins) AS sftp_mins,
			SUM(DISTINCT stats.reconnecting_pty_mins) AS reconnecting_pty_mins,
			SUM(DISTINCT stats.vscode_mins) AS vscode_mins,
			SUM(DISTINCT stats.jetbrains_mins) AS jetbrains_mins,
			-- This is what we unnested, re-nest as json.
			jsonb_object_agg(stats.app_name, stats.app_minutes) FILTER (WHERE stats.app_name IS NOT NULL) AS app_usage_mins
		FROM (
			SELECT
				time_bucket,
				template_id,
				user_id,
				0 AS ssh_mins,
				0 AS sftp_mins,
				0 AS reconnecting_pty_mins,
				0 AS vscode_mins,
				0 AS jetbrains_mins,
				app_name,
				app_minutes,
				minute_buckets
			FROM
				workspace_app_stat_buckets

			UNION ALL

			SELECT
				time_bucket,
				template_id,
				user_id,
				ssh_mins,
				-- TODO(mafredri): Enable when we have the column.
				0 AS sftp_mins,
				reconnecting_pty_mins,
				vscode_mins,
				jetbrains_mins,
				NULL AS app_name,
				NULL AS app_minutes,
				minute_buckets
			FROM
				agent_stats_buckets
			WHERE
				-- See note in the agent_stats_buckets CTE.
				has_connection
		) AS stats, unnest(minute_buckets) AS minute_bucket
		GROUP BY
			stats.time_bucket, stats.template_id, stats.user_id
	),
	minute_buckets AS (
		-- Create distinct minute buckets for user-activity, so we can filter out
		-- irrelevant latencies.
		SELECT DISTINCT ON (stats.start_time, stats.template_id, stats.user_id, minute_bucket)
			stats.start_time,
			stats.template_id,
			stats.user_id,
			minute_bucket
		FROM
			stats, unnest(minute_buckets) AS minute_bucket
	),
	latencies AS (
		-- Select all non-zero latencies for all the minutes that a user used the
		-- workspace in some way.
		SELECT
			mb.start_time,
			mb.template_id,
			mb.user_id,
			-- TODO(mafredri): We're doing medians on medians here, we may want to
			-- improve upon this at some point.
			PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY was.connection_median_latency_ms)::real AS median_latency_ms
		FROM
			minute_buckets AS mb
		JOIN
			workspace_agent_stats AS was
		ON
			was.created_at >= (SELECT t FROM latest_start)
			AND was.created_at < NOW()
			AND date_trunc('minute', was.created_at) = mb.minute_bucket
			AND was.template_id = mb.template_id
			AND was.user_id = mb.user_id
			AND was.connection_median_latency_ms > 0
		GROUP BY
			mb.start_time, mb.template_id, mb.user_id
	)

INSERT INTO template_usage_stats AS tus (
	start_time,
	end_time,
	template_id,
	user_id,
	usage_mins,
	median_latency_ms,
	ssh_mins,
	sftp_mins,
	reconnecting_pty_mins,
	vscode_mins,
	jetbrains_mins,
	app_usage_mins
) (
	SELECT
		stats.start_time,
		stats.end_time,
		stats.template_id,
		stats.user_id,
		stats.usage_mins,
		latencies.median_latency_ms,
		stats.ssh_mins,
		stats.sftp_mins,
		stats.reconnecting_pty_mins,
		stats.vscode_mins,
		stats.jetbrains_mins,
		stats.app_usage_mins
	FROM
		stats
	LEFT JOIN
		latencies
	ON
		-- The latencies group-by ensures there at most one row.
		latencies.start_time = stats.start_time
		AND latencies.template_id = stats.template_id
		AND latencies.user_id = stats.user_id
)
ON CONFLICT
	(start_time, template_id, user_id)
DO UPDATE
SET
	usage_mins = EXCLUDED.usage_mins,
	median_latency_ms = EXCLUDED.median_latency_ms,
	ssh_mins = EXCLUDED.ssh_mins,
	sftp_mins = EXCLUDED.sftp_mins,
	reconnecting_pty_mins = EXCLUDED.reconnecting_pty_mins,
	vscode_mins = EXCLUDED.vscode_mins,
	jetbrains_mins = EXCLUDED.jetbrains_mins,
	app_usage_mins = EXCLUDED.app_usage_mins
WHERE
	(tus.*) IS DISTINCT FROM (EXCLUDED.*)
`

// This query aggregates the workspace_agent_stats and workspace_app_stats data
// into a single table for efficient storage and querying. Half-hour buckets are
// used to store the data, and the minutes are summed for each user and template
// combination. The result is stored in the template_usage_stats table.
func (q *sqlQuerier) UpsertTemplateUsageStats(ctx context.Context) error {
	// The query is entirely self-contained (window derived from existing
	// data); no arguments are required.
	_, err := q.db.ExecContext(ctx, upsertTemplateUsageStats)
	return err
}
|
|
|
|
// deleteLicense removes a license by id, returning the deleted id so the
// caller can confirm a row actually existed (sql.ErrNoRows otherwise).
const deleteLicense = `-- name: DeleteLicense :one
DELETE
FROM licenses
WHERE id = $1
RETURNING id
`

// DeleteLicense deletes the license with the given id and returns that id.
func (q *sqlQuerier) DeleteLicense(ctx context.Context, id int32) (int32, error) {
	row := q.db.QueryRowContext(ctx, deleteLicense, id)
	// Scan the RETURNING value back into the argument variable.
	err := row.Scan(&id)
	return id, err
}
|
|
|
|
// getLicenseByID fetches a single license row by primary key.
const getLicenseByID = `-- name: GetLicenseByID :one
SELECT
	id, uploaded_at, jwt, exp, uuid
FROM
	licenses
WHERE
	id = $1
LIMIT
	1
`

// GetLicenseByID returns the license with the given id, or sql.ErrNoRows via
// Scan when it does not exist.
func (q *sqlQuerier) GetLicenseByID(ctx context.Context, id int32) (License, error) {
	row := q.db.QueryRowContext(ctx, getLicenseByID, id)
	var i License
	err := row.Scan(
		&i.ID,
		&i.UploadedAt,
		&i.JWT,
		&i.Exp,
		&i.UUID,
	)
	return i, err
}
|
|
|
|
// getLicenses lists every license ordered by id.
const getLicenses = `-- name: GetLicenses :many
SELECT id, uploaded_at, jwt, exp, uuid
FROM licenses
ORDER BY (id)
`

// GetLicenses returns all licenses ordered by id ascending.
func (q *sqlQuerier) GetLicenses(ctx context.Context) ([]License, error) {
	rows, err := q.db.QueryContext(ctx, getLicenses)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []License
	for rows.Next() {
		var i License
		if err := rows.Scan(
			&i.ID,
			&i.UploadedAt,
			&i.JWT,
			&i.Exp,
			&i.UUID,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getUnexpiredLicenses lists licenses whose expiry is still in the future,
// ordered by id.
const getUnexpiredLicenses = `-- name: GetUnexpiredLicenses :many
SELECT id, uploaded_at, jwt, exp, uuid
FROM licenses
WHERE exp > NOW()
ORDER BY (id)
`

// GetUnexpiredLicenses returns all licenses with exp > NOW(), ordered by id.
func (q *sqlQuerier) GetUnexpiredLicenses(ctx context.Context) ([]License, error) {
	rows, err := q.db.QueryContext(ctx, getUnexpiredLicenses)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []License
	for rows.Next() {
		var i License
		if err := rows.Scan(
			&i.ID,
			&i.UploadedAt,
			&i.JWT,
			&i.Exp,
			&i.UUID,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// insertLicense creates a license row; the id column is database-assigned and
// returned via RETURNING.
const insertLicense = `-- name: InsertLicense :one
INSERT INTO
	licenses (
	uploaded_at,
	jwt,
	exp,
	uuid
)
VALUES
	($1, $2, $3, $4) RETURNING id, uploaded_at, jwt, exp, uuid
`

// InsertLicenseParams holds the column values for InsertLicense.
type InsertLicenseParams struct {
	UploadedAt time.Time `db:"uploaded_at" json:"uploaded_at"`
	JWT        string    `db:"jwt" json:"jwt"`
	Exp        time.Time `db:"exp" json:"exp"`
	UUID       uuid.UUID `db:"uuid" json:"uuid"`
}

// InsertLicense inserts a new license and returns the full stored row,
// including the generated id.
func (q *sqlQuerier) InsertLicense(ctx context.Context, arg InsertLicenseParams) (License, error) {
	row := q.db.QueryRowContext(ctx, insertLicense,
		arg.UploadedAt,
		arg.JWT,
		arg.Exp,
		arg.UUID,
	)
	var i License
	err := row.Scan(
		&i.ID,
		&i.UploadedAt,
		&i.JWT,
		&i.Exp,
		&i.UUID,
	)
	return i, err
}
|
|
|
|
// acquireLock takes a transaction-scoped Postgres advisory lock, blocking
// until it is available.
const acquireLock = `-- name: AcquireLock :exec
SELECT pg_advisory_xact_lock($1)
`

// Blocks until the lock is acquired.
//
// This must be called from within a transaction. The lock will be automatically
// released when the transaction ends.
func (q *sqlQuerier) AcquireLock(ctx context.Context, pgAdvisoryXactLock int64) error {
	_, err := q.db.ExecContext(ctx, acquireLock, pgAdvisoryXactLock)
	return err
}
|
|
|
|
// tryAcquireLock attempts a transaction-scoped Postgres advisory lock without
// blocking; the SELECT yields true/false.
const tryAcquireLock = `-- name: TryAcquireLock :one
SELECT pg_try_advisory_xact_lock($1)
`

// Non blocking lock. Returns true if the lock was acquired, false otherwise.
//
// This must be called from within a transaction. The lock will be automatically
// released when the transaction ends.
func (q *sqlQuerier) TryAcquireLock(ctx context.Context, pgTryAdvisoryXactLock int64) (bool, error) {
	row := q.db.QueryRowContext(ctx, tryAcquireLock, pgTryAdvisoryXactLock)
	var pg_try_advisory_xact_lock bool
	err := row.Scan(&pg_try_advisory_xact_lock)
	return pg_try_advisory_xact_lock, err
}
|
|
|
|
// acquireNotificationMessages leases up to $4 pending/retryable messages to
// notifier $1 for $2 seconds, skipping messages at/over $3 attempts, and
// returns each leased message joined with its template and the user's
// notification preference. FOR UPDATE ... SKIP LOCKED keeps concurrent
// notifiers from acquiring the same rows.
const acquireNotificationMessages = `-- name: AcquireNotificationMessages :many
WITH acquired AS (
	UPDATE
		notification_messages
			SET queued_seconds = GREATEST(0, EXTRACT(EPOCH FROM (NOW() - updated_at)))::FLOAT,
				updated_at = NOW(),
				status = 'leased'::notification_message_status,
				status_reason = 'Leased by notifier ' || $1::uuid,
				leased_until = NOW() + CONCAT($2::int, ' seconds')::interval
			WHERE id IN (SELECT nm.id
				FROM notification_messages AS nm
				WHERE (
					(
						-- message is in acquirable states
						nm.status IN (
							'pending'::notification_message_status,
							'temporary_failure'::notification_message_status
						)
					)
					-- or somehow the message was left in leased for longer than its lease period
					OR (
						nm.status = 'leased'::notification_message_status
						AND nm.leased_until < NOW()
					)
				)
				AND (
					-- exclude all messages which have exceeded the max attempts; these will be purged later
					nm.attempt_count IS NULL OR nm.attempt_count < $3::int
				)
				-- if set, do not retry until we've exceeded the wait time
				AND (
					CASE
						WHEN nm.next_retry_after IS NOT NULL THEN nm.next_retry_after < NOW()
						ELSE true
					END
				)
				ORDER BY nm.created_at ASC
				-- Ensure that multiple concurrent readers cannot retrieve the same rows
				FOR UPDATE OF nm
				SKIP LOCKED
				LIMIT $4)
			RETURNING id, notification_template_id, user_id, method, status, status_reason, created_by, payload, attempt_count, targets, created_at, updated_at, leased_until, next_retry_after, queued_seconds, dedupe_hash)
SELECT
	-- message
	nm.id,
	nm.payload,
	nm.method,
	nm.attempt_count::int AS attempt_count,
	nm.queued_seconds::float AS queued_seconds,
	-- template
	nt.id AS template_id,
	nt.title_template,
	nt.body_template,
	-- preferences
	(CASE WHEN np.disabled IS NULL THEN false ELSE np.disabled END)::bool AS disabled
FROM acquired nm
JOIN notification_templates nt ON nm.notification_template_id = nt.id
LEFT JOIN notification_preferences AS np
	ON (np.user_id = nm.user_id AND np.notification_template_id = nm.notification_template_id)
`

// AcquireNotificationMessagesParams holds the arguments for
// AcquireNotificationMessages.
type AcquireNotificationMessagesParams struct {
	// NotifierID identifies the acquiring notifier and is recorded in
	// status_reason ('Leased by notifier <uuid>').
	NotifierID uuid.UUID `db:"notifier_id" json:"notifier_id"`
	// LeaseSeconds sets leased_until = NOW() + LeaseSeconds seconds.
	LeaseSeconds int32 `db:"lease_seconds" json:"lease_seconds"`
	// MaxAttemptCount excludes messages with attempt_count at or above it.
	MaxAttemptCount int32 `db:"max_attempt_count" json:"max_attempt_count"`
	// Count is the LIMIT on how many messages to lease.
	Count int32 `db:"count" json:"count"`
}

// AcquireNotificationMessagesRow is one leased message joined with its
// template and the user's (possibly absent → Disabled=false) preference.
type AcquireNotificationMessagesRow struct {
	ID            uuid.UUID          `db:"id" json:"id"`
	Payload       json.RawMessage    `db:"payload" json:"payload"`
	Method        NotificationMethod `db:"method" json:"method"`
	AttemptCount  int32              `db:"attempt_count" json:"attempt_count"`
	QueuedSeconds float64            `db:"queued_seconds" json:"queued_seconds"`
	TemplateID    uuid.UUID          `db:"template_id" json:"template_id"`
	TitleTemplate string             `db:"title_template" json:"title_template"`
	BodyTemplate  string             `db:"body_template" json:"body_template"`
	Disabled      bool               `db:"disabled" json:"disabled"`
}

// Acquires the lease for a given count of notification messages, to enable concurrent dequeuing and subsequent sending.
// Only rows that aren't already leased (or ones which are leased but have exceeded their lease period) are returned.
//
// A "lease" here refers to a notifier taking ownership of a notification_messages row. A lease survives for the duration
// of CODER_NOTIFICATIONS_LEASE_PERIOD. Once a message is delivered, its status is updated and the lease expires (set to NULL).
// If a message exceeds its lease, that implies the notifier did not shutdown cleanly, or the table update failed somehow,
// and the row will then be eligible to be dequeued by another notifier.
//
// SKIP LOCKED is used to jump over locked rows. This prevents multiple notifiers from acquiring the same messages.
// See: https://www.postgresql.org/docs/9.5/sql-select.html#SQL-FOR-UPDATE-SHARE
func (q *sqlQuerier) AcquireNotificationMessages(ctx context.Context, arg AcquireNotificationMessagesParams) ([]AcquireNotificationMessagesRow, error) {
	rows, err := q.db.QueryContext(ctx, acquireNotificationMessages,
		arg.NotifierID,
		arg.LeaseSeconds,
		arg.MaxAttemptCount,
		arg.Count,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []AcquireNotificationMessagesRow
	for rows.Next() {
		var i AcquireNotificationMessagesRow
		if err := rows.Scan(
			&i.ID,
			&i.Payload,
			&i.Method,
			&i.AttemptCount,
			&i.QueuedSeconds,
			&i.TemplateID,
			&i.TitleTemplate,
			&i.BodyTemplate,
			&i.Disabled,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const bulkMarkNotificationMessagesFailed = `-- name: BulkMarkNotificationMessagesFailed :execrows
UPDATE notification_messages
SET queued_seconds = 0,
    updated_at     = subquery.failed_at,
    attempt_count  = attempt_count + 1,
    status         = CASE
                         WHEN attempt_count + 1 < $1::int THEN subquery.status
                         ELSE 'permanent_failure'::notification_message_status END,
    status_reason  = subquery.status_reason,
    leased_until   = NULL,
    next_retry_after = CASE
                           WHEN (attempt_count + 1 < $1::int)
                               THEN NOW() + CONCAT($2::int, ' seconds')::interval END
FROM (SELECT UNNEST($3::uuid[])                             AS id,
             UNNEST($4::timestamptz[])                      AS failed_at,
             UNNEST($5::notification_message_status[])      AS status,
             UNNEST($6::text[])                             AS status_reason) AS subquery
WHERE notification_messages.id = subquery.id
`

// BulkMarkNotificationMessagesFailedParams carries parallel arrays (unnested in
// SQL) so many messages can be failed in a single statement.
type BulkMarkNotificationMessagesFailedParams struct {
	MaxAttempts   int32                       `db:"max_attempts" json:"max_attempts"`
	RetryInterval int32                       `db:"retry_interval" json:"retry_interval"`
	IDs           []uuid.UUID                 `db:"ids" json:"ids"`
	FailedAts     []time.Time                 `db:"failed_ats" json:"failed_ats"`
	Statuses      []NotificationMessageStatus `db:"statuses" json:"statuses"`
	StatusReasons []string                    `db:"status_reasons" json:"status_reasons"`
}

// BulkMarkNotificationMessagesFailed records a delivery failure for each given
// message, releasing its lease; once attempt_count reaches MaxAttempts the
// status becomes 'permanent_failure' and no retry is scheduled. Returns the
// number of rows updated.
func (q *sqlQuerier) BulkMarkNotificationMessagesFailed(ctx context.Context, arg BulkMarkNotificationMessagesFailedParams) (int64, error) {
	result, err := q.db.ExecContext(ctx, bulkMarkNotificationMessagesFailed,
		arg.MaxAttempts,
		arg.RetryInterval,
		pq.Array(arg.IDs),
		pq.Array(arg.FailedAts),
		pq.Array(arg.Statuses),
		pq.Array(arg.StatusReasons),
	)
	if err != nil {
		return 0, err
	}
	return result.RowsAffected()
}
|
|
|
|
const bulkMarkNotificationMessagesSent = `-- name: BulkMarkNotificationMessagesSent :execrows
UPDATE notification_messages
SET queued_seconds   = 0,
    updated_at       = new_values.sent_at,
    attempt_count    = attempt_count + 1,
    status           = 'sent'::notification_message_status,
    status_reason    = NULL,
    leased_until     = NULL,
    next_retry_after = NULL
FROM (SELECT UNNEST($1::uuid[])        AS id,
             UNNEST($2::timestamptz[]) AS sent_at)
         AS new_values
WHERE notification_messages.id = new_values.id
`

// BulkMarkNotificationMessagesSentParams carries parallel ID/timestamp arrays
// that are unnested and joined by ID in the UPDATE.
type BulkMarkNotificationMessagesSentParams struct {
	IDs     []uuid.UUID `db:"ids" json:"ids"`
	SentAts []time.Time `db:"sent_ats" json:"sent_ats"`
}

// BulkMarkNotificationMessagesSent marks each given message as 'sent' at its
// corresponding timestamp, clearing lease/retry state. Returns the number of
// rows updated.
func (q *sqlQuerier) BulkMarkNotificationMessagesSent(ctx context.Context, arg BulkMarkNotificationMessagesSentParams) (int64, error) {
	result, err := q.db.ExecContext(ctx, bulkMarkNotificationMessagesSent, pq.Array(arg.IDs), pq.Array(arg.SentAts))
	if err != nil {
		return 0, err
	}
	return result.RowsAffected()
}
|
|
|
|
const deleteAllWebpushSubscriptions = `-- name: DeleteAllWebpushSubscriptions :exec
TRUNCATE TABLE webpush_subscriptions
`

// Deletes all existing webpush subscriptions.
// This should be called when the VAPID keypair is regenerated, as the old
// keypair will no longer be valid and all existing subscriptions will need to
// be recreated.
func (q *sqlQuerier) DeleteAllWebpushSubscriptions(ctx context.Context) error {
	_, err := q.db.ExecContext(ctx, deleteAllWebpushSubscriptions)
	return err
}
|
|
|
|
const deleteOldNotificationMessages = `-- name: DeleteOldNotificationMessages :exec
DELETE
FROM notification_messages
WHERE id IN
      (SELECT id
       FROM notification_messages AS nested
       WHERE nested.updated_at < NOW() - INTERVAL '7 days')
`

// Delete all notification messages which have not been updated for over a week.
func (q *sqlQuerier) DeleteOldNotificationMessages(ctx context.Context) error {
	_, err := q.db.ExecContext(ctx, deleteOldNotificationMessages)
	return err
}
|
|
|
|
const deleteWebpushSubscriptionByUserIDAndEndpoint = `-- name: DeleteWebpushSubscriptionByUserIDAndEndpoint :exec
DELETE FROM webpush_subscriptions
WHERE user_id = $1 AND endpoint = $2
`

// DeleteWebpushSubscriptionByUserIDAndEndpointParams identifies a single
// subscription by its owner and push endpoint URL.
type DeleteWebpushSubscriptionByUserIDAndEndpointParams struct {
	UserID   uuid.UUID `db:"user_id" json:"user_id"`
	Endpoint string    `db:"endpoint" json:"endpoint"`
}

// DeleteWebpushSubscriptionByUserIDAndEndpoint removes one user's subscription
// for a specific push endpoint.
func (q *sqlQuerier) DeleteWebpushSubscriptionByUserIDAndEndpoint(ctx context.Context, arg DeleteWebpushSubscriptionByUserIDAndEndpointParams) error {
	_, err := q.db.ExecContext(ctx, deleteWebpushSubscriptionByUserIDAndEndpoint, arg.UserID, arg.Endpoint)
	return err
}
|
|
|
|
const deleteWebpushSubscriptions = `-- name: DeleteWebpushSubscriptions :exec
DELETE FROM webpush_subscriptions
WHERE id = ANY($1::uuid[])
`

// DeleteWebpushSubscriptions removes the subscriptions with the given IDs.
func (q *sqlQuerier) DeleteWebpushSubscriptions(ctx context.Context, ids []uuid.UUID) error {
	_, err := q.db.ExecContext(ctx, deleteWebpushSubscriptions, pq.Array(ids))
	return err
}
|
|
|
|
const enqueueNotificationMessage = `-- name: EnqueueNotificationMessage :exec
INSERT INTO notification_messages (id, notification_template_id, user_id, method, payload, targets, created_by, created_at)
VALUES ($1,
        $2,
        $3,
        $4::notification_method,
        $5::jsonb,
        $6,
        $7,
        $8)
`

// EnqueueNotificationMessageParams holds the column values for a new
// notification_messages row.
type EnqueueNotificationMessageParams struct {
	ID                     uuid.UUID          `db:"id" json:"id"`
	NotificationTemplateID uuid.UUID          `db:"notification_template_id" json:"notification_template_id"`
	UserID                 uuid.UUID          `db:"user_id" json:"user_id"`
	Method                 NotificationMethod `db:"method" json:"method"`
	Payload                json.RawMessage    `db:"payload" json:"payload"`
	Targets                []uuid.UUID        `db:"targets" json:"targets"`
	CreatedBy              string             `db:"created_by" json:"created_by"`
	CreatedAt              time.Time          `db:"created_at" json:"created_at"`
}

// EnqueueNotificationMessage inserts a new notification message to be picked
// up later by a notifier.
func (q *sqlQuerier) EnqueueNotificationMessage(ctx context.Context, arg EnqueueNotificationMessageParams) error {
	_, err := q.db.ExecContext(ctx, enqueueNotificationMessage,
		arg.ID,
		arg.NotificationTemplateID,
		arg.UserID,
		arg.Method,
		arg.Payload,
		pq.Array(arg.Targets),
		arg.CreatedBy,
		arg.CreatedAt,
	)
	return err
}
|
|
|
|
const fetchNewMessageMetadata = `-- name: FetchNewMessageMetadata :one
SELECT nt.name                                                    AS notification_name,
       nt.id                                                      AS notification_template_id,
       nt.actions                                                 AS actions,
       nt.method                                                  AS custom_method,
       u.id                                                       AS user_id,
       u.email                                                    AS user_email,
       COALESCE(NULLIF(u.name, ''), NULLIF(u.username, ''))::text AS user_name,
       u.username                                                 AS user_username
FROM notification_templates nt,
     users u
WHERE nt.id = $1
  AND u.id = $2
`

// FetchNewMessageMetadataParams identifies the template and recipient for
// which payload metadata is fetched.
type FetchNewMessageMetadataParams struct {
	NotificationTemplateID uuid.UUID `db:"notification_template_id" json:"notification_template_id"`
	UserID                 uuid.UUID `db:"user_id" json:"user_id"`
}

// FetchNewMessageMetadataRow combines template fields and user identity.
// UserName falls back from u.name to u.username via COALESCE/NULLIF.
type FetchNewMessageMetadataRow struct {
	NotificationName       string                 `db:"notification_name" json:"notification_name"`
	NotificationTemplateID uuid.UUID              `db:"notification_template_id" json:"notification_template_id"`
	Actions                []byte                 `db:"actions" json:"actions"`
	CustomMethod           NullNotificationMethod `db:"custom_method" json:"custom_method"`
	UserID                 uuid.UUID              `db:"user_id" json:"user_id"`
	UserEmail              string                 `db:"user_email" json:"user_email"`
	UserName               string                 `db:"user_name" json:"user_name"`
	UserUsername           string                 `db:"user_username" json:"user_username"`
}

// This is used to build up the notification_message's JSON payload.
func (q *sqlQuerier) FetchNewMessageMetadata(ctx context.Context, arg FetchNewMessageMetadataParams) (FetchNewMessageMetadataRow, error) {
	row := q.db.QueryRowContext(ctx, fetchNewMessageMetadata, arg.NotificationTemplateID, arg.UserID)
	var i FetchNewMessageMetadataRow
	err := row.Scan(
		&i.NotificationName,
		&i.NotificationTemplateID,
		&i.Actions,
		&i.CustomMethod,
		&i.UserID,
		&i.UserEmail,
		&i.UserName,
		&i.UserUsername,
	)
	return i, err
}
|
|
|
|
const getNotificationMessagesByStatus = `-- name: GetNotificationMessagesByStatus :many
SELECT id, notification_template_id, user_id, method, status, status_reason, created_by, payload, attempt_count, targets, created_at, updated_at, leased_until, next_retry_after, queued_seconds, dedupe_hash
FROM notification_messages
WHERE status = $1
LIMIT $2::int
`

// GetNotificationMessagesByStatusParams filters messages by status with a
// row-count cap.
type GetNotificationMessagesByStatusParams struct {
	Status NotificationMessageStatus `db:"status" json:"status"`
	Limit  int32                     `db:"limit" json:"limit"`
}

// GetNotificationMessagesByStatus returns up to Limit messages in the given
// status.
func (q *sqlQuerier) GetNotificationMessagesByStatus(ctx context.Context, arg GetNotificationMessagesByStatusParams) ([]NotificationMessage, error) {
	rows, err := q.db.QueryContext(ctx, getNotificationMessagesByStatus, arg.Status, arg.Limit)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []NotificationMessage
	for rows.Next() {
		var i NotificationMessage
		if err := rows.Scan(
			&i.ID,
			&i.NotificationTemplateID,
			&i.UserID,
			&i.Method,
			&i.Status,
			&i.StatusReason,
			&i.CreatedBy,
			&i.Payload,
			&i.AttemptCount,
			pq.Array(&i.Targets),
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.LeasedUntil,
			&i.NextRetryAfter,
			&i.QueuedSeconds,
			&i.DedupeHash,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getNotificationReportGeneratorLogByTemplate = `-- name: GetNotificationReportGeneratorLogByTemplate :one
SELECT
	notification_template_id, last_generated_at
FROM
	notification_report_generator_logs
WHERE
	notification_template_id = $1::uuid
`

// Fetch the notification report generator log indicating recent activity.
func (q *sqlQuerier) GetNotificationReportGeneratorLogByTemplate(ctx context.Context, templateID uuid.UUID) (NotificationReportGeneratorLog, error) {
	row := q.db.QueryRowContext(ctx, getNotificationReportGeneratorLogByTemplate, templateID)
	var i NotificationReportGeneratorLog
	err := row.Scan(&i.NotificationTemplateID, &i.LastGeneratedAt)
	return i, err
}
|
|
|
|
const getNotificationTemplateByID = `-- name: GetNotificationTemplateByID :one
SELECT id, name, title_template, body_template, actions, "group", method, kind, enabled_by_default
FROM notification_templates
WHERE id = $1::uuid
`

// GetNotificationTemplateByID fetches a single notification template by its
// primary key.
func (q *sqlQuerier) GetNotificationTemplateByID(ctx context.Context, id uuid.UUID) (NotificationTemplate, error) {
	row := q.db.QueryRowContext(ctx, getNotificationTemplateByID, id)
	var i NotificationTemplate
	err := row.Scan(
		&i.ID,
		&i.Name,
		&i.TitleTemplate,
		&i.BodyTemplate,
		&i.Actions,
		&i.Group,
		&i.Method,
		&i.Kind,
		&i.EnabledByDefault,
	)
	return i, err
}
|
|
|
|
const getNotificationTemplatesByKind = `-- name: GetNotificationTemplatesByKind :many
SELECT id, name, title_template, body_template, actions, "group", method, kind, enabled_by_default
FROM notification_templates
WHERE kind = $1::notification_template_kind
ORDER BY name ASC
`

// GetNotificationTemplatesByKind lists all templates of the given kind,
// ordered by name.
func (q *sqlQuerier) GetNotificationTemplatesByKind(ctx context.Context, kind NotificationTemplateKind) ([]NotificationTemplate, error) {
	rows, err := q.db.QueryContext(ctx, getNotificationTemplatesByKind, kind)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []NotificationTemplate
	for rows.Next() {
		var i NotificationTemplate
		if err := rows.Scan(
			&i.ID,
			&i.Name,
			&i.TitleTemplate,
			&i.BodyTemplate,
			&i.Actions,
			&i.Group,
			&i.Method,
			&i.Kind,
			&i.EnabledByDefault,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getUserNotificationPreferences = `-- name: GetUserNotificationPreferences :many
SELECT user_id, notification_template_id, disabled, created_at, updated_at
FROM notification_preferences
WHERE user_id = $1::uuid
`

// GetUserNotificationPreferences returns all per-template notification
// preference rows for one user.
func (q *sqlQuerier) GetUserNotificationPreferences(ctx context.Context, userID uuid.UUID) ([]NotificationPreference, error) {
	rows, err := q.db.QueryContext(ctx, getUserNotificationPreferences, userID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []NotificationPreference
	for rows.Next() {
		var i NotificationPreference
		if err := rows.Scan(
			&i.UserID,
			&i.NotificationTemplateID,
			&i.Disabled,
			&i.CreatedAt,
			&i.UpdatedAt,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getWebpushSubscriptionsByUserID = `-- name: GetWebpushSubscriptionsByUserID :many
SELECT id, user_id, created_at, endpoint, endpoint_p256dh_key, endpoint_auth_key
FROM webpush_subscriptions
WHERE user_id = $1::uuid
`

// GetWebpushSubscriptionsByUserID returns every webpush subscription
// registered by the given user.
func (q *sqlQuerier) GetWebpushSubscriptionsByUserID(ctx context.Context, userID uuid.UUID) ([]WebpushSubscription, error) {
	rows, err := q.db.QueryContext(ctx, getWebpushSubscriptionsByUserID, userID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WebpushSubscription
	for rows.Next() {
		var i WebpushSubscription
		if err := rows.Scan(
			&i.ID,
			&i.UserID,
			&i.CreatedAt,
			&i.Endpoint,
			&i.EndpointP256dhKey,
			&i.EndpointAuthKey,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const insertWebpushSubscription = `-- name: InsertWebpushSubscription :one
INSERT INTO webpush_subscriptions (user_id, created_at, endpoint, endpoint_p256dh_key, endpoint_auth_key)
VALUES ($1, $2, $3, $4, $5)
RETURNING id, user_id, created_at, endpoint, endpoint_p256dh_key, endpoint_auth_key
`

// InsertWebpushSubscriptionParams holds the column values for a new
// webpush_subscriptions row (the ID is generated by the database).
type InsertWebpushSubscriptionParams struct {
	UserID            uuid.UUID `db:"user_id" json:"user_id"`
	CreatedAt         time.Time `db:"created_at" json:"created_at"`
	Endpoint          string    `db:"endpoint" json:"endpoint"`
	EndpointP256dhKey string    `db:"endpoint_p256dh_key" json:"endpoint_p256dh_key"`
	EndpointAuthKey   string    `db:"endpoint_auth_key" json:"endpoint_auth_key"`
}

// InsertWebpushSubscription stores a new subscription and returns the
// inserted row (including its generated ID).
func (q *sqlQuerier) InsertWebpushSubscription(ctx context.Context, arg InsertWebpushSubscriptionParams) (WebpushSubscription, error) {
	row := q.db.QueryRowContext(ctx, insertWebpushSubscription,
		arg.UserID,
		arg.CreatedAt,
		arg.Endpoint,
		arg.EndpointP256dhKey,
		arg.EndpointAuthKey,
	)
	var i WebpushSubscription
	err := row.Scan(
		&i.ID,
		&i.UserID,
		&i.CreatedAt,
		&i.Endpoint,
		&i.EndpointP256dhKey,
		&i.EndpointAuthKey,
	)
	return i, err
}
|
|
|
|
const updateNotificationTemplateMethodByID = `-- name: UpdateNotificationTemplateMethodByID :one
UPDATE notification_templates
SET method = $1::notification_method
WHERE id = $2::uuid
RETURNING id, name, title_template, body_template, actions, "group", method, kind, enabled_by_default
`

// UpdateNotificationTemplateMethodByIDParams sets a (nullable) delivery
// method override on a template.
type UpdateNotificationTemplateMethodByIDParams struct {
	Method NullNotificationMethod `db:"method" json:"method"`
	ID     uuid.UUID              `db:"id" json:"id"`
}

// UpdateNotificationTemplateMethodByID updates a template's delivery method
// and returns the updated row.
func (q *sqlQuerier) UpdateNotificationTemplateMethodByID(ctx context.Context, arg UpdateNotificationTemplateMethodByIDParams) (NotificationTemplate, error) {
	row := q.db.QueryRowContext(ctx, updateNotificationTemplateMethodByID, arg.Method, arg.ID)
	var i NotificationTemplate
	err := row.Scan(
		&i.ID,
		&i.Name,
		&i.TitleTemplate,
		&i.BodyTemplate,
		&i.Actions,
		&i.Group,
		&i.Method,
		&i.Kind,
		&i.EnabledByDefault,
	)
	return i, err
}
|
|
|
|
const updateUserNotificationPreferences = `-- name: UpdateUserNotificationPreferences :execrows
INSERT
INTO notification_preferences (user_id, notification_template_id, disabled)
SELECT $1::uuid, new_values.notification_template_id, new_values.disabled
FROM (SELECT UNNEST($2::uuid[]) AS notification_template_id,
             UNNEST($3::bool[]) AS disabled) AS new_values
ON CONFLICT (user_id, notification_template_id) DO UPDATE
    SET disabled   = EXCLUDED.disabled,
        updated_at = CURRENT_TIMESTAMP
`

// UpdateUserNotificationPreferencesParams carries parallel template-ID /
// disabled arrays that are unnested into one row each.
type UpdateUserNotificationPreferencesParams struct {
	UserID                  uuid.UUID   `db:"user_id" json:"user_id"`
	NotificationTemplateIds []uuid.UUID `db:"notification_template_ids" json:"notification_template_ids"`
	Disableds               []bool      `db:"disableds" json:"disableds"`
}

// UpdateUserNotificationPreferences upserts a user's per-template preference
// rows and returns the number of rows affected.
func (q *sqlQuerier) UpdateUserNotificationPreferences(ctx context.Context, arg UpdateUserNotificationPreferencesParams) (int64, error) {
	result, err := q.db.ExecContext(ctx, updateUserNotificationPreferences, arg.UserID, pq.Array(arg.NotificationTemplateIds), pq.Array(arg.Disableds))
	if err != nil {
		return 0, err
	}
	return result.RowsAffected()
}
|
|
|
|
const upsertNotificationReportGeneratorLog = `-- name: UpsertNotificationReportGeneratorLog :exec
INSERT INTO notification_report_generator_logs (notification_template_id, last_generated_at) VALUES ($1, $2)
ON CONFLICT (notification_template_id) DO UPDATE set last_generated_at = EXCLUDED.last_generated_at
WHERE notification_report_generator_logs.notification_template_id = EXCLUDED.notification_template_id
`

// UpsertNotificationReportGeneratorLogParams records when a report was last
// generated for a template.
type UpsertNotificationReportGeneratorLogParams struct {
	NotificationTemplateID uuid.UUID `db:"notification_template_id" json:"notification_template_id"`
	LastGeneratedAt        time.Time `db:"last_generated_at" json:"last_generated_at"`
}

// Insert or update notification report generator logs with recent activity.
func (q *sqlQuerier) UpsertNotificationReportGeneratorLog(ctx context.Context, arg UpsertNotificationReportGeneratorLogParams) error {
	_, err := q.db.ExecContext(ctx, upsertNotificationReportGeneratorLog, arg.NotificationTemplateID, arg.LastGeneratedAt)
	return err
}
|
|
|
|
const countUnreadInboxNotificationsByUserID = `-- name: CountUnreadInboxNotificationsByUserID :one
SELECT COUNT(*) FROM inbox_notifications WHERE user_id = $1 AND read_at IS NULL
`

// CountUnreadInboxNotificationsByUserID returns the number of unread
// (read_at IS NULL) inbox notifications for a user.
func (q *sqlQuerier) CountUnreadInboxNotificationsByUserID(ctx context.Context, userID uuid.UUID) (int64, error) {
	row := q.db.QueryRowContext(ctx, countUnreadInboxNotificationsByUserID, userID)
	var count int64
	err := row.Scan(&count)
	return count, err
}
|
|
|
|
const getFilteredInboxNotificationsByUserID = `-- name: GetFilteredInboxNotificationsByUserID :many
SELECT id, user_id, template_id, targets, title, content, icon, actions, read_at, created_at FROM inbox_notifications WHERE
	user_id = $1 AND
	($2::UUID[] IS NULL OR template_id = ANY($2::UUID[])) AND
	($3::UUID[] IS NULL OR targets @> $3::UUID[]) AND
	($4::inbox_notification_read_status = 'all' OR ($4::inbox_notification_read_status = 'unread' AND read_at IS NULL) OR ($4::inbox_notification_read_status = 'read' AND read_at IS NOT NULL)) AND
	($5::TIMESTAMPTZ = '0001-01-01 00:00:00Z' OR created_at < $5::TIMESTAMPTZ)
	ORDER BY created_at DESC
	LIMIT (COALESCE(NULLIF($6 :: INT, 0), 25))
`

// GetFilteredInboxNotificationsByUserIDParams carries optional filters; nil
// Templates/Targets, zero CreatedAtOpt, and zero LimitOpt disable each filter
// (limit defaults to 25 in SQL).
type GetFilteredInboxNotificationsByUserIDParams struct {
	UserID       uuid.UUID                   `db:"user_id" json:"user_id"`
	Templates    []uuid.UUID                 `db:"templates" json:"templates"`
	Targets      []uuid.UUID                 `db:"targets" json:"targets"`
	ReadStatus   InboxNotificationReadStatus `db:"read_status" json:"read_status"`
	CreatedAtOpt time.Time                   `db:"created_at_opt" json:"created_at_opt"`
	LimitOpt     int32                       `db:"limit_opt" json:"limit_opt"`
}

// Fetches inbox notifications for a user filtered by templates and targets
// param user_id: The user ID
// param templates: The template IDs to filter by - the template_id = ANY(@templates::UUID[]) condition checks if the template_id is in the @templates array
// param targets: The target IDs to filter by - the targets @> COALESCE(@targets, ARRAY[]::UUID[]) condition checks if the targets array (from the DB) contains all the elements in the @targets array
// param read_status: The read status to filter by - can be any of 'ALL', 'UNREAD', 'READ'
// param created_at_opt: The created_at timestamp to filter by. This parameter is usd for pagination - it fetches notifications created before the specified timestamp if it is not the zero value
// param limit_opt: The limit of notifications to fetch. If the limit is not specified, it defaults to 25
func (q *sqlQuerier) GetFilteredInboxNotificationsByUserID(ctx context.Context, arg GetFilteredInboxNotificationsByUserIDParams) ([]InboxNotification, error) {
	rows, err := q.db.QueryContext(ctx, getFilteredInboxNotificationsByUserID,
		arg.UserID,
		pq.Array(arg.Templates),
		pq.Array(arg.Targets),
		arg.ReadStatus,
		arg.CreatedAtOpt,
		arg.LimitOpt,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []InboxNotification
	for rows.Next() {
		var i InboxNotification
		if err := rows.Scan(
			&i.ID,
			&i.UserID,
			&i.TemplateID,
			pq.Array(&i.Targets),
			&i.Title,
			&i.Content,
			&i.Icon,
			&i.Actions,
			&i.ReadAt,
			&i.CreatedAt,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getInboxNotificationByID = `-- name: GetInboxNotificationByID :one
SELECT id, user_id, template_id, targets, title, content, icon, actions, read_at, created_at FROM inbox_notifications WHERE id = $1
`

// GetInboxNotificationByID fetches a single inbox notification by its
// primary key.
func (q *sqlQuerier) GetInboxNotificationByID(ctx context.Context, id uuid.UUID) (InboxNotification, error) {
	row := q.db.QueryRowContext(ctx, getInboxNotificationByID, id)
	var i InboxNotification
	err := row.Scan(
		&i.ID,
		&i.UserID,
		&i.TemplateID,
		pq.Array(&i.Targets),
		&i.Title,
		&i.Content,
		&i.Icon,
		&i.Actions,
		&i.ReadAt,
		&i.CreatedAt,
	)
	return i, err
}
|
|
|
|
const getInboxNotificationsByUserID = `-- name: GetInboxNotificationsByUserID :many
SELECT id, user_id, template_id, targets, title, content, icon, actions, read_at, created_at FROM inbox_notifications WHERE
	user_id = $1 AND
	($2::inbox_notification_read_status = 'all' OR ($2::inbox_notification_read_status = 'unread' AND read_at IS NULL) OR ($2::inbox_notification_read_status = 'read' AND read_at IS NOT NULL)) AND
	($3::TIMESTAMPTZ = '0001-01-01 00:00:00Z' OR created_at < $3::TIMESTAMPTZ)
	ORDER BY created_at DESC
	LIMIT (COALESCE(NULLIF($4 :: INT, 0), 25))
`

// GetInboxNotificationsByUserIDParams carries the user plus optional
// read-status/pagination filters (zero CreatedAtOpt and LimitOpt disable
// them; limit defaults to 25 in SQL).
type GetInboxNotificationsByUserIDParams struct {
	UserID       uuid.UUID                   `db:"user_id" json:"user_id"`
	ReadStatus   InboxNotificationReadStatus `db:"read_status" json:"read_status"`
	CreatedAtOpt time.Time                   `db:"created_at_opt" json:"created_at_opt"`
	LimitOpt     int32                       `db:"limit_opt" json:"limit_opt"`
}

// Fetches inbox notifications for a user filtered by templates and targets
// param user_id: The user ID
// param read_status: The read status to filter by - can be any of 'ALL', 'UNREAD', 'READ'
// param created_at_opt: The created_at timestamp to filter by. This parameter is usd for pagination - it fetches notifications created before the specified timestamp if it is not the zero value
// param limit_opt: The limit of notifications to fetch. If the limit is not specified, it defaults to 25
func (q *sqlQuerier) GetInboxNotificationsByUserID(ctx context.Context, arg GetInboxNotificationsByUserIDParams) ([]InboxNotification, error) {
	rows, err := q.db.QueryContext(ctx, getInboxNotificationsByUserID,
		arg.UserID,
		arg.ReadStatus,
		arg.CreatedAtOpt,
		arg.LimitOpt,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []InboxNotification
	for rows.Next() {
		var i InboxNotification
		if err := rows.Scan(
			&i.ID,
			&i.UserID,
			&i.TemplateID,
			pq.Array(&i.Targets),
			&i.Title,
			&i.Content,
			&i.Icon,
			&i.Actions,
			&i.ReadAt,
			&i.CreatedAt,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const insertInboxNotification = `-- name: InsertInboxNotification :one
INSERT INTO
	inbox_notifications (
		id,
		user_id,
		template_id,
		targets,
		title,
		content,
		icon,
		actions,
		created_at
	)
VALUES
	($1, $2, $3, $4, $5, $6, $7, $8, $9) RETURNING id, user_id, template_id, targets, title, content, icon, actions, read_at, created_at
`

// InsertInboxNotificationParams holds the column values for a new
// inbox_notifications row (read_at starts NULL, i.e. unread).
type InsertInboxNotificationParams struct {
	ID         uuid.UUID       `db:"id" json:"id"`
	UserID     uuid.UUID       `db:"user_id" json:"user_id"`
	TemplateID uuid.UUID       `db:"template_id" json:"template_id"`
	Targets    []uuid.UUID     `db:"targets" json:"targets"`
	Title      string          `db:"title" json:"title"`
	Content    string          `db:"content" json:"content"`
	Icon       string          `db:"icon" json:"icon"`
	Actions    json.RawMessage `db:"actions" json:"actions"`
	CreatedAt  time.Time       `db:"created_at" json:"created_at"`
}

// InsertInboxNotification stores a new inbox notification and returns the
// inserted row.
func (q *sqlQuerier) InsertInboxNotification(ctx context.Context, arg InsertInboxNotificationParams) (InboxNotification, error) {
	row := q.db.QueryRowContext(ctx, insertInboxNotification,
		arg.ID,
		arg.UserID,
		arg.TemplateID,
		pq.Array(arg.Targets),
		arg.Title,
		arg.Content,
		arg.Icon,
		arg.Actions,
		arg.CreatedAt,
	)
	var i InboxNotification
	err := row.Scan(
		&i.ID,
		&i.UserID,
		&i.TemplateID,
		pq.Array(&i.Targets),
		&i.Title,
		&i.Content,
		&i.Icon,
		&i.Actions,
		&i.ReadAt,
		&i.CreatedAt,
	)
	return i, err
}
|
|
|
|
const markAllInboxNotificationsAsRead = `-- name: MarkAllInboxNotificationsAsRead :exec
UPDATE
	inbox_notifications
SET
	read_at = $1
WHERE
	user_id = $2 and read_at IS NULL
`

// MarkAllInboxNotificationsAsReadParams sets the read timestamp applied to a
// user's unread notifications.
type MarkAllInboxNotificationsAsReadParams struct {
	ReadAt sql.NullTime `db:"read_at" json:"read_at"`
	UserID uuid.UUID    `db:"user_id" json:"user_id"`
}

// MarkAllInboxNotificationsAsRead stamps ReadAt on every currently-unread
// inbox notification belonging to the user.
func (q *sqlQuerier) MarkAllInboxNotificationsAsRead(ctx context.Context, arg MarkAllInboxNotificationsAsReadParams) error {
	_, err := q.db.ExecContext(ctx, markAllInboxNotificationsAsRead, arg.ReadAt, arg.UserID)
	return err
}
|
|
|
|
const updateInboxNotificationReadStatus = `-- name: UpdateInboxNotificationReadStatus :exec
UPDATE
	inbox_notifications
SET
	read_at = $1
WHERE
	id = $2
`

// UpdateInboxNotificationReadStatusParams sets (or clears, via a NULL
// ReadAt) the read timestamp of one notification.
type UpdateInboxNotificationReadStatusParams struct {
	ReadAt sql.NullTime `db:"read_at" json:"read_at"`
	ID     uuid.UUID    `db:"id" json:"id"`
}

// UpdateInboxNotificationReadStatus updates the read_at column of a single
// inbox notification.
func (q *sqlQuerier) UpdateInboxNotificationReadStatus(ctx context.Context, arg UpdateInboxNotificationReadStatusParams) error {
	_, err := q.db.ExecContext(ctx, updateInboxNotificationReadStatus, arg.ReadAt, arg.ID)
	return err
}
|
|
|
|
const deleteOAuth2ProviderAppByID = `-- name: DeleteOAuth2ProviderAppByID :exec
DELETE FROM oauth2_provider_apps WHERE id = $1
`

// DeleteOAuth2ProviderAppByID removes an OAuth2 provider app by its ID.
func (q *sqlQuerier) DeleteOAuth2ProviderAppByID(ctx context.Context, id uuid.UUID) error {
	_, err := q.db.ExecContext(ctx, deleteOAuth2ProviderAppByID, id)
	return err
}
|
|
|
|
const deleteOAuth2ProviderAppCodeByID = `-- name: DeleteOAuth2ProviderAppCodeByID :exec
DELETE FROM oauth2_provider_app_codes WHERE id = $1
`

// DeleteOAuth2ProviderAppCodeByID removes a single authorization code row.
func (q *sqlQuerier) DeleteOAuth2ProviderAppCodeByID(ctx context.Context, id uuid.UUID) error {
	_, err := q.db.ExecContext(ctx, deleteOAuth2ProviderAppCodeByID, id)
	return err
}
|
|
|
|
const deleteOAuth2ProviderAppCodesByAppAndUserID = `-- name: DeleteOAuth2ProviderAppCodesByAppAndUserID :exec
DELETE FROM oauth2_provider_app_codes WHERE app_id = $1 AND user_id = $2
`

// DeleteOAuth2ProviderAppCodesByAppAndUserIDParams identifies codes by both
// the issuing app and the owning user.
type DeleteOAuth2ProviderAppCodesByAppAndUserIDParams struct {
	AppID  uuid.UUID `db:"app_id" json:"app_id"`
	UserID uuid.UUID `db:"user_id" json:"user_id"`
}

// DeleteOAuth2ProviderAppCodesByAppAndUserID removes all authorization codes
// a given app issued to a given user.
func (q *sqlQuerier) DeleteOAuth2ProviderAppCodesByAppAndUserID(ctx context.Context, arg DeleteOAuth2ProviderAppCodesByAppAndUserIDParams) error {
	_, err := q.db.ExecContext(ctx, deleteOAuth2ProviderAppCodesByAppAndUserID, arg.AppID, arg.UserID)
	return err
}
|
|
|
|
const deleteOAuth2ProviderAppSecretByID = `-- name: DeleteOAuth2ProviderAppSecretByID :exec
DELETE FROM oauth2_provider_app_secrets WHERE id = $1
`

// DeleteOAuth2ProviderAppSecretByID removes a single app secret row.
func (q *sqlQuerier) DeleteOAuth2ProviderAppSecretByID(ctx context.Context, id uuid.UUID) error {
	_, err := q.db.ExecContext(ctx, deleteOAuth2ProviderAppSecretByID, id)
	return err
}
|
|
|
|
const deleteOAuth2ProviderAppTokensByAppAndUserID = `-- name: DeleteOAuth2ProviderAppTokensByAppAndUserID :exec
DELETE FROM
	oauth2_provider_app_tokens
USING
	oauth2_provider_app_secrets, api_keys
WHERE
	oauth2_provider_app_secrets.id = oauth2_provider_app_tokens.app_secret_id
	AND api_keys.id = oauth2_provider_app_tokens.api_key_id
	AND oauth2_provider_app_secrets.app_id = $1
	AND api_keys.user_id = $2
`

// DeleteOAuth2ProviderAppTokensByAppAndUserIDParams identifies tokens via the
// secret's app and the API key's user (joined in SQL).
type DeleteOAuth2ProviderAppTokensByAppAndUserIDParams struct {
	AppID  uuid.UUID `db:"app_id" json:"app_id"`
	UserID uuid.UUID `db:"user_id" json:"user_id"`
}

// DeleteOAuth2ProviderAppTokensByAppAndUserID removes all tokens a given app
// holds for a given user.
func (q *sqlQuerier) DeleteOAuth2ProviderAppTokensByAppAndUserID(ctx context.Context, arg DeleteOAuth2ProviderAppTokensByAppAndUserIDParams) error {
	_, err := q.db.ExecContext(ctx, deleteOAuth2ProviderAppTokensByAppAndUserID, arg.AppID, arg.UserID)
	return err
}
|
|
|
|
const getOAuth2ProviderAppByID = `-- name: GetOAuth2ProviderAppByID :one
SELECT id, created_at, updated_at, name, icon, callback_url FROM oauth2_provider_apps WHERE id = $1
`

// GetOAuth2ProviderAppByID fetches an OAuth2 provider app by its primary key.
func (q *sqlQuerier) GetOAuth2ProviderAppByID(ctx context.Context, id uuid.UUID) (OAuth2ProviderApp, error) {
	row := q.db.QueryRowContext(ctx, getOAuth2ProviderAppByID, id)
	var i OAuth2ProviderApp
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Name,
		&i.Icon,
		&i.CallbackURL,
	)
	return i, err
}
|
|
|
|
const getOAuth2ProviderAppCodeByID = `-- name: GetOAuth2ProviderAppCodeByID :one
SELECT id, created_at, expires_at, secret_prefix, hashed_secret, user_id, app_id FROM oauth2_provider_app_codes WHERE id = $1
`

// GetOAuth2ProviderAppCodeByID fetches a single OAuth2 authorization code by
// its primary key.
func (q *sqlQuerier) GetOAuth2ProviderAppCodeByID(ctx context.Context, id uuid.UUID) (OAuth2ProviderAppCode, error) {
	row := q.db.QueryRowContext(ctx, getOAuth2ProviderAppCodeByID, id)
	var i OAuth2ProviderAppCode
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.ExpiresAt,
		&i.SecretPrefix,
		&i.HashedSecret,
		&i.UserID,
		&i.AppID,
	)
	return i, err
}
|
|
|
|
const getOAuth2ProviderAppCodeByPrefix = `-- name: GetOAuth2ProviderAppCodeByPrefix :one
SELECT id, created_at, expires_at, secret_prefix, hashed_secret, user_id, app_id FROM oauth2_provider_app_codes WHERE secret_prefix = $1
`

// GetOAuth2ProviderAppCodeByPrefix fetches a single OAuth2 authorization code
// by its (unique) secret prefix. The prefix lets the caller locate the row
// without knowing the full secret, which is stored only as a hash.
func (q *sqlQuerier) GetOAuth2ProviderAppCodeByPrefix(ctx context.Context, secretPrefix []byte) (OAuth2ProviderAppCode, error) {
	row := q.db.QueryRowContext(ctx, getOAuth2ProviderAppCodeByPrefix, secretPrefix)
	var i OAuth2ProviderAppCode
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.ExpiresAt,
		&i.SecretPrefix,
		&i.HashedSecret,
		&i.UserID,
		&i.AppID,
	)
	return i, err
}
|
|
|
|
const getOAuth2ProviderAppSecretByID = `-- name: GetOAuth2ProviderAppSecretByID :one
SELECT id, created_at, last_used_at, hashed_secret, display_secret, app_id, secret_prefix FROM oauth2_provider_app_secrets WHERE id = $1
`

// GetOAuth2ProviderAppSecretByID fetches a single OAuth2 provider app secret
// by its primary key.
func (q *sqlQuerier) GetOAuth2ProviderAppSecretByID(ctx context.Context, id uuid.UUID) (OAuth2ProviderAppSecret, error) {
	row := q.db.QueryRowContext(ctx, getOAuth2ProviderAppSecretByID, id)
	var i OAuth2ProviderAppSecret
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.LastUsedAt,
		&i.HashedSecret,
		&i.DisplaySecret,
		&i.AppID,
		&i.SecretPrefix,
	)
	return i, err
}
|
|
|
|
const getOAuth2ProviderAppSecretByPrefix = `-- name: GetOAuth2ProviderAppSecretByPrefix :one
SELECT id, created_at, last_used_at, hashed_secret, display_secret, app_id, secret_prefix FROM oauth2_provider_app_secrets WHERE secret_prefix = $1
`

// GetOAuth2ProviderAppSecretByPrefix fetches a single OAuth2 provider app
// secret by its (unique) secret prefix, allowing lookup without the full
// secret, which is stored only as a hash.
func (q *sqlQuerier) GetOAuth2ProviderAppSecretByPrefix(ctx context.Context, secretPrefix []byte) (OAuth2ProviderAppSecret, error) {
	row := q.db.QueryRowContext(ctx, getOAuth2ProviderAppSecretByPrefix, secretPrefix)
	var i OAuth2ProviderAppSecret
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.LastUsedAt,
		&i.HashedSecret,
		&i.DisplaySecret,
		&i.AppID,
		&i.SecretPrefix,
	)
	return i, err
}
|
|
|
|
const getOAuth2ProviderAppSecretsByAppID = `-- name: GetOAuth2ProviderAppSecretsByAppID :many
SELECT id, created_at, last_used_at, hashed_secret, display_secret, app_id, secret_prefix FROM oauth2_provider_app_secrets WHERE app_id = $1 ORDER BY (created_at, id) ASC
`

// GetOAuth2ProviderAppSecretsByAppID lists every secret belonging to the
// given OAuth2 provider app, ordered deterministically by creation time with
// id as a tiebreaker.
func (q *sqlQuerier) GetOAuth2ProviderAppSecretsByAppID(ctx context.Context, appID uuid.UUID) ([]OAuth2ProviderAppSecret, error) {
	rows, err := q.db.QueryContext(ctx, getOAuth2ProviderAppSecretsByAppID, appID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []OAuth2ProviderAppSecret
	for rows.Next() {
		var i OAuth2ProviderAppSecret
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.LastUsedAt,
			&i.HashedSecret,
			&i.DisplaySecret,
			&i.AppID,
			&i.SecretPrefix,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getOAuth2ProviderAppTokenByPrefix = `-- name: GetOAuth2ProviderAppTokenByPrefix :one
SELECT id, created_at, expires_at, hash_prefix, refresh_hash, app_secret_id, api_key_id FROM oauth2_provider_app_tokens WHERE hash_prefix = $1
`

// GetOAuth2ProviderAppTokenByPrefix fetches a single OAuth2 provider app
// token by its (unique) hash prefix, allowing lookup without the full token,
// which is stored only as a hash.
func (q *sqlQuerier) GetOAuth2ProviderAppTokenByPrefix(ctx context.Context, hashPrefix []byte) (OAuth2ProviderAppToken, error) {
	row := q.db.QueryRowContext(ctx, getOAuth2ProviderAppTokenByPrefix, hashPrefix)
	var i OAuth2ProviderAppToken
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.ExpiresAt,
		&i.HashPrefix,
		&i.RefreshHash,
		&i.AppSecretID,
		&i.APIKeyID,
	)
	return i, err
}
|
|
|
|
const getOAuth2ProviderApps = `-- name: GetOAuth2ProviderApps :many
SELECT id, created_at, updated_at, name, icon, callback_url FROM oauth2_provider_apps ORDER BY (name, id) ASC
`

// GetOAuth2ProviderApps lists all OAuth2 provider apps, ordered
// deterministically by name with id as a tiebreaker.
func (q *sqlQuerier) GetOAuth2ProviderApps(ctx context.Context) ([]OAuth2ProviderApp, error) {
	rows, err := q.db.QueryContext(ctx, getOAuth2ProviderApps)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []OAuth2ProviderApp
	for rows.Next() {
		var i OAuth2ProviderApp
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.Name,
			&i.Icon,
			&i.CallbackURL,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getOAuth2ProviderAppsByUserID = `-- name: GetOAuth2ProviderAppsByUserID :many
SELECT
	COUNT(DISTINCT oauth2_provider_app_tokens.id) as token_count,
	oauth2_provider_apps.id, oauth2_provider_apps.created_at, oauth2_provider_apps.updated_at, oauth2_provider_apps.name, oauth2_provider_apps.icon, oauth2_provider_apps.callback_url
FROM oauth2_provider_app_tokens
	INNER JOIN oauth2_provider_app_secrets
		ON oauth2_provider_app_secrets.id = oauth2_provider_app_tokens.app_secret_id
	INNER JOIN oauth2_provider_apps
		ON oauth2_provider_apps.id = oauth2_provider_app_secrets.app_id
	INNER JOIN api_keys
		ON api_keys.id = oauth2_provider_app_tokens.api_key_id
WHERE
	api_keys.user_id = $1
GROUP BY
	oauth2_provider_apps.id
`

type GetOAuth2ProviderAppsByUserIDRow struct {
	TokenCount        int64             `db:"token_count" json:"token_count"`
	OAuth2ProviderApp OAuth2ProviderApp `db:"oauth2_provider_app" json:"oauth2_provider_app"`
}

// GetOAuth2ProviderAppsByUserID lists every OAuth2 provider app that has
// issued at least one token to the given user, along with the number of
// distinct tokens the user holds for each app. The token-to-user
// relationship is resolved through the api_keys table.
func (q *sqlQuerier) GetOAuth2ProviderAppsByUserID(ctx context.Context, userID uuid.UUID) ([]GetOAuth2ProviderAppsByUserIDRow, error) {
	rows, err := q.db.QueryContext(ctx, getOAuth2ProviderAppsByUserID, userID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetOAuth2ProviderAppsByUserIDRow
	for rows.Next() {
		var i GetOAuth2ProviderAppsByUserIDRow
		if err := rows.Scan(
			&i.TokenCount,
			&i.OAuth2ProviderApp.ID,
			&i.OAuth2ProviderApp.CreatedAt,
			&i.OAuth2ProviderApp.UpdatedAt,
			&i.OAuth2ProviderApp.Name,
			&i.OAuth2ProviderApp.Icon,
			&i.OAuth2ProviderApp.CallbackURL,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const insertOAuth2ProviderApp = `-- name: InsertOAuth2ProviderApp :one
INSERT INTO oauth2_provider_apps (
	id,
	created_at,
	updated_at,
	name,
	icon,
	callback_url
) VALUES(
	$1,
	$2,
	$3,
	$4,
	$5,
	$6
) RETURNING id, created_at, updated_at, name, icon, callback_url
`

type InsertOAuth2ProviderAppParams struct {
	ID          uuid.UUID `db:"id" json:"id"`
	CreatedAt   time.Time `db:"created_at" json:"created_at"`
	UpdatedAt   time.Time `db:"updated_at" json:"updated_at"`
	Name        string    `db:"name" json:"name"`
	Icon        string    `db:"icon" json:"icon"`
	CallbackURL string    `db:"callback_url" json:"callback_url"`
}

// InsertOAuth2ProviderApp creates a new OAuth2 provider app and returns the
// inserted row.
func (q *sqlQuerier) InsertOAuth2ProviderApp(ctx context.Context, arg InsertOAuth2ProviderAppParams) (OAuth2ProviderApp, error) {
	row := q.db.QueryRowContext(ctx, insertOAuth2ProviderApp,
		arg.ID,
		arg.CreatedAt,
		arg.UpdatedAt,
		arg.Name,
		arg.Icon,
		arg.CallbackURL,
	)
	var i OAuth2ProviderApp
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Name,
		&i.Icon,
		&i.CallbackURL,
	)
	return i, err
}
|
|
|
|
const insertOAuth2ProviderAppCode = `-- name: InsertOAuth2ProviderAppCode :one
INSERT INTO oauth2_provider_app_codes (
	id,
	created_at,
	expires_at,
	secret_prefix,
	hashed_secret,
	app_id,
	user_id
) VALUES(
	$1,
	$2,
	$3,
	$4,
	$5,
	$6,
	$7
) RETURNING id, created_at, expires_at, secret_prefix, hashed_secret, user_id, app_id
`

type InsertOAuth2ProviderAppCodeParams struct {
	ID           uuid.UUID `db:"id" json:"id"`
	CreatedAt    time.Time `db:"created_at" json:"created_at"`
	ExpiresAt    time.Time `db:"expires_at" json:"expires_at"`
	SecretPrefix []byte    `db:"secret_prefix" json:"secret_prefix"`
	HashedSecret []byte    `db:"hashed_secret" json:"hashed_secret"`
	AppID        uuid.UUID `db:"app_id" json:"app_id"`
	UserID       uuid.UUID `db:"user_id" json:"user_id"`
}

// InsertOAuth2ProviderAppCode creates a new OAuth2 authorization code and
// returns the inserted row. Only the secret's hash and lookup prefix are
// stored, never the plaintext secret.
func (q *sqlQuerier) InsertOAuth2ProviderAppCode(ctx context.Context, arg InsertOAuth2ProviderAppCodeParams) (OAuth2ProviderAppCode, error) {
	row := q.db.QueryRowContext(ctx, insertOAuth2ProviderAppCode,
		arg.ID,
		arg.CreatedAt,
		arg.ExpiresAt,
		arg.SecretPrefix,
		arg.HashedSecret,
		arg.AppID,
		arg.UserID,
	)
	var i OAuth2ProviderAppCode
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.ExpiresAt,
		&i.SecretPrefix,
		&i.HashedSecret,
		&i.UserID,
		&i.AppID,
	)
	return i, err
}
|
|
|
|
const insertOAuth2ProviderAppSecret = `-- name: InsertOAuth2ProviderAppSecret :one
INSERT INTO oauth2_provider_app_secrets (
	id,
	created_at,
	secret_prefix,
	hashed_secret,
	display_secret,
	app_id
) VALUES(
	$1,
	$2,
	$3,
	$4,
	$5,
	$6
) RETURNING id, created_at, last_used_at, hashed_secret, display_secret, app_id, secret_prefix
`

type InsertOAuth2ProviderAppSecretParams struct {
	ID            uuid.UUID `db:"id" json:"id"`
	CreatedAt     time.Time `db:"created_at" json:"created_at"`
	SecretPrefix  []byte    `db:"secret_prefix" json:"secret_prefix"`
	HashedSecret  []byte    `db:"hashed_secret" json:"hashed_secret"`
	DisplaySecret string    `db:"display_secret" json:"display_secret"`
	AppID         uuid.UUID `db:"app_id" json:"app_id"`
}

// InsertOAuth2ProviderAppSecret creates a new secret for an OAuth2 provider
// app and returns the inserted row (including the database-defaulted
// last_used_at column).
func (q *sqlQuerier) InsertOAuth2ProviderAppSecret(ctx context.Context, arg InsertOAuth2ProviderAppSecretParams) (OAuth2ProviderAppSecret, error) {
	row := q.db.QueryRowContext(ctx, insertOAuth2ProviderAppSecret,
		arg.ID,
		arg.CreatedAt,
		arg.SecretPrefix,
		arg.HashedSecret,
		arg.DisplaySecret,
		arg.AppID,
	)
	var i OAuth2ProviderAppSecret
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.LastUsedAt,
		&i.HashedSecret,
		&i.DisplaySecret,
		&i.AppID,
		&i.SecretPrefix,
	)
	return i, err
}
|
|
|
|
const insertOAuth2ProviderAppToken = `-- name: InsertOAuth2ProviderAppToken :one
INSERT INTO oauth2_provider_app_tokens (
	id,
	created_at,
	expires_at,
	hash_prefix,
	refresh_hash,
	app_secret_id,
	api_key_id
) VALUES(
	$1,
	$2,
	$3,
	$4,
	$5,
	$6,
	$7
) RETURNING id, created_at, expires_at, hash_prefix, refresh_hash, app_secret_id, api_key_id
`

type InsertOAuth2ProviderAppTokenParams struct {
	ID          uuid.UUID `db:"id" json:"id"`
	CreatedAt   time.Time `db:"created_at" json:"created_at"`
	ExpiresAt   time.Time `db:"expires_at" json:"expires_at"`
	HashPrefix  []byte    `db:"hash_prefix" json:"hash_prefix"`
	RefreshHash []byte    `db:"refresh_hash" json:"refresh_hash"`
	AppSecretID uuid.UUID `db:"app_secret_id" json:"app_secret_id"`
	APIKeyID    string    `db:"api_key_id" json:"api_key_id"`
}

// InsertOAuth2ProviderAppToken creates a new OAuth2 provider app token and
// returns the inserted row. The token links an app secret to the API key
// that authenticates the granting user.
func (q *sqlQuerier) InsertOAuth2ProviderAppToken(ctx context.Context, arg InsertOAuth2ProviderAppTokenParams) (OAuth2ProviderAppToken, error) {
	row := q.db.QueryRowContext(ctx, insertOAuth2ProviderAppToken,
		arg.ID,
		arg.CreatedAt,
		arg.ExpiresAt,
		arg.HashPrefix,
		arg.RefreshHash,
		arg.AppSecretID,
		arg.APIKeyID,
	)
	var i OAuth2ProviderAppToken
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.ExpiresAt,
		&i.HashPrefix,
		&i.RefreshHash,
		&i.AppSecretID,
		&i.APIKeyID,
	)
	return i, err
}
|
|
|
|
const updateOAuth2ProviderAppByID = `-- name: UpdateOAuth2ProviderAppByID :one
UPDATE oauth2_provider_apps SET
	updated_at = $2,
	name = $3,
	icon = $4,
	callback_url = $5
WHERE id = $1 RETURNING id, created_at, updated_at, name, icon, callback_url
`

type UpdateOAuth2ProviderAppByIDParams struct {
	ID          uuid.UUID `db:"id" json:"id"`
	UpdatedAt   time.Time `db:"updated_at" json:"updated_at"`
	Name        string    `db:"name" json:"name"`
	Icon        string    `db:"icon" json:"icon"`
	CallbackURL string    `db:"callback_url" json:"callback_url"`
}

// UpdateOAuth2ProviderAppByID updates the mutable fields of an OAuth2
// provider app and returns the updated row.
func (q *sqlQuerier) UpdateOAuth2ProviderAppByID(ctx context.Context, arg UpdateOAuth2ProviderAppByIDParams) (OAuth2ProviderApp, error) {
	row := q.db.QueryRowContext(ctx, updateOAuth2ProviderAppByID,
		arg.ID,
		arg.UpdatedAt,
		arg.Name,
		arg.Icon,
		arg.CallbackURL,
	)
	var i OAuth2ProviderApp
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Name,
		&i.Icon,
		&i.CallbackURL,
	)
	return i, err
}
|
|
|
|
const updateOAuth2ProviderAppSecretByID = `-- name: UpdateOAuth2ProviderAppSecretByID :one
UPDATE oauth2_provider_app_secrets SET
	last_used_at = $2
WHERE id = $1 RETURNING id, created_at, last_used_at, hashed_secret, display_secret, app_id, secret_prefix
`

type UpdateOAuth2ProviderAppSecretByIDParams struct {
	ID         uuid.UUID    `db:"id" json:"id"`
	LastUsedAt sql.NullTime `db:"last_used_at" json:"last_used_at"`
}

// UpdateOAuth2ProviderAppSecretByID sets the last_used_at timestamp of an
// OAuth2 provider app secret and returns the updated row.
func (q *sqlQuerier) UpdateOAuth2ProviderAppSecretByID(ctx context.Context, arg UpdateOAuth2ProviderAppSecretByIDParams) (OAuth2ProviderAppSecret, error) {
	row := q.db.QueryRowContext(ctx, updateOAuth2ProviderAppSecretByID, arg.ID, arg.LastUsedAt)
	var i OAuth2ProviderAppSecret
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.LastUsedAt,
		&i.HashedSecret,
		&i.DisplaySecret,
		&i.AppID,
		&i.SecretPrefix,
	)
	return i, err
}
|
|
|
|
const deleteOrganizationMember = `-- name: DeleteOrganizationMember :exec
DELETE
	FROM
		organization_members
	WHERE
		organization_id = $1 AND
		user_id = $2
`

type DeleteOrganizationMemberParams struct {
	OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
	UserID         uuid.UUID `db:"user_id" json:"user_id"`
}

// DeleteOrganizationMember removes the membership row linking the given user
// to the given organization.
func (q *sqlQuerier) DeleteOrganizationMember(ctx context.Context, arg DeleteOrganizationMemberParams) error {
	_, err := q.db.ExecContext(ctx, deleteOrganizationMember, arg.OrganizationID, arg.UserID)
	return err
}
|
|
|
|
const getOrganizationIDsByMemberIDs = `-- name: GetOrganizationIDsByMemberIDs :many
SELECT
	user_id, array_agg(organization_id) :: uuid [ ] AS "organization_IDs"
FROM
	organization_members
WHERE
	user_id = ANY($1 :: uuid [ ])
GROUP BY
	user_id
`

type GetOrganizationIDsByMemberIDsRow struct {
	UserID          uuid.UUID   `db:"user_id" json:"user_id"`
	OrganizationIDs []uuid.UUID `db:"organization_IDs" json:"organization_IDs"`
}

// GetOrganizationIDsByMemberIDs returns, for each of the given user IDs, the
// set of organization IDs that user is a member of. Users with no
// memberships produce no row.
func (q *sqlQuerier) GetOrganizationIDsByMemberIDs(ctx context.Context, ids []uuid.UUID) ([]GetOrganizationIDsByMemberIDsRow, error) {
	rows, err := q.db.QueryContext(ctx, getOrganizationIDsByMemberIDs, pq.Array(ids))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetOrganizationIDsByMemberIDsRow
	for rows.Next() {
		var i GetOrganizationIDsByMemberIDsRow
		if err := rows.Scan(&i.UserID, pq.Array(&i.OrganizationIDs)); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const insertOrganizationMember = `-- name: InsertOrganizationMember :one
INSERT INTO
	organization_members (
		organization_id,
		user_id,
		created_at,
		updated_at,
		roles
	)
VALUES
	($1, $2, $3, $4, $5) RETURNING user_id, organization_id, created_at, updated_at, roles
`

type InsertOrganizationMemberParams struct {
	OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
	UserID         uuid.UUID `db:"user_id" json:"user_id"`
	CreatedAt      time.Time `db:"created_at" json:"created_at"`
	UpdatedAt      time.Time `db:"updated_at" json:"updated_at"`
	Roles          []string  `db:"roles" json:"roles"`
}

// InsertOrganizationMember adds a user to an organization with the given
// roles and returns the inserted membership row.
func (q *sqlQuerier) InsertOrganizationMember(ctx context.Context, arg InsertOrganizationMemberParams) (OrganizationMember, error) {
	row := q.db.QueryRowContext(ctx, insertOrganizationMember,
		arg.OrganizationID,
		arg.UserID,
		arg.CreatedAt,
		arg.UpdatedAt,
		pq.Array(arg.Roles),
	)
	var i OrganizationMember
	err := row.Scan(
		&i.UserID,
		&i.OrganizationID,
		&i.CreatedAt,
		&i.UpdatedAt,
		pq.Array(&i.Roles),
	)
	return i, err
}
|
|
|
|
const organizationMembers = `-- name: OrganizationMembers :many
SELECT
	organization_members.user_id, organization_members.organization_id, organization_members.created_at, organization_members.updated_at, organization_members.roles,
	users.username, users.avatar_url, users.name, users.email, users.rbac_roles as "global_roles"
FROM
	organization_members
INNER JOIN
	users ON organization_members.user_id = users.id AND users.deleted = false
WHERE
	-- Filter by organization id
	CASE
		WHEN $1 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
			organization_id = $1
		ELSE true
	END
	-- Filter by user id
	AND CASE
		WHEN $2 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
			user_id = $2
		ELSE true
	END
	-- Filter by system type
	AND CASE
		WHEN $3::bool THEN TRUE
		ELSE
			is_system = false
	END
`

type OrganizationMembersParams struct {
	OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
	UserID         uuid.UUID `db:"user_id" json:"user_id"`
	IncludeSystem  bool      `db:"include_system" json:"include_system"`
}

type OrganizationMembersRow struct {
	OrganizationMember OrganizationMember `db:"organization_member" json:"organization_member"`
	Username           string             `db:"username" json:"username"`
	AvatarURL          string             `db:"avatar_url" json:"avatar_url"`
	Name               string             `db:"name" json:"name"`
	Email              string             `db:"email" json:"email"`
	GlobalRoles        pq.StringArray     `db:"global_roles" json:"global_roles"`
}

// OrganizationMembers returns membership rows joined with basic user info
// for non-deleted users. A zero (uuid.Nil) filter value disables that
// filter; IncludeSystem=false additionally excludes system users.
//
// Arguments are optional with uuid.Nil to ignore.
// - Use just 'organization_id' to get all members of an org
// - Use just 'user_id' to get all orgs a user is a member of
// - Use both to get a specific org member row
func (q *sqlQuerier) OrganizationMembers(ctx context.Context, arg OrganizationMembersParams) ([]OrganizationMembersRow, error) {
	rows, err := q.db.QueryContext(ctx, organizationMembers, arg.OrganizationID, arg.UserID, arg.IncludeSystem)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []OrganizationMembersRow
	for rows.Next() {
		var i OrganizationMembersRow
		if err := rows.Scan(
			&i.OrganizationMember.UserID,
			&i.OrganizationMember.OrganizationID,
			&i.OrganizationMember.CreatedAt,
			&i.OrganizationMember.UpdatedAt,
			pq.Array(&i.OrganizationMember.Roles),
			&i.Username,
			&i.AvatarURL,
			&i.Name,
			&i.Email,
			&i.GlobalRoles,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const paginatedOrganizationMembers = `-- name: PaginatedOrganizationMembers :many
SELECT
	organization_members.user_id, organization_members.organization_id, organization_members.created_at, organization_members.updated_at, organization_members.roles,
	users.username, users.avatar_url, users.name, users.email, users.rbac_roles as "global_roles",
	COUNT(*) OVER() AS count
FROM
	organization_members
INNER JOIN
	users ON organization_members.user_id = users.id AND users.deleted = false
WHERE
	-- Filter by organization id
	CASE
		WHEN $1 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
			organization_id = $1
		ELSE true
	END
ORDER BY
	-- Deterministic and consistent ordering of all users. This is to ensure consistent pagination.
	LOWER(username) ASC OFFSET $2
LIMIT
	-- A null limit means "no limit", so 0 means return all
	NULLIF($3 :: int, 0)
`

type PaginatedOrganizationMembersParams struct {
	OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
	OffsetOpt      int32     `db:"offset_opt" json:"offset_opt"`
	LimitOpt       int32     `db:"limit_opt" json:"limit_opt"`
}

type PaginatedOrganizationMembersRow struct {
	OrganizationMember OrganizationMember `db:"organization_member" json:"organization_member"`
	Username           string             `db:"username" json:"username"`
	AvatarURL          string             `db:"avatar_url" json:"avatar_url"`
	Name               string             `db:"name" json:"name"`
	Email              string             `db:"email" json:"email"`
	GlobalRoles        pq.StringArray     `db:"global_roles" json:"global_roles"`
	Count              int64              `db:"count" json:"count"`
}

// PaginatedOrganizationMembers returns a page of membership rows joined with
// basic user info for non-deleted users, plus the total (pre-pagination)
// match count in every row via a window function. A zero (uuid.Nil)
// OrganizationID disables that filter; LimitOpt of 0 means "no limit".
func (q *sqlQuerier) PaginatedOrganizationMembers(ctx context.Context, arg PaginatedOrganizationMembersParams) ([]PaginatedOrganizationMembersRow, error) {
	rows, err := q.db.QueryContext(ctx, paginatedOrganizationMembers, arg.OrganizationID, arg.OffsetOpt, arg.LimitOpt)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []PaginatedOrganizationMembersRow
	for rows.Next() {
		var i PaginatedOrganizationMembersRow
		if err := rows.Scan(
			&i.OrganizationMember.UserID,
			&i.OrganizationMember.OrganizationID,
			&i.OrganizationMember.CreatedAt,
			&i.OrganizationMember.UpdatedAt,
			pq.Array(&i.OrganizationMember.Roles),
			&i.Username,
			&i.AvatarURL,
			&i.Name,
			&i.Email,
			&i.GlobalRoles,
			&i.Count,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const updateMemberRoles = `-- name: UpdateMemberRoles :one
UPDATE
	organization_members
SET
	-- Remove all duplicates from the roles.
	roles = ARRAY(SELECT DISTINCT UNNEST($1 :: text[]))
WHERE
	user_id = $2
	AND organization_id = $3
RETURNING user_id, organization_id, created_at, updated_at, roles
`

type UpdateMemberRolesParams struct {
	GrantedRoles []string  `db:"granted_roles" json:"granted_roles"`
	UserID       uuid.UUID `db:"user_id" json:"user_id"`
	OrgID        uuid.UUID `db:"org_id" json:"org_id"`
}

// UpdateMemberRoles replaces an organization member's role set with the given
// roles (deduplicated in SQL) and returns the updated membership row.
func (q *sqlQuerier) UpdateMemberRoles(ctx context.Context, arg UpdateMemberRolesParams) (OrganizationMember, error) {
	row := q.db.QueryRowContext(ctx, updateMemberRoles, pq.Array(arg.GrantedRoles), arg.UserID, arg.OrgID)
	var i OrganizationMember
	err := row.Scan(
		&i.UserID,
		&i.OrganizationID,
		&i.CreatedAt,
		&i.UpdatedAt,
		pq.Array(&i.Roles),
	)
	return i, err
}
|
|
|
|
const getDefaultOrganization = `-- name: GetDefaultOrganization :one
SELECT
	id, name, description, created_at, updated_at, is_default, display_name, icon, deleted
FROM
	organizations
WHERE
	is_default = true
LIMIT
	1
`

// GetDefaultOrganization fetches the organization flagged as the deployment
// default (is_default = true).
func (q *sqlQuerier) GetDefaultOrganization(ctx context.Context) (Organization, error) {
	row := q.db.QueryRowContext(ctx, getDefaultOrganization)
	var i Organization
	err := row.Scan(
		&i.ID,
		&i.Name,
		&i.Description,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.IsDefault,
		&i.DisplayName,
		&i.Icon,
		&i.Deleted,
	)
	return i, err
}
|
|
|
|
const getOrganizationByID = `-- name: GetOrganizationByID :one
SELECT
	id, name, description, created_at, updated_at, is_default, display_name, icon, deleted
FROM
	organizations
WHERE
	id = $1
`

// GetOrganizationByID fetches a single organization by its primary key,
// regardless of its deleted flag.
func (q *sqlQuerier) GetOrganizationByID(ctx context.Context, id uuid.UUID) (Organization, error) {
	row := q.db.QueryRowContext(ctx, getOrganizationByID, id)
	var i Organization
	err := row.Scan(
		&i.ID,
		&i.Name,
		&i.Description,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.IsDefault,
		&i.DisplayName,
		&i.Icon,
		&i.Deleted,
	)
	return i, err
}
|
|
|
|
const getOrganizationByName = `-- name: GetOrganizationByName :one
SELECT
	id, name, description, created_at, updated_at, is_default, display_name, icon, deleted
FROM
	organizations
WHERE
	-- Optionally include deleted organizations
	deleted = $1 AND
	LOWER("name") = LOWER($2)
LIMIT
	1
`

type GetOrganizationByNameParams struct {
	Deleted bool   `db:"deleted" json:"deleted"`
	Name    string `db:"name" json:"name"`
}

// GetOrganizationByName fetches a single organization by case-insensitive
// name match. The Deleted parameter selects whether to search deleted or
// live organizations.
func (q *sqlQuerier) GetOrganizationByName(ctx context.Context, arg GetOrganizationByNameParams) (Organization, error) {
	row := q.db.QueryRowContext(ctx, getOrganizationByName, arg.Deleted, arg.Name)
	var i Organization
	err := row.Scan(
		&i.ID,
		&i.Name,
		&i.Description,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.IsDefault,
		&i.DisplayName,
		&i.Icon,
		&i.Deleted,
	)
	return i, err
}
|
|
|
|
const getOrganizationResourceCountByID = `-- name: GetOrganizationResourceCountByID :one
SELECT
	(
		SELECT
			count(*)
		FROM
			workspaces
		WHERE
			workspaces.organization_id = $1
			AND workspaces.deleted = FALSE) AS workspace_count,
	(
		SELECT
			count(*)
		FROM
			GROUPS
		WHERE
			groups.organization_id = $1) AS group_count,
	(
		SELECT
			count(*)
		FROM
			templates
		WHERE
			templates.organization_id = $1
			AND templates.deleted = FALSE) AS template_count,
	(
		SELECT
			count(*)
		FROM
			organization_members
		LEFT JOIN users ON organization_members.user_id = users.id
		WHERE
			organization_members.organization_id = $1
			AND users.deleted = FALSE) AS member_count,
	(
		SELECT
			count(*)
		FROM
			provisioner_keys
		WHERE
			provisioner_keys.organization_id = $1) AS provisioner_key_count
`

type GetOrganizationResourceCountByIDRow struct {
	WorkspaceCount      int64 `db:"workspace_count" json:"workspace_count"`
	GroupCount          int64 `db:"group_count" json:"group_count"`
	TemplateCount       int64 `db:"template_count" json:"template_count"`
	MemberCount         int64 `db:"member_count" json:"member_count"`
	ProvisionerKeyCount int64 `db:"provisioner_key_count" json:"provisioner_key_count"`
}

// GetOrganizationResourceCountByID returns, in a single round trip, the
// counts of workspaces (non-deleted), groups, templates (non-deleted),
// members (non-deleted users), and provisioner keys belonging to the given
// organization.
func (q *sqlQuerier) GetOrganizationResourceCountByID(ctx context.Context, organizationID uuid.UUID) (GetOrganizationResourceCountByIDRow, error) {
	row := q.db.QueryRowContext(ctx, getOrganizationResourceCountByID, organizationID)
	var i GetOrganizationResourceCountByIDRow
	err := row.Scan(
		&i.WorkspaceCount,
		&i.GroupCount,
		&i.TemplateCount,
		&i.MemberCount,
		&i.ProvisionerKeyCount,
	)
	return i, err
}
|
|
|
|
const getOrganizations = `-- name: GetOrganizations :many
SELECT
	id, name, description, created_at, updated_at, is_default, display_name, icon, deleted
FROM
	organizations
WHERE
	-- Optionally include deleted organizations
	deleted = $1
	-- Filter by ids
	AND CASE
		WHEN array_length($2 :: uuid[], 1) > 0 THEN
			id = ANY($2)
		ELSE true
	END
	AND CASE
		WHEN $3::text != '' THEN
			LOWER("name") = LOWER($3)
		ELSE true
	END
`

type GetOrganizationsParams struct {
	Deleted bool        `db:"deleted" json:"deleted"`
	IDs     []uuid.UUID `db:"ids" json:"ids"`
	Name    string      `db:"name" json:"name"`
}

// GetOrganizations lists organizations with the given deleted flag,
// optionally filtered by a set of IDs (empty slice = no filter) and/or a
// case-insensitive name (empty string = no filter).
func (q *sqlQuerier) GetOrganizations(ctx context.Context, arg GetOrganizationsParams) ([]Organization, error) {
	rows, err := q.db.QueryContext(ctx, getOrganizations, arg.Deleted, pq.Array(arg.IDs), arg.Name)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []Organization
	for rows.Next() {
		var i Organization
		if err := rows.Scan(
			&i.ID,
			&i.Name,
			&i.Description,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.IsDefault,
			&i.DisplayName,
			&i.Icon,
			&i.Deleted,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getOrganizationsByUserID = `-- name: GetOrganizationsByUserID :many
SELECT
	id, name, description, created_at, updated_at, is_default, display_name, icon, deleted
FROM
	organizations
WHERE
	-- Optionally provide a filter for deleted organizations.
	CASE WHEN
		$2 :: boolean IS NULL THEN
			true
		ELSE
			deleted = $2
	END AND
	id = ANY(
		SELECT
			organization_id
		FROM
			organization_members
		WHERE
			user_id = $1
	)
`

type GetOrganizationsByUserIDParams struct {
	UserID  uuid.UUID    `db:"user_id" json:"user_id"`
	Deleted sql.NullBool `db:"deleted" json:"deleted"`
}

// GetOrganizationsByUserID lists the organizations the given user is a
// member of. The nullable Deleted parameter optionally filters by deleted
// state; a NULL value includes both deleted and live organizations.
func (q *sqlQuerier) GetOrganizationsByUserID(ctx context.Context, arg GetOrganizationsByUserIDParams) ([]Organization, error) {
	rows, err := q.db.QueryContext(ctx, getOrganizationsByUserID, arg.UserID, arg.Deleted)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []Organization
	for rows.Next() {
		var i Organization
		if err := rows.Scan(
			&i.ID,
			&i.Name,
			&i.Description,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.IsDefault,
			&i.DisplayName,
			&i.Icon,
			&i.Deleted,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const insertOrganization = `-- name: InsertOrganization :one
INSERT INTO
	organizations (id, "name", display_name, description, icon, created_at, updated_at, is_default)
VALUES
	-- If no organizations exist, and this is the first, make it the default.
	($1, $2, $3, $4, $5, $6, $7, (SELECT TRUE FROM organizations LIMIT 1) IS NULL) RETURNING id, name, description, created_at, updated_at, is_default, display_name, icon, deleted
`

type InsertOrganizationParams struct {
	ID          uuid.UUID `db:"id" json:"id"`
	Name        string    `db:"name" json:"name"`
	DisplayName string    `db:"display_name" json:"display_name"`
	Description string    `db:"description" json:"description"`
	Icon        string    `db:"icon" json:"icon"`
	CreatedAt   time.Time `db:"created_at" json:"created_at"`
	UpdatedAt   time.Time `db:"updated_at" json:"updated_at"`
}

// InsertOrganization creates a new organization and returns the inserted
// row. The very first organization inserted is automatically marked as the
// default (is_default = true), computed in SQL.
func (q *sqlQuerier) InsertOrganization(ctx context.Context, arg InsertOrganizationParams) (Organization, error) {
	row := q.db.QueryRowContext(ctx, insertOrganization,
		arg.ID,
		arg.Name,
		arg.DisplayName,
		arg.Description,
		arg.Icon,
		arg.CreatedAt,
		arg.UpdatedAt,
	)
	var i Organization
	err := row.Scan(
		&i.ID,
		&i.Name,
		&i.Description,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.IsDefault,
		&i.DisplayName,
		&i.Icon,
		&i.Deleted,
	)
	return i, err
}
|
|
|
|
// updateOrganization rewrites the mutable metadata columns of a single
// organization identified by id, returning the updated row.
const updateOrganization = `-- name: UpdateOrganization :one
UPDATE
	organizations
SET
	updated_at = $1,
	name = $2,
	display_name = $3,
	description = $4,
	icon = $5
WHERE
	id = $6
RETURNING id, name, description, created_at, updated_at, is_default, display_name, icon, deleted
`

// UpdateOrganizationParams holds the positional arguments for
// UpdateOrganization, in bind order ($1..$6); ID is the row selector.
type UpdateOrganizationParams struct {
	UpdatedAt   time.Time `db:"updated_at" json:"updated_at"`
	Name        string    `db:"name" json:"name"`
	DisplayName string    `db:"display_name" json:"display_name"`
	Description string    `db:"description" json:"description"`
	Icon        string    `db:"icon" json:"icon"`
	ID          uuid.UUID `db:"id" json:"id"`
}

// UpdateOrganization updates an organization's name, display name,
// description, icon, and updated_at, and returns the resulting row.
func (q *sqlQuerier) UpdateOrganization(ctx context.Context, arg UpdateOrganizationParams) (Organization, error) {
	row := q.db.QueryRowContext(ctx, updateOrganization,
		arg.UpdatedAt,
		arg.Name,
		arg.DisplayName,
		arg.Description,
		arg.Icon,
		arg.ID,
	)
	var i Organization
	// Scan order must match the RETURNING column list exactly.
	err := row.Scan(
		&i.ID,
		&i.Name,
		&i.Description,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.IsDefault,
		&i.DisplayName,
		&i.Icon,
		&i.Deleted,
	)
	return i, err
}
|
|
|
|
// updateOrganizationDeletedByID soft-deletes an organization by flipping the
// deleted flag. The `is_default = false` predicate means the default
// organization can never be deleted (the statement silently matches no rows).
const updateOrganizationDeletedByID = `-- name: UpdateOrganizationDeletedByID :exec
UPDATE organizations
SET
	deleted = true,
	updated_at = $1
WHERE
	id = $2 AND
	is_default = false
`

// UpdateOrganizationDeletedByIDParams holds the bind arguments for
// UpdateOrganizationDeletedByID ($1 = updated_at, $2 = id).
type UpdateOrganizationDeletedByIDParams struct {
	UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
	ID        uuid.UUID `db:"id" json:"id"`
}

// UpdateOrganizationDeletedByID soft-deletes the given non-default
// organization. It is an :exec query: no error is returned when the row does
// not exist or is the default organization — the update simply affects 0 rows.
func (q *sqlQuerier) UpdateOrganizationDeletedByID(ctx context.Context, arg UpdateOrganizationDeletedByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateOrganizationDeletedByID, arg.UpdatedAt, arg.ID)
	return err
}
|
|
|
|
// getParameterSchemasByJobID fetches all parameter schemas attached to a
// provisioner job, ordered by their declared index.
const getParameterSchemasByJobID = `-- name: GetParameterSchemasByJobID :many
SELECT
	id, created_at, job_id, name, description, default_source_scheme, default_source_value, allow_override_source, default_destination_scheme, allow_override_destination, default_refresh, redisplay_value, validation_error, validation_condition, validation_type_system, validation_value_type, index
FROM
	parameter_schemas
WHERE
	job_id = $1
ORDER BY
	index
`

// GetParameterSchemasByJobID returns the parameter schemas for the given job
// ID, ordered by index.
func (q *sqlQuerier) GetParameterSchemasByJobID(ctx context.Context, jobID uuid.UUID) ([]ParameterSchema, error) {
	rows, err := q.db.QueryContext(ctx, getParameterSchemasByJobID, jobID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []ParameterSchema
	for rows.Next() {
		var i ParameterSchema
		// Scan order must match the SELECT column list exactly.
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.JobID,
			&i.Name,
			&i.Description,
			&i.DefaultSourceScheme,
			&i.DefaultSourceValue,
			&i.AllowOverrideSource,
			&i.DefaultDestinationScheme,
			&i.AllowOverrideDestination,
			&i.DefaultRefresh,
			&i.RedisplayValue,
			&i.ValidationError,
			&i.ValidationCondition,
			&i.ValidationTypeSystem,
			&i.ValidationValueType,
			&i.Index,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err surfaces any deferred driver error;
	// the deferred Close above is then a no-op. Standard sqlc pattern.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// claimPrebuiltWorkspace atomically reassigns one ready, successfully built
// prebuild (matching the requested preset on the template's active version)
// to a real user, renaming it in the same statement. `FOR UPDATE OF p SKIP
// LOCKED` guarantees two concurrent claims cannot pick the same row.
const claimPrebuiltWorkspace = `-- name: ClaimPrebuiltWorkspace :one
UPDATE workspaces w
SET owner_id = $1::uuid,
	name = $2::text,
	updated_at = NOW()
WHERE w.id IN (
	SELECT p.id
	FROM workspace_prebuilds p
		INNER JOIN workspace_latest_builds b ON b.workspace_id = p.id
		INNER JOIN templates t ON p.template_id = t.id
	WHERE (b.transition = 'start'::workspace_transition
		AND b.job_status IN ('succeeded'::provisioner_job_status))
		-- The prebuilds system should never try to claim a prebuild for an inactive template version.
		-- Nevertheless, this filter is here as a defensive measure:
		AND b.template_version_id = t.active_version_id
		AND p.current_preset_id = $3::uuid
		AND p.ready
		AND NOT t.deleted
	LIMIT 1 FOR UPDATE OF p SKIP LOCKED -- Ensure that a concurrent request will not select the same prebuild.
)
RETURNING w.id, w.name
`

// ClaimPrebuiltWorkspaceParams holds the bind arguments for
// ClaimPrebuiltWorkspace: the new owner, the workspace's new name, and the
// preset the prebuild must have been created from.
type ClaimPrebuiltWorkspaceParams struct {
	NewUserID uuid.UUID `db:"new_user_id" json:"new_user_id"`
	NewName   string    `db:"new_name" json:"new_name"`
	PresetID  uuid.UUID `db:"preset_id" json:"preset_id"`
}

// ClaimPrebuiltWorkspaceRow is the (id, name) of the workspace that was
// claimed.
type ClaimPrebuiltWorkspaceRow struct {
	ID   uuid.UUID `db:"id" json:"id"`
	Name string    `db:"name" json:"name"`
}

// ClaimPrebuiltWorkspace claims at most one eligible prebuilt workspace for
// the given user/preset. When no prebuild is available the row scan yields
// sql.ErrNoRows, which callers are expected to handle.
func (q *sqlQuerier) ClaimPrebuiltWorkspace(ctx context.Context, arg ClaimPrebuiltWorkspaceParams) (ClaimPrebuiltWorkspaceRow, error) {
	row := q.db.QueryRowContext(ctx, claimPrebuiltWorkspace, arg.NewUserID, arg.NewName, arg.PresetID)
	var i ClaimPrebuiltWorkspaceRow
	err := row.Scan(&i.ID, &i.Name)
	return i, err
}
|
|
|
|
// countInProgressPrebuilds counts prebuild builds whose provisioner job is
// still pending/running, grouped by template, template version, transition,
// and preset. Only builds on a template's active version are counted.
const countInProgressPrebuilds = `-- name: CountInProgressPrebuilds :many
SELECT t.id AS template_id, wpb.template_version_id, wpb.transition, COUNT(wpb.transition)::int AS count, wlb.template_version_preset_id as preset_id
FROM workspace_latest_builds wlb
INNER JOIN workspace_prebuild_builds wpb ON wpb.id = wlb.id
-- We only need these counts for active template versions.
-- It doesn't influence whether we create or delete prebuilds
-- for inactive template versions. This is because we never create
-- prebuilds for inactive template versions, we always delete
-- running prebuilds for inactive template versions, and we ignore
-- prebuilds that are still building.
INNER JOIN templates t ON t.active_version_id = wlb.template_version_id
WHERE wlb.job_status IN ('pending'::provisioner_job_status, 'running'::provisioner_job_status)
-- AND NOT t.deleted -- We don't exclude deleted templates because there's no constraint in the DB preventing a soft deletion on a template while workspaces are running.
GROUP BY t.id, wpb.template_version_id, wpb.transition, wlb.template_version_preset_id
`

// CountInProgressPrebuildsRow is one aggregated (template, version,
// transition, preset) bucket with its in-progress build count.
// PresetID is nullable because a build may have no preset attached.
type CountInProgressPrebuildsRow struct {
	TemplateID        uuid.UUID           `db:"template_id" json:"template_id"`
	TemplateVersionID uuid.UUID           `db:"template_version_id" json:"template_version_id"`
	Transition        WorkspaceTransition `db:"transition" json:"transition"`
	Count             int32               `db:"count" json:"count"`
	PresetID          uuid.NullUUID       `db:"preset_id" json:"preset_id"`
}

// CountInProgressPrebuilds returns the number of in-progress prebuilds, grouped by preset ID and transition.
// Prebuild considered in-progress if it's in the "starting", "stopping", or "deleting" state.
func (q *sqlQuerier) CountInProgressPrebuilds(ctx context.Context) ([]CountInProgressPrebuildsRow, error) {
	rows, err := q.db.QueryContext(ctx, countInProgressPrebuilds)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []CountInProgressPrebuildsRow
	for rows.Next() {
		var i CountInProgressPrebuildsRow
		if err := rows.Scan(
			&i.TemplateID,
			&i.TemplateVersionID,
			&i.Transition,
			&i.Count,
			&i.PresetID,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err surfaces any deferred driver error;
	// the deferred Close above is then a no-op. Standard sqlc pattern.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getPrebuildMetrics aggregates, per (template, preset, organization): total
// prebuilds created, how many failed, and how many were claimed (owner moved
// off the hard-coded prebuilds system user). `build_number = 1` restricts the
// count to each workspace's initial creation build.
const getPrebuildMetrics = `-- name: GetPrebuildMetrics :many
SELECT
	t.name as template_name,
	tvp.name as preset_name,
	o.name as organization_name,
	COUNT(*) as created_count,
	COUNT(*) FILTER (WHERE pj.job_status = 'failed'::provisioner_job_status) as failed_count,
	COUNT(*) FILTER (
		WHERE w.owner_id != 'c42fdf75-3097-471c-8c33-fb52454d81c0'::uuid -- The system user responsible for prebuilds.
	) as claimed_count
FROM workspaces w
INNER JOIN workspace_prebuild_builds wpb ON wpb.workspace_id = w.id
INNER JOIN templates t ON t.id = w.template_id
INNER JOIN template_version_presets tvp ON tvp.id = wpb.template_version_preset_id
INNER JOIN provisioner_jobs pj ON pj.id = wpb.job_id
INNER JOIN organizations o ON o.id = w.organization_id
WHERE NOT t.deleted AND wpb.build_number = 1
GROUP BY t.name, tvp.name, o.name
ORDER BY t.name, tvp.name, o.name
`

// GetPrebuildMetricsRow is one aggregated metrics bucket keyed by template,
// preset, and organization name.
type GetPrebuildMetricsRow struct {
	TemplateName     string `db:"template_name" json:"template_name"`
	PresetName       string `db:"preset_name" json:"preset_name"`
	OrganizationName string `db:"organization_name" json:"organization_name"`
	CreatedCount     int64  `db:"created_count" json:"created_count"`
	FailedCount      int64  `db:"failed_count" json:"failed_count"`
	ClaimedCount     int64  `db:"claimed_count" json:"claimed_count"`
}

// GetPrebuildMetrics returns created/failed/claimed prebuild counts grouped
// and sorted by template, preset, and organization name.
func (q *sqlQuerier) GetPrebuildMetrics(ctx context.Context) ([]GetPrebuildMetricsRow, error) {
	rows, err := q.db.QueryContext(ctx, getPrebuildMetrics)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetPrebuildMetricsRow
	for rows.Next() {
		var i GetPrebuildMetricsRow
		if err := rows.Scan(
			&i.TemplateName,
			&i.PresetName,
			&i.OrganizationName,
			&i.CreatedCount,
			&i.FailedCount,
			&i.ClaimedCount,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err surfaces any deferred driver error;
	// the deferred Close above is then a no-op. Standard sqlc pattern.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getPresetsAtFailureLimit finds presets whose last $1 (hard_limit) prebuild
// creation builds ALL failed: rows are ranked per preset by created_at
// descending, the newest $1 are kept, only failed ones survive the WHERE, and
// the HAVING clause requires the full window to be failures. The hard-coded
// UUID is the prebuilds system user, which scopes the query to prebuild-owned
// workspaces.
const getPresetsAtFailureLimit = `-- name: GetPresetsAtFailureLimit :many
WITH filtered_builds AS (
	-- Only select builds which are for prebuild creations
	SELECT wlb.template_version_id, wlb.created_at, tvp.id AS preset_id, wlb.job_status, tvp.desired_instances
	FROM template_version_presets tvp
		INNER JOIN workspace_latest_builds wlb ON wlb.template_version_preset_id = tvp.id
		INNER JOIN workspaces w ON wlb.workspace_id = w.id
		INNER JOIN template_versions tv ON wlb.template_version_id = tv.id
		INNER JOIN templates t ON tv.template_id = t.id AND t.active_version_id = tv.id
	WHERE tvp.desired_instances IS NOT NULL -- Consider only presets that have a prebuild configuration.
		AND wlb.transition = 'start'::workspace_transition
		AND w.owner_id = 'c42fdf75-3097-471c-8c33-fb52454d81c0'
),
time_sorted_builds AS (
	-- Group builds by preset, then sort each group by created_at.
	SELECT fb.template_version_id, fb.created_at, fb.preset_id, fb.job_status, fb.desired_instances,
		ROW_NUMBER() OVER (PARTITION BY fb.preset_id ORDER BY fb.created_at DESC) as rn
	FROM filtered_builds fb
)
SELECT
	tsb.template_version_id,
	tsb.preset_id
FROM time_sorted_builds tsb
WHERE tsb.rn <= $1::bigint
	AND tsb.job_status = 'failed'::provisioner_job_status
GROUP BY tsb.template_version_id, tsb.preset_id
HAVING COUNT(*) = $1::bigint
`

// GetPresetsAtFailureLimitRow identifies a preset (and its template version)
// that has reached the hard failure limit.
type GetPresetsAtFailureLimitRow struct {
	TemplateVersionID uuid.UUID `db:"template_version_id" json:"template_version_id"`
	PresetID          uuid.UUID `db:"preset_id" json:"preset_id"`
}

// GetPresetsAtFailureLimit groups workspace builds by preset ID.
// Each preset is associated with exactly one template version ID.
// For each preset, the query checks the last hard_limit builds.
// If all of them failed, the preset is considered to have hit the hard failure limit.
// The query returns a list of preset IDs that have reached this failure threshold.
// Only active template versions with configured presets are considered.
// For each preset, check the last hard_limit builds.
// If all of them failed, the preset is considered to have hit the hard failure limit.
func (q *sqlQuerier) GetPresetsAtFailureLimit(ctx context.Context, hardLimit int64) ([]GetPresetsAtFailureLimitRow, error) {
	rows, err := q.db.QueryContext(ctx, getPresetsAtFailureLimit, hardLimit)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetPresetsAtFailureLimitRow
	for rows.Next() {
		var i GetPresetsAtFailureLimitRow
		if err := rows.Scan(&i.TemplateVersionID, &i.PresetID); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err surfaces any deferred driver error;
	// the deferred Close above is then a no-op. Standard sqlc pattern.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getPresetsBackoff selects presets that should be backed off: within the
// lookback window ($1), if any of a preset's N most recent prebuild creation
// builds failed (N = desired_instances), the preset is returned along with
// its total failure count in the window and the time of its most recent
// failed build. The hard-coded UUID is the prebuilds system user.
const getPresetsBackoff = `-- name: GetPresetsBackoff :many
WITH filtered_builds AS (
	-- Only select builds which are for prebuild creations
	SELECT wlb.template_version_id, wlb.created_at, tvp.id AS preset_id, wlb.job_status, tvp.desired_instances
	FROM template_version_presets tvp
		INNER JOIN workspace_latest_builds wlb ON wlb.template_version_preset_id = tvp.id
		INNER JOIN workspaces w ON wlb.workspace_id = w.id
		INNER JOIN template_versions tv ON wlb.template_version_id = tv.id
		INNER JOIN templates t ON tv.template_id = t.id AND t.active_version_id = tv.id
	WHERE tvp.desired_instances IS NOT NULL -- Consider only presets that have a prebuild configuration.
		AND wlb.transition = 'start'::workspace_transition
		AND w.owner_id = 'c42fdf75-3097-471c-8c33-fb52454d81c0'
		AND NOT t.deleted
),
time_sorted_builds AS (
	-- Group builds by preset, then sort each group by created_at.
	SELECT fb.template_version_id, fb.created_at, fb.preset_id, fb.job_status, fb.desired_instances,
		ROW_NUMBER() OVER (PARTITION BY fb.preset_id ORDER BY fb.created_at DESC) as rn
	FROM filtered_builds fb
),
failed_count AS (
	-- Count failed builds per preset in the given period
	SELECT preset_id, COUNT(*) AS num_failed
	FROM filtered_builds
	WHERE job_status = 'failed'::provisioner_job_status
		AND created_at >= $1::timestamptz
	GROUP BY preset_id
)
SELECT
	tsb.template_version_id,
	tsb.preset_id,
	COALESCE(fc.num_failed, 0)::int  AS num_failed,
	MAX(tsb.created_at)::timestamptz AS last_build_at
FROM time_sorted_builds tsb
	LEFT JOIN failed_count fc ON fc.preset_id = tsb.preset_id
WHERE tsb.rn <= tsb.desired_instances -- Fetch the last N builds, where N is the number of desired instances; if any fail, we backoff
	AND tsb.job_status = 'failed'::provisioner_job_status
	AND created_at >= $1::timestamptz
GROUP BY tsb.template_version_id, tsb.preset_id, fc.num_failed
`

// GetPresetsBackoffRow describes one preset to back off on: its template
// version, its window failure count, and the timestamp of the most recent
// failed build (used to compute the backoff duration downstream).
type GetPresetsBackoffRow struct {
	TemplateVersionID uuid.UUID `db:"template_version_id" json:"template_version_id"`
	PresetID          uuid.UUID `db:"preset_id" json:"preset_id"`
	NumFailed         int32     `db:"num_failed" json:"num_failed"`
	LastBuildAt       time.Time `db:"last_build_at" json:"last_build_at"`
}

// GetPresetsBackoff groups workspace builds by preset ID.
// Each preset is associated with exactly one template version ID.
// For each group, the query checks up to N of the most recent jobs that occurred within the
// lookback period, where N equals the number of desired instances for the corresponding preset.
// If at least one of the job within a group has failed, we should backoff on the corresponding preset ID.
// Query returns a list of preset IDs for which we should backoff.
// Only active template versions with configured presets are considered.
// We also return the number of failed workspace builds that occurred during the lookback period.
//
// NOTE:
// - To **decide whether to back off**, we look at up to the N most recent builds (within the defined lookback period).
// - To **calculate the number of failed builds**, we consider all builds within the defined lookback period.
//
// The number of failed builds is used downstream to determine the backoff duration.
func (q *sqlQuerier) GetPresetsBackoff(ctx context.Context, lookback time.Time) ([]GetPresetsBackoffRow, error) {
	rows, err := q.db.QueryContext(ctx, getPresetsBackoff, lookback)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetPresetsBackoffRow
	for rows.Next() {
		var i GetPresetsBackoffRow
		if err := rows.Scan(
			&i.TemplateVersionID,
			&i.PresetID,
			&i.NumFailed,
			&i.LastBuildAt,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err surfaces any deferred driver error;
	// the deferred Close above is then a no-op. Standard sqlc pattern.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getRunningPrebuiltWorkspaces lists prebuilt workspaces whose latest build
// is a successful 'start' — i.e. prebuilds that are currently running.
const getRunningPrebuiltWorkspaces = `-- name: GetRunningPrebuiltWorkspaces :many
SELECT
	p.id,
	p.name,
	p.template_id,
	b.template_version_id,
	p.current_preset_id AS current_preset_id,
	p.ready,
	p.created_at
FROM workspace_prebuilds p
	INNER JOIN workspace_latest_builds b ON b.workspace_id = p.id
WHERE (b.transition = 'start'::workspace_transition
	AND b.job_status = 'succeeded'::provisioner_job_status)
`

// GetRunningPrebuiltWorkspacesRow is one running prebuild. CurrentPresetID
// is nullable; Ready reflects the prebuild's readiness flag.
type GetRunningPrebuiltWorkspacesRow struct {
	ID                uuid.UUID     `db:"id" json:"id"`
	Name              string        `db:"name" json:"name"`
	TemplateID        uuid.UUID     `db:"template_id" json:"template_id"`
	TemplateVersionID uuid.UUID     `db:"template_version_id" json:"template_version_id"`
	CurrentPresetID   uuid.NullUUID `db:"current_preset_id" json:"current_preset_id"`
	Ready             bool          `db:"ready" json:"ready"`
	CreatedAt         time.Time     `db:"created_at" json:"created_at"`
}

// GetRunningPrebuiltWorkspaces returns every prebuilt workspace whose latest
// build is a succeeded start transition.
func (q *sqlQuerier) GetRunningPrebuiltWorkspaces(ctx context.Context) ([]GetRunningPrebuiltWorkspacesRow, error) {
	rows, err := q.db.QueryContext(ctx, getRunningPrebuiltWorkspaces)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetRunningPrebuiltWorkspacesRow
	for rows.Next() {
		var i GetRunningPrebuiltWorkspacesRow
		if err := rows.Scan(
			&i.ID,
			&i.Name,
			&i.TemplateID,
			&i.TemplateVersionID,
			&i.CurrentPresetID,
			&i.Ready,
			&i.CreatedAt,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err surfaces any deferred driver error;
	// the deferred Close above is then a no-op. Standard sqlc pattern.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getTemplatePresetsWithPrebuilds joins templates, their versions, presets
// with a prebuild configuration (desired_instances NOT NULL), and the owning
// organization. $1 optionally narrows the result to one template; passing a
// NULL uuid returns rows for all templates.
const getTemplatePresetsWithPrebuilds = `-- name: GetTemplatePresetsWithPrebuilds :many
SELECT
	t.id                        AS template_id,
	t.name                      AS template_name,
	o.id                        AS organization_id,
	o.name                      AS organization_name,
	tv.id                       AS template_version_id,
	tv.name                     AS template_version_name,
	tv.id = t.active_version_id AS using_active_version,
	tvp.id,
	tvp.name,
	tvp.desired_instances       AS desired_instances,
	tvp.scheduling_timezone,
	tvp.invalidate_after_secs   AS ttl,
	tvp.prebuild_status,
	t.deleted,
	t.deprecated != ''          AS deprecated
FROM templates t
	INNER JOIN template_versions tv ON tv.template_id = t.id
	INNER JOIN template_version_presets tvp ON tvp.template_version_id = tv.id
	INNER JOIN organizations o ON o.id = t.organization_id
WHERE tvp.desired_instances IS NOT NULL -- Consider only presets that have a prebuild configuration.
	-- AND NOT t.deleted -- We don't exclude deleted templates because there's no constraint in the DB preventing a soft deletion on a template while workspaces are running.
	AND (t.id = $1::uuid OR $1 IS NULL)
`

// GetTemplatePresetsWithPrebuildsRow is one (template, version, preset)
// combination. Deleted/Deprecated are surfaced so callers can decide whether
// to tear prebuilds down rather than being filtered out in SQL.
type GetTemplatePresetsWithPrebuildsRow struct {
	TemplateID          uuid.UUID      `db:"template_id" json:"template_id"`
	TemplateName        string         `db:"template_name" json:"template_name"`
	OrganizationID      uuid.UUID      `db:"organization_id" json:"organization_id"`
	OrganizationName    string         `db:"organization_name" json:"organization_name"`
	TemplateVersionID   uuid.UUID      `db:"template_version_id" json:"template_version_id"`
	TemplateVersionName string         `db:"template_version_name" json:"template_version_name"`
	UsingActiveVersion  bool           `db:"using_active_version" json:"using_active_version"`
	ID                  uuid.UUID      `db:"id" json:"id"`
	Name                string         `db:"name" json:"name"`
	DesiredInstances    sql.NullInt32  `db:"desired_instances" json:"desired_instances"`
	SchedulingTimezone  string         `db:"scheduling_timezone" json:"scheduling_timezone"`
	Ttl                 sql.NullInt32  `db:"ttl" json:"ttl"`
	PrebuildStatus      PrebuildStatus `db:"prebuild_status" json:"prebuild_status"`
	Deleted             bool           `db:"deleted" json:"deleted"`
	Deprecated          bool           `db:"deprecated" json:"deprecated"`
}

// GetTemplatePresetsWithPrebuilds retrieves template versions with configured presets and prebuilds.
// It also returns the number of desired instances for each preset.
// If template_id is specified, only template versions associated with that template will be returned.
func (q *sqlQuerier) GetTemplatePresetsWithPrebuilds(ctx context.Context, templateID uuid.NullUUID) ([]GetTemplatePresetsWithPrebuildsRow, error) {
	rows, err := q.db.QueryContext(ctx, getTemplatePresetsWithPrebuilds, templateID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetTemplatePresetsWithPrebuildsRow
	for rows.Next() {
		var i GetTemplatePresetsWithPrebuildsRow
		if err := rows.Scan(
			&i.TemplateID,
			&i.TemplateName,
			&i.OrganizationID,
			&i.OrganizationName,
			&i.TemplateVersionID,
			&i.TemplateVersionName,
			&i.UsingActiveVersion,
			&i.ID,
			&i.Name,
			&i.DesiredInstances,
			&i.SchedulingTimezone,
			&i.Ttl,
			&i.PrebuildStatus,
			&i.Deleted,
			&i.Deprecated,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err surfaces any deferred driver error;
	// the deferred Close above is then a no-op. Standard sqlc pattern.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getActivePresetPrebuildSchedules fetches cron schedules for presets whose
// template version is the template's active version, and whose template is
// neither soft-deleted nor deprecated.
const getActivePresetPrebuildSchedules = `-- name: GetActivePresetPrebuildSchedules :many
SELECT
	tvpps.id, tvpps.preset_id, tvpps.cron_expression, tvpps.desired_instances
FROM
	template_version_preset_prebuild_schedules tvpps
	INNER JOIN template_version_presets tvp ON tvp.id = tvpps.preset_id
	INNER JOIN template_versions tv ON tv.id = tvp.template_version_id
	INNER JOIN templates t ON t.id = tv.template_id
WHERE
	-- Template version is active, and template is not deleted or deprecated
	tv.id = t.active_version_id
	AND NOT t.deleted
	AND t.deprecated = ''
`

// GetActivePresetPrebuildSchedules returns every prebuild cron schedule
// attached to a preset on an active, non-deleted, non-deprecated template.
func (q *sqlQuerier) GetActivePresetPrebuildSchedules(ctx context.Context) ([]TemplateVersionPresetPrebuildSchedule, error) {
	rows, err := q.db.QueryContext(ctx, getActivePresetPrebuildSchedules)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []TemplateVersionPresetPrebuildSchedule
	for rows.Next() {
		var i TemplateVersionPresetPrebuildSchedule
		if err := rows.Scan(
			&i.ID,
			&i.PresetID,
			&i.CronExpression,
			&i.DesiredInstances,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err surfaces any deferred driver error;
	// the deferred Close above is then a no-op. Standard sqlc pattern.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getPresetByID fetches a single preset by its ID, joined with its template
// version to also expose the owning template and organization.
const getPresetByID = `-- name: GetPresetByID :one
SELECT tvp.id, tvp.template_version_id, tvp.name, tvp.created_at, tvp.desired_instances, tvp.invalidate_after_secs, tvp.prebuild_status, tvp.scheduling_timezone, tv.template_id, tv.organization_id FROM
	template_version_presets tvp
	INNER JOIN template_versions tv ON tvp.template_version_id = tv.id
WHERE tvp.id = $1
`

// GetPresetByIDRow is a preset plus its template/organization context.
// TemplateID is nullable because template_versions.template_id can be NULL.
type GetPresetByIDRow struct {
	ID                  uuid.UUID      `db:"id" json:"id"`
	TemplateVersionID   uuid.UUID      `db:"template_version_id" json:"template_version_id"`
	Name                string         `db:"name" json:"name"`
	CreatedAt           time.Time      `db:"created_at" json:"created_at"`
	DesiredInstances    sql.NullInt32  `db:"desired_instances" json:"desired_instances"`
	InvalidateAfterSecs sql.NullInt32  `db:"invalidate_after_secs" json:"invalidate_after_secs"`
	PrebuildStatus      PrebuildStatus `db:"prebuild_status" json:"prebuild_status"`
	SchedulingTimezone  string         `db:"scheduling_timezone" json:"scheduling_timezone"`
	TemplateID          uuid.NullUUID  `db:"template_id" json:"template_id"`
	OrganizationID      uuid.UUID      `db:"organization_id" json:"organization_id"`
}

// GetPresetByID returns the preset with the given ID together with its
// template version's template and organization IDs.
func (q *sqlQuerier) GetPresetByID(ctx context.Context, presetID uuid.UUID) (GetPresetByIDRow, error) {
	row := q.db.QueryRowContext(ctx, getPresetByID, presetID)
	var i GetPresetByIDRow
	err := row.Scan(
		&i.ID,
		&i.TemplateVersionID,
		&i.Name,
		&i.CreatedAt,
		&i.DesiredInstances,
		&i.InvalidateAfterSecs,
		&i.PrebuildStatus,
		&i.SchedulingTimezone,
		&i.TemplateID,
		&i.OrganizationID,
	)
	return i, err
}
|
|
|
|
// getPresetByWorkspaceBuildID resolves the preset a specific workspace build
// was created from, via workspace_builds.template_version_preset_id.
const getPresetByWorkspaceBuildID = `-- name: GetPresetByWorkspaceBuildID :one
SELECT
	template_version_presets.id, template_version_presets.template_version_id, template_version_presets.name, template_version_presets.created_at, template_version_presets.desired_instances, template_version_presets.invalidate_after_secs, template_version_presets.prebuild_status, template_version_presets.scheduling_timezone
FROM
	template_version_presets
	INNER JOIN workspace_builds ON workspace_builds.template_version_preset_id = template_version_presets.id
WHERE
	workspace_builds.id = $1
`

// GetPresetByWorkspaceBuildID returns the preset used by the given workspace
// build; the inner join means sql.ErrNoRows when the build has no preset.
func (q *sqlQuerier) GetPresetByWorkspaceBuildID(ctx context.Context, workspaceBuildID uuid.UUID) (TemplateVersionPreset, error) {
	row := q.db.QueryRowContext(ctx, getPresetByWorkspaceBuildID, workspaceBuildID)
	var i TemplateVersionPreset
	err := row.Scan(
		&i.ID,
		&i.TemplateVersionID,
		&i.Name,
		&i.CreatedAt,
		&i.DesiredInstances,
		&i.InvalidateAfterSecs,
		&i.PrebuildStatus,
		&i.SchedulingTimezone,
	)
	return i, err
}
|
|
|
|
// getPresetParametersByPresetID fetches all parameters belonging to one
// preset.
const getPresetParametersByPresetID = `-- name: GetPresetParametersByPresetID :many
SELECT
	tvpp.id, tvpp.template_version_preset_id, tvpp.name, tvpp.value
FROM
	template_version_preset_parameters tvpp
WHERE
	tvpp.template_version_preset_id = $1
`

// GetPresetParametersByPresetID returns every name/value parameter attached
// to the given preset.
func (q *sqlQuerier) GetPresetParametersByPresetID(ctx context.Context, presetID uuid.UUID) ([]TemplateVersionPresetParameter, error) {
	rows, err := q.db.QueryContext(ctx, getPresetParametersByPresetID, presetID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []TemplateVersionPresetParameter
	for rows.Next() {
		var i TemplateVersionPresetParameter
		if err := rows.Scan(
			&i.ID,
			&i.TemplateVersionPresetID,
			&i.Name,
			&i.Value,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err surfaces any deferred driver error;
	// the deferred Close above is then a no-op. Standard sqlc pattern.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getPresetParametersByTemplateVersionID fetches the parameters of every
// preset defined on a template version (all presets flattened together).
const getPresetParametersByTemplateVersionID = `-- name: GetPresetParametersByTemplateVersionID :many
SELECT
	template_version_preset_parameters.id, template_version_preset_parameters.template_version_preset_id, template_version_preset_parameters.name, template_version_preset_parameters.value
FROM
	template_version_preset_parameters
	INNER JOIN template_version_presets ON template_version_preset_parameters.template_version_preset_id = template_version_presets.id
WHERE
	template_version_presets.template_version_id = $1
`

// GetPresetParametersByTemplateVersionID returns the parameters of all
// presets belonging to the given template version. Rows carry their
// TemplateVersionPresetID so callers can group them back per preset.
func (q *sqlQuerier) GetPresetParametersByTemplateVersionID(ctx context.Context, templateVersionID uuid.UUID) ([]TemplateVersionPresetParameter, error) {
	rows, err := q.db.QueryContext(ctx, getPresetParametersByTemplateVersionID, templateVersionID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []TemplateVersionPresetParameter
	for rows.Next() {
		var i TemplateVersionPresetParameter
		if err := rows.Scan(
			&i.ID,
			&i.TemplateVersionPresetID,
			&i.Name,
			&i.Value,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err surfaces any deferred driver error;
	// the deferred Close above is then a no-op. Standard sqlc pattern.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getPresetsByTemplateVersionID fetches every preset defined on a template
// version.
const getPresetsByTemplateVersionID = `-- name: GetPresetsByTemplateVersionID :many
SELECT
	id, template_version_id, name, created_at, desired_instances, invalidate_after_secs, prebuild_status, scheduling_timezone
FROM
	template_version_presets
WHERE
	template_version_id = $1
`

// GetPresetsByTemplateVersionID returns all presets for the given template
// version (no ordering is imposed by the query).
func (q *sqlQuerier) GetPresetsByTemplateVersionID(ctx context.Context, templateVersionID uuid.UUID) ([]TemplateVersionPreset, error) {
	rows, err := q.db.QueryContext(ctx, getPresetsByTemplateVersionID, templateVersionID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []TemplateVersionPreset
	for rows.Next() {
		var i TemplateVersionPreset
		if err := rows.Scan(
			&i.ID,
			&i.TemplateVersionID,
			&i.Name,
			&i.CreatedAt,
			&i.DesiredInstances,
			&i.InvalidateAfterSecs,
			&i.PrebuildStatus,
			&i.SchedulingTimezone,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err surfaces any deferred driver error;
	// the deferred Close above is then a no-op. Standard sqlc pattern.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// insertPreset creates one template version preset. prebuild_status is not
// an insert column; its value in the RETURNING row comes from the column's
// database default.
const insertPreset = `-- name: InsertPreset :one
INSERT INTO template_version_presets (
	id,
	template_version_id,
	name,
	created_at,
	desired_instances,
	invalidate_after_secs,
	scheduling_timezone
)
VALUES (
	$1,
	$2,
	$3,
	$4,
	$5,
	$6,
	$7
) RETURNING id, template_version_id, name, created_at, desired_instances, invalidate_after_secs, prebuild_status, scheduling_timezone
`

// InsertPresetParams holds the positional arguments for InsertPreset in bind
// order ($1..$7). DesiredInstances/InvalidateAfterSecs are nullable: a NULL
// desired_instances means the preset has no prebuild configuration.
type InsertPresetParams struct {
	ID                  uuid.UUID     `db:"id" json:"id"`
	TemplateVersionID   uuid.UUID     `db:"template_version_id" json:"template_version_id"`
	Name                string        `db:"name" json:"name"`
	CreatedAt           time.Time     `db:"created_at" json:"created_at"`
	DesiredInstances    sql.NullInt32 `db:"desired_instances" json:"desired_instances"`
	InvalidateAfterSecs sql.NullInt32 `db:"invalidate_after_secs" json:"invalidate_after_secs"`
	SchedulingTimezone  string        `db:"scheduling_timezone" json:"scheduling_timezone"`
}

// InsertPreset inserts a preset and returns the created row, including the
// database-assigned prebuild_status.
func (q *sqlQuerier) InsertPreset(ctx context.Context, arg InsertPresetParams) (TemplateVersionPreset, error) {
	row := q.db.QueryRowContext(ctx, insertPreset,
		arg.ID,
		arg.TemplateVersionID,
		arg.Name,
		arg.CreatedAt,
		arg.DesiredInstances,
		arg.InvalidateAfterSecs,
		arg.SchedulingTimezone,
	)
	var i TemplateVersionPreset
	// Scan order must match the RETURNING column list exactly.
	err := row.Scan(
		&i.ID,
		&i.TemplateVersionID,
		&i.Name,
		&i.CreatedAt,
		&i.DesiredInstances,
		&i.InvalidateAfterSecs,
		&i.PrebuildStatus,
		&i.SchedulingTimezone,
	)
	return i, err
}
|
|
|
|
// insertPresetParameters bulk-inserts parameters for one preset by zipping
// two equal-length arrays with unnest: row k gets (names[k], values[k]).
// Behavior when the arrays differ in length follows Postgres' parallel
// unnest semantics (shorter array padded with NULLs) — callers are expected
// to pass equal-length slices.
const insertPresetParameters = `-- name: InsertPresetParameters :many
INSERT INTO
	template_version_preset_parameters (template_version_preset_id, name, value)
SELECT
	$1,
	unnest($2 :: TEXT[]),
	unnest($3 :: TEXT[])
RETURNING id, template_version_preset_id, name, value
`

// InsertPresetParametersParams holds the preset ID plus the parallel Names
// and Values slices to insert; Names[k] pairs with Values[k].
type InsertPresetParametersParams struct {
	TemplateVersionPresetID uuid.UUID `db:"template_version_preset_id" json:"template_version_preset_id"`
	Names                   []string  `db:"names" json:"names"`
	Values                  []string  `db:"values" json:"values"`
}

// InsertPresetParameters inserts all given name/value pairs for a preset in
// one statement and returns the created rows. Slices are bound through
// pq.Array so the driver transmits them as Postgres arrays.
func (q *sqlQuerier) InsertPresetParameters(ctx context.Context, arg InsertPresetParametersParams) ([]TemplateVersionPresetParameter, error) {
	rows, err := q.db.QueryContext(ctx, insertPresetParameters, arg.TemplateVersionPresetID, pq.Array(arg.Names), pq.Array(arg.Values))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []TemplateVersionPresetParameter
	for rows.Next() {
		var i TemplateVersionPresetParameter
		if err := rows.Scan(
			&i.ID,
			&i.TemplateVersionPresetID,
			&i.Name,
			&i.Value,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err surfaces any deferred driver error;
	// the deferred Close above is then a no-op. Standard sqlc pattern.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// insertPresetPrebuildSchedule inserts a single prebuild schedule for a preset
// and returns the created row.
const insertPresetPrebuildSchedule = `-- name: InsertPresetPrebuildSchedule :one
INSERT INTO template_version_preset_prebuild_schedules (
	preset_id,
	cron_expression,
	desired_instances
)
VALUES (
	$1,
	$2,
	$3
) RETURNING id, preset_id, cron_expression, desired_instances
`

// InsertPresetPrebuildScheduleParams holds the arguments for
// InsertPresetPrebuildSchedule.
type InsertPresetPrebuildScheduleParams struct {
	PresetID         uuid.UUID `db:"preset_id" json:"preset_id"`
	CronExpression   string    `db:"cron_expression" json:"cron_expression"`
	DesiredInstances int32     `db:"desired_instances" json:"desired_instances"`
}

// InsertPresetPrebuildSchedule inserts a prebuild schedule row for a preset
// and returns the created row (including its generated ID).
func (q *sqlQuerier) InsertPresetPrebuildSchedule(ctx context.Context, arg InsertPresetPrebuildScheduleParams) (TemplateVersionPresetPrebuildSchedule, error) {
	row := q.db.QueryRowContext(ctx, insertPresetPrebuildSchedule, arg.PresetID, arg.CronExpression, arg.DesiredInstances)
	var i TemplateVersionPresetPrebuildSchedule
	err := row.Scan(
		&i.ID,
		&i.PresetID,
		&i.CronExpression,
		&i.DesiredInstances,
	)
	return i, err
}
|
|
|
|
// updatePresetPrebuildStatus sets the prebuild_status column of a single
// template version preset.
const updatePresetPrebuildStatus = `-- name: UpdatePresetPrebuildStatus :exec
UPDATE template_version_presets
SET prebuild_status = $1
WHERE id = $2
`

// UpdatePresetPrebuildStatusParams holds the arguments for
// UpdatePresetPrebuildStatus.
type UpdatePresetPrebuildStatusParams struct {
	Status   PrebuildStatus `db:"status" json:"status"`
	PresetID uuid.UUID      `db:"preset_id" json:"preset_id"`
}

// UpdatePresetPrebuildStatus updates the prebuild status of the preset with
// the given ID. It does not report whether a row was actually matched.
func (q *sqlQuerier) UpdatePresetPrebuildStatus(ctx context.Context, arg UpdatePresetPrebuildStatusParams) error {
	_, err := q.db.ExecContext(ctx, updatePresetPrebuildStatus, arg.Status, arg.PresetID)
	return err
}
|
|
|
|
// deleteOldProvisionerDaemons removes daemons that are either older than 7
// days with no recorded activity, or whose last activity is older than 7 days.
const deleteOldProvisionerDaemons = `-- name: DeleteOldProvisionerDaemons :exec
DELETE FROM provisioner_daemons WHERE (
	(created_at < (NOW() - INTERVAL '7 days') AND last_seen_at IS NULL) OR
	(last_seen_at IS NOT NULL AND last_seen_at < (NOW() - INTERVAL '7 days'))
)
`

// Delete provisioner daemons that have been created at least a week ago
// and have not connected to coderd since a week.
// A provisioner daemon with "zeroed" last_seen_at column indicates possible
// connectivity issues (no provisioner daemon activity since registration).
func (q *sqlQuerier) DeleteOldProvisionerDaemons(ctx context.Context) error {
	_, err := q.db.ExecContext(ctx, deleteOldProvisionerDaemons)
	return err
}
|
|
|
|
// getEligibleProvisionerDaemonsByProvisionerJobIDs pairs each requested job
// with every daemon in the same organization that supports the job's
// provisioner type and whose tagset contains the job's tags.
const getEligibleProvisionerDaemonsByProvisionerJobIDs = `-- name: GetEligibleProvisionerDaemonsByProvisionerJobIDs :many
SELECT DISTINCT
	provisioner_jobs.id as job_id, provisioner_daemons.id, provisioner_daemons.created_at, provisioner_daemons.name, provisioner_daemons.provisioners, provisioner_daemons.replica_id, provisioner_daemons.tags, provisioner_daemons.last_seen_at, provisioner_daemons.version, provisioner_daemons.api_version, provisioner_daemons.organization_id, provisioner_daemons.key_id
FROM
	provisioner_jobs
JOIN
	provisioner_daemons ON provisioner_daemons.organization_id = provisioner_jobs.organization_id
	AND provisioner_tagset_contains(provisioner_daemons.tags::tagset, provisioner_jobs.tags::tagset)
	AND provisioner_jobs.provisioner = ANY(provisioner_daemons.provisioners)
WHERE
	provisioner_jobs.id = ANY($1 :: uuid[])
`

// GetEligibleProvisionerDaemonsByProvisionerJobIDsRow is one (job, eligible
// daemon) pairing; a job appears once per matching daemon.
type GetEligibleProvisionerDaemonsByProvisionerJobIDsRow struct {
	JobID             uuid.UUID         `db:"job_id" json:"job_id"`
	ProvisionerDaemon ProvisionerDaemon `db:"provisioner_daemon" json:"provisioner_daemon"`
}

// GetEligibleProvisionerDaemonsByProvisionerJobIDs returns, for each given job
// ID, the daemons that could run that job (same org, matching tags and
// provisioner type). Jobs with no eligible daemon produce no rows.
func (q *sqlQuerier) GetEligibleProvisionerDaemonsByProvisionerJobIDs(ctx context.Context, provisionerJobIds []uuid.UUID) ([]GetEligibleProvisionerDaemonsByProvisionerJobIDsRow, error) {
	rows, err := q.db.QueryContext(ctx, getEligibleProvisionerDaemonsByProvisionerJobIDs, pq.Array(provisionerJobIds))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetEligibleProvisionerDaemonsByProvisionerJobIDsRow
	for rows.Next() {
		var i GetEligibleProvisionerDaemonsByProvisionerJobIDsRow
		if err := rows.Scan(
			&i.JobID,
			&i.ProvisionerDaemon.ID,
			&i.ProvisionerDaemon.CreatedAt,
			&i.ProvisionerDaemon.Name,
			pq.Array(&i.ProvisionerDaemon.Provisioners),
			&i.ProvisionerDaemon.ReplicaID,
			&i.ProvisionerDaemon.Tags,
			&i.ProvisionerDaemon.LastSeenAt,
			&i.ProvisionerDaemon.Version,
			&i.ProvisionerDaemon.APIVersion,
			&i.ProvisionerDaemon.OrganizationID,
			&i.ProvisionerDaemon.KeyID,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getProvisionerDaemons selects every provisioner daemon, unfiltered.
const getProvisionerDaemons = `-- name: GetProvisionerDaemons :many
SELECT
	id, created_at, name, provisioners, replica_id, tags, last_seen_at, version, api_version, organization_id, key_id
FROM
	provisioner_daemons
`

// GetProvisionerDaemons returns all provisioner daemons across all
// organizations, in unspecified order.
func (q *sqlQuerier) GetProvisionerDaemons(ctx context.Context) ([]ProvisionerDaemon, error) {
	rows, err := q.db.QueryContext(ctx, getProvisionerDaemons)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []ProvisionerDaemon
	for rows.Next() {
		var i ProvisionerDaemon
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.Name,
			pq.Array(&i.Provisioners),
			&i.ReplicaID,
			&i.Tags,
			&i.LastSeenAt,
			&i.Version,
			&i.APIVersion,
			&i.OrganizationID,
			&i.KeyID,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getProvisionerDaemonsByOrganization selects an organization's daemons,
// optionally restricted to those whose tags contain a wanted tagset.
const getProvisionerDaemonsByOrganization = `-- name: GetProvisionerDaemonsByOrganization :many
SELECT
	id, created_at, name, provisioners, replica_id, tags, last_seen_at, version, api_version, organization_id, key_id
FROM
	provisioner_daemons
WHERE
	-- This is the original search criteria:
	organization_id = $1 :: uuid
	AND
	-- adding support for searching by tags:
	($2 :: tagset = 'null' :: tagset OR provisioner_tagset_contains(provisioner_daemons.tags::tagset, $2::tagset))
`

// GetProvisionerDaemonsByOrganizationParams holds the arguments for
// GetProvisionerDaemonsByOrganization. A 'null' WantTags tagset disables the
// tag filter entirely (see the SQL above).
type GetProvisionerDaemonsByOrganizationParams struct {
	OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
	WantTags       StringMap `db:"want_tags" json:"want_tags"`
}

// GetProvisionerDaemonsByOrganization returns the daemons of one organization
// whose tagsets contain WantTags (or all of them when WantTags is 'null').
func (q *sqlQuerier) GetProvisionerDaemonsByOrganization(ctx context.Context, arg GetProvisionerDaemonsByOrganizationParams) ([]ProvisionerDaemon, error) {
	rows, err := q.db.QueryContext(ctx, getProvisionerDaemonsByOrganization, arg.OrganizationID, arg.WantTags)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []ProvisionerDaemon
	for rows.Next() {
		var i ProvisionerDaemon
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.Name,
			pq.Array(&i.Provisioners),
			&i.ReplicaID,
			&i.Tags,
			&i.LastSeenAt,
			&i.Version,
			&i.APIVersion,
			&i.OrganizationID,
			&i.KeyID,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getProvisionerDaemonsWithStatusByOrganization selects an organization's
// daemons enriched with a derived status (offline/busy/idle), the daemon's
// key name, and details of the current (incomplete) and most recently
// completed jobs, including template metadata resolved either from the job
// input directly or via the associated workspace build.
const getProvisionerDaemonsWithStatusByOrganization = `-- name: GetProvisionerDaemonsWithStatusByOrganization :many
SELECT
	pd.id, pd.created_at, pd.name, pd.provisioners, pd.replica_id, pd.tags, pd.last_seen_at, pd.version, pd.api_version, pd.organization_id, pd.key_id,
	CASE
		WHEN pd.last_seen_at IS NULL OR pd.last_seen_at < (NOW() - ($1::bigint || ' ms')::interval)
		THEN 'offline'
		ELSE CASE
			WHEN current_job.id IS NOT NULL THEN 'busy'
			ELSE 'idle'
		END
	END::provisioner_daemon_status AS status,
	pk.name AS key_name,
	-- NOTE(mafredri): sqlc.embed doesn't support nullable tables nor renaming them.
	current_job.id AS current_job_id,
	current_job.job_status AS current_job_status,
	previous_job.id AS previous_job_id,
	previous_job.job_status AS previous_job_status,
	COALESCE(current_template.name, ''::text) AS current_job_template_name,
	COALESCE(current_template.display_name, ''::text) AS current_job_template_display_name,
	COALESCE(current_template.icon, ''::text) AS current_job_template_icon,
	COALESCE(previous_template.name, ''::text) AS previous_job_template_name,
	COALESCE(previous_template.display_name, ''::text) AS previous_job_template_display_name,
	COALESCE(previous_template.icon, ''::text) AS previous_job_template_icon
FROM
	provisioner_daemons pd
JOIN
	provisioner_keys pk ON pk.id = pd.key_id
LEFT JOIN
	provisioner_jobs current_job ON (
		current_job.worker_id = pd.id
		AND current_job.organization_id = pd.organization_id
		AND current_job.completed_at IS NULL
	)
LEFT JOIN
	provisioner_jobs previous_job ON (
		previous_job.id = (
			SELECT
				id
			FROM
				provisioner_jobs
			WHERE
				worker_id = pd.id
				AND organization_id = pd.organization_id
				AND completed_at IS NOT NULL
			ORDER BY
				completed_at DESC
			LIMIT 1
		)
		AND previous_job.organization_id = pd.organization_id
	)
LEFT JOIN
	workspace_builds current_build ON current_build.id = CASE WHEN current_job.input ? 'workspace_build_id' THEN (current_job.input->>'workspace_build_id')::uuid END
LEFT JOIN
	-- We should always have a template version, either explicitly or implicitly via workspace build.
	template_versions current_version ON (
		current_version.id = CASE WHEN current_job.input ? 'template_version_id' THEN (current_job.input->>'template_version_id')::uuid ELSE current_build.template_version_id END
		AND current_version.organization_id = pd.organization_id
	)
LEFT JOIN
	templates current_template ON (
		current_template.id = current_version.template_id
		AND current_template.organization_id = pd.organization_id
	)
LEFT JOIN
	workspace_builds previous_build ON previous_build.id = CASE WHEN previous_job.input ? 'workspace_build_id' THEN (previous_job.input->>'workspace_build_id')::uuid END
LEFT JOIN
	-- We should always have a template version, either explicitly or implicitly via workspace build.
	template_versions previous_version ON (
		previous_version.id = CASE WHEN previous_job.input ? 'template_version_id' THEN (previous_job.input->>'template_version_id')::uuid ELSE previous_build.template_version_id END
		AND previous_version.organization_id = pd.organization_id
	)
LEFT JOIN
	templates previous_template ON (
		previous_template.id = previous_version.template_id
		AND previous_template.organization_id = pd.organization_id
	)
WHERE
	pd.organization_id = $2::uuid
	AND (COALESCE(array_length($3::uuid[], 1), 0) = 0 OR pd.id = ANY($3::uuid[]))
	AND ($4::tagset = 'null'::tagset OR provisioner_tagset_contains(pd.tags::tagset, $4::tagset))
ORDER BY
	pd.created_at DESC
LIMIT
	$5::int
`

// GetProvisionerDaemonsWithStatusByOrganizationParams holds the arguments for
// GetProvisionerDaemonsWithStatusByOrganization.
type GetProvisionerDaemonsWithStatusByOrganizationParams struct {
	// StaleIntervalMS: a daemon not seen within this many milliseconds is
	// reported as 'offline'.
	StaleIntervalMS int64 `db:"stale_interval_ms" json:"stale_interval_ms"`
	OrganizationID  uuid.UUID `db:"organization_id" json:"organization_id"`
	// IDs: optional filter; an empty slice matches all daemons.
	IDs []uuid.UUID `db:"ids" json:"ids"`
	// Tags: optional tagset filter; the 'null' tagset disables it.
	Tags StringMap `db:"tags" json:"tags"`
	// Limit: optional row cap; NULL means no limit.
	Limit sql.NullInt32 `db:"limit" json:"limit"`
}

// GetProvisionerDaemonsWithStatusByOrganizationRow is a daemon plus its
// derived status and current/previous job metadata. Template names fall back
// to '' when no template could be resolved (see COALESCE in the SQL).
type GetProvisionerDaemonsWithStatusByOrganizationRow struct {
	ProvisionerDaemon              ProvisionerDaemon        `db:"provisioner_daemon" json:"provisioner_daemon"`
	Status                         ProvisionerDaemonStatus  `db:"status" json:"status"`
	KeyName                        string                   `db:"key_name" json:"key_name"`
	CurrentJobID                   uuid.NullUUID            `db:"current_job_id" json:"current_job_id"`
	CurrentJobStatus               NullProvisionerJobStatus `db:"current_job_status" json:"current_job_status"`
	PreviousJobID                  uuid.NullUUID            `db:"previous_job_id" json:"previous_job_id"`
	PreviousJobStatus              NullProvisionerJobStatus `db:"previous_job_status" json:"previous_job_status"`
	CurrentJobTemplateName         string                   `db:"current_job_template_name" json:"current_job_template_name"`
	CurrentJobTemplateDisplayName  string                   `db:"current_job_template_display_name" json:"current_job_template_display_name"`
	CurrentJobTemplateIcon         string                   `db:"current_job_template_icon" json:"current_job_template_icon"`
	PreviousJobTemplateName        string                   `db:"previous_job_template_name" json:"previous_job_template_name"`
	PreviousJobTemplateDisplayName string                   `db:"previous_job_template_display_name" json:"previous_job_template_display_name"`
	PreviousJobTemplateIcon        string                   `db:"previous_job_template_icon" json:"previous_job_template_icon"`
}

// Current job information.
// Previous job information.
func (q *sqlQuerier) GetProvisionerDaemonsWithStatusByOrganization(ctx context.Context, arg GetProvisionerDaemonsWithStatusByOrganizationParams) ([]GetProvisionerDaemonsWithStatusByOrganizationRow, error) {
	rows, err := q.db.QueryContext(ctx, getProvisionerDaemonsWithStatusByOrganization,
		arg.StaleIntervalMS,
		arg.OrganizationID,
		pq.Array(arg.IDs),
		arg.Tags,
		arg.Limit,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetProvisionerDaemonsWithStatusByOrganizationRow
	for rows.Next() {
		var i GetProvisionerDaemonsWithStatusByOrganizationRow
		if err := rows.Scan(
			&i.ProvisionerDaemon.ID,
			&i.ProvisionerDaemon.CreatedAt,
			&i.ProvisionerDaemon.Name,
			pq.Array(&i.ProvisionerDaemon.Provisioners),
			&i.ProvisionerDaemon.ReplicaID,
			&i.ProvisionerDaemon.Tags,
			&i.ProvisionerDaemon.LastSeenAt,
			&i.ProvisionerDaemon.Version,
			&i.ProvisionerDaemon.APIVersion,
			&i.ProvisionerDaemon.OrganizationID,
			&i.ProvisionerDaemon.KeyID,
			&i.Status,
			&i.KeyName,
			&i.CurrentJobID,
			&i.CurrentJobStatus,
			&i.PreviousJobID,
			&i.PreviousJobStatus,
			&i.CurrentJobTemplateName,
			&i.CurrentJobTemplateDisplayName,
			&i.CurrentJobTemplateIcon,
			&i.PreviousJobTemplateName,
			&i.PreviousJobTemplateDisplayName,
			&i.PreviousJobTemplateIcon,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// updateProvisionerDaemonLastSeenAt advances a daemon's last_seen_at; the
// `last_seen_at <= $1` guard makes the update monotonic (it never moves the
// timestamp backwards).
const updateProvisionerDaemonLastSeenAt = `-- name: UpdateProvisionerDaemonLastSeenAt :exec
UPDATE provisioner_daemons
SET
	last_seen_at = $1
WHERE
	id = $2
AND
	last_seen_at <= $1
`

// UpdateProvisionerDaemonLastSeenAtParams holds the arguments for
// UpdateProvisionerDaemonLastSeenAt.
type UpdateProvisionerDaemonLastSeenAtParams struct {
	LastSeenAt sql.NullTime `db:"last_seen_at" json:"last_seen_at"`
	ID         uuid.UUID    `db:"id" json:"id"`
}

// UpdateProvisionerDaemonLastSeenAt records daemon activity. The update is a
// no-op (not an error) when the stored timestamp is already newer.
func (q *sqlQuerier) UpdateProvisionerDaemonLastSeenAt(ctx context.Context, arg UpdateProvisionerDaemonLastSeenAtParams) error {
	_, err := q.db.ExecContext(ctx, updateProvisionerDaemonLastSeenAt, arg.LastSeenAt, arg.ID)
	return err
}
|
|
|
|
// upsertProvisionerDaemon inserts a daemon with a server-generated UUID, or —
// on conflict of (organization_id, name, lower(tags->>'owner')) — refreshes
// the existing row's mutable columns.
const upsertProvisionerDaemon = `-- name: UpsertProvisionerDaemon :one
INSERT INTO
	provisioner_daemons (
		id,
		created_at,
		"name",
		provisioners,
		tags,
		last_seen_at,
		"version",
		organization_id,
		api_version,
		key_id
	)
VALUES (
	gen_random_uuid(),
	$1,
	$2,
	$3,
	$4,
	$5,
	$6,
	$7,
	$8,
	$9
) ON CONFLICT("organization_id", "name", LOWER(COALESCE(tags ->> 'owner'::text, ''::text))) DO UPDATE SET
	provisioners = $3,
	tags = $4,
	last_seen_at = $5,
	"version" = $6,
	api_version = $8,
	organization_id = $7,
	key_id = $9
RETURNING id, created_at, name, provisioners, replica_id, tags, last_seen_at, version, api_version, organization_id, key_id
`

// UpsertProvisionerDaemonParams holds the arguments for
// UpsertProvisionerDaemon. There is no ID field: the row ID is generated by
// the database (gen_random_uuid()) on first insert.
type UpsertProvisionerDaemonParams struct {
	CreatedAt      time.Time         `db:"created_at" json:"created_at"`
	Name           string            `db:"name" json:"name"`
	Provisioners   []ProvisionerType `db:"provisioners" json:"provisioners"`
	Tags           StringMap         `db:"tags" json:"tags"`
	LastSeenAt     sql.NullTime      `db:"last_seen_at" json:"last_seen_at"`
	Version        string            `db:"version" json:"version"`
	OrganizationID uuid.UUID         `db:"organization_id" json:"organization_id"`
	APIVersion     string            `db:"api_version" json:"api_version"`
	KeyID          uuid.UUID         `db:"key_id" json:"key_id"`
}

// UpsertProvisionerDaemon registers (or re-registers) a provisioner daemon
// and returns the resulting row, preserving the original ID on conflict.
func (q *sqlQuerier) UpsertProvisionerDaemon(ctx context.Context, arg UpsertProvisionerDaemonParams) (ProvisionerDaemon, error) {
	row := q.db.QueryRowContext(ctx, upsertProvisionerDaemon,
		arg.CreatedAt,
		arg.Name,
		pq.Array(arg.Provisioners),
		arg.Tags,
		arg.LastSeenAt,
		arg.Version,
		arg.OrganizationID,
		arg.APIVersion,
		arg.KeyID,
	)
	var i ProvisionerDaemon
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.Name,
		pq.Array(&i.Provisioners),
		&i.ReplicaID,
		&i.Tags,
		&i.LastSeenAt,
		&i.Version,
		&i.APIVersion,
		&i.OrganizationID,
		&i.KeyID,
	)
	return i, err
}
|
|
|
|
// getProvisionerLogsAfterID pages through a job's logs by log ID cursor.
const getProvisionerLogsAfterID = `-- name: GetProvisionerLogsAfterID :many
SELECT
	job_id, created_at, source, level, stage, output, id
FROM
	provisioner_job_logs
WHERE
	job_id = $1
	AND (
		id > $2
	) ORDER BY id ASC
`

// GetProvisionerLogsAfterIDParams holds the arguments for
// GetProvisionerLogsAfterID.
// NOTE(review): despite its name, CreatedAfter is compared against the log ID
// (`id > $2` in the SQL), not against created_at — the name comes from the
// original SQL parameter.
type GetProvisionerLogsAfterIDParams struct {
	JobID        uuid.UUID `db:"job_id" json:"job_id"`
	CreatedAfter int64     `db:"created_after" json:"created_after"`
}

// GetProvisionerLogsAfterID returns the logs of a job whose ID is strictly
// greater than the given cursor, in ascending ID order.
func (q *sqlQuerier) GetProvisionerLogsAfterID(ctx context.Context, arg GetProvisionerLogsAfterIDParams) ([]ProvisionerJobLog, error) {
	rows, err := q.db.QueryContext(ctx, getProvisionerLogsAfterID, arg.JobID, arg.CreatedAfter)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []ProvisionerJobLog
	for rows.Next() {
		var i ProvisionerJobLog
		if err := rows.Scan(
			&i.JobID,
			&i.CreatedAt,
			&i.Source,
			&i.Level,
			&i.Stage,
			&i.Output,
			&i.ID,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// insertProvisionerJobLogs bulk-inserts log lines for one job by unnesting
// the parallel arrays ($2..$6) against a single job ID ($1).
const insertProvisionerJobLogs = `-- name: InsertProvisionerJobLogs :many
INSERT INTO
	provisioner_job_logs
SELECT
	$1 :: uuid AS job_id,
	unnest($2 :: timestamptz [ ]) AS created_at,
	unnest($3 :: log_source [ ]) AS source,
	unnest($4 :: log_level [ ]) AS LEVEL,
	unnest($5 :: VARCHAR(128) [ ]) AS stage,
	unnest($6 :: VARCHAR(1024) [ ]) AS output RETURNING job_id, created_at, source, level, stage, output, id
`

// InsertProvisionerJobLogsParams holds the arguments for
// InsertProvisionerJobLogs. CreatedAt, Source, Level, Stage, and Output are
// parallel arrays; index i across all of them forms one log line.
type InsertProvisionerJobLogsParams struct {
	JobID     uuid.UUID   `db:"job_id" json:"job_id"`
	CreatedAt []time.Time `db:"created_at" json:"created_at"`
	Source    []LogSource `db:"source" json:"source"`
	Level     []LogLevel  `db:"level" json:"level"`
	Stage     []string    `db:"stage" json:"stage"`
	Output    []string    `db:"output" json:"output"`
}

// InsertProvisionerJobLogs appends a batch of log lines to a job and returns
// the created rows (including their generated IDs).
func (q *sqlQuerier) InsertProvisionerJobLogs(ctx context.Context, arg InsertProvisionerJobLogsParams) ([]ProvisionerJobLog, error) {
	rows, err := q.db.QueryContext(ctx, insertProvisionerJobLogs,
		arg.JobID,
		pq.Array(arg.CreatedAt),
		pq.Array(arg.Source),
		pq.Array(arg.Level),
		pq.Array(arg.Stage),
		pq.Array(arg.Output),
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []ProvisionerJobLog
	for rows.Next() {
		var i ProvisionerJobLog
		if err := rows.Scan(
			&i.JobID,
			&i.CreatedAt,
			&i.Source,
			&i.Level,
			&i.Stage,
			&i.Output,
			&i.ID,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// acquireProvisionerJob atomically claims the oldest unstarted job in the
// caller's organization that matches the worker's provisioner types and tags,
// using FOR UPDATE SKIP LOCKED so concurrent workers never claim the same job.
const acquireProvisionerJob = `-- name: AcquireProvisionerJob :one
UPDATE
	provisioner_jobs
SET
	started_at = $1,
	updated_at = $1,
	worker_id = $2
WHERE
	id = (
		SELECT
			id
		FROM
			provisioner_jobs AS potential_job
		WHERE
			potential_job.started_at IS NULL
			AND potential_job.organization_id = $3
			-- Ensure the caller has the correct provisioner.
			AND potential_job.provisioner = ANY($4 :: provisioner_type [ ])
			-- elsewhere, we use the tagset type, but here we use jsonb for backward compatibility
			-- they are aliases and the code that calls this query already relies on a different type
			AND provisioner_tagset_contains($5 :: jsonb, potential_job.tags :: jsonb)
		ORDER BY
			potential_job.created_at
		FOR UPDATE
		SKIP LOCKED
		LIMIT
			1
	) RETURNING id, created_at, updated_at, started_at, canceled_at, completed_at, error, organization_id, initiator_id, provisioner, storage_method, type, input, worker_id, file_id, tags, error_code, trace_metadata, job_status
`

// AcquireProvisionerJobParams holds the arguments for AcquireProvisionerJob.
type AcquireProvisionerJobParams struct {
	StartedAt       sql.NullTime    `db:"started_at" json:"started_at"`
	WorkerID        uuid.NullUUID   `db:"worker_id" json:"worker_id"`
	OrganizationID  uuid.UUID       `db:"organization_id" json:"organization_id"`
	Types           []ProvisionerType `db:"types" json:"types"`
	ProvisionerTags json.RawMessage `db:"provisioner_tags" json:"provisioner_tags"`
}

// Acquires the lock for a single job that isn't started, completed,
// canceled, and that matches an array of provisioner types.
//
// SKIP LOCKED is used to jump over locked rows. This prevents
// multiple provisioners from acquiring the same jobs. See:
// https://www.postgresql.org/docs/9.5/sql-select.html#SQL-FOR-UPDATE-SHARE
func (q *sqlQuerier) AcquireProvisionerJob(ctx context.Context, arg AcquireProvisionerJobParams) (ProvisionerJob, error) {
	row := q.db.QueryRowContext(ctx, acquireProvisionerJob,
		arg.StartedAt,
		arg.WorkerID,
		arg.OrganizationID,
		pq.Array(arg.Types),
		arg.ProvisionerTags,
	)
	var i ProvisionerJob
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.StartedAt,
		&i.CanceledAt,
		&i.CompletedAt,
		&i.Error,
		&i.OrganizationID,
		&i.InitiatorID,
		&i.Provisioner,
		&i.StorageMethod,
		&i.Type,
		&i.Input,
		&i.WorkerID,
		&i.FileID,
		&i.Tags,
		&i.ErrorCode,
		&i.TraceMetadata,
		&i.JobStatus,
	)
	return i, err
}
|
|
|
|
// getProvisionerJobByID selects one provisioner job by primary key.
const getProvisionerJobByID = `-- name: GetProvisionerJobByID :one
SELECT
	id, created_at, updated_at, started_at, canceled_at, completed_at, error, organization_id, initiator_id, provisioner, storage_method, type, input, worker_id, file_id, tags, error_code, trace_metadata, job_status
FROM
	provisioner_jobs
WHERE
	id = $1
`

// GetProvisionerJobByID returns the provisioner job with the given ID, or the
// underlying driver's no-rows error if it does not exist.
func (q *sqlQuerier) GetProvisionerJobByID(ctx context.Context, id uuid.UUID) (ProvisionerJob, error) {
	row := q.db.QueryRowContext(ctx, getProvisionerJobByID, id)
	var i ProvisionerJob
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.StartedAt,
		&i.CanceledAt,
		&i.CompletedAt,
		&i.Error,
		&i.OrganizationID,
		&i.InitiatorID,
		&i.Provisioner,
		&i.StorageMethod,
		&i.Type,
		&i.Input,
		&i.WorkerID,
		&i.FileID,
		&i.Tags,
		&i.ErrorCode,
		&i.TraceMetadata,
		&i.JobStatus,
	)
	return i, err
}
|
|
|
|
// getProvisionerJobByIDForUpdate selects one job with a row lock; SKIP LOCKED
// means a job already locked by another transaction yields no row instead of
// blocking.
const getProvisionerJobByIDForUpdate = `-- name: GetProvisionerJobByIDForUpdate :one
SELECT
	id, created_at, updated_at, started_at, canceled_at, completed_at, error, organization_id, initiator_id, provisioner, storage_method, type, input, worker_id, file_id, tags, error_code, trace_metadata, job_status
FROM
	provisioner_jobs
WHERE
	id = $1
FOR UPDATE
SKIP LOCKED
`

// Gets a single provisioner job by ID for update.
// This is used to securely reap jobs that have been hung/pending for a long time.
func (q *sqlQuerier) GetProvisionerJobByIDForUpdate(ctx context.Context, id uuid.UUID) (ProvisionerJob, error) {
	row := q.db.QueryRowContext(ctx, getProvisionerJobByIDForUpdate, id)
	var i ProvisionerJob
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.StartedAt,
		&i.CanceledAt,
		&i.CompletedAt,
		&i.Error,
		&i.OrganizationID,
		&i.InitiatorID,
		&i.Provisioner,
		&i.StorageMethod,
		&i.Type,
		&i.Input,
		&i.WorkerID,
		&i.FileID,
		&i.Tags,
		&i.ErrorCode,
		&i.TraceMetadata,
		&i.JobStatus,
	)
	return i, err
}
|
|
|
|
// getProvisionerJobTimingsByJobID selects a job's timing entries, oldest first.
const getProvisionerJobTimingsByJobID = `-- name: GetProvisionerJobTimingsByJobID :many
SELECT job_id, started_at, ended_at, stage, source, action, resource FROM provisioner_job_timings
WHERE job_id = $1
ORDER BY started_at ASC
`

// GetProvisionerJobTimingsByJobID returns all timing records for a job,
// ordered by start time ascending.
func (q *sqlQuerier) GetProvisionerJobTimingsByJobID(ctx context.Context, jobID uuid.UUID) ([]ProvisionerJobTiming, error) {
	rows, err := q.db.QueryContext(ctx, getProvisionerJobTimingsByJobID, jobID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []ProvisionerJobTiming
	for rows.Next() {
		var i ProvisionerJobTiming
		if err := rows.Scan(
			&i.JobID,
			&i.StartedAt,
			&i.EndedAt,
			&i.Stage,
			&i.Source,
			&i.Action,
			&i.Resource,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getProvisionerJobsByIDs selects the provisioner jobs matching any of the
// given IDs.
const getProvisionerJobsByIDs = `-- name: GetProvisionerJobsByIDs :many
SELECT
	id, created_at, updated_at, started_at, canceled_at, completed_at, error, organization_id, initiator_id, provisioner, storage_method, type, input, worker_id, file_id, tags, error_code, trace_metadata, job_status
FROM
	provisioner_jobs
WHERE
	id = ANY($1 :: uuid [ ])
`

// GetProvisionerJobsByIDs returns the jobs whose IDs are in ids. Unknown IDs
// are silently omitted; row order is unspecified.
func (q *sqlQuerier) GetProvisionerJobsByIDs(ctx context.Context, ids []uuid.UUID) ([]ProvisionerJob, error) {
	rows, err := q.db.QueryContext(ctx, getProvisionerJobsByIDs, pq.Array(ids))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []ProvisionerJob
	for rows.Next() {
		var i ProvisionerJob
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.StartedAt,
			&i.CanceledAt,
			&i.CompletedAt,
			&i.Error,
			&i.OrganizationID,
			&i.InitiatorID,
			&i.Provisioner,
			&i.StorageMethod,
			&i.Type,
			&i.Input,
			&i.WorkerID,
			&i.FileID,
			&i.Tags,
			&i.ErrorCode,
			&i.TraceMetadata,
			&i.JobStatus,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getProvisionerJobsByIDsWithQueuePosition selects the requested jobs along
// with a computed queue position/size: pending jobs are ranked per online
// daemon whose tagset can serve them, and each job reports its best (minimum)
// position and largest queue size across those daemons. Non-pending jobs get
// position/size 0 via COALESCE.
const getProvisionerJobsByIDsWithQueuePosition = `-- name: GetProvisionerJobsByIDsWithQueuePosition :many
WITH filtered_provisioner_jobs AS (
	-- Step 1: Filter provisioner_jobs
	SELECT
		id, created_at
	FROM
		provisioner_jobs
	WHERE
		id = ANY($1 :: uuid [ ]) -- Apply filter early to reduce dataset size before expensive JOIN
),
pending_jobs AS (
	-- Step 2: Extract only pending jobs
	SELECT
		id, created_at, tags
	FROM
		provisioner_jobs
	WHERE
		job_status = 'pending'
),
online_provisioner_daemons AS (
	SELECT id, tags FROM provisioner_daemons pd
	WHERE pd.last_seen_at IS NOT NULL AND pd.last_seen_at >= (NOW() - ($2::bigint || ' ms')::interval)
),
ranked_jobs AS (
	-- Step 3: Rank only pending jobs based on provisioner availability
	SELECT
		pj.id,
		pj.created_at,
		ROW_NUMBER() OVER (PARTITION BY opd.id ORDER BY pj.created_at ASC) AS queue_position,
		COUNT(*) OVER (PARTITION BY opd.id) AS queue_size
	FROM
		pending_jobs pj
	INNER JOIN online_provisioner_daemons opd
	ON provisioner_tagset_contains(opd.tags, pj.tags) -- Join only on the small pending set
),
final_jobs AS (
	-- Step 4: Compute best queue position and max queue size per job
	SELECT
		fpj.id,
		fpj.created_at,
		COALESCE(MIN(rj.queue_position), 0) :: BIGINT AS queue_position, -- Best queue position across provisioners
		COALESCE(MAX(rj.queue_size), 0) :: BIGINT AS queue_size -- Max queue size across provisioners
	FROM
		filtered_provisioner_jobs fpj -- Use the pre-filtered dataset instead of full provisioner_jobs
	LEFT JOIN ranked_jobs rj
	ON fpj.id = rj.id -- Join with the ranking jobs CTE to assign a rank to each specified provisioner job.
	GROUP BY
		fpj.id, fpj.created_at
)
SELECT
	-- Step 5: Final SELECT with INNER JOIN provisioner_jobs
	fj.id,
	fj.created_at,
	pj.id, pj.created_at, pj.updated_at, pj.started_at, pj.canceled_at, pj.completed_at, pj.error, pj.organization_id, pj.initiator_id, pj.provisioner, pj.storage_method, pj.type, pj.input, pj.worker_id, pj.file_id, pj.tags, pj.error_code, pj.trace_metadata, pj.job_status,
	fj.queue_position,
	fj.queue_size
FROM
	final_jobs fj
	INNER JOIN provisioner_jobs pj
	ON fj.id = pj.id -- Ensure we retrieve full details from ` + "`" + `provisioner_jobs` + "`" + `.
	-- JOIN with pj is required for sqlc.embed(pj) to compile successfully.
ORDER BY
	fj.created_at
`

// GetProvisionerJobsByIDsWithQueuePositionParams holds the arguments for
// GetProvisionerJobsByIDsWithQueuePosition.
type GetProvisionerJobsByIDsWithQueuePositionParams struct {
	IDs []uuid.UUID `db:"ids" json:"ids"`
	// StaleIntervalMS: daemons not seen within this many milliseconds are
	// excluded from the "online" set used for queue ranking.
	StaleIntervalMS int64 `db:"stale_interval_ms" json:"stale_interval_ms"`
}

// GetProvisionerJobsByIDsWithQueuePositionRow is a job plus its computed
// queue position and size (both 0 when the job is not pending or no online
// daemon matches it).
type GetProvisionerJobsByIDsWithQueuePositionRow struct {
	ID             uuid.UUID      `db:"id" json:"id"`
	CreatedAt      time.Time      `db:"created_at" json:"created_at"`
	ProvisionerJob ProvisionerJob `db:"provisioner_job" json:"provisioner_job"`
	QueuePosition  int64          `db:"queue_position" json:"queue_position"`
	QueueSize      int64          `db:"queue_size" json:"queue_size"`
}

// GetProvisionerJobsByIDsWithQueuePosition returns the requested jobs ordered
// by creation time, each annotated with its queue position and queue size.
func (q *sqlQuerier) GetProvisionerJobsByIDsWithQueuePosition(ctx context.Context, arg GetProvisionerJobsByIDsWithQueuePositionParams) ([]GetProvisionerJobsByIDsWithQueuePositionRow, error) {
	rows, err := q.db.QueryContext(ctx, getProvisionerJobsByIDsWithQueuePosition, pq.Array(arg.IDs), arg.StaleIntervalMS)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetProvisionerJobsByIDsWithQueuePositionRow
	for rows.Next() {
		var i GetProvisionerJobsByIDsWithQueuePositionRow
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.ProvisionerJob.ID,
			&i.ProvisionerJob.CreatedAt,
			&i.ProvisionerJob.UpdatedAt,
			&i.ProvisionerJob.StartedAt,
			&i.ProvisionerJob.CanceledAt,
			&i.ProvisionerJob.CompletedAt,
			&i.ProvisionerJob.Error,
			&i.ProvisionerJob.OrganizationID,
			&i.ProvisionerJob.InitiatorID,
			&i.ProvisionerJob.Provisioner,
			&i.ProvisionerJob.StorageMethod,
			&i.ProvisionerJob.Type,
			&i.ProvisionerJob.Input,
			&i.ProvisionerJob.WorkerID,
			&i.ProvisionerJob.FileID,
			&i.ProvisionerJob.Tags,
			&i.ProvisionerJob.ErrorCode,
			&i.ProvisionerJob.TraceMetadata,
			&i.ProvisionerJob.JobStatus,
			&i.QueuePosition,
			&i.QueueSize,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getProvisionerJobsByOrganizationAndStatusWithQueuePositionAndProvisioner
// lists an organization's provisioner jobs, enriched with queue position,
// queue size, matching available daemons, and template/workspace/worker names.
const getProvisionerJobsByOrganizationAndStatusWithQueuePositionAndProvisioner = `-- name: GetProvisionerJobsByOrganizationAndStatusWithQueuePositionAndProvisioner :many
WITH pending_jobs AS (
	SELECT
		id, created_at
	FROM
		provisioner_jobs
	WHERE
		started_at IS NULL
	AND
		canceled_at IS NULL
	AND
		completed_at IS NULL
	AND
		error IS NULL
),
queue_position AS (
	SELECT
		id,
		ROW_NUMBER() OVER (ORDER BY created_at ASC) AS queue_position
	FROM
		pending_jobs
),
queue_size AS (
	SELECT COUNT(*) AS count FROM pending_jobs
)
SELECT
	pj.id, pj.created_at, pj.updated_at, pj.started_at, pj.canceled_at, pj.completed_at, pj.error, pj.organization_id, pj.initiator_id, pj.provisioner, pj.storage_method, pj.type, pj.input, pj.worker_id, pj.file_id, pj.tags, pj.error_code, pj.trace_metadata, pj.job_status,
	COALESCE(qp.queue_position, 0) AS queue_position,
	COALESCE(qs.count, 0) AS queue_size,
	-- Use subquery to utilize ORDER BY in array_agg since it cannot be
	-- combined with FILTER.
	(
		SELECT
			-- Order for stable output.
			array_agg(pd.id ORDER BY pd.created_at ASC)::uuid[]
		FROM
			provisioner_daemons pd
		WHERE
			-- See AcquireProvisionerJob.
			pj.started_at IS NULL
			AND pj.organization_id = pd.organization_id
			AND pj.provisioner = ANY(pd.provisioners)
			AND provisioner_tagset_contains(pd.tags, pj.tags)
	) AS available_workers,
	-- Include template and workspace information.
	COALESCE(tv.name, '') AS template_version_name,
	t.id AS template_id,
	COALESCE(t.name, '') AS template_name,
	COALESCE(t.display_name, '') AS template_display_name,
	COALESCE(t.icon, '') AS template_icon,
	w.id AS workspace_id,
	COALESCE(w.name, '') AS workspace_name,
	-- Include the name of the provisioner_daemon associated to the job
	COALESCE(pd.name, '') AS worker_name
FROM
	provisioner_jobs pj
LEFT JOIN
	queue_position qp ON qp.id = pj.id
LEFT JOIN
	queue_size qs ON TRUE
LEFT JOIN
	workspace_builds wb ON wb.id = CASE WHEN pj.input ? 'workspace_build_id' THEN (pj.input->>'workspace_build_id')::uuid END
LEFT JOIN
	workspaces w ON (
		w.id = wb.workspace_id
		AND w.organization_id = pj.organization_id
	)
LEFT JOIN
	-- We should always have a template version, either explicitly or implicitly via workspace build.
	template_versions tv ON (
		tv.id = CASE WHEN pj.input ? 'template_version_id' THEN (pj.input->>'template_version_id')::uuid ELSE wb.template_version_id END
		AND tv.organization_id = pj.organization_id
	)
LEFT JOIN
	templates t ON (
		t.id = tv.template_id
		AND t.organization_id = pj.organization_id
	)
LEFT JOIN
	-- Join to get the daemon name corresponding to the job's worker_id
	provisioner_daemons pd ON pd.id = pj.worker_id
WHERE
	pj.organization_id = $1::uuid
	AND (COALESCE(array_length($2::uuid[], 1), 0) = 0 OR pj.id = ANY($2::uuid[]))
	AND (COALESCE(array_length($3::provisioner_job_status[], 1), 0) = 0 OR pj.job_status = ANY($3::provisioner_job_status[]))
	AND ($4::tagset = 'null'::tagset OR provisioner_tagset_contains(pj.tags::tagset, $4::tagset))
GROUP BY
	pj.id,
	qp.queue_position,
	qs.count,
	tv.name,
	t.id,
	t.name,
	t.display_name,
	t.icon,
	w.id,
	w.name,
	pd.name
ORDER BY
	pj.created_at DESC
LIMIT
	$5::int
`

// GetProvisionerJobsByOrganizationAndStatusWithQueuePositionAndProvisionerParams
// filters the listing. Empty IDs/Status slices and a 'null' tagset disable the
// corresponding filter (see the query's WHERE clause).
type GetProvisionerJobsByOrganizationAndStatusWithQueuePositionAndProvisionerParams struct {
	OrganizationID uuid.UUID              `db:"organization_id" json:"organization_id"`
	IDs            []uuid.UUID            `db:"ids" json:"ids"`
	Status         []ProvisionerJobStatus `db:"status" json:"status"`
	Tags           StringMap              `db:"tags" json:"tags"`
	// Limit caps the number of rows; passed to the SQL LIMIT clause.
	Limit sql.NullInt32 `db:"limit" json:"limit"`
}

// GetProvisionerJobsByOrganizationAndStatusWithQueuePositionAndProvisionerRow
// is one enriched job row. Template/workspace fields COALESCE to '' in SQL
// when the corresponding LEFT JOIN finds no match.
type GetProvisionerJobsByOrganizationAndStatusWithQueuePositionAndProvisionerRow struct {
	ProvisionerJob ProvisionerJob `db:"provisioner_job" json:"provisioner_job"`
	QueuePosition  int64          `db:"queue_position" json:"queue_position"`
	QueueSize      int64          `db:"queue_size" json:"queue_size"`
	// AvailableWorkers are daemon IDs eligible to pick up the job, ordered by
	// daemon creation time (see the array_agg subquery).
	AvailableWorkers    []uuid.UUID   `db:"available_workers" json:"available_workers"`
	TemplateVersionName string        `db:"template_version_name" json:"template_version_name"`
	TemplateID          uuid.NullUUID `db:"template_id" json:"template_id"`
	TemplateName        string        `db:"template_name" json:"template_name"`
	TemplateDisplayName string        `db:"template_display_name" json:"template_display_name"`
	TemplateIcon        string        `db:"template_icon" json:"template_icon"`
	WorkspaceID         uuid.NullUUID `db:"workspace_id" json:"workspace_id"`
	WorkspaceName       string        `db:"workspace_name" json:"workspace_name"`
	WorkerName          string        `db:"worker_name" json:"worker_name"`
}
|
|
|
|
// GetProvisionerJobsByOrganizationAndStatusWithQueuePositionAndProvisioner
// lists an organization's provisioner jobs (newest first, limited by
// arg.Limit) enriched with queue position/size, eligible daemon IDs, and
// related template/workspace/worker names.
func (q *sqlQuerier) GetProvisionerJobsByOrganizationAndStatusWithQueuePositionAndProvisioner(ctx context.Context, arg GetProvisionerJobsByOrganizationAndStatusWithQueuePositionAndProvisionerParams) ([]GetProvisionerJobsByOrganizationAndStatusWithQueuePositionAndProvisionerRow, error) {
	rows, err := q.db.QueryContext(ctx, getProvisionerJobsByOrganizationAndStatusWithQueuePositionAndProvisioner,
		arg.OrganizationID,
		pq.Array(arg.IDs),
		pq.Array(arg.Status),
		arg.Tags,
		arg.Limit,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetProvisionerJobsByOrganizationAndStatusWithQueuePositionAndProvisionerRow
	for rows.Next() {
		var i GetProvisionerJobsByOrganizationAndStatusWithQueuePositionAndProvisionerRow
		// Scan order mirrors the query's column list exactly; the uuid[]
		// column is decoded through pq.Array.
		if err := rows.Scan(
			&i.ProvisionerJob.ID,
			&i.ProvisionerJob.CreatedAt,
			&i.ProvisionerJob.UpdatedAt,
			&i.ProvisionerJob.StartedAt,
			&i.ProvisionerJob.CanceledAt,
			&i.ProvisionerJob.CompletedAt,
			&i.ProvisionerJob.Error,
			&i.ProvisionerJob.OrganizationID,
			&i.ProvisionerJob.InitiatorID,
			&i.ProvisionerJob.Provisioner,
			&i.ProvisionerJob.StorageMethod,
			&i.ProvisionerJob.Type,
			&i.ProvisionerJob.Input,
			&i.ProvisionerJob.WorkerID,
			&i.ProvisionerJob.FileID,
			&i.ProvisionerJob.Tags,
			&i.ProvisionerJob.ErrorCode,
			&i.ProvisionerJob.TraceMetadata,
			&i.ProvisionerJob.JobStatus,
			&i.QueuePosition,
			&i.QueueSize,
			pq.Array(&i.AvailableWorkers),
			&i.TemplateVersionName,
			&i.TemplateID,
			&i.TemplateName,
			&i.TemplateDisplayName,
			&i.TemplateIcon,
			&i.WorkspaceID,
			&i.WorkspaceName,
			&i.WorkerName,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getProvisionerJobsCreatedAfter selects all provisioner jobs created
// strictly after the given timestamp.
const getProvisionerJobsCreatedAfter = `-- name: GetProvisionerJobsCreatedAfter :many
SELECT id, created_at, updated_at, started_at, canceled_at, completed_at, error, organization_id, initiator_id, provisioner, storage_method, type, input, worker_id, file_id, tags, error_code, trace_metadata, job_status FROM provisioner_jobs WHERE created_at > $1
`

// GetProvisionerJobsCreatedAfter returns every provisioner job with
// created_at strictly after createdAt. No ordering is imposed by the query.
func (q *sqlQuerier) GetProvisionerJobsCreatedAfter(ctx context.Context, createdAt time.Time) ([]ProvisionerJob, error) {
	rows, err := q.db.QueryContext(ctx, getProvisionerJobsCreatedAfter, createdAt)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []ProvisionerJob
	for rows.Next() {
		var i ProvisionerJob
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.StartedAt,
			&i.CanceledAt,
			&i.CompletedAt,
			&i.Error,
			&i.OrganizationID,
			&i.InitiatorID,
			&i.Provisioner,
			&i.StorageMethod,
			&i.Type,
			&i.Input,
			&i.WorkerID,
			&i.FileID,
			&i.Tags,
			&i.ErrorCode,
			&i.TraceMetadata,
			&i.JobStatus,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getProvisionerJobsToBeReaped selects jobs that appear stuck: never started
// before @pending_since, or started but not completed before @hung_since.
const getProvisionerJobsToBeReaped = `-- name: GetProvisionerJobsToBeReaped :many
SELECT
	id, created_at, updated_at, started_at, canceled_at, completed_at, error, organization_id, initiator_id, provisioner, storage_method, type, input, worker_id, file_id, tags, error_code, trace_metadata, job_status
FROM
	provisioner_jobs
WHERE
	(
		-- If the job has not been started before @pending_since, reap it.
		updated_at < $1
		AND started_at IS NULL
		AND completed_at IS NULL
	)
	OR
	(
		-- If the job has been started but not completed before @hung_since, reap it.
		updated_at < $2
		AND started_at IS NOT NULL
		AND completed_at IS NULL
	)
ORDER BY random()
LIMIT $3
`

// GetProvisionerJobsToBeReapedParams bounds the reaping query.
type GetProvisionerJobsToBeReapedParams struct {
	// PendingSince: jobs never started and not updated since this time are reaped.
	PendingSince time.Time `db:"pending_since" json:"pending_since"`
	// HungSince: jobs started but not completed and not updated since this time are reaped.
	HungSince time.Time `db:"hung_since" json:"hung_since"`
	// MaxJobs caps how many candidates are returned per call.
	MaxJobs int32 `db:"max_jobs" json:"max_jobs"`
}

// To avoid repeatedly attempting to reap the same jobs, we randomly order and limit to @max_jobs.
func (q *sqlQuerier) GetProvisionerJobsToBeReaped(ctx context.Context, arg GetProvisionerJobsToBeReapedParams) ([]ProvisionerJob, error) {
	rows, err := q.db.QueryContext(ctx, getProvisionerJobsToBeReaped, arg.PendingSince, arg.HungSince, arg.MaxJobs)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []ProvisionerJob
	for rows.Next() {
		var i ProvisionerJob
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.StartedAt,
			&i.CanceledAt,
			&i.CompletedAt,
			&i.Error,
			&i.OrganizationID,
			&i.InitiatorID,
			&i.Provisioner,
			&i.StorageMethod,
			&i.Type,
			&i.Input,
			&i.WorkerID,
			&i.FileID,
			&i.Tags,
			&i.ErrorCode,
			&i.TraceMetadata,
			&i.JobStatus,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// insertProvisionerJob inserts a new provisioner job and returns the full row
// (including DB-derived columns such as job_status).
const insertProvisionerJob = `-- name: InsertProvisionerJob :one
INSERT INTO
	provisioner_jobs (
		id,
		created_at,
		updated_at,
		organization_id,
		initiator_id,
		provisioner,
		storage_method,
		file_id,
		"type",
		"input",
		tags,
		trace_metadata
	)
VALUES
	($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12) RETURNING id, created_at, updated_at, started_at, canceled_at, completed_at, error, organization_id, initiator_id, provisioner, storage_method, type, input, worker_id, file_id, tags, error_code, trace_metadata, job_status
`

// InsertProvisionerJobParams are the column values for a new provisioner job.
// Timestamps are caller-supplied rather than defaulted in SQL.
type InsertProvisionerJobParams struct {
	ID             uuid.UUID                `db:"id" json:"id"`
	CreatedAt      time.Time                `db:"created_at" json:"created_at"`
	UpdatedAt      time.Time                `db:"updated_at" json:"updated_at"`
	OrganizationID uuid.UUID                `db:"organization_id" json:"organization_id"`
	InitiatorID    uuid.UUID                `db:"initiator_id" json:"initiator_id"`
	Provisioner    ProvisionerType          `db:"provisioner" json:"provisioner"`
	StorageMethod  ProvisionerStorageMethod `db:"storage_method" json:"storage_method"`
	FileID         uuid.UUID                `db:"file_id" json:"file_id"`
	Type           ProvisionerJobType       `db:"type" json:"type"`
	Input          json.RawMessage          `db:"input" json:"input"`
	Tags           StringMap                `db:"tags" json:"tags"`
	TraceMetadata  pqtype.NullRawMessage    `db:"trace_metadata" json:"trace_metadata"`
}

// InsertProvisionerJob creates a provisioner job row and returns the inserted
// record as read back from the database.
func (q *sqlQuerier) InsertProvisionerJob(ctx context.Context, arg InsertProvisionerJobParams) (ProvisionerJob, error) {
	row := q.db.QueryRowContext(ctx, insertProvisionerJob,
		arg.ID,
		arg.CreatedAt,
		arg.UpdatedAt,
		arg.OrganizationID,
		arg.InitiatorID,
		arg.Provisioner,
		arg.StorageMethod,
		arg.FileID,
		arg.Type,
		arg.Input,
		arg.Tags,
		arg.TraceMetadata,
	)
	var i ProvisionerJob
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.StartedAt,
		&i.CanceledAt,
		&i.CompletedAt,
		&i.Error,
		&i.OrganizationID,
		&i.InitiatorID,
		&i.Provisioner,
		&i.StorageMethod,
		&i.Type,
		&i.Input,
		&i.WorkerID,
		&i.FileID,
		&i.Tags,
		&i.ErrorCode,
		&i.TraceMetadata,
		&i.JobStatus,
	)
	return i, err
}
|
|
|
|
// insertProvisionerJobTimings bulk-inserts timing rows for one job by
// unnesting parallel arrays — one element per timing entry.
const insertProvisionerJobTimings = `-- name: InsertProvisionerJobTimings :many
INSERT INTO provisioner_job_timings (job_id, started_at, ended_at, stage, source, action, resource)
SELECT
	$1::uuid AS provisioner_job_id,
	unnest($2::timestamptz[]),
	unnest($3::timestamptz[]),
	unnest($4::provisioner_job_timing_stage[]),
	unnest($5::text[]),
	unnest($6::text[]),
	unnest($7::text[])
RETURNING job_id, started_at, ended_at, stage, source, action, resource
`

// InsertProvisionerJobTimingsParams carries parallel arrays; all slices are
// expected to have the same length (one index per timing row).
type InsertProvisionerJobTimingsParams struct {
	JobID     uuid.UUID                   `db:"job_id" json:"job_id"`
	StartedAt []time.Time                 `db:"started_at" json:"started_at"`
	EndedAt   []time.Time                 `db:"ended_at" json:"ended_at"`
	Stage     []ProvisionerJobTimingStage `db:"stage" json:"stage"`
	Source    []string                    `db:"source" json:"source"`
	Action    []string                    `db:"action" json:"action"`
	Resource  []string                    `db:"resource" json:"resource"`
}

// InsertProvisionerJobTimings inserts one timing row per array index for the
// given job and returns the inserted rows.
func (q *sqlQuerier) InsertProvisionerJobTimings(ctx context.Context, arg InsertProvisionerJobTimingsParams) ([]ProvisionerJobTiming, error) {
	rows, err := q.db.QueryContext(ctx, insertProvisionerJobTimings,
		arg.JobID,
		pq.Array(arg.StartedAt),
		pq.Array(arg.EndedAt),
		pq.Array(arg.Stage),
		pq.Array(arg.Source),
		pq.Array(arg.Action),
		pq.Array(arg.Resource),
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []ProvisionerJobTiming
	for rows.Next() {
		var i ProvisionerJobTiming
		if err := rows.Scan(
			&i.JobID,
			&i.StartedAt,
			&i.EndedAt,
			&i.Stage,
			&i.Source,
			&i.Action,
			&i.Resource,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// updateProvisionerJobByID touches only updated_at for a job; used as a
// liveness/heartbeat-style update (no other columns change).
const updateProvisionerJobByID = `-- name: UpdateProvisionerJobByID :exec
UPDATE
	provisioner_jobs
SET
	updated_at = $2
WHERE
	id = $1
`

// UpdateProvisionerJobByIDParams identifies the job and the new updated_at.
type UpdateProvisionerJobByIDParams struct {
	ID        uuid.UUID `db:"id" json:"id"`
	UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
}

// UpdateProvisionerJobByID sets the job's updated_at timestamp. A missing ID
// is not an error (zero rows affected is not checked).
func (q *sqlQuerier) UpdateProvisionerJobByID(ctx context.Context, arg UpdateProvisionerJobByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateProvisionerJobByID, arg.ID, arg.UpdatedAt)
	return err
}
|
|
|
|
// updateProvisionerJobWithCancelByID records cancellation (and optionally
// completion) timestamps for a job.
const updateProvisionerJobWithCancelByID = `-- name: UpdateProvisionerJobWithCancelByID :exec
UPDATE
	provisioner_jobs
SET
	canceled_at = $2,
	completed_at = $3
WHERE
	id = $1
`

// UpdateProvisionerJobWithCancelByIDParams: nullable timestamps allow setting
// canceled_at without completed_at (or clearing either).
type UpdateProvisionerJobWithCancelByIDParams struct {
	ID          uuid.UUID    `db:"id" json:"id"`
	CanceledAt  sql.NullTime `db:"canceled_at" json:"canceled_at"`
	CompletedAt sql.NullTime `db:"completed_at" json:"completed_at"`
}

// UpdateProvisionerJobWithCancelByID writes the cancel/complete timestamps
// for the given job.
func (q *sqlQuerier) UpdateProvisionerJobWithCancelByID(ctx context.Context, arg UpdateProvisionerJobWithCancelByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateProvisionerJobWithCancelByID, arg.ID, arg.CanceledAt, arg.CompletedAt)
	return err
}
|
|
|
|
// updateProvisionerJobWithCompleteByID marks a job complete, recording any
// terminal error and error code alongside the timestamps.
const updateProvisionerJobWithCompleteByID = `-- name: UpdateProvisionerJobWithCompleteByID :exec
UPDATE
	provisioner_jobs
SET
	updated_at = $2,
	completed_at = $3,
	error = $4,
	error_code = $5
WHERE
	id = $1
`

// UpdateProvisionerJobWithCompleteByIDParams: Error/ErrorCode are nullable so
// a successful completion stores NULLs.
type UpdateProvisionerJobWithCompleteByIDParams struct {
	ID          uuid.UUID      `db:"id" json:"id"`
	UpdatedAt   time.Time      `db:"updated_at" json:"updated_at"`
	CompletedAt sql.NullTime   `db:"completed_at" json:"completed_at"`
	Error       sql.NullString `db:"error" json:"error"`
	ErrorCode   sql.NullString `db:"error_code" json:"error_code"`
}

// UpdateProvisionerJobWithCompleteByID records completion state for the job.
func (q *sqlQuerier) UpdateProvisionerJobWithCompleteByID(ctx context.Context, arg UpdateProvisionerJobWithCompleteByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateProvisionerJobWithCompleteByID,
		arg.ID,
		arg.UpdatedAt,
		arg.CompletedAt,
		arg.Error,
		arg.ErrorCode,
	)
	return err
}
|
|
|
|
// updateProvisionerJobWithCompleteWithStartedAtByID is like
// updateProvisionerJobWithCompleteByID but additionally (re)writes
// started_at.
const updateProvisionerJobWithCompleteWithStartedAtByID = `-- name: UpdateProvisionerJobWithCompleteWithStartedAtByID :exec
UPDATE
	provisioner_jobs
SET
	updated_at = $2,
	completed_at = $3,
	error = $4,
	error_code = $5,
	started_at = $6
WHERE
	id = $1
`

// UpdateProvisionerJobWithCompleteWithStartedAtByIDParams mirrors the
// complete-by-ID params plus a nullable StartedAt.
type UpdateProvisionerJobWithCompleteWithStartedAtByIDParams struct {
	ID          uuid.UUID      `db:"id" json:"id"`
	UpdatedAt   time.Time      `db:"updated_at" json:"updated_at"`
	CompletedAt sql.NullTime   `db:"completed_at" json:"completed_at"`
	Error       sql.NullString `db:"error" json:"error"`
	ErrorCode   sql.NullString `db:"error_code" json:"error_code"`
	StartedAt   sql.NullTime   `db:"started_at" json:"started_at"`
}

// UpdateProvisionerJobWithCompleteWithStartedAtByID records completion state
// and the started_at timestamp for the job.
func (q *sqlQuerier) UpdateProvisionerJobWithCompleteWithStartedAtByID(ctx context.Context, arg UpdateProvisionerJobWithCompleteWithStartedAtByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateProvisionerJobWithCompleteWithStartedAtByID,
		arg.ID,
		arg.UpdatedAt,
		arg.CompletedAt,
		arg.Error,
		arg.ErrorCode,
		arg.StartedAt,
	)
	return err
}
|
|
|
|
// deleteProvisionerKey removes a provisioner key by primary key.
const deleteProvisionerKey = `-- name: DeleteProvisionerKey :exec
DELETE FROM
	provisioner_keys
WHERE
	id = $1
`

// DeleteProvisionerKey deletes the provisioner key with the given ID.
// Deleting a nonexistent ID is not an error.
func (q *sqlQuerier) DeleteProvisionerKey(ctx context.Context, id uuid.UUID) error {
	_, err := q.db.ExecContext(ctx, deleteProvisionerKey, id)
	return err
}
|
|
|
|
// getProvisionerKeyByHashedSecret looks a key up by its hashed secret,
// allowing authentication without storing the plaintext secret.
const getProvisionerKeyByHashedSecret = `-- name: GetProvisionerKeyByHashedSecret :one
SELECT
	id, created_at, organization_id, name, hashed_secret, tags
FROM
	provisioner_keys
WHERE
	hashed_secret = $1
`

// GetProvisionerKeyByHashedSecret returns the provisioner key whose
// hashed_secret matches; sql.ErrNoRows if none does.
func (q *sqlQuerier) GetProvisionerKeyByHashedSecret(ctx context.Context, hashedSecret []byte) (ProvisionerKey, error) {
	row := q.db.QueryRowContext(ctx, getProvisionerKeyByHashedSecret, hashedSecret)
	var i ProvisionerKey
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.OrganizationID,
		&i.Name,
		&i.HashedSecret,
		&i.Tags,
	)
	return i, err
}
|
|
|
|
// getProvisionerKeyByID fetches one provisioner key by primary key.
const getProvisionerKeyByID = `-- name: GetProvisionerKeyByID :one
SELECT
	id, created_at, organization_id, name, hashed_secret, tags
FROM
	provisioner_keys
WHERE
	id = $1
`

// GetProvisionerKeyByID returns the provisioner key with the given ID;
// sql.ErrNoRows if it does not exist.
func (q *sqlQuerier) GetProvisionerKeyByID(ctx context.Context, id uuid.UUID) (ProvisionerKey, error) {
	row := q.db.QueryRowContext(ctx, getProvisionerKeyByID, id)
	var i ProvisionerKey
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.OrganizationID,
		&i.Name,
		&i.HashedSecret,
		&i.Tags,
	)
	return i, err
}
|
|
|
|
// getProvisionerKeyByName fetches a key by organization and name; the name
// comparison is case-insensitive (lower() on both sides).
const getProvisionerKeyByName = `-- name: GetProvisionerKeyByName :one
SELECT
	id, created_at, organization_id, name, hashed_secret, tags
FROM
	provisioner_keys
WHERE
	organization_id = $1
AND
	lower(name) = lower($2)
`

// GetProvisionerKeyByNameParams identifies a key by org + (case-insensitive) name.
type GetProvisionerKeyByNameParams struct {
	OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
	Name           string    `db:"name" json:"name"`
}

// GetProvisionerKeyByName returns the matching provisioner key;
// sql.ErrNoRows if none matches.
func (q *sqlQuerier) GetProvisionerKeyByName(ctx context.Context, arg GetProvisionerKeyByNameParams) (ProvisionerKey, error) {
	row := q.db.QueryRowContext(ctx, getProvisionerKeyByName, arg.OrganizationID, arg.Name)
	var i ProvisionerKey
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.OrganizationID,
		&i.Name,
		&i.HashedSecret,
		&i.Tags,
	)
	return i, err
}
|
|
|
|
// insertProvisionerKey inserts a key, lower-casing the name on write so the
// case-insensitive lookup in getProvisionerKeyByName stays consistent.
// Note the bind order: name is $6 even though it is the 4th column.
const insertProvisionerKey = `-- name: InsertProvisionerKey :one
INSERT INTO
	provisioner_keys (
		id,
		created_at,
		organization_id,
		name,
		hashed_secret,
		tags
	)
VALUES
	($1, $2, $3, lower($6), $4, $5) RETURNING id, created_at, organization_id, name, hashed_secret, tags
`

// InsertProvisionerKeyParams holds the new key's column values. Name is last
// to match the $6 placeholder used by lower($6) in the query.
type InsertProvisionerKeyParams struct {
	ID             uuid.UUID `db:"id" json:"id"`
	CreatedAt      time.Time `db:"created_at" json:"created_at"`
	OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
	HashedSecret   []byte    `db:"hashed_secret" json:"hashed_secret"`
	Tags           StringMap `db:"tags" json:"tags"`
	Name           string    `db:"name" json:"name"`
}

// InsertProvisionerKey creates a provisioner key (name stored lower-cased)
// and returns the inserted row.
func (q *sqlQuerier) InsertProvisionerKey(ctx context.Context, arg InsertProvisionerKeyParams) (ProvisionerKey, error) {
	row := q.db.QueryRowContext(ctx, insertProvisionerKey,
		arg.ID,
		arg.CreatedAt,
		arg.OrganizationID,
		arg.HashedSecret,
		arg.Tags,
		arg.Name,
	)
	var i ProvisionerKey
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.OrganizationID,
		&i.Name,
		&i.HashedSecret,
		&i.Tags,
	)
	return i, err
}
|
|
|
|
// listProvisionerKeysByOrganization lists every key in an organization,
// including the reserved built-in keys (contrast with the ExcludeReserved
// variant below).
const listProvisionerKeysByOrganization = `-- name: ListProvisionerKeysByOrganization :many
SELECT
	id, created_at, organization_id, name, hashed_secret, tags
FROM
	provisioner_keys
WHERE
	organization_id = $1
`

// ListProvisionerKeysByOrganization returns all provisioner keys for the
// organization; nil slice when there are none.
func (q *sqlQuerier) ListProvisionerKeysByOrganization(ctx context.Context, organizationID uuid.UUID) ([]ProvisionerKey, error) {
	rows, err := q.db.QueryContext(ctx, listProvisionerKeysByOrganization, organizationID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []ProvisionerKey
	for rows.Next() {
		var i ProvisionerKey
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.OrganizationID,
			&i.Name,
			&i.HashedSecret,
			&i.Tags,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// listProvisionerKeysByOrganizationExcludeReserved lists an organization's
// keys while filtering out the three well-known reserved key IDs
// (built-in, user-auth, psk).
const listProvisionerKeysByOrganizationExcludeReserved = `-- name: ListProvisionerKeysByOrganizationExcludeReserved :many
SELECT
	id, created_at, organization_id, name, hashed_secret, tags
FROM
	provisioner_keys
WHERE
	organization_id = $1
AND
	-- exclude reserved built-in key
	id != '00000000-0000-0000-0000-000000000001'::uuid
AND
	-- exclude reserved user-auth key
	id != '00000000-0000-0000-0000-000000000002'::uuid
AND
	-- exclude reserved psk key
	id != '00000000-0000-0000-0000-000000000003'::uuid
`

// ListProvisionerKeysByOrganizationExcludeReserved returns the organization's
// user-manageable provisioner keys (reserved keys filtered out in SQL).
func (q *sqlQuerier) ListProvisionerKeysByOrganizationExcludeReserved(ctx context.Context, organizationID uuid.UUID) ([]ProvisionerKey, error) {
	rows, err := q.db.QueryContext(ctx, listProvisionerKeysByOrganizationExcludeReserved, organizationID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []ProvisionerKey
	for rows.Next() {
		var i ProvisionerKey
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.OrganizationID,
			&i.Name,
			&i.HashedSecret,
			&i.Tags,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getWorkspaceProxies lists all non-deleted workspace proxies.
const getWorkspaceProxies = `-- name: GetWorkspaceProxies :many
SELECT
	id, name, display_name, icon, url, wildcard_hostname, created_at, updated_at, deleted, token_hashed_secret, region_id, derp_enabled, derp_only, version
FROM
	workspace_proxies
WHERE
	deleted = false
`

// GetWorkspaceProxies returns every workspace proxy that has not been
// soft-deleted (deleted = false).
func (q *sqlQuerier) GetWorkspaceProxies(ctx context.Context) ([]WorkspaceProxy, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceProxies)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceProxy
	for rows.Next() {
		var i WorkspaceProxy
		if err := rows.Scan(
			&i.ID,
			&i.Name,
			&i.DisplayName,
			&i.Icon,
			&i.Url,
			&i.WildcardHostname,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.Deleted,
			&i.TokenHashedSecret,
			&i.RegionID,
			&i.DerpEnabled,
			&i.DerpOnly,
			&i.Version,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getWorkspaceProxyByHostname matches a sanitized hostname against either a
// proxy's access URL or its wildcard app hostname; the two bool parameters
// toggle which comparisons are attempted.
const getWorkspaceProxyByHostname = `-- name: GetWorkspaceProxyByHostname :one
SELECT
	id, name, display_name, icon, url, wildcard_hostname, created_at, updated_at, deleted, token_hashed_secret, region_id, derp_enabled, derp_only, version
FROM
	workspace_proxies
WHERE
	-- Validate that the @hostname has been sanitized and is not empty. This
	-- doesn't prevent SQL injection (already prevented by using prepared
	-- queries), but it does prevent carefully crafted hostnames from matching
	-- when they shouldn't.
	--
	-- Periods don't need to be escaped because they're not special characters
	-- in SQL matches unlike regular expressions.
	$1 :: text SIMILAR TO '[a-zA-Z0-9._-]+' AND
	deleted = false AND

	-- Validate that the hostname matches either the wildcard hostname or the
	-- access URL (ignoring scheme, port and path).
	(
		(
			$2 :: bool = true AND
			url SIMILAR TO '[^:]*://' || $1 :: text || '([:/]?%)*'
		) OR
		(
			$3 :: bool = true AND
			$1 :: text LIKE replace(wildcard_hostname, '*', '%')
		)
	)
LIMIT
	1
`

// GetWorkspaceProxyByHostnameParams: Hostname must already be sanitized (see
// the query comments); the two flags enable the URL and wildcard matches.
type GetWorkspaceProxyByHostnameParams struct {
	Hostname              string `db:"hostname" json:"hostname"`
	AllowAccessUrl        bool   `db:"allow_access_url" json:"allow_access_url"`
	AllowWildcardHostname bool   `db:"allow_wildcard_hostname" json:"allow_wildcard_hostname"`
}

// Finds a workspace proxy that has an access URL or app hostname that matches
// the provided hostname. This is to check if a hostname matches any workspace
// proxy.
//
// The hostname must be sanitized to only contain [a-zA-Z0-9.-] before calling
// this query. The scheme, port and path should be stripped.
func (q *sqlQuerier) GetWorkspaceProxyByHostname(ctx context.Context, arg GetWorkspaceProxyByHostnameParams) (WorkspaceProxy, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceProxyByHostname, arg.Hostname, arg.AllowAccessUrl, arg.AllowWildcardHostname)
	var i WorkspaceProxy
	err := row.Scan(
		&i.ID,
		&i.Name,
		&i.DisplayName,
		&i.Icon,
		&i.Url,
		&i.WildcardHostname,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Deleted,
		&i.TokenHashedSecret,
		&i.RegionID,
		&i.DerpEnabled,
		&i.DerpOnly,
		&i.Version,
	)
	return i, err
}
|
|
|
|
// getWorkspaceProxyByID fetches a proxy by primary key. Note: unlike the
// name-based lookup, this does NOT filter on deleted.
const getWorkspaceProxyByID = `-- name: GetWorkspaceProxyByID :one
SELECT
	id, name, display_name, icon, url, wildcard_hostname, created_at, updated_at, deleted, token_hashed_secret, region_id, derp_enabled, derp_only, version
FROM
	workspace_proxies
WHERE
	id = $1
LIMIT
	1
`

// GetWorkspaceProxyByID returns the proxy with the given ID (deleted or not);
// sql.ErrNoRows if it does not exist.
func (q *sqlQuerier) GetWorkspaceProxyByID(ctx context.Context, id uuid.UUID) (WorkspaceProxy, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceProxyByID, id)
	var i WorkspaceProxy
	err := row.Scan(
		&i.ID,
		&i.Name,
		&i.DisplayName,
		&i.Icon,
		&i.Url,
		&i.WildcardHostname,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Deleted,
		&i.TokenHashedSecret,
		&i.RegionID,
		&i.DerpEnabled,
		&i.DerpOnly,
		&i.Version,
	)
	return i, err
}
|
|
|
|
// getWorkspaceProxyByName fetches a non-deleted proxy by exact name
// (case-sensitive equality, unlike provisioner key name lookups).
const getWorkspaceProxyByName = `-- name: GetWorkspaceProxyByName :one
SELECT
	id, name, display_name, icon, url, wildcard_hostname, created_at, updated_at, deleted, token_hashed_secret, region_id, derp_enabled, derp_only, version
FROM
	workspace_proxies
WHERE
	name = $1
	AND deleted = false
LIMIT
	1
`

// GetWorkspaceProxyByName returns the non-deleted proxy with the given name;
// sql.ErrNoRows if none matches.
func (q *sqlQuerier) GetWorkspaceProxyByName(ctx context.Context, name string) (WorkspaceProxy, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceProxyByName, name)
	var i WorkspaceProxy
	err := row.Scan(
		&i.ID,
		&i.Name,
		&i.DisplayName,
		&i.Icon,
		&i.Url,
		&i.WildcardHostname,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Deleted,
		&i.TokenHashedSecret,
		&i.RegionID,
		&i.DerpEnabled,
		&i.DerpOnly,
		&i.Version,
	)
	return i, err
}
|
|
|
|
// insertWorkspaceProxy creates a proxy with empty url/wildcard_hostname;
// those are populated later by RegisterWorkspaceProxy when the proxy dials in.
const insertWorkspaceProxy = `-- name: InsertWorkspaceProxy :one
INSERT INTO
	workspace_proxies (
		id,
		url,
		wildcard_hostname,
		name,
		display_name,
		icon,
		derp_enabled,
		derp_only,
		token_hashed_secret,
		created_at,
		updated_at,
		deleted
	)
VALUES
	($1, '', '', $2, $3, $4, $5, $6, $7, $8, $9, false) RETURNING id, name, display_name, icon, url, wildcard_hostname, created_at, updated_at, deleted, token_hashed_secret, region_id, derp_enabled, derp_only, version
`

// InsertWorkspaceProxyParams holds the caller-supplied columns; url,
// wildcard_hostname, and deleted are hard-coded in the query.
type InsertWorkspaceProxyParams struct {
	ID                uuid.UUID `db:"id" json:"id"`
	Name              string    `db:"name" json:"name"`
	DisplayName       string    `db:"display_name" json:"display_name"`
	Icon              string    `db:"icon" json:"icon"`
	DerpEnabled       bool      `db:"derp_enabled" json:"derp_enabled"`
	DerpOnly          bool      `db:"derp_only" json:"derp_only"`
	TokenHashedSecret []byte    `db:"token_hashed_secret" json:"token_hashed_secret"`
	CreatedAt         time.Time `db:"created_at" json:"created_at"`
	UpdatedAt         time.Time `db:"updated_at" json:"updated_at"`
}

// InsertWorkspaceProxy creates a new (unregistered) workspace proxy row and
// returns the inserted record.
func (q *sqlQuerier) InsertWorkspaceProxy(ctx context.Context, arg InsertWorkspaceProxyParams) (WorkspaceProxy, error) {
	row := q.db.QueryRowContext(ctx, insertWorkspaceProxy,
		arg.ID,
		arg.Name,
		arg.DisplayName,
		arg.Icon,
		arg.DerpEnabled,
		arg.DerpOnly,
		arg.TokenHashedSecret,
		arg.CreatedAt,
		arg.UpdatedAt,
	)
	var i WorkspaceProxy
	err := row.Scan(
		&i.ID,
		&i.Name,
		&i.DisplayName,
		&i.Icon,
		&i.Url,
		&i.WildcardHostname,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Deleted,
		&i.TokenHashedSecret,
		&i.RegionID,
		&i.DerpEnabled,
		&i.DerpOnly,
		&i.Version,
	)
	return i, err
}
|
|
|
|
// registerWorkspaceProxy fills in the runtime-reported fields (URL, wildcard
// hostname, DERP flags, version) for an existing proxy; updated_at is set to
// the database's Now().
const registerWorkspaceProxy = `-- name: RegisterWorkspaceProxy :one
UPDATE
	workspace_proxies
SET
	url = $1 :: text,
	wildcard_hostname = $2 :: text,
	derp_enabled = $3 :: boolean,
	derp_only = $4 :: boolean,
	version = $5 :: text,
	updated_at = Now()
WHERE
	id = $6
RETURNING id, name, display_name, icon, url, wildcard_hostname, created_at, updated_at, deleted, token_hashed_secret, region_id, derp_enabled, derp_only, version
`

// RegisterWorkspaceProxyParams carries the proxy-reported values plus the
// target proxy ID.
type RegisterWorkspaceProxyParams struct {
	Url              string    `db:"url" json:"url"`
	WildcardHostname string    `db:"wildcard_hostname" json:"wildcard_hostname"`
	DerpEnabled      bool      `db:"derp_enabled" json:"derp_enabled"`
	DerpOnly         bool      `db:"derp_only" json:"derp_only"`
	Version          string    `db:"version" json:"version"`
	ID               uuid.UUID `db:"id" json:"id"`
}

// RegisterWorkspaceProxy updates a proxy's registration details and returns
// the updated row; sql.ErrNoRows if the ID does not exist.
func (q *sqlQuerier) RegisterWorkspaceProxy(ctx context.Context, arg RegisterWorkspaceProxyParams) (WorkspaceProxy, error) {
	row := q.db.QueryRowContext(ctx, registerWorkspaceProxy,
		arg.Url,
		arg.WildcardHostname,
		arg.DerpEnabled,
		arg.DerpOnly,
		arg.Version,
		arg.ID,
	)
	var i WorkspaceProxy
	err := row.Scan(
		&i.ID,
		&i.Name,
		&i.DisplayName,
		&i.Icon,
		&i.Url,
		&i.WildcardHostname,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Deleted,
		&i.TokenHashedSecret,
		&i.RegionID,
		&i.DerpEnabled,
		&i.DerpOnly,
		&i.Version,
	)
	return i, err
}
|
|
|
|
const updateWorkspaceProxy = `-- name: UpdateWorkspaceProxy :one
UPDATE
	workspace_proxies
SET
	-- These values should always be provided.
	name = $1,
	display_name = $2,
	icon = $3,
	-- Only update the token if a new one is provided.
	-- So this is an optional field.
	token_hashed_secret = CASE
		WHEN length($4 :: bytea) > 0 THEN $4 :: bytea
		ELSE workspace_proxies.token_hashed_secret
	END,
	-- Always update this timestamp.
	updated_at = Now()
WHERE
	id = $5
RETURNING id, name, display_name, icon, url, wildcard_hostname, created_at, updated_at, deleted, token_hashed_secret, region_id, derp_enabled, derp_only, version
`

// UpdateWorkspaceProxyParams holds the editable proxy properties. An empty
// TokenHashedSecret leaves the stored secret untouched (see CASE in the SQL).
type UpdateWorkspaceProxyParams struct {
	Name              string    `db:"name" json:"name"`
	DisplayName       string    `db:"display_name" json:"display_name"`
	Icon              string    `db:"icon" json:"icon"`
	TokenHashedSecret []byte    `db:"token_hashed_secret" json:"token_hashed_secret"`
	ID                uuid.UUID `db:"id" json:"id"`
}

// This allows editing the properties of a workspace proxy.
func (q *sqlQuerier) UpdateWorkspaceProxy(ctx context.Context, arg UpdateWorkspaceProxyParams) (WorkspaceProxy, error) {
	row := q.db.QueryRowContext(ctx, updateWorkspaceProxy,
		arg.Name,
		arg.DisplayName,
		arg.Icon,
		arg.TokenHashedSecret,
		arg.ID,
	)
	var i WorkspaceProxy
	// Scan order must match the RETURNING column list above exactly.
	err := row.Scan(
		&i.ID,
		&i.Name,
		&i.DisplayName,
		&i.Icon,
		&i.Url,
		&i.WildcardHostname,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Deleted,
		&i.TokenHashedSecret,
		&i.RegionID,
		&i.DerpEnabled,
		&i.DerpOnly,
		&i.Version,
	)
	return i, err
}
|
|
|
|
const updateWorkspaceProxyDeleted = `-- name: UpdateWorkspaceProxyDeleted :exec
|
|
UPDATE
|
|
workspace_proxies
|
|
SET
|
|
updated_at = Now(),
|
|
deleted = $1
|
|
WHERE
|
|
id = $2
|
|
`
|
|
|
|
type UpdateWorkspaceProxyDeletedParams struct {
|
|
Deleted bool `db:"deleted" json:"deleted"`
|
|
ID uuid.UUID `db:"id" json:"id"`
|
|
}
|
|
|
|
func (q *sqlQuerier) UpdateWorkspaceProxyDeleted(ctx context.Context, arg UpdateWorkspaceProxyDeletedParams) error {
|
|
_, err := q.db.ExecContext(ctx, updateWorkspaceProxyDeleted, arg.Deleted, arg.ID)
|
|
return err
|
|
}
|
|
|
|
const getQuotaAllowanceForUser = `-- name: GetQuotaAllowanceForUser :one
SELECT
	coalesce(SUM(groups.quota_allowance), 0)::BIGINT
FROM
	(
		-- Select all groups this user is a member of. This will also include
		-- the "Everyone" group for organizations the user is a member of.
		SELECT user_id, user_email, user_username, user_hashed_password, user_created_at, user_updated_at, user_status, user_rbac_roles, user_login_type, user_avatar_url, user_deleted, user_last_seen_at, user_quiet_hours_schedule, user_name, user_github_com_user_id, user_is_system, organization_id, group_name, group_id FROM group_members_expanded
		WHERE
			$1 = user_id AND
			$2 = group_members_expanded.organization_id
	) AS members
INNER JOIN groups ON
	members.group_id = groups.id
`

// GetQuotaAllowanceForUserParams scopes the quota-allowance sum to one user
// within one organization.
type GetQuotaAllowanceForUserParams struct {
	UserID         uuid.UUID `db:"user_id" json:"user_id"`
	OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
}

// GetQuotaAllowanceForUser sums quota_allowance across all groups the user
// belongs to in the given organization; COALESCE guarantees 0 (not NULL)
// when the user has no group memberships there.
func (q *sqlQuerier) GetQuotaAllowanceForUser(ctx context.Context, arg GetQuotaAllowanceForUserParams) (int64, error) {
	row := q.db.QueryRowContext(ctx, getQuotaAllowanceForUser, arg.UserID, arg.OrganizationID)
	var column_1 int64
	err := row.Scan(&column_1)
	return column_1, err
}
|
|
|
|
const getQuotaConsumedForUser = `-- name: GetQuotaConsumedForUser :one
WITH latest_builds AS (
SELECT
	DISTINCT ON
	(wb.workspace_id) wb.workspace_id,
	wb.daily_cost
FROM
	workspace_builds wb
	-- This INNER JOIN prevents a seq scan of the workspace_builds table.
	-- Limit the rows to the absolute minimum required, which is all workspaces
	-- in a given organization for a given user.
INNER JOIN
	workspaces on wb.workspace_id = workspaces.id
WHERE
	-- Only return workspaces that match the user + organization.
	-- Quotas are calculated per user per organization.
	NOT workspaces.deleted AND
	workspaces.owner_id = $1 AND
	workspaces.organization_id = $2
ORDER BY
	wb.workspace_id,
	wb.build_number DESC
)
SELECT
	coalesce(SUM(daily_cost), 0)::BIGINT
FROM
	latest_builds
`

// GetQuotaConsumedForUserParams scopes quota consumption to one workspace
// owner within one organization.
type GetQuotaConsumedForUserParams struct {
	OwnerID        uuid.UUID `db:"owner_id" json:"owner_id"`
	OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
}

// GetQuotaConsumedForUser sums daily_cost of the latest build of each
// non-deleted workspace the user owns in the organization (DISTINCT ON +
// build_number DESC picks the newest build per workspace).
func (q *sqlQuerier) GetQuotaConsumedForUser(ctx context.Context, arg GetQuotaConsumedForUserParams) (int64, error) {
	row := q.db.QueryRowContext(ctx, getQuotaConsumedForUser, arg.OwnerID, arg.OrganizationID)
	var column_1 int64
	err := row.Scan(&column_1)
	return column_1, err
}
|
|
|
|
const deleteReplicasUpdatedBefore = `-- name: DeleteReplicasUpdatedBefore :exec
|
|
DELETE FROM replicas WHERE updated_at < $1
|
|
`
|
|
|
|
func (q *sqlQuerier) DeleteReplicasUpdatedBefore(ctx context.Context, updatedAt time.Time) error {
|
|
_, err := q.db.ExecContext(ctx, deleteReplicasUpdatedBefore, updatedAt)
|
|
return err
|
|
}
|
|
|
|
const getReplicaByID = `-- name: GetReplicaByID :one
SELECT id, created_at, started_at, stopped_at, updated_at, hostname, region_id, relay_address, database_latency, version, error, "primary" FROM replicas WHERE id = $1
`

// GetReplicaByID fetches a single replica row by its UUID. The caller sees
// sql.ErrNoRows (from row.Scan) when no such replica exists.
func (q *sqlQuerier) GetReplicaByID(ctx context.Context, id uuid.UUID) (Replica, error) {
	row := q.db.QueryRowContext(ctx, getReplicaByID, id)
	var i Replica
	// Scan order must match the SELECT column list above exactly.
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.StartedAt,
		&i.StoppedAt,
		&i.UpdatedAt,
		&i.Hostname,
		&i.RegionID,
		&i.RelayAddress,
		&i.DatabaseLatency,
		&i.Version,
		&i.Error,
		&i.Primary,
	)
	return i, err
}
|
|
|
|
const getReplicasUpdatedAfter = `-- name: GetReplicasUpdatedAfter :many
SELECT id, created_at, started_at, stopped_at, updated_at, hostname, region_id, relay_address, database_latency, version, error, "primary" FROM replicas WHERE updated_at > $1 AND stopped_at IS NULL
`

// GetReplicasUpdatedAfter lists all running replicas (stopped_at IS NULL)
// whose updated_at is strictly newer than the given timestamp.
func (q *sqlQuerier) GetReplicasUpdatedAfter(ctx context.Context, updatedAt time.Time) ([]Replica, error) {
	rows, err := q.db.QueryContext(ctx, getReplicasUpdatedAfter, updatedAt)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []Replica
	for rows.Next() {
		var i Replica
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.StartedAt,
			&i.StoppedAt,
			&i.UpdatedAt,
			&i.Hostname,
			&i.RegionID,
			&i.RelayAddress,
			&i.DatabaseLatency,
			&i.Version,
			&i.Error,
			&i.Primary,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err surfaces any close-time driver error;
	// the deferred Close above is then a harmless no-op.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const insertReplica = `-- name: InsertReplica :one
INSERT INTO replicas (
	id,
	created_at,
	started_at,
	updated_at,
	hostname,
	region_id,
	relay_address,
	version,
	database_latency,
	"primary"
) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) RETURNING id, created_at, started_at, stopped_at, updated_at, hostname, region_id, relay_address, database_latency, version, error, "primary"
`

// InsertReplicaParams are the initial values for a new replica row; stopped_at
// and error are not settable at insert time.
type InsertReplicaParams struct {
	ID              uuid.UUID `db:"id" json:"id"`
	CreatedAt       time.Time `db:"created_at" json:"created_at"`
	StartedAt       time.Time `db:"started_at" json:"started_at"`
	UpdatedAt       time.Time `db:"updated_at" json:"updated_at"`
	Hostname        string    `db:"hostname" json:"hostname"`
	RegionID        int32     `db:"region_id" json:"region_id"`
	RelayAddress    string    `db:"relay_address" json:"relay_address"`
	Version         string    `db:"version" json:"version"`
	DatabaseLatency int32     `db:"database_latency" json:"database_latency"`
	Primary         bool      `db:"primary" json:"primary"`
}

// InsertReplica creates a replica row and returns the inserted row in full.
// Note the argument order follows the INSERT column list, which differs from
// the RETURNING/scan order (version precedes database_latency on insert).
func (q *sqlQuerier) InsertReplica(ctx context.Context, arg InsertReplicaParams) (Replica, error) {
	row := q.db.QueryRowContext(ctx, insertReplica,
		arg.ID,
		arg.CreatedAt,
		arg.StartedAt,
		arg.UpdatedAt,
		arg.Hostname,
		arg.RegionID,
		arg.RelayAddress,
		arg.Version,
		arg.DatabaseLatency,
		arg.Primary,
	)
	var i Replica
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.StartedAt,
		&i.StoppedAt,
		&i.UpdatedAt,
		&i.Hostname,
		&i.RegionID,
		&i.RelayAddress,
		&i.DatabaseLatency,
		&i.Version,
		&i.Error,
		&i.Primary,
	)
	return i, err
}
|
|
|
|
const updateReplica = `-- name: UpdateReplica :one
UPDATE replicas SET
	updated_at = $2,
	started_at = $3,
	stopped_at = $4,
	relay_address = $5,
	region_id = $6,
	hostname = $7,
	version = $8,
	error = $9,
	database_latency = $10,
	"primary" = $11
WHERE id = $1 RETURNING id, created_at, started_at, stopped_at, updated_at, hostname, region_id, relay_address, database_latency, version, error, "primary"
`

// UpdateReplicaParams rewrites every mutable column of the replica row
// identified by ID; created_at is immutable.
type UpdateReplicaParams struct {
	ID              uuid.UUID    `db:"id" json:"id"`
	UpdatedAt       time.Time    `db:"updated_at" json:"updated_at"`
	StartedAt       time.Time    `db:"started_at" json:"started_at"`
	StoppedAt       sql.NullTime `db:"stopped_at" json:"stopped_at"`
	RelayAddress    string       `db:"relay_address" json:"relay_address"`
	RegionID        int32        `db:"region_id" json:"region_id"`
	Hostname        string       `db:"hostname" json:"hostname"`
	Version         string       `db:"version" json:"version"`
	Error           string       `db:"error" json:"error"`
	DatabaseLatency int32        `db:"database_latency" json:"database_latency"`
	Primary         bool         `db:"primary" json:"primary"`
}

// UpdateReplica overwrites a replica's mutable fields and returns the updated
// row. ID is $1 and so is passed first even though it appears last in the SQL.
func (q *sqlQuerier) UpdateReplica(ctx context.Context, arg UpdateReplicaParams) (Replica, error) {
	row := q.db.QueryRowContext(ctx, updateReplica,
		arg.ID,
		arg.UpdatedAt,
		arg.StartedAt,
		arg.StoppedAt,
		arg.RelayAddress,
		arg.RegionID,
		arg.Hostname,
		arg.Version,
		arg.Error,
		arg.DatabaseLatency,
		arg.Primary,
	)
	var i Replica
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.StartedAt,
		&i.StoppedAt,
		&i.UpdatedAt,
		&i.Hostname,
		&i.RegionID,
		&i.RelayAddress,
		&i.DatabaseLatency,
		&i.Version,
		&i.Error,
		&i.Primary,
	)
	return i, err
}
|
|
|
|
const customRoles = `-- name: CustomRoles :many
SELECT
	name, display_name, site_permissions, org_permissions, user_permissions, created_at, updated_at, organization_id, id
FROM
	custom_roles
WHERE
	true
	-- @lookup_roles will filter for exact (role_name, org_id) pairs
	-- To do this manually in SQL, you can construct an array and cast it:
	-- cast(ARRAY[('customrole','ece79dac-926e-44ca-9790-2ff7c5eb6e0c')] AS name_organization_pair[])
	AND CASE WHEN array_length($1 :: name_organization_pair[], 1) > 0 THEN
		-- Using 'coalesce' to avoid troubles with null literals being an empty string.
		(name, coalesce(organization_id, '00000000-0000-0000-0000-000000000000' ::uuid)) = ANY ($1::name_organization_pair[])
	ELSE true
	END
	-- This allows fetching all roles, or just site wide roles
	AND CASE WHEN $2 :: boolean THEN
		organization_id IS null
	ELSE true
	END
	-- Allows fetching all roles to a particular organization
	AND CASE WHEN $3 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
		organization_id = $3
	ELSE true
	END
`

// CustomRolesParams: all three filters are optional — an empty LookupRoles
// slice, false ExcludeOrgRoles, and the zero-UUID OrganizationID each make the
// corresponding SQL CASE a no-op (see query comments above).
type CustomRolesParams struct {
	LookupRoles     []NameOrganizationPair `db:"lookup_roles" json:"lookup_roles"`
	ExcludeOrgRoles bool                   `db:"exclude_org_roles" json:"exclude_org_roles"`
	OrganizationID  uuid.UUID              `db:"organization_id" json:"organization_id"`
}

// CustomRoles lists custom roles, optionally filtered by exact
// (name, organization) pairs, restricted to site-wide roles, or scoped to one
// organization. LookupRoles is passed through pq.Array so the driver sends a
// Postgres array of name_organization_pair.
func (q *sqlQuerier) CustomRoles(ctx context.Context, arg CustomRolesParams) ([]CustomRole, error) {
	rows, err := q.db.QueryContext(ctx, customRoles, pq.Array(arg.LookupRoles), arg.ExcludeOrgRoles, arg.OrganizationID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []CustomRole
	for rows.Next() {
		var i CustomRole
		if err := rows.Scan(
			&i.Name,
			&i.DisplayName,
			&i.SitePermissions,
			&i.OrgPermissions,
			&i.UserPermissions,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.OrganizationID,
			&i.ID,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const deleteCustomRole = `-- name: DeleteCustomRole :exec
|
|
DELETE FROM
|
|
custom_roles
|
|
WHERE
|
|
name = lower($1)
|
|
AND organization_id = $2
|
|
`
|
|
|
|
type DeleteCustomRoleParams struct {
|
|
Name string `db:"name" json:"name"`
|
|
OrganizationID uuid.NullUUID `db:"organization_id" json:"organization_id"`
|
|
}
|
|
|
|
func (q *sqlQuerier) DeleteCustomRole(ctx context.Context, arg DeleteCustomRoleParams) error {
|
|
_, err := q.db.ExecContext(ctx, deleteCustomRole, arg.Name, arg.OrganizationID)
|
|
return err
|
|
}
|
|
|
|
const insertCustomRole = `-- name: InsertCustomRole :one
INSERT INTO
	custom_roles (
	name,
	display_name,
	organization_id,
	site_permissions,
	org_permissions,
	user_permissions,
	created_at,
	updated_at
)
VALUES (
	-- Always force lowercase names
	lower($1),
	$2,
	$3,
	$4,
	$5,
	$6,
	now(),
	now()
)
RETURNING name, display_name, site_permissions, org_permissions, user_permissions, created_at, updated_at, organization_id, id
`

// InsertCustomRoleParams: Name is lowercased by the query itself; a null
// OrganizationID makes the role site-wide.
type InsertCustomRoleParams struct {
	Name            string                `db:"name" json:"name"`
	DisplayName     string                `db:"display_name" json:"display_name"`
	OrganizationID  uuid.NullUUID         `db:"organization_id" json:"organization_id"`
	SitePermissions CustomRolePermissions `db:"site_permissions" json:"site_permissions"`
	OrgPermissions  CustomRolePermissions `db:"org_permissions" json:"org_permissions"`
	UserPermissions CustomRolePermissions `db:"user_permissions" json:"user_permissions"`
}

// InsertCustomRole creates a custom role with database-side timestamps
// (created_at/updated_at = now()) and returns the inserted row.
func (q *sqlQuerier) InsertCustomRole(ctx context.Context, arg InsertCustomRoleParams) (CustomRole, error) {
	row := q.db.QueryRowContext(ctx, insertCustomRole,
		arg.Name,
		arg.DisplayName,
		arg.OrganizationID,
		arg.SitePermissions,
		arg.OrgPermissions,
		arg.UserPermissions,
	)
	var i CustomRole
	err := row.Scan(
		&i.Name,
		&i.DisplayName,
		&i.SitePermissions,
		&i.OrgPermissions,
		&i.UserPermissions,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.OrganizationID,
		&i.ID,
	)
	return i, err
}
|
|
|
|
const updateCustomRole = `-- name: UpdateCustomRole :one
UPDATE
	custom_roles
SET
	display_name = $1,
	site_permissions = $2,
	org_permissions = $3,
	user_permissions = $4,
	updated_at = now()
WHERE
	name = lower($5)
	AND organization_id = $6
RETURNING name, display_name, site_permissions, org_permissions, user_permissions, created_at, updated_at, organization_id, id
`

// UpdateCustomRoleParams: Name and OrganizationID locate the role (name is
// case-folded); the remaining fields are the new values.
type UpdateCustomRoleParams struct {
	DisplayName     string                `db:"display_name" json:"display_name"`
	SitePermissions CustomRolePermissions `db:"site_permissions" json:"site_permissions"`
	OrgPermissions  CustomRolePermissions `db:"org_permissions" json:"org_permissions"`
	UserPermissions CustomRolePermissions `db:"user_permissions" json:"user_permissions"`
	Name            string                `db:"name" json:"name"`
	OrganizationID  uuid.NullUUID         `db:"organization_id" json:"organization_id"`
}

// UpdateCustomRole replaces a custom role's display name and permission sets,
// bumps updated_at server-side, and returns the updated row.
func (q *sqlQuerier) UpdateCustomRole(ctx context.Context, arg UpdateCustomRoleParams) (CustomRole, error) {
	row := q.db.QueryRowContext(ctx, updateCustomRole,
		arg.DisplayName,
		arg.SitePermissions,
		arg.OrgPermissions,
		arg.UserPermissions,
		arg.Name,
		arg.OrganizationID,
	)
	var i CustomRole
	err := row.Scan(
		&i.Name,
		&i.DisplayName,
		&i.SitePermissions,
		&i.OrgPermissions,
		&i.UserPermissions,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.OrganizationID,
		&i.ID,
	)
	return i, err
}
|
|
|
|
const deleteRuntimeConfig = `-- name: DeleteRuntimeConfig :exec
|
|
DELETE FROM site_configs
|
|
WHERE site_configs.key = $1
|
|
`
|
|
|
|
func (q *sqlQuerier) DeleteRuntimeConfig(ctx context.Context, key string) error {
|
|
_, err := q.db.ExecContext(ctx, deleteRuntimeConfig, key)
|
|
return err
|
|
}
|
|
|
|
const getAnnouncementBanners = `-- name: GetAnnouncementBanners :one
SELECT value FROM site_configs WHERE key = 'announcement_banners'
`

// GetAnnouncementBanners returns the raw announcement_banners value;
// sql.ErrNoRows (via Scan) if the key was never set.
func (q *sqlQuerier) GetAnnouncementBanners(ctx context.Context) (string, error) {
	row := q.db.QueryRowContext(ctx, getAnnouncementBanners)
	var value string
	err := row.Scan(&value)
	return value, err
}

const getAppSecurityKey = `-- name: GetAppSecurityKey :one
SELECT value FROM site_configs WHERE key = 'app_signing_key'
`

// GetAppSecurityKey returns the stored app_signing_key value.
func (q *sqlQuerier) GetAppSecurityKey(ctx context.Context) (string, error) {
	row := q.db.QueryRowContext(ctx, getAppSecurityKey)
	var value string
	err := row.Scan(&value)
	return value, err
}

const getApplicationName = `-- name: GetApplicationName :one
SELECT value FROM site_configs WHERE key = 'application_name'
`

// GetApplicationName returns the stored application_name value.
func (q *sqlQuerier) GetApplicationName(ctx context.Context) (string, error) {
	row := q.db.QueryRowContext(ctx, getApplicationName)
	var value string
	err := row.Scan(&value)
	return value, err
}

const getCoordinatorResumeTokenSigningKey = `-- name: GetCoordinatorResumeTokenSigningKey :one
SELECT value FROM site_configs WHERE key = 'coordinator_resume_token_signing_key'
`

// GetCoordinatorResumeTokenSigningKey returns the stored signing key used for
// coordinator resume tokens.
func (q *sqlQuerier) GetCoordinatorResumeTokenSigningKey(ctx context.Context) (string, error) {
	row := q.db.QueryRowContext(ctx, getCoordinatorResumeTokenSigningKey)
	var value string
	err := row.Scan(&value)
	return value, err
}

const getDERPMeshKey = `-- name: GetDERPMeshKey :one
SELECT value FROM site_configs WHERE key = 'derp_mesh_key'
`

// GetDERPMeshKey returns the stored derp_mesh_key value.
func (q *sqlQuerier) GetDERPMeshKey(ctx context.Context) (string, error) {
	row := q.db.QueryRowContext(ctx, getDERPMeshKey)
	var value string
	err := row.Scan(&value)
	return value, err
}

const getDefaultProxyConfig = `-- name: GetDefaultProxyConfig :one
SELECT
	COALESCE((SELECT value FROM site_configs WHERE key = 'default_proxy_display_name'), 'Default') :: text AS display_name,
	COALESCE((SELECT value FROM site_configs WHERE key = 'default_proxy_icon_url'), '/emojis/1f3e1.png') :: text AS icon_url
`

// GetDefaultProxyConfigRow pairs the default proxy's display name and icon.
type GetDefaultProxyConfigRow struct {
	DisplayName string `db:"display_name" json:"display_name"`
	IconUrl     string `db:"icon_url" json:"icon_url"`
}

// GetDefaultProxyConfig returns the default proxy's display settings, falling
// back to 'Default' / '/emojis/1f3e1.png' in SQL when the keys are unset, so
// this never returns sql.ErrNoRows.
func (q *sqlQuerier) GetDefaultProxyConfig(ctx context.Context) (GetDefaultProxyConfigRow, error) {
	row := q.db.QueryRowContext(ctx, getDefaultProxyConfig)
	var i GetDefaultProxyConfigRow
	err := row.Scan(&i.DisplayName, &i.IconUrl)
	return i, err
}

const getDeploymentID = `-- name: GetDeploymentID :one
SELECT value FROM site_configs WHERE key = 'deployment_id'
`

// GetDeploymentID returns the stored deployment_id value.
func (q *sqlQuerier) GetDeploymentID(ctx context.Context) (string, error) {
	row := q.db.QueryRowContext(ctx, getDeploymentID)
	var value string
	err := row.Scan(&value)
	return value, err
}

const getHealthSettings = `-- name: GetHealthSettings :one
SELECT
	COALESCE((SELECT value FROM site_configs WHERE key = 'health_settings'), '{}') :: text AS health_settings
`

// GetHealthSettings returns the health settings JSON string, defaulting to
// '{}' in SQL when unset (never sql.ErrNoRows).
func (q *sqlQuerier) GetHealthSettings(ctx context.Context) (string, error) {
	row := q.db.QueryRowContext(ctx, getHealthSettings)
	var health_settings string
	err := row.Scan(&health_settings)
	return health_settings, err
}

const getLastUpdateCheck = `-- name: GetLastUpdateCheck :one
SELECT value FROM site_configs WHERE key = 'last_update_check'
`

// GetLastUpdateCheck returns the stored last_update_check value.
func (q *sqlQuerier) GetLastUpdateCheck(ctx context.Context) (string, error) {
	row := q.db.QueryRowContext(ctx, getLastUpdateCheck)
	var value string
	err := row.Scan(&value)
	return value, err
}

const getLogoURL = `-- name: GetLogoURL :one
SELECT value FROM site_configs WHERE key = 'logo_url'
`

// GetLogoURL returns the stored logo_url value.
func (q *sqlQuerier) GetLogoURL(ctx context.Context) (string, error) {
	row := q.db.QueryRowContext(ctx, getLogoURL)
	var value string
	err := row.Scan(&value)
	return value, err
}

const getNotificationsSettings = `-- name: GetNotificationsSettings :one
SELECT
	COALESCE((SELECT value FROM site_configs WHERE key = 'notifications_settings'), '{}') :: text AS notifications_settings
`

// GetNotificationsSettings returns the notifications settings JSON string,
// defaulting to '{}' in SQL when unset (never sql.ErrNoRows).
func (q *sqlQuerier) GetNotificationsSettings(ctx context.Context) (string, error) {
	row := q.db.QueryRowContext(ctx, getNotificationsSettings)
	var notifications_settings string
	err := row.Scan(&notifications_settings)
	return notifications_settings, err
}

const getOAuth2GithubDefaultEligible = `-- name: GetOAuth2GithubDefaultEligible :one
SELECT
	CASE
		WHEN value = 'true' THEN TRUE
		ELSE FALSE
	END
FROM site_configs
WHERE key = 'oauth2_github_default_eligible'
`

// GetOAuth2GithubDefaultEligible reports whether the stored value is the
// literal string 'true'; sql.ErrNoRows if the key was never set.
func (q *sqlQuerier) GetOAuth2GithubDefaultEligible(ctx context.Context) (bool, error) {
	row := q.db.QueryRowContext(ctx, getOAuth2GithubDefaultEligible)
	var column_1 bool
	err := row.Scan(&column_1)
	return column_1, err
}

const getOAuthSigningKey = `-- name: GetOAuthSigningKey :one
SELECT value FROM site_configs WHERE key = 'oauth_signing_key'
`

// GetOAuthSigningKey returns the stored oauth_signing_key value.
func (q *sqlQuerier) GetOAuthSigningKey(ctx context.Context) (string, error) {
	row := q.db.QueryRowContext(ctx, getOAuthSigningKey)
	var value string
	err := row.Scan(&value)
	return value, err
}

const getRuntimeConfig = `-- name: GetRuntimeConfig :one
SELECT value FROM site_configs WHERE site_configs.key = $1
`

// GetRuntimeConfig returns the site_configs value for an arbitrary key.
func (q *sqlQuerier) GetRuntimeConfig(ctx context.Context, key string) (string, error) {
	row := q.db.QueryRowContext(ctx, getRuntimeConfig, key)
	var value string
	err := row.Scan(&value)
	return value, err
}

const getWebpushVAPIDKeys = `-- name: GetWebpushVAPIDKeys :one
SELECT
	COALESCE((SELECT value FROM site_configs WHERE key = 'webpush_vapid_public_key'), '') :: text AS vapid_public_key,
	COALESCE((SELECT value FROM site_configs WHERE key = 'webpush_vapid_private_key'), '') :: text AS vapid_private_key
`

// GetWebpushVAPIDKeysRow pairs the web-push VAPID public and private keys.
type GetWebpushVAPIDKeysRow struct {
	VapidPublicKey  string `db:"vapid_public_key" json:"vapid_public_key"`
	VapidPrivateKey string `db:"vapid_private_key" json:"vapid_private_key"`
}

// GetWebpushVAPIDKeys returns both VAPID keys, defaulting each to the empty
// string in SQL when unset (never sql.ErrNoRows).
func (q *sqlQuerier) GetWebpushVAPIDKeys(ctx context.Context) (GetWebpushVAPIDKeysRow, error) {
	row := q.db.QueryRowContext(ctx, getWebpushVAPIDKeys)
	var i GetWebpushVAPIDKeysRow
	err := row.Scan(&i.VapidPublicKey, &i.VapidPrivateKey)
	return i, err
}
|
|
|
|
const insertDERPMeshKey = `-- name: InsertDERPMeshKey :exec
INSERT INTO site_configs (key, value) VALUES ('derp_mesh_key', $1)
`

// InsertDERPMeshKey stores the DERP mesh key. Plain INSERT (no upsert): a
// second call for an existing key violates the site_configs key constraint.
func (q *sqlQuerier) InsertDERPMeshKey(ctx context.Context, value string) error {
	_, err := q.db.ExecContext(ctx, insertDERPMeshKey, value)
	return err
}

const insertDeploymentID = `-- name: InsertDeploymentID :exec
INSERT INTO site_configs (key, value) VALUES ('deployment_id', $1)
`

// InsertDeploymentID stores the deployment ID. Plain INSERT (no upsert), same
// caveat as InsertDERPMeshKey.
func (q *sqlQuerier) InsertDeploymentID(ctx context.Context, value string) error {
	_, err := q.db.ExecContext(ctx, insertDeploymentID, value)
	return err
}
|
|
|
|
const upsertAnnouncementBanners = `-- name: UpsertAnnouncementBanners :exec
INSERT INTO site_configs (key, value) VALUES ('announcement_banners', $1)
ON CONFLICT (key) DO UPDATE SET value = $1 WHERE site_configs.key = 'announcement_banners'
`

// UpsertAnnouncementBanners inserts or replaces the announcement_banners value.
func (q *sqlQuerier) UpsertAnnouncementBanners(ctx context.Context, value string) error {
	_, err := q.db.ExecContext(ctx, upsertAnnouncementBanners, value)
	return err
}

const upsertAppSecurityKey = `-- name: UpsertAppSecurityKey :exec
INSERT INTO site_configs (key, value) VALUES ('app_signing_key', $1)
ON CONFLICT (key) DO UPDATE set value = $1 WHERE site_configs.key = 'app_signing_key'
`

// UpsertAppSecurityKey inserts or replaces the app_signing_key value.
func (q *sqlQuerier) UpsertAppSecurityKey(ctx context.Context, value string) error {
	_, err := q.db.ExecContext(ctx, upsertAppSecurityKey, value)
	return err
}

const upsertApplicationName = `-- name: UpsertApplicationName :exec
INSERT INTO site_configs (key, value) VALUES ('application_name', $1)
ON CONFLICT (key) DO UPDATE SET value = $1 WHERE site_configs.key = 'application_name'
`

// UpsertApplicationName inserts or replaces the application_name value.
func (q *sqlQuerier) UpsertApplicationName(ctx context.Context, value string) error {
	_, err := q.db.ExecContext(ctx, upsertApplicationName, value)
	return err
}

const upsertCoordinatorResumeTokenSigningKey = `-- name: UpsertCoordinatorResumeTokenSigningKey :exec
INSERT INTO site_configs (key, value) VALUES ('coordinator_resume_token_signing_key', $1)
ON CONFLICT (key) DO UPDATE set value = $1 WHERE site_configs.key = 'coordinator_resume_token_signing_key'
`

// UpsertCoordinatorResumeTokenSigningKey inserts or replaces the coordinator
// resume-token signing key.
func (q *sqlQuerier) UpsertCoordinatorResumeTokenSigningKey(ctx context.Context, value string) error {
	_, err := q.db.ExecContext(ctx, upsertCoordinatorResumeTokenSigningKey, value)
	return err
}

const upsertDefaultProxy = `-- name: UpsertDefaultProxy :exec
INSERT INTO site_configs (key, value)
VALUES
	('default_proxy_display_name', $1 :: text),
	('default_proxy_icon_url', $2 :: text)
ON CONFLICT
	(key)
DO UPDATE SET value = EXCLUDED.value WHERE site_configs.key = EXCLUDED.key
`

// UpsertDefaultProxyParams is the display name and icon URL for the implicit
// default proxy.
type UpsertDefaultProxyParams struct {
	DisplayName string `db:"display_name" json:"display_name"`
	IconUrl     string `db:"icon_url" json:"icon_url"`
}

// The default proxy is implied and not actually stored in the database.
// So we need to store it's configuration here for display purposes.
// The functional values are immutable and controlled implicitly.
func (q *sqlQuerier) UpsertDefaultProxy(ctx context.Context, arg UpsertDefaultProxyParams) error {
	_, err := q.db.ExecContext(ctx, upsertDefaultProxy, arg.DisplayName, arg.IconUrl)
	return err
}

const upsertHealthSettings = `-- name: UpsertHealthSettings :exec
INSERT INTO site_configs (key, value) VALUES ('health_settings', $1)
ON CONFLICT (key) DO UPDATE SET value = $1 WHERE site_configs.key = 'health_settings'
`

// UpsertHealthSettings inserts or replaces the health_settings value.
func (q *sqlQuerier) UpsertHealthSettings(ctx context.Context, value string) error {
	_, err := q.db.ExecContext(ctx, upsertHealthSettings, value)
	return err
}

const upsertLastUpdateCheck = `-- name: UpsertLastUpdateCheck :exec
INSERT INTO site_configs (key, value) VALUES ('last_update_check', $1)
ON CONFLICT (key) DO UPDATE SET value = $1 WHERE site_configs.key = 'last_update_check'
`

// UpsertLastUpdateCheck inserts or replaces the last_update_check value.
func (q *sqlQuerier) UpsertLastUpdateCheck(ctx context.Context, value string) error {
	_, err := q.db.ExecContext(ctx, upsertLastUpdateCheck, value)
	return err
}

const upsertLogoURL = `-- name: UpsertLogoURL :exec
INSERT INTO site_configs (key, value) VALUES ('logo_url', $1)
ON CONFLICT (key) DO UPDATE SET value = $1 WHERE site_configs.key = 'logo_url'
`

// UpsertLogoURL inserts or replaces the logo_url value.
func (q *sqlQuerier) UpsertLogoURL(ctx context.Context, value string) error {
	_, err := q.db.ExecContext(ctx, upsertLogoURL, value)
	return err
}

const upsertNotificationsSettings = `-- name: UpsertNotificationsSettings :exec
INSERT INTO site_configs (key, value) VALUES ('notifications_settings', $1)
ON CONFLICT (key) DO UPDATE SET value = $1 WHERE site_configs.key = 'notifications_settings'
`

// UpsertNotificationsSettings inserts or replaces the notifications_settings
// value.
func (q *sqlQuerier) UpsertNotificationsSettings(ctx context.Context, value string) error {
	_, err := q.db.ExecContext(ctx, upsertNotificationsSettings, value)
	return err
}

const upsertOAuth2GithubDefaultEligible = `-- name: UpsertOAuth2GithubDefaultEligible :exec
INSERT INTO site_configs (key, value)
VALUES (
	'oauth2_github_default_eligible',
	CASE
		WHEN $1::bool THEN 'true'
		ELSE 'false'
	END
)
ON CONFLICT (key) DO UPDATE
SET value = CASE
	WHEN $1::bool THEN 'true'
	ELSE 'false'
END
WHERE site_configs.key = 'oauth2_github_default_eligible'
`

// UpsertOAuth2GithubDefaultEligible stores the boolean as the string
// 'true'/'false' (converted in SQL), inserting or replacing as needed.
func (q *sqlQuerier) UpsertOAuth2GithubDefaultEligible(ctx context.Context, eligible bool) error {
	_, err := q.db.ExecContext(ctx, upsertOAuth2GithubDefaultEligible, eligible)
	return err
}

const upsertOAuthSigningKey = `-- name: UpsertOAuthSigningKey :exec
INSERT INTO site_configs (key, value) VALUES ('oauth_signing_key', $1)
ON CONFLICT (key) DO UPDATE set value = $1 WHERE site_configs.key = 'oauth_signing_key'
`

// UpsertOAuthSigningKey inserts or replaces the oauth_signing_key value.
func (q *sqlQuerier) UpsertOAuthSigningKey(ctx context.Context, value string) error {
	_, err := q.db.ExecContext(ctx, upsertOAuthSigningKey, value)
	return err
}

const upsertRuntimeConfig = `-- name: UpsertRuntimeConfig :exec
INSERT INTO site_configs (key, value) VALUES ($1, $2)
ON CONFLICT (key) DO UPDATE SET value = $2 WHERE site_configs.key = $1
`

// UpsertRuntimeConfigParams is an arbitrary site_configs key/value pair.
type UpsertRuntimeConfigParams struct {
	Key   string `db:"key" json:"key"`
	Value string `db:"value" json:"value"`
}

// UpsertRuntimeConfig inserts or replaces an arbitrary site_configs entry.
func (q *sqlQuerier) UpsertRuntimeConfig(ctx context.Context, arg UpsertRuntimeConfigParams) error {
	_, err := q.db.ExecContext(ctx, upsertRuntimeConfig, arg.Key, arg.Value)
	return err
}

const upsertWebpushVAPIDKeys = `-- name: UpsertWebpushVAPIDKeys :exec
INSERT INTO site_configs (key, value)
VALUES
	('webpush_vapid_public_key', $1 :: text),
	('webpush_vapid_private_key', $2 :: text)
ON CONFLICT (key)
DO UPDATE SET value = EXCLUDED.value WHERE site_configs.key = EXCLUDED.key
`

// UpsertWebpushVAPIDKeysParams is the web-push VAPID key pair to store.
type UpsertWebpushVAPIDKeysParams struct {
	VapidPublicKey  string `db:"vapid_public_key" json:"vapid_public_key"`
	VapidPrivateKey string `db:"vapid_private_key" json:"vapid_private_key"`
}

// UpsertWebpushVAPIDKeys inserts or replaces both VAPID keys in one statement.
func (q *sqlQuerier) UpsertWebpushVAPIDKeys(ctx context.Context, arg UpsertWebpushVAPIDKeysParams) error {
	_, err := q.db.ExecContext(ctx, upsertWebpushVAPIDKeys, arg.VapidPublicKey, arg.VapidPrivateKey)
	return err
}
|
|
|
|
const cleanTailnetCoordinators = `-- name: CleanTailnetCoordinators :exec
DELETE
FROM tailnet_coordinators
WHERE heartbeat_at < now() - INTERVAL '24 HOURS'
`

// CleanTailnetCoordinators removes coordinator rows whose last heartbeat is
// older than 24 hours.
func (q *sqlQuerier) CleanTailnetCoordinators(ctx context.Context) error {
	_, err := q.db.ExecContext(ctx, cleanTailnetCoordinators)
	return err
}

const cleanTailnetLostPeers = `-- name: CleanTailnetLostPeers :exec
DELETE
FROM tailnet_peers
WHERE updated_at < now() - INTERVAL '24 HOURS' AND status = 'lost'::tailnet_status
`

// CleanTailnetLostPeers removes peers that have been in the 'lost' status and
// unchanged for more than 24 hours.
func (q *sqlQuerier) CleanTailnetLostPeers(ctx context.Context) error {
	_, err := q.db.ExecContext(ctx, cleanTailnetLostPeers)
	return err
}

const cleanTailnetTunnels = `-- name: CleanTailnetTunnels :exec
DELETE FROM tailnet_tunnels
WHERE updated_at < now() - INTERVAL '24 HOURS' AND
	NOT EXISTS (
		SELECT 1 FROM tailnet_peers
		WHERE id = tailnet_tunnels.src_id AND coordinator_id = tailnet_tunnels.coordinator_id
	)
`

// CleanTailnetTunnels removes tunnels older than 24 hours whose source peer no
// longer exists on the same coordinator.
func (q *sqlQuerier) CleanTailnetTunnels(ctx context.Context) error {
	_, err := q.db.ExecContext(ctx, cleanTailnetTunnels)
	return err
}
|
|
|
|
const deleteAllTailnetClientSubscriptions = `-- name: DeleteAllTailnetClientSubscriptions :exec
DELETE
FROM tailnet_client_subscriptions
WHERE client_id = $1 and coordinator_id = $2
`

// DeleteAllTailnetClientSubscriptionsParams identifies the client whose
// subscriptions should be dropped, scoped to one coordinator.
type DeleteAllTailnetClientSubscriptionsParams struct {
	ClientID      uuid.UUID `db:"client_id" json:"client_id"`
	CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
}

// DeleteAllTailnetClientSubscriptions removes every subscription for the
// given client on the given coordinator. Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) DeleteAllTailnetClientSubscriptions(ctx context.Context, arg DeleteAllTailnetClientSubscriptionsParams) error {
	_, err := q.db.ExecContext(ctx, deleteAllTailnetClientSubscriptions, arg.ClientID, arg.CoordinatorID)
	return err
}
|
|
|
|
const deleteAllTailnetTunnels = `-- name: DeleteAllTailnetTunnels :exec
DELETE
FROM tailnet_tunnels
WHERE coordinator_id = $1 and src_id = $2
`

// DeleteAllTailnetTunnelsParams identifies the tunnel source whose tunnels
// should be dropped, scoped to one coordinator.
type DeleteAllTailnetTunnelsParams struct {
	CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
	SrcID         uuid.UUID `db:"src_id" json:"src_id"`
}

// DeleteAllTailnetTunnels removes every tunnel originating from src_id on
// the given coordinator. Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) DeleteAllTailnetTunnels(ctx context.Context, arg DeleteAllTailnetTunnelsParams) error {
	_, err := q.db.ExecContext(ctx, deleteAllTailnetTunnels, arg.CoordinatorID, arg.SrcID)
	return err
}
|
|
|
|
const deleteCoordinator = `-- name: DeleteCoordinator :exec
DELETE
FROM tailnet_coordinators
WHERE id = $1
`

// DeleteCoordinator removes a single tailnet coordinator row by ID.
// Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) DeleteCoordinator(ctx context.Context, id uuid.UUID) error {
	_, err := q.db.ExecContext(ctx, deleteCoordinator, id)
	return err
}
|
|
|
|
const deleteTailnetAgent = `-- name: DeleteTailnetAgent :one
DELETE
FROM tailnet_agents
WHERE id = $1 and coordinator_id = $2
RETURNING id, coordinator_id
`

// DeleteTailnetAgentParams identifies the agent row to delete.
type DeleteTailnetAgentParams struct {
	ID            uuid.UUID `db:"id" json:"id"`
	CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
}

// DeleteTailnetAgentRow echoes back the deleted row's key (via RETURNING).
type DeleteTailnetAgentRow struct {
	ID            uuid.UUID `db:"id" json:"id"`
	CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
}

// DeleteTailnetAgent deletes one agent and returns its key; scanning the
// RETURNING row yields sql.ErrNoRows when nothing matched.
// Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) DeleteTailnetAgent(ctx context.Context, arg DeleteTailnetAgentParams) (DeleteTailnetAgentRow, error) {
	row := q.db.QueryRowContext(ctx, deleteTailnetAgent, arg.ID, arg.CoordinatorID)
	var i DeleteTailnetAgentRow
	err := row.Scan(&i.ID, &i.CoordinatorID)
	return i, err
}
|
|
|
|
const deleteTailnetClient = `-- name: DeleteTailnetClient :one
DELETE
FROM tailnet_clients
WHERE id = $1 and coordinator_id = $2
RETURNING id, coordinator_id
`

// DeleteTailnetClientParams identifies the client row to delete.
type DeleteTailnetClientParams struct {
	ID            uuid.UUID `db:"id" json:"id"`
	CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
}

// DeleteTailnetClientRow echoes back the deleted row's key (via RETURNING).
type DeleteTailnetClientRow struct {
	ID            uuid.UUID `db:"id" json:"id"`
	CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
}

// DeleteTailnetClient deletes one client and returns its key; scanning the
// RETURNING row yields sql.ErrNoRows when nothing matched.
// Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) DeleteTailnetClient(ctx context.Context, arg DeleteTailnetClientParams) (DeleteTailnetClientRow, error) {
	row := q.db.QueryRowContext(ctx, deleteTailnetClient, arg.ID, arg.CoordinatorID)
	var i DeleteTailnetClientRow
	err := row.Scan(&i.ID, &i.CoordinatorID)
	return i, err
}
|
|
|
|
const deleteTailnetClientSubscription = `-- name: DeleteTailnetClientSubscription :exec
DELETE
FROM tailnet_client_subscriptions
WHERE client_id = $1 and agent_id = $2 and coordinator_id = $3
`

// DeleteTailnetClientSubscriptionParams identifies the single
// (client, agent, coordinator) subscription to delete.
type DeleteTailnetClientSubscriptionParams struct {
	ClientID      uuid.UUID `db:"client_id" json:"client_id"`
	AgentID       uuid.UUID `db:"agent_id" json:"agent_id"`
	CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
}

// DeleteTailnetClientSubscription removes one client->agent subscription on
// the given coordinator. Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) DeleteTailnetClientSubscription(ctx context.Context, arg DeleteTailnetClientSubscriptionParams) error {
	_, err := q.db.ExecContext(ctx, deleteTailnetClientSubscription, arg.ClientID, arg.AgentID, arg.CoordinatorID)
	return err
}
|
|
|
|
const deleteTailnetPeer = `-- name: DeleteTailnetPeer :one
DELETE
FROM tailnet_peers
WHERE id = $1 and coordinator_id = $2
RETURNING id, coordinator_id
`

// DeleteTailnetPeerParams identifies the peer row to delete.
type DeleteTailnetPeerParams struct {
	ID            uuid.UUID `db:"id" json:"id"`
	CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
}

// DeleteTailnetPeerRow echoes back the deleted row's key (via RETURNING).
type DeleteTailnetPeerRow struct {
	ID            uuid.UUID `db:"id" json:"id"`
	CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
}

// DeleteTailnetPeer deletes one peer and returns its key; scanning the
// RETURNING row yields sql.ErrNoRows when nothing matched.
// Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) DeleteTailnetPeer(ctx context.Context, arg DeleteTailnetPeerParams) (DeleteTailnetPeerRow, error) {
	row := q.db.QueryRowContext(ctx, deleteTailnetPeer, arg.ID, arg.CoordinatorID)
	var i DeleteTailnetPeerRow
	err := row.Scan(&i.ID, &i.CoordinatorID)
	return i, err
}
|
|
|
|
const deleteTailnetTunnel = `-- name: DeleteTailnetTunnel :one
DELETE
FROM tailnet_tunnels
WHERE coordinator_id = $1 and src_id = $2 and dst_id = $3
RETURNING coordinator_id, src_id, dst_id
`

// DeleteTailnetTunnelParams identifies the tunnel row to delete by its full
// composite key.
type DeleteTailnetTunnelParams struct {
	CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
	SrcID         uuid.UUID `db:"src_id" json:"src_id"`
	DstID         uuid.UUID `db:"dst_id" json:"dst_id"`
}

// DeleteTailnetTunnelRow echoes back the deleted tunnel's key (via RETURNING).
type DeleteTailnetTunnelRow struct {
	CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
	SrcID         uuid.UUID `db:"src_id" json:"src_id"`
	DstID         uuid.UUID `db:"dst_id" json:"dst_id"`
}

// DeleteTailnetTunnel deletes one tunnel and returns its key; scanning the
// RETURNING row yields sql.ErrNoRows when nothing matched.
// Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) DeleteTailnetTunnel(ctx context.Context, arg DeleteTailnetTunnelParams) (DeleteTailnetTunnelRow, error) {
	row := q.db.QueryRowContext(ctx, deleteTailnetTunnel, arg.CoordinatorID, arg.SrcID, arg.DstID)
	var i DeleteTailnetTunnelRow
	err := row.Scan(&i.CoordinatorID, &i.SrcID, &i.DstID)
	return i, err
}
|
|
|
|
const getAllTailnetAgents = `-- name: GetAllTailnetAgents :many
SELECT id, coordinator_id, updated_at, node
FROM tailnet_agents
`

// GetAllTailnetAgents returns every row in tailnet_agents.
// Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) GetAllTailnetAgents(ctx context.Context) ([]TailnetAgent, error) {
	rows, err := q.db.QueryContext(ctx, getAllTailnetAgents)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []TailnetAgent
	for rows.Next() {
		var i TailnetAgent
		if err := rows.Scan(
			&i.ID,
			&i.CoordinatorID,
			&i.UpdatedAt,
			&i.Node,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err surfaces any deferred driver error.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getAllTailnetCoordinators = `-- name: GetAllTailnetCoordinators :many

SELECT id, heartbeat_at FROM tailnet_coordinators
`

// GetAllTailnetCoordinators returns every coordinator row.
// For PG Coordinator HTMLDebug
func (q *sqlQuerier) GetAllTailnetCoordinators(ctx context.Context) ([]TailnetCoordinator, error) {
	rows, err := q.db.QueryContext(ctx, getAllTailnetCoordinators)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []TailnetCoordinator
	for rows.Next() {
		var i TailnetCoordinator
		if err := rows.Scan(&i.ID, &i.HeartbeatAt); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err surfaces any deferred driver error.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getAllTailnetPeers = `-- name: GetAllTailnetPeers :many
SELECT id, coordinator_id, updated_at, node, status FROM tailnet_peers
`

// GetAllTailnetPeers returns every row in tailnet_peers.
// Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) GetAllTailnetPeers(ctx context.Context) ([]TailnetPeer, error) {
	rows, err := q.db.QueryContext(ctx, getAllTailnetPeers)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []TailnetPeer
	for rows.Next() {
		var i TailnetPeer
		if err := rows.Scan(
			&i.ID,
			&i.CoordinatorID,
			&i.UpdatedAt,
			&i.Node,
			&i.Status,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err surfaces any deferred driver error.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getAllTailnetTunnels = `-- name: GetAllTailnetTunnels :many
SELECT coordinator_id, src_id, dst_id, updated_at FROM tailnet_tunnels
`

// GetAllTailnetTunnels returns every row in tailnet_tunnels.
// Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) GetAllTailnetTunnels(ctx context.Context) ([]TailnetTunnel, error) {
	rows, err := q.db.QueryContext(ctx, getAllTailnetTunnels)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []TailnetTunnel
	for rows.Next() {
		var i TailnetTunnel
		if err := rows.Scan(
			&i.CoordinatorID,
			&i.SrcID,
			&i.DstID,
			&i.UpdatedAt,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err surfaces any deferred driver error.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getTailnetAgents = `-- name: GetTailnetAgents :many
SELECT id, coordinator_id, updated_at, node
FROM tailnet_agents
WHERE id = $1
`

// GetTailnetAgents returns agent rows matching the given ID (the same agent
// may appear once per coordinator). Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) GetTailnetAgents(ctx context.Context, id uuid.UUID) ([]TailnetAgent, error) {
	rows, err := q.db.QueryContext(ctx, getTailnetAgents, id)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []TailnetAgent
	for rows.Next() {
		var i TailnetAgent
		if err := rows.Scan(
			&i.ID,
			&i.CoordinatorID,
			&i.UpdatedAt,
			&i.Node,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err surfaces any deferred driver error.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getTailnetClientsForAgent = `-- name: GetTailnetClientsForAgent :many
SELECT id, coordinator_id, updated_at, node
FROM tailnet_clients
WHERE id IN (
	SELECT tailnet_client_subscriptions.client_id
	FROM tailnet_client_subscriptions
	WHERE tailnet_client_subscriptions.agent_id = $1
)
`

// GetTailnetClientsForAgent returns all clients subscribed to the given
// agent, resolved via tailnet_client_subscriptions.
// Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) GetTailnetClientsForAgent(ctx context.Context, agentID uuid.UUID) ([]TailnetClient, error) {
	rows, err := q.db.QueryContext(ctx, getTailnetClientsForAgent, agentID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []TailnetClient
	for rows.Next() {
		var i TailnetClient
		if err := rows.Scan(
			&i.ID,
			&i.CoordinatorID,
			&i.UpdatedAt,
			&i.Node,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err surfaces any deferred driver error.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getTailnetPeers = `-- name: GetTailnetPeers :many
SELECT id, coordinator_id, updated_at, node, status FROM tailnet_peers WHERE id = $1
`

// GetTailnetPeers returns peer rows matching the given ID (the same peer may
// appear once per coordinator). Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) GetTailnetPeers(ctx context.Context, id uuid.UUID) ([]TailnetPeer, error) {
	rows, err := q.db.QueryContext(ctx, getTailnetPeers, id)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []TailnetPeer
	for rows.Next() {
		var i TailnetPeer
		if err := rows.Scan(
			&i.ID,
			&i.CoordinatorID,
			&i.UpdatedAt,
			&i.Node,
			&i.Status,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err surfaces any deferred driver error.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getTailnetTunnelPeerBindings = `-- name: GetTailnetTunnelPeerBindings :many
SELECT tailnet_tunnels.dst_id as peer_id, tailnet_peers.coordinator_id, tailnet_peers.updated_at, tailnet_peers.node, tailnet_peers.status
FROM tailnet_tunnels
INNER JOIN tailnet_peers ON tailnet_tunnels.dst_id = tailnet_peers.id
WHERE tailnet_tunnels.src_id = $1
UNION
SELECT tailnet_tunnels.src_id as peer_id, tailnet_peers.coordinator_id, tailnet_peers.updated_at, tailnet_peers.node, tailnet_peers.status
FROM tailnet_tunnels
INNER JOIN tailnet_peers ON tailnet_tunnels.src_id = tailnet_peers.id
WHERE tailnet_tunnels.dst_id = $1
`

// GetTailnetTunnelPeerBindingsRow is one peer reachable from the queried ID
// in either tunnel direction, joined with its current peer record.
type GetTailnetTunnelPeerBindingsRow struct {
	PeerID        uuid.UUID     `db:"peer_id" json:"peer_id"`
	CoordinatorID uuid.UUID     `db:"coordinator_id" json:"coordinator_id"`
	UpdatedAt     time.Time     `db:"updated_at" json:"updated_at"`
	Node          []byte        `db:"node" json:"node"`
	Status        TailnetStatus `db:"status" json:"status"`
}

// GetTailnetTunnelPeerBindings returns the peers on the other end of every
// tunnel touching srcID; the UNION covers both src->dst and dst->src sides.
// Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) GetTailnetTunnelPeerBindings(ctx context.Context, srcID uuid.UUID) ([]GetTailnetTunnelPeerBindingsRow, error) {
	rows, err := q.db.QueryContext(ctx, getTailnetTunnelPeerBindings, srcID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetTailnetTunnelPeerBindingsRow
	for rows.Next() {
		var i GetTailnetTunnelPeerBindingsRow
		if err := rows.Scan(
			&i.PeerID,
			&i.CoordinatorID,
			&i.UpdatedAt,
			&i.Node,
			&i.Status,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err surfaces any deferred driver error.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getTailnetTunnelPeerIDs = `-- name: GetTailnetTunnelPeerIDs :many
SELECT dst_id as peer_id, coordinator_id, updated_at
FROM tailnet_tunnels
WHERE tailnet_tunnels.src_id = $1
UNION
SELECT src_id as peer_id, coordinator_id, updated_at
FROM tailnet_tunnels
WHERE tailnet_tunnels.dst_id = $1
`

// GetTailnetTunnelPeerIDsRow is one peer ID on the other end of a tunnel
// touching the queried ID.
type GetTailnetTunnelPeerIDsRow struct {
	PeerID        uuid.UUID `db:"peer_id" json:"peer_id"`
	CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
	UpdatedAt     time.Time `db:"updated_at" json:"updated_at"`
}

// GetTailnetTunnelPeerIDs returns the IDs of peers tunneled to/from srcID;
// the UNION covers both tunnel directions.
// Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) GetTailnetTunnelPeerIDs(ctx context.Context, srcID uuid.UUID) ([]GetTailnetTunnelPeerIDsRow, error) {
	rows, err := q.db.QueryContext(ctx, getTailnetTunnelPeerIDs, srcID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetTailnetTunnelPeerIDsRow
	for rows.Next() {
		var i GetTailnetTunnelPeerIDsRow
		if err := rows.Scan(&i.PeerID, &i.CoordinatorID, &i.UpdatedAt); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err surfaces any deferred driver error.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const updateTailnetPeerStatusByCoordinator = `-- name: UpdateTailnetPeerStatusByCoordinator :exec
UPDATE
	tailnet_peers
SET
	status = $2
WHERE
	coordinator_id = $1
`

// UpdateTailnetPeerStatusByCoordinatorParams sets a status for every peer
// owned by one coordinator.
type UpdateTailnetPeerStatusByCoordinatorParams struct {
	CoordinatorID uuid.UUID     `db:"coordinator_id" json:"coordinator_id"`
	Status        TailnetStatus `db:"status" json:"status"`
}

// UpdateTailnetPeerStatusByCoordinator bulk-updates the status of all peers
// belonging to the given coordinator. Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) UpdateTailnetPeerStatusByCoordinator(ctx context.Context, arg UpdateTailnetPeerStatusByCoordinatorParams) error {
	_, err := q.db.ExecContext(ctx, updateTailnetPeerStatusByCoordinator, arg.CoordinatorID, arg.Status)
	return err
}
|
|
|
|
const upsertTailnetAgent = `-- name: UpsertTailnetAgent :one
INSERT INTO
	tailnet_agents (
	id,
	coordinator_id,
	node,
	updated_at
)
VALUES
	($1, $2, $3, now() at time zone 'utc')
ON CONFLICT (id, coordinator_id)
DO UPDATE SET
	id = $1,
	coordinator_id = $2,
	node = $3,
	updated_at = now() at time zone 'utc'
RETURNING id, coordinator_id, updated_at, node
`

// UpsertTailnetAgentParams carries the agent key and its serialized node;
// updated_at is set server-side to now() at UTC.
type UpsertTailnetAgentParams struct {
	ID            uuid.UUID       `db:"id" json:"id"`
	CoordinatorID uuid.UUID       `db:"coordinator_id" json:"coordinator_id"`
	Node          json.RawMessage `db:"node" json:"node"`
}

// UpsertTailnetAgent inserts or refreshes an agent row and returns the
// stored row. Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) UpsertTailnetAgent(ctx context.Context, arg UpsertTailnetAgentParams) (TailnetAgent, error) {
	row := q.db.QueryRowContext(ctx, upsertTailnetAgent, arg.ID, arg.CoordinatorID, arg.Node)
	var i TailnetAgent
	err := row.Scan(
		&i.ID,
		&i.CoordinatorID,
		&i.UpdatedAt,
		&i.Node,
	)
	return i, err
}
|
|
|
|
const upsertTailnetClient = `-- name: UpsertTailnetClient :one
INSERT INTO
	tailnet_clients (
	id,
	coordinator_id,
	node,
	updated_at
)
VALUES
	($1, $2, $3, now() at time zone 'utc')
ON CONFLICT (id, coordinator_id)
DO UPDATE SET
	id = $1,
	coordinator_id = $2,
	node = $3,
	updated_at = now() at time zone 'utc'
RETURNING id, coordinator_id, updated_at, node
`

// UpsertTailnetClientParams carries the client key and its serialized node;
// updated_at is set server-side to now() at UTC.
type UpsertTailnetClientParams struct {
	ID            uuid.UUID       `db:"id" json:"id"`
	CoordinatorID uuid.UUID       `db:"coordinator_id" json:"coordinator_id"`
	Node          json.RawMessage `db:"node" json:"node"`
}

// UpsertTailnetClient inserts or refreshes a client row and returns the
// stored row. Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) UpsertTailnetClient(ctx context.Context, arg UpsertTailnetClientParams) (TailnetClient, error) {
	row := q.db.QueryRowContext(ctx, upsertTailnetClient, arg.ID, arg.CoordinatorID, arg.Node)
	var i TailnetClient
	err := row.Scan(
		&i.ID,
		&i.CoordinatorID,
		&i.UpdatedAt,
		&i.Node,
	)
	return i, err
}
|
|
|
|
const upsertTailnetClientSubscription = `-- name: UpsertTailnetClientSubscription :exec
INSERT INTO
	tailnet_client_subscriptions (
	client_id,
	coordinator_id,
	agent_id,
	updated_at
)
VALUES
	($1, $2, $3, now() at time zone 'utc')
ON CONFLICT (client_id, coordinator_id, agent_id)
DO UPDATE SET
	client_id = $1,
	coordinator_id = $2,
	agent_id = $3,
	updated_at = now() at time zone 'utc'
`

// UpsertTailnetClientSubscriptionParams is the composite key of a
// client->agent subscription; updated_at is set server-side.
type UpsertTailnetClientSubscriptionParams struct {
	ClientID      uuid.UUID `db:"client_id" json:"client_id"`
	CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
	AgentID       uuid.UUID `db:"agent_id" json:"agent_id"`
}

// UpsertTailnetClientSubscription inserts or refreshes a client->agent
// subscription row. Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) UpsertTailnetClientSubscription(ctx context.Context, arg UpsertTailnetClientSubscriptionParams) error {
	_, err := q.db.ExecContext(ctx, upsertTailnetClientSubscription, arg.ClientID, arg.CoordinatorID, arg.AgentID)
	return err
}
|
|
|
|
const upsertTailnetCoordinator = `-- name: UpsertTailnetCoordinator :one
INSERT INTO
	tailnet_coordinators (
	id,
	heartbeat_at
)
VALUES
	($1, now() at time zone 'utc')
ON CONFLICT (id)
DO UPDATE SET
	id = $1,
	heartbeat_at = now() at time zone 'utc'
RETURNING id, heartbeat_at
`

// UpsertTailnetCoordinator inserts the coordinator or refreshes its
// heartbeat_at to now() at UTC, returning the stored row.
// Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) UpsertTailnetCoordinator(ctx context.Context, id uuid.UUID) (TailnetCoordinator, error) {
	row := q.db.QueryRowContext(ctx, upsertTailnetCoordinator, id)
	var i TailnetCoordinator
	err := row.Scan(&i.ID, &i.HeartbeatAt)
	return i, err
}
|
|
|
|
const upsertTailnetPeer = `-- name: UpsertTailnetPeer :one
INSERT INTO
	tailnet_peers (
	id,
	coordinator_id,
	node,
	status,
	updated_at
)
VALUES
	($1, $2, $3, $4, now() at time zone 'utc')
ON CONFLICT (id, coordinator_id)
DO UPDATE SET
	id = $1,
	coordinator_id = $2,
	node = $3,
	status = $4,
	updated_at = now() at time zone 'utc'
RETURNING id, coordinator_id, updated_at, node, status
`

// UpsertTailnetPeerParams carries the peer key, serialized node, and status;
// updated_at is set server-side to now() at UTC.
type UpsertTailnetPeerParams struct {
	ID            uuid.UUID     `db:"id" json:"id"`
	CoordinatorID uuid.UUID     `db:"coordinator_id" json:"coordinator_id"`
	Node          []byte        `db:"node" json:"node"`
	Status        TailnetStatus `db:"status" json:"status"`
}

// UpsertTailnetPeer inserts or refreshes a peer row and returns the stored
// row. Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) UpsertTailnetPeer(ctx context.Context, arg UpsertTailnetPeerParams) (TailnetPeer, error) {
	row := q.db.QueryRowContext(ctx, upsertTailnetPeer,
		arg.ID,
		arg.CoordinatorID,
		arg.Node,
		arg.Status,
	)
	var i TailnetPeer
	err := row.Scan(
		&i.ID,
		&i.CoordinatorID,
		&i.UpdatedAt,
		&i.Node,
		&i.Status,
	)
	return i, err
}
|
|
|
|
const upsertTailnetTunnel = `-- name: UpsertTailnetTunnel :one
INSERT INTO
	tailnet_tunnels (
	coordinator_id,
	src_id,
	dst_id,
	updated_at
)
VALUES
	($1, $2, $3, now() at time zone 'utc')
ON CONFLICT (coordinator_id, src_id, dst_id)
DO UPDATE SET
	coordinator_id = $1,
	src_id = $2,
	dst_id = $3,
	updated_at = now() at time zone 'utc'
RETURNING coordinator_id, src_id, dst_id, updated_at
`

// UpsertTailnetTunnelParams is the composite key of a tunnel; updated_at is
// set server-side to now() at UTC.
type UpsertTailnetTunnelParams struct {
	CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
	SrcID         uuid.UUID `db:"src_id" json:"src_id"`
	DstID         uuid.UUID `db:"dst_id" json:"dst_id"`
}

// UpsertTailnetTunnel inserts or refreshes a tunnel row and returns the
// stored row. Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) UpsertTailnetTunnel(ctx context.Context, arg UpsertTailnetTunnelParams) (TailnetTunnel, error) {
	row := q.db.QueryRowContext(ctx, upsertTailnetTunnel, arg.CoordinatorID, arg.SrcID, arg.DstID)
	var i TailnetTunnel
	err := row.Scan(
		&i.CoordinatorID,
		&i.SrcID,
		&i.DstID,
		&i.UpdatedAt,
	)
	return i, err
}
|
|
|
|
const getTelemetryItem = `-- name: GetTelemetryItem :one
SELECT key, value, created_at, updated_at FROM telemetry_items WHERE key = $1
`

// GetTelemetryItem fetches a single telemetry item by key; scanning yields
// sql.ErrNoRows when the key is absent. Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) GetTelemetryItem(ctx context.Context, key string) (TelemetryItem, error) {
	row := q.db.QueryRowContext(ctx, getTelemetryItem, key)
	var i TelemetryItem
	err := row.Scan(
		&i.Key,
		&i.Value,
		&i.CreatedAt,
		&i.UpdatedAt,
	)
	return i, err
}
|
|
|
|
const getTelemetryItems = `-- name: GetTelemetryItems :many
SELECT key, value, created_at, updated_at FROM telemetry_items
`

// GetTelemetryItems returns every row in telemetry_items.
// Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) GetTelemetryItems(ctx context.Context) ([]TelemetryItem, error) {
	rows, err := q.db.QueryContext(ctx, getTelemetryItems)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []TelemetryItem
	for rows.Next() {
		var i TelemetryItem
		if err := rows.Scan(
			&i.Key,
			&i.Value,
			&i.CreatedAt,
			&i.UpdatedAt,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err surfaces any deferred driver error.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const insertTelemetryItemIfNotExists = `-- name: InsertTelemetryItemIfNotExists :exec
INSERT INTO telemetry_items (key, value)
VALUES ($1, $2)
ON CONFLICT (key) DO NOTHING
`

// InsertTelemetryItemIfNotExistsParams is the key/value pair to insert.
type InsertTelemetryItemIfNotExistsParams struct {
	Key   string `db:"key" json:"key"`
	Value string `db:"value" json:"value"`
}

// InsertTelemetryItemIfNotExists inserts the item only when the key is not
// already present (ON CONFLICT DO NOTHING); an existing value is left
// untouched. Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) InsertTelemetryItemIfNotExists(ctx context.Context, arg InsertTelemetryItemIfNotExistsParams) error {
	_, err := q.db.ExecContext(ctx, insertTelemetryItemIfNotExists, arg.Key, arg.Value)
	return err
}
|
|
|
|
const upsertTelemetryItem = `-- name: UpsertTelemetryItem :exec
INSERT INTO telemetry_items (key, value)
VALUES ($1, $2)
ON CONFLICT (key) DO UPDATE SET value = $2, updated_at = NOW() WHERE telemetry_items.key = $1
`

// UpsertTelemetryItemParams is the key/value pair to insert or overwrite.
type UpsertTelemetryItemParams struct {
	Key   string `db:"key" json:"key"`
	Value string `db:"value" json:"value"`
}

// UpsertTelemetryItem inserts the item or, on key conflict, replaces its
// value and bumps updated_at. Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) UpsertTelemetryItem(ctx context.Context, arg UpsertTelemetryItemParams) error {
	_, err := q.db.ExecContext(ctx, upsertTelemetryItem, arg.Key, arg.Value)
	return err
}
|
|
|
|
const getTemplateAverageBuildTime = `-- name: GetTemplateAverageBuildTime :one
WITH build_times AS (
SELECT
	EXTRACT(EPOCH FROM (pj.completed_at - pj.started_at))::FLOAT AS exec_time_sec,
	workspace_builds.transition
FROM
	workspace_builds
JOIN template_versions ON
	workspace_builds.template_version_id = template_versions.id
JOIN provisioner_jobs pj ON
	workspace_builds.job_id = pj.id
WHERE
	template_versions.template_id = $1 AND
		(pj.completed_at IS NOT NULL) AND (pj.started_at IS NOT NULL) AND
		(pj.started_at > $2) AND
		(pj.canceled_at IS NULL) AND
		((pj.error IS NULL) OR (pj.error = ''))
ORDER BY
	workspace_builds.created_at DESC
)
SELECT
	-- Postgres offers no clear way to DRY this short of a function or other
	-- complexities.
	coalesce((PERCENTILE_DISC(0.5) WITHIN GROUP(ORDER BY exec_time_sec) FILTER (WHERE transition = 'start')), -1)::FLOAT AS start_50,
	coalesce((PERCENTILE_DISC(0.5) WITHIN GROUP(ORDER BY exec_time_sec) FILTER (WHERE transition = 'stop')), -1)::FLOAT AS stop_50,
	coalesce((PERCENTILE_DISC(0.5) WITHIN GROUP(ORDER BY exec_time_sec) FILTER (WHERE transition = 'delete')), -1)::FLOAT AS delete_50,
	coalesce((PERCENTILE_DISC(0.95) WITHIN GROUP(ORDER BY exec_time_sec) FILTER (WHERE transition = 'start')), -1)::FLOAT AS start_95,
	coalesce((PERCENTILE_DISC(0.95) WITHIN GROUP(ORDER BY exec_time_sec) FILTER (WHERE transition = 'stop')), -1)::FLOAT AS stop_95,
	coalesce((PERCENTILE_DISC(0.95) WITHIN GROUP(ORDER BY exec_time_sec) FILTER (WHERE transition = 'delete')), -1)::FLOAT AS delete_95
FROM build_times
`

// GetTemplateAverageBuildTimeParams filters builds to one template and to
// jobs started after start_time.
type GetTemplateAverageBuildTimeParams struct {
	TemplateID uuid.NullUUID `db:"template_id" json:"template_id"`
	StartTime  sql.NullTime  `db:"start_time" json:"start_time"`
}

// GetTemplateAverageBuildTimeRow holds p50/p95 build durations in seconds
// per transition; -1 indicates no matching builds for that transition.
type GetTemplateAverageBuildTimeRow struct {
	Start50  float64 `db:"start_50" json:"start_50"`
	Stop50   float64 `db:"stop_50" json:"stop_50"`
	Delete50 float64 `db:"delete_50" json:"delete_50"`
	Start95  float64 `db:"start_95" json:"start_95"`
	Stop95   float64 `db:"stop_95" json:"stop_95"`
	Delete95 float64 `db:"delete_95" json:"delete_95"`
}

// GetTemplateAverageBuildTime computes median and 95th-percentile build
// times for successful, non-canceled builds of a template.
// Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) GetTemplateAverageBuildTime(ctx context.Context, arg GetTemplateAverageBuildTimeParams) (GetTemplateAverageBuildTimeRow, error) {
	row := q.db.QueryRowContext(ctx, getTemplateAverageBuildTime, arg.TemplateID, arg.StartTime)
	var i GetTemplateAverageBuildTimeRow
	err := row.Scan(
		&i.Start50,
		&i.Stop50,
		&i.Delete50,
		&i.Start95,
		&i.Stop95,
		&i.Delete95,
	)
	return i, err
}
|
|
|
|
const getTemplateByID = `-- name: GetTemplateByID :one
SELECT
	id, created_at, updated_at, organization_id, deleted, name, provisioner, active_version_id, description, default_ttl, created_by, icon, user_acl, group_acl, display_name, allow_user_cancel_workspace_jobs, allow_user_autostart, allow_user_autostop, failure_ttl, time_til_dormant, time_til_dormant_autodelete, autostop_requirement_days_of_week, autostop_requirement_weeks, autostart_block_days_of_week, require_active_version, deprecated, activity_bump, max_port_sharing_level, use_classic_parameter_flow, created_by_avatar_url, created_by_username, created_by_name, organization_name, organization_display_name, organization_icon
FROM
	template_with_names
WHERE
	id = $1
LIMIT
	1
`

// GetTemplateByID fetches one template (with creator/organization names
// resolved via the template_with_names view) by ID; scanning yields
// sql.ErrNoRows when absent. Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) GetTemplateByID(ctx context.Context, id uuid.UUID) (Template, error) {
	row := q.db.QueryRowContext(ctx, getTemplateByID, id)
	var i Template
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.OrganizationID,
		&i.Deleted,
		&i.Name,
		&i.Provisioner,
		&i.ActiveVersionID,
		&i.Description,
		&i.DefaultTTL,
		&i.CreatedBy,
		&i.Icon,
		&i.UserACL,
		&i.GroupACL,
		&i.DisplayName,
		&i.AllowUserCancelWorkspaceJobs,
		&i.AllowUserAutostart,
		&i.AllowUserAutostop,
		&i.FailureTTL,
		&i.TimeTilDormant,
		&i.TimeTilDormantAutoDelete,
		&i.AutostopRequirementDaysOfWeek,
		&i.AutostopRequirementWeeks,
		&i.AutostartBlockDaysOfWeek,
		&i.RequireActiveVersion,
		&i.Deprecated,
		&i.ActivityBump,
		&i.MaxPortSharingLevel,
		&i.UseClassicParameterFlow,
		&i.CreatedByAvatarURL,
		&i.CreatedByUsername,
		&i.CreatedByName,
		&i.OrganizationName,
		&i.OrganizationDisplayName,
		&i.OrganizationIcon,
	)
	return i, err
}
|
|
|
|
const getTemplateByOrganizationAndName = `-- name: GetTemplateByOrganizationAndName :one
SELECT
	id, created_at, updated_at, organization_id, deleted, name, provisioner, active_version_id, description, default_ttl, created_by, icon, user_acl, group_acl, display_name, allow_user_cancel_workspace_jobs, allow_user_autostart, allow_user_autostop, failure_ttl, time_til_dormant, time_til_dormant_autodelete, autostop_requirement_days_of_week, autostop_requirement_weeks, autostart_block_days_of_week, require_active_version, deprecated, activity_bump, max_port_sharing_level, use_classic_parameter_flow, created_by_avatar_url, created_by_username, created_by_name, organization_name, organization_display_name, organization_icon
FROM
	template_with_names AS templates
WHERE
	organization_id = $1
	AND deleted = $2
	AND LOWER("name") = LOWER($3)
LIMIT
	1
`

// GetTemplateByOrganizationAndNameParams looks a template up by
// organization and case-insensitive name; Deleted selects whether deleted
// templates are matched.
type GetTemplateByOrganizationAndNameParams struct {
	OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
	Deleted        bool      `db:"deleted" json:"deleted"`
	Name           string    `db:"name" json:"name"`
}

// GetTemplateByOrganizationAndName fetches one template by org and name;
// scanning yields sql.ErrNoRows when absent.
// Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) GetTemplateByOrganizationAndName(ctx context.Context, arg GetTemplateByOrganizationAndNameParams) (Template, error) {
	row := q.db.QueryRowContext(ctx, getTemplateByOrganizationAndName, arg.OrganizationID, arg.Deleted, arg.Name)
	var i Template
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.OrganizationID,
		&i.Deleted,
		&i.Name,
		&i.Provisioner,
		&i.ActiveVersionID,
		&i.Description,
		&i.DefaultTTL,
		&i.CreatedBy,
		&i.Icon,
		&i.UserACL,
		&i.GroupACL,
		&i.DisplayName,
		&i.AllowUserCancelWorkspaceJobs,
		&i.AllowUserAutostart,
		&i.AllowUserAutostop,
		&i.FailureTTL,
		&i.TimeTilDormant,
		&i.TimeTilDormantAutoDelete,
		&i.AutostopRequirementDaysOfWeek,
		&i.AutostopRequirementWeeks,
		&i.AutostartBlockDaysOfWeek,
		&i.RequireActiveVersion,
		&i.Deprecated,
		&i.ActivityBump,
		&i.MaxPortSharingLevel,
		&i.UseClassicParameterFlow,
		&i.CreatedByAvatarURL,
		&i.CreatedByUsername,
		&i.CreatedByName,
		&i.OrganizationName,
		&i.OrganizationDisplayName,
		&i.OrganizationIcon,
	)
	return i, err
}
|
|
|
|
const getTemplates = `-- name: GetTemplates :many
SELECT id, created_at, updated_at, organization_id, deleted, name, provisioner, active_version_id, description, default_ttl, created_by, icon, user_acl, group_acl, display_name, allow_user_cancel_workspace_jobs, allow_user_autostart, allow_user_autostop, failure_ttl, time_til_dormant, time_til_dormant_autodelete, autostop_requirement_days_of_week, autostop_requirement_weeks, autostart_block_days_of_week, require_active_version, deprecated, activity_bump, max_port_sharing_level, use_classic_parameter_flow, created_by_avatar_url, created_by_username, created_by_name, organization_name, organization_display_name, organization_icon FROM template_with_names AS templates
ORDER BY (name, id) ASC
`

// GetTemplates returns every template row (including deleted ones), ordered
// by (name, id). Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) GetTemplates(ctx context.Context) ([]Template, error) {
	rows, err := q.db.QueryContext(ctx, getTemplates)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []Template
	for rows.Next() {
		var i Template
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.OrganizationID,
			&i.Deleted,
			&i.Name,
			&i.Provisioner,
			&i.ActiveVersionID,
			&i.Description,
			&i.DefaultTTL,
			&i.CreatedBy,
			&i.Icon,
			&i.UserACL,
			&i.GroupACL,
			&i.DisplayName,
			&i.AllowUserCancelWorkspaceJobs,
			&i.AllowUserAutostart,
			&i.AllowUserAutostop,
			&i.FailureTTL,
			&i.TimeTilDormant,
			&i.TimeTilDormantAutoDelete,
			&i.AutostopRequirementDaysOfWeek,
			&i.AutostopRequirementWeeks,
			&i.AutostartBlockDaysOfWeek,
			&i.RequireActiveVersion,
			&i.Deprecated,
			&i.ActivityBump,
			&i.MaxPortSharingLevel,
			&i.UseClassicParameterFlow,
			&i.CreatedByAvatarURL,
			&i.CreatedByUsername,
			&i.CreatedByName,
			&i.OrganizationName,
			&i.OrganizationDisplayName,
			&i.OrganizationIcon,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err surfaces any deferred driver error.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getTemplatesWithFilter selects templates with optional filters; each CASE
// falls through to "true" when the corresponding parameter is its zero value,
// making every filter optional. The @authorize_filter marker is where
// GetAuthorizedTemplates injects an RBAC clause.
const getTemplatesWithFilter = `-- name: GetTemplatesWithFilter :many
SELECT
	t.id, t.created_at, t.updated_at, t.organization_id, t.deleted, t.name, t.provisioner, t.active_version_id, t.description, t.default_ttl, t.created_by, t.icon, t.user_acl, t.group_acl, t.display_name, t.allow_user_cancel_workspace_jobs, t.allow_user_autostart, t.allow_user_autostop, t.failure_ttl, t.time_til_dormant, t.time_til_dormant_autodelete, t.autostop_requirement_days_of_week, t.autostop_requirement_weeks, t.autostart_block_days_of_week, t.require_active_version, t.deprecated, t.activity_bump, t.max_port_sharing_level, t.use_classic_parameter_flow, t.created_by_avatar_url, t.created_by_username, t.created_by_name, t.organization_name, t.organization_display_name, t.organization_icon
FROM
	template_with_names AS t
LEFT JOIN
	template_versions tv ON t.active_version_id = tv.id
WHERE
	-- Optionally include deleted templates
	t.deleted = $1
	-- Filter by organization_id
	AND CASE
		WHEN $2 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
			t.organization_id = $2
		ELSE true
	END
	-- Filter by exact name
	AND CASE
		WHEN $3 :: text != '' THEN
			LOWER(t.name) = LOWER($3)
		ELSE true
	END
	-- Filter by name, matching on substring
	AND CASE
		WHEN $4 :: text != '' THEN
			lower(t.name) ILIKE '%' || lower($4) || '%'
		ELSE true
	END
	-- Filter by ids
	AND CASE
		WHEN array_length($5 :: uuid[], 1) > 0 THEN
			t.id = ANY($5)
		ELSE true
	END
	-- Filter by deprecated
	AND CASE
		WHEN $6 :: boolean IS NOT NULL THEN
			CASE
				WHEN $6 :: boolean THEN
					t.deprecated != ''
				ELSE
					t.deprecated = ''
			END
		ELSE true
	END
	-- Filter by has_ai_task in latest version
	AND CASE
		WHEN $7 :: boolean IS NOT NULL THEN
			tv.has_ai_task = $7 :: boolean
		ELSE true
	END
	-- Authorize Filter clause will be injected below in GetAuthorizedTemplates
	-- @authorize_filter
ORDER BY (t.name, t.id) ASC
`
|
|
|
|
// GetTemplatesWithFilterParams holds the optional filters for
// GetTemplatesWithFilter. Zero values (empty string, nil uuid, empty slice,
// invalid NullBool) disable the corresponding filter in the query.
type GetTemplatesWithFilterParams struct {
	Deleted        bool         `db:"deleted" json:"deleted"`
	OrganizationID uuid.UUID    `db:"organization_id" json:"organization_id"`
	ExactName      string       `db:"exact_name" json:"exact_name"`
	FuzzyName      string       `db:"fuzzy_name" json:"fuzzy_name"`
	IDs            []uuid.UUID  `db:"ids" json:"ids"`
	Deprecated     sql.NullBool `db:"deprecated" json:"deprecated"`
	HasAITask      sql.NullBool `db:"has_ai_task" json:"has_ai_task"`
}
|
|
|
|
// GetTemplatesWithFilter returns templates matching the optional filters in
// arg (deleted flag, organization, exact/fuzzy name, IDs, deprecation, and
// latest-version has_ai_task), ordered by (name, id).
func (q *sqlQuerier) GetTemplatesWithFilter(ctx context.Context, arg GetTemplatesWithFilterParams) ([]Template, error) {
	rows, err := q.db.QueryContext(ctx, getTemplatesWithFilter,
		arg.Deleted,
		arg.OrganizationID,
		arg.ExactName,
		arg.FuzzyName,
		pq.Array(arg.IDs), // pq.Array adapts the Go slice to a Postgres uuid[]
		arg.Deprecated,
		arg.HasAITask,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []Template
	for rows.Next() {
		var i Template
		// Scan order must match the SELECT column order of getTemplatesWithFilter.
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.OrganizationID,
			&i.Deleted,
			&i.Name,
			&i.Provisioner,
			&i.ActiveVersionID,
			&i.Description,
			&i.DefaultTTL,
			&i.CreatedBy,
			&i.Icon,
			&i.UserACL,
			&i.GroupACL,
			&i.DisplayName,
			&i.AllowUserCancelWorkspaceJobs,
			&i.AllowUserAutostart,
			&i.AllowUserAutostop,
			&i.FailureTTL,
			&i.TimeTilDormant,
			&i.TimeTilDormantAutoDelete,
			&i.AutostopRequirementDaysOfWeek,
			&i.AutostopRequirementWeeks,
			&i.AutostartBlockDaysOfWeek,
			&i.RequireActiveVersion,
			&i.Deprecated,
			&i.ActivityBump,
			&i.MaxPortSharingLevel,
			&i.UseClassicParameterFlow,
			&i.CreatedByAvatarURL,
			&i.CreatedByUsername,
			&i.CreatedByName,
			&i.OrganizationName,
			&i.OrganizationDisplayName,
			&i.OrganizationIcon,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// insertTemplate inserts a new template row. Columns with database defaults
// (default_ttl, activity_bump, schedule fields, etc.) are intentionally
// omitted and set by later UPDATE queries.
const insertTemplate = `-- name: InsertTemplate :exec
INSERT INTO
	templates (
		id,
		created_at,
		updated_at,
		organization_id,
		"name",
		provisioner,
		active_version_id,
		description,
		created_by,
		icon,
		user_acl,
		group_acl,
		display_name,
		allow_user_cancel_workspace_jobs,
		max_port_sharing_level
	)
VALUES
	($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15)
`
|
|
|
|
// InsertTemplateParams carries the column values for InsertTemplate, in the
// same order as the placeholders of the insertTemplate statement.
type InsertTemplateParams struct {
	ID                           uuid.UUID       `db:"id" json:"id"`
	CreatedAt                    time.Time       `db:"created_at" json:"created_at"`
	UpdatedAt                    time.Time       `db:"updated_at" json:"updated_at"`
	OrganizationID               uuid.UUID       `db:"organization_id" json:"organization_id"`
	Name                         string          `db:"name" json:"name"`
	Provisioner                  ProvisionerType `db:"provisioner" json:"provisioner"`
	ActiveVersionID              uuid.UUID       `db:"active_version_id" json:"active_version_id"`
	Description                  string          `db:"description" json:"description"`
	CreatedBy                    uuid.UUID       `db:"created_by" json:"created_by"`
	Icon                         string          `db:"icon" json:"icon"`
	UserACL                      TemplateACL     `db:"user_acl" json:"user_acl"`
	GroupACL                     TemplateACL     `db:"group_acl" json:"group_acl"`
	DisplayName                  string          `db:"display_name" json:"display_name"`
	AllowUserCancelWorkspaceJobs bool            `db:"allow_user_cancel_workspace_jobs" json:"allow_user_cancel_workspace_jobs"`
	MaxPortSharingLevel          AppSharingLevel `db:"max_port_sharing_level" json:"max_port_sharing_level"`
}
|
|
|
|
// InsertTemplate creates a new template row from arg. It returns only an
// error; the caller already holds all inserted values.
func (q *sqlQuerier) InsertTemplate(ctx context.Context, arg InsertTemplateParams) error {
	_, err := q.db.ExecContext(ctx, insertTemplate,
		arg.ID,
		arg.CreatedAt,
		arg.UpdatedAt,
		arg.OrganizationID,
		arg.Name,
		arg.Provisioner,
		arg.ActiveVersionID,
		arg.Description,
		arg.CreatedBy,
		arg.Icon,
		arg.UserACL,
		arg.GroupACL,
		arg.DisplayName,
		arg.AllowUserCancelWorkspaceJobs,
		arg.MaxPortSharingLevel,
	)
	return err
}
|
|
|
|
// updateTemplateACLByID replaces both the group and user ACLs of a template.
const updateTemplateACLByID = `-- name: UpdateTemplateACLByID :exec
UPDATE
	templates
SET
	group_acl = $1,
	user_acl = $2
WHERE
	id = $3
`

// UpdateTemplateACLByIDParams holds the new ACLs and the target template ID.
type UpdateTemplateACLByIDParams struct {
	GroupACL TemplateACL `db:"group_acl" json:"group_acl"`
	UserACL  TemplateACL `db:"user_acl" json:"user_acl"`
	ID       uuid.UUID   `db:"id" json:"id"`
}

// UpdateTemplateACLByID overwrites a template's group and user ACLs.
func (q *sqlQuerier) UpdateTemplateACLByID(ctx context.Context, arg UpdateTemplateACLByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateTemplateACLByID, arg.GroupACL, arg.UserACL, arg.ID)
	return err
}
|
|
|
|
// updateTemplateAccessControlByID sets a template's require_active_version
// flag and its deprecation message (empty string means not deprecated).
const updateTemplateAccessControlByID = `-- name: UpdateTemplateAccessControlByID :exec
UPDATE
	templates
SET
	require_active_version = $2,
	deprecated = $3
WHERE
	id = $1
`

// UpdateTemplateAccessControlByIDParams holds access-control fields for a
// template. Deprecated is a free-form message; "" clears deprecation.
type UpdateTemplateAccessControlByIDParams struct {
	ID                   uuid.UUID `db:"id" json:"id"`
	RequireActiveVersion bool      `db:"require_active_version" json:"require_active_version"`
	Deprecated           string    `db:"deprecated" json:"deprecated"`
}

// UpdateTemplateAccessControlByID updates require_active_version and the
// deprecation message of the template identified by arg.ID.
func (q *sqlQuerier) UpdateTemplateAccessControlByID(ctx context.Context, arg UpdateTemplateAccessControlByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateTemplateAccessControlByID, arg.ID, arg.RequireActiveVersion, arg.Deprecated)
	return err
}
|
|
|
|
// updateTemplateActiveVersionByID points a template at a new active version.
const updateTemplateActiveVersionByID = `-- name: UpdateTemplateActiveVersionByID :exec
UPDATE
	templates
SET
	active_version_id = $2,
	updated_at = $3
WHERE
	id = $1
`

// UpdateTemplateActiveVersionByIDParams identifies the template and the
// version to promote, plus the new updated_at timestamp.
type UpdateTemplateActiveVersionByIDParams struct {
	ID              uuid.UUID `db:"id" json:"id"`
	ActiveVersionID uuid.UUID `db:"active_version_id" json:"active_version_id"`
	UpdatedAt       time.Time `db:"updated_at" json:"updated_at"`
}

// UpdateTemplateActiveVersionByID sets the active version of a template and
// bumps its updated_at timestamp.
func (q *sqlQuerier) UpdateTemplateActiveVersionByID(ctx context.Context, arg UpdateTemplateActiveVersionByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateTemplateActiveVersionByID, arg.ID, arg.ActiveVersionID, arg.UpdatedAt)
	return err
}
|
|
|
|
// updateTemplateDeletedByID toggles the soft-delete flag on a template.
const updateTemplateDeletedByID = `-- name: UpdateTemplateDeletedByID :exec
UPDATE
	templates
SET
	deleted = $2,
	updated_at = $3
WHERE
	id = $1
`

// UpdateTemplateDeletedByIDParams identifies the template and the new
// soft-delete state.
type UpdateTemplateDeletedByIDParams struct {
	ID        uuid.UUID `db:"id" json:"id"`
	Deleted   bool      `db:"deleted" json:"deleted"`
	UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
}

// UpdateTemplateDeletedByID soft-deletes (or restores) a template and bumps
// its updated_at timestamp.
func (q *sqlQuerier) UpdateTemplateDeletedByID(ctx context.Context, arg UpdateTemplateDeletedByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateTemplateDeletedByID, arg.ID, arg.Deleted, arg.UpdatedAt)
	return err
}
|
|
|
|
// updateTemplateMetaByID updates a template's display metadata and a few
// behavior flags (cancel jobs, group ACL, port sharing, parameter flow).
const updateTemplateMetaByID = `-- name: UpdateTemplateMetaByID :exec
UPDATE
	templates
SET
	updated_at = $2,
	description = $3,
	name = $4,
	icon = $5,
	display_name = $6,
	allow_user_cancel_workspace_jobs = $7,
	group_acl = $8,
	max_port_sharing_level = $9,
	use_classic_parameter_flow = $10
WHERE
	id = $1
`

// UpdateTemplateMetaByIDParams carries the metadata fields written by
// UpdateTemplateMetaByID; all listed columns are overwritten unconditionally.
type UpdateTemplateMetaByIDParams struct {
	ID                           uuid.UUID       `db:"id" json:"id"`
	UpdatedAt                    time.Time       `db:"updated_at" json:"updated_at"`
	Description                  string          `db:"description" json:"description"`
	Name                         string          `db:"name" json:"name"`
	Icon                         string          `db:"icon" json:"icon"`
	DisplayName                  string          `db:"display_name" json:"display_name"`
	AllowUserCancelWorkspaceJobs bool            `db:"allow_user_cancel_workspace_jobs" json:"allow_user_cancel_workspace_jobs"`
	GroupACL                     TemplateACL     `db:"group_acl" json:"group_acl"`
	MaxPortSharingLevel          AppSharingLevel `db:"max_port_sharing_level" json:"max_port_sharing_level"`
	UseClassicParameterFlow      bool            `db:"use_classic_parameter_flow" json:"use_classic_parameter_flow"`
}

// UpdateTemplateMetaByID overwrites the metadata of the template identified
// by arg.ID with the values in arg.
func (q *sqlQuerier) UpdateTemplateMetaByID(ctx context.Context, arg UpdateTemplateMetaByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateTemplateMetaByID,
		arg.ID,
		arg.UpdatedAt,
		arg.Description,
		arg.Name,
		arg.Icon,
		arg.DisplayName,
		arg.AllowUserCancelWorkspaceJobs,
		arg.GroupACL,
		arg.MaxPortSharingLevel,
		arg.UseClassicParameterFlow,
	)
	return err
}
|
|
|
|
// updateTemplateScheduleByID updates all schedule-related template columns
// (autostart/autostop permissions, TTLs, activity bump, dormancy timers).
const updateTemplateScheduleByID = `-- name: UpdateTemplateScheduleByID :exec
UPDATE
	templates
SET
	updated_at = $2,
	allow_user_autostart = $3,
	allow_user_autostop = $4,
	default_ttl = $5,
	activity_bump = $6,
	autostop_requirement_days_of_week = $7,
	autostop_requirement_weeks = $8,
	autostart_block_days_of_week = $9,
	failure_ttl = $10,
	time_til_dormant = $11,
	time_til_dormant_autodelete = $12
WHERE
	id = $1
`

// UpdateTemplateScheduleByIDParams holds the schedule columns written by
// UpdateTemplateScheduleByID. Duration-like fields (DefaultTTL, ActivityBump,
// FailureTTL, TimeTilDormant*) are int64 column values.
// NOTE(review): presumably nanoseconds, matching the /1000/1000/1000
// conversions seen in the activityBumpWorkspace query — confirm before use.
type UpdateTemplateScheduleByIDParams struct {
	ID                            uuid.UUID `db:"id" json:"id"`
	UpdatedAt                     time.Time `db:"updated_at" json:"updated_at"`
	AllowUserAutostart            bool      `db:"allow_user_autostart" json:"allow_user_autostart"`
	AllowUserAutostop             bool      `db:"allow_user_autostop" json:"allow_user_autostop"`
	DefaultTTL                    int64     `db:"default_ttl" json:"default_ttl"`
	ActivityBump                  int64     `db:"activity_bump" json:"activity_bump"`
	AutostopRequirementDaysOfWeek int16     `db:"autostop_requirement_days_of_week" json:"autostop_requirement_days_of_week"`
	AutostopRequirementWeeks      int64     `db:"autostop_requirement_weeks" json:"autostop_requirement_weeks"`
	AutostartBlockDaysOfWeek      int16     `db:"autostart_block_days_of_week" json:"autostart_block_days_of_week"`
	FailureTTL                    int64     `db:"failure_ttl" json:"failure_ttl"`
	TimeTilDormant                int64     `db:"time_til_dormant" json:"time_til_dormant"`
	TimeTilDormantAutoDelete      int64     `db:"time_til_dormant_autodelete" json:"time_til_dormant_autodelete"`
}

// UpdateTemplateScheduleByID overwrites the scheduling configuration of the
// template identified by arg.ID.
func (q *sqlQuerier) UpdateTemplateScheduleByID(ctx context.Context, arg UpdateTemplateScheduleByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateTemplateScheduleByID,
		arg.ID,
		arg.UpdatedAt,
		arg.AllowUserAutostart,
		arg.AllowUserAutostop,
		arg.DefaultTTL,
		arg.ActivityBump,
		arg.AutostopRequirementDaysOfWeek,
		arg.AutostopRequirementWeeks,
		arg.AutostartBlockDaysOfWeek,
		arg.FailureTTL,
		arg.TimeTilDormant,
		arg.TimeTilDormantAutoDelete,
	)
	return err
}
|
|
|
|
// getTemplateVersionParameters selects all parameters of one template
// version, ordered by display_order then case-insensitive name.
const getTemplateVersionParameters = `-- name: GetTemplateVersionParameters :many
SELECT template_version_id, name, description, type, mutable, default_value, icon, options, validation_regex, validation_min, validation_max, validation_error, validation_monotonic, required, display_name, display_order, ephemeral, form_type FROM template_version_parameters WHERE template_version_id = $1 ORDER BY display_order ASC, LOWER(name) ASC
`

// GetTemplateVersionParameters returns the parameters belonging to the given
// template version in their display order.
func (q *sqlQuerier) GetTemplateVersionParameters(ctx context.Context, templateVersionID uuid.UUID) ([]TemplateVersionParameter, error) {
	rows, err := q.db.QueryContext(ctx, getTemplateVersionParameters, templateVersionID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []TemplateVersionParameter
	for rows.Next() {
		var i TemplateVersionParameter
		// Scan order must match the SELECT column order above.
		if err := rows.Scan(
			&i.TemplateVersionID,
			&i.Name,
			&i.Description,
			&i.Type,
			&i.Mutable,
			&i.DefaultValue,
			&i.Icon,
			&i.Options,
			&i.ValidationRegex,
			&i.ValidationMin,
			&i.ValidationMax,
			&i.ValidationError,
			&i.ValidationMonotonic,
			&i.Required,
			&i.DisplayName,
			&i.DisplayOrder,
			&i.Ephemeral,
			&i.FormType,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// insertTemplateVersionParameter inserts one parameter row and returns the
// full inserted row. Note the RETURNING column order differs from the insert
// column order (form_type is inserted fifth but returned last).
const insertTemplateVersionParameter = `-- name: InsertTemplateVersionParameter :one
INSERT INTO
	template_version_parameters (
		template_version_id,
		name,
		description,
		type,
		form_type,
		mutable,
		default_value,
		icon,
		options,
		validation_regex,
		validation_min,
		validation_max,
		validation_error,
		validation_monotonic,
		required,
		display_name,
		display_order,
		ephemeral
	)
VALUES
	(
		$1,
		$2,
		$3,
		$4,
		$5,
		$6,
		$7,
		$8,
		$9,
		$10,
		$11,
		$12,
		$13,
		$14,
		$15,
		$16,
		$17,
		$18
	) RETURNING template_version_id, name, description, type, mutable, default_value, icon, options, validation_regex, validation_min, validation_max, validation_error, validation_monotonic, required, display_name, display_order, ephemeral, form_type
`

// InsertTemplateVersionParameterParams holds the column values for
// InsertTemplateVersionParameter, ordered to match the statement's
// placeholders.
type InsertTemplateVersionParameterParams struct {
	TemplateVersionID   uuid.UUID         `db:"template_version_id" json:"template_version_id"`
	Name                string            `db:"name" json:"name"`
	Description         string            `db:"description" json:"description"`
	Type                string            `db:"type" json:"type"`
	FormType            ParameterFormType `db:"form_type" json:"form_type"`
	Mutable             bool              `db:"mutable" json:"mutable"`
	DefaultValue        string            `db:"default_value" json:"default_value"`
	Icon                string            `db:"icon" json:"icon"`
	Options             json.RawMessage   `db:"options" json:"options"`
	ValidationRegex     string            `db:"validation_regex" json:"validation_regex"`
	ValidationMin       sql.NullInt32     `db:"validation_min" json:"validation_min"`
	ValidationMax       sql.NullInt32     `db:"validation_max" json:"validation_max"`
	ValidationError     string            `db:"validation_error" json:"validation_error"`
	ValidationMonotonic string            `db:"validation_monotonic" json:"validation_monotonic"`
	Required            bool              `db:"required" json:"required"`
	DisplayName         string            `db:"display_name" json:"display_name"`
	DisplayOrder        int32             `db:"display_order" json:"display_order"`
	Ephemeral           bool              `db:"ephemeral" json:"ephemeral"`
}

// InsertTemplateVersionParameter inserts one template version parameter and
// returns the stored row as scanned from the RETURNING clause.
func (q *sqlQuerier) InsertTemplateVersionParameter(ctx context.Context, arg InsertTemplateVersionParameterParams) (TemplateVersionParameter, error) {
	row := q.db.QueryRowContext(ctx, insertTemplateVersionParameter,
		arg.TemplateVersionID,
		arg.Name,
		arg.Description,
		arg.Type,
		arg.FormType,
		arg.Mutable,
		arg.DefaultValue,
		arg.Icon,
		arg.Options,
		arg.ValidationRegex,
		arg.ValidationMin,
		arg.ValidationMax,
		arg.ValidationError,
		arg.ValidationMonotonic,
		arg.Required,
		arg.DisplayName,
		arg.DisplayOrder,
		arg.Ephemeral,
	)
	var i TemplateVersionParameter
	// Scan order matches the RETURNING clause, not the insert column order.
	err := row.Scan(
		&i.TemplateVersionID,
		&i.Name,
		&i.Description,
		&i.Type,
		&i.Mutable,
		&i.DefaultValue,
		&i.Icon,
		&i.Options,
		&i.ValidationRegex,
		&i.ValidationMin,
		&i.ValidationMax,
		&i.ValidationError,
		&i.ValidationMonotonic,
		&i.Required,
		&i.DisplayName,
		&i.DisplayOrder,
		&i.Ephemeral,
		&i.FormType,
	)
	return i, err
}
|
|
|
|
// archiveUnusedTemplateVersions marks unused, non-active template versions of
// one template as archived and returns the affected IDs. See the comments
// inside the query for what "unused" means.
const archiveUnusedTemplateVersions = `-- name: ArchiveUnusedTemplateVersions :many
UPDATE
	template_versions
SET
	archived = true,
	updated_at = $1
FROM
	-- Archive all versions that are returned from this query.
	(
		SELECT
			scoped_template_versions.id
		FROM
			-- Scope an archive to a single template and ignore already archived template versions
			(
				SELECT
					id, template_id, organization_id, created_at, updated_at, name, readme, job_id, created_by, external_auth_providers, message, archived, source_example_id, has_ai_task
				FROM
					template_versions
				WHERE
					template_versions.template_id = $2 :: uuid
					AND
						archived = false
					AND
						-- This allows archiving a specific template version.
						CASE
							WHEN $3::uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
								template_versions.id = $3 :: uuid
							ELSE
								true
						END
			) AS scoped_template_versions
			LEFT JOIN
				provisioner_jobs ON scoped_template_versions.job_id = provisioner_jobs.id
			LEFT JOIN
				templates ON scoped_template_versions.template_id = templates.id
		WHERE
			-- Actively used template versions (meaning the latest build is using
			-- the version) are never archived. A "restart" command on the workspace,
			-- even if failed, would use the version. So it cannot be archived until
			-- the build is outdated.
			NOT EXISTS (
				-- Return all "used" versions, where "used" is defined as being
				-- used by a latest workspace build.
				SELECT template_version_id FROM (
					SELECT
						DISTINCT ON (workspace_id) template_version_id, transition
					FROM
						workspace_builds
					ORDER BY workspace_id, build_number DESC
				) AS used_versions
				WHERE
					used_versions.transition != 'delete'
					AND
					scoped_template_versions.id = used_versions.template_version_id
			)
			-- Also never archive the active template version
			AND active_version_id != scoped_template_versions.id
			AND CASE
				-- Optionally, only archive versions that match a given
				-- job status like 'failed'.
				WHEN $4 :: provisioner_job_status IS NOT NULL THEN
					provisioner_jobs.job_status = $4 :: provisioner_job_status
				ELSE
					true
			END
			-- Pending or running jobs should not be archived, as they are "in progress"
			AND provisioner_jobs.job_status != 'running'
			AND provisioner_jobs.job_status != 'pending'
	) AS archived_versions
WHERE
	template_versions.id IN (archived_versions.id)
RETURNING template_versions.id
`

// ArchiveUnusedTemplateVersionsParams scopes the archive operation.
// TemplateVersionID may be the zero UUID to archive all eligible versions of
// the template; JobStatus may be invalid/null to skip the status filter.
type ArchiveUnusedTemplateVersionsParams struct {
	UpdatedAt         time.Time                `db:"updated_at" json:"updated_at"`
	TemplateID        uuid.UUID                `db:"template_id" json:"template_id"`
	TemplateVersionID uuid.UUID                `db:"template_version_id" json:"template_version_id"`
	JobStatus         NullProvisionerJobStatus `db:"job_status" json:"job_status"`
}

// Archiving templates is a soft delete action, so is reversible.
// Archiving prevents the version from being used and discovered
// by listing.
// Only unused template versions will be archived, which are any versions not
// referenced by the latest build of a workspace.
func (q *sqlQuerier) ArchiveUnusedTemplateVersions(ctx context.Context, arg ArchiveUnusedTemplateVersionsParams) ([]uuid.UUID, error) {
	rows, err := q.db.QueryContext(ctx, archiveUnusedTemplateVersions,
		arg.UpdatedAt,
		arg.TemplateID,
		arg.TemplateVersionID,
		arg.JobStatus,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	// Collect the IDs of every version that was archived.
	var items []uuid.UUID
	for rows.Next() {
		var id uuid.UUID
		if err := rows.Scan(&id); err != nil {
			return nil, err
		}
		items = append(items, id)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getPreviousTemplateVersion finds the newest version of a template created
// strictly before the named version (identified by org, name, template).
const getPreviousTemplateVersion = `-- name: GetPreviousTemplateVersion :one
SELECT
	id, template_id, organization_id, created_at, updated_at, name, readme, job_id, created_by, external_auth_providers, message, archived, source_example_id, has_ai_task, created_by_avatar_url, created_by_username, created_by_name
FROM
	template_version_with_user AS template_versions
WHERE
	created_at < (
		SELECT created_at
		FROM template_version_with_user AS tv
		WHERE tv.organization_id = $1 AND tv.name = $2 AND tv.template_id = $3
	)
	AND organization_id = $1
	AND template_id = $3
ORDER BY created_at DESC
LIMIT 1
`

// GetPreviousTemplateVersionParams identifies the reference version by
// organization, version name, and (nullable) template ID.
type GetPreviousTemplateVersionParams struct {
	OrganizationID uuid.UUID     `db:"organization_id" json:"organization_id"`
	Name           string        `db:"name" json:"name"`
	TemplateID     uuid.NullUUID `db:"template_id" json:"template_id"`
}

// GetPreviousTemplateVersion returns the template version that immediately
// precedes (by created_at) the version named in arg, within the same
// organization and template.
func (q *sqlQuerier) GetPreviousTemplateVersion(ctx context.Context, arg GetPreviousTemplateVersionParams) (TemplateVersion, error) {
	row := q.db.QueryRowContext(ctx, getPreviousTemplateVersion, arg.OrganizationID, arg.Name, arg.TemplateID)
	var i TemplateVersion
	err := row.Scan(
		&i.ID,
		&i.TemplateID,
		&i.OrganizationID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Name,
		&i.Readme,
		&i.JobID,
		&i.CreatedBy,
		&i.ExternalAuthProviders,
		&i.Message,
		&i.Archived,
		&i.SourceExampleID,
		&i.HasAITask,
		&i.CreatedByAvatarURL,
		&i.CreatedByUsername,
		&i.CreatedByName,
	)
	return i, err
}
|
|
|
|
// getTemplateVersionByID selects a single template version by its primary key.
const getTemplateVersionByID = `-- name: GetTemplateVersionByID :one
SELECT
	id, template_id, organization_id, created_at, updated_at, name, readme, job_id, created_by, external_auth_providers, message, archived, source_example_id, has_ai_task, created_by_avatar_url, created_by_username, created_by_name
FROM
	template_version_with_user AS template_versions
WHERE
	id = $1
`

// GetTemplateVersionByID fetches one template version by ID from the
// template_version_with_user view.
func (q *sqlQuerier) GetTemplateVersionByID(ctx context.Context, id uuid.UUID) (TemplateVersion, error) {
	row := q.db.QueryRowContext(ctx, getTemplateVersionByID, id)
	var i TemplateVersion
	err := row.Scan(
		&i.ID,
		&i.TemplateID,
		&i.OrganizationID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Name,
		&i.Readme,
		&i.JobID,
		&i.CreatedBy,
		&i.ExternalAuthProviders,
		&i.Message,
		&i.Archived,
		&i.SourceExampleID,
		&i.HasAITask,
		&i.CreatedByAvatarURL,
		&i.CreatedByUsername,
		&i.CreatedByName,
	)
	return i, err
}
|
|
|
|
// getTemplateVersionByJobID selects the template version owned by a
// provisioner job.
const getTemplateVersionByJobID = `-- name: GetTemplateVersionByJobID :one
SELECT
	id, template_id, organization_id, created_at, updated_at, name, readme, job_id, created_by, external_auth_providers, message, archived, source_example_id, has_ai_task, created_by_avatar_url, created_by_username, created_by_name
FROM
	template_version_with_user AS template_versions
WHERE
	job_id = $1
`

// GetTemplateVersionByJobID fetches the template version associated with the
// given provisioner job ID.
func (q *sqlQuerier) GetTemplateVersionByJobID(ctx context.Context, jobID uuid.UUID) (TemplateVersion, error) {
	row := q.db.QueryRowContext(ctx, getTemplateVersionByJobID, jobID)
	var i TemplateVersion
	err := row.Scan(
		&i.ID,
		&i.TemplateID,
		&i.OrganizationID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Name,
		&i.Readme,
		&i.JobID,
		&i.CreatedBy,
		&i.ExternalAuthProviders,
		&i.Message,
		&i.Archived,
		&i.SourceExampleID,
		&i.HasAITask,
		&i.CreatedByAvatarURL,
		&i.CreatedByUsername,
		&i.CreatedByName,
	)
	return i, err
}
|
|
|
|
// getTemplateVersionByTemplateIDAndName selects one version of a template by
// its (template_id, name) pair.
const getTemplateVersionByTemplateIDAndName = `-- name: GetTemplateVersionByTemplateIDAndName :one
SELECT
	id, template_id, organization_id, created_at, updated_at, name, readme, job_id, created_by, external_auth_providers, message, archived, source_example_id, has_ai_task, created_by_avatar_url, created_by_username, created_by_name
FROM
	template_version_with_user AS template_versions
WHERE
	template_id = $1
	AND "name" = $2
`

// GetTemplateVersionByTemplateIDAndNameParams identifies a version by its
// (nullable) parent template and its name.
type GetTemplateVersionByTemplateIDAndNameParams struct {
	TemplateID uuid.NullUUID `db:"template_id" json:"template_id"`
	Name       string        `db:"name" json:"name"`
}

// GetTemplateVersionByTemplateIDAndName fetches a template version matching
// both the template ID and the version name.
func (q *sqlQuerier) GetTemplateVersionByTemplateIDAndName(ctx context.Context, arg GetTemplateVersionByTemplateIDAndNameParams) (TemplateVersion, error) {
	row := q.db.QueryRowContext(ctx, getTemplateVersionByTemplateIDAndName, arg.TemplateID, arg.Name)
	var i TemplateVersion
	err := row.Scan(
		&i.ID,
		&i.TemplateID,
		&i.OrganizationID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Name,
		&i.Readme,
		&i.JobID,
		&i.CreatedBy,
		&i.ExternalAuthProviders,
		&i.Message,
		&i.Archived,
		&i.SourceExampleID,
		&i.HasAITask,
		&i.CreatedByAvatarURL,
		&i.CreatedByUsername,
		&i.CreatedByName,
	)
	return i, err
}
|
|
|
|
// getTemplateVersionsByIDs selects all versions whose ID is in the given
// uuid array.
const getTemplateVersionsByIDs = `-- name: GetTemplateVersionsByIDs :many
SELECT
	id, template_id, organization_id, created_at, updated_at, name, readme, job_id, created_by, external_auth_providers, message, archived, source_example_id, has_ai_task, created_by_avatar_url, created_by_username, created_by_name
FROM
	template_version_with_user AS template_versions
WHERE
	id = ANY($1 :: uuid [ ])
`

// GetTemplateVersionsByIDs returns the template versions for the supplied
// IDs. Missing IDs are silently omitted from the result.
func (q *sqlQuerier) GetTemplateVersionsByIDs(ctx context.Context, ids []uuid.UUID) ([]TemplateVersion, error) {
	// pq.Array adapts the Go slice to a Postgres uuid[] parameter.
	rows, err := q.db.QueryContext(ctx, getTemplateVersionsByIDs, pq.Array(ids))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []TemplateVersion
	for rows.Next() {
		var i TemplateVersion
		if err := rows.Scan(
			&i.ID,
			&i.TemplateID,
			&i.OrganizationID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.Name,
			&i.Readme,
			&i.JobID,
			&i.CreatedBy,
			&i.ExternalAuthProviders,
			&i.Message,
			&i.Archived,
			&i.SourceExampleID,
			&i.HasAITask,
			&i.CreatedByAvatarURL,
			&i.CreatedByUsername,
			&i.CreatedByName,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getTemplateVersionsByTemplateID lists a template's versions with optional
// archived filtering and keyset ("after ID") plus offset/limit pagination,
// ordered deterministically by (created_at, id).
const getTemplateVersionsByTemplateID = `-- name: GetTemplateVersionsByTemplateID :many
SELECT
	id, template_id, organization_id, created_at, updated_at, name, readme, job_id, created_by, external_auth_providers, message, archived, source_example_id, has_ai_task, created_by_avatar_url, created_by_username, created_by_name
FROM
	template_version_with_user AS template_versions
WHERE
	template_id = $1 :: uuid
	AND CASE
		-- If no filter is provided, default to returning ALL template versions.
		-- The called should always provide a filter if they want to omit
		-- archived versions.
		WHEN $2 :: boolean IS NULL THEN true
		ELSE template_versions.archived = $2 :: boolean
	END
	AND CASE
		-- This allows using the last element on a page as effectively a cursor.
		-- This is an important option for scripts that need to paginate without
		-- duplicating or missing data.
		WHEN $3 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN (
			-- The pagination cursor is the last ID of the previous page.
			-- The query is ordered by the created_at field, so select all
			-- rows after the cursor.
			(created_at, id) > (
				SELECT
					created_at, id
				FROM
					template_versions
				WHERE
					id = $3
			)
		)
		ELSE true
	END
ORDER BY
	-- Deterministic and consistent ordering of all rows, even if they share
	-- a timestamp. This is to ensure consistent pagination.
	(created_at, id) ASC OFFSET $4
LIMIT
	-- A null limit means "no limit", so 0 means return all
	NULLIF($5 :: int, 0)
`

// GetTemplateVersionsByTemplateIDParams configures the listing: Archived is
// optional (invalid NullBool returns all), AfterID is the pagination cursor
// (zero UUID disables it), and LimitOpt of 0 means no limit.
type GetTemplateVersionsByTemplateIDParams struct {
	TemplateID uuid.UUID    `db:"template_id" json:"template_id"`
	Archived   sql.NullBool `db:"archived" json:"archived"`
	AfterID    uuid.UUID    `db:"after_id" json:"after_id"`
	OffsetOpt  int32        `db:"offset_opt" json:"offset_opt"`
	LimitOpt   int32        `db:"limit_opt" json:"limit_opt"`
}

// GetTemplateVersionsByTemplateID returns a paginated, deterministically
// ordered page of a template's versions.
func (q *sqlQuerier) GetTemplateVersionsByTemplateID(ctx context.Context, arg GetTemplateVersionsByTemplateIDParams) ([]TemplateVersion, error) {
	rows, err := q.db.QueryContext(ctx, getTemplateVersionsByTemplateID,
		arg.TemplateID,
		arg.Archived,
		arg.AfterID,
		arg.OffsetOpt,
		arg.LimitOpt,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []TemplateVersion
	for rows.Next() {
		var i TemplateVersion
		if err := rows.Scan(
			&i.ID,
			&i.TemplateID,
			&i.OrganizationID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.Name,
			&i.Readme,
			&i.JobID,
			&i.CreatedBy,
			&i.ExternalAuthProviders,
			&i.Message,
			&i.Archived,
			&i.SourceExampleID,
			&i.HasAITask,
			&i.CreatedByAvatarURL,
			&i.CreatedByUsername,
			&i.CreatedByName,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getTemplateVersionsCreatedAfter selects every version created strictly
// after the given timestamp.
const getTemplateVersionsCreatedAfter = `-- name: GetTemplateVersionsCreatedAfter :many
SELECT id, template_id, organization_id, created_at, updated_at, name, readme, job_id, created_by, external_auth_providers, message, archived, source_example_id, has_ai_task, created_by_avatar_url, created_by_username, created_by_name FROM template_version_with_user AS template_versions WHERE created_at > $1
`

// GetTemplateVersionsCreatedAfter returns all template versions with
// created_at strictly greater than createdAt.
func (q *sqlQuerier) GetTemplateVersionsCreatedAfter(ctx context.Context, createdAt time.Time) ([]TemplateVersion, error) {
	rows, err := q.db.QueryContext(ctx, getTemplateVersionsCreatedAfter, createdAt)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []TemplateVersion
	for rows.Next() {
		var i TemplateVersion
		if err := rows.Scan(
			&i.ID,
			&i.TemplateID,
			&i.OrganizationID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.Name,
			&i.Readme,
			&i.JobID,
			&i.CreatedBy,
			&i.ExternalAuthProviders,
			&i.Message,
			&i.Archived,
			&i.SourceExampleID,
			&i.HasAITask,
			&i.CreatedByAvatarURL,
			&i.CreatedByUsername,
			&i.CreatedByName,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// hasTemplateVersionsWithAITask checks for the existence of any version
// flagged with has_ai_task.
const hasTemplateVersionsWithAITask = `-- name: HasTemplateVersionsWithAITask :one
SELECT EXISTS (SELECT 1 FROM template_versions WHERE has_ai_task = TRUE)
`

// Determines if the template versions table has any rows with has_ai_task = TRUE.
func (q *sqlQuerier) HasTemplateVersionsWithAITask(ctx context.Context) (bool, error) {
	row := q.db.QueryRowContext(ctx, hasTemplateVersionsWithAITask)
	var exists bool
	err := row.Scan(&exists)
	return exists, err
}
|
|
|
|
// insertTemplateVersion inserts a new template version row.
const insertTemplateVersion = `-- name: InsertTemplateVersion :exec
INSERT INTO
	template_versions (
		id,
		template_id,
		organization_id,
		created_at,
		updated_at,
		"name",
		message,
		readme,
		job_id,
		created_by,
		source_example_id,
		has_ai_task
	)
VALUES
	($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12)
`

// InsertTemplateVersionParams carries the column values for
// InsertTemplateVersion. TemplateID may be null for versions created before
// a template exists; SourceExampleID and HasAITask are optional.
type InsertTemplateVersionParams struct {
	ID              uuid.UUID      `db:"id" json:"id"`
	TemplateID      uuid.NullUUID  `db:"template_id" json:"template_id"`
	OrganizationID  uuid.UUID      `db:"organization_id" json:"organization_id"`
	CreatedAt       time.Time      `db:"created_at" json:"created_at"`
	UpdatedAt       time.Time      `db:"updated_at" json:"updated_at"`
	Name            string         `db:"name" json:"name"`
	Message         string         `db:"message" json:"message"`
	Readme          string         `db:"readme" json:"readme"`
	JobID           uuid.UUID      `db:"job_id" json:"job_id"`
	CreatedBy       uuid.UUID      `db:"created_by" json:"created_by"`
	SourceExampleID sql.NullString `db:"source_example_id" json:"source_example_id"`
	HasAITask       sql.NullBool   `db:"has_ai_task" json:"has_ai_task"`
}

// InsertTemplateVersion creates a new template version row from arg.
func (q *sqlQuerier) InsertTemplateVersion(ctx context.Context, arg InsertTemplateVersionParams) error {
	_, err := q.db.ExecContext(ctx, insertTemplateVersion,
		arg.ID,
		arg.TemplateID,
		arg.OrganizationID,
		arg.CreatedAt,
		arg.UpdatedAt,
		arg.Name,
		arg.Message,
		arg.Readme,
		arg.JobID,
		arg.CreatedBy,
		arg.SourceExampleID,
		arg.HasAITask,
	)
	return err
}
|
|
|
|
const unarchiveTemplateVersion = `-- name: UnarchiveTemplateVersion :exec
UPDATE
	template_versions
SET
	archived = false,
	updated_at = $1
WHERE
	id = $2
`

// UnarchiveTemplateVersionParams holds the arguments for UnarchiveTemplateVersion.
type UnarchiveTemplateVersionParams struct {
	UpdatedAt         time.Time `db:"updated_at" json:"updated_at"`
	TemplateVersionID uuid.UUID `db:"template_version_id" json:"template_version_id"`
}

// This will always work regardless of the current state of the template version.
func (q *sqlQuerier) UnarchiveTemplateVersion(ctx context.Context, arg UnarchiveTemplateVersionParams) error {
	_, err := q.db.ExecContext(ctx, unarchiveTemplateVersion, arg.UpdatedAt, arg.TemplateVersionID)
	return err
}
|
|
|
|
const updateTemplateVersionByID = `-- name: UpdateTemplateVersionByID :exec
UPDATE
	template_versions
SET
	template_id = $2,
	updated_at = $3,
	name = $4,
	message = $5
WHERE
	id = $1
`

// UpdateTemplateVersionByIDParams holds the arguments for UpdateTemplateVersionByID.
// ID selects the row; the remaining fields are the new column values.
type UpdateTemplateVersionByIDParams struct {
	ID         uuid.UUID     `db:"id" json:"id"`
	TemplateID uuid.NullUUID `db:"template_id" json:"template_id"`
	UpdatedAt  time.Time     `db:"updated_at" json:"updated_at"`
	Name       string        `db:"name" json:"name"`
	Message    string        `db:"message" json:"message"`
}

// UpdateTemplateVersionByID updates template_id, updated_at, name, and message
// for the template version with the given ID.
func (q *sqlQuerier) UpdateTemplateVersionByID(ctx context.Context, arg UpdateTemplateVersionByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateTemplateVersionByID,
		arg.ID,
		arg.TemplateID,
		arg.UpdatedAt,
		arg.Name,
		arg.Message,
	)
	return err
}
|
|
|
|
const updateTemplateVersionDescriptionByJobID = `-- name: UpdateTemplateVersionDescriptionByJobID :exec
UPDATE
	template_versions
SET
	readme = $2,
	updated_at = $3
WHERE
	job_id = $1
`

// UpdateTemplateVersionDescriptionByJobIDParams holds the arguments for
// UpdateTemplateVersionDescriptionByJobID; JobID selects the row(s).
type UpdateTemplateVersionDescriptionByJobIDParams struct {
	JobID     uuid.UUID `db:"job_id" json:"job_id"`
	Readme    string    `db:"readme" json:"readme"`
	UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
}

// UpdateTemplateVersionDescriptionByJobID sets the readme and updated_at of the
// template version(s) matching the given provisioner job ID.
func (q *sqlQuerier) UpdateTemplateVersionDescriptionByJobID(ctx context.Context, arg UpdateTemplateVersionDescriptionByJobIDParams) error {
	_, err := q.db.ExecContext(ctx, updateTemplateVersionDescriptionByJobID, arg.JobID, arg.Readme, arg.UpdatedAt)
	return err
}
|
|
|
|
const updateTemplateVersionExternalAuthProvidersByJobID = `-- name: UpdateTemplateVersionExternalAuthProvidersByJobID :exec
UPDATE
	template_versions
SET
	external_auth_providers = $2,
	updated_at = $3
WHERE
	job_id = $1
`

// UpdateTemplateVersionExternalAuthProvidersByJobIDParams holds the arguments
// for UpdateTemplateVersionExternalAuthProvidersByJobID; JobID selects the row(s).
type UpdateTemplateVersionExternalAuthProvidersByJobIDParams struct {
	JobID                 uuid.UUID       `db:"job_id" json:"job_id"`
	ExternalAuthProviders json.RawMessage `db:"external_auth_providers" json:"external_auth_providers"`
	UpdatedAt             time.Time       `db:"updated_at" json:"updated_at"`
}

// UpdateTemplateVersionExternalAuthProvidersByJobID stores the raw JSON list of
// external auth providers on the template version(s) matching the job ID.
func (q *sqlQuerier) UpdateTemplateVersionExternalAuthProvidersByJobID(ctx context.Context, arg UpdateTemplateVersionExternalAuthProvidersByJobIDParams) error {
	_, err := q.db.ExecContext(ctx, updateTemplateVersionExternalAuthProvidersByJobID, arg.JobID, arg.ExternalAuthProviders, arg.UpdatedAt)
	return err
}
|
|
|
|
const getTemplateVersionTerraformValues = `-- name: GetTemplateVersionTerraformValues :one
SELECT
	template_version_terraform_values.template_version_id, template_version_terraform_values.updated_at, template_version_terraform_values.cached_plan, template_version_terraform_values.cached_module_files, template_version_terraform_values.provisionerd_version
FROM
	template_version_terraform_values
WHERE
	template_version_terraform_values.template_version_id = $1
`

// GetTemplateVersionTerraformValues fetches the single
// template_version_terraform_values row for the given template version ID.
func (q *sqlQuerier) GetTemplateVersionTerraformValues(ctx context.Context, templateVersionID uuid.UUID) (TemplateVersionTerraformValue, error) {
	row := q.db.QueryRowContext(ctx, getTemplateVersionTerraformValues, templateVersionID)
	var i TemplateVersionTerraformValue
	err := row.Scan(
		&i.TemplateVersionID,
		&i.UpdatedAt,
		&i.CachedPlan,
		&i.CachedModuleFiles,
		&i.ProvisionerdVersion,
	)
	return i, err
}
|
|
|
|
const insertTemplateVersionTerraformValuesByJobID = `-- name: InsertTemplateVersionTerraformValuesByJobID :exec
INSERT INTO
	template_version_terraform_values (
		template_version_id,
		cached_plan,
		cached_module_files,
		updated_at,
		provisionerd_version
	)
VALUES
	(
		(select id from template_versions where job_id = $1),
		$2,
		$3,
		$4,
		$5
	)
`

// InsertTemplateVersionTerraformValuesByJobIDParams holds the arguments for
// InsertTemplateVersionTerraformValuesByJobID. The template version ID is not
// passed directly: the query resolves it via a subselect on JobID.
type InsertTemplateVersionTerraformValuesByJobIDParams struct {
	JobID               uuid.UUID       `db:"job_id" json:"job_id"`
	CachedPlan          json.RawMessage `db:"cached_plan" json:"cached_plan"`
	CachedModuleFiles   uuid.NullUUID   `db:"cached_module_files" json:"cached_module_files"`
	UpdatedAt           time.Time       `db:"updated_at" json:"updated_at"`
	ProvisionerdVersion string          `db:"provisionerd_version" json:"provisionerd_version"`
}

// InsertTemplateVersionTerraformValuesByJobID inserts a terraform-values row
// for the template version whose job_id matches arg.JobID.
func (q *sqlQuerier) InsertTemplateVersionTerraformValuesByJobID(ctx context.Context, arg InsertTemplateVersionTerraformValuesByJobIDParams) error {
	_, err := q.db.ExecContext(ctx, insertTemplateVersionTerraformValuesByJobID,
		arg.JobID,
		arg.CachedPlan,
		arg.CachedModuleFiles,
		arg.UpdatedAt,
		arg.ProvisionerdVersion,
	)
	return err
}
|
|
|
|
const getTemplateVersionVariables = `-- name: GetTemplateVersionVariables :many
SELECT template_version_id, name, description, type, value, default_value, required, sensitive FROM template_version_variables WHERE template_version_id = $1
`

// GetTemplateVersionVariables returns all template_version_variables rows for
// the given template version ID.
func (q *sqlQuerier) GetTemplateVersionVariables(ctx context.Context, templateVersionID uuid.UUID) ([]TemplateVersionVariable, error) {
	rows, err := q.db.QueryContext(ctx, getTemplateVersionVariables, templateVersionID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []TemplateVersionVariable
	for rows.Next() {
		var i TemplateVersionVariable
		// Scan order must match the SELECT column list above.
		if err := rows.Scan(
			&i.TemplateVersionID,
			&i.Name,
			&i.Description,
			&i.Type,
			&i.Value,
			&i.DefaultValue,
			&i.Required,
			&i.Sensitive,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const insertTemplateVersionVariable = `-- name: InsertTemplateVersionVariable :one
INSERT INTO
	template_version_variables (
		template_version_id,
		name,
		description,
		type,
		value,
		default_value,
		required,
		sensitive
	)
VALUES
	(
		$1,
		$2,
		$3,
		$4,
		$5,
		$6,
		$7,
		$8
	) RETURNING template_version_id, name, description, type, value, default_value, required, sensitive
`

// InsertTemplateVersionVariableParams holds the column values for
// InsertTemplateVersionVariable, in placeholder order.
type InsertTemplateVersionVariableParams struct {
	TemplateVersionID uuid.UUID `db:"template_version_id" json:"template_version_id"`
	Name              string    `db:"name" json:"name"`
	Description       string    `db:"description" json:"description"`
	Type              string    `db:"type" json:"type"`
	Value             string    `db:"value" json:"value"`
	DefaultValue      string    `db:"default_value" json:"default_value"`
	Required          bool      `db:"required" json:"required"`
	Sensitive         bool      `db:"sensitive" json:"sensitive"`
}

// InsertTemplateVersionVariable inserts one template version variable and
// returns the inserted row (via the query's RETURNING clause).
func (q *sqlQuerier) InsertTemplateVersionVariable(ctx context.Context, arg InsertTemplateVersionVariableParams) (TemplateVersionVariable, error) {
	row := q.db.QueryRowContext(ctx, insertTemplateVersionVariable,
		arg.TemplateVersionID,
		arg.Name,
		arg.Description,
		arg.Type,
		arg.Value,
		arg.DefaultValue,
		arg.Required,
		arg.Sensitive,
	)
	var i TemplateVersionVariable
	err := row.Scan(
		&i.TemplateVersionID,
		&i.Name,
		&i.Description,
		&i.Type,
		&i.Value,
		&i.DefaultValue,
		&i.Required,
		&i.Sensitive,
	)
	return i, err
}
|
|
|
|
const getTemplateVersionWorkspaceTags = `-- name: GetTemplateVersionWorkspaceTags :many
SELECT template_version_id, key, value FROM template_version_workspace_tags WHERE template_version_id = $1 ORDER BY LOWER(key) ASC
`

// GetTemplateVersionWorkspaceTags returns the workspace tags for a template
// version, ordered case-insensitively by key.
func (q *sqlQuerier) GetTemplateVersionWorkspaceTags(ctx context.Context, templateVersionID uuid.UUID) ([]TemplateVersionWorkspaceTag, error) {
	rows, err := q.db.QueryContext(ctx, getTemplateVersionWorkspaceTags, templateVersionID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []TemplateVersionWorkspaceTag
	for rows.Next() {
		var i TemplateVersionWorkspaceTag
		if err := rows.Scan(&i.TemplateVersionID, &i.Key, &i.Value); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const insertTemplateVersionWorkspaceTag = `-- name: InsertTemplateVersionWorkspaceTag :one
INSERT INTO
	template_version_workspace_tags (
		template_version_id,
		key,
		value
	)
VALUES
	(
		$1,
		$2,
		$3
	) RETURNING template_version_id, key, value
`

// InsertTemplateVersionWorkspaceTagParams holds the column values for
// InsertTemplateVersionWorkspaceTag.
type InsertTemplateVersionWorkspaceTagParams struct {
	TemplateVersionID uuid.UUID `db:"template_version_id" json:"template_version_id"`
	Key               string    `db:"key" json:"key"`
	Value             string    `db:"value" json:"value"`
}

// InsertTemplateVersionWorkspaceTag inserts one workspace tag for a template
// version and returns the inserted row.
func (q *sqlQuerier) InsertTemplateVersionWorkspaceTag(ctx context.Context, arg InsertTemplateVersionWorkspaceTagParams) (TemplateVersionWorkspaceTag, error) {
	row := q.db.QueryRowContext(ctx, insertTemplateVersionWorkspaceTag, arg.TemplateVersionID, arg.Key, arg.Value)
	var i TemplateVersionWorkspaceTag
	err := row.Scan(&i.TemplateVersionID, &i.Key, &i.Value)
	return i, err
}
|
|
|
|
const disableForeignKeysAndTriggers = `-- name: DisableForeignKeysAndTriggers :exec
DO $$
DECLARE
	table_record record;
BEGIN
	FOR table_record IN
		SELECT table_schema, table_name
		FROM information_schema.tables
		WHERE table_schema NOT IN ('pg_catalog', 'information_schema')
		AND table_type = 'BASE TABLE'
	LOOP
		EXECUTE format('ALTER TABLE %I.%I DISABLE TRIGGER ALL',
			table_record.table_schema,
			table_record.table_name);
	END LOOP;
END;
$$
`

// Disable foreign keys and triggers for all tables.
// Deprecated: disable foreign keys was created to aid in migrating off
// of the test-only in-memory database. Do not use this in new code.
func (q *sqlQuerier) DisableForeignKeysAndTriggers(ctx context.Context) error {
	_, err := q.db.ExecContext(ctx, disableForeignKeysAndTriggers)
	return err
}
|
|
|
|
const getUserLinkByLinkedID = `-- name: GetUserLinkByLinkedID :one
SELECT
	user_links.user_id, user_links.login_type, user_links.linked_id, user_links.oauth_access_token, user_links.oauth_refresh_token, user_links.oauth_expiry, user_links.oauth_access_token_key_id, user_links.oauth_refresh_token_key_id, user_links.claims
FROM
	user_links
INNER JOIN
	users ON user_links.user_id = users.id
WHERE
	linked_id = $1
	AND
	deleted = false
`

// GetUserLinkByLinkedID fetches the user link with the given linked_id,
// joining on users to exclude links belonging to deleted users.
func (q *sqlQuerier) GetUserLinkByLinkedID(ctx context.Context, linkedID string) (UserLink, error) {
	row := q.db.QueryRowContext(ctx, getUserLinkByLinkedID, linkedID)
	var i UserLink
	err := row.Scan(
		&i.UserID,
		&i.LoginType,
		&i.LinkedID,
		&i.OAuthAccessToken,
		&i.OAuthRefreshToken,
		&i.OAuthExpiry,
		&i.OAuthAccessTokenKeyID,
		&i.OAuthRefreshTokenKeyID,
		&i.Claims,
	)
	return i, err
}
|
|
|
|
const getUserLinkByUserIDLoginType = `-- name: GetUserLinkByUserIDLoginType :one
SELECT
	user_id, login_type, linked_id, oauth_access_token, oauth_refresh_token, oauth_expiry, oauth_access_token_key_id, oauth_refresh_token_key_id, claims
FROM
	user_links
WHERE
	user_id = $1 AND login_type = $2
`

// GetUserLinkByUserIDLoginTypeParams holds the lookup key for
// GetUserLinkByUserIDLoginType.
type GetUserLinkByUserIDLoginTypeParams struct {
	UserID    uuid.UUID `db:"user_id" json:"user_id"`
	LoginType LoginType `db:"login_type" json:"login_type"`
}

// GetUserLinkByUserIDLoginType fetches the user link keyed by (user_id, login_type).
func (q *sqlQuerier) GetUserLinkByUserIDLoginType(ctx context.Context, arg GetUserLinkByUserIDLoginTypeParams) (UserLink, error) {
	row := q.db.QueryRowContext(ctx, getUserLinkByUserIDLoginType, arg.UserID, arg.LoginType)
	var i UserLink
	err := row.Scan(
		&i.UserID,
		&i.LoginType,
		&i.LinkedID,
		&i.OAuthAccessToken,
		&i.OAuthRefreshToken,
		&i.OAuthExpiry,
		&i.OAuthAccessTokenKeyID,
		&i.OAuthRefreshTokenKeyID,
		&i.Claims,
	)
	return i, err
}
|
|
|
|
const getUserLinksByUserID = `-- name: GetUserLinksByUserID :many
SELECT user_id, login_type, linked_id, oauth_access_token, oauth_refresh_token, oauth_expiry, oauth_access_token_key_id, oauth_refresh_token_key_id, claims FROM user_links WHERE user_id = $1
`

// GetUserLinksByUserID returns all user_links rows for the given user ID.
func (q *sqlQuerier) GetUserLinksByUserID(ctx context.Context, userID uuid.UUID) ([]UserLink, error) {
	rows, err := q.db.QueryContext(ctx, getUserLinksByUserID, userID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []UserLink
	for rows.Next() {
		var i UserLink
		// Scan order must match the SELECT column list above.
		if err := rows.Scan(
			&i.UserID,
			&i.LoginType,
			&i.LinkedID,
			&i.OAuthAccessToken,
			&i.OAuthRefreshToken,
			&i.OAuthExpiry,
			&i.OAuthAccessTokenKeyID,
			&i.OAuthRefreshTokenKeyID,
			&i.Claims,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const insertUserLink = `-- name: InsertUserLink :one
INSERT INTO
	user_links (
		user_id,
		login_type,
		linked_id,
		oauth_access_token,
		oauth_access_token_key_id,
		oauth_refresh_token,
		oauth_refresh_token_key_id,
		oauth_expiry,
		claims
	)
VALUES
	( $1, $2, $3, $4, $5, $6, $7, $8, $9 ) RETURNING user_id, login_type, linked_id, oauth_access_token, oauth_refresh_token, oauth_expiry, oauth_access_token_key_id, oauth_refresh_token_key_id, claims
`

// InsertUserLinkParams holds the column values for InsertUserLink.
// Note: the struct field order matches the INSERT column list, which differs
// from the UserLink scan order used in the RETURNING clause.
type InsertUserLinkParams struct {
	UserID                 uuid.UUID      `db:"user_id" json:"user_id"`
	LoginType              LoginType      `db:"login_type" json:"login_type"`
	LinkedID               string         `db:"linked_id" json:"linked_id"`
	OAuthAccessToken       string         `db:"oauth_access_token" json:"oauth_access_token"`
	OAuthAccessTokenKeyID  sql.NullString `db:"oauth_access_token_key_id" json:"oauth_access_token_key_id"`
	OAuthRefreshToken      string         `db:"oauth_refresh_token" json:"oauth_refresh_token"`
	OAuthRefreshTokenKeyID sql.NullString `db:"oauth_refresh_token_key_id" json:"oauth_refresh_token_key_id"`
	OAuthExpiry            time.Time      `db:"oauth_expiry" json:"oauth_expiry"`
	Claims                 UserLinkClaims `db:"claims" json:"claims"`
}

// InsertUserLink inserts one user link row and returns the inserted row.
func (q *sqlQuerier) InsertUserLink(ctx context.Context, arg InsertUserLinkParams) (UserLink, error) {
	row := q.db.QueryRowContext(ctx, insertUserLink,
		arg.UserID,
		arg.LoginType,
		arg.LinkedID,
		arg.OAuthAccessToken,
		arg.OAuthAccessTokenKeyID,
		arg.OAuthRefreshToken,
		arg.OAuthRefreshTokenKeyID,
		arg.OAuthExpiry,
		arg.Claims,
	)
	var i UserLink
	err := row.Scan(
		&i.UserID,
		&i.LoginType,
		&i.LinkedID,
		&i.OAuthAccessToken,
		&i.OAuthRefreshToken,
		&i.OAuthExpiry,
		&i.OAuthAccessTokenKeyID,
		&i.OAuthRefreshTokenKeyID,
		&i.Claims,
	)
	return i, err
}
|
|
|
|
const oIDCClaimFieldValues = `-- name: OIDCClaimFieldValues :many
SELECT
	-- DISTINCT to remove duplicates
	DISTINCT jsonb_array_elements_text(CASE
		-- When the type is an array, filter out any non-string elements.
		-- This is to keep the return type consistent.
		WHEN jsonb_typeof(claims->'merged_claims'->$1::text) = 'array' THEN
			(
				SELECT
					jsonb_agg(element)
				FROM
					jsonb_array_elements(claims->'merged_claims'->$1::text) AS element
				WHERE
					-- Filtering out non-string elements
					jsonb_typeof(element) = 'string'
			)
		-- Some IDPs return a single string instead of an array of strings.
		WHEN jsonb_typeof(claims->'merged_claims'->$1::text) = 'string' THEN
			jsonb_build_array(claims->'merged_claims'->$1::text)
	END)
FROM
	user_links
WHERE
	-- IDP sync only supports string and array (of string) types
	jsonb_typeof(claims->'merged_claims'->$1::text) = ANY(ARRAY['string', 'array'])
	AND login_type = 'oidc'
	AND CASE
		WHEN $2 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
			user_links.user_id = ANY(SELECT organization_members.user_id FROM organization_members WHERE organization_id = $2)
		ELSE true
	END
`

// OIDCClaimFieldValuesParams holds the arguments for OIDCClaimFieldValues.
// A zero-UUID OrganizationID disables the organization-membership filter.
type OIDCClaimFieldValuesParams struct {
	ClaimField     string    `db:"claim_field" json:"claim_field"`
	OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
}

// OIDCClaimFieldValues returns the distinct string values found under the
// given merged-claims field across OIDC user links, optionally scoped to
// members of one organization.
func (q *sqlQuerier) OIDCClaimFieldValues(ctx context.Context, arg OIDCClaimFieldValuesParams) ([]string, error) {
	rows, err := q.db.QueryContext(ctx, oIDCClaimFieldValues, arg.ClaimField, arg.OrganizationID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []string
	for rows.Next() {
		var jsonb_array_elements_text string
		if err := rows.Scan(&jsonb_array_elements_text); err != nil {
			return nil, err
		}
		items = append(items, jsonb_array_elements_text)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const oIDCClaimFields = `-- name: OIDCClaimFields :many
SELECT
	DISTINCT jsonb_object_keys(claims->'merged_claims')
FROM
	user_links
WHERE
	-- Only return rows where the top level key exists
	claims ? 'merged_claims' AND
	-- 'null' is the default value for the id_token_claims field
	-- jsonb 'null' is not the same as SQL NULL. Strip these out.
	jsonb_typeof(claims->'merged_claims') != 'null' AND
	login_type = 'oidc'
	AND CASE WHEN $1 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
		user_links.user_id = ANY(SELECT organization_members.user_id FROM organization_members WHERE organization_id = $1)
	ELSE true
	END
`

// OIDCClaimFields returns a list of distinct keys in the the merged_claims fields.
// This query is used to generate the list of available sync fields for idp sync settings.
// A zero-UUID organizationID disables the organization-membership filter.
func (q *sqlQuerier) OIDCClaimFields(ctx context.Context, organizationID uuid.UUID) ([]string, error) {
	rows, err := q.db.QueryContext(ctx, oIDCClaimFields, organizationID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []string
	for rows.Next() {
		var jsonb_object_keys string
		if err := rows.Scan(&jsonb_object_keys); err != nil {
			return nil, err
		}
		items = append(items, jsonb_object_keys)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const updateUserLink = `-- name: UpdateUserLink :one
UPDATE
	user_links
SET
	oauth_access_token = $1,
	oauth_access_token_key_id = $2,
	oauth_refresh_token = $3,
	oauth_refresh_token_key_id = $4,
	oauth_expiry = $5,
	claims = $6
WHERE
	user_id = $7 AND login_type = $8 RETURNING user_id, login_type, linked_id, oauth_access_token, oauth_refresh_token, oauth_expiry, oauth_access_token_key_id, oauth_refresh_token_key_id, claims
`

// UpdateUserLinkParams holds the new OAuth token fields plus the
// (user_id, login_type) key selecting the row to update.
type UpdateUserLinkParams struct {
	OAuthAccessToken       string         `db:"oauth_access_token" json:"oauth_access_token"`
	OAuthAccessTokenKeyID  sql.NullString `db:"oauth_access_token_key_id" json:"oauth_access_token_key_id"`
	OAuthRefreshToken      string         `db:"oauth_refresh_token" json:"oauth_refresh_token"`
	OAuthRefreshTokenKeyID sql.NullString `db:"oauth_refresh_token_key_id" json:"oauth_refresh_token_key_id"`
	OAuthExpiry            time.Time      `db:"oauth_expiry" json:"oauth_expiry"`
	Claims                 UserLinkClaims `db:"claims" json:"claims"`
	UserID                 uuid.UUID      `db:"user_id" json:"user_id"`
	LoginType              LoginType      `db:"login_type" json:"login_type"`
}

// UpdateUserLink replaces the OAuth tokens, expiry, and claims on the user
// link keyed by (user_id, login_type) and returns the updated row.
func (q *sqlQuerier) UpdateUserLink(ctx context.Context, arg UpdateUserLinkParams) (UserLink, error) {
	row := q.db.QueryRowContext(ctx, updateUserLink,
		arg.OAuthAccessToken,
		arg.OAuthAccessTokenKeyID,
		arg.OAuthRefreshToken,
		arg.OAuthRefreshTokenKeyID,
		arg.OAuthExpiry,
		arg.Claims,
		arg.UserID,
		arg.LoginType,
	)
	var i UserLink
	err := row.Scan(
		&i.UserID,
		&i.LoginType,
		&i.LinkedID,
		&i.OAuthAccessToken,
		&i.OAuthRefreshToken,
		&i.OAuthExpiry,
		&i.OAuthAccessTokenKeyID,
		&i.OAuthRefreshTokenKeyID,
		&i.Claims,
	)
	return i, err
}
|
|
|
|
const updateUserLinkedID = `-- name: UpdateUserLinkedID :one
UPDATE
	user_links
SET
	linked_id = $1
WHERE
	user_id = $2 AND login_type = $3 RETURNING user_id, login_type, linked_id, oauth_access_token, oauth_refresh_token, oauth_expiry, oauth_access_token_key_id, oauth_refresh_token_key_id, claims
`

// UpdateUserLinkedIDParams holds the new linked_id plus the
// (user_id, login_type) key selecting the row to update.
type UpdateUserLinkedIDParams struct {
	LinkedID  string    `db:"linked_id" json:"linked_id"`
	UserID    uuid.UUID `db:"user_id" json:"user_id"`
	LoginType LoginType `db:"login_type" json:"login_type"`
}

// UpdateUserLinkedID sets linked_id on the user link keyed by
// (user_id, login_type) and returns the updated row.
func (q *sqlQuerier) UpdateUserLinkedID(ctx context.Context, arg UpdateUserLinkedIDParams) (UserLink, error) {
	row := q.db.QueryRowContext(ctx, updateUserLinkedID, arg.LinkedID, arg.UserID, arg.LoginType)
	var i UserLink
	err := row.Scan(
		&i.UserID,
		&i.LoginType,
		&i.LinkedID,
		&i.OAuthAccessToken,
		&i.OAuthRefreshToken,
		&i.OAuthExpiry,
		&i.OAuthAccessTokenKeyID,
		&i.OAuthRefreshTokenKeyID,
		&i.Claims,
	)
	return i, err
}
|
|
|
|
const allUserIDs = `-- name: AllUserIDs :many
SELECT DISTINCT id FROM USERS
WHERE CASE WHEN $1::bool THEN TRUE ELSE is_system = false END
`

// AllUserIDs returns all UserIDs regardless of user status or deletion.
// System users are excluded unless includeSystem is true.
func (q *sqlQuerier) AllUserIDs(ctx context.Context, includeSystem bool) ([]uuid.UUID, error) {
	rows, err := q.db.QueryContext(ctx, allUserIDs, includeSystem)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []uuid.UUID
	for rows.Next() {
		var id uuid.UUID
		if err := rows.Scan(&id); err != nil {
			return nil, err
		}
		items = append(items, id)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getActiveUserCount = `-- name: GetActiveUserCount :one
SELECT
	COUNT(*)
FROM
	users
WHERE
	status = 'active'::user_status AND deleted = false
	AND CASE WHEN $1::bool THEN TRUE ELSE is_system = false END
`

// GetActiveUserCount counts non-deleted users with status 'active'.
// System users are excluded unless includeSystem is true.
func (q *sqlQuerier) GetActiveUserCount(ctx context.Context, includeSystem bool) (int64, error) {
	row := q.db.QueryRowContext(ctx, getActiveUserCount, includeSystem)
	var count int64
	err := row.Scan(&count)
	return count, err
}
|
|
|
|
const getAuthorizationUserRoles = `-- name: GetAuthorizationUserRoles :one
SELECT
	-- username and email are returned just to help for logging purposes
	-- status is used to enforce 'suspended' users, as all roles are ignored
	-- when suspended.
	id, username, status, email,
	-- All user roles, including their org roles.
	array_cat(
		-- All users are members
		array_append(users.rbac_roles, 'member'),
		(
			SELECT
				-- The roles are returned as a flat array, org scoped and site side.
				-- Concatenating the organization id scopes the organization roles.
				array_agg(org_roles || ':' || organization_members.organization_id::text)
			FROM
				organization_members,
				-- All org_members get the organization-member role for their orgs
				unnest(
					array_append(roles, 'organization-member')
				) AS org_roles
			WHERE
				user_id = users.id
		)
	) :: text[] AS roles,
	-- All groups the user is in.
	(
		SELECT
			array_agg(
				group_members.group_id :: text
			)
		FROM
			group_members
		WHERE
			user_id = users.id
	) :: text[] AS groups
FROM
	users
WHERE
	id = $1
`

// GetAuthorizationUserRolesRow is the result row for GetAuthorizationUserRoles.
// Roles contains site roles plus org roles suffixed with ':<organization_id>';
// Groups contains the group IDs (as text) the user belongs to.
type GetAuthorizationUserRolesRow struct {
	ID       uuid.UUID  `db:"id" json:"id"`
	Username string     `db:"username" json:"username"`
	Status   UserStatus `db:"status" json:"status"`
	Email    string     `db:"email" json:"email"`
	Roles    []string   `db:"roles" json:"roles"`
	Groups   []string   `db:"groups" json:"groups"`
}

// This function returns roles for authorization purposes. Implied member roles
// are included.
func (q *sqlQuerier) GetAuthorizationUserRoles(ctx context.Context, userID uuid.UUID) (GetAuthorizationUserRolesRow, error) {
	row := q.db.QueryRowContext(ctx, getAuthorizationUserRoles, userID)
	var i GetAuthorizationUserRolesRow
	err := row.Scan(
		&i.ID,
		&i.Username,
		&i.Status,
		&i.Email,
		// pq.Array decodes the Postgres text[] columns into []string.
		pq.Array(&i.Roles),
		pq.Array(&i.Groups),
	)
	return i, err
}
|
|
|
|
const getUserByEmailOrUsername = `-- name: GetUserByEmailOrUsername :one
SELECT
	id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, login_type, avatar_url, deleted, last_seen_at, quiet_hours_schedule, name, github_com_user_id, hashed_one_time_passcode, one_time_passcode_expires_at, is_system
FROM
	users
WHERE
	(LOWER(username) = LOWER($1) OR LOWER(email) = LOWER($2)) AND
	deleted = false
LIMIT
	1
`

// GetUserByEmailOrUsernameParams holds the (case-insensitively matched)
// username and email to look up.
type GetUserByEmailOrUsernameParams struct {
	Username string `db:"username" json:"username"`
	Email    string `db:"email" json:"email"`
}

// GetUserByEmailOrUsername fetches the first non-deleted user whose username
// or email matches case-insensitively.
func (q *sqlQuerier) GetUserByEmailOrUsername(ctx context.Context, arg GetUserByEmailOrUsernameParams) (User, error) {
	row := q.db.QueryRowContext(ctx, getUserByEmailOrUsername, arg.Username, arg.Email)
	var i User
	err := row.Scan(
		&i.ID,
		&i.Email,
		&i.Username,
		&i.HashedPassword,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Status,
		&i.RBACRoles,
		&i.LoginType,
		&i.AvatarURL,
		&i.Deleted,
		&i.LastSeenAt,
		&i.QuietHoursSchedule,
		&i.Name,
		&i.GithubComUserID,
		&i.HashedOneTimePasscode,
		&i.OneTimePasscodeExpiresAt,
		&i.IsSystem,
	)
	return i, err
}
|
|
|
|
const getUserByID = `-- name: GetUserByID :one
SELECT
	id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, login_type, avatar_url, deleted, last_seen_at, quiet_hours_schedule, name, github_com_user_id, hashed_one_time_passcode, one_time_passcode_expires_at, is_system
FROM
	users
WHERE
	id = $1
LIMIT
	1
`

// GetUserByID fetches a single user by ID. Note: unlike
// GetUserByEmailOrUsername, this query does not filter out deleted users.
func (q *sqlQuerier) GetUserByID(ctx context.Context, id uuid.UUID) (User, error) {
	row := q.db.QueryRowContext(ctx, getUserByID, id)
	var i User
	err := row.Scan(
		&i.ID,
		&i.Email,
		&i.Username,
		&i.HashedPassword,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Status,
		&i.RBACRoles,
		&i.LoginType,
		&i.AvatarURL,
		&i.Deleted,
		&i.LastSeenAt,
		&i.QuietHoursSchedule,
		&i.Name,
		&i.GithubComUserID,
		&i.HashedOneTimePasscode,
		&i.OneTimePasscodeExpiresAt,
		&i.IsSystem,
	)
	return i, err
}
|
|
|
|
const getUserCount = `-- name: GetUserCount :one
SELECT
	COUNT(*)
FROM
	users
WHERE
	deleted = false
	AND CASE WHEN $1::bool THEN TRUE ELSE is_system = false END
`

// GetUserCount counts non-deleted users of any status.
// System users are excluded unless includeSystem is true.
func (q *sqlQuerier) GetUserCount(ctx context.Context, includeSystem bool) (int64, error) {
	row := q.db.QueryRowContext(ctx, getUserCount, includeSystem)
	var count int64
	err := row.Scan(&count)
	return count, err
}
|
|
|
|
const getUserTerminalFont = `-- name: GetUserTerminalFont :one
SELECT
	value as terminal_font
FROM
	user_configs
WHERE
	user_id = $1
	AND key = 'terminal_font'
`

// GetUserTerminalFont returns the user's 'terminal_font' value from the
// user_configs key/value table.
func (q *sqlQuerier) GetUserTerminalFont(ctx context.Context, userID uuid.UUID) (string, error) {
	row := q.db.QueryRowContext(ctx, getUserTerminalFont, userID)
	var terminal_font string
	err := row.Scan(&terminal_font)
	return terminal_font, err
}
|
|
|
|
const getUserThemePreference = `-- name: GetUserThemePreference :one
SELECT
	value as theme_preference
FROM
	user_configs
WHERE
	user_id = $1
	AND key = 'theme_preference'
`

// GetUserThemePreference returns the user's 'theme_preference' value from the
// user_configs key/value table.
func (q *sqlQuerier) GetUserThemePreference(ctx context.Context, userID uuid.UUID) (string, error) {
	row := q.db.QueryRowContext(ctx, getUserThemePreference, userID)
	var theme_preference string
	err := row.Scan(&theme_preference)
	return theme_preference, err
}
|
|
|
|
// getUsers is the paginated, filterable user listing. It excludes deleted
// users, supports a cursor ($1, the last ID of the previous page), a set of
// optional filters ($2-$11, each guarded by a CASE that falls through to
// true when the filter value is empty/zero), and OFFSET/LIMIT pagination
// ($12/$13). COUNT(*) OVER() attaches the total matching row count to every
// returned row.
const getUsers = `-- name: GetUsers :many
SELECT
	id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, login_type, avatar_url, deleted, last_seen_at, quiet_hours_schedule, name, github_com_user_id, hashed_one_time_passcode, one_time_passcode_expires_at, is_system, COUNT(*) OVER() AS count
FROM
	users
WHERE
	users.deleted = false
	AND CASE
		-- This allows using the last element on a page as effectively a cursor.
		-- This is an important option for scripts that need to paginate without
		-- duplicating or missing data.
		WHEN $1 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN (
			-- The pagination cursor is the last ID of the previous page.
			-- The query is ordered by the username field, so select all
			-- rows after the cursor.
			(LOWER(username)) > (
				SELECT
					LOWER(username)
				FROM
					users
				WHERE
					id = $1
			)
		)
		ELSE true
	END
	-- Start filters
	-- Filter by name, email or username
	AND CASE
		WHEN $2 :: text != '' THEN (
			email ILIKE concat('%', $2, '%')
			OR username ILIKE concat('%', $2, '%')
		)
		ELSE true
	END
	-- Filter by status
	AND CASE
		-- @status needs to be a text because it can be empty, If it was
		-- user_status enum, it would not.
		WHEN cardinality($3 :: user_status[]) > 0 THEN
			status = ANY($3 :: user_status[])
		ELSE true
	END
	-- Filter by rbac_roles
	AND CASE
		-- @rbac_role allows filtering by rbac roles. If 'member' is included, show everyone, as
		-- everyone is a member.
		WHEN cardinality($4 :: text[]) > 0 AND 'member' != ANY($4 :: text[]) THEN
			rbac_roles && $4 :: text[]
		ELSE true
	END
	-- Filter by last_seen
	AND CASE
		WHEN $5 :: timestamp with time zone != '0001-01-01 00:00:00Z' THEN
			last_seen_at <= $5
		ELSE true
	END
	AND CASE
		WHEN $6 :: timestamp with time zone != '0001-01-01 00:00:00Z' THEN
			last_seen_at >= $6
		ELSE true
	END
	-- Filter by created_at
	AND CASE
		WHEN $7 :: timestamp with time zone != '0001-01-01 00:00:00Z' THEN
			created_at <= $7
		ELSE true
	END
	AND CASE
		WHEN $8 :: timestamp with time zone != '0001-01-01 00:00:00Z' THEN
			created_at >= $8
		ELSE true
	END
	AND CASE
		WHEN $9::bool THEN TRUE
		ELSE
			is_system = false
	END
	AND CASE
		WHEN $10 :: bigint != 0 THEN
			github_com_user_id = $10
		ELSE true
	END
	-- Filter by login_type
	AND CASE
		WHEN cardinality($11 :: login_type[]) > 0 THEN
			login_type = ANY($11 :: login_type[])
		ELSE true
	END
	-- End of filters

	-- Authorize Filter clause will be injected below in GetAuthorizedUsers
	-- @authorize_filter
ORDER BY
	-- Deterministic and consistent ordering of all users. This is to ensure consistent pagination.
	LOWER(username) ASC OFFSET $12
LIMIT
	-- A null limit means "no limit", so 0 means return all
	NULLIF($13 :: int, 0)
`

// GetUsersParams carries the cursor, filters, and pagination options for
// GetUsers. Zero values (empty string/slice, zero time, zero int) disable
// the corresponding filter in the SQL above.
type GetUsersParams struct {
	// AfterID is the pagination cursor: the ID of the last user on the
	// previous page. The uuid zero value disables cursor pagination.
	AfterID uuid.UUID `db:"after_id" json:"after_id"`
	// Search matches email or username case-insensitively (ILIKE substring).
	Search string `db:"search" json:"search"`
	Status []UserStatus `db:"status" json:"status"`
	RbacRole []string `db:"rbac_role" json:"rbac_role"`
	LastSeenBefore time.Time `db:"last_seen_before" json:"last_seen_before"`
	LastSeenAfter time.Time `db:"last_seen_after" json:"last_seen_after"`
	CreatedBefore time.Time `db:"created_before" json:"created_before"`
	CreatedAfter time.Time `db:"created_after" json:"created_after"`
	// IncludeSystem, when true, includes system users in the results.
	IncludeSystem bool `db:"include_system" json:"include_system"`
	GithubComUserID int64 `db:"github_com_user_id" json:"github_com_user_id"`
	LoginType []LoginType `db:"login_type" json:"login_type"`
	OffsetOpt int32 `db:"offset_opt" json:"offset_opt"`
	// LimitOpt of 0 means no limit (NULLIF in the query).
	LimitOpt int32 `db:"limit_opt" json:"limit_opt"`
}

// GetUsersRow is a users row plus Count, the total number of rows matching
// the filters (from COUNT(*) OVER()), identical on every row of a page.
type GetUsersRow struct {
	ID uuid.UUID `db:"id" json:"id"`
	Email string `db:"email" json:"email"`
	Username string `db:"username" json:"username"`
	HashedPassword []byte `db:"hashed_password" json:"hashed_password"`
	CreatedAt time.Time `db:"created_at" json:"created_at"`
	UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
	Status UserStatus `db:"status" json:"status"`
	RBACRoles pq.StringArray `db:"rbac_roles" json:"rbac_roles"`
	LoginType LoginType `db:"login_type" json:"login_type"`
	AvatarURL string `db:"avatar_url" json:"avatar_url"`
	Deleted bool `db:"deleted" json:"deleted"`
	LastSeenAt time.Time `db:"last_seen_at" json:"last_seen_at"`
	QuietHoursSchedule string `db:"quiet_hours_schedule" json:"quiet_hours_schedule"`
	Name string `db:"name" json:"name"`
	GithubComUserID sql.NullInt64 `db:"github_com_user_id" json:"github_com_user_id"`
	HashedOneTimePasscode []byte `db:"hashed_one_time_passcode" json:"hashed_one_time_passcode"`
	OneTimePasscodeExpiresAt sql.NullTime `db:"one_time_passcode_expires_at" json:"one_time_passcode_expires_at"`
	IsSystem bool `db:"is_system" json:"is_system"`
	Count int64 `db:"count" json:"count"`
}

// This will never return deleted users.
func (q *sqlQuerier) GetUsers(ctx context.Context, arg GetUsersParams) ([]GetUsersRow, error) {
	rows, err := q.db.QueryContext(ctx, getUsers,
		arg.AfterID,
		arg.Search,
		pq.Array(arg.Status),
		pq.Array(arg.RbacRole),
		arg.LastSeenBefore,
		arg.LastSeenAfter,
		arg.CreatedBefore,
		arg.CreatedAfter,
		arg.IncludeSystem,
		arg.GithubComUserID,
		pq.Array(arg.LoginType),
		arg.OffsetOpt,
		arg.LimitOpt,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetUsersRow
	for rows.Next() {
		var i GetUsersRow
		if err := rows.Scan(
			&i.ID,
			&i.Email,
			&i.Username,
			&i.HashedPassword,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.Status,
			&i.RBACRoles,
			&i.LoginType,
			&i.AvatarURL,
			&i.Deleted,
			&i.LastSeenAt,
			&i.QuietHoursSchedule,
			&i.Name,
			&i.GithubComUserID,
			&i.HashedOneTimePasscode,
			&i.OneTimePasscodeExpiresAt,
			&i.IsSystem,
			&i.Count,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getUsersByIDs bulk-fetches users whose IDs appear in the given uuid array.
const getUsersByIDs = `-- name: GetUsersByIDs :many
SELECT id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, login_type, avatar_url, deleted, last_seen_at, quiet_hours_schedule, name, github_com_user_id, hashed_one_time_passcode, one_time_passcode_expires_at, is_system FROM users WHERE id = ANY($1 :: uuid [ ])
`

// This shouldn't check for deleted, because it's frequently used
// to look up references to actions. eg. a user could build a workspace
// for another user, then be deleted... we still want them to appear!
func (q *sqlQuerier) GetUsersByIDs(ctx context.Context, ids []uuid.UUID) ([]User, error) {
	rows, err := q.db.QueryContext(ctx, getUsersByIDs, pq.Array(ids))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []User
	for rows.Next() {
		var i User
		if err := rows.Scan(
			&i.ID,
			&i.Email,
			&i.Username,
			&i.HashedPassword,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.Status,
			&i.RBACRoles,
			&i.LoginType,
			&i.AvatarURL,
			&i.Deleted,
			&i.LastSeenAt,
			&i.QuietHoursSchedule,
			&i.Name,
			&i.GithubComUserID,
			&i.HashedOneTimePasscode,
			&i.OneTimePasscodeExpiresAt,
			&i.IsSystem,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// insertUser creates a user row and returns the full inserted row. An empty
// status string falls back to 'dormant' via COALESCE/NULLIF.
const insertUser = `-- name: InsertUser :one
INSERT INTO
	users (
		id,
		email,
		username,
		name,
		hashed_password,
		created_at,
		updated_at,
		rbac_roles,
		login_type,
		status
	)
VALUES
	($1, $2, $3, $4, $5, $6, $7, $8, $9,
		-- if the status passed in is empty, fallback to dormant, which is what
		-- we were doing before.
		COALESCE(NULLIF($10::text, '')::user_status, 'dormant'::user_status)
	) RETURNING id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, login_type, avatar_url, deleted, last_seen_at, quiet_hours_schedule, name, github_com_user_id, hashed_one_time_passcode, one_time_passcode_expires_at, is_system
`

// InsertUserParams holds all columns written by InsertUser. Status is a
// plain string (not UserStatus) so an empty value can trigger the SQL
// fallback to 'dormant'.
type InsertUserParams struct {
	ID uuid.UUID `db:"id" json:"id"`
	Email string `db:"email" json:"email"`
	Username string `db:"username" json:"username"`
	Name string `db:"name" json:"name"`
	HashedPassword []byte `db:"hashed_password" json:"hashed_password"`
	CreatedAt time.Time `db:"created_at" json:"created_at"`
	UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
	RBACRoles pq.StringArray `db:"rbac_roles" json:"rbac_roles"`
	LoginType LoginType `db:"login_type" json:"login_type"`
	Status string `db:"status" json:"status"`
}

// InsertUser inserts a new user and returns the stored row.
func (q *sqlQuerier) InsertUser(ctx context.Context, arg InsertUserParams) (User, error) {
	row := q.db.QueryRowContext(ctx, insertUser,
		arg.ID,
		arg.Email,
		arg.Username,
		arg.Name,
		arg.HashedPassword,
		arg.CreatedAt,
		arg.UpdatedAt,
		arg.RBACRoles,
		arg.LoginType,
		arg.Status,
	)
	var i User
	err := row.Scan(
		&i.ID,
		&i.Email,
		&i.Username,
		&i.HashedPassword,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Status,
		&i.RBACRoles,
		&i.LoginType,
		&i.AvatarURL,
		&i.Deleted,
		&i.LastSeenAt,
		&i.QuietHoursSchedule,
		&i.Name,
		&i.GithubComUserID,
		&i.HashedOneTimePasscode,
		&i.OneTimePasscodeExpiresAt,
		&i.IsSystem,
	)
	return i, err
}
|
|
|
|
// updateInactiveUsersToDormant marks active, non-system users whose
// last_seen_at is older than the cutoff ($2) as dormant, stamping
// updated_at ($1), and returns the affected users.
const updateInactiveUsersToDormant = `-- name: UpdateInactiveUsersToDormant :many
UPDATE
	users
SET
	status = 'dormant'::user_status,
	updated_at = $1
WHERE
	last_seen_at < $2 :: timestamp
	AND status = 'active'::user_status
	AND NOT is_system
RETURNING id, email, username, last_seen_at
`

// UpdateInactiveUsersToDormantParams supplies the updated_at stamp and the
// last-seen cutoff for the dormancy sweep.
type UpdateInactiveUsersToDormantParams struct {
	UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
	// LastSeenAfter is the cutoff: users last seen BEFORE this time are
	// made dormant (the query uses last_seen_at < $2).
	LastSeenAfter time.Time `db:"last_seen_after" json:"last_seen_after"`
}

// UpdateInactiveUsersToDormantRow identifies a user transitioned to dormant.
type UpdateInactiveUsersToDormantRow struct {
	ID uuid.UUID `db:"id" json:"id"`
	Email string `db:"email" json:"email"`
	Username string `db:"username" json:"username"`
	LastSeenAt time.Time `db:"last_seen_at" json:"last_seen_at"`
}

// UpdateInactiveUsersToDormant performs the dormancy sweep and returns the
// users that were transitioned.
func (q *sqlQuerier) UpdateInactiveUsersToDormant(ctx context.Context, arg UpdateInactiveUsersToDormantParams) ([]UpdateInactiveUsersToDormantRow, error) {
	rows, err := q.db.QueryContext(ctx, updateInactiveUsersToDormant, arg.UpdatedAt, arg.LastSeenAfter)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []UpdateInactiveUsersToDormantRow
	for rows.Next() {
		var i UpdateInactiveUsersToDormantRow
		if err := rows.Scan(
			&i.ID,
			&i.Email,
			&i.Username,
			&i.LastSeenAt,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// updateUserDeletedByID soft-deletes a user by setting deleted = true; the
// row is retained for historical references.
const updateUserDeletedByID = `-- name: UpdateUserDeletedByID :exec
UPDATE
	users
SET
	deleted = true
WHERE
	id = $1
`

// UpdateUserDeletedByID soft-deletes the user with the given ID. It does
// not report whether a row was actually updated.
func (q *sqlQuerier) UpdateUserDeletedByID(ctx context.Context, id uuid.UUID) error {
	_, err := q.db.ExecContext(ctx, updateUserDeletedByID, id)
	return err
}
|
|
|
|
// updateUserGithubComUserID sets (or clears, via NULL) the linked
// github.com user ID for a user.
const updateUserGithubComUserID = `-- name: UpdateUserGithubComUserID :exec
UPDATE
	users
SET
	github_com_user_id = $2
WHERE
	id = $1
`

// UpdateUserGithubComUserIDParams identifies the user and the nullable
// GitHub user ID to store.
type UpdateUserGithubComUserIDParams struct {
	ID uuid.UUID `db:"id" json:"id"`
	GithubComUserID sql.NullInt64 `db:"github_com_user_id" json:"github_com_user_id"`
}

// UpdateUserGithubComUserID updates the user's github_com_user_id column.
func (q *sqlQuerier) UpdateUserGithubComUserID(ctx context.Context, arg UpdateUserGithubComUserIDParams) error {
	_, err := q.db.ExecContext(ctx, updateUserGithubComUserID, arg.ID, arg.GithubComUserID)
	return err
}
|
|
|
|
// updateUserHashedOneTimePasscode stores a hashed one-time passcode and its
// expiry for a user (both nullable, so passing zero values clears them).
const updateUserHashedOneTimePasscode = `-- name: UpdateUserHashedOneTimePasscode :exec
UPDATE
	users
SET
	hashed_one_time_passcode = $2,
	one_time_passcode_expires_at = $3
WHERE
	id = $1
`

// UpdateUserHashedOneTimePasscodeParams identifies the user and the passcode
// hash plus its expiry time.
type UpdateUserHashedOneTimePasscodeParams struct {
	ID uuid.UUID `db:"id" json:"id"`
	HashedOneTimePasscode []byte `db:"hashed_one_time_passcode" json:"hashed_one_time_passcode"`
	OneTimePasscodeExpiresAt sql.NullTime `db:"one_time_passcode_expires_at" json:"one_time_passcode_expires_at"`
}

// UpdateUserHashedOneTimePasscode writes the passcode hash and expiry.
func (q *sqlQuerier) UpdateUserHashedOneTimePasscode(ctx context.Context, arg UpdateUserHashedOneTimePasscodeParams) error {
	_, err := q.db.ExecContext(ctx, updateUserHashedOneTimePasscode, arg.ID, arg.HashedOneTimePasscode, arg.OneTimePasscodeExpiresAt)
	return err
}
|
|
|
|
// updateUserHashedPassword replaces a user's password hash and invalidates
// any outstanding one-time passcode by nulling both passcode columns.
const updateUserHashedPassword = `-- name: UpdateUserHashedPassword :exec
UPDATE
	users
SET
	hashed_password = $2,
	hashed_one_time_passcode = NULL,
	one_time_passcode_expires_at = NULL
WHERE
	id = $1
`

// UpdateUserHashedPasswordParams identifies the user and the new password hash.
type UpdateUserHashedPasswordParams struct {
	ID uuid.UUID `db:"id" json:"id"`
	HashedPassword []byte `db:"hashed_password" json:"hashed_password"`
}

// UpdateUserHashedPassword sets the password hash and clears any pending
// one-time passcode.
func (q *sqlQuerier) UpdateUserHashedPassword(ctx context.Context, arg UpdateUserHashedPasswordParams) error {
	_, err := q.db.ExecContext(ctx, updateUserHashedPassword, arg.ID, arg.HashedPassword)
	return err
}
|
|
|
|
// updateUserLastSeenAt stamps a user's last_seen_at and updated_at and
// returns the full updated row.
const updateUserLastSeenAt = `-- name: UpdateUserLastSeenAt :one
UPDATE
	users
SET
	last_seen_at = $2,
	updated_at = $3
WHERE
	id = $1 RETURNING id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, login_type, avatar_url, deleted, last_seen_at, quiet_hours_schedule, name, github_com_user_id, hashed_one_time_passcode, one_time_passcode_expires_at, is_system
`

// UpdateUserLastSeenAtParams identifies the user and the two timestamps to set.
type UpdateUserLastSeenAtParams struct {
	ID uuid.UUID `db:"id" json:"id"`
	LastSeenAt time.Time `db:"last_seen_at" json:"last_seen_at"`
	UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
}

// UpdateUserLastSeenAt updates activity timestamps and returns the user.
func (q *sqlQuerier) UpdateUserLastSeenAt(ctx context.Context, arg UpdateUserLastSeenAtParams) (User, error) {
	row := q.db.QueryRowContext(ctx, updateUserLastSeenAt, arg.ID, arg.LastSeenAt, arg.UpdatedAt)
	var i User
	err := row.Scan(
		&i.ID,
		&i.Email,
		&i.Username,
		&i.HashedPassword,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Status,
		&i.RBACRoles,
		&i.LoginType,
		&i.AvatarURL,
		&i.Deleted,
		&i.LastSeenAt,
		&i.QuietHoursSchedule,
		&i.Name,
		&i.GithubComUserID,
		&i.HashedOneTimePasscode,
		&i.OneTimePasscodeExpiresAt,
		&i.IsSystem,
	)
	return i, err
}
|
|
|
|
// updateUserLoginType changes a user's login type. When switching away from
// 'password', the stored password hash is cleared to an empty bytea. System
// users are excluded (AND NOT is_system), so Scan will return sql.ErrNoRows
// for them.
const updateUserLoginType = `-- name: UpdateUserLoginType :one
UPDATE
	users
SET
	login_type = $1,
	hashed_password = CASE WHEN $1 = 'password' :: login_type THEN
		users.hashed_password
	ELSE
		-- If the login type is not password, then the password should be
		-- cleared.
		'':: bytea
	END
WHERE
	id = $2
	AND NOT is_system
RETURNING id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, login_type, avatar_url, deleted, last_seen_at, quiet_hours_schedule, name, github_com_user_id, hashed_one_time_passcode, one_time_passcode_expires_at, is_system
`

// UpdateUserLoginTypeParams identifies the user and the new login type.
type UpdateUserLoginTypeParams struct {
	NewLoginType LoginType `db:"new_login_type" json:"new_login_type"`
	UserID uuid.UUID `db:"user_id" json:"user_id"`
}

// UpdateUserLoginType changes the login type (clearing the password hash if
// the new type is not 'password') and returns the updated user.
func (q *sqlQuerier) UpdateUserLoginType(ctx context.Context, arg UpdateUserLoginTypeParams) (User, error) {
	row := q.db.QueryRowContext(ctx, updateUserLoginType, arg.NewLoginType, arg.UserID)
	var i User
	err := row.Scan(
		&i.ID,
		&i.Email,
		&i.Username,
		&i.HashedPassword,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Status,
		&i.RBACRoles,
		&i.LoginType,
		&i.AvatarURL,
		&i.Deleted,
		&i.LastSeenAt,
		&i.QuietHoursSchedule,
		&i.Name,
		&i.GithubComUserID,
		&i.HashedOneTimePasscode,
		&i.OneTimePasscodeExpiresAt,
		&i.IsSystem,
	)
	return i, err
}
|
|
|
|
// updateUserProfile updates the user-editable profile fields and returns the
// full updated row.
const updateUserProfile = `-- name: UpdateUserProfile :one
UPDATE
	users
SET
	email = $2,
	username = $3,
	avatar_url = $4,
	updated_at = $5,
	name = $6
WHERE
	id = $1
RETURNING id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, login_type, avatar_url, deleted, last_seen_at, quiet_hours_schedule, name, github_com_user_id, hashed_one_time_passcode, one_time_passcode_expires_at, is_system
`

// UpdateUserProfileParams carries the profile fields written by
// UpdateUserProfile. All listed fields are overwritten unconditionally.
type UpdateUserProfileParams struct {
	ID uuid.UUID `db:"id" json:"id"`
	Email string `db:"email" json:"email"`
	Username string `db:"username" json:"username"`
	AvatarURL string `db:"avatar_url" json:"avatar_url"`
	UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
	Name string `db:"name" json:"name"`
}

// UpdateUserProfile rewrites the profile columns and returns the user.
func (q *sqlQuerier) UpdateUserProfile(ctx context.Context, arg UpdateUserProfileParams) (User, error) {
	row := q.db.QueryRowContext(ctx, updateUserProfile,
		arg.ID,
		arg.Email,
		arg.Username,
		arg.AvatarURL,
		arg.UpdatedAt,
		arg.Name,
	)
	var i User
	err := row.Scan(
		&i.ID,
		&i.Email,
		&i.Username,
		&i.HashedPassword,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Status,
		&i.RBACRoles,
		&i.LoginType,
		&i.AvatarURL,
		&i.Deleted,
		&i.LastSeenAt,
		&i.QuietHoursSchedule,
		&i.Name,
		&i.GithubComUserID,
		&i.HashedOneTimePasscode,
		&i.OneTimePasscodeExpiresAt,
		&i.IsSystem,
	)
	return i, err
}
|
|
|
|
// updateUserQuietHoursSchedule replaces the user's quiet-hours schedule
// string and returns the full updated row.
const updateUserQuietHoursSchedule = `-- name: UpdateUserQuietHoursSchedule :one
UPDATE
	users
SET
	quiet_hours_schedule = $2
WHERE
	id = $1
RETURNING id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, login_type, avatar_url, deleted, last_seen_at, quiet_hours_schedule, name, github_com_user_id, hashed_one_time_passcode, one_time_passcode_expires_at, is_system
`

// UpdateUserQuietHoursScheduleParams identifies the user and the new
// schedule value.
type UpdateUserQuietHoursScheduleParams struct {
	ID uuid.UUID `db:"id" json:"id"`
	QuietHoursSchedule string `db:"quiet_hours_schedule" json:"quiet_hours_schedule"`
}

// UpdateUserQuietHoursSchedule writes the schedule and returns the user.
func (q *sqlQuerier) UpdateUserQuietHoursSchedule(ctx context.Context, arg UpdateUserQuietHoursScheduleParams) (User, error) {
	row := q.db.QueryRowContext(ctx, updateUserQuietHoursSchedule, arg.ID, arg.QuietHoursSchedule)
	var i User
	err := row.Scan(
		&i.ID,
		&i.Email,
		&i.Username,
		&i.HashedPassword,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Status,
		&i.RBACRoles,
		&i.LoginType,
		&i.AvatarURL,
		&i.Deleted,
		&i.LastSeenAt,
		&i.QuietHoursSchedule,
		&i.Name,
		&i.GithubComUserID,
		&i.HashedOneTimePasscode,
		&i.OneTimePasscodeExpiresAt,
		&i.IsSystem,
	)
	return i, err
}
|
|
|
|
// updateUserRoles replaces the user's site-wide RBAC roles with the given
// set, de-duplicated in SQL via SELECT DISTINCT UNNEST.
const updateUserRoles = `-- name: UpdateUserRoles :one
UPDATE
	users
SET
	-- Remove all duplicates from the roles.
	rbac_roles = ARRAY(SELECT DISTINCT UNNEST($1 :: text[]))
WHERE
	id = $2
RETURNING id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, login_type, avatar_url, deleted, last_seen_at, quiet_hours_schedule, name, github_com_user_id, hashed_one_time_passcode, one_time_passcode_expires_at, is_system
`

// UpdateUserRolesParams carries the complete replacement role set and the
// target user ID.
type UpdateUserRolesParams struct {
	GrantedRoles []string `db:"granted_roles" json:"granted_roles"`
	ID uuid.UUID `db:"id" json:"id"`
}

// UpdateUserRoles overwrites rbac_roles (duplicates removed) and returns
// the updated user.
func (q *sqlQuerier) UpdateUserRoles(ctx context.Context, arg UpdateUserRolesParams) (User, error) {
	row := q.db.QueryRowContext(ctx, updateUserRoles, pq.Array(arg.GrantedRoles), arg.ID)
	var i User
	err := row.Scan(
		&i.ID,
		&i.Email,
		&i.Username,
		&i.HashedPassword,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Status,
		&i.RBACRoles,
		&i.LoginType,
		&i.AvatarURL,
		&i.Deleted,
		&i.LastSeenAt,
		&i.QuietHoursSchedule,
		&i.Name,
		&i.GithubComUserID,
		&i.HashedOneTimePasscode,
		&i.OneTimePasscodeExpiresAt,
		&i.IsSystem,
	)
	return i, err
}
|
|
|
|
// updateUserStatus sets the user's status and updated_at, returning the
// full updated row.
const updateUserStatus = `-- name: UpdateUserStatus :one
UPDATE
	users
SET
	status = $2,
	updated_at = $3
WHERE
	id = $1 RETURNING id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, login_type, avatar_url, deleted, last_seen_at, quiet_hours_schedule, name, github_com_user_id, hashed_one_time_passcode, one_time_passcode_expires_at, is_system
`

// UpdateUserStatusParams identifies the user, the new status, and the
// updated_at stamp.
type UpdateUserStatusParams struct {
	ID uuid.UUID `db:"id" json:"id"`
	Status UserStatus `db:"status" json:"status"`
	UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
}

// UpdateUserStatus writes the status transition and returns the user.
func (q *sqlQuerier) UpdateUserStatus(ctx context.Context, arg UpdateUserStatusParams) (User, error) {
	row := q.db.QueryRowContext(ctx, updateUserStatus, arg.ID, arg.Status, arg.UpdatedAt)
	var i User
	err := row.Scan(
		&i.ID,
		&i.Email,
		&i.Username,
		&i.HashedPassword,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Status,
		&i.RBACRoles,
		&i.LoginType,
		&i.AvatarURL,
		&i.Deleted,
		&i.LastSeenAt,
		&i.QuietHoursSchedule,
		&i.Name,
		&i.GithubComUserID,
		&i.HashedOneTimePasscode,
		&i.OneTimePasscodeExpiresAt,
		&i.IsSystem,
	)
	return i, err
}
|
|
|
|
// updateUserTerminalFont upserts the per-user 'terminal_font' config row,
// keyed on the user_configs primary-key constraint.
const updateUserTerminalFont = `-- name: UpdateUserTerminalFont :one
INSERT INTO
	user_configs (user_id, key, value)
VALUES
	($1, 'terminal_font', $2)
ON CONFLICT
	ON CONSTRAINT user_configs_pkey
DO UPDATE
SET
	value = $2
WHERE user_configs.user_id = $1
	AND user_configs.key = 'terminal_font'
RETURNING user_id, key, value
`

// UpdateUserTerminalFontParams identifies the user and the font value to store.
type UpdateUserTerminalFontParams struct {
	UserID uuid.UUID `db:"user_id" json:"user_id"`
	TerminalFont string `db:"terminal_font" json:"terminal_font"`
}

// UpdateUserTerminalFont inserts or updates the user's terminal font config
// row and returns it.
func (q *sqlQuerier) UpdateUserTerminalFont(ctx context.Context, arg UpdateUserTerminalFontParams) (UserConfig, error) {
	row := q.db.QueryRowContext(ctx, updateUserTerminalFont, arg.UserID, arg.TerminalFont)
	var i UserConfig
	err := row.Scan(&i.UserID, &i.Key, &i.Value)
	return i, err
}
|
|
|
|
// updateUserThemePreference upserts the per-user 'theme_preference' config
// row, keyed on the user_configs primary-key constraint.
const updateUserThemePreference = `-- name: UpdateUserThemePreference :one
INSERT INTO
	user_configs (user_id, key, value)
VALUES
	($1, 'theme_preference', $2)
ON CONFLICT
	ON CONSTRAINT user_configs_pkey
DO UPDATE
SET
	value = $2
WHERE user_configs.user_id = $1
	AND user_configs.key = 'theme_preference'
RETURNING user_id, key, value
`

// UpdateUserThemePreferenceParams identifies the user and the theme value to
// store.
type UpdateUserThemePreferenceParams struct {
	UserID uuid.UUID `db:"user_id" json:"user_id"`
	ThemePreference string `db:"theme_preference" json:"theme_preference"`
}

// UpdateUserThemePreference inserts or updates the user's theme preference
// config row and returns it.
func (q *sqlQuerier) UpdateUserThemePreference(ctx context.Context, arg UpdateUserThemePreferenceParams) (UserConfig, error) {
	row := q.db.QueryRowContext(ctx, updateUserThemePreference, arg.UserID, arg.ThemePreference)
	var i UserConfig
	err := row.Scan(&i.UserID, &i.Key, &i.Value)
	return i, err
}
|
|
|
|
// getWorkspaceAgentDevcontainersByAgentID lists an agent's devcontainer
// records, ordered deterministically by (created_at, id).
const getWorkspaceAgentDevcontainersByAgentID = `-- name: GetWorkspaceAgentDevcontainersByAgentID :many
SELECT
	id, workspace_agent_id, created_at, workspace_folder, config_path, name
FROM
	workspace_agent_devcontainers
WHERE
	workspace_agent_id = $1
ORDER BY
	created_at, id
`

// GetWorkspaceAgentDevcontainersByAgentID returns all devcontainers owned by
// the given workspace agent, oldest first.
func (q *sqlQuerier) GetWorkspaceAgentDevcontainersByAgentID(ctx context.Context, workspaceAgentID uuid.UUID) ([]WorkspaceAgentDevcontainer, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceAgentDevcontainersByAgentID, workspaceAgentID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceAgentDevcontainer
	for rows.Next() {
		var i WorkspaceAgentDevcontainer
		if err := rows.Scan(
			&i.ID,
			&i.WorkspaceAgentID,
			&i.CreatedAt,
			&i.WorkspaceFolder,
			&i.ConfigPath,
			&i.Name,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// insertWorkspaceAgentDevcontainers bulk-inserts devcontainer rows for one
// agent by unnesting the parallel id/name/workspace_folder/config_path
// arrays; all rows share the same agent ID and created_at.
const insertWorkspaceAgentDevcontainers = `-- name: InsertWorkspaceAgentDevcontainers :many
INSERT INTO
	workspace_agent_devcontainers (workspace_agent_id, created_at, id, name, workspace_folder, config_path)
SELECT
	$1::uuid AS workspace_agent_id,
	$2::timestamptz AS created_at,
	unnest($3::uuid[]) AS id,
	unnest($4::text[]) AS name,
	unnest($5::text[]) AS workspace_folder,
	unnest($6::text[]) AS config_path
RETURNING workspace_agent_devcontainers.id, workspace_agent_devcontainers.workspace_agent_id, workspace_agent_devcontainers.created_at, workspace_agent_devcontainers.workspace_folder, workspace_agent_devcontainers.config_path, workspace_agent_devcontainers.name
`

// InsertWorkspaceAgentDevcontainersParams carries one agent ID/timestamp and
// parallel per-devcontainer arrays; the slices are expected to be the same
// length (they are zipped by unnest in the SQL above).
type InsertWorkspaceAgentDevcontainersParams struct {
	WorkspaceAgentID uuid.UUID `db:"workspace_agent_id" json:"workspace_agent_id"`
	CreatedAt time.Time `db:"created_at" json:"created_at"`
	ID []uuid.UUID `db:"id" json:"id"`
	Name []string `db:"name" json:"name"`
	WorkspaceFolder []string `db:"workspace_folder" json:"workspace_folder"`
	ConfigPath []string `db:"config_path" json:"config_path"`
}

// InsertWorkspaceAgentDevcontainers bulk-inserts devcontainers and returns
// the inserted rows.
func (q *sqlQuerier) InsertWorkspaceAgentDevcontainers(ctx context.Context, arg InsertWorkspaceAgentDevcontainersParams) ([]WorkspaceAgentDevcontainer, error) {
	rows, err := q.db.QueryContext(ctx, insertWorkspaceAgentDevcontainers,
		arg.WorkspaceAgentID,
		arg.CreatedAt,
		pq.Array(arg.ID),
		pq.Array(arg.Name),
		pq.Array(arg.WorkspaceFolder),
		pq.Array(arg.ConfigPath),
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceAgentDevcontainer
	for rows.Next() {
		var i WorkspaceAgentDevcontainer
		if err := rows.Scan(
			&i.ID,
			&i.WorkspaceAgentID,
			&i.CreatedAt,
			&i.WorkspaceFolder,
			&i.ConfigPath,
			&i.Name,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// deleteWorkspaceAgentPortShare removes a single port-share row identified
// by its composite key (workspace, agent name, port).
const deleteWorkspaceAgentPortShare = `-- name: DeleteWorkspaceAgentPortShare :exec
DELETE FROM
	workspace_agent_port_share
WHERE
	workspace_id = $1
	AND agent_name = $2
	AND port = $3
`

// DeleteWorkspaceAgentPortShareParams is the composite key of a port share.
type DeleteWorkspaceAgentPortShareParams struct {
	WorkspaceID uuid.UUID `db:"workspace_id" json:"workspace_id"`
	AgentName string `db:"agent_name" json:"agent_name"`
	Port int32 `db:"port" json:"port"`
}

// DeleteWorkspaceAgentPortShare deletes the identified port share. Deleting
// a non-existent row is not an error.
func (q *sqlQuerier) DeleteWorkspaceAgentPortShare(ctx context.Context, arg DeleteWorkspaceAgentPortShareParams) error {
	_, err := q.db.ExecContext(ctx, deleteWorkspaceAgentPortShare, arg.WorkspaceID, arg.AgentName, arg.Port)
	return err
}
|
|
|
|
// deleteWorkspaceAgentPortSharesByTemplate removes every port share
// belonging to any workspace created from the given template.
const deleteWorkspaceAgentPortSharesByTemplate = `-- name: DeleteWorkspaceAgentPortSharesByTemplate :exec
DELETE FROM
	workspace_agent_port_share
WHERE
	workspace_id IN (
		SELECT
			id
		FROM
			workspaces
		WHERE
			template_id = $1
	)
`

// DeleteWorkspaceAgentPortSharesByTemplate deletes all port shares for
// workspaces of the given template.
func (q *sqlQuerier) DeleteWorkspaceAgentPortSharesByTemplate(ctx context.Context, templateID uuid.UUID) error {
	_, err := q.db.ExecContext(ctx, deleteWorkspaceAgentPortSharesByTemplate, templateID)
	return err
}
|
|
|
|
// getWorkspaceAgentPortShare looks up one port-share row by its composite
// key (workspace, agent name, port).
const getWorkspaceAgentPortShare = `-- name: GetWorkspaceAgentPortShare :one
SELECT
	workspace_id, agent_name, port, share_level, protocol
FROM
	workspace_agent_port_share
WHERE
	workspace_id = $1
	AND agent_name = $2
	AND port = $3
`

// GetWorkspaceAgentPortShareParams is the composite key of a port share.
type GetWorkspaceAgentPortShareParams struct {
	WorkspaceID uuid.UUID `db:"workspace_id" json:"workspace_id"`
	AgentName string `db:"agent_name" json:"agent_name"`
	Port int32 `db:"port" json:"port"`
}

// GetWorkspaceAgentPortShare fetches the identified port share; Scan returns
// sql.ErrNoRows if it does not exist.
func (q *sqlQuerier) GetWorkspaceAgentPortShare(ctx context.Context, arg GetWorkspaceAgentPortShareParams) (WorkspaceAgentPortShare, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceAgentPortShare, arg.WorkspaceID, arg.AgentName, arg.Port)
	var i WorkspaceAgentPortShare
	err := row.Scan(
		&i.WorkspaceID,
		&i.AgentName,
		&i.Port,
		&i.ShareLevel,
		&i.Protocol,
	)
	return i, err
}
|
|
|
|
// listWorkspaceAgentPortShares lists all port shares of one workspace.
const listWorkspaceAgentPortShares = `-- name: ListWorkspaceAgentPortShares :many
SELECT
	workspace_id, agent_name, port, share_level, protocol
FROM
	workspace_agent_port_share
WHERE
	workspace_id = $1
`

// ListWorkspaceAgentPortShares returns every port share configured for the
// given workspace.
func (q *sqlQuerier) ListWorkspaceAgentPortShares(ctx context.Context, workspaceID uuid.UUID) ([]WorkspaceAgentPortShare, error) {
	rows, err := q.db.QueryContext(ctx, listWorkspaceAgentPortShares, workspaceID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceAgentPortShare
	for rows.Next() {
		var i WorkspaceAgentPortShare
		if err := rows.Scan(
			&i.WorkspaceID,
			&i.AgentName,
			&i.Port,
			&i.ShareLevel,
			&i.Protocol,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// reduceWorkspaceAgentShareLevelToAuthenticatedByTemplate downgrades every
// 'public' port share on workspaces of the given template to
// 'authenticated'; other share levels are left untouched.
const reduceWorkspaceAgentShareLevelToAuthenticatedByTemplate = `-- name: ReduceWorkspaceAgentShareLevelToAuthenticatedByTemplate :exec
UPDATE
	workspace_agent_port_share
SET
	share_level = 'authenticated'
WHERE
	share_level = 'public'
	AND workspace_id IN (
		SELECT
			id
		FROM
			workspaces
		WHERE
			template_id = $1
	)
`

// ReduceWorkspaceAgentShareLevelToAuthenticatedByTemplate demotes public
// port shares to authenticated for all workspaces of a template.
func (q *sqlQuerier) ReduceWorkspaceAgentShareLevelToAuthenticatedByTemplate(ctx context.Context, templateID uuid.UUID) error {
	_, err := q.db.ExecContext(ctx, reduceWorkspaceAgentShareLevelToAuthenticatedByTemplate, templateID)
	return err
}
|
|
|
|
// upsertWorkspaceAgentPortShare inserts a port share or, on conflict with
// the (workspace_id, agent_name, port) key, updates its share_level and
// protocol, returning the resulting row either way.
const upsertWorkspaceAgentPortShare = `-- name: UpsertWorkspaceAgentPortShare :one
INSERT INTO
	workspace_agent_port_share (
		workspace_id,
		agent_name,
		port,
		share_level,
		protocol
	)
VALUES (
	$1,
	$2,
	$3,
	$4,
	$5
)
ON CONFLICT (
	workspace_id,
	agent_name,
	port
)
DO UPDATE SET
	share_level = $4,
	protocol = $5
RETURNING workspace_id, agent_name, port, share_level, protocol
`

// UpsertWorkspaceAgentPortShareParams is the full port-share record: the
// composite key plus the share level and protocol to set.
type UpsertWorkspaceAgentPortShareParams struct {
	WorkspaceID uuid.UUID `db:"workspace_id" json:"workspace_id"`
	AgentName string `db:"agent_name" json:"agent_name"`
	Port int32 `db:"port" json:"port"`
	ShareLevel AppSharingLevel `db:"share_level" json:"share_level"`
	Protocol PortShareProtocol `db:"protocol" json:"protocol"`
}

// UpsertWorkspaceAgentPortShare creates or updates a port share and returns
// the stored row.
func (q *sqlQuerier) UpsertWorkspaceAgentPortShare(ctx context.Context, arg UpsertWorkspaceAgentPortShareParams) (WorkspaceAgentPortShare, error) {
	row := q.db.QueryRowContext(ctx, upsertWorkspaceAgentPortShare,
		arg.WorkspaceID,
		arg.AgentName,
		arg.Port,
		arg.ShareLevel,
		arg.Protocol,
	)
	var i WorkspaceAgentPortShare
	err := row.Scan(
		&i.WorkspaceID,
		&i.AgentName,
		&i.Port,
		&i.ShareLevel,
		&i.Protocol,
	)
	return i, err
}
|
|
|
|
const fetchMemoryResourceMonitorsByAgentID = `-- name: FetchMemoryResourceMonitorsByAgentID :one
SELECT
	agent_id, enabled, threshold, created_at, updated_at, state, debounced_until
FROM
	workspace_agent_memory_resource_monitors
WHERE
	agent_id = $1
`

// FetchMemoryResourceMonitorsByAgentID returns the single memory resource
// monitor row for the given agent (one monitor per agent).
func (q *sqlQuerier) FetchMemoryResourceMonitorsByAgentID(ctx context.Context, agentID uuid.UUID) (WorkspaceAgentMemoryResourceMonitor, error) {
	row := q.db.QueryRowContext(ctx, fetchMemoryResourceMonitorsByAgentID, agentID)
	var i WorkspaceAgentMemoryResourceMonitor
	err := row.Scan(
		&i.AgentID,
		&i.Enabled,
		&i.Threshold,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.State,
		&i.DebouncedUntil,
	)
	return i, err
}
|
|
|
|
const fetchMemoryResourceMonitorsUpdatedAfter = `-- name: FetchMemoryResourceMonitorsUpdatedAfter :many
SELECT
	agent_id, enabled, threshold, created_at, updated_at, state, debounced_until
FROM
	workspace_agent_memory_resource_monitors
WHERE
	updated_at > $1
`

// FetchMemoryResourceMonitorsUpdatedAfter lists all memory resource monitors
// whose updated_at is strictly after the given time.
func (q *sqlQuerier) FetchMemoryResourceMonitorsUpdatedAfter(ctx context.Context, updatedAt time.Time) ([]WorkspaceAgentMemoryResourceMonitor, error) {
	rows, err := q.db.QueryContext(ctx, fetchMemoryResourceMonitorsUpdatedAfter, updatedAt)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceAgentMemoryResourceMonitor
	for rows.Next() {
		var i WorkspaceAgentMemoryResourceMonitor
		if err := rows.Scan(
			&i.AgentID,
			&i.Enabled,
			&i.Threshold,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.State,
			&i.DebouncedUntil,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const fetchVolumesResourceMonitorsByAgentID = `-- name: FetchVolumesResourceMonitorsByAgentID :many
SELECT
	agent_id, enabled, threshold, path, created_at, updated_at, state, debounced_until
FROM
	workspace_agent_volume_resource_monitors
WHERE
	agent_id = $1
`

// FetchVolumesResourceMonitorsByAgentID lists every volume resource monitor
// for the given agent (an agent may monitor multiple paths).
func (q *sqlQuerier) FetchVolumesResourceMonitorsByAgentID(ctx context.Context, agentID uuid.UUID) ([]WorkspaceAgentVolumeResourceMonitor, error) {
	rows, err := q.db.QueryContext(ctx, fetchVolumesResourceMonitorsByAgentID, agentID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceAgentVolumeResourceMonitor
	for rows.Next() {
		var i WorkspaceAgentVolumeResourceMonitor
		if err := rows.Scan(
			&i.AgentID,
			&i.Enabled,
			&i.Threshold,
			&i.Path,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.State,
			&i.DebouncedUntil,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const fetchVolumesResourceMonitorsUpdatedAfter = `-- name: FetchVolumesResourceMonitorsUpdatedAfter :many
SELECT
	agent_id, enabled, threshold, path, created_at, updated_at, state, debounced_until
FROM
	workspace_agent_volume_resource_monitors
WHERE
	updated_at > $1
`

// FetchVolumesResourceMonitorsUpdatedAfter lists all volume resource monitors
// whose updated_at is strictly after the given time.
func (q *sqlQuerier) FetchVolumesResourceMonitorsUpdatedAfter(ctx context.Context, updatedAt time.Time) ([]WorkspaceAgentVolumeResourceMonitor, error) {
	rows, err := q.db.QueryContext(ctx, fetchVolumesResourceMonitorsUpdatedAfter, updatedAt)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceAgentVolumeResourceMonitor
	for rows.Next() {
		var i WorkspaceAgentVolumeResourceMonitor
		if err := rows.Scan(
			&i.AgentID,
			&i.Enabled,
			&i.Threshold,
			&i.Path,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.State,
			&i.DebouncedUntil,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const insertMemoryResourceMonitor = `-- name: InsertMemoryResourceMonitor :one
INSERT INTO
	workspace_agent_memory_resource_monitors (
		agent_id,
		enabled,
		state,
		threshold,
		created_at,
		updated_at,
		debounced_until
	)
VALUES
	($1, $2, $3, $4, $5, $6, $7) RETURNING agent_id, enabled, threshold, created_at, updated_at, state, debounced_until
`

// InsertMemoryResourceMonitorParams is the full column set for a new memory
// resource monitor row.
type InsertMemoryResourceMonitorParams struct {
	AgentID        uuid.UUID                  `db:"agent_id" json:"agent_id"`
	Enabled        bool                       `db:"enabled" json:"enabled"`
	State          WorkspaceAgentMonitorState `db:"state" json:"state"`
	Threshold      int32                      `db:"threshold" json:"threshold"`
	CreatedAt      time.Time                  `db:"created_at" json:"created_at"`
	UpdatedAt      time.Time                  `db:"updated_at" json:"updated_at"`
	DebouncedUntil time.Time                  `db:"debounced_until" json:"debounced_until"`
}

// InsertMemoryResourceMonitor creates a memory resource monitor for an agent
// and returns the inserted row.
func (q *sqlQuerier) InsertMemoryResourceMonitor(ctx context.Context, arg InsertMemoryResourceMonitorParams) (WorkspaceAgentMemoryResourceMonitor, error) {
	row := q.db.QueryRowContext(ctx, insertMemoryResourceMonitor,
		arg.AgentID,
		arg.Enabled,
		arg.State,
		arg.Threshold,
		arg.CreatedAt,
		arg.UpdatedAt,
		arg.DebouncedUntil,
	)
	var i WorkspaceAgentMemoryResourceMonitor
	err := row.Scan(
		&i.AgentID,
		&i.Enabled,
		&i.Threshold,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.State,
		&i.DebouncedUntil,
	)
	return i, err
}
|
|
|
|
const insertVolumeResourceMonitor = `-- name: InsertVolumeResourceMonitor :one
INSERT INTO
	workspace_agent_volume_resource_monitors (
		agent_id,
		path,
		enabled,
		state,
		threshold,
		created_at,
		updated_at,
		debounced_until
	)
VALUES
	($1, $2, $3, $4, $5, $6, $7, $8) RETURNING agent_id, enabled, threshold, path, created_at, updated_at, state, debounced_until
`

// InsertVolumeResourceMonitorParams is the full column set for a new volume
// resource monitor row; Path identifies the monitored volume.
type InsertVolumeResourceMonitorParams struct {
	AgentID        uuid.UUID                  `db:"agent_id" json:"agent_id"`
	Path           string                     `db:"path" json:"path"`
	Enabled        bool                       `db:"enabled" json:"enabled"`
	State          WorkspaceAgentMonitorState `db:"state" json:"state"`
	Threshold      int32                      `db:"threshold" json:"threshold"`
	CreatedAt      time.Time                  `db:"created_at" json:"created_at"`
	UpdatedAt      time.Time                  `db:"updated_at" json:"updated_at"`
	DebouncedUntil time.Time                  `db:"debounced_until" json:"debounced_until"`
}

// InsertVolumeResourceMonitor creates a volume resource monitor for an agent
// and returns the inserted row.
func (q *sqlQuerier) InsertVolumeResourceMonitor(ctx context.Context, arg InsertVolumeResourceMonitorParams) (WorkspaceAgentVolumeResourceMonitor, error) {
	row := q.db.QueryRowContext(ctx, insertVolumeResourceMonitor,
		arg.AgentID,
		arg.Path,
		arg.Enabled,
		arg.State,
		arg.Threshold,
		arg.CreatedAt,
		arg.UpdatedAt,
		arg.DebouncedUntil,
	)
	var i WorkspaceAgentVolumeResourceMonitor
	err := row.Scan(
		&i.AgentID,
		&i.Enabled,
		&i.Threshold,
		&i.Path,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.State,
		&i.DebouncedUntil,
	)
	return i, err
}
|
|
|
|
const updateMemoryResourceMonitor = `-- name: UpdateMemoryResourceMonitor :exec
UPDATE workspace_agent_memory_resource_monitors
SET
	updated_at = $2,
	state = $3,
	debounced_until = $4
WHERE
	agent_id = $1
`

// UpdateMemoryResourceMonitorParams identifies the monitor by agent and
// carries the mutable fields (updated_at, state, debounced_until).
type UpdateMemoryResourceMonitorParams struct {
	AgentID        uuid.UUID                  `db:"agent_id" json:"agent_id"`
	UpdatedAt      time.Time                  `db:"updated_at" json:"updated_at"`
	State          WorkspaceAgentMonitorState `db:"state" json:"state"`
	DebouncedUntil time.Time                  `db:"debounced_until" json:"debounced_until"`
}

// UpdateMemoryResourceMonitor updates the state, timestamp, and debounce
// deadline of the agent's memory resource monitor.
func (q *sqlQuerier) UpdateMemoryResourceMonitor(ctx context.Context, arg UpdateMemoryResourceMonitorParams) error {
	_, err := q.db.ExecContext(ctx, updateMemoryResourceMonitor,
		arg.AgentID,
		arg.UpdatedAt,
		arg.State,
		arg.DebouncedUntil,
	)
	return err
}
|
|
|
|
const updateVolumeResourceMonitor = `-- name: UpdateVolumeResourceMonitor :exec
UPDATE workspace_agent_volume_resource_monitors
SET
	updated_at = $3,
	state = $4,
	debounced_until = $5
WHERE
	agent_id = $1 AND path = $2
`

// UpdateVolumeResourceMonitorParams identifies the monitor by (agent, path)
// and carries the mutable fields (updated_at, state, debounced_until).
type UpdateVolumeResourceMonitorParams struct {
	AgentID        uuid.UUID                  `db:"agent_id" json:"agent_id"`
	Path           string                     `db:"path" json:"path"`
	UpdatedAt      time.Time                  `db:"updated_at" json:"updated_at"`
	State          WorkspaceAgentMonitorState `db:"state" json:"state"`
	DebouncedUntil time.Time                  `db:"debounced_until" json:"debounced_until"`
}

// UpdateVolumeResourceMonitor updates the state, timestamp, and debounce
// deadline of the monitor for a specific agent path.
func (q *sqlQuerier) UpdateVolumeResourceMonitor(ctx context.Context, arg UpdateVolumeResourceMonitorParams) error {
	_, err := q.db.ExecContext(ctx, updateVolumeResourceMonitor,
		arg.AgentID,
		arg.Path,
		arg.UpdatedAt,
		arg.State,
		arg.DebouncedUntil,
	)
	return err
}
|
|
|
|
const deleteOldWorkspaceAgentLogs = `-- name: DeleteOldWorkspaceAgentLogs :exec
WITH
	latest_builds AS (
		SELECT
			workspace_id, max(build_number) AS max_build_number
		FROM
			workspace_builds
		GROUP BY
			workspace_id
	),
	old_agents AS (
		SELECT
			wa.id
		FROM
			workspace_agents AS wa
		JOIN
			workspace_resources AS wr
		ON
			wa.resource_id = wr.id
		JOIN
			workspace_builds AS wb
		ON
			wb.job_id = wr.job_id
		LEFT JOIN
			latest_builds
		ON
			latest_builds.workspace_id = wb.workspace_id
		AND
			latest_builds.max_build_number = wb.build_number
		WHERE
			-- Filter out the latest builds for each workspace.
			latest_builds.workspace_id IS NULL
			AND CASE
				-- If the last time the agent connected was before @threshold
				WHEN wa.last_connected_at IS NOT NULL THEN
					wa.last_connected_at < $1 :: timestamptz
				-- The agent never connected, and was created before @threshold
				ELSE wa.created_at < $1 :: timestamptz
			END
	)
DELETE FROM workspace_agent_logs WHERE agent_id IN (SELECT id FROM old_agents)
`

// If an agent hasn't connected in the last 7 days, we purge its logs.
// Exception: if the logs are related to the latest build, we keep those around.
// Logs can take up a lot of space, so it's important we clean up frequently.
func (q *sqlQuerier) DeleteOldWorkspaceAgentLogs(ctx context.Context, threshold time.Time) error {
	_, err := q.db.ExecContext(ctx, deleteOldWorkspaceAgentLogs, threshold)
	return err
}
|
|
|
|
const deleteWorkspaceSubAgentByID = `-- name: DeleteWorkspaceSubAgentByID :exec
UPDATE
	workspace_agents
SET
	deleted = TRUE
WHERE
	id = $1
	AND parent_id IS NOT NULL
	AND deleted = FALSE
`

// DeleteWorkspaceSubAgentByID soft-deletes a sub agent (an agent with a
// parent_id) by setting its deleted flag; top-level agents are unaffected.
func (q *sqlQuerier) DeleteWorkspaceSubAgentByID(ctx context.Context, id uuid.UUID) error {
	_, err := q.db.ExecContext(ctx, deleteWorkspaceSubAgentByID, id)
	return err
}
|
|
|
|
const getWorkspaceAgentAndLatestBuildByAuthToken = `-- name: GetWorkspaceAgentAndLatestBuildByAuthToken :one
SELECT
	workspaces.id, workspaces.created_at, workspaces.updated_at, workspaces.owner_id, workspaces.organization_id, workspaces.template_id, workspaces.deleted, workspaces.name, workspaces.autostart_schedule, workspaces.ttl, workspaces.last_used_at, workspaces.dormant_at, workspaces.deleting_at, workspaces.automatic_updates, workspaces.favorite, workspaces.next_start_at,
	workspace_agents.id, workspace_agents.created_at, workspace_agents.updated_at, workspace_agents.name, workspace_agents.first_connected_at, workspace_agents.last_connected_at, workspace_agents.disconnected_at, workspace_agents.resource_id, workspace_agents.auth_token, workspace_agents.auth_instance_id, workspace_agents.architecture, workspace_agents.environment_variables, workspace_agents.operating_system, workspace_agents.instance_metadata, workspace_agents.resource_metadata, workspace_agents.directory, workspace_agents.version, workspace_agents.last_connected_replica_id, workspace_agents.connection_timeout_seconds, workspace_agents.troubleshooting_url, workspace_agents.motd_file, workspace_agents.lifecycle_state, workspace_agents.expanded_directory, workspace_agents.logs_length, workspace_agents.logs_overflowed, workspace_agents.started_at, workspace_agents.ready_at, workspace_agents.subsystems, workspace_agents.display_apps, workspace_agents.api_version, workspace_agents.display_order, workspace_agents.parent_id, workspace_agents.api_key_scope, workspace_agents.deleted,
	workspace_build_with_user.id, workspace_build_with_user.created_at, workspace_build_with_user.updated_at, workspace_build_with_user.workspace_id, workspace_build_with_user.template_version_id, workspace_build_with_user.build_number, workspace_build_with_user.transition, workspace_build_with_user.initiator_id, workspace_build_with_user.provisioner_state, workspace_build_with_user.job_id, workspace_build_with_user.deadline, workspace_build_with_user.reason, workspace_build_with_user.daily_cost, workspace_build_with_user.max_deadline, workspace_build_with_user.template_version_preset_id, workspace_build_with_user.has_ai_task, workspace_build_with_user.ai_tasks_sidebar_app_id, workspace_build_with_user.initiator_by_avatar_url, workspace_build_with_user.initiator_by_username, workspace_build_with_user.initiator_by_name
FROM
	workspace_agents
JOIN
	workspace_resources
ON
	workspace_agents.resource_id = workspace_resources.id
JOIN
	workspace_build_with_user
ON
	workspace_resources.job_id = workspace_build_with_user.job_id
JOIN
	workspaces
ON
	workspace_build_with_user.workspace_id = workspaces.id
WHERE
	-- This should only match 1 agent, so 1 returned row or 0.
	workspace_agents.auth_token = $1::uuid
	AND workspaces.deleted = FALSE
	-- Filter out deleted sub agents.
	AND workspace_agents.deleted = FALSE
	-- Filter out builds that are not the latest.
	AND workspace_build_with_user.build_number = (
		-- Select from workspace_builds as it's one less join compared
		-- to workspace_build_with_user.
		SELECT
			MAX(build_number)
		FROM
			workspace_builds
		WHERE
			workspace_id = workspace_build_with_user.workspace_id
	)
`

// GetWorkspaceAgentAndLatestBuildByAuthTokenRow bundles the workspace, the
// matching agent, and the workspace's latest build into a single result.
type GetWorkspaceAgentAndLatestBuildByAuthTokenRow struct {
	WorkspaceTable WorkspaceTable `db:"workspace_table" json:"workspace_table"`
	WorkspaceAgent WorkspaceAgent `db:"workspace_agent" json:"workspace_agent"`
	WorkspaceBuild WorkspaceBuild `db:"workspace_build" json:"workspace_build"`
}

// GetWorkspaceAgentAndLatestBuildByAuthToken looks up an agent by its auth
// token and returns it together with its (non-deleted) workspace and that
// workspace's latest build. Deleted sub agents and non-latest builds are
// excluded, so at most one row matches.
func (q *sqlQuerier) GetWorkspaceAgentAndLatestBuildByAuthToken(ctx context.Context, authToken uuid.UUID) (GetWorkspaceAgentAndLatestBuildByAuthTokenRow, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceAgentAndLatestBuildByAuthToken, authToken)
	var i GetWorkspaceAgentAndLatestBuildByAuthTokenRow
	err := row.Scan(
		&i.WorkspaceTable.ID,
		&i.WorkspaceTable.CreatedAt,
		&i.WorkspaceTable.UpdatedAt,
		&i.WorkspaceTable.OwnerID,
		&i.WorkspaceTable.OrganizationID,
		&i.WorkspaceTable.TemplateID,
		&i.WorkspaceTable.Deleted,
		&i.WorkspaceTable.Name,
		&i.WorkspaceTable.AutostartSchedule,
		&i.WorkspaceTable.Ttl,
		&i.WorkspaceTable.LastUsedAt,
		&i.WorkspaceTable.DormantAt,
		&i.WorkspaceTable.DeletingAt,
		&i.WorkspaceTable.AutomaticUpdates,
		&i.WorkspaceTable.Favorite,
		&i.WorkspaceTable.NextStartAt,
		&i.WorkspaceAgent.ID,
		&i.WorkspaceAgent.CreatedAt,
		&i.WorkspaceAgent.UpdatedAt,
		&i.WorkspaceAgent.Name,
		&i.WorkspaceAgent.FirstConnectedAt,
		&i.WorkspaceAgent.LastConnectedAt,
		&i.WorkspaceAgent.DisconnectedAt,
		&i.WorkspaceAgent.ResourceID,
		&i.WorkspaceAgent.AuthToken,
		&i.WorkspaceAgent.AuthInstanceID,
		&i.WorkspaceAgent.Architecture,
		&i.WorkspaceAgent.EnvironmentVariables,
		&i.WorkspaceAgent.OperatingSystem,
		&i.WorkspaceAgent.InstanceMetadata,
		&i.WorkspaceAgent.ResourceMetadata,
		&i.WorkspaceAgent.Directory,
		&i.WorkspaceAgent.Version,
		&i.WorkspaceAgent.LastConnectedReplicaID,
		&i.WorkspaceAgent.ConnectionTimeoutSeconds,
		&i.WorkspaceAgent.TroubleshootingURL,
		&i.WorkspaceAgent.MOTDFile,
		&i.WorkspaceAgent.LifecycleState,
		&i.WorkspaceAgent.ExpandedDirectory,
		&i.WorkspaceAgent.LogsLength,
		&i.WorkspaceAgent.LogsOverflowed,
		&i.WorkspaceAgent.StartedAt,
		&i.WorkspaceAgent.ReadyAt,
		pq.Array(&i.WorkspaceAgent.Subsystems),
		pq.Array(&i.WorkspaceAgent.DisplayApps),
		&i.WorkspaceAgent.APIVersion,
		&i.WorkspaceAgent.DisplayOrder,
		&i.WorkspaceAgent.ParentID,
		&i.WorkspaceAgent.APIKeyScope,
		&i.WorkspaceAgent.Deleted,
		&i.WorkspaceBuild.ID,
		&i.WorkspaceBuild.CreatedAt,
		&i.WorkspaceBuild.UpdatedAt,
		&i.WorkspaceBuild.WorkspaceID,
		&i.WorkspaceBuild.TemplateVersionID,
		&i.WorkspaceBuild.BuildNumber,
		&i.WorkspaceBuild.Transition,
		&i.WorkspaceBuild.InitiatorID,
		&i.WorkspaceBuild.ProvisionerState,
		&i.WorkspaceBuild.JobID,
		&i.WorkspaceBuild.Deadline,
		&i.WorkspaceBuild.Reason,
		&i.WorkspaceBuild.DailyCost,
		&i.WorkspaceBuild.MaxDeadline,
		&i.WorkspaceBuild.TemplateVersionPresetID,
		&i.WorkspaceBuild.HasAITask,
		&i.WorkspaceBuild.AITasksSidebarAppID,
		&i.WorkspaceBuild.InitiatorByAvatarUrl,
		&i.WorkspaceBuild.InitiatorByUsername,
		&i.WorkspaceBuild.InitiatorByName,
	)
	return i, err
}
|
|
|
|
const getWorkspaceAgentByID = `-- name: GetWorkspaceAgentByID :one
SELECT
	id, created_at, updated_at, name, first_connected_at, last_connected_at, disconnected_at, resource_id, auth_token, auth_instance_id, architecture, environment_variables, operating_system, instance_metadata, resource_metadata, directory, version, last_connected_replica_id, connection_timeout_seconds, troubleshooting_url, motd_file, lifecycle_state, expanded_directory, logs_length, logs_overflowed, started_at, ready_at, subsystems, display_apps, api_version, display_order, parent_id, api_key_scope, deleted
FROM
	workspace_agents
WHERE
	id = $1
	-- Filter out deleted sub agents.
	AND deleted = FALSE
`

// GetWorkspaceAgentByID fetches a single non-deleted workspace agent by its
// primary key.
func (q *sqlQuerier) GetWorkspaceAgentByID(ctx context.Context, id uuid.UUID) (WorkspaceAgent, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceAgentByID, id)
	var i WorkspaceAgent
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Name,
		&i.FirstConnectedAt,
		&i.LastConnectedAt,
		&i.DisconnectedAt,
		&i.ResourceID,
		&i.AuthToken,
		&i.AuthInstanceID,
		&i.Architecture,
		&i.EnvironmentVariables,
		&i.OperatingSystem,
		&i.InstanceMetadata,
		&i.ResourceMetadata,
		&i.Directory,
		&i.Version,
		&i.LastConnectedReplicaID,
		&i.ConnectionTimeoutSeconds,
		&i.TroubleshootingURL,
		&i.MOTDFile,
		&i.LifecycleState,
		&i.ExpandedDirectory,
		&i.LogsLength,
		&i.LogsOverflowed,
		&i.StartedAt,
		&i.ReadyAt,
		pq.Array(&i.Subsystems),
		pq.Array(&i.DisplayApps),
		&i.APIVersion,
		&i.DisplayOrder,
		&i.ParentID,
		&i.APIKeyScope,
		&i.Deleted,
	)
	return i, err
}
|
|
|
|
const getWorkspaceAgentByInstanceID = `-- name: GetWorkspaceAgentByInstanceID :one
SELECT
	id, created_at, updated_at, name, first_connected_at, last_connected_at, disconnected_at, resource_id, auth_token, auth_instance_id, architecture, environment_variables, operating_system, instance_metadata, resource_metadata, directory, version, last_connected_replica_id, connection_timeout_seconds, troubleshooting_url, motd_file, lifecycle_state, expanded_directory, logs_length, logs_overflowed, started_at, ready_at, subsystems, display_apps, api_version, display_order, parent_id, api_key_scope, deleted
FROM
	workspace_agents
WHERE
	auth_instance_id = $1 :: TEXT
	-- Filter out deleted sub agents.
	AND deleted = FALSE
ORDER BY
	created_at DESC
`

// GetWorkspaceAgentByInstanceID fetches the most recently created non-deleted
// agent with the given auth instance ID (ORDER BY created_at DESC on a :one
// query returns the newest match).
func (q *sqlQuerier) GetWorkspaceAgentByInstanceID(ctx context.Context, authInstanceID string) (WorkspaceAgent, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceAgentByInstanceID, authInstanceID)
	var i WorkspaceAgent
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Name,
		&i.FirstConnectedAt,
		&i.LastConnectedAt,
		&i.DisconnectedAt,
		&i.ResourceID,
		&i.AuthToken,
		&i.AuthInstanceID,
		&i.Architecture,
		&i.EnvironmentVariables,
		&i.OperatingSystem,
		&i.InstanceMetadata,
		&i.ResourceMetadata,
		&i.Directory,
		&i.Version,
		&i.LastConnectedReplicaID,
		&i.ConnectionTimeoutSeconds,
		&i.TroubleshootingURL,
		&i.MOTDFile,
		&i.LifecycleState,
		&i.ExpandedDirectory,
		&i.LogsLength,
		&i.LogsOverflowed,
		&i.StartedAt,
		&i.ReadyAt,
		pq.Array(&i.Subsystems),
		pq.Array(&i.DisplayApps),
		&i.APIVersion,
		&i.DisplayOrder,
		&i.ParentID,
		&i.APIKeyScope,
		&i.Deleted,
	)
	return i, err
}
|
|
|
|
const getWorkspaceAgentLifecycleStateByID = `-- name: GetWorkspaceAgentLifecycleStateByID :one
SELECT
	lifecycle_state,
	started_at,
	ready_at
FROM
	workspace_agents
WHERE
	id = $1
`

// GetWorkspaceAgentLifecycleStateByIDRow holds just the lifecycle columns of
// an agent; started_at/ready_at are nullable.
type GetWorkspaceAgentLifecycleStateByIDRow struct {
	LifecycleState WorkspaceAgentLifecycleState `db:"lifecycle_state" json:"lifecycle_state"`
	StartedAt      sql.NullTime                 `db:"started_at" json:"started_at"`
	ReadyAt        sql.NullTime                 `db:"ready_at" json:"ready_at"`
}

// GetWorkspaceAgentLifecycleStateByID returns only the lifecycle state and
// start/ready timestamps for the given agent.
func (q *sqlQuerier) GetWorkspaceAgentLifecycleStateByID(ctx context.Context, id uuid.UUID) (GetWorkspaceAgentLifecycleStateByIDRow, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceAgentLifecycleStateByID, id)
	var i GetWorkspaceAgentLifecycleStateByIDRow
	err := row.Scan(&i.LifecycleState, &i.StartedAt, &i.ReadyAt)
	return i, err
}
|
|
|
|
const getWorkspaceAgentLogSourcesByAgentIDs = `-- name: GetWorkspaceAgentLogSourcesByAgentIDs :many
SELECT workspace_agent_id, id, created_at, display_name, icon FROM workspace_agent_log_sources WHERE workspace_agent_id = ANY($1 :: uuid [ ])
`

// GetWorkspaceAgentLogSourcesByAgentIDs lists the log sources belonging to
// any of the given agent IDs (batched via a uuid[] parameter).
func (q *sqlQuerier) GetWorkspaceAgentLogSourcesByAgentIDs(ctx context.Context, ids []uuid.UUID) ([]WorkspaceAgentLogSource, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceAgentLogSourcesByAgentIDs, pq.Array(ids))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceAgentLogSource
	for rows.Next() {
		var i WorkspaceAgentLogSource
		if err := rows.Scan(
			&i.WorkspaceAgentID,
			&i.ID,
			&i.CreatedAt,
			&i.DisplayName,
			&i.Icon,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getWorkspaceAgentLogsAfter = `-- name: GetWorkspaceAgentLogsAfter :many
SELECT
	agent_id, created_at, output, id, level, log_source_id
FROM
	workspace_agent_logs
WHERE
	agent_id = $1
	AND (
		id > $2
	) ORDER BY id ASC
`

// GetWorkspaceAgentLogsAfterParams pages agent logs by log ID: CreatedAfter
// is compared against the log's id column (a monotonically increasing int64),
// not created_at.
type GetWorkspaceAgentLogsAfterParams struct {
	AgentID      uuid.UUID `db:"agent_id" json:"agent_id"`
	CreatedAfter int64     `db:"created_after" json:"created_after"`
}

// GetWorkspaceAgentLogsAfter lists an agent's logs with id greater than the
// given cursor, ordered ascending by id.
func (q *sqlQuerier) GetWorkspaceAgentLogsAfter(ctx context.Context, arg GetWorkspaceAgentLogsAfterParams) ([]WorkspaceAgentLog, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceAgentLogsAfter, arg.AgentID, arg.CreatedAfter)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceAgentLog
	for rows.Next() {
		var i WorkspaceAgentLog
		if err := rows.Scan(
			&i.AgentID,
			&i.CreatedAt,
			&i.Output,
			&i.ID,
			&i.Level,
			&i.LogSourceID,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getWorkspaceAgentMetadata = `-- name: GetWorkspaceAgentMetadata :many
SELECT
	workspace_agent_id, display_name, key, script, value, error, timeout, interval, collected_at, display_order
FROM
	workspace_agent_metadata
WHERE
	workspace_agent_id = $1
	AND CASE WHEN COALESCE(array_length($2::text[], 1), 0) > 0 THEN key = ANY($2::text[]) ELSE TRUE END
`

// GetWorkspaceAgentMetadataParams selects metadata for one agent; an empty
// Keys slice matches all keys, a non-empty slice filters to those keys only.
type GetWorkspaceAgentMetadataParams struct {
	WorkspaceAgentID uuid.UUID `db:"workspace_agent_id" json:"workspace_agent_id"`
	Keys             []string  `db:"keys" json:"keys"`
}

// GetWorkspaceAgentMetadata lists the metadata entries for an agent,
// optionally restricted to a set of keys.
func (q *sqlQuerier) GetWorkspaceAgentMetadata(ctx context.Context, arg GetWorkspaceAgentMetadataParams) ([]WorkspaceAgentMetadatum, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceAgentMetadata, arg.WorkspaceAgentID, pq.Array(arg.Keys))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceAgentMetadatum
	for rows.Next() {
		var i WorkspaceAgentMetadatum
		if err := rows.Scan(
			&i.WorkspaceAgentID,
			&i.DisplayName,
			&i.Key,
			&i.Script,
			&i.Value,
			&i.Error,
			&i.Timeout,
			&i.Interval,
			&i.CollectedAt,
			&i.DisplayOrder,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getWorkspaceAgentScriptTimingsByBuildID = `-- name: GetWorkspaceAgentScriptTimingsByBuildID :many
SELECT
	DISTINCT ON (workspace_agent_script_timings.script_id) workspace_agent_script_timings.script_id, workspace_agent_script_timings.started_at, workspace_agent_script_timings.ended_at, workspace_agent_script_timings.exit_code, workspace_agent_script_timings.stage, workspace_agent_script_timings.status,
	workspace_agent_scripts.display_name,
	workspace_agents.id as workspace_agent_id,
	workspace_agents.name as workspace_agent_name
FROM workspace_agent_script_timings
INNER JOIN workspace_agent_scripts ON workspace_agent_scripts.id = workspace_agent_script_timings.script_id
INNER JOIN workspace_agents ON workspace_agents.id = workspace_agent_scripts.workspace_agent_id
INNER JOIN workspace_resources ON workspace_resources.id = workspace_agents.resource_id
INNER JOIN workspace_builds ON workspace_builds.job_id = workspace_resources.job_id
WHERE workspace_builds.id = $1
ORDER BY workspace_agent_script_timings.script_id, workspace_agent_script_timings.started_at
`

// GetWorkspaceAgentScriptTimingsByBuildIDRow is one script timing joined with
// its script display name and owning agent identity.
type GetWorkspaceAgentScriptTimingsByBuildIDRow struct {
	ScriptID           uuid.UUID                        `db:"script_id" json:"script_id"`
	StartedAt          time.Time                        `db:"started_at" json:"started_at"`
	EndedAt            time.Time                        `db:"ended_at" json:"ended_at"`
	ExitCode           int32                            `db:"exit_code" json:"exit_code"`
	Stage              WorkspaceAgentScriptTimingStage  `db:"stage" json:"stage"`
	Status             WorkspaceAgentScriptTimingStatus `db:"status" json:"status"`
	DisplayName        string                           `db:"display_name" json:"display_name"`
	WorkspaceAgentID   uuid.UUID                        `db:"workspace_agent_id" json:"workspace_agent_id"`
	WorkspaceAgentName string                           `db:"workspace_agent_name" json:"workspace_agent_name"`
}

// GetWorkspaceAgentScriptTimingsByBuildID lists, for a build, one timing per
// script (DISTINCT ON script_id keeps the earliest started_at per script).
func (q *sqlQuerier) GetWorkspaceAgentScriptTimingsByBuildID(ctx context.Context, id uuid.UUID) ([]GetWorkspaceAgentScriptTimingsByBuildIDRow, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceAgentScriptTimingsByBuildID, id)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetWorkspaceAgentScriptTimingsByBuildIDRow
	for rows.Next() {
		var i GetWorkspaceAgentScriptTimingsByBuildIDRow
		if err := rows.Scan(
			&i.ScriptID,
			&i.StartedAt,
			&i.EndedAt,
			&i.ExitCode,
			&i.Stage,
			&i.Status,
			&i.DisplayName,
			&i.WorkspaceAgentID,
			&i.WorkspaceAgentName,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getWorkspaceAgentsByParentID = `-- name: GetWorkspaceAgentsByParentID :many
SELECT
	id, created_at, updated_at, name, first_connected_at, last_connected_at, disconnected_at, resource_id, auth_token, auth_instance_id, architecture, environment_variables, operating_system, instance_metadata, resource_metadata, directory, version, last_connected_replica_id, connection_timeout_seconds, troubleshooting_url, motd_file, lifecycle_state, expanded_directory, logs_length, logs_overflowed, started_at, ready_at, subsystems, display_apps, api_version, display_order, parent_id, api_key_scope, deleted
FROM
	workspace_agents
WHERE
	parent_id = $1::uuid
	AND deleted = FALSE
`

// GetWorkspaceAgentsByParentID lists the non-deleted sub agents whose
// parent_id matches the given agent ID.
func (q *sqlQuerier) GetWorkspaceAgentsByParentID(ctx context.Context, parentID uuid.UUID) ([]WorkspaceAgent, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceAgentsByParentID, parentID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceAgent
	for rows.Next() {
		var i WorkspaceAgent
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.Name,
			&i.FirstConnectedAt,
			&i.LastConnectedAt,
			&i.DisconnectedAt,
			&i.ResourceID,
			&i.AuthToken,
			&i.AuthInstanceID,
			&i.Architecture,
			&i.EnvironmentVariables,
			&i.OperatingSystem,
			&i.InstanceMetadata,
			&i.ResourceMetadata,
			&i.Directory,
			&i.Version,
			&i.LastConnectedReplicaID,
			&i.ConnectionTimeoutSeconds,
			&i.TroubleshootingURL,
			&i.MOTDFile,
			&i.LifecycleState,
			&i.ExpandedDirectory,
			&i.LogsLength,
			&i.LogsOverflowed,
			&i.StartedAt,
			&i.ReadyAt,
			pq.Array(&i.Subsystems),
			pq.Array(&i.DisplayApps),
			&i.APIVersion,
			&i.DisplayOrder,
			&i.ParentID,
			&i.APIKeyScope,
			&i.Deleted,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getWorkspaceAgentsByResourceIDs = `-- name: GetWorkspaceAgentsByResourceIDs :many
SELECT
	id, created_at, updated_at, name, first_connected_at, last_connected_at, disconnected_at, resource_id, auth_token, auth_instance_id, architecture, environment_variables, operating_system, instance_metadata, resource_metadata, directory, version, last_connected_replica_id, connection_timeout_seconds, troubleshooting_url, motd_file, lifecycle_state, expanded_directory, logs_length, logs_overflowed, started_at, ready_at, subsystems, display_apps, api_version, display_order, parent_id, api_key_scope, deleted
FROM
	workspace_agents
WHERE
	resource_id = ANY($1 :: uuid [ ])
	-- Filter out deleted sub agents.
	AND deleted = FALSE
`

// GetWorkspaceAgentsByResourceIDs lists the non-deleted agents attached to
// any of the given workspace resource IDs (batched via a uuid[] parameter).
func (q *sqlQuerier) GetWorkspaceAgentsByResourceIDs(ctx context.Context, ids []uuid.UUID) ([]WorkspaceAgent, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceAgentsByResourceIDs, pq.Array(ids))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceAgent
	for rows.Next() {
		var i WorkspaceAgent
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.Name,
			&i.FirstConnectedAt,
			&i.LastConnectedAt,
			&i.DisconnectedAt,
			&i.ResourceID,
			&i.AuthToken,
			&i.AuthInstanceID,
			&i.Architecture,
			&i.EnvironmentVariables,
			&i.OperatingSystem,
			&i.InstanceMetadata,
			&i.ResourceMetadata,
			&i.Directory,
			&i.Version,
			&i.LastConnectedReplicaID,
			&i.ConnectionTimeoutSeconds,
			&i.TroubleshootingURL,
			&i.MOTDFile,
			&i.LifecycleState,
			&i.ExpandedDirectory,
			&i.LogsLength,
			&i.LogsOverflowed,
			&i.StartedAt,
			&i.ReadyAt,
			pq.Array(&i.Subsystems),
			pq.Array(&i.DisplayApps),
			&i.APIVersion,
			&i.DisplayOrder,
			&i.ParentID,
			&i.APIKeyScope,
			&i.Deleted,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getWorkspaceAgentsByWorkspaceAndBuildNumber joins agents -> resources ->
// builds to select the non-deleted agents belonging to one specific build of
// a workspace.
const getWorkspaceAgentsByWorkspaceAndBuildNumber = `-- name: GetWorkspaceAgentsByWorkspaceAndBuildNumber :many
SELECT
	workspace_agents.id, workspace_agents.created_at, workspace_agents.updated_at, workspace_agents.name, workspace_agents.first_connected_at, workspace_agents.last_connected_at, workspace_agents.disconnected_at, workspace_agents.resource_id, workspace_agents.auth_token, workspace_agents.auth_instance_id, workspace_agents.architecture, workspace_agents.environment_variables, workspace_agents.operating_system, workspace_agents.instance_metadata, workspace_agents.resource_metadata, workspace_agents.directory, workspace_agents.version, workspace_agents.last_connected_replica_id, workspace_agents.connection_timeout_seconds, workspace_agents.troubleshooting_url, workspace_agents.motd_file, workspace_agents.lifecycle_state, workspace_agents.expanded_directory, workspace_agents.logs_length, workspace_agents.logs_overflowed, workspace_agents.started_at, workspace_agents.ready_at, workspace_agents.subsystems, workspace_agents.display_apps, workspace_agents.api_version, workspace_agents.display_order, workspace_agents.parent_id, workspace_agents.api_key_scope, workspace_agents.deleted
FROM
	workspace_agents
JOIN
	workspace_resources ON workspace_agents.resource_id = workspace_resources.id
JOIN
	workspace_builds ON workspace_resources.job_id = workspace_builds.job_id
WHERE
	workspace_builds.workspace_id = $1 :: uuid AND
	workspace_builds.build_number = $2 :: int
	-- Filter out deleted sub agents.
	AND workspace_agents.deleted = FALSE
`

// GetWorkspaceAgentsByWorkspaceAndBuildNumberParams carries the two query
// arguments: the workspace and the build number within that workspace.
type GetWorkspaceAgentsByWorkspaceAndBuildNumberParams struct {
	WorkspaceID uuid.UUID `db:"workspace_id" json:"workspace_id"`
	BuildNumber int32     `db:"build_number" json:"build_number"`
}

// GetWorkspaceAgentsByWorkspaceAndBuildNumber returns the non-deleted agents
// attached to the resources of one specific build of a workspace. Generated
// by sqlc; do not edit by hand.
func (q *sqlQuerier) GetWorkspaceAgentsByWorkspaceAndBuildNumber(ctx context.Context, arg GetWorkspaceAgentsByWorkspaceAndBuildNumberParams) ([]WorkspaceAgent, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceAgentsByWorkspaceAndBuildNumber, arg.WorkspaceID, arg.BuildNumber)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceAgent
	for rows.Next() {
		var i WorkspaceAgent
		// Scan destinations mirror the SELECT column order above.
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.Name,
			&i.FirstConnectedAt,
			&i.LastConnectedAt,
			&i.DisconnectedAt,
			&i.ResourceID,
			&i.AuthToken,
			&i.AuthInstanceID,
			&i.Architecture,
			&i.EnvironmentVariables,
			&i.OperatingSystem,
			&i.InstanceMetadata,
			&i.ResourceMetadata,
			&i.Directory,
			&i.Version,
			&i.LastConnectedReplicaID,
			&i.ConnectionTimeoutSeconds,
			&i.TroubleshootingURL,
			&i.MOTDFile,
			&i.LifecycleState,
			&i.ExpandedDirectory,
			&i.LogsLength,
			&i.LogsOverflowed,
			&i.StartedAt,
			&i.ReadyAt,
			pq.Array(&i.Subsystems),
			pq.Array(&i.DisplayApps),
			&i.APIVersion,
			&i.DisplayOrder,
			&i.ParentID,
			&i.APIKeyScope,
			&i.Deleted,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getWorkspaceAgentsCreatedAfter selects all non-deleted agents created
// strictly after a given timestamp.
const getWorkspaceAgentsCreatedAfter = `-- name: GetWorkspaceAgentsCreatedAfter :many
SELECT id, created_at, updated_at, name, first_connected_at, last_connected_at, disconnected_at, resource_id, auth_token, auth_instance_id, architecture, environment_variables, operating_system, instance_metadata, resource_metadata, directory, version, last_connected_replica_id, connection_timeout_seconds, troubleshooting_url, motd_file, lifecycle_state, expanded_directory, logs_length, logs_overflowed, started_at, ready_at, subsystems, display_apps, api_version, display_order, parent_id, api_key_scope, deleted FROM workspace_agents
WHERE
	created_at > $1
	-- Filter out deleted sub agents.
	AND deleted = FALSE
`

// GetWorkspaceAgentsCreatedAfter returns every non-deleted workspace agent
// with created_at strictly after createdAt. Generated by sqlc; do not edit by
// hand.
func (q *sqlQuerier) GetWorkspaceAgentsCreatedAfter(ctx context.Context, createdAt time.Time) ([]WorkspaceAgent, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceAgentsCreatedAfter, createdAt)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceAgent
	for rows.Next() {
		var i WorkspaceAgent
		// Scan destinations mirror the SELECT column order above.
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.Name,
			&i.FirstConnectedAt,
			&i.LastConnectedAt,
			&i.DisconnectedAt,
			&i.ResourceID,
			&i.AuthToken,
			&i.AuthInstanceID,
			&i.Architecture,
			&i.EnvironmentVariables,
			&i.OperatingSystem,
			&i.InstanceMetadata,
			&i.ResourceMetadata,
			&i.Directory,
			&i.Version,
			&i.LastConnectedReplicaID,
			&i.ConnectionTimeoutSeconds,
			&i.TroubleshootingURL,
			&i.MOTDFile,
			&i.LifecycleState,
			&i.ExpandedDirectory,
			&i.LogsLength,
			&i.LogsOverflowed,
			&i.StartedAt,
			&i.ReadyAt,
			pq.Array(&i.Subsystems),
			pq.Array(&i.DisplayApps),
			&i.APIVersion,
			&i.DisplayOrder,
			&i.ParentID,
			&i.APIKeyScope,
			&i.Deleted,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getWorkspaceAgentsInLatestBuildByWorkspaceID selects the non-deleted agents
// of the build with the highest build_number for the given workspace (the
// latest build), via a MAX(build_number) subquery.
const getWorkspaceAgentsInLatestBuildByWorkspaceID = `-- name: GetWorkspaceAgentsInLatestBuildByWorkspaceID :many
SELECT
	workspace_agents.id, workspace_agents.created_at, workspace_agents.updated_at, workspace_agents.name, workspace_agents.first_connected_at, workspace_agents.last_connected_at, workspace_agents.disconnected_at, workspace_agents.resource_id, workspace_agents.auth_token, workspace_agents.auth_instance_id, workspace_agents.architecture, workspace_agents.environment_variables, workspace_agents.operating_system, workspace_agents.instance_metadata, workspace_agents.resource_metadata, workspace_agents.directory, workspace_agents.version, workspace_agents.last_connected_replica_id, workspace_agents.connection_timeout_seconds, workspace_agents.troubleshooting_url, workspace_agents.motd_file, workspace_agents.lifecycle_state, workspace_agents.expanded_directory, workspace_agents.logs_length, workspace_agents.logs_overflowed, workspace_agents.started_at, workspace_agents.ready_at, workspace_agents.subsystems, workspace_agents.display_apps, workspace_agents.api_version, workspace_agents.display_order, workspace_agents.parent_id, workspace_agents.api_key_scope, workspace_agents.deleted
FROM
	workspace_agents
JOIN
	workspace_resources ON workspace_agents.resource_id = workspace_resources.id
JOIN
	workspace_builds ON workspace_resources.job_id = workspace_builds.job_id
WHERE
	workspace_builds.workspace_id = $1 :: uuid AND
	workspace_builds.build_number = (
		SELECT
			MAX(build_number)
		FROM
			workspace_builds AS wb
		WHERE
			wb.workspace_id = $1 :: uuid
	)
	-- Filter out deleted sub agents.
	AND workspace_agents.deleted = FALSE
`

// GetWorkspaceAgentsInLatestBuildByWorkspaceID returns the non-deleted agents
// attached to the latest (highest build_number) build of the workspace.
// Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) GetWorkspaceAgentsInLatestBuildByWorkspaceID(ctx context.Context, workspaceID uuid.UUID) ([]WorkspaceAgent, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceAgentsInLatestBuildByWorkspaceID, workspaceID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceAgent
	for rows.Next() {
		var i WorkspaceAgent
		// Scan destinations mirror the SELECT column order above.
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.Name,
			&i.FirstConnectedAt,
			&i.LastConnectedAt,
			&i.DisconnectedAt,
			&i.ResourceID,
			&i.AuthToken,
			&i.AuthInstanceID,
			&i.Architecture,
			&i.EnvironmentVariables,
			&i.OperatingSystem,
			&i.InstanceMetadata,
			&i.ResourceMetadata,
			&i.Directory,
			&i.Version,
			&i.LastConnectedReplicaID,
			&i.ConnectionTimeoutSeconds,
			&i.TroubleshootingURL,
			&i.MOTDFile,
			&i.LifecycleState,
			&i.ExpandedDirectory,
			&i.LogsLength,
			&i.LogsOverflowed,
			&i.StartedAt,
			&i.ReadyAt,
			pq.Array(&i.Subsystems),
			pq.Array(&i.DisplayApps),
			&i.APIVersion,
			&i.DisplayOrder,
			&i.ParentID,
			&i.APIKeyScope,
			&i.Deleted,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// insertWorkspaceAgent inserts one agent row and returns the full inserted
// row (including server-populated defaults) via RETURNING.
const insertWorkspaceAgent = `-- name: InsertWorkspaceAgent :one
INSERT INTO
	workspace_agents (
		id,
		parent_id,
		created_at,
		updated_at,
		name,
		resource_id,
		auth_token,
		auth_instance_id,
		architecture,
		environment_variables,
		operating_system,
		directory,
		instance_metadata,
		resource_metadata,
		connection_timeout_seconds,
		troubleshooting_url,
		motd_file,
		display_apps,
		display_order,
		api_key_scope
	)
VALUES
	($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20) RETURNING id, created_at, updated_at, name, first_connected_at, last_connected_at, disconnected_at, resource_id, auth_token, auth_instance_id, architecture, environment_variables, operating_system, instance_metadata, resource_metadata, directory, version, last_connected_replica_id, connection_timeout_seconds, troubleshooting_url, motd_file, lifecycle_state, expanded_directory, logs_length, logs_overflowed, started_at, ready_at, subsystems, display_apps, api_version, display_order, parent_id, api_key_scope, deleted
`

// InsertWorkspaceAgentParams holds the twenty insert values; field order
// matches the $1..$20 placeholders of insertWorkspaceAgent.
type InsertWorkspaceAgentParams struct {
	ID                       uuid.UUID             `db:"id" json:"id"`
	ParentID                 uuid.NullUUID         `db:"parent_id" json:"parent_id"`
	CreatedAt                time.Time             `db:"created_at" json:"created_at"`
	UpdatedAt                time.Time             `db:"updated_at" json:"updated_at"`
	Name                     string                `db:"name" json:"name"`
	ResourceID               uuid.UUID             `db:"resource_id" json:"resource_id"`
	AuthToken                uuid.UUID             `db:"auth_token" json:"auth_token"`
	AuthInstanceID           sql.NullString        `db:"auth_instance_id" json:"auth_instance_id"`
	Architecture             string                `db:"architecture" json:"architecture"`
	EnvironmentVariables     pqtype.NullRawMessage `db:"environment_variables" json:"environment_variables"`
	OperatingSystem          string                `db:"operating_system" json:"operating_system"`
	Directory                string                `db:"directory" json:"directory"`
	InstanceMetadata         pqtype.NullRawMessage `db:"instance_metadata" json:"instance_metadata"`
	ResourceMetadata         pqtype.NullRawMessage `db:"resource_metadata" json:"resource_metadata"`
	ConnectionTimeoutSeconds int32                 `db:"connection_timeout_seconds" json:"connection_timeout_seconds"`
	TroubleshootingURL       string                `db:"troubleshooting_url" json:"troubleshooting_url"`
	MOTDFile                 string                `db:"motd_file" json:"motd_file"`
	DisplayApps              []DisplayApp          `db:"display_apps" json:"display_apps"`
	DisplayOrder             int32                 `db:"display_order" json:"display_order"`
	APIKeyScope              AgentKeyScopeEnum     `db:"api_key_scope" json:"api_key_scope"`
}

// InsertWorkspaceAgent inserts a workspace agent and returns the inserted
// row. Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) InsertWorkspaceAgent(ctx context.Context, arg InsertWorkspaceAgentParams) (WorkspaceAgent, error) {
	// Argument order matches the $1..$20 placeholders; DisplayApps is an
	// array column and goes through pq.Array.
	row := q.db.QueryRowContext(ctx, insertWorkspaceAgent,
		arg.ID,
		arg.ParentID,
		arg.CreatedAt,
		arg.UpdatedAt,
		arg.Name,
		arg.ResourceID,
		arg.AuthToken,
		arg.AuthInstanceID,
		arg.Architecture,
		arg.EnvironmentVariables,
		arg.OperatingSystem,
		arg.Directory,
		arg.InstanceMetadata,
		arg.ResourceMetadata,
		arg.ConnectionTimeoutSeconds,
		arg.TroubleshootingURL,
		arg.MOTDFile,
		pq.Array(arg.DisplayApps),
		arg.DisplayOrder,
		arg.APIKeyScope,
	)
	var i WorkspaceAgent
	// Scan destinations mirror the RETURNING column order above.
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Name,
		&i.FirstConnectedAt,
		&i.LastConnectedAt,
		&i.DisconnectedAt,
		&i.ResourceID,
		&i.AuthToken,
		&i.AuthInstanceID,
		&i.Architecture,
		&i.EnvironmentVariables,
		&i.OperatingSystem,
		&i.InstanceMetadata,
		&i.ResourceMetadata,
		&i.Directory,
		&i.Version,
		&i.LastConnectedReplicaID,
		&i.ConnectionTimeoutSeconds,
		&i.TroubleshootingURL,
		&i.MOTDFile,
		&i.LifecycleState,
		&i.ExpandedDirectory,
		&i.LogsLength,
		&i.LogsOverflowed,
		&i.StartedAt,
		&i.ReadyAt,
		pq.Array(&i.Subsystems),
		pq.Array(&i.DisplayApps),
		&i.APIVersion,
		&i.DisplayOrder,
		&i.ParentID,
		&i.APIKeyScope,
		&i.Deleted,
	)
	return i, err
}
|
|
|
|
// insertWorkspaceAgentLogSources bulk-inserts log sources for one agent by
// unnesting parallel arrays of (id, display_name, icon); the agent id and
// timestamp are shared across all inserted rows.
const insertWorkspaceAgentLogSources = `-- name: InsertWorkspaceAgentLogSources :many
INSERT INTO
	workspace_agent_log_sources (workspace_agent_id, created_at, id, display_name, icon)
SELECT
	$1 :: uuid AS workspace_agent_id,
	$2 :: timestamptz AS created_at,
	unnest($3 :: uuid [ ]) AS id,
	unnest($4 :: VARCHAR(127) [ ]) AS display_name,
	unnest($5 :: text [ ]) AS icon
RETURNING workspace_agent_log_sources.workspace_agent_id, workspace_agent_log_sources.id, workspace_agent_log_sources.created_at, workspace_agent_log_sources.display_name, workspace_agent_log_sources.icon
`

// InsertWorkspaceAgentLogSourcesParams carries one agent/timestamp pair plus
// the parallel per-row arrays; ID, DisplayName, and Icon are expected to be
// the same length (they are unnested together).
type InsertWorkspaceAgentLogSourcesParams struct {
	WorkspaceAgentID uuid.UUID   `db:"workspace_agent_id" json:"workspace_agent_id"`
	CreatedAt        time.Time   `db:"created_at" json:"created_at"`
	ID               []uuid.UUID `db:"id" json:"id"`
	DisplayName      []string    `db:"display_name" json:"display_name"`
	Icon             []string    `db:"icon" json:"icon"`
}

// InsertWorkspaceAgentLogSources bulk-inserts log sources for an agent and
// returns the inserted rows. Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) InsertWorkspaceAgentLogSources(ctx context.Context, arg InsertWorkspaceAgentLogSourcesParams) ([]WorkspaceAgentLogSource, error) {
	rows, err := q.db.QueryContext(ctx, insertWorkspaceAgentLogSources,
		arg.WorkspaceAgentID,
		arg.CreatedAt,
		pq.Array(arg.ID),
		pq.Array(arg.DisplayName),
		pq.Array(arg.Icon),
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceAgentLogSource
	for rows.Next() {
		var i WorkspaceAgentLogSource
		// Scan destinations mirror the RETURNING column order above.
		if err := rows.Scan(
			&i.WorkspaceAgentID,
			&i.ID,
			&i.CreatedAt,
			&i.DisplayName,
			&i.Icon,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// insertWorkspaceAgentLogs bulk-inserts log lines for one agent by unnesting
// parallel output/level arrays, while the CTE bumps the agent's logs_length
// counter by $6 in the same statement.
const insertWorkspaceAgentLogs = `-- name: InsertWorkspaceAgentLogs :many
WITH new_length AS (
	UPDATE workspace_agents SET
	logs_length = logs_length + $6 WHERE workspace_agents.id = $1
)
INSERT INTO
	workspace_agent_logs (agent_id, created_at, output, level, log_source_id)
SELECT
	$1 :: uuid AS agent_id,
	$2 :: timestamptz AS created_at,
	unnest($3 :: VARCHAR(1024) [ ]) AS output,
	unnest($4 :: log_level [ ]) AS level,
	$5 :: uuid AS log_source_id
RETURNING workspace_agent_logs.agent_id, workspace_agent_logs.created_at, workspace_agent_logs.output, workspace_agent_logs.id, workspace_agent_logs.level, workspace_agent_logs.log_source_id
`

// InsertWorkspaceAgentLogsParams carries the shared agent/timestamp/source
// values, the parallel Output/Level arrays (unnested together), and
// OutputLength, which is added to the agent's logs_length counter.
type InsertWorkspaceAgentLogsParams struct {
	AgentID      uuid.UUID  `db:"agent_id" json:"agent_id"`
	CreatedAt    time.Time  `db:"created_at" json:"created_at"`
	Output       []string   `db:"output" json:"output"`
	Level        []LogLevel `db:"level" json:"level"`
	LogSourceID  uuid.UUID  `db:"log_source_id" json:"log_source_id"`
	OutputLength int32      `db:"output_length" json:"output_length"`
}

// InsertWorkspaceAgentLogs bulk-inserts agent log lines (also bumping the
// agent's logs_length) and returns the inserted rows. Generated by sqlc; do
// not edit by hand.
func (q *sqlQuerier) InsertWorkspaceAgentLogs(ctx context.Context, arg InsertWorkspaceAgentLogsParams) ([]WorkspaceAgentLog, error) {
	rows, err := q.db.QueryContext(ctx, insertWorkspaceAgentLogs,
		arg.AgentID,
		arg.CreatedAt,
		pq.Array(arg.Output),
		pq.Array(arg.Level),
		arg.LogSourceID,
		arg.OutputLength,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceAgentLog
	for rows.Next() {
		var i WorkspaceAgentLog
		// Scan destinations mirror the RETURNING column order above.
		if err := rows.Scan(
			&i.AgentID,
			&i.CreatedAt,
			&i.Output,
			&i.ID,
			&i.Level,
			&i.LogSourceID,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// insertWorkspaceAgentMetadata inserts a single metadata definition row for
// an agent (no RETURNING clause; this is an :exec query).
const insertWorkspaceAgentMetadata = `-- name: InsertWorkspaceAgentMetadata :exec
INSERT INTO
	workspace_agent_metadata (
		workspace_agent_id,
		display_name,
		key,
		script,
		timeout,
		interval,
		display_order
	)
VALUES
	($1, $2, $3, $4, $5, $6, $7)
`

// InsertWorkspaceAgentMetadataParams holds the seven insert values; field
// order matches the $1..$7 placeholders of insertWorkspaceAgentMetadata.
type InsertWorkspaceAgentMetadataParams struct {
	WorkspaceAgentID uuid.UUID `db:"workspace_agent_id" json:"workspace_agent_id"`
	DisplayName      string    `db:"display_name" json:"display_name"`
	Key              string    `db:"key" json:"key"`
	Script           string    `db:"script" json:"script"`
	Timeout          int64     `db:"timeout" json:"timeout"`
	Interval         int64     `db:"interval" json:"interval"`
	DisplayOrder     int32     `db:"display_order" json:"display_order"`
}

// InsertWorkspaceAgentMetadata inserts one metadata definition row for an
// agent. Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) InsertWorkspaceAgentMetadata(ctx context.Context, arg InsertWorkspaceAgentMetadataParams) error {
	_, err := q.db.ExecContext(ctx, insertWorkspaceAgentMetadata,
		arg.WorkspaceAgentID,
		arg.DisplayName,
		arg.Key,
		arg.Script,
		arg.Timeout,
		arg.Interval,
		arg.DisplayOrder,
	)
	return err
}
|
|
|
|
// insertWorkspaceAgentScriptTimings inserts one script-timing row and returns
// the inserted row via RETURNING.
const insertWorkspaceAgentScriptTimings = `-- name: InsertWorkspaceAgentScriptTimings :one
INSERT INTO
	workspace_agent_script_timings (
		script_id,
		started_at,
		ended_at,
		exit_code,
		stage,
		status
	)
VALUES
	($1, $2, $3, $4, $5, $6)
RETURNING workspace_agent_script_timings.script_id, workspace_agent_script_timings.started_at, workspace_agent_script_timings.ended_at, workspace_agent_script_timings.exit_code, workspace_agent_script_timings.stage, workspace_agent_script_timings.status
`

// InsertWorkspaceAgentScriptTimingsParams holds the six insert values; field
// order matches the $1..$6 placeholders above.
type InsertWorkspaceAgentScriptTimingsParams struct {
	ScriptID  uuid.UUID                        `db:"script_id" json:"script_id"`
	StartedAt time.Time                        `db:"started_at" json:"started_at"`
	EndedAt   time.Time                        `db:"ended_at" json:"ended_at"`
	ExitCode  int32                            `db:"exit_code" json:"exit_code"`
	Stage     WorkspaceAgentScriptTimingStage  `db:"stage" json:"stage"`
	Status    WorkspaceAgentScriptTimingStatus `db:"status" json:"status"`
}

// InsertWorkspaceAgentScriptTimings inserts one script-timing row and returns
// it. Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) InsertWorkspaceAgentScriptTimings(ctx context.Context, arg InsertWorkspaceAgentScriptTimingsParams) (WorkspaceAgentScriptTiming, error) {
	row := q.db.QueryRowContext(ctx, insertWorkspaceAgentScriptTimings,
		arg.ScriptID,
		arg.StartedAt,
		arg.EndedAt,
		arg.ExitCode,
		arg.Stage,
		arg.Status,
	)
	var i WorkspaceAgentScriptTiming
	// Scan destinations mirror the RETURNING column order above.
	err := row.Scan(
		&i.ScriptID,
		&i.StartedAt,
		&i.EndedAt,
		&i.ExitCode,
		&i.Stage,
		&i.Status,
	)
	return i, err
}
|
|
|
|
// updateWorkspaceAgentConnectionByID overwrites an agent's connection
// bookkeeping columns (first/last connected, replica, disconnected,
// updated_at) for the row with the given id.
const updateWorkspaceAgentConnectionByID = `-- name: UpdateWorkspaceAgentConnectionByID :exec
UPDATE
	workspace_agents
SET
	first_connected_at = $2,
	last_connected_at = $3,
	last_connected_replica_id = $4,
	disconnected_at = $5,
	updated_at = $6
WHERE
	id = $1
`

// UpdateWorkspaceAgentConnectionByIDParams holds the target agent ID plus the
// five connection-state values; field order matches the $1..$6 placeholders.
type UpdateWorkspaceAgentConnectionByIDParams struct {
	ID                     uuid.UUID     `db:"id" json:"id"`
	FirstConnectedAt       sql.NullTime  `db:"first_connected_at" json:"first_connected_at"`
	LastConnectedAt        sql.NullTime  `db:"last_connected_at" json:"last_connected_at"`
	LastConnectedReplicaID uuid.NullUUID `db:"last_connected_replica_id" json:"last_connected_replica_id"`
	DisconnectedAt         sql.NullTime  `db:"disconnected_at" json:"disconnected_at"`
	UpdatedAt              time.Time     `db:"updated_at" json:"updated_at"`
}

// UpdateWorkspaceAgentConnectionByID updates an agent's connection-state
// columns by agent ID. Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) UpdateWorkspaceAgentConnectionByID(ctx context.Context, arg UpdateWorkspaceAgentConnectionByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateWorkspaceAgentConnectionByID,
		arg.ID,
		arg.FirstConnectedAt,
		arg.LastConnectedAt,
		arg.LastConnectedReplicaID,
		arg.DisconnectedAt,
		arg.UpdatedAt,
	)
	return err
}
|
|
|
|
// updateWorkspaceAgentLifecycleStateByID overwrites an agent's lifecycle
// state and its started/ready timestamps for the row with the given id.
const updateWorkspaceAgentLifecycleStateByID = `-- name: UpdateWorkspaceAgentLifecycleStateByID :exec
UPDATE
	workspace_agents
SET
	lifecycle_state = $2,
	started_at = $3,
	ready_at = $4
WHERE
	id = $1
`

// UpdateWorkspaceAgentLifecycleStateByIDParams holds the target agent ID plus
// the new lifecycle values; field order matches the $1..$4 placeholders.
type UpdateWorkspaceAgentLifecycleStateByIDParams struct {
	ID             uuid.UUID                    `db:"id" json:"id"`
	LifecycleState WorkspaceAgentLifecycleState `db:"lifecycle_state" json:"lifecycle_state"`
	StartedAt      sql.NullTime                 `db:"started_at" json:"started_at"`
	ReadyAt        sql.NullTime                 `db:"ready_at" json:"ready_at"`
}

// UpdateWorkspaceAgentLifecycleStateByID updates an agent's lifecycle state
// and start/ready timestamps by agent ID. Generated by sqlc; do not edit by
// hand.
func (q *sqlQuerier) UpdateWorkspaceAgentLifecycleStateByID(ctx context.Context, arg UpdateWorkspaceAgentLifecycleStateByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateWorkspaceAgentLifecycleStateByID,
		arg.ID,
		arg.LifecycleState,
		arg.StartedAt,
		arg.ReadyAt,
	)
	return err
}
|
|
|
|
// updateWorkspaceAgentLogOverflowByID sets the logs_overflowed flag for the
// agent row with the given id.
const updateWorkspaceAgentLogOverflowByID = `-- name: UpdateWorkspaceAgentLogOverflowByID :exec
UPDATE
	workspace_agents
SET
	logs_overflowed = $2
WHERE
	id = $1
`

// UpdateWorkspaceAgentLogOverflowByIDParams holds the target agent ID and the
// new logs_overflowed flag value.
type UpdateWorkspaceAgentLogOverflowByIDParams struct {
	ID             uuid.UUID `db:"id" json:"id"`
	LogsOverflowed bool      `db:"logs_overflowed" json:"logs_overflowed"`
}

// UpdateWorkspaceAgentLogOverflowByID sets an agent's logs_overflowed flag by
// agent ID. Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) UpdateWorkspaceAgentLogOverflowByID(ctx context.Context, arg UpdateWorkspaceAgentLogOverflowByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateWorkspaceAgentLogOverflowByID, arg.ID, arg.LogsOverflowed)
	return err
}
|
|
|
|
// updateWorkspaceAgentMetadata bulk-updates metadata values for one agent:
// the CTE unnests parallel (key, value, error, collected_at) arrays and each
// row is matched to the agent's metadata row with the same key.
const updateWorkspaceAgentMetadata = `-- name: UpdateWorkspaceAgentMetadata :exec
WITH metadata AS (
	SELECT
		unnest($2::text[]) AS key,
		unnest($3::text[]) AS value,
		unnest($4::text[]) AS error,
		unnest($5::timestamptz[]) AS collected_at
)
UPDATE
	workspace_agent_metadata wam
SET
	value = m.value,
	error = m.error,
	collected_at = m.collected_at
FROM
	metadata m
WHERE
	wam.workspace_agent_id = $1
	AND wam.key = m.key
`

// UpdateWorkspaceAgentMetadataParams holds the target agent ID and the
// parallel per-key arrays; Key, Value, Error, and CollectedAt are expected to
// be the same length (they are unnested together).
type UpdateWorkspaceAgentMetadataParams struct {
	WorkspaceAgentID uuid.UUID   `db:"workspace_agent_id" json:"workspace_agent_id"`
	Key              []string    `db:"key" json:"key"`
	Value            []string    `db:"value" json:"value"`
	Error            []string    `db:"error" json:"error"`
	CollectedAt      []time.Time `db:"collected_at" json:"collected_at"`
}

// UpdateWorkspaceAgentMetadata bulk-updates an agent's metadata values keyed
// by metadata key. Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) UpdateWorkspaceAgentMetadata(ctx context.Context, arg UpdateWorkspaceAgentMetadataParams) error {
	_, err := q.db.ExecContext(ctx, updateWorkspaceAgentMetadata,
		arg.WorkspaceAgentID,
		pq.Array(arg.Key),
		pq.Array(arg.Value),
		pq.Array(arg.Error),
		pq.Array(arg.CollectedAt),
	)
	return err
}
|
|
|
|
// updateWorkspaceAgentStartupByID records the values an agent reports at
// startup (version, expanded directory, subsystems, API version) on the row
// with the given id.
const updateWorkspaceAgentStartupByID = `-- name: UpdateWorkspaceAgentStartupByID :exec
UPDATE
	workspace_agents
SET
	version = $2,
	expanded_directory = $3,
	subsystems = $4,
	api_version = $5
WHERE
	id = $1
`

// UpdateWorkspaceAgentStartupByIDParams holds the target agent ID plus the
// startup-reported values; field order matches the $1..$5 placeholders.
type UpdateWorkspaceAgentStartupByIDParams struct {
	ID                uuid.UUID                 `db:"id" json:"id"`
	Version           string                    `db:"version" json:"version"`
	ExpandedDirectory string                    `db:"expanded_directory" json:"expanded_directory"`
	Subsystems        []WorkspaceAgentSubsystem `db:"subsystems" json:"subsystems"`
	APIVersion        string                    `db:"api_version" json:"api_version"`
}

// UpdateWorkspaceAgentStartupByID updates an agent's startup-reported fields
// by agent ID. Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) UpdateWorkspaceAgentStartupByID(ctx context.Context, arg UpdateWorkspaceAgentStartupByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateWorkspaceAgentStartupByID,
		arg.ID,
		arg.Version,
		arg.ExpandedDirectory,
		// Subsystems is an array column and goes through pq.Array.
		pq.Array(arg.Subsystems),
		arg.APIVersion,
	)
	return err
}
|
|
|
|
// deleteOldWorkspaceAgentStats prunes old rows from workspace_agent_stats.
// The retention cutoff is derived from template_usage_stats (or a 180-day
// fallback), and deletion is capped to a 4-hour window past the oldest
// remaining row so each invocation does a bounded amount of work.
const deleteOldWorkspaceAgentStats = `-- name: DeleteOldWorkspaceAgentStats :exec
DELETE FROM
	workspace_agent_stats
WHERE
	created_at < (
		SELECT
			COALESCE(
				-- When generating initial template usage stats, all the
				-- raw agent stats are needed, after that only ~30 mins
				-- from last rollup is needed. Deployment stats seem to
				-- use between 15 mins and 1 hour of data. We keep a
				-- little bit more (1 day) just in case.
				MAX(start_time) - '1 days'::interval,
				-- Fall back to ~6 months ago if there are no template
				-- usage stats so that we don't delete the data before
				-- it's rolled up.
				NOW() - '180 days'::interval
			)
		FROM
			template_usage_stats
	)
	AND created_at < (
		-- Delete at most in batches of 4 hours (with this batch size, assuming
		-- 1 iteration / 10 minutes, we can clear out the previous 6 months of
		-- data in 7.5 days) whilst keeping the DB load low.
		SELECT
			COALESCE(MIN(created_at) + '4 hours'::interval, NOW())
		FROM
			workspace_agent_stats
	)
`

// DeleteOldWorkspaceAgentStats deletes a bounded batch of expired agent-stat
// rows; intended to be called repeatedly (see the batching comment in the
// SQL). Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) DeleteOldWorkspaceAgentStats(ctx context.Context) error {
	_, err := q.db.ExecContext(ctx, deleteOldWorkspaceAgentStats)
	return err
}
|
|
|
|
// getDeploymentDAUs buckets agent-stat rows that had at least one connection
// into distinct (date, user_id) pairs across the whole deployment, shifting
// dates by the caller-supplied time-zone offset (cast to text for AT TIME
// ZONE).
const getDeploymentDAUs = `-- name: GetDeploymentDAUs :many
SELECT
	(created_at at TIME ZONE cast($1::integer as text))::date as date,
	user_id
FROM
	workspace_agent_stats
WHERE
	connection_count > 0
GROUP BY
	date, user_id
ORDER BY
	date ASC
`

// GetDeploymentDAUsRow is one distinct (date, user) pair of daily activity.
type GetDeploymentDAUsRow struct {
	Date   time.Time `db:"date" json:"date"`
	UserID uuid.UUID `db:"user_id" json:"user_id"`
}

// GetDeploymentDAUs returns deployment-wide daily-active-user rows, ordered
// by date ascending. Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) GetDeploymentDAUs(ctx context.Context, tzOffset int32) ([]GetDeploymentDAUsRow, error) {
	rows, err := q.db.QueryContext(ctx, getDeploymentDAUs, tzOffset)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetDeploymentDAUsRow
	for rows.Next() {
		var i GetDeploymentDAUsRow
		if err := rows.Scan(&i.Date, &i.UserID); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getDeploymentWorkspaceAgentStats aggregates deployment-wide agent stats
// since a timestamp: byte totals and latency percentiles over all rows, plus
// session counts taken from each agent's single most recent row (ROW_NUMBER
// partitioned by agent_id). The two CTEs are cross-joined into one row.
const getDeploymentWorkspaceAgentStats = `-- name: GetDeploymentWorkspaceAgentStats :one
WITH agent_stats AS (
	SELECT
		coalesce(SUM(rx_bytes), 0)::bigint AS workspace_rx_bytes,
		coalesce(SUM(tx_bytes), 0)::bigint AS workspace_tx_bytes,
		coalesce((PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY connection_median_latency_ms)), -1)::FLOAT AS workspace_connection_latency_50,
		coalesce((PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY connection_median_latency_ms)), -1)::FLOAT AS workspace_connection_latency_95
	FROM workspace_agent_stats
	-- The greater than 0 is to support legacy agents that don't report connection_median_latency_ms.
	WHERE workspace_agent_stats.created_at > $1 AND connection_median_latency_ms > 0
), latest_agent_stats AS (
	SELECT
		coalesce(SUM(session_count_vscode), 0)::bigint AS session_count_vscode,
		coalesce(SUM(session_count_ssh), 0)::bigint AS session_count_ssh,
		coalesce(SUM(session_count_jetbrains), 0)::bigint AS session_count_jetbrains,
		coalesce(SUM(session_count_reconnecting_pty), 0)::bigint AS session_count_reconnecting_pty
	FROM (
		SELECT id, created_at, user_id, agent_id, workspace_id, template_id, connections_by_proto, connection_count, rx_packets, rx_bytes, tx_packets, tx_bytes, connection_median_latency_ms, session_count_vscode, session_count_jetbrains, session_count_reconnecting_pty, session_count_ssh, usage, ROW_NUMBER() OVER(PARTITION BY agent_id ORDER BY created_at DESC) AS rn
		FROM workspace_agent_stats WHERE created_at > $1
	) AS a WHERE a.rn = 1
)
SELECT workspace_rx_bytes, workspace_tx_bytes, workspace_connection_latency_50, workspace_connection_latency_95, session_count_vscode, session_count_ssh, session_count_jetbrains, session_count_reconnecting_pty FROM agent_stats, latest_agent_stats
`

// GetDeploymentWorkspaceAgentStatsRow is the single aggregate result row;
// latency percentiles are -1 when no qualifying rows existed (the SQL
// coalesce fallback).
type GetDeploymentWorkspaceAgentStatsRow struct {
	WorkspaceRxBytes             int64   `db:"workspace_rx_bytes" json:"workspace_rx_bytes"`
	WorkspaceTxBytes             int64   `db:"workspace_tx_bytes" json:"workspace_tx_bytes"`
	WorkspaceConnectionLatency50 float64 `db:"workspace_connection_latency_50" json:"workspace_connection_latency_50"`
	WorkspaceConnectionLatency95 float64 `db:"workspace_connection_latency_95" json:"workspace_connection_latency_95"`
	SessionCountVSCode           int64   `db:"session_count_vscode" json:"session_count_vscode"`
	SessionCountSSH              int64   `db:"session_count_ssh" json:"session_count_ssh"`
	SessionCountJetBrains        int64   `db:"session_count_jetbrains" json:"session_count_jetbrains"`
	SessionCountReconnectingPTY  int64   `db:"session_count_reconnecting_pty" json:"session_count_reconnecting_pty"`
}

// GetDeploymentWorkspaceAgentStats returns one aggregate row of agent stats
// recorded after createdAt. Generated by sqlc; do not edit by hand.
func (q *sqlQuerier) GetDeploymentWorkspaceAgentStats(ctx context.Context, createdAt time.Time) (GetDeploymentWorkspaceAgentStatsRow, error) {
	row := q.db.QueryRowContext(ctx, getDeploymentWorkspaceAgentStats, createdAt)
	var i GetDeploymentWorkspaceAgentStatsRow
	// Scan destinations mirror the final SELECT column order above.
	err := row.Scan(
		&i.WorkspaceRxBytes,
		&i.WorkspaceTxBytes,
		&i.WorkspaceConnectionLatency50,
		&i.WorkspaceConnectionLatency95,
		&i.SessionCountVSCode,
		&i.SessionCountSSH,
		&i.SessionCountJetBrains,
		&i.SessionCountReconnectingPTY,
	)
	return i, err
}
|
|
|
|
const getDeploymentWorkspaceAgentUsageStats = `-- name: GetDeploymentWorkspaceAgentUsageStats :one
|
|
WITH agent_stats AS (
|
|
SELECT
|
|
coalesce(SUM(rx_bytes), 0)::bigint AS workspace_rx_bytes,
|
|
coalesce(SUM(tx_bytes), 0)::bigint AS workspace_tx_bytes,
|
|
coalesce((PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY connection_median_latency_ms)), -1)::FLOAT AS workspace_connection_latency_50,
|
|
coalesce((PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY connection_median_latency_ms)), -1)::FLOAT AS workspace_connection_latency_95
|
|
FROM workspace_agent_stats
|
|
-- The greater than 0 is to support legacy agents that don't report connection_median_latency_ms.
|
|
WHERE workspace_agent_stats.created_at > $1 AND connection_median_latency_ms > 0
|
|
),
|
|
minute_buckets AS (
|
|
SELECT
|
|
agent_id,
|
|
date_trunc('minute', created_at) AS minute_bucket,
|
|
coalesce(SUM(session_count_vscode), 0)::bigint AS session_count_vscode,
|
|
coalesce(SUM(session_count_ssh), 0)::bigint AS session_count_ssh,
|
|
coalesce(SUM(session_count_jetbrains), 0)::bigint AS session_count_jetbrains,
|
|
coalesce(SUM(session_count_reconnecting_pty), 0)::bigint AS session_count_reconnecting_pty
|
|
FROM
|
|
workspace_agent_stats
|
|
WHERE
|
|
created_at >= $1
|
|
AND created_at < date_trunc('minute', now()) -- Exclude current partial minute
|
|
AND usage = true
|
|
GROUP BY
|
|
agent_id,
|
|
minute_bucket
|
|
),
|
|
latest_buckets AS (
|
|
SELECT DISTINCT ON (agent_id)
|
|
agent_id,
|
|
minute_bucket,
|
|
session_count_vscode,
|
|
session_count_jetbrains,
|
|
session_count_reconnecting_pty,
|
|
session_count_ssh
|
|
FROM
|
|
minute_buckets
|
|
ORDER BY
|
|
agent_id,
|
|
minute_bucket DESC
|
|
),
|
|
latest_agent_stats AS (
|
|
SELECT
|
|
coalesce(SUM(session_count_vscode), 0)::bigint AS session_count_vscode,
|
|
coalesce(SUM(session_count_ssh), 0)::bigint AS session_count_ssh,
|
|
coalesce(SUM(session_count_jetbrains), 0)::bigint AS session_count_jetbrains,
|
|
coalesce(SUM(session_count_reconnecting_pty), 0)::bigint AS session_count_reconnecting_pty
|
|
FROM
|
|
latest_buckets
|
|
)
|
|
SELECT workspace_rx_bytes, workspace_tx_bytes, workspace_connection_latency_50, workspace_connection_latency_95, session_count_vscode, session_count_ssh, session_count_jetbrains, session_count_reconnecting_pty FROM agent_stats, latest_agent_stats
|
|
`
|
|
|
|
// GetDeploymentWorkspaceAgentUsageStatsRow holds deployment-wide aggregates:
// total rx/tx bytes, p50/p95 connection latency (-1 when no samples), and the
// latest per-protocol session counts.
type GetDeploymentWorkspaceAgentUsageStatsRow struct {
	WorkspaceRxBytes             int64   `db:"workspace_rx_bytes" json:"workspace_rx_bytes"`
	WorkspaceTxBytes             int64   `db:"workspace_tx_bytes" json:"workspace_tx_bytes"`
	WorkspaceConnectionLatency50 float64 `db:"workspace_connection_latency_50" json:"workspace_connection_latency_50"`
	WorkspaceConnectionLatency95 float64 `db:"workspace_connection_latency_95" json:"workspace_connection_latency_95"`
	SessionCountVSCode           int64   `db:"session_count_vscode" json:"session_count_vscode"`
	SessionCountSSH              int64   `db:"session_count_ssh" json:"session_count_ssh"`
	SessionCountJetBrains        int64   `db:"session_count_jetbrains" json:"session_count_jetbrains"`
	SessionCountReconnectingPTY  int64   `db:"session_count_reconnecting_pty" json:"session_count_reconnecting_pty"`
}

// GetDeploymentWorkspaceAgentUsageStats returns one row of deployment-wide
// agent usage stats for records created after createdAt.
//
// NOTE: sqlc-generated; change the source .sql query and regenerate rather
// than editing this method by hand.
func (q *sqlQuerier) GetDeploymentWorkspaceAgentUsageStats(ctx context.Context, createdAt time.Time) (GetDeploymentWorkspaceAgentUsageStatsRow, error) {
	row := q.db.QueryRowContext(ctx, getDeploymentWorkspaceAgentUsageStats, createdAt)
	var i GetDeploymentWorkspaceAgentUsageStatsRow
	// Scan order must match the SELECT column order of the query constant.
	err := row.Scan(
		&i.WorkspaceRxBytes,
		&i.WorkspaceTxBytes,
		&i.WorkspaceConnectionLatency50,
		&i.WorkspaceConnectionLatency95,
		&i.SessionCountVSCode,
		&i.SessionCountSSH,
		&i.SessionCountJetBrains,
		&i.SessionCountReconnectingPTY,
	)
	return i, err
}
|
|
|
|
// getTemplateDAUs groups agent stats with at least one connection into
// (date, user_id) pairs for a template; $2 is a tz offset (hours) applied
// before truncating to a date so "days" follow the caller's timezone.
const getTemplateDAUs = `-- name: GetTemplateDAUs :many
SELECT
	(created_at at TIME ZONE cast($2::integer as text))::date as date,
	user_id
FROM
	workspace_agent_stats
WHERE
	template_id = $1 AND
	connection_count > 0
GROUP BY
	date, user_id
ORDER BY
	date ASC
`

// GetTemplateDAUsParams are the inputs to GetTemplateDAUs.
type GetTemplateDAUsParams struct {
	TemplateID uuid.UUID `db:"template_id" json:"template_id"`
	TzOffset   int32     `db:"tz_offset" json:"tz_offset"`
}

// GetTemplateDAUsRow is one (date, user) pair counting as a daily active user.
type GetTemplateDAUsRow struct {
	Date   time.Time `db:"date" json:"date"`
	UserID uuid.UUID `db:"user_id" json:"user_id"`
}

// GetTemplateDAUs returns the distinct (date, user) pairs for a template,
// ordered by date ascending.
//
// NOTE: sqlc-generated; change the source .sql query and regenerate rather
// than editing this method by hand.
func (q *sqlQuerier) GetTemplateDAUs(ctx context.Context, arg GetTemplateDAUsParams) ([]GetTemplateDAUsRow, error) {
	rows, err := q.db.QueryContext(ctx, getTemplateDAUs, arg.TemplateID, arg.TzOffset)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetTemplateDAUsRow
	for rows.Next() {
		var i GetTemplateDAUsRow
		if err := rows.Scan(&i.Date, &i.UserID); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err surfaces close failures (standard sqlc pattern).
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getWorkspaceAgentStats aggregates per-(user, agent, workspace, template)
// byte counts and latency percentiles since $1, joined with each agent's most
// recent row (ROW_NUMBER ... rn = 1) for current session counts.
const getWorkspaceAgentStats = `-- name: GetWorkspaceAgentStats :many
WITH agent_stats AS (
	SELECT
		user_id,
		agent_id,
		workspace_id,
		template_id,
		MIN(created_at)::timestamptz AS aggregated_from,
		coalesce(SUM(rx_bytes), 0)::bigint AS workspace_rx_bytes,
		coalesce(SUM(tx_bytes), 0)::bigint AS workspace_tx_bytes,
		coalesce((PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY connection_median_latency_ms)), -1)::FLOAT AS workspace_connection_latency_50,
		coalesce((PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY connection_median_latency_ms)), -1)::FLOAT AS workspace_connection_latency_95
	FROM workspace_agent_stats
	-- The greater than 0 is to support legacy agents that don't report connection_median_latency_ms.
	WHERE workspace_agent_stats.created_at > $1 AND connection_median_latency_ms > 0
	GROUP BY user_id, agent_id, workspace_id, template_id
), latest_agent_stats AS (
	SELECT
		a.agent_id,
		coalesce(SUM(session_count_vscode), 0)::bigint AS session_count_vscode,
		coalesce(SUM(session_count_ssh), 0)::bigint AS session_count_ssh,
		coalesce(SUM(session_count_jetbrains), 0)::bigint AS session_count_jetbrains,
		coalesce(SUM(session_count_reconnecting_pty), 0)::bigint AS session_count_reconnecting_pty
	FROM (
		SELECT id, created_at, user_id, agent_id, workspace_id, template_id, connections_by_proto, connection_count, rx_packets, rx_bytes, tx_packets, tx_bytes, connection_median_latency_ms, session_count_vscode, session_count_jetbrains, session_count_reconnecting_pty, session_count_ssh, usage, ROW_NUMBER() OVER(PARTITION BY agent_id ORDER BY created_at DESC) AS rn
		FROM workspace_agent_stats WHERE created_at > $1
	) AS a WHERE a.rn = 1 GROUP BY a.user_id, a.agent_id, a.workspace_id, a.template_id
)
SELECT user_id, agent_stats.agent_id, workspace_id, template_id, aggregated_from, workspace_rx_bytes, workspace_tx_bytes, workspace_connection_latency_50, workspace_connection_latency_95, latest_agent_stats.agent_id, session_count_vscode, session_count_ssh, session_count_jetbrains, session_count_reconnecting_pty FROM agent_stats JOIN latest_agent_stats ON agent_stats.agent_id = latest_agent_stats.agent_id
`

// GetWorkspaceAgentStatsRow is one per-agent aggregate. AgentID_2 is the
// duplicate agent_id column selected from the latest_agent_stats CTE.
type GetWorkspaceAgentStatsRow struct {
	UserID                       uuid.UUID `db:"user_id" json:"user_id"`
	AgentID                      uuid.UUID `db:"agent_id" json:"agent_id"`
	WorkspaceID                  uuid.UUID `db:"workspace_id" json:"workspace_id"`
	TemplateID                   uuid.UUID `db:"template_id" json:"template_id"`
	AggregatedFrom               time.Time `db:"aggregated_from" json:"aggregated_from"`
	WorkspaceRxBytes             int64     `db:"workspace_rx_bytes" json:"workspace_rx_bytes"`
	WorkspaceTxBytes             int64     `db:"workspace_tx_bytes" json:"workspace_tx_bytes"`
	WorkspaceConnectionLatency50 float64   `db:"workspace_connection_latency_50" json:"workspace_connection_latency_50"`
	WorkspaceConnectionLatency95 float64   `db:"workspace_connection_latency_95" json:"workspace_connection_latency_95"`
	AgentID_2                    uuid.UUID `db:"agent_id_2" json:"agent_id_2"`
	SessionCountVSCode           int64     `db:"session_count_vscode" json:"session_count_vscode"`
	SessionCountSSH              int64     `db:"session_count_ssh" json:"session_count_ssh"`
	SessionCountJetBrains        int64     `db:"session_count_jetbrains" json:"session_count_jetbrains"`
	SessionCountReconnectingPTY  int64     `db:"session_count_reconnecting_pty" json:"session_count_reconnecting_pty"`
}

// GetWorkspaceAgentStats returns per-agent aggregated stats for records
// created after createdAt.
//
// NOTE: sqlc-generated; change the source .sql query and regenerate rather
// than editing this method by hand.
func (q *sqlQuerier) GetWorkspaceAgentStats(ctx context.Context, createdAt time.Time) ([]GetWorkspaceAgentStatsRow, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceAgentStats, createdAt)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetWorkspaceAgentStatsRow
	for rows.Next() {
		var i GetWorkspaceAgentStatsRow
		// Scan order must match the final SELECT column order.
		if err := rows.Scan(
			&i.UserID,
			&i.AgentID,
			&i.WorkspaceID,
			&i.TemplateID,
			&i.AggregatedFrom,
			&i.WorkspaceRxBytes,
			&i.WorkspaceTxBytes,
			&i.WorkspaceConnectionLatency50,
			&i.WorkspaceConnectionLatency95,
			&i.AgentID_2,
			&i.SessionCountVSCode,
			&i.SessionCountSSH,
			&i.SessionCountJetBrains,
			&i.SessionCountReconnectingPTY,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getWorkspaceAgentStatsAndLabels is like getWorkspaceAgentStats but joins
// users/workspace_agents/workspaces to attach human-readable labels
// (username, agent name, workspace name) to each aggregate row.
const getWorkspaceAgentStatsAndLabels = `-- name: GetWorkspaceAgentStatsAndLabels :many
WITH agent_stats AS (
	SELECT
		user_id,
		agent_id,
		workspace_id,
		coalesce(SUM(rx_bytes), 0)::bigint AS rx_bytes,
		coalesce(SUM(tx_bytes), 0)::bigint AS tx_bytes
	FROM workspace_agent_stats
	WHERE workspace_agent_stats.created_at > $1
	GROUP BY user_id, agent_id, workspace_id
), latest_agent_stats AS (
	SELECT
		a.agent_id,
		coalesce(SUM(session_count_vscode), 0)::bigint AS session_count_vscode,
		coalesce(SUM(session_count_ssh), 0)::bigint AS session_count_ssh,
		coalesce(SUM(session_count_jetbrains), 0)::bigint AS session_count_jetbrains,
		coalesce(SUM(session_count_reconnecting_pty), 0)::bigint AS session_count_reconnecting_pty,
		coalesce(SUM(connection_count), 0)::bigint AS connection_count,
		coalesce(MAX(connection_median_latency_ms), 0)::float AS connection_median_latency_ms
	FROM (
		SELECT id, created_at, user_id, agent_id, workspace_id, template_id, connections_by_proto, connection_count, rx_packets, rx_bytes, tx_packets, tx_bytes, connection_median_latency_ms, session_count_vscode, session_count_jetbrains, session_count_reconnecting_pty, session_count_ssh, usage, ROW_NUMBER() OVER(PARTITION BY agent_id ORDER BY created_at DESC) AS rn
		FROM workspace_agent_stats
		-- The greater than 0 is to support legacy agents that don't report connection_median_latency_ms.
		WHERE created_at > $1 AND connection_median_latency_ms > 0
	) AS a
	WHERE a.rn = 1
	GROUP BY a.user_id, a.agent_id, a.workspace_id
)
SELECT
	users.username, workspace_agents.name AS agent_name, workspaces.name AS workspace_name, rx_bytes, tx_bytes,
	session_count_vscode, session_count_ssh, session_count_jetbrains, session_count_reconnecting_pty,
	connection_count, connection_median_latency_ms
FROM
	agent_stats
JOIN
	latest_agent_stats
ON
	agent_stats.agent_id = latest_agent_stats.agent_id
JOIN
	users
ON
	users.id = agent_stats.user_id
JOIN
	workspace_agents
ON
	workspace_agents.id = agent_stats.agent_id
JOIN
	workspaces
ON
	workspaces.id = agent_stats.workspace_id
`

// GetWorkspaceAgentStatsAndLabelsRow is one labeled per-agent aggregate.
type GetWorkspaceAgentStatsAndLabelsRow struct {
	Username                    string  `db:"username" json:"username"`
	AgentName                   string  `db:"agent_name" json:"agent_name"`
	WorkspaceName               string  `db:"workspace_name" json:"workspace_name"`
	RxBytes                     int64   `db:"rx_bytes" json:"rx_bytes"`
	TxBytes                     int64   `db:"tx_bytes" json:"tx_bytes"`
	SessionCountVSCode          int64   `db:"session_count_vscode" json:"session_count_vscode"`
	SessionCountSSH             int64   `db:"session_count_ssh" json:"session_count_ssh"`
	SessionCountJetBrains       int64   `db:"session_count_jetbrains" json:"session_count_jetbrains"`
	SessionCountReconnectingPTY int64   `db:"session_count_reconnecting_pty" json:"session_count_reconnecting_pty"`
	ConnectionCount             int64   `db:"connection_count" json:"connection_count"`
	ConnectionMedianLatencyMS   float64 `db:"connection_median_latency_ms" json:"connection_median_latency_ms"`
}

// GetWorkspaceAgentStatsAndLabels returns labeled per-agent aggregates for
// records created after createdAt.
//
// NOTE: sqlc-generated; change the source .sql query and regenerate rather
// than editing this method by hand.
func (q *sqlQuerier) GetWorkspaceAgentStatsAndLabels(ctx context.Context, createdAt time.Time) ([]GetWorkspaceAgentStatsAndLabelsRow, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceAgentStatsAndLabels, createdAt)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetWorkspaceAgentStatsAndLabelsRow
	for rows.Next() {
		var i GetWorkspaceAgentStatsAndLabelsRow
		if err := rows.Scan(
			&i.Username,
			&i.AgentName,
			&i.WorkspaceName,
			&i.RxBytes,
			&i.TxBytes,
			&i.SessionCountVSCode,
			&i.SessionCountSSH,
			&i.SessionCountJetBrains,
			&i.SessionCountReconnectingPTY,
			&i.ConnectionCount,
			&i.ConnectionMedianLatencyMS,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getWorkspaceAgentUsageStats is the usage-flag-aware variant of
// getWorkspaceAgentStats: session counts come from the latest complete
// per-minute bucket of rows where usage = true, LEFT JOINed so agents with
// no usage rows still appear with zeroed session counts.
//
// NOTE(review): the GROUP BY in minute_buckets lists agent_id twice (plus
// user_id/workspace_id/template_id that are not selected); harmless in
// Postgres but worth cleaning up in the source .sql query.
const getWorkspaceAgentUsageStats = `-- name: GetWorkspaceAgentUsageStats :many
WITH agent_stats AS (
	SELECT
		user_id,
		agent_id,
		workspace_id,
		template_id,
		MIN(created_at)::timestamptz AS aggregated_from,
		coalesce(SUM(rx_bytes), 0)::bigint AS workspace_rx_bytes,
		coalesce(SUM(tx_bytes), 0)::bigint AS workspace_tx_bytes,
		coalesce((PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY connection_median_latency_ms)), -1)::FLOAT AS workspace_connection_latency_50,
		coalesce((PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY connection_median_latency_ms)), -1)::FLOAT AS workspace_connection_latency_95
	FROM workspace_agent_stats
	-- The greater than 0 is to support legacy agents that don't report connection_median_latency_ms.
	WHERE workspace_agent_stats.created_at > $1 AND connection_median_latency_ms > 0
	GROUP BY user_id, agent_id, workspace_id, template_id
),
minute_buckets AS (
	SELECT
		agent_id,
		date_trunc('minute', created_at) AS minute_bucket,
		coalesce(SUM(session_count_vscode), 0)::bigint AS session_count_vscode,
		coalesce(SUM(session_count_ssh), 0)::bigint AS session_count_ssh,
		coalesce(SUM(session_count_jetbrains), 0)::bigint AS session_count_jetbrains,
		coalesce(SUM(session_count_reconnecting_pty), 0)::bigint AS session_count_reconnecting_pty
	FROM
		workspace_agent_stats
	WHERE
		created_at >= $1
		AND created_at < date_trunc('minute', now()) -- Exclude current partial minute
		AND usage = true
	GROUP BY
		agent_id,
		minute_bucket,
		user_id,
		agent_id,
		workspace_id,
		template_id
),
latest_buckets AS (
	SELECT DISTINCT ON (agent_id)
		agent_id,
		session_count_vscode,
		session_count_ssh,
		session_count_jetbrains,
		session_count_reconnecting_pty
	FROM
		minute_buckets
	ORDER BY
		agent_id,
		minute_bucket DESC
)
SELECT user_id,
	agent_stats.agent_id,
	workspace_id,
	template_id,
	aggregated_from,
	workspace_rx_bytes,
	workspace_tx_bytes,
	workspace_connection_latency_50,
	workspace_connection_latency_95,
	coalesce(latest_buckets.agent_id,agent_stats.agent_id) AS agent_id,
	coalesce(session_count_vscode, 0)::bigint AS session_count_vscode,
	coalesce(session_count_ssh, 0)::bigint AS session_count_ssh,
	coalesce(session_count_jetbrains, 0)::bigint AS session_count_jetbrains,
	coalesce(session_count_reconnecting_pty, 0)::bigint AS session_count_reconnecting_pty
FROM agent_stats LEFT JOIN latest_buckets ON agent_stats.agent_id = latest_buckets.agent_id
`

// GetWorkspaceAgentUsageStatsRow is one per-agent aggregate; AgentID_2 is the
// coalesced agent_id column from the latest_buckets LEFT JOIN.
type GetWorkspaceAgentUsageStatsRow struct {
	UserID                       uuid.UUID `db:"user_id" json:"user_id"`
	AgentID                      uuid.UUID `db:"agent_id" json:"agent_id"`
	WorkspaceID                  uuid.UUID `db:"workspace_id" json:"workspace_id"`
	TemplateID                   uuid.UUID `db:"template_id" json:"template_id"`
	AggregatedFrom               time.Time `db:"aggregated_from" json:"aggregated_from"`
	WorkspaceRxBytes             int64     `db:"workspace_rx_bytes" json:"workspace_rx_bytes"`
	WorkspaceTxBytes             int64     `db:"workspace_tx_bytes" json:"workspace_tx_bytes"`
	WorkspaceConnectionLatency50 float64   `db:"workspace_connection_latency_50" json:"workspace_connection_latency_50"`
	WorkspaceConnectionLatency95 float64   `db:"workspace_connection_latency_95" json:"workspace_connection_latency_95"`
	AgentID_2                    uuid.UUID `db:"agent_id_2" json:"agent_id_2"`
	SessionCountVSCode           int64     `db:"session_count_vscode" json:"session_count_vscode"`
	SessionCountSSH              int64     `db:"session_count_ssh" json:"session_count_ssh"`
	SessionCountJetBrains        int64     `db:"session_count_jetbrains" json:"session_count_jetbrains"`
	SessionCountReconnectingPTY  int64     `db:"session_count_reconnecting_pty" json:"session_count_reconnecting_pty"`
}

// `minute_buckets` could return 0 rows if there are no usage stats since `created_at`.
//
// NOTE: sqlc-generated; change the source .sql query and regenerate rather
// than editing this method by hand.
func (q *sqlQuerier) GetWorkspaceAgentUsageStats(ctx context.Context, createdAt time.Time) ([]GetWorkspaceAgentUsageStatsRow, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceAgentUsageStats, createdAt)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetWorkspaceAgentUsageStatsRow
	for rows.Next() {
		var i GetWorkspaceAgentUsageStatsRow
		if err := rows.Scan(
			&i.UserID,
			&i.AgentID,
			&i.WorkspaceID,
			&i.TemplateID,
			&i.AggregatedFrom,
			&i.WorkspaceRxBytes,
			&i.WorkspaceTxBytes,
			&i.WorkspaceConnectionLatency50,
			&i.WorkspaceConnectionLatency95,
			&i.AgentID_2,
			&i.SessionCountVSCode,
			&i.SessionCountSSH,
			&i.SessionCountJetBrains,
			&i.SessionCountReconnectingPTY,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getWorkspaceAgentUsageStatsAndLabels joins usage-flag session counts from
// the last minute (LEFT JOIN, so agents without recent usage still appear
// with zeros) with labels from users/workspace_agents/workspaces.
const getWorkspaceAgentUsageStatsAndLabels = `-- name: GetWorkspaceAgentUsageStatsAndLabels :many
WITH agent_stats AS (
	SELECT
		user_id,
		agent_id,
		workspace_id,
		coalesce(SUM(rx_bytes), 0)::bigint AS rx_bytes,
		coalesce(SUM(tx_bytes), 0)::bigint AS tx_bytes,
		coalesce(MAX(connection_median_latency_ms), 0)::float AS connection_median_latency_ms
	FROM workspace_agent_stats
	-- The greater than 0 is to support legacy agents that don't report connection_median_latency_ms.
	WHERE workspace_agent_stats.created_at > $1 AND connection_median_latency_ms > 0
	GROUP BY user_id, agent_id, workspace_id
), latest_agent_stats AS (
	SELECT
		agent_id,
		coalesce(SUM(session_count_vscode), 0)::bigint AS session_count_vscode,
		coalesce(SUM(session_count_ssh), 0)::bigint AS session_count_ssh,
		coalesce(SUM(session_count_jetbrains), 0)::bigint AS session_count_jetbrains,
		coalesce(SUM(session_count_reconnecting_pty), 0)::bigint AS session_count_reconnecting_pty,
		coalesce(SUM(connection_count), 0)::bigint AS connection_count
	FROM workspace_agent_stats
	-- We only want the latest stats, but those stats might be
	-- spread across multiple rows.
	WHERE usage = true AND created_at > now() - '1 minute'::interval
	GROUP BY user_id, agent_id, workspace_id
)
SELECT
	users.username, workspace_agents.name AS agent_name, workspaces.name AS workspace_name, rx_bytes, tx_bytes,
	coalesce(session_count_vscode, 0)::bigint AS session_count_vscode,
	coalesce(session_count_ssh, 0)::bigint AS session_count_ssh,
	coalesce(session_count_jetbrains, 0)::bigint AS session_count_jetbrains,
	coalesce(session_count_reconnecting_pty, 0)::bigint AS session_count_reconnecting_pty,
	coalesce(connection_count, 0)::bigint AS connection_count,
	connection_median_latency_ms
FROM
	agent_stats
LEFT JOIN
	latest_agent_stats
ON
	agent_stats.agent_id = latest_agent_stats.agent_id
JOIN
	users
ON
	users.id = agent_stats.user_id
JOIN
	workspace_agents
ON
	workspace_agents.id = agent_stats.agent_id
JOIN
	workspaces
ON
	workspaces.id = agent_stats.workspace_id
`

// GetWorkspaceAgentUsageStatsAndLabelsRow is one labeled per-agent aggregate.
type GetWorkspaceAgentUsageStatsAndLabelsRow struct {
	Username                    string  `db:"username" json:"username"`
	AgentName                   string  `db:"agent_name" json:"agent_name"`
	WorkspaceName               string  `db:"workspace_name" json:"workspace_name"`
	RxBytes                     int64   `db:"rx_bytes" json:"rx_bytes"`
	TxBytes                     int64   `db:"tx_bytes" json:"tx_bytes"`
	SessionCountVSCode          int64   `db:"session_count_vscode" json:"session_count_vscode"`
	SessionCountSSH             int64   `db:"session_count_ssh" json:"session_count_ssh"`
	SessionCountJetBrains       int64   `db:"session_count_jetbrains" json:"session_count_jetbrains"`
	SessionCountReconnectingPTY int64   `db:"session_count_reconnecting_pty" json:"session_count_reconnecting_pty"`
	ConnectionCount             int64   `db:"connection_count" json:"connection_count"`
	ConnectionMedianLatencyMS   float64 `db:"connection_median_latency_ms" json:"connection_median_latency_ms"`
}

// GetWorkspaceAgentUsageStatsAndLabels returns labeled, usage-flag-aware
// per-agent aggregates for records created after createdAt.
//
// NOTE: sqlc-generated; change the source .sql query and regenerate rather
// than editing this method by hand.
func (q *sqlQuerier) GetWorkspaceAgentUsageStatsAndLabels(ctx context.Context, createdAt time.Time) ([]GetWorkspaceAgentUsageStatsAndLabelsRow, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceAgentUsageStatsAndLabels, createdAt)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetWorkspaceAgentUsageStatsAndLabelsRow
	for rows.Next() {
		var i GetWorkspaceAgentUsageStatsAndLabelsRow
		if err := rows.Scan(
			&i.Username,
			&i.AgentName,
			&i.WorkspaceName,
			&i.RxBytes,
			&i.TxBytes,
			&i.SessionCountVSCode,
			&i.SessionCountSSH,
			&i.SessionCountJetBrains,
			&i.SessionCountReconnectingPTY,
			&i.ConnectionCount,
			&i.ConnectionMedianLatencyMS,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// insertWorkspaceAgentStats bulk-inserts stats rows by unnesting parallel
// arrays (one element per row); connections_by_proto arrives as a jsonb
// array expanded with jsonb_array_elements.
const insertWorkspaceAgentStats = `-- name: InsertWorkspaceAgentStats :exec
INSERT INTO
	workspace_agent_stats (
		id,
		created_at,
		user_id,
		workspace_id,
		template_id,
		agent_id,
		connections_by_proto,
		connection_count,
		rx_packets,
		rx_bytes,
		tx_packets,
		tx_bytes,
		session_count_vscode,
		session_count_jetbrains,
		session_count_reconnecting_pty,
		session_count_ssh,
		connection_median_latency_ms,
		usage
	)
SELECT
	unnest($1 :: uuid[]) AS id,
	unnest($2 :: timestamptz[]) AS created_at,
	unnest($3 :: uuid[]) AS user_id,
	unnest($4 :: uuid[]) AS workspace_id,
	unnest($5 :: uuid[]) AS template_id,
	unnest($6 :: uuid[]) AS agent_id,
	jsonb_array_elements($7 :: jsonb) AS connections_by_proto,
	unnest($8 :: bigint[]) AS connection_count,
	unnest($9 :: bigint[]) AS rx_packets,
	unnest($10 :: bigint[]) AS rx_bytes,
	unnest($11 :: bigint[]) AS tx_packets,
	unnest($12 :: bigint[]) AS tx_bytes,
	unnest($13 :: bigint[]) AS session_count_vscode,
	unnest($14 :: bigint[]) AS session_count_jetbrains,
	unnest($15 :: bigint[]) AS session_count_reconnecting_pty,
	unnest($16 :: bigint[]) AS session_count_ssh,
	unnest($17 :: double precision[]) AS connection_median_latency_ms,
	unnest($18 :: boolean[]) AS usage
`

// InsertWorkspaceAgentStatsParams carries parallel slices; all slices (and
// the ConnectionsByProto jsonb array) must have the same element count.
type InsertWorkspaceAgentStatsParams struct {
	ID                          []uuid.UUID     `db:"id" json:"id"`
	CreatedAt                   []time.Time     `db:"created_at" json:"created_at"`
	UserID                      []uuid.UUID     `db:"user_id" json:"user_id"`
	WorkspaceID                 []uuid.UUID     `db:"workspace_id" json:"workspace_id"`
	TemplateID                  []uuid.UUID     `db:"template_id" json:"template_id"`
	AgentID                     []uuid.UUID     `db:"agent_id" json:"agent_id"`
	ConnectionsByProto          json.RawMessage `db:"connections_by_proto" json:"connections_by_proto"`
	ConnectionCount             []int64         `db:"connection_count" json:"connection_count"`
	RxPackets                   []int64         `db:"rx_packets" json:"rx_packets"`
	RxBytes                     []int64         `db:"rx_bytes" json:"rx_bytes"`
	TxPackets                   []int64         `db:"tx_packets" json:"tx_packets"`
	TxBytes                     []int64         `db:"tx_bytes" json:"tx_bytes"`
	SessionCountVSCode          []int64         `db:"session_count_vscode" json:"session_count_vscode"`
	SessionCountJetBrains       []int64         `db:"session_count_jetbrains" json:"session_count_jetbrains"`
	SessionCountReconnectingPTY []int64         `db:"session_count_reconnecting_pty" json:"session_count_reconnecting_pty"`
	SessionCountSSH             []int64         `db:"session_count_ssh" json:"session_count_ssh"`
	ConnectionMedianLatencyMS   []float64       `db:"connection_median_latency_ms" json:"connection_median_latency_ms"`
	Usage                       []bool          `db:"usage" json:"usage"`
}

// InsertWorkspaceAgentStats bulk-inserts agent stat rows in a single round
// trip; slices are marshaled as Postgres arrays via pq.Array.
//
// NOTE: sqlc-generated; change the source .sql query and regenerate rather
// than editing this method by hand.
func (q *sqlQuerier) InsertWorkspaceAgentStats(ctx context.Context, arg InsertWorkspaceAgentStatsParams) error {
	_, err := q.db.ExecContext(ctx, insertWorkspaceAgentStats,
		pq.Array(arg.ID),
		pq.Array(arg.CreatedAt),
		pq.Array(arg.UserID),
		pq.Array(arg.WorkspaceID),
		pq.Array(arg.TemplateID),
		pq.Array(arg.AgentID),
		arg.ConnectionsByProto,
		pq.Array(arg.ConnectionCount),
		pq.Array(arg.RxPackets),
		pq.Array(arg.RxBytes),
		pq.Array(arg.TxPackets),
		pq.Array(arg.TxBytes),
		pq.Array(arg.SessionCountVSCode),
		pq.Array(arg.SessionCountJetBrains),
		pq.Array(arg.SessionCountReconnectingPTY),
		pq.Array(arg.SessionCountSSH),
		pq.Array(arg.ConnectionMedianLatencyMS),
		pq.Array(arg.Usage),
	)
	return err
}
|
|
|
|
// upsertWorkspaceAppAuditSession inserts or refreshes an app audit session.
// On conflict, id/started_at are kept if the existing session was updated
// within the stale interval ($11, in ms), otherwise replaced; the RETURNING
// clause reports whether the caller's id won (new or stale session).
const upsertWorkspaceAppAuditSession = `-- name: UpsertWorkspaceAppAuditSession :one
INSERT INTO
	workspace_app_audit_sessions (
		id,
		agent_id,
		app_id,
		user_id,
		ip,
		user_agent,
		slug_or_port,
		status_code,
		started_at,
		updated_at
	)
VALUES
	(
		$1,
		$2,
		$3,
		$4,
		$5,
		$6,
		$7,
		$8,
		$9,
		$10
	)
ON CONFLICT
	(agent_id, app_id, user_id, ip, user_agent, slug_or_port, status_code)
DO
	UPDATE
	SET
		-- ID is used to know if session was reset on upsert.
		id = CASE
			WHEN workspace_app_audit_sessions.updated_at > NOW() - ($11::bigint || ' ms')::interval
			THEN workspace_app_audit_sessions.id
			ELSE EXCLUDED.id
		END,
		started_at = CASE
			WHEN workspace_app_audit_sessions.updated_at > NOW() - ($11::bigint || ' ms')::interval
			THEN workspace_app_audit_sessions.started_at
			ELSE EXCLUDED.started_at
		END,
		updated_at = EXCLUDED.updated_at
RETURNING
	id = $1 AS new_or_stale
`

// UpsertWorkspaceAppAuditSessionParams are the inputs to
// UpsertWorkspaceAppAuditSession; StaleIntervalMS is how long (ms) since the
// last update a session stays "fresh" before being reset.
type UpsertWorkspaceAppAuditSessionParams struct {
	ID              uuid.UUID `db:"id" json:"id"`
	AgentID         uuid.UUID `db:"agent_id" json:"agent_id"`
	AppID           uuid.UUID `db:"app_id" json:"app_id"`
	UserID          uuid.UUID `db:"user_id" json:"user_id"`
	Ip              string    `db:"ip" json:"ip"`
	UserAgent       string    `db:"user_agent" json:"user_agent"`
	SlugOrPort      string    `db:"slug_or_port" json:"slug_or_port"`
	StatusCode      int32     `db:"status_code" json:"status_code"`
	StartedAt       time.Time `db:"started_at" json:"started_at"`
	UpdatedAt       time.Time `db:"updated_at" json:"updated_at"`
	StaleIntervalMS int64     `db:"stale_interval_ms" json:"stale_interval_ms"`
}

// The returned boolean, new_or_stale, can be used to deduce if a new session
// was started. This means that a new row was inserted (no previous session) or
// the updated_at is older than stale interval.
//
// NOTE: sqlc-generated; change the source .sql query and regenerate rather
// than editing this method by hand.
func (q *sqlQuerier) UpsertWorkspaceAppAuditSession(ctx context.Context, arg UpsertWorkspaceAppAuditSessionParams) (bool, error) {
	row := q.db.QueryRowContext(ctx, upsertWorkspaceAppAuditSession,
		arg.ID,
		arg.AgentID,
		arg.AppID,
		arg.UserID,
		arg.Ip,
		arg.UserAgent,
		arg.SlugOrPort,
		arg.StatusCode,
		arg.StartedAt,
		arg.UpdatedAt,
		arg.StaleIntervalMS,
	)
	var new_or_stale bool
	err := row.Scan(&new_or_stale)
	return new_or_stale, err
}
|
|
|
|
// getLatestWorkspaceAppStatusesByWorkspaceIDs selects the newest app status
// per workspace (DISTINCT ON + created_at DESC) for the given workspace IDs.
const getLatestWorkspaceAppStatusesByWorkspaceIDs = `-- name: GetLatestWorkspaceAppStatusesByWorkspaceIDs :many
SELECT DISTINCT ON (workspace_id)
	id, created_at, agent_id, app_id, workspace_id, state, message, uri
FROM workspace_app_statuses
WHERE workspace_id = ANY($1 :: uuid[])
ORDER BY workspace_id, created_at DESC
`

// GetLatestWorkspaceAppStatusesByWorkspaceIDs returns at most one (the most
// recent) WorkspaceAppStatus per workspace ID in ids.
//
// NOTE: sqlc-generated; change the source .sql query and regenerate rather
// than editing this method by hand.
func (q *sqlQuerier) GetLatestWorkspaceAppStatusesByWorkspaceIDs(ctx context.Context, ids []uuid.UUID) ([]WorkspaceAppStatus, error) {
	rows, err := q.db.QueryContext(ctx, getLatestWorkspaceAppStatusesByWorkspaceIDs, pq.Array(ids))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceAppStatus
	for rows.Next() {
		var i WorkspaceAppStatus
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.AgentID,
			&i.AppID,
			&i.WorkspaceID,
			&i.State,
			&i.Message,
			&i.Uri,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getWorkspaceAppByAgentIDAndSlug fetches a single app by its agent + slug
// (unique per agent).
const getWorkspaceAppByAgentIDAndSlug = `-- name: GetWorkspaceAppByAgentIDAndSlug :one
SELECT id, created_at, agent_id, display_name, icon, command, url, healthcheck_url, healthcheck_interval, healthcheck_threshold, health, subdomain, sharing_level, slug, external, display_order, hidden, open_in, display_group FROM workspace_apps WHERE agent_id = $1 AND slug = $2
`

// GetWorkspaceAppByAgentIDAndSlugParams are the inputs to
// GetWorkspaceAppByAgentIDAndSlug.
type GetWorkspaceAppByAgentIDAndSlugParams struct {
	AgentID uuid.UUID `db:"agent_id" json:"agent_id"`
	Slug    string    `db:"slug" json:"slug"`
}

// GetWorkspaceAppByAgentIDAndSlug returns the workspace app identified by
// (agent ID, slug); sql.ErrNoRows when absent.
//
// NOTE: sqlc-generated; change the source .sql query and regenerate rather
// than editing this method by hand.
func (q *sqlQuerier) GetWorkspaceAppByAgentIDAndSlug(ctx context.Context, arg GetWorkspaceAppByAgentIDAndSlugParams) (WorkspaceApp, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceAppByAgentIDAndSlug, arg.AgentID, arg.Slug)
	var i WorkspaceApp
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.AgentID,
		&i.DisplayName,
		&i.Icon,
		&i.Command,
		&i.Url,
		&i.HealthcheckUrl,
		&i.HealthcheckInterval,
		&i.HealthcheckThreshold,
		&i.Health,
		&i.Subdomain,
		&i.SharingLevel,
		&i.Slug,
		&i.External,
		&i.DisplayOrder,
		&i.Hidden,
		&i.OpenIn,
		&i.DisplayGroup,
	)
	return i, err
}
|
|
|
|
// getWorkspaceAppStatusesByAppIDs fetches all statuses for any of the given
// app IDs (no ordering guaranteed).
const getWorkspaceAppStatusesByAppIDs = `-- name: GetWorkspaceAppStatusesByAppIDs :many
SELECT id, created_at, agent_id, app_id, workspace_id, state, message, uri FROM workspace_app_statuses WHERE app_id = ANY($1 :: uuid [ ])
`

// GetWorkspaceAppStatusesByAppIDs returns every WorkspaceAppStatus whose
// app_id is in ids.
//
// NOTE: sqlc-generated; change the source .sql query and regenerate rather
// than editing this method by hand.
func (q *sqlQuerier) GetWorkspaceAppStatusesByAppIDs(ctx context.Context, ids []uuid.UUID) ([]WorkspaceAppStatus, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceAppStatusesByAppIDs, pq.Array(ids))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceAppStatus
	for rows.Next() {
		var i WorkspaceAppStatus
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.AgentID,
			&i.AppID,
			&i.WorkspaceID,
			&i.State,
			&i.Message,
			&i.Uri,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getWorkspaceAppsByAgentID lists all apps for one agent, ordered by slug.
const getWorkspaceAppsByAgentID = `-- name: GetWorkspaceAppsByAgentID :many
SELECT id, created_at, agent_id, display_name, icon, command, url, healthcheck_url, healthcheck_interval, healthcheck_threshold, health, subdomain, sharing_level, slug, external, display_order, hidden, open_in, display_group FROM workspace_apps WHERE agent_id = $1 ORDER BY slug ASC
`

// GetWorkspaceAppsByAgentID returns every WorkspaceApp attached to agentID,
// ordered by slug ascending.
//
// NOTE: sqlc-generated; change the source .sql query and regenerate rather
// than editing this method by hand.
func (q *sqlQuerier) GetWorkspaceAppsByAgentID(ctx context.Context, agentID uuid.UUID) ([]WorkspaceApp, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceAppsByAgentID, agentID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceApp
	for rows.Next() {
		var i WorkspaceApp
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.AgentID,
			&i.DisplayName,
			&i.Icon,
			&i.Command,
			&i.Url,
			&i.HealthcheckUrl,
			&i.HealthcheckInterval,
			&i.HealthcheckThreshold,
			&i.Health,
			&i.Subdomain,
			&i.SharingLevel,
			&i.Slug,
			&i.External,
			&i.DisplayOrder,
			&i.Hidden,
			&i.OpenIn,
			&i.DisplayGroup,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getWorkspaceAppsByAgentIDs lists all apps for any of the given agents,
// ordered by slug.
const getWorkspaceAppsByAgentIDs = `-- name: GetWorkspaceAppsByAgentIDs :many
SELECT id, created_at, agent_id, display_name, icon, command, url, healthcheck_url, healthcheck_interval, healthcheck_threshold, health, subdomain, sharing_level, slug, external, display_order, hidden, open_in, display_group FROM workspace_apps WHERE agent_id = ANY($1 :: uuid [ ]) ORDER BY slug ASC
`

// GetWorkspaceAppsByAgentIDs returns every WorkspaceApp whose agent_id is in
// ids, ordered by slug ascending.
//
// NOTE: sqlc-generated; change the source .sql query and regenerate rather
// than editing this method by hand.
func (q *sqlQuerier) GetWorkspaceAppsByAgentIDs(ctx context.Context, ids []uuid.UUID) ([]WorkspaceApp, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceAppsByAgentIDs, pq.Array(ids))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceApp
	for rows.Next() {
		var i WorkspaceApp
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.AgentID,
			&i.DisplayName,
			&i.Icon,
			&i.Command,
			&i.Url,
			&i.HealthcheckUrl,
			&i.HealthcheckInterval,
			&i.HealthcheckThreshold,
			&i.Health,
			&i.Subdomain,
			&i.SharingLevel,
			&i.Slug,
			&i.External,
			&i.DisplayOrder,
			&i.Hidden,
			&i.OpenIn,
			&i.DisplayGroup,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getWorkspaceAppsCreatedAfter = `-- name: GetWorkspaceAppsCreatedAfter :many
SELECT id, created_at, agent_id, display_name, icon, command, url, healthcheck_url, healthcheck_interval, healthcheck_threshold, health, subdomain, sharing_level, slug, external, display_order, hidden, open_in, display_group FROM workspace_apps WHERE created_at > $1 ORDER BY slug ASC
`

// GetWorkspaceAppsCreatedAfter returns all workspace apps created strictly
// after the given timestamp, ordered by slug. The Scan argument order below
// must match the SELECT column order in the query constant exactly.
func (q *sqlQuerier) GetWorkspaceAppsCreatedAfter(ctx context.Context, createdAt time.Time) ([]WorkspaceApp, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceAppsCreatedAfter, createdAt)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceApp
	for rows.Next() {
		var i WorkspaceApp
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.AgentID,
			&i.DisplayName,
			&i.Icon,
			&i.Command,
			&i.Url,
			&i.HealthcheckUrl,
			&i.HealthcheckInterval,
			&i.HealthcheckThreshold,
			&i.Health,
			&i.Subdomain,
			&i.SharingLevel,
			&i.Slug,
			&i.External,
			&i.DisplayOrder,
			&i.Hidden,
			&i.OpenIn,
			&i.DisplayGroup,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close surfaces any error draining the result set; the deferred
	// Close above then becomes a no-op.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const insertWorkspaceAppStatus = `-- name: InsertWorkspaceAppStatus :one
INSERT INTO workspace_app_statuses (id, created_at, workspace_id, agent_id, app_id, state, message, uri)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
RETURNING id, created_at, agent_id, app_id, workspace_id, state, message, uri
`

// InsertWorkspaceAppStatusParams carries one row's worth of values for
// InsertWorkspaceAppStatus, in the same order as the query placeholders.
type InsertWorkspaceAppStatusParams struct {
	ID          uuid.UUID               `db:"id" json:"id"`
	CreatedAt   time.Time               `db:"created_at" json:"created_at"`
	WorkspaceID uuid.UUID               `db:"workspace_id" json:"workspace_id"`
	AgentID     uuid.UUID               `db:"agent_id" json:"agent_id"`
	AppID       uuid.UUID               `db:"app_id" json:"app_id"`
	State       WorkspaceAppStatusState `db:"state" json:"state"`
	Message     string                  `db:"message" json:"message"`
	// Uri is nullable in the schema, hence sql.NullString.
	Uri sql.NullString `db:"uri" json:"uri"`
}

// InsertWorkspaceAppStatus inserts one workspace app status row and returns
// the inserted row. Note the RETURNING column order (agent_id before
// workspace_id) differs from the INSERT column order; the Scan below follows
// the RETURNING order.
func (q *sqlQuerier) InsertWorkspaceAppStatus(ctx context.Context, arg InsertWorkspaceAppStatusParams) (WorkspaceAppStatus, error) {
	row := q.db.QueryRowContext(ctx, insertWorkspaceAppStatus,
		arg.ID,
		arg.CreatedAt,
		arg.WorkspaceID,
		arg.AgentID,
		arg.AppID,
		arg.State,
		arg.Message,
		arg.Uri,
	)
	var i WorkspaceAppStatus
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.AgentID,
		&i.AppID,
		&i.WorkspaceID,
		&i.State,
		&i.Message,
		&i.Uri,
	)
	return i, err
}
|
|
|
|
const updateWorkspaceAppHealthByID = `-- name: UpdateWorkspaceAppHealthByID :exec
UPDATE
	workspace_apps
SET
	health = $2
WHERE
	id = $1
`

// UpdateWorkspaceAppHealthByIDParams identifies the app to update ($1) and
// the new health value ($2).
type UpdateWorkspaceAppHealthByIDParams struct {
	ID     uuid.UUID          `db:"id" json:"id"`
	Health WorkspaceAppHealth `db:"health" json:"health"`
}

// UpdateWorkspaceAppHealthByID sets the health column of a single workspace
// app. It is an :exec query: no rows are returned and a zero-row update is
// not an error.
func (q *sqlQuerier) UpdateWorkspaceAppHealthByID(ctx context.Context, arg UpdateWorkspaceAppHealthByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateWorkspaceAppHealthByID, arg.ID, arg.Health)
	return err
}
|
|
|
|
const upsertWorkspaceApp = `-- name: UpsertWorkspaceApp :one
INSERT INTO
	workspace_apps (
		id,
		created_at,
		agent_id,
		slug,
		display_name,
		icon,
		command,
		url,
		external,
		subdomain,
		sharing_level,
		healthcheck_url,
		healthcheck_interval,
		healthcheck_threshold,
		health,
		display_order,
		hidden,
		open_in,
		display_group
	)
VALUES
	($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19)
ON CONFLICT (id) DO UPDATE SET
	display_name = EXCLUDED.display_name,
	icon = EXCLUDED.icon,
	command = EXCLUDED.command,
	url = EXCLUDED.url,
	external = EXCLUDED.external,
	subdomain = EXCLUDED.subdomain,
	sharing_level = EXCLUDED.sharing_level,
	healthcheck_url = EXCLUDED.healthcheck_url,
	healthcheck_interval = EXCLUDED.healthcheck_interval,
	healthcheck_threshold = EXCLUDED.healthcheck_threshold,
	health = EXCLUDED.health,
	display_order = EXCLUDED.display_order,
	hidden = EXCLUDED.hidden,
	open_in = EXCLUDED.open_in,
	display_group = EXCLUDED.display_group,
	agent_id = EXCLUDED.agent_id,
	slug = EXCLUDED.slug
RETURNING id, created_at, agent_id, display_name, icon, command, url, healthcheck_url, healthcheck_interval, healthcheck_threshold, health, subdomain, sharing_level, slug, external, display_order, hidden, open_in, display_group
`

// UpsertWorkspaceAppParams carries one workspace app's values for
// UpsertWorkspaceApp, in the same order as the query placeholders ($1..$19).
type UpsertWorkspaceAppParams struct {
	ID           uuid.UUID       `db:"id" json:"id"`
	CreatedAt    time.Time       `db:"created_at" json:"created_at"`
	AgentID      uuid.UUID       `db:"agent_id" json:"agent_id"`
	Slug         string          `db:"slug" json:"slug"`
	DisplayName  string          `db:"display_name" json:"display_name"`
	Icon         string          `db:"icon" json:"icon"`
	Command      sql.NullString  `db:"command" json:"command"`
	Url          sql.NullString  `db:"url" json:"url"`
	External     bool            `db:"external" json:"external"`
	Subdomain    bool            `db:"subdomain" json:"subdomain"`
	SharingLevel AppSharingLevel `db:"sharing_level" json:"sharing_level"`
	HealthcheckUrl       string             `db:"healthcheck_url" json:"healthcheck_url"`
	HealthcheckInterval  int32              `db:"healthcheck_interval" json:"healthcheck_interval"`
	HealthcheckThreshold int32              `db:"healthcheck_threshold" json:"healthcheck_threshold"`
	Health               WorkspaceAppHealth `db:"health" json:"health"`
	DisplayOrder         int32              `db:"display_order" json:"display_order"`
	Hidden               bool               `db:"hidden" json:"hidden"`
	OpenIn               WorkspaceAppOpenIn `db:"open_in" json:"open_in"`
	DisplayGroup         sql.NullString     `db:"display_group" json:"display_group"`
}

// UpsertWorkspaceApp inserts a workspace app, or — on an id conflict —
// replaces every mutable column with the new values, and returns the
// resulting row. The query-argument order follows the INSERT column list,
// while the Scan order follows the RETURNING column list; the two differ and
// must each match their SQL counterpart exactly.
func (q *sqlQuerier) UpsertWorkspaceApp(ctx context.Context, arg UpsertWorkspaceAppParams) (WorkspaceApp, error) {
	row := q.db.QueryRowContext(ctx, upsertWorkspaceApp,
		arg.ID,
		arg.CreatedAt,
		arg.AgentID,
		arg.Slug,
		arg.DisplayName,
		arg.Icon,
		arg.Command,
		arg.Url,
		arg.External,
		arg.Subdomain,
		arg.SharingLevel,
		arg.HealthcheckUrl,
		arg.HealthcheckInterval,
		arg.HealthcheckThreshold,
		arg.Health,
		arg.DisplayOrder,
		arg.Hidden,
		arg.OpenIn,
		arg.DisplayGroup,
	)
	var i WorkspaceApp
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.AgentID,
		&i.DisplayName,
		&i.Icon,
		&i.Command,
		&i.Url,
		&i.HealthcheckUrl,
		&i.HealthcheckInterval,
		&i.HealthcheckThreshold,
		&i.Health,
		&i.Subdomain,
		&i.SharingLevel,
		&i.Slug,
		&i.External,
		&i.DisplayOrder,
		&i.Hidden,
		&i.OpenIn,
		&i.DisplayGroup,
	)
	return i, err
}
|
|
|
|
const insertWorkspaceAppStats = `-- name: InsertWorkspaceAppStats :exec
INSERT INTO
	workspace_app_stats (
		user_id,
		workspace_id,
		agent_id,
		access_method,
		slug_or_port,
		session_id,
		session_started_at,
		session_ended_at,
		requests
	)
SELECT
	unnest($1::uuid[]) AS user_id,
	unnest($2::uuid[]) AS workspace_id,
	unnest($3::uuid[]) AS agent_id,
	unnest($4::text[]) AS access_method,
	unnest($5::text[]) AS slug_or_port,
	unnest($6::uuid[]) AS session_id,
	unnest($7::timestamptz[]) AS session_started_at,
	unnest($8::timestamptz[]) AS session_ended_at,
	unnest($9::int[]) AS requests
ON CONFLICT
	(user_id, agent_id, session_id)
DO
	UPDATE SET
	session_ended_at = EXCLUDED.session_ended_at,
	requests = EXCLUDED.requests
WHERE
	workspace_app_stats.user_id = EXCLUDED.user_id
	AND workspace_app_stats.agent_id = EXCLUDED.agent_id
	AND workspace_app_stats.session_id = EXCLUDED.session_id
	-- Since stats are updated in place as time progresses, we only
	-- want to update this row if it's fresh.
	AND workspace_app_stats.session_ended_at <= EXCLUDED.session_ended_at
	AND workspace_app_stats.requests <= EXCLUDED.requests
`

// InsertWorkspaceAppStatsParams holds parallel slices: element k of every
// slice together forms one stats row. All slices must be the same length for
// the unnest() calls in the query to zip correctly.
type InsertWorkspaceAppStatsParams struct {
	UserID           []uuid.UUID `db:"user_id" json:"user_id"`
	WorkspaceID      []uuid.UUID `db:"workspace_id" json:"workspace_id"`
	AgentID          []uuid.UUID `db:"agent_id" json:"agent_id"`
	AccessMethod     []string    `db:"access_method" json:"access_method"`
	SlugOrPort       []string    `db:"slug_or_port" json:"slug_or_port"`
	SessionID        []uuid.UUID `db:"session_id" json:"session_id"`
	SessionStartedAt []time.Time `db:"session_started_at" json:"session_started_at"`
	SessionEndedAt   []time.Time `db:"session_ended_at" json:"session_ended_at"`
	Requests         []int32     `db:"requests" json:"requests"`
}

// InsertWorkspaceAppStats bulk-upserts app session stats. On a
// (user_id, agent_id, session_id) conflict the row is only updated when the
// incoming data is at least as fresh (see the WHERE clause in the query).
// Slices are passed via pq.Array so they map to Postgres array parameters.
func (q *sqlQuerier) InsertWorkspaceAppStats(ctx context.Context, arg InsertWorkspaceAppStatsParams) error {
	_, err := q.db.ExecContext(ctx, insertWorkspaceAppStats,
		pq.Array(arg.UserID),
		pq.Array(arg.WorkspaceID),
		pq.Array(arg.AgentID),
		pq.Array(arg.AccessMethod),
		pq.Array(arg.SlugOrPort),
		pq.Array(arg.SessionID),
		pq.Array(arg.SessionStartedAt),
		pq.Array(arg.SessionEndedAt),
		pq.Array(arg.Requests),
	)
	return err
}
|
|
|
|
const getUserWorkspaceBuildParameters = `-- name: GetUserWorkspaceBuildParameters :many
SELECT name, value
FROM (
	SELECT DISTINCT ON (tvp.name)
		tvp.name,
		wbp.value,
		wb.created_at
	FROM
		workspace_build_parameters wbp
	JOIN
		workspace_builds wb ON wb.id = wbp.workspace_build_id
	JOIN
		workspaces w ON w.id = wb.workspace_id
	JOIN
		template_version_parameters tvp ON tvp.template_version_id = wb.template_version_id
	WHERE
		w.owner_id = $1
		AND wb.transition = 'start'
		AND w.template_id = $2
		AND tvp.ephemeral = false
		AND tvp.name = wbp.name
	ORDER BY
		tvp.name, wb.created_at DESC
) q1
ORDER BY created_at DESC, name
LIMIT 100
`

// GetUserWorkspaceBuildParametersParams selects whose parameters to fetch:
// builds of workspaces owned by OwnerID on template TemplateID.
type GetUserWorkspaceBuildParametersParams struct {
	OwnerID    uuid.UUID `db:"owner_id" json:"owner_id"`
	TemplateID uuid.UUID `db:"template_id" json:"template_id"`
}

// GetUserWorkspaceBuildParametersRow is one (name, value) parameter pair.
type GetUserWorkspaceBuildParametersRow struct {
	Name  string `db:"name" json:"name"`
	Value string `db:"value" json:"value"`
}

// GetUserWorkspaceBuildParameters returns, per parameter name, the most
// recently used non-ephemeral build parameter value across a user's start
// builds of the given template (capped at 100 rows by the query).
func (q *sqlQuerier) GetUserWorkspaceBuildParameters(ctx context.Context, arg GetUserWorkspaceBuildParametersParams) ([]GetUserWorkspaceBuildParametersRow, error) {
	rows, err := q.db.QueryContext(ctx, getUserWorkspaceBuildParameters, arg.OwnerID, arg.TemplateID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetUserWorkspaceBuildParametersRow
	for rows.Next() {
		var i GetUserWorkspaceBuildParametersRow
		if err := rows.Scan(&i.Name, &i.Value); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close surfaces any error draining the result set; the deferred
	// Close above then becomes a no-op.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getWorkspaceBuildParameters = `-- name: GetWorkspaceBuildParameters :many
SELECT
	workspace_build_id, name, value
FROM
	workspace_build_parameters
WHERE
	workspace_build_id = $1
`

// GetWorkspaceBuildParameters returns every (name, value) parameter recorded
// for a single workspace build.
func (q *sqlQuerier) GetWorkspaceBuildParameters(ctx context.Context, workspaceBuildID uuid.UUID) ([]WorkspaceBuildParameter, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceBuildParameters, workspaceBuildID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceBuildParameter
	for rows.Next() {
		var i WorkspaceBuildParameter
		if err := rows.Scan(&i.WorkspaceBuildID, &i.Name, &i.Value); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close surfaces any error draining the result set; the deferred
	// Close above then becomes a no-op.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const insertWorkspaceBuildParameters = `-- name: InsertWorkspaceBuildParameters :exec
INSERT INTO
	workspace_build_parameters (workspace_build_id, name, value)
SELECT
	$1 :: uuid AS workspace_build_id,
	unnest($2 :: text[]) AS name,
	unnest($3 :: text[]) AS value
RETURNING workspace_build_id, name, value
`

// InsertWorkspaceBuildParametersParams holds one build ID and parallel
// Name/Value slices; element k of each slice forms one parameter row, so the
// slices must be the same length.
type InsertWorkspaceBuildParametersParams struct {
	WorkspaceBuildID uuid.UUID `db:"workspace_build_id" json:"workspace_build_id"`
	Name             []string  `db:"name" json:"name"`
	Value            []string  `db:"value" json:"value"`
}

// InsertWorkspaceBuildParameters bulk-inserts the parameters of one build.
// Although the SQL has a RETURNING clause, this is an :exec query and the
// returned rows are discarded.
func (q *sqlQuerier) InsertWorkspaceBuildParameters(ctx context.Context, arg InsertWorkspaceBuildParametersParams) error {
	_, err := q.db.ExecContext(ctx, insertWorkspaceBuildParameters, arg.WorkspaceBuildID, pq.Array(arg.Name), pq.Array(arg.Value))
	return err
}
|
|
|
|
const getActiveWorkspaceBuildsByTemplateID = `-- name: GetActiveWorkspaceBuildsByTemplateID :many
SELECT wb.id, wb.created_at, wb.updated_at, wb.workspace_id, wb.template_version_id, wb.build_number, wb.transition, wb.initiator_id, wb.provisioner_state, wb.job_id, wb.deadline, wb.reason, wb.daily_cost, wb.max_deadline, wb.template_version_preset_id, wb.has_ai_task, wb.ai_tasks_sidebar_app_id, wb.initiator_by_avatar_url, wb.initiator_by_username, wb.initiator_by_name
FROM (
	SELECT
		workspace_id, MAX(build_number) as max_build_number
	FROM
		workspace_build_with_user AS workspace_builds
	WHERE
		workspace_id IN (
			SELECT
				id
			FROM
				workspaces
			WHERE
				template_id = $1
		)
	GROUP BY
		workspace_id
) m
JOIN
	workspace_build_with_user AS wb
ON m.workspace_id = wb.workspace_id AND m.max_build_number = wb.build_number
JOIN
	provisioner_jobs AS pj
ON wb.job_id = pj.id
WHERE
	wb.transition = 'start'::workspace_transition
AND
	pj.completed_at IS NOT NULL
`

// GetActiveWorkspaceBuildsByTemplateID returns, for each workspace of the
// given template, its latest build — but only when that build is a completed
// 'start' transition (i.e. the workspace is running). The Scan argument order
// must match the SELECT column order in the query constant exactly.
func (q *sqlQuerier) GetActiveWorkspaceBuildsByTemplateID(ctx context.Context, templateID uuid.UUID) ([]WorkspaceBuild, error) {
	rows, err := q.db.QueryContext(ctx, getActiveWorkspaceBuildsByTemplateID, templateID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceBuild
	for rows.Next() {
		var i WorkspaceBuild
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.WorkspaceID,
			&i.TemplateVersionID,
			&i.BuildNumber,
			&i.Transition,
			&i.InitiatorID,
			&i.ProvisionerState,
			&i.JobID,
			&i.Deadline,
			&i.Reason,
			&i.DailyCost,
			&i.MaxDeadline,
			&i.TemplateVersionPresetID,
			&i.HasAITask,
			&i.AITasksSidebarAppID,
			&i.InitiatorByAvatarUrl,
			&i.InitiatorByUsername,
			&i.InitiatorByName,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close surfaces any error draining the result set; the deferred
	// Close above then becomes a no-op.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getFailedWorkspaceBuildsByTemplateID = `-- name: GetFailedWorkspaceBuildsByTemplateID :many
SELECT
	tv.name AS template_version_name,
	u.username AS workspace_owner_username,
	w.name AS workspace_name,
	w.id AS workspace_id,
	wb.build_number AS workspace_build_number
FROM
	workspace_build_with_user AS wb
JOIN
	workspaces AS w
ON
	wb.workspace_id = w.id
JOIN
	users AS u
ON
	w.owner_id = u.id
JOIN
	provisioner_jobs AS pj
ON
	wb.job_id = pj.id
JOIN
	templates AS t
ON
	w.template_id = t.id
JOIN
	template_versions AS tv
ON
	wb.template_version_id = tv.id
WHERE
	w.template_id = $1
	AND wb.created_at >= $2
	AND pj.completed_at IS NOT NULL
	AND pj.job_status = 'failed'
ORDER BY
	tv.name ASC, wb.build_number DESC
`

// GetFailedWorkspaceBuildsByTemplateIDParams filters by template and a lower
// bound on the build's creation time.
type GetFailedWorkspaceBuildsByTemplateIDParams struct {
	TemplateID uuid.UUID `db:"template_id" json:"template_id"`
	Since      time.Time `db:"since" json:"since"`
}

// GetFailedWorkspaceBuildsByTemplateIDRow is one failed-build summary.
type GetFailedWorkspaceBuildsByTemplateIDRow struct {
	TemplateVersionName    string    `db:"template_version_name" json:"template_version_name"`
	WorkspaceOwnerUsername string    `db:"workspace_owner_username" json:"workspace_owner_username"`
	WorkspaceName          string    `db:"workspace_name" json:"workspace_name"`
	WorkspaceID            uuid.UUID `db:"workspace_id" json:"workspace_id"`
	WorkspaceBuildNumber   int32     `db:"workspace_build_number" json:"workspace_build_number"`
}

// GetFailedWorkspaceBuildsByTemplateID returns summaries of builds of the
// given template whose provisioner job completed with status 'failed' since
// the given time, ordered by template version name then newest build first.
func (q *sqlQuerier) GetFailedWorkspaceBuildsByTemplateID(ctx context.Context, arg GetFailedWorkspaceBuildsByTemplateIDParams) ([]GetFailedWorkspaceBuildsByTemplateIDRow, error) {
	rows, err := q.db.QueryContext(ctx, getFailedWorkspaceBuildsByTemplateID, arg.TemplateID, arg.Since)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetFailedWorkspaceBuildsByTemplateIDRow
	for rows.Next() {
		var i GetFailedWorkspaceBuildsByTemplateIDRow
		if err := rows.Scan(
			&i.TemplateVersionName,
			&i.WorkspaceOwnerUsername,
			&i.WorkspaceName,
			&i.WorkspaceID,
			&i.WorkspaceBuildNumber,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close surfaces any error draining the result set; the deferred
	// Close above then becomes a no-op.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getLatestWorkspaceBuildByWorkspaceID = `-- name: GetLatestWorkspaceBuildByWorkspaceID :one
SELECT
	id, created_at, updated_at, workspace_id, template_version_id, build_number, transition, initiator_id, provisioner_state, job_id, deadline, reason, daily_cost, max_deadline, template_version_preset_id, has_ai_task, ai_tasks_sidebar_app_id, initiator_by_avatar_url, initiator_by_username, initiator_by_name
FROM
	workspace_build_with_user AS workspace_builds
WHERE
	workspace_id = $1
ORDER BY
	build_number desc
LIMIT
	1
`

// GetLatestWorkspaceBuildByWorkspaceID returns the build with the highest
// build_number for the given workspace. The Scan argument order must match
// the SELECT column order in the query constant exactly.
func (q *sqlQuerier) GetLatestWorkspaceBuildByWorkspaceID(ctx context.Context, workspaceID uuid.UUID) (WorkspaceBuild, error) {
	row := q.db.QueryRowContext(ctx, getLatestWorkspaceBuildByWorkspaceID, workspaceID)
	var i WorkspaceBuild
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.WorkspaceID,
		&i.TemplateVersionID,
		&i.BuildNumber,
		&i.Transition,
		&i.InitiatorID,
		&i.ProvisionerState,
		&i.JobID,
		&i.Deadline,
		&i.Reason,
		&i.DailyCost,
		&i.MaxDeadline,
		&i.TemplateVersionPresetID,
		&i.HasAITask,
		&i.AITasksSidebarAppID,
		&i.InitiatorByAvatarUrl,
		&i.InitiatorByUsername,
		&i.InitiatorByName,
	)
	return i, err
}
|
|
|
|
const getLatestWorkspaceBuilds = `-- name: GetLatestWorkspaceBuilds :many
SELECT wb.id, wb.created_at, wb.updated_at, wb.workspace_id, wb.template_version_id, wb.build_number, wb.transition, wb.initiator_id, wb.provisioner_state, wb.job_id, wb.deadline, wb.reason, wb.daily_cost, wb.max_deadline, wb.template_version_preset_id, wb.has_ai_task, wb.ai_tasks_sidebar_app_id, wb.initiator_by_avatar_url, wb.initiator_by_username, wb.initiator_by_name
FROM (
	SELECT
		workspace_id, MAX(build_number) as max_build_number
	FROM
		workspace_build_with_user AS workspace_builds
	GROUP BY
		workspace_id
) m
JOIN
	workspace_build_with_user AS wb
ON m.workspace_id = wb.workspace_id AND m.max_build_number = wb.build_number
`

// GetLatestWorkspaceBuilds returns the highest-numbered build of every
// workspace in the deployment. The Scan argument order must match the SELECT
// column order in the query constant exactly.
func (q *sqlQuerier) GetLatestWorkspaceBuilds(ctx context.Context) ([]WorkspaceBuild, error) {
	rows, err := q.db.QueryContext(ctx, getLatestWorkspaceBuilds)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceBuild
	for rows.Next() {
		var i WorkspaceBuild
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.WorkspaceID,
			&i.TemplateVersionID,
			&i.BuildNumber,
			&i.Transition,
			&i.InitiatorID,
			&i.ProvisionerState,
			&i.JobID,
			&i.Deadline,
			&i.Reason,
			&i.DailyCost,
			&i.MaxDeadline,
			&i.TemplateVersionPresetID,
			&i.HasAITask,
			&i.AITasksSidebarAppID,
			&i.InitiatorByAvatarUrl,
			&i.InitiatorByUsername,
			&i.InitiatorByName,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close surfaces any error draining the result set; the deferred
	// Close above then becomes a no-op.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getLatestWorkspaceBuildsByWorkspaceIDs = `-- name: GetLatestWorkspaceBuildsByWorkspaceIDs :many
SELECT wb.id, wb.created_at, wb.updated_at, wb.workspace_id, wb.template_version_id, wb.build_number, wb.transition, wb.initiator_id, wb.provisioner_state, wb.job_id, wb.deadline, wb.reason, wb.daily_cost, wb.max_deadline, wb.template_version_preset_id, wb.has_ai_task, wb.ai_tasks_sidebar_app_id, wb.initiator_by_avatar_url, wb.initiator_by_username, wb.initiator_by_name
FROM (
	SELECT
		workspace_id, MAX(build_number) as max_build_number
	FROM
		workspace_build_with_user AS workspace_builds
	WHERE
		workspace_id = ANY($1 :: uuid [ ])
	GROUP BY
		workspace_id
) m
JOIN
	workspace_build_with_user AS wb
ON m.workspace_id = wb.workspace_id AND m.max_build_number = wb.build_number
`

// GetLatestWorkspaceBuildsByWorkspaceIDs returns the highest-numbered build
// of each of the given workspaces. Workspaces without any build simply have
// no row in the result. The Scan argument order must match the SELECT column
// order in the query constant exactly.
func (q *sqlQuerier) GetLatestWorkspaceBuildsByWorkspaceIDs(ctx context.Context, ids []uuid.UUID) ([]WorkspaceBuild, error) {
	rows, err := q.db.QueryContext(ctx, getLatestWorkspaceBuildsByWorkspaceIDs, pq.Array(ids))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceBuild
	for rows.Next() {
		var i WorkspaceBuild
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.WorkspaceID,
			&i.TemplateVersionID,
			&i.BuildNumber,
			&i.Transition,
			&i.InitiatorID,
			&i.ProvisionerState,
			&i.JobID,
			&i.Deadline,
			&i.Reason,
			&i.DailyCost,
			&i.MaxDeadline,
			&i.TemplateVersionPresetID,
			&i.HasAITask,
			&i.AITasksSidebarAppID,
			&i.InitiatorByAvatarUrl,
			&i.InitiatorByUsername,
			&i.InitiatorByName,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close surfaces any error draining the result set; the deferred
	// Close above then becomes a no-op.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getWorkspaceBuildByID = `-- name: GetWorkspaceBuildByID :one
SELECT
	id, created_at, updated_at, workspace_id, template_version_id, build_number, transition, initiator_id, provisioner_state, job_id, deadline, reason, daily_cost, max_deadline, template_version_preset_id, has_ai_task, ai_tasks_sidebar_app_id, initiator_by_avatar_url, initiator_by_username, initiator_by_name
FROM
	workspace_build_with_user AS workspace_builds
WHERE
	id = $1
LIMIT
	1
`

// GetWorkspaceBuildByID fetches a single workspace build by its primary key.
// The Scan argument order must match the SELECT column order in the query
// constant exactly.
func (q *sqlQuerier) GetWorkspaceBuildByID(ctx context.Context, id uuid.UUID) (WorkspaceBuild, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceBuildByID, id)
	var i WorkspaceBuild
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.WorkspaceID,
		&i.TemplateVersionID,
		&i.BuildNumber,
		&i.Transition,
		&i.InitiatorID,
		&i.ProvisionerState,
		&i.JobID,
		&i.Deadline,
		&i.Reason,
		&i.DailyCost,
		&i.MaxDeadline,
		&i.TemplateVersionPresetID,
		&i.HasAITask,
		&i.AITasksSidebarAppID,
		&i.InitiatorByAvatarUrl,
		&i.InitiatorByUsername,
		&i.InitiatorByName,
	)
	return i, err
}
|
|
|
|
const getWorkspaceBuildByJobID = `-- name: GetWorkspaceBuildByJobID :one
SELECT
	id, created_at, updated_at, workspace_id, template_version_id, build_number, transition, initiator_id, provisioner_state, job_id, deadline, reason, daily_cost, max_deadline, template_version_preset_id, has_ai_task, ai_tasks_sidebar_app_id, initiator_by_avatar_url, initiator_by_username, initiator_by_name
FROM
	workspace_build_with_user AS workspace_builds
WHERE
	job_id = $1
LIMIT
	1
`

// GetWorkspaceBuildByJobID fetches the workspace build associated with the
// given provisioner job. The Scan argument order must match the SELECT column
// order in the query constant exactly.
func (q *sqlQuerier) GetWorkspaceBuildByJobID(ctx context.Context, jobID uuid.UUID) (WorkspaceBuild, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceBuildByJobID, jobID)
	var i WorkspaceBuild
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.WorkspaceID,
		&i.TemplateVersionID,
		&i.BuildNumber,
		&i.Transition,
		&i.InitiatorID,
		&i.ProvisionerState,
		&i.JobID,
		&i.Deadline,
		&i.Reason,
		&i.DailyCost,
		&i.MaxDeadline,
		&i.TemplateVersionPresetID,
		&i.HasAITask,
		&i.AITasksSidebarAppID,
		&i.InitiatorByAvatarUrl,
		&i.InitiatorByUsername,
		&i.InitiatorByName,
	)
	return i, err
}
|
|
|
|
const getWorkspaceBuildByWorkspaceIDAndBuildNumber = `-- name: GetWorkspaceBuildByWorkspaceIDAndBuildNumber :one
SELECT
	id, created_at, updated_at, workspace_id, template_version_id, build_number, transition, initiator_id, provisioner_state, job_id, deadline, reason, daily_cost, max_deadline, template_version_preset_id, has_ai_task, ai_tasks_sidebar_app_id, initiator_by_avatar_url, initiator_by_username, initiator_by_name
FROM
	workspace_build_with_user AS workspace_builds
WHERE
	workspace_id = $1
	AND build_number = $2
`

// GetWorkspaceBuildByWorkspaceIDAndBuildNumberParams identifies one build by
// its workspace and sequential build number.
type GetWorkspaceBuildByWorkspaceIDAndBuildNumberParams struct {
	WorkspaceID uuid.UUID `db:"workspace_id" json:"workspace_id"`
	BuildNumber int32     `db:"build_number" json:"build_number"`
}

// GetWorkspaceBuildByWorkspaceIDAndBuildNumber fetches a single build by
// (workspace_id, build_number). The Scan argument order must match the SELECT
// column order in the query constant exactly.
func (q *sqlQuerier) GetWorkspaceBuildByWorkspaceIDAndBuildNumber(ctx context.Context, arg GetWorkspaceBuildByWorkspaceIDAndBuildNumberParams) (WorkspaceBuild, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceBuildByWorkspaceIDAndBuildNumber, arg.WorkspaceID, arg.BuildNumber)
	var i WorkspaceBuild
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.WorkspaceID,
		&i.TemplateVersionID,
		&i.BuildNumber,
		&i.Transition,
		&i.InitiatorID,
		&i.ProvisionerState,
		&i.JobID,
		&i.Deadline,
		&i.Reason,
		&i.DailyCost,
		&i.MaxDeadline,
		&i.TemplateVersionPresetID,
		&i.HasAITask,
		&i.AITasksSidebarAppID,
		&i.InitiatorByAvatarUrl,
		&i.InitiatorByUsername,
		&i.InitiatorByName,
	)
	return i, err
}
|
|
|
|
const getWorkspaceBuildStatsByTemplates = `-- name: GetWorkspaceBuildStatsByTemplates :many
SELECT
	w.template_id,
	t.name AS template_name,
	t.display_name AS template_display_name,
	t.organization_id AS template_organization_id,
	COUNT(*) AS total_builds,
	COUNT(CASE WHEN pj.job_status = 'failed' THEN 1 END) AS failed_builds
FROM
	workspace_build_with_user AS wb
JOIN
	workspaces AS w ON
	wb.workspace_id = w.id
JOIN
	provisioner_jobs AS pj ON
	wb.job_id = pj.id
JOIN
	templates AS t ON
	w.template_id = t.id
WHERE
	wb.created_at >= $1
	AND pj.completed_at IS NOT NULL
GROUP BY
	w.template_id, template_name, template_display_name, template_organization_id
ORDER BY
	template_name ASC
`

// GetWorkspaceBuildStatsByTemplatesRow is one per-template aggregate of
// completed builds since the cutoff: total count and how many failed.
type GetWorkspaceBuildStatsByTemplatesRow struct {
	TemplateID             uuid.UUID `db:"template_id" json:"template_id"`
	TemplateName           string    `db:"template_name" json:"template_name"`
	TemplateDisplayName    string    `db:"template_display_name" json:"template_display_name"`
	TemplateOrganizationID uuid.UUID `db:"template_organization_id" json:"template_organization_id"`
	TotalBuilds            int64     `db:"total_builds" json:"total_builds"`
	FailedBuilds           int64     `db:"failed_builds" json:"failed_builds"`
}

// GetWorkspaceBuildStatsByTemplates returns per-template build statistics for
// completed builds created at or after `since`, ordered by template name.
func (q *sqlQuerier) GetWorkspaceBuildStatsByTemplates(ctx context.Context, since time.Time) ([]GetWorkspaceBuildStatsByTemplatesRow, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceBuildStatsByTemplates, since)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetWorkspaceBuildStatsByTemplatesRow
	for rows.Next() {
		var i GetWorkspaceBuildStatsByTemplatesRow
		if err := rows.Scan(
			&i.TemplateID,
			&i.TemplateName,
			&i.TemplateDisplayName,
			&i.TemplateOrganizationID,
			&i.TotalBuilds,
			&i.FailedBuilds,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close surfaces any error draining the result set; the deferred
	// Close above then becomes a no-op.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getWorkspaceBuildsByWorkspaceID = `-- name: GetWorkspaceBuildsByWorkspaceID :many
SELECT
	id, created_at, updated_at, workspace_id, template_version_id, build_number, transition, initiator_id, provisioner_state, job_id, deadline, reason, daily_cost, max_deadline, template_version_preset_id, has_ai_task, ai_tasks_sidebar_app_id, initiator_by_avatar_url, initiator_by_username, initiator_by_name
FROM
	workspace_build_with_user AS workspace_builds
WHERE
	workspace_builds.workspace_id = $1
	AND workspace_builds.created_at > $2
	AND CASE
		-- This allows using the last element on a page as effectively a cursor.
		-- This is an important option for scripts that need to paginate without
		-- duplicating or missing data.
		WHEN $3 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN (
			-- The pagination cursor is the last ID of the previous page.
			-- The query is ordered by the build_number field, so select all
			-- rows after the cursor.
			build_number > (
				SELECT
					build_number
				FROM
					workspace_builds
				WHERE
					id = $3
			)
		)
		ELSE true
	END
ORDER BY
	build_number desc OFFSET $4
LIMIT
	-- A null limit means "no limit", so 0 means return all
	NULLIF($5 :: int, 0)
`

// GetWorkspaceBuildsByWorkspaceIDParams configures filtering and pagination:
// AfterID is a cursor (the zero UUID disables it), OffsetOpt/LimitOpt map to
// OFFSET/LIMIT with 0 meaning "no limit" for LimitOpt.
type GetWorkspaceBuildsByWorkspaceIDParams struct {
	WorkspaceID uuid.UUID `db:"workspace_id" json:"workspace_id"`
	Since       time.Time `db:"since" json:"since"`
	AfterID     uuid.UUID `db:"after_id" json:"after_id"`
	OffsetOpt   int32     `db:"offset_opt" json:"offset_opt"`
	LimitOpt    int32     `db:"limit_opt" json:"limit_opt"`
}

// GetWorkspaceBuildsByWorkspaceID returns a page of a workspace's builds,
// newest build_number first, optionally resuming after a cursor build ID.
// The Scan argument order must match the SELECT column order in the query
// constant exactly.
func (q *sqlQuerier) GetWorkspaceBuildsByWorkspaceID(ctx context.Context, arg GetWorkspaceBuildsByWorkspaceIDParams) ([]WorkspaceBuild, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceBuildsByWorkspaceID,
		arg.WorkspaceID,
		arg.Since,
		arg.AfterID,
		arg.OffsetOpt,
		arg.LimitOpt,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceBuild
	for rows.Next() {
		var i WorkspaceBuild
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.WorkspaceID,
			&i.TemplateVersionID,
			&i.BuildNumber,
			&i.Transition,
			&i.InitiatorID,
			&i.ProvisionerState,
			&i.JobID,
			&i.Deadline,
			&i.Reason,
			&i.DailyCost,
			&i.MaxDeadline,
			&i.TemplateVersionPresetID,
			&i.HasAITask,
			&i.AITasksSidebarAppID,
			&i.InitiatorByAvatarUrl,
			&i.InitiatorByUsername,
			&i.InitiatorByName,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close surfaces any error draining the result set; the deferred
	// Close above then becomes a no-op.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getWorkspaceBuildsCreatedAfter = `-- name: GetWorkspaceBuildsCreatedAfter :many
SELECT id, created_at, updated_at, workspace_id, template_version_id, build_number, transition, initiator_id, provisioner_state, job_id, deadline, reason, daily_cost, max_deadline, template_version_preset_id, has_ai_task, ai_tasks_sidebar_app_id, initiator_by_avatar_url, initiator_by_username, initiator_by_name FROM workspace_build_with_user WHERE created_at > $1
`

// GetWorkspaceBuildsCreatedAfter returns every workspace build created
// strictly after the given timestamp. The Scan argument order must match the
// SELECT column order in the query constant exactly.
func (q *sqlQuerier) GetWorkspaceBuildsCreatedAfter(ctx context.Context, createdAt time.Time) ([]WorkspaceBuild, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceBuildsCreatedAfter, createdAt)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceBuild
	for rows.Next() {
		var i WorkspaceBuild
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.WorkspaceID,
			&i.TemplateVersionID,
			&i.BuildNumber,
			&i.Transition,
			&i.InitiatorID,
			&i.ProvisionerState,
			&i.JobID,
			&i.Deadline,
			&i.Reason,
			&i.DailyCost,
			&i.MaxDeadline,
			&i.TemplateVersionPresetID,
			&i.HasAITask,
			&i.AITasksSidebarAppID,
			&i.InitiatorByAvatarUrl,
			&i.InitiatorByUsername,
			&i.InitiatorByName,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close surfaces any error draining the result set; the deferred
	// Close above then becomes a no-op.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// insertWorkspaceBuild inserts one row into workspace_builds; all column
// values are supplied by the caller (no defaults are relied upon here).
const insertWorkspaceBuild = `-- name: InsertWorkspaceBuild :exec
INSERT INTO
	workspace_builds (
		id,
		created_at,
		updated_at,
		workspace_id,
		template_version_id,
		"build_number",
		transition,
		initiator_id,
		job_id,
		provisioner_state,
		deadline,
		max_deadline,
		reason,
		template_version_preset_id,
		has_ai_task
	)
VALUES
	($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15)
`

// InsertWorkspaceBuildParams carries one value per inserted column, in the
// same order as the positional placeholders of insertWorkspaceBuild.
type InsertWorkspaceBuildParams struct {
	ID                      uuid.UUID           `db:"id" json:"id"`
	CreatedAt               time.Time           `db:"created_at" json:"created_at"`
	UpdatedAt               time.Time           `db:"updated_at" json:"updated_at"`
	WorkspaceID             uuid.UUID           `db:"workspace_id" json:"workspace_id"`
	TemplateVersionID       uuid.UUID           `db:"template_version_id" json:"template_version_id"`
	BuildNumber             int32               `db:"build_number" json:"build_number"`
	Transition              WorkspaceTransition `db:"transition" json:"transition"`
	InitiatorID             uuid.UUID           `db:"initiator_id" json:"initiator_id"`
	JobID                   uuid.UUID           `db:"job_id" json:"job_id"`
	ProvisionerState        []byte              `db:"provisioner_state" json:"provisioner_state"`
	Deadline                time.Time           `db:"deadline" json:"deadline"`
	MaxDeadline             time.Time           `db:"max_deadline" json:"max_deadline"`
	Reason                  BuildReason         `db:"reason" json:"reason"`
	// Nullable: not every build is created from a template version preset.
	TemplateVersionPresetID uuid.NullUUID       `db:"template_version_preset_id" json:"template_version_preset_id"`
	// Nullable boolean flag persisted to the has_ai_task column.
	HasAITask               sql.NullBool        `db:"has_ai_task" json:"has_ai_task"`
}

// InsertWorkspaceBuild inserts a workspace build row; it returns only the
// execution error (the query is :exec, nothing is read back).
func (q *sqlQuerier) InsertWorkspaceBuild(ctx context.Context, arg InsertWorkspaceBuildParams) error {
	_, err := q.db.ExecContext(ctx, insertWorkspaceBuild,
		arg.ID,
		arg.CreatedAt,
		arg.UpdatedAt,
		arg.WorkspaceID,
		arg.TemplateVersionID,
		arg.BuildNumber,
		arg.Transition,
		arg.InitiatorID,
		arg.JobID,
		arg.ProvisionerState,
		arg.Deadline,
		arg.MaxDeadline,
		arg.Reason,
		arg.TemplateVersionPresetID,
		arg.HasAITask,
	)
	return err
}
|
|
|
|
// updateWorkspaceBuildCostByID sets daily_cost on a single build row.
const updateWorkspaceBuildCostByID = `-- name: UpdateWorkspaceBuildCostByID :exec
UPDATE
	workspace_builds
SET
	daily_cost = $2
WHERE
	id = $1
`

// UpdateWorkspaceBuildCostByIDParams identifies the build and the new cost.
type UpdateWorkspaceBuildCostByIDParams struct {
	ID        uuid.UUID `db:"id" json:"id"`
	DailyCost int32     `db:"daily_cost" json:"daily_cost"`
}

// UpdateWorkspaceBuildCostByID updates the daily_cost of the build with the
// given ID. No error is returned for a non-matching ID (zero rows affected).
func (q *sqlQuerier) UpdateWorkspaceBuildCostByID(ctx context.Context, arg UpdateWorkspaceBuildCostByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateWorkspaceBuildCostByID, arg.ID, arg.DailyCost)
	return err
}
|
|
|
|
// updateWorkspaceBuildDeadlineByID rewrites deadline, max_deadline and
// updated_at on a single build row.
const updateWorkspaceBuildDeadlineByID = `-- name: UpdateWorkspaceBuildDeadlineByID :exec
UPDATE
	workspace_builds
SET
	deadline = $1::timestamptz,
	max_deadline = $2::timestamptz,
	updated_at = $3::timestamptz
WHERE id = $4::uuid
`

// UpdateWorkspaceBuildDeadlineByIDParams carries the new deadline values and
// the target build ID, in placeholder order.
type UpdateWorkspaceBuildDeadlineByIDParams struct {
	Deadline    time.Time `db:"deadline" json:"deadline"`
	MaxDeadline time.Time `db:"max_deadline" json:"max_deadline"`
	UpdatedAt   time.Time `db:"updated_at" json:"updated_at"`
	ID          uuid.UUID `db:"id" json:"id"`
}

// UpdateWorkspaceBuildDeadlineByID applies the deadline update; a
// non-matching ID results in zero rows affected, not an error.
func (q *sqlQuerier) UpdateWorkspaceBuildDeadlineByID(ctx context.Context, arg UpdateWorkspaceBuildDeadlineByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateWorkspaceBuildDeadlineByID,
		arg.Deadline,
		arg.MaxDeadline,
		arg.UpdatedAt,
		arg.ID,
	)
	return err
}
|
|
|
|
// updateWorkspaceBuildProvisionerStateByID replaces the opaque provisioner
// state blob (and bumps updated_at) on a single build row.
const updateWorkspaceBuildProvisionerStateByID = `-- name: UpdateWorkspaceBuildProvisionerStateByID :exec
UPDATE
	workspace_builds
SET
	provisioner_state = $1::bytea,
	updated_at = $2::timestamptz
WHERE id = $3::uuid
`

// UpdateWorkspaceBuildProvisionerStateByIDParams carries the new state blob,
// the updated_at timestamp, and the target build ID.
type UpdateWorkspaceBuildProvisionerStateByIDParams struct {
	ProvisionerState []byte    `db:"provisioner_state" json:"provisioner_state"`
	UpdatedAt        time.Time `db:"updated_at" json:"updated_at"`
	ID               uuid.UUID `db:"id" json:"id"`
}

// UpdateWorkspaceBuildProvisionerStateByID executes the state update; a
// non-matching ID affects zero rows and returns nil.
func (q *sqlQuerier) UpdateWorkspaceBuildProvisionerStateByID(ctx context.Context, arg UpdateWorkspaceBuildProvisionerStateByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateWorkspaceBuildProvisionerStateByID, arg.ProvisionerState, arg.UpdatedAt, arg.ID)
	return err
}
|
|
|
|
// getWorkspaceModulesByJobID selects all workspace_modules rows belonging to
// one provisioner job.
const getWorkspaceModulesByJobID = `-- name: GetWorkspaceModulesByJobID :many
SELECT
	id, job_id, transition, source, version, key, created_at
FROM
	workspace_modules
WHERE
	job_id = $1
`

// GetWorkspaceModulesByJobID returns every module recorded for the given
// provisioner job ID (empty slice semantics: nil when no rows match).
func (q *sqlQuerier) GetWorkspaceModulesByJobID(ctx context.Context, jobID uuid.UUID) ([]WorkspaceModule, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceModulesByJobID, jobID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceModule
	for rows.Next() {
		var i WorkspaceModule
		// Scan order must match the SELECT column order above.
		if err := rows.Scan(
			&i.ID,
			&i.JobID,
			&i.Transition,
			&i.Source,
			&i.Version,
			&i.Key,
			&i.CreatedAt,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close surfaces any error that the deferred Close would swallow.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getWorkspaceModulesCreatedAfter selects modules created strictly after the
// given timestamp.
const getWorkspaceModulesCreatedAfter = `-- name: GetWorkspaceModulesCreatedAfter :many
SELECT id, job_id, transition, source, version, key, created_at FROM workspace_modules WHERE created_at > $1
`

// GetWorkspaceModulesCreatedAfter returns all workspace modules with
// created_at strictly greater than createdAt.
func (q *sqlQuerier) GetWorkspaceModulesCreatedAfter(ctx context.Context, createdAt time.Time) ([]WorkspaceModule, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceModulesCreatedAfter, createdAt)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceModule
	for rows.Next() {
		var i WorkspaceModule
		// Scan order must match the SELECT column order above.
		if err := rows.Scan(
			&i.ID,
			&i.JobID,
			&i.Transition,
			&i.Source,
			&i.Version,
			&i.Key,
			&i.CreatedAt,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close surfaces any error that the deferred Close would swallow.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// insertWorkspaceModule inserts one workspace_modules row and returns the
// row as stored (RETURNING all columns).
const insertWorkspaceModule = `-- name: InsertWorkspaceModule :one
INSERT INTO
	workspace_modules (id, job_id, transition, source, version, key, created_at)
VALUES
	($1, $2, $3, $4, $5, $6, $7) RETURNING id, job_id, transition, source, version, key, created_at
`

// InsertWorkspaceModuleParams carries one value per inserted column, in
// placeholder order.
type InsertWorkspaceModuleParams struct {
	ID         uuid.UUID           `db:"id" json:"id"`
	JobID      uuid.UUID           `db:"job_id" json:"job_id"`
	Transition WorkspaceTransition `db:"transition" json:"transition"`
	Source     string              `db:"source" json:"source"`
	Version    string              `db:"version" json:"version"`
	Key        string              `db:"key" json:"key"`
	CreatedAt  time.Time           `db:"created_at" json:"created_at"`
}

// InsertWorkspaceModule inserts the module and returns the inserted row.
func (q *sqlQuerier) InsertWorkspaceModule(ctx context.Context, arg InsertWorkspaceModuleParams) (WorkspaceModule, error) {
	row := q.db.QueryRowContext(ctx, insertWorkspaceModule,
		arg.ID,
		arg.JobID,
		arg.Transition,
		arg.Source,
		arg.Version,
		arg.Key,
		arg.CreatedAt,
	)
	var i WorkspaceModule
	err := row.Scan(
		&i.ID,
		&i.JobID,
		&i.Transition,
		&i.Source,
		&i.Version,
		&i.Key,
		&i.CreatedAt,
	)
	return i, err
}
|
|
|
|
// getWorkspaceResourceByID selects a single workspace_resources row by
// primary key.
const getWorkspaceResourceByID = `-- name: GetWorkspaceResourceByID :one
SELECT
	id, created_at, job_id, transition, type, name, hide, icon, instance_type, daily_cost, module_path
FROM
	workspace_resources
WHERE
	id = $1
`

// GetWorkspaceResourceByID returns the resource with the given ID; the error
// is whatever row.Scan reports (e.g. sql.ErrNoRows when absent).
func (q *sqlQuerier) GetWorkspaceResourceByID(ctx context.Context, id uuid.UUID) (WorkspaceResource, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceResourceByID, id)
	var i WorkspaceResource
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.JobID,
		&i.Transition,
		&i.Type,
		&i.Name,
		&i.Hide,
		&i.Icon,
		&i.InstanceType,
		&i.DailyCost,
		&i.ModulePath,
	)
	return i, err
}
|
|
|
|
// getWorkspaceResourceMetadataByResourceIDs selects metadata rows for any of
// the given resource IDs, ordered by the metadata row ID.
const getWorkspaceResourceMetadataByResourceIDs = `-- name: GetWorkspaceResourceMetadataByResourceIDs :many
SELECT
	workspace_resource_id, key, value, sensitive, id
FROM
	workspace_resource_metadata
WHERE
	workspace_resource_id = ANY($1 :: uuid [ ]) ORDER BY id ASC
`

// GetWorkspaceResourceMetadataByResourceIDs returns metadata for all of the
// supplied resource IDs. The slice is passed as a Postgres uuid[] via
// pq.Array.
func (q *sqlQuerier) GetWorkspaceResourceMetadataByResourceIDs(ctx context.Context, ids []uuid.UUID) ([]WorkspaceResourceMetadatum, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceResourceMetadataByResourceIDs, pq.Array(ids))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceResourceMetadatum
	for rows.Next() {
		var i WorkspaceResourceMetadatum
		// Scan order must match the SELECT column order above.
		if err := rows.Scan(
			&i.WorkspaceResourceID,
			&i.Key,
			&i.Value,
			&i.Sensitive,
			&i.ID,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close surfaces any error that the deferred Close would swallow.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getWorkspaceResourceMetadataCreatedAfter selects metadata whose parent
// resource was created strictly after the given timestamp (metadata rows
// themselves carry no created_at column here).
const getWorkspaceResourceMetadataCreatedAfter = `-- name: GetWorkspaceResourceMetadataCreatedAfter :many
SELECT workspace_resource_id, key, value, sensitive, id FROM workspace_resource_metadata WHERE workspace_resource_id = ANY(
	SELECT id FROM workspace_resources WHERE created_at > $1
)
`

// GetWorkspaceResourceMetadataCreatedAfter returns metadata for resources
// created after createdAt.
func (q *sqlQuerier) GetWorkspaceResourceMetadataCreatedAfter(ctx context.Context, createdAt time.Time) ([]WorkspaceResourceMetadatum, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceResourceMetadataCreatedAfter, createdAt)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceResourceMetadatum
	for rows.Next() {
		var i WorkspaceResourceMetadatum
		// Scan order must match the SELECT column order above.
		if err := rows.Scan(
			&i.WorkspaceResourceID,
			&i.Key,
			&i.Value,
			&i.Sensitive,
			&i.ID,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close surfaces any error that the deferred Close would swallow.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getWorkspaceResourcesByJobID selects all resources produced by a single
// provisioner job.
const getWorkspaceResourcesByJobID = `-- name: GetWorkspaceResourcesByJobID :many
SELECT
	id, created_at, job_id, transition, type, name, hide, icon, instance_type, daily_cost, module_path
FROM
	workspace_resources
WHERE
	job_id = $1
`

// GetWorkspaceResourcesByJobID returns every resource recorded for the given
// provisioner job ID.
func (q *sqlQuerier) GetWorkspaceResourcesByJobID(ctx context.Context, jobID uuid.UUID) ([]WorkspaceResource, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceResourcesByJobID, jobID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceResource
	for rows.Next() {
		var i WorkspaceResource
		// Scan order must match the SELECT column order above.
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.JobID,
			&i.Transition,
			&i.Type,
			&i.Name,
			&i.Hide,
			&i.Icon,
			&i.InstanceType,
			&i.DailyCost,
			&i.ModulePath,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close surfaces any error that the deferred Close would swallow.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getWorkspaceResourcesByJobIDs is the batched variant of
// getWorkspaceResourcesByJobID, matching any of the supplied job IDs.
const getWorkspaceResourcesByJobIDs = `-- name: GetWorkspaceResourcesByJobIDs :many
SELECT
	id, created_at, job_id, transition, type, name, hide, icon, instance_type, daily_cost, module_path
FROM
	workspace_resources
WHERE
	job_id = ANY($1 :: uuid [ ])
`

// GetWorkspaceResourcesByJobIDs returns resources for all of the given
// provisioner job IDs. The slice is passed as a Postgres uuid[] via pq.Array.
func (q *sqlQuerier) GetWorkspaceResourcesByJobIDs(ctx context.Context, ids []uuid.UUID) ([]WorkspaceResource, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceResourcesByJobIDs, pq.Array(ids))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceResource
	for rows.Next() {
		var i WorkspaceResource
		// Scan order must match the SELECT column order above.
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.JobID,
			&i.Transition,
			&i.Type,
			&i.Name,
			&i.Hide,
			&i.Icon,
			&i.InstanceType,
			&i.DailyCost,
			&i.ModulePath,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close surfaces any error that the deferred Close would swallow.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getWorkspaceResourcesCreatedAfter selects resources created strictly after
// the given timestamp.
const getWorkspaceResourcesCreatedAfter = `-- name: GetWorkspaceResourcesCreatedAfter :many
SELECT id, created_at, job_id, transition, type, name, hide, icon, instance_type, daily_cost, module_path FROM workspace_resources WHERE created_at > $1
`

// GetWorkspaceResourcesCreatedAfter returns all resources with created_at
// strictly greater than createdAt.
func (q *sqlQuerier) GetWorkspaceResourcesCreatedAfter(ctx context.Context, createdAt time.Time) ([]WorkspaceResource, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceResourcesCreatedAfter, createdAt)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceResource
	for rows.Next() {
		var i WorkspaceResource
		// Scan order must match the SELECT column order above.
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.JobID,
			&i.Transition,
			&i.Type,
			&i.Name,
			&i.Hide,
			&i.Icon,
			&i.InstanceType,
			&i.DailyCost,
			&i.ModulePath,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close surfaces any error that the deferred Close would swallow.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// insertWorkspaceResource inserts one workspace_resources row and returns
// the row as stored (RETURNING all columns).
const insertWorkspaceResource = `-- name: InsertWorkspaceResource :one
INSERT INTO
	workspace_resources (id, created_at, job_id, transition, type, name, hide, icon, instance_type, daily_cost, module_path)
VALUES
	($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) RETURNING id, created_at, job_id, transition, type, name, hide, icon, instance_type, daily_cost, module_path
`

// InsertWorkspaceResourceParams carries one value per inserted column, in
// placeholder order.
type InsertWorkspaceResourceParams struct {
	ID           uuid.UUID           `db:"id" json:"id"`
	CreatedAt    time.Time           `db:"created_at" json:"created_at"`
	JobID        uuid.UUID           `db:"job_id" json:"job_id"`
	Transition   WorkspaceTransition `db:"transition" json:"transition"`
	Type         string              `db:"type" json:"type"`
	Name         string              `db:"name" json:"name"`
	Hide         bool                `db:"hide" json:"hide"`
	Icon         string              `db:"icon" json:"icon"`
	// Nullable columns use sql.NullString.
	InstanceType sql.NullString      `db:"instance_type" json:"instance_type"`
	DailyCost    int32               `db:"daily_cost" json:"daily_cost"`
	ModulePath   sql.NullString      `db:"module_path" json:"module_path"`
}

// InsertWorkspaceResource inserts the resource and returns the inserted row.
func (q *sqlQuerier) InsertWorkspaceResource(ctx context.Context, arg InsertWorkspaceResourceParams) (WorkspaceResource, error) {
	row := q.db.QueryRowContext(ctx, insertWorkspaceResource,
		arg.ID,
		arg.CreatedAt,
		arg.JobID,
		arg.Transition,
		arg.Type,
		arg.Name,
		arg.Hide,
		arg.Icon,
		arg.InstanceType,
		arg.DailyCost,
		arg.ModulePath,
	)
	var i WorkspaceResource
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.JobID,
		&i.Transition,
		&i.Type,
		&i.Name,
		&i.Hide,
		&i.Icon,
		&i.InstanceType,
		&i.DailyCost,
		&i.ModulePath,
	)
	return i, err
}
|
|
|
|
// insertWorkspaceResourceMetadata bulk-inserts metadata for one resource by
// unnesting three parallel arrays (key, value, sensitive) and returns all
// inserted rows.
const insertWorkspaceResourceMetadata = `-- name: InsertWorkspaceResourceMetadata :many
INSERT INTO
	workspace_resource_metadata
SELECT
	$1 :: uuid AS workspace_resource_id,
	unnest($2 :: text [ ]) AS key,
	unnest($3 :: text [ ]) AS value,
	unnest($4 :: boolean [ ]) AS sensitive RETURNING workspace_resource_id, key, value, sensitive, id
`

// InsertWorkspaceResourceMetadataParams holds the resource ID plus three
// parallel slices; Key[i], Value[i] and Sensitive[i] form one metadata row.
type InsertWorkspaceResourceMetadataParams struct {
	WorkspaceResourceID uuid.UUID `db:"workspace_resource_id" json:"workspace_resource_id"`
	Key                 []string  `db:"key" json:"key"`
	Value               []string  `db:"value" json:"value"`
	Sensitive           []bool    `db:"sensitive" json:"sensitive"`
}

// InsertWorkspaceResourceMetadata performs the bulk insert and returns the
// inserted metadata rows (including their generated IDs).
func (q *sqlQuerier) InsertWorkspaceResourceMetadata(ctx context.Context, arg InsertWorkspaceResourceMetadataParams) ([]WorkspaceResourceMetadatum, error) {
	rows, err := q.db.QueryContext(ctx, insertWorkspaceResourceMetadata,
		arg.WorkspaceResourceID,
		pq.Array(arg.Key),
		pq.Array(arg.Value),
		pq.Array(arg.Sensitive),
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceResourceMetadatum
	for rows.Next() {
		var i WorkspaceResourceMetadatum
		// Scan order must match the RETURNING column order above.
		if err := rows.Scan(
			&i.WorkspaceResourceID,
			&i.Key,
			&i.Value,
			&i.Sensitive,
			&i.ID,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close surfaces any error that the deferred Close would swallow.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// batchUpdateWorkspaceLastUsedAt bumps last_used_at for many workspaces at
// once; rows whose stored value is already >= the new value are skipped so
// newer usage data is never overwritten by an older batch.
const batchUpdateWorkspaceLastUsedAt = `-- name: BatchUpdateWorkspaceLastUsedAt :exec
UPDATE
	workspaces
SET
	last_used_at = $1
WHERE
	id = ANY($2 :: uuid[])
AND
	-- Do not overwrite with older data
	last_used_at < $1
`

// BatchUpdateWorkspaceLastUsedAtParams carries the shared timestamp and the
// set of workspace IDs to update.
type BatchUpdateWorkspaceLastUsedAtParams struct {
	LastUsedAt time.Time   `db:"last_used_at" json:"last_used_at"`
	IDs        []uuid.UUID `db:"ids" json:"ids"`
}

// BatchUpdateWorkspaceLastUsedAt executes the batched update; the ID slice
// is passed as a Postgres uuid[] via pq.Array.
func (q *sqlQuerier) BatchUpdateWorkspaceLastUsedAt(ctx context.Context, arg BatchUpdateWorkspaceLastUsedAtParams) error {
	_, err := q.db.ExecContext(ctx, batchUpdateWorkspaceLastUsedAt, arg.LastUsedAt, pq.Array(arg.IDs))
	return err
}
|
|
|
|
// batchUpdateWorkspaceNextStartAt updates next_start_at for many workspaces
// by unnesting parallel id/timestamp arrays. Go's zero time.Time arrives as
// '0001-01-01 00:00:00+00' and is translated to SQL NULL by the CASE.
const batchUpdateWorkspaceNextStartAt = `-- name: BatchUpdateWorkspaceNextStartAt :exec
UPDATE
	workspaces
SET
	next_start_at = CASE
		WHEN batch.next_start_at = '0001-01-01 00:00:00+00'::timestamptz THEN NULL
		ELSE batch.next_start_at
	END
FROM (
	SELECT
		unnest($1::uuid[]) AS id,
		unnest($2::timestamptz[]) AS next_start_at
) AS batch
WHERE
	workspaces.id = batch.id
`

// BatchUpdateWorkspaceNextStartAtParams holds parallel slices: IDs[i] is
// paired with NextStartAts[i]; a zero time means "clear next_start_at".
type BatchUpdateWorkspaceNextStartAtParams struct {
	IDs          []uuid.UUID `db:"ids" json:"ids"`
	NextStartAts []time.Time `db:"next_start_ats" json:"next_start_ats"`
}

// BatchUpdateWorkspaceNextStartAt executes the batched update; both slices
// are passed as Postgres arrays via pq.Array.
func (q *sqlQuerier) BatchUpdateWorkspaceNextStartAt(ctx context.Context, arg BatchUpdateWorkspaceNextStartAtParams) error {
	_, err := q.db.ExecContext(ctx, batchUpdateWorkspaceNextStartAt, pq.Array(arg.IDs), pq.Array(arg.NextStartAts))
	return err
}
|
|
|
|
// favoriteWorkspace flips the favorite flag on for a single workspace.
const favoriteWorkspace = `-- name: FavoriteWorkspace :exec
UPDATE workspaces SET favorite = true WHERE id = $1
`

// FavoriteWorkspace marks the workspace with the given ID as a favorite;
// a non-matching ID affects zero rows and returns nil.
func (q *sqlQuerier) FavoriteWorkspace(ctx context.Context, id uuid.UUID) error {
	_, err := q.db.ExecContext(ctx, favoriteWorkspace, id)
	return err
}
|
|
|
|
// getDeploymentWorkspaceStats computes deployment-wide workspace counts in a
// single round trip. Each non-deleted workspace is joined (LATERAL, LIMIT 1,
// highest build_number) with its latest build's provisioner job, then
// bucketed into pending / building / running / failed / stopped CTEs based on
// the job's started_at/canceled_at/completed_at/error and the build
// transition. The final SELECT cross-joins the single-row CTEs.
const getDeploymentWorkspaceStats = `-- name: GetDeploymentWorkspaceStats :one
WITH workspaces_with_jobs AS (
	SELECT
	latest_build.transition, latest_build.provisioner_job_id, latest_build.started_at, latest_build.updated_at, latest_build.canceled_at, latest_build.completed_at, latest_build.error FROM workspaces
	LEFT JOIN LATERAL (
		SELECT
			workspace_builds.transition,
			provisioner_jobs.id AS provisioner_job_id,
			provisioner_jobs.started_at,
			provisioner_jobs.updated_at,
			provisioner_jobs.canceled_at,
			provisioner_jobs.completed_at,
			provisioner_jobs.error
		FROM
			workspace_builds
		LEFT JOIN
			provisioner_jobs
		ON
			provisioner_jobs.id = workspace_builds.job_id
		WHERE
			workspace_builds.workspace_id = workspaces.id
		ORDER BY
			build_number DESC
		LIMIT
			1
	) latest_build ON TRUE WHERE deleted = false
), pending_workspaces AS (
	SELECT COUNT(*) AS count FROM workspaces_with_jobs WHERE
		started_at IS NULL
), building_workspaces AS (
	SELECT COUNT(*) AS count FROM workspaces_with_jobs WHERE
		started_at IS NOT NULL AND
		canceled_at IS NULL AND
		completed_at IS NULL AND
		updated_at - INTERVAL '30 seconds' < NOW()
), running_workspaces AS (
	SELECT COUNT(*) AS count FROM workspaces_with_jobs WHERE
		completed_at IS NOT NULL AND
		canceled_at IS NULL AND
		error IS NULL AND
		transition = 'start'::workspace_transition
), failed_workspaces AS (
	SELECT COUNT(*) AS count FROM workspaces_with_jobs WHERE
		(canceled_at IS NOT NULL AND
			error IS NOT NULL) OR
		(completed_at IS NOT NULL AND
			error IS NOT NULL)
), stopped_workspaces AS (
	SELECT COUNT(*) AS count FROM workspaces_with_jobs WHERE
		completed_at IS NOT NULL AND
		canceled_at IS NULL AND
		error IS NULL AND
		transition = 'stop'::workspace_transition
)
SELECT
	pending_workspaces.count AS pending_workspaces,
	building_workspaces.count AS building_workspaces,
	running_workspaces.count AS running_workspaces,
	failed_workspaces.count AS failed_workspaces,
	stopped_workspaces.count AS stopped_workspaces
FROM pending_workspaces, building_workspaces, running_workspaces, failed_workspaces, stopped_workspaces
`

// GetDeploymentWorkspaceStatsRow holds one counter per workspace bucket;
// buckets are not mutually exclusive definitions in SQL, see the query.
type GetDeploymentWorkspaceStatsRow struct {
	PendingWorkspaces  int64 `db:"pending_workspaces" json:"pending_workspaces"`
	BuildingWorkspaces int64 `db:"building_workspaces" json:"building_workspaces"`
	RunningWorkspaces  int64 `db:"running_workspaces" json:"running_workspaces"`
	FailedWorkspaces   int64 `db:"failed_workspaces" json:"failed_workspaces"`
	StoppedWorkspaces  int64 `db:"stopped_workspaces" json:"stopped_workspaces"`
}

// GetDeploymentWorkspaceStats runs the stats query and scans the single
// result row.
func (q *sqlQuerier) GetDeploymentWorkspaceStats(ctx context.Context) (GetDeploymentWorkspaceStatsRow, error) {
	row := q.db.QueryRowContext(ctx, getDeploymentWorkspaceStats)
	var i GetDeploymentWorkspaceStatsRow
	err := row.Scan(
		&i.PendingWorkspaces,
		&i.BuildingWorkspaces,
		&i.RunningWorkspaces,
		&i.FailedWorkspaces,
		&i.StoppedWorkspaces,
	)
	return i, err
}
|
|
|
|
// getWorkspaceByAgentID resolves agent ID -> resource -> provisioner job ->
// workspace build -> workspace via nested subqueries against the
// workspaces_expanded view.
const getWorkspaceByAgentID = `-- name: GetWorkspaceByAgentID :one
SELECT
	id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description
FROM
	workspaces_expanded as workspaces
WHERE
	workspaces.id = (
		SELECT
			workspace_id
		FROM
			workspace_builds
		WHERE
			workspace_builds.job_id = (
				SELECT
					job_id
				FROM
					workspace_resources
				WHERE
					workspace_resources.id = (
						SELECT
							resource_id
						FROM
							workspace_agents
						WHERE
							workspace_agents.id = $1
					)
			)
	)
`

// GetWorkspaceByAgentID returns the workspace that owns the given agent;
// row.Scan reports sql.ErrNoRows when the agent does not resolve.
func (q *sqlQuerier) GetWorkspaceByAgentID(ctx context.Context, agentID uuid.UUID) (Workspace, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceByAgentID, agentID)
	var i Workspace
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.OwnerID,
		&i.OrganizationID,
		&i.TemplateID,
		&i.Deleted,
		&i.Name,
		&i.AutostartSchedule,
		&i.Ttl,
		&i.LastUsedAt,
		&i.DormantAt,
		&i.DeletingAt,
		&i.AutomaticUpdates,
		&i.Favorite,
		&i.NextStartAt,
		&i.OwnerAvatarUrl,
		&i.OwnerUsername,
		&i.OwnerName,
		&i.OrganizationName,
		&i.OrganizationDisplayName,
		&i.OrganizationIcon,
		&i.OrganizationDescription,
		&i.TemplateName,
		&i.TemplateDisplayName,
		&i.TemplateIcon,
		&i.TemplateDescription,
	)
	return i, err
}
|
|
|
|
// getWorkspaceByID selects a single workspace (with denormalized owner,
// organization and template columns) from the workspaces_expanded view.
const getWorkspaceByID = `-- name: GetWorkspaceByID :one
SELECT
	id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description
FROM
	workspaces_expanded
WHERE
	id = $1
LIMIT
	1
`

// GetWorkspaceByID returns the workspace with the given ID; row.Scan reports
// sql.ErrNoRows when absent.
func (q *sqlQuerier) GetWorkspaceByID(ctx context.Context, id uuid.UUID) (Workspace, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceByID, id)
	var i Workspace
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.OwnerID,
		&i.OrganizationID,
		&i.TemplateID,
		&i.Deleted,
		&i.Name,
		&i.AutostartSchedule,
		&i.Ttl,
		&i.LastUsedAt,
		&i.DormantAt,
		&i.DeletingAt,
		&i.AutomaticUpdates,
		&i.Favorite,
		&i.NextStartAt,
		&i.OwnerAvatarUrl,
		&i.OwnerUsername,
		&i.OwnerName,
		&i.OrganizationName,
		&i.OrganizationDisplayName,
		&i.OrganizationIcon,
		&i.OrganizationDescription,
		&i.TemplateName,
		&i.TemplateDisplayName,
		&i.TemplateIcon,
		&i.TemplateDescription,
	)
	return i, err
}
|
|
|
|
// getWorkspaceByOwnerIDAndName looks a workspace up by owner, deleted flag
// and case-insensitive name, preferring the most recently created match.
const getWorkspaceByOwnerIDAndName = `-- name: GetWorkspaceByOwnerIDAndName :one
SELECT
	id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description
FROM
	workspaces_expanded as workspaces
WHERE
	owner_id = $1
	AND deleted = $2
	AND LOWER("name") = LOWER($3)
ORDER BY created_at DESC
`

// GetWorkspaceByOwnerIDAndNameParams filters by owner, deletion state, and
// workspace name (matched case-insensitively by the query).
type GetWorkspaceByOwnerIDAndNameParams struct {
	OwnerID uuid.UUID `db:"owner_id" json:"owner_id"`
	Deleted bool      `db:"deleted" json:"deleted"`
	Name    string    `db:"name" json:"name"`
}

// GetWorkspaceByOwnerIDAndName returns the newest matching workspace;
// row.Scan reports sql.ErrNoRows when there is no match.
func (q *sqlQuerier) GetWorkspaceByOwnerIDAndName(ctx context.Context, arg GetWorkspaceByOwnerIDAndNameParams) (Workspace, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceByOwnerIDAndName, arg.OwnerID, arg.Deleted, arg.Name)
	var i Workspace
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.OwnerID,
		&i.OrganizationID,
		&i.TemplateID,
		&i.Deleted,
		&i.Name,
		&i.AutostartSchedule,
		&i.Ttl,
		&i.LastUsedAt,
		&i.DormantAt,
		&i.DeletingAt,
		&i.AutomaticUpdates,
		&i.Favorite,
		&i.NextStartAt,
		&i.OwnerAvatarUrl,
		&i.OwnerUsername,
		&i.OwnerName,
		&i.OrganizationName,
		&i.OrganizationDisplayName,
		&i.OrganizationIcon,
		&i.OrganizationDescription,
		&i.TemplateName,
		&i.TemplateDisplayName,
		&i.TemplateIcon,
		&i.TemplateDescription,
	)
	return i, err
}
|
|
|
|
// getWorkspaceByResourceID resolves resource ID -> provisioner job ->
// workspace build -> workspace via nested subqueries.
const getWorkspaceByResourceID = `-- name: GetWorkspaceByResourceID :one
SELECT
	id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description
FROM
	workspaces_expanded as workspaces
WHERE
	workspaces.id = (
		SELECT
			workspace_id
		FROM
			workspace_builds
		WHERE
			workspace_builds.job_id = (
				SELECT
					job_id
				FROM
					workspace_resources
				WHERE
					workspace_resources.id = $1
			)
	)
LIMIT
	1
`

// GetWorkspaceByResourceID returns the workspace that owns the given
// resource; row.Scan reports sql.ErrNoRows when it does not resolve.
func (q *sqlQuerier) GetWorkspaceByResourceID(ctx context.Context, resourceID uuid.UUID) (Workspace, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceByResourceID, resourceID)
	var i Workspace
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.OwnerID,
		&i.OrganizationID,
		&i.TemplateID,
		&i.Deleted,
		&i.Name,
		&i.AutostartSchedule,
		&i.Ttl,
		&i.LastUsedAt,
		&i.DormantAt,
		&i.DeletingAt,
		&i.AutomaticUpdates,
		&i.Favorite,
		&i.NextStartAt,
		&i.OwnerAvatarUrl,
		&i.OwnerUsername,
		&i.OwnerName,
		&i.OrganizationName,
		&i.OrganizationDisplayName,
		&i.OrganizationIcon,
		&i.OrganizationDescription,
		&i.TemplateName,
		&i.TemplateDisplayName,
		&i.TemplateIcon,
		&i.TemplateDescription,
	)
	return i, err
}
|
|
|
|
// getWorkspaceByWorkspaceAppID resolves app ID -> agent -> resource ->
// provisioner job -> workspace build -> workspace via nested subqueries.
const getWorkspaceByWorkspaceAppID = `-- name: GetWorkspaceByWorkspaceAppID :one
SELECT
	id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at, owner_avatar_url, owner_username, owner_name, organization_name, organization_display_name, organization_icon, organization_description, template_name, template_display_name, template_icon, template_description
FROM
	workspaces_expanded as workspaces
WHERE
	workspaces.id = (
		SELECT
			workspace_id
		FROM
			workspace_builds
		WHERE
			workspace_builds.job_id = (
				SELECT
					job_id
				FROM
					workspace_resources
				WHERE
					workspace_resources.id = (
						SELECT
							resource_id
						FROM
							workspace_agents
						WHERE
							workspace_agents.id = (
								SELECT
									agent_id
								FROM
									workspace_apps
								WHERE
									workspace_apps.id = $1
							)
					)
			)
	)
`

// GetWorkspaceByWorkspaceAppID returns the workspace that owns the given
// workspace app; row.Scan reports sql.ErrNoRows when it does not resolve.
func (q *sqlQuerier) GetWorkspaceByWorkspaceAppID(ctx context.Context, workspaceAppID uuid.UUID) (Workspace, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceByWorkspaceAppID, workspaceAppID)
	var i Workspace
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.OwnerID,
		&i.OrganizationID,
		&i.TemplateID,
		&i.Deleted,
		&i.Name,
		&i.AutostartSchedule,
		&i.Ttl,
		&i.LastUsedAt,
		&i.DormantAt,
		&i.DeletingAt,
		&i.AutomaticUpdates,
		&i.Favorite,
		&i.NextStartAt,
		&i.OwnerAvatarUrl,
		&i.OwnerUsername,
		&i.OwnerName,
		&i.OrganizationName,
		&i.OrganizationDisplayName,
		&i.OrganizationIcon,
		&i.OrganizationDescription,
		&i.TemplateName,
		&i.TemplateDisplayName,
		&i.TemplateIcon,
		&i.TemplateDescription,
	)
	return i, err
}
|
|
|
|
// getWorkspaceUniqueOwnerCountByTemplateIDs counts distinct owners of
// non-deleted workspaces per template; the LEFT JOIN keeps templates with
// zero workspaces in the result (count 0).
const getWorkspaceUniqueOwnerCountByTemplateIDs = `-- name: GetWorkspaceUniqueOwnerCountByTemplateIDs :many
SELECT templates.id AS template_id, COUNT(DISTINCT workspaces.owner_id) AS unique_owners_sum
FROM templates
LEFT JOIN workspaces ON workspaces.template_id = templates.id AND workspaces.deleted = false
WHERE templates.id = ANY($1 :: uuid[])
GROUP BY templates.id
`

// GetWorkspaceUniqueOwnerCountByTemplateIDsRow pairs a template ID with its
// distinct non-deleted-workspace owner count.
type GetWorkspaceUniqueOwnerCountByTemplateIDsRow struct {
	TemplateID      uuid.UUID `db:"template_id" json:"template_id"`
	UniqueOwnersSum int64     `db:"unique_owners_sum" json:"unique_owners_sum"`
}

// GetWorkspaceUniqueOwnerCountByTemplateIDs returns one row per requested
// template ID that exists; the slice is passed as a Postgres uuid[] via
// pq.Array.
func (q *sqlQuerier) GetWorkspaceUniqueOwnerCountByTemplateIDs(ctx context.Context, templateIds []uuid.UUID) ([]GetWorkspaceUniqueOwnerCountByTemplateIDsRow, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceUniqueOwnerCountByTemplateIDs, pq.Array(templateIds))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetWorkspaceUniqueOwnerCountByTemplateIDsRow
	for rows.Next() {
		var i GetWorkspaceUniqueOwnerCountByTemplateIDsRow
		if err := rows.Scan(&i.TemplateID, &i.UniqueOwnersSum); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close surfaces any error that the deferred Close would swallow.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getWorkspaces is the main workspace listing query. It builds a chain of
// CTEs: filtered_workspaces (applies every optional filter), an ordered /
// paginated view, an optional technical summary row, and a total count.
// The placeholders map positionally onto GetWorkspacesParams; the
// -- @authorize_filter marker is rewritten by GetAuthorizedWorkspaces.
const getWorkspaces = `-- name: GetWorkspaces :many
WITH
	build_params AS (
		SELECT
			LOWER(unnest($1 :: text[])) AS name,
			LOWER(unnest($2 :: text[])) AS value
	),
	filtered_workspaces AS (
		SELECT
			workspaces.id, workspaces.created_at, workspaces.updated_at, workspaces.owner_id, workspaces.organization_id, workspaces.template_id, workspaces.deleted, workspaces.name, workspaces.autostart_schedule, workspaces.ttl, workspaces.last_used_at, workspaces.dormant_at, workspaces.deleting_at, workspaces.automatic_updates, workspaces.favorite, workspaces.next_start_at, workspaces.owner_avatar_url, workspaces.owner_username, workspaces.owner_name, workspaces.organization_name, workspaces.organization_display_name, workspaces.organization_icon, workspaces.organization_description, workspaces.template_name, workspaces.template_display_name, workspaces.template_icon, workspaces.template_description,
			latest_build.template_version_id,
			latest_build.template_version_name,
			latest_build.completed_at as latest_build_completed_at,
			latest_build.canceled_at as latest_build_canceled_at,
			latest_build.error as latest_build_error,
			latest_build.transition as latest_build_transition,
			latest_build.job_status as latest_build_status,
			latest_build.has_ai_task as latest_build_has_ai_task
		FROM
			workspaces_expanded as workspaces
		JOIN
			users
		ON
			workspaces.owner_id = users.id
		LEFT JOIN LATERAL (
			SELECT
				workspace_builds.id,
				workspace_builds.transition,
				workspace_builds.template_version_id,
				workspace_builds.has_ai_task,
				template_versions.name AS template_version_name,
				provisioner_jobs.id AS provisioner_job_id,
				provisioner_jobs.started_at,
				provisioner_jobs.updated_at,
				provisioner_jobs.canceled_at,
				provisioner_jobs.completed_at,
				provisioner_jobs.error,
				provisioner_jobs.job_status
			FROM
				workspace_builds
			JOIN
				provisioner_jobs
			ON
				provisioner_jobs.id = workspace_builds.job_id
			LEFT JOIN
				template_versions
			ON
				template_versions.id = workspace_builds.template_version_id
			WHERE
				workspace_builds.workspace_id = workspaces.id
			ORDER BY
				build_number DESC
			LIMIT
				1
		) latest_build ON TRUE
		LEFT JOIN LATERAL (
			SELECT
				id, created_at, updated_at, organization_id, deleted, name, provisioner, active_version_id, description, default_ttl, created_by, icon, user_acl, group_acl, display_name, allow_user_cancel_workspace_jobs, allow_user_autostart, allow_user_autostop, failure_ttl, time_til_dormant, time_til_dormant_autodelete, autostop_requirement_days_of_week, autostop_requirement_weeks, autostart_block_days_of_week, require_active_version, deprecated, activity_bump, max_port_sharing_level, use_classic_parameter_flow
			FROM
				templates
			WHERE
				templates.id = workspaces.template_id
		) template ON true
		WHERE
			-- Optionally include deleted workspaces
			workspaces.deleted = $3
			AND CASE
				WHEN $4 :: text != '' THEN
					CASE
						-- Some workspace specific status refer to the transition
						-- type. By default, the standard provisioner job status
						-- search strings are supported.
						-- 'running' states
						WHEN $4 = 'starting' THEN
							latest_build.job_status = 'running'::provisioner_job_status AND
							latest_build.transition = 'start'::workspace_transition
						WHEN $4 = 'stopping' THEN
							latest_build.job_status = 'running'::provisioner_job_status AND
							latest_build.transition = 'stop'::workspace_transition
						WHEN $4 = 'deleting' THEN
							latest_build.job_status = 'running' AND
							latest_build.transition = 'delete'::workspace_transition

						-- 'succeeded' states
						WHEN $4 = 'deleted' THEN
							latest_build.job_status = 'succeeded'::provisioner_job_status AND
							latest_build.transition = 'delete'::workspace_transition
						WHEN $4 = 'stopped' THEN
							latest_build.job_status = 'succeeded'::provisioner_job_status AND
							latest_build.transition = 'stop'::workspace_transition
						WHEN $4 = 'started' THEN
							latest_build.job_status = 'succeeded'::provisioner_job_status AND
							latest_build.transition = 'start'::workspace_transition

						-- Special case where the provisioner status and workspace status
						-- differ. A workspace is "running" if the job is "succeeded" and
						-- the transition is "start". This is because a workspace starts
						-- running when a job is complete.
						WHEN $4 = 'running' THEN
							latest_build.job_status = 'succeeded'::provisioner_job_status AND
							latest_build.transition = 'start'::workspace_transition

						WHEN $4 != '' THEN
							-- By default just match the job status exactly
							latest_build.job_status = $4::provisioner_job_status
						ELSE
							true
					END
				ELSE true
			END
			-- Filter by owner_id
			AND CASE
				WHEN $5 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
					workspaces.owner_id = $5
				ELSE true
			END
			-- Filter by organization_id
			AND CASE
				WHEN $6 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
					workspaces.organization_id = $6
				ELSE true
			END
			-- Filter by build parameter
			-- @has_param will match any build that includes the parameter.
			AND CASE WHEN array_length($7 :: text[], 1) > 0 THEN
				EXISTS (
					SELECT
						1
					FROM
						workspace_build_parameters
					WHERE
						workspace_build_parameters.workspace_build_id = latest_build.id AND
						-- ILIKE is case insensitive
						workspace_build_parameters.name ILIKE ANY($7)
				)
			ELSE true
			END
			-- @param_value will match param name an value.
			-- requires 2 arrays, @param_names and @param_values to be passed in.
			-- Array index must match between the 2 arrays for name=value
			AND CASE WHEN array_length($1 :: text[], 1) > 0 THEN
				EXISTS (
					SELECT
						1
					FROM
						workspace_build_parameters
					INNER JOIN
						build_params
					ON
						LOWER(workspace_build_parameters.name) = build_params.name AND
						LOWER(workspace_build_parameters.value) = build_params.value AND
						workspace_build_parameters.workspace_build_id = latest_build.id
				)
			ELSE true
			END

			-- Filter by owner_name
			AND CASE
				WHEN $8 :: text != '' THEN
					workspaces.owner_id = (SELECT id FROM users WHERE lower(users.username) = lower($8) AND deleted = false)
				ELSE true
			END
			-- Filter by template_name
			-- There can be more than 1 template with the same name across organizations.
			-- Use the organization filter to restrict to 1 org if needed.
			AND CASE
				WHEN $9 :: text != '' THEN
					workspaces.template_id = ANY(SELECT id FROM templates WHERE lower(name) = lower($9) AND deleted = false)
				ELSE true
			END
			-- Filter by template_ids
			AND CASE
				WHEN array_length($10 :: uuid[], 1) > 0 THEN
					workspaces.template_id = ANY($10)
				ELSE true
			END
			-- Filter by workspace_ids
			AND CASE
				WHEN array_length($11 :: uuid[], 1) > 0 THEN
					workspaces.id = ANY($11)
				ELSE true
			END
			-- Filter by name, matching on substring
			AND CASE
				WHEN $12 :: text != '' THEN
					workspaces.name ILIKE '%' || $12 || '%'
				ELSE true
			END
			-- Filter by agent status
			-- has-agent: is only applicable for workspaces in "start" transition. Stopped and deleted workspaces don't have agents.
			AND CASE
				WHEN $13 :: text != '' THEN
					(
						SELECT COUNT(*)
						FROM
							workspace_resources
						JOIN
							workspace_agents
						ON
							workspace_agents.resource_id = workspace_resources.id
						WHERE
							workspace_resources.job_id = latest_build.provisioner_job_id AND
							latest_build.transition = 'start'::workspace_transition AND
							-- Filter out deleted sub agents.
							workspace_agents.deleted = FALSE AND
							$13 = (
								CASE
									WHEN workspace_agents.first_connected_at IS NULL THEN
										CASE
											WHEN workspace_agents.connection_timeout_seconds > 0 AND NOW() - workspace_agents.created_at > workspace_agents.connection_timeout_seconds * INTERVAL '1 second' THEN
												'timeout'
											ELSE
												'connecting'
										END
									WHEN workspace_agents.disconnected_at > workspace_agents.last_connected_at THEN
										'disconnected'
									WHEN NOW() - workspace_agents.last_connected_at > INTERVAL '1 second' * $14 :: bigint THEN
										'disconnected'
									WHEN workspace_agents.last_connected_at IS NOT NULL THEN
										'connected'
									ELSE
										NULL
								END
							)
					) > 0
				ELSE true
			END
			-- Filter by dormant workspaces.
			AND CASE
				WHEN $15 :: boolean != 'false' THEN
					dormant_at IS NOT NULL
				ELSE true
			END
			-- Filter by last_used
			AND CASE
				WHEN $16 :: timestamp with time zone > '0001-01-01 00:00:00Z' THEN
					workspaces.last_used_at <= $16
				ELSE true
			END
			AND CASE
				WHEN $17 :: timestamp with time zone > '0001-01-01 00:00:00Z' THEN
					workspaces.last_used_at >= $17
				ELSE true
			END
			AND CASE
				WHEN $18 :: boolean IS NOT NULL THEN
					(latest_build.template_version_id = template.active_version_id) = $18 :: boolean
				ELSE true
			END
			-- Filter by has_ai_task in latest build
			AND CASE
				WHEN $19 :: boolean IS NOT NULL THEN
					(COALESCE(latest_build.has_ai_task, false) OR (
						-- If the build has no AI task, it means that the provisioner job is in progress
						-- and we don't know if it has an AI task yet. In this case, we optimistically
						-- assume that it has an AI task if the AI Prompt parameter is not empty. This
						-- lets the AI Task frontend spawn a task and see it immediately after instead of
						-- having to wait for the build to complete.
						latest_build.has_ai_task IS NULL AND
						latest_build.completed_at IS NULL AND
						EXISTS (
							SELECT 1
							FROM workspace_build_parameters
							WHERE workspace_build_parameters.workspace_build_id = latest_build.id
								AND workspace_build_parameters.name = 'AI Prompt'
								AND workspace_build_parameters.value != ''
						)
					)) = ($19 :: boolean)
				ELSE true
			END
		-- Authorize Filter clause will be injected below in GetAuthorizedWorkspaces
		-- @authorize_filter
	), filtered_workspaces_order AS (
		SELECT
			fw.id, fw.created_at, fw.updated_at, fw.owner_id, fw.organization_id, fw.template_id, fw.deleted, fw.name, fw.autostart_schedule, fw.ttl, fw.last_used_at, fw.dormant_at, fw.deleting_at, fw.automatic_updates, fw.favorite, fw.next_start_at, fw.owner_avatar_url, fw.owner_username, fw.owner_name, fw.organization_name, fw.organization_display_name, fw.organization_icon, fw.organization_description, fw.template_name, fw.template_display_name, fw.template_icon, fw.template_description, fw.template_version_id, fw.template_version_name, fw.latest_build_completed_at, fw.latest_build_canceled_at, fw.latest_build_error, fw.latest_build_transition, fw.latest_build_status, fw.latest_build_has_ai_task
		FROM
			filtered_workspaces fw
		ORDER BY
			-- To ensure that 'favorite' workspaces show up first in the list only for their owner.
			CASE WHEN owner_id = $20 AND favorite THEN 0 ELSE 1 END ASC,
			(latest_build_completed_at IS NOT NULL AND
				latest_build_canceled_at IS NULL AND
				latest_build_error IS NULL AND
				latest_build_transition = 'start'::workspace_transition) DESC,
			LOWER(owner_username) ASC,
			LOWER(name) ASC
		LIMIT
			CASE
				WHEN $22 :: integer > 0 THEN
					$22
			END
		OFFSET
			$21
	), filtered_workspaces_order_with_summary AS (
		SELECT
			fwo.id, fwo.created_at, fwo.updated_at, fwo.owner_id, fwo.organization_id, fwo.template_id, fwo.deleted, fwo.name, fwo.autostart_schedule, fwo.ttl, fwo.last_used_at, fwo.dormant_at, fwo.deleting_at, fwo.automatic_updates, fwo.favorite, fwo.next_start_at, fwo.owner_avatar_url, fwo.owner_username, fwo.owner_name, fwo.organization_name, fwo.organization_display_name, fwo.organization_icon, fwo.organization_description, fwo.template_name, fwo.template_display_name, fwo.template_icon, fwo.template_description, fwo.template_version_id, fwo.template_version_name, fwo.latest_build_completed_at, fwo.latest_build_canceled_at, fwo.latest_build_error, fwo.latest_build_transition, fwo.latest_build_status, fwo.latest_build_has_ai_task
		FROM
			filtered_workspaces_order fwo
		-- Return a technical summary row with total count of workspaces.
		-- It is used to present the correct count if pagination goes beyond the offset.
		UNION ALL
		SELECT
			'00000000-0000-0000-0000-000000000000'::uuid, -- id
			'0001-01-01 00:00:00+00'::timestamptz, -- created_at
			'0001-01-01 00:00:00+00'::timestamptz, -- updated_at
			'00000000-0000-0000-0000-000000000000'::uuid, -- owner_id
			'00000000-0000-0000-0000-000000000000'::uuid, -- organization_id
			'00000000-0000-0000-0000-000000000000'::uuid, -- template_id
			false, -- deleted
			'**TECHNICAL_ROW**', -- name
			'', -- autostart_schedule
			0, -- ttl
			'0001-01-01 00:00:00+00'::timestamptz, -- last_used_at
			'0001-01-01 00:00:00+00'::timestamptz, -- dormant_at
			'0001-01-01 00:00:00+00'::timestamptz, -- deleting_at
			'never'::automatic_updates, -- automatic_updates
			false, -- favorite
			'0001-01-01 00:00:00+00'::timestamptz, -- next_start_at
			'', -- owner_avatar_url
			'', -- owner_username
			'', -- owner_name
			'', -- organization_name
			'', -- organization_display_name
			'', -- organization_icon
			'', -- organization_description
			'', -- template_name
			'', -- template_display_name
			'', -- template_icon
			'', -- template_description
			-- Extra columns added to ` + "`" + `filtered_workspaces` + "`" + `
			'00000000-0000-0000-0000-000000000000'::uuid, -- template_version_id
			'', -- template_version_name
			'0001-01-01 00:00:00+00'::timestamptz, -- latest_build_completed_at,
			'0001-01-01 00:00:00+00'::timestamptz, -- latest_build_canceled_at,
			'', -- latest_build_error
			'start'::workspace_transition, -- latest_build_transition
			'unknown'::provisioner_job_status, -- latest_build_status
			false -- latest_build_has_ai_task
		WHERE
			$23 :: boolean = true
	), total_count AS (
		SELECT
			count(*) AS count
		FROM
			filtered_workspaces
	)
SELECT
	fwos.id, fwos.created_at, fwos.updated_at, fwos.owner_id, fwos.organization_id, fwos.template_id, fwos.deleted, fwos.name, fwos.autostart_schedule, fwos.ttl, fwos.last_used_at, fwos.dormant_at, fwos.deleting_at, fwos.automatic_updates, fwos.favorite, fwos.next_start_at, fwos.owner_avatar_url, fwos.owner_username, fwos.owner_name, fwos.organization_name, fwos.organization_display_name, fwos.organization_icon, fwos.organization_description, fwos.template_name, fwos.template_display_name, fwos.template_icon, fwos.template_description, fwos.template_version_id, fwos.template_version_name, fwos.latest_build_completed_at, fwos.latest_build_canceled_at, fwos.latest_build_error, fwos.latest_build_transition, fwos.latest_build_status, fwos.latest_build_has_ai_task,
	tc.count
FROM
	filtered_workspaces_order_with_summary fwos
CROSS JOIN
	total_count tc
`
|
|
|
|
// GetWorkspacesParams holds every optional filter for GetWorkspaces. Zero
// values (empty string, zero UUID, nil slice, invalid sql.NullBool, zero
// time) disable the corresponding filter in the SQL above.
type GetWorkspacesParams struct {
	ParamNames                            []string     `db:"param_names" json:"param_names"`
	ParamValues                           []string     `db:"param_values" json:"param_values"`
	Deleted                               bool         `db:"deleted" json:"deleted"`
	Status                                string       `db:"status" json:"status"`
	OwnerID                               uuid.UUID    `db:"owner_id" json:"owner_id"`
	OrganizationID                        uuid.UUID    `db:"organization_id" json:"organization_id"`
	HasParam                              []string     `db:"has_param" json:"has_param"`
	OwnerUsername                         string       `db:"owner_username" json:"owner_username"`
	TemplateName                          string       `db:"template_name" json:"template_name"`
	TemplateIDs                           []uuid.UUID  `db:"template_ids" json:"template_ids"`
	WorkspaceIds                          []uuid.UUID  `db:"workspace_ids" json:"workspace_ids"`
	Name                                  string       `db:"name" json:"name"`
	HasAgent                              string       `db:"has_agent" json:"has_agent"`
	AgentInactiveDisconnectTimeoutSeconds int64        `db:"agent_inactive_disconnect_timeout_seconds" json:"agent_inactive_disconnect_timeout_seconds"`
	Dormant                               bool         `db:"dormant" json:"dormant"`
	LastUsedBefore                        time.Time    `db:"last_used_before" json:"last_used_before"`
	LastUsedAfter                         time.Time    `db:"last_used_after" json:"last_used_after"`
	UsingActive                           sql.NullBool `db:"using_active" json:"using_active"`
	HasAITask                             sql.NullBool `db:"has_ai_task" json:"has_ai_task"`
	// RequesterID is used only for ordering: the requester's favorites sort first.
	RequesterID uuid.UUID `db:"requester_id" json:"requester_id"`
	Offset      int32     `db:"offset_" json:"offset_"`
	// Limit <= 0 means no limit.
	Limit int32 `db:"limit_" json:"limit_"`
	// WithSummary appends the '**TECHNICAL_ROW**' summary row to the results.
	WithSummary bool `db:"with_summary" json:"with_summary"`
}
|
|
|
|
// GetWorkspacesRow is one result row of GetWorkspaces: the expanded workspace
// columns plus latest-build details and the total match count (Count is the
// same value on every row, taken from the total_count CTE).
type GetWorkspacesRow struct {
	ID                      uuid.UUID            `db:"id" json:"id"`
	CreatedAt               time.Time            `db:"created_at" json:"created_at"`
	UpdatedAt               time.Time            `db:"updated_at" json:"updated_at"`
	OwnerID                 uuid.UUID            `db:"owner_id" json:"owner_id"`
	OrganizationID          uuid.UUID            `db:"organization_id" json:"organization_id"`
	TemplateID              uuid.UUID            `db:"template_id" json:"template_id"`
	Deleted                 bool                 `db:"deleted" json:"deleted"`
	Name                    string               `db:"name" json:"name"`
	AutostartSchedule       sql.NullString       `db:"autostart_schedule" json:"autostart_schedule"`
	Ttl                     sql.NullInt64        `db:"ttl" json:"ttl"`
	LastUsedAt              time.Time            `db:"last_used_at" json:"last_used_at"`
	DormantAt               sql.NullTime         `db:"dormant_at" json:"dormant_at"`
	DeletingAt              sql.NullTime         `db:"deleting_at" json:"deleting_at"`
	AutomaticUpdates        AutomaticUpdates     `db:"automatic_updates" json:"automatic_updates"`
	Favorite                bool                 `db:"favorite" json:"favorite"`
	NextStartAt             sql.NullTime         `db:"next_start_at" json:"next_start_at"`
	OwnerAvatarUrl          string               `db:"owner_avatar_url" json:"owner_avatar_url"`
	OwnerUsername           string               `db:"owner_username" json:"owner_username"`
	OwnerName               string               `db:"owner_name" json:"owner_name"`
	OrganizationName        string               `db:"organization_name" json:"organization_name"`
	OrganizationDisplayName string               `db:"organization_display_name" json:"organization_display_name"`
	OrganizationIcon        string               `db:"organization_icon" json:"organization_icon"`
	OrganizationDescription string               `db:"organization_description" json:"organization_description"`
	TemplateName            string               `db:"template_name" json:"template_name"`
	TemplateDisplayName     string               `db:"template_display_name" json:"template_display_name"`
	TemplateIcon            string               `db:"template_icon" json:"template_icon"`
	TemplateDescription     string               `db:"template_description" json:"template_description"`
	TemplateVersionID       uuid.UUID            `db:"template_version_id" json:"template_version_id"`
	TemplateVersionName     sql.NullString       `db:"template_version_name" json:"template_version_name"`
	LatestBuildCompletedAt  sql.NullTime         `db:"latest_build_completed_at" json:"latest_build_completed_at"`
	LatestBuildCanceledAt   sql.NullTime         `db:"latest_build_canceled_at" json:"latest_build_canceled_at"`
	LatestBuildError        sql.NullString       `db:"latest_build_error" json:"latest_build_error"`
	LatestBuildTransition   WorkspaceTransition  `db:"latest_build_transition" json:"latest_build_transition"`
	LatestBuildStatus       ProvisionerJobStatus `db:"latest_build_status" json:"latest_build_status"`
	LatestBuildHasAITask    sql.NullBool         `db:"latest_build_has_ai_task" json:"latest_build_has_ai_task"`
	Count                   int64                `db:"count" json:"count"`
}
|
|
|
|
// build_params is used to filter by build parameters if present.
// It has to be a CTE because the set returning function 'unnest' cannot
// be used in a WHERE clause.
//
// GetWorkspaces lists workspaces matching arg's filters, ordered (requester's
// favorites first), paginated, with a total count on every row. The argument
// order below must match the $1..$23 placeholders in getWorkspaces exactly,
// and the Scan field order must match the final SELECT column order.
func (q *sqlQuerier) GetWorkspaces(ctx context.Context, arg GetWorkspacesParams) ([]GetWorkspacesRow, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaces,
		pq.Array(arg.ParamNames),
		pq.Array(arg.ParamValues),
		arg.Deleted,
		arg.Status,
		arg.OwnerID,
		arg.OrganizationID,
		pq.Array(arg.HasParam),
		arg.OwnerUsername,
		arg.TemplateName,
		pq.Array(arg.TemplateIDs),
		pq.Array(arg.WorkspaceIds),
		arg.Name,
		arg.HasAgent,
		arg.AgentInactiveDisconnectTimeoutSeconds,
		arg.Dormant,
		arg.LastUsedBefore,
		arg.LastUsedAfter,
		arg.UsingActive,
		arg.HasAITask,
		arg.RequesterID,
		arg.Offset,
		arg.Limit,
		arg.WithSummary,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetWorkspacesRow
	for rows.Next() {
		var i GetWorkspacesRow
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.OwnerID,
			&i.OrganizationID,
			&i.TemplateID,
			&i.Deleted,
			&i.Name,
			&i.AutostartSchedule,
			&i.Ttl,
			&i.LastUsedAt,
			&i.DormantAt,
			&i.DeletingAt,
			&i.AutomaticUpdates,
			&i.Favorite,
			&i.NextStartAt,
			&i.OwnerAvatarUrl,
			&i.OwnerUsername,
			&i.OwnerName,
			&i.OrganizationName,
			&i.OrganizationDisplayName,
			&i.OrganizationIcon,
			&i.OrganizationDescription,
			&i.TemplateName,
			&i.TemplateDisplayName,
			&i.TemplateIcon,
			&i.TemplateDescription,
			&i.TemplateVersionID,
			&i.TemplateVersionName,
			&i.LatestBuildCompletedAt,
			&i.LatestBuildCanceledAt,
			&i.LatestBuildError,
			&i.LatestBuildTransition,
			&i.LatestBuildStatus,
			&i.LatestBuildHasAITask,
			&i.Count,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err surfaces any error deferred Close would swallow.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getWorkspacesAndAgentsByOwnerID fetches a user's non-deleted workspaces
// with the latest build's status/transition and the (non-deleted) agents of
// that build, aggregated into an agent_id_name_pair array. The
// -- @authorize_filter marker is rewritten by the authorized variant.
const getWorkspacesAndAgentsByOwnerID = `-- name: GetWorkspacesAndAgentsByOwnerID :many
SELECT
	workspaces.id as id,
	workspaces.name as name,
	job_status,
	transition,
	(array_agg(ROW(agent_id, agent_name)::agent_id_name_pair) FILTER (WHERE agent_id IS NOT NULL))::agent_id_name_pair[] as agents
FROM workspaces
LEFT JOIN LATERAL (
	SELECT
		workspace_id,
		job_id,
		transition,
		job_status
	FROM workspace_builds
	JOIN provisioner_jobs ON provisioner_jobs.id = workspace_builds.job_id
	WHERE workspace_builds.workspace_id = workspaces.id
	ORDER BY build_number DESC
	LIMIT 1
) latest_build ON true
LEFT JOIN LATERAL (
	SELECT
		workspace_agents.id as agent_id,
		workspace_agents.name as agent_name,
		job_id
	FROM workspace_resources
	JOIN workspace_agents ON (
		workspace_agents.resource_id = workspace_resources.id
		-- Filter out deleted sub agents.
		AND workspace_agents.deleted = FALSE
	)
	WHERE job_id = latest_build.job_id
) resources ON true
WHERE
	-- Filter by owner_id
	workspaces.owner_id = $1 :: uuid
	AND workspaces.deleted = false
	-- Authorize Filter clause will be injected below in GetAuthorizedWorkspacesAndAgentsByOwnerID
	-- @authorize_filter
GROUP BY workspaces.id, workspaces.name, latest_build.job_status, latest_build.job_id, latest_build.transition
`
|
|
|
|
// GetWorkspacesAndAgentsByOwnerIDRow is one workspace with its latest build
// status/transition and the agents attached to that build.
type GetWorkspacesAndAgentsByOwnerIDRow struct {
	ID         uuid.UUID            `db:"id" json:"id"`
	Name       string               `db:"name" json:"name"`
	JobStatus  ProvisionerJobStatus `db:"job_status" json:"job_status"`
	Transition WorkspaceTransition  `db:"transition" json:"transition"`
	Agents     []AgentIDNamePair    `db:"agents" json:"agents"`
}
|
|
|
|
// GetWorkspacesAndAgentsByOwnerID returns the owner's non-deleted workspaces,
// each with its latest build's job status/transition and that build's agents.
func (q *sqlQuerier) GetWorkspacesAndAgentsByOwnerID(ctx context.Context, ownerID uuid.UUID) ([]GetWorkspacesAndAgentsByOwnerIDRow, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspacesAndAgentsByOwnerID, ownerID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetWorkspacesAndAgentsByOwnerIDRow
	for rows.Next() {
		var i GetWorkspacesAndAgentsByOwnerIDRow
		if err := rows.Scan(
			&i.ID,
			&i.Name,
			&i.JobStatus,
			&i.Transition,
			// pq.Array decodes the Postgres agent_id_name_pair[] column.
			pq.Array(&i.Agents),
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err surfaces any error deferred Close would swallow.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getWorkspacesByTemplateID lists all non-deleted workspaces for a template.
const getWorkspacesByTemplateID = `-- name: GetWorkspacesByTemplateID :many
SELECT id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at FROM workspaces WHERE template_id = $1 AND deleted = false
`
|
|
|
|
// GetWorkspacesByTemplateID returns every non-deleted workspace created from
// the given template.
func (q *sqlQuerier) GetWorkspacesByTemplateID(ctx context.Context, templateID uuid.UUID) ([]WorkspaceTable, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspacesByTemplateID, templateID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceTable
	for rows.Next() {
		var i WorkspaceTable
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.OwnerID,
			&i.OrganizationID,
			&i.TemplateID,
			&i.Deleted,
			&i.Name,
			&i.AutostartSchedule,
			&i.Ttl,
			&i.LastUsedAt,
			&i.DormantAt,
			&i.DeletingAt,
			&i.AutomaticUpdates,
			&i.Favorite,
			&i.NextStartAt,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err surfaces any error deferred Close would swallow.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getWorkspacesEligibleForTransition selects workspaces whose latest build
// makes them due for a lifecycle action (autostop, autostart, dormant stop,
// dormant delete, or failed-build stop). $1 is "now"; the per-branch rules
// are documented inline in the SQL below.
const getWorkspacesEligibleForTransition = `-- name: GetWorkspacesEligibleForTransition :many
SELECT
	workspaces.id,
	workspaces.name
FROM
	workspaces
LEFT JOIN
	workspace_builds ON workspace_builds.workspace_id = workspaces.id
INNER JOIN
	provisioner_jobs ON workspace_builds.job_id = provisioner_jobs.id
INNER JOIN
	templates ON workspaces.template_id = templates.id
INNER JOIN
	users ON workspaces.owner_id = users.id
WHERE
	workspace_builds.build_number = (
		SELECT
			MAX(build_number)
		FROM
			workspace_builds
		WHERE
			workspace_builds.workspace_id = workspaces.id
	) AND

	(
		-- A workspace may be eligible for autostop if the following are true:
		-- * The provisioner job has not failed.
		-- * The workspace is not dormant.
		-- * The workspace build was a start transition.
		-- * The workspace's owner is suspended OR the workspace build deadline has passed.
		(
			provisioner_jobs.job_status != 'failed'::provisioner_job_status AND
			workspaces.dormant_at IS NULL AND
			workspace_builds.transition = 'start'::workspace_transition AND (
				users.status = 'suspended'::user_status OR (
					workspace_builds.deadline != '0001-01-01 00:00:00+00'::timestamptz AND
					workspace_builds.deadline < $1 :: timestamptz
				)
			)
		) OR

		-- A workspace may be eligible for autostart if the following are true:
		-- * The workspace's owner is active.
		-- * The provisioner job did not fail.
		-- * The workspace build was a stop transition.
		-- * The workspace is not dormant
		-- * The workspace has an autostart schedule.
		-- * It is after the workspace's next start time.
		(
			users.status = 'active'::user_status AND
			provisioner_jobs.job_status != 'failed'::provisioner_job_status AND
			workspace_builds.transition = 'stop'::workspace_transition AND
			workspaces.dormant_at IS NULL AND
			workspaces.autostart_schedule IS NOT NULL AND
			(
				-- next_start_at might be null in these two scenarios:
				--   * A coder instance was updated and we haven't updated next_start_at yet.
				--   * A database trigger made it null because of an update to a related column.
				--
				-- When this occurs, we return the workspace so the Coder server can
				-- compute a valid next start at and update it.
				workspaces.next_start_at IS NULL OR
				workspaces.next_start_at <= $1 :: timestamptz
			)
		) OR

		-- A workspace may be eligible for dormant stop if the following are true:
		-- * The workspace is not dormant.
		-- * The template has set a time 'til dormant.
		-- * The workspace has been unused for longer than the time 'til dormancy.
		(
			workspaces.dormant_at IS NULL AND
			templates.time_til_dormant > 0 AND
			($1 :: timestamptz) - workspaces.last_used_at > (INTERVAL '1 millisecond' * (templates.time_til_dormant / 1000000))
		) OR

		-- A workspace may be eligible for deletion if the following are true:
		-- * The workspace is dormant.
		-- * The workspace is scheduled to be deleted.
		-- * If there was a prior attempt to delete the workspace that failed:
		--    * This attempt was at least 24 hours ago.
		(
			workspaces.dormant_at IS NOT NULL AND
			workspaces.deleting_at IS NOT NULL AND
			workspaces.deleting_at < $1 :: timestamptz AND
			templates.time_til_dormant_autodelete > 0 AND
			CASE
				WHEN (
					workspace_builds.transition = 'delete'::workspace_transition AND
					provisioner_jobs.job_status = 'failed'::provisioner_job_status
				) THEN (
					(
						provisioner_jobs.canceled_at IS NOT NULL OR
						provisioner_jobs.completed_at IS NOT NULL
					) AND (
						($1 :: timestamptz) - (CASE
							WHEN provisioner_jobs.canceled_at IS NOT NULL THEN provisioner_jobs.canceled_at
							ELSE provisioner_jobs.completed_at
						END) > INTERVAL '24 hours'
					)
				)
				ELSE true
			END
		) OR

		-- A workspace may be eligible for failed stop if the following are true:
		-- * The template has a failure ttl set.
		-- * The workspace build was a start transition.
		-- * The provisioner job failed.
		-- * The provisioner job had completed.
		-- * The provisioner job has been completed for longer than the failure ttl.
		(
			templates.failure_ttl > 0 AND
			workspace_builds.transition = 'start'::workspace_transition AND
			provisioner_jobs.job_status = 'failed'::provisioner_job_status AND
			provisioner_jobs.completed_at IS NOT NULL AND
			($1 :: timestamptz) - provisioner_jobs.completed_at > (INTERVAL '1 millisecond' * (templates.failure_ttl / 1000000))
		)
	) AND workspaces.deleted = 'false'
`
|
|
|
|
// GetWorkspacesEligibleForTransitionRow identifies a workspace that is due
// for a lifecycle transition.
type GetWorkspacesEligibleForTransitionRow struct {
	ID   uuid.UUID `db:"id" json:"id"`
	Name string    `db:"name" json:"name"`
}
|
|
|
|
// GetWorkspacesEligibleForTransition returns the IDs and names of workspaces
// due for a lifecycle action (autostop/autostart/dormancy/deletion/failed
// stop) relative to the provided "now" timestamp.
func (q *sqlQuerier) GetWorkspacesEligibleForTransition(ctx context.Context, now time.Time) ([]GetWorkspacesEligibleForTransitionRow, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspacesEligibleForTransition, now)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetWorkspacesEligibleForTransitionRow
	for rows.Next() {
		var i GetWorkspacesEligibleForTransitionRow
		if err := rows.Scan(&i.ID, &i.Name); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err surfaces any error deferred Close would swallow.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// insertWorkspace creates a workspace row and returns the full inserted row
// (including DB-defaulted columns such as deleted, dormant_at and favorite).
const insertWorkspace = `-- name: InsertWorkspace :one
INSERT INTO
	workspaces (
		id,
		created_at,
		updated_at,
		owner_id,
		organization_id,
		template_id,
		name,
		autostart_schedule,
		ttl,
		last_used_at,
		automatic_updates,
		next_start_at
	)
VALUES
	($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12) RETURNING id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at
`
|
|
|
|
// InsertWorkspaceParams holds the column values for InsertWorkspace, in the
// same order as the $1..$12 placeholders of the insertWorkspace query.
type InsertWorkspaceParams struct {
	ID                uuid.UUID        `db:"id" json:"id"`
	CreatedAt         time.Time        `db:"created_at" json:"created_at"`
	UpdatedAt         time.Time        `db:"updated_at" json:"updated_at"`
	OwnerID           uuid.UUID        `db:"owner_id" json:"owner_id"`
	OrganizationID    uuid.UUID        `db:"organization_id" json:"organization_id"`
	TemplateID        uuid.UUID        `db:"template_id" json:"template_id"`
	Name              string           `db:"name" json:"name"`
	AutostartSchedule sql.NullString   `db:"autostart_schedule" json:"autostart_schedule"`
	Ttl               sql.NullInt64    `db:"ttl" json:"ttl"`
	LastUsedAt        time.Time        `db:"last_used_at" json:"last_used_at"`
	AutomaticUpdates  AutomaticUpdates `db:"automatic_updates" json:"automatic_updates"`
	NextStartAt       sql.NullTime     `db:"next_start_at" json:"next_start_at"`
}
|
|
|
|
// InsertWorkspace inserts a new workspaces row and returns the stored row
// (including server-populated columns such as deleted, dormant_at,
// deleting_at and favorite) as a WorkspaceTable.
func (q *sqlQuerier) InsertWorkspace(ctx context.Context, arg InsertWorkspaceParams) (WorkspaceTable, error) {
	// Argument order must match the query placeholders $1..$12.
	row := q.db.QueryRowContext(ctx, insertWorkspace,
		arg.ID,
		arg.CreatedAt,
		arg.UpdatedAt,
		arg.OwnerID,
		arg.OrganizationID,
		arg.TemplateID,
		arg.Name,
		arg.AutostartSchedule,
		arg.Ttl,
		arg.LastUsedAt,
		arg.AutomaticUpdates,
		arg.NextStartAt,
	)
	var i WorkspaceTable
	// Scan order must match the RETURNING column list exactly.
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.OwnerID,
		&i.OrganizationID,
		&i.TemplateID,
		&i.Deleted,
		&i.Name,
		&i.AutostartSchedule,
		&i.Ttl,
		&i.LastUsedAt,
		&i.DormantAt,
		&i.DeletingAt,
		&i.AutomaticUpdates,
		&i.Favorite,
		&i.NextStartAt,
	)
	return i, err
}
|
|
|
|
// unfavoriteWorkspace is the sqlc-generated SQL behind UnfavoriteWorkspace
// ($1 = workspace id).
const unfavoriteWorkspace = `-- name: UnfavoriteWorkspace :exec
UPDATE workspaces SET favorite = false WHERE id = $1
`
|
|
|
|
func (q *sqlQuerier) UnfavoriteWorkspace(ctx context.Context, id uuid.UUID) error {
|
|
_, err := q.db.ExecContext(ctx, unfavoriteWorkspace, id)
|
|
return err
|
|
}
|
|
|
|
// updateTemplateWorkspacesLastUsedAt is the sqlc-generated SQL behind
// UpdateTemplateWorkspacesLastUsedAt: it stamps last_used_at on every
// workspace belonging to a template.
const updateTemplateWorkspacesLastUsedAt = `-- name: UpdateTemplateWorkspacesLastUsedAt :exec
UPDATE workspaces
SET
	last_used_at = $1::timestamptz
WHERE
	template_id = $2
`

// UpdateTemplateWorkspacesLastUsedAtParams carries the bind parameters for
// UpdateTemplateWorkspacesLastUsedAt ($1 = last_used_at, $2 = template_id).
type UpdateTemplateWorkspacesLastUsedAtParams struct {
	LastUsedAt time.Time `db:"last_used_at" json:"last_used_at"`
	TemplateID uuid.UUID `db:"template_id" json:"template_id"`
}
|
|
|
|
func (q *sqlQuerier) UpdateTemplateWorkspacesLastUsedAt(ctx context.Context, arg UpdateTemplateWorkspacesLastUsedAtParams) error {
|
|
_, err := q.db.ExecContext(ctx, updateTemplateWorkspacesLastUsedAt, arg.LastUsedAt, arg.TemplateID)
|
|
return err
|
|
}
|
|
|
|
// updateWorkspace is the sqlc-generated SQL behind UpdateWorkspace: it
// renames a non-deleted workspace and returns the full updated row.
const updateWorkspace = `-- name: UpdateWorkspace :one
UPDATE
	workspaces
SET
	name = $2
WHERE
	id = $1
	AND deleted = false
RETURNING id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at
`

// UpdateWorkspaceParams carries the bind parameters for UpdateWorkspace
// ($1 = id, $2 = name).
type UpdateWorkspaceParams struct {
	ID   uuid.UUID `db:"id" json:"id"`
	Name string    `db:"name" json:"name"`
}
|
|
|
|
// UpdateWorkspace renames the non-deleted workspace identified by arg.ID
// and returns the updated row. If no row matches (missing or deleted
// workspace), Scan reports sql.ErrNoRows.
func (q *sqlQuerier) UpdateWorkspace(ctx context.Context, arg UpdateWorkspaceParams) (WorkspaceTable, error) {
	row := q.db.QueryRowContext(ctx, updateWorkspace, arg.ID, arg.Name)
	var i WorkspaceTable
	// Scan order must match the RETURNING column list exactly.
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.OwnerID,
		&i.OrganizationID,
		&i.TemplateID,
		&i.Deleted,
		&i.Name,
		&i.AutostartSchedule,
		&i.Ttl,
		&i.LastUsedAt,
		&i.DormantAt,
		&i.DeletingAt,
		&i.AutomaticUpdates,
		&i.Favorite,
		&i.NextStartAt,
	)
	return i, err
}
|
|
|
|
// updateWorkspaceAutomaticUpdates is the sqlc-generated SQL behind
// UpdateWorkspaceAutomaticUpdates ($1 = id, $2 = automatic_updates).
const updateWorkspaceAutomaticUpdates = `-- name: UpdateWorkspaceAutomaticUpdates :exec
UPDATE
	workspaces
SET
	automatic_updates = $2
WHERE
	id = $1
`

// UpdateWorkspaceAutomaticUpdatesParams carries the bind parameters for
// UpdateWorkspaceAutomaticUpdates.
type UpdateWorkspaceAutomaticUpdatesParams struct {
	ID               uuid.UUID        `db:"id" json:"id"`
	AutomaticUpdates AutomaticUpdates `db:"automatic_updates" json:"automatic_updates"`
}
|
|
|
|
func (q *sqlQuerier) UpdateWorkspaceAutomaticUpdates(ctx context.Context, arg UpdateWorkspaceAutomaticUpdatesParams) error {
|
|
_, err := q.db.ExecContext(ctx, updateWorkspaceAutomaticUpdates, arg.ID, arg.AutomaticUpdates)
|
|
return err
|
|
}
|
|
|
|
// updateWorkspaceAutostart is the sqlc-generated SQL behind
// UpdateWorkspaceAutostart ($1 = id, $2 = autostart_schedule,
// $3 = next_start_at).
const updateWorkspaceAutostart = `-- name: UpdateWorkspaceAutostart :exec
UPDATE
	workspaces
SET
	autostart_schedule = $2,
	next_start_at = $3
WHERE
	id = $1
`

// UpdateWorkspaceAutostartParams carries the bind parameters for
// UpdateWorkspaceAutostart.
type UpdateWorkspaceAutostartParams struct {
	ID                uuid.UUID      `db:"id" json:"id"`
	AutostartSchedule sql.NullString `db:"autostart_schedule" json:"autostart_schedule"`
	NextStartAt       sql.NullTime   `db:"next_start_at" json:"next_start_at"`
}
|
|
|
|
func (q *sqlQuerier) UpdateWorkspaceAutostart(ctx context.Context, arg UpdateWorkspaceAutostartParams) error {
|
|
_, err := q.db.ExecContext(ctx, updateWorkspaceAutostart, arg.ID, arg.AutostartSchedule, arg.NextStartAt)
|
|
return err
|
|
}
|
|
|
|
// updateWorkspaceDeletedByID is the sqlc-generated SQL behind
// UpdateWorkspaceDeletedByID ($1 = id, $2 = deleted).
const updateWorkspaceDeletedByID = `-- name: UpdateWorkspaceDeletedByID :exec
UPDATE
	workspaces
SET
	deleted = $2
WHERE
	id = $1
`

// UpdateWorkspaceDeletedByIDParams carries the bind parameters for
// UpdateWorkspaceDeletedByID.
type UpdateWorkspaceDeletedByIDParams struct {
	ID      uuid.UUID `db:"id" json:"id"`
	Deleted bool      `db:"deleted" json:"deleted"`
}
|
|
|
|
func (q *sqlQuerier) UpdateWorkspaceDeletedByID(ctx context.Context, arg UpdateWorkspaceDeletedByIDParams) error {
|
|
_, err := q.db.ExecContext(ctx, updateWorkspaceDeletedByID, arg.ID, arg.Deleted)
|
|
return err
|
|
}
|
|
|
|
// updateWorkspaceDormantDeletingAt is the sqlc-generated SQL behind
// UpdateWorkspaceDormantDeletingAt. It sets dormant_at and derives
// last_used_at / deleting_at from it and the owning template's
// time_til_dormant_autodelete setting.
const updateWorkspaceDormantDeletingAt = `-- name: UpdateWorkspaceDormantDeletingAt :one
UPDATE
	workspaces
SET
	dormant_at = $2,
	-- When a workspace is active we want to update the last_used_at to avoid the workspace going
	-- immediately dormant. If we're transition the workspace to dormant then we leave it alone.
	last_used_at = CASE WHEN $2::timestamptz IS NULL THEN
		now() at time zone 'utc'
	ELSE
		last_used_at
	END,
	-- If dormant_at is null (meaning active) or the template-defined time_til_dormant_autodelete is 0 we should set
	-- deleting_at to NULL else set it to the dormant_at + time_til_dormant_autodelete duration.
	deleting_at = CASE WHEN $2::timestamptz IS NULL OR templates.time_til_dormant_autodelete = 0 THEN
		NULL
	ELSE
		$2::timestamptz + (INTERVAL '1 millisecond' * (templates.time_til_dormant_autodelete / 1000000))
	END
FROM
	templates
WHERE
	workspaces.id = $1
	AND templates.id = workspaces.template_id
RETURNING
	workspaces.id, workspaces.created_at, workspaces.updated_at, workspaces.owner_id, workspaces.organization_id, workspaces.template_id, workspaces.deleted, workspaces.name, workspaces.autostart_schedule, workspaces.ttl, workspaces.last_used_at, workspaces.dormant_at, workspaces.deleting_at, workspaces.automatic_updates, workspaces.favorite, workspaces.next_start_at
`

// UpdateWorkspaceDormantDeletingAtParams carries the bind parameters for
// UpdateWorkspaceDormantDeletingAt ($1 = id, $2 = dormant_at; a NULL
// dormant_at marks the workspace active again).
type UpdateWorkspaceDormantDeletingAtParams struct {
	ID        uuid.UUID    `db:"id" json:"id"`
	DormantAt sql.NullTime `db:"dormant_at" json:"dormant_at"`
}
|
|
|
|
// UpdateWorkspaceDormantDeletingAt sets dormant_at on the workspace (NULL
// to reactivate) and lets the query derive last_used_at and deleting_at;
// it returns the updated row.
func (q *sqlQuerier) UpdateWorkspaceDormantDeletingAt(ctx context.Context, arg UpdateWorkspaceDormantDeletingAtParams) (WorkspaceTable, error) {
	row := q.db.QueryRowContext(ctx, updateWorkspaceDormantDeletingAt, arg.ID, arg.DormantAt)
	var i WorkspaceTable
	// Scan order must match the RETURNING column list exactly.
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.OwnerID,
		&i.OrganizationID,
		&i.TemplateID,
		&i.Deleted,
		&i.Name,
		&i.AutostartSchedule,
		&i.Ttl,
		&i.LastUsedAt,
		&i.DormantAt,
		&i.DeletingAt,
		&i.AutomaticUpdates,
		&i.Favorite,
		&i.NextStartAt,
	)
	return i, err
}
|
|
|
|
// updateWorkspaceLastUsedAt is the sqlc-generated SQL behind
// UpdateWorkspaceLastUsedAt ($1 = id, $2 = last_used_at).
const updateWorkspaceLastUsedAt = `-- name: UpdateWorkspaceLastUsedAt :exec
UPDATE
	workspaces
SET
	last_used_at = $2
WHERE
	id = $1
`

// UpdateWorkspaceLastUsedAtParams carries the bind parameters for
// UpdateWorkspaceLastUsedAt.
type UpdateWorkspaceLastUsedAtParams struct {
	ID         uuid.UUID `db:"id" json:"id"`
	LastUsedAt time.Time `db:"last_used_at" json:"last_used_at"`
}
|
|
|
|
func (q *sqlQuerier) UpdateWorkspaceLastUsedAt(ctx context.Context, arg UpdateWorkspaceLastUsedAtParams) error {
|
|
_, err := q.db.ExecContext(ctx, updateWorkspaceLastUsedAt, arg.ID, arg.LastUsedAt)
|
|
return err
|
|
}
|
|
|
|
// updateWorkspaceNextStartAt is the sqlc-generated SQL behind
// UpdateWorkspaceNextStartAt ($1 = id, $2 = next_start_at).
const updateWorkspaceNextStartAt = `-- name: UpdateWorkspaceNextStartAt :exec
UPDATE
	workspaces
SET
	next_start_at = $2
WHERE
	id = $1
`

// UpdateWorkspaceNextStartAtParams carries the bind parameters for
// UpdateWorkspaceNextStartAt.
type UpdateWorkspaceNextStartAtParams struct {
	ID          uuid.UUID    `db:"id" json:"id"`
	NextStartAt sql.NullTime `db:"next_start_at" json:"next_start_at"`
}
|
|
|
|
func (q *sqlQuerier) UpdateWorkspaceNextStartAt(ctx context.Context, arg UpdateWorkspaceNextStartAtParams) error {
|
|
_, err := q.db.ExecContext(ctx, updateWorkspaceNextStartAt, arg.ID, arg.NextStartAt)
|
|
return err
|
|
}
|
|
|
|
// updateWorkspaceTTL is the sqlc-generated SQL behind UpdateWorkspaceTTL
// ($1 = id, $2 = ttl).
const updateWorkspaceTTL = `-- name: UpdateWorkspaceTTL :exec
UPDATE
	workspaces
SET
	ttl = $2
WHERE
	id = $1
`

// UpdateWorkspaceTTLParams carries the bind parameters for
// UpdateWorkspaceTTL.
type UpdateWorkspaceTTLParams struct {
	ID  uuid.UUID     `db:"id" json:"id"`
	Ttl sql.NullInt64 `db:"ttl" json:"ttl"`
}
|
|
|
|
func (q *sqlQuerier) UpdateWorkspaceTTL(ctx context.Context, arg UpdateWorkspaceTTLParams) error {
|
|
_, err := q.db.ExecContext(ctx, updateWorkspaceTTL, arg.ID, arg.Ttl)
|
|
return err
|
|
}
|
|
|
|
// updateWorkspacesDormantDeletingAtByTemplateID is the sqlc-generated SQL
// behind UpdateWorkspacesDormantDeletingAtByTemplateID. For every already
// dormant workspace of a template it recomputes deleting_at (NULL when the
// autodelete interval is 0) and optionally overrides dormant_at when a
// non-zero timestamp is supplied.
const updateWorkspacesDormantDeletingAtByTemplateID = `-- name: UpdateWorkspacesDormantDeletingAtByTemplateID :many
UPDATE workspaces
SET
	deleting_at = CASE
		WHEN $1::bigint = 0 THEN NULL
		WHEN $2::timestamptz > '0001-01-01 00:00:00+00'::timestamptz THEN ($2::timestamptz) + interval '1 milliseconds' * $1::bigint
		ELSE dormant_at + interval '1 milliseconds' * $1::bigint
	END,
	dormant_at = CASE WHEN $2::timestamptz > '0001-01-01 00:00:00+00'::timestamptz THEN $2::timestamptz ELSE dormant_at END
WHERE
	template_id = $3
AND
	dormant_at IS NOT NULL
RETURNING id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite, next_start_at
`

// UpdateWorkspacesDormantDeletingAtByTemplateIDParams carries the bind
// parameters for UpdateWorkspacesDormantDeletingAtByTemplateID
// ($1 = time_til_dormant_autodelete_ms, $2 = dormant_at, $3 = template_id).
type UpdateWorkspacesDormantDeletingAtByTemplateIDParams struct {
	TimeTilDormantAutodeleteMs int64     `db:"time_til_dormant_autodelete_ms" json:"time_til_dormant_autodelete_ms"`
	DormantAt                  time.Time `db:"dormant_at" json:"dormant_at"`
	TemplateID                 uuid.UUID `db:"template_id" json:"template_id"`
}
|
|
|
|
// UpdateWorkspacesDormantDeletingAtByTemplateID recomputes deleting_at (and
// optionally dormant_at) for all dormant workspaces of a template and
// returns every updated row.
func (q *sqlQuerier) UpdateWorkspacesDormantDeletingAtByTemplateID(ctx context.Context, arg UpdateWorkspacesDormantDeletingAtByTemplateIDParams) ([]WorkspaceTable, error) {
	rows, err := q.db.QueryContext(ctx, updateWorkspacesDormantDeletingAtByTemplateID, arg.TimeTilDormantAutodeleteMs, arg.DormantAt, arg.TemplateID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceTable
	for rows.Next() {
		var i WorkspaceTable
		// Scan order must match the RETURNING column list exactly.
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.OwnerID,
			&i.OrganizationID,
			&i.TemplateID,
			&i.Deleted,
			&i.Name,
			&i.AutostartSchedule,
			&i.Ttl,
			&i.LastUsedAt,
			&i.DormantAt,
			&i.DeletingAt,
			&i.AutomaticUpdates,
			&i.Favorite,
			&i.NextStartAt,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close (in addition to the defer) so an error raised while
	// releasing the result set is reported instead of silently dropped.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// updateWorkspacesTTLByTemplateID is the sqlc-generated SQL behind
// UpdateWorkspacesTTLByTemplateID ($1 = template_id, $2 = ttl).
const updateWorkspacesTTLByTemplateID = `-- name: UpdateWorkspacesTTLByTemplateID :exec
UPDATE
	workspaces
SET
	ttl = $2
WHERE
	template_id = $1
`

// UpdateWorkspacesTTLByTemplateIDParams carries the bind parameters for
// UpdateWorkspacesTTLByTemplateID.
type UpdateWorkspacesTTLByTemplateIDParams struct {
	TemplateID uuid.UUID     `db:"template_id" json:"template_id"`
	Ttl        sql.NullInt64 `db:"ttl" json:"ttl"`
}
|
|
|
|
func (q *sqlQuerier) UpdateWorkspacesTTLByTemplateID(ctx context.Context, arg UpdateWorkspacesTTLByTemplateIDParams) error {
|
|
_, err := q.db.ExecContext(ctx, updateWorkspacesTTLByTemplateID, arg.TemplateID, arg.Ttl)
|
|
return err
|
|
}
|
|
|
|
// getWorkspaceAgentScriptsByAgentIDs is the sqlc-generated SQL behind
// GetWorkspaceAgentScriptsByAgentIDs ($1 = uuid[] of agent IDs).
const getWorkspaceAgentScriptsByAgentIDs = `-- name: GetWorkspaceAgentScriptsByAgentIDs :many
SELECT workspace_agent_id, log_source_id, log_path, created_at, script, cron, start_blocks_login, run_on_start, run_on_stop, timeout_seconds, display_name, id FROM workspace_agent_scripts WHERE workspace_agent_id = ANY($1 :: uuid [ ])
`
|
|
|
|
// GetWorkspaceAgentScriptsByAgentIDs returns every workspace_agent_scripts
// row whose workspace_agent_id is in ids. The slice is passed to Postgres
// as a uuid[] via pq.Array.
func (q *sqlQuerier) GetWorkspaceAgentScriptsByAgentIDs(ctx context.Context, ids []uuid.UUID) ([]WorkspaceAgentScript, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceAgentScriptsByAgentIDs, pq.Array(ids))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceAgentScript
	for rows.Next() {
		var i WorkspaceAgentScript
		// Scan order must match the query's select list exactly.
		if err := rows.Scan(
			&i.WorkspaceAgentID,
			&i.LogSourceID,
			&i.LogPath,
			&i.CreatedAt,
			&i.Script,
			&i.Cron,
			&i.StartBlocksLogin,
			&i.RunOnStart,
			&i.RunOnStop,
			&i.TimeoutSeconds,
			&i.DisplayName,
			&i.ID,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close (in addition to the defer) so an error raised while
	// releasing the result set is reported instead of silently dropped.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// insertWorkspaceAgentScripts is the sqlc-generated SQL behind
// InsertWorkspaceAgentScripts. It bulk-inserts one row per element of the
// unnested parameter arrays ($3..$12), all sharing the agent ID ($1) and
// created_at ($2), and returns the inserted rows.
const insertWorkspaceAgentScripts = `-- name: InsertWorkspaceAgentScripts :many
INSERT INTO
	workspace_agent_scripts (workspace_agent_id, created_at, log_source_id, log_path, script, cron, start_blocks_login, run_on_start, run_on_stop, timeout_seconds, display_name, id)
SELECT
	$1 :: uuid AS workspace_agent_id,
	$2 :: timestamptz AS created_at,
	unnest($3 :: uuid [ ]) AS log_source_id,
	unnest($4 :: text [ ]) AS log_path,
	unnest($5 :: text [ ]) AS script,
	unnest($6 :: text [ ]) AS cron,
	unnest($7 :: boolean [ ]) AS start_blocks_login,
	unnest($8 :: boolean [ ]) AS run_on_start,
	unnest($9 :: boolean [ ]) AS run_on_stop,
	unnest($10 :: integer [ ]) AS timeout_seconds,
	unnest($11 :: text [ ]) AS display_name,
	unnest($12 :: uuid [ ]) AS id
RETURNING workspace_agent_scripts.workspace_agent_id, workspace_agent_scripts.log_source_id, workspace_agent_scripts.log_path, workspace_agent_scripts.created_at, workspace_agent_scripts.script, workspace_agent_scripts.cron, workspace_agent_scripts.start_blocks_login, workspace_agent_scripts.run_on_start, workspace_agent_scripts.run_on_stop, workspace_agent_scripts.timeout_seconds, workspace_agent_scripts.display_name, workspace_agent_scripts.id
`

// InsertWorkspaceAgentScriptsParams carries the bind parameters for
// InsertWorkspaceAgentScripts. The slice fields are parallel arrays:
// element k of each describes script k. NOTE(review): the slices are
// presumably expected to share one length — confirm with callers; the
// query itself does not enforce it.
type InsertWorkspaceAgentScriptsParams struct {
	WorkspaceAgentID uuid.UUID   `db:"workspace_agent_id" json:"workspace_agent_id"`
	CreatedAt        time.Time   `db:"created_at" json:"created_at"`
	LogSourceID      []uuid.UUID `db:"log_source_id" json:"log_source_id"`
	LogPath          []string    `db:"log_path" json:"log_path"`
	Script           []string    `db:"script" json:"script"`
	Cron             []string    `db:"cron" json:"cron"`
	StartBlocksLogin []bool      `db:"start_blocks_login" json:"start_blocks_login"`
	RunOnStart       []bool      `db:"run_on_start" json:"run_on_start"`
	RunOnStop        []bool      `db:"run_on_stop" json:"run_on_stop"`
	TimeoutSeconds   []int32     `db:"timeout_seconds" json:"timeout_seconds"`
	DisplayName      []string    `db:"display_name" json:"display_name"`
	ID               []uuid.UUID `db:"id" json:"id"`
}
|
|
|
|
// InsertWorkspaceAgentScripts bulk-inserts the scripts described by the
// parallel slices in arg for a single agent and returns the inserted rows.
func (q *sqlQuerier) InsertWorkspaceAgentScripts(ctx context.Context, arg InsertWorkspaceAgentScriptsParams) ([]WorkspaceAgentScript, error) {
	// Argument order must match the query placeholders $1..$12; slices are
	// wrapped with pq.Array so they bind as Postgres arrays.
	rows, err := q.db.QueryContext(ctx, insertWorkspaceAgentScripts,
		arg.WorkspaceAgentID,
		arg.CreatedAt,
		pq.Array(arg.LogSourceID),
		pq.Array(arg.LogPath),
		pq.Array(arg.Script),
		pq.Array(arg.Cron),
		pq.Array(arg.StartBlocksLogin),
		pq.Array(arg.RunOnStart),
		pq.Array(arg.RunOnStop),
		pq.Array(arg.TimeoutSeconds),
		pq.Array(arg.DisplayName),
		pq.Array(arg.ID),
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceAgentScript
	for rows.Next() {
		var i WorkspaceAgentScript
		// Scan order must match the RETURNING column list exactly.
		if err := rows.Scan(
			&i.WorkspaceAgentID,
			&i.LogSourceID,
			&i.LogPath,
			&i.CreatedAt,
			&i.Script,
			&i.Cron,
			&i.StartBlocksLogin,
			&i.RunOnStart,
			&i.RunOnStop,
			&i.TimeoutSeconds,
			&i.DisplayName,
			&i.ID,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close (in addition to the defer) so an error raised while
	// releasing the result set is reported instead of silently dropped.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|