mirror of
https://github.com/coder/coder.git
synced 2025-07-01 16:07:26 +00:00
Updates `DeleteOldWorkspaceAgentLogs` to: - retain logs for the most recent build regardless of age, and - delete logs for agents that never connected and were created before the deletion cutoff, while still retaining the logs of the most recent build.
14683 lines
432 KiB
Go
14683 lines
432 KiB
Go
// Code generated by sqlc. DO NOT EDIT.
|
|
// versions:
|
|
// sqlc v1.25.0
|
|
|
|
package database
|
|
|
|
import (
|
|
"context"
|
|
"database/sql"
|
|
"encoding/json"
|
|
"time"
|
|
|
|
"github.com/google/uuid"
|
|
"github.com/lib/pq"
|
|
"github.com/sqlc-dev/pqtype"
|
|
)
|
|
|
|
// activityBumpWorkspace extends the deadline of a workspace's most recent
// build in response to user activity. Generated by sqlc; the SQL text must
// stay byte-identical to the generator output.
const activityBumpWorkspace = `-- name: ActivityBumpWorkspace :exec
WITH latest AS (
	SELECT
		workspace_builds.id::uuid AS build_id,
		workspace_builds.deadline::timestamp with time zone AS build_deadline,
		workspace_builds.max_deadline::timestamp with time zone AS build_max_deadline,
		workspace_builds.transition AS build_transition,
		provisioner_jobs.completed_at::timestamp with time zone AS job_completed_at,
		templates.activity_bump AS activity_bump,
		(
			CASE
				-- If the extension would push us over the next_autostart
				-- interval, then extend the deadline by the full TTL (NOT
				-- activity bump) from the autostart time. This will essentially
				-- be as if the workspace auto started at the given time and the
				-- original TTL was applied.
				--
				-- Sadly we can't define ` + "`" + `activity_bump_interval` + "`" + ` above since
				-- it won't be available for this CASE statement, so we have to
				-- copy the cast twice.
				WHEN NOW() + (templates.activity_bump / 1000 / 1000 / 1000 || ' seconds')::interval > $1 :: timestamptz
					-- If the autostart is behind now(), then the
					-- autostart schedule is either the 0 time and not provided,
					-- or it was the autostart in the past, which is no longer
					-- relevant. If autostart is > 0 and in the past, then
					-- that is a mistake by the caller.
					AND $1 > NOW()
				THEN
					-- Extend to the autostart, then add the activity bump
					(($1 :: timestamptz) - NOW()) + CASE
						WHEN templates.allow_user_autostop
						THEN (workspaces.ttl / 1000 / 1000 / 1000 || ' seconds')::interval
						ELSE (templates.default_ttl / 1000 / 1000 / 1000 || ' seconds')::interval
					END

				-- Default to the activity bump duration.
				ELSE
					(templates.activity_bump / 1000 / 1000 / 1000 || ' seconds')::interval
			END
		) AS ttl_interval
	FROM workspace_builds
	JOIN provisioner_jobs
		ON provisioner_jobs.id = workspace_builds.job_id
	JOIN workspaces
		ON workspaces.id = workspace_builds.workspace_id
	JOIN templates
		ON templates.id = workspaces.template_id
	WHERE workspace_builds.workspace_id = $2::uuid
	ORDER BY workspace_builds.build_number DESC
	LIMIT 1
)
UPDATE
	workspace_builds wb
SET
	updated_at = NOW(),
	deadline = CASE
		WHEN l.build_max_deadline = '0001-01-01 00:00:00+00'
		-- Never reduce the deadline from activity.
		THEN GREATEST(wb.deadline, NOW() + l.ttl_interval)
		ELSE LEAST(GREATEST(wb.deadline, NOW() + l.ttl_interval), l.build_max_deadline)
	END
FROM latest l
WHERE wb.id = l.build_id
	AND l.job_completed_at IS NOT NULL
	AND l.activity_bump > 0
	AND l.build_transition = 'start'
	AND l.ttl_interval > '0 seconds'::interval
	AND l.build_deadline != '0001-01-01 00:00:00+00'
	AND l.build_deadline - (l.ttl_interval * 0.95) < NOW()
`

// ActivityBumpWorkspaceParams holds the arguments for ActivityBumpWorkspace.
type ActivityBumpWorkspaceParams struct {
	NextAutostart time.Time `db:"next_autostart" json:"next_autostart"`
	WorkspaceID   uuid.UUID `db:"workspace_id" json:"workspace_id"`
}

// Bumps the workspace deadline by the template's configured "activity_bump"
// duration (default 1h). If the workspace bump will cross an autostart
// threshold, then the bump is autostart + TTL. This is the deadline behavior if
// the workspace was to autostart from a stopped state.
//
// Max deadline is respected, and the deadline will never be bumped past it.
// The deadline will never decrease.
// We only bump if the template has an activity bump duration set.
// We only bump if the raw interval is positive and non-zero.
// We only bump if workspace shutdown is manual.
// We only bump when 5% of the deadline has elapsed.
func (q *sqlQuerier) ActivityBumpWorkspace(ctx context.Context, arg ActivityBumpWorkspaceParams) error {
	_, err := q.db.ExecContext(ctx, activityBumpWorkspace, arg.NextAutostart, arg.WorkspaceID)
	return err
}
const deleteAPIKeyByID = `-- name: DeleteAPIKeyByID :exec
DELETE FROM
	api_keys
WHERE
	id = $1
`

// DeleteAPIKeyByID removes the API key with the given ID.
func (q *sqlQuerier) DeleteAPIKeyByID(ctx context.Context, id string) error {
	_, err := q.db.ExecContext(ctx, deleteAPIKeyByID, id)
	return err
}
const deleteAPIKeysByUserID = `-- name: DeleteAPIKeysByUserID :exec
DELETE FROM
	api_keys
WHERE
	user_id = $1
`

// DeleteAPIKeysByUserID removes every API key belonging to the given user.
func (q *sqlQuerier) DeleteAPIKeysByUserID(ctx context.Context, userID uuid.UUID) error {
	_, err := q.db.ExecContext(ctx, deleteAPIKeysByUserID, userID)
	return err
}
const deleteApplicationConnectAPIKeysByUserID = `-- name: DeleteApplicationConnectAPIKeysByUserID :exec
DELETE FROM
	api_keys
WHERE
	user_id = $1 AND
	scope = 'application_connect'::api_key_scope
`

// DeleteApplicationConnectAPIKeysByUserID removes only the user's keys whose
// scope is 'application_connect'; keys with other scopes are untouched.
func (q *sqlQuerier) DeleteApplicationConnectAPIKeysByUserID(ctx context.Context, userID uuid.UUID) error {
	_, err := q.db.ExecContext(ctx, deleteApplicationConnectAPIKeysByUserID, userID)
	return err
}
const getAPIKeyByID = `-- name: GetAPIKeyByID :one
SELECT
	id, hashed_secret, user_id, last_used, expires_at, created_at, updated_at, login_type, lifetime_seconds, ip_address, scope, token_name
FROM
	api_keys
WHERE
	id = $1
LIMIT
	1
`

// GetAPIKeyByID fetches a single API key by its ID.
func (q *sqlQuerier) GetAPIKeyByID(ctx context.Context, id string) (APIKey, error) {
	row := q.db.QueryRowContext(ctx, getAPIKeyByID, id)
	var i APIKey
	// Scan order must match the column order in the SELECT above.
	err := row.Scan(
		&i.ID,
		&i.HashedSecret,
		&i.UserID,
		&i.LastUsed,
		&i.ExpiresAt,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.LoginType,
		&i.LifetimeSeconds,
		&i.IPAddress,
		&i.Scope,
		&i.TokenName,
	)
	return i, err
}
const getAPIKeyByName = `-- name: GetAPIKeyByName :one
SELECT
	id, hashed_secret, user_id, last_used, expires_at, created_at, updated_at, login_type, lifetime_seconds, ip_address, scope, token_name
FROM
	api_keys
WHERE
	user_id = $1 AND
	token_name = $2 AND
	-- Empty names are excluded because...
	token_name != ''
LIMIT
	1
`

// GetAPIKeyByNameParams holds the arguments for GetAPIKeyByName.
type GetAPIKeyByNameParams struct {
	UserID    uuid.UUID `db:"user_id" json:"user_id"`
	TokenName string    `db:"token_name" json:"token_name"`
}

// there is no unique constraint on empty token names
func (q *sqlQuerier) GetAPIKeyByName(ctx context.Context, arg GetAPIKeyByNameParams) (APIKey, error) {
	row := q.db.QueryRowContext(ctx, getAPIKeyByName, arg.UserID, arg.TokenName)
	var i APIKey
	// Scan order must match the column order in the SELECT above.
	err := row.Scan(
		&i.ID,
		&i.HashedSecret,
		&i.UserID,
		&i.LastUsed,
		&i.ExpiresAt,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.LoginType,
		&i.LifetimeSeconds,
		&i.IPAddress,
		&i.Scope,
		&i.TokenName,
	)
	return i, err
}
const getAPIKeysByLoginType = `-- name: GetAPIKeysByLoginType :many
SELECT id, hashed_secret, user_id, last_used, expires_at, created_at, updated_at, login_type, lifetime_seconds, ip_address, scope, token_name FROM api_keys WHERE login_type = $1
`

// GetAPIKeysByLoginType lists every API key created with the given login type.
func (q *sqlQuerier) GetAPIKeysByLoginType(ctx context.Context, loginType LoginType) ([]APIKey, error) {
	rows, err := q.db.QueryContext(ctx, getAPIKeysByLoginType, loginType)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []APIKey
	for rows.Next() {
		var i APIKey
		if err := rows.Scan(
			&i.ID,
			&i.HashedSecret,
			&i.UserID,
			&i.LastUsed,
			&i.ExpiresAt,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.LoginType,
			&i.LifetimeSeconds,
			&i.IPAddress,
			&i.Scope,
			&i.TokenName,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const getAPIKeysByUserID = `-- name: GetAPIKeysByUserID :many
SELECT id, hashed_secret, user_id, last_used, expires_at, created_at, updated_at, login_type, lifetime_seconds, ip_address, scope, token_name FROM api_keys WHERE login_type = $1 AND user_id = $2
`

// GetAPIKeysByUserIDParams holds the arguments for GetAPIKeysByUserID.
type GetAPIKeysByUserIDParams struct {
	LoginType LoginType `db:"login_type" json:"login_type"`
	UserID    uuid.UUID `db:"user_id" json:"user_id"`
}

// GetAPIKeysByUserID lists the user's API keys matching the given login type.
func (q *sqlQuerier) GetAPIKeysByUserID(ctx context.Context, arg GetAPIKeysByUserIDParams) ([]APIKey, error) {
	rows, err := q.db.QueryContext(ctx, getAPIKeysByUserID, arg.LoginType, arg.UserID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []APIKey
	for rows.Next() {
		var i APIKey
		if err := rows.Scan(
			&i.ID,
			&i.HashedSecret,
			&i.UserID,
			&i.LastUsed,
			&i.ExpiresAt,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.LoginType,
			&i.LifetimeSeconds,
			&i.IPAddress,
			&i.Scope,
			&i.TokenName,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const getAPIKeysLastUsedAfter = `-- name: GetAPIKeysLastUsedAfter :many
SELECT id, hashed_secret, user_id, last_used, expires_at, created_at, updated_at, login_type, lifetime_seconds, ip_address, scope, token_name FROM api_keys WHERE last_used > $1
`

// GetAPIKeysLastUsedAfter lists API keys whose last_used timestamp is strictly
// after the given time.
func (q *sqlQuerier) GetAPIKeysLastUsedAfter(ctx context.Context, lastUsed time.Time) ([]APIKey, error) {
	rows, err := q.db.QueryContext(ctx, getAPIKeysLastUsedAfter, lastUsed)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []APIKey
	for rows.Next() {
		var i APIKey
		if err := rows.Scan(
			&i.ID,
			&i.HashedSecret,
			&i.UserID,
			&i.LastUsed,
			&i.ExpiresAt,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.LoginType,
			&i.LifetimeSeconds,
			&i.IPAddress,
			&i.Scope,
			&i.TokenName,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const insertAPIKey = `-- name: InsertAPIKey :one
INSERT INTO
	api_keys (
		id,
		lifetime_seconds,
		hashed_secret,
		ip_address,
		user_id,
		last_used,
		expires_at,
		created_at,
		updated_at,
		login_type,
		scope,
		token_name
	)
VALUES
	($1,
	-- If the lifetime is set to 0, default to 24hrs
	CASE $2::bigint
	WHEN 0 THEN 86400
	ELSE $2::bigint
	END
	, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12) RETURNING id, hashed_secret, user_id, last_used, expires_at, created_at, updated_at, login_type, lifetime_seconds, ip_address, scope, token_name
`

// InsertAPIKeyParams holds the arguments for InsertAPIKey.
type InsertAPIKeyParams struct {
	ID              string      `db:"id" json:"id"`
	LifetimeSeconds int64       `db:"lifetime_seconds" json:"lifetime_seconds"`
	HashedSecret    []byte      `db:"hashed_secret" json:"hashed_secret"`
	IPAddress       pqtype.Inet `db:"ip_address" json:"ip_address"`
	UserID          uuid.UUID   `db:"user_id" json:"user_id"`
	LastUsed        time.Time   `db:"last_used" json:"last_used"`
	ExpiresAt       time.Time   `db:"expires_at" json:"expires_at"`
	CreatedAt       time.Time   `db:"created_at" json:"created_at"`
	UpdatedAt       time.Time   `db:"updated_at" json:"updated_at"`
	LoginType       LoginType   `db:"login_type" json:"login_type"`
	Scope           APIKeyScope `db:"scope" json:"scope"`
	TokenName       string      `db:"token_name" json:"token_name"`
}

// InsertAPIKey creates a new API key and returns the inserted row.
// A LifetimeSeconds of 0 is replaced by 86400 (24h) in the SQL above.
func (q *sqlQuerier) InsertAPIKey(ctx context.Context, arg InsertAPIKeyParams) (APIKey, error) {
	row := q.db.QueryRowContext(ctx, insertAPIKey,
		arg.ID,
		arg.LifetimeSeconds,
		arg.HashedSecret,
		arg.IPAddress,
		arg.UserID,
		arg.LastUsed,
		arg.ExpiresAt,
		arg.CreatedAt,
		arg.UpdatedAt,
		arg.LoginType,
		arg.Scope,
		arg.TokenName,
	)
	var i APIKey
	// Scan order must match the RETURNING column order above.
	err := row.Scan(
		&i.ID,
		&i.HashedSecret,
		&i.UserID,
		&i.LastUsed,
		&i.ExpiresAt,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.LoginType,
		&i.LifetimeSeconds,
		&i.IPAddress,
		&i.Scope,
		&i.TokenName,
	)
	return i, err
}
const updateAPIKeyByID = `-- name: UpdateAPIKeyByID :exec
UPDATE
	api_keys
SET
	last_used = $2,
	expires_at = $3,
	ip_address = $4
WHERE
	id = $1
`

// UpdateAPIKeyByIDParams holds the arguments for UpdateAPIKeyByID.
type UpdateAPIKeyByIDParams struct {
	ID        string      `db:"id" json:"id"`
	LastUsed  time.Time   `db:"last_used" json:"last_used"`
	ExpiresAt time.Time   `db:"expires_at" json:"expires_at"`
	IPAddress pqtype.Inet `db:"ip_address" json:"ip_address"`
}

// UpdateAPIKeyByID updates the mutable fields (last_used, expires_at,
// ip_address) of the API key with the given ID.
func (q *sqlQuerier) UpdateAPIKeyByID(ctx context.Context, arg UpdateAPIKeyByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateAPIKeyByID,
		arg.ID,
		arg.LastUsed,
		arg.ExpiresAt,
		arg.IPAddress,
	)
	return err
}
const getAuditLogsOffset = `-- name: GetAuditLogsOffset :many
SELECT
	audit_logs.id, audit_logs.time, audit_logs.user_id, audit_logs.organization_id, audit_logs.ip, audit_logs.user_agent, audit_logs.resource_type, audit_logs.resource_id, audit_logs.resource_target, audit_logs.action, audit_logs.diff, audit_logs.status_code, audit_logs.additional_fields, audit_logs.request_id, audit_logs.resource_icon,
	-- sqlc.embed(users) would be nice but it does not seem to play well with
	-- left joins.
	users.username AS user_username,
	users.name AS user_name,
	users.email AS user_email,
	users.created_at AS user_created_at,
	users.updated_at AS user_updated_at,
	users.last_seen_at AS user_last_seen_at,
	users.status AS user_status,
	users.login_type AS user_login_type,
	users.rbac_roles AS user_roles,
	users.avatar_url AS user_avatar_url,
	users.deleted AS user_deleted,
	users.theme_preference AS user_theme_preference,
	users.quiet_hours_schedule AS user_quiet_hours_schedule,
	COALESCE(organizations.name, '') AS organization_name,
	COALESCE(organizations.display_name, '') AS organization_display_name,
	COALESCE(organizations.icon, '') AS organization_icon,
	COUNT(audit_logs.*) OVER () AS count
FROM
	audit_logs
	LEFT JOIN users ON audit_logs.user_id = users.id
	LEFT JOIN
		-- First join on workspaces to get the initial workspace create
		-- to workspace build 1 id. This is because the first create is
		-- is a different audit log than subsequent starts.
		workspaces ON
			audit_logs.resource_type = 'workspace' AND
			audit_logs.resource_id = workspaces.id
	LEFT JOIN
		workspace_builds ON
			-- Get the reason from the build if the resource type
			-- is a workspace_build
			(
				audit_logs.resource_type = 'workspace_build'
				AND audit_logs.resource_id = workspace_builds.id
			)
			OR
			-- Get the reason from the build #1 if this is the first
			-- workspace create.
			(
				audit_logs.resource_type = 'workspace' AND
				audit_logs.action = 'create' AND
				workspaces.id = workspace_builds.workspace_id AND
				workspace_builds.build_number = 1
			)
	LEFT JOIN organizations ON audit_logs.organization_id = organizations.id
WHERE
	-- Filter resource_type
	CASE
		WHEN $1 :: text != '' THEN
			resource_type = $1 :: resource_type
		ELSE true
	END
	-- Filter resource_id
	AND CASE
		WHEN $2 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
			resource_id = $2
		ELSE true
	END
	-- Filter organization_id
	AND CASE
		WHEN $3 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
			audit_logs.organization_id = $3
		ELSE true
	END
	-- Filter by resource_target
	AND CASE
		WHEN $4 :: text != '' THEN
			resource_target = $4
		ELSE true
	END
	-- Filter action
	AND CASE
		WHEN $5 :: text != '' THEN
			action = $5 :: audit_action
		ELSE true
	END
	-- Filter by user_id
	AND CASE
		WHEN $6 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
			user_id = $6
		ELSE true
	END
	-- Filter by username
	AND CASE
		WHEN $7 :: text != '' THEN
			user_id = (SELECT id FROM users WHERE lower(username) = lower($7) AND deleted = false)
		ELSE true
	END
	-- Filter by user_email
	AND CASE
		WHEN $8 :: text != '' THEN
			users.email = $8
		ELSE true
	END
	-- Filter by date_from
	AND CASE
		WHEN $9 :: timestamp with time zone != '0001-01-01 00:00:00Z' THEN
			"time" >= $9
		ELSE true
	END
	-- Filter by date_to
	AND CASE
		WHEN $10 :: timestamp with time zone != '0001-01-01 00:00:00Z' THEN
			"time" <= $10
		ELSE true
	END
	-- Filter by build_reason
	AND CASE
		WHEN $11::text != '' THEN
			workspace_builds.reason::text = $11
		ELSE true
	END

	-- Authorize Filter clause will be injected below in GetAuthorizedAuditLogsOffset
	-- @authorize_filter
ORDER BY
	"time" DESC
LIMIT
	-- a limit of 0 means "no limit". The audit log table is unbounded
	-- in size, and is expected to be quite large. Implement a default
	-- limit of 100 to prevent accidental excessively large queries.
	COALESCE(NULLIF($13 :: int, 0), 100)
OFFSET
	$12
`

// GetAuditLogsOffsetParams holds the filter, offset, and limit arguments for
// GetAuditLogsOffset. Zero values (empty string, zero UUID, zero time)
// disable the corresponding filter in the SQL above.
type GetAuditLogsOffsetParams struct {
	ResourceType   string    `db:"resource_type" json:"resource_type"`
	ResourceID     uuid.UUID `db:"resource_id" json:"resource_id"`
	OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
	ResourceTarget string    `db:"resource_target" json:"resource_target"`
	Action         string    `db:"action" json:"action"`
	UserID         uuid.UUID `db:"user_id" json:"user_id"`
	Username       string    `db:"username" json:"username"`
	Email          string    `db:"email" json:"email"`
	DateFrom       time.Time `db:"date_from" json:"date_from"`
	DateTo         time.Time `db:"date_to" json:"date_to"`
	BuildReason    string    `db:"build_reason" json:"build_reason"`
	OffsetOpt      int32     `db:"offset_opt" json:"offset_opt"`
	LimitOpt       int32     `db:"limit_opt" json:"limit_opt"`
}

// GetAuditLogsOffsetRow is one result row: the audit log plus denormalized
// user and organization columns (nullable because of the LEFT JOINs) and the
// total row count computed by the window function.
type GetAuditLogsOffsetRow struct {
	AuditLog                AuditLog       `db:"audit_log" json:"audit_log"`
	UserUsername            sql.NullString `db:"user_username" json:"user_username"`
	UserName                sql.NullString `db:"user_name" json:"user_name"`
	UserEmail               sql.NullString `db:"user_email" json:"user_email"`
	UserCreatedAt           sql.NullTime   `db:"user_created_at" json:"user_created_at"`
	UserUpdatedAt           sql.NullTime   `db:"user_updated_at" json:"user_updated_at"`
	UserLastSeenAt          sql.NullTime   `db:"user_last_seen_at" json:"user_last_seen_at"`
	UserStatus              NullUserStatus `db:"user_status" json:"user_status"`
	UserLoginType           NullLoginType  `db:"user_login_type" json:"user_login_type"`
	UserRoles               pq.StringArray `db:"user_roles" json:"user_roles"`
	UserAvatarUrl           sql.NullString `db:"user_avatar_url" json:"user_avatar_url"`
	UserDeleted             sql.NullBool   `db:"user_deleted" json:"user_deleted"`
	UserThemePreference     sql.NullString `db:"user_theme_preference" json:"user_theme_preference"`
	UserQuietHoursSchedule  sql.NullString `db:"user_quiet_hours_schedule" json:"user_quiet_hours_schedule"`
	OrganizationName        string         `db:"organization_name" json:"organization_name"`
	OrganizationDisplayName string         `db:"organization_display_name" json:"organization_display_name"`
	OrganizationIcon        string         `db:"organization_icon" json:"organization_icon"`
	Count                   int64          `db:"count" json:"count"`
}

// GetAuditLogsBefore retrieves `row_limit` number of audit logs before the provided
// ID.
func (q *sqlQuerier) GetAuditLogsOffset(ctx context.Context, arg GetAuditLogsOffsetParams) ([]GetAuditLogsOffsetRow, error) {
	rows, err := q.db.QueryContext(ctx, getAuditLogsOffset,
		arg.ResourceType,
		arg.ResourceID,
		arg.OrganizationID,
		arg.ResourceTarget,
		arg.Action,
		arg.UserID,
		arg.Username,
		arg.Email,
		arg.DateFrom,
		arg.DateTo,
		arg.BuildReason,
		arg.OffsetOpt,
		arg.LimitOpt,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetAuditLogsOffsetRow
	for rows.Next() {
		var i GetAuditLogsOffsetRow
		// Scan order must match the column order in the SELECT above.
		if err := rows.Scan(
			&i.AuditLog.ID,
			&i.AuditLog.Time,
			&i.AuditLog.UserID,
			&i.AuditLog.OrganizationID,
			&i.AuditLog.Ip,
			&i.AuditLog.UserAgent,
			&i.AuditLog.ResourceType,
			&i.AuditLog.ResourceID,
			&i.AuditLog.ResourceTarget,
			&i.AuditLog.Action,
			&i.AuditLog.Diff,
			&i.AuditLog.StatusCode,
			&i.AuditLog.AdditionalFields,
			&i.AuditLog.RequestID,
			&i.AuditLog.ResourceIcon,
			&i.UserUsername,
			&i.UserName,
			&i.UserEmail,
			&i.UserCreatedAt,
			&i.UserUpdatedAt,
			&i.UserLastSeenAt,
			&i.UserStatus,
			&i.UserLoginType,
			&i.UserRoles,
			&i.UserAvatarUrl,
			&i.UserDeleted,
			&i.UserThemePreference,
			&i.UserQuietHoursSchedule,
			&i.OrganizationName,
			&i.OrganizationDisplayName,
			&i.OrganizationIcon,
			&i.Count,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const insertAuditLog = `-- name: InsertAuditLog :one
INSERT INTO
	audit_logs (
		id,
		"time",
		user_id,
		organization_id,
		ip,
		user_agent,
		resource_type,
		resource_id,
		resource_target,
		action,
		diff,
		status_code,
		additional_fields,
		request_id,
		resource_icon
	)
VALUES
	($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15) RETURNING id, time, user_id, organization_id, ip, user_agent, resource_type, resource_id, resource_target, action, diff, status_code, additional_fields, request_id, resource_icon
`

// InsertAuditLogParams holds the arguments for InsertAuditLog.
type InsertAuditLogParams struct {
	ID               uuid.UUID       `db:"id" json:"id"`
	Time             time.Time       `db:"time" json:"time"`
	UserID           uuid.UUID       `db:"user_id" json:"user_id"`
	OrganizationID   uuid.UUID       `db:"organization_id" json:"organization_id"`
	Ip               pqtype.Inet     `db:"ip" json:"ip"`
	UserAgent        sql.NullString  `db:"user_agent" json:"user_agent"`
	ResourceType     ResourceType    `db:"resource_type" json:"resource_type"`
	ResourceID       uuid.UUID       `db:"resource_id" json:"resource_id"`
	ResourceTarget   string          `db:"resource_target" json:"resource_target"`
	Action           AuditAction     `db:"action" json:"action"`
	Diff             json.RawMessage `db:"diff" json:"diff"`
	StatusCode       int32           `db:"status_code" json:"status_code"`
	AdditionalFields json.RawMessage `db:"additional_fields" json:"additional_fields"`
	RequestID        uuid.UUID       `db:"request_id" json:"request_id"`
	ResourceIcon     string          `db:"resource_icon" json:"resource_icon"`
}

// InsertAuditLog creates a new audit log entry and returns the inserted row.
func (q *sqlQuerier) InsertAuditLog(ctx context.Context, arg InsertAuditLogParams) (AuditLog, error) {
	row := q.db.QueryRowContext(ctx, insertAuditLog,
		arg.ID,
		arg.Time,
		arg.UserID,
		arg.OrganizationID,
		arg.Ip,
		arg.UserAgent,
		arg.ResourceType,
		arg.ResourceID,
		arg.ResourceTarget,
		arg.Action,
		arg.Diff,
		arg.StatusCode,
		arg.AdditionalFields,
		arg.RequestID,
		arg.ResourceIcon,
	)
	var i AuditLog
	// Scan order must match the RETURNING column order above.
	err := row.Scan(
		&i.ID,
		&i.Time,
		&i.UserID,
		&i.OrganizationID,
		&i.Ip,
		&i.UserAgent,
		&i.ResourceType,
		&i.ResourceID,
		&i.ResourceTarget,
		&i.Action,
		&i.Diff,
		&i.StatusCode,
		&i.AdditionalFields,
		&i.RequestID,
		&i.ResourceIcon,
	)
	return i, err
}
const getDBCryptKeys = `-- name: GetDBCryptKeys :many
SELECT number, active_key_digest, revoked_key_digest, created_at, revoked_at, test FROM dbcrypt_keys ORDER BY number ASC
`

// GetDBCryptKeys lists all database encryption keys ordered by key number.
func (q *sqlQuerier) GetDBCryptKeys(ctx context.Context) ([]DBCryptKey, error) {
	rows, err := q.db.QueryContext(ctx, getDBCryptKeys)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []DBCryptKey
	for rows.Next() {
		var i DBCryptKey
		if err := rows.Scan(
			&i.Number,
			&i.ActiveKeyDigest,
			&i.RevokedKeyDigest,
			&i.CreatedAt,
			&i.RevokedAt,
			&i.Test,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const insertDBCryptKey = `-- name: InsertDBCryptKey :exec
INSERT INTO dbcrypt_keys
	(number, active_key_digest, created_at, test)
VALUES ($1::int, $2::text, CURRENT_TIMESTAMP, $3::text)
`

// InsertDBCryptKeyParams holds the arguments for InsertDBCryptKey.
type InsertDBCryptKeyParams struct {
	Number          int32  `db:"number" json:"number"`
	ActiveKeyDigest string `db:"active_key_digest" json:"active_key_digest"`
	Test            string `db:"test" json:"test"`
}

// InsertDBCryptKey inserts a new active database encryption key; created_at is
// set by the database via CURRENT_TIMESTAMP.
func (q *sqlQuerier) InsertDBCryptKey(ctx context.Context, arg InsertDBCryptKeyParams) error {
	_, err := q.db.ExecContext(ctx, insertDBCryptKey, arg.Number, arg.ActiveKeyDigest, arg.Test)
	return err
}
const revokeDBCryptKey = `-- name: RevokeDBCryptKey :exec
UPDATE dbcrypt_keys
SET
	revoked_key_digest = active_key_digest,
	active_key_digest = revoked_key_digest,
	revoked_at = CURRENT_TIMESTAMP
WHERE
	active_key_digest = $1::text
AND
	revoked_key_digest IS NULL
`

// RevokeDBCryptKey revokes the key with the given active digest: the digest is
// swapped into revoked_key_digest (active becomes the prior revoked value,
// which the WHERE clause guarantees is NULL) and revoked_at is stamped.
func (q *sqlQuerier) RevokeDBCryptKey(ctx context.Context, activeKeyDigest string) error {
	_, err := q.db.ExecContext(ctx, revokeDBCryptKey, activeKeyDigest)
	return err
}
const deleteExternalAuthLink = `-- name: DeleteExternalAuthLink :exec
DELETE FROM external_auth_links WHERE provider_id = $1 AND user_id = $2
`

// DeleteExternalAuthLinkParams holds the arguments for DeleteExternalAuthLink.
type DeleteExternalAuthLinkParams struct {
	ProviderID string    `db:"provider_id" json:"provider_id"`
	UserID     uuid.UUID `db:"user_id" json:"user_id"`
}

// DeleteExternalAuthLink removes the link between a user and an external auth
// provider.
func (q *sqlQuerier) DeleteExternalAuthLink(ctx context.Context, arg DeleteExternalAuthLinkParams) error {
	_, err := q.db.ExecContext(ctx, deleteExternalAuthLink, arg.ProviderID, arg.UserID)
	return err
}
const getExternalAuthLink = `-- name: GetExternalAuthLink :one
SELECT provider_id, user_id, created_at, updated_at, oauth_access_token, oauth_refresh_token, oauth_expiry, oauth_access_token_key_id, oauth_refresh_token_key_id, oauth_extra FROM external_auth_links WHERE provider_id = $1 AND user_id = $2
`

// GetExternalAuthLinkParams holds the arguments for GetExternalAuthLink.
type GetExternalAuthLinkParams struct {
	ProviderID string    `db:"provider_id" json:"provider_id"`
	UserID     uuid.UUID `db:"user_id" json:"user_id"`
}

// GetExternalAuthLink fetches the link between a user and an external auth
// provider.
func (q *sqlQuerier) GetExternalAuthLink(ctx context.Context, arg GetExternalAuthLinkParams) (ExternalAuthLink, error) {
	row := q.db.QueryRowContext(ctx, getExternalAuthLink, arg.ProviderID, arg.UserID)
	var i ExternalAuthLink
	// Scan order must match the column order in the SELECT above.
	err := row.Scan(
		&i.ProviderID,
		&i.UserID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.OAuthAccessToken,
		&i.OAuthRefreshToken,
		&i.OAuthExpiry,
		&i.OAuthAccessTokenKeyID,
		&i.OAuthRefreshTokenKeyID,
		&i.OAuthExtra,
	)
	return i, err
}
const getExternalAuthLinksByUserID = `-- name: GetExternalAuthLinksByUserID :many
SELECT provider_id, user_id, created_at, updated_at, oauth_access_token, oauth_refresh_token, oauth_expiry, oauth_access_token_key_id, oauth_refresh_token_key_id, oauth_extra FROM external_auth_links WHERE user_id = $1
`

// GetExternalAuthLinksByUserID lists all external auth links for a user.
func (q *sqlQuerier) GetExternalAuthLinksByUserID(ctx context.Context, userID uuid.UUID) ([]ExternalAuthLink, error) {
	rows, err := q.db.QueryContext(ctx, getExternalAuthLinksByUserID, userID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []ExternalAuthLink
	for rows.Next() {
		var i ExternalAuthLink
		if err := rows.Scan(
			&i.ProviderID,
			&i.UserID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.OAuthAccessToken,
			&i.OAuthRefreshToken,
			&i.OAuthExpiry,
			&i.OAuthAccessTokenKeyID,
			&i.OAuthRefreshTokenKeyID,
			&i.OAuthExtra,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
const insertExternalAuthLink = `-- name: InsertExternalAuthLink :one
INSERT INTO external_auth_links (
	provider_id,
	user_id,
	created_at,
	updated_at,
	oauth_access_token,
	oauth_access_token_key_id,
	oauth_refresh_token,
	oauth_refresh_token_key_id,
	oauth_expiry,
	oauth_extra
) VALUES (
	$1,
	$2,
	$3,
	$4,
	$5,
	$6,
	$7,
	$8,
	$9,
	$10
) RETURNING provider_id, user_id, created_at, updated_at, oauth_access_token, oauth_refresh_token, oauth_expiry, oauth_access_token_key_id, oauth_refresh_token_key_id, oauth_extra
`

// InsertExternalAuthLinkParams holds the arguments for InsertExternalAuthLink.
type InsertExternalAuthLinkParams struct {
	ProviderID             string                `db:"provider_id" json:"provider_id"`
	UserID                 uuid.UUID             `db:"user_id" json:"user_id"`
	CreatedAt              time.Time             `db:"created_at" json:"created_at"`
	UpdatedAt              time.Time             `db:"updated_at" json:"updated_at"`
	OAuthAccessToken       string                `db:"oauth_access_token" json:"oauth_access_token"`
	OAuthAccessTokenKeyID  sql.NullString        `db:"oauth_access_token_key_id" json:"oauth_access_token_key_id"`
	OAuthRefreshToken      string                `db:"oauth_refresh_token" json:"oauth_refresh_token"`
	OAuthRefreshTokenKeyID sql.NullString        `db:"oauth_refresh_token_key_id" json:"oauth_refresh_token_key_id"`
	OAuthExpiry            time.Time             `db:"oauth_expiry" json:"oauth_expiry"`
	OAuthExtra             pqtype.NullRawMessage `db:"oauth_extra" json:"oauth_extra"`
}

// InsertExternalAuthLink creates a new external auth link and returns the
// inserted row.
func (q *sqlQuerier) InsertExternalAuthLink(ctx context.Context, arg InsertExternalAuthLinkParams) (ExternalAuthLink, error) {
	row := q.db.QueryRowContext(ctx, insertExternalAuthLink,
		arg.ProviderID,
		arg.UserID,
		arg.CreatedAt,
		arg.UpdatedAt,
		arg.OAuthAccessToken,
		arg.OAuthAccessTokenKeyID,
		arg.OAuthRefreshToken,
		arg.OAuthRefreshTokenKeyID,
		arg.OAuthExpiry,
		arg.OAuthExtra,
	)
	var i ExternalAuthLink
	// Scan order must match the RETURNING column order above (note: it differs
	// from the INSERT column order).
	err := row.Scan(
		&i.ProviderID,
		&i.UserID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.OAuthAccessToken,
		&i.OAuthRefreshToken,
		&i.OAuthExpiry,
		&i.OAuthAccessTokenKeyID,
		&i.OAuthRefreshTokenKeyID,
		&i.OAuthExtra,
	)
	return i, err
}
const updateExternalAuthLink = `-- name: UpdateExternalAuthLink :one
UPDATE external_auth_links SET
	updated_at = $3,
	oauth_access_token = $4,
	oauth_access_token_key_id = $5,
	oauth_refresh_token = $6,
	oauth_refresh_token_key_id = $7,
	oauth_expiry = $8,
	oauth_extra = $9
WHERE provider_id = $1 AND user_id = $2 RETURNING provider_id, user_id, created_at, updated_at, oauth_access_token, oauth_refresh_token, oauth_expiry, oauth_access_token_key_id, oauth_refresh_token_key_id, oauth_extra
`

// UpdateExternalAuthLinkParams holds the arguments for UpdateExternalAuthLink.
type UpdateExternalAuthLinkParams struct {
	ProviderID             string                `db:"provider_id" json:"provider_id"`
	UserID                 uuid.UUID             `db:"user_id" json:"user_id"`
	UpdatedAt              time.Time             `db:"updated_at" json:"updated_at"`
	OAuthAccessToken       string                `db:"oauth_access_token" json:"oauth_access_token"`
	OAuthAccessTokenKeyID  sql.NullString        `db:"oauth_access_token_key_id" json:"oauth_access_token_key_id"`
	OAuthRefreshToken      string                `db:"oauth_refresh_token" json:"oauth_refresh_token"`
	OAuthRefreshTokenKeyID sql.NullString        `db:"oauth_refresh_token_key_id" json:"oauth_refresh_token_key_id"`
	OAuthExpiry            time.Time             `db:"oauth_expiry" json:"oauth_expiry"`
	OAuthExtra             pqtype.NullRawMessage `db:"oauth_extra" json:"oauth_extra"`
}

// UpdateExternalAuthLink refreshes the OAuth token fields of an existing
// external auth link and returns the updated row.
func (q *sqlQuerier) UpdateExternalAuthLink(ctx context.Context, arg UpdateExternalAuthLinkParams) (ExternalAuthLink, error) {
	row := q.db.QueryRowContext(ctx, updateExternalAuthLink,
		arg.ProviderID,
		arg.UserID,
		arg.UpdatedAt,
		arg.OAuthAccessToken,
		arg.OAuthAccessTokenKeyID,
		arg.OAuthRefreshToken,
		arg.OAuthRefreshTokenKeyID,
		arg.OAuthExpiry,
		arg.OAuthExtra,
	)
	var i ExternalAuthLink
	// Scan order must match the RETURNING column order above.
	err := row.Scan(
		&i.ProviderID,
		&i.UserID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.OAuthAccessToken,
		&i.OAuthRefreshToken,
		&i.OAuthExpiry,
		&i.OAuthAccessTokenKeyID,
		&i.OAuthRefreshTokenKeyID,
		&i.OAuthExtra,
	)
	return i, err
}
// getFileByHashAndCreator fetches a single file row by its content hash and
// the user who created it.
const getFileByHashAndCreator = `-- name: GetFileByHashAndCreator :one
SELECT
	hash, created_at, created_by, mimetype, data, id
FROM
	files
WHERE
	hash = $1
AND
	created_by = $2
LIMIT
	1
`

// GetFileByHashAndCreatorParams carries the lookup key for GetFileByHashAndCreator.
type GetFileByHashAndCreatorParams struct {
	Hash      string    `db:"hash" json:"hash"`
	CreatedBy uuid.UUID `db:"created_by" json:"created_by"`
}

// GetFileByHashAndCreator returns the file matching (hash, created_by), or
// sql.ErrNoRows via Scan when no row matches.
func (q *sqlQuerier) GetFileByHashAndCreator(ctx context.Context, arg GetFileByHashAndCreatorParams) (File, error) {
	row := q.db.QueryRowContext(ctx, getFileByHashAndCreator, arg.Hash, arg.CreatedBy)
	var i File
	err := row.Scan(
		&i.Hash,
		&i.CreatedAt,
		&i.CreatedBy,
		&i.Mimetype,
		&i.Data,
		&i.ID,
	)
	return i, err
}
|
|
|
|
// getFileByID fetches a single file row by its primary key.
const getFileByID = `-- name: GetFileByID :one
SELECT
	hash, created_at, created_by, mimetype, data, id
FROM
	files
WHERE
	id = $1
LIMIT
	1
`

// GetFileByID returns the file with the given ID, or sql.ErrNoRows via Scan
// when it does not exist.
func (q *sqlQuerier) GetFileByID(ctx context.Context, id uuid.UUID) (File, error) {
	row := q.db.QueryRowContext(ctx, getFileByID, id)
	var i File
	err := row.Scan(
		&i.Hash,
		&i.CreatedAt,
		&i.CreatedBy,
		&i.Mimetype,
		&i.Data,
		&i.ID,
	)
	return i, err
}
|
|
|
|
// getFileTemplates joins templates -> template_versions -> provisioner_jobs
// -> files to find every template whose version-import job referenced the
// given file. Only 'file' storage and 'template_version_import' jobs count.
const getFileTemplates = `-- name: GetFileTemplates :many
SELECT
	files.id AS file_id,
	files.created_by AS file_created_by,
	templates.id AS template_id,
	templates.organization_id AS template_organization_id,
	templates.created_by AS template_created_by,
	templates.user_acl,
	templates.group_acl
FROM
	templates
INNER JOIN
	template_versions
	ON templates.id = template_versions.template_id
INNER JOIN
	provisioner_jobs
	ON job_id = provisioner_jobs.id
INNER JOIN
	files
	ON files.id = provisioner_jobs.file_id
WHERE
	-- Only fetch template version associated files.
	storage_method = 'file'
	AND provisioner_jobs.type = 'template_version_import'
	AND file_id = $1
`

// GetFileTemplatesRow is one (file, template) association, including the
// template's ACLs so callers can make authorization decisions.
type GetFileTemplatesRow struct {
	FileID                 uuid.UUID   `db:"file_id" json:"file_id"`
	FileCreatedBy          uuid.UUID   `db:"file_created_by" json:"file_created_by"`
	TemplateID             uuid.UUID   `db:"template_id" json:"template_id"`
	TemplateOrganizationID uuid.UUID   `db:"template_organization_id" json:"template_organization_id"`
	TemplateCreatedBy      uuid.UUID   `db:"template_created_by" json:"template_created_by"`
	UserACL                TemplateACL `db:"user_acl" json:"user_acl"`
	GroupACL               TemplateACL `db:"group_acl" json:"group_acl"`
}

// Get all templates that use a file.
func (q *sqlQuerier) GetFileTemplates(ctx context.Context, fileID uuid.UUID) ([]GetFileTemplatesRow, error) {
	rows, err := q.db.QueryContext(ctx, getFileTemplates, fileID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetFileTemplatesRow
	for rows.Next() {
		var i GetFileTemplatesRow
		if err := rows.Scan(
			&i.FileID,
			&i.FileCreatedBy,
			&i.TemplateID,
			&i.TemplateOrganizationID,
			&i.TemplateCreatedBy,
			&i.UserACL,
			&i.GroupACL,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// insertFile inserts a file row; "data" is quoted because it is the raw
// column name. The RETURNING column order (hash first, id last) drives the
// Scan order below.
const insertFile = `-- name: InsertFile :one
INSERT INTO
	files (id, hash, created_at, created_by, mimetype, "data")
VALUES
	($1, $2, $3, $4, $5, $6) RETURNING hash, created_at, created_by, mimetype, data, id
`

// InsertFileParams carries all column values for a new file row.
type InsertFileParams struct {
	ID        uuid.UUID `db:"id" json:"id"`
	Hash      string    `db:"hash" json:"hash"`
	CreatedAt time.Time `db:"created_at" json:"created_at"`
	CreatedBy uuid.UUID `db:"created_by" json:"created_by"`
	Mimetype  string    `db:"mimetype" json:"mimetype"`
	Data      []byte    `db:"data" json:"data"`
}

// InsertFile inserts the file and returns the stored row.
func (q *sqlQuerier) InsertFile(ctx context.Context, arg InsertFileParams) (File, error) {
	row := q.db.QueryRowContext(ctx, insertFile,
		arg.ID,
		arg.Hash,
		arg.CreatedAt,
		arg.CreatedBy,
		arg.Mimetype,
		arg.Data,
	)
	var i File
	err := row.Scan(
		&i.Hash,
		&i.CreatedAt,
		&i.CreatedBy,
		&i.Mimetype,
		&i.Data,
		&i.ID,
	)
	return i, err
}
|
|
|
|
// deleteGitSSHKey removes a user's Git SSH key row.
const deleteGitSSHKey = `-- name: DeleteGitSSHKey :exec
DELETE FROM
	gitsshkeys
WHERE
	user_id = $1
`

// DeleteGitSSHKey deletes the SSH key row for the given user. Deleting a
// non-existent row is not an error (ExecContext result is discarded).
func (q *sqlQuerier) DeleteGitSSHKey(ctx context.Context, userID uuid.UUID) error {
	_, err := q.db.ExecContext(ctx, deleteGitSSHKey, userID)
	return err
}
|
|
|
|
// getGitSSHKey fetches a user's Git SSH keypair row by user ID.
const getGitSSHKey = `-- name: GetGitSSHKey :one
SELECT
	user_id, created_at, updated_at, private_key, public_key
FROM
	gitsshkeys
WHERE
	user_id = $1
`

// GetGitSSHKey returns the SSH key row for the given user, or sql.ErrNoRows
// via Scan when none exists.
func (q *sqlQuerier) GetGitSSHKey(ctx context.Context, userID uuid.UUID) (GitSSHKey, error) {
	row := q.db.QueryRowContext(ctx, getGitSSHKey, userID)
	var i GitSSHKey
	err := row.Scan(
		&i.UserID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.PrivateKey,
		&i.PublicKey,
	)
	return i, err
}
|
|
|
|
// insertGitSSHKey inserts a user's Git SSH keypair and returns the stored row.
const insertGitSSHKey = `-- name: InsertGitSSHKey :one
INSERT INTO
	gitsshkeys (
		user_id,
		created_at,
		updated_at,
		private_key,
		public_key
	)
VALUES
	($1, $2, $3, $4, $5) RETURNING user_id, created_at, updated_at, private_key, public_key
`

// InsertGitSSHKeyParams carries all column values for a new gitsshkeys row.
type InsertGitSSHKeyParams struct {
	UserID     uuid.UUID `db:"user_id" json:"user_id"`
	CreatedAt  time.Time `db:"created_at" json:"created_at"`
	UpdatedAt  time.Time `db:"updated_at" json:"updated_at"`
	PrivateKey string    `db:"private_key" json:"private_key"`
	PublicKey  string    `db:"public_key" json:"public_key"`
}

// InsertGitSSHKey inserts the keypair and returns the stored row.
func (q *sqlQuerier) InsertGitSSHKey(ctx context.Context, arg InsertGitSSHKeyParams) (GitSSHKey, error) {
	row := q.db.QueryRowContext(ctx, insertGitSSHKey,
		arg.UserID,
		arg.CreatedAt,
		arg.UpdatedAt,
		arg.PrivateKey,
		arg.PublicKey,
	)
	var i GitSSHKey
	err := row.Scan(
		&i.UserID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.PrivateKey,
		&i.PublicKey,
	)
	return i, err
}
|
|
|
|
// updateGitSSHKey replaces a user's keypair and updated_at; created_at is
// preserved. Returns the updated row.
const updateGitSSHKey = `-- name: UpdateGitSSHKey :one
UPDATE
	gitsshkeys
SET
	updated_at = $2,
	private_key = $3,
	public_key = $4
WHERE
	user_id = $1
RETURNING
	user_id, created_at, updated_at, private_key, public_key
`

// UpdateGitSSHKeyParams carries the row key (UserID) and the fields to write.
type UpdateGitSSHKeyParams struct {
	UserID     uuid.UUID `db:"user_id" json:"user_id"`
	UpdatedAt  time.Time `db:"updated_at" json:"updated_at"`
	PrivateKey string    `db:"private_key" json:"private_key"`
	PublicKey  string    `db:"public_key" json:"public_key"`
}

// UpdateGitSSHKey runs updateGitSSHKey and scans the RETURNING row.
func (q *sqlQuerier) UpdateGitSSHKey(ctx context.Context, arg UpdateGitSSHKeyParams) (GitSSHKey, error) {
	row := q.db.QueryRowContext(ctx, updateGitSSHKey,
		arg.UserID,
		arg.UpdatedAt,
		arg.PrivateKey,
		arg.PublicKey,
	)
	var i GitSSHKey
	err := row.Scan(
		&i.UserID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.PrivateKey,
		&i.PublicKey,
	)
	return i, err
}
|
|
|
|
// deleteGroupMemberFromGroup removes one (user, group) membership row.
const deleteGroupMemberFromGroup = `-- name: DeleteGroupMemberFromGroup :exec
DELETE FROM
	group_members
WHERE
	user_id = $1 AND
	group_id = $2
`

// DeleteGroupMemberFromGroupParams identifies the membership row to delete.
type DeleteGroupMemberFromGroupParams struct {
	UserID  uuid.UUID `db:"user_id" json:"user_id"`
	GroupID uuid.UUID `db:"group_id" json:"group_id"`
}

// DeleteGroupMemberFromGroup removes the user from the group. Removing a
// non-member is not an error (ExecContext result is discarded).
func (q *sqlQuerier) DeleteGroupMemberFromGroup(ctx context.Context, arg DeleteGroupMemberFromGroupParams) error {
	_, err := q.db.ExecContext(ctx, deleteGroupMemberFromGroup, arg.UserID, arg.GroupID)
	return err
}
|
|
|
|
// getGroupMembers selects every row of the group_members_expanded view.
const getGroupMembers = `-- name: GetGroupMembers :many
SELECT user_id, user_email, user_username, user_hashed_password, user_created_at, user_updated_at, user_status, user_rbac_roles, user_login_type, user_avatar_url, user_deleted, user_last_seen_at, user_quiet_hours_schedule, user_theme_preference, user_name, user_github_com_user_id, organization_id, group_name, group_id FROM group_members_expanded
`

// GetGroupMembers returns every group membership from the
// group_members_expanded view. user_rbac_roles is a Postgres array and is
// scanned via pq.Array.
func (q *sqlQuerier) GetGroupMembers(ctx context.Context) ([]GroupMember, error) {
	rows, err := q.db.QueryContext(ctx, getGroupMembers)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GroupMember
	for rows.Next() {
		var i GroupMember
		if err := rows.Scan(
			&i.UserID,
			&i.UserEmail,
			&i.UserUsername,
			&i.UserHashedPassword,
			&i.UserCreatedAt,
			&i.UserUpdatedAt,
			&i.UserStatus,
			pq.Array(&i.UserRbacRoles),
			&i.UserLoginType,
			&i.UserAvatarUrl,
			&i.UserDeleted,
			&i.UserLastSeenAt,
			&i.UserQuietHoursSchedule,
			&i.UserThemePreference,
			&i.UserName,
			&i.UserGithubComUserID,
			&i.OrganizationID,
			&i.GroupName,
			&i.GroupID,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getGroupMembersByGroupID selects rows of the group_members_expanded view
// for a single group.
const getGroupMembersByGroupID = `-- name: GetGroupMembersByGroupID :many
SELECT user_id, user_email, user_username, user_hashed_password, user_created_at, user_updated_at, user_status, user_rbac_roles, user_login_type, user_avatar_url, user_deleted, user_last_seen_at, user_quiet_hours_schedule, user_theme_preference, user_name, user_github_com_user_id, organization_id, group_name, group_id FROM group_members_expanded WHERE group_id = $1
`

// GetGroupMembersByGroupID returns all members of the given group from the
// group_members_expanded view. user_rbac_roles is scanned via pq.Array.
func (q *sqlQuerier) GetGroupMembersByGroupID(ctx context.Context, groupID uuid.UUID) ([]GroupMember, error) {
	rows, err := q.db.QueryContext(ctx, getGroupMembersByGroupID, groupID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GroupMember
	for rows.Next() {
		var i GroupMember
		if err := rows.Scan(
			&i.UserID,
			&i.UserEmail,
			&i.UserUsername,
			&i.UserHashedPassword,
			&i.UserCreatedAt,
			&i.UserUpdatedAt,
			&i.UserStatus,
			pq.Array(&i.UserRbacRoles),
			&i.UserLoginType,
			&i.UserAvatarUrl,
			&i.UserDeleted,
			&i.UserLastSeenAt,
			&i.UserQuietHoursSchedule,
			&i.UserThemePreference,
			&i.UserName,
			&i.UserGithubComUserID,
			&i.OrganizationID,
			&i.GroupName,
			&i.GroupID,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getGroupMembersCountByGroupID counts rows in group_members_expanded for a
// single group.
const getGroupMembersCountByGroupID = `-- name: GetGroupMembersCountByGroupID :one
SELECT COUNT(*) FROM group_members_expanded WHERE group_id = $1
`

// Returns the total count of members in a group. Shows the total
// count even if the caller does not have read access to ResourceGroupMember.
// They only need ResourceGroup read access.
func (q *sqlQuerier) GetGroupMembersCountByGroupID(ctx context.Context, groupID uuid.UUID) (int64, error) {
	row := q.db.QueryRowContext(ctx, getGroupMembersCountByGroupID, groupID)
	var count int64
	err := row.Scan(&count)
	return count, err
}
|
|
|
|
// insertGroupMember inserts one (user, group) membership row.
const insertGroupMember = `-- name: InsertGroupMember :exec
INSERT INTO
	group_members (user_id, group_id)
VALUES
	($1, $2)
`

// InsertGroupMemberParams identifies the membership row to insert.
type InsertGroupMemberParams struct {
	UserID  uuid.UUID `db:"user_id" json:"user_id"`
	GroupID uuid.UUID `db:"group_id" json:"group_id"`
}

// InsertGroupMember adds the user to the group. No conflict handling is done
// here, so inserting a duplicate membership surfaces the database error.
func (q *sqlQuerier) InsertGroupMember(ctx context.Context, arg InsertGroupMemberParams) error {
	_, err := q.db.ExecContext(ctx, insertGroupMember, arg.UserID, arg.GroupID)
	return err
}
|
|
|
|
// insertUserGroupsByName resolves group names to IDs within one organization
// (via the CTE) and inserts a membership row for each match. Names that don't
// exist simply produce no rows.
const insertUserGroupsByName = `-- name: InsertUserGroupsByName :exec
WITH groups AS (
	SELECT
		id
	FROM
		groups
	WHERE
		groups.organization_id = $2 AND
		groups.name = ANY($3 :: text [])
)
INSERT INTO
	group_members (user_id, group_id)
SELECT
	$1,
	groups.id
FROM
	groups
`

// InsertUserGroupsByNameParams names the user, the organization scoping the
// lookup, and the group names to join.
type InsertUserGroupsByNameParams struct {
	UserID         uuid.UUID `db:"user_id" json:"user_id"`
	OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
	GroupNames     []string  `db:"group_names" json:"group_names"`
}

// InsertUserGroupsByName adds a user to all provided groups, if they exist.
func (q *sqlQuerier) InsertUserGroupsByName(ctx context.Context, arg InsertUserGroupsByNameParams) error {
	_, err := q.db.ExecContext(ctx, insertUserGroupsByName, arg.UserID, arg.OrganizationID, pq.Array(arg.GroupNames))
	return err
}
|
|
|
|
// removeUserFromAllGroups deletes every membership row for one user.
const removeUserFromAllGroups = `-- name: RemoveUserFromAllGroups :exec
DELETE FROM
	group_members
WHERE
	user_id = $1
`

// RemoveUserFromAllGroups removes the user from every group they belong to.
func (q *sqlQuerier) RemoveUserFromAllGroups(ctx context.Context, userID uuid.UUID) error {
	_, err := q.db.ExecContext(ctx, removeUserFromAllGroups, userID)
	return err
}
|
|
|
|
// deleteGroupByID deletes one group row by primary key.
const deleteGroupByID = `-- name: DeleteGroupByID :exec
DELETE FROM
	groups
WHERE
	id = $1
`

// DeleteGroupByID deletes the group. Deleting a non-existent group is not an
// error (ExecContext result is discarded).
func (q *sqlQuerier) DeleteGroupByID(ctx context.Context, id uuid.UUID) error {
	_, err := q.db.ExecContext(ctx, deleteGroupByID, id)
	return err
}
|
|
|
|
// getGroupByID fetches one group row by primary key.
const getGroupByID = `-- name: GetGroupByID :one
SELECT
	id, name, organization_id, avatar_url, quota_allowance, display_name, source
FROM
	groups
WHERE
	id = $1
LIMIT
	1
`

// GetGroupByID returns the group with the given ID, or sql.ErrNoRows via
// Scan when it does not exist.
func (q *sqlQuerier) GetGroupByID(ctx context.Context, id uuid.UUID) (Group, error) {
	row := q.db.QueryRowContext(ctx, getGroupByID, id)
	var i Group
	err := row.Scan(
		&i.ID,
		&i.Name,
		&i.OrganizationID,
		&i.AvatarURL,
		&i.QuotaAllowance,
		&i.DisplayName,
		&i.Source,
	)
	return i, err
}
|
|
|
|
// getGroupByOrgAndName fetches one group row by (organization_id, name).
const getGroupByOrgAndName = `-- name: GetGroupByOrgAndName :one
SELECT
	id, name, organization_id, avatar_url, quota_allowance, display_name, source
FROM
	groups
WHERE
	organization_id = $1
AND
	name = $2
LIMIT
	1
`

// GetGroupByOrgAndNameParams carries the lookup key for GetGroupByOrgAndName.
type GetGroupByOrgAndNameParams struct {
	OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
	Name           string    `db:"name" json:"name"`
}

// GetGroupByOrgAndName returns the group matching the organization and name,
// or sql.ErrNoRows via Scan when no row matches.
func (q *sqlQuerier) GetGroupByOrgAndName(ctx context.Context, arg GetGroupByOrgAndNameParams) (Group, error) {
	row := q.db.QueryRowContext(ctx, getGroupByOrgAndName, arg.OrganizationID, arg.Name)
	var i Group
	err := row.Scan(
		&i.ID,
		&i.Name,
		&i.OrganizationID,
		&i.AvatarURL,
		&i.QuotaAllowance,
		&i.DisplayName,
		&i.Source,
	)
	return i, err
}
|
|
|
|
// getGroups lists groups joined with their organization's names. Both filters
// are optional: a zero (all-zeroes) UUID disables the corresponding CASE
// branch, so passing zero values returns all groups.
const getGroups = `-- name: GetGroups :many
SELECT
	groups.id, groups.name, groups.organization_id, groups.avatar_url, groups.quota_allowance, groups.display_name, groups.source,
	organizations.name AS organization_name,
	organizations.display_name AS organization_display_name
FROM
	groups
INNER JOIN
	organizations ON groups.organization_id = organizations.id
WHERE
	true
	AND CASE
		WHEN $1:: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
			groups.organization_id = $1
		ELSE true
	END
	AND CASE
		-- Filter to only include groups a user is a member of
		WHEN $2::uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
			EXISTS (
				SELECT
					1
				FROM
					-- this view handles the 'everyone' group in orgs.
					group_members_expanded
				WHERE
					group_members_expanded.group_id = groups.id
				AND
					group_members_expanded.user_id = $2
			)
		ELSE true
	END
`

// GetGroupsParams holds the optional filters; uuid.Nil means "no filter" for
// both fields.
type GetGroupsParams struct {
	OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
	HasMemberID    uuid.UUID `db:"has_member_id" json:"has_member_id"`
}

// GetGroupsRow embeds the full Group plus the owning organization's names.
type GetGroupsRow struct {
	Group                   Group  `db:"group" json:"group"`
	OrganizationName        string `db:"organization_name" json:"organization_name"`
	OrganizationDisplayName string `db:"organization_display_name" json:"organization_display_name"`
}

// GetGroups returns groups matching the (optional) organization and
// membership filters, with organization names attached.
func (q *sqlQuerier) GetGroups(ctx context.Context, arg GetGroupsParams) ([]GetGroupsRow, error) {
	rows, err := q.db.QueryContext(ctx, getGroups, arg.OrganizationID, arg.HasMemberID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetGroupsRow
	for rows.Next() {
		var i GetGroupsRow
		if err := rows.Scan(
			&i.Group.ID,
			&i.Group.Name,
			&i.Group.OrganizationID,
			&i.Group.AvatarURL,
			&i.Group.QuotaAllowance,
			&i.Group.DisplayName,
			&i.Group.Source,
			&i.OrganizationName,
			&i.OrganizationDisplayName,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// insertAllUsersGroup creates the implicit 'Everyone' group for an org; $1 is
// used for both the group id and organization_id.
const insertAllUsersGroup = `-- name: InsertAllUsersGroup :one
INSERT INTO groups (
	id,
	name,
	organization_id
)
VALUES
	($1, 'Everyone', $1) RETURNING id, name, organization_id, avatar_url, quota_allowance, display_name, source
`

// We use the organization_id as the id
// for simplicity since all users is
// every member of the org.
func (q *sqlQuerier) InsertAllUsersGroup(ctx context.Context, organizationID uuid.UUID) (Group, error) {
	row := q.db.QueryRowContext(ctx, insertAllUsersGroup, organizationID)
	var i Group
	err := row.Scan(
		&i.ID,
		&i.Name,
		&i.OrganizationID,
		&i.AvatarURL,
		&i.QuotaAllowance,
		&i.DisplayName,
		&i.Source,
	)
	return i, err
}
|
|
|
|
// insertGroup inserts a group row and returns it. The RETURNING column order
// differs from the insert column order and drives the Scan order below.
const insertGroup = `-- name: InsertGroup :one
INSERT INTO groups (
	id,
	name,
	display_name,
	organization_id,
	avatar_url,
	quota_allowance
)
VALUES
	($1, $2, $3, $4, $5, $6) RETURNING id, name, organization_id, avatar_url, quota_allowance, display_name, source
`

// InsertGroupParams carries all caller-supplied column values for a new group.
type InsertGroupParams struct {
	ID             uuid.UUID `db:"id" json:"id"`
	Name           string    `db:"name" json:"name"`
	DisplayName    string    `db:"display_name" json:"display_name"`
	OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
	AvatarURL      string    `db:"avatar_url" json:"avatar_url"`
	QuotaAllowance int32     `db:"quota_allowance" json:"quota_allowance"`
}

// InsertGroup inserts the group and returns the stored row.
func (q *sqlQuerier) InsertGroup(ctx context.Context, arg InsertGroupParams) (Group, error) {
	row := q.db.QueryRowContext(ctx, insertGroup,
		arg.ID,
		arg.Name,
		arg.DisplayName,
		arg.OrganizationID,
		arg.AvatarURL,
		arg.QuotaAllowance,
	)
	var i Group
	err := row.Scan(
		&i.ID,
		&i.Name,
		&i.OrganizationID,
		&i.AvatarURL,
		&i.QuotaAllowance,
		&i.DisplayName,
		&i.Source,
	)
	return i, err
}
|
|
|
|
// insertMissingGroups inserts one group per name from the unnested text array,
// all in the same org with the same source; conflicts (existing names) are
// skipped via ON CONFLICT DO NOTHING, so only newly created rows are returned.
const insertMissingGroups = `-- name: InsertMissingGroups :many
INSERT INTO groups (
	id,
	name,
	organization_id,
	source
)
SELECT
	gen_random_uuid(),
	group_name,
	$1,
	$2
FROM
	UNNEST($3 :: text[]) AS group_name
ON CONFLICT DO NOTHING
RETURNING id, name, organization_id, avatar_url, quota_allowance, display_name, source
`

// InsertMissingGroupsParams names the org, the group source, and the
// candidate group names.
type InsertMissingGroupsParams struct {
	OrganizationID uuid.UUID   `db:"organization_id" json:"organization_id"`
	Source         GroupSource `db:"source" json:"source"`
	GroupNames     []string    `db:"group_names" json:"group_names"`
}

// Inserts any group by name that does not exist. All new groups are given
// a random uuid, are inserted into the same organization. They have the default
// values for avatar, display name, and quota allowance (all zero values).
// If the name conflicts, do nothing.
func (q *sqlQuerier) InsertMissingGroups(ctx context.Context, arg InsertMissingGroupsParams) ([]Group, error) {
	rows, err := q.db.QueryContext(ctx, insertMissingGroups, arg.OrganizationID, arg.Source, pq.Array(arg.GroupNames))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []Group
	for rows.Next() {
		var i Group
		if err := rows.Scan(
			&i.ID,
			&i.Name,
			&i.OrganizationID,
			&i.AvatarURL,
			&i.QuotaAllowance,
			&i.DisplayName,
			&i.Source,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// updateGroupByID updates a group's mutable fields by primary key and returns
// the updated row.
const updateGroupByID = `-- name: UpdateGroupByID :one
UPDATE
	groups
SET
	name = $1,
	display_name = $2,
	avatar_url = $3,
	quota_allowance = $4
WHERE
	id = $5
RETURNING id, name, organization_id, avatar_url, quota_allowance, display_name, source
`

// UpdateGroupByIDParams carries the fields to write plus the row key (ID,
// which is the last SQL parameter).
type UpdateGroupByIDParams struct {
	Name           string    `db:"name" json:"name"`
	DisplayName    string    `db:"display_name" json:"display_name"`
	AvatarURL      string    `db:"avatar_url" json:"avatar_url"`
	QuotaAllowance int32     `db:"quota_allowance" json:"quota_allowance"`
	ID             uuid.UUID `db:"id" json:"id"`
}

// UpdateGroupByID runs updateGroupByID and scans the RETURNING row; returns
// sql.ErrNoRows via Scan if no group matched.
func (q *sqlQuerier) UpdateGroupByID(ctx context.Context, arg UpdateGroupByIDParams) (Group, error) {
	row := q.db.QueryRowContext(ctx, updateGroupByID,
		arg.Name,
		arg.DisplayName,
		arg.AvatarURL,
		arg.QuotaAllowance,
		arg.ID,
	)
	var i Group
	err := row.Scan(
		&i.ID,
		&i.Name,
		&i.OrganizationID,
		&i.AvatarURL,
		&i.QuotaAllowance,
		&i.DisplayName,
		&i.Source,
	)
	return i, err
}
|
|
|
|
// getTemplateAppInsights aggregates per-app usage from template_usage_stats
// over a time window, optionally filtered by template IDs. The CTE comments
// below document each stage of the pipeline.
const getTemplateAppInsights = `-- name: GetTemplateAppInsights :many
WITH
	-- Create a list of all unique apps by template, this is used to
	-- filter out irrelevant template usage stats.
	apps AS (
		SELECT DISTINCT ON (ws.template_id, app.slug)
			ws.template_id,
			app.slug,
			app.display_name,
			app.icon
		FROM
			workspaces ws
		JOIN
			workspace_builds AS build
		ON
			build.workspace_id = ws.id
		JOIN
			workspace_resources AS resource
		ON
			resource.job_id = build.job_id
		JOIN
			workspace_agents AS agent
		ON
			agent.resource_id = resource.id
		JOIN
			workspace_apps AS app
		ON
			app.agent_id = agent.id
		WHERE
			-- Partial query parameter filter.
			CASE WHEN COALESCE(array_length($1::uuid[], 1), 0) > 0 THEN ws.template_id = ANY($1::uuid[]) ELSE TRUE END
		ORDER BY
			ws.template_id, app.slug, app.created_at DESC
	),
	-- Join apps and template usage stats to filter out irrelevant rows.
	-- Note that this way of joining will eliminate all data-points that
	-- aren't for "real" apps. That means ports are ignored (even though
	-- they're part of the dataset), as well as are "[terminal]" entries
	-- which are alternate datapoints for reconnecting pty usage.
	template_usage_stats_with_apps AS (
		SELECT
			tus.start_time,
			tus.template_id,
			tus.user_id,
			apps.slug,
			apps.display_name,
			apps.icon,
			(tus.app_usage_mins -> apps.slug)::smallint AS usage_mins
		FROM
			apps
		JOIN
			template_usage_stats AS tus
		ON
			-- Query parameter filter.
			tus.start_time >= $2::timestamptz
			AND tus.end_time <= $3::timestamptz
			AND CASE WHEN COALESCE(array_length($1::uuid[], 1), 0) > 0 THEN tus.template_id = ANY($1::uuid[]) ELSE TRUE END
			-- Primary join condition.
			AND tus.template_id = apps.template_id
			AND tus.app_usage_mins ? apps.slug -- Key exists in object.
	),
	-- Group the app insights by interval, user and unique app. This
	-- allows us to deduplicate a user using the same app across
	-- multiple templates.
	app_insights AS (
		SELECT
			user_id,
			slug,
			display_name,
			icon,
			-- See motivation in GetTemplateInsights for LEAST(SUM(n), 30).
			LEAST(SUM(usage_mins), 30) AS usage_mins
		FROM
			template_usage_stats_with_apps
		GROUP BY
			start_time, user_id, slug, display_name, icon
	),
	-- Analyze the users unique app usage across all templates. Count
	-- usage across consecutive intervals as continuous usage.
	times_used AS (
		SELECT DISTINCT ON (user_id, slug, display_name, icon, uniq)
			slug,
			display_name,
			icon,
			-- Turn start_time into a unique identifier that identifies a users
			-- continuous app usage. The value of uniq is otherwise garbage.
			--
			-- Since we're aggregating per user app usage across templates,
			-- there can be duplicate start_times. To handle this, we use the
			-- dense_rank() function, otherwise row_number() would suffice.
			start_time - (
				dense_rank() OVER (
					PARTITION BY
						user_id, slug, display_name, icon
					ORDER BY
						start_time
				) * '30 minutes'::interval
			) AS uniq
		FROM
			template_usage_stats_with_apps
	),
	-- Even though we allow identical apps to be aggregated across
	-- templates, we still want to be able to report which templates
	-- the data comes from.
	templates AS (
		SELECT
			slug,
			display_name,
			icon,
			array_agg(DISTINCT template_id)::uuid[] AS template_ids
		FROM
			template_usage_stats_with_apps
		GROUP BY
			slug, display_name, icon
	)

SELECT
	t.template_ids,
	COUNT(DISTINCT ai.user_id) AS active_users,
	ai.slug,
	ai.display_name,
	ai.icon,
	(SUM(ai.usage_mins) * 60)::bigint AS usage_seconds,
	COALESCE((
		SELECT
			COUNT(*)
		FROM
			times_used
		WHERE
			times_used.slug = ai.slug
			AND times_used.display_name = ai.display_name
			AND times_used.icon = ai.icon
	), 0)::bigint AS times_used
FROM
	app_insights AS ai
JOIN
	templates AS t
ON
	t.slug = ai.slug
	AND t.display_name = ai.display_name
	AND t.icon = ai.icon
GROUP BY
	t.template_ids, ai.slug, ai.display_name, ai.icon
`

// GetTemplateAppInsightsParams holds the time window and the optional
// template-ID filter (an empty slice disables the filter).
type GetTemplateAppInsightsParams struct {
	TemplateIDs []uuid.UUID `db:"template_ids" json:"template_ids"`
	StartTime   time.Time   `db:"start_time" json:"start_time"`
	EndTime     time.Time   `db:"end_time" json:"end_time"`
}

// GetTemplateAppInsightsRow is one app's aggregate usage across the templates
// in TemplateIDs.
type GetTemplateAppInsightsRow struct {
	TemplateIDs  []uuid.UUID `db:"template_ids" json:"template_ids"`
	ActiveUsers  int64       `db:"active_users" json:"active_users"`
	Slug         string      `db:"slug" json:"slug"`
	DisplayName  string      `db:"display_name" json:"display_name"`
	Icon         string      `db:"icon" json:"icon"`
	UsageSeconds int64       `db:"usage_seconds" json:"usage_seconds"`
	TimesUsed    int64       `db:"times_used" json:"times_used"`
}

// GetTemplateAppInsights returns the aggregate usage of each app in a given
// timeframe. The result can be filtered on template_ids, meaning only user data
// from workspaces based on those templates will be included.
func (q *sqlQuerier) GetTemplateAppInsights(ctx context.Context, arg GetTemplateAppInsightsParams) ([]GetTemplateAppInsightsRow, error) {
	rows, err := q.db.QueryContext(ctx, getTemplateAppInsights, pq.Array(arg.TemplateIDs), arg.StartTime, arg.EndTime)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetTemplateAppInsightsRow
	for rows.Next() {
		var i GetTemplateAppInsightsRow
		if err := rows.Scan(
			pq.Array(&i.TemplateIDs),
			&i.ActiveUsers,
			&i.Slug,
			&i.DisplayName,
			&i.Icon,
			&i.UsageSeconds,
			&i.TimesUsed,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getTemplateAppInsightsByTemplate aggregates per-template app usage from
// workspace_app_stats by exploding each session into minute buckets and
// counting distinct minutes per (template, user, app).
const getTemplateAppInsightsByTemplate = `-- name: GetTemplateAppInsightsByTemplate :many
WITH
	-- This CTE is used to explode app usage into minute buckets, then
	-- flatten the users app usage within the template so that usage in
	-- multiple workspaces under one template is only counted once for
	-- every minute.
	app_insights AS (
		SELECT
			w.template_id,
			was.user_id,
			-- Both app stats and agent stats track web terminal usage, but
			-- by different means. The app stats value should be more
			-- accurate so we don't want to discard it just yet.
			CASE
				WHEN was.access_method = 'terminal'
				THEN '[terminal]' -- Unique name, app names can't contain brackets.
				ELSE was.slug_or_port
			END::text AS app_name,
			COALESCE(wa.display_name, '') AS display_name,
			(wa.slug IS NOT NULL)::boolean AS is_app,
			COUNT(DISTINCT s.minute_bucket) AS app_minutes
		FROM
			workspace_app_stats AS was
		JOIN
			workspaces AS w
		ON
			w.id = was.workspace_id
		-- We do a left join here because we want to include user IDs that have used
		-- e.g. ports when counting active users.
		LEFT JOIN
			workspace_apps wa
		ON
			wa.agent_id = was.agent_id
			AND wa.slug = was.slug_or_port
		-- Generate a series of minute buckets for each session for computing the
		-- mintes/bucket.
		CROSS JOIN
			generate_series(
				date_trunc('minute', was.session_started_at),
				-- Subtract 1 μs to avoid creating an extra series.
				date_trunc('minute', was.session_ended_at - '1 microsecond'::interval),
				'1 minute'::interval
			) AS s(minute_bucket)
		WHERE
			s.minute_bucket >= $1::timestamptz
			AND s.minute_bucket < $2::timestamptz
		GROUP BY
			w.template_id, was.user_id, was.access_method, was.slug_or_port, wa.display_name, wa.slug
	)

SELECT
	template_id,
	app_name AS slug_or_port,
	display_name AS display_name,
	COUNT(DISTINCT user_id)::bigint AS active_users,
	(SUM(app_minutes) * 60)::bigint AS usage_seconds
FROM
	app_insights
WHERE
	is_app IS TRUE
GROUP BY
	template_id, slug_or_port, display_name
`

// GetTemplateAppInsightsByTemplateParams holds the half-open time window
// [StartTime, EndTime) applied to the generated minute buckets.
type GetTemplateAppInsightsByTemplateParams struct {
	StartTime time.Time `db:"start_time" json:"start_time"`
	EndTime   time.Time `db:"end_time" json:"end_time"`
}

// GetTemplateAppInsightsByTemplateRow is one app's aggregate usage within a
// single template.
type GetTemplateAppInsightsByTemplateRow struct {
	TemplateID   uuid.UUID `db:"template_id" json:"template_id"`
	SlugOrPort   string    `db:"slug_or_port" json:"slug_or_port"`
	DisplayName  string    `db:"display_name" json:"display_name"`
	ActiveUsers  int64     `db:"active_users" json:"active_users"`
	UsageSeconds int64     `db:"usage_seconds" json:"usage_seconds"`
}

// GetTemplateAppInsightsByTemplate is used for Prometheus metrics. Keep
// in sync with GetTemplateAppInsights and UpsertTemplateUsageStats.
func (q *sqlQuerier) GetTemplateAppInsightsByTemplate(ctx context.Context, arg GetTemplateAppInsightsByTemplateParams) ([]GetTemplateAppInsightsByTemplateRow, error) {
	rows, err := q.db.QueryContext(ctx, getTemplateAppInsightsByTemplate, arg.StartTime, arg.EndTime)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetTemplateAppInsightsByTemplateRow
	for rows.Next() {
		var i GetTemplateAppInsightsByTemplateRow
		if err := rows.Scan(
			&i.TemplateID,
			&i.SlugOrPort,
			&i.DisplayName,
			&i.ActiveUsers,
			&i.UsageSeconds,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getTemplateInsights is the SQL backing GetTemplateInsights. It aggregates
// template_usage_stats over a window, capping per-user usage at 30 minutes
// per half-hour bucket, and reports template IDs seen per connection type.
const getTemplateInsights = `-- name: GetTemplateInsights :one
WITH
	insights AS (
		SELECT
			user_id,
			-- See motivation in GetTemplateInsights for LEAST(SUM(n), 30).
			LEAST(SUM(usage_mins), 30) AS usage_mins,
			LEAST(SUM(ssh_mins), 30) AS ssh_mins,
			LEAST(SUM(sftp_mins), 30) AS sftp_mins,
			LEAST(SUM(reconnecting_pty_mins), 30) AS reconnecting_pty_mins,
			LEAST(SUM(vscode_mins), 30) AS vscode_mins,
			LEAST(SUM(jetbrains_mins), 30) AS jetbrains_mins
		FROM
			template_usage_stats
		WHERE
			start_time >= $1::timestamptz
			AND end_time <= $2::timestamptz
			AND CASE WHEN COALESCE(array_length($3::uuid[], 1), 0) > 0 THEN template_id = ANY($3::uuid[]) ELSE TRUE END
		GROUP BY
			start_time, user_id
	),
	templates AS (
		SELECT
			array_agg(DISTINCT template_id) AS template_ids,
			array_agg(DISTINCT template_id) FILTER (WHERE ssh_mins > 0) AS ssh_template_ids,
			array_agg(DISTINCT template_id) FILTER (WHERE sftp_mins > 0) AS sftp_template_ids,
			array_agg(DISTINCT template_id) FILTER (WHERE reconnecting_pty_mins > 0) AS reconnecting_pty_template_ids,
			array_agg(DISTINCT template_id) FILTER (WHERE vscode_mins > 0) AS vscode_template_ids,
			array_agg(DISTINCT template_id) FILTER (WHERE jetbrains_mins > 0) AS jetbrains_template_ids
		FROM
			template_usage_stats
		WHERE
			start_time >= $1::timestamptz
			AND end_time <= $2::timestamptz
			AND CASE WHEN COALESCE(array_length($3::uuid[], 1), 0) > 0 THEN template_id = ANY($3::uuid[]) ELSE TRUE END
	)

SELECT
	COALESCE((SELECT template_ids FROM templates), '{}')::uuid[] AS template_ids, -- Includes app usage.
	COALESCE((SELECT ssh_template_ids FROM templates), '{}')::uuid[] AS ssh_template_ids,
	COALESCE((SELECT sftp_template_ids FROM templates), '{}')::uuid[] AS sftp_template_ids,
	COALESCE((SELECT reconnecting_pty_template_ids FROM templates), '{}')::uuid[] AS reconnecting_pty_template_ids,
	COALESCE((SELECT vscode_template_ids FROM templates), '{}')::uuid[] AS vscode_template_ids,
	COALESCE((SELECT jetbrains_template_ids FROM templates), '{}')::uuid[] AS jetbrains_template_ids,
	COALESCE(COUNT(DISTINCT user_id), 0)::bigint AS active_users, -- Includes app usage.
	COALESCE(SUM(usage_mins) * 60, 0)::bigint AS usage_total_seconds, -- Includes app usage.
	COALESCE(SUM(ssh_mins) * 60, 0)::bigint AS usage_ssh_seconds,
	COALESCE(SUM(sftp_mins) * 60, 0)::bigint AS usage_sftp_seconds,
	COALESCE(SUM(reconnecting_pty_mins) * 60, 0)::bigint AS usage_reconnecting_pty_seconds,
	COALESCE(SUM(vscode_mins) * 60, 0)::bigint AS usage_vscode_seconds,
	COALESCE(SUM(jetbrains_mins) * 60, 0)::bigint AS usage_jetbrains_seconds
FROM
	insights
`

// GetTemplateInsightsParams bounds the time window and optionally restricts
// the aggregation to a set of template IDs (empty slice means all templates).
type GetTemplateInsightsParams struct {
	StartTime   time.Time   `db:"start_time" json:"start_time"`
	EndTime     time.Time   `db:"end_time" json:"end_time"`
	TemplateIDs []uuid.UUID `db:"template_ids" json:"template_ids"`
}

// GetTemplateInsightsRow is the single aggregate result of
// GetTemplateInsights: template ID sets per connection type plus total
// usage in seconds per category.
type GetTemplateInsightsRow struct {
	TemplateIDs                 []uuid.UUID `db:"template_ids" json:"template_ids"`
	SshTemplateIds              []uuid.UUID `db:"ssh_template_ids" json:"ssh_template_ids"`
	SftpTemplateIds             []uuid.UUID `db:"sftp_template_ids" json:"sftp_template_ids"`
	ReconnectingPtyTemplateIds  []uuid.UUID `db:"reconnecting_pty_template_ids" json:"reconnecting_pty_template_ids"`
	VscodeTemplateIds           []uuid.UUID `db:"vscode_template_ids" json:"vscode_template_ids"`
	JetbrainsTemplateIds        []uuid.UUID `db:"jetbrains_template_ids" json:"jetbrains_template_ids"`
	ActiveUsers                 int64       `db:"active_users" json:"active_users"`
	UsageTotalSeconds           int64       `db:"usage_total_seconds" json:"usage_total_seconds"`
	UsageSshSeconds             int64       `db:"usage_ssh_seconds" json:"usage_ssh_seconds"`
	UsageSftpSeconds            int64       `db:"usage_sftp_seconds" json:"usage_sftp_seconds"`
	UsageReconnectingPtySeconds int64       `db:"usage_reconnecting_pty_seconds" json:"usage_reconnecting_pty_seconds"`
	UsageVscodeSeconds          int64       `db:"usage_vscode_seconds" json:"usage_vscode_seconds"`
	UsageJetbrainsSeconds       int64       `db:"usage_jetbrains_seconds" json:"usage_jetbrains_seconds"`
}

// GetTemplateInsights returns the aggregate user-produced usage of all
// workspaces in a given timeframe. The template IDs, active users, and
// usage_seconds all reflect any usage in the template, including apps.
//
// When combining data from multiple templates, we must make a guess at
// how the user behaved for the 30 minute interval. In this case we make
// the assumption that if the user used two workspaces for 15 minutes,
// they did so sequentially, thus we sum the usage up to a maximum of
// 30 minutes with LEAST(SUM(n), 30).
func (q *sqlQuerier) GetTemplateInsights(ctx context.Context, arg GetTemplateInsightsParams) (GetTemplateInsightsRow, error) {
	// pq.Array adapts the []uuid.UUID parameter to a Postgres uuid[] array.
	row := q.db.QueryRowContext(ctx, getTemplateInsights, arg.StartTime, arg.EndTime, pq.Array(arg.TemplateIDs))
	var i GetTemplateInsightsRow
	err := row.Scan(
		pq.Array(&i.TemplateIDs),
		pq.Array(&i.SshTemplateIds),
		pq.Array(&i.SftpTemplateIds),
		pq.Array(&i.ReconnectingPtyTemplateIds),
		pq.Array(&i.VscodeTemplateIds),
		pq.Array(&i.JetbrainsTemplateIds),
		&i.ActiveUsers,
		&i.UsageTotalSeconds,
		&i.UsageSshSeconds,
		&i.UsageSftpSeconds,
		&i.UsageReconnectingPtySeconds,
		&i.UsageVscodeSeconds,
		&i.UsageJetbrainsSeconds,
	)
	return i, err
}
|
|
|
|
// getTemplateInsightsByInterval is the SQL backing
// GetTemplateInsightsByInterval. It generates fixed-size day intervals with
// generate_series and LEFT JOINs usage stats so empty intervals still appear.
const getTemplateInsightsByInterval = `-- name: GetTemplateInsightsByInterval :many
WITH
	ts AS (
		SELECT
			d::timestamptz AS from_,
			LEAST(
				(d::timestamptz + ($2::int || ' day')::interval)::timestamptz,
				$3::timestamptz
			)::timestamptz AS to_
		FROM
			generate_series(
				$4::timestamptz,
				-- Subtract 1 μs to avoid creating an extra series.
				($3::timestamptz) - '1 microsecond'::interval,
				($2::int || ' day')::interval
			) AS d
	)

SELECT
	ts.from_ AS start_time,
	ts.to_ AS end_time,
	array_remove(array_agg(DISTINCT tus.template_id), NULL)::uuid[] AS template_ids,
	COUNT(DISTINCT tus.user_id) AS active_users
FROM
	ts
LEFT JOIN
	template_usage_stats AS tus
ON
	tus.start_time >= ts.from_
	AND tus.start_time < ts.to_ -- End time exclusion criteria optimization for index.
	AND tus.end_time <= ts.to_
	AND CASE WHEN COALESCE(array_length($1::uuid[], 1), 0) > 0 THEN tus.template_id = ANY($1::uuid[]) ELSE TRUE END
GROUP BY
	ts.from_, ts.to_
`

// GetTemplateInsightsByIntervalParams configures the interval length (in
// days), the overall [StartTime, EndTime) window, and an optional template
// filter (empty slice means all templates).
type GetTemplateInsightsByIntervalParams struct {
	TemplateIDs  []uuid.UUID `db:"template_ids" json:"template_ids"`
	IntervalDays int32       `db:"interval_days" json:"interval_days"`
	EndTime      time.Time   `db:"end_time" json:"end_time"`
	StartTime    time.Time   `db:"start_time" json:"start_time"`
}

// GetTemplateInsightsByIntervalRow is one interval's aggregate: its bounds,
// the distinct templates used, and the distinct active user count.
type GetTemplateInsightsByIntervalRow struct {
	StartTime   time.Time   `db:"start_time" json:"start_time"`
	EndTime     time.Time   `db:"end_time" json:"end_time"`
	TemplateIDs []uuid.UUID `db:"template_ids" json:"template_ids"`
	ActiveUsers int64       `db:"active_users" json:"active_users"`
}

// GetTemplateInsightsByInterval returns all intervals between start and end
// time, if end time is a partial interval, it will be included in the results and
// that interval will be shorter than a full one. If there is no data for a selected
// interval/template, it will be included in the results with 0 active users.
func (q *sqlQuerier) GetTemplateInsightsByInterval(ctx context.Context, arg GetTemplateInsightsByIntervalParams) ([]GetTemplateInsightsByIntervalRow, error) {
	// Argument order must match the $1..$4 placeholders in the query.
	rows, err := q.db.QueryContext(ctx, getTemplateInsightsByInterval,
		pq.Array(arg.TemplateIDs),
		arg.IntervalDays,
		arg.EndTime,
		arg.StartTime,
	)
	if err != nil {
		return nil, err
	}
	// Deferred Close guards error paths; the explicit Close after the loop
	// surfaces driver errors (the deferred call is then a no-op).
	defer rows.Close()
	var items []GetTemplateInsightsByIntervalRow
	for rows.Next() {
		var i GetTemplateInsightsByIntervalRow
		if err := rows.Scan(
			&i.StartTime,
			&i.EndTime,
			pq.Array(&i.TemplateIDs),
			&i.ActiveUsers,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getTemplateInsightsByTemplate is the SQL backing
// GetTemplateInsightsByTemplate. Unlike the pre-aggregated
// template_usage_stats queries, it reads workspace_agent_stats directly and
// buckets usage into distinct minutes per (template, user).
const getTemplateInsightsByTemplate = `-- name: GetTemplateInsightsByTemplate :many
WITH
	-- This CTE is used to truncate agent usage into minute buckets, then
	-- flatten the users agent usage within the template so that usage in
	-- multiple workspaces under one template is only counted once for
	-- every minute (per user).
	insights AS (
		SELECT
			template_id,
			user_id,
			COUNT(DISTINCT CASE WHEN session_count_ssh > 0 THEN date_trunc('minute', created_at) ELSE NULL END) AS ssh_mins,
			-- TODO(mafredri): Enable when we have the column.
			-- COUNT(DISTINCT CASE WHEN session_count_sftp > 0 THEN date_trunc('minute', created_at) ELSE NULL END) AS sftp_mins,
			COUNT(DISTINCT CASE WHEN session_count_reconnecting_pty > 0 THEN date_trunc('minute', created_at) ELSE NULL END) AS reconnecting_pty_mins,
			COUNT(DISTINCT CASE WHEN session_count_vscode > 0 THEN date_trunc('minute', created_at) ELSE NULL END) AS vscode_mins,
			COUNT(DISTINCT CASE WHEN session_count_jetbrains > 0 THEN date_trunc('minute', created_at) ELSE NULL END) AS jetbrains_mins,
			-- NOTE(mafredri): The agent stats are currently very unreliable, and
			-- sometimes the connections are missing, even during active sessions.
			-- Since we can't fully rely on this, we check for "any connection
			-- within this bucket". A better solution here would be preferable.
			MAX(connection_count) > 0 AS has_connection
		FROM
			workspace_agent_stats
		WHERE
			created_at >= $1::timestamptz
			AND created_at < $2::timestamptz
			-- Inclusion criteria to filter out empty results.
			AND (
				session_count_ssh > 0
				-- TODO(mafredri): Enable when we have the column.
				-- OR session_count_sftp > 0
				OR session_count_reconnecting_pty > 0
				OR session_count_vscode > 0
				OR session_count_jetbrains > 0
			)
		GROUP BY
			template_id, user_id
	)

SELECT
	template_id,
	COUNT(DISTINCT user_id)::bigint AS active_users,
	(SUM(vscode_mins) * 60)::bigint AS usage_vscode_seconds,
	(SUM(jetbrains_mins) * 60)::bigint AS usage_jetbrains_seconds,
	(SUM(reconnecting_pty_mins) * 60)::bigint AS usage_reconnecting_pty_seconds,
	(SUM(ssh_mins) * 60)::bigint AS usage_ssh_seconds
FROM
	insights
WHERE
	has_connection
GROUP BY
	template_id
`

// GetTemplateInsightsByTemplateParams bounds the time window scanned by
// GetTemplateInsightsByTemplate (start inclusive, end exclusive).
type GetTemplateInsightsByTemplateParams struct {
	StartTime time.Time `db:"start_time" json:"start_time"`
	EndTime   time.Time `db:"end_time" json:"end_time"`
}

// GetTemplateInsightsByTemplateRow is one aggregate row per template:
// distinct active users and usage seconds per connection type.
type GetTemplateInsightsByTemplateRow struct {
	TemplateID                  uuid.UUID `db:"template_id" json:"template_id"`
	ActiveUsers                 int64     `db:"active_users" json:"active_users"`
	UsageVscodeSeconds          int64     `db:"usage_vscode_seconds" json:"usage_vscode_seconds"`
	UsageJetbrainsSeconds       int64     `db:"usage_jetbrains_seconds" json:"usage_jetbrains_seconds"`
	UsageReconnectingPtySeconds int64     `db:"usage_reconnecting_pty_seconds" json:"usage_reconnecting_pty_seconds"`
	UsageSshSeconds             int64     `db:"usage_ssh_seconds" json:"usage_ssh_seconds"`
}

// GetTemplateInsightsByTemplate is used for Prometheus metrics. Keep
// in sync with GetTemplateInsights and UpsertTemplateUsageStats.
func (q *sqlQuerier) GetTemplateInsightsByTemplate(ctx context.Context, arg GetTemplateInsightsByTemplateParams) ([]GetTemplateInsightsByTemplateRow, error) {
	rows, err := q.db.QueryContext(ctx, getTemplateInsightsByTemplate, arg.StartTime, arg.EndTime)
	if err != nil {
		return nil, err
	}
	// Deferred Close guards error paths; the explicit Close after the loop
	// surfaces driver errors (the deferred call is then a no-op).
	defer rows.Close()
	var items []GetTemplateInsightsByTemplateRow
	for rows.Next() {
		var i GetTemplateInsightsByTemplateRow
		if err := rows.Scan(
			&i.TemplateID,
			&i.ActiveUsers,
			&i.UsageVscodeSeconds,
			&i.UsageJetbrainsSeconds,
			&i.UsageReconnectingPtySeconds,
			&i.UsageSshSeconds,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getTemplateParameterInsights is the SQL backing
// GetTemplateParameterInsights. It picks each workspace's latest build in the
// window, deduplicates parameter definitions across templates, then counts
// how often each parameter value was used.
const getTemplateParameterInsights = `-- name: GetTemplateParameterInsights :many
WITH latest_workspace_builds AS (
	SELECT
		wb.id,
		wbmax.template_id,
		wb.template_version_id
	FROM (
		SELECT
			tv.template_id, wbmax.workspace_id, MAX(wbmax.build_number) as max_build_number
		FROM workspace_builds wbmax
		JOIN template_versions tv ON (tv.id = wbmax.template_version_id)
		WHERE
			wbmax.created_at >= $1::timestamptz
			AND wbmax.created_at < $2::timestamptz
			AND CASE WHEN COALESCE(array_length($3::uuid[], 1), 0) > 0 THEN tv.template_id = ANY($3::uuid[]) ELSE TRUE END
		GROUP BY tv.template_id, wbmax.workspace_id
	) wbmax
	JOIN workspace_builds wb ON (
		wb.workspace_id = wbmax.workspace_id
		AND wb.build_number = wbmax.max_build_number
	)
), unique_template_params AS (
	SELECT
		ROW_NUMBER() OVER () AS num,
		array_agg(DISTINCT wb.template_id)::uuid[] AS template_ids,
		array_agg(wb.id)::uuid[] AS workspace_build_ids,
		tvp.name,
		tvp.type,
		tvp.display_name,
		tvp.description,
		tvp.options
	FROM latest_workspace_builds wb
	JOIN template_version_parameters tvp ON (tvp.template_version_id = wb.template_version_id)
	GROUP BY tvp.name, tvp.type, tvp.display_name, tvp.description, tvp.options
)

SELECT
	utp.num,
	utp.template_ids,
	utp.name,
	utp.type,
	utp.display_name,
	utp.description,
	utp.options,
	wbp.value,
	COUNT(wbp.value) AS count
FROM unique_template_params utp
JOIN workspace_build_parameters wbp ON (utp.workspace_build_ids @> ARRAY[wbp.workspace_build_id] AND utp.name = wbp.name)
GROUP BY utp.num, utp.template_ids, utp.name, utp.type, utp.display_name, utp.description, utp.options, wbp.value
`

// GetTemplateParameterInsightsParams bounds the build-creation time window
// and optionally restricts to a set of template IDs (empty means all).
type GetTemplateParameterInsightsParams struct {
	StartTime   time.Time   `db:"start_time" json:"start_time"`
	EndTime     time.Time   `db:"end_time" json:"end_time"`
	TemplateIDs []uuid.UUID `db:"template_ids" json:"template_ids"`
}

// GetTemplateParameterInsightsRow is one (parameter, value) usage count.
// Num identifies the deduplicated parameter definition; Options is the raw
// JSON options payload from the template version parameter.
type GetTemplateParameterInsightsRow struct {
	Num         int64           `db:"num" json:"num"`
	TemplateIDs []uuid.UUID     `db:"template_ids" json:"template_ids"`
	Name        string          `db:"name" json:"name"`
	Type        string          `db:"type" json:"type"`
	DisplayName string          `db:"display_name" json:"display_name"`
	Description string          `db:"description" json:"description"`
	Options     json.RawMessage `db:"options" json:"options"`
	Value       string          `db:"value" json:"value"`
	Count       int64           `db:"count" json:"count"`
}

// GetTemplateParameterInsights does for each template in a given timeframe,
// look for the latest workspace build (for every workspace) that has been
// created in the timeframe and return the aggregate usage counts of parameter
// values.
func (q *sqlQuerier) GetTemplateParameterInsights(ctx context.Context, arg GetTemplateParameterInsightsParams) ([]GetTemplateParameterInsightsRow, error) {
	rows, err := q.db.QueryContext(ctx, getTemplateParameterInsights, arg.StartTime, arg.EndTime, pq.Array(arg.TemplateIDs))
	if err != nil {
		return nil, err
	}
	// Deferred Close guards error paths; the explicit Close after the loop
	// surfaces driver errors (the deferred call is then a no-op).
	defer rows.Close()
	var items []GetTemplateParameterInsightsRow
	for rows.Next() {
		var i GetTemplateParameterInsightsRow
		if err := rows.Scan(
			&i.Num,
			pq.Array(&i.TemplateIDs),
			&i.Name,
			&i.Type,
			&i.DisplayName,
			&i.Description,
			&i.Options,
			&i.Value,
			&i.Count,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getTemplateUsageStats is the SQL backing GetTemplateUsageStats: a plain
// filtered read of the pre-aggregated template_usage_stats table.
const getTemplateUsageStats = `-- name: GetTemplateUsageStats :many
SELECT
	start_time, end_time, template_id, user_id, median_latency_ms, usage_mins, ssh_mins, sftp_mins, reconnecting_pty_mins, vscode_mins, jetbrains_mins, app_usage_mins
FROM
	template_usage_stats
WHERE
	start_time >= $1::timestamptz
	AND end_time <= $2::timestamptz
	AND CASE WHEN COALESCE(array_length($3::uuid[], 1), 0) > 0 THEN template_id = ANY($3::uuid[]) ELSE TRUE END
`

// GetTemplateUsageStatsParams bounds the time window and optionally filters
// by template IDs (empty slice means all templates).
type GetTemplateUsageStatsParams struct {
	StartTime   time.Time   `db:"start_time" json:"start_time"`
	EndTime     time.Time   `db:"end_time" json:"end_time"`
	TemplateIDs []uuid.UUID `db:"template_ids" json:"template_ids"`
}

// GetTemplateUsageStats returns the raw template_usage_stats rows matching
// the given window and optional template filter.
func (q *sqlQuerier) GetTemplateUsageStats(ctx context.Context, arg GetTemplateUsageStatsParams) ([]TemplateUsageStat, error) {
	rows, err := q.db.QueryContext(ctx, getTemplateUsageStats, arg.StartTime, arg.EndTime, pq.Array(arg.TemplateIDs))
	if err != nil {
		return nil, err
	}
	// Deferred Close guards error paths; the explicit Close after the loop
	// surfaces driver errors (the deferred call is then a no-op).
	defer rows.Close()
	var items []TemplateUsageStat
	for rows.Next() {
		var i TemplateUsageStat
		if err := rows.Scan(
			&i.StartTime,
			&i.EndTime,
			&i.TemplateID,
			&i.UserID,
			&i.MedianLatencyMs,
			&i.UsageMins,
			&i.SshMins,
			&i.SftpMins,
			&i.ReconnectingPtyMins,
			&i.VscodeMins,
			&i.JetbrainsMins,
			&i.AppUsageMins,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getUserActivityInsights is the SQL backing GetUserActivityInsights. It
// aggregates per-user usage from template_usage_stats (capped at 30 minutes
// per bucket) and joins user metadata for display.
const getUserActivityInsights = `-- name: GetUserActivityInsights :many
WITH
	deployment_stats AS (
		SELECT
			start_time,
			user_id,
			array_agg(template_id) AS template_ids,
			-- See motivation in GetTemplateInsights for LEAST(SUM(n), 30).
			LEAST(SUM(usage_mins), 30) AS usage_mins
		FROM
			template_usage_stats
		WHERE
			start_time >= $1::timestamptz
			AND end_time <= $2::timestamptz
			AND CASE WHEN COALESCE(array_length($3::uuid[], 1), 0) > 0 THEN template_id = ANY($3::uuid[]) ELSE TRUE END
		GROUP BY
			start_time, user_id
	),
	template_ids AS (
		SELECT
			user_id,
			array_agg(DISTINCT template_id) AS ids
		FROM
			deployment_stats, unnest(template_ids) template_id
		GROUP BY
			user_id
	)

SELECT
	ds.user_id,
	u.username,
	u.avatar_url,
	t.ids::uuid[] AS template_ids,
	(SUM(ds.usage_mins) * 60)::bigint AS usage_seconds
FROM
	deployment_stats ds
JOIN
	users u
ON
	u.id = ds.user_id
JOIN
	template_ids t
ON
	ds.user_id = t.user_id
GROUP BY
	ds.user_id, u.username, u.avatar_url, t.ids
ORDER BY
	ds.user_id ASC
`

// GetUserActivityInsightsParams bounds the time window and optionally
// filters by template IDs (empty slice means all templates).
type GetUserActivityInsightsParams struct {
	StartTime   time.Time   `db:"start_time" json:"start_time"`
	EndTime     time.Time   `db:"end_time" json:"end_time"`
	TemplateIDs []uuid.UUID `db:"template_ids" json:"template_ids"`
}

// GetUserActivityInsightsRow is one user's aggregate activity: identity
// fields, the distinct templates they used, and total usage in seconds.
type GetUserActivityInsightsRow struct {
	UserID       uuid.UUID   `db:"user_id" json:"user_id"`
	Username     string      `db:"username" json:"username"`
	AvatarURL    string      `db:"avatar_url" json:"avatar_url"`
	TemplateIDs  []uuid.UUID `db:"template_ids" json:"template_ids"`
	UsageSeconds int64       `db:"usage_seconds" json:"usage_seconds"`
}

// GetUserActivityInsights returns the ranking with top active users.
// The result can be filtered on template_ids, meaning only user data
// from workspaces based on those templates will be included.
// Note: The usage_seconds and usage_seconds_cumulative differ only when
// requesting deployment-wide (or multiple template) data. Cumulative
// produces a bloated value if a user has used multiple templates
// simultaneously.
func (q *sqlQuerier) GetUserActivityInsights(ctx context.Context, arg GetUserActivityInsightsParams) ([]GetUserActivityInsightsRow, error) {
	rows, err := q.db.QueryContext(ctx, getUserActivityInsights, arg.StartTime, arg.EndTime, pq.Array(arg.TemplateIDs))
	if err != nil {
		return nil, err
	}
	// Deferred Close guards error paths; the explicit Close after the loop
	// surfaces driver errors (the deferred call is then a no-op).
	defer rows.Close()
	var items []GetUserActivityInsightsRow
	for rows.Next() {
		var i GetUserActivityInsightsRow
		if err := rows.Scan(
			&i.UserID,
			&i.Username,
			&i.AvatarURL,
			pq.Array(&i.TemplateIDs),
			&i.UsageSeconds,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getUserLatencyInsights is the SQL backing GetUserLatencyInsights. It
// computes P50/P95 of the stored per-bucket median latencies per user;
// -1 is used as the sentinel when no latency data exists.
const getUserLatencyInsights = `-- name: GetUserLatencyInsights :many
SELECT
	tus.user_id,
	u.username,
	u.avatar_url,
	array_agg(DISTINCT tus.template_id)::uuid[] AS template_ids,
	COALESCE((PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY tus.median_latency_ms)), -1)::float AS workspace_connection_latency_50,
	COALESCE((PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY tus.median_latency_ms)), -1)::float AS workspace_connection_latency_95
FROM
	template_usage_stats tus
JOIN
	users u
ON
	u.id = tus.user_id
WHERE
	tus.start_time >= $1::timestamptz
	AND tus.end_time <= $2::timestamptz
	AND CASE WHEN COALESCE(array_length($3::uuid[], 1), 0) > 0 THEN tus.template_id = ANY($3::uuid[]) ELSE TRUE END
GROUP BY
	tus.user_id, u.username, u.avatar_url
ORDER BY
	tus.user_id ASC
`

// GetUserLatencyInsightsParams bounds the time window and optionally
// filters by template IDs (empty slice means all templates).
type GetUserLatencyInsightsParams struct {
	StartTime   time.Time   `db:"start_time" json:"start_time"`
	EndTime     time.Time   `db:"end_time" json:"end_time"`
	TemplateIDs []uuid.UUID `db:"template_ids" json:"template_ids"`
}

// GetUserLatencyInsightsRow is one user's latency percentiles (-1 when no
// data) along with the templates they used in the window.
type GetUserLatencyInsightsRow struct {
	UserID                       uuid.UUID   `db:"user_id" json:"user_id"`
	Username                     string      `db:"username" json:"username"`
	AvatarURL                    string      `db:"avatar_url" json:"avatar_url"`
	TemplateIDs                  []uuid.UUID `db:"template_ids" json:"template_ids"`
	WorkspaceConnectionLatency50 float64     `db:"workspace_connection_latency_50" json:"workspace_connection_latency_50"`
	WorkspaceConnectionLatency95 float64     `db:"workspace_connection_latency_95" json:"workspace_connection_latency_95"`
}

// GetUserLatencyInsights returns the median and 95th percentile connection
// latency that users have experienced. The result can be filtered on
// template_ids, meaning only user data from workspaces based on those templates
// will be included.
func (q *sqlQuerier) GetUserLatencyInsights(ctx context.Context, arg GetUserLatencyInsightsParams) ([]GetUserLatencyInsightsRow, error) {
	rows, err := q.db.QueryContext(ctx, getUserLatencyInsights, arg.StartTime, arg.EndTime, pq.Array(arg.TemplateIDs))
	if err != nil {
		return nil, err
	}
	// Deferred Close guards error paths; the explicit Close after the loop
	// surfaces driver errors (the deferred call is then a no-op).
	defer rows.Close()
	var items []GetUserLatencyInsightsRow
	for rows.Next() {
		var i GetUserLatencyInsightsRow
		if err := rows.Scan(
			&i.UserID,
			&i.Username,
			&i.AvatarURL,
			pq.Array(&i.TemplateIDs),
			&i.WorkspaceConnectionLatency50,
			&i.WorkspaceConnectionLatency95,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// upsertTemplateUsageStats is the SQL backing UpsertTemplateUsageStats. It
// takes no bind parameters: the window is derived from the newest existing
// template_usage_stats row (minus one hour, hour-truncated) up to NOW(),
// making re-runs idempotent via the ON CONFLICT clause at the end.
const upsertTemplateUsageStats = `-- name: UpsertTemplateUsageStats :exec
WITH
	latest_start AS (
		SELECT
			-- Truncate to hour so that we always look at even ranges of data.
			date_trunc('hour', COALESCE(
				MAX(start_time) - '1 hour'::interval,
				-- Fallback when there are no template usage stats yet.
				-- App stats can exist before this, but not agent stats,
				-- limit the lookback to avoid inconsistency.
				(SELECT MIN(created_at) FROM workspace_agent_stats)
			)) AS t
		FROM
			template_usage_stats
	),
	workspace_app_stat_buckets AS (
		SELECT
			-- Truncate the minute to the nearest half hour, this is the bucket size
			-- for the data.
			date_trunc('hour', s.minute_bucket) + trunc(date_part('minute', s.minute_bucket) / 30) * 30 * '1 minute'::interval AS time_bucket,
			w.template_id,
			was.user_id,
			-- Both app stats and agent stats track web terminal usage, but
			-- by different means. The app stats value should be more
			-- accurate so we don't want to discard it just yet.
			CASE
				WHEN was.access_method = 'terminal'
				THEN '[terminal]' -- Unique name, app names can't contain brackets.
				ELSE was.slug_or_port
			END AS app_name,
			COUNT(DISTINCT s.minute_bucket) AS app_minutes,
			-- Store each unique minute bucket for later merge between datasets.
			array_agg(DISTINCT s.minute_bucket) AS minute_buckets
		FROM
			workspace_app_stats AS was
		JOIN
			workspaces AS w
		ON
			w.id = was.workspace_id
		-- Generate a series of minute buckets for each session for computing the
		-- mintes/bucket.
		CROSS JOIN
			generate_series(
				date_trunc('minute', was.session_started_at),
				-- Subtract 1 μs to avoid creating an extra series.
				date_trunc('minute', was.session_ended_at - '1 microsecond'::interval),
				'1 minute'::interval
			) AS s(minute_bucket)
		WHERE
			-- s.minute_bucket >= @start_time::timestamptz
			-- AND s.minute_bucket < @end_time::timestamptz
			s.minute_bucket >= (SELECT t FROM latest_start)
			AND s.minute_bucket < NOW()
		GROUP BY
			time_bucket, w.template_id, was.user_id, was.access_method, was.slug_or_port
	),
	agent_stats_buckets AS (
		SELECT
			-- Truncate the minute to the nearest half hour, this is the bucket size
			-- for the data.
			date_trunc('hour', created_at) + trunc(date_part('minute', created_at) / 30) * 30 * '1 minute'::interval AS time_bucket,
			template_id,
			user_id,
			-- Store each unique minute bucket for later merge between datasets.
			array_agg(
				DISTINCT CASE
				WHEN
					session_count_ssh > 0
					-- TODO(mafredri): Enable when we have the column.
					-- OR session_count_sftp > 0
					OR session_count_reconnecting_pty > 0
					OR session_count_vscode > 0
					OR session_count_jetbrains > 0
				THEN
					date_trunc('minute', created_at)
				ELSE
					NULL
				END
			) AS minute_buckets,
			COUNT(DISTINCT CASE WHEN session_count_ssh > 0 THEN date_trunc('minute', created_at) ELSE NULL END) AS ssh_mins,
			-- TODO(mafredri): Enable when we have the column.
			-- COUNT(DISTINCT CASE WHEN session_count_sftp > 0 THEN date_trunc('minute', created_at) ELSE NULL END) AS sftp_mins,
			COUNT(DISTINCT CASE WHEN session_count_reconnecting_pty > 0 THEN date_trunc('minute', created_at) ELSE NULL END) AS reconnecting_pty_mins,
			COUNT(DISTINCT CASE WHEN session_count_vscode > 0 THEN date_trunc('minute', created_at) ELSE NULL END) AS vscode_mins,
			COUNT(DISTINCT CASE WHEN session_count_jetbrains > 0 THEN date_trunc('minute', created_at) ELSE NULL END) AS jetbrains_mins,
			-- NOTE(mafredri): The agent stats are currently very unreliable, and
			-- sometimes the connections are missing, even during active sessions.
			-- Since we can't fully rely on this, we check for "any connection
			-- during this half-hour". A better solution here would be preferable.
			MAX(connection_count) > 0 AS has_connection
		FROM
			workspace_agent_stats
		WHERE
			-- created_at >= @start_time::timestamptz
			-- AND created_at < @end_time::timestamptz
			created_at >= (SELECT t FROM latest_start)
			AND created_at < NOW()
			-- Inclusion criteria to filter out empty results.
			AND (
				session_count_ssh > 0
				-- TODO(mafredri): Enable when we have the column.
				-- OR session_count_sftp > 0
				OR session_count_reconnecting_pty > 0
				OR session_count_vscode > 0
				OR session_count_jetbrains > 0
			)
		GROUP BY
			time_bucket, template_id, user_id
	),
	stats AS (
		SELECT
			stats.time_bucket AS start_time,
			stats.time_bucket + '30 minutes'::interval AS end_time,
			stats.template_id,
			stats.user_id,
			-- Sum/distinct to handle zero/duplicate values due union and to unnest.
			COUNT(DISTINCT minute_bucket) AS usage_mins,
			array_agg(DISTINCT minute_bucket) AS minute_buckets,
			SUM(DISTINCT stats.ssh_mins) AS ssh_mins,
			SUM(DISTINCT stats.sftp_mins) AS sftp_mins,
			SUM(DISTINCT stats.reconnecting_pty_mins) AS reconnecting_pty_mins,
			SUM(DISTINCT stats.vscode_mins) AS vscode_mins,
			SUM(DISTINCT stats.jetbrains_mins) AS jetbrains_mins,
			-- This is what we unnested, re-nest as json.
			jsonb_object_agg(stats.app_name, stats.app_minutes) FILTER (WHERE stats.app_name IS NOT NULL) AS app_usage_mins
		FROM (
			SELECT
				time_bucket,
				template_id,
				user_id,
				0 AS ssh_mins,
				0 AS sftp_mins,
				0 AS reconnecting_pty_mins,
				0 AS vscode_mins,
				0 AS jetbrains_mins,
				app_name,
				app_minutes,
				minute_buckets
			FROM
				workspace_app_stat_buckets

			UNION ALL

			SELECT
				time_bucket,
				template_id,
				user_id,
				ssh_mins,
				-- TODO(mafredri): Enable when we have the column.
				0 AS sftp_mins,
				reconnecting_pty_mins,
				vscode_mins,
				jetbrains_mins,
				NULL AS app_name,
				NULL AS app_minutes,
				minute_buckets
			FROM
				agent_stats_buckets
			WHERE
				-- See note in the agent_stats_buckets CTE.
				has_connection
		) AS stats, unnest(minute_buckets) AS minute_bucket
		GROUP BY
			stats.time_bucket, stats.template_id, stats.user_id
	),
	minute_buckets AS (
		-- Create distinct minute buckets for user-activity, so we can filter out
		-- irrelevant latencies.
		SELECT DISTINCT ON (stats.start_time, stats.template_id, stats.user_id, minute_bucket)
			stats.start_time,
			stats.template_id,
			stats.user_id,
			minute_bucket
		FROM
			stats, unnest(minute_buckets) AS minute_bucket
	),
	latencies AS (
		-- Select all non-zero latencies for all the minutes that a user used the
		-- workspace in some way.
		SELECT
			mb.start_time,
			mb.template_id,
			mb.user_id,
			-- TODO(mafredri): We're doing medians on medians here, we may want to
			-- improve upon this at some point.
			PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY was.connection_median_latency_ms)::real AS median_latency_ms
		FROM
			minute_buckets AS mb
		JOIN
			workspace_agent_stats AS was
		ON
			was.created_at >= (SELECT t FROM latest_start)
			AND was.created_at < NOW()
			AND date_trunc('minute', was.created_at) = mb.minute_bucket
			AND was.template_id = mb.template_id
			AND was.user_id = mb.user_id
			AND was.connection_median_latency_ms >= 0
		GROUP BY
			mb.start_time, mb.template_id, mb.user_id
	)

INSERT INTO template_usage_stats AS tus (
	start_time,
	end_time,
	template_id,
	user_id,
	usage_mins,
	median_latency_ms,
	ssh_mins,
	sftp_mins,
	reconnecting_pty_mins,
	vscode_mins,
	jetbrains_mins,
	app_usage_mins
) (
	SELECT
		stats.start_time,
		stats.end_time,
		stats.template_id,
		stats.user_id,
		stats.usage_mins,
		latencies.median_latency_ms,
		stats.ssh_mins,
		stats.sftp_mins,
		stats.reconnecting_pty_mins,
		stats.vscode_mins,
		stats.jetbrains_mins,
		stats.app_usage_mins
	FROM
		stats
	LEFT JOIN
		latencies
	ON
		-- The latencies group-by ensures there at most one row.
		latencies.start_time = stats.start_time
		AND latencies.template_id = stats.template_id
		AND latencies.user_id = stats.user_id
)
ON CONFLICT
	(start_time, template_id, user_id)
DO UPDATE
SET
	usage_mins = EXCLUDED.usage_mins,
	median_latency_ms = EXCLUDED.median_latency_ms,
	ssh_mins = EXCLUDED.ssh_mins,
	sftp_mins = EXCLUDED.sftp_mins,
	reconnecting_pty_mins = EXCLUDED.reconnecting_pty_mins,
	vscode_mins = EXCLUDED.vscode_mins,
	jetbrains_mins = EXCLUDED.jetbrains_mins,
	app_usage_mins = EXCLUDED.app_usage_mins
WHERE
	(tus.*) IS DISTINCT FROM (EXCLUDED.*)
`

// This query aggregates the workspace_agent_stats and workspace_app_stats data
// into a single table for efficient storage and querying. Half-hour buckets are
// used to store the data, and the minutes are summed for each user and template
// combination. The result is stored in the template_usage_stats table.
func (q *sqlQuerier) UpsertTemplateUsageStats(ctx context.Context) error {
	// No parameters: the query derives its own time window (see the
	// latest_start CTE in upsertTemplateUsageStats).
	_, err := q.db.ExecContext(ctx, upsertTemplateUsageStats)
	return err
}
|
|
|
|
const getJFrogXrayScanByWorkspaceAndAgentID = `-- name: GetJFrogXrayScanByWorkspaceAndAgentID :one
|
|
SELECT
|
|
agent_id, workspace_id, critical, high, medium, results_url
|
|
FROM
|
|
jfrog_xray_scans
|
|
WHERE
|
|
agent_id = $1
|
|
AND
|
|
workspace_id = $2
|
|
LIMIT
|
|
1
|
|
`
|
|
|
|
type GetJFrogXrayScanByWorkspaceAndAgentIDParams struct {
|
|
AgentID uuid.UUID `db:"agent_id" json:"agent_id"`
|
|
WorkspaceID uuid.UUID `db:"workspace_id" json:"workspace_id"`
|
|
}
|
|
|
|
func (q *sqlQuerier) GetJFrogXrayScanByWorkspaceAndAgentID(ctx context.Context, arg GetJFrogXrayScanByWorkspaceAndAgentIDParams) (JfrogXrayScan, error) {
|
|
row := q.db.QueryRowContext(ctx, getJFrogXrayScanByWorkspaceAndAgentID, arg.AgentID, arg.WorkspaceID)
|
|
var i JfrogXrayScan
|
|
err := row.Scan(
|
|
&i.AgentID,
|
|
&i.WorkspaceID,
|
|
&i.Critical,
|
|
&i.High,
|
|
&i.Medium,
|
|
&i.ResultsUrl,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const upsertJFrogXrayScanByWorkspaceAndAgentID = `-- name: UpsertJFrogXrayScanByWorkspaceAndAgentID :exec
|
|
INSERT INTO
|
|
jfrog_xray_scans (
|
|
agent_id,
|
|
workspace_id,
|
|
critical,
|
|
high,
|
|
medium,
|
|
results_url
|
|
)
|
|
VALUES
|
|
($1, $2, $3, $4, $5, $6)
|
|
ON CONFLICT (agent_id, workspace_id)
|
|
DO UPDATE SET critical = $3, high = $4, medium = $5, results_url = $6
|
|
`
|
|
|
|
type UpsertJFrogXrayScanByWorkspaceAndAgentIDParams struct {
|
|
AgentID uuid.UUID `db:"agent_id" json:"agent_id"`
|
|
WorkspaceID uuid.UUID `db:"workspace_id" json:"workspace_id"`
|
|
Critical int32 `db:"critical" json:"critical"`
|
|
High int32 `db:"high" json:"high"`
|
|
Medium int32 `db:"medium" json:"medium"`
|
|
ResultsUrl string `db:"results_url" json:"results_url"`
|
|
}
|
|
|
|
func (q *sqlQuerier) UpsertJFrogXrayScanByWorkspaceAndAgentID(ctx context.Context, arg UpsertJFrogXrayScanByWorkspaceAndAgentIDParams) error {
|
|
_, err := q.db.ExecContext(ctx, upsertJFrogXrayScanByWorkspaceAndAgentID,
|
|
arg.AgentID,
|
|
arg.WorkspaceID,
|
|
arg.Critical,
|
|
arg.High,
|
|
arg.Medium,
|
|
arg.ResultsUrl,
|
|
)
|
|
return err
|
|
}
|
|
|
|
const deleteLicense = `-- name: DeleteLicense :one
|
|
DELETE
|
|
FROM licenses
|
|
WHERE id = $1
|
|
RETURNING id
|
|
`
|
|
|
|
func (q *sqlQuerier) DeleteLicense(ctx context.Context, id int32) (int32, error) {
|
|
row := q.db.QueryRowContext(ctx, deleteLicense, id)
|
|
err := row.Scan(&id)
|
|
return id, err
|
|
}
|
|
|
|
const getLicenseByID = `-- name: GetLicenseByID :one
|
|
SELECT
|
|
id, uploaded_at, jwt, exp, uuid
|
|
FROM
|
|
licenses
|
|
WHERE
|
|
id = $1
|
|
LIMIT
|
|
1
|
|
`
|
|
|
|
func (q *sqlQuerier) GetLicenseByID(ctx context.Context, id int32) (License, error) {
|
|
row := q.db.QueryRowContext(ctx, getLicenseByID, id)
|
|
var i License
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.UploadedAt,
|
|
&i.JWT,
|
|
&i.Exp,
|
|
&i.UUID,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const getLicenses = `-- name: GetLicenses :many
|
|
SELECT id, uploaded_at, jwt, exp, uuid
|
|
FROM licenses
|
|
ORDER BY (id)
|
|
`
|
|
|
|
func (q *sqlQuerier) GetLicenses(ctx context.Context) ([]License, error) {
|
|
rows, err := q.db.QueryContext(ctx, getLicenses)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []License
|
|
for rows.Next() {
|
|
var i License
|
|
if err := rows.Scan(
|
|
&i.ID,
|
|
&i.UploadedAt,
|
|
&i.JWT,
|
|
&i.Exp,
|
|
&i.UUID,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const getUnexpiredLicenses = `-- name: GetUnexpiredLicenses :many
|
|
SELECT id, uploaded_at, jwt, exp, uuid
|
|
FROM licenses
|
|
WHERE exp > NOW()
|
|
ORDER BY (id)
|
|
`
|
|
|
|
func (q *sqlQuerier) GetUnexpiredLicenses(ctx context.Context) ([]License, error) {
|
|
rows, err := q.db.QueryContext(ctx, getUnexpiredLicenses)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []License
|
|
for rows.Next() {
|
|
var i License
|
|
if err := rows.Scan(
|
|
&i.ID,
|
|
&i.UploadedAt,
|
|
&i.JWT,
|
|
&i.Exp,
|
|
&i.UUID,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const insertLicense = `-- name: InsertLicense :one
|
|
INSERT INTO
|
|
licenses (
|
|
uploaded_at,
|
|
jwt,
|
|
exp,
|
|
uuid
|
|
)
|
|
VALUES
|
|
($1, $2, $3, $4) RETURNING id, uploaded_at, jwt, exp, uuid
|
|
`
|
|
|
|
type InsertLicenseParams struct {
|
|
UploadedAt time.Time `db:"uploaded_at" json:"uploaded_at"`
|
|
JWT string `db:"jwt" json:"jwt"`
|
|
Exp time.Time `db:"exp" json:"exp"`
|
|
UUID uuid.UUID `db:"uuid" json:"uuid"`
|
|
}
|
|
|
|
func (q *sqlQuerier) InsertLicense(ctx context.Context, arg InsertLicenseParams) (License, error) {
|
|
row := q.db.QueryRowContext(ctx, insertLicense,
|
|
arg.UploadedAt,
|
|
arg.JWT,
|
|
arg.Exp,
|
|
arg.UUID,
|
|
)
|
|
var i License
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.UploadedAt,
|
|
&i.JWT,
|
|
&i.Exp,
|
|
&i.UUID,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const acquireLock = `-- name: AcquireLock :exec
|
|
SELECT pg_advisory_xact_lock($1)
|
|
`
|
|
|
|
// Blocks until the lock is acquired.
|
|
//
|
|
// This must be called from within a transaction. The lock will be automatically
|
|
// released when the transaction ends.
|
|
func (q *sqlQuerier) AcquireLock(ctx context.Context, pgAdvisoryXactLock int64) error {
|
|
_, err := q.db.ExecContext(ctx, acquireLock, pgAdvisoryXactLock)
|
|
return err
|
|
}
|
|
|
|
const tryAcquireLock = `-- name: TryAcquireLock :one
|
|
SELECT pg_try_advisory_xact_lock($1)
|
|
`
|
|
|
|
// Non blocking lock. Returns true if the lock was acquired, false otherwise.
|
|
//
|
|
// This must be called from within a transaction. The lock will be automatically
|
|
// released when the transaction ends.
|
|
func (q *sqlQuerier) TryAcquireLock(ctx context.Context, pgTryAdvisoryXactLock int64) (bool, error) {
|
|
row := q.db.QueryRowContext(ctx, tryAcquireLock, pgTryAdvisoryXactLock)
|
|
var pg_try_advisory_xact_lock bool
|
|
err := row.Scan(&pg_try_advisory_xact_lock)
|
|
return pg_try_advisory_xact_lock, err
|
|
}
|
|
|
|
const acquireNotificationMessages = `-- name: AcquireNotificationMessages :many
|
|
WITH acquired AS (
|
|
UPDATE
|
|
notification_messages
|
|
SET queued_seconds = GREATEST(0, EXTRACT(EPOCH FROM (NOW() - updated_at)))::FLOAT,
|
|
updated_at = NOW(),
|
|
status = 'leased'::notification_message_status,
|
|
status_reason = 'Leased by notifier ' || $1::uuid,
|
|
leased_until = NOW() + CONCAT($2::int, ' seconds')::interval
|
|
WHERE id IN (SELECT nm.id
|
|
FROM notification_messages AS nm
|
|
WHERE (
|
|
(
|
|
-- message is in acquirable states
|
|
nm.status IN (
|
|
'pending'::notification_message_status,
|
|
'temporary_failure'::notification_message_status
|
|
)
|
|
)
|
|
-- or somehow the message was left in leased for longer than its lease period
|
|
OR (
|
|
nm.status = 'leased'::notification_message_status
|
|
AND nm.leased_until < NOW()
|
|
)
|
|
)
|
|
AND (
|
|
-- exclude all messages which have exceeded the max attempts; these will be purged later
|
|
nm.attempt_count IS NULL OR nm.attempt_count < $3::int
|
|
)
|
|
-- if set, do not retry until we've exceeded the wait time
|
|
AND (
|
|
CASE
|
|
WHEN nm.next_retry_after IS NOT NULL THEN nm.next_retry_after < NOW()
|
|
ELSE true
|
|
END
|
|
)
|
|
ORDER BY nm.created_at ASC
|
|
-- Ensure that multiple concurrent readers cannot retrieve the same rows
|
|
FOR UPDATE OF nm
|
|
SKIP LOCKED
|
|
LIMIT $4)
|
|
RETURNING id, notification_template_id, user_id, method, status, status_reason, created_by, payload, attempt_count, targets, created_at, updated_at, leased_until, next_retry_after, queued_seconds, dedupe_hash)
|
|
SELECT
|
|
-- message
|
|
nm.id,
|
|
nm.payload,
|
|
nm.method,
|
|
nm.attempt_count::int AS attempt_count,
|
|
nm.queued_seconds::float AS queued_seconds,
|
|
-- template
|
|
nt.id AS template_id,
|
|
nt.title_template,
|
|
nt.body_template,
|
|
-- preferences
|
|
(CASE WHEN np.disabled IS NULL THEN false ELSE np.disabled END)::bool AS disabled
|
|
FROM acquired nm
|
|
JOIN notification_templates nt ON nm.notification_template_id = nt.id
|
|
LEFT JOIN notification_preferences AS np
|
|
ON (np.user_id = nm.user_id AND np.notification_template_id = nm.notification_template_id)
|
|
`
|
|
|
|
type AcquireNotificationMessagesParams struct {
|
|
NotifierID uuid.UUID `db:"notifier_id" json:"notifier_id"`
|
|
LeaseSeconds int32 `db:"lease_seconds" json:"lease_seconds"`
|
|
MaxAttemptCount int32 `db:"max_attempt_count" json:"max_attempt_count"`
|
|
Count int32 `db:"count" json:"count"`
|
|
}
|
|
|
|
type AcquireNotificationMessagesRow struct {
|
|
ID uuid.UUID `db:"id" json:"id"`
|
|
Payload json.RawMessage `db:"payload" json:"payload"`
|
|
Method NotificationMethod `db:"method" json:"method"`
|
|
AttemptCount int32 `db:"attempt_count" json:"attempt_count"`
|
|
QueuedSeconds float64 `db:"queued_seconds" json:"queued_seconds"`
|
|
TemplateID uuid.UUID `db:"template_id" json:"template_id"`
|
|
TitleTemplate string `db:"title_template" json:"title_template"`
|
|
BodyTemplate string `db:"body_template" json:"body_template"`
|
|
Disabled bool `db:"disabled" json:"disabled"`
|
|
}
|
|
|
|
// Acquires the lease for a given count of notification messages, to enable concurrent dequeuing and subsequent sending.
|
|
// Only rows that aren't already leased (or ones which are leased but have exceeded their lease period) are returned.
|
|
//
|
|
// A "lease" here refers to a notifier taking ownership of a notification_messages row. A lease survives for the duration
|
|
// of CODER_NOTIFICATIONS_LEASE_PERIOD. Once a message is delivered, its status is updated and the lease expires (set to NULL).
|
|
// If a message exceeds its lease, that implies the notifier did not shutdown cleanly, or the table update failed somehow,
|
|
// and the row will then be eligible to be dequeued by another notifier.
|
|
//
|
|
// SKIP LOCKED is used to jump over locked rows. This prevents multiple notifiers from acquiring the same messages.
|
|
// See: https://www.postgresql.org/docs/9.5/sql-select.html#SQL-FOR-UPDATE-SHARE
|
|
func (q *sqlQuerier) AcquireNotificationMessages(ctx context.Context, arg AcquireNotificationMessagesParams) ([]AcquireNotificationMessagesRow, error) {
|
|
rows, err := q.db.QueryContext(ctx, acquireNotificationMessages,
|
|
arg.NotifierID,
|
|
arg.LeaseSeconds,
|
|
arg.MaxAttemptCount,
|
|
arg.Count,
|
|
)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []AcquireNotificationMessagesRow
|
|
for rows.Next() {
|
|
var i AcquireNotificationMessagesRow
|
|
if err := rows.Scan(
|
|
&i.ID,
|
|
&i.Payload,
|
|
&i.Method,
|
|
&i.AttemptCount,
|
|
&i.QueuedSeconds,
|
|
&i.TemplateID,
|
|
&i.TitleTemplate,
|
|
&i.BodyTemplate,
|
|
&i.Disabled,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const bulkMarkNotificationMessagesFailed = `-- name: BulkMarkNotificationMessagesFailed :execrows
|
|
UPDATE notification_messages
|
|
SET queued_seconds = 0,
|
|
updated_at = subquery.failed_at,
|
|
attempt_count = attempt_count + 1,
|
|
status = CASE
|
|
WHEN attempt_count + 1 < $1::int THEN subquery.status
|
|
ELSE 'permanent_failure'::notification_message_status END,
|
|
status_reason = subquery.status_reason,
|
|
leased_until = NULL,
|
|
next_retry_after = CASE
|
|
WHEN (attempt_count + 1 < $1::int)
|
|
THEN NOW() + CONCAT($2::int, ' seconds')::interval END
|
|
FROM (SELECT UNNEST($3::uuid[]) AS id,
|
|
UNNEST($4::timestamptz[]) AS failed_at,
|
|
UNNEST($5::notification_message_status[]) AS status,
|
|
UNNEST($6::text[]) AS status_reason) AS subquery
|
|
WHERE notification_messages.id = subquery.id
|
|
`
|
|
|
|
type BulkMarkNotificationMessagesFailedParams struct {
|
|
MaxAttempts int32 `db:"max_attempts" json:"max_attempts"`
|
|
RetryInterval int32 `db:"retry_interval" json:"retry_interval"`
|
|
IDs []uuid.UUID `db:"ids" json:"ids"`
|
|
FailedAts []time.Time `db:"failed_ats" json:"failed_ats"`
|
|
Statuses []NotificationMessageStatus `db:"statuses" json:"statuses"`
|
|
StatusReasons []string `db:"status_reasons" json:"status_reasons"`
|
|
}
|
|
|
|
func (q *sqlQuerier) BulkMarkNotificationMessagesFailed(ctx context.Context, arg BulkMarkNotificationMessagesFailedParams) (int64, error) {
|
|
result, err := q.db.ExecContext(ctx, bulkMarkNotificationMessagesFailed,
|
|
arg.MaxAttempts,
|
|
arg.RetryInterval,
|
|
pq.Array(arg.IDs),
|
|
pq.Array(arg.FailedAts),
|
|
pq.Array(arg.Statuses),
|
|
pq.Array(arg.StatusReasons),
|
|
)
|
|
if err != nil {
|
|
return 0, err
|
|
}
|
|
return result.RowsAffected()
|
|
}
|
|
|
|
const bulkMarkNotificationMessagesSent = `-- name: BulkMarkNotificationMessagesSent :execrows
|
|
UPDATE notification_messages
|
|
SET queued_seconds = 0,
|
|
updated_at = new_values.sent_at,
|
|
attempt_count = attempt_count + 1,
|
|
status = 'sent'::notification_message_status,
|
|
status_reason = NULL,
|
|
leased_until = NULL,
|
|
next_retry_after = NULL
|
|
FROM (SELECT UNNEST($1::uuid[]) AS id,
|
|
UNNEST($2::timestamptz[]) AS sent_at)
|
|
AS new_values
|
|
WHERE notification_messages.id = new_values.id
|
|
`
|
|
|
|
type BulkMarkNotificationMessagesSentParams struct {
|
|
IDs []uuid.UUID `db:"ids" json:"ids"`
|
|
SentAts []time.Time `db:"sent_ats" json:"sent_ats"`
|
|
}
|
|
|
|
func (q *sqlQuerier) BulkMarkNotificationMessagesSent(ctx context.Context, arg BulkMarkNotificationMessagesSentParams) (int64, error) {
|
|
result, err := q.db.ExecContext(ctx, bulkMarkNotificationMessagesSent, pq.Array(arg.IDs), pq.Array(arg.SentAts))
|
|
if err != nil {
|
|
return 0, err
|
|
}
|
|
return result.RowsAffected()
|
|
}
|
|
|
|
const deleteOldNotificationMessages = `-- name: DeleteOldNotificationMessages :exec
|
|
DELETE
|
|
FROM notification_messages
|
|
WHERE id IN
|
|
(SELECT id
|
|
FROM notification_messages AS nested
|
|
WHERE nested.updated_at < NOW() - INTERVAL '7 days')
|
|
`
|
|
|
|
// Delete all notification messages which have not been updated for over a week.
|
|
func (q *sqlQuerier) DeleteOldNotificationMessages(ctx context.Context) error {
|
|
_, err := q.db.ExecContext(ctx, deleteOldNotificationMessages)
|
|
return err
|
|
}
|
|
|
|
const enqueueNotificationMessage = `-- name: EnqueueNotificationMessage :exec
|
|
INSERT INTO notification_messages (id, notification_template_id, user_id, method, payload, targets, created_by, created_at)
|
|
VALUES ($1,
|
|
$2,
|
|
$3,
|
|
$4::notification_method,
|
|
$5::jsonb,
|
|
$6,
|
|
$7,
|
|
$8)
|
|
`
|
|
|
|
type EnqueueNotificationMessageParams struct {
|
|
ID uuid.UUID `db:"id" json:"id"`
|
|
NotificationTemplateID uuid.UUID `db:"notification_template_id" json:"notification_template_id"`
|
|
UserID uuid.UUID `db:"user_id" json:"user_id"`
|
|
Method NotificationMethod `db:"method" json:"method"`
|
|
Payload json.RawMessage `db:"payload" json:"payload"`
|
|
Targets []uuid.UUID `db:"targets" json:"targets"`
|
|
CreatedBy string `db:"created_by" json:"created_by"`
|
|
CreatedAt time.Time `db:"created_at" json:"created_at"`
|
|
}
|
|
|
|
func (q *sqlQuerier) EnqueueNotificationMessage(ctx context.Context, arg EnqueueNotificationMessageParams) error {
|
|
_, err := q.db.ExecContext(ctx, enqueueNotificationMessage,
|
|
arg.ID,
|
|
arg.NotificationTemplateID,
|
|
arg.UserID,
|
|
arg.Method,
|
|
arg.Payload,
|
|
pq.Array(arg.Targets),
|
|
arg.CreatedBy,
|
|
arg.CreatedAt,
|
|
)
|
|
return err
|
|
}
|
|
|
|
const fetchNewMessageMetadata = `-- name: FetchNewMessageMetadata :one
|
|
SELECT nt.name AS notification_name,
|
|
nt.actions AS actions,
|
|
nt.method AS custom_method,
|
|
u.id AS user_id,
|
|
u.email AS user_email,
|
|
COALESCE(NULLIF(u.name, ''), NULLIF(u.username, ''))::text AS user_name,
|
|
u.username AS user_username
|
|
FROM notification_templates nt,
|
|
users u
|
|
WHERE nt.id = $1
|
|
AND u.id = $2
|
|
`
|
|
|
|
type FetchNewMessageMetadataParams struct {
|
|
NotificationTemplateID uuid.UUID `db:"notification_template_id" json:"notification_template_id"`
|
|
UserID uuid.UUID `db:"user_id" json:"user_id"`
|
|
}
|
|
|
|
type FetchNewMessageMetadataRow struct {
|
|
NotificationName string `db:"notification_name" json:"notification_name"`
|
|
Actions []byte `db:"actions" json:"actions"`
|
|
CustomMethod NullNotificationMethod `db:"custom_method" json:"custom_method"`
|
|
UserID uuid.UUID `db:"user_id" json:"user_id"`
|
|
UserEmail string `db:"user_email" json:"user_email"`
|
|
UserName string `db:"user_name" json:"user_name"`
|
|
UserUsername string `db:"user_username" json:"user_username"`
|
|
}
|
|
|
|
// This is used to build up the notification_message's JSON payload.
|
|
func (q *sqlQuerier) FetchNewMessageMetadata(ctx context.Context, arg FetchNewMessageMetadataParams) (FetchNewMessageMetadataRow, error) {
|
|
row := q.db.QueryRowContext(ctx, fetchNewMessageMetadata, arg.NotificationTemplateID, arg.UserID)
|
|
var i FetchNewMessageMetadataRow
|
|
err := row.Scan(
|
|
&i.NotificationName,
|
|
&i.Actions,
|
|
&i.CustomMethod,
|
|
&i.UserID,
|
|
&i.UserEmail,
|
|
&i.UserName,
|
|
&i.UserUsername,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const getNotificationMessagesByStatus = `-- name: GetNotificationMessagesByStatus :many
|
|
SELECT id, notification_template_id, user_id, method, status, status_reason, created_by, payload, attempt_count, targets, created_at, updated_at, leased_until, next_retry_after, queued_seconds, dedupe_hash
|
|
FROM notification_messages
|
|
WHERE status = $1
|
|
LIMIT $2::int
|
|
`
|
|
|
|
type GetNotificationMessagesByStatusParams struct {
|
|
Status NotificationMessageStatus `db:"status" json:"status"`
|
|
Limit int32 `db:"limit" json:"limit"`
|
|
}
|
|
|
|
func (q *sqlQuerier) GetNotificationMessagesByStatus(ctx context.Context, arg GetNotificationMessagesByStatusParams) ([]NotificationMessage, error) {
|
|
rows, err := q.db.QueryContext(ctx, getNotificationMessagesByStatus, arg.Status, arg.Limit)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []NotificationMessage
|
|
for rows.Next() {
|
|
var i NotificationMessage
|
|
if err := rows.Scan(
|
|
&i.ID,
|
|
&i.NotificationTemplateID,
|
|
&i.UserID,
|
|
&i.Method,
|
|
&i.Status,
|
|
&i.StatusReason,
|
|
&i.CreatedBy,
|
|
&i.Payload,
|
|
&i.AttemptCount,
|
|
pq.Array(&i.Targets),
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.LeasedUntil,
|
|
&i.NextRetryAfter,
|
|
&i.QueuedSeconds,
|
|
&i.DedupeHash,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const getNotificationTemplateByID = `-- name: GetNotificationTemplateByID :one
|
|
SELECT id, name, title_template, body_template, actions, "group", method, kind
|
|
FROM notification_templates
|
|
WHERE id = $1::uuid
|
|
`
|
|
|
|
func (q *sqlQuerier) GetNotificationTemplateByID(ctx context.Context, id uuid.UUID) (NotificationTemplate, error) {
|
|
row := q.db.QueryRowContext(ctx, getNotificationTemplateByID, id)
|
|
var i NotificationTemplate
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.Name,
|
|
&i.TitleTemplate,
|
|
&i.BodyTemplate,
|
|
&i.Actions,
|
|
&i.Group,
|
|
&i.Method,
|
|
&i.Kind,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const getNotificationTemplatesByKind = `-- name: GetNotificationTemplatesByKind :many
|
|
SELECT id, name, title_template, body_template, actions, "group", method, kind
|
|
FROM notification_templates
|
|
WHERE kind = $1::notification_template_kind
|
|
ORDER BY name ASC
|
|
`
|
|
|
|
func (q *sqlQuerier) GetNotificationTemplatesByKind(ctx context.Context, kind NotificationTemplateKind) ([]NotificationTemplate, error) {
|
|
rows, err := q.db.QueryContext(ctx, getNotificationTemplatesByKind, kind)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []NotificationTemplate
|
|
for rows.Next() {
|
|
var i NotificationTemplate
|
|
if err := rows.Scan(
|
|
&i.ID,
|
|
&i.Name,
|
|
&i.TitleTemplate,
|
|
&i.BodyTemplate,
|
|
&i.Actions,
|
|
&i.Group,
|
|
&i.Method,
|
|
&i.Kind,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const getUserNotificationPreferences = `-- name: GetUserNotificationPreferences :many
|
|
SELECT user_id, notification_template_id, disabled, created_at, updated_at
|
|
FROM notification_preferences
|
|
WHERE user_id = $1::uuid
|
|
`
|
|
|
|
func (q *sqlQuerier) GetUserNotificationPreferences(ctx context.Context, userID uuid.UUID) ([]NotificationPreference, error) {
|
|
rows, err := q.db.QueryContext(ctx, getUserNotificationPreferences, userID)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []NotificationPreference
|
|
for rows.Next() {
|
|
var i NotificationPreference
|
|
if err := rows.Scan(
|
|
&i.UserID,
|
|
&i.NotificationTemplateID,
|
|
&i.Disabled,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const updateNotificationTemplateMethodByID = `-- name: UpdateNotificationTemplateMethodByID :one
|
|
UPDATE notification_templates
|
|
SET method = $1::notification_method
|
|
WHERE id = $2::uuid
|
|
RETURNING id, name, title_template, body_template, actions, "group", method, kind
|
|
`
|
|
|
|
type UpdateNotificationTemplateMethodByIDParams struct {
|
|
Method NullNotificationMethod `db:"method" json:"method"`
|
|
ID uuid.UUID `db:"id" json:"id"`
|
|
}
|
|
|
|
func (q *sqlQuerier) UpdateNotificationTemplateMethodByID(ctx context.Context, arg UpdateNotificationTemplateMethodByIDParams) (NotificationTemplate, error) {
|
|
row := q.db.QueryRowContext(ctx, updateNotificationTemplateMethodByID, arg.Method, arg.ID)
|
|
var i NotificationTemplate
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.Name,
|
|
&i.TitleTemplate,
|
|
&i.BodyTemplate,
|
|
&i.Actions,
|
|
&i.Group,
|
|
&i.Method,
|
|
&i.Kind,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const updateUserNotificationPreferences = `-- name: UpdateUserNotificationPreferences :execrows
|
|
INSERT
|
|
INTO notification_preferences (user_id, notification_template_id, disabled)
|
|
SELECT $1::uuid, new_values.notification_template_id, new_values.disabled
|
|
FROM (SELECT UNNEST($2::uuid[]) AS notification_template_id,
|
|
UNNEST($3::bool[]) AS disabled) AS new_values
|
|
ON CONFLICT (user_id, notification_template_id) DO UPDATE
|
|
SET disabled = EXCLUDED.disabled,
|
|
updated_at = CURRENT_TIMESTAMP
|
|
`
|
|
|
|
type UpdateUserNotificationPreferencesParams struct {
|
|
UserID uuid.UUID `db:"user_id" json:"user_id"`
|
|
NotificationTemplateIds []uuid.UUID `db:"notification_template_ids" json:"notification_template_ids"`
|
|
Disableds []bool `db:"disableds" json:"disableds"`
|
|
}
|
|
|
|
func (q *sqlQuerier) UpdateUserNotificationPreferences(ctx context.Context, arg UpdateUserNotificationPreferencesParams) (int64, error) {
|
|
result, err := q.db.ExecContext(ctx, updateUserNotificationPreferences, arg.UserID, pq.Array(arg.NotificationTemplateIds), pq.Array(arg.Disableds))
|
|
if err != nil {
|
|
return 0, err
|
|
}
|
|
return result.RowsAffected()
|
|
}
|
|
|
|
const deleteOAuth2ProviderAppByID = `-- name: DeleteOAuth2ProviderAppByID :exec
|
|
DELETE FROM oauth2_provider_apps WHERE id = $1
|
|
`
|
|
|
|
func (q *sqlQuerier) DeleteOAuth2ProviderAppByID(ctx context.Context, id uuid.UUID) error {
|
|
_, err := q.db.ExecContext(ctx, deleteOAuth2ProviderAppByID, id)
|
|
return err
|
|
}
|
|
|
|
const deleteOAuth2ProviderAppCodeByID = `-- name: DeleteOAuth2ProviderAppCodeByID :exec
|
|
DELETE FROM oauth2_provider_app_codes WHERE id = $1
|
|
`
|
|
|
|
func (q *sqlQuerier) DeleteOAuth2ProviderAppCodeByID(ctx context.Context, id uuid.UUID) error {
|
|
_, err := q.db.ExecContext(ctx, deleteOAuth2ProviderAppCodeByID, id)
|
|
return err
|
|
}
|
|
|
|
const deleteOAuth2ProviderAppCodesByAppAndUserID = `-- name: DeleteOAuth2ProviderAppCodesByAppAndUserID :exec
|
|
DELETE FROM oauth2_provider_app_codes WHERE app_id = $1 AND user_id = $2
|
|
`
|
|
|
|
type DeleteOAuth2ProviderAppCodesByAppAndUserIDParams struct {
|
|
AppID uuid.UUID `db:"app_id" json:"app_id"`
|
|
UserID uuid.UUID `db:"user_id" json:"user_id"`
|
|
}
|
|
|
|
func (q *sqlQuerier) DeleteOAuth2ProviderAppCodesByAppAndUserID(ctx context.Context, arg DeleteOAuth2ProviderAppCodesByAppAndUserIDParams) error {
|
|
_, err := q.db.ExecContext(ctx, deleteOAuth2ProviderAppCodesByAppAndUserID, arg.AppID, arg.UserID)
|
|
return err
|
|
}
|
|
|
|
const deleteOAuth2ProviderAppSecretByID = `-- name: DeleteOAuth2ProviderAppSecretByID :exec
|
|
DELETE FROM oauth2_provider_app_secrets WHERE id = $1
|
|
`
|
|
|
|
func (q *sqlQuerier) DeleteOAuth2ProviderAppSecretByID(ctx context.Context, id uuid.UUID) error {
|
|
_, err := q.db.ExecContext(ctx, deleteOAuth2ProviderAppSecretByID, id)
|
|
return err
|
|
}
|
|
|
|
const deleteOAuth2ProviderAppTokensByAppAndUserID = `-- name: DeleteOAuth2ProviderAppTokensByAppAndUserID :exec
|
|
DELETE FROM
|
|
oauth2_provider_app_tokens
|
|
USING
|
|
oauth2_provider_app_secrets, api_keys
|
|
WHERE
|
|
oauth2_provider_app_secrets.id = oauth2_provider_app_tokens.app_secret_id
|
|
AND api_keys.id = oauth2_provider_app_tokens.api_key_id
|
|
AND oauth2_provider_app_secrets.app_id = $1
|
|
AND api_keys.user_id = $2
|
|
`
|
|
|
|
type DeleteOAuth2ProviderAppTokensByAppAndUserIDParams struct {
|
|
AppID uuid.UUID `db:"app_id" json:"app_id"`
|
|
UserID uuid.UUID `db:"user_id" json:"user_id"`
|
|
}
|
|
|
|
func (q *sqlQuerier) DeleteOAuth2ProviderAppTokensByAppAndUserID(ctx context.Context, arg DeleteOAuth2ProviderAppTokensByAppAndUserIDParams) error {
|
|
_, err := q.db.ExecContext(ctx, deleteOAuth2ProviderAppTokensByAppAndUserID, arg.AppID, arg.UserID)
|
|
return err
|
|
}
|
|
|
|
const getOAuth2ProviderAppByID = `-- name: GetOAuth2ProviderAppByID :one
|
|
SELECT id, created_at, updated_at, name, icon, callback_url FROM oauth2_provider_apps WHERE id = $1
|
|
`
|
|
|
|
func (q *sqlQuerier) GetOAuth2ProviderAppByID(ctx context.Context, id uuid.UUID) (OAuth2ProviderApp, error) {
|
|
row := q.db.QueryRowContext(ctx, getOAuth2ProviderAppByID, id)
|
|
var i OAuth2ProviderApp
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.Name,
|
|
&i.Icon,
|
|
&i.CallbackURL,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const getOAuth2ProviderAppCodeByID = `-- name: GetOAuth2ProviderAppCodeByID :one
|
|
SELECT id, created_at, expires_at, secret_prefix, hashed_secret, user_id, app_id FROM oauth2_provider_app_codes WHERE id = $1
|
|
`
|
|
|
|
func (q *sqlQuerier) GetOAuth2ProviderAppCodeByID(ctx context.Context, id uuid.UUID) (OAuth2ProviderAppCode, error) {
|
|
row := q.db.QueryRowContext(ctx, getOAuth2ProviderAppCodeByID, id)
|
|
var i OAuth2ProviderAppCode
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.CreatedAt,
|
|
&i.ExpiresAt,
|
|
&i.SecretPrefix,
|
|
&i.HashedSecret,
|
|
&i.UserID,
|
|
&i.AppID,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const getOAuth2ProviderAppCodeByPrefix = `-- name: GetOAuth2ProviderAppCodeByPrefix :one
|
|
SELECT id, created_at, expires_at, secret_prefix, hashed_secret, user_id, app_id FROM oauth2_provider_app_codes WHERE secret_prefix = $1
|
|
`
|
|
|
|
func (q *sqlQuerier) GetOAuth2ProviderAppCodeByPrefix(ctx context.Context, secretPrefix []byte) (OAuth2ProviderAppCode, error) {
|
|
row := q.db.QueryRowContext(ctx, getOAuth2ProviderAppCodeByPrefix, secretPrefix)
|
|
var i OAuth2ProviderAppCode
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.CreatedAt,
|
|
&i.ExpiresAt,
|
|
&i.SecretPrefix,
|
|
&i.HashedSecret,
|
|
&i.UserID,
|
|
&i.AppID,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const getOAuth2ProviderAppSecretByID = `-- name: GetOAuth2ProviderAppSecretByID :one
|
|
SELECT id, created_at, last_used_at, hashed_secret, display_secret, app_id, secret_prefix FROM oauth2_provider_app_secrets WHERE id = $1
|
|
`
|
|
|
|
func (q *sqlQuerier) GetOAuth2ProviderAppSecretByID(ctx context.Context, id uuid.UUID) (OAuth2ProviderAppSecret, error) {
|
|
row := q.db.QueryRowContext(ctx, getOAuth2ProviderAppSecretByID, id)
|
|
var i OAuth2ProviderAppSecret
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.CreatedAt,
|
|
&i.LastUsedAt,
|
|
&i.HashedSecret,
|
|
&i.DisplaySecret,
|
|
&i.AppID,
|
|
&i.SecretPrefix,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const getOAuth2ProviderAppSecretByPrefix = `-- name: GetOAuth2ProviderAppSecretByPrefix :one
|
|
SELECT id, created_at, last_used_at, hashed_secret, display_secret, app_id, secret_prefix FROM oauth2_provider_app_secrets WHERE secret_prefix = $1
|
|
`
|
|
|
|
func (q *sqlQuerier) GetOAuth2ProviderAppSecretByPrefix(ctx context.Context, secretPrefix []byte) (OAuth2ProviderAppSecret, error) {
|
|
row := q.db.QueryRowContext(ctx, getOAuth2ProviderAppSecretByPrefix, secretPrefix)
|
|
var i OAuth2ProviderAppSecret
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.CreatedAt,
|
|
&i.LastUsedAt,
|
|
&i.HashedSecret,
|
|
&i.DisplaySecret,
|
|
&i.AppID,
|
|
&i.SecretPrefix,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const getOAuth2ProviderAppSecretsByAppID = `-- name: GetOAuth2ProviderAppSecretsByAppID :many
|
|
SELECT id, created_at, last_used_at, hashed_secret, display_secret, app_id, secret_prefix FROM oauth2_provider_app_secrets WHERE app_id = $1 ORDER BY (created_at, id) ASC
|
|
`
|
|
|
|
func (q *sqlQuerier) GetOAuth2ProviderAppSecretsByAppID(ctx context.Context, appID uuid.UUID) ([]OAuth2ProviderAppSecret, error) {
|
|
rows, err := q.db.QueryContext(ctx, getOAuth2ProviderAppSecretsByAppID, appID)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []OAuth2ProviderAppSecret
|
|
for rows.Next() {
|
|
var i OAuth2ProviderAppSecret
|
|
if err := rows.Scan(
|
|
&i.ID,
|
|
&i.CreatedAt,
|
|
&i.LastUsedAt,
|
|
&i.HashedSecret,
|
|
&i.DisplaySecret,
|
|
&i.AppID,
|
|
&i.SecretPrefix,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const getOAuth2ProviderAppTokenByPrefix = `-- name: GetOAuth2ProviderAppTokenByPrefix :one
|
|
SELECT id, created_at, expires_at, hash_prefix, refresh_hash, app_secret_id, api_key_id FROM oauth2_provider_app_tokens WHERE hash_prefix = $1
|
|
`
|
|
|
|
func (q *sqlQuerier) GetOAuth2ProviderAppTokenByPrefix(ctx context.Context, hashPrefix []byte) (OAuth2ProviderAppToken, error) {
|
|
row := q.db.QueryRowContext(ctx, getOAuth2ProviderAppTokenByPrefix, hashPrefix)
|
|
var i OAuth2ProviderAppToken
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.CreatedAt,
|
|
&i.ExpiresAt,
|
|
&i.HashPrefix,
|
|
&i.RefreshHash,
|
|
&i.AppSecretID,
|
|
&i.APIKeyID,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const getOAuth2ProviderApps = `-- name: GetOAuth2ProviderApps :many
|
|
SELECT id, created_at, updated_at, name, icon, callback_url FROM oauth2_provider_apps ORDER BY (name, id) ASC
|
|
`
|
|
|
|
func (q *sqlQuerier) GetOAuth2ProviderApps(ctx context.Context) ([]OAuth2ProviderApp, error) {
|
|
rows, err := q.db.QueryContext(ctx, getOAuth2ProviderApps)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []OAuth2ProviderApp
|
|
for rows.Next() {
|
|
var i OAuth2ProviderApp
|
|
if err := rows.Scan(
|
|
&i.ID,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.Name,
|
|
&i.Icon,
|
|
&i.CallbackURL,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const getOAuth2ProviderAppsByUserID = `-- name: GetOAuth2ProviderAppsByUserID :many
|
|
SELECT
|
|
COUNT(DISTINCT oauth2_provider_app_tokens.id) as token_count,
|
|
oauth2_provider_apps.id, oauth2_provider_apps.created_at, oauth2_provider_apps.updated_at, oauth2_provider_apps.name, oauth2_provider_apps.icon, oauth2_provider_apps.callback_url
|
|
FROM oauth2_provider_app_tokens
|
|
INNER JOIN oauth2_provider_app_secrets
|
|
ON oauth2_provider_app_secrets.id = oauth2_provider_app_tokens.app_secret_id
|
|
INNER JOIN oauth2_provider_apps
|
|
ON oauth2_provider_apps.id = oauth2_provider_app_secrets.app_id
|
|
INNER JOIN api_keys
|
|
ON api_keys.id = oauth2_provider_app_tokens.api_key_id
|
|
WHERE
|
|
api_keys.user_id = $1
|
|
GROUP BY
|
|
oauth2_provider_apps.id
|
|
`
|
|
|
|
type GetOAuth2ProviderAppsByUserIDRow struct {
|
|
TokenCount int64 `db:"token_count" json:"token_count"`
|
|
OAuth2ProviderApp OAuth2ProviderApp `db:"oauth2_provider_app" json:"oauth2_provider_app"`
|
|
}
|
|
|
|
func (q *sqlQuerier) GetOAuth2ProviderAppsByUserID(ctx context.Context, userID uuid.UUID) ([]GetOAuth2ProviderAppsByUserIDRow, error) {
|
|
rows, err := q.db.QueryContext(ctx, getOAuth2ProviderAppsByUserID, userID)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []GetOAuth2ProviderAppsByUserIDRow
|
|
for rows.Next() {
|
|
var i GetOAuth2ProviderAppsByUserIDRow
|
|
if err := rows.Scan(
|
|
&i.TokenCount,
|
|
&i.OAuth2ProviderApp.ID,
|
|
&i.OAuth2ProviderApp.CreatedAt,
|
|
&i.OAuth2ProviderApp.UpdatedAt,
|
|
&i.OAuth2ProviderApp.Name,
|
|
&i.OAuth2ProviderApp.Icon,
|
|
&i.OAuth2ProviderApp.CallbackURL,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const insertOAuth2ProviderApp = `-- name: InsertOAuth2ProviderApp :one
|
|
INSERT INTO oauth2_provider_apps (
|
|
id,
|
|
created_at,
|
|
updated_at,
|
|
name,
|
|
icon,
|
|
callback_url
|
|
) VALUES(
|
|
$1,
|
|
$2,
|
|
$3,
|
|
$4,
|
|
$5,
|
|
$6
|
|
) RETURNING id, created_at, updated_at, name, icon, callback_url
|
|
`
|
|
|
|
type InsertOAuth2ProviderAppParams struct {
|
|
ID uuid.UUID `db:"id" json:"id"`
|
|
CreatedAt time.Time `db:"created_at" json:"created_at"`
|
|
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
|
|
Name string `db:"name" json:"name"`
|
|
Icon string `db:"icon" json:"icon"`
|
|
CallbackURL string `db:"callback_url" json:"callback_url"`
|
|
}
|
|
|
|
func (q *sqlQuerier) InsertOAuth2ProviderApp(ctx context.Context, arg InsertOAuth2ProviderAppParams) (OAuth2ProviderApp, error) {
|
|
row := q.db.QueryRowContext(ctx, insertOAuth2ProviderApp,
|
|
arg.ID,
|
|
arg.CreatedAt,
|
|
arg.UpdatedAt,
|
|
arg.Name,
|
|
arg.Icon,
|
|
arg.CallbackURL,
|
|
)
|
|
var i OAuth2ProviderApp
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.Name,
|
|
&i.Icon,
|
|
&i.CallbackURL,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const insertOAuth2ProviderAppCode = `-- name: InsertOAuth2ProviderAppCode :one
|
|
INSERT INTO oauth2_provider_app_codes (
|
|
id,
|
|
created_at,
|
|
expires_at,
|
|
secret_prefix,
|
|
hashed_secret,
|
|
app_id,
|
|
user_id
|
|
) VALUES(
|
|
$1,
|
|
$2,
|
|
$3,
|
|
$4,
|
|
$5,
|
|
$6,
|
|
$7
|
|
) RETURNING id, created_at, expires_at, secret_prefix, hashed_secret, user_id, app_id
|
|
`
|
|
|
|
type InsertOAuth2ProviderAppCodeParams struct {
|
|
ID uuid.UUID `db:"id" json:"id"`
|
|
CreatedAt time.Time `db:"created_at" json:"created_at"`
|
|
ExpiresAt time.Time `db:"expires_at" json:"expires_at"`
|
|
SecretPrefix []byte `db:"secret_prefix" json:"secret_prefix"`
|
|
HashedSecret []byte `db:"hashed_secret" json:"hashed_secret"`
|
|
AppID uuid.UUID `db:"app_id" json:"app_id"`
|
|
UserID uuid.UUID `db:"user_id" json:"user_id"`
|
|
}
|
|
|
|
func (q *sqlQuerier) InsertOAuth2ProviderAppCode(ctx context.Context, arg InsertOAuth2ProviderAppCodeParams) (OAuth2ProviderAppCode, error) {
|
|
row := q.db.QueryRowContext(ctx, insertOAuth2ProviderAppCode,
|
|
arg.ID,
|
|
arg.CreatedAt,
|
|
arg.ExpiresAt,
|
|
arg.SecretPrefix,
|
|
arg.HashedSecret,
|
|
arg.AppID,
|
|
arg.UserID,
|
|
)
|
|
var i OAuth2ProviderAppCode
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.CreatedAt,
|
|
&i.ExpiresAt,
|
|
&i.SecretPrefix,
|
|
&i.HashedSecret,
|
|
&i.UserID,
|
|
&i.AppID,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const insertOAuth2ProviderAppSecret = `-- name: InsertOAuth2ProviderAppSecret :one
|
|
INSERT INTO oauth2_provider_app_secrets (
|
|
id,
|
|
created_at,
|
|
secret_prefix,
|
|
hashed_secret,
|
|
display_secret,
|
|
app_id
|
|
) VALUES(
|
|
$1,
|
|
$2,
|
|
$3,
|
|
$4,
|
|
$5,
|
|
$6
|
|
) RETURNING id, created_at, last_used_at, hashed_secret, display_secret, app_id, secret_prefix
|
|
`
|
|
|
|
type InsertOAuth2ProviderAppSecretParams struct {
|
|
ID uuid.UUID `db:"id" json:"id"`
|
|
CreatedAt time.Time `db:"created_at" json:"created_at"`
|
|
SecretPrefix []byte `db:"secret_prefix" json:"secret_prefix"`
|
|
HashedSecret []byte `db:"hashed_secret" json:"hashed_secret"`
|
|
DisplaySecret string `db:"display_secret" json:"display_secret"`
|
|
AppID uuid.UUID `db:"app_id" json:"app_id"`
|
|
}
|
|
|
|
func (q *sqlQuerier) InsertOAuth2ProviderAppSecret(ctx context.Context, arg InsertOAuth2ProviderAppSecretParams) (OAuth2ProviderAppSecret, error) {
|
|
row := q.db.QueryRowContext(ctx, insertOAuth2ProviderAppSecret,
|
|
arg.ID,
|
|
arg.CreatedAt,
|
|
arg.SecretPrefix,
|
|
arg.HashedSecret,
|
|
arg.DisplaySecret,
|
|
arg.AppID,
|
|
)
|
|
var i OAuth2ProviderAppSecret
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.CreatedAt,
|
|
&i.LastUsedAt,
|
|
&i.HashedSecret,
|
|
&i.DisplaySecret,
|
|
&i.AppID,
|
|
&i.SecretPrefix,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const insertOAuth2ProviderAppToken = `-- name: InsertOAuth2ProviderAppToken :one
|
|
INSERT INTO oauth2_provider_app_tokens (
|
|
id,
|
|
created_at,
|
|
expires_at,
|
|
hash_prefix,
|
|
refresh_hash,
|
|
app_secret_id,
|
|
api_key_id
|
|
) VALUES(
|
|
$1,
|
|
$2,
|
|
$3,
|
|
$4,
|
|
$5,
|
|
$6,
|
|
$7
|
|
) RETURNING id, created_at, expires_at, hash_prefix, refresh_hash, app_secret_id, api_key_id
|
|
`
|
|
|
|
type InsertOAuth2ProviderAppTokenParams struct {
|
|
ID uuid.UUID `db:"id" json:"id"`
|
|
CreatedAt time.Time `db:"created_at" json:"created_at"`
|
|
ExpiresAt time.Time `db:"expires_at" json:"expires_at"`
|
|
HashPrefix []byte `db:"hash_prefix" json:"hash_prefix"`
|
|
RefreshHash []byte `db:"refresh_hash" json:"refresh_hash"`
|
|
AppSecretID uuid.UUID `db:"app_secret_id" json:"app_secret_id"`
|
|
APIKeyID string `db:"api_key_id" json:"api_key_id"`
|
|
}
|
|
|
|
func (q *sqlQuerier) InsertOAuth2ProviderAppToken(ctx context.Context, arg InsertOAuth2ProviderAppTokenParams) (OAuth2ProviderAppToken, error) {
|
|
row := q.db.QueryRowContext(ctx, insertOAuth2ProviderAppToken,
|
|
arg.ID,
|
|
arg.CreatedAt,
|
|
arg.ExpiresAt,
|
|
arg.HashPrefix,
|
|
arg.RefreshHash,
|
|
arg.AppSecretID,
|
|
arg.APIKeyID,
|
|
)
|
|
var i OAuth2ProviderAppToken
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.CreatedAt,
|
|
&i.ExpiresAt,
|
|
&i.HashPrefix,
|
|
&i.RefreshHash,
|
|
&i.AppSecretID,
|
|
&i.APIKeyID,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const updateOAuth2ProviderAppByID = `-- name: UpdateOAuth2ProviderAppByID :one
|
|
UPDATE oauth2_provider_apps SET
|
|
updated_at = $2,
|
|
name = $3,
|
|
icon = $4,
|
|
callback_url = $5
|
|
WHERE id = $1 RETURNING id, created_at, updated_at, name, icon, callback_url
|
|
`
|
|
|
|
type UpdateOAuth2ProviderAppByIDParams struct {
|
|
ID uuid.UUID `db:"id" json:"id"`
|
|
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
|
|
Name string `db:"name" json:"name"`
|
|
Icon string `db:"icon" json:"icon"`
|
|
CallbackURL string `db:"callback_url" json:"callback_url"`
|
|
}
|
|
|
|
func (q *sqlQuerier) UpdateOAuth2ProviderAppByID(ctx context.Context, arg UpdateOAuth2ProviderAppByIDParams) (OAuth2ProviderApp, error) {
|
|
row := q.db.QueryRowContext(ctx, updateOAuth2ProviderAppByID,
|
|
arg.ID,
|
|
arg.UpdatedAt,
|
|
arg.Name,
|
|
arg.Icon,
|
|
arg.CallbackURL,
|
|
)
|
|
var i OAuth2ProviderApp
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.Name,
|
|
&i.Icon,
|
|
&i.CallbackURL,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const updateOAuth2ProviderAppSecretByID = `-- name: UpdateOAuth2ProviderAppSecretByID :one
|
|
UPDATE oauth2_provider_app_secrets SET
|
|
last_used_at = $2
|
|
WHERE id = $1 RETURNING id, created_at, last_used_at, hashed_secret, display_secret, app_id, secret_prefix
|
|
`
|
|
|
|
type UpdateOAuth2ProviderAppSecretByIDParams struct {
|
|
ID uuid.UUID `db:"id" json:"id"`
|
|
LastUsedAt sql.NullTime `db:"last_used_at" json:"last_used_at"`
|
|
}
|
|
|
|
func (q *sqlQuerier) UpdateOAuth2ProviderAppSecretByID(ctx context.Context, arg UpdateOAuth2ProviderAppSecretByIDParams) (OAuth2ProviderAppSecret, error) {
|
|
row := q.db.QueryRowContext(ctx, updateOAuth2ProviderAppSecretByID, arg.ID, arg.LastUsedAt)
|
|
var i OAuth2ProviderAppSecret
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.CreatedAt,
|
|
&i.LastUsedAt,
|
|
&i.HashedSecret,
|
|
&i.DisplaySecret,
|
|
&i.AppID,
|
|
&i.SecretPrefix,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const deleteOrganizationMember = `-- name: DeleteOrganizationMember :exec
|
|
DELETE
|
|
FROM
|
|
organization_members
|
|
WHERE
|
|
organization_id = $1 AND
|
|
user_id = $2
|
|
`
|
|
|
|
type DeleteOrganizationMemberParams struct {
|
|
OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
|
|
UserID uuid.UUID `db:"user_id" json:"user_id"`
|
|
}
|
|
|
|
func (q *sqlQuerier) DeleteOrganizationMember(ctx context.Context, arg DeleteOrganizationMemberParams) error {
|
|
_, err := q.db.ExecContext(ctx, deleteOrganizationMember, arg.OrganizationID, arg.UserID)
|
|
return err
|
|
}
|
|
|
|
const getOrganizationIDsByMemberIDs = `-- name: GetOrganizationIDsByMemberIDs :many
|
|
SELECT
|
|
user_id, array_agg(organization_id) :: uuid [ ] AS "organization_IDs"
|
|
FROM
|
|
organization_members
|
|
WHERE
|
|
user_id = ANY($1 :: uuid [ ])
|
|
GROUP BY
|
|
user_id
|
|
`
|
|
|
|
type GetOrganizationIDsByMemberIDsRow struct {
|
|
UserID uuid.UUID `db:"user_id" json:"user_id"`
|
|
OrganizationIDs []uuid.UUID `db:"organization_IDs" json:"organization_IDs"`
|
|
}
|
|
|
|
func (q *sqlQuerier) GetOrganizationIDsByMemberIDs(ctx context.Context, ids []uuid.UUID) ([]GetOrganizationIDsByMemberIDsRow, error) {
|
|
rows, err := q.db.QueryContext(ctx, getOrganizationIDsByMemberIDs, pq.Array(ids))
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []GetOrganizationIDsByMemberIDsRow
|
|
for rows.Next() {
|
|
var i GetOrganizationIDsByMemberIDsRow
|
|
if err := rows.Scan(&i.UserID, pq.Array(&i.OrganizationIDs)); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const insertOrganizationMember = `-- name: InsertOrganizationMember :one
|
|
INSERT INTO
|
|
organization_members (
|
|
organization_id,
|
|
user_id,
|
|
created_at,
|
|
updated_at,
|
|
roles
|
|
)
|
|
VALUES
|
|
($1, $2, $3, $4, $5) RETURNING user_id, organization_id, created_at, updated_at, roles
|
|
`
|
|
|
|
type InsertOrganizationMemberParams struct {
|
|
OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
|
|
UserID uuid.UUID `db:"user_id" json:"user_id"`
|
|
CreatedAt time.Time `db:"created_at" json:"created_at"`
|
|
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
|
|
Roles []string `db:"roles" json:"roles"`
|
|
}
|
|
|
|
func (q *sqlQuerier) InsertOrganizationMember(ctx context.Context, arg InsertOrganizationMemberParams) (OrganizationMember, error) {
|
|
row := q.db.QueryRowContext(ctx, insertOrganizationMember,
|
|
arg.OrganizationID,
|
|
arg.UserID,
|
|
arg.CreatedAt,
|
|
arg.UpdatedAt,
|
|
pq.Array(arg.Roles),
|
|
)
|
|
var i OrganizationMember
|
|
err := row.Scan(
|
|
&i.UserID,
|
|
&i.OrganizationID,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
pq.Array(&i.Roles),
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const organizationMembers = `-- name: OrganizationMembers :many
|
|
SELECT
|
|
organization_members.user_id, organization_members.organization_id, organization_members.created_at, organization_members.updated_at, organization_members.roles,
|
|
users.username, users.avatar_url, users.name, users.email, users.rbac_roles as "global_roles"
|
|
FROM
|
|
organization_members
|
|
INNER JOIN
|
|
users ON organization_members.user_id = users.id
|
|
WHERE
|
|
-- Filter by organization id
|
|
CASE
|
|
WHEN $1 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
|
|
organization_id = $1
|
|
ELSE true
|
|
END
|
|
-- Filter by user id
|
|
AND CASE
|
|
WHEN $2 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
|
|
user_id = $2
|
|
ELSE true
|
|
END
|
|
`
|
|
|
|
type OrganizationMembersParams struct {
|
|
OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
|
|
UserID uuid.UUID `db:"user_id" json:"user_id"`
|
|
}
|
|
|
|
type OrganizationMembersRow struct {
|
|
OrganizationMember OrganizationMember `db:"organization_member" json:"organization_member"`
|
|
Username string `db:"username" json:"username"`
|
|
AvatarURL string `db:"avatar_url" json:"avatar_url"`
|
|
Name string `db:"name" json:"name"`
|
|
Email string `db:"email" json:"email"`
|
|
GlobalRoles pq.StringArray `db:"global_roles" json:"global_roles"`
|
|
}
|
|
|
|
// Arguments are optional with uuid.Nil to ignore.
|
|
// - Use just 'organization_id' to get all members of an org
|
|
// - Use just 'user_id' to get all orgs a user is a member of
|
|
// - Use both to get a specific org member row
|
|
func (q *sqlQuerier) OrganizationMembers(ctx context.Context, arg OrganizationMembersParams) ([]OrganizationMembersRow, error) {
|
|
rows, err := q.db.QueryContext(ctx, organizationMembers, arg.OrganizationID, arg.UserID)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []OrganizationMembersRow
|
|
for rows.Next() {
|
|
var i OrganizationMembersRow
|
|
if err := rows.Scan(
|
|
&i.OrganizationMember.UserID,
|
|
&i.OrganizationMember.OrganizationID,
|
|
&i.OrganizationMember.CreatedAt,
|
|
&i.OrganizationMember.UpdatedAt,
|
|
pq.Array(&i.OrganizationMember.Roles),
|
|
&i.Username,
|
|
&i.AvatarURL,
|
|
&i.Name,
|
|
&i.Email,
|
|
&i.GlobalRoles,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const updateMemberRoles = `-- name: UpdateMemberRoles :one
|
|
UPDATE
|
|
organization_members
|
|
SET
|
|
-- Remove all duplicates from the roles.
|
|
roles = ARRAY(SELECT DISTINCT UNNEST($1 :: text[]))
|
|
WHERE
|
|
user_id = $2
|
|
AND organization_id = $3
|
|
RETURNING user_id, organization_id, created_at, updated_at, roles
|
|
`
|
|
|
|
type UpdateMemberRolesParams struct {
|
|
GrantedRoles []string `db:"granted_roles" json:"granted_roles"`
|
|
UserID uuid.UUID `db:"user_id" json:"user_id"`
|
|
OrgID uuid.UUID `db:"org_id" json:"org_id"`
|
|
}
|
|
|
|
func (q *sqlQuerier) UpdateMemberRoles(ctx context.Context, arg UpdateMemberRolesParams) (OrganizationMember, error) {
|
|
row := q.db.QueryRowContext(ctx, updateMemberRoles, pq.Array(arg.GrantedRoles), arg.UserID, arg.OrgID)
|
|
var i OrganizationMember
|
|
err := row.Scan(
|
|
&i.UserID,
|
|
&i.OrganizationID,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
pq.Array(&i.Roles),
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const deleteOrganization = `-- name: DeleteOrganization :exec
|
|
DELETE FROM
|
|
organizations
|
|
WHERE
|
|
id = $1 AND
|
|
is_default = false
|
|
`
|
|
|
|
func (q *sqlQuerier) DeleteOrganization(ctx context.Context, id uuid.UUID) error {
|
|
_, err := q.db.ExecContext(ctx, deleteOrganization, id)
|
|
return err
|
|
}
|
|
|
|
const getDefaultOrganization = `-- name: GetDefaultOrganization :one
|
|
SELECT
|
|
id, name, description, created_at, updated_at, is_default, display_name, icon
|
|
FROM
|
|
organizations
|
|
WHERE
|
|
is_default = true
|
|
LIMIT
|
|
1
|
|
`
|
|
|
|
func (q *sqlQuerier) GetDefaultOrganization(ctx context.Context) (Organization, error) {
|
|
row := q.db.QueryRowContext(ctx, getDefaultOrganization)
|
|
var i Organization
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.Name,
|
|
&i.Description,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.IsDefault,
|
|
&i.DisplayName,
|
|
&i.Icon,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const getOrganizationByID = `-- name: GetOrganizationByID :one
|
|
SELECT
|
|
id, name, description, created_at, updated_at, is_default, display_name, icon
|
|
FROM
|
|
organizations
|
|
WHERE
|
|
id = $1
|
|
`
|
|
|
|
func (q *sqlQuerier) GetOrganizationByID(ctx context.Context, id uuid.UUID) (Organization, error) {
|
|
row := q.db.QueryRowContext(ctx, getOrganizationByID, id)
|
|
var i Organization
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.Name,
|
|
&i.Description,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.IsDefault,
|
|
&i.DisplayName,
|
|
&i.Icon,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const getOrganizationByName = `-- name: GetOrganizationByName :one
|
|
SELECT
|
|
id, name, description, created_at, updated_at, is_default, display_name, icon
|
|
FROM
|
|
organizations
|
|
WHERE
|
|
LOWER("name") = LOWER($1)
|
|
LIMIT
|
|
1
|
|
`
|
|
|
|
func (q *sqlQuerier) GetOrganizationByName(ctx context.Context, name string) (Organization, error) {
|
|
row := q.db.QueryRowContext(ctx, getOrganizationByName, name)
|
|
var i Organization
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.Name,
|
|
&i.Description,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.IsDefault,
|
|
&i.DisplayName,
|
|
&i.Icon,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const getOrganizations = `-- name: GetOrganizations :many
|
|
SELECT
|
|
id, name, description, created_at, updated_at, is_default, display_name, icon
|
|
FROM
|
|
organizations
|
|
WHERE
|
|
true
|
|
-- Filter by ids
|
|
AND CASE
|
|
WHEN array_length($1 :: uuid[], 1) > 0 THEN
|
|
id = ANY($1)
|
|
ELSE true
|
|
END
|
|
AND CASE
|
|
WHEN $2::text != '' THEN
|
|
LOWER("name") = LOWER($2)
|
|
ELSE true
|
|
END
|
|
`
|
|
|
|
type GetOrganizationsParams struct {
|
|
IDs []uuid.UUID `db:"ids" json:"ids"`
|
|
Name string `db:"name" json:"name"`
|
|
}
|
|
|
|
func (q *sqlQuerier) GetOrganizations(ctx context.Context, arg GetOrganizationsParams) ([]Organization, error) {
|
|
rows, err := q.db.QueryContext(ctx, getOrganizations, pq.Array(arg.IDs), arg.Name)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []Organization
|
|
for rows.Next() {
|
|
var i Organization
|
|
if err := rows.Scan(
|
|
&i.ID,
|
|
&i.Name,
|
|
&i.Description,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.IsDefault,
|
|
&i.DisplayName,
|
|
&i.Icon,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const getOrganizationsByUserID = `-- name: GetOrganizationsByUserID :many
|
|
SELECT
|
|
id, name, description, created_at, updated_at, is_default, display_name, icon
|
|
FROM
|
|
organizations
|
|
WHERE
|
|
id = ANY(
|
|
SELECT
|
|
organization_id
|
|
FROM
|
|
organization_members
|
|
WHERE
|
|
user_id = $1
|
|
)
|
|
`
|
|
|
|
func (q *sqlQuerier) GetOrganizationsByUserID(ctx context.Context, userID uuid.UUID) ([]Organization, error) {
|
|
rows, err := q.db.QueryContext(ctx, getOrganizationsByUserID, userID)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []Organization
|
|
for rows.Next() {
|
|
var i Organization
|
|
if err := rows.Scan(
|
|
&i.ID,
|
|
&i.Name,
|
|
&i.Description,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.IsDefault,
|
|
&i.DisplayName,
|
|
&i.Icon,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const insertOrganization = `-- name: InsertOrganization :one
|
|
INSERT INTO
|
|
organizations (id, "name", display_name, description, icon, created_at, updated_at, is_default)
|
|
VALUES
|
|
-- If no organizations exist, and this is the first, make it the default.
|
|
($1, $2, $3, $4, $5, $6, $7, (SELECT TRUE FROM organizations LIMIT 1) IS NULL) RETURNING id, name, description, created_at, updated_at, is_default, display_name, icon
|
|
`
|
|
|
|
type InsertOrganizationParams struct {
|
|
ID uuid.UUID `db:"id" json:"id"`
|
|
Name string `db:"name" json:"name"`
|
|
DisplayName string `db:"display_name" json:"display_name"`
|
|
Description string `db:"description" json:"description"`
|
|
Icon string `db:"icon" json:"icon"`
|
|
CreatedAt time.Time `db:"created_at" json:"created_at"`
|
|
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
|
|
}
|
|
|
|
func (q *sqlQuerier) InsertOrganization(ctx context.Context, arg InsertOrganizationParams) (Organization, error) {
|
|
row := q.db.QueryRowContext(ctx, insertOrganization,
|
|
arg.ID,
|
|
arg.Name,
|
|
arg.DisplayName,
|
|
arg.Description,
|
|
arg.Icon,
|
|
arg.CreatedAt,
|
|
arg.UpdatedAt,
|
|
)
|
|
var i Organization
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.Name,
|
|
&i.Description,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.IsDefault,
|
|
&i.DisplayName,
|
|
&i.Icon,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const updateOrganization = `-- name: UpdateOrganization :one
|
|
UPDATE
|
|
organizations
|
|
SET
|
|
updated_at = $1,
|
|
name = $2,
|
|
display_name = $3,
|
|
description = $4,
|
|
icon = $5
|
|
WHERE
|
|
id = $6
|
|
RETURNING id, name, description, created_at, updated_at, is_default, display_name, icon
|
|
`
|
|
|
|
type UpdateOrganizationParams struct {
|
|
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
|
|
Name string `db:"name" json:"name"`
|
|
DisplayName string `db:"display_name" json:"display_name"`
|
|
Description string `db:"description" json:"description"`
|
|
Icon string `db:"icon" json:"icon"`
|
|
ID uuid.UUID `db:"id" json:"id"`
|
|
}
|
|
|
|
func (q *sqlQuerier) UpdateOrganization(ctx context.Context, arg UpdateOrganizationParams) (Organization, error) {
|
|
row := q.db.QueryRowContext(ctx, updateOrganization,
|
|
arg.UpdatedAt,
|
|
arg.Name,
|
|
arg.DisplayName,
|
|
arg.Description,
|
|
arg.Icon,
|
|
arg.ID,
|
|
)
|
|
var i Organization
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.Name,
|
|
&i.Description,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.IsDefault,
|
|
&i.DisplayName,
|
|
&i.Icon,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const getParameterSchemasByJobID = `-- name: GetParameterSchemasByJobID :many
|
|
SELECT
|
|
id, created_at, job_id, name, description, default_source_scheme, default_source_value, allow_override_source, default_destination_scheme, allow_override_destination, default_refresh, redisplay_value, validation_error, validation_condition, validation_type_system, validation_value_type, index
|
|
FROM
|
|
parameter_schemas
|
|
WHERE
|
|
job_id = $1
|
|
ORDER BY
|
|
index
|
|
`
|
|
|
|
func (q *sqlQuerier) GetParameterSchemasByJobID(ctx context.Context, jobID uuid.UUID) ([]ParameterSchema, error) {
|
|
rows, err := q.db.QueryContext(ctx, getParameterSchemasByJobID, jobID)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []ParameterSchema
|
|
for rows.Next() {
|
|
var i ParameterSchema
|
|
if err := rows.Scan(
|
|
&i.ID,
|
|
&i.CreatedAt,
|
|
&i.JobID,
|
|
&i.Name,
|
|
&i.Description,
|
|
&i.DefaultSourceScheme,
|
|
&i.DefaultSourceValue,
|
|
&i.AllowOverrideSource,
|
|
&i.DefaultDestinationScheme,
|
|
&i.AllowOverrideDestination,
|
|
&i.DefaultRefresh,
|
|
&i.RedisplayValue,
|
|
&i.ValidationError,
|
|
&i.ValidationCondition,
|
|
&i.ValidationTypeSystem,
|
|
&i.ValidationValueType,
|
|
&i.Index,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const deleteOldProvisionerDaemons = `-- name: DeleteOldProvisionerDaemons :exec
|
|
DELETE FROM provisioner_daemons WHERE (
|
|
(created_at < (NOW() - INTERVAL '7 days') AND last_seen_at IS NULL) OR
|
|
(last_seen_at IS NOT NULL AND last_seen_at < (NOW() - INTERVAL '7 days'))
|
|
)
|
|
`
|
|
|
|
// Delete provisioner daemons that have been created at least a week ago
|
|
// and have not connected to coderd since a week.
|
|
// A provisioner daemon with "zeroed" last_seen_at column indicates possible
|
|
// connectivity issues (no provisioner daemon activity since registration).
|
|
func (q *sqlQuerier) DeleteOldProvisionerDaemons(ctx context.Context) error {
|
|
_, err := q.db.ExecContext(ctx, deleteOldProvisionerDaemons)
|
|
return err
|
|
}
|
|
|
|
const getProvisionerDaemons = `-- name: GetProvisionerDaemons :many
|
|
SELECT
|
|
id, created_at, name, provisioners, replica_id, tags, last_seen_at, version, api_version, organization_id
|
|
FROM
|
|
provisioner_daemons
|
|
`
|
|
|
|
func (q *sqlQuerier) GetProvisionerDaemons(ctx context.Context) ([]ProvisionerDaemon, error) {
|
|
rows, err := q.db.QueryContext(ctx, getProvisionerDaemons)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []ProvisionerDaemon
|
|
for rows.Next() {
|
|
var i ProvisionerDaemon
|
|
if err := rows.Scan(
|
|
&i.ID,
|
|
&i.CreatedAt,
|
|
&i.Name,
|
|
pq.Array(&i.Provisioners),
|
|
&i.ReplicaID,
|
|
&i.Tags,
|
|
&i.LastSeenAt,
|
|
&i.Version,
|
|
&i.APIVersion,
|
|
&i.OrganizationID,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const getProvisionerDaemonsByOrganization = `-- name: GetProvisionerDaemonsByOrganization :many
|
|
SELECT
|
|
id, created_at, name, provisioners, replica_id, tags, last_seen_at, version, api_version, organization_id
|
|
FROM
|
|
provisioner_daemons
|
|
WHERE
|
|
organization_id = $1
|
|
`
|
|
|
|
func (q *sqlQuerier) GetProvisionerDaemonsByOrganization(ctx context.Context, organizationID uuid.UUID) ([]ProvisionerDaemon, error) {
|
|
rows, err := q.db.QueryContext(ctx, getProvisionerDaemonsByOrganization, organizationID)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []ProvisionerDaemon
|
|
for rows.Next() {
|
|
var i ProvisionerDaemon
|
|
if err := rows.Scan(
|
|
&i.ID,
|
|
&i.CreatedAt,
|
|
&i.Name,
|
|
pq.Array(&i.Provisioners),
|
|
&i.ReplicaID,
|
|
&i.Tags,
|
|
&i.LastSeenAt,
|
|
&i.Version,
|
|
&i.APIVersion,
|
|
&i.OrganizationID,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const updateProvisionerDaemonLastSeenAt = `-- name: UpdateProvisionerDaemonLastSeenAt :exec
|
|
UPDATE provisioner_daemons
|
|
SET
|
|
last_seen_at = $1
|
|
WHERE
|
|
id = $2
|
|
AND
|
|
last_seen_at <= $1
|
|
`
|
|
|
|
type UpdateProvisionerDaemonLastSeenAtParams struct {
|
|
LastSeenAt sql.NullTime `db:"last_seen_at" json:"last_seen_at"`
|
|
ID uuid.UUID `db:"id" json:"id"`
|
|
}
|
|
|
|
func (q *sqlQuerier) UpdateProvisionerDaemonLastSeenAt(ctx context.Context, arg UpdateProvisionerDaemonLastSeenAtParams) error {
|
|
_, err := q.db.ExecContext(ctx, updateProvisionerDaemonLastSeenAt, arg.LastSeenAt, arg.ID)
|
|
return err
|
|
}
|
|
|
|
const upsertProvisionerDaemon = `-- name: UpsertProvisionerDaemon :one
|
|
INSERT INTO
|
|
provisioner_daemons (
|
|
id,
|
|
created_at,
|
|
"name",
|
|
provisioners,
|
|
tags,
|
|
last_seen_at,
|
|
"version",
|
|
organization_id,
|
|
api_version
|
|
)
|
|
VALUES (
|
|
gen_random_uuid(),
|
|
$1,
|
|
$2,
|
|
$3,
|
|
$4,
|
|
$5,
|
|
$6,
|
|
$7,
|
|
$8
|
|
) ON CONFLICT("organization_id", "name", LOWER(COALESCE(tags ->> 'owner'::text, ''::text))) DO UPDATE SET
|
|
provisioners = $3,
|
|
tags = $4,
|
|
last_seen_at = $5,
|
|
"version" = $6,
|
|
api_version = $8,
|
|
organization_id = $7
|
|
RETURNING id, created_at, name, provisioners, replica_id, tags, last_seen_at, version, api_version, organization_id
|
|
`
|
|
|
|
type UpsertProvisionerDaemonParams struct {
|
|
CreatedAt time.Time `db:"created_at" json:"created_at"`
|
|
Name string `db:"name" json:"name"`
|
|
Provisioners []ProvisionerType `db:"provisioners" json:"provisioners"`
|
|
Tags StringMap `db:"tags" json:"tags"`
|
|
LastSeenAt sql.NullTime `db:"last_seen_at" json:"last_seen_at"`
|
|
Version string `db:"version" json:"version"`
|
|
OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
|
|
APIVersion string `db:"api_version" json:"api_version"`
|
|
}
|
|
|
|
func (q *sqlQuerier) UpsertProvisionerDaemon(ctx context.Context, arg UpsertProvisionerDaemonParams) (ProvisionerDaemon, error) {
|
|
row := q.db.QueryRowContext(ctx, upsertProvisionerDaemon,
|
|
arg.CreatedAt,
|
|
arg.Name,
|
|
pq.Array(arg.Provisioners),
|
|
arg.Tags,
|
|
arg.LastSeenAt,
|
|
arg.Version,
|
|
arg.OrganizationID,
|
|
arg.APIVersion,
|
|
)
|
|
var i ProvisionerDaemon
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.CreatedAt,
|
|
&i.Name,
|
|
pq.Array(&i.Provisioners),
|
|
&i.ReplicaID,
|
|
&i.Tags,
|
|
&i.LastSeenAt,
|
|
&i.Version,
|
|
&i.APIVersion,
|
|
&i.OrganizationID,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const getProvisionerLogsAfterID = `-- name: GetProvisionerLogsAfterID :many
|
|
SELECT
|
|
job_id, created_at, source, level, stage, output, id
|
|
FROM
|
|
provisioner_job_logs
|
|
WHERE
|
|
job_id = $1
|
|
AND (
|
|
id > $2
|
|
) ORDER BY id ASC
|
|
`
|
|
|
|
type GetProvisionerLogsAfterIDParams struct {
|
|
JobID uuid.UUID `db:"job_id" json:"job_id"`
|
|
CreatedAfter int64 `db:"created_after" json:"created_after"`
|
|
}
|
|
|
|
func (q *sqlQuerier) GetProvisionerLogsAfterID(ctx context.Context, arg GetProvisionerLogsAfterIDParams) ([]ProvisionerJobLog, error) {
|
|
rows, err := q.db.QueryContext(ctx, getProvisionerLogsAfterID, arg.JobID, arg.CreatedAfter)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []ProvisionerJobLog
|
|
for rows.Next() {
|
|
var i ProvisionerJobLog
|
|
if err := rows.Scan(
|
|
&i.JobID,
|
|
&i.CreatedAt,
|
|
&i.Source,
|
|
&i.Level,
|
|
&i.Stage,
|
|
&i.Output,
|
|
&i.ID,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const insertProvisionerJobLogs = `-- name: InsertProvisionerJobLogs :many
INSERT INTO
	provisioner_job_logs
SELECT
	$1 :: uuid AS job_id,
	unnest($2 :: timestamptz [ ]) AS created_at,
	unnest($3 :: log_source [ ]) AS source,
	unnest($4 :: log_level [ ]) AS LEVEL,
	unnest($5 :: VARCHAR(128) [ ]) AS stage,
	unnest($6 :: VARCHAR(1024) [ ]) AS output RETURNING job_id, created_at, source, level, stage, output, id
`

// InsertProvisionerJobLogsParams carries parallel arrays: element i of each
// slice describes log row i. All slices must be the same length.
type InsertProvisionerJobLogsParams struct {
	JobID     uuid.UUID   `db:"job_id" json:"job_id"`
	CreatedAt []time.Time `db:"created_at" json:"created_at"`
	Source    []LogSource `db:"source" json:"source"`
	Level     []LogLevel  `db:"level" json:"level"`
	Stage     []string    `db:"stage" json:"stage"`
	Output    []string    `db:"output" json:"output"`
}

// InsertProvisionerJobLogs bulk-inserts log rows for a single job in one
// statement via unnest(), returning the inserted rows (including their
// generated ids).
func (q *sqlQuerier) InsertProvisionerJobLogs(ctx context.Context, arg InsertProvisionerJobLogsParams) ([]ProvisionerJobLog, error) {
	rows, err := q.db.QueryContext(ctx, insertProvisionerJobLogs,
		arg.JobID,
		pq.Array(arg.CreatedAt),
		pq.Array(arg.Source),
		pq.Array(arg.Level),
		pq.Array(arg.Stage),
		pq.Array(arg.Output),
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []ProvisionerJobLog
	for rows.Next() {
		var i ProvisionerJobLog
		if err := rows.Scan(
			&i.JobID,
			&i.CreatedAt,
			&i.Source,
			&i.Level,
			&i.Stage,
			&i.Output,
			&i.ID,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const acquireProvisionerJob = `-- name: AcquireProvisionerJob :one
UPDATE
	provisioner_jobs
SET
	started_at = $1,
	updated_at = $1,
	worker_id = $2
WHERE
	id = (
		SELECT
			id
		FROM
			provisioner_jobs AS nested
		WHERE
			nested.started_at IS NULL
			AND nested.organization_id = $3
			-- Ensure the caller has the correct provisioner.
			AND nested.provisioner = ANY($4 :: provisioner_type [ ])
			AND CASE
				-- Special case for untagged provisioners: only match untagged jobs.
				WHEN nested.tags :: jsonb = '{"scope": "organization", "owner": ""}' :: jsonb
				THEN nested.tags :: jsonb = $5 :: jsonb
				-- Ensure the caller satisfies all job tags.
				ELSE nested.tags :: jsonb <@ $5 :: jsonb
			END
		ORDER BY
			nested.created_at
		FOR UPDATE
		SKIP LOCKED
		LIMIT
			1
	) RETURNING id, created_at, updated_at, started_at, canceled_at, completed_at, error, organization_id, initiator_id, provisioner, storage_method, type, input, worker_id, file_id, tags, error_code, trace_metadata, job_status
`

type AcquireProvisionerJobParams struct {
	StartedAt      sql.NullTime      `db:"started_at" json:"started_at"`
	WorkerID       uuid.NullUUID     `db:"worker_id" json:"worker_id"`
	OrganizationID uuid.UUID         `db:"organization_id" json:"organization_id"`
	Types          []ProvisionerType `db:"types" json:"types"`
	// Tags is a JSON object; a job is eligible when its tags are a subset
	// (<@) of this set, except untagged jobs which require an exact match.
	Tags json.RawMessage `db:"tags" json:"tags"`
}

// Acquires the lock for a single job that isn't started, completed,
// canceled, and that matches an array of provisioner types.
//
// SKIP LOCKED is used to jump over locked rows. This prevents
// multiple provisioners from acquiring the same jobs. See:
// https://www.postgresql.org/docs/9.5/sql-select.html#SQL-FOR-UPDATE-SHARE
func (q *sqlQuerier) AcquireProvisionerJob(ctx context.Context, arg AcquireProvisionerJobParams) (ProvisionerJob, error) {
	row := q.db.QueryRowContext(ctx, acquireProvisionerJob,
		arg.StartedAt,
		arg.WorkerID,
		arg.OrganizationID,
		pq.Array(arg.Types),
		arg.Tags,
	)
	var i ProvisionerJob
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.StartedAt,
		&i.CanceledAt,
		&i.CompletedAt,
		&i.Error,
		&i.OrganizationID,
		&i.InitiatorID,
		&i.Provisioner,
		&i.StorageMethod,
		&i.Type,
		&i.Input,
		&i.WorkerID,
		&i.FileID,
		&i.Tags,
		&i.ErrorCode,
		&i.TraceMetadata,
		&i.JobStatus,
	)
	return i, err
}
|
|
|
|
const getHungProvisionerJobs = `-- name: GetHungProvisionerJobs :many
SELECT
	id, created_at, updated_at, started_at, canceled_at, completed_at, error, organization_id, initiator_id, provisioner, storage_method, type, input, worker_id, file_id, tags, error_code, trace_metadata, job_status
FROM
	provisioner_jobs
WHERE
	updated_at < $1
	AND started_at IS NOT NULL
	AND completed_at IS NULL
`

// GetHungProvisionerJobs returns jobs that have been started but not
// completed and whose updated_at is older than the given cutoff — i.e. jobs
// whose worker has presumably stopped heartbeating.
func (q *sqlQuerier) GetHungProvisionerJobs(ctx context.Context, updatedAt time.Time) ([]ProvisionerJob, error) {
	rows, err := q.db.QueryContext(ctx, getHungProvisionerJobs, updatedAt)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []ProvisionerJob
	for rows.Next() {
		var i ProvisionerJob
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.StartedAt,
			&i.CanceledAt,
			&i.CompletedAt,
			&i.Error,
			&i.OrganizationID,
			&i.InitiatorID,
			&i.Provisioner,
			&i.StorageMethod,
			&i.Type,
			&i.Input,
			&i.WorkerID,
			&i.FileID,
			&i.Tags,
			&i.ErrorCode,
			&i.TraceMetadata,
			&i.JobStatus,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getProvisionerJobByID = `-- name: GetProvisionerJobByID :one
SELECT
	id, created_at, updated_at, started_at, canceled_at, completed_at, error, organization_id, initiator_id, provisioner, storage_method, type, input, worker_id, file_id, tags, error_code, trace_metadata, job_status
FROM
	provisioner_jobs
WHERE
	id = $1
`

// GetProvisionerJobByID fetches a single provisioner job by primary key.
// Returns sql.ErrNoRows (wrapped by the driver) when the id does not exist.
func (q *sqlQuerier) GetProvisionerJobByID(ctx context.Context, id uuid.UUID) (ProvisionerJob, error) {
	row := q.db.QueryRowContext(ctx, getProvisionerJobByID, id)
	var i ProvisionerJob
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.StartedAt,
		&i.CanceledAt,
		&i.CompletedAt,
		&i.Error,
		&i.OrganizationID,
		&i.InitiatorID,
		&i.Provisioner,
		&i.StorageMethod,
		&i.Type,
		&i.Input,
		&i.WorkerID,
		&i.FileID,
		&i.Tags,
		&i.ErrorCode,
		&i.TraceMetadata,
		&i.JobStatus,
	)
	return i, err
}
|
|
|
|
const getProvisionerJobsByIDs = `-- name: GetProvisionerJobsByIDs :many
SELECT
	id, created_at, updated_at, started_at, canceled_at, completed_at, error, organization_id, initiator_id, provisioner, storage_method, type, input, worker_id, file_id, tags, error_code, trace_metadata, job_status
FROM
	provisioner_jobs
WHERE
	id = ANY($1 :: uuid [ ])
`

// GetProvisionerJobsByIDs fetches all jobs whose id is in the given set.
// Missing ids are silently omitted; no particular ordering is guaranteed.
func (q *sqlQuerier) GetProvisionerJobsByIDs(ctx context.Context, ids []uuid.UUID) ([]ProvisionerJob, error) {
	rows, err := q.db.QueryContext(ctx, getProvisionerJobsByIDs, pq.Array(ids))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []ProvisionerJob
	for rows.Next() {
		var i ProvisionerJob
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.StartedAt,
			&i.CanceledAt,
			&i.CompletedAt,
			&i.Error,
			&i.OrganizationID,
			&i.InitiatorID,
			&i.Provisioner,
			&i.StorageMethod,
			&i.Type,
			&i.Input,
			&i.WorkerID,
			&i.FileID,
			&i.Tags,
			&i.ErrorCode,
			&i.TraceMetadata,
			&i.JobStatus,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getProvisionerJobsByIDsWithQueuePosition = `-- name: GetProvisionerJobsByIDsWithQueuePosition :many
WITH unstarted_jobs AS (
	SELECT
		id, created_at
	FROM
		provisioner_jobs
	WHERE
		started_at IS NULL
),
queue_position AS (
	SELECT
		id,
		ROW_NUMBER() OVER (ORDER BY created_at ASC) AS queue_position
	FROM
		unstarted_jobs
),
queue_size AS (
	SELECT COUNT(*) as count FROM unstarted_jobs
)
SELECT
	pj.id, pj.created_at, pj.updated_at, pj.started_at, pj.canceled_at, pj.completed_at, pj.error, pj.organization_id, pj.initiator_id, pj.provisioner, pj.storage_method, pj.type, pj.input, pj.worker_id, pj.file_id, pj.tags, pj.error_code, pj.trace_metadata, pj.job_status,
	COALESCE(qp.queue_position, 0) AS queue_position,
	COALESCE(qs.count, 0) AS queue_size
FROM
	provisioner_jobs pj
LEFT JOIN
	queue_position qp ON qp.id = pj.id
LEFT JOIN
	queue_size qs ON TRUE
WHERE
	pj.id = ANY($1 :: uuid [ ])
`

// GetProvisionerJobsByIDsWithQueuePositionRow pairs a job with its position
// in the global unstarted-job queue. QueuePosition is 0 (via COALESCE) for
// jobs that have already started. QueueSize is the total number of
// unstarted jobs across all organizations.
type GetProvisionerJobsByIDsWithQueuePositionRow struct {
	ProvisionerJob ProvisionerJob `db:"provisioner_job" json:"provisioner_job"`
	QueuePosition  int64          `db:"queue_position" json:"queue_position"`
	QueueSize      int64          `db:"queue_size" json:"queue_size"`
}

// GetProvisionerJobsByIDsWithQueuePosition fetches the given jobs and
// annotates each with its queue position (1-based, ordered by created_at)
// among all unstarted jobs, plus the overall queue size.
func (q *sqlQuerier) GetProvisionerJobsByIDsWithQueuePosition(ctx context.Context, ids []uuid.UUID) ([]GetProvisionerJobsByIDsWithQueuePositionRow, error) {
	rows, err := q.db.QueryContext(ctx, getProvisionerJobsByIDsWithQueuePosition, pq.Array(ids))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetProvisionerJobsByIDsWithQueuePositionRow
	for rows.Next() {
		var i GetProvisionerJobsByIDsWithQueuePositionRow
		if err := rows.Scan(
			&i.ProvisionerJob.ID,
			&i.ProvisionerJob.CreatedAt,
			&i.ProvisionerJob.UpdatedAt,
			&i.ProvisionerJob.StartedAt,
			&i.ProvisionerJob.CanceledAt,
			&i.ProvisionerJob.CompletedAt,
			&i.ProvisionerJob.Error,
			&i.ProvisionerJob.OrganizationID,
			&i.ProvisionerJob.InitiatorID,
			&i.ProvisionerJob.Provisioner,
			&i.ProvisionerJob.StorageMethod,
			&i.ProvisionerJob.Type,
			&i.ProvisionerJob.Input,
			&i.ProvisionerJob.WorkerID,
			&i.ProvisionerJob.FileID,
			&i.ProvisionerJob.Tags,
			&i.ProvisionerJob.ErrorCode,
			&i.ProvisionerJob.TraceMetadata,
			&i.ProvisionerJob.JobStatus,
			&i.QueuePosition,
			&i.QueueSize,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getProvisionerJobsCreatedAfter = `-- name: GetProvisionerJobsCreatedAfter :many
SELECT id, created_at, updated_at, started_at, canceled_at, completed_at, error, organization_id, initiator_id, provisioner, storage_method, type, input, worker_id, file_id, tags, error_code, trace_metadata, job_status FROM provisioner_jobs WHERE created_at > $1
`

// GetProvisionerJobsCreatedAfter returns all jobs created strictly after
// the given timestamp.
func (q *sqlQuerier) GetProvisionerJobsCreatedAfter(ctx context.Context, createdAt time.Time) ([]ProvisionerJob, error) {
	rows, err := q.db.QueryContext(ctx, getProvisionerJobsCreatedAfter, createdAt)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []ProvisionerJob
	for rows.Next() {
		var i ProvisionerJob
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.StartedAt,
			&i.CanceledAt,
			&i.CompletedAt,
			&i.Error,
			&i.OrganizationID,
			&i.InitiatorID,
			&i.Provisioner,
			&i.StorageMethod,
			&i.Type,
			&i.Input,
			&i.WorkerID,
			&i.FileID,
			&i.Tags,
			&i.ErrorCode,
			&i.TraceMetadata,
			&i.JobStatus,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const insertProvisionerJob = `-- name: InsertProvisionerJob :one
INSERT INTO
	provisioner_jobs (
		id,
		created_at,
		updated_at,
		organization_id,
		initiator_id,
		provisioner,
		storage_method,
		file_id,
		"type",
		"input",
		tags,
		trace_metadata
	)
VALUES
	($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12) RETURNING id, created_at, updated_at, started_at, canceled_at, completed_at, error, organization_id, initiator_id, provisioner, storage_method, type, input, worker_id, file_id, tags, error_code, trace_metadata, job_status
`

type InsertProvisionerJobParams struct {
	ID             uuid.UUID                `db:"id" json:"id"`
	CreatedAt      time.Time                `db:"created_at" json:"created_at"`
	UpdatedAt      time.Time                `db:"updated_at" json:"updated_at"`
	OrganizationID uuid.UUID                `db:"organization_id" json:"organization_id"`
	InitiatorID    uuid.UUID                `db:"initiator_id" json:"initiator_id"`
	Provisioner    ProvisionerType          `db:"provisioner" json:"provisioner"`
	StorageMethod  ProvisionerStorageMethod `db:"storage_method" json:"storage_method"`
	FileID         uuid.UUID                `db:"file_id" json:"file_id"`
	Type           ProvisionerJobType       `db:"type" json:"type"`
	Input          json.RawMessage          `db:"input" json:"input"`
	Tags           StringMap                `db:"tags" json:"tags"`
	TraceMetadata  pqtype.NullRawMessage    `db:"trace_metadata" json:"trace_metadata"`
}

// InsertProvisionerJob creates a new, not-yet-started provisioner job and
// returns the full inserted row. started_at/worker_id are left NULL until a
// worker acquires the job via AcquireProvisionerJob.
func (q *sqlQuerier) InsertProvisionerJob(ctx context.Context, arg InsertProvisionerJobParams) (ProvisionerJob, error) {
	row := q.db.QueryRowContext(ctx, insertProvisionerJob,
		arg.ID,
		arg.CreatedAt,
		arg.UpdatedAt,
		arg.OrganizationID,
		arg.InitiatorID,
		arg.Provisioner,
		arg.StorageMethod,
		arg.FileID,
		arg.Type,
		arg.Input,
		arg.Tags,
		arg.TraceMetadata,
	)
	var i ProvisionerJob
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.StartedAt,
		&i.CanceledAt,
		&i.CompletedAt,
		&i.Error,
		&i.OrganizationID,
		&i.InitiatorID,
		&i.Provisioner,
		&i.StorageMethod,
		&i.Type,
		&i.Input,
		&i.WorkerID,
		&i.FileID,
		&i.Tags,
		&i.ErrorCode,
		&i.TraceMetadata,
		&i.JobStatus,
	)
	return i, err
}
|
|
|
|
const insertProvisionerJobTimings = `-- name: InsertProvisionerJobTimings :many
INSERT INTO provisioner_job_timings (job_id, started_at, ended_at, stage, source, action, resource)
SELECT
    $1::uuid AS provisioner_job_id,
    unnest($2::timestamptz[]),
    unnest($3::timestamptz[]),
    unnest($4::provisioner_job_timing_stage[]),
    unnest($5::text[]),
    unnest($6::text[]),
    unnest($7::text[])
RETURNING job_id, started_at, ended_at, stage, source, action, resource
`

// InsertProvisionerJobTimingsParams carries parallel arrays: element i of
// each slice describes timing row i. All slices must be the same length.
type InsertProvisionerJobTimingsParams struct {
	JobID     uuid.UUID                   `db:"job_id" json:"job_id"`
	StartedAt []time.Time                 `db:"started_at" json:"started_at"`
	EndedAt   []time.Time                 `db:"ended_at" json:"ended_at"`
	Stage     []ProvisionerJobTimingStage `db:"stage" json:"stage"`
	Source    []string                    `db:"source" json:"source"`
	Action    []string                    `db:"action" json:"action"`
	Resource  []string                    `db:"resource" json:"resource"`
}

// InsertProvisionerJobTimings bulk-inserts stage/action timing rows for a
// single job via unnest(), returning the inserted rows.
func (q *sqlQuerier) InsertProvisionerJobTimings(ctx context.Context, arg InsertProvisionerJobTimingsParams) ([]ProvisionerJobTiming, error) {
	rows, err := q.db.QueryContext(ctx, insertProvisionerJobTimings,
		arg.JobID,
		pq.Array(arg.StartedAt),
		pq.Array(arg.EndedAt),
		pq.Array(arg.Stage),
		pq.Array(arg.Source),
		pq.Array(arg.Action),
		pq.Array(arg.Resource),
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []ProvisionerJobTiming
	for rows.Next() {
		var i ProvisionerJobTiming
		if err := rows.Scan(
			&i.JobID,
			&i.StartedAt,
			&i.EndedAt,
			&i.Stage,
			&i.Source,
			&i.Action,
			&i.Resource,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const updateProvisionerJobByID = `-- name: UpdateProvisionerJobByID :exec
UPDATE
	provisioner_jobs
SET
	updated_at = $2
WHERE
	id = $1
`

type UpdateProvisionerJobByIDParams struct {
	ID        uuid.UUID `db:"id" json:"id"`
	UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
}

// UpdateProvisionerJobByID bumps a job's updated_at timestamp (used as a
// liveness heartbeat; see GetHungProvisionerJobs). No-op if the id does not
// exist — the statement result is discarded.
func (q *sqlQuerier) UpdateProvisionerJobByID(ctx context.Context, arg UpdateProvisionerJobByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateProvisionerJobByID, arg.ID, arg.UpdatedAt)
	return err
}
|
|
|
|
const updateProvisionerJobWithCancelByID = `-- name: UpdateProvisionerJobWithCancelByID :exec
UPDATE
	provisioner_jobs
SET
	canceled_at = $2,
	completed_at = $3
WHERE
	id = $1
`

type UpdateProvisionerJobWithCancelByIDParams struct {
	ID          uuid.UUID    `db:"id" json:"id"`
	CanceledAt  sql.NullTime `db:"canceled_at" json:"canceled_at"`
	CompletedAt sql.NullTime `db:"completed_at" json:"completed_at"`
}

// UpdateProvisionerJobWithCancelByID marks a job as canceled (and,
// optionally, completed — CompletedAt may be left null while cancellation
// is still in progress).
func (q *sqlQuerier) UpdateProvisionerJobWithCancelByID(ctx context.Context, arg UpdateProvisionerJobWithCancelByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateProvisionerJobWithCancelByID, arg.ID, arg.CanceledAt, arg.CompletedAt)
	return err
}
|
|
|
|
const updateProvisionerJobWithCompleteByID = `-- name: UpdateProvisionerJobWithCompleteByID :exec
UPDATE
	provisioner_jobs
SET
	updated_at = $2,
	completed_at = $3,
	error = $4,
	error_code = $5
WHERE
	id = $1
`

type UpdateProvisionerJobWithCompleteByIDParams struct {
	ID          uuid.UUID      `db:"id" json:"id"`
	UpdatedAt   time.Time      `db:"updated_at" json:"updated_at"`
	CompletedAt sql.NullTime   `db:"completed_at" json:"completed_at"`
	Error       sql.NullString `db:"error" json:"error"`
	ErrorCode   sql.NullString `db:"error_code" json:"error_code"`
}

// UpdateProvisionerJobWithCompleteByID records a job's terminal state:
// completion time plus error/error_code (both null on success).
func (q *sqlQuerier) UpdateProvisionerJobWithCompleteByID(ctx context.Context, arg UpdateProvisionerJobWithCompleteByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateProvisionerJobWithCompleteByID,
		arg.ID,
		arg.UpdatedAt,
		arg.CompletedAt,
		arg.Error,
		arg.ErrorCode,
	)
	return err
}
|
|
|
|
const deleteProvisionerKey = `-- name: DeleteProvisionerKey :exec
DELETE FROM
	provisioner_keys
WHERE
	id = $1
`

// DeleteProvisionerKey hard-deletes the provisioner key with the given id.
func (q *sqlQuerier) DeleteProvisionerKey(ctx context.Context, id uuid.UUID) error {
	_, err := q.db.ExecContext(ctx, deleteProvisionerKey, id)
	return err
}
|
|
|
|
const getProvisionerKeyByHashedSecret = `-- name: GetProvisionerKeyByHashedSecret :one
SELECT
	id, created_at, organization_id, name, hashed_secret, tags
FROM
	provisioner_keys
WHERE
	hashed_secret = $1
`

// GetProvisionerKeyByHashedSecret looks up a provisioner key by the hash of
// its secret (used to authenticate external provisioner daemons).
func (q *sqlQuerier) GetProvisionerKeyByHashedSecret(ctx context.Context, hashedSecret []byte) (ProvisionerKey, error) {
	row := q.db.QueryRowContext(ctx, getProvisionerKeyByHashedSecret, hashedSecret)
	var i ProvisionerKey
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.OrganizationID,
		&i.Name,
		&i.HashedSecret,
		&i.Tags,
	)
	return i, err
}
|
|
|
|
const getProvisionerKeyByID = `-- name: GetProvisionerKeyByID :one
SELECT
	id, created_at, organization_id, name, hashed_secret, tags
FROM
	provisioner_keys
WHERE
	id = $1
`

// GetProvisionerKeyByID fetches a single provisioner key by primary key.
func (q *sqlQuerier) GetProvisionerKeyByID(ctx context.Context, id uuid.UUID) (ProvisionerKey, error) {
	row := q.db.QueryRowContext(ctx, getProvisionerKeyByID, id)
	var i ProvisionerKey
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.OrganizationID,
		&i.Name,
		&i.HashedSecret,
		&i.Tags,
	)
	return i, err
}
|
|
|
|
const getProvisionerKeyByName = `-- name: GetProvisionerKeyByName :one
SELECT
	id, created_at, organization_id, name, hashed_secret, tags
FROM
	provisioner_keys
WHERE
	organization_id = $1
AND
	lower(name) = lower($2)
`

type GetProvisionerKeyByNameParams struct {
	OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
	Name           string    `db:"name" json:"name"`
}

// GetProvisionerKeyByName fetches an organization's provisioner key by
// name. The comparison is case-insensitive (lower() on both sides).
func (q *sqlQuerier) GetProvisionerKeyByName(ctx context.Context, arg GetProvisionerKeyByNameParams) (ProvisionerKey, error) {
	row := q.db.QueryRowContext(ctx, getProvisionerKeyByName, arg.OrganizationID, arg.Name)
	var i ProvisionerKey
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.OrganizationID,
		&i.Name,
		&i.HashedSecret,
		&i.Tags,
	)
	return i, err
}
|
|
|
|
const insertProvisionerKey = `-- name: InsertProvisionerKey :one
INSERT INTO
	provisioner_keys (
		id,
		created_at,
		organization_id,
		name,
		hashed_secret,
		tags
	)
VALUES
	($1, $2, $3, lower($6), $4, $5) RETURNING id, created_at, organization_id, name, hashed_secret, tags
`

// InsertProvisionerKeyParams: Name appears last because it binds to $6 in
// the VALUES list (wrapped in lower(), so names are stored lowercased).
type InsertProvisionerKeyParams struct {
	ID             uuid.UUID `db:"id" json:"id"`
	CreatedAt      time.Time `db:"created_at" json:"created_at"`
	OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
	HashedSecret   []byte    `db:"hashed_secret" json:"hashed_secret"`
	Tags           StringMap `db:"tags" json:"tags"`
	Name           string    `db:"name" json:"name"`
}

// InsertProvisionerKey creates a provisioner key; the name is normalized to
// lowercase on insert (matching the case-insensitive lookup in
// GetProvisionerKeyByName).
func (q *sqlQuerier) InsertProvisionerKey(ctx context.Context, arg InsertProvisionerKeyParams) (ProvisionerKey, error) {
	row := q.db.QueryRowContext(ctx, insertProvisionerKey,
		arg.ID,
		arg.CreatedAt,
		arg.OrganizationID,
		arg.HashedSecret,
		arg.Tags,
		arg.Name,
	)
	var i ProvisionerKey
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.OrganizationID,
		&i.Name,
		&i.HashedSecret,
		&i.Tags,
	)
	return i, err
}
|
|
|
|
const listProvisionerKeysByOrganization = `-- name: ListProvisionerKeysByOrganization :many
SELECT
	id, created_at, organization_id, name, hashed_secret, tags
FROM
	provisioner_keys
WHERE
	organization_id = $1
`

// ListProvisionerKeysByOrganization returns every provisioner key belonging
// to the given organization (no ordering guaranteed).
func (q *sqlQuerier) ListProvisionerKeysByOrganization(ctx context.Context, organizationID uuid.UUID) ([]ProvisionerKey, error) {
	rows, err := q.db.QueryContext(ctx, listProvisionerKeysByOrganization, organizationID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []ProvisionerKey
	for rows.Next() {
		var i ProvisionerKey
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.OrganizationID,
			&i.Name,
			&i.HashedSecret,
			&i.Tags,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getWorkspaceProxies = `-- name: GetWorkspaceProxies :many
SELECT
	id, name, display_name, icon, url, wildcard_hostname, created_at, updated_at, deleted, token_hashed_secret, region_id, derp_enabled, derp_only, version
FROM
	workspace_proxies
WHERE
	deleted = false
`

// GetWorkspaceProxies returns all workspace proxies that have not been
// soft-deleted.
func (q *sqlQuerier) GetWorkspaceProxies(ctx context.Context) ([]WorkspaceProxy, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceProxies)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceProxy
	for rows.Next() {
		var i WorkspaceProxy
		if err := rows.Scan(
			&i.ID,
			&i.Name,
			&i.DisplayName,
			&i.Icon,
			&i.Url,
			&i.WildcardHostname,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.Deleted,
			&i.TokenHashedSecret,
			&i.RegionID,
			&i.DerpEnabled,
			&i.DerpOnly,
			&i.Version,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getWorkspaceProxyByHostname = `-- name: GetWorkspaceProxyByHostname :one
SELECT
	id, name, display_name, icon, url, wildcard_hostname, created_at, updated_at, deleted, token_hashed_secret, region_id, derp_enabled, derp_only, version
FROM
	workspace_proxies
WHERE
	-- Validate that the @hostname has been sanitized and is not empty. This
	-- doesn't prevent SQL injection (already prevented by using prepared
	-- queries), but it does prevent carefully crafted hostnames from matching
	-- when they shouldn't.
	--
	-- Periods don't need to be escaped because they're not special characters
	-- in SQL matches unlike regular expressions.
	$1 :: text SIMILAR TO '[a-zA-Z0-9._-]+' AND
	deleted = false AND

	-- Validate that the hostname matches either the wildcard hostname or the
	-- access URL (ignoring scheme, port and path).
	(
		(
			$2 :: bool = true AND
			url SIMILAR TO '[^:]*://' || $1 :: text || '([:/]?%)*'
		) OR
		(
			$3 :: bool = true AND
			$1 :: text LIKE replace(wildcard_hostname, '*', '%')
		)
	)
LIMIT
	1
`

type GetWorkspaceProxyByHostnameParams struct {
	Hostname              string `db:"hostname" json:"hostname"`
	AllowAccessUrl        bool   `db:"allow_access_url" json:"allow_access_url"`
	AllowWildcardHostname bool   `db:"allow_wildcard_hostname" json:"allow_wildcard_hostname"`
}

// Finds a workspace proxy that has an access URL or app hostname that matches
// the provided hostname. This is to check if a hostname matches any workspace
// proxy.
//
// The hostname must be sanitized to only contain [a-zA-Z0-9.-] before calling
// this query. The scheme, port and path should be stripped.
func (q *sqlQuerier) GetWorkspaceProxyByHostname(ctx context.Context, arg GetWorkspaceProxyByHostnameParams) (WorkspaceProxy, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceProxyByHostname, arg.Hostname, arg.AllowAccessUrl, arg.AllowWildcardHostname)
	var i WorkspaceProxy
	err := row.Scan(
		&i.ID,
		&i.Name,
		&i.DisplayName,
		&i.Icon,
		&i.Url,
		&i.WildcardHostname,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Deleted,
		&i.TokenHashedSecret,
		&i.RegionID,
		&i.DerpEnabled,
		&i.DerpOnly,
		&i.Version,
	)
	return i, err
}
|
|
|
|
const getWorkspaceProxyByID = `-- name: GetWorkspaceProxyByID :one
SELECT
	id, name, display_name, icon, url, wildcard_hostname, created_at, updated_at, deleted, token_hashed_secret, region_id, derp_enabled, derp_only, version
FROM
	workspace_proxies
WHERE
	id = $1
LIMIT
	1
`

// GetWorkspaceProxyByID fetches a workspace proxy by primary key. Unlike
// the name/hostname lookups, this does NOT filter out soft-deleted rows.
func (q *sqlQuerier) GetWorkspaceProxyByID(ctx context.Context, id uuid.UUID) (WorkspaceProxy, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceProxyByID, id)
	var i WorkspaceProxy
	err := row.Scan(
		&i.ID,
		&i.Name,
		&i.DisplayName,
		&i.Icon,
		&i.Url,
		&i.WildcardHostname,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Deleted,
		&i.TokenHashedSecret,
		&i.RegionID,
		&i.DerpEnabled,
		&i.DerpOnly,
		&i.Version,
	)
	return i, err
}
|
|
|
|
const getWorkspaceProxyByName = `-- name: GetWorkspaceProxyByName :one
SELECT
	id, name, display_name, icon, url, wildcard_hostname, created_at, updated_at, deleted, token_hashed_secret, region_id, derp_enabled, derp_only, version
FROM
	workspace_proxies
WHERE
	name = $1
	AND deleted = false
LIMIT
	1
`

// GetWorkspaceProxyByName fetches a non-deleted workspace proxy by its
// (case-sensitive) name.
func (q *sqlQuerier) GetWorkspaceProxyByName(ctx context.Context, name string) (WorkspaceProxy, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceProxyByName, name)
	var i WorkspaceProxy
	err := row.Scan(
		&i.ID,
		&i.Name,
		&i.DisplayName,
		&i.Icon,
		&i.Url,
		&i.WildcardHostname,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Deleted,
		&i.TokenHashedSecret,
		&i.RegionID,
		&i.DerpEnabled,
		&i.DerpOnly,
		&i.Version,
	)
	return i, err
}
|
|
|
|
const insertWorkspaceProxy = `-- name: InsertWorkspaceProxy :one
INSERT INTO
	workspace_proxies (
		id,
		url,
		wildcard_hostname,
		name,
		display_name,
		icon,
		derp_enabled,
		derp_only,
		token_hashed_secret,
		created_at,
		updated_at,
		deleted
	)
VALUES
	($1, '', '', $2, $3, $4, $5, $6, $7, $8, $9, false) RETURNING id, name, display_name, icon, url, wildcard_hostname, created_at, updated_at, deleted, token_hashed_secret, region_id, derp_enabled, derp_only, version
`

type InsertWorkspaceProxyParams struct {
	ID                uuid.UUID `db:"id" json:"id"`
	Name              string    `db:"name" json:"name"`
	DisplayName       string    `db:"display_name" json:"display_name"`
	Icon              string    `db:"icon" json:"icon"`
	DerpEnabled       bool      `db:"derp_enabled" json:"derp_enabled"`
	DerpOnly          bool      `db:"derp_only" json:"derp_only"`
	TokenHashedSecret []byte    `db:"token_hashed_secret" json:"token_hashed_secret"`
	CreatedAt         time.Time `db:"created_at" json:"created_at"`
	UpdatedAt         time.Time `db:"updated_at" json:"updated_at"`
}

// InsertWorkspaceProxy creates a workspace proxy record. url and
// wildcard_hostname are inserted as empty strings; they are populated later
// when the proxy registers itself (see RegisterWorkspaceProxy).
func (q *sqlQuerier) InsertWorkspaceProxy(ctx context.Context, arg InsertWorkspaceProxyParams) (WorkspaceProxy, error) {
	row := q.db.QueryRowContext(ctx, insertWorkspaceProxy,
		arg.ID,
		arg.Name,
		arg.DisplayName,
		arg.Icon,
		arg.DerpEnabled,
		arg.DerpOnly,
		arg.TokenHashedSecret,
		arg.CreatedAt,
		arg.UpdatedAt,
	)
	var i WorkspaceProxy
	err := row.Scan(
		&i.ID,
		&i.Name,
		&i.DisplayName,
		&i.Icon,
		&i.Url,
		&i.WildcardHostname,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Deleted,
		&i.TokenHashedSecret,
		&i.RegionID,
		&i.DerpEnabled,
		&i.DerpOnly,
		&i.Version,
	)
	return i, err
}
|
|
|
|
const registerWorkspaceProxy = `-- name: RegisterWorkspaceProxy :one
UPDATE
	workspace_proxies
SET
	url = $1 :: text,
	wildcard_hostname = $2 :: text,
	derp_enabled = $3 :: boolean,
	derp_only = $4 :: boolean,
	version = $5 :: text,
	updated_at = Now()
WHERE
	id = $6
RETURNING id, name, display_name, icon, url, wildcard_hostname, created_at, updated_at, deleted, token_hashed_secret, region_id, derp_enabled, derp_only, version
`

type RegisterWorkspaceProxyParams struct {
	Url              string    `db:"url" json:"url"`
	WildcardHostname string    `db:"wildcard_hostname" json:"wildcard_hostname"`
	DerpEnabled      bool      `db:"derp_enabled" json:"derp_enabled"`
	DerpOnly         bool      `db:"derp_only" json:"derp_only"`
	Version          string    `db:"version" json:"version"`
	ID               uuid.UUID `db:"id" json:"id"`
}

// RegisterWorkspaceProxy records a proxy's runtime details (access URL,
// wildcard hostname, DERP settings, version) when the proxy comes online,
// stamping updated_at with the database clock (Now()).
func (q *sqlQuerier) RegisterWorkspaceProxy(ctx context.Context, arg RegisterWorkspaceProxyParams) (WorkspaceProxy, error) {
	row := q.db.QueryRowContext(ctx, registerWorkspaceProxy,
		arg.Url,
		arg.WildcardHostname,
		arg.DerpEnabled,
		arg.DerpOnly,
		arg.Version,
		arg.ID,
	)
	var i WorkspaceProxy
	err := row.Scan(
		&i.ID,
		&i.Name,
		&i.DisplayName,
		&i.Icon,
		&i.Url,
		&i.WildcardHostname,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Deleted,
		&i.TokenHashedSecret,
		&i.RegionID,
		&i.DerpEnabled,
		&i.DerpOnly,
		&i.Version,
	)
	return i, err
}
|
|
|
|
const updateWorkspaceProxy = `-- name: UpdateWorkspaceProxy :one
UPDATE
	workspace_proxies
SET
	-- These values should always be provided.
	name = $1,
	display_name = $2,
	icon = $3,
	-- Only update the token if a new one is provided.
	-- So this is an optional field.
	token_hashed_secret = CASE
		WHEN length($4 :: bytea) > 0 THEN $4 :: bytea
		ELSE workspace_proxies.token_hashed_secret
	END,
	-- Always update this timestamp.
	updated_at = Now()
WHERE
	id = $5
RETURNING id, name, display_name, icon, url, wildcard_hostname, created_at, updated_at, deleted, token_hashed_secret, region_id, derp_enabled, derp_only, version
`

type UpdateWorkspaceProxyParams struct {
	Name        string `db:"name" json:"name"`
	DisplayName string `db:"display_name" json:"display_name"`
	Icon        string `db:"icon" json:"icon"`
	// TokenHashedSecret is optional: pass empty/nil to keep the existing
	// secret (handled by the CASE expression in the query).
	TokenHashedSecret []byte    `db:"token_hashed_secret" json:"token_hashed_secret"`
	ID                uuid.UUID `db:"id" json:"id"`
}

// This allows editing the properties of a workspace proxy.
func (q *sqlQuerier) UpdateWorkspaceProxy(ctx context.Context, arg UpdateWorkspaceProxyParams) (WorkspaceProxy, error) {
	row := q.db.QueryRowContext(ctx, updateWorkspaceProxy,
		arg.Name,
		arg.DisplayName,
		arg.Icon,
		arg.TokenHashedSecret,
		arg.ID,
	)
	var i WorkspaceProxy
	err := row.Scan(
		&i.ID,
		&i.Name,
		&i.DisplayName,
		&i.Icon,
		&i.Url,
		&i.WildcardHostname,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Deleted,
		&i.TokenHashedSecret,
		&i.RegionID,
		&i.DerpEnabled,
		&i.DerpOnly,
		&i.Version,
	)
	return i, err
}
|
|
|
|
const updateWorkspaceProxyDeleted = `-- name: UpdateWorkspaceProxyDeleted :exec
|
|
UPDATE
|
|
workspace_proxies
|
|
SET
|
|
updated_at = Now(),
|
|
deleted = $1
|
|
WHERE
|
|
id = $2
|
|
`
|
|
|
|
type UpdateWorkspaceProxyDeletedParams struct {
|
|
Deleted bool `db:"deleted" json:"deleted"`
|
|
ID uuid.UUID `db:"id" json:"id"`
|
|
}
|
|
|
|
func (q *sqlQuerier) UpdateWorkspaceProxyDeleted(ctx context.Context, arg UpdateWorkspaceProxyDeletedParams) error {
|
|
_, err := q.db.ExecContext(ctx, updateWorkspaceProxyDeleted, arg.Deleted, arg.ID)
|
|
return err
|
|
}
|
|
|
|
const getQuotaAllowanceForUser = `-- name: GetQuotaAllowanceForUser :one
SELECT
	coalesce(SUM(groups.quota_allowance), 0)::BIGINT
FROM
	(
		-- Select all groups this user is a member of. This will also include
		-- the "Everyone" group for organizations the user is a member of.
		SELECT user_id, user_email, user_username, user_hashed_password, user_created_at, user_updated_at, user_status, user_rbac_roles, user_login_type, user_avatar_url, user_deleted, user_last_seen_at, user_quiet_hours_schedule, user_theme_preference, user_name, user_github_com_user_id, organization_id, group_name, group_id FROM group_members_expanded
		WHERE
			$1 = user_id AND
			$2 = group_members_expanded.organization_id
	) AS members
INNER JOIN groups ON
	members.group_id = groups.id
`

// GetQuotaAllowanceForUserParams holds the bind parameters for
// GetQuotaAllowanceForUser.
type GetQuotaAllowanceForUserParams struct {
	UserID         uuid.UUID `db:"user_id" json:"user_id"`
	OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
}

// GetQuotaAllowanceForUser sums quota_allowance across every group the user
// belongs to within the given organization. Returns 0 (not an error) when the
// user is in no groups, thanks to the coalesce in the query.
func (q *sqlQuerier) GetQuotaAllowanceForUser(ctx context.Context, arg GetQuotaAllowanceForUserParams) (int64, error) {
	row := q.db.QueryRowContext(ctx, getQuotaAllowanceForUser, arg.UserID, arg.OrganizationID)
	var column_1 int64
	err := row.Scan(&column_1)
	return column_1, err
}

const getQuotaConsumedForUser = `-- name: GetQuotaConsumedForUser :one
WITH latest_builds AS (
	SELECT
		DISTINCT ON
		(workspace_id) id,
		workspace_id,
		daily_cost
	FROM
		workspace_builds wb
	ORDER BY
		workspace_id,
		created_at DESC
)
SELECT
	coalesce(SUM(daily_cost), 0)::BIGINT
FROM
	workspaces
JOIN latest_builds ON
	latest_builds.workspace_id = workspaces.id
WHERE NOT
	deleted AND
	workspaces.owner_id = $1 AND
	workspaces.organization_id = $2
`

// GetQuotaConsumedForUserParams holds the bind parameters for
// GetQuotaConsumedForUser.
type GetQuotaConsumedForUserParams struct {
	OwnerID        uuid.UUID `db:"owner_id" json:"owner_id"`
	OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
}

// GetQuotaConsumedForUser sums the daily_cost of the most recent build of each
// non-deleted workspace owned by the user in the given organization.
func (q *sqlQuerier) GetQuotaConsumedForUser(ctx context.Context, arg GetQuotaConsumedForUserParams) (int64, error) {
	row := q.db.QueryRowContext(ctx, getQuotaConsumedForUser, arg.OwnerID, arg.OrganizationID)
	var column_1 int64
	err := row.Scan(&column_1)
	return column_1, err
}
|
|
|
|
const deleteReplicasUpdatedBefore = `-- name: DeleteReplicasUpdatedBefore :exec
DELETE FROM replicas WHERE updated_at < $1
`

// DeleteReplicasUpdatedBefore removes replica rows whose updated_at is older
// than the given cutoff.
func (q *sqlQuerier) DeleteReplicasUpdatedBefore(ctx context.Context, updatedAt time.Time) error {
	_, err := q.db.ExecContext(ctx, deleteReplicasUpdatedBefore, updatedAt)
	return err
}

const getReplicaByID = `-- name: GetReplicaByID :one
SELECT id, created_at, started_at, stopped_at, updated_at, hostname, region_id, relay_address, database_latency, version, error, "primary" FROM replicas WHERE id = $1
`

// GetReplicaByID fetches a single replica row by its ID.
func (q *sqlQuerier) GetReplicaByID(ctx context.Context, id uuid.UUID) (Replica, error) {
	row := q.db.QueryRowContext(ctx, getReplicaByID, id)
	var i Replica
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.StartedAt,
		&i.StoppedAt,
		&i.UpdatedAt,
		&i.Hostname,
		&i.RegionID,
		&i.RelayAddress,
		&i.DatabaseLatency,
		&i.Version,
		&i.Error,
		&i.Primary,
	)
	return i, err
}

const getReplicasUpdatedAfter = `-- name: GetReplicasUpdatedAfter :many
SELECT id, created_at, started_at, stopped_at, updated_at, hostname, region_id, relay_address, database_latency, version, error, "primary" FROM replicas WHERE updated_at > $1 AND stopped_at IS NULL
`

// GetReplicasUpdatedAfter lists replicas updated after the given time that
// have not been stopped (stopped_at IS NULL).
func (q *sqlQuerier) GetReplicasUpdatedAfter(ctx context.Context, updatedAt time.Time) ([]Replica, error) {
	rows, err := q.db.QueryContext(ctx, getReplicasUpdatedAfter, updatedAt)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []Replica
	for rows.Next() {
		var i Replica
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.StartedAt,
			&i.StoppedAt,
			&i.UpdatedAt,
			&i.Hostname,
			&i.RegionID,
			&i.RelayAddress,
			&i.DatabaseLatency,
			&i.Version,
			&i.Error,
			&i.Primary,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}

const insertReplica = `-- name: InsertReplica :one
INSERT INTO replicas (
	id,
	created_at,
	started_at,
	updated_at,
	hostname,
	region_id,
	relay_address,
	version,
	database_latency,
	"primary"
) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) RETURNING id, created_at, started_at, stopped_at, updated_at, hostname, region_id, relay_address, database_latency, version, error, "primary"
`

// InsertReplicaParams holds the bind parameters for InsertReplica.
type InsertReplicaParams struct {
	ID              uuid.UUID `db:"id" json:"id"`
	CreatedAt       time.Time `db:"created_at" json:"created_at"`
	StartedAt       time.Time `db:"started_at" json:"started_at"`
	UpdatedAt       time.Time `db:"updated_at" json:"updated_at"`
	Hostname        string    `db:"hostname" json:"hostname"`
	RegionID        int32     `db:"region_id" json:"region_id"`
	RelayAddress    string    `db:"relay_address" json:"relay_address"`
	Version         string    `db:"version" json:"version"`
	DatabaseLatency int32     `db:"database_latency" json:"database_latency"`
	Primary         bool      `db:"primary" json:"primary"`
}

// InsertReplica creates a replica row and returns the inserted record.
func (q *sqlQuerier) InsertReplica(ctx context.Context, arg InsertReplicaParams) (Replica, error) {
	row := q.db.QueryRowContext(ctx, insertReplica,
		arg.ID,
		arg.CreatedAt,
		arg.StartedAt,
		arg.UpdatedAt,
		arg.Hostname,
		arg.RegionID,
		arg.RelayAddress,
		arg.Version,
		arg.DatabaseLatency,
		arg.Primary,
	)
	var i Replica
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.StartedAt,
		&i.StoppedAt,
		&i.UpdatedAt,
		&i.Hostname,
		&i.RegionID,
		&i.RelayAddress,
		&i.DatabaseLatency,
		&i.Version,
		&i.Error,
		&i.Primary,
	)
	return i, err
}

const updateReplica = `-- name: UpdateReplica :one
UPDATE replicas SET
	updated_at = $2,
	started_at = $3,
	stopped_at = $4,
	relay_address = $5,
	region_id = $6,
	hostname = $7,
	version = $8,
	error = $9,
	database_latency = $10,
	"primary" = $11
WHERE id = $1 RETURNING id, created_at, started_at, stopped_at, updated_at, hostname, region_id, relay_address, database_latency, version, error, "primary"
`

// UpdateReplicaParams holds the bind parameters for UpdateReplica.
type UpdateReplicaParams struct {
	ID              uuid.UUID    `db:"id" json:"id"`
	UpdatedAt       time.Time    `db:"updated_at" json:"updated_at"`
	StartedAt       time.Time    `db:"started_at" json:"started_at"`
	StoppedAt       sql.NullTime `db:"stopped_at" json:"stopped_at"`
	RelayAddress    string       `db:"relay_address" json:"relay_address"`
	RegionID        int32        `db:"region_id" json:"region_id"`
	Hostname        string       `db:"hostname" json:"hostname"`
	Version         string       `db:"version" json:"version"`
	Error           string       `db:"error" json:"error"`
	DatabaseLatency int32        `db:"database_latency" json:"database_latency"`
	Primary         bool         `db:"primary" json:"primary"`
}

// UpdateReplica updates every mutable column of a replica row by ID and
// returns the updated record.
func (q *sqlQuerier) UpdateReplica(ctx context.Context, arg UpdateReplicaParams) (Replica, error) {
	row := q.db.QueryRowContext(ctx, updateReplica,
		arg.ID,
		arg.UpdatedAt,
		arg.StartedAt,
		arg.StoppedAt,
		arg.RelayAddress,
		arg.RegionID,
		arg.Hostname,
		arg.Version,
		arg.Error,
		arg.DatabaseLatency,
		arg.Primary,
	)
	var i Replica
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.StartedAt,
		&i.StoppedAt,
		&i.UpdatedAt,
		&i.Hostname,
		&i.RegionID,
		&i.RelayAddress,
		&i.DatabaseLatency,
		&i.Version,
		&i.Error,
		&i.Primary,
	)
	return i, err
}
|
|
|
|
const customRoles = `-- name: CustomRoles :many
SELECT
	name, display_name, site_permissions, org_permissions, user_permissions, created_at, updated_at, organization_id, id
FROM
	custom_roles
WHERE
	true
	-- @lookup_roles will filter for exact (role_name, org_id) pairs
	-- To do this manually in SQL, you can construct an array and cast it:
	-- cast(ARRAY[('customrole','ece79dac-926e-44ca-9790-2ff7c5eb6e0c')] AS name_organization_pair[])
	AND CASE WHEN array_length($1 :: name_organization_pair[], 1) > 0 THEN
		-- Using 'coalesce' to avoid troubles with null literals being an empty string.
		(name, coalesce(organization_id, '00000000-0000-0000-0000-000000000000' ::uuid)) = ANY ($1::name_organization_pair[])
	ELSE true
	END
	-- This allows fetching all roles, or just site wide roles
	AND CASE WHEN $2 :: boolean THEN
		organization_id IS null
	ELSE true
	END
	-- Allows fetching all roles to a particular organization
	AND CASE WHEN $3 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
		organization_id = $3
	ELSE true
	END
`

// CustomRolesParams holds the filters for CustomRoles. Zero values
// (empty LookupRoles, false ExcludeOrgRoles, nil-UUID OrganizationID)
// disable the corresponding filter in the query.
type CustomRolesParams struct {
	LookupRoles     []NameOrganizationPair `db:"lookup_roles" json:"lookup_roles"`
	ExcludeOrgRoles bool                   `db:"exclude_org_roles" json:"exclude_org_roles"`
	OrganizationID  uuid.UUID              `db:"organization_id" json:"organization_id"`
}

// CustomRoles lists custom roles matching the given optional filters.
func (q *sqlQuerier) CustomRoles(ctx context.Context, arg CustomRolesParams) ([]CustomRole, error) {
	rows, err := q.db.QueryContext(ctx, customRoles, pq.Array(arg.LookupRoles), arg.ExcludeOrgRoles, arg.OrganizationID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []CustomRole
	for rows.Next() {
		var i CustomRole
		if err := rows.Scan(
			&i.Name,
			&i.DisplayName,
			&i.SitePermissions,
			&i.OrgPermissions,
			&i.UserPermissions,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.OrganizationID,
			&i.ID,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}

const deleteCustomRole = `-- name: DeleteCustomRole :exec
DELETE FROM
	custom_roles
WHERE
	name = lower($1)
	AND organization_id = $2
`

// DeleteCustomRoleParams holds the bind parameters for DeleteCustomRole.
type DeleteCustomRoleParams struct {
	Name           string        `db:"name" json:"name"`
	OrganizationID uuid.NullUUID `db:"organization_id" json:"organization_id"`
}

// DeleteCustomRole removes a custom role by (lowercased) name and
// organization ID.
func (q *sqlQuerier) DeleteCustomRole(ctx context.Context, arg DeleteCustomRoleParams) error {
	_, err := q.db.ExecContext(ctx, deleteCustomRole, arg.Name, arg.OrganizationID)
	return err
}

const insertCustomRole = `-- name: InsertCustomRole :one
INSERT INTO
	custom_roles (
		name,
		display_name,
		organization_id,
		site_permissions,
		org_permissions,
		user_permissions,
		created_at,
		updated_at
	)
VALUES (
	-- Always force lowercase names
	lower($1),
	$2,
	$3,
	$4,
	$5,
	$6,
	now(),
	now()
)
RETURNING name, display_name, site_permissions, org_permissions, user_permissions, created_at, updated_at, organization_id, id
`

// InsertCustomRoleParams holds the bind parameters for InsertCustomRole.
// Name is lowercased by the query itself.
type InsertCustomRoleParams struct {
	Name            string                `db:"name" json:"name"`
	DisplayName     string                `db:"display_name" json:"display_name"`
	OrganizationID  uuid.NullUUID         `db:"organization_id" json:"organization_id"`
	SitePermissions CustomRolePermissions `db:"site_permissions" json:"site_permissions"`
	OrgPermissions  CustomRolePermissions `db:"org_permissions" json:"org_permissions"`
	UserPermissions CustomRolePermissions `db:"user_permissions" json:"user_permissions"`
}

// InsertCustomRole creates a custom role and returns the inserted record.
func (q *sqlQuerier) InsertCustomRole(ctx context.Context, arg InsertCustomRoleParams) (CustomRole, error) {
	row := q.db.QueryRowContext(ctx, insertCustomRole,
		arg.Name,
		arg.DisplayName,
		arg.OrganizationID,
		arg.SitePermissions,
		arg.OrgPermissions,
		arg.UserPermissions,
	)
	var i CustomRole
	err := row.Scan(
		&i.Name,
		&i.DisplayName,
		&i.SitePermissions,
		&i.OrgPermissions,
		&i.UserPermissions,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.OrganizationID,
		&i.ID,
	)
	return i, err
}

const updateCustomRole = `-- name: UpdateCustomRole :one
UPDATE
	custom_roles
SET
	display_name = $1,
	site_permissions = $2,
	org_permissions = $3,
	user_permissions = $4,
	updated_at = now()
WHERE
	name = lower($5)
	AND organization_id = $6
RETURNING name, display_name, site_permissions, org_permissions, user_permissions, created_at, updated_at, organization_id, id
`

// UpdateCustomRoleParams holds the bind parameters for UpdateCustomRole.
// The role is matched by (lowercased) Name plus OrganizationID.
type UpdateCustomRoleParams struct {
	DisplayName     string                `db:"display_name" json:"display_name"`
	SitePermissions CustomRolePermissions `db:"site_permissions" json:"site_permissions"`
	OrgPermissions  CustomRolePermissions `db:"org_permissions" json:"org_permissions"`
	UserPermissions CustomRolePermissions `db:"user_permissions" json:"user_permissions"`
	Name            string                `db:"name" json:"name"`
	OrganizationID  uuid.NullUUID         `db:"organization_id" json:"organization_id"`
}

// UpdateCustomRole replaces the display name and permission sets of an
// existing custom role and returns the updated record.
func (q *sqlQuerier) UpdateCustomRole(ctx context.Context, arg UpdateCustomRoleParams) (CustomRole, error) {
	row := q.db.QueryRowContext(ctx, updateCustomRole,
		arg.DisplayName,
		arg.SitePermissions,
		arg.OrgPermissions,
		arg.UserPermissions,
		arg.Name,
		arg.OrganizationID,
	)
	var i CustomRole
	err := row.Scan(
		&i.Name,
		&i.DisplayName,
		&i.SitePermissions,
		&i.OrgPermissions,
		&i.UserPermissions,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.OrganizationID,
		&i.ID,
	)
	return i, err
}
|
|
|
|
const getAnnouncementBanners = `-- name: GetAnnouncementBanners :one
SELECT value FROM site_configs WHERE key = 'announcement_banners'
`

// GetAnnouncementBanners returns the raw 'announcement_banners' site config
// value. Scan yields sql.ErrNoRows when the key has never been set.
func (q *sqlQuerier) GetAnnouncementBanners(ctx context.Context) (string, error) {
	row := q.db.QueryRowContext(ctx, getAnnouncementBanners)
	var value string
	err := row.Scan(&value)
	return value, err
}

const getAppSecurityKey = `-- name: GetAppSecurityKey :one
SELECT value FROM site_configs WHERE key = 'app_signing_key'
`

// GetAppSecurityKey returns the 'app_signing_key' site config value.
func (q *sqlQuerier) GetAppSecurityKey(ctx context.Context) (string, error) {
	row := q.db.QueryRowContext(ctx, getAppSecurityKey)
	var value string
	err := row.Scan(&value)
	return value, err
}

const getApplicationName = `-- name: GetApplicationName :one
SELECT value FROM site_configs WHERE key = 'application_name'
`

// GetApplicationName returns the 'application_name' site config value.
func (q *sqlQuerier) GetApplicationName(ctx context.Context) (string, error) {
	row := q.db.QueryRowContext(ctx, getApplicationName)
	var value string
	err := row.Scan(&value)
	return value, err
}

const getCoordinatorResumeTokenSigningKey = `-- name: GetCoordinatorResumeTokenSigningKey :one
SELECT value FROM site_configs WHERE key = 'coordinator_resume_token_signing_key'
`

// GetCoordinatorResumeTokenSigningKey returns the
// 'coordinator_resume_token_signing_key' site config value.
func (q *sqlQuerier) GetCoordinatorResumeTokenSigningKey(ctx context.Context) (string, error) {
	row := q.db.QueryRowContext(ctx, getCoordinatorResumeTokenSigningKey)
	var value string
	err := row.Scan(&value)
	return value, err
}

const getDERPMeshKey = `-- name: GetDERPMeshKey :one
SELECT value FROM site_configs WHERE key = 'derp_mesh_key'
`

// GetDERPMeshKey returns the 'derp_mesh_key' site config value.
func (q *sqlQuerier) GetDERPMeshKey(ctx context.Context) (string, error) {
	row := q.db.QueryRowContext(ctx, getDERPMeshKey)
	var value string
	err := row.Scan(&value)
	return value, err
}

const getDefaultProxyConfig = `-- name: GetDefaultProxyConfig :one
SELECT
	COALESCE((SELECT value FROM site_configs WHERE key = 'default_proxy_display_name'), 'Default') :: text AS display_name,
	COALESCE((SELECT value FROM site_configs WHERE key = 'default_proxy_icon_url'), '/emojis/1f3e1.png') :: text AS icon_url
`

// GetDefaultProxyConfigRow carries the display name and icon URL of the
// default proxy, with fallbacks applied by the query's COALESCEs.
type GetDefaultProxyConfigRow struct {
	DisplayName string `db:"display_name" json:"display_name"`
	IconUrl     string `db:"icon_url" json:"icon_url"`
}

// GetDefaultProxyConfig returns the default proxy's display configuration,
// falling back to 'Default' / '/emojis/1f3e1.png' when unset.
func (q *sqlQuerier) GetDefaultProxyConfig(ctx context.Context) (GetDefaultProxyConfigRow, error) {
	row := q.db.QueryRowContext(ctx, getDefaultProxyConfig)
	var i GetDefaultProxyConfigRow
	err := row.Scan(&i.DisplayName, &i.IconUrl)
	return i, err
}

const getDeploymentID = `-- name: GetDeploymentID :one
SELECT value FROM site_configs WHERE key = 'deployment_id'
`

// GetDeploymentID returns the 'deployment_id' site config value.
func (q *sqlQuerier) GetDeploymentID(ctx context.Context) (string, error) {
	row := q.db.QueryRowContext(ctx, getDeploymentID)
	var value string
	err := row.Scan(&value)
	return value, err
}

const getHealthSettings = `-- name: GetHealthSettings :one
SELECT
	COALESCE((SELECT value FROM site_configs WHERE key = 'health_settings'), '{}') :: text AS health_settings
`

// GetHealthSettings returns the 'health_settings' site config value, or the
// empty JSON object '{}' when unset.
func (q *sqlQuerier) GetHealthSettings(ctx context.Context) (string, error) {
	row := q.db.QueryRowContext(ctx, getHealthSettings)
	var health_settings string
	err := row.Scan(&health_settings)
	return health_settings, err
}

const getLastUpdateCheck = `-- name: GetLastUpdateCheck :one
SELECT value FROM site_configs WHERE key = 'last_update_check'
`

// GetLastUpdateCheck returns the 'last_update_check' site config value.
func (q *sqlQuerier) GetLastUpdateCheck(ctx context.Context) (string, error) {
	row := q.db.QueryRowContext(ctx, getLastUpdateCheck)
	var value string
	err := row.Scan(&value)
	return value, err
}

const getLogoURL = `-- name: GetLogoURL :one
SELECT value FROM site_configs WHERE key = 'logo_url'
`

// GetLogoURL returns the 'logo_url' site config value.
func (q *sqlQuerier) GetLogoURL(ctx context.Context) (string, error) {
	row := q.db.QueryRowContext(ctx, getLogoURL)
	var value string
	err := row.Scan(&value)
	return value, err
}

const getNotificationsSettings = `-- name: GetNotificationsSettings :one
SELECT
	COALESCE((SELECT value FROM site_configs WHERE key = 'notifications_settings'), '{}') :: text AS notifications_settings
`

// GetNotificationsSettings returns the 'notifications_settings' site config
// value, or the empty JSON object '{}' when unset.
func (q *sqlQuerier) GetNotificationsSettings(ctx context.Context) (string, error) {
	row := q.db.QueryRowContext(ctx, getNotificationsSettings)
	var notifications_settings string
	err := row.Scan(&notifications_settings)
	return notifications_settings, err
}

const getOAuthSigningKey = `-- name: GetOAuthSigningKey :one
SELECT value FROM site_configs WHERE key = 'oauth_signing_key'
`

// GetOAuthSigningKey returns the 'oauth_signing_key' site config value.
func (q *sqlQuerier) GetOAuthSigningKey(ctx context.Context) (string, error) {
	row := q.db.QueryRowContext(ctx, getOAuthSigningKey)
	var value string
	err := row.Scan(&value)
	return value, err
}
|
|
|
|
const insertDERPMeshKey = `-- name: InsertDERPMeshKey :exec
INSERT INTO site_configs (key, value) VALUES ('derp_mesh_key', $1)
`

// InsertDERPMeshKey stores the 'derp_mesh_key' site config value.
// Note: plain INSERT — fails if the key already exists.
func (q *sqlQuerier) InsertDERPMeshKey(ctx context.Context, value string) error {
	_, err := q.db.ExecContext(ctx, insertDERPMeshKey, value)
	return err
}

const insertDeploymentID = `-- name: InsertDeploymentID :exec
INSERT INTO site_configs (key, value) VALUES ('deployment_id', $1)
`

// InsertDeploymentID stores the 'deployment_id' site config value.
// Note: plain INSERT — fails if the key already exists.
func (q *sqlQuerier) InsertDeploymentID(ctx context.Context, value string) error {
	_, err := q.db.ExecContext(ctx, insertDeploymentID, value)
	return err
}

const upsertAnnouncementBanners = `-- name: UpsertAnnouncementBanners :exec
INSERT INTO site_configs (key, value) VALUES ('announcement_banners', $1)
ON CONFLICT (key) DO UPDATE SET value = $1 WHERE site_configs.key = 'announcement_banners'
`

// UpsertAnnouncementBanners inserts or replaces the 'announcement_banners'
// site config value.
func (q *sqlQuerier) UpsertAnnouncementBanners(ctx context.Context, value string) error {
	_, err := q.db.ExecContext(ctx, upsertAnnouncementBanners, value)
	return err
}

const upsertAppSecurityKey = `-- name: UpsertAppSecurityKey :exec
INSERT INTO site_configs (key, value) VALUES ('app_signing_key', $1)
ON CONFLICT (key) DO UPDATE set value = $1 WHERE site_configs.key = 'app_signing_key'
`

// UpsertAppSecurityKey inserts or replaces the 'app_signing_key' site config
// value.
func (q *sqlQuerier) UpsertAppSecurityKey(ctx context.Context, value string) error {
	_, err := q.db.ExecContext(ctx, upsertAppSecurityKey, value)
	return err
}

const upsertApplicationName = `-- name: UpsertApplicationName :exec
INSERT INTO site_configs (key, value) VALUES ('application_name', $1)
ON CONFLICT (key) DO UPDATE SET value = $1 WHERE site_configs.key = 'application_name'
`

// UpsertApplicationName inserts or replaces the 'application_name' site
// config value.
func (q *sqlQuerier) UpsertApplicationName(ctx context.Context, value string) error {
	_, err := q.db.ExecContext(ctx, upsertApplicationName, value)
	return err
}

const upsertCoordinatorResumeTokenSigningKey = `-- name: UpsertCoordinatorResumeTokenSigningKey :exec
INSERT INTO site_configs (key, value) VALUES ('coordinator_resume_token_signing_key', $1)
ON CONFLICT (key) DO UPDATE set value = $1 WHERE site_configs.key = 'coordinator_resume_token_signing_key'
`

// UpsertCoordinatorResumeTokenSigningKey inserts or replaces the
// 'coordinator_resume_token_signing_key' site config value.
func (q *sqlQuerier) UpsertCoordinatorResumeTokenSigningKey(ctx context.Context, value string) error {
	_, err := q.db.ExecContext(ctx, upsertCoordinatorResumeTokenSigningKey, value)
	return err
}

const upsertDefaultProxy = `-- name: UpsertDefaultProxy :exec
INSERT INTO site_configs (key, value)
VALUES
	('default_proxy_display_name', $1 :: text),
	('default_proxy_icon_url', $2 :: text)
ON CONFLICT
	(key)
DO UPDATE SET value = EXCLUDED.value WHERE site_configs.key = EXCLUDED.key
`

// UpsertDefaultProxyParams holds the display values for UpsertDefaultProxy.
type UpsertDefaultProxyParams struct {
	DisplayName string `db:"display_name" json:"display_name"`
	IconUrl     string `db:"icon_url" json:"icon_url"`
}

// The default proxy is implied and not actually stored in the database.
// So we need to store it's configuration here for display purposes.
// The functional values are immutable and controlled implicitly.
func (q *sqlQuerier) UpsertDefaultProxy(ctx context.Context, arg UpsertDefaultProxyParams) error {
	_, err := q.db.ExecContext(ctx, upsertDefaultProxy, arg.DisplayName, arg.IconUrl)
	return err
}

const upsertHealthSettings = `-- name: UpsertHealthSettings :exec
INSERT INTO site_configs (key, value) VALUES ('health_settings', $1)
ON CONFLICT (key) DO UPDATE SET value = $1 WHERE site_configs.key = 'health_settings'
`

// UpsertHealthSettings inserts or replaces the 'health_settings' site config
// value.
func (q *sqlQuerier) UpsertHealthSettings(ctx context.Context, value string) error {
	_, err := q.db.ExecContext(ctx, upsertHealthSettings, value)
	return err
}

const upsertLastUpdateCheck = `-- name: UpsertLastUpdateCheck :exec
INSERT INTO site_configs (key, value) VALUES ('last_update_check', $1)
ON CONFLICT (key) DO UPDATE SET value = $1 WHERE site_configs.key = 'last_update_check'
`

// UpsertLastUpdateCheck inserts or replaces the 'last_update_check' site
// config value.
func (q *sqlQuerier) UpsertLastUpdateCheck(ctx context.Context, value string) error {
	_, err := q.db.ExecContext(ctx, upsertLastUpdateCheck, value)
	return err
}

const upsertLogoURL = `-- name: UpsertLogoURL :exec
INSERT INTO site_configs (key, value) VALUES ('logo_url', $1)
ON CONFLICT (key) DO UPDATE SET value = $1 WHERE site_configs.key = 'logo_url'
`

// UpsertLogoURL inserts or replaces the 'logo_url' site config value.
func (q *sqlQuerier) UpsertLogoURL(ctx context.Context, value string) error {
	_, err := q.db.ExecContext(ctx, upsertLogoURL, value)
	return err
}

const upsertNotificationsSettings = `-- name: UpsertNotificationsSettings :exec
INSERT INTO site_configs (key, value) VALUES ('notifications_settings', $1)
ON CONFLICT (key) DO UPDATE SET value = $1 WHERE site_configs.key = 'notifications_settings'
`

// UpsertNotificationsSettings inserts or replaces the
// 'notifications_settings' site config value.
func (q *sqlQuerier) UpsertNotificationsSettings(ctx context.Context, value string) error {
	_, err := q.db.ExecContext(ctx, upsertNotificationsSettings, value)
	return err
}

const upsertOAuthSigningKey = `-- name: UpsertOAuthSigningKey :exec
INSERT INTO site_configs (key, value) VALUES ('oauth_signing_key', $1)
ON CONFLICT (key) DO UPDATE set value = $1 WHERE site_configs.key = 'oauth_signing_key'
`

// UpsertOAuthSigningKey inserts or replaces the 'oauth_signing_key' site
// config value.
func (q *sqlQuerier) UpsertOAuthSigningKey(ctx context.Context, value string) error {
	_, err := q.db.ExecContext(ctx, upsertOAuthSigningKey, value)
	return err
}
|
|
|
|
const cleanTailnetCoordinators = `-- name: CleanTailnetCoordinators :exec
DELETE
FROM tailnet_coordinators
WHERE heartbeat_at < now() - INTERVAL '24 HOURS'
`

// CleanTailnetCoordinators removes coordinators whose last heartbeat is more
// than 24 hours old.
func (q *sqlQuerier) CleanTailnetCoordinators(ctx context.Context) error {
	_, err := q.db.ExecContext(ctx, cleanTailnetCoordinators)
	return err
}

const cleanTailnetLostPeers = `-- name: CleanTailnetLostPeers :exec
DELETE
FROM tailnet_peers
WHERE updated_at < now() - INTERVAL '24 HOURS' AND status = 'lost'::tailnet_status
`

// CleanTailnetLostPeers removes peers in the 'lost' status that have not been
// updated for more than 24 hours.
func (q *sqlQuerier) CleanTailnetLostPeers(ctx context.Context) error {
	_, err := q.db.ExecContext(ctx, cleanTailnetLostPeers)
	return err
}

const cleanTailnetTunnels = `-- name: CleanTailnetTunnels :exec
DELETE FROM tailnet_tunnels
WHERE updated_at < now() - INTERVAL '24 HOURS' AND
	NOT EXISTS (
		SELECT 1 FROM tailnet_peers
		WHERE id = tailnet_tunnels.src_id AND coordinator_id = tailnet_tunnels.coordinator_id
	)
`

// CleanTailnetTunnels removes tunnels older than 24 hours whose source peer
// no longer exists on the same coordinator.
func (q *sqlQuerier) CleanTailnetTunnels(ctx context.Context) error {
	_, err := q.db.ExecContext(ctx, cleanTailnetTunnels)
	return err
}
|
|
|
|
const deleteAllTailnetClientSubscriptions = `-- name: DeleteAllTailnetClientSubscriptions :exec
DELETE
FROM tailnet_client_subscriptions
WHERE client_id = $1 and coordinator_id = $2
`

// DeleteAllTailnetClientSubscriptionsParams holds the bind parameters for
// DeleteAllTailnetClientSubscriptions.
type DeleteAllTailnetClientSubscriptionsParams struct {
	ClientID      uuid.UUID `db:"client_id" json:"client_id"`
	CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
}

// DeleteAllTailnetClientSubscriptions removes every subscription the given
// client holds on the given coordinator.
func (q *sqlQuerier) DeleteAllTailnetClientSubscriptions(ctx context.Context, arg DeleteAllTailnetClientSubscriptionsParams) error {
	_, err := q.db.ExecContext(ctx, deleteAllTailnetClientSubscriptions, arg.ClientID, arg.CoordinatorID)
	return err
}

const deleteAllTailnetTunnels = `-- name: DeleteAllTailnetTunnels :exec
DELETE
FROM tailnet_tunnels
WHERE coordinator_id = $1 and src_id = $2
`

// DeleteAllTailnetTunnelsParams holds the bind parameters for
// DeleteAllTailnetTunnels.
type DeleteAllTailnetTunnelsParams struct {
	CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
	SrcID         uuid.UUID `db:"src_id" json:"src_id"`
}

// DeleteAllTailnetTunnels removes every tunnel originating from the given
// source on the given coordinator.
func (q *sqlQuerier) DeleteAllTailnetTunnels(ctx context.Context, arg DeleteAllTailnetTunnelsParams) error {
	_, err := q.db.ExecContext(ctx, deleteAllTailnetTunnels, arg.CoordinatorID, arg.SrcID)
	return err
}

const deleteCoordinator = `-- name: DeleteCoordinator :exec
DELETE
FROM tailnet_coordinators
WHERE id = $1
`

// DeleteCoordinator removes a tailnet coordinator row by ID.
func (q *sqlQuerier) DeleteCoordinator(ctx context.Context, id uuid.UUID) error {
	_, err := q.db.ExecContext(ctx, deleteCoordinator, id)
	return err
}

const deleteTailnetAgent = `-- name: DeleteTailnetAgent :one
DELETE
FROM tailnet_agents
WHERE id = $1 and coordinator_id = $2
RETURNING id, coordinator_id
`

// DeleteTailnetAgentParams holds the bind parameters for DeleteTailnetAgent.
type DeleteTailnetAgentParams struct {
	ID            uuid.UUID `db:"id" json:"id"`
	CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
}

// DeleteTailnetAgentRow is the (id, coordinator_id) pair returned by the
// delete's RETURNING clause.
type DeleteTailnetAgentRow struct {
	ID            uuid.UUID `db:"id" json:"id"`
	CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
}

// DeleteTailnetAgent removes an agent scoped to a coordinator, returning the
// deleted keys (sql.ErrNoRows if no matching row existed).
func (q *sqlQuerier) DeleteTailnetAgent(ctx context.Context, arg DeleteTailnetAgentParams) (DeleteTailnetAgentRow, error) {
	row := q.db.QueryRowContext(ctx, deleteTailnetAgent, arg.ID, arg.CoordinatorID)
	var i DeleteTailnetAgentRow
	err := row.Scan(&i.ID, &i.CoordinatorID)
	return i, err
}

const deleteTailnetClient = `-- name: DeleteTailnetClient :one
DELETE
FROM tailnet_clients
WHERE id = $1 and coordinator_id = $2
RETURNING id, coordinator_id
`

// DeleteTailnetClientParams holds the bind parameters for DeleteTailnetClient.
type DeleteTailnetClientParams struct {
	ID            uuid.UUID `db:"id" json:"id"`
	CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
}

// DeleteTailnetClientRow is the (id, coordinator_id) pair returned by the
// delete's RETURNING clause.
type DeleteTailnetClientRow struct {
	ID            uuid.UUID `db:"id" json:"id"`
	CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
}

// DeleteTailnetClient removes a client scoped to a coordinator, returning the
// deleted keys (sql.ErrNoRows if no matching row existed).
func (q *sqlQuerier) DeleteTailnetClient(ctx context.Context, arg DeleteTailnetClientParams) (DeleteTailnetClientRow, error) {
	row := q.db.QueryRowContext(ctx, deleteTailnetClient, arg.ID, arg.CoordinatorID)
	var i DeleteTailnetClientRow
	err := row.Scan(&i.ID, &i.CoordinatorID)
	return i, err
}

const deleteTailnetClientSubscription = `-- name: DeleteTailnetClientSubscription :exec
DELETE
FROM tailnet_client_subscriptions
WHERE client_id = $1 and agent_id = $2 and coordinator_id = $3
`

// DeleteTailnetClientSubscriptionParams holds the bind parameters for
// DeleteTailnetClientSubscription.
type DeleteTailnetClientSubscriptionParams struct {
	ClientID      uuid.UUID `db:"client_id" json:"client_id"`
	AgentID       uuid.UUID `db:"agent_id" json:"agent_id"`
	CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
}

// DeleteTailnetClientSubscription removes a single client→agent subscription
// on the given coordinator.
func (q *sqlQuerier) DeleteTailnetClientSubscription(ctx context.Context, arg DeleteTailnetClientSubscriptionParams) error {
	_, err := q.db.ExecContext(ctx, deleteTailnetClientSubscription, arg.ClientID, arg.AgentID, arg.CoordinatorID)
	return err
}

const deleteTailnetPeer = `-- name: DeleteTailnetPeer :one
DELETE
FROM tailnet_peers
WHERE id = $1 and coordinator_id = $2
RETURNING id, coordinator_id
`

// DeleteTailnetPeerParams holds the bind parameters for DeleteTailnetPeer.
type DeleteTailnetPeerParams struct {
	ID            uuid.UUID `db:"id" json:"id"`
	CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
}

// DeleteTailnetPeerRow is the (id, coordinator_id) pair returned by the
// delete's RETURNING clause.
type DeleteTailnetPeerRow struct {
	ID            uuid.UUID `db:"id" json:"id"`
	CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
}

// DeleteTailnetPeer removes a peer scoped to a coordinator, returning the
// deleted keys (sql.ErrNoRows if no matching row existed).
func (q *sqlQuerier) DeleteTailnetPeer(ctx context.Context, arg DeleteTailnetPeerParams) (DeleteTailnetPeerRow, error) {
	row := q.db.QueryRowContext(ctx, deleteTailnetPeer, arg.ID, arg.CoordinatorID)
	var i DeleteTailnetPeerRow
	err := row.Scan(&i.ID, &i.CoordinatorID)
	return i, err
}

const deleteTailnetTunnel = `-- name: DeleteTailnetTunnel :one
DELETE
FROM tailnet_tunnels
WHERE coordinator_id = $1 and src_id = $2 and dst_id = $3
RETURNING coordinator_id, src_id, dst_id
`

// DeleteTailnetTunnelParams holds the bind parameters for DeleteTailnetTunnel.
type DeleteTailnetTunnelParams struct {
	CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
	SrcID         uuid.UUID `db:"src_id" json:"src_id"`
	DstID         uuid.UUID `db:"dst_id" json:"dst_id"`
}

// DeleteTailnetTunnelRow is the key triple returned by the delete's
// RETURNING clause.
type DeleteTailnetTunnelRow struct {
	CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
	SrcID         uuid.UUID `db:"src_id" json:"src_id"`
	DstID         uuid.UUID `db:"dst_id" json:"dst_id"`
}

// DeleteTailnetTunnel removes a single tunnel identified by coordinator,
// source, and destination, returning the deleted keys.
func (q *sqlQuerier) DeleteTailnetTunnel(ctx context.Context, arg DeleteTailnetTunnelParams) (DeleteTailnetTunnelRow, error) {
	row := q.db.QueryRowContext(ctx, deleteTailnetTunnel, arg.CoordinatorID, arg.SrcID, arg.DstID)
	var i DeleteTailnetTunnelRow
	err := row.Scan(&i.CoordinatorID, &i.SrcID, &i.DstID)
	return i, err
}
|
|
|
|
const getAllTailnetAgents = `-- name: GetAllTailnetAgents :many
SELECT id, coordinator_id, updated_at, node
FROM tailnet_agents
`

// GetAllTailnetAgents returns every row of tailnet_agents, unfiltered.
func (q *sqlQuerier) GetAllTailnetAgents(ctx context.Context) ([]TailnetAgent, error) {
	rows, err := q.db.QueryContext(ctx, getAllTailnetAgents)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []TailnetAgent
	for rows.Next() {
		var i TailnetAgent
		if err := rows.Scan(
			&i.ID,
			&i.CoordinatorID,
			&i.UpdatedAt,
			&i.Node,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err surfaces any error deferred Close would hide.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getAllTailnetCoordinators = `-- name: GetAllTailnetCoordinators :many

SELECT id, heartbeat_at FROM tailnet_coordinators
`

// GetAllTailnetCoordinators returns every coordinator row (id, heartbeat_at).
//
// For PG Coordinator HTMLDebug
func (q *sqlQuerier) GetAllTailnetCoordinators(ctx context.Context) ([]TailnetCoordinator, error) {
	rows, err := q.db.QueryContext(ctx, getAllTailnetCoordinators)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []TailnetCoordinator
	for rows.Next() {
		var i TailnetCoordinator
		if err := rows.Scan(&i.ID, &i.HeartbeatAt); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getAllTailnetPeers = `-- name: GetAllTailnetPeers :many
SELECT id, coordinator_id, updated_at, node, status FROM tailnet_peers
`

// GetAllTailnetPeers returns every row of tailnet_peers, unfiltered.
func (q *sqlQuerier) GetAllTailnetPeers(ctx context.Context) ([]TailnetPeer, error) {
	rows, err := q.db.QueryContext(ctx, getAllTailnetPeers)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []TailnetPeer
	for rows.Next() {
		var i TailnetPeer
		if err := rows.Scan(
			&i.ID,
			&i.CoordinatorID,
			&i.UpdatedAt,
			&i.Node,
			&i.Status,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getAllTailnetTunnels = `-- name: GetAllTailnetTunnels :many
SELECT coordinator_id, src_id, dst_id, updated_at FROM tailnet_tunnels
`

// GetAllTailnetTunnels returns every row of tailnet_tunnels, unfiltered.
func (q *sqlQuerier) GetAllTailnetTunnels(ctx context.Context) ([]TailnetTunnel, error) {
	rows, err := q.db.QueryContext(ctx, getAllTailnetTunnels)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []TailnetTunnel
	for rows.Next() {
		var i TailnetTunnel
		if err := rows.Scan(
			&i.CoordinatorID,
			&i.SrcID,
			&i.DstID,
			&i.UpdatedAt,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getTailnetAgents = `-- name: GetTailnetAgents :many
SELECT id, coordinator_id, updated_at, node
FROM tailnet_agents
WHERE id = $1
`

// GetTailnetAgents returns all agent rows with the given id (the table key
// includes coordinator_id, so one agent id may have multiple rows).
func (q *sqlQuerier) GetTailnetAgents(ctx context.Context, id uuid.UUID) ([]TailnetAgent, error) {
	rows, err := q.db.QueryContext(ctx, getTailnetAgents, id)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []TailnetAgent
	for rows.Next() {
		var i TailnetAgent
		if err := rows.Scan(
			&i.ID,
			&i.CoordinatorID,
			&i.UpdatedAt,
			&i.Node,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getTailnetClientsForAgent = `-- name: GetTailnetClientsForAgent :many
SELECT id, coordinator_id, updated_at, node
FROM tailnet_clients
WHERE id IN (
	SELECT tailnet_client_subscriptions.client_id
	FROM tailnet_client_subscriptions
	WHERE tailnet_client_subscriptions.agent_id = $1
)
`

// GetTailnetClientsForAgent returns all client rows subscribed to the given
// agent, resolved through the tailnet_client_subscriptions join table.
func (q *sqlQuerier) GetTailnetClientsForAgent(ctx context.Context, agentID uuid.UUID) ([]TailnetClient, error) {
	rows, err := q.db.QueryContext(ctx, getTailnetClientsForAgent, agentID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []TailnetClient
	for rows.Next() {
		var i TailnetClient
		if err := rows.Scan(
			&i.ID,
			&i.CoordinatorID,
			&i.UpdatedAt,
			&i.Node,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getTailnetPeers = `-- name: GetTailnetPeers :many
SELECT id, coordinator_id, updated_at, node, status FROM tailnet_peers WHERE id = $1
`

// GetTailnetPeers returns all peer rows with the given id (one per
// coordinator that knows the peer).
func (q *sqlQuerier) GetTailnetPeers(ctx context.Context, id uuid.UUID) ([]TailnetPeer, error) {
	rows, err := q.db.QueryContext(ctx, getTailnetPeers, id)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []TailnetPeer
	for rows.Next() {
		var i TailnetPeer
		if err := rows.Scan(
			&i.ID,
			&i.CoordinatorID,
			&i.UpdatedAt,
			&i.Node,
			&i.Status,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getTailnetTunnelPeerBindings = `-- name: GetTailnetTunnelPeerBindings :many
SELECT tailnet_tunnels.dst_id as peer_id, tailnet_peers.coordinator_id, tailnet_peers.updated_at, tailnet_peers.node, tailnet_peers.status
FROM tailnet_tunnels
INNER JOIN tailnet_peers ON tailnet_tunnels.dst_id = tailnet_peers.id
WHERE tailnet_tunnels.src_id = $1
UNION
SELECT tailnet_tunnels.src_id as peer_id, tailnet_peers.coordinator_id, tailnet_peers.updated_at, tailnet_peers.node, tailnet_peers.status
FROM tailnet_tunnels
INNER JOIN tailnet_peers ON tailnet_tunnels.src_id = tailnet_peers.id
WHERE tailnet_tunnels.dst_id = $1
`

// GetTailnetTunnelPeerBindingsRow is a peer on the far end of a tunnel,
// joined with that peer's current node/status data.
type GetTailnetTunnelPeerBindingsRow struct {
	PeerID        uuid.UUID     `db:"peer_id" json:"peer_id"`
	CoordinatorID uuid.UUID     `db:"coordinator_id" json:"coordinator_id"`
	UpdatedAt     time.Time     `db:"updated_at" json:"updated_at"`
	Node          []byte        `db:"node" json:"node"`
	Status        TailnetStatus `db:"status" json:"status"`
}

// GetTailnetTunnelPeerBindings returns peer data for every peer tunneled to
// or from srcID; the UNION covers tunnels in both directions.
func (q *sqlQuerier) GetTailnetTunnelPeerBindings(ctx context.Context, srcID uuid.UUID) ([]GetTailnetTunnelPeerBindingsRow, error) {
	rows, err := q.db.QueryContext(ctx, getTailnetTunnelPeerBindings, srcID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetTailnetTunnelPeerBindingsRow
	for rows.Next() {
		var i GetTailnetTunnelPeerBindingsRow
		if err := rows.Scan(
			&i.PeerID,
			&i.CoordinatorID,
			&i.UpdatedAt,
			&i.Node,
			&i.Status,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getTailnetTunnelPeerIDs = `-- name: GetTailnetTunnelPeerIDs :many
SELECT dst_id as peer_id, coordinator_id, updated_at
FROM tailnet_tunnels
WHERE tailnet_tunnels.src_id = $1
UNION
SELECT src_id as peer_id, coordinator_id, updated_at
FROM tailnet_tunnels
WHERE tailnet_tunnels.dst_id = $1
`

// GetTailnetTunnelPeerIDsRow is the id of a peer on the far end of a tunnel.
type GetTailnetTunnelPeerIDsRow struct {
	PeerID        uuid.UUID `db:"peer_id" json:"peer_id"`
	CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
	UpdatedAt     time.Time `db:"updated_at" json:"updated_at"`
}

// GetTailnetTunnelPeerIDs returns the ids of every peer tunneled to or from
// srcID (both tunnel directions), without joining peer node data.
func (q *sqlQuerier) GetTailnetTunnelPeerIDs(ctx context.Context, srcID uuid.UUID) ([]GetTailnetTunnelPeerIDsRow, error) {
	rows, err := q.db.QueryContext(ctx, getTailnetTunnelPeerIDs, srcID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetTailnetTunnelPeerIDsRow
	for rows.Next() {
		var i GetTailnetTunnelPeerIDsRow
		if err := rows.Scan(&i.PeerID, &i.CoordinatorID, &i.UpdatedAt); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const updateTailnetPeerStatusByCoordinator = `-- name: UpdateTailnetPeerStatusByCoordinator :exec
UPDATE
	tailnet_peers
SET
	status = $2
WHERE
	coordinator_id = $1
`

// UpdateTailnetPeerStatusByCoordinatorParams sets a status for all peers
// owned by one coordinator.
type UpdateTailnetPeerStatusByCoordinatorParams struct {
	CoordinatorID uuid.UUID     `db:"coordinator_id" json:"coordinator_id"`
	Status        TailnetStatus `db:"status" json:"status"`
}

// UpdateTailnetPeerStatusByCoordinator bulk-updates the status of every peer
// belonging to the given coordinator.
func (q *sqlQuerier) UpdateTailnetPeerStatusByCoordinator(ctx context.Context, arg UpdateTailnetPeerStatusByCoordinatorParams) error {
	_, err := q.db.ExecContext(ctx, updateTailnetPeerStatusByCoordinator, arg.CoordinatorID, arg.Status)
	return err
}
|
|
|
|
const upsertTailnetAgent = `-- name: UpsertTailnetAgent :one
INSERT INTO
	tailnet_agents (
	id,
	coordinator_id,
	node,
	updated_at
)
VALUES
	($1, $2, $3, now() at time zone 'utc')
ON CONFLICT (id, coordinator_id)
DO UPDATE SET
	id = $1,
	coordinator_id = $2,
	node = $3,
	updated_at = now() at time zone 'utc'
RETURNING id, coordinator_id, updated_at, node
`

// UpsertTailnetAgentParams carries the agent key and its serialized node.
type UpsertTailnetAgentParams struct {
	ID            uuid.UUID       `db:"id" json:"id"`
	CoordinatorID uuid.UUID       `db:"coordinator_id" json:"coordinator_id"`
	Node          json.RawMessage `db:"node" json:"node"`
}

// UpsertTailnetAgent inserts or refreshes an agent row keyed by
// (id, coordinator_id); updated_at is always set server-side to UTC now.
func (q *sqlQuerier) UpsertTailnetAgent(ctx context.Context, arg UpsertTailnetAgentParams) (TailnetAgent, error) {
	row := q.db.QueryRowContext(ctx, upsertTailnetAgent, arg.ID, arg.CoordinatorID, arg.Node)
	var i TailnetAgent
	err := row.Scan(
		&i.ID,
		&i.CoordinatorID,
		&i.UpdatedAt,
		&i.Node,
	)
	return i, err
}
|
|
|
|
const upsertTailnetClient = `-- name: UpsertTailnetClient :one
INSERT INTO
	tailnet_clients (
	id,
	coordinator_id,
	node,
	updated_at
)
VALUES
	($1, $2, $3, now() at time zone 'utc')
ON CONFLICT (id, coordinator_id)
DO UPDATE SET
	id = $1,
	coordinator_id = $2,
	node = $3,
	updated_at = now() at time zone 'utc'
RETURNING id, coordinator_id, updated_at, node
`

// UpsertTailnetClientParams carries the client key and its serialized node.
type UpsertTailnetClientParams struct {
	ID            uuid.UUID       `db:"id" json:"id"`
	CoordinatorID uuid.UUID       `db:"coordinator_id" json:"coordinator_id"`
	Node          json.RawMessage `db:"node" json:"node"`
}

// UpsertTailnetClient inserts or refreshes a client row keyed by
// (id, coordinator_id); updated_at is always set server-side to UTC now.
func (q *sqlQuerier) UpsertTailnetClient(ctx context.Context, arg UpsertTailnetClientParams) (TailnetClient, error) {
	row := q.db.QueryRowContext(ctx, upsertTailnetClient, arg.ID, arg.CoordinatorID, arg.Node)
	var i TailnetClient
	err := row.Scan(
		&i.ID,
		&i.CoordinatorID,
		&i.UpdatedAt,
		&i.Node,
	)
	return i, err
}
|
|
|
|
const upsertTailnetClientSubscription = `-- name: UpsertTailnetClientSubscription :exec
INSERT INTO
	tailnet_client_subscriptions (
	client_id,
	coordinator_id,
	agent_id,
	updated_at
)
VALUES
	($1, $2, $3, now() at time zone 'utc')
ON CONFLICT (client_id, coordinator_id, agent_id)
DO UPDATE SET
	client_id = $1,
	coordinator_id = $2,
	agent_id = $3,
	updated_at = now() at time zone 'utc'
`

// UpsertTailnetClientSubscriptionParams keys a client->agent subscription.
type UpsertTailnetClientSubscriptionParams struct {
	ClientID      uuid.UUID `db:"client_id" json:"client_id"`
	CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
	AgentID       uuid.UUID `db:"agent_id" json:"agent_id"`
}

// UpsertTailnetClientSubscription inserts or refreshes a subscription keyed
// by (client_id, coordinator_id, agent_id), bumping updated_at to UTC now.
func (q *sqlQuerier) UpsertTailnetClientSubscription(ctx context.Context, arg UpsertTailnetClientSubscriptionParams) error {
	_, err := q.db.ExecContext(ctx, upsertTailnetClientSubscription, arg.ClientID, arg.CoordinatorID, arg.AgentID)
	return err
}
|
|
|
|
const upsertTailnetCoordinator = `-- name: UpsertTailnetCoordinator :one
INSERT INTO
	tailnet_coordinators (
	id,
	heartbeat_at
)
VALUES
	($1, now() at time zone 'utc')
ON CONFLICT (id)
DO UPDATE SET
	id = $1,
	heartbeat_at = now() at time zone 'utc'
RETURNING id, heartbeat_at
`

// UpsertTailnetCoordinator inserts the coordinator row or, if it exists,
// refreshes its heartbeat_at to UTC now; returns the resulting row.
func (q *sqlQuerier) UpsertTailnetCoordinator(ctx context.Context, id uuid.UUID) (TailnetCoordinator, error) {
	row := q.db.QueryRowContext(ctx, upsertTailnetCoordinator, id)
	var i TailnetCoordinator
	err := row.Scan(&i.ID, &i.HeartbeatAt)
	return i, err
}
|
|
|
|
const upsertTailnetPeer = `-- name: UpsertTailnetPeer :one
INSERT INTO
	tailnet_peers (
	id,
	coordinator_id,
	node,
	status,
	updated_at
)
VALUES
	($1, $2, $3, $4, now() at time zone 'utc')
ON CONFLICT (id, coordinator_id)
DO UPDATE SET
	id = $1,
	coordinator_id = $2,
	node = $3,
	status = $4,
	updated_at = now() at time zone 'utc'
RETURNING id, coordinator_id, updated_at, node, status
`

// UpsertTailnetPeerParams carries the peer key, its serialized node, and its
// connection status.
type UpsertTailnetPeerParams struct {
	ID            uuid.UUID     `db:"id" json:"id"`
	CoordinatorID uuid.UUID     `db:"coordinator_id" json:"coordinator_id"`
	Node          []byte        `db:"node" json:"node"`
	Status        TailnetStatus `db:"status" json:"status"`
}

// UpsertTailnetPeer inserts or refreshes a peer row keyed by
// (id, coordinator_id); updated_at is always set server-side to UTC now.
func (q *sqlQuerier) UpsertTailnetPeer(ctx context.Context, arg UpsertTailnetPeerParams) (TailnetPeer, error) {
	row := q.db.QueryRowContext(ctx, upsertTailnetPeer,
		arg.ID,
		arg.CoordinatorID,
		arg.Node,
		arg.Status,
	)
	var i TailnetPeer
	err := row.Scan(
		&i.ID,
		&i.CoordinatorID,
		&i.UpdatedAt,
		&i.Node,
		&i.Status,
	)
	return i, err
}
|
|
|
|
const upsertTailnetTunnel = `-- name: UpsertTailnetTunnel :one
INSERT INTO
	tailnet_tunnels (
	coordinator_id,
	src_id,
	dst_id,
	updated_at
)
VALUES
	($1, $2, $3, now() at time zone 'utc')
ON CONFLICT (coordinator_id, src_id, dst_id)
DO UPDATE SET
	coordinator_id = $1,
	src_id = $2,
	dst_id = $3,
	updated_at = now() at time zone 'utc'
RETURNING coordinator_id, src_id, dst_id, updated_at
`

// UpsertTailnetTunnelParams keys a tunnel between two peers on a coordinator.
type UpsertTailnetTunnelParams struct {
	CoordinatorID uuid.UUID `db:"coordinator_id" json:"coordinator_id"`
	SrcID         uuid.UUID `db:"src_id" json:"src_id"`
	DstID         uuid.UUID `db:"dst_id" json:"dst_id"`
}

// UpsertTailnetTunnel inserts or refreshes a tunnel row keyed by
// (coordinator_id, src_id, dst_id), bumping updated_at to UTC now.
func (q *sqlQuerier) UpsertTailnetTunnel(ctx context.Context, arg UpsertTailnetTunnelParams) (TailnetTunnel, error) {
	row := q.db.QueryRowContext(ctx, upsertTailnetTunnel, arg.CoordinatorID, arg.SrcID, arg.DstID)
	var i TailnetTunnel
	err := row.Scan(
		&i.CoordinatorID,
		&i.SrcID,
		&i.DstID,
		&i.UpdatedAt,
	)
	return i, err
}
|
|
|
|
const getTemplateAverageBuildTime = `-- name: GetTemplateAverageBuildTime :one
WITH build_times AS (
SELECT
	EXTRACT(EPOCH FROM (pj.completed_at - pj.started_at))::FLOAT AS exec_time_sec,
	workspace_builds.transition
FROM
	workspace_builds
JOIN template_versions ON
	workspace_builds.template_version_id = template_versions.id
JOIN provisioner_jobs pj ON
	workspace_builds.job_id = pj.id
WHERE
	template_versions.template_id = $1 AND
		(pj.completed_at IS NOT NULL) AND (pj.started_at IS NOT NULL) AND
		(pj.started_at > $2) AND
		(pj.canceled_at IS NULL) AND
		((pj.error IS NULL) OR (pj.error = ''))
ORDER BY
	workspace_builds.created_at DESC
)
SELECT
	-- Postgres offers no clear way to DRY this short of a function or other
	-- complexities.
	coalesce((PERCENTILE_DISC(0.5) WITHIN GROUP(ORDER BY exec_time_sec) FILTER (WHERE transition = 'start')), -1)::FLOAT AS start_50,
	coalesce((PERCENTILE_DISC(0.5) WITHIN GROUP(ORDER BY exec_time_sec) FILTER (WHERE transition = 'stop')), -1)::FLOAT AS stop_50,
	coalesce((PERCENTILE_DISC(0.5) WITHIN GROUP(ORDER BY exec_time_sec) FILTER (WHERE transition = 'delete')), -1)::FLOAT AS delete_50,
	coalesce((PERCENTILE_DISC(0.95) WITHIN GROUP(ORDER BY exec_time_sec) FILTER (WHERE transition = 'start')), -1)::FLOAT AS start_95,
	coalesce((PERCENTILE_DISC(0.95) WITHIN GROUP(ORDER BY exec_time_sec) FILTER (WHERE transition = 'stop')), -1)::FLOAT AS stop_95,
	coalesce((PERCENTILE_DISC(0.95) WITHIN GROUP(ORDER BY exec_time_sec) FILTER (WHERE transition = 'delete')), -1)::FLOAT AS delete_95
FROM build_times
`

// GetTemplateAverageBuildTimeParams scopes the stats to one template and to
// jobs started after StartTime.
type GetTemplateAverageBuildTimeParams struct {
	TemplateID uuid.NullUUID `db:"template_id" json:"template_id"`
	StartTime  sql.NullTime  `db:"start_time" json:"start_time"`
}

// GetTemplateAverageBuildTimeRow holds 50th/95th percentile build durations
// (seconds) per transition; -1 when no matching builds exist (coalesce).
type GetTemplateAverageBuildTimeRow struct {
	Start50  float64 `db:"start_50" json:"start_50"`
	Stop50   float64 `db:"stop_50" json:"stop_50"`
	Delete50 float64 `db:"delete_50" json:"delete_50"`
	Start95  float64 `db:"start_95" json:"start_95"`
	Stop95   float64 `db:"stop_95" json:"stop_95"`
	Delete95 float64 `db:"delete_95" json:"delete_95"`
}

// GetTemplateAverageBuildTime computes build-duration percentiles for
// successful, non-canceled builds of the template.
func (q *sqlQuerier) GetTemplateAverageBuildTime(ctx context.Context, arg GetTemplateAverageBuildTimeParams) (GetTemplateAverageBuildTimeRow, error) {
	row := q.db.QueryRowContext(ctx, getTemplateAverageBuildTime, arg.TemplateID, arg.StartTime)
	var i GetTemplateAverageBuildTimeRow
	err := row.Scan(
		&i.Start50,
		&i.Stop50,
		&i.Delete50,
		&i.Start95,
		&i.Stop95,
		&i.Delete95,
	)
	return i, err
}
|
|
|
|
const getTemplateByID = `-- name: GetTemplateByID :one
SELECT
	id, created_at, updated_at, organization_id, deleted, name, provisioner, active_version_id, description, default_ttl, created_by, icon, user_acl, group_acl, display_name, allow_user_cancel_workspace_jobs, allow_user_autostart, allow_user_autostop, failure_ttl, time_til_dormant, time_til_dormant_autodelete, autostop_requirement_days_of_week, autostop_requirement_weeks, autostart_block_days_of_week, require_active_version, deprecated, activity_bump, max_port_sharing_level, created_by_avatar_url, created_by_username, organization_name, organization_display_name, organization_icon
FROM
	template_with_names
WHERE
	id = $1
LIMIT
	1
`

// GetTemplateByID fetches one template (including creator/organization
// display columns from the template_with_names view) by primary key.
func (q *sqlQuerier) GetTemplateByID(ctx context.Context, id uuid.UUID) (Template, error) {
	row := q.db.QueryRowContext(ctx, getTemplateByID, id)
	var i Template
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.OrganizationID,
		&i.Deleted,
		&i.Name,
		&i.Provisioner,
		&i.ActiveVersionID,
		&i.Description,
		&i.DefaultTTL,
		&i.CreatedBy,
		&i.Icon,
		&i.UserACL,
		&i.GroupACL,
		&i.DisplayName,
		&i.AllowUserCancelWorkspaceJobs,
		&i.AllowUserAutostart,
		&i.AllowUserAutostop,
		&i.FailureTTL,
		&i.TimeTilDormant,
		&i.TimeTilDormantAutoDelete,
		&i.AutostopRequirementDaysOfWeek,
		&i.AutostopRequirementWeeks,
		&i.AutostartBlockDaysOfWeek,
		&i.RequireActiveVersion,
		&i.Deprecated,
		&i.ActivityBump,
		&i.MaxPortSharingLevel,
		&i.CreatedByAvatarURL,
		&i.CreatedByUsername,
		&i.OrganizationName,
		&i.OrganizationDisplayName,
		&i.OrganizationIcon,
	)
	return i, err
}
|
|
|
|
const getTemplateByOrganizationAndName = `-- name: GetTemplateByOrganizationAndName :one
SELECT
	id, created_at, updated_at, organization_id, deleted, name, provisioner, active_version_id, description, default_ttl, created_by, icon, user_acl, group_acl, display_name, allow_user_cancel_workspace_jobs, allow_user_autostart, allow_user_autostop, failure_ttl, time_til_dormant, time_til_dormant_autodelete, autostop_requirement_days_of_week, autostop_requirement_weeks, autostart_block_days_of_week, require_active_version, deprecated, activity_bump, max_port_sharing_level, created_by_avatar_url, created_by_username, organization_name, organization_display_name, organization_icon
FROM
	template_with_names AS templates
WHERE
	organization_id = $1
	AND deleted = $2
	AND LOWER("name") = LOWER($3)
LIMIT
	1
`

// GetTemplateByOrganizationAndNameParams: name matching is case-insensitive;
// Deleted selects between live and soft-deleted templates.
type GetTemplateByOrganizationAndNameParams struct {
	OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
	Deleted        bool      `db:"deleted" json:"deleted"`
	Name           string    `db:"name" json:"name"`
}

// GetTemplateByOrganizationAndName fetches one template by organization and
// case-insensitive name.
func (q *sqlQuerier) GetTemplateByOrganizationAndName(ctx context.Context, arg GetTemplateByOrganizationAndNameParams) (Template, error) {
	row := q.db.QueryRowContext(ctx, getTemplateByOrganizationAndName, arg.OrganizationID, arg.Deleted, arg.Name)
	var i Template
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.OrganizationID,
		&i.Deleted,
		&i.Name,
		&i.Provisioner,
		&i.ActiveVersionID,
		&i.Description,
		&i.DefaultTTL,
		&i.CreatedBy,
		&i.Icon,
		&i.UserACL,
		&i.GroupACL,
		&i.DisplayName,
		&i.AllowUserCancelWorkspaceJobs,
		&i.AllowUserAutostart,
		&i.AllowUserAutostop,
		&i.FailureTTL,
		&i.TimeTilDormant,
		&i.TimeTilDormantAutoDelete,
		&i.AutostopRequirementDaysOfWeek,
		&i.AutostopRequirementWeeks,
		&i.AutostartBlockDaysOfWeek,
		&i.RequireActiveVersion,
		&i.Deprecated,
		&i.ActivityBump,
		&i.MaxPortSharingLevel,
		&i.CreatedByAvatarURL,
		&i.CreatedByUsername,
		&i.OrganizationName,
		&i.OrganizationDisplayName,
		&i.OrganizationIcon,
	)
	return i, err
}
|
|
|
|
const getTemplates = `-- name: GetTemplates :many
SELECT id, created_at, updated_at, organization_id, deleted, name, provisioner, active_version_id, description, default_ttl, created_by, icon, user_acl, group_acl, display_name, allow_user_cancel_workspace_jobs, allow_user_autostart, allow_user_autostop, failure_ttl, time_til_dormant, time_til_dormant_autodelete, autostop_requirement_days_of_week, autostop_requirement_weeks, autostart_block_days_of_week, require_active_version, deprecated, activity_bump, max_port_sharing_level, created_by_avatar_url, created_by_username, organization_name, organization_display_name, organization_icon FROM template_with_names AS templates
ORDER BY (name, id) ASC
`

// GetTemplates returns every template (including soft-deleted ones — no
// deleted filter in the SQL), ordered by (name, id).
func (q *sqlQuerier) GetTemplates(ctx context.Context) ([]Template, error) {
	rows, err := q.db.QueryContext(ctx, getTemplates)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []Template
	for rows.Next() {
		var i Template
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.OrganizationID,
			&i.Deleted,
			&i.Name,
			&i.Provisioner,
			&i.ActiveVersionID,
			&i.Description,
			&i.DefaultTTL,
			&i.CreatedBy,
			&i.Icon,
			&i.UserACL,
			&i.GroupACL,
			&i.DisplayName,
			&i.AllowUserCancelWorkspaceJobs,
			&i.AllowUserAutostart,
			&i.AllowUserAutostop,
			&i.FailureTTL,
			&i.TimeTilDormant,
			&i.TimeTilDormantAutoDelete,
			&i.AutostopRequirementDaysOfWeek,
			&i.AutostopRequirementWeeks,
			&i.AutostartBlockDaysOfWeek,
			&i.RequireActiveVersion,
			&i.Deprecated,
			&i.ActivityBump,
			&i.MaxPortSharingLevel,
			&i.CreatedByAvatarURL,
			&i.CreatedByUsername,
			&i.OrganizationName,
			&i.OrganizationDisplayName,
			&i.OrganizationIcon,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getTemplatesWithFilter = `-- name: GetTemplatesWithFilter :many
SELECT
	id, created_at, updated_at, organization_id, deleted, name, provisioner, active_version_id, description, default_ttl, created_by, icon, user_acl, group_acl, display_name, allow_user_cancel_workspace_jobs, allow_user_autostart, allow_user_autostop, failure_ttl, time_til_dormant, time_til_dormant_autodelete, autostop_requirement_days_of_week, autostop_requirement_weeks, autostart_block_days_of_week, require_active_version, deprecated, activity_bump, max_port_sharing_level, created_by_avatar_url, created_by_username, organization_name, organization_display_name, organization_icon
FROM
	template_with_names AS templates
WHERE
	-- Optionally include deleted templates
	templates.deleted = $1
	-- Filter by organization_id
	AND CASE
		WHEN $2 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
			organization_id = $2
		ELSE true
	END
	-- Filter by exact name
	AND CASE
		WHEN $3 :: text != '' THEN
			LOWER("name") = LOWER($3)
		ELSE true
	END
	-- Filter by name, matching on substring
	AND CASE
		WHEN $4 :: text != '' THEN
			lower(name) ILIKE '%' || lower($4) || '%'
		ELSE true
	END
	-- Filter by ids
	AND CASE
		WHEN array_length($5 :: uuid[], 1) > 0 THEN
			id = ANY($5)
		ELSE true
	END
	-- Filter by deprecated
	AND CASE
		WHEN $6 :: boolean IS NOT NULL THEN
			CASE
				WHEN $6 :: boolean THEN
					deprecated != ''
				ELSE
					deprecated = ''
			END
		ELSE true
	END
	-- Authorize Filter clause will be injected below in GetAuthorizedTemplates
	-- @authorize_filter
ORDER BY (name, id) ASC
`

// GetTemplatesWithFilterParams: each zero value (empty string, zero UUID,
// empty slice, invalid NullBool) disables its corresponding filter.
type GetTemplatesWithFilterParams struct {
	Deleted        bool         `db:"deleted" json:"deleted"`
	OrganizationID uuid.UUID    `db:"organization_id" json:"organization_id"`
	ExactName      string       `db:"exact_name" json:"exact_name"`
	FuzzyName      string       `db:"fuzzy_name" json:"fuzzy_name"`
	IDs            []uuid.UUID  `db:"ids" json:"ids"`
	Deprecated     sql.NullBool `db:"deprecated" json:"deprecated"`
}

// GetTemplatesWithFilter lists templates matching the optional filters,
// ordered by (name, id). IDs is bound through pq.Array for the uuid[] param.
func (q *sqlQuerier) GetTemplatesWithFilter(ctx context.Context, arg GetTemplatesWithFilterParams) ([]Template, error) {
	rows, err := q.db.QueryContext(ctx, getTemplatesWithFilter,
		arg.Deleted,
		arg.OrganizationID,
		arg.ExactName,
		arg.FuzzyName,
		pq.Array(arg.IDs),
		arg.Deprecated,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []Template
	for rows.Next() {
		var i Template
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.OrganizationID,
			&i.Deleted,
			&i.Name,
			&i.Provisioner,
			&i.ActiveVersionID,
			&i.Description,
			&i.DefaultTTL,
			&i.CreatedBy,
			&i.Icon,
			&i.UserACL,
			&i.GroupACL,
			&i.DisplayName,
			&i.AllowUserCancelWorkspaceJobs,
			&i.AllowUserAutostart,
			&i.AllowUserAutostop,
			&i.FailureTTL,
			&i.TimeTilDormant,
			&i.TimeTilDormantAutoDelete,
			&i.AutostopRequirementDaysOfWeek,
			&i.AutostopRequirementWeeks,
			&i.AutostartBlockDaysOfWeek,
			&i.RequireActiveVersion,
			&i.Deprecated,
			&i.ActivityBump,
			&i.MaxPortSharingLevel,
			&i.CreatedByAvatarURL,
			&i.CreatedByUsername,
			&i.OrganizationName,
			&i.OrganizationDisplayName,
			&i.OrganizationIcon,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const insertTemplate = `-- name: InsertTemplate :exec
INSERT INTO
	templates (
		id,
		created_at,
		updated_at,
		organization_id,
		"name",
		provisioner,
		active_version_id,
		description,
		created_by,
		icon,
		user_acl,
		group_acl,
		display_name,
		allow_user_cancel_workspace_jobs,
		max_port_sharing_level
	)
VALUES
	($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15)
`

// InsertTemplateParams is the full set of caller-provided columns for a new
// template row; columns not listed take their database defaults.
type InsertTemplateParams struct {
	ID                           uuid.UUID       `db:"id" json:"id"`
	CreatedAt                    time.Time       `db:"created_at" json:"created_at"`
	UpdatedAt                    time.Time       `db:"updated_at" json:"updated_at"`
	OrganizationID               uuid.UUID       `db:"organization_id" json:"organization_id"`
	Name                         string          `db:"name" json:"name"`
	Provisioner                  ProvisionerType `db:"provisioner" json:"provisioner"`
	ActiveVersionID              uuid.UUID       `db:"active_version_id" json:"active_version_id"`
	Description                  string          `db:"description" json:"description"`
	CreatedBy                    uuid.UUID       `db:"created_by" json:"created_by"`
	Icon                         string          `db:"icon" json:"icon"`
	UserACL                      TemplateACL     `db:"user_acl" json:"user_acl"`
	GroupACL                     TemplateACL     `db:"group_acl" json:"group_acl"`
	DisplayName                  string          `db:"display_name" json:"display_name"`
	AllowUserCancelWorkspaceJobs bool            `db:"allow_user_cancel_workspace_jobs" json:"allow_user_cancel_workspace_jobs"`
	MaxPortSharingLevel          AppSharingLevel `db:"max_port_sharing_level" json:"max_port_sharing_level"`
}

// InsertTemplate inserts a new template row into the base templates table.
func (q *sqlQuerier) InsertTemplate(ctx context.Context, arg InsertTemplateParams) error {
	_, err := q.db.ExecContext(ctx, insertTemplate,
		arg.ID,
		arg.CreatedAt,
		arg.UpdatedAt,
		arg.OrganizationID,
		arg.Name,
		arg.Provisioner,
		arg.ActiveVersionID,
		arg.Description,
		arg.CreatedBy,
		arg.Icon,
		arg.UserACL,
		arg.GroupACL,
		arg.DisplayName,
		arg.AllowUserCancelWorkspaceJobs,
		arg.MaxPortSharingLevel,
	)
	return err
}
|
|
|
|
const updateTemplateACLByID = `-- name: UpdateTemplateACLByID :exec
UPDATE
	templates
SET
	group_acl = $1,
	user_acl = $2
WHERE
	id = $3
`

// UpdateTemplateACLByIDParams replaces both ACL maps on one template.
type UpdateTemplateACLByIDParams struct {
	GroupACL TemplateACL `db:"group_acl" json:"group_acl"`
	UserACL  TemplateACL `db:"user_acl" json:"user_acl"`
	ID       uuid.UUID   `db:"id" json:"id"`
}

// UpdateTemplateACLByID overwrites the template's group and user ACLs.
func (q *sqlQuerier) UpdateTemplateACLByID(ctx context.Context, arg UpdateTemplateACLByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateTemplateACLByID, arg.GroupACL, arg.UserACL, arg.ID)
	return err
}
|
|
|
|
const updateTemplateAccessControlByID = `-- name: UpdateTemplateAccessControlByID :exec
UPDATE
	templates
SET
	require_active_version = $2,
	deprecated = $3
WHERE
	id = $1
`

// UpdateTemplateAccessControlByIDParams: Deprecated is the deprecation
// message string; the SQL filters elsewhere treat '' as "not deprecated".
type UpdateTemplateAccessControlByIDParams struct {
	ID                   uuid.UUID `db:"id" json:"id"`
	RequireActiveVersion bool      `db:"require_active_version" json:"require_active_version"`
	Deprecated           string    `db:"deprecated" json:"deprecated"`
}

// UpdateTemplateAccessControlByID sets the template's require-active-version
// flag and deprecation message.
func (q *sqlQuerier) UpdateTemplateAccessControlByID(ctx context.Context, arg UpdateTemplateAccessControlByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateTemplateAccessControlByID, arg.ID, arg.RequireActiveVersion, arg.Deprecated)
	return err
}
|
|
|
|
const updateTemplateActiveVersionByID = `-- name: UpdateTemplateActiveVersionByID :exec
UPDATE
	templates
SET
	active_version_id = $2,
	updated_at = $3
WHERE
	id = $1
`

// UpdateTemplateActiveVersionByIDParams promotes a template version; the
// caller supplies UpdatedAt explicitly.
type UpdateTemplateActiveVersionByIDParams struct {
	ID              uuid.UUID `db:"id" json:"id"`
	ActiveVersionID uuid.UUID `db:"active_version_id" json:"active_version_id"`
	UpdatedAt       time.Time `db:"updated_at" json:"updated_at"`
}

// UpdateTemplateActiveVersionByID switches the template's active version.
func (q *sqlQuerier) UpdateTemplateActiveVersionByID(ctx context.Context, arg UpdateTemplateActiveVersionByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateTemplateActiveVersionByID, arg.ID, arg.ActiveVersionID, arg.UpdatedAt)
	return err
}
|
|
|
|
const updateTemplateDeletedByID = `-- name: UpdateTemplateDeletedByID :exec
UPDATE
	templates
SET
	deleted = $2,
	updated_at = $3
WHERE
	id = $1
`

// UpdateTemplateDeletedByIDParams toggles soft deletion; the caller supplies
// UpdatedAt explicitly.
type UpdateTemplateDeletedByIDParams struct {
	ID        uuid.UUID `db:"id" json:"id"`
	Deleted   bool      `db:"deleted" json:"deleted"`
	UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
}

// UpdateTemplateDeletedByID soft-deletes (or restores) a template by
// flipping its deleted flag; the row itself is not removed.
func (q *sqlQuerier) UpdateTemplateDeletedByID(ctx context.Context, arg UpdateTemplateDeletedByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateTemplateDeletedByID, arg.ID, arg.Deleted, arg.UpdatedAt)
	return err
}
|
|
|
|
const updateTemplateMetaByID = `-- name: UpdateTemplateMetaByID :exec
|
|
UPDATE
|
|
templates
|
|
SET
|
|
updated_at = $2,
|
|
description = $3,
|
|
name = $4,
|
|
icon = $5,
|
|
display_name = $6,
|
|
allow_user_cancel_workspace_jobs = $7,
|
|
group_acl = $8,
|
|
max_port_sharing_level = $9
|
|
WHERE
|
|
id = $1
|
|
`
|
|
|
|
type UpdateTemplateMetaByIDParams struct {
|
|
ID uuid.UUID `db:"id" json:"id"`
|
|
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
|
|
Description string `db:"description" json:"description"`
|
|
Name string `db:"name" json:"name"`
|
|
Icon string `db:"icon" json:"icon"`
|
|
DisplayName string `db:"display_name" json:"display_name"`
|
|
AllowUserCancelWorkspaceJobs bool `db:"allow_user_cancel_workspace_jobs" json:"allow_user_cancel_workspace_jobs"`
|
|
GroupACL TemplateACL `db:"group_acl" json:"group_acl"`
|
|
MaxPortSharingLevel AppSharingLevel `db:"max_port_sharing_level" json:"max_port_sharing_level"`
|
|
}
|
|
|
|
func (q *sqlQuerier) UpdateTemplateMetaByID(ctx context.Context, arg UpdateTemplateMetaByIDParams) error {
|
|
_, err := q.db.ExecContext(ctx, updateTemplateMetaByID,
|
|
arg.ID,
|
|
arg.UpdatedAt,
|
|
arg.Description,
|
|
arg.Name,
|
|
arg.Icon,
|
|
arg.DisplayName,
|
|
arg.AllowUserCancelWorkspaceJobs,
|
|
arg.GroupACL,
|
|
arg.MaxPortSharingLevel,
|
|
)
|
|
return err
|
|
}
|
|
|
|
const updateTemplateScheduleByID = `-- name: UpdateTemplateScheduleByID :exec
|
|
UPDATE
|
|
templates
|
|
SET
|
|
updated_at = $2,
|
|
allow_user_autostart = $3,
|
|
allow_user_autostop = $4,
|
|
default_ttl = $5,
|
|
activity_bump = $6,
|
|
autostop_requirement_days_of_week = $7,
|
|
autostop_requirement_weeks = $8,
|
|
autostart_block_days_of_week = $9,
|
|
failure_ttl = $10,
|
|
time_til_dormant = $11,
|
|
time_til_dormant_autodelete = $12
|
|
WHERE
|
|
id = $1
|
|
`
|
|
|
|
type UpdateTemplateScheduleByIDParams struct {
|
|
ID uuid.UUID `db:"id" json:"id"`
|
|
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
|
|
AllowUserAutostart bool `db:"allow_user_autostart" json:"allow_user_autostart"`
|
|
AllowUserAutostop bool `db:"allow_user_autostop" json:"allow_user_autostop"`
|
|
DefaultTTL int64 `db:"default_ttl" json:"default_ttl"`
|
|
ActivityBump int64 `db:"activity_bump" json:"activity_bump"`
|
|
AutostopRequirementDaysOfWeek int16 `db:"autostop_requirement_days_of_week" json:"autostop_requirement_days_of_week"`
|
|
AutostopRequirementWeeks int64 `db:"autostop_requirement_weeks" json:"autostop_requirement_weeks"`
|
|
AutostartBlockDaysOfWeek int16 `db:"autostart_block_days_of_week" json:"autostart_block_days_of_week"`
|
|
FailureTTL int64 `db:"failure_ttl" json:"failure_ttl"`
|
|
TimeTilDormant int64 `db:"time_til_dormant" json:"time_til_dormant"`
|
|
TimeTilDormantAutoDelete int64 `db:"time_til_dormant_autodelete" json:"time_til_dormant_autodelete"`
|
|
}
|
|
|
|
func (q *sqlQuerier) UpdateTemplateScheduleByID(ctx context.Context, arg UpdateTemplateScheduleByIDParams) error {
|
|
_, err := q.db.ExecContext(ctx, updateTemplateScheduleByID,
|
|
arg.ID,
|
|
arg.UpdatedAt,
|
|
arg.AllowUserAutostart,
|
|
arg.AllowUserAutostop,
|
|
arg.DefaultTTL,
|
|
arg.ActivityBump,
|
|
arg.AutostopRequirementDaysOfWeek,
|
|
arg.AutostopRequirementWeeks,
|
|
arg.AutostartBlockDaysOfWeek,
|
|
arg.FailureTTL,
|
|
arg.TimeTilDormant,
|
|
arg.TimeTilDormantAutoDelete,
|
|
)
|
|
return err
|
|
}
|
|
|
|
const getTemplateVersionParameters = `-- name: GetTemplateVersionParameters :many
|
|
SELECT template_version_id, name, description, type, mutable, default_value, icon, options, validation_regex, validation_min, validation_max, validation_error, validation_monotonic, required, display_name, display_order, ephemeral FROM template_version_parameters WHERE template_version_id = $1 ORDER BY display_order ASC, LOWER(name) ASC
|
|
`
|
|
|
|
func (q *sqlQuerier) GetTemplateVersionParameters(ctx context.Context, templateVersionID uuid.UUID) ([]TemplateVersionParameter, error) {
|
|
rows, err := q.db.QueryContext(ctx, getTemplateVersionParameters, templateVersionID)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []TemplateVersionParameter
|
|
for rows.Next() {
|
|
var i TemplateVersionParameter
|
|
if err := rows.Scan(
|
|
&i.TemplateVersionID,
|
|
&i.Name,
|
|
&i.Description,
|
|
&i.Type,
|
|
&i.Mutable,
|
|
&i.DefaultValue,
|
|
&i.Icon,
|
|
&i.Options,
|
|
&i.ValidationRegex,
|
|
&i.ValidationMin,
|
|
&i.ValidationMax,
|
|
&i.ValidationError,
|
|
&i.ValidationMonotonic,
|
|
&i.Required,
|
|
&i.DisplayName,
|
|
&i.DisplayOrder,
|
|
&i.Ephemeral,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const insertTemplateVersionParameter = `-- name: InsertTemplateVersionParameter :one
|
|
INSERT INTO
|
|
template_version_parameters (
|
|
template_version_id,
|
|
name,
|
|
description,
|
|
type,
|
|
mutable,
|
|
default_value,
|
|
icon,
|
|
options,
|
|
validation_regex,
|
|
validation_min,
|
|
validation_max,
|
|
validation_error,
|
|
validation_monotonic,
|
|
required,
|
|
display_name,
|
|
display_order,
|
|
ephemeral
|
|
)
|
|
VALUES
|
|
(
|
|
$1,
|
|
$2,
|
|
$3,
|
|
$4,
|
|
$5,
|
|
$6,
|
|
$7,
|
|
$8,
|
|
$9,
|
|
$10,
|
|
$11,
|
|
$12,
|
|
$13,
|
|
$14,
|
|
$15,
|
|
$16,
|
|
$17
|
|
) RETURNING template_version_id, name, description, type, mutable, default_value, icon, options, validation_regex, validation_min, validation_max, validation_error, validation_monotonic, required, display_name, display_order, ephemeral
|
|
`
|
|
|
|
type InsertTemplateVersionParameterParams struct {
|
|
TemplateVersionID uuid.UUID `db:"template_version_id" json:"template_version_id"`
|
|
Name string `db:"name" json:"name"`
|
|
Description string `db:"description" json:"description"`
|
|
Type string `db:"type" json:"type"`
|
|
Mutable bool `db:"mutable" json:"mutable"`
|
|
DefaultValue string `db:"default_value" json:"default_value"`
|
|
Icon string `db:"icon" json:"icon"`
|
|
Options json.RawMessage `db:"options" json:"options"`
|
|
ValidationRegex string `db:"validation_regex" json:"validation_regex"`
|
|
ValidationMin sql.NullInt32 `db:"validation_min" json:"validation_min"`
|
|
ValidationMax sql.NullInt32 `db:"validation_max" json:"validation_max"`
|
|
ValidationError string `db:"validation_error" json:"validation_error"`
|
|
ValidationMonotonic string `db:"validation_monotonic" json:"validation_monotonic"`
|
|
Required bool `db:"required" json:"required"`
|
|
DisplayName string `db:"display_name" json:"display_name"`
|
|
DisplayOrder int32 `db:"display_order" json:"display_order"`
|
|
Ephemeral bool `db:"ephemeral" json:"ephemeral"`
|
|
}
|
|
|
|
func (q *sqlQuerier) InsertTemplateVersionParameter(ctx context.Context, arg InsertTemplateVersionParameterParams) (TemplateVersionParameter, error) {
|
|
row := q.db.QueryRowContext(ctx, insertTemplateVersionParameter,
|
|
arg.TemplateVersionID,
|
|
arg.Name,
|
|
arg.Description,
|
|
arg.Type,
|
|
arg.Mutable,
|
|
arg.DefaultValue,
|
|
arg.Icon,
|
|
arg.Options,
|
|
arg.ValidationRegex,
|
|
arg.ValidationMin,
|
|
arg.ValidationMax,
|
|
arg.ValidationError,
|
|
arg.ValidationMonotonic,
|
|
arg.Required,
|
|
arg.DisplayName,
|
|
arg.DisplayOrder,
|
|
arg.Ephemeral,
|
|
)
|
|
var i TemplateVersionParameter
|
|
err := row.Scan(
|
|
&i.TemplateVersionID,
|
|
&i.Name,
|
|
&i.Description,
|
|
&i.Type,
|
|
&i.Mutable,
|
|
&i.DefaultValue,
|
|
&i.Icon,
|
|
&i.Options,
|
|
&i.ValidationRegex,
|
|
&i.ValidationMin,
|
|
&i.ValidationMax,
|
|
&i.ValidationError,
|
|
&i.ValidationMonotonic,
|
|
&i.Required,
|
|
&i.DisplayName,
|
|
&i.DisplayOrder,
|
|
&i.Ephemeral,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const archiveUnusedTemplateVersions = `-- name: ArchiveUnusedTemplateVersions :many
|
|
UPDATE
|
|
template_versions
|
|
SET
|
|
archived = true,
|
|
updated_at = $1
|
|
FROM
|
|
-- Archive all versions that are returned from this query.
|
|
(
|
|
SELECT
|
|
scoped_template_versions.id
|
|
FROM
|
|
-- Scope an archive to a single template and ignore already archived template versions
|
|
(
|
|
SELECT
|
|
id, template_id, organization_id, created_at, updated_at, name, readme, job_id, created_by, external_auth_providers, message, archived
|
|
FROM
|
|
template_versions
|
|
WHERE
|
|
template_versions.template_id = $2 :: uuid
|
|
AND
|
|
archived = false
|
|
AND
|
|
-- This allows archiving a specific template version.
|
|
CASE
|
|
WHEN $3::uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
|
|
template_versions.id = $3 :: uuid
|
|
ELSE
|
|
true
|
|
END
|
|
) AS scoped_template_versions
|
|
LEFT JOIN
|
|
provisioner_jobs ON scoped_template_versions.job_id = provisioner_jobs.id
|
|
LEFT JOIN
|
|
templates ON scoped_template_versions.template_id = templates.id
|
|
WHERE
|
|
-- Actively used template versions (meaning the latest build is using
|
|
-- the version) are never archived. A "restart" command on the workspace,
|
|
-- even if failed, would use the version. So it cannot be archived until
|
|
-- the build is outdated.
|
|
NOT EXISTS (
|
|
-- Return all "used" versions, where "used" is defined as being
|
|
-- used by a latest workspace build.
|
|
SELECT template_version_id FROM (
|
|
SELECT
|
|
DISTINCT ON (workspace_id) template_version_id, transition
|
|
FROM
|
|
workspace_builds
|
|
ORDER BY workspace_id, build_number DESC
|
|
) AS used_versions
|
|
WHERE
|
|
used_versions.transition != 'delete'
|
|
AND
|
|
scoped_template_versions.id = used_versions.template_version_id
|
|
)
|
|
-- Also never archive the active template version
|
|
AND active_version_id != scoped_template_versions.id
|
|
AND CASE
|
|
-- Optionally, only archive versions that match a given
|
|
-- job status like 'failed'.
|
|
WHEN $4 :: provisioner_job_status IS NOT NULL THEN
|
|
provisioner_jobs.job_status = $4 :: provisioner_job_status
|
|
ELSE
|
|
true
|
|
END
|
|
-- Pending or running jobs should not be archived, as they are "in progress"
|
|
AND provisioner_jobs.job_status != 'running'
|
|
AND provisioner_jobs.job_status != 'pending'
|
|
) AS archived_versions
|
|
WHERE
|
|
template_versions.id IN (archived_versions.id)
|
|
RETURNING template_versions.id
|
|
`
|
|
|
|
type ArchiveUnusedTemplateVersionsParams struct {
|
|
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
|
|
TemplateID uuid.UUID `db:"template_id" json:"template_id"`
|
|
TemplateVersionID uuid.UUID `db:"template_version_id" json:"template_version_id"`
|
|
JobStatus NullProvisionerJobStatus `db:"job_status" json:"job_status"`
|
|
}
|
|
|
|
// Archiving templates is a soft delete action, so is reversible.
|
|
// Archiving prevents the version from being used and discovered
|
|
// by listing.
|
|
// Only unused template versions will be archived, which are any versions not
|
|
// referenced by the latest build of a workspace.
|
|
func (q *sqlQuerier) ArchiveUnusedTemplateVersions(ctx context.Context, arg ArchiveUnusedTemplateVersionsParams) ([]uuid.UUID, error) {
|
|
rows, err := q.db.QueryContext(ctx, archiveUnusedTemplateVersions,
|
|
arg.UpdatedAt,
|
|
arg.TemplateID,
|
|
arg.TemplateVersionID,
|
|
arg.JobStatus,
|
|
)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []uuid.UUID
|
|
for rows.Next() {
|
|
var id uuid.UUID
|
|
if err := rows.Scan(&id); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, id)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const getPreviousTemplateVersion = `-- name: GetPreviousTemplateVersion :one
|
|
SELECT
|
|
id, template_id, organization_id, created_at, updated_at, name, readme, job_id, created_by, external_auth_providers, message, archived, created_by_avatar_url, created_by_username
|
|
FROM
|
|
template_version_with_user AS template_versions
|
|
WHERE
|
|
created_at < (
|
|
SELECT created_at
|
|
FROM template_version_with_user AS tv
|
|
WHERE tv.organization_id = $1 AND tv.name = $2 AND tv.template_id = $3
|
|
)
|
|
AND organization_id = $1
|
|
AND template_id = $3
|
|
ORDER BY created_at DESC
|
|
LIMIT 1
|
|
`
|
|
|
|
type GetPreviousTemplateVersionParams struct {
|
|
OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
|
|
Name string `db:"name" json:"name"`
|
|
TemplateID uuid.NullUUID `db:"template_id" json:"template_id"`
|
|
}
|
|
|
|
func (q *sqlQuerier) GetPreviousTemplateVersion(ctx context.Context, arg GetPreviousTemplateVersionParams) (TemplateVersion, error) {
|
|
row := q.db.QueryRowContext(ctx, getPreviousTemplateVersion, arg.OrganizationID, arg.Name, arg.TemplateID)
|
|
var i TemplateVersion
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.TemplateID,
|
|
&i.OrganizationID,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.Name,
|
|
&i.Readme,
|
|
&i.JobID,
|
|
&i.CreatedBy,
|
|
&i.ExternalAuthProviders,
|
|
&i.Message,
|
|
&i.Archived,
|
|
&i.CreatedByAvatarURL,
|
|
&i.CreatedByUsername,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const getTemplateVersionByID = `-- name: GetTemplateVersionByID :one
|
|
SELECT
|
|
id, template_id, organization_id, created_at, updated_at, name, readme, job_id, created_by, external_auth_providers, message, archived, created_by_avatar_url, created_by_username
|
|
FROM
|
|
template_version_with_user AS template_versions
|
|
WHERE
|
|
id = $1
|
|
`
|
|
|
|
func (q *sqlQuerier) GetTemplateVersionByID(ctx context.Context, id uuid.UUID) (TemplateVersion, error) {
|
|
row := q.db.QueryRowContext(ctx, getTemplateVersionByID, id)
|
|
var i TemplateVersion
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.TemplateID,
|
|
&i.OrganizationID,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.Name,
|
|
&i.Readme,
|
|
&i.JobID,
|
|
&i.CreatedBy,
|
|
&i.ExternalAuthProviders,
|
|
&i.Message,
|
|
&i.Archived,
|
|
&i.CreatedByAvatarURL,
|
|
&i.CreatedByUsername,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const getTemplateVersionByJobID = `-- name: GetTemplateVersionByJobID :one
|
|
SELECT
|
|
id, template_id, organization_id, created_at, updated_at, name, readme, job_id, created_by, external_auth_providers, message, archived, created_by_avatar_url, created_by_username
|
|
FROM
|
|
template_version_with_user AS template_versions
|
|
WHERE
|
|
job_id = $1
|
|
`
|
|
|
|
func (q *sqlQuerier) GetTemplateVersionByJobID(ctx context.Context, jobID uuid.UUID) (TemplateVersion, error) {
|
|
row := q.db.QueryRowContext(ctx, getTemplateVersionByJobID, jobID)
|
|
var i TemplateVersion
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.TemplateID,
|
|
&i.OrganizationID,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.Name,
|
|
&i.Readme,
|
|
&i.JobID,
|
|
&i.CreatedBy,
|
|
&i.ExternalAuthProviders,
|
|
&i.Message,
|
|
&i.Archived,
|
|
&i.CreatedByAvatarURL,
|
|
&i.CreatedByUsername,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const getTemplateVersionByTemplateIDAndName = `-- name: GetTemplateVersionByTemplateIDAndName :one
|
|
SELECT
|
|
id, template_id, organization_id, created_at, updated_at, name, readme, job_id, created_by, external_auth_providers, message, archived, created_by_avatar_url, created_by_username
|
|
FROM
|
|
template_version_with_user AS template_versions
|
|
WHERE
|
|
template_id = $1
|
|
AND "name" = $2
|
|
`
|
|
|
|
type GetTemplateVersionByTemplateIDAndNameParams struct {
|
|
TemplateID uuid.NullUUID `db:"template_id" json:"template_id"`
|
|
Name string `db:"name" json:"name"`
|
|
}
|
|
|
|
func (q *sqlQuerier) GetTemplateVersionByTemplateIDAndName(ctx context.Context, arg GetTemplateVersionByTemplateIDAndNameParams) (TemplateVersion, error) {
|
|
row := q.db.QueryRowContext(ctx, getTemplateVersionByTemplateIDAndName, arg.TemplateID, arg.Name)
|
|
var i TemplateVersion
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.TemplateID,
|
|
&i.OrganizationID,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.Name,
|
|
&i.Readme,
|
|
&i.JobID,
|
|
&i.CreatedBy,
|
|
&i.ExternalAuthProviders,
|
|
&i.Message,
|
|
&i.Archived,
|
|
&i.CreatedByAvatarURL,
|
|
&i.CreatedByUsername,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const getTemplateVersionsByIDs = `-- name: GetTemplateVersionsByIDs :many
|
|
SELECT
|
|
id, template_id, organization_id, created_at, updated_at, name, readme, job_id, created_by, external_auth_providers, message, archived, created_by_avatar_url, created_by_username
|
|
FROM
|
|
template_version_with_user AS template_versions
|
|
WHERE
|
|
id = ANY($1 :: uuid [ ])
|
|
`
|
|
|
|
func (q *sqlQuerier) GetTemplateVersionsByIDs(ctx context.Context, ids []uuid.UUID) ([]TemplateVersion, error) {
|
|
rows, err := q.db.QueryContext(ctx, getTemplateVersionsByIDs, pq.Array(ids))
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []TemplateVersion
|
|
for rows.Next() {
|
|
var i TemplateVersion
|
|
if err := rows.Scan(
|
|
&i.ID,
|
|
&i.TemplateID,
|
|
&i.OrganizationID,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.Name,
|
|
&i.Readme,
|
|
&i.JobID,
|
|
&i.CreatedBy,
|
|
&i.ExternalAuthProviders,
|
|
&i.Message,
|
|
&i.Archived,
|
|
&i.CreatedByAvatarURL,
|
|
&i.CreatedByUsername,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const getTemplateVersionsByTemplateID = `-- name: GetTemplateVersionsByTemplateID :many
|
|
SELECT
|
|
id, template_id, organization_id, created_at, updated_at, name, readme, job_id, created_by, external_auth_providers, message, archived, created_by_avatar_url, created_by_username
|
|
FROM
|
|
template_version_with_user AS template_versions
|
|
WHERE
|
|
template_id = $1 :: uuid
|
|
AND CASE
|
|
-- If no filter is provided, default to returning ALL template versions.
|
|
-- The called should always provide a filter if they want to omit
|
|
-- archived versions.
|
|
WHEN $2 :: boolean IS NULL THEN true
|
|
ELSE template_versions.archived = $2 :: boolean
|
|
END
|
|
AND CASE
|
|
-- This allows using the last element on a page as effectively a cursor.
|
|
-- This is an important option for scripts that need to paginate without
|
|
-- duplicating or missing data.
|
|
WHEN $3 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN (
|
|
-- The pagination cursor is the last ID of the previous page.
|
|
-- The query is ordered by the created_at field, so select all
|
|
-- rows after the cursor.
|
|
(created_at, id) > (
|
|
SELECT
|
|
created_at, id
|
|
FROM
|
|
template_versions
|
|
WHERE
|
|
id = $3
|
|
)
|
|
)
|
|
ELSE true
|
|
END
|
|
ORDER BY
|
|
-- Deterministic and consistent ordering of all rows, even if they share
|
|
-- a timestamp. This is to ensure consistent pagination.
|
|
(created_at, id) ASC OFFSET $4
|
|
LIMIT
|
|
-- A null limit means "no limit", so 0 means return all
|
|
NULLIF($5 :: int, 0)
|
|
`
|
|
|
|
type GetTemplateVersionsByTemplateIDParams struct {
|
|
TemplateID uuid.UUID `db:"template_id" json:"template_id"`
|
|
Archived sql.NullBool `db:"archived" json:"archived"`
|
|
AfterID uuid.UUID `db:"after_id" json:"after_id"`
|
|
OffsetOpt int32 `db:"offset_opt" json:"offset_opt"`
|
|
LimitOpt int32 `db:"limit_opt" json:"limit_opt"`
|
|
}
|
|
|
|
func (q *sqlQuerier) GetTemplateVersionsByTemplateID(ctx context.Context, arg GetTemplateVersionsByTemplateIDParams) ([]TemplateVersion, error) {
|
|
rows, err := q.db.QueryContext(ctx, getTemplateVersionsByTemplateID,
|
|
arg.TemplateID,
|
|
arg.Archived,
|
|
arg.AfterID,
|
|
arg.OffsetOpt,
|
|
arg.LimitOpt,
|
|
)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []TemplateVersion
|
|
for rows.Next() {
|
|
var i TemplateVersion
|
|
if err := rows.Scan(
|
|
&i.ID,
|
|
&i.TemplateID,
|
|
&i.OrganizationID,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.Name,
|
|
&i.Readme,
|
|
&i.JobID,
|
|
&i.CreatedBy,
|
|
&i.ExternalAuthProviders,
|
|
&i.Message,
|
|
&i.Archived,
|
|
&i.CreatedByAvatarURL,
|
|
&i.CreatedByUsername,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const getTemplateVersionsCreatedAfter = `-- name: GetTemplateVersionsCreatedAfter :many
|
|
SELECT id, template_id, organization_id, created_at, updated_at, name, readme, job_id, created_by, external_auth_providers, message, archived, created_by_avatar_url, created_by_username FROM template_version_with_user AS template_versions WHERE created_at > $1
|
|
`
|
|
|
|
func (q *sqlQuerier) GetTemplateVersionsCreatedAfter(ctx context.Context, createdAt time.Time) ([]TemplateVersion, error) {
|
|
rows, err := q.db.QueryContext(ctx, getTemplateVersionsCreatedAfter, createdAt)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []TemplateVersion
|
|
for rows.Next() {
|
|
var i TemplateVersion
|
|
if err := rows.Scan(
|
|
&i.ID,
|
|
&i.TemplateID,
|
|
&i.OrganizationID,
|
|
&i.CreatedAt,
|
|
&i.UpdatedAt,
|
|
&i.Name,
|
|
&i.Readme,
|
|
&i.JobID,
|
|
&i.CreatedBy,
|
|
&i.ExternalAuthProviders,
|
|
&i.Message,
|
|
&i.Archived,
|
|
&i.CreatedByAvatarURL,
|
|
&i.CreatedByUsername,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const insertTemplateVersion = `-- name: InsertTemplateVersion :exec
|
|
INSERT INTO
|
|
template_versions (
|
|
id,
|
|
template_id,
|
|
organization_id,
|
|
created_at,
|
|
updated_at,
|
|
"name",
|
|
message,
|
|
readme,
|
|
job_id,
|
|
created_by
|
|
)
|
|
VALUES
|
|
($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
|
|
`
|
|
|
|
type InsertTemplateVersionParams struct {
|
|
ID uuid.UUID `db:"id" json:"id"`
|
|
TemplateID uuid.NullUUID `db:"template_id" json:"template_id"`
|
|
OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
|
|
CreatedAt time.Time `db:"created_at" json:"created_at"`
|
|
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
|
|
Name string `db:"name" json:"name"`
|
|
Message string `db:"message" json:"message"`
|
|
Readme string `db:"readme" json:"readme"`
|
|
JobID uuid.UUID `db:"job_id" json:"job_id"`
|
|
CreatedBy uuid.UUID `db:"created_by" json:"created_by"`
|
|
}
|
|
|
|
func (q *sqlQuerier) InsertTemplateVersion(ctx context.Context, arg InsertTemplateVersionParams) error {
|
|
_, err := q.db.ExecContext(ctx, insertTemplateVersion,
|
|
arg.ID,
|
|
arg.TemplateID,
|
|
arg.OrganizationID,
|
|
arg.CreatedAt,
|
|
arg.UpdatedAt,
|
|
arg.Name,
|
|
arg.Message,
|
|
arg.Readme,
|
|
arg.JobID,
|
|
arg.CreatedBy,
|
|
)
|
|
return err
|
|
}
|
|
|
|
const unarchiveTemplateVersion = `-- name: UnarchiveTemplateVersion :exec
|
|
UPDATE
|
|
template_versions
|
|
SET
|
|
archived = false,
|
|
updated_at = $1
|
|
WHERE
|
|
id = $2
|
|
`
|
|
|
|
type UnarchiveTemplateVersionParams struct {
|
|
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
|
|
TemplateVersionID uuid.UUID `db:"template_version_id" json:"template_version_id"`
|
|
}
|
|
|
|
// This will always work regardless of the current state of the template version.
|
|
func (q *sqlQuerier) UnarchiveTemplateVersion(ctx context.Context, arg UnarchiveTemplateVersionParams) error {
|
|
_, err := q.db.ExecContext(ctx, unarchiveTemplateVersion, arg.UpdatedAt, arg.TemplateVersionID)
|
|
return err
|
|
}
|
|
|
|
const updateTemplateVersionByID = `-- name: UpdateTemplateVersionByID :exec
|
|
UPDATE
|
|
template_versions
|
|
SET
|
|
template_id = $2,
|
|
updated_at = $3,
|
|
name = $4,
|
|
message = $5
|
|
WHERE
|
|
id = $1
|
|
`
|
|
|
|
type UpdateTemplateVersionByIDParams struct {
|
|
ID uuid.UUID `db:"id" json:"id"`
|
|
TemplateID uuid.NullUUID `db:"template_id" json:"template_id"`
|
|
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
|
|
Name string `db:"name" json:"name"`
|
|
Message string `db:"message" json:"message"`
|
|
}
|
|
|
|
func (q *sqlQuerier) UpdateTemplateVersionByID(ctx context.Context, arg UpdateTemplateVersionByIDParams) error {
|
|
_, err := q.db.ExecContext(ctx, updateTemplateVersionByID,
|
|
arg.ID,
|
|
arg.TemplateID,
|
|
arg.UpdatedAt,
|
|
arg.Name,
|
|
arg.Message,
|
|
)
|
|
return err
|
|
}
|
|
|
|
const updateTemplateVersionDescriptionByJobID = `-- name: UpdateTemplateVersionDescriptionByJobID :exec
|
|
UPDATE
|
|
template_versions
|
|
SET
|
|
readme = $2,
|
|
updated_at = $3
|
|
WHERE
|
|
job_id = $1
|
|
`
|
|
|
|
type UpdateTemplateVersionDescriptionByJobIDParams struct {
|
|
JobID uuid.UUID `db:"job_id" json:"job_id"`
|
|
Readme string `db:"readme" json:"readme"`
|
|
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
|
|
}
|
|
|
|
func (q *sqlQuerier) UpdateTemplateVersionDescriptionByJobID(ctx context.Context, arg UpdateTemplateVersionDescriptionByJobIDParams) error {
|
|
_, err := q.db.ExecContext(ctx, updateTemplateVersionDescriptionByJobID, arg.JobID, arg.Readme, arg.UpdatedAt)
|
|
return err
|
|
}
|
|
|
|
const updateTemplateVersionExternalAuthProvidersByJobID = `-- name: UpdateTemplateVersionExternalAuthProvidersByJobID :exec
|
|
UPDATE
|
|
template_versions
|
|
SET
|
|
external_auth_providers = $2,
|
|
updated_at = $3
|
|
WHERE
|
|
job_id = $1
|
|
`
|
|
|
|
type UpdateTemplateVersionExternalAuthProvidersByJobIDParams struct {
|
|
JobID uuid.UUID `db:"job_id" json:"job_id"`
|
|
ExternalAuthProviders json.RawMessage `db:"external_auth_providers" json:"external_auth_providers"`
|
|
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
|
|
}
|
|
|
|
func (q *sqlQuerier) UpdateTemplateVersionExternalAuthProvidersByJobID(ctx context.Context, arg UpdateTemplateVersionExternalAuthProvidersByJobIDParams) error {
|
|
_, err := q.db.ExecContext(ctx, updateTemplateVersionExternalAuthProvidersByJobID, arg.JobID, arg.ExternalAuthProviders, arg.UpdatedAt)
|
|
return err
|
|
}
|
|
|
|
const getTemplateVersionVariables = `-- name: GetTemplateVersionVariables :many
|
|
SELECT template_version_id, name, description, type, value, default_value, required, sensitive FROM template_version_variables WHERE template_version_id = $1
|
|
`
|
|
|
|
func (q *sqlQuerier) GetTemplateVersionVariables(ctx context.Context, templateVersionID uuid.UUID) ([]TemplateVersionVariable, error) {
|
|
rows, err := q.db.QueryContext(ctx, getTemplateVersionVariables, templateVersionID)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []TemplateVersionVariable
|
|
for rows.Next() {
|
|
var i TemplateVersionVariable
|
|
if err := rows.Scan(
|
|
&i.TemplateVersionID,
|
|
&i.Name,
|
|
&i.Description,
|
|
&i.Type,
|
|
&i.Value,
|
|
&i.DefaultValue,
|
|
&i.Required,
|
|
&i.Sensitive,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const insertTemplateVersionVariable = `-- name: InsertTemplateVersionVariable :one
|
|
INSERT INTO
|
|
template_version_variables (
|
|
template_version_id,
|
|
name,
|
|
description,
|
|
type,
|
|
value,
|
|
default_value,
|
|
required,
|
|
sensitive
|
|
)
|
|
VALUES
|
|
(
|
|
$1,
|
|
$2,
|
|
$3,
|
|
$4,
|
|
$5,
|
|
$6,
|
|
$7,
|
|
$8
|
|
) RETURNING template_version_id, name, description, type, value, default_value, required, sensitive
|
|
`
|
|
|
|
type InsertTemplateVersionVariableParams struct {
|
|
TemplateVersionID uuid.UUID `db:"template_version_id" json:"template_version_id"`
|
|
Name string `db:"name" json:"name"`
|
|
Description string `db:"description" json:"description"`
|
|
Type string `db:"type" json:"type"`
|
|
Value string `db:"value" json:"value"`
|
|
DefaultValue string `db:"default_value" json:"default_value"`
|
|
Required bool `db:"required" json:"required"`
|
|
Sensitive bool `db:"sensitive" json:"sensitive"`
|
|
}
|
|
|
|
func (q *sqlQuerier) InsertTemplateVersionVariable(ctx context.Context, arg InsertTemplateVersionVariableParams) (TemplateVersionVariable, error) {
|
|
row := q.db.QueryRowContext(ctx, insertTemplateVersionVariable,
|
|
arg.TemplateVersionID,
|
|
arg.Name,
|
|
arg.Description,
|
|
arg.Type,
|
|
arg.Value,
|
|
arg.DefaultValue,
|
|
arg.Required,
|
|
arg.Sensitive,
|
|
)
|
|
var i TemplateVersionVariable
|
|
err := row.Scan(
|
|
&i.TemplateVersionID,
|
|
&i.Name,
|
|
&i.Description,
|
|
&i.Type,
|
|
&i.Value,
|
|
&i.DefaultValue,
|
|
&i.Required,
|
|
&i.Sensitive,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const getTemplateVersionWorkspaceTags = `-- name: GetTemplateVersionWorkspaceTags :many
|
|
SELECT template_version_id, key, value FROM template_version_workspace_tags WHERE template_version_id = $1 ORDER BY LOWER(key) ASC
|
|
`
|
|
|
|
func (q *sqlQuerier) GetTemplateVersionWorkspaceTags(ctx context.Context, templateVersionID uuid.UUID) ([]TemplateVersionWorkspaceTag, error) {
|
|
rows, err := q.db.QueryContext(ctx, getTemplateVersionWorkspaceTags, templateVersionID)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []TemplateVersionWorkspaceTag
|
|
for rows.Next() {
|
|
var i TemplateVersionWorkspaceTag
|
|
if err := rows.Scan(&i.TemplateVersionID, &i.Key, &i.Value); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const insertTemplateVersionWorkspaceTag = `-- name: InsertTemplateVersionWorkspaceTag :one
|
|
INSERT INTO
|
|
template_version_workspace_tags (
|
|
template_version_id,
|
|
key,
|
|
value
|
|
)
|
|
VALUES
|
|
(
|
|
$1,
|
|
$2,
|
|
$3
|
|
) RETURNING template_version_id, key, value
|
|
`
|
|
|
|
type InsertTemplateVersionWorkspaceTagParams struct {
|
|
TemplateVersionID uuid.UUID `db:"template_version_id" json:"template_version_id"`
|
|
Key string `db:"key" json:"key"`
|
|
Value string `db:"value" json:"value"`
|
|
}
|
|
|
|
func (q *sqlQuerier) InsertTemplateVersionWorkspaceTag(ctx context.Context, arg InsertTemplateVersionWorkspaceTagParams) (TemplateVersionWorkspaceTag, error) {
|
|
row := q.db.QueryRowContext(ctx, insertTemplateVersionWorkspaceTag, arg.TemplateVersionID, arg.Key, arg.Value)
|
|
var i TemplateVersionWorkspaceTag
|
|
err := row.Scan(&i.TemplateVersionID, &i.Key, &i.Value)
|
|
return i, err
|
|
}
|
|
|
|
const getUserLinkByLinkedID = `-- name: GetUserLinkByLinkedID :one
|
|
SELECT
|
|
user_links.user_id, user_links.login_type, user_links.linked_id, user_links.oauth_access_token, user_links.oauth_refresh_token, user_links.oauth_expiry, user_links.oauth_access_token_key_id, user_links.oauth_refresh_token_key_id, user_links.debug_context
|
|
FROM
|
|
user_links
|
|
INNER JOIN
|
|
users ON user_links.user_id = users.id
|
|
WHERE
|
|
linked_id = $1
|
|
AND
|
|
deleted = false
|
|
`
|
|
|
|
func (q *sqlQuerier) GetUserLinkByLinkedID(ctx context.Context, linkedID string) (UserLink, error) {
|
|
row := q.db.QueryRowContext(ctx, getUserLinkByLinkedID, linkedID)
|
|
var i UserLink
|
|
err := row.Scan(
|
|
&i.UserID,
|
|
&i.LoginType,
|
|
&i.LinkedID,
|
|
&i.OAuthAccessToken,
|
|
&i.OAuthRefreshToken,
|
|
&i.OAuthExpiry,
|
|
&i.OAuthAccessTokenKeyID,
|
|
&i.OAuthRefreshTokenKeyID,
|
|
&i.DebugContext,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const getUserLinkByUserIDLoginType = `-- name: GetUserLinkByUserIDLoginType :one
SELECT
	user_id, login_type, linked_id, oauth_access_token, oauth_refresh_token, oauth_expiry, oauth_access_token_key_id, oauth_refresh_token_key_id, debug_context
FROM
	user_links
WHERE
	user_id = $1 AND login_type = $2
`

// GetUserLinkByUserIDLoginTypeParams identifies a user_link by its composite
// (user_id, login_type) key.
type GetUserLinkByUserIDLoginTypeParams struct {
	UserID    uuid.UUID `db:"user_id" json:"user_id"`
	LoginType LoginType `db:"login_type" json:"login_type"`
}

// GetUserLinkByUserIDLoginType returns the single user_link row for the given
// user and login type.
func (q *sqlQuerier) GetUserLinkByUserIDLoginType(ctx context.Context, arg GetUserLinkByUserIDLoginTypeParams) (UserLink, error) {
	row := q.db.QueryRowContext(ctx, getUserLinkByUserIDLoginType, arg.UserID, arg.LoginType)
	var i UserLink
	err := row.Scan(
		&i.UserID,
		&i.LoginType,
		&i.LinkedID,
		&i.OAuthAccessToken,
		&i.OAuthRefreshToken,
		&i.OAuthExpiry,
		&i.OAuthAccessTokenKeyID,
		&i.OAuthRefreshTokenKeyID,
		&i.DebugContext,
	)
	return i, err
}
|
|
|
|
const getUserLinksByUserID = `-- name: GetUserLinksByUserID :many
SELECT user_id, login_type, linked_id, oauth_access_token, oauth_refresh_token, oauth_expiry, oauth_access_token_key_id, oauth_refresh_token_key_id, debug_context FROM user_links WHERE user_id = $1
`

// GetUserLinksByUserID returns every user_link row for the given user, one per
// login type the user has linked.
func (q *sqlQuerier) GetUserLinksByUserID(ctx context.Context, userID uuid.UUID) ([]UserLink, error) {
	rows, err := q.db.QueryContext(ctx, getUserLinksByUserID, userID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []UserLink
	for rows.Next() {
		var i UserLink
		if err := rows.Scan(
			&i.UserID,
			&i.LoginType,
			&i.LinkedID,
			&i.OAuthAccessToken,
			&i.OAuthRefreshToken,
			&i.OAuthExpiry,
			&i.OAuthAccessTokenKeyID,
			&i.OAuthRefreshTokenKeyID,
			&i.DebugContext,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const insertUserLink = `-- name: InsertUserLink :one
INSERT INTO
	user_links (
		user_id,
		login_type,
		linked_id,
		oauth_access_token,
		oauth_access_token_key_id,
		oauth_refresh_token,
		oauth_refresh_token_key_id,
		oauth_expiry,
		debug_context
	)
VALUES
	( $1, $2, $3, $4, $5, $6, $7, $8, $9 ) RETURNING user_id, login_type, linked_id, oauth_access_token, oauth_refresh_token, oauth_expiry, oauth_access_token_key_id, oauth_refresh_token_key_id, debug_context
`

// InsertUserLinkParams holds every column of a new user_links row, including
// the OAuth token pair and the optional dbcrypt key IDs used to encrypt them.
type InsertUserLinkParams struct {
	UserID                 uuid.UUID       `db:"user_id" json:"user_id"`
	LoginType              LoginType       `db:"login_type" json:"login_type"`
	LinkedID               string          `db:"linked_id" json:"linked_id"`
	OAuthAccessToken       string          `db:"oauth_access_token" json:"oauth_access_token"`
	OAuthAccessTokenKeyID  sql.NullString  `db:"oauth_access_token_key_id" json:"oauth_access_token_key_id"`
	OAuthRefreshToken      string          `db:"oauth_refresh_token" json:"oauth_refresh_token"`
	OAuthRefreshTokenKeyID sql.NullString  `db:"oauth_refresh_token_key_id" json:"oauth_refresh_token_key_id"`
	OAuthExpiry            time.Time       `db:"oauth_expiry" json:"oauth_expiry"`
	DebugContext           json.RawMessage `db:"debug_context" json:"debug_context"`
}

// InsertUserLink inserts a user_links row and returns the inserted row.
func (q *sqlQuerier) InsertUserLink(ctx context.Context, arg InsertUserLinkParams) (UserLink, error) {
	row := q.db.QueryRowContext(ctx, insertUserLink,
		arg.UserID,
		arg.LoginType,
		arg.LinkedID,
		arg.OAuthAccessToken,
		arg.OAuthAccessTokenKeyID,
		arg.OAuthRefreshToken,
		arg.OAuthRefreshTokenKeyID,
		arg.OAuthExpiry,
		arg.DebugContext,
	)
	var i UserLink
	err := row.Scan(
		&i.UserID,
		&i.LoginType,
		&i.LinkedID,
		&i.OAuthAccessToken,
		&i.OAuthRefreshToken,
		&i.OAuthExpiry,
		&i.OAuthAccessTokenKeyID,
		&i.OAuthRefreshTokenKeyID,
		&i.DebugContext,
	)
	return i, err
}
|
|
|
|
const updateUserLink = `-- name: UpdateUserLink :one
UPDATE
	user_links
SET
	oauth_access_token = $1,
	oauth_access_token_key_id = $2,
	oauth_refresh_token = $3,
	oauth_refresh_token_key_id = $4,
	oauth_expiry = $5,
	debug_context = $6
WHERE
	user_id = $7 AND login_type = $8 RETURNING user_id, login_type, linked_id, oauth_access_token, oauth_refresh_token, oauth_expiry, oauth_access_token_key_id, oauth_refresh_token_key_id, debug_context
`

// UpdateUserLinkParams carries the refreshed OAuth token material plus the
// (user_id, login_type) key identifying the row to update.
type UpdateUserLinkParams struct {
	OAuthAccessToken       string          `db:"oauth_access_token" json:"oauth_access_token"`
	OAuthAccessTokenKeyID  sql.NullString  `db:"oauth_access_token_key_id" json:"oauth_access_token_key_id"`
	OAuthRefreshToken      string          `db:"oauth_refresh_token" json:"oauth_refresh_token"`
	OAuthRefreshTokenKeyID sql.NullString  `db:"oauth_refresh_token_key_id" json:"oauth_refresh_token_key_id"`
	OAuthExpiry            time.Time       `db:"oauth_expiry" json:"oauth_expiry"`
	DebugContext           json.RawMessage `db:"debug_context" json:"debug_context"`
	UserID                 uuid.UUID       `db:"user_id" json:"user_id"`
	LoginType              LoginType       `db:"login_type" json:"login_type"`
}

// UpdateUserLink replaces the OAuth token fields of the user_link identified
// by (user_id, login_type) and returns the updated row.
func (q *sqlQuerier) UpdateUserLink(ctx context.Context, arg UpdateUserLinkParams) (UserLink, error) {
	row := q.db.QueryRowContext(ctx, updateUserLink,
		arg.OAuthAccessToken,
		arg.OAuthAccessTokenKeyID,
		arg.OAuthRefreshToken,
		arg.OAuthRefreshTokenKeyID,
		arg.OAuthExpiry,
		arg.DebugContext,
		arg.UserID,
		arg.LoginType,
	)
	var i UserLink
	err := row.Scan(
		&i.UserID,
		&i.LoginType,
		&i.LinkedID,
		&i.OAuthAccessToken,
		&i.OAuthRefreshToken,
		&i.OAuthExpiry,
		&i.OAuthAccessTokenKeyID,
		&i.OAuthRefreshTokenKeyID,
		&i.DebugContext,
	)
	return i, err
}
|
|
|
|
const updateUserLinkedID = `-- name: UpdateUserLinkedID :one
UPDATE
	user_links
SET
	linked_id = $1
WHERE
	user_id = $2 AND login_type = $3 RETURNING user_id, login_type, linked_id, oauth_access_token, oauth_refresh_token, oauth_expiry, oauth_access_token_key_id, oauth_refresh_token_key_id, debug_context
`

// UpdateUserLinkedIDParams carries the new linked_id and the composite key of
// the user_link row to update.
type UpdateUserLinkedIDParams struct {
	LinkedID  string    `db:"linked_id" json:"linked_id"`
	UserID    uuid.UUID `db:"user_id" json:"user_id"`
	LoginType LoginType `db:"login_type" json:"login_type"`
}

// UpdateUserLinkedID sets only the linked_id of the user_link identified by
// (user_id, login_type) and returns the updated row.
func (q *sqlQuerier) UpdateUserLinkedID(ctx context.Context, arg UpdateUserLinkedIDParams) (UserLink, error) {
	row := q.db.QueryRowContext(ctx, updateUserLinkedID, arg.LinkedID, arg.UserID, arg.LoginType)
	var i UserLink
	err := row.Scan(
		&i.UserID,
		&i.LoginType,
		&i.LinkedID,
		&i.OAuthAccessToken,
		&i.OAuthRefreshToken,
		&i.OAuthExpiry,
		&i.OAuthAccessTokenKeyID,
		&i.OAuthRefreshTokenKeyID,
		&i.DebugContext,
	)
	return i, err
}
|
|
|
|
const allUserIDs = `-- name: AllUserIDs :many
SELECT DISTINCT id FROM USERS
`

// AllUserIDs returns all UserIDs regardless of user status or deletion.
func (q *sqlQuerier) AllUserIDs(ctx context.Context) ([]uuid.UUID, error) {
	rows, err := q.db.QueryContext(ctx, allUserIDs)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []uuid.UUID
	for rows.Next() {
		var id uuid.UUID
		if err := rows.Scan(&id); err != nil {
			return nil, err
		}
		items = append(items, id)
	}
	// Explicit Close before Err surfaces any deferred driver error; the
	// deferred Close above is then a no-op.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getActiveUserCount = `-- name: GetActiveUserCount :one
SELECT
	COUNT(*)
FROM
	users
WHERE
	status = 'active'::user_status AND deleted = false
`

// GetActiveUserCount counts non-deleted users whose status is 'active'.
func (q *sqlQuerier) GetActiveUserCount(ctx context.Context) (int64, error) {
	row := q.db.QueryRowContext(ctx, getActiveUserCount)
	var count int64
	err := row.Scan(&count)
	return count, err
}
|
|
|
|
const getAuthorizationUserRoles = `-- name: GetAuthorizationUserRoles :one
SELECT
	-- username is returned just to help for logging purposes
	-- status is used to enforce 'suspended' users, as all roles are ignored
	-- when suspended.
	id, username, status,
	-- All user roles, including their org roles.
	array_cat(
		-- All users are members
		array_append(users.rbac_roles, 'member'),
		(
			SELECT
				-- The roles are returned as a flat array, org scoped and site side.
				-- Concatenating the organization id scopes the organization roles.
				array_agg(org_roles || ':' || organization_members.organization_id::text)
			FROM
				organization_members,
				-- All org_members get the organization-member role for their orgs
				unnest(
					array_append(roles, 'organization-member')
				) AS org_roles
			WHERE
				user_id = users.id
		)
	) :: text[] AS roles,
	-- All groups the user is in.
	(
		SELECT
			array_agg(
				group_members.group_id :: text
			)
		FROM
			group_members
		WHERE
			user_id = users.id
	) :: text[] AS groups
FROM
	users
WHERE
	id = $1
`

// GetAuthorizationUserRolesRow aggregates a user's identity, flattened role
// list (site and org-scoped), and group memberships for RBAC evaluation.
type GetAuthorizationUserRolesRow struct {
	ID       uuid.UUID  `db:"id" json:"id"`
	Username string     `db:"username" json:"username"`
	Status   UserStatus `db:"status" json:"status"`
	Roles    []string   `db:"roles" json:"roles"`
	Groups   []string   `db:"groups" json:"groups"`
}

// This function returns roles for authorization purposes. Implied member roles
// are included.
func (q *sqlQuerier) GetAuthorizationUserRoles(ctx context.Context, userID uuid.UUID) (GetAuthorizationUserRolesRow, error) {
	row := q.db.QueryRowContext(ctx, getAuthorizationUserRoles, userID)
	var i GetAuthorizationUserRolesRow
	err := row.Scan(
		&i.ID,
		&i.Username,
		&i.Status,
		// pq.Array decodes the text[] columns into the string slices.
		pq.Array(&i.Roles),
		pq.Array(&i.Groups),
	)
	return i, err
}
|
|
|
|
const getUserByEmailOrUsername = `-- name: GetUserByEmailOrUsername :one
SELECT
	id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, login_type, avatar_url, deleted, last_seen_at, quiet_hours_schedule, theme_preference, name, github_com_user_id
FROM
	users
WHERE
	(LOWER(username) = LOWER($1) OR LOWER(email) = LOWER($2)) AND
	deleted = false
LIMIT
	1
`

// GetUserByEmailOrUsernameParams supplies the case-insensitive username and
// email candidates; either match returns the user.
type GetUserByEmailOrUsernameParams struct {
	Username string `db:"username" json:"username"`
	Email    string `db:"email" json:"email"`
}

// GetUserByEmailOrUsername fetches a single non-deleted user matching the
// given username or email (both compared case-insensitively).
func (q *sqlQuerier) GetUserByEmailOrUsername(ctx context.Context, arg GetUserByEmailOrUsernameParams) (User, error) {
	row := q.db.QueryRowContext(ctx, getUserByEmailOrUsername, arg.Username, arg.Email)
	var i User
	err := row.Scan(
		&i.ID,
		&i.Email,
		&i.Username,
		&i.HashedPassword,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Status,
		&i.RBACRoles,
		&i.LoginType,
		&i.AvatarURL,
		&i.Deleted,
		&i.LastSeenAt,
		&i.QuietHoursSchedule,
		&i.ThemePreference,
		&i.Name,
		&i.GithubComUserID,
	)
	return i, err
}
|
|
|
|
const getUserByID = `-- name: GetUserByID :one
SELECT
	id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, login_type, avatar_url, deleted, last_seen_at, quiet_hours_schedule, theme_preference, name, github_com_user_id
FROM
	users
WHERE
	id = $1
LIMIT
	1
`

// GetUserByID fetches a user by primary key. Note: unlike
// GetUserByEmailOrUsername, this does not filter out deleted users.
func (q *sqlQuerier) GetUserByID(ctx context.Context, id uuid.UUID) (User, error) {
	row := q.db.QueryRowContext(ctx, getUserByID, id)
	var i User
	err := row.Scan(
		&i.ID,
		&i.Email,
		&i.Username,
		&i.HashedPassword,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Status,
		&i.RBACRoles,
		&i.LoginType,
		&i.AvatarURL,
		&i.Deleted,
		&i.LastSeenAt,
		&i.QuietHoursSchedule,
		&i.ThemePreference,
		&i.Name,
		&i.GithubComUserID,
	)
	return i, err
}
|
|
|
|
const getUserCount = `-- name: GetUserCount :one
SELECT
	COUNT(*)
FROM
	users
WHERE
	deleted = false
`

// GetUserCount counts all non-deleted users regardless of status.
func (q *sqlQuerier) GetUserCount(ctx context.Context) (int64, error) {
	row := q.db.QueryRowContext(ctx, getUserCount)
	var count int64
	err := row.Scan(&count)
	return count, err
}
|
|
|
|
const getUsers = `-- name: GetUsers :many
SELECT
	id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, login_type, avatar_url, deleted, last_seen_at, quiet_hours_schedule, theme_preference, name, github_com_user_id, COUNT(*) OVER() AS count
FROM
	users
WHERE
	users.deleted = false
	AND CASE
		-- This allows using the last element on a page as effectively a cursor.
		-- This is an important option for scripts that need to paginate without
		-- duplicating or missing data.
		WHEN $1 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN (
			-- The pagination cursor is the last ID of the previous page.
			-- The query is ordered by the username field, so select all
			-- rows after the cursor.
			(LOWER(username)) > (
				SELECT
					LOWER(username)
				FROM
					users
				WHERE
					id = $1
			)
		)
		ELSE true
	END
	-- Start filters
	-- Filter by name, email or username
	AND CASE
		WHEN $2 :: text != '' THEN (
			email ILIKE concat('%', $2, '%')
			OR username ILIKE concat('%', $2, '%')
		)
		ELSE true
	END
	-- Filter by status
	AND CASE
		-- @status needs to be a text because it can be empty, If it was
		-- user_status enum, it would not.
		WHEN cardinality($3 :: user_status[]) > 0 THEN
			status = ANY($3 :: user_status[])
		ELSE true
	END
	-- Filter by rbac_roles
	AND CASE
		-- @rbac_role allows filtering by rbac roles. If 'member' is included, show everyone, as
		-- everyone is a member.
		WHEN cardinality($4 :: text[]) > 0 AND 'member' != ANY($4 :: text[]) THEN
			rbac_roles && $4 :: text[]
		ELSE true
	END
	-- Filter by last_seen
	AND CASE
		WHEN $5 :: timestamp with time zone != '0001-01-01 00:00:00Z' THEN
			last_seen_at <= $5
		ELSE true
	END
	AND CASE
		WHEN $6 :: timestamp with time zone != '0001-01-01 00:00:00Z' THEN
			last_seen_at >= $6
		ELSE true
	END
	-- End of filters

	-- Authorize Filter clause will be injected below in GetAuthorizedUsers
	-- @authorize_filter
ORDER BY
	-- Deterministic and consistent ordering of all users. This is to ensure consistent pagination.
	LOWER(username) ASC OFFSET $7
LIMIT
	-- A null limit means "no limit", so 0 means return all
	NULLIF($8 :: int, 0)
`

// GetUsersParams holds the pagination cursor, free-text search, and the
// status/role/last-seen filters for listing users. Zero values disable the
// corresponding filter (see the CASE guards in the SQL above).
type GetUsersParams struct {
	AfterID        uuid.UUID    `db:"after_id" json:"after_id"`
	Search         string       `db:"search" json:"search"`
	Status         []UserStatus `db:"status" json:"status"`
	RbacRole       []string     `db:"rbac_role" json:"rbac_role"`
	LastSeenBefore time.Time    `db:"last_seen_before" json:"last_seen_before"`
	LastSeenAfter  time.Time    `db:"last_seen_after" json:"last_seen_after"`
	OffsetOpt      int32        `db:"offset_opt" json:"offset_opt"`
	LimitOpt       int32        `db:"limit_opt" json:"limit_opt"`
}

// GetUsersRow is a User plus the window-function Count, i.e. the total number
// of rows matching the filters before OFFSET/LIMIT were applied.
type GetUsersRow struct {
	ID                 uuid.UUID      `db:"id" json:"id"`
	Email              string         `db:"email" json:"email"`
	Username           string         `db:"username" json:"username"`
	HashedPassword     []byte         `db:"hashed_password" json:"hashed_password"`
	CreatedAt          time.Time      `db:"created_at" json:"created_at"`
	UpdatedAt          time.Time      `db:"updated_at" json:"updated_at"`
	Status             UserStatus     `db:"status" json:"status"`
	RBACRoles          pq.StringArray `db:"rbac_roles" json:"rbac_roles"`
	LoginType          LoginType      `db:"login_type" json:"login_type"`
	AvatarURL          string         `db:"avatar_url" json:"avatar_url"`
	Deleted            bool           `db:"deleted" json:"deleted"`
	LastSeenAt         time.Time      `db:"last_seen_at" json:"last_seen_at"`
	QuietHoursSchedule string         `db:"quiet_hours_schedule" json:"quiet_hours_schedule"`
	ThemePreference    string         `db:"theme_preference" json:"theme_preference"`
	Name               string         `db:"name" json:"name"`
	GithubComUserID    sql.NullInt64  `db:"github_com_user_id" json:"github_com_user_id"`
	Count              int64          `db:"count" json:"count"`
}

// This will never return deleted users.
func (q *sqlQuerier) GetUsers(ctx context.Context, arg GetUsersParams) ([]GetUsersRow, error) {
	rows, err := q.db.QueryContext(ctx, getUsers,
		arg.AfterID,
		arg.Search,
		pq.Array(arg.Status),
		pq.Array(arg.RbacRole),
		arg.LastSeenBefore,
		arg.LastSeenAfter,
		arg.OffsetOpt,
		arg.LimitOpt,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetUsersRow
	for rows.Next() {
		var i GetUsersRow
		if err := rows.Scan(
			&i.ID,
			&i.Email,
			&i.Username,
			&i.HashedPassword,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.Status,
			&i.RBACRoles,
			&i.LoginType,
			&i.AvatarURL,
			&i.Deleted,
			&i.LastSeenAt,
			&i.QuietHoursSchedule,
			&i.ThemePreference,
			&i.Name,
			&i.GithubComUserID,
			&i.Count,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getUsersByIDs = `-- name: GetUsersByIDs :many
SELECT id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, login_type, avatar_url, deleted, last_seen_at, quiet_hours_schedule, theme_preference, name, github_com_user_id FROM users WHERE id = ANY($1 :: uuid [ ])
`

// This shouldn't check for deleted, because it's frequently used
// to look up references to actions. eg. a user could build a workspace
// for another user, then be deleted... we still want them to appear!
func (q *sqlQuerier) GetUsersByIDs(ctx context.Context, ids []uuid.UUID) ([]User, error) {
	rows, err := q.db.QueryContext(ctx, getUsersByIDs, pq.Array(ids))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []User
	for rows.Next() {
		var i User
		if err := rows.Scan(
			&i.ID,
			&i.Email,
			&i.Username,
			&i.HashedPassword,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.Status,
			&i.RBACRoles,
			&i.LoginType,
			&i.AvatarURL,
			&i.Deleted,
			&i.LastSeenAt,
			&i.QuietHoursSchedule,
			&i.ThemePreference,
			&i.Name,
			&i.GithubComUserID,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const insertUser = `-- name: InsertUser :one
INSERT INTO
	users (
		id,
		email,
		username,
		name,
		hashed_password,
		created_at,
		updated_at,
		rbac_roles,
		login_type
	)
VALUES
	($1, $2, $3, $4, $5, $6, $7, $8, $9) RETURNING id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, login_type, avatar_url, deleted, last_seen_at, quiet_hours_schedule, theme_preference, name, github_com_user_id
`

// InsertUserParams holds the caller-supplied columns for a new user; columns
// not listed here (status, avatar_url, etc.) take their database defaults.
type InsertUserParams struct {
	ID             uuid.UUID      `db:"id" json:"id"`
	Email          string         `db:"email" json:"email"`
	Username       string         `db:"username" json:"username"`
	Name           string         `db:"name" json:"name"`
	HashedPassword []byte         `db:"hashed_password" json:"hashed_password"`
	CreatedAt      time.Time      `db:"created_at" json:"created_at"`
	UpdatedAt      time.Time      `db:"updated_at" json:"updated_at"`
	RBACRoles      pq.StringArray `db:"rbac_roles" json:"rbac_roles"`
	LoginType      LoginType      `db:"login_type" json:"login_type"`
}

// InsertUser inserts a new user row and returns it, including the
// database-defaulted columns.
func (q *sqlQuerier) InsertUser(ctx context.Context, arg InsertUserParams) (User, error) {
	row := q.db.QueryRowContext(ctx, insertUser,
		arg.ID,
		arg.Email,
		arg.Username,
		arg.Name,
		arg.HashedPassword,
		arg.CreatedAt,
		arg.UpdatedAt,
		arg.RBACRoles,
		arg.LoginType,
	)
	var i User
	err := row.Scan(
		&i.ID,
		&i.Email,
		&i.Username,
		&i.HashedPassword,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Status,
		&i.RBACRoles,
		&i.LoginType,
		&i.AvatarURL,
		&i.Deleted,
		&i.LastSeenAt,
		&i.QuietHoursSchedule,
		&i.ThemePreference,
		&i.Name,
		&i.GithubComUserID,
	)
	return i, err
}
|
|
|
|
const updateInactiveUsersToDormant = `-- name: UpdateInactiveUsersToDormant :many
UPDATE
	users
SET
	status = 'dormant'::user_status,
	updated_at = $1
WHERE
	last_seen_at < $2 :: timestamp
	AND status = 'active'::user_status
RETURNING id, email, last_seen_at
`

// UpdateInactiveUsersToDormantParams supplies the new updated_at timestamp and
// the last-seen cutoff: active users last seen before it become dormant.
type UpdateInactiveUsersToDormantParams struct {
	UpdatedAt     time.Time `db:"updated_at" json:"updated_at"`
	LastSeenAfter time.Time `db:"last_seen_after" json:"last_seen_after"`
}

// UpdateInactiveUsersToDormantRow identifies each user transitioned to
// dormant, e.g. for notification purposes.
type UpdateInactiveUsersToDormantRow struct {
	ID         uuid.UUID `db:"id" json:"id"`
	Email      string    `db:"email" json:"email"`
	LastSeenAt time.Time `db:"last_seen_at" json:"last_seen_at"`
}

// UpdateInactiveUsersToDormant marks active users last seen before the cutoff
// as dormant and returns the affected users.
func (q *sqlQuerier) UpdateInactiveUsersToDormant(ctx context.Context, arg UpdateInactiveUsersToDormantParams) ([]UpdateInactiveUsersToDormantRow, error) {
	rows, err := q.db.QueryContext(ctx, updateInactiveUsersToDormant, arg.UpdatedAt, arg.LastSeenAfter)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []UpdateInactiveUsersToDormantRow
	for rows.Next() {
		var i UpdateInactiveUsersToDormantRow
		if err := rows.Scan(&i.ID, &i.Email, &i.LastSeenAt); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const updateUserAppearanceSettings = `-- name: UpdateUserAppearanceSettings :one
UPDATE
	users
SET
	theme_preference = $2,
	updated_at = $3
WHERE
	id = $1
RETURNING id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, login_type, avatar_url, deleted, last_seen_at, quiet_hours_schedule, theme_preference, name, github_com_user_id
`

// UpdateUserAppearanceSettingsParams carries the user's new theme preference.
type UpdateUserAppearanceSettingsParams struct {
	ID              uuid.UUID `db:"id" json:"id"`
	ThemePreference string    `db:"theme_preference" json:"theme_preference"`
	UpdatedAt       time.Time `db:"updated_at" json:"updated_at"`
}

// UpdateUserAppearanceSettings sets a user's theme preference and returns the
// updated user.
func (q *sqlQuerier) UpdateUserAppearanceSettings(ctx context.Context, arg UpdateUserAppearanceSettingsParams) (User, error) {
	row := q.db.QueryRowContext(ctx, updateUserAppearanceSettings, arg.ID, arg.ThemePreference, arg.UpdatedAt)
	var i User
	err := row.Scan(
		&i.ID,
		&i.Email,
		&i.Username,
		&i.HashedPassword,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Status,
		&i.RBACRoles,
		&i.LoginType,
		&i.AvatarURL,
		&i.Deleted,
		&i.LastSeenAt,
		&i.QuietHoursSchedule,
		&i.ThemePreference,
		&i.Name,
		&i.GithubComUserID,
	)
	return i, err
}
|
|
|
|
const updateUserDeletedByID = `-- name: UpdateUserDeletedByID :exec
UPDATE
	users
SET
	deleted = true
WHERE
	id = $1
`

// UpdateUserDeletedByID soft-deletes a user by setting the deleted flag.
// Note it does not touch updated_at.
func (q *sqlQuerier) UpdateUserDeletedByID(ctx context.Context, id uuid.UUID) error {
	_, err := q.db.ExecContext(ctx, updateUserDeletedByID, id)
	return err
}
|
|
|
|
const updateUserGithubComUserID = `-- name: UpdateUserGithubComUserID :exec
UPDATE
	users
SET
	github_com_user_id = $2
WHERE
	id = $1
`

// UpdateUserGithubComUserIDParams pairs a user ID with the (nullable)
// github.com numeric user ID to store.
type UpdateUserGithubComUserIDParams struct {
	ID              uuid.UUID     `db:"id" json:"id"`
	GithubComUserID sql.NullInt64 `db:"github_com_user_id" json:"github_com_user_id"`
}

// UpdateUserGithubComUserID sets (or clears, via NULL) a user's linked
// github.com user ID.
func (q *sqlQuerier) UpdateUserGithubComUserID(ctx context.Context, arg UpdateUserGithubComUserIDParams) error {
	_, err := q.db.ExecContext(ctx, updateUserGithubComUserID, arg.ID, arg.GithubComUserID)
	return err
}
|
|
|
|
const updateUserHashedPassword = `-- name: UpdateUserHashedPassword :exec
UPDATE
	users
SET
	hashed_password = $2
WHERE
	id = $1
`

// UpdateUserHashedPasswordParams pairs a user ID with the new password hash.
type UpdateUserHashedPasswordParams struct {
	ID             uuid.UUID `db:"id" json:"id"`
	HashedPassword []byte    `db:"hashed_password" json:"hashed_password"`
}

// UpdateUserHashedPassword replaces a user's stored password hash.
func (q *sqlQuerier) UpdateUserHashedPassword(ctx context.Context, arg UpdateUserHashedPasswordParams) error {
	_, err := q.db.ExecContext(ctx, updateUserHashedPassword, arg.ID, arg.HashedPassword)
	return err
}
|
|
|
|
const updateUserLastSeenAt = `-- name: UpdateUserLastSeenAt :one
UPDATE
	users
SET
	last_seen_at = $2,
	updated_at = $3
WHERE
	id = $1 RETURNING id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, login_type, avatar_url, deleted, last_seen_at, quiet_hours_schedule, theme_preference, name, github_com_user_id
`

// UpdateUserLastSeenAtParams carries the new last-seen and updated-at
// timestamps for a user.
type UpdateUserLastSeenAtParams struct {
	ID         uuid.UUID `db:"id" json:"id"`
	LastSeenAt time.Time `db:"last_seen_at" json:"last_seen_at"`
	UpdatedAt  time.Time `db:"updated_at" json:"updated_at"`
}

// UpdateUserLastSeenAt records user activity by bumping last_seen_at and
// returns the updated user.
func (q *sqlQuerier) UpdateUserLastSeenAt(ctx context.Context, arg UpdateUserLastSeenAtParams) (User, error) {
	row := q.db.QueryRowContext(ctx, updateUserLastSeenAt, arg.ID, arg.LastSeenAt, arg.UpdatedAt)
	var i User
	err := row.Scan(
		&i.ID,
		&i.Email,
		&i.Username,
		&i.HashedPassword,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Status,
		&i.RBACRoles,
		&i.LoginType,
		&i.AvatarURL,
		&i.Deleted,
		&i.LastSeenAt,
		&i.QuietHoursSchedule,
		&i.ThemePreference,
		&i.Name,
		&i.GithubComUserID,
	)
	return i, err
}
|
|
|
|
const updateUserLoginType = `-- name: UpdateUserLoginType :one
UPDATE
	users
SET
	login_type = $1,
	hashed_password = CASE WHEN $1 = 'password' :: login_type THEN
		users.hashed_password
	ELSE
		-- If the login type is not password, then the password should be
		-- cleared.
		'':: bytea
	END
WHERE
	id = $2 RETURNING id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, login_type, avatar_url, deleted, last_seen_at, quiet_hours_schedule, theme_preference, name, github_com_user_id
`

// UpdateUserLoginTypeParams carries the target login type and the user to
// update.
type UpdateUserLoginTypeParams struct {
	NewLoginType LoginType `db:"new_login_type" json:"new_login_type"`
	UserID       uuid.UUID `db:"user_id" json:"user_id"`
}

// UpdateUserLoginType changes a user's login type; switching away from
// 'password' also clears the stored password hash (see the SQL CASE above).
func (q *sqlQuerier) UpdateUserLoginType(ctx context.Context, arg UpdateUserLoginTypeParams) (User, error) {
	row := q.db.QueryRowContext(ctx, updateUserLoginType, arg.NewLoginType, arg.UserID)
	var i User
	err := row.Scan(
		&i.ID,
		&i.Email,
		&i.Username,
		&i.HashedPassword,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Status,
		&i.RBACRoles,
		&i.LoginType,
		&i.AvatarURL,
		&i.Deleted,
		&i.LastSeenAt,
		&i.QuietHoursSchedule,
		&i.ThemePreference,
		&i.Name,
		&i.GithubComUserID,
	)
	return i, err
}
|
|
|
|
const updateUserProfile = `-- name: UpdateUserProfile :one
UPDATE
	users
SET
	email = $2,
	username = $3,
	avatar_url = $4,
	updated_at = $5,
	name = $6
WHERE
	id = $1
RETURNING id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, login_type, avatar_url, deleted, last_seen_at, quiet_hours_schedule, theme_preference, name, github_com_user_id
`

// UpdateUserProfileParams carries the editable profile fields of a user.
type UpdateUserProfileParams struct {
	ID        uuid.UUID `db:"id" json:"id"`
	Email     string    `db:"email" json:"email"`
	Username  string    `db:"username" json:"username"`
	AvatarURL string    `db:"avatar_url" json:"avatar_url"`
	UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
	Name      string    `db:"name" json:"name"`
}

// UpdateUserProfile overwrites a user's profile fields and returns the
// updated user.
func (q *sqlQuerier) UpdateUserProfile(ctx context.Context, arg UpdateUserProfileParams) (User, error) {
	row := q.db.QueryRowContext(ctx, updateUserProfile,
		arg.ID,
		arg.Email,
		arg.Username,
		arg.AvatarURL,
		arg.UpdatedAt,
		arg.Name,
	)
	var i User
	err := row.Scan(
		&i.ID,
		&i.Email,
		&i.Username,
		&i.HashedPassword,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Status,
		&i.RBACRoles,
		&i.LoginType,
		&i.AvatarURL,
		&i.Deleted,
		&i.LastSeenAt,
		&i.QuietHoursSchedule,
		&i.ThemePreference,
		&i.Name,
		&i.GithubComUserID,
	)
	return i, err
}
|
|
|
|
const updateUserQuietHoursSchedule = `-- name: UpdateUserQuietHoursSchedule :one
UPDATE
	users
SET
	quiet_hours_schedule = $2
WHERE
	id = $1
RETURNING id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, login_type, avatar_url, deleted, last_seen_at, quiet_hours_schedule, theme_preference, name, github_com_user_id
`

// UpdateUserQuietHoursScheduleParams pairs a user ID with the new quiet-hours
// schedule string.
type UpdateUserQuietHoursScheduleParams struct {
	ID                 uuid.UUID `db:"id" json:"id"`
	QuietHoursSchedule string    `db:"quiet_hours_schedule" json:"quiet_hours_schedule"`
}

// UpdateUserQuietHoursSchedule sets a user's quiet-hours schedule and returns
// the updated user. Note it does not touch updated_at.
func (q *sqlQuerier) UpdateUserQuietHoursSchedule(ctx context.Context, arg UpdateUserQuietHoursScheduleParams) (User, error) {
	row := q.db.QueryRowContext(ctx, updateUserQuietHoursSchedule, arg.ID, arg.QuietHoursSchedule)
	var i User
	err := row.Scan(
		&i.ID,
		&i.Email,
		&i.Username,
		&i.HashedPassword,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Status,
		&i.RBACRoles,
		&i.LoginType,
		&i.AvatarURL,
		&i.Deleted,
		&i.LastSeenAt,
		&i.QuietHoursSchedule,
		&i.ThemePreference,
		&i.Name,
		&i.GithubComUserID,
	)
	return i, err
}
|
|
|
|
const updateUserRoles = `-- name: UpdateUserRoles :one
UPDATE
	users
SET
	-- Remove all duplicates from the roles.
	rbac_roles = ARRAY(SELECT DISTINCT UNNEST($1 :: text[]))
WHERE
	id = $2
RETURNING id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, login_type, avatar_url, deleted, last_seen_at, quiet_hours_schedule, theme_preference, name, github_com_user_id
`

// UpdateUserRolesParams carries the full replacement set of site-wide RBAC
// roles for a user; duplicates are removed by the SQL above.
type UpdateUserRolesParams struct {
	GrantedRoles []string  `db:"granted_roles" json:"granted_roles"`
	ID           uuid.UUID `db:"id" json:"id"`
}

// UpdateUserRoles replaces a user's rbac_roles with the de-duplicated granted
// set and returns the updated user.
func (q *sqlQuerier) UpdateUserRoles(ctx context.Context, arg UpdateUserRolesParams) (User, error) {
	row := q.db.QueryRowContext(ctx, updateUserRoles, pq.Array(arg.GrantedRoles), arg.ID)
	var i User
	err := row.Scan(
		&i.ID,
		&i.Email,
		&i.Username,
		&i.HashedPassword,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Status,
		&i.RBACRoles,
		&i.LoginType,
		&i.AvatarURL,
		&i.Deleted,
		&i.LastSeenAt,
		&i.QuietHoursSchedule,
		&i.ThemePreference,
		&i.Name,
		&i.GithubComUserID,
	)
	return i, err
}
|
|
|
|
const updateUserStatus = `-- name: UpdateUserStatus :one
UPDATE
	users
SET
	status = $2,
	updated_at = $3
WHERE
	id = $1 RETURNING id, email, username, hashed_password, created_at, updated_at, status, rbac_roles, login_type, avatar_url, deleted, last_seen_at, quiet_hours_schedule, theme_preference, name, github_com_user_id
`

// UpdateUserStatusParams carries the new status (e.g. active, suspended,
// dormant) for a user.
type UpdateUserStatusParams struct {
	ID        uuid.UUID  `db:"id" json:"id"`
	Status    UserStatus `db:"status" json:"status"`
	UpdatedAt time.Time  `db:"updated_at" json:"updated_at"`
}

// UpdateUserStatus sets a user's status and returns the updated user.
func (q *sqlQuerier) UpdateUserStatus(ctx context.Context, arg UpdateUserStatusParams) (User, error) {
	row := q.db.QueryRowContext(ctx, updateUserStatus, arg.ID, arg.Status, arg.UpdatedAt)
	var i User
	err := row.Scan(
		&i.ID,
		&i.Email,
		&i.Username,
		&i.HashedPassword,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Status,
		&i.RBACRoles,
		&i.LoginType,
		&i.AvatarURL,
		&i.Deleted,
		&i.LastSeenAt,
		&i.QuietHoursSchedule,
		&i.ThemePreference,
		&i.Name,
		&i.GithubComUserID,
	)
	return i, err
}
|
|
|
|
const deleteWorkspaceAgentPortShare = `-- name: DeleteWorkspaceAgentPortShare :exec
DELETE FROM
	workspace_agent_port_share
WHERE
	workspace_id = $1
	AND agent_name = $2
	AND port = $3
`

// DeleteWorkspaceAgentPortShareParams identifies a port share by its
// (workspace_id, agent_name, port) key.
type DeleteWorkspaceAgentPortShareParams struct {
	WorkspaceID uuid.UUID `db:"workspace_id" json:"workspace_id"`
	AgentName   string    `db:"agent_name" json:"agent_name"`
	Port        int32     `db:"port" json:"port"`
}

// DeleteWorkspaceAgentPortShare removes a single port share row.
func (q *sqlQuerier) DeleteWorkspaceAgentPortShare(ctx context.Context, arg DeleteWorkspaceAgentPortShareParams) error {
	_, err := q.db.ExecContext(ctx, deleteWorkspaceAgentPortShare, arg.WorkspaceID, arg.AgentName, arg.Port)
	return err
}
|
|
|
|
const deleteWorkspaceAgentPortSharesByTemplate = `-- name: DeleteWorkspaceAgentPortSharesByTemplate :exec
DELETE FROM
	workspace_agent_port_share
WHERE
	workspace_id IN (
		SELECT
			id
		FROM
			workspaces
		WHERE
			template_id = $1
	)
`

// DeleteWorkspaceAgentPortSharesByTemplate removes every port share belonging
// to any workspace created from the given template.
func (q *sqlQuerier) DeleteWorkspaceAgentPortSharesByTemplate(ctx context.Context, templateID uuid.UUID) error {
	_, err := q.db.ExecContext(ctx, deleteWorkspaceAgentPortSharesByTemplate, templateID)
	return err
}
|
|
|
|
const getWorkspaceAgentPortShare = `-- name: GetWorkspaceAgentPortShare :one
SELECT
	workspace_id, agent_name, port, share_level, protocol
FROM
	workspace_agent_port_share
WHERE
	workspace_id = $1
	AND agent_name = $2
	AND port = $3
`

// GetWorkspaceAgentPortShareParams holds the arguments for
// GetWorkspaceAgentPortShare.
type GetWorkspaceAgentPortShareParams struct {
	WorkspaceID uuid.UUID `db:"workspace_id" json:"workspace_id"`
	AgentName   string    `db:"agent_name" json:"agent_name"`
	Port        int32     `db:"port" json:"port"`
}

// GetWorkspaceAgentPortShare fetches the single port share identified by the
// (workspace, agent name, port) triple.
func (q *sqlQuerier) GetWorkspaceAgentPortShare(ctx context.Context, arg GetWorkspaceAgentPortShareParams) (WorkspaceAgentPortShare, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceAgentPortShare, arg.WorkspaceID, arg.AgentName, arg.Port)
	var i WorkspaceAgentPortShare
	err := row.Scan(
		&i.WorkspaceID,
		&i.AgentName,
		&i.Port,
		&i.ShareLevel,
		&i.Protocol,
	)
	return i, err
}
|
|
|
|
const listWorkspaceAgentPortShares = `-- name: ListWorkspaceAgentPortShares :many
SELECT
	workspace_id, agent_name, port, share_level, protocol
FROM
	workspace_agent_port_share
WHERE
	workspace_id = $1
`

// ListWorkspaceAgentPortShares returns all port shares for the given
// workspace, across all of its agents.
func (q *sqlQuerier) ListWorkspaceAgentPortShares(ctx context.Context, workspaceID uuid.UUID) ([]WorkspaceAgentPortShare, error) {
	rows, err := q.db.QueryContext(ctx, listWorkspaceAgentPortShares, workspaceID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceAgentPortShare
	for rows.Next() {
		var i WorkspaceAgentPortShare
		if err := rows.Scan(
			&i.WorkspaceID,
			&i.AgentName,
			&i.Port,
			&i.ShareLevel,
			&i.Protocol,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const reduceWorkspaceAgentShareLevelToAuthenticatedByTemplate = `-- name: ReduceWorkspaceAgentShareLevelToAuthenticatedByTemplate :exec
UPDATE
	workspace_agent_port_share
SET
	share_level = 'authenticated'
WHERE
	share_level = 'public'
	AND workspace_id IN (
		SELECT
			id
		FROM
			workspaces
		WHERE
			template_id = $1
	)
`

// ReduceWorkspaceAgentShareLevelToAuthenticatedByTemplate downgrades every
// 'public' port share to 'authenticated' for all workspaces created from the
// given template. Shares already at or below 'authenticated' are untouched.
func (q *sqlQuerier) ReduceWorkspaceAgentShareLevelToAuthenticatedByTemplate(ctx context.Context, templateID uuid.UUID) error {
	_, err := q.db.ExecContext(ctx, reduceWorkspaceAgentShareLevelToAuthenticatedByTemplate, templateID)
	return err
}
|
|
|
|
const upsertWorkspaceAgentPortShare = `-- name: UpsertWorkspaceAgentPortShare :one
INSERT INTO
	workspace_agent_port_share (
		workspace_id,
		agent_name,
		port,
		share_level,
		protocol
	)
VALUES (
	$1,
	$2,
	$3,
	$4,
	$5
)
ON CONFLICT (
	workspace_id,
	agent_name,
	port
)
DO UPDATE SET
	share_level = $4,
	protocol = $5
RETURNING workspace_id, agent_name, port, share_level, protocol
`

// UpsertWorkspaceAgentPortShareParams holds the arguments for
// UpsertWorkspaceAgentPortShare.
type UpsertWorkspaceAgentPortShareParams struct {
	WorkspaceID uuid.UUID         `db:"workspace_id" json:"workspace_id"`
	AgentName   string            `db:"agent_name" json:"agent_name"`
	Port        int32             `db:"port" json:"port"`
	ShareLevel  AppSharingLevel   `db:"share_level" json:"share_level"`
	Protocol    PortShareProtocol `db:"protocol" json:"protocol"`
}

// UpsertWorkspaceAgentPortShare inserts a port share, or, if one already
// exists for the (workspace, agent name, port) triple, updates its share
// level and protocol. The resulting row is returned.
func (q *sqlQuerier) UpsertWorkspaceAgentPortShare(ctx context.Context, arg UpsertWorkspaceAgentPortShareParams) (WorkspaceAgentPortShare, error) {
	row := q.db.QueryRowContext(ctx, upsertWorkspaceAgentPortShare,
		arg.WorkspaceID,
		arg.AgentName,
		arg.Port,
		arg.ShareLevel,
		arg.Protocol,
	)
	var i WorkspaceAgentPortShare
	err := row.Scan(
		&i.WorkspaceID,
		&i.AgentName,
		&i.Port,
		&i.ShareLevel,
		&i.Protocol,
	)
	return i, err
}
|
|
|
|
const deleteOldWorkspaceAgentLogs = `-- name: DeleteOldWorkspaceAgentLogs :exec
WITH
	latest_builds AS (
		SELECT
			workspace_id, max(build_number) AS max_build_number
		FROM
			workspace_builds
		GROUP BY
			workspace_id
	),
	old_agents AS (
		SELECT
			wa.id
		FROM
			workspace_agents AS wa
		JOIN
			workspace_resources AS wr
		ON
			wa.resource_id = wr.id
		JOIN
			workspace_builds AS wb
		ON
			wb.job_id = wr.job_id
		LEFT JOIN
			latest_builds
		ON
			latest_builds.workspace_id = wb.workspace_id
		AND
			latest_builds.max_build_number = wb.build_number
		WHERE
			-- Filter out the latest builds for each workspace.
			latest_builds.workspace_id IS NULL
			AND CASE
				-- If the last time the agent connected was before @threshold
				WHEN wa.last_connected_at IS NOT NULL THEN
					wa.last_connected_at < $1 :: timestamptz
				-- The agent never connected, and was created before @threshold
				ELSE wa.created_at < $1 :: timestamptz
			END
	)
DELETE FROM workspace_agent_logs WHERE agent_id IN (SELECT id FROM old_agents)
`

// If an agent hasn't connected since the given threshold, we purge its logs.
// Agents that never connected are purged once their created_at passes the
// threshold.
// Exception: if the logs are related to the latest build, we keep those around
// regardless of age.
// Logs can take up a lot of space, so it's important we clean up frequently.
func (q *sqlQuerier) DeleteOldWorkspaceAgentLogs(ctx context.Context, threshold time.Time) error {
	_, err := q.db.ExecContext(ctx, deleteOldWorkspaceAgentLogs, threshold)
	return err
}
|
|
|
|
const getWorkspaceAgentAndLatestBuildByAuthToken = `-- name: GetWorkspaceAgentAndLatestBuildByAuthToken :one
SELECT
	workspaces.id, workspaces.created_at, workspaces.updated_at, workspaces.owner_id, workspaces.organization_id, workspaces.template_id, workspaces.deleted, workspaces.name, workspaces.autostart_schedule, workspaces.ttl, workspaces.last_used_at, workspaces.dormant_at, workspaces.deleting_at, workspaces.automatic_updates, workspaces.favorite,
	workspace_agents.id, workspace_agents.created_at, workspace_agents.updated_at, workspace_agents.name, workspace_agents.first_connected_at, workspace_agents.last_connected_at, workspace_agents.disconnected_at, workspace_agents.resource_id, workspace_agents.auth_token, workspace_agents.auth_instance_id, workspace_agents.architecture, workspace_agents.environment_variables, workspace_agents.operating_system, workspace_agents.instance_metadata, workspace_agents.resource_metadata, workspace_agents.directory, workspace_agents.version, workspace_agents.last_connected_replica_id, workspace_agents.connection_timeout_seconds, workspace_agents.troubleshooting_url, workspace_agents.motd_file, workspace_agents.lifecycle_state, workspace_agents.expanded_directory, workspace_agents.logs_length, workspace_agents.logs_overflowed, workspace_agents.started_at, workspace_agents.ready_at, workspace_agents.subsystems, workspace_agents.display_apps, workspace_agents.api_version, workspace_agents.display_order,
	workspace_build_with_user.id, workspace_build_with_user.created_at, workspace_build_with_user.updated_at, workspace_build_with_user.workspace_id, workspace_build_with_user.template_version_id, workspace_build_with_user.build_number, workspace_build_with_user.transition, workspace_build_with_user.initiator_id, workspace_build_with_user.provisioner_state, workspace_build_with_user.job_id, workspace_build_with_user.deadline, workspace_build_with_user.reason, workspace_build_with_user.daily_cost, workspace_build_with_user.max_deadline, workspace_build_with_user.initiator_by_avatar_url, workspace_build_with_user.initiator_by_username
FROM
	workspace_agents
JOIN
	workspace_resources
ON
	workspace_agents.resource_id = workspace_resources.id
JOIN
	workspace_build_with_user
ON
	workspace_resources.job_id = workspace_build_with_user.job_id
JOIN
	workspaces
ON
	workspace_build_with_user.workspace_id = workspaces.id
WHERE
	-- This should only match 1 agent, so 1 returned row or 0.
	workspace_agents.auth_token = $1::uuid
	AND workspaces.deleted = FALSE
	-- Filter out builds that are not the latest.
	AND workspace_build_with_user.build_number = (
		-- Select from workspace_builds as it's one less join compared
		-- to workspace_build_with_user.
		SELECT
			MAX(build_number)
		FROM
			workspace_builds
		WHERE
			workspace_id = workspace_build_with_user.workspace_id
	)
`

// GetWorkspaceAgentAndLatestBuildByAuthTokenRow bundles the workspace, agent,
// and latest build matched by an agent auth token.
type GetWorkspaceAgentAndLatestBuildByAuthTokenRow struct {
	Workspace      Workspace      `db:"workspace" json:"workspace"`
	WorkspaceAgent WorkspaceAgent `db:"workspace_agent" json:"workspace_agent"`
	WorkspaceBuild WorkspaceBuild `db:"workspace_build" json:"workspace_build"`
}

// GetWorkspaceAgentAndLatestBuildByAuthToken looks up the agent with the given
// auth token, restricted to non-deleted workspaces and the workspace's latest
// build, returning the workspace, agent, and build together.
func (q *sqlQuerier) GetWorkspaceAgentAndLatestBuildByAuthToken(ctx context.Context, authToken uuid.UUID) (GetWorkspaceAgentAndLatestBuildByAuthTokenRow, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceAgentAndLatestBuildByAuthToken, authToken)
	var i GetWorkspaceAgentAndLatestBuildByAuthTokenRow
	err := row.Scan(
		&i.Workspace.ID,
		&i.Workspace.CreatedAt,
		&i.Workspace.UpdatedAt,
		&i.Workspace.OwnerID,
		&i.Workspace.OrganizationID,
		&i.Workspace.TemplateID,
		&i.Workspace.Deleted,
		&i.Workspace.Name,
		&i.Workspace.AutostartSchedule,
		&i.Workspace.Ttl,
		&i.Workspace.LastUsedAt,
		&i.Workspace.DormantAt,
		&i.Workspace.DeletingAt,
		&i.Workspace.AutomaticUpdates,
		&i.Workspace.Favorite,
		&i.WorkspaceAgent.ID,
		&i.WorkspaceAgent.CreatedAt,
		&i.WorkspaceAgent.UpdatedAt,
		&i.WorkspaceAgent.Name,
		&i.WorkspaceAgent.FirstConnectedAt,
		&i.WorkspaceAgent.LastConnectedAt,
		&i.WorkspaceAgent.DisconnectedAt,
		&i.WorkspaceAgent.ResourceID,
		&i.WorkspaceAgent.AuthToken,
		&i.WorkspaceAgent.AuthInstanceID,
		&i.WorkspaceAgent.Architecture,
		&i.WorkspaceAgent.EnvironmentVariables,
		&i.WorkspaceAgent.OperatingSystem,
		&i.WorkspaceAgent.InstanceMetadata,
		&i.WorkspaceAgent.ResourceMetadata,
		&i.WorkspaceAgent.Directory,
		&i.WorkspaceAgent.Version,
		&i.WorkspaceAgent.LastConnectedReplicaID,
		&i.WorkspaceAgent.ConnectionTimeoutSeconds,
		&i.WorkspaceAgent.TroubleshootingURL,
		&i.WorkspaceAgent.MOTDFile,
		&i.WorkspaceAgent.LifecycleState,
		&i.WorkspaceAgent.ExpandedDirectory,
		&i.WorkspaceAgent.LogsLength,
		&i.WorkspaceAgent.LogsOverflowed,
		&i.WorkspaceAgent.StartedAt,
		&i.WorkspaceAgent.ReadyAt,
		pq.Array(&i.WorkspaceAgent.Subsystems),
		pq.Array(&i.WorkspaceAgent.DisplayApps),
		&i.WorkspaceAgent.APIVersion,
		&i.WorkspaceAgent.DisplayOrder,
		&i.WorkspaceBuild.ID,
		&i.WorkspaceBuild.CreatedAt,
		&i.WorkspaceBuild.UpdatedAt,
		&i.WorkspaceBuild.WorkspaceID,
		&i.WorkspaceBuild.TemplateVersionID,
		&i.WorkspaceBuild.BuildNumber,
		&i.WorkspaceBuild.Transition,
		&i.WorkspaceBuild.InitiatorID,
		&i.WorkspaceBuild.ProvisionerState,
		&i.WorkspaceBuild.JobID,
		&i.WorkspaceBuild.Deadline,
		&i.WorkspaceBuild.Reason,
		&i.WorkspaceBuild.DailyCost,
		&i.WorkspaceBuild.MaxDeadline,
		&i.WorkspaceBuild.InitiatorByAvatarUrl,
		&i.WorkspaceBuild.InitiatorByUsername,
	)
	return i, err
}
|
|
|
|
const getWorkspaceAgentByID = `-- name: GetWorkspaceAgentByID :one
SELECT
	id, created_at, updated_at, name, first_connected_at, last_connected_at, disconnected_at, resource_id, auth_token, auth_instance_id, architecture, environment_variables, operating_system, instance_metadata, resource_metadata, directory, version, last_connected_replica_id, connection_timeout_seconds, troubleshooting_url, motd_file, lifecycle_state, expanded_directory, logs_length, logs_overflowed, started_at, ready_at, subsystems, display_apps, api_version, display_order
FROM
	workspace_agents
WHERE
	id = $1
`

// GetWorkspaceAgentByID fetches a single workspace agent by its ID.
func (q *sqlQuerier) GetWorkspaceAgentByID(ctx context.Context, id uuid.UUID) (WorkspaceAgent, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceAgentByID, id)
	var i WorkspaceAgent
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Name,
		&i.FirstConnectedAt,
		&i.LastConnectedAt,
		&i.DisconnectedAt,
		&i.ResourceID,
		&i.AuthToken,
		&i.AuthInstanceID,
		&i.Architecture,
		&i.EnvironmentVariables,
		&i.OperatingSystem,
		&i.InstanceMetadata,
		&i.ResourceMetadata,
		&i.Directory,
		&i.Version,
		&i.LastConnectedReplicaID,
		&i.ConnectionTimeoutSeconds,
		&i.TroubleshootingURL,
		&i.MOTDFile,
		&i.LifecycleState,
		&i.ExpandedDirectory,
		&i.LogsLength,
		&i.LogsOverflowed,
		&i.StartedAt,
		&i.ReadyAt,
		pq.Array(&i.Subsystems),
		pq.Array(&i.DisplayApps),
		&i.APIVersion,
		&i.DisplayOrder,
	)
	return i, err
}
|
|
|
|
const getWorkspaceAgentByInstanceID = `-- name: GetWorkspaceAgentByInstanceID :one
SELECT
	id, created_at, updated_at, name, first_connected_at, last_connected_at, disconnected_at, resource_id, auth_token, auth_instance_id, architecture, environment_variables, operating_system, instance_metadata, resource_metadata, directory, version, last_connected_replica_id, connection_timeout_seconds, troubleshooting_url, motd_file, lifecycle_state, expanded_directory, logs_length, logs_overflowed, started_at, ready_at, subsystems, display_apps, api_version, display_order
FROM
	workspace_agents
WHERE
	auth_instance_id = $1 :: TEXT
ORDER BY
	created_at DESC
`

// GetWorkspaceAgentByInstanceID fetches the agent with the given cloud
// instance ID. Multiple agents can share an instance ID across rebuilds;
// ordering by created_at DESC means the most recently created one is returned.
func (q *sqlQuerier) GetWorkspaceAgentByInstanceID(ctx context.Context, authInstanceID string) (WorkspaceAgent, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceAgentByInstanceID, authInstanceID)
	var i WorkspaceAgent
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Name,
		&i.FirstConnectedAt,
		&i.LastConnectedAt,
		&i.DisconnectedAt,
		&i.ResourceID,
		&i.AuthToken,
		&i.AuthInstanceID,
		&i.Architecture,
		&i.EnvironmentVariables,
		&i.OperatingSystem,
		&i.InstanceMetadata,
		&i.ResourceMetadata,
		&i.Directory,
		&i.Version,
		&i.LastConnectedReplicaID,
		&i.ConnectionTimeoutSeconds,
		&i.TroubleshootingURL,
		&i.MOTDFile,
		&i.LifecycleState,
		&i.ExpandedDirectory,
		&i.LogsLength,
		&i.LogsOverflowed,
		&i.StartedAt,
		&i.ReadyAt,
		pq.Array(&i.Subsystems),
		pq.Array(&i.DisplayApps),
		&i.APIVersion,
		&i.DisplayOrder,
	)
	return i, err
}
|
|
|
|
const getWorkspaceAgentLifecycleStateByID = `-- name: GetWorkspaceAgentLifecycleStateByID :one
SELECT
	lifecycle_state,
	started_at,
	ready_at
FROM
	workspace_agents
WHERE
	id = $1
`

// GetWorkspaceAgentLifecycleStateByIDRow is the trimmed-down agent row
// returned by GetWorkspaceAgentLifecycleStateByID.
type GetWorkspaceAgentLifecycleStateByIDRow struct {
	LifecycleState WorkspaceAgentLifecycleState `db:"lifecycle_state" json:"lifecycle_state"`
	StartedAt      sql.NullTime                 `db:"started_at" json:"started_at"`
	ReadyAt        sql.NullTime                 `db:"ready_at" json:"ready_at"`
}

// GetWorkspaceAgentLifecycleStateByID fetches only the lifecycle state and
// start/ready timestamps of the agent with the given ID.
func (q *sqlQuerier) GetWorkspaceAgentLifecycleStateByID(ctx context.Context, id uuid.UUID) (GetWorkspaceAgentLifecycleStateByIDRow, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceAgentLifecycleStateByID, id)
	var i GetWorkspaceAgentLifecycleStateByIDRow
	err := row.Scan(&i.LifecycleState, &i.StartedAt, &i.ReadyAt)
	return i, err
}
|
|
|
|
const getWorkspaceAgentLogSourcesByAgentIDs = `-- name: GetWorkspaceAgentLogSourcesByAgentIDs :many
SELECT workspace_agent_id, id, created_at, display_name, icon FROM workspace_agent_log_sources WHERE workspace_agent_id = ANY($1 :: uuid [ ])
`

// GetWorkspaceAgentLogSourcesByAgentIDs returns all log sources belonging to
// any of the given agent IDs.
func (q *sqlQuerier) GetWorkspaceAgentLogSourcesByAgentIDs(ctx context.Context, ids []uuid.UUID) ([]WorkspaceAgentLogSource, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceAgentLogSourcesByAgentIDs, pq.Array(ids))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceAgentLogSource
	for rows.Next() {
		var i WorkspaceAgentLogSource
		if err := rows.Scan(
			&i.WorkspaceAgentID,
			&i.ID,
			&i.CreatedAt,
			&i.DisplayName,
			&i.Icon,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getWorkspaceAgentLogsAfter = `-- name: GetWorkspaceAgentLogsAfter :many
SELECT
	agent_id, created_at, output, id, level, log_source_id
FROM
	workspace_agent_logs
WHERE
	agent_id = $1
	AND (
		id > $2
	) ORDER BY id ASC
`

// GetWorkspaceAgentLogsAfterParams holds the arguments for
// GetWorkspaceAgentLogsAfter.
type GetWorkspaceAgentLogsAfterParams struct {
	AgentID uuid.UUID `db:"agent_id" json:"agent_id"`
	// NOTE(review): despite the name/tag, this value is compared against the
	// log row id (` + "`" + `id > $2` + "`" + ` in the query), not created_at —
	// presumably log ids increase in insertion order; confirm before relying on it.
	CreatedAfter int64 `db:"created_after" json:"created_after"`
}

// GetWorkspaceAgentLogsAfter returns the agent's logs with id greater than
// the given cursor, in ascending id order.
func (q *sqlQuerier) GetWorkspaceAgentLogsAfter(ctx context.Context, arg GetWorkspaceAgentLogsAfterParams) ([]WorkspaceAgentLog, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceAgentLogsAfter, arg.AgentID, arg.CreatedAfter)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceAgentLog
	for rows.Next() {
		var i WorkspaceAgentLog
		if err := rows.Scan(
			&i.AgentID,
			&i.CreatedAt,
			&i.Output,
			&i.ID,
			&i.Level,
			&i.LogSourceID,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getWorkspaceAgentMetadata = `-- name: GetWorkspaceAgentMetadata :many
SELECT
	workspace_agent_id, display_name, key, script, value, error, timeout, interval, collected_at, display_order
FROM
	workspace_agent_metadata
WHERE
	workspace_agent_id = $1
	AND CASE WHEN COALESCE(array_length($2::text[], 1), 0) > 0 THEN key = ANY($2::text[]) ELSE TRUE END
`

// GetWorkspaceAgentMetadataParams holds the arguments for
// GetWorkspaceAgentMetadata.
type GetWorkspaceAgentMetadataParams struct {
	WorkspaceAgentID uuid.UUID `db:"workspace_agent_id" json:"workspace_agent_id"`
	// Keys optionally restricts the result to the given metadata keys; an
	// empty (or nil) slice returns all metadata for the agent.
	Keys []string `db:"keys" json:"keys"`
}

// GetWorkspaceAgentMetadata returns the metadata entries for an agent,
// optionally filtered to a set of keys.
func (q *sqlQuerier) GetWorkspaceAgentMetadata(ctx context.Context, arg GetWorkspaceAgentMetadataParams) ([]WorkspaceAgentMetadatum, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceAgentMetadata, arg.WorkspaceAgentID, pq.Array(arg.Keys))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceAgentMetadatum
	for rows.Next() {
		var i WorkspaceAgentMetadatum
		if err := rows.Scan(
			&i.WorkspaceAgentID,
			&i.DisplayName,
			&i.Key,
			&i.Script,
			&i.Value,
			&i.Error,
			&i.Timeout,
			&i.Interval,
			&i.CollectedAt,
			&i.DisplayOrder,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getWorkspaceAgentsByResourceIDs = `-- name: GetWorkspaceAgentsByResourceIDs :many
SELECT
	id, created_at, updated_at, name, first_connected_at, last_connected_at, disconnected_at, resource_id, auth_token, auth_instance_id, architecture, environment_variables, operating_system, instance_metadata, resource_metadata, directory, version, last_connected_replica_id, connection_timeout_seconds, troubleshooting_url, motd_file, lifecycle_state, expanded_directory, logs_length, logs_overflowed, started_at, ready_at, subsystems, display_apps, api_version, display_order
FROM
	workspace_agents
WHERE
	resource_id = ANY($1 :: uuid [ ])
`

// GetWorkspaceAgentsByResourceIDs returns all agents attached to any of the
// given workspace resource IDs.
func (q *sqlQuerier) GetWorkspaceAgentsByResourceIDs(ctx context.Context, ids []uuid.UUID) ([]WorkspaceAgent, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceAgentsByResourceIDs, pq.Array(ids))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceAgent
	for rows.Next() {
		var i WorkspaceAgent
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.Name,
			&i.FirstConnectedAt,
			&i.LastConnectedAt,
			&i.DisconnectedAt,
			&i.ResourceID,
			&i.AuthToken,
			&i.AuthInstanceID,
			&i.Architecture,
			&i.EnvironmentVariables,
			&i.OperatingSystem,
			&i.InstanceMetadata,
			&i.ResourceMetadata,
			&i.Directory,
			&i.Version,
			&i.LastConnectedReplicaID,
			&i.ConnectionTimeoutSeconds,
			&i.TroubleshootingURL,
			&i.MOTDFile,
			&i.LifecycleState,
			&i.ExpandedDirectory,
			&i.LogsLength,
			&i.LogsOverflowed,
			&i.StartedAt,
			&i.ReadyAt,
			pq.Array(&i.Subsystems),
			pq.Array(&i.DisplayApps),
			&i.APIVersion,
			&i.DisplayOrder,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getWorkspaceAgentsCreatedAfter = `-- name: GetWorkspaceAgentsCreatedAfter :many
SELECT id, created_at, updated_at, name, first_connected_at, last_connected_at, disconnected_at, resource_id, auth_token, auth_instance_id, architecture, environment_variables, operating_system, instance_metadata, resource_metadata, directory, version, last_connected_replica_id, connection_timeout_seconds, troubleshooting_url, motd_file, lifecycle_state, expanded_directory, logs_length, logs_overflowed, started_at, ready_at, subsystems, display_apps, api_version, display_order FROM workspace_agents WHERE created_at > $1
`

// GetWorkspaceAgentsCreatedAfter returns all agents created strictly after
// the given time.
func (q *sqlQuerier) GetWorkspaceAgentsCreatedAfter(ctx context.Context, createdAt time.Time) ([]WorkspaceAgent, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceAgentsCreatedAfter, createdAt)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceAgent
	for rows.Next() {
		var i WorkspaceAgent
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.Name,
			&i.FirstConnectedAt,
			&i.LastConnectedAt,
			&i.DisconnectedAt,
			&i.ResourceID,
			&i.AuthToken,
			&i.AuthInstanceID,
			&i.Architecture,
			&i.EnvironmentVariables,
			&i.OperatingSystem,
			&i.InstanceMetadata,
			&i.ResourceMetadata,
			&i.Directory,
			&i.Version,
			&i.LastConnectedReplicaID,
			&i.ConnectionTimeoutSeconds,
			&i.TroubleshootingURL,
			&i.MOTDFile,
			&i.LifecycleState,
			&i.ExpandedDirectory,
			&i.LogsLength,
			&i.LogsOverflowed,
			&i.StartedAt,
			&i.ReadyAt,
			pq.Array(&i.Subsystems),
			pq.Array(&i.DisplayApps),
			&i.APIVersion,
			&i.DisplayOrder,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getWorkspaceAgentsInLatestBuildByWorkspaceID = `-- name: GetWorkspaceAgentsInLatestBuildByWorkspaceID :many
SELECT
	workspace_agents.id, workspace_agents.created_at, workspace_agents.updated_at, workspace_agents.name, workspace_agents.first_connected_at, workspace_agents.last_connected_at, workspace_agents.disconnected_at, workspace_agents.resource_id, workspace_agents.auth_token, workspace_agents.auth_instance_id, workspace_agents.architecture, workspace_agents.environment_variables, workspace_agents.operating_system, workspace_agents.instance_metadata, workspace_agents.resource_metadata, workspace_agents.directory, workspace_agents.version, workspace_agents.last_connected_replica_id, workspace_agents.connection_timeout_seconds, workspace_agents.troubleshooting_url, workspace_agents.motd_file, workspace_agents.lifecycle_state, workspace_agents.expanded_directory, workspace_agents.logs_length, workspace_agents.logs_overflowed, workspace_agents.started_at, workspace_agents.ready_at, workspace_agents.subsystems, workspace_agents.display_apps, workspace_agents.api_version, workspace_agents.display_order
FROM
	workspace_agents
JOIN
	workspace_resources ON workspace_agents.resource_id = workspace_resources.id
JOIN
	workspace_builds ON workspace_resources.job_id = workspace_builds.job_id
WHERE
	workspace_builds.workspace_id = $1 :: uuid AND
	workspace_builds.build_number = (
		SELECT
			MAX(build_number)
		FROM
			workspace_builds AS wb
		WHERE
			wb.workspace_id = $1 :: uuid
	)
`

// GetWorkspaceAgentsInLatestBuildByWorkspaceID returns the agents belonging
// to the highest-numbered (latest) build of the given workspace.
func (q *sqlQuerier) GetWorkspaceAgentsInLatestBuildByWorkspaceID(ctx context.Context, workspaceID uuid.UUID) ([]WorkspaceAgent, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceAgentsInLatestBuildByWorkspaceID, workspaceID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceAgent
	for rows.Next() {
		var i WorkspaceAgent
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.Name,
			&i.FirstConnectedAt,
			&i.LastConnectedAt,
			&i.DisconnectedAt,
			&i.ResourceID,
			&i.AuthToken,
			&i.AuthInstanceID,
			&i.Architecture,
			&i.EnvironmentVariables,
			&i.OperatingSystem,
			&i.InstanceMetadata,
			&i.ResourceMetadata,
			&i.Directory,
			&i.Version,
			&i.LastConnectedReplicaID,
			&i.ConnectionTimeoutSeconds,
			&i.TroubleshootingURL,
			&i.MOTDFile,
			&i.LifecycleState,
			&i.ExpandedDirectory,
			&i.LogsLength,
			&i.LogsOverflowed,
			&i.StartedAt,
			&i.ReadyAt,
			pq.Array(&i.Subsystems),
			pq.Array(&i.DisplayApps),
			&i.APIVersion,
			&i.DisplayOrder,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const insertWorkspaceAgent = `-- name: InsertWorkspaceAgent :one
INSERT INTO
	workspace_agents (
		id,
		created_at,
		updated_at,
		name,
		resource_id,
		auth_token,
		auth_instance_id,
		architecture,
		environment_variables,
		operating_system,
		directory,
		instance_metadata,
		resource_metadata,
		connection_timeout_seconds,
		troubleshooting_url,
		motd_file,
		display_apps,
		display_order
	)
VALUES
	($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18) RETURNING id, created_at, updated_at, name, first_connected_at, last_connected_at, disconnected_at, resource_id, auth_token, auth_instance_id, architecture, environment_variables, operating_system, instance_metadata, resource_metadata, directory, version, last_connected_replica_id, connection_timeout_seconds, troubleshooting_url, motd_file, lifecycle_state, expanded_directory, logs_length, logs_overflowed, started_at, ready_at, subsystems, display_apps, api_version, display_order
`

// InsertWorkspaceAgentParams holds the arguments for InsertWorkspaceAgent.
type InsertWorkspaceAgentParams struct {
	ID                       uuid.UUID             `db:"id" json:"id"`
	CreatedAt                time.Time             `db:"created_at" json:"created_at"`
	UpdatedAt                time.Time             `db:"updated_at" json:"updated_at"`
	Name                     string                `db:"name" json:"name"`
	ResourceID               uuid.UUID             `db:"resource_id" json:"resource_id"`
	AuthToken                uuid.UUID             `db:"auth_token" json:"auth_token"`
	AuthInstanceID           sql.NullString        `db:"auth_instance_id" json:"auth_instance_id"`
	Architecture             string                `db:"architecture" json:"architecture"`
	EnvironmentVariables     pqtype.NullRawMessage `db:"environment_variables" json:"environment_variables"`
	OperatingSystem          string                `db:"operating_system" json:"operating_system"`
	Directory                string                `db:"directory" json:"directory"`
	InstanceMetadata         pqtype.NullRawMessage `db:"instance_metadata" json:"instance_metadata"`
	ResourceMetadata         pqtype.NullRawMessage `db:"resource_metadata" json:"resource_metadata"`
	ConnectionTimeoutSeconds int32                 `db:"connection_timeout_seconds" json:"connection_timeout_seconds"`
	TroubleshootingURL       string                `db:"troubleshooting_url" json:"troubleshooting_url"`
	MOTDFile                 string                `db:"motd_file" json:"motd_file"`
	DisplayApps              []DisplayApp          `db:"display_apps" json:"display_apps"`
	DisplayOrder             int32                 `db:"display_order" json:"display_order"`
}

// InsertWorkspaceAgent inserts a new workspace agent row and returns the
// inserted row (including columns not set here, which take their defaults).
func (q *sqlQuerier) InsertWorkspaceAgent(ctx context.Context, arg InsertWorkspaceAgentParams) (WorkspaceAgent, error) {
	row := q.db.QueryRowContext(ctx, insertWorkspaceAgent,
		arg.ID,
		arg.CreatedAt,
		arg.UpdatedAt,
		arg.Name,
		arg.ResourceID,
		arg.AuthToken,
		arg.AuthInstanceID,
		arg.Architecture,
		arg.EnvironmentVariables,
		arg.OperatingSystem,
		arg.Directory,
		arg.InstanceMetadata,
		arg.ResourceMetadata,
		arg.ConnectionTimeoutSeconds,
		arg.TroubleshootingURL,
		arg.MOTDFile,
		pq.Array(arg.DisplayApps),
		arg.DisplayOrder,
	)
	var i WorkspaceAgent
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.Name,
		&i.FirstConnectedAt,
		&i.LastConnectedAt,
		&i.DisconnectedAt,
		&i.ResourceID,
		&i.AuthToken,
		&i.AuthInstanceID,
		&i.Architecture,
		&i.EnvironmentVariables,
		&i.OperatingSystem,
		&i.InstanceMetadata,
		&i.ResourceMetadata,
		&i.Directory,
		&i.Version,
		&i.LastConnectedReplicaID,
		&i.ConnectionTimeoutSeconds,
		&i.TroubleshootingURL,
		&i.MOTDFile,
		&i.LifecycleState,
		&i.ExpandedDirectory,
		&i.LogsLength,
		&i.LogsOverflowed,
		&i.StartedAt,
		&i.ReadyAt,
		pq.Array(&i.Subsystems),
		pq.Array(&i.DisplayApps),
		&i.APIVersion,
		&i.DisplayOrder,
	)
	return i, err
}
|
|
|
|
const insertWorkspaceAgentLogSources = `-- name: InsertWorkspaceAgentLogSources :many
INSERT INTO
	workspace_agent_log_sources (workspace_agent_id, created_at, id, display_name, icon)
SELECT
	$1 :: uuid AS workspace_agent_id,
	$2 :: timestamptz AS created_at,
	unnest($3 :: uuid [ ]) AS id,
	unnest($4 :: VARCHAR(127) [ ]) AS display_name,
	unnest($5 :: text [ ]) AS icon
RETURNING workspace_agent_log_sources.workspace_agent_id, workspace_agent_log_sources.id, workspace_agent_log_sources.created_at, workspace_agent_log_sources.display_name, workspace_agent_log_sources.icon
`

// InsertWorkspaceAgentLogSourcesParams holds the arguments for
// InsertWorkspaceAgentLogSources. ID, DisplayName, and Icon are parallel
// slices unnested into one row each; all rows share the agent ID and
// created_at timestamp.
type InsertWorkspaceAgentLogSourcesParams struct {
	WorkspaceAgentID uuid.UUID   `db:"workspace_agent_id" json:"workspace_agent_id"`
	CreatedAt        time.Time   `db:"created_at" json:"created_at"`
	ID               []uuid.UUID `db:"id" json:"id"`
	DisplayName     []string    `db:"display_name" json:"display_name"`
	Icon             []string    `db:"icon" json:"icon"`
}

// InsertWorkspaceAgentLogSources bulk-inserts log sources for one agent and
// returns the inserted rows.
func (q *sqlQuerier) InsertWorkspaceAgentLogSources(ctx context.Context, arg InsertWorkspaceAgentLogSourcesParams) ([]WorkspaceAgentLogSource, error) {
	rows, err := q.db.QueryContext(ctx, insertWorkspaceAgentLogSources,
		arg.WorkspaceAgentID,
		arg.CreatedAt,
		pq.Array(arg.ID),
		pq.Array(arg.DisplayName),
		pq.Array(arg.Icon),
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceAgentLogSource
	for rows.Next() {
		var i WorkspaceAgentLogSource
		if err := rows.Scan(
			&i.WorkspaceAgentID,
			&i.ID,
			&i.CreatedAt,
			&i.DisplayName,
			&i.Icon,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const insertWorkspaceAgentLogs = `-- name: InsertWorkspaceAgentLogs :many
|
|
WITH new_length AS (
|
|
UPDATE workspace_agents SET
|
|
logs_length = logs_length + $6 WHERE workspace_agents.id = $1
|
|
)
|
|
INSERT INTO
|
|
workspace_agent_logs (agent_id, created_at, output, level, log_source_id)
|
|
SELECT
|
|
$1 :: uuid AS agent_id,
|
|
$2 :: timestamptz AS created_at,
|
|
unnest($3 :: VARCHAR(1024) [ ]) AS output,
|
|
unnest($4 :: log_level [ ]) AS level,
|
|
$5 :: uuid AS log_source_id
|
|
RETURNING workspace_agent_logs.agent_id, workspace_agent_logs.created_at, workspace_agent_logs.output, workspace_agent_logs.id, workspace_agent_logs.level, workspace_agent_logs.log_source_id
|
|
`
|
|
|
|
type InsertWorkspaceAgentLogsParams struct {
|
|
AgentID uuid.UUID `db:"agent_id" json:"agent_id"`
|
|
CreatedAt time.Time `db:"created_at" json:"created_at"`
|
|
Output []string `db:"output" json:"output"`
|
|
Level []LogLevel `db:"level" json:"level"`
|
|
LogSourceID uuid.UUID `db:"log_source_id" json:"log_source_id"`
|
|
OutputLength int32 `db:"output_length" json:"output_length"`
|
|
}
|
|
|
|
func (q *sqlQuerier) InsertWorkspaceAgentLogs(ctx context.Context, arg InsertWorkspaceAgentLogsParams) ([]WorkspaceAgentLog, error) {
|
|
rows, err := q.db.QueryContext(ctx, insertWorkspaceAgentLogs,
|
|
arg.AgentID,
|
|
arg.CreatedAt,
|
|
pq.Array(arg.Output),
|
|
pq.Array(arg.Level),
|
|
arg.LogSourceID,
|
|
arg.OutputLength,
|
|
)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []WorkspaceAgentLog
|
|
for rows.Next() {
|
|
var i WorkspaceAgentLog
|
|
if err := rows.Scan(
|
|
&i.AgentID,
|
|
&i.CreatedAt,
|
|
&i.Output,
|
|
&i.ID,
|
|
&i.Level,
|
|
&i.LogSourceID,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const insertWorkspaceAgentMetadata = `-- name: InsertWorkspaceAgentMetadata :exec
|
|
INSERT INTO
|
|
workspace_agent_metadata (
|
|
workspace_agent_id,
|
|
display_name,
|
|
key,
|
|
script,
|
|
timeout,
|
|
interval,
|
|
display_order
|
|
)
|
|
VALUES
|
|
($1, $2, $3, $4, $5, $6, $7)
|
|
`
|
|
|
|
type InsertWorkspaceAgentMetadataParams struct {
|
|
WorkspaceAgentID uuid.UUID `db:"workspace_agent_id" json:"workspace_agent_id"`
|
|
DisplayName string `db:"display_name" json:"display_name"`
|
|
Key string `db:"key" json:"key"`
|
|
Script string `db:"script" json:"script"`
|
|
Timeout int64 `db:"timeout" json:"timeout"`
|
|
Interval int64 `db:"interval" json:"interval"`
|
|
DisplayOrder int32 `db:"display_order" json:"display_order"`
|
|
}
|
|
|
|
func (q *sqlQuerier) InsertWorkspaceAgentMetadata(ctx context.Context, arg InsertWorkspaceAgentMetadataParams) error {
|
|
_, err := q.db.ExecContext(ctx, insertWorkspaceAgentMetadata,
|
|
arg.WorkspaceAgentID,
|
|
arg.DisplayName,
|
|
arg.Key,
|
|
arg.Script,
|
|
arg.Timeout,
|
|
arg.Interval,
|
|
arg.DisplayOrder,
|
|
)
|
|
return err
|
|
}
|
|
|
|
const updateWorkspaceAgentConnectionByID = `-- name: UpdateWorkspaceAgentConnectionByID :exec
|
|
UPDATE
|
|
workspace_agents
|
|
SET
|
|
first_connected_at = $2,
|
|
last_connected_at = $3,
|
|
last_connected_replica_id = $4,
|
|
disconnected_at = $5,
|
|
updated_at = $6
|
|
WHERE
|
|
id = $1
|
|
`
|
|
|
|
type UpdateWorkspaceAgentConnectionByIDParams struct {
|
|
ID uuid.UUID `db:"id" json:"id"`
|
|
FirstConnectedAt sql.NullTime `db:"first_connected_at" json:"first_connected_at"`
|
|
LastConnectedAt sql.NullTime `db:"last_connected_at" json:"last_connected_at"`
|
|
LastConnectedReplicaID uuid.NullUUID `db:"last_connected_replica_id" json:"last_connected_replica_id"`
|
|
DisconnectedAt sql.NullTime `db:"disconnected_at" json:"disconnected_at"`
|
|
UpdatedAt time.Time `db:"updated_at" json:"updated_at"`
|
|
}
|
|
|
|
func (q *sqlQuerier) UpdateWorkspaceAgentConnectionByID(ctx context.Context, arg UpdateWorkspaceAgentConnectionByIDParams) error {
|
|
_, err := q.db.ExecContext(ctx, updateWorkspaceAgentConnectionByID,
|
|
arg.ID,
|
|
arg.FirstConnectedAt,
|
|
arg.LastConnectedAt,
|
|
arg.LastConnectedReplicaID,
|
|
arg.DisconnectedAt,
|
|
arg.UpdatedAt,
|
|
)
|
|
return err
|
|
}
|
|
|
|
const updateWorkspaceAgentLifecycleStateByID = `-- name: UpdateWorkspaceAgentLifecycleStateByID :exec
|
|
UPDATE
|
|
workspace_agents
|
|
SET
|
|
lifecycle_state = $2,
|
|
started_at = $3,
|
|
ready_at = $4
|
|
WHERE
|
|
id = $1
|
|
`
|
|
|
|
type UpdateWorkspaceAgentLifecycleStateByIDParams struct {
|
|
ID uuid.UUID `db:"id" json:"id"`
|
|
LifecycleState WorkspaceAgentLifecycleState `db:"lifecycle_state" json:"lifecycle_state"`
|
|
StartedAt sql.NullTime `db:"started_at" json:"started_at"`
|
|
ReadyAt sql.NullTime `db:"ready_at" json:"ready_at"`
|
|
}
|
|
|
|
func (q *sqlQuerier) UpdateWorkspaceAgentLifecycleStateByID(ctx context.Context, arg UpdateWorkspaceAgentLifecycleStateByIDParams) error {
|
|
_, err := q.db.ExecContext(ctx, updateWorkspaceAgentLifecycleStateByID,
|
|
arg.ID,
|
|
arg.LifecycleState,
|
|
arg.StartedAt,
|
|
arg.ReadyAt,
|
|
)
|
|
return err
|
|
}
|
|
|
|
const updateWorkspaceAgentLogOverflowByID = `-- name: UpdateWorkspaceAgentLogOverflowByID :exec
|
|
UPDATE
|
|
workspace_agents
|
|
SET
|
|
logs_overflowed = $2
|
|
WHERE
|
|
id = $1
|
|
`
|
|
|
|
type UpdateWorkspaceAgentLogOverflowByIDParams struct {
|
|
ID uuid.UUID `db:"id" json:"id"`
|
|
LogsOverflowed bool `db:"logs_overflowed" json:"logs_overflowed"`
|
|
}
|
|
|
|
func (q *sqlQuerier) UpdateWorkspaceAgentLogOverflowByID(ctx context.Context, arg UpdateWorkspaceAgentLogOverflowByIDParams) error {
|
|
_, err := q.db.ExecContext(ctx, updateWorkspaceAgentLogOverflowByID, arg.ID, arg.LogsOverflowed)
|
|
return err
|
|
}
|
|
|
|
const updateWorkspaceAgentMetadata = `-- name: UpdateWorkspaceAgentMetadata :exec
|
|
WITH metadata AS (
|
|
SELECT
|
|
unnest($2::text[]) AS key,
|
|
unnest($3::text[]) AS value,
|
|
unnest($4::text[]) AS error,
|
|
unnest($5::timestamptz[]) AS collected_at
|
|
)
|
|
UPDATE
|
|
workspace_agent_metadata wam
|
|
SET
|
|
value = m.value,
|
|
error = m.error,
|
|
collected_at = m.collected_at
|
|
FROM
|
|
metadata m
|
|
WHERE
|
|
wam.workspace_agent_id = $1
|
|
AND wam.key = m.key
|
|
`
|
|
|
|
type UpdateWorkspaceAgentMetadataParams struct {
|
|
WorkspaceAgentID uuid.UUID `db:"workspace_agent_id" json:"workspace_agent_id"`
|
|
Key []string `db:"key" json:"key"`
|
|
Value []string `db:"value" json:"value"`
|
|
Error []string `db:"error" json:"error"`
|
|
CollectedAt []time.Time `db:"collected_at" json:"collected_at"`
|
|
}
|
|
|
|
func (q *sqlQuerier) UpdateWorkspaceAgentMetadata(ctx context.Context, arg UpdateWorkspaceAgentMetadataParams) error {
|
|
_, err := q.db.ExecContext(ctx, updateWorkspaceAgentMetadata,
|
|
arg.WorkspaceAgentID,
|
|
pq.Array(arg.Key),
|
|
pq.Array(arg.Value),
|
|
pq.Array(arg.Error),
|
|
pq.Array(arg.CollectedAt),
|
|
)
|
|
return err
|
|
}
|
|
|
|
const updateWorkspaceAgentStartupByID = `-- name: UpdateWorkspaceAgentStartupByID :exec
|
|
UPDATE
|
|
workspace_agents
|
|
SET
|
|
version = $2,
|
|
expanded_directory = $3,
|
|
subsystems = $4,
|
|
api_version = $5
|
|
WHERE
|
|
id = $1
|
|
`
|
|
|
|
type UpdateWorkspaceAgentStartupByIDParams struct {
|
|
ID uuid.UUID `db:"id" json:"id"`
|
|
Version string `db:"version" json:"version"`
|
|
ExpandedDirectory string `db:"expanded_directory" json:"expanded_directory"`
|
|
Subsystems []WorkspaceAgentSubsystem `db:"subsystems" json:"subsystems"`
|
|
APIVersion string `db:"api_version" json:"api_version"`
|
|
}
|
|
|
|
func (q *sqlQuerier) UpdateWorkspaceAgentStartupByID(ctx context.Context, arg UpdateWorkspaceAgentStartupByIDParams) error {
|
|
_, err := q.db.ExecContext(ctx, updateWorkspaceAgentStartupByID,
|
|
arg.ID,
|
|
arg.Version,
|
|
arg.ExpandedDirectory,
|
|
pq.Array(arg.Subsystems),
|
|
arg.APIVersion,
|
|
)
|
|
return err
|
|
}
|
|
|
|
const deleteOldWorkspaceAgentStats = `-- name: DeleteOldWorkspaceAgentStats :exec
|
|
DELETE FROM
|
|
workspace_agent_stats
|
|
WHERE
|
|
created_at < (
|
|
SELECT
|
|
COALESCE(
|
|
-- When generating initial template usage stats, all the
|
|
-- raw agent stats are needed, after that only ~30 mins
|
|
-- from last rollup is needed. Deployment stats seem to
|
|
-- use between 15 mins and 1 hour of data. We keep a
|
|
-- little bit more (1 day) just in case.
|
|
MAX(start_time) - '1 days'::interval,
|
|
-- Fall back to ~6 months ago if there are no template
|
|
-- usage stats so that we don't delete the data before
|
|
-- it's rolled up.
|
|
NOW() - '180 days'::interval
|
|
)
|
|
FROM
|
|
template_usage_stats
|
|
)
|
|
AND created_at < (
|
|
-- Delete at most in batches of 4 hours (with this batch size, assuming
|
|
-- 1 iteration / 10 minutes, we can clear out the previous 6 months of
|
|
-- data in 7.5 days) whilst keeping the DB load low.
|
|
SELECT
|
|
COALESCE(MIN(created_at) + '4 hours'::interval, NOW())
|
|
FROM
|
|
workspace_agent_stats
|
|
)
|
|
`
|
|
|
|
func (q *sqlQuerier) DeleteOldWorkspaceAgentStats(ctx context.Context) error {
|
|
_, err := q.db.ExecContext(ctx, deleteOldWorkspaceAgentStats)
|
|
return err
|
|
}
|
|
|
|
const getDeploymentDAUs = `-- name: GetDeploymentDAUs :many
|
|
SELECT
|
|
(created_at at TIME ZONE cast($1::integer as text))::date as date,
|
|
user_id
|
|
FROM
|
|
workspace_agent_stats
|
|
WHERE
|
|
connection_count > 0
|
|
GROUP BY
|
|
date, user_id
|
|
ORDER BY
|
|
date ASC
|
|
`
|
|
|
|
type GetDeploymentDAUsRow struct {
|
|
Date time.Time `db:"date" json:"date"`
|
|
UserID uuid.UUID `db:"user_id" json:"user_id"`
|
|
}
|
|
|
|
func (q *sqlQuerier) GetDeploymentDAUs(ctx context.Context, tzOffset int32) ([]GetDeploymentDAUsRow, error) {
|
|
rows, err := q.db.QueryContext(ctx, getDeploymentDAUs, tzOffset)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []GetDeploymentDAUsRow
|
|
for rows.Next() {
|
|
var i GetDeploymentDAUsRow
|
|
if err := rows.Scan(&i.Date, &i.UserID); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const getDeploymentWorkspaceAgentStats = `-- name: GetDeploymentWorkspaceAgentStats :one
|
|
WITH agent_stats AS (
|
|
SELECT
|
|
coalesce(SUM(rx_bytes), 0)::bigint AS workspace_rx_bytes,
|
|
coalesce(SUM(tx_bytes), 0)::bigint AS workspace_tx_bytes,
|
|
coalesce((PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY connection_median_latency_ms)), -1)::FLOAT AS workspace_connection_latency_50,
|
|
coalesce((PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY connection_median_latency_ms)), -1)::FLOAT AS workspace_connection_latency_95
|
|
FROM workspace_agent_stats
|
|
-- The greater than 0 is to support legacy agents that don't report connection_median_latency_ms.
|
|
WHERE workspace_agent_stats.created_at > $1 AND connection_median_latency_ms > 0
|
|
), latest_agent_stats AS (
|
|
SELECT
|
|
coalesce(SUM(session_count_vscode), 0)::bigint AS session_count_vscode,
|
|
coalesce(SUM(session_count_ssh), 0)::bigint AS session_count_ssh,
|
|
coalesce(SUM(session_count_jetbrains), 0)::bigint AS session_count_jetbrains,
|
|
coalesce(SUM(session_count_reconnecting_pty), 0)::bigint AS session_count_reconnecting_pty
|
|
FROM (
|
|
SELECT id, created_at, user_id, agent_id, workspace_id, template_id, connections_by_proto, connection_count, rx_packets, rx_bytes, tx_packets, tx_bytes, connection_median_latency_ms, session_count_vscode, session_count_jetbrains, session_count_reconnecting_pty, session_count_ssh, ROW_NUMBER() OVER(PARTITION BY agent_id ORDER BY created_at DESC) AS rn
|
|
FROM workspace_agent_stats WHERE created_at > $1
|
|
) AS a WHERE a.rn = 1
|
|
)
|
|
SELECT workspace_rx_bytes, workspace_tx_bytes, workspace_connection_latency_50, workspace_connection_latency_95, session_count_vscode, session_count_ssh, session_count_jetbrains, session_count_reconnecting_pty FROM agent_stats, latest_agent_stats
|
|
`
|
|
|
|
type GetDeploymentWorkspaceAgentStatsRow struct {
|
|
WorkspaceRxBytes int64 `db:"workspace_rx_bytes" json:"workspace_rx_bytes"`
|
|
WorkspaceTxBytes int64 `db:"workspace_tx_bytes" json:"workspace_tx_bytes"`
|
|
WorkspaceConnectionLatency50 float64 `db:"workspace_connection_latency_50" json:"workspace_connection_latency_50"`
|
|
WorkspaceConnectionLatency95 float64 `db:"workspace_connection_latency_95" json:"workspace_connection_latency_95"`
|
|
SessionCountVSCode int64 `db:"session_count_vscode" json:"session_count_vscode"`
|
|
SessionCountSSH int64 `db:"session_count_ssh" json:"session_count_ssh"`
|
|
SessionCountJetBrains int64 `db:"session_count_jetbrains" json:"session_count_jetbrains"`
|
|
SessionCountReconnectingPTY int64 `db:"session_count_reconnecting_pty" json:"session_count_reconnecting_pty"`
|
|
}
|
|
|
|
func (q *sqlQuerier) GetDeploymentWorkspaceAgentStats(ctx context.Context, createdAt time.Time) (GetDeploymentWorkspaceAgentStatsRow, error) {
|
|
row := q.db.QueryRowContext(ctx, getDeploymentWorkspaceAgentStats, createdAt)
|
|
var i GetDeploymentWorkspaceAgentStatsRow
|
|
err := row.Scan(
|
|
&i.WorkspaceRxBytes,
|
|
&i.WorkspaceTxBytes,
|
|
&i.WorkspaceConnectionLatency50,
|
|
&i.WorkspaceConnectionLatency95,
|
|
&i.SessionCountVSCode,
|
|
&i.SessionCountSSH,
|
|
&i.SessionCountJetBrains,
|
|
&i.SessionCountReconnectingPTY,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const getTemplateDAUs = `-- name: GetTemplateDAUs :many
|
|
SELECT
|
|
(created_at at TIME ZONE cast($2::integer as text))::date as date,
|
|
user_id
|
|
FROM
|
|
workspace_agent_stats
|
|
WHERE
|
|
template_id = $1 AND
|
|
connection_count > 0
|
|
GROUP BY
|
|
date, user_id
|
|
ORDER BY
|
|
date ASC
|
|
`
|
|
|
|
type GetTemplateDAUsParams struct {
|
|
TemplateID uuid.UUID `db:"template_id" json:"template_id"`
|
|
TzOffset int32 `db:"tz_offset" json:"tz_offset"`
|
|
}
|
|
|
|
type GetTemplateDAUsRow struct {
|
|
Date time.Time `db:"date" json:"date"`
|
|
UserID uuid.UUID `db:"user_id" json:"user_id"`
|
|
}
|
|
|
|
func (q *sqlQuerier) GetTemplateDAUs(ctx context.Context, arg GetTemplateDAUsParams) ([]GetTemplateDAUsRow, error) {
|
|
rows, err := q.db.QueryContext(ctx, getTemplateDAUs, arg.TemplateID, arg.TzOffset)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []GetTemplateDAUsRow
|
|
for rows.Next() {
|
|
var i GetTemplateDAUsRow
|
|
if err := rows.Scan(&i.Date, &i.UserID); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const getWorkspaceAgentStats = `-- name: GetWorkspaceAgentStats :many
|
|
WITH agent_stats AS (
|
|
SELECT
|
|
user_id,
|
|
agent_id,
|
|
workspace_id,
|
|
template_id,
|
|
MIN(created_at)::timestamptz AS aggregated_from,
|
|
coalesce(SUM(rx_bytes), 0)::bigint AS workspace_rx_bytes,
|
|
coalesce(SUM(tx_bytes), 0)::bigint AS workspace_tx_bytes,
|
|
coalesce((PERCENTILE_CONT(0.5) WITHIN GROUP (ORDER BY connection_median_latency_ms)), -1)::FLOAT AS workspace_connection_latency_50,
|
|
coalesce((PERCENTILE_CONT(0.95) WITHIN GROUP (ORDER BY connection_median_latency_ms)), -1)::FLOAT AS workspace_connection_latency_95
|
|
FROM workspace_agent_stats
|
|
-- The greater than 0 is to support legacy agents that don't report connection_median_latency_ms.
|
|
WHERE workspace_agent_stats.created_at > $1 AND connection_median_latency_ms > 0 GROUP BY user_id, agent_id, workspace_id, template_id
|
|
), latest_agent_stats AS (
|
|
SELECT
|
|
a.agent_id,
|
|
coalesce(SUM(session_count_vscode), 0)::bigint AS session_count_vscode,
|
|
coalesce(SUM(session_count_ssh), 0)::bigint AS session_count_ssh,
|
|
coalesce(SUM(session_count_jetbrains), 0)::bigint AS session_count_jetbrains,
|
|
coalesce(SUM(session_count_reconnecting_pty), 0)::bigint AS session_count_reconnecting_pty
|
|
FROM (
|
|
SELECT id, created_at, user_id, agent_id, workspace_id, template_id, connections_by_proto, connection_count, rx_packets, rx_bytes, tx_packets, tx_bytes, connection_median_latency_ms, session_count_vscode, session_count_jetbrains, session_count_reconnecting_pty, session_count_ssh, ROW_NUMBER() OVER(PARTITION BY agent_id ORDER BY created_at DESC) AS rn
|
|
FROM workspace_agent_stats WHERE created_at > $1
|
|
) AS a WHERE a.rn = 1 GROUP BY a.user_id, a.agent_id, a.workspace_id, a.template_id
|
|
)
|
|
SELECT user_id, agent_stats.agent_id, workspace_id, template_id, aggregated_from, workspace_rx_bytes, workspace_tx_bytes, workspace_connection_latency_50, workspace_connection_latency_95, latest_agent_stats.agent_id, session_count_vscode, session_count_ssh, session_count_jetbrains, session_count_reconnecting_pty FROM agent_stats JOIN latest_agent_stats ON agent_stats.agent_id = latest_agent_stats.agent_id
|
|
`
|
|
|
|
type GetWorkspaceAgentStatsRow struct {
|
|
UserID uuid.UUID `db:"user_id" json:"user_id"`
|
|
AgentID uuid.UUID `db:"agent_id" json:"agent_id"`
|
|
WorkspaceID uuid.UUID `db:"workspace_id" json:"workspace_id"`
|
|
TemplateID uuid.UUID `db:"template_id" json:"template_id"`
|
|
AggregatedFrom time.Time `db:"aggregated_from" json:"aggregated_from"`
|
|
WorkspaceRxBytes int64 `db:"workspace_rx_bytes" json:"workspace_rx_bytes"`
|
|
WorkspaceTxBytes int64 `db:"workspace_tx_bytes" json:"workspace_tx_bytes"`
|
|
WorkspaceConnectionLatency50 float64 `db:"workspace_connection_latency_50" json:"workspace_connection_latency_50"`
|
|
WorkspaceConnectionLatency95 float64 `db:"workspace_connection_latency_95" json:"workspace_connection_latency_95"`
|
|
AgentID_2 uuid.UUID `db:"agent_id_2" json:"agent_id_2"`
|
|
SessionCountVSCode int64 `db:"session_count_vscode" json:"session_count_vscode"`
|
|
SessionCountSSH int64 `db:"session_count_ssh" json:"session_count_ssh"`
|
|
SessionCountJetBrains int64 `db:"session_count_jetbrains" json:"session_count_jetbrains"`
|
|
SessionCountReconnectingPTY int64 `db:"session_count_reconnecting_pty" json:"session_count_reconnecting_pty"`
|
|
}
|
|
|
|
func (q *sqlQuerier) GetWorkspaceAgentStats(ctx context.Context, createdAt time.Time) ([]GetWorkspaceAgentStatsRow, error) {
|
|
rows, err := q.db.QueryContext(ctx, getWorkspaceAgentStats, createdAt)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []GetWorkspaceAgentStatsRow
|
|
for rows.Next() {
|
|
var i GetWorkspaceAgentStatsRow
|
|
if err := rows.Scan(
|
|
&i.UserID,
|
|
&i.AgentID,
|
|
&i.WorkspaceID,
|
|
&i.TemplateID,
|
|
&i.AggregatedFrom,
|
|
&i.WorkspaceRxBytes,
|
|
&i.WorkspaceTxBytes,
|
|
&i.WorkspaceConnectionLatency50,
|
|
&i.WorkspaceConnectionLatency95,
|
|
&i.AgentID_2,
|
|
&i.SessionCountVSCode,
|
|
&i.SessionCountSSH,
|
|
&i.SessionCountJetBrains,
|
|
&i.SessionCountReconnectingPTY,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const getWorkspaceAgentStatsAndLabels = `-- name: GetWorkspaceAgentStatsAndLabels :many
|
|
WITH agent_stats AS (
|
|
SELECT
|
|
user_id,
|
|
agent_id,
|
|
workspace_id,
|
|
coalesce(SUM(rx_bytes), 0)::bigint AS rx_bytes,
|
|
coalesce(SUM(tx_bytes), 0)::bigint AS tx_bytes
|
|
FROM workspace_agent_stats
|
|
WHERE workspace_agent_stats.created_at > $1
|
|
GROUP BY user_id, agent_id, workspace_id
|
|
), latest_agent_stats AS (
|
|
SELECT
|
|
a.agent_id,
|
|
coalesce(SUM(session_count_vscode), 0)::bigint AS session_count_vscode,
|
|
coalesce(SUM(session_count_ssh), 0)::bigint AS session_count_ssh,
|
|
coalesce(SUM(session_count_jetbrains), 0)::bigint AS session_count_jetbrains,
|
|
coalesce(SUM(session_count_reconnecting_pty), 0)::bigint AS session_count_reconnecting_pty,
|
|
coalesce(SUM(connection_count), 0)::bigint AS connection_count,
|
|
coalesce(MAX(connection_median_latency_ms), 0)::float AS connection_median_latency_ms
|
|
FROM (
|
|
SELECT id, created_at, user_id, agent_id, workspace_id, template_id, connections_by_proto, connection_count, rx_packets, rx_bytes, tx_packets, tx_bytes, connection_median_latency_ms, session_count_vscode, session_count_jetbrains, session_count_reconnecting_pty, session_count_ssh, ROW_NUMBER() OVER(PARTITION BY agent_id ORDER BY created_at DESC) AS rn
|
|
FROM workspace_agent_stats
|
|
-- The greater than 0 is to support legacy agents that don't report connection_median_latency_ms.
|
|
WHERE created_at > $1 AND connection_median_latency_ms > 0
|
|
) AS a
|
|
WHERE a.rn = 1
|
|
GROUP BY a.user_id, a.agent_id, a.workspace_id
|
|
)
|
|
SELECT
|
|
users.username, workspace_agents.name AS agent_name, workspaces.name AS workspace_name, rx_bytes, tx_bytes,
|
|
session_count_vscode, session_count_ssh, session_count_jetbrains, session_count_reconnecting_pty,
|
|
connection_count, connection_median_latency_ms
|
|
FROM
|
|
agent_stats
|
|
JOIN
|
|
latest_agent_stats
|
|
ON
|
|
agent_stats.agent_id = latest_agent_stats.agent_id
|
|
JOIN
|
|
users
|
|
ON
|
|
users.id = agent_stats.user_id
|
|
JOIN
|
|
workspace_agents
|
|
ON
|
|
workspace_agents.id = agent_stats.agent_id
|
|
JOIN
|
|
workspaces
|
|
ON
|
|
workspaces.id = agent_stats.workspace_id
|
|
`
|
|
|
|
type GetWorkspaceAgentStatsAndLabelsRow struct {
|
|
Username string `db:"username" json:"username"`
|
|
AgentName string `db:"agent_name" json:"agent_name"`
|
|
WorkspaceName string `db:"workspace_name" json:"workspace_name"`
|
|
RxBytes int64 `db:"rx_bytes" json:"rx_bytes"`
|
|
TxBytes int64 `db:"tx_bytes" json:"tx_bytes"`
|
|
SessionCountVSCode int64 `db:"session_count_vscode" json:"session_count_vscode"`
|
|
SessionCountSSH int64 `db:"session_count_ssh" json:"session_count_ssh"`
|
|
SessionCountJetBrains int64 `db:"session_count_jetbrains" json:"session_count_jetbrains"`
|
|
SessionCountReconnectingPTY int64 `db:"session_count_reconnecting_pty" json:"session_count_reconnecting_pty"`
|
|
ConnectionCount int64 `db:"connection_count" json:"connection_count"`
|
|
ConnectionMedianLatencyMS float64 `db:"connection_median_latency_ms" json:"connection_median_latency_ms"`
|
|
}
|
|
|
|
func (q *sqlQuerier) GetWorkspaceAgentStatsAndLabels(ctx context.Context, createdAt time.Time) ([]GetWorkspaceAgentStatsAndLabelsRow, error) {
|
|
rows, err := q.db.QueryContext(ctx, getWorkspaceAgentStatsAndLabels, createdAt)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []GetWorkspaceAgentStatsAndLabelsRow
|
|
for rows.Next() {
|
|
var i GetWorkspaceAgentStatsAndLabelsRow
|
|
if err := rows.Scan(
|
|
&i.Username,
|
|
&i.AgentName,
|
|
&i.WorkspaceName,
|
|
&i.RxBytes,
|
|
&i.TxBytes,
|
|
&i.SessionCountVSCode,
|
|
&i.SessionCountSSH,
|
|
&i.SessionCountJetBrains,
|
|
&i.SessionCountReconnectingPTY,
|
|
&i.ConnectionCount,
|
|
&i.ConnectionMedianLatencyMS,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const insertWorkspaceAgentStats = `-- name: InsertWorkspaceAgentStats :exec
|
|
INSERT INTO
|
|
workspace_agent_stats (
|
|
id,
|
|
created_at,
|
|
user_id,
|
|
workspace_id,
|
|
template_id,
|
|
agent_id,
|
|
connections_by_proto,
|
|
connection_count,
|
|
rx_packets,
|
|
rx_bytes,
|
|
tx_packets,
|
|
tx_bytes,
|
|
session_count_vscode,
|
|
session_count_jetbrains,
|
|
session_count_reconnecting_pty,
|
|
session_count_ssh,
|
|
connection_median_latency_ms
|
|
)
|
|
SELECT
|
|
unnest($1 :: uuid[]) AS id,
|
|
unnest($2 :: timestamptz[]) AS created_at,
|
|
unnest($3 :: uuid[]) AS user_id,
|
|
unnest($4 :: uuid[]) AS workspace_id,
|
|
unnest($5 :: uuid[]) AS template_id,
|
|
unnest($6 :: uuid[]) AS agent_id,
|
|
jsonb_array_elements($7 :: jsonb) AS connections_by_proto,
|
|
unnest($8 :: bigint[]) AS connection_count,
|
|
unnest($9 :: bigint[]) AS rx_packets,
|
|
unnest($10 :: bigint[]) AS rx_bytes,
|
|
unnest($11 :: bigint[]) AS tx_packets,
|
|
unnest($12 :: bigint[]) AS tx_bytes,
|
|
unnest($13 :: bigint[]) AS session_count_vscode,
|
|
unnest($14 :: bigint[]) AS session_count_jetbrains,
|
|
unnest($15 :: bigint[]) AS session_count_reconnecting_pty,
|
|
unnest($16 :: bigint[]) AS session_count_ssh,
|
|
unnest($17 :: double precision[]) AS connection_median_latency_ms
|
|
`
|
|
|
|
type InsertWorkspaceAgentStatsParams struct {
|
|
ID []uuid.UUID `db:"id" json:"id"`
|
|
CreatedAt []time.Time `db:"created_at" json:"created_at"`
|
|
UserID []uuid.UUID `db:"user_id" json:"user_id"`
|
|
WorkspaceID []uuid.UUID `db:"workspace_id" json:"workspace_id"`
|
|
TemplateID []uuid.UUID `db:"template_id" json:"template_id"`
|
|
AgentID []uuid.UUID `db:"agent_id" json:"agent_id"`
|
|
ConnectionsByProto json.RawMessage `db:"connections_by_proto" json:"connections_by_proto"`
|
|
ConnectionCount []int64 `db:"connection_count" json:"connection_count"`
|
|
RxPackets []int64 `db:"rx_packets" json:"rx_packets"`
|
|
RxBytes []int64 `db:"rx_bytes" json:"rx_bytes"`
|
|
TxPackets []int64 `db:"tx_packets" json:"tx_packets"`
|
|
TxBytes []int64 `db:"tx_bytes" json:"tx_bytes"`
|
|
SessionCountVSCode []int64 `db:"session_count_vscode" json:"session_count_vscode"`
|
|
SessionCountJetBrains []int64 `db:"session_count_jetbrains" json:"session_count_jetbrains"`
|
|
SessionCountReconnectingPTY []int64 `db:"session_count_reconnecting_pty" json:"session_count_reconnecting_pty"`
|
|
SessionCountSSH []int64 `db:"session_count_ssh" json:"session_count_ssh"`
|
|
ConnectionMedianLatencyMS []float64 `db:"connection_median_latency_ms" json:"connection_median_latency_ms"`
|
|
}
|
|
|
|
func (q *sqlQuerier) InsertWorkspaceAgentStats(ctx context.Context, arg InsertWorkspaceAgentStatsParams) error {
|
|
_, err := q.db.ExecContext(ctx, insertWorkspaceAgentStats,
|
|
pq.Array(arg.ID),
|
|
pq.Array(arg.CreatedAt),
|
|
pq.Array(arg.UserID),
|
|
pq.Array(arg.WorkspaceID),
|
|
pq.Array(arg.TemplateID),
|
|
pq.Array(arg.AgentID),
|
|
arg.ConnectionsByProto,
|
|
pq.Array(arg.ConnectionCount),
|
|
pq.Array(arg.RxPackets),
|
|
pq.Array(arg.RxBytes),
|
|
pq.Array(arg.TxPackets),
|
|
pq.Array(arg.TxBytes),
|
|
pq.Array(arg.SessionCountVSCode),
|
|
pq.Array(arg.SessionCountJetBrains),
|
|
pq.Array(arg.SessionCountReconnectingPTY),
|
|
pq.Array(arg.SessionCountSSH),
|
|
pq.Array(arg.ConnectionMedianLatencyMS),
|
|
)
|
|
return err
|
|
}
|
|
|
|
const getWorkspaceAppByAgentIDAndSlug = `-- name: GetWorkspaceAppByAgentIDAndSlug :one
|
|
SELECT id, created_at, agent_id, display_name, icon, command, url, healthcheck_url, healthcheck_interval, healthcheck_threshold, health, subdomain, sharing_level, slug, external, display_order FROM workspace_apps WHERE agent_id = $1 AND slug = $2
|
|
`
|
|
|
|
type GetWorkspaceAppByAgentIDAndSlugParams struct {
|
|
AgentID uuid.UUID `db:"agent_id" json:"agent_id"`
|
|
Slug string `db:"slug" json:"slug"`
|
|
}
|
|
|
|
func (q *sqlQuerier) GetWorkspaceAppByAgentIDAndSlug(ctx context.Context, arg GetWorkspaceAppByAgentIDAndSlugParams) (WorkspaceApp, error) {
|
|
row := q.db.QueryRowContext(ctx, getWorkspaceAppByAgentIDAndSlug, arg.AgentID, arg.Slug)
|
|
var i WorkspaceApp
|
|
err := row.Scan(
|
|
&i.ID,
|
|
&i.CreatedAt,
|
|
&i.AgentID,
|
|
&i.DisplayName,
|
|
&i.Icon,
|
|
&i.Command,
|
|
&i.Url,
|
|
&i.HealthcheckUrl,
|
|
&i.HealthcheckInterval,
|
|
&i.HealthcheckThreshold,
|
|
&i.Health,
|
|
&i.Subdomain,
|
|
&i.SharingLevel,
|
|
&i.Slug,
|
|
&i.External,
|
|
&i.DisplayOrder,
|
|
)
|
|
return i, err
|
|
}
|
|
|
|
const getWorkspaceAppsByAgentID = `-- name: GetWorkspaceAppsByAgentID :many
|
|
SELECT id, created_at, agent_id, display_name, icon, command, url, healthcheck_url, healthcheck_interval, healthcheck_threshold, health, subdomain, sharing_level, slug, external, display_order FROM workspace_apps WHERE agent_id = $1 ORDER BY slug ASC
|
|
`
|
|
|
|
func (q *sqlQuerier) GetWorkspaceAppsByAgentID(ctx context.Context, agentID uuid.UUID) ([]WorkspaceApp, error) {
|
|
rows, err := q.db.QueryContext(ctx, getWorkspaceAppsByAgentID, agentID)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []WorkspaceApp
|
|
for rows.Next() {
|
|
var i WorkspaceApp
|
|
if err := rows.Scan(
|
|
&i.ID,
|
|
&i.CreatedAt,
|
|
&i.AgentID,
|
|
&i.DisplayName,
|
|
&i.Icon,
|
|
&i.Command,
|
|
&i.Url,
|
|
&i.HealthcheckUrl,
|
|
&i.HealthcheckInterval,
|
|
&i.HealthcheckThreshold,
|
|
&i.Health,
|
|
&i.Subdomain,
|
|
&i.SharingLevel,
|
|
&i.Slug,
|
|
&i.External,
|
|
&i.DisplayOrder,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const getWorkspaceAppsByAgentIDs = `-- name: GetWorkspaceAppsByAgentIDs :many
|
|
SELECT id, created_at, agent_id, display_name, icon, command, url, healthcheck_url, healthcheck_interval, healthcheck_threshold, health, subdomain, sharing_level, slug, external, display_order FROM workspace_apps WHERE agent_id = ANY($1 :: uuid [ ]) ORDER BY slug ASC
|
|
`
|
|
|
|
func (q *sqlQuerier) GetWorkspaceAppsByAgentIDs(ctx context.Context, ids []uuid.UUID) ([]WorkspaceApp, error) {
|
|
rows, err := q.db.QueryContext(ctx, getWorkspaceAppsByAgentIDs, pq.Array(ids))
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []WorkspaceApp
|
|
for rows.Next() {
|
|
var i WorkspaceApp
|
|
if err := rows.Scan(
|
|
&i.ID,
|
|
&i.CreatedAt,
|
|
&i.AgentID,
|
|
&i.DisplayName,
|
|
&i.Icon,
|
|
&i.Command,
|
|
&i.Url,
|
|
&i.HealthcheckUrl,
|
|
&i.HealthcheckInterval,
|
|
&i.HealthcheckThreshold,
|
|
&i.Health,
|
|
&i.Subdomain,
|
|
&i.SharingLevel,
|
|
&i.Slug,
|
|
&i.External,
|
|
&i.DisplayOrder,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const getWorkspaceAppsCreatedAfter = `-- name: GetWorkspaceAppsCreatedAfter :many
|
|
SELECT id, created_at, agent_id, display_name, icon, command, url, healthcheck_url, healthcheck_interval, healthcheck_threshold, health, subdomain, sharing_level, slug, external, display_order FROM workspace_apps WHERE created_at > $1 ORDER BY slug ASC
|
|
`
|
|
|
|
func (q *sqlQuerier) GetWorkspaceAppsCreatedAfter(ctx context.Context, createdAt time.Time) ([]WorkspaceApp, error) {
|
|
rows, err := q.db.QueryContext(ctx, getWorkspaceAppsCreatedAfter, createdAt)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rows.Close()
|
|
var items []WorkspaceApp
|
|
for rows.Next() {
|
|
var i WorkspaceApp
|
|
if err := rows.Scan(
|
|
&i.ID,
|
|
&i.CreatedAt,
|
|
&i.AgentID,
|
|
&i.DisplayName,
|
|
&i.Icon,
|
|
&i.Command,
|
|
&i.Url,
|
|
&i.HealthcheckUrl,
|
|
&i.HealthcheckInterval,
|
|
&i.HealthcheckThreshold,
|
|
&i.Health,
|
|
&i.Subdomain,
|
|
&i.SharingLevel,
|
|
&i.Slug,
|
|
&i.External,
|
|
&i.DisplayOrder,
|
|
); err != nil {
|
|
return nil, err
|
|
}
|
|
items = append(items, i)
|
|
}
|
|
if err := rows.Close(); err != nil {
|
|
return nil, err
|
|
}
|
|
if err := rows.Err(); err != nil {
|
|
return nil, err
|
|
}
|
|
return items, nil
|
|
}
|
|
|
|
const insertWorkspaceApp = `-- name: InsertWorkspaceApp :one
INSERT INTO
	workspace_apps (
		id,
		created_at,
		agent_id,
		slug,
		display_name,
		icon,
		command,
		url,
		external,
		subdomain,
		sharing_level,
		healthcheck_url,
		healthcheck_interval,
		healthcheck_threshold,
		health,
		display_order
	)
VALUES
	($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16) RETURNING id, created_at, agent_id, display_name, icon, command, url, healthcheck_url, healthcheck_interval, healthcheck_threshold, health, subdomain, sharing_level, slug, external, display_order
`

// InsertWorkspaceAppParams carries the column values for InsertWorkspaceApp.
// Field order matches the positional placeholders ($1..$16) in the query.
type InsertWorkspaceAppParams struct {
	ID                   uuid.UUID          `db:"id" json:"id"`
	CreatedAt            time.Time          `db:"created_at" json:"created_at"`
	AgentID              uuid.UUID          `db:"agent_id" json:"agent_id"`
	Slug                 string             `db:"slug" json:"slug"`
	DisplayName          string             `db:"display_name" json:"display_name"`
	Icon                 string             `db:"icon" json:"icon"`
	Command              sql.NullString     `db:"command" json:"command"`
	Url                  sql.NullString     `db:"url" json:"url"`
	External             bool               `db:"external" json:"external"`
	Subdomain            bool               `db:"subdomain" json:"subdomain"`
	SharingLevel         AppSharingLevel    `db:"sharing_level" json:"sharing_level"`
	HealthcheckUrl       string             `db:"healthcheck_url" json:"healthcheck_url"`
	HealthcheckInterval  int32              `db:"healthcheck_interval" json:"healthcheck_interval"`
	HealthcheckThreshold int32              `db:"healthcheck_threshold" json:"healthcheck_threshold"`
	Health               WorkspaceAppHealth `db:"health" json:"health"`
	DisplayOrder         int32              `db:"display_order" json:"display_order"`
}

// InsertWorkspaceApp inserts a single workspace app row and returns the
// inserted row as scanned from the RETURNING clause.
func (q *sqlQuerier) InsertWorkspaceApp(ctx context.Context, arg InsertWorkspaceAppParams) (WorkspaceApp, error) {
	row := q.db.QueryRowContext(ctx, insertWorkspaceApp,
		arg.ID,
		arg.CreatedAt,
		arg.AgentID,
		arg.Slug,
		arg.DisplayName,
		arg.Icon,
		arg.Command,
		arg.Url,
		arg.External,
		arg.Subdomain,
		arg.SharingLevel,
		arg.HealthcheckUrl,
		arg.HealthcheckInterval,
		arg.HealthcheckThreshold,
		arg.Health,
		arg.DisplayOrder,
	)
	var i WorkspaceApp
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.AgentID,
		&i.DisplayName,
		&i.Icon,
		&i.Command,
		&i.Url,
		&i.HealthcheckUrl,
		&i.HealthcheckInterval,
		&i.HealthcheckThreshold,
		&i.Health,
		&i.Subdomain,
		&i.SharingLevel,
		&i.Slug,
		&i.External,
		&i.DisplayOrder,
	)
	return i, err
}
|
|
|
|
const updateWorkspaceAppHealthByID = `-- name: UpdateWorkspaceAppHealthByID :exec
UPDATE
	workspace_apps
SET
	health = $2
WHERE
	id = $1
`

// UpdateWorkspaceAppHealthByIDParams carries the target app ID and the new
// health state for UpdateWorkspaceAppHealthByID.
type UpdateWorkspaceAppHealthByIDParams struct {
	ID     uuid.UUID          `db:"id" json:"id"`
	Health WorkspaceAppHealth `db:"health" json:"health"`
}

// UpdateWorkspaceAppHealthByID sets the health column of a single workspace
// app identified by ID. A missing row is not an error (exec semantics).
func (q *sqlQuerier) UpdateWorkspaceAppHealthByID(ctx context.Context, arg UpdateWorkspaceAppHealthByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateWorkspaceAppHealthByID, arg.ID, arg.Health)
	return err
}
|
|
|
|
const insertWorkspaceAppStats = `-- name: InsertWorkspaceAppStats :exec
INSERT INTO
	workspace_app_stats (
		user_id,
		workspace_id,
		agent_id,
		access_method,
		slug_or_port,
		session_id,
		session_started_at,
		session_ended_at,
		requests
	)
SELECT
	unnest($1::uuid[]) AS user_id,
	unnest($2::uuid[]) AS workspace_id,
	unnest($3::uuid[]) AS agent_id,
	unnest($4::text[]) AS access_method,
	unnest($5::text[]) AS slug_or_port,
	unnest($6::uuid[]) AS session_id,
	unnest($7::timestamptz[]) AS session_started_at,
	unnest($8::timestamptz[]) AS session_ended_at,
	unnest($9::int[]) AS requests
ON CONFLICT
	(user_id, agent_id, session_id)
DO
	UPDATE SET
		session_ended_at = EXCLUDED.session_ended_at,
		requests = EXCLUDED.requests
	WHERE
		workspace_app_stats.user_id = EXCLUDED.user_id
		AND workspace_app_stats.agent_id = EXCLUDED.agent_id
		AND workspace_app_stats.session_id = EXCLUDED.session_id
		-- Since stats are updated in place as time progresses, we only
		-- want to update this row if it's fresh.
		AND workspace_app_stats.session_ended_at <= EXCLUDED.session_ended_at
		AND workspace_app_stats.requests <= EXCLUDED.requests
`

// InsertWorkspaceAppStatsParams holds parallel arrays; element k of each
// slice forms one row via unnest. All slices must be the same length —
// TODO confirm: nothing here enforces that, Postgres zips shorter arrays
// with NULLs if lengths differ.
type InsertWorkspaceAppStatsParams struct {
	UserID           []uuid.UUID `db:"user_id" json:"user_id"`
	WorkspaceID      []uuid.UUID `db:"workspace_id" json:"workspace_id"`
	AgentID          []uuid.UUID `db:"agent_id" json:"agent_id"`
	AccessMethod     []string    `db:"access_method" json:"access_method"`
	SlugOrPort       []string    `db:"slug_or_port" json:"slug_or_port"`
	SessionID        []uuid.UUID `db:"session_id" json:"session_id"`
	SessionStartedAt []time.Time `db:"session_started_at" json:"session_started_at"`
	SessionEndedAt   []time.Time `db:"session_ended_at" json:"session_ended_at"`
	Requests         []int32     `db:"requests" json:"requests"`
}

// InsertWorkspaceAppStats bulk-upserts app session stats. On conflict of
// (user_id, agent_id, session_id) it only overwrites when the incoming row
// is at least as fresh (see WHERE clause in the query above).
func (q *sqlQuerier) InsertWorkspaceAppStats(ctx context.Context, arg InsertWorkspaceAppStatsParams) error {
	_, err := q.db.ExecContext(ctx, insertWorkspaceAppStats,
		pq.Array(arg.UserID),
		pq.Array(arg.WorkspaceID),
		pq.Array(arg.AgentID),
		pq.Array(arg.AccessMethod),
		pq.Array(arg.SlugOrPort),
		pq.Array(arg.SessionID),
		pq.Array(arg.SessionStartedAt),
		pq.Array(arg.SessionEndedAt),
		pq.Array(arg.Requests),
	)
	return err
}
|
|
|
|
const getUserWorkspaceBuildParameters = `-- name: GetUserWorkspaceBuildParameters :many
SELECT name, value
FROM (
	SELECT DISTINCT ON (tvp.name)
		tvp.name,
		wbp.value,
		wb.created_at
	FROM
		workspace_build_parameters wbp
	JOIN
		workspace_builds wb ON wb.id = wbp.workspace_build_id
	JOIN
		workspaces w ON w.id = wb.workspace_id
	JOIN
		template_version_parameters tvp ON tvp.template_version_id = wb.template_version_id
	WHERE
		w.owner_id = $1
		AND wb.transition = 'start'
		AND w.template_id = $2
		AND tvp.ephemeral = false
		AND tvp.name = wbp.name
	ORDER BY
		tvp.name, wb.created_at DESC
) q1
ORDER BY created_at DESC, name
LIMIT 100
`

// GetUserWorkspaceBuildParametersParams selects the user (owner) and
// template whose build parameters should be collected.
type GetUserWorkspaceBuildParametersParams struct {
	OwnerID    uuid.UUID `db:"owner_id" json:"owner_id"`
	TemplateID uuid.UUID `db:"template_id" json:"template_id"`
}

// GetUserWorkspaceBuildParametersRow is a (name, value) pair for one
// non-ephemeral parameter.
type GetUserWorkspaceBuildParametersRow struct {
	Name  string `db:"name" json:"name"`
	Value string `db:"value" json:"value"`
}

// GetUserWorkspaceBuildParameters returns, for a given owner and template,
// the most recently used value of each non-ephemeral parameter across
// 'start' builds (DISTINCT ON name, newest build wins), capped at 100 rows.
func (q *sqlQuerier) GetUserWorkspaceBuildParameters(ctx context.Context, arg GetUserWorkspaceBuildParametersParams) ([]GetUserWorkspaceBuildParametersRow, error) {
	rows, err := q.db.QueryContext(ctx, getUserWorkspaceBuildParameters, arg.OwnerID, arg.TemplateID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetUserWorkspaceBuildParametersRow
	for rows.Next() {
		var i GetUserWorkspaceBuildParametersRow
		if err := rows.Scan(&i.Name, &i.Value); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getWorkspaceBuildParameters = `-- name: GetWorkspaceBuildParameters :many
SELECT
	workspace_build_id, name, value
FROM
	workspace_build_parameters
WHERE
	workspace_build_id = $1
`

// GetWorkspaceBuildParameters returns every parameter row recorded for the
// given workspace build.
func (q *sqlQuerier) GetWorkspaceBuildParameters(ctx context.Context, workspaceBuildID uuid.UUID) ([]WorkspaceBuildParameter, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceBuildParameters, workspaceBuildID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceBuildParameter
	for rows.Next() {
		var i WorkspaceBuildParameter
		if err := rows.Scan(&i.WorkspaceBuildID, &i.Name, &i.Value); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const insertWorkspaceBuildParameters = `-- name: InsertWorkspaceBuildParameters :exec
INSERT INTO
	workspace_build_parameters (workspace_build_id, name, value)
SELECT
	$1 :: uuid AS workspace_build_id,
	unnest($2 :: text[]) AS name,
	unnest($3 :: text[]) AS value
RETURNING workspace_build_id, name, value
`

// InsertWorkspaceBuildParametersParams holds one build ID and parallel
// name/value slices; element k of each slice forms one inserted row.
type InsertWorkspaceBuildParametersParams struct {
	WorkspaceBuildID uuid.UUID `db:"workspace_build_id" json:"workspace_build_id"`
	Name             []string  `db:"name" json:"name"`
	Value            []string  `db:"value" json:"value"`
}

// InsertWorkspaceBuildParameters bulk-inserts parameters for a single build.
// The RETURNING clause is discarded (exec semantics).
func (q *sqlQuerier) InsertWorkspaceBuildParameters(ctx context.Context, arg InsertWorkspaceBuildParametersParams) error {
	_, err := q.db.ExecContext(ctx, insertWorkspaceBuildParameters, arg.WorkspaceBuildID, pq.Array(arg.Name), pq.Array(arg.Value))
	return err
}
|
|
|
|
const getActiveWorkspaceBuildsByTemplateID = `-- name: GetActiveWorkspaceBuildsByTemplateID :many
SELECT wb.id, wb.created_at, wb.updated_at, wb.workspace_id, wb.template_version_id, wb.build_number, wb.transition, wb.initiator_id, wb.provisioner_state, wb.job_id, wb.deadline, wb.reason, wb.daily_cost, wb.max_deadline, wb.initiator_by_avatar_url, wb.initiator_by_username
FROM (
	SELECT
		workspace_id, MAX(build_number) as max_build_number
	FROM
		workspace_build_with_user AS workspace_builds
	WHERE
		workspace_id IN (
			SELECT
				id
			FROM
				workspaces
			WHERE
				template_id = $1
		)
	GROUP BY
		workspace_id
) m
JOIN
	workspace_build_with_user AS wb
ON m.workspace_id = wb.workspace_id AND m.max_build_number = wb.build_number
JOIN
	provisioner_jobs AS pj
ON wb.job_id = pj.id
WHERE
	wb.transition = 'start'::workspace_transition
AND
	pj.completed_at IS NOT NULL
`

// GetActiveWorkspaceBuildsByTemplateID returns, for every workspace on the
// given template, its latest build (highest build_number) — but only when
// that build is a completed 'start' transition, i.e. the workspace is
// currently running.
func (q *sqlQuerier) GetActiveWorkspaceBuildsByTemplateID(ctx context.Context, templateID uuid.UUID) ([]WorkspaceBuild, error) {
	rows, err := q.db.QueryContext(ctx, getActiveWorkspaceBuildsByTemplateID, templateID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceBuild
	for rows.Next() {
		var i WorkspaceBuild
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.WorkspaceID,
			&i.TemplateVersionID,
			&i.BuildNumber,
			&i.Transition,
			&i.InitiatorID,
			&i.ProvisionerState,
			&i.JobID,
			&i.Deadline,
			&i.Reason,
			&i.DailyCost,
			&i.MaxDeadline,
			&i.InitiatorByAvatarUrl,
			&i.InitiatorByUsername,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getLatestWorkspaceBuildByWorkspaceID = `-- name: GetLatestWorkspaceBuildByWorkspaceID :one
SELECT
	id, created_at, updated_at, workspace_id, template_version_id, build_number, transition, initiator_id, provisioner_state, job_id, deadline, reason, daily_cost, max_deadline, initiator_by_avatar_url, initiator_by_username
FROM
	workspace_build_with_user AS workspace_builds
WHERE
	workspace_id = $1
ORDER BY
	build_number desc
LIMIT
	1
`

// GetLatestWorkspaceBuildByWorkspaceID returns the build with the highest
// build_number for the given workspace. sql.ErrNoRows if the workspace has
// no builds.
func (q *sqlQuerier) GetLatestWorkspaceBuildByWorkspaceID(ctx context.Context, workspaceID uuid.UUID) (WorkspaceBuild, error) {
	row := q.db.QueryRowContext(ctx, getLatestWorkspaceBuildByWorkspaceID, workspaceID)
	var i WorkspaceBuild
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.WorkspaceID,
		&i.TemplateVersionID,
		&i.BuildNumber,
		&i.Transition,
		&i.InitiatorID,
		&i.ProvisionerState,
		&i.JobID,
		&i.Deadline,
		&i.Reason,
		&i.DailyCost,
		&i.MaxDeadline,
		&i.InitiatorByAvatarUrl,
		&i.InitiatorByUsername,
	)
	return i, err
}
|
|
|
|
const getLatestWorkspaceBuilds = `-- name: GetLatestWorkspaceBuilds :many
SELECT wb.id, wb.created_at, wb.updated_at, wb.workspace_id, wb.template_version_id, wb.build_number, wb.transition, wb.initiator_id, wb.provisioner_state, wb.job_id, wb.deadline, wb.reason, wb.daily_cost, wb.max_deadline, wb.initiator_by_avatar_url, wb.initiator_by_username
FROM (
	SELECT
		workspace_id, MAX(build_number) as max_build_number
	FROM
		workspace_build_with_user AS workspace_builds
	GROUP BY
		workspace_id
) m
JOIN
	workspace_build_with_user AS wb
ON m.workspace_id = wb.workspace_id AND m.max_build_number = wb.build_number
`

// GetLatestWorkspaceBuilds returns the newest build (highest build_number)
// of every workspace in the deployment.
func (q *sqlQuerier) GetLatestWorkspaceBuilds(ctx context.Context) ([]WorkspaceBuild, error) {
	rows, err := q.db.QueryContext(ctx, getLatestWorkspaceBuilds)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceBuild
	for rows.Next() {
		var i WorkspaceBuild
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.WorkspaceID,
			&i.TemplateVersionID,
			&i.BuildNumber,
			&i.Transition,
			&i.InitiatorID,
			&i.ProvisionerState,
			&i.JobID,
			&i.Deadline,
			&i.Reason,
			&i.DailyCost,
			&i.MaxDeadline,
			&i.InitiatorByAvatarUrl,
			&i.InitiatorByUsername,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getLatestWorkspaceBuildsByWorkspaceIDs = `-- name: GetLatestWorkspaceBuildsByWorkspaceIDs :many
SELECT wb.id, wb.created_at, wb.updated_at, wb.workspace_id, wb.template_version_id, wb.build_number, wb.transition, wb.initiator_id, wb.provisioner_state, wb.job_id, wb.deadline, wb.reason, wb.daily_cost, wb.max_deadline, wb.initiator_by_avatar_url, wb.initiator_by_username
FROM (
	SELECT
		workspace_id, MAX(build_number) as max_build_number
	FROM
		workspace_build_with_user AS workspace_builds
	WHERE
		workspace_id = ANY($1 :: uuid [ ])
	GROUP BY
		workspace_id
) m
JOIN
	workspace_build_with_user AS wb
ON m.workspace_id = wb.workspace_id AND m.max_build_number = wb.build_number
`

// GetLatestWorkspaceBuildsByWorkspaceIDs returns the newest build (highest
// build_number) of each workspace in ids. Workspaces without builds simply
// produce no row.
func (q *sqlQuerier) GetLatestWorkspaceBuildsByWorkspaceIDs(ctx context.Context, ids []uuid.UUID) ([]WorkspaceBuild, error) {
	rows, err := q.db.QueryContext(ctx, getLatestWorkspaceBuildsByWorkspaceIDs, pq.Array(ids))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceBuild
	for rows.Next() {
		var i WorkspaceBuild
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.WorkspaceID,
			&i.TemplateVersionID,
			&i.BuildNumber,
			&i.Transition,
			&i.InitiatorID,
			&i.ProvisionerState,
			&i.JobID,
			&i.Deadline,
			&i.Reason,
			&i.DailyCost,
			&i.MaxDeadline,
			&i.InitiatorByAvatarUrl,
			&i.InitiatorByUsername,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getWorkspaceBuildByID = `-- name: GetWorkspaceBuildByID :one
SELECT
	id, created_at, updated_at, workspace_id, template_version_id, build_number, transition, initiator_id, provisioner_state, job_id, deadline, reason, daily_cost, max_deadline, initiator_by_avatar_url, initiator_by_username
FROM
	workspace_build_with_user AS workspace_builds
WHERE
	id = $1
LIMIT
	1
`

// GetWorkspaceBuildByID fetches a single build by its primary key.
// sql.ErrNoRows if no such build exists.
func (q *sqlQuerier) GetWorkspaceBuildByID(ctx context.Context, id uuid.UUID) (WorkspaceBuild, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceBuildByID, id)
	var i WorkspaceBuild
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.WorkspaceID,
		&i.TemplateVersionID,
		&i.BuildNumber,
		&i.Transition,
		&i.InitiatorID,
		&i.ProvisionerState,
		&i.JobID,
		&i.Deadline,
		&i.Reason,
		&i.DailyCost,
		&i.MaxDeadline,
		&i.InitiatorByAvatarUrl,
		&i.InitiatorByUsername,
	)
	return i, err
}
|
|
|
|
const getWorkspaceBuildByJobID = `-- name: GetWorkspaceBuildByJobID :one
SELECT
	id, created_at, updated_at, workspace_id, template_version_id, build_number, transition, initiator_id, provisioner_state, job_id, deadline, reason, daily_cost, max_deadline, initiator_by_avatar_url, initiator_by_username
FROM
	workspace_build_with_user AS workspace_builds
WHERE
	job_id = $1
LIMIT
	1
`

// GetWorkspaceBuildByJobID fetches the build associated with the given
// provisioner job. sql.ErrNoRows if no build references that job.
func (q *sqlQuerier) GetWorkspaceBuildByJobID(ctx context.Context, jobID uuid.UUID) (WorkspaceBuild, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceBuildByJobID, jobID)
	var i WorkspaceBuild
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.WorkspaceID,
		&i.TemplateVersionID,
		&i.BuildNumber,
		&i.Transition,
		&i.InitiatorID,
		&i.ProvisionerState,
		&i.JobID,
		&i.Deadline,
		&i.Reason,
		&i.DailyCost,
		&i.MaxDeadline,
		&i.InitiatorByAvatarUrl,
		&i.InitiatorByUsername,
	)
	return i, err
}
|
|
|
|
const getWorkspaceBuildByWorkspaceIDAndBuildNumber = `-- name: GetWorkspaceBuildByWorkspaceIDAndBuildNumber :one
SELECT
	id, created_at, updated_at, workspace_id, template_version_id, build_number, transition, initiator_id, provisioner_state, job_id, deadline, reason, daily_cost, max_deadline, initiator_by_avatar_url, initiator_by_username
FROM
	workspace_build_with_user AS workspace_builds
WHERE
	workspace_id = $1
	AND build_number = $2
`

// GetWorkspaceBuildByWorkspaceIDAndBuildNumberParams identifies one build by
// its (workspace, build_number) pair.
type GetWorkspaceBuildByWorkspaceIDAndBuildNumberParams struct {
	WorkspaceID uuid.UUID `db:"workspace_id" json:"workspace_id"`
	BuildNumber int32     `db:"build_number" json:"build_number"`
}

// GetWorkspaceBuildByWorkspaceIDAndBuildNumber fetches a specific build of a
// workspace by its sequential build number.
func (q *sqlQuerier) GetWorkspaceBuildByWorkspaceIDAndBuildNumber(ctx context.Context, arg GetWorkspaceBuildByWorkspaceIDAndBuildNumberParams) (WorkspaceBuild, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceBuildByWorkspaceIDAndBuildNumber, arg.WorkspaceID, arg.BuildNumber)
	var i WorkspaceBuild
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.WorkspaceID,
		&i.TemplateVersionID,
		&i.BuildNumber,
		&i.Transition,
		&i.InitiatorID,
		&i.ProvisionerState,
		&i.JobID,
		&i.Deadline,
		&i.Reason,
		&i.DailyCost,
		&i.MaxDeadline,
		&i.InitiatorByAvatarUrl,
		&i.InitiatorByUsername,
	)
	return i, err
}
|
|
|
|
const getWorkspaceBuildsByWorkspaceID = `-- name: GetWorkspaceBuildsByWorkspaceID :many
SELECT
	id, created_at, updated_at, workspace_id, template_version_id, build_number, transition, initiator_id, provisioner_state, job_id, deadline, reason, daily_cost, max_deadline, initiator_by_avatar_url, initiator_by_username
FROM
	workspace_build_with_user AS workspace_builds
WHERE
	workspace_builds.workspace_id = $1
	AND workspace_builds.created_at > $2
	AND CASE
		-- This allows using the last element on a page as effectively a cursor.
		-- This is an important option for scripts that need to paginate without
		-- duplicating or missing data.
		WHEN $3 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN (
			-- The pagination cursor is the last ID of the previous page.
			-- The query is ordered by the build_number field, so select all
			-- rows after the cursor.
			build_number > (
				SELECT
					build_number
				FROM
					workspace_builds
				WHERE
					id = $3
			)
		)
		ELSE true
	END
ORDER BY
	build_number desc OFFSET $4
LIMIT
	-- A null limit means "no limit", so 0 means return all
	NULLIF($5 :: int, 0)
`

// GetWorkspaceBuildsByWorkspaceIDParams configures the paginated build
// listing: AfterID is a cursor (zero UUID disables it), OffsetOpt/LimitOpt
// are optional (LimitOpt 0 means no limit via NULLIF).
type GetWorkspaceBuildsByWorkspaceIDParams struct {
	WorkspaceID uuid.UUID `db:"workspace_id" json:"workspace_id"`
	Since       time.Time `db:"since" json:"since"`
	AfterID     uuid.UUID `db:"after_id" json:"after_id"`
	OffsetOpt   int32     `db:"offset_opt" json:"offset_opt"`
	LimitOpt    int32     `db:"limit_opt" json:"limit_opt"`
}

// GetWorkspaceBuildsByWorkspaceID lists builds of one workspace created
// after Since, newest build_number first, with cursor- and offset-based
// pagination as described on the params struct.
func (q *sqlQuerier) GetWorkspaceBuildsByWorkspaceID(ctx context.Context, arg GetWorkspaceBuildsByWorkspaceIDParams) ([]WorkspaceBuild, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceBuildsByWorkspaceID,
		arg.WorkspaceID,
		arg.Since,
		arg.AfterID,
		arg.OffsetOpt,
		arg.LimitOpt,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceBuild
	for rows.Next() {
		var i WorkspaceBuild
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.WorkspaceID,
			&i.TemplateVersionID,
			&i.BuildNumber,
			&i.Transition,
			&i.InitiatorID,
			&i.ProvisionerState,
			&i.JobID,
			&i.Deadline,
			&i.Reason,
			&i.DailyCost,
			&i.MaxDeadline,
			&i.InitiatorByAvatarUrl,
			&i.InitiatorByUsername,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getWorkspaceBuildsCreatedAfter = `-- name: GetWorkspaceBuildsCreatedAfter :many
SELECT id, created_at, updated_at, workspace_id, template_version_id, build_number, transition, initiator_id, provisioner_state, job_id, deadline, reason, daily_cost, max_deadline, initiator_by_avatar_url, initiator_by_username FROM workspace_build_with_user WHERE created_at > $1
`

// GetWorkspaceBuildsCreatedAfter returns all builds created strictly after
// the given timestamp, in no particular order.
func (q *sqlQuerier) GetWorkspaceBuildsCreatedAfter(ctx context.Context, createdAt time.Time) ([]WorkspaceBuild, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceBuildsCreatedAfter, createdAt)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceBuild
	for rows.Next() {
		var i WorkspaceBuild
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.WorkspaceID,
			&i.TemplateVersionID,
			&i.BuildNumber,
			&i.Transition,
			&i.InitiatorID,
			&i.ProvisionerState,
			&i.JobID,
			&i.Deadline,
			&i.Reason,
			&i.DailyCost,
			&i.MaxDeadline,
			&i.InitiatorByAvatarUrl,
			&i.InitiatorByUsername,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const insertWorkspaceBuild = `-- name: InsertWorkspaceBuild :exec
INSERT INTO
	workspace_builds (
		id,
		created_at,
		updated_at,
		workspace_id,
		template_version_id,
		"build_number",
		transition,
		initiator_id,
		job_id,
		provisioner_state,
		deadline,
		max_deadline,
		reason
	)
VALUES
	($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)
`

// InsertWorkspaceBuildParams carries the column values for
// InsertWorkspaceBuild; field order matches placeholders $1..$13.
type InsertWorkspaceBuildParams struct {
	ID                uuid.UUID           `db:"id" json:"id"`
	CreatedAt         time.Time           `db:"created_at" json:"created_at"`
	UpdatedAt         time.Time           `db:"updated_at" json:"updated_at"`
	WorkspaceID       uuid.UUID           `db:"workspace_id" json:"workspace_id"`
	TemplateVersionID uuid.UUID           `db:"template_version_id" json:"template_version_id"`
	BuildNumber       int32               `db:"build_number" json:"build_number"`
	Transition        WorkspaceTransition `db:"transition" json:"transition"`
	InitiatorID       uuid.UUID           `db:"initiator_id" json:"initiator_id"`
	JobID             uuid.UUID           `db:"job_id" json:"job_id"`
	ProvisionerState  []byte              `db:"provisioner_state" json:"provisioner_state"`
	Deadline          time.Time           `db:"deadline" json:"deadline"`
	MaxDeadline       time.Time           `db:"max_deadline" json:"max_deadline"`
	Reason            BuildReason         `db:"reason" json:"reason"`
}

// InsertWorkspaceBuild inserts a single workspace build row.
func (q *sqlQuerier) InsertWorkspaceBuild(ctx context.Context, arg InsertWorkspaceBuildParams) error {
	_, err := q.db.ExecContext(ctx, insertWorkspaceBuild,
		arg.ID,
		arg.CreatedAt,
		arg.UpdatedAt,
		arg.WorkspaceID,
		arg.TemplateVersionID,
		arg.BuildNumber,
		arg.Transition,
		arg.InitiatorID,
		arg.JobID,
		arg.ProvisionerState,
		arg.Deadline,
		arg.MaxDeadline,
		arg.Reason,
	)
	return err
}
|
|
|
|
const updateWorkspaceBuildCostByID = `-- name: UpdateWorkspaceBuildCostByID :exec
UPDATE
	workspace_builds
SET
	daily_cost = $2
WHERE
	id = $1
`

// UpdateWorkspaceBuildCostByIDParams carries the build ID and its new
// daily_cost value.
type UpdateWorkspaceBuildCostByIDParams struct {
	ID        uuid.UUID `db:"id" json:"id"`
	DailyCost int32     `db:"daily_cost" json:"daily_cost"`
}

// UpdateWorkspaceBuildCostByID sets the daily_cost column of one build.
func (q *sqlQuerier) UpdateWorkspaceBuildCostByID(ctx context.Context, arg UpdateWorkspaceBuildCostByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateWorkspaceBuildCostByID, arg.ID, arg.DailyCost)
	return err
}
|
|
|
|
const updateWorkspaceBuildDeadlineByID = `-- name: UpdateWorkspaceBuildDeadlineByID :exec
UPDATE
	workspace_builds
SET
	deadline = $1::timestamptz,
	max_deadline = $2::timestamptz,
	updated_at = $3::timestamptz
WHERE id = $4::uuid
`

// UpdateWorkspaceBuildDeadlineByIDParams carries the new deadline values and
// the target build ID; field order matches placeholders $1..$4.
type UpdateWorkspaceBuildDeadlineByIDParams struct {
	Deadline    time.Time `db:"deadline" json:"deadline"`
	MaxDeadline time.Time `db:"max_deadline" json:"max_deadline"`
	UpdatedAt   time.Time `db:"updated_at" json:"updated_at"`
	ID          uuid.UUID `db:"id" json:"id"`
}

// UpdateWorkspaceBuildDeadlineByID updates deadline, max_deadline, and
// updated_at for a single build.
func (q *sqlQuerier) UpdateWorkspaceBuildDeadlineByID(ctx context.Context, arg UpdateWorkspaceBuildDeadlineByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateWorkspaceBuildDeadlineByID,
		arg.Deadline,
		arg.MaxDeadline,
		arg.UpdatedAt,
		arg.ID,
	)
	return err
}
|
|
|
|
const updateWorkspaceBuildProvisionerStateByID = `-- name: UpdateWorkspaceBuildProvisionerStateByID :exec
UPDATE
	workspace_builds
SET
	provisioner_state = $1::bytea,
	updated_at = $2::timestamptz
WHERE id = $3::uuid
`

// UpdateWorkspaceBuildProvisionerStateByIDParams carries the new opaque
// provisioner state blob, an updated_at timestamp, and the target build ID.
type UpdateWorkspaceBuildProvisionerStateByIDParams struct {
	ProvisionerState []byte    `db:"provisioner_state" json:"provisioner_state"`
	UpdatedAt        time.Time `db:"updated_at" json:"updated_at"`
	ID               uuid.UUID `db:"id" json:"id"`
}

// UpdateWorkspaceBuildProvisionerStateByID replaces the provisioner_state of
// one build and bumps its updated_at.
func (q *sqlQuerier) UpdateWorkspaceBuildProvisionerStateByID(ctx context.Context, arg UpdateWorkspaceBuildProvisionerStateByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateWorkspaceBuildProvisionerStateByID, arg.ProvisionerState, arg.UpdatedAt, arg.ID)
	return err
}
|
|
|
|
const getWorkspaceResourceByID = `-- name: GetWorkspaceResourceByID :one
SELECT
	id, created_at, job_id, transition, type, name, hide, icon, instance_type, daily_cost
FROM
	workspace_resources
WHERE
	id = $1
`

// GetWorkspaceResourceByID fetches a single workspace resource by primary
// key. sql.ErrNoRows if not found.
func (q *sqlQuerier) GetWorkspaceResourceByID(ctx context.Context, id uuid.UUID) (WorkspaceResource, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceResourceByID, id)
	var i WorkspaceResource
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.JobID,
		&i.Transition,
		&i.Type,
		&i.Name,
		&i.Hide,
		&i.Icon,
		&i.InstanceType,
		&i.DailyCost,
	)
	return i, err
}
|
|
|
|
const getWorkspaceResourceMetadataByResourceIDs = `-- name: GetWorkspaceResourceMetadataByResourceIDs :many
SELECT
	workspace_resource_id, key, value, sensitive, id
FROM
	workspace_resource_metadata
WHERE
	workspace_resource_id = ANY($1 :: uuid [ ]) ORDER BY id ASC
`

// GetWorkspaceResourceMetadataByResourceIDs returns metadata rows for all of
// the given resource IDs, ordered by metadata row ID ascending.
func (q *sqlQuerier) GetWorkspaceResourceMetadataByResourceIDs(ctx context.Context, ids []uuid.UUID) ([]WorkspaceResourceMetadatum, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceResourceMetadataByResourceIDs, pq.Array(ids))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceResourceMetadatum
	for rows.Next() {
		var i WorkspaceResourceMetadatum
		if err := rows.Scan(
			&i.WorkspaceResourceID,
			&i.Key,
			&i.Value,
			&i.Sensitive,
			&i.ID,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getWorkspaceResourceMetadataCreatedAfter = `-- name: GetWorkspaceResourceMetadataCreatedAfter :many
SELECT workspace_resource_id, key, value, sensitive, id FROM workspace_resource_metadata WHERE workspace_resource_id = ANY(
	SELECT id FROM workspace_resources WHERE created_at > $1
)
`

// GetWorkspaceResourceMetadataCreatedAfter returns metadata belonging to
// resources created strictly after the given timestamp (the filter is on
// the parent resource's created_at, not the metadata itself).
func (q *sqlQuerier) GetWorkspaceResourceMetadataCreatedAfter(ctx context.Context, createdAt time.Time) ([]WorkspaceResourceMetadatum, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceResourceMetadataCreatedAfter, createdAt)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceResourceMetadatum
	for rows.Next() {
		var i WorkspaceResourceMetadatum
		if err := rows.Scan(
			&i.WorkspaceResourceID,
			&i.Key,
			&i.Value,
			&i.Sensitive,
			&i.ID,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getWorkspaceResourcesByJobID = `-- name: GetWorkspaceResourcesByJobID :many
SELECT
	id, created_at, job_id, transition, type, name, hide, icon, instance_type, daily_cost
FROM
	workspace_resources
WHERE
	job_id = $1
`

// GetWorkspaceResourcesByJobID returns all resources produced by a single
// provisioner job.
func (q *sqlQuerier) GetWorkspaceResourcesByJobID(ctx context.Context, jobID uuid.UUID) ([]WorkspaceResource, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceResourcesByJobID, jobID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceResource
	for rows.Next() {
		var i WorkspaceResource
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.JobID,
			&i.Transition,
			&i.Type,
			&i.Name,
			&i.Hide,
			&i.Icon,
			&i.InstanceType,
			&i.DailyCost,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getWorkspaceResourcesByJobIDs = `-- name: GetWorkspaceResourcesByJobIDs :many
SELECT
	id, created_at, job_id, transition, type, name, hide, icon, instance_type, daily_cost
FROM
	workspace_resources
WHERE
	job_id = ANY($1 :: uuid [ ])
`

// GetWorkspaceResourcesByJobIDs returns all resources produced by any of the
// given provisioner jobs.
func (q *sqlQuerier) GetWorkspaceResourcesByJobIDs(ctx context.Context, ids []uuid.UUID) ([]WorkspaceResource, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceResourcesByJobIDs, pq.Array(ids))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceResource
	for rows.Next() {
		var i WorkspaceResource
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.JobID,
			&i.Transition,
			&i.Type,
			&i.Name,
			&i.Hide,
			&i.Icon,
			&i.InstanceType,
			&i.DailyCost,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getWorkspaceResourcesCreatedAfter = `-- name: GetWorkspaceResourcesCreatedAfter :many
SELECT id, created_at, job_id, transition, type, name, hide, icon, instance_type, daily_cost FROM workspace_resources WHERE created_at > $1
`

// GetWorkspaceResourcesCreatedAfter returns all resources created strictly
// after the given timestamp.
func (q *sqlQuerier) GetWorkspaceResourcesCreatedAfter(ctx context.Context, createdAt time.Time) ([]WorkspaceResource, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceResourcesCreatedAfter, createdAt)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceResource
	for rows.Next() {
		var i WorkspaceResource
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.JobID,
			&i.Transition,
			&i.Type,
			&i.Name,
			&i.Hide,
			&i.Icon,
			&i.InstanceType,
			&i.DailyCost,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// insertWorkspaceResource inserts a single workspace resource row and
// returns the inserted row in full (RETURNING all columns).
const insertWorkspaceResource = `-- name: InsertWorkspaceResource :one
INSERT INTO
	workspace_resources (id, created_at, job_id, transition, type, name, hide, icon, instance_type, daily_cost)
VALUES
	($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) RETURNING id, created_at, job_id, transition, type, name, hide, icon, instance_type, daily_cost
`
|
|
|
|
// InsertWorkspaceResourceParams holds the bind parameters for
// InsertWorkspaceResource. Field order matches the $1..$10 placeholders of
// the generated INSERT statement.
type InsertWorkspaceResourceParams struct {
	ID           uuid.UUID           `db:"id" json:"id"`
	CreatedAt    time.Time           `db:"created_at" json:"created_at"`
	JobID        uuid.UUID           `db:"job_id" json:"job_id"`
	Transition   WorkspaceTransition `db:"transition" json:"transition"`
	Type         string              `db:"type" json:"type"`
	Name         string              `db:"name" json:"name"`
	Hide         bool                `db:"hide" json:"hide"`
	Icon         string              `db:"icon" json:"icon"`
	InstanceType sql.NullString      `db:"instance_type" json:"instance_type"`
	DailyCost    int32               `db:"daily_cost" json:"daily_cost"`
}
|
|
|
|
// InsertWorkspaceResource inserts one workspace resource and returns the
// row as persisted by the database.
//
// NOTE: both the query arguments and the Scan targets are positional and
// must match the generated SQL; do not reorder them by hand.
func (q *sqlQuerier) InsertWorkspaceResource(ctx context.Context, arg InsertWorkspaceResourceParams) (WorkspaceResource, error) {
	row := q.db.QueryRowContext(ctx, insertWorkspaceResource,
		arg.ID,
		arg.CreatedAt,
		arg.JobID,
		arg.Transition,
		arg.Type,
		arg.Name,
		arg.Hide,
		arg.Icon,
		arg.InstanceType,
		arg.DailyCost,
	)
	var i WorkspaceResource
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.JobID,
		&i.Transition,
		&i.Type,
		&i.Name,
		&i.Hide,
		&i.Icon,
		&i.InstanceType,
		&i.DailyCost,
	)
	return i, err
}
|
|
|
|
// insertWorkspaceResourceMetadata bulk-inserts metadata rows for a single
// workspace resource. The three parallel arrays (keys, values, sensitive
// flags) are unnested element-wise, so index i of each array forms one row.
// All inserted rows are returned.
const insertWorkspaceResourceMetadata = `-- name: InsertWorkspaceResourceMetadata :many
INSERT INTO
	workspace_resource_metadata
SELECT
	$1 :: uuid AS workspace_resource_id,
	unnest($2 :: text [ ]) AS key,
	unnest($3 :: text [ ]) AS value,
	unnest($4 :: boolean [ ]) AS sensitive RETURNING workspace_resource_id, key, value, sensitive, id
`
|
|
|
|
// InsertWorkspaceResourceMetadataParams holds the bind parameters for
// InsertWorkspaceResourceMetadata. Key, Value and Sensitive are parallel
// arrays: element i of each describes one metadata row.
type InsertWorkspaceResourceMetadataParams struct {
	WorkspaceResourceID uuid.UUID `db:"workspace_resource_id" json:"workspace_resource_id"`
	Key                 []string  `db:"key" json:"key"`
	Value               []string  `db:"value" json:"value"`
	Sensitive           []bool    `db:"sensitive" json:"sensitive"`
}
|
|
|
|
// InsertWorkspaceResourceMetadata bulk-inserts resource metadata rows and
// returns each inserted row. The three slices in arg must be the same
// length; they are zipped element-wise by the SQL unnest.
//
// NOTE: the Scan targets must stay in the exact order of the RETURNING
// column list; do not reorder them by hand.
func (q *sqlQuerier) InsertWorkspaceResourceMetadata(ctx context.Context, arg InsertWorkspaceResourceMetadataParams) ([]WorkspaceResourceMetadatum, error) {
	rows, err := q.db.QueryContext(ctx, insertWorkspaceResourceMetadata,
		arg.WorkspaceResourceID,
		pq.Array(arg.Key),
		pq.Array(arg.Value),
		pq.Array(arg.Sensitive),
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceResourceMetadatum
	for rows.Next() {
		var i WorkspaceResourceMetadatum
		if err := rows.Scan(
			&i.WorkspaceResourceID,
			&i.Key,
			&i.Value,
			&i.Sensitive,
			&i.ID,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err so drain errors are reported to the caller.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// batchUpdateWorkspaceLastUsedAt stamps last_used_at on a set of
// workspaces. The `last_used_at < $1` guard makes the update monotonic:
// a stale (older) timestamp never overwrites a newer one.
const batchUpdateWorkspaceLastUsedAt = `-- name: BatchUpdateWorkspaceLastUsedAt :exec
UPDATE
	workspaces
SET
	last_used_at = $1
WHERE
	id = ANY($2 :: uuid[])
AND
	-- Do not overwrite with older data
	last_used_at < $1
`
|
|
|
|
// BatchUpdateWorkspaceLastUsedAtParams holds the bind parameters for
// BatchUpdateWorkspaceLastUsedAt: the new timestamp and the workspace IDs
// it applies to.
type BatchUpdateWorkspaceLastUsedAtParams struct {
	LastUsedAt time.Time   `db:"last_used_at" json:"last_used_at"`
	IDs        []uuid.UUID `db:"ids" json:"ids"`
}
|
|
|
|
// BatchUpdateWorkspaceLastUsedAt updates last_used_at for all of the given
// workspace IDs. Rows whose existing last_used_at is already >= the new
// value are left untouched (see the SQL guard clause).
func (q *sqlQuerier) BatchUpdateWorkspaceLastUsedAt(ctx context.Context, arg BatchUpdateWorkspaceLastUsedAtParams) error {
	_, err := q.db.ExecContext(ctx, batchUpdateWorkspaceLastUsedAt, arg.LastUsedAt, pq.Array(arg.IDs))
	return err
}
|
|
|
|
// favoriteWorkspace marks a single workspace as a favorite.
const favoriteWorkspace = `-- name: FavoriteWorkspace :exec
UPDATE workspaces SET favorite = true WHERE id = $1
`
|
|
|
|
// FavoriteWorkspace sets favorite = true on the workspace with the given
// ID. It is a no-op (no error) if the ID does not exist.
func (q *sqlQuerier) FavoriteWorkspace(ctx context.Context, id uuid.UUID) error {
	_, err := q.db.ExecContext(ctx, favoriteWorkspace, id)
	return err
}
|
|
|
|
// getDeploymentWorkspaceStats computes deployment-wide workspace counts by
// bucketing each non-deleted workspace's latest build (highest
// build_number) on its provisioner job timestamps and transition:
// pending (job not started), building (started, not finished, recently
// updated), running/stopped (succeeded start/stop), and failed.
const getDeploymentWorkspaceStats = `-- name: GetDeploymentWorkspaceStats :one
WITH workspaces_with_jobs AS (
	SELECT
	latest_build.transition, latest_build.provisioner_job_id, latest_build.started_at, latest_build.updated_at, latest_build.canceled_at, latest_build.completed_at, latest_build.error FROM workspaces
	LEFT JOIN LATERAL (
		SELECT
			workspace_builds.transition,
			provisioner_jobs.id AS provisioner_job_id,
			provisioner_jobs.started_at,
			provisioner_jobs.updated_at,
			provisioner_jobs.canceled_at,
			provisioner_jobs.completed_at,
			provisioner_jobs.error
		FROM
			workspace_builds
		LEFT JOIN
			provisioner_jobs
		ON
			provisioner_jobs.id = workspace_builds.job_id
		WHERE
			workspace_builds.workspace_id = workspaces.id
		ORDER BY
			build_number DESC
		LIMIT
			1
	) latest_build ON TRUE WHERE deleted = false
), pending_workspaces AS (
	SELECT COUNT(*) AS count FROM workspaces_with_jobs WHERE
	started_at IS NULL
), building_workspaces AS (
	SELECT COUNT(*) AS count FROM workspaces_with_jobs WHERE
	started_at IS NOT NULL AND
	canceled_at IS NULL AND
	completed_at IS NULL AND
	updated_at - INTERVAL '30 seconds' < NOW()
), running_workspaces AS (
	SELECT COUNT(*) AS count FROM workspaces_with_jobs WHERE
	completed_at IS NOT NULL AND
	canceled_at IS NULL AND
	error IS NULL AND
	transition = 'start'::workspace_transition
), failed_workspaces AS (
	SELECT COUNT(*) AS count FROM workspaces_with_jobs WHERE
	(canceled_at IS NOT NULL AND
		error IS NOT NULL) OR
	(completed_at IS NOT NULL AND
		error IS NOT NULL)
), stopped_workspaces AS (
	SELECT COUNT(*) AS count FROM workspaces_with_jobs WHERE
	completed_at IS NOT NULL AND
	canceled_at IS NULL AND
	error IS NULL AND
	transition = 'stop'::workspace_transition
)
SELECT
	pending_workspaces.count AS pending_workspaces,
	building_workspaces.count AS building_workspaces,
	running_workspaces.count AS running_workspaces,
	failed_workspaces.count AS failed_workspaces,
	stopped_workspaces.count AS stopped_workspaces
FROM pending_workspaces, building_workspaces, running_workspaces, failed_workspaces, stopped_workspaces
`
|
|
|
|
// GetDeploymentWorkspaceStatsRow is the single result row of
// GetDeploymentWorkspaceStats: one count per workspace state bucket.
type GetDeploymentWorkspaceStatsRow struct {
	PendingWorkspaces  int64 `db:"pending_workspaces" json:"pending_workspaces"`
	BuildingWorkspaces int64 `db:"building_workspaces" json:"building_workspaces"`
	RunningWorkspaces  int64 `db:"running_workspaces" json:"running_workspaces"`
	FailedWorkspaces   int64 `db:"failed_workspaces" json:"failed_workspaces"`
	StoppedWorkspaces  int64 `db:"stopped_workspaces" json:"stopped_workspaces"`
}
|
|
|
|
// GetDeploymentWorkspaceStats returns deployment-wide workspace counts
// grouped by the state of each workspace's latest build (pending,
// building, running, failed, stopped).
func (q *sqlQuerier) GetDeploymentWorkspaceStats(ctx context.Context) (GetDeploymentWorkspaceStatsRow, error) {
	row := q.db.QueryRowContext(ctx, getDeploymentWorkspaceStats)
	var i GetDeploymentWorkspaceStatsRow
	err := row.Scan(
		&i.PendingWorkspaces,
		&i.BuildingWorkspaces,
		&i.RunningWorkspaces,
		&i.FailedWorkspaces,
		&i.StoppedWorkspaces,
	)
	return i, err
}
|
|
|
|
// getWorkspaceByAgentID resolves a workspace (plus its template name) from
// an agent ID by walking agent -> resource -> provisioner job -> workspace
// build -> workspace via nested subqueries.
const getWorkspaceByAgentID = `-- name: GetWorkspaceByAgentID :one
SELECT
	workspaces.id, workspaces.created_at, workspaces.updated_at, workspaces.owner_id, workspaces.organization_id, workspaces.template_id, workspaces.deleted, workspaces.name, workspaces.autostart_schedule, workspaces.ttl, workspaces.last_used_at, workspaces.dormant_at, workspaces.deleting_at, workspaces.automatic_updates, workspaces.favorite,
	templates.name as template_name
FROM
	workspaces
INNER JOIN
	templates ON workspaces.template_id = templates.id
WHERE
	workspaces.id = (
		SELECT
			workspace_id
		FROM
			workspace_builds
		WHERE
			workspace_builds.job_id = (
				SELECT
					job_id
				FROM
					workspace_resources
				WHERE
					workspace_resources.id = (
						SELECT
							resource_id
						FROM
							workspace_agents
						WHERE
							workspace_agents.id = $1
					)
			)
	)
`
|
|
|
|
// GetWorkspaceByAgentIDRow pairs the resolved workspace with the name of
// its template.
type GetWorkspaceByAgentIDRow struct {
	Workspace    Workspace `db:"workspace" json:"workspace"`
	TemplateName string    `db:"template_name" json:"template_name"`
}
|
|
|
|
// GetWorkspaceByAgentID returns the workspace (and its template name) that
// owns the given agent, resolved through the agent's resource, job and
// build.
//
// NOTE: the Scan targets must stay in the exact order of the columns in
// the generated SELECT list; do not reorder them by hand.
func (q *sqlQuerier) GetWorkspaceByAgentID(ctx context.Context, agentID uuid.UUID) (GetWorkspaceByAgentIDRow, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceByAgentID, agentID)
	var i GetWorkspaceByAgentIDRow
	err := row.Scan(
		&i.Workspace.ID,
		&i.Workspace.CreatedAt,
		&i.Workspace.UpdatedAt,
		&i.Workspace.OwnerID,
		&i.Workspace.OrganizationID,
		&i.Workspace.TemplateID,
		&i.Workspace.Deleted,
		&i.Workspace.Name,
		&i.Workspace.AutostartSchedule,
		&i.Workspace.Ttl,
		&i.Workspace.LastUsedAt,
		&i.Workspace.DormantAt,
		&i.Workspace.DeletingAt,
		&i.Workspace.AutomaticUpdates,
		&i.Workspace.Favorite,
		&i.TemplateName,
	)
	return i, err
}
|
|
|
|
// getWorkspaceByID fetches a single workspace by primary key.
const getWorkspaceByID = `-- name: GetWorkspaceByID :one
SELECT
	id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite
FROM
	workspaces
WHERE
	id = $1
LIMIT
	1
`
|
|
|
|
// GetWorkspaceByID returns the workspace with the given ID, or
// sql.ErrNoRows if it does not exist.
//
// NOTE: the Scan targets must stay in the exact order of the columns in
// the generated SELECT list; do not reorder them by hand.
func (q *sqlQuerier) GetWorkspaceByID(ctx context.Context, id uuid.UUID) (Workspace, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceByID, id)
	var i Workspace
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.OwnerID,
		&i.OrganizationID,
		&i.TemplateID,
		&i.Deleted,
		&i.Name,
		&i.AutostartSchedule,
		&i.Ttl,
		&i.LastUsedAt,
		&i.DormantAt,
		&i.DeletingAt,
		&i.AutomaticUpdates,
		&i.Favorite,
	)
	return i, err
}
|
|
|
|
// getWorkspaceByOwnerIDAndName finds a workspace by owner and
// case-insensitive name, filtered on the deleted flag. The query orders by
// created_at DESC and is executed as :one, so the most recently created
// match is the row returned.
const getWorkspaceByOwnerIDAndName = `-- name: GetWorkspaceByOwnerIDAndName :one
SELECT
	id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite
FROM
	workspaces
WHERE
	owner_id = $1
	AND deleted = $2
	AND LOWER("name") = LOWER($3)
ORDER BY created_at DESC
`
|
|
|
|
// GetWorkspaceByOwnerIDAndNameParams holds the bind parameters for
// GetWorkspaceByOwnerIDAndName. Name is matched case-insensitively.
type GetWorkspaceByOwnerIDAndNameParams struct {
	OwnerID uuid.UUID `db:"owner_id" json:"owner_id"`
	Deleted bool      `db:"deleted" json:"deleted"`
	Name    string    `db:"name" json:"name"`
}
|
|
|
|
// GetWorkspaceByOwnerIDAndName returns the owner's workspace with the
// given (case-insensitive) name and deleted flag. If several rows match,
// the most recently created one wins (ORDER BY created_at DESC in the SQL).
//
// NOTE: the Scan targets must stay in the exact order of the columns in
// the generated SELECT list; do not reorder them by hand.
func (q *sqlQuerier) GetWorkspaceByOwnerIDAndName(ctx context.Context, arg GetWorkspaceByOwnerIDAndNameParams) (Workspace, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceByOwnerIDAndName, arg.OwnerID, arg.Deleted, arg.Name)
	var i Workspace
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.OwnerID,
		&i.OrganizationID,
		&i.TemplateID,
		&i.Deleted,
		&i.Name,
		&i.AutostartSchedule,
		&i.Ttl,
		&i.LastUsedAt,
		&i.DormantAt,
		&i.DeletingAt,
		&i.AutomaticUpdates,
		&i.Favorite,
	)
	return i, err
}
|
|
|
|
// getWorkspaceByWorkspaceAppID resolves a workspace from a workspace app
// ID by walking app -> agent -> resource -> provisioner job -> workspace
// build -> workspace via nested subqueries.
const getWorkspaceByWorkspaceAppID = `-- name: GetWorkspaceByWorkspaceAppID :one
SELECT
	id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite
FROM
	workspaces
WHERE
	workspaces.id = (
		SELECT
			workspace_id
		FROM
			workspace_builds
		WHERE
			workspace_builds.job_id = (
				SELECT
					job_id
				FROM
					workspace_resources
				WHERE
					workspace_resources.id = (
						SELECT
							resource_id
						FROM
							workspace_agents
						WHERE
							workspace_agents.id = (
								SELECT
									agent_id
								FROM
									workspace_apps
								WHERE
									workspace_apps.id = $1
							)
					)
			)
	)
`
|
|
|
|
// GetWorkspaceByWorkspaceAppID returns the workspace that owns the given
// workspace app, resolved through the app's agent, resource, job and
// build.
//
// NOTE: the Scan targets must stay in the exact order of the columns in
// the generated SELECT list; do not reorder them by hand.
func (q *sqlQuerier) GetWorkspaceByWorkspaceAppID(ctx context.Context, workspaceAppID uuid.UUID) (Workspace, error) {
	row := q.db.QueryRowContext(ctx, getWorkspaceByWorkspaceAppID, workspaceAppID)
	var i Workspace
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.OwnerID,
		&i.OrganizationID,
		&i.TemplateID,
		&i.Deleted,
		&i.Name,
		&i.AutostartSchedule,
		&i.Ttl,
		&i.LastUsedAt,
		&i.DormantAt,
		&i.DeletingAt,
		&i.AutomaticUpdates,
		&i.Favorite,
	)
	return i, err
}
|
|
|
|
// getWorkspaceUniqueOwnerCountByTemplateIDs counts distinct owners of
// non-deleted workspaces per template, for the given set of template IDs.
// Templates with zero non-deleted workspaces produce no row.
const getWorkspaceUniqueOwnerCountByTemplateIDs = `-- name: GetWorkspaceUniqueOwnerCountByTemplateIDs :many
SELECT
	template_id, COUNT(DISTINCT owner_id) AS unique_owners_sum
FROM
	workspaces
WHERE
	template_id = ANY($1 :: uuid[]) AND deleted = false
GROUP BY template_id
`
|
|
|
|
// GetWorkspaceUniqueOwnerCountByTemplateIDsRow is one result row: a
// template and the number of distinct owners of its non-deleted
// workspaces.
type GetWorkspaceUniqueOwnerCountByTemplateIDsRow struct {
	TemplateID      uuid.UUID `db:"template_id" json:"template_id"`
	UniqueOwnersSum int64     `db:"unique_owners_sum" json:"unique_owners_sum"`
}
|
|
|
|
// GetWorkspaceUniqueOwnerCountByTemplateIDs returns, per requested
// template ID, the count of distinct users owning a non-deleted workspace
// of that template. Templates with no matching workspaces are absent from
// the result (GROUP BY semantics), not returned with a zero count.
func (q *sqlQuerier) GetWorkspaceUniqueOwnerCountByTemplateIDs(ctx context.Context, templateIds []uuid.UUID) ([]GetWorkspaceUniqueOwnerCountByTemplateIDsRow, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceUniqueOwnerCountByTemplateIDs, pq.Array(templateIds))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetWorkspaceUniqueOwnerCountByTemplateIDsRow
	for rows.Next() {
		var i GetWorkspaceUniqueOwnerCountByTemplateIDsRow
		if err := rows.Scan(&i.TemplateID, &i.UniqueOwnersSum); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err so drain errors are reported to the caller.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
// getWorkspaces is the main workspace listing query. Structure:
//   - build_params CTE: unnests the parallel name/value filter arrays
//     ($1/$2) so they can be joined against build parameters.
//   - filtered_workspaces CTE: joins each workspace to its owner, its
//     latest build (highest build_number, via LATERAL) and its template,
//     then applies every optional filter ($3..$18). Each filter is a CASE
//     that degenerates to `true` when its parameter is the zero value.
//   - filtered_workspaces_order CTE: sorts (owner's favorites first, then
//     running, then username/name) and applies LIMIT $21 / OFFSET $20.
//   - filtered_workspaces_order_with_summary CTE: optionally ($22) appends
//     a sentinel '**TECHNICAL_ROW**' row so callers can detect pages past
//     the end while still receiving the total count.
//   - total_count CTE: the pre-pagination row count, cross-joined onto
//     every returned row.
//
// The `-- @authorize_filter` marker is a splice point: GetAuthorizedWorkspaces
// injects an RBAC predicate there. Do not remove or relocate it.
const getWorkspaces = `-- name: GetWorkspaces :many
WITH
	build_params AS (
SELECT
	LOWER(unnest($1 :: text[])) AS name,
	LOWER(unnest($2 :: text[])) AS value
),
filtered_workspaces AS (
SELECT
	workspaces.id, workspaces.created_at, workspaces.updated_at, workspaces.owner_id, workspaces.organization_id, workspaces.template_id, workspaces.deleted, workspaces.name, workspaces.autostart_schedule, workspaces.ttl, workspaces.last_used_at, workspaces.dormant_at, workspaces.deleting_at, workspaces.automatic_updates, workspaces.favorite,
	COALESCE(template.name, 'unknown') as template_name,
	latest_build.template_version_id,
	latest_build.template_version_name,
	users.username as username,
	latest_build.completed_at as latest_build_completed_at,
	latest_build.canceled_at as latest_build_canceled_at,
	latest_build.error as latest_build_error,
	latest_build.transition as latest_build_transition,
	latest_build.job_status as latest_build_status
FROM
    workspaces
JOIN
    users
ON
    workspaces.owner_id = users.id
LEFT JOIN LATERAL (
	SELECT
		workspace_builds.id,
		workspace_builds.transition,
		workspace_builds.template_version_id,
		template_versions.name AS template_version_name,
		provisioner_jobs.id AS provisioner_job_id,
		provisioner_jobs.started_at,
		provisioner_jobs.updated_at,
		provisioner_jobs.canceled_at,
		provisioner_jobs.completed_at,
		provisioner_jobs.error,
		provisioner_jobs.job_status
	FROM
		workspace_builds
	JOIN
		provisioner_jobs
	ON
		provisioner_jobs.id = workspace_builds.job_id
	LEFT JOIN
		template_versions
	ON
		template_versions.id = workspace_builds.template_version_id
	WHERE
		workspace_builds.workspace_id = workspaces.id
	ORDER BY
		build_number DESC
	LIMIT
		1
) latest_build ON TRUE
LEFT JOIN LATERAL (
	SELECT
		id, created_at, updated_at, organization_id, deleted, name, provisioner, active_version_id, description, default_ttl, created_by, icon, user_acl, group_acl, display_name, allow_user_cancel_workspace_jobs, allow_user_autostart, allow_user_autostop, failure_ttl, time_til_dormant, time_til_dormant_autodelete, autostop_requirement_days_of_week, autostop_requirement_weeks, autostart_block_days_of_week, require_active_version, deprecated, activity_bump, max_port_sharing_level
	FROM
		templates
	WHERE
		templates.id = workspaces.template_id
) template ON true
WHERE
	-- Optionally include deleted workspaces
	workspaces.deleted = $3
	AND CASE
		WHEN $4 :: text != '' THEN
			CASE
				-- Some workspace specific status refer to the transition
				-- type. By default, the standard provisioner job status
				-- search strings are supported.
				-- 'running' states
				WHEN $4 = 'starting' THEN
					latest_build.job_status = 'running'::provisioner_job_status AND
					latest_build.transition = 'start'::workspace_transition
				WHEN $4 = 'stopping' THEN
					latest_build.job_status = 'running'::provisioner_job_status AND
					latest_build.transition = 'stop'::workspace_transition
				WHEN $4 = 'deleting' THEN
					latest_build.job_status = 'running' AND
					latest_build.transition = 'delete'::workspace_transition

				-- 'succeeded' states
				WHEN $4 = 'deleted' THEN
					latest_build.job_status = 'succeeded'::provisioner_job_status AND
					latest_build.transition = 'delete'::workspace_transition
				WHEN $4 = 'stopped' THEN
					latest_build.job_status = 'succeeded'::provisioner_job_status AND
					latest_build.transition = 'stop'::workspace_transition
				WHEN $4 = 'started' THEN
					latest_build.job_status = 'succeeded'::provisioner_job_status AND
					latest_build.transition = 'start'::workspace_transition

				-- Special case where the provisioner status and workspace status
				-- differ. A workspace is "running" if the job is "succeeded" and
				-- the transition is "start". This is because a workspace starts
				-- running when a job is complete.
				WHEN $4 = 'running' THEN
					latest_build.job_status = 'succeeded'::provisioner_job_status AND
					latest_build.transition = 'start'::workspace_transition

				WHEN $4 != '' THEN
					-- By default just match the job status exactly
					latest_build.job_status = $4::provisioner_job_status
				ELSE
					true
			END
		ELSE true
	END
	-- Filter by owner_id
	AND CASE
		WHEN $5 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
			workspaces.owner_id = $5
		ELSE true
	END
	-- Filter by organization_id
	AND CASE
		WHEN $6 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
			workspaces.organization_id = $6
		ELSE true
	END
	-- Filter by build parameter
	-- @has_param will match any build that includes the parameter.
	AND CASE WHEN array_length($7 :: text[], 1) > 0  THEN
		EXISTS (
			SELECT
				1
			FROM
				workspace_build_parameters
			WHERE
				workspace_build_parameters.workspace_build_id = latest_build.id AND
				-- ILIKE is case insensitive
				workspace_build_parameters.name ILIKE ANY($7)
		)
	ELSE true
	END
	-- @param_value will match param name an value.
	-- requires 2 arrays, @param_names and @param_values to be passed in.
	-- Array index must match between the 2 arrays for name=value
	AND CASE WHEN array_length($1 :: text[], 1) > 0  THEN
		EXISTS (
			SELECT
				1
			FROM
				workspace_build_parameters
			INNER JOIN
				build_params
			ON
				LOWER(workspace_build_parameters.name) = build_params.name AND
				LOWER(workspace_build_parameters.value) = build_params.value AND
				workspace_build_parameters.workspace_build_id = latest_build.id
		)
	ELSE true
	END

	-- Filter by owner_name
	AND CASE
		WHEN $8 :: text != '' THEN
			workspaces.owner_id = (SELECT id FROM users WHERE lower(username) = lower($8) AND deleted = false)
		ELSE true
	END
	-- Filter by template_name
	-- There can be more than 1 template with the same name across organizations.
	-- Use the organization filter to restrict to 1 org if needed.
	AND CASE
		WHEN $9 :: text != '' THEN
			workspaces.template_id = ANY(SELECT id FROM templates WHERE lower(name) = lower($9) AND deleted = false)
		ELSE true
	END
	-- Filter by template_ids
	AND CASE
		WHEN array_length($10 :: uuid[], 1) > 0 THEN
			workspaces.template_id = ANY($10)
		ELSE true
	END
	-- Filter by workspace_ids
	AND CASE
		WHEN array_length($11 :: uuid[], 1) > 0 THEN
			workspaces.id = ANY($11)
		ELSE true
	END
	-- Filter by name, matching on substring
	AND CASE
		WHEN $12 :: text != '' THEN
			workspaces.name ILIKE '%' || $12 || '%'
		ELSE true
	END
	-- Filter by agent status
	-- has-agent: is only applicable for workspaces in "start" transition. Stopped and deleted workspaces don't have agents.
	AND CASE
		WHEN $13 :: text != '' THEN
			(
				SELECT COUNT(*)
				FROM
					workspace_resources
				JOIN
					workspace_agents
				ON
					workspace_agents.resource_id = workspace_resources.id
				WHERE
					workspace_resources.job_id = latest_build.provisioner_job_id AND
					latest_build.transition = 'start'::workspace_transition AND
					$13 = (
						CASE
							WHEN workspace_agents.first_connected_at IS NULL THEN
								CASE
									WHEN workspace_agents.connection_timeout_seconds > 0 AND NOW() - workspace_agents.created_at > workspace_agents.connection_timeout_seconds * INTERVAL '1 second' THEN
										'timeout'
									ELSE
										'connecting'
								END
							WHEN workspace_agents.disconnected_at > workspace_agents.last_connected_at THEN
								'disconnected'
							WHEN NOW() - workspace_agents.last_connected_at > INTERVAL '1 second' * $14 :: bigint THEN
								'disconnected'
							WHEN workspace_agents.last_connected_at IS NOT NULL THEN
								'connected'
							ELSE
								NULL
						END
					)
			) > 0
		ELSE true
	END
	-- Filter by dormant workspaces.
	AND CASE
		WHEN $15 :: boolean != 'false' THEN
			dormant_at IS NOT NULL
		ELSE true
	END
	-- Filter by last_used
	AND CASE
		  WHEN $16 :: timestamp with time zone > '0001-01-01 00:00:00Z' THEN
				  workspaces.last_used_at <= $16
		  ELSE true
	END
	AND CASE
		  WHEN $17 :: timestamp with time zone > '0001-01-01 00:00:00Z' THEN
				  workspaces.last_used_at >= $17
		  ELSE true
	END
	AND CASE
		  WHEN $18 :: boolean IS NOT NULL THEN
			  (latest_build.template_version_id = template.active_version_id) = $18 :: boolean
		  ELSE true
	END
	-- Authorize Filter clause will be injected below in GetAuthorizedWorkspaces
	-- @authorize_filter
), filtered_workspaces_order AS (
	SELECT
		fw.id, fw.created_at, fw.updated_at, fw.owner_id, fw.organization_id, fw.template_id, fw.deleted, fw.name, fw.autostart_schedule, fw.ttl, fw.last_used_at, fw.dormant_at, fw.deleting_at, fw.automatic_updates, fw.favorite, fw.template_name, fw.template_version_id, fw.template_version_name, fw.username, fw.latest_build_completed_at, fw.latest_build_canceled_at, fw.latest_build_error, fw.latest_build_transition, fw.latest_build_status
	FROM
		filtered_workspaces fw
	ORDER BY
		-- To ensure that 'favorite' workspaces show up first in the list only for their owner.
		CASE WHEN owner_id = $19 AND favorite THEN 0 ELSE 1 END ASC,
		(latest_build_completed_at IS NOT NULL AND
			latest_build_canceled_at IS NULL AND
			latest_build_error IS NULL AND
			latest_build_transition = 'start'::workspace_transition) DESC,
		LOWER(username) ASC,
		LOWER(name) ASC
	LIMIT
		CASE
			WHEN $21 :: integer > 0 THEN
				$21
		END
	OFFSET
		$20
), filtered_workspaces_order_with_summary AS (
	SELECT
		fwo.id, fwo.created_at, fwo.updated_at, fwo.owner_id, fwo.organization_id, fwo.template_id, fwo.deleted, fwo.name, fwo.autostart_schedule, fwo.ttl, fwo.last_used_at, fwo.dormant_at, fwo.deleting_at, fwo.automatic_updates, fwo.favorite, fwo.template_name, fwo.template_version_id, fwo.template_version_name, fwo.username, fwo.latest_build_completed_at, fwo.latest_build_canceled_at, fwo.latest_build_error, fwo.latest_build_transition, fwo.latest_build_status
	FROM
		filtered_workspaces_order fwo
	-- Return a technical summary row with total count of workspaces.
	-- It is used to present the correct count if pagination goes beyond the offset.
	UNION ALL
	SELECT
		'00000000-0000-0000-0000-000000000000'::uuid, -- id
		'0001-01-01 00:00:00+00'::timestamptz, -- created_at
		'0001-01-01 00:00:00+00'::timestamptz, -- updated_at
		'00000000-0000-0000-0000-000000000000'::uuid, -- owner_id
		'00000000-0000-0000-0000-000000000000'::uuid, -- organization_id
		'00000000-0000-0000-0000-000000000000'::uuid, -- template_id
		false, -- deleted
		'**TECHNICAL_ROW**', -- name
		'', -- autostart_schedule
		0, -- ttl
		'0001-01-01 00:00:00+00'::timestamptz, -- last_used_at
		'0001-01-01 00:00:00+00'::timestamptz, -- dormant_at
		'0001-01-01 00:00:00+00'::timestamptz, -- deleting_at
		'never'::automatic_updates, -- automatic_updates
		false, -- favorite
		-- Extra columns added to ` + "`" + `filtered_workspaces` + "`" + `
		'', -- template_name
		'00000000-0000-0000-0000-000000000000'::uuid, -- template_version_id
		'', -- template_version_name
		'', -- username
		'0001-01-01 00:00:00+00'::timestamptz, -- latest_build_completed_at,
		'0001-01-01 00:00:00+00'::timestamptz, -- latest_build_canceled_at,
		'', -- latest_build_error
		'start'::workspace_transition, -- latest_build_transition
		'unknown'::provisioner_job_status -- latest_build_status
	WHERE
		$22 :: boolean = true
), total_count AS (
	SELECT
		count(*) AS count
    FROM
		filtered_workspaces
)
SELECT
	fwos.id, fwos.created_at, fwos.updated_at, fwos.owner_id, fwos.organization_id, fwos.template_id, fwos.deleted, fwos.name, fwos.autostart_schedule, fwos.ttl, fwos.last_used_at, fwos.dormant_at, fwos.deleting_at, fwos.automatic_updates, fwos.favorite, fwos.template_name, fwos.template_version_id, fwos.template_version_name, fwos.username, fwos.latest_build_completed_at, fwos.latest_build_canceled_at, fwos.latest_build_error, fwos.latest_build_transition, fwos.latest_build_status,
	tc.count
FROM
	filtered_workspaces_order_with_summary fwos
CROSS JOIN
	total_count tc
`
|
|
|
|
// GetWorkspacesParams holds every filter, ordering and pagination knob of
// the GetWorkspaces query. In the generated SQL each filter is skipped
// when its parameter is the zero value ('' for strings, the nil UUID,
// empty arrays, zero time, invalid NullBool), so the zero-value struct
// returns all non-deleted workspaces.
type GetWorkspacesParams struct {
	// ParamNames/ParamValues are parallel arrays; index i of each forms one
	// name=value build-parameter filter (both compared case-insensitively).
	ParamNames     []string  `db:"param_names" json:"param_names"`
	ParamValues    []string  `db:"param_values" json:"param_values"`
	Deleted        bool      `db:"deleted" json:"deleted"`
	Status         string    `db:"status" json:"status"`
	OwnerID        uuid.UUID `db:"owner_id" json:"owner_id"`
	OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
	// HasParam matches any latest build that has a parameter whose name
	// ILIKE-matches one of these patterns.
	HasParam      []string    `db:"has_param" json:"has_param"`
	OwnerUsername string      `db:"owner_username" json:"owner_username"`
	TemplateName  string      `db:"template_name" json:"template_name"`
	TemplateIDs   []uuid.UUID `db:"template_ids" json:"template_ids"`
	WorkspaceIds  []uuid.UUID `db:"workspace_ids" json:"workspace_ids"`
	// Name is matched as a case-insensitive substring.
	Name string `db:"name" json:"name"`
	// HasAgent filters on derived agent status: 'connecting', 'timeout',
	// 'connected' or 'disconnected' (see the CASE in the SQL).
	HasAgent                              string `db:"has_agent" json:"has_agent"`
	AgentInactiveDisconnectTimeoutSeconds int64  `db:"agent_inactive_disconnect_timeout_seconds" json:"agent_inactive_disconnect_timeout_seconds"`
	Dormant                               bool   `db:"dormant" json:"dormant"`
	LastUsedBefore                        time.Time    `db:"last_used_before" json:"last_used_before"`
	LastUsedAfter                         time.Time    `db:"last_used_after" json:"last_used_after"`
	UsingActive                           sql.NullBool `db:"using_active" json:"using_active"`
	// RequesterID makes the requester's own favorites sort first.
	RequesterID uuid.UUID `db:"requester_id" json:"requester_id"`
	Offset      int32     `db:"offset_" json:"offset_"`
	// Limit <= 0 means no limit (the SQL CASE yields NULL).
	Limit int32 `db:"limit_" json:"limit_"`
	// WithSummary appends the '**TECHNICAL_ROW**' sentinel row.
	WithSummary bool `db:"with_summary" json:"with_summary"`
}
|
|
|
|
// GetWorkspacesRow is one result row of GetWorkspaces: the workspace
// columns, denormalized template/owner/latest-build info, and Count, the
// total number of matching workspaces before pagination (identical on
// every row via the CROSS JOIN with total_count).
type GetWorkspacesRow struct {
	ID                     uuid.UUID            `db:"id" json:"id"`
	CreatedAt              time.Time            `db:"created_at" json:"created_at"`
	UpdatedAt              time.Time            `db:"updated_at" json:"updated_at"`
	OwnerID                uuid.UUID            `db:"owner_id" json:"owner_id"`
	OrganizationID         uuid.UUID            `db:"organization_id" json:"organization_id"`
	TemplateID             uuid.UUID            `db:"template_id" json:"template_id"`
	Deleted                bool                 `db:"deleted" json:"deleted"`
	Name                   string               `db:"name" json:"name"`
	AutostartSchedule      sql.NullString       `db:"autostart_schedule" json:"autostart_schedule"`
	Ttl                    sql.NullInt64        `db:"ttl" json:"ttl"`
	LastUsedAt             time.Time            `db:"last_used_at" json:"last_used_at"`
	DormantAt              sql.NullTime         `db:"dormant_at" json:"dormant_at"`
	DeletingAt             sql.NullTime         `db:"deleting_at" json:"deleting_at"`
	AutomaticUpdates       AutomaticUpdates     `db:"automatic_updates" json:"automatic_updates"`
	Favorite               bool                 `db:"favorite" json:"favorite"`
	TemplateName           string               `db:"template_name" json:"template_name"`
	TemplateVersionID      uuid.UUID            `db:"template_version_id" json:"template_version_id"`
	TemplateVersionName    sql.NullString       `db:"template_version_name" json:"template_version_name"`
	Username               string               `db:"username" json:"username"`
	LatestBuildCompletedAt sql.NullTime         `db:"latest_build_completed_at" json:"latest_build_completed_at"`
	LatestBuildCanceledAt  sql.NullTime         `db:"latest_build_canceled_at" json:"latest_build_canceled_at"`
	LatestBuildError       sql.NullString       `db:"latest_build_error" json:"latest_build_error"`
	LatestBuildTransition  WorkspaceTransition  `db:"latest_build_transition" json:"latest_build_transition"`
	LatestBuildStatus      ProvisionerJobStatus `db:"latest_build_status" json:"latest_build_status"`
	Count                  int64                `db:"count" json:"count"`
}
|
|
|
|
// build_params is used to filter by build parameters if present.
// It has to be a CTE because the set returning function 'unnest' cannot
// be used in a WHERE clause.
//
// GetWorkspaces returns a filtered, sorted and paginated page of
// workspaces together with denormalized latest-build information and the
// pre-pagination total count on every row.
//
// NOTE: both the positional query arguments ($1..$22) and the Scan
// targets are generated to match the SQL exactly; do not reorder them by
// hand.
func (q *sqlQuerier) GetWorkspaces(ctx context.Context, arg GetWorkspacesParams) ([]GetWorkspacesRow, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaces,
		pq.Array(arg.ParamNames),
		pq.Array(arg.ParamValues),
		arg.Deleted,
		arg.Status,
		arg.OwnerID,
		arg.OrganizationID,
		pq.Array(arg.HasParam),
		arg.OwnerUsername,
		arg.TemplateName,
		pq.Array(arg.TemplateIDs),
		pq.Array(arg.WorkspaceIds),
		arg.Name,
		arg.HasAgent,
		arg.AgentInactiveDisconnectTimeoutSeconds,
		arg.Dormant,
		arg.LastUsedBefore,
		arg.LastUsedAfter,
		arg.UsingActive,
		arg.RequesterID,
		arg.Offset,
		arg.Limit,
		arg.WithSummary,
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []GetWorkspacesRow
	for rows.Next() {
		var i GetWorkspacesRow
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.OwnerID,
			&i.OrganizationID,
			&i.TemplateID,
			&i.Deleted,
			&i.Name,
			&i.AutostartSchedule,
			&i.Ttl,
			&i.LastUsedAt,
			&i.DormantAt,
			&i.DeletingAt,
			&i.AutomaticUpdates,
			&i.Favorite,
			&i.TemplateName,
			&i.TemplateVersionID,
			&i.TemplateVersionName,
			&i.Username,
			&i.LatestBuildCompletedAt,
			&i.LatestBuildCanceledAt,
			&i.LatestBuildError,
			&i.LatestBuildTransition,
			&i.LatestBuildStatus,
			&i.Count,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	// Explicit Close before Err so drain errors are reported to the caller.
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getWorkspacesEligibleForTransition = `-- name: GetWorkspacesEligibleForTransition :many
SELECT
	workspaces.id, workspaces.created_at, workspaces.updated_at, workspaces.owner_id, workspaces.organization_id, workspaces.template_id, workspaces.deleted, workspaces.name, workspaces.autostart_schedule, workspaces.ttl, workspaces.last_used_at, workspaces.dormant_at, workspaces.deleting_at, workspaces.automatic_updates, workspaces.favorite
FROM
	workspaces
LEFT JOIN
	workspace_builds ON workspace_builds.workspace_id = workspaces.id
INNER JOIN
	provisioner_jobs ON workspace_builds.job_id = provisioner_jobs.id
INNER JOIN
	templates ON workspaces.template_id = templates.id
INNER JOIN
	users ON workspaces.owner_id = users.id
WHERE
	workspace_builds.build_number = (
		SELECT
			MAX(build_number)
		FROM
			workspace_builds
		WHERE
			workspace_builds.workspace_id = workspaces.id
	) AND

	(
		-- If the workspace build was a start transition, the workspace is
		-- potentially eligible for autostop if it's past the deadline. The
		-- deadline is computed at build time upon success and is bumped based
		-- on activity (up the max deadline if set). We don't need to check
		-- license here since that's done when the values are written to the build.
		(
			workspace_builds.transition = 'start'::workspace_transition AND
			workspace_builds.deadline IS NOT NULL AND
			workspace_builds.deadline < $1 :: timestamptz
		) OR

		-- If the workspace build was a stop transition, the workspace is
		-- potentially eligible for autostart if it has a schedule set. The
		-- caller must check if the template allows autostart in a license-aware
		-- fashion as we cannot check it here.
		(
			workspace_builds.transition = 'stop'::workspace_transition AND
			workspaces.autostart_schedule IS NOT NULL
		) OR

		-- If the workspace's most recent job resulted in an error
		-- it may be eligible for failed stop.
		(
			provisioner_jobs.error IS NOT NULL AND
			provisioner_jobs.error != '' AND
			workspace_builds.transition = 'start'::workspace_transition
		) OR

		-- If the workspace's template has an inactivity_ttl set
		-- it may be eligible for dormancy.
		(
			templates.time_til_dormant > 0 AND
			workspaces.dormant_at IS NULL
		) OR

		-- If the workspace's template has a time_til_dormant_autodelete set
		-- and the workspace is already dormant.
		(
			templates.time_til_dormant_autodelete > 0 AND
			workspaces.dormant_at IS NOT NULL
		) OR

		-- If the user account is suspended, and the workspace is running.
		(
			users.status = 'suspended'::user_status AND
			workspace_builds.transition = 'start'::workspace_transition
		)
	) AND workspaces.deleted = 'false'
`

// GetWorkspacesEligibleForTransition returns all non-deleted workspaces whose
// latest build makes them a candidate for a lifecycle transition: autostop
// (deadline before `now`), autostart (stopped with a schedule), failed start,
// dormancy, dormant auto-delete, or running with a suspended owner. The SQL
// comments above document each condition; license-aware checks (e.g. whether
// the template allows autostart) are the caller's responsibility.
func (q *sqlQuerier) GetWorkspacesEligibleForTransition(ctx context.Context, now time.Time) ([]Workspace, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspacesEligibleForTransition, now)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []Workspace
	for rows.Next() {
		var i Workspace
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.OwnerID,
			&i.OrganizationID,
			&i.TemplateID,
			&i.Deleted,
			&i.Name,
			&i.AutostartSchedule,
			&i.Ttl,
			&i.LastUsedAt,
			&i.DormantAt,
			&i.DeletingAt,
			&i.AutomaticUpdates,
			&i.Favorite,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const insertWorkspace = `-- name: InsertWorkspace :one
INSERT INTO
	workspaces (
		id,
		created_at,
		updated_at,
		owner_id,
		organization_id,
		template_id,
		name,
		autostart_schedule,
		ttl,
		last_used_at,
		automatic_updates
	)
VALUES
	($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11) RETURNING id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite
`

// InsertWorkspaceParams carries the column values for InsertWorkspace.
// Columns not listed here (deleted, dormant_at, deleting_at, favorite) take
// their database defaults on insert.
type InsertWorkspaceParams struct {
	ID                uuid.UUID         `db:"id" json:"id"`
	CreatedAt         time.Time         `db:"created_at" json:"created_at"`
	UpdatedAt         time.Time         `db:"updated_at" json:"updated_at"`
	OwnerID           uuid.UUID         `db:"owner_id" json:"owner_id"`
	OrganizationID    uuid.UUID         `db:"organization_id" json:"organization_id"`
	TemplateID        uuid.UUID         `db:"template_id" json:"template_id"`
	Name              string            `db:"name" json:"name"`
	AutostartSchedule sql.NullString    `db:"autostart_schedule" json:"autostart_schedule"`
	Ttl               sql.NullInt64     `db:"ttl" json:"ttl"`
	LastUsedAt        time.Time         `db:"last_used_at" json:"last_used_at"`
	AutomaticUpdates  AutomaticUpdates  `db:"automatic_updates" json:"automatic_updates"`
}

// InsertWorkspace creates a new workspace row and returns the complete
// inserted record (including the columns populated by database defaults).
func (q *sqlQuerier) InsertWorkspace(ctx context.Context, arg InsertWorkspaceParams) (Workspace, error) {
	row := q.db.QueryRowContext(ctx, insertWorkspace,
		arg.ID,
		arg.CreatedAt,
		arg.UpdatedAt,
		arg.OwnerID,
		arg.OrganizationID,
		arg.TemplateID,
		arg.Name,
		arg.AutostartSchedule,
		arg.Ttl,
		arg.LastUsedAt,
		arg.AutomaticUpdates,
	)
	var i Workspace
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.OwnerID,
		&i.OrganizationID,
		&i.TemplateID,
		&i.Deleted,
		&i.Name,
		&i.AutostartSchedule,
		&i.Ttl,
		&i.LastUsedAt,
		&i.DormantAt,
		&i.DeletingAt,
		&i.AutomaticUpdates,
		&i.Favorite,
	)
	return i, err
}
|
|
|
|
const unfavoriteWorkspace = `-- name: UnfavoriteWorkspace :exec
UPDATE workspaces SET favorite = false WHERE id = $1
`

// UnfavoriteWorkspace clears the favorite flag on the workspace with the
// given ID. It does not report whether a row was actually updated.
func (q *sqlQuerier) UnfavoriteWorkspace(ctx context.Context, id uuid.UUID) error {
	_, err := q.db.ExecContext(ctx, unfavoriteWorkspace, id)
	return err
}
|
|
|
|
const updateTemplateWorkspacesLastUsedAt = `-- name: UpdateTemplateWorkspacesLastUsedAt :exec
UPDATE workspaces
SET
	last_used_at = $1::timestamptz
WHERE
	template_id = $2
`

// UpdateTemplateWorkspacesLastUsedAtParams carries the arguments for
// UpdateTemplateWorkspacesLastUsedAt.
type UpdateTemplateWorkspacesLastUsedAtParams struct {
	LastUsedAt time.Time `db:"last_used_at" json:"last_used_at"`
	TemplateID uuid.UUID `db:"template_id" json:"template_id"`
}

// UpdateTemplateWorkspacesLastUsedAt bulk-updates last_used_at for every
// workspace belonging to the given template.
func (q *sqlQuerier) UpdateTemplateWorkspacesLastUsedAt(ctx context.Context, arg UpdateTemplateWorkspacesLastUsedAtParams) error {
	_, err := q.db.ExecContext(ctx, updateTemplateWorkspacesLastUsedAt, arg.LastUsedAt, arg.TemplateID)
	return err
}
|
|
|
|
const updateWorkspace = `-- name: UpdateWorkspace :one
UPDATE
	workspaces
SET
	name = $2
WHERE
	id = $1
	AND deleted = false
RETURNING id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite
`

// UpdateWorkspaceParams carries the arguments for UpdateWorkspace.
type UpdateWorkspaceParams struct {
	ID   uuid.UUID `db:"id" json:"id"`
	Name string    `db:"name" json:"name"`
}

// UpdateWorkspace renames a non-deleted workspace and returns the updated
// row. If the workspace does not exist or is soft-deleted, Scan returns
// sql.ErrNoRows.
func (q *sqlQuerier) UpdateWorkspace(ctx context.Context, arg UpdateWorkspaceParams) (Workspace, error) {
	row := q.db.QueryRowContext(ctx, updateWorkspace, arg.ID, arg.Name)
	var i Workspace
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.OwnerID,
		&i.OrganizationID,
		&i.TemplateID,
		&i.Deleted,
		&i.Name,
		&i.AutostartSchedule,
		&i.Ttl,
		&i.LastUsedAt,
		&i.DormantAt,
		&i.DeletingAt,
		&i.AutomaticUpdates,
		&i.Favorite,
	)
	return i, err
}
|
|
|
|
const updateWorkspaceAutomaticUpdates = `-- name: UpdateWorkspaceAutomaticUpdates :exec
UPDATE
	workspaces
SET
	automatic_updates = $2
WHERE
	id = $1
`

// UpdateWorkspaceAutomaticUpdatesParams carries the arguments for
// UpdateWorkspaceAutomaticUpdates.
type UpdateWorkspaceAutomaticUpdatesParams struct {
	ID               uuid.UUID        `db:"id" json:"id"`
	AutomaticUpdates AutomaticUpdates `db:"automatic_updates" json:"automatic_updates"`
}

// UpdateWorkspaceAutomaticUpdates sets the automatic_updates policy on the
// workspace with the given ID.
func (q *sqlQuerier) UpdateWorkspaceAutomaticUpdates(ctx context.Context, arg UpdateWorkspaceAutomaticUpdatesParams) error {
	_, err := q.db.ExecContext(ctx, updateWorkspaceAutomaticUpdates, arg.ID, arg.AutomaticUpdates)
	return err
}
|
|
|
|
const updateWorkspaceAutostart = `-- name: UpdateWorkspaceAutostart :exec
UPDATE
	workspaces
SET
	autostart_schedule = $2
WHERE
	id = $1
`

// UpdateWorkspaceAutostartParams carries the arguments for
// UpdateWorkspaceAutostart. A null AutostartSchedule clears the schedule.
type UpdateWorkspaceAutostartParams struct {
	ID                uuid.UUID      `db:"id" json:"id"`
	AutostartSchedule sql.NullString `db:"autostart_schedule" json:"autostart_schedule"`
}

// UpdateWorkspaceAutostart sets (or clears, when the value is null) the
// autostart schedule on the workspace with the given ID.
func (q *sqlQuerier) UpdateWorkspaceAutostart(ctx context.Context, arg UpdateWorkspaceAutostartParams) error {
	_, err := q.db.ExecContext(ctx, updateWorkspaceAutostart, arg.ID, arg.AutostartSchedule)
	return err
}
|
|
|
|
const updateWorkspaceDeletedByID = `-- name: UpdateWorkspaceDeletedByID :exec
UPDATE
	workspaces
SET
	deleted = $2
WHERE
	id = $1
`

// UpdateWorkspaceDeletedByIDParams carries the arguments for
// UpdateWorkspaceDeletedByID.
type UpdateWorkspaceDeletedByIDParams struct {
	ID      uuid.UUID `db:"id" json:"id"`
	Deleted bool      `db:"deleted" json:"deleted"`
}

// UpdateWorkspaceDeletedByID sets the soft-delete marker on the workspace
// with the given ID; no rows are physically removed.
func (q *sqlQuerier) UpdateWorkspaceDeletedByID(ctx context.Context, arg UpdateWorkspaceDeletedByIDParams) error {
	_, err := q.db.ExecContext(ctx, updateWorkspaceDeletedByID, arg.ID, arg.Deleted)
	return err
}
|
|
|
|
const updateWorkspaceDormantDeletingAt = `-- name: UpdateWorkspaceDormantDeletingAt :one
UPDATE
	workspaces
SET
	dormant_at = $2,
	-- When a workspace is active we want to update the last_used_at to avoid the workspace going
	-- immediately dormant. If we're transition the workspace to dormant then we leave it alone.
	last_used_at = CASE WHEN $2::timestamptz IS NULL THEN
		now() at time zone 'utc'
	ELSE
		last_used_at
	END,
	-- If dormant_at is null (meaning active) or the template-defined time_til_dormant_autodelete is 0 we should set
	-- deleting_at to NULL else set it to the dormant_at + time_til_dormant_autodelete duration.
	deleting_at = CASE WHEN $2::timestamptz IS NULL OR templates.time_til_dormant_autodelete = 0 THEN
		NULL
	ELSE
		$2::timestamptz + (INTERVAL '1 millisecond' * (templates.time_til_dormant_autodelete / 1000000))
	END
FROM
	templates
WHERE
	workspaces.id = $1
	AND templates.id = workspaces.template_id
RETURNING
	workspaces.id, workspaces.created_at, workspaces.updated_at, workspaces.owner_id, workspaces.organization_id, workspaces.template_id, workspaces.deleted, workspaces.name, workspaces.autostart_schedule, workspaces.ttl, workspaces.last_used_at, workspaces.dormant_at, workspaces.deleting_at, workspaces.automatic_updates, workspaces.favorite
`

// UpdateWorkspaceDormantDeletingAtParams carries the arguments for
// UpdateWorkspaceDormantDeletingAt. A null DormantAt marks the workspace as
// active again.
type UpdateWorkspaceDormantDeletingAtParams struct {
	ID        uuid.UUID    `db:"id" json:"id"`
	DormantAt sql.NullTime `db:"dormant_at" json:"dormant_at"`
}

// UpdateWorkspaceDormantDeletingAt sets (or clears) a workspace's dormant_at
// and recomputes its deleting_at from the template's
// time_til_dormant_autodelete (stored in nanoseconds; divided down to
// milliseconds in the SQL). Clearing dormancy also bumps last_used_at so the
// workspace does not immediately go dormant again. Returns the updated row.
func (q *sqlQuerier) UpdateWorkspaceDormantDeletingAt(ctx context.Context, arg UpdateWorkspaceDormantDeletingAtParams) (Workspace, error) {
	row := q.db.QueryRowContext(ctx, updateWorkspaceDormantDeletingAt, arg.ID, arg.DormantAt)
	var i Workspace
	err := row.Scan(
		&i.ID,
		&i.CreatedAt,
		&i.UpdatedAt,
		&i.OwnerID,
		&i.OrganizationID,
		&i.TemplateID,
		&i.Deleted,
		&i.Name,
		&i.AutostartSchedule,
		&i.Ttl,
		&i.LastUsedAt,
		&i.DormantAt,
		&i.DeletingAt,
		&i.AutomaticUpdates,
		&i.Favorite,
	)
	return i, err
}
|
|
|
|
const updateWorkspaceLastUsedAt = `-- name: UpdateWorkspaceLastUsedAt :exec
UPDATE
	workspaces
SET
	last_used_at = $2
WHERE
	id = $1
`

// UpdateWorkspaceLastUsedAtParams carries the arguments for
// UpdateWorkspaceLastUsedAt.
type UpdateWorkspaceLastUsedAtParams struct {
	ID         uuid.UUID `db:"id" json:"id"`
	LastUsedAt time.Time `db:"last_used_at" json:"last_used_at"`
}

// UpdateWorkspaceLastUsedAt sets last_used_at on the workspace with the
// given ID.
func (q *sqlQuerier) UpdateWorkspaceLastUsedAt(ctx context.Context, arg UpdateWorkspaceLastUsedAtParams) error {
	_, err := q.db.ExecContext(ctx, updateWorkspaceLastUsedAt, arg.ID, arg.LastUsedAt)
	return err
}
|
|
|
|
const updateWorkspaceTTL = `-- name: UpdateWorkspaceTTL :exec
UPDATE
	workspaces
SET
	ttl = $2
WHERE
	id = $1
`

// UpdateWorkspaceTTLParams carries the arguments for UpdateWorkspaceTTL.
// A null Ttl clears the workspace's TTL.
type UpdateWorkspaceTTLParams struct {
	ID  uuid.UUID     `db:"id" json:"id"`
	Ttl sql.NullInt64 `db:"ttl" json:"ttl"`
}

// UpdateWorkspaceTTL sets (or clears, when the value is null) the TTL on the
// workspace with the given ID.
func (q *sqlQuerier) UpdateWorkspaceTTL(ctx context.Context, arg UpdateWorkspaceTTLParams) error {
	_, err := q.db.ExecContext(ctx, updateWorkspaceTTL, arg.ID, arg.Ttl)
	return err
}
|
|
|
|
const updateWorkspacesDormantDeletingAtByTemplateID = `-- name: UpdateWorkspacesDormantDeletingAtByTemplateID :many
UPDATE workspaces
SET
	deleting_at = CASE
		WHEN $1::bigint = 0 THEN NULL
		WHEN $2::timestamptz > '0001-01-01 00:00:00+00'::timestamptz THEN ($2::timestamptz) + interval '1 milliseconds' * $1::bigint
		ELSE dormant_at + interval '1 milliseconds' * $1::bigint
	END,
	dormant_at = CASE WHEN $2::timestamptz > '0001-01-01 00:00:00+00'::timestamptz THEN $2::timestamptz ELSE dormant_at END
WHERE
	template_id = $3
AND
	dormant_at IS NOT NULL
RETURNING id, created_at, updated_at, owner_id, organization_id, template_id, deleted, name, autostart_schedule, ttl, last_used_at, dormant_at, deleting_at, automatic_updates, favorite
`

// UpdateWorkspacesDormantDeletingAtByTemplateIDParams carries the arguments
// for UpdateWorkspacesDormantDeletingAtByTemplateID. A zero DormantAt (the
// Go time.Time zero value) leaves each row's existing dormant_at untouched;
// a TimeTilDormantAutodeleteMs of 0 clears deleting_at.
type UpdateWorkspacesDormantDeletingAtByTemplateIDParams struct {
	TimeTilDormantAutodeleteMs int64     `db:"time_til_dormant_autodelete_ms" json:"time_til_dormant_autodelete_ms"`
	DormantAt                  time.Time `db:"dormant_at" json:"dormant_at"`
	TemplateID                 uuid.UUID `db:"template_id" json:"template_id"`
}

// UpdateWorkspacesDormantDeletingAtByTemplateID recomputes deleting_at (and
// optionally dormant_at) for every already-dormant workspace of the given
// template, returning the updated rows.
func (q *sqlQuerier) UpdateWorkspacesDormantDeletingAtByTemplateID(ctx context.Context, arg UpdateWorkspacesDormantDeletingAtByTemplateIDParams) ([]Workspace, error) {
	rows, err := q.db.QueryContext(ctx, updateWorkspacesDormantDeletingAtByTemplateID, arg.TimeTilDormantAutodeleteMs, arg.DormantAt, arg.TemplateID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []Workspace
	for rows.Next() {
		var i Workspace
		if err := rows.Scan(
			&i.ID,
			&i.CreatedAt,
			&i.UpdatedAt,
			&i.OwnerID,
			&i.OrganizationID,
			&i.TemplateID,
			&i.Deleted,
			&i.Name,
			&i.AutostartSchedule,
			&i.Ttl,
			&i.LastUsedAt,
			&i.DormantAt,
			&i.DeletingAt,
			&i.AutomaticUpdates,
			&i.Favorite,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const getWorkspaceAgentScriptsByAgentIDs = `-- name: GetWorkspaceAgentScriptsByAgentIDs :many
SELECT workspace_agent_id, log_source_id, log_path, created_at, script, cron, start_blocks_login, run_on_start, run_on_stop, timeout_seconds FROM workspace_agent_scripts WHERE workspace_agent_id = ANY($1 :: uuid [ ])
`

// GetWorkspaceAgentScriptsByAgentIDs returns every script row belonging to
// any of the given agent IDs (passed as a single Postgres uuid[] parameter).
func (q *sqlQuerier) GetWorkspaceAgentScriptsByAgentIDs(ctx context.Context, ids []uuid.UUID) ([]WorkspaceAgentScript, error) {
	rows, err := q.db.QueryContext(ctx, getWorkspaceAgentScriptsByAgentIDs, pq.Array(ids))
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceAgentScript
	for rows.Next() {
		var i WorkspaceAgentScript
		if err := rows.Scan(
			&i.WorkspaceAgentID,
			&i.LogSourceID,
			&i.LogPath,
			&i.CreatedAt,
			&i.Script,
			&i.Cron,
			&i.StartBlocksLogin,
			&i.RunOnStart,
			&i.RunOnStop,
			&i.TimeoutSeconds,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|
|
|
|
const insertWorkspaceAgentScripts = `-- name: InsertWorkspaceAgentScripts :many
INSERT INTO
	workspace_agent_scripts (workspace_agent_id, created_at, log_source_id, log_path, script, cron, start_blocks_login, run_on_start, run_on_stop, timeout_seconds)
SELECT
	$1 :: uuid AS workspace_agent_id,
	$2 :: timestamptz AS created_at,
	unnest($3 :: uuid [ ]) AS log_source_id,
	unnest($4 :: text [ ]) AS log_path,
	unnest($5 :: text [ ]) AS script,
	unnest($6 :: text [ ]) AS cron,
	unnest($7 :: boolean [ ]) AS start_blocks_login,
	unnest($8 :: boolean [ ]) AS run_on_start,
	unnest($9 :: boolean [ ]) AS run_on_stop,
	unnest($10 :: integer [ ]) AS timeout_seconds
RETURNING workspace_agent_scripts.workspace_agent_id, workspace_agent_scripts.log_source_id, workspace_agent_scripts.log_path, workspace_agent_scripts.created_at, workspace_agent_scripts.script, workspace_agent_scripts.cron, workspace_agent_scripts.start_blocks_login, workspace_agent_scripts.run_on_start, workspace_agent_scripts.run_on_stop, workspace_agent_scripts.timeout_seconds
`

// InsertWorkspaceAgentScriptsParams carries the arguments for
// InsertWorkspaceAgentScripts. The slice fields are unnested in parallel by
// the SQL, one inserted row per index, so all slices are expected to have the
// same length — NOTE(review): the query does not enforce this; mismatched
// lengths yield Postgres unnest multi-array semantics. Verify callers keep
// the slices aligned.
type InsertWorkspaceAgentScriptsParams struct {
	WorkspaceAgentID uuid.UUID   `db:"workspace_agent_id" json:"workspace_agent_id"`
	CreatedAt        time.Time   `db:"created_at" json:"created_at"`
	LogSourceID      []uuid.UUID `db:"log_source_id" json:"log_source_id"`
	LogPath          []string    `db:"log_path" json:"log_path"`
	Script           []string    `db:"script" json:"script"`
	Cron             []string    `db:"cron" json:"cron"`
	StartBlocksLogin []bool      `db:"start_blocks_login" json:"start_blocks_login"`
	RunOnStart       []bool      `db:"run_on_start" json:"run_on_start"`
	RunOnStop        []bool      `db:"run_on_stop" json:"run_on_stop"`
	TimeoutSeconds   []int32     `db:"timeout_seconds" json:"timeout_seconds"`
}

// InsertWorkspaceAgentScripts bulk-inserts scripts for a single agent, one
// row per element of the parallel slice parameters, and returns the inserted
// rows.
func (q *sqlQuerier) InsertWorkspaceAgentScripts(ctx context.Context, arg InsertWorkspaceAgentScriptsParams) ([]WorkspaceAgentScript, error) {
	rows, err := q.db.QueryContext(ctx, insertWorkspaceAgentScripts,
		arg.WorkspaceAgentID,
		arg.CreatedAt,
		pq.Array(arg.LogSourceID),
		pq.Array(arg.LogPath),
		pq.Array(arg.Script),
		pq.Array(arg.Cron),
		pq.Array(arg.StartBlocksLogin),
		pq.Array(arg.RunOnStart),
		pq.Array(arg.RunOnStop),
		pq.Array(arg.TimeoutSeconds),
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var items []WorkspaceAgentScript
	for rows.Next() {
		var i WorkspaceAgentScript
		if err := rows.Scan(
			&i.WorkspaceAgentID,
			&i.LogSourceID,
			&i.LogPath,
			&i.CreatedAt,
			&i.Script,
			&i.Cron,
			&i.StartBlocksLogin,
			&i.RunOnStart,
			&i.RunOnStop,
			&i.TimeoutSeconds,
		); err != nil {
			return nil, err
		}
		items = append(items, i)
	}
	if err := rows.Close(); err != nil {
		return nil, err
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return items, nil
}
|