Support all transitions in build progress bar (#4575)

* Use null types instead of -1 for simplicity

* Fix pgcrypto bug in migration 59

* Add stories

* Fix visual stutter
Ammar Bandukwala
2022-10-16 23:34:03 -05:00
committed by GitHub
parent ee2c29d520
commit dc3519e973
19 changed files with 309 additions and 124 deletions
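The first commit bullet swaps the old -1 sentinel for nullable values: a missing median is now a nil pointer rather than a magic number. A minimal sketch of the resulting SDK shape, inferred from the diff below (the JSON tag names are assumptions, not taken from the diff):

package codersdk

// Sketch only: field names follow the diff below. A nil pointer means
// "no data yet or not enough builds", replacing the old -1 sentinel.
type TemplateBuildTimeStats struct {
	StartMillis  *int64 `json:"start_ms,omitempty"`
	StopMillis   *int64 `json:"stop_ms,omitempty"`
	DeleteMillis *int64 `json:"delete_ms,omitempty"`
}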

View File

@@ -235,15 +235,17 @@ func (q *fakeQuerier) GetTemplateDAUs(_ context.Context, templateID uuid.UUID) (
return rs, nil
}
func (q *fakeQuerier) GetTemplateAverageBuildTime(ctx context.Context, arg database.GetTemplateAverageBuildTimeParams) (float64, error) {
var times []float64
func (q *fakeQuerier) GetTemplateAverageBuildTime(ctx context.Context, arg database.GetTemplateAverageBuildTimeParams) (database.GetTemplateAverageBuildTimeRow, error) {
var emptyRow database.GetTemplateAverageBuildTimeRow
var (
startTimes []float64
stopTimes []float64
deleteTimes []float64
)
for _, wb := range q.workspaceBuilds {
if wb.Transition != database.WorkspaceTransitionStart {
continue
}
version, err := q.GetTemplateVersionByID(ctx, wb.TemplateVersionID)
if err != nil {
return -1, err
return emptyRow, err
}
if version.TemplateID != arg.TemplateID {
continue
@@ -251,17 +253,32 @@ func (q *fakeQuerier) GetTemplateAverageBuildTime(ctx context.Context, arg datab
job, err := q.GetProvisionerJobByID(ctx, wb.JobID)
if err != nil {
return -1, err
return emptyRow, err
}
if job.CompletedAt.Valid {
times = append(times, job.CompletedAt.Time.Sub(job.StartedAt.Time).Seconds())
took := job.CompletedAt.Time.Sub(job.StartedAt.Time).Seconds()
if wb.Transition == database.WorkspaceTransitionStart {
startTimes = append(startTimes, took)
} else if wb.Transition == database.WorkspaceTransitionStop {
stopTimes = append(stopTimes, took)
} else if wb.Transition == database.WorkspaceTransitionDelete {
deleteTimes = append(deleteTimes, took)
}
}
}
sort.Float64s(times)
if len(times) == 0 {
return -1, nil
tryMedian := func(fs []float64) float64 {
if len(fs) == 0 {
return -1
}
sort.Float64s(fs)
return fs[len(fs)/2]
}
return times[len(times)/2], nil
var row database.GetTemplateAverageBuildTimeRow
row.DeleteMedian = tryMedian(deleteTimes)
row.StopMedian = tryMedian(stopTimes)
row.StartMedian = tryMedian(startTimes)
return row, nil
}
func (q *fakeQuerier) ParameterValue(_ context.Context, id uuid.UUID) (database.ParameterValue, error) {
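For reference, the median helper above returns -1 for an empty sample set and otherwise the upper median of the sorted slice (fs[len(fs)/2]). A standalone sketch of the same logic, using the same build times as the three-row test case further down:

package main

import (
	"fmt"
	"sort"
)

// tryMedian mirrors the fake-querier helper: -1 when there are no samples,
// otherwise the upper median of the sorted slice.
func tryMedian(fs []float64) float64 {
	if len(fs) == 0 {
		return -1
	}
	sort.Float64s(fs)
	return fs[len(fs)/2]
}

func main() {
	fmt.Println(tryMedian(nil))                   // -1 (no builds)
	fmt.Println(tryMedian([]float64{10, 50, 20})) // 20, matching the 20s median expected in the test below
}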

View File

@@ -1,5 +1,9 @@
-- Code generated by 'make coderd/database/generate'. DO NOT EDIT.
CREATE EXTENSION IF NOT EXISTS pgcrypto WITH SCHEMA public;
COMMENT ON EXTENSION pgcrypto IS 'cryptographic functions';
CREATE TYPE api_key_scope AS ENUM (
'all',
'application_connect'

View File

@@ -4,8 +4,8 @@
-- template to be able to push and read files used for template
-- versions they create.
-- Prior to this collisions on file.hash were not an issue
-- since users who could push files could also read all files.
--
-- This migration also adds a 'files.id' column as the primary
-- key. As a side effect the provisioner_jobs must now reference
-- the files.id column since the 'hash' column is now ambiguous.
@@ -14,10 +14,13 @@ BEGIN;
-- Drop the primary key on hash.
ALTER TABLE files DROP CONSTRAINT files_pkey;
-- This extension is required by gen_random_uuid
CREATE EXTENSION IF NOT EXISTS pgcrypto;
-- Add an 'id' column and designate it the primary key.
ALTER TABLE files ADD COLUMN
id uuid NOT NULL PRIMARY KEY DEFAULT gen_random_uuid ();
-- Update the constraint to include the user who created it.
ALTER TABLE files ADD UNIQUE(hash, created_by);
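This is the "pgcrypto bug in migration 59" from the commit message: gen_random_uuid() is built into PostgreSQL 13+, but on older versions it comes from the pgcrypto extension, so the migration now creates the extension before using the function. A hedged standalone check of the same precondition (the driver and connection string are placeholders, not from the diff):

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // any Postgres driver works; lib/pq is an assumption
)

func main() {
	// Placeholder DSN; point it at the database the migration will run against.
	db, err := sql.Open("postgres", "postgres://localhost/coder?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// The same statement the migration now runs before calling gen_random_uuid().
	if _, err := db.Exec("CREATE EXTENSION IF NOT EXISTS pgcrypto"); err != nil {
		log.Fatal(err)
	}
	var id string
	if err := db.QueryRow("SELECT gen_random_uuid()").Scan(&id); err != nil {
		log.Fatal(err)
	}
	fmt.Println("gen_random_uuid() available:", id)
}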

View File

@@ -67,7 +67,7 @@ type sqlcQuerier interface {
GetProvisionerJobsByIDs(ctx context.Context, ids []uuid.UUID) ([]ProvisionerJob, error)
GetProvisionerJobsCreatedAfter(ctx context.Context, createdAt time.Time) ([]ProvisionerJob, error)
GetProvisionerLogsByIDBetween(ctx context.Context, arg GetProvisionerLogsByIDBetweenParams) ([]ProvisionerJobLog, error)
GetTemplateAverageBuildTime(ctx context.Context, arg GetTemplateAverageBuildTimeParams) (float64, error)
GetTemplateAverageBuildTime(ctx context.Context, arg GetTemplateAverageBuildTimeParams) (GetTemplateAverageBuildTimeRow, error)
GetTemplateByID(ctx context.Context, id uuid.UUID) (Template, error)
GetTemplateByOrganizationAndName(ctx context.Context, arg GetTemplateByOrganizationAndNameParams) (Template, error)
GetTemplateDAUs(ctx context.Context, templateID uuid.UUID) ([]GetTemplateDAUsRow, error)

View File

@@ -2600,7 +2600,8 @@ func (q *sqlQuerier) InsertDeploymentID(ctx context.Context, value string) error
const getTemplateAverageBuildTime = `-- name: GetTemplateAverageBuildTime :one
WITH build_times AS (
SELECT
EXTRACT(EPOCH FROM (pj.completed_at - pj.started_at))::FLOAT AS exec_time_sec
EXTRACT(EPOCH FROM (pj.completed_at - pj.started_at))::FLOAT AS exec_time_sec,
workspace_builds.transition
FROM
workspace_builds
JOIN template_versions ON
@@ -2609,7 +2610,6 @@ JOIN provisioner_jobs pj ON
workspace_builds.job_id = pj.id
WHERE
template_versions.template_id = $1 AND
(workspace_builds.transition = 'start') AND
(pj.completed_at IS NOT NULL) AND (pj.started_at IS NOT NULL) AND
(pj.started_at > $2) AND
(pj.canceled_at IS NULL) AND
@@ -2617,7 +2617,12 @@ WHERE
ORDER BY
workspace_builds.created_at DESC
)
SELECT coalesce((PERCENTILE_DISC(0.5) WITHIN GROUP(ORDER BY exec_time_sec)), -1)::FLOAT
SELECT
-- Postgres offers no clear way to DRY this short of a function or other
-- complexities.
coalesce((PERCENTILE_DISC(0.5) WITHIN GROUP(ORDER BY exec_time_sec) FILTER (WHERE transition = 'start')), -1)::FLOAT AS start_median,
coalesce((PERCENTILE_DISC(0.5) WITHIN GROUP(ORDER BY exec_time_sec) FILTER (WHERE transition = 'stop')), -1)::FLOAT AS stop_median,
coalesce((PERCENTILE_DISC(0.5) WITHIN GROUP(ORDER BY exec_time_sec) FILTER (WHERE transition = 'delete')), -1)::FLOAT AS delete_median
FROM build_times
`
@@ -2626,11 +2631,17 @@ type GetTemplateAverageBuildTimeParams struct {
StartTime sql.NullTime `db:"start_time" json:"start_time"`
}
func (q *sqlQuerier) GetTemplateAverageBuildTime(ctx context.Context, arg GetTemplateAverageBuildTimeParams) (float64, error) {
type GetTemplateAverageBuildTimeRow struct {
StartMedian float64 `db:"start_median" json:"start_median"`
StopMedian float64 `db:"stop_median" json:"stop_median"`
DeleteMedian float64 `db:"delete_median" json:"delete_median"`
}
func (q *sqlQuerier) GetTemplateAverageBuildTime(ctx context.Context, arg GetTemplateAverageBuildTimeParams) (GetTemplateAverageBuildTimeRow, error) {
row := q.db.QueryRowContext(ctx, getTemplateAverageBuildTime, arg.TemplateID, arg.StartTime)
var column_1 float64
err := row.Scan(&column_1)
return column_1, err
var i GetTemplateAverageBuildTimeRow
err := row.Scan(&i.StartMedian, &i.StopMedian, &i.DeleteMedian)
return i, err
}
const getTemplateByID = `-- name: GetTemplateByID :one
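A hedged usage sketch for the regenerated query: each median comes back as -1 when there are no completed builds for that transition, and only becomes a nil pointer later, in the metrics cache conversion. The 30-day window, the database.Store parameter, and the import paths are assumptions for illustration:

package example

import (
	"context"
	"database/sql"
	"time"

	"github.com/google/uuid"

	"github.com/coder/coder/coderd/database"
)

// buildTimeMedians is a sketch only: fetch per-transition build-time medians
// for one template over an assumed 30-day window. Each median is -1 when no
// completed builds exist for that transition.
func buildTimeMedians(ctx context.Context, q database.Store, templateID uuid.UUID) (database.GetTemplateAverageBuildTimeRow, error) {
	return q.GetTemplateAverageBuildTime(ctx, database.GetTemplateAverageBuildTimeParams{
		TemplateID: uuid.NullUUID{UUID: templateID, Valid: true},
		StartTime:  sql.NullTime{Time: time.Now().AddDate(0, 0, -30), Valid: true},
	})
}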

View File

@@ -109,7 +109,8 @@ RETURNING
-- name: GetTemplateAverageBuildTime :one
WITH build_times AS (
SELECT
EXTRACT(EPOCH FROM (pj.completed_at - pj.started_at))::FLOAT AS exec_time_sec
EXTRACT(EPOCH FROM (pj.completed_at - pj.started_at))::FLOAT AS exec_time_sec,
workspace_builds.transition
FROM
workspace_builds
JOIN template_versions ON
@@ -118,7 +119,6 @@ JOIN provisioner_jobs pj ON
workspace_builds.job_id = pj.id
WHERE
template_versions.template_id = @template_id AND
(workspace_builds.transition = 'start') AND
(pj.completed_at IS NOT NULL) AND (pj.started_at IS NOT NULL) AND
(pj.started_at > @start_time) AND
(pj.canceled_at IS NULL) AND
@@ -126,6 +126,11 @@ WHERE
ORDER BY
workspace_builds.created_at DESC
)
SELECT coalesce((PERCENTILE_DISC(0.5) WITHIN GROUP(ORDER BY exec_time_sec)), -1)::FLOAT
SELECT
-- Postgres offers no clear way to DRY this short of a function or other
-- complexities.
coalesce((PERCENTILE_DISC(0.5) WITHIN GROUP(ORDER BY exec_time_sec) FILTER (WHERE transition = 'start')), -1)::FLOAT AS start_median,
coalesce((PERCENTILE_DISC(0.5) WITHIN GROUP(ORDER BY exec_time_sec) FILTER (WHERE transition = 'stop')), -1)::FLOAT AS stop_median,
coalesce((PERCENTILE_DISC(0.5) WITHIN GROUP(ORDER BY exec_time_sec) FILTER (WHERE transition = 'delete')), -1)::FLOAT AS delete_median
FROM build_times
;

View File

@@ -29,7 +29,7 @@ type Cache struct {
templateDAUResponses atomic.Pointer[map[uuid.UUID]codersdk.TemplateDAUsResponse]
templateUniqueUsers atomic.Pointer[map[uuid.UUID]int]
templateAverageBuildTime atomic.Pointer[map[uuid.UUID]time.Duration]
templateAverageBuildTime atomic.Pointer[map[uuid.UUID]database.GetTemplateAverageBuildTimeRow]
done chan struct{}
cancel func()
@@ -130,9 +130,9 @@ func (c *Cache) refresh(ctx context.Context) error {
}
var (
templateDAUs = make(map[uuid.UUID]codersdk.TemplateDAUsResponse, len(templates))
templateUniqueUsers = make(map[uuid.UUID]int)
templateAverageBuildTimeSec = make(map[uuid.UUID]time.Duration)
templateDAUs = make(map[uuid.UUID]codersdk.TemplateDAUsResponse, len(templates))
templateUniqueUsers = make(map[uuid.UUID]int)
templateAverageBuildTimes = make(map[uuid.UUID]database.GetTemplateAverageBuildTimeRow)
)
for _, template := range templates {
rows, err := c.database.GetTemplateDAUs(ctx, template.ID)
@@ -141,6 +141,7 @@ func (c *Cache) refresh(ctx context.Context) error {
}
templateDAUs[template.ID] = convertDAUResponse(rows)
templateUniqueUsers[template.ID] = countUniqueUsers(rows)
templateAvgBuildTime, err := c.database.GetTemplateAverageBuildTime(ctx, database.GetTemplateAverageBuildTimeParams{
TemplateID: uuid.NullUUID{
UUID: template.ID,
@@ -151,14 +152,15 @@ func (c *Cache) refresh(ctx context.Context) error {
Valid: true,
},
})
if err != nil {
return err
}
templateAverageBuildTimeSec[template.ID] = time.Duration(float64(time.Second) * templateAvgBuildTime)
templateAverageBuildTimes[template.ID] = templateAvgBuildTime
}
c.templateDAUResponses.Store(&templateDAUs)
c.templateUniqueUsers.Store(&templateUniqueUsers)
c.templateAverageBuildTime.Store(&templateAverageBuildTimeSec)
c.templateAverageBuildTime.Store(&templateAverageBuildTimes)
return nil
}
@@ -239,17 +241,32 @@ func (c *Cache) TemplateUniqueUsers(id uuid.UUID) (int, bool) {
return resp, true
}
func (c *Cache) TemplateAverageBuildTime(id uuid.UUID) (time.Duration, bool) {
func (c *Cache) TemplateBuildTimeStats(id uuid.UUID) codersdk.TemplateBuildTimeStats {
var unknown codersdk.TemplateBuildTimeStats
m := c.templateAverageBuildTime.Load()
if m == nil {
// Data loading.
return -1, false
return unknown
}
resp, ok := (*m)[id]
if !ok || resp <= 0 {
if !ok {
// No data or not enough builds.
return -1, false
return unknown
}
convertMedian := func(m float64) *int64 {
if m <= 0 {
return nil
}
i := int64(m * 1000)
return &i
}
return codersdk.TemplateBuildTimeStats{
StartMillis: convertMedian(resp.StartMedian),
StopMillis: convertMedian(resp.StopMedian),
DeleteMillis: convertMedian(resp.DeleteMedian),
}
return resp, true
}
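A hedged caller-side sketch of the new cache API: absence of data is now a nil pointer per transition instead of the old (duration, false) pair, so callers check each field independently. Package name and import paths are assumed:

package example

import (
	"fmt"

	"github.com/coder/coder/codersdk"
)

// describeStartBuild is a sketch only, not part of the diff. A nil pointer
// means the cache has no start-build data for this template yet.
func describeStartBuild(stats codersdk.TemplateBuildTimeStats) string {
	if stats.StartMillis == nil {
		return "no start-build data"
	}
	return fmt.Sprintf("median start build: %d ms", *stats.StartMillis)
}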

View File

@@ -7,6 +7,7 @@ import (
"time"
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"cdr.dev/slog/sloggers/slogtest"
@@ -214,27 +215,30 @@ func TestCache_BuildTime(t *testing.T) {
}
type args struct {
rows []jobParams
rows []jobParams
transition database.WorkspaceTransition
}
type want struct {
buildTime time.Duration
buildTimeMs int64
loads bool
}
tests := []struct {
name string
args args
want want
}{
{"empty", args{}, want{-1}},
{"one", args{
{"empty", args{}, want{-1, false}},
{"one/start", args{
rows: []jobParams{
{
startedAt: clockTime(someDay, 10, 1, 0),
completedAt: clockTime(someDay, 10, 1, 10),
},
},
}, want{time.Second * 10},
transition: database.WorkspaceTransitionStart,
}, want{10 * 1000, true},
},
{"two", args{
{"two/stop", args{
rows: []jobParams{
{
startedAt: clockTime(someDay, 10, 1, 0),
@@ -245,9 +249,10 @@ func TestCache_BuildTime(t *testing.T) {
completedAt: clockTime(someDay, 10, 1, 50),
},
},
}, want{time.Second * 50},
transition: database.WorkspaceTransitionStop,
}, want{50 * 1000, true},
},
{"three", args{
{"three/delete", args{
rows: []jobParams{
{
startedAt: clockTime(someDay, 10, 1, 0),
@@ -261,7 +266,8 @@ func TestCache_BuildTime(t *testing.T) {
completedAt: clockTime(someDay, 10, 1, 20),
},
},
}, want{time.Second * 20},
transition: database.WorkspaceTransitionDelete,
}, want{20 * 1000, true},
},
}
@@ -289,9 +295,8 @@ func TestCache_BuildTime(t *testing.T) {
})
require.NoError(t, err)
gotBuildTime, ok := cache.TemplateAverageBuildTime(template.ID)
require.False(t, ok, "template shouldn't have loaded yet")
require.EqualValues(t, -1, gotBuildTime)
gotStats := cache.TemplateBuildTimeStats(template.ID)
require.Empty(t, gotStats, "should not have loaded yet")
for _, row := range tt.args.rows {
_, err := db.InsertProvisionerJob(ctx, database.InsertProvisionerJobParams{
@@ -311,7 +316,7 @@ func TestCache_BuildTime(t *testing.T) {
_, err = db.InsertWorkspaceBuild(ctx, database.InsertWorkspaceBuildParams{
TemplateVersionID: templateVersion.ID,
JobID: job.ID,
Transition: database.WorkspaceTransitionStart,
Transition: tt.args.transition,
})
require.NoError(t, err)
@@ -322,28 +327,38 @@ func TestCache_BuildTime(t *testing.T) {
require.NoError(t, err)
}
if tt.want.buildTime > 0 {
if tt.want.loads {
require.Eventuallyf(t, func() bool {
_, ok := cache.TemplateAverageBuildTime(template.ID)
return ok
stats := cache.TemplateBuildTimeStats(template.ID)
return assert.NotEmpty(t, stats)
}, testutil.WaitShort, testutil.IntervalMedium,
"TemplateDAUs never populated",
"BuildTime never populated",
)
gotBuildTime, ok = cache.TemplateAverageBuildTime(template.ID)
require.True(t, ok)
require.Equal(t, tt.want.buildTime, gotBuildTime)
gotStats = cache.TemplateBuildTimeStats(template.ID)
if tt.args.transition == database.WorkspaceTransitionDelete {
require.Nil(t, gotStats.StopMillis)
require.Nil(t, gotStats.StartMillis)
require.Equal(t, tt.want.buildTimeMs, *gotStats.DeleteMillis)
}
if tt.args.transition == database.WorkspaceTransitionStart {
require.Nil(t, gotStats.StopMillis)
require.Nil(t, gotStats.DeleteMillis)
require.Equal(t, tt.want.buildTimeMs, *gotStats.StartMillis)
}
if tt.args.transition == database.WorkspaceTransitionStop {
require.Nil(t, gotStats.StartMillis)
require.Nil(t, gotStats.DeleteMillis)
require.Equal(t, tt.want.buildTimeMs, *gotStats.StopMillis)
}
} else {
require.Never(t, func() bool {
_, ok := cache.TemplateAverageBuildTime(template.ID)
return ok
stats := cache.TemplateBuildTimeStats(template.ID)
return !assert.Empty(t, stats)
}, testutil.WaitShort/2, testutil.IntervalMedium,
"TemplateDAUs never populated",
"BuildTimeStats populated",
)
gotBuildTime, ok = cache.TemplateAverageBuildTime(template.ID)
require.False(t, ok)
require.Less(t, gotBuildTime, time.Duration(0))
}
})
}

View File

@@ -774,13 +774,7 @@ func (api *API) convertTemplate(
) codersdk.Template {
activeCount, _ := api.metricsCache.TemplateUniqueUsers(template.ID)
var averageBuildTimeMillis int64
averageBuildTime, ok := api.metricsCache.TemplateAverageBuildTime(template.ID)
if !ok {
averageBuildTimeMillis = -1
} else {
averageBuildTimeMillis = int64(averageBuildTime / time.Millisecond)
}
buildTimeStats := api.metricsCache.TemplateBuildTimeStats(template.ID)
return codersdk.Template{
ID: template.ID,
@@ -792,7 +786,7 @@ func (api *API) convertTemplate(
ActiveVersionID: template.ActiveVersionID,
WorkspaceOwnerCount: workspaceOwnerCount,
ActiveUserCount: activeCount,
AverageBuildTimeMillis: averageBuildTimeMillis,
BuildTimeStats: buildTimeStats,
Description: template.Description,
Icon: template.Icon,
MaxTTLMillis: time.Duration(template.MaxTtl).Milliseconds(),

View File

@@ -594,7 +594,7 @@ func TestTemplateMetrics(t *testing.T) {
})
template := coderdtest.CreateTemplate(t, client, user.OrganizationID, version.ID)
require.Equal(t, -1, template.ActiveUserCount)
require.EqualValues(t, -1, template.AverageBuildTimeMillis)
require.Empty(t, template.BuildTimeStats)
coderdtest.AwaitTemplateVersionJob(t, client, version.ID)
workspace := coderdtest.CreateWorkspace(t, client, user.OrganizationID, template.ID)
@@ -661,7 +661,8 @@ func TestTemplateMetrics(t *testing.T) {
template, err = client.Template(ctx, template.ID)
require.NoError(t, err)
require.Equal(t, 1, template.ActiveUserCount)
require.Greater(t, template.AverageBuildTimeMillis, int64(1))
require.NotNil(t, template.BuildTimeStats.StartMillis, template.BuildTimeStats)
require.Greater(t, *template.BuildTimeStats.StartMillis, int64(1))
workspaces, err = client.Workspaces(ctx, codersdk.WorkspaceFilter{})
require.NoError(t, err)