chore: add tx metrics and logs for serialization errors (#15215)

Previously, `dbmetrics` instrumentation was all or nothing. Now `InTx` metrics are always recorded, and query metrics are opt-in.


Adds instrumentation & logging around serialization failures in the database.
Steven Masley
2024-10-25 12:14:15 -04:00
committed by GitHub
parent df34858c3c
commit ccfffc6911
32 changed files with 3123 additions and 2800 deletions
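
For orientation, a rough sketch of how the pieces below fit together (illustrative only; `db`, `logger`, and `reg` are assumed to exist, and the identifier is made up):

// Sketch, not part of the diff: wire up the metrics wrappers and tag a transaction.
func exampleWiring(db database.Store, logger slog.Logger, reg *prometheus.Registry) error {
	store := dbmetrics.NewDBMetrics(db, logger, reg)      // InTx metrics: always recorded
	// store = dbmetrics.NewQueryMetrics(db, logger, reg) // query metrics: opt-in wrapper

	return store.InTx(func(tx database.Store) error {
		return nil // business logic runs here
	}, database.DefaultTXOptions().WithID("example")) // "example" becomes the "id" metric label
}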


@ -285,7 +285,10 @@ func (e *Executor) runOnce(t time.Time) Stats {
// Run with RepeatableRead isolation so that the build process sees the same data
// as our calculation that determines whether an autobuild is necessary.
}, &sql.TxOptions{Isolation: sql.LevelRepeatableRead})
}, &database.TxOptions{
Isolation: sql.LevelRepeatableRead,
TxIdentifier: "lifecycle",
})
if auditLog != nil {
// If the transition didn't succeed then updating the workspace
// to indicate dormant didn't either.


@ -0,0 +1,3 @@
// Package promhelp provides helper functions for asserting Prometheus
// metric values in unit tests.
package promhelp


@ -0,0 +1,87 @@
package promhelp
import (
"context"
"io"
"maps"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
ptestutil "github.com/prometheus/client_golang/prometheus/testutil"
io_prometheus_client "github.com/prometheus/client_model/go"
"github.com/stretchr/testify/require"
)
// RegistryDump returns the http page for a given registry's metrics.
// Very useful for visual debugging.
func RegistryDump(reg *prometheus.Registry) string {
h := promhttp.HandlerFor(reg, promhttp.HandlerOpts{})
rec := httptest.NewRecorder()
req, _ := http.NewRequestWithContext(context.Background(), http.MethodGet, "/", nil)
h.ServeHTTP(rec, req)
resp := rec.Result()
data, _ := io.ReadAll(resp.Body)
_ = resp.Body.Close()
return string(data)
}
// Compare can be used to compare a registry to some prometheus formatted
// text. If any values differ, an error is returned.
// If metric names are passed in, only those metrics will be compared.
// Usage: `Compare(reg, RegistryDump(reg))`
func Compare(reg prometheus.Gatherer, compare string, metricNames ...string) error {
return ptestutil.GatherAndCompare(reg, strings.NewReader(compare), metricNames...)
}
// HistogramValue returns the value of a histogram metric with the given name and labels.
func HistogramValue(t testing.TB, reg prometheus.Gatherer, metricName string, labels prometheus.Labels) *io_prometheus_client.Histogram {
t.Helper()
labeled := MetricValue(t, reg, metricName, labels)
require.NotNilf(t, labeled, "metric %q with labels %v not found", metricName, labels)
return labeled.GetHistogram()
}
// GaugeValue returns the value of a gauge metric with the given name and labels.
func GaugeValue(t testing.TB, reg prometheus.Gatherer, metricName string, labels prometheus.Labels) int {
t.Helper()
labeled := MetricValue(t, reg, metricName, labels)
require.NotNilf(t, labeled, "metric %q with labels %v not found", metricName, labels)
return int(labeled.GetGauge().GetValue())
}
// CounterValue returns the value of a counter metric with the given name and labels.
func CounterValue(t testing.TB, reg prometheus.Gatherer, metricName string, labels prometheus.Labels) int {
t.Helper()
labeled := MetricValue(t, reg, metricName, labels)
require.NotNilf(t, labeled, "metric %q with labels %v not found", metricName, labels)
return int(labeled.GetCounter().GetValue())
}
// MetricValue returns the labeled metric sample matching the given name and
// labels, or nil if no such sample exists.
func MetricValue(t testing.TB, reg prometheus.Gatherer, metricName string, labels prometheus.Labels) *io_prometheus_client.Metric {
t.Helper()
metrics, err := reg.Gather()
require.NoError(t, err)
for _, m := range metrics {
if m.GetName() == metricName {
for _, labeled := range m.GetMetric() {
mLabels := make(prometheus.Labels)
for _, v := range labeled.GetLabel() {
mLabels[v.GetName()] = v.GetValue()
}
if maps.Equal(mLabels, labels) {
return labeled
}
}
}
}
return nil
}
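
A hedged sketch of how a test might use these helpers (the metric and values here are invented for illustration):

// Sketch only, not part of the diff.
func TestPromhelpExample(t *testing.T) {
	reg := prometheus.NewRegistry()
	c := prometheus.NewCounterVec(prometheus.CounterOpts{Name: "example_total"}, []string{"status"})
	reg.MustRegister(c)
	c.WithLabelValues("ok").Inc()

	require.Equal(t, 1, promhelp.CounterValue(t, reg, "example_total", prometheus.Labels{"status": "ok"}))
	require.NoError(t, promhelp.Compare(reg, promhelp.RegistryDump(reg), "example_total"))
	t.Log(promhelp.RegistryDump(reg)) // useful when an assertion fails
}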


@ -161,8 +161,9 @@ func (k *rotator) rotateKeys(ctx context.Context) error {
}
}
return nil
}, &sql.TxOptions{
Isolation: sql.LevelRepeatableRead,
}, &database.TxOptions{
Isolation: sql.LevelRepeatableRead,
TxIdentifier: "rotate_keys",
})
}


@ -28,7 +28,7 @@ type Store interface {
wrapper
Ping(ctx context.Context) (time.Duration, error)
InTx(func(Store) error, *sql.TxOptions) error
InTx(func(Store) error, *TxOptions) error
}
type wrapper interface {
@ -57,6 +57,43 @@ func New(sdb *sql.DB) Store {
}
}
// TxOptions is used to pass some execution metadata to the callers.
// Ideally we could throw this into a context, but no context is used for
// transactions. So instead, the return context is attached to the options
// passed in.
// This metadata should not be returned in the method signature, because it
// is only used for metric tracking. It should never be used by business logic.
type TxOptions struct {
// Isolation is the transaction isolation level.
// If zero, the driver or database's default level is used.
Isolation sql.IsolationLevel
ReadOnly bool
// -- Coder specific metadata --
// TxIdentifier is a unique identifier for the transaction to be used
// in metrics. Can be any string.
TxIdentifier string
// Set by InTx
executionCount int
}
// IncrementExecutionCount is a helper function for external packages
// to increment the unexported count.
// Mainly for `dbmem`.
func IncrementExecutionCount(opts *TxOptions) {
opts.executionCount++
}
func (o TxOptions) ExecutionCount() int {
return o.executionCount
}
func (o *TxOptions) WithID(id string) *TxOptions {
o.TxIdentifier = id
return o
}
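
For context, a hedged sketch (not part of the diff) of how callers are expected to use this metadata; the identifier is made up, and DefaultTXOptions is added further down in this file:

// Sketch only.
func exampleTxMetadata(store Store) {
	opts := DefaultTXOptions().WithID("example_tx") // "example_tx" is an illustrative identifier
	err := store.InTx(func(tx Store) error {
		return nil // queries against tx
	}, opts)
	_ = err
	// A metrics wrapper can then report how many attempts ran; only
	// serializable transactions are retried, so this is usually 1.
	_ = opts.ExecutionCount()
}
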
// queries encompasses both our sqlc generated
// queries and our custom queries.
type querier interface {
@ -80,11 +117,24 @@ func (q *sqlQuerier) Ping(ctx context.Context) (time.Duration, error) {
return time.Since(start), err
}
func (q *sqlQuerier) InTx(function func(Store) error, txOpts *sql.TxOptions) error {
func DefaultTXOptions() *TxOptions {
return &TxOptions{
Isolation: sql.LevelDefault,
ReadOnly: false,
}
}
func (q *sqlQuerier) InTx(function func(Store) error, txOpts *TxOptions) error {
_, inTx := q.db.(*sqlx.Tx)
isolation := sql.LevelDefault
if txOpts != nil {
isolation = txOpts.Isolation
if txOpts == nil {
// create a default txOpts if left to nil
txOpts = DefaultTXOptions()
}
sqlOpts := &sql.TxOptions{
Isolation: txOpts.Isolation,
ReadOnly: txOpts.ReadOnly,
}
// If we are not already in a transaction, and we are running in serializable
@ -92,13 +142,14 @@ func (q *sqlQuerier) InTx(function func(Store) error, txOpts *sql.TxOptions) err
// prepared to allow retries if using serializable mode.
// If we are in a transaction already, the parent InTx call will handle the retry.
// We do not want to duplicate those retries.
if !inTx && isolation == sql.LevelSerializable {
if !inTx && sqlOpts.Isolation == sql.LevelSerializable {
// This is an arbitrarily chosen number.
const retryAmount = 3
var err error
attempts := 0
for attempts = 0; attempts < retryAmount; attempts++ {
err = q.runTx(function, txOpts)
txOpts.executionCount++
err = q.runTx(function, sqlOpts)
if err == nil {
// Transaction succeeded.
return nil
@ -111,7 +162,9 @@ func (q *sqlQuerier) InTx(function func(Store) error, txOpts *sql.TxOptions) err
// Transaction kept failing in serializable mode.
return xerrors.Errorf("transaction failed after %d attempts: %w", attempts, err)
}
return q.runTx(function, txOpts)
txOpts.executionCount++
return q.runTx(function, sqlOpts)
}
// InTx performs database operations inside a transaction.
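
To make the retry behavior concrete, a hedged sketch (not part of the diff) of a caller opting into serializable isolation; the identifier is illustrative:

// Sketch only. With LevelSerializable the loop above re-runs the function up
// to 3 times; if it keeps failing, the error is wrapped as
// "transaction failed after N attempts: ...".
func exampleSerializableTx(store Store) error {
	return store.InTx(func(tx Store) error {
		// read-modify-write work that can conflict with concurrent transactions
		return nil
	}, &TxOptions{
		Isolation:    sql.LevelSerializable,
		TxIdentifier: "example_serializable",
	})
}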


@ -27,7 +27,7 @@ func TestSerializedRetry(t *testing.T) {
db := database.New(sqlDB)
called := 0
txOpts := &sql.TxOptions{Isolation: sql.LevelSerializable}
txOpts := &database.TxOptions{Isolation: sql.LevelSerializable}
err := db.InTx(func(tx database.Store) error {
// Test nested error
return tx.InTx(func(tx database.Store) error {


@ -558,7 +558,7 @@ func (q *querier) Ping(ctx context.Context) (time.Duration, error) {
}
// InTx runs the given function in a transaction.
func (q *querier) InTx(function func(querier database.Store) error, txOpts *sql.TxOptions) error {
func (q *querier) InTx(function func(querier database.Store) error, txOpts *database.TxOptions) error {
return q.db.InTx(func(tx database.Store) error {
// Wrap the transaction store in a querier.
wrapped := New(tx, q.auth, q.log, q.acs)


@ -365,7 +365,7 @@ func (tx *fakeTx) releaseLocks() {
}
// InTx doesn't rollback data properly for in-memory yet.
func (q *FakeQuerier) InTx(fn func(database.Store) error, _ *sql.TxOptions) error {
func (q *FakeQuerier) InTx(fn func(database.Store) error, opts *database.TxOptions) error {
q.mutex.Lock()
defer q.mutex.Unlock()
tx := &fakeTx{
@ -374,6 +374,9 @@ func (q *FakeQuerier) InTx(fn func(database.Store) error, _ *sql.TxOptions) erro
}
defer tx.releaseLocks()
if opts != nil {
database.IncrementExecutionCount(opts)
}
return fn(tx)
}

File diff suppressed because it is too large


@ -0,0 +1,109 @@
package dbmetrics_test
import (
"bytes"
"testing"
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/require"
"golang.org/x/xerrors"
"cdr.dev/slog"
"cdr.dev/slog/sloggers/sloghuman"
"cdr.dev/slog/sloggers/slogtest"
"github.com/coder/coder/v2/coderd/coderdtest/promhelp"
"github.com/coder/coder/v2/coderd/database"
"github.com/coder/coder/v2/coderd/database/dbmem"
"github.com/coder/coder/v2/coderd/database/dbmetrics"
)
func TestInTxMetrics(t *testing.T) {
t.Parallel()
successLabels := prometheus.Labels{
"success": "true",
"id": "",
}
const inTxHistMetricName = "coderd_db_tx_duration_seconds"
const inTxCountMetricName = "coderd_db_tx_executions_count"
t.Run("QueryMetrics", func(t *testing.T) {
t.Parallel()
db := dbmem.New()
reg := prometheus.NewRegistry()
db = dbmetrics.NewQueryMetrics(db, slogtest.Make(t, nil), reg)
err := db.InTx(func(s database.Store) error {
return nil
}, nil)
require.NoError(t, err)
// Check that the metrics are registered
inTxMetric := promhelp.HistogramValue(t, reg, inTxHistMetricName, successLabels)
require.NotNil(t, inTxMetric)
require.Equal(t, uint64(1), inTxMetric.GetSampleCount())
})
t.Run("DBMetrics", func(t *testing.T) {
t.Parallel()
db := dbmem.New()
reg := prometheus.NewRegistry()
db = dbmetrics.NewDBMetrics(db, slogtest.Make(t, nil), reg)
err := db.InTx(func(s database.Store) error {
return nil
}, nil)
require.NoError(t, err)
// Check that the metrics are registered
inTxMetric := promhelp.HistogramValue(t, reg, inTxHistMetricName, successLabels)
require.NotNil(t, inTxMetric)
require.Equal(t, uint64(1), inTxMetric.GetSampleCount())
})
// Test log output and metrics on failures
// Log example:
// [erro] database transaction hit serialization error and had to retry success=false executions=2 id=foobar_factory
t.Run("SerializationError", func(t *testing.T) {
t.Parallel()
var output bytes.Buffer
logger := slog.Make(sloghuman.Sink(&output))
reg := prometheus.NewRegistry()
db := dbmetrics.NewDBMetrics(dbmem.New(), logger, reg)
const id = "foobar_factory"
txOpts := database.DefaultTXOptions().WithID(id)
database.IncrementExecutionCount(txOpts) // 2 executions
err := db.InTx(func(s database.Store) error {
return xerrors.Errorf("some dumb error")
}, txOpts)
require.Error(t, err)
// Check that the metrics are registered
inTxHistMetric := promhelp.HistogramValue(t, reg, inTxHistMetricName, prometheus.Labels{
"success": "false",
"id": id,
})
require.NotNil(t, inTxHistMetric)
require.Equal(t, uint64(1), inTxHistMetric.GetSampleCount())
inTxCountMetric := promhelp.CounterValue(t, reg, inTxCountMetricName, prometheus.Labels{
"success": "false",
"retries": "1",
"id": id,
})
require.NotNil(t, inTxCountMetric)
require.Equal(t, 1, inTxCountMetric)
// Also check the logs
require.Contains(t, output.String(), "some dumb error")
require.Contains(t, output.String(), "database transaction hit serialization error and had to retry")
require.Contains(t, output.String(), "success=false")
require.Contains(t, output.String(), "executions=2")
require.Contains(t, output.String(), "id="+id)
})
}
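
The dbmetrics wrapper itself sits in one of the suppressed diffs in this commit; judging from the metric names, labels, and log line asserted above, its InTx hook presumably has roughly the following shape. Type and field names here are guesses, not the actual implementation:

// Rough sketch only; see the suppressed dbmetrics diff for the real code.
func (m metricsStore) InTx(f func(database.Store) error, opts *database.TxOptions) error {
	if opts == nil {
		opts = database.DefaultTXOptions()
	}
	start := time.Now()
	err := m.inner.InTx(f, opts)
	success := strconv.FormatBool(err == nil)

	// coderd_db_tx_duration_seconds{success, id}
	m.txDuration.WithLabelValues(success, opts.TxIdentifier).Observe(time.Since(start).Seconds())

	if opts.ExecutionCount() > 1 {
		retries := opts.ExecutionCount() - 1
		// coderd_db_tx_executions_count{success, retries, id}
		m.txRetries.WithLabelValues(success, strconv.Itoa(retries), opts.TxIdentifier).Inc()
		m.logger.Error(context.Background(), "database transaction hit serialization error and had to retry",
			slog.Error(err),
			slog.F("success", err == nil),
			slog.F("executions", opts.ExecutionCount()),
			slog.F("id", opts.TxIdentifier),
		)
	}
	return err
}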

File diff suppressed because it is too large


@ -11,7 +11,6 @@ package dbmock
import (
context "context"
sql "database/sql"
reflect "reflect"
time "time"
@ -3489,7 +3488,7 @@ func (mr *MockStoreMockRecorder) GetWorkspacesEligibleForTransition(arg0, arg1 a
}
// InTx mocks base method.
func (m *MockStore) InTx(arg0 func(database.Store) error, arg1 *sql.TxOptions) error {
func (m *MockStore) InTx(arg0 func(database.Store) error, arg1 *database.TxOptions) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "InTx", arg0, arg1)
ret0, _ := ret[0].(error)


@ -66,7 +66,7 @@ func New(ctx context.Context, logger slog.Logger, db database.Store, clk quartz.
logger.Info(ctx, "purged old database entries", slog.F("duration", clk.Since(start)))
return nil
}, nil); err != nil {
}, database.DefaultTXOptions().WithID("db_purge")); err != nil {
logger.Error(ctx, "failed to purge old database entries", slog.Error(err))
return
}


@ -108,7 +108,7 @@ func (r *Rolluper) start(ctx context.Context) {
ev.TemplateUsageStats = true
return tx.UpsertTemplateUsageStats(ctx)
}, nil)
}, database.DefaultTXOptions().WithID("db_rollup"))
})
err := eg.Wait()


@ -38,7 +38,7 @@ type wrapUpsertDB struct {
resume <-chan struct{}
}
func (w *wrapUpsertDB) InTx(fn func(database.Store) error, opts *sql.TxOptions) error {
func (w *wrapUpsertDB) InTx(fn func(database.Store) error, opts *database.TxOptions) error {
return w.Store.InTx(func(tx database.Store) error {
return fn(&wrapUpsertDB{Store: tx, resume: w.resume})
}, opts)


@ -33,7 +33,7 @@ func ReadModifyUpdate(db Store, f func(tx Store) error,
) error {
var err error
for retries := 0; retries < maxRetries; retries++ {
err = db.InTx(f, &sql.TxOptions{
err = db.InTx(f, &TxOptions{
Isolation: sql.LevelRepeatableRead,
})
var pqe *pq.Error
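
For reference, a hedged sketch (not part of the diff) of calling this helper; per the tests in the next diff, a pq serialization error (code 40001) triggers a retry, while other errors are returned immediately:

// Sketch only.
func exampleReadModifyUpdate(db database.Store) error {
	return database.ReadModifyUpdate(db, func(tx database.Store) error {
		// read current state, modify it, and write it back using tx;
		// the whole function runs inside a RepeatableRead transaction
		return nil
	})
}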


@ -19,7 +19,7 @@ func TestReadModifyUpdate_OK(t *testing.T) {
mDB := dbmock.NewMockStore(gomock.NewController(t))
mDB.EXPECT().
InTx(gomock.Any(), &sql.TxOptions{Isolation: sql.LevelRepeatableRead}).
InTx(gomock.Any(), &database.TxOptions{Isolation: sql.LevelRepeatableRead}).
Times(1).
Return(nil)
err := database.ReadModifyUpdate(mDB, func(tx database.Store) error {
@ -34,11 +34,11 @@ func TestReadModifyUpdate_RetryOK(t *testing.T) {
mDB := dbmock.NewMockStore(gomock.NewController(t))
firstUpdate := mDB.EXPECT().
InTx(gomock.Any(), &sql.TxOptions{Isolation: sql.LevelRepeatableRead}).
InTx(gomock.Any(), &database.TxOptions{Isolation: sql.LevelRepeatableRead}).
Times(1).
Return(&pq.Error{Code: pq.ErrorCode("40001")})
mDB.EXPECT().
InTx(gomock.Any(), &sql.TxOptions{Isolation: sql.LevelRepeatableRead}).
InTx(gomock.Any(), &database.TxOptions{Isolation: sql.LevelRepeatableRead}).
After(firstUpdate).
Times(1).
Return(nil)
@ -55,7 +55,7 @@ func TestReadModifyUpdate_HardError(t *testing.T) {
mDB := dbmock.NewMockStore(gomock.NewController(t))
mDB.EXPECT().
InTx(gomock.Any(), &sql.TxOptions{Isolation: sql.LevelRepeatableRead}).
InTx(gomock.Any(), &database.TxOptions{Isolation: sql.LevelRepeatableRead}).
Times(1).
Return(xerrors.New("a bad thing happened"))
@ -71,7 +71,7 @@ func TestReadModifyUpdate_TooManyRetries(t *testing.T) {
mDB := dbmock.NewMockStore(gomock.NewController(t))
mDB.EXPECT().
InTx(gomock.Any(), &sql.TxOptions{Isolation: sql.LevelRepeatableRead}).
InTx(gomock.Any(), &database.TxOptions{Isolation: sql.LevelRepeatableRead}).
Times(5).
Return(&pq.Error{Code: pq.ErrorCode("40001")})
err := database.ReadModifyUpdate(mDB, func(tx database.Store) error {


@ -2,7 +2,6 @@ package idpsync_test
import (
"context"
"database/sql"
"encoding/json"
"testing"
@ -324,7 +323,7 @@ func TestNoopNoDiff(t *testing.T) {
// and 'UpdateMemberRoles'.
mDB.EXPECT().InTx(
gomock.Any(), gomock.Any(),
).DoAndReturn(func(f func(database.Store) error, _ *sql.TxOptions) error {
).DoAndReturn(func(f func(database.Store) error, _ *database.TxOptions) error {
err := f(mDB)
return err
})


@ -3,24 +3,19 @@ package promoauth_test
import (
"context"
"fmt"
"io"
"net/http"
"net/http/httptest"
"net/url"
"strings"
"testing"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
ptestutil "github.com/prometheus/client_golang/prometheus/testutil"
io_prometheus_client "github.com/prometheus/client_model/go"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/exp/maps"
"golang.org/x/oauth2"
"github.com/coder/coder/v2/coderd/coderdtest/oidctest"
"github.com/coder/coder/v2/coderd/coderdtest/promhelp"
"github.com/coder/coder/v2/coderd/externalauth"
"github.com/coder/coder/v2/coderd/promoauth"
"github.com/coder/coder/v2/testutil"
@ -34,7 +29,7 @@ func TestInstrument(t *testing.T) {
reg := prometheus.NewRegistry()
t.Cleanup(func() {
if t.Failed() {
t.Log(registryDump(reg))
t.Log(promhelp.RegistryDump(reg))
}
})
@ -46,7 +41,7 @@ func TestInstrument(t *testing.T) {
const metricname = "coderd_oauth2_external_requests_total"
count := func(source string) int {
labels["source"] = source
return counterValue(t, reg, "coderd_oauth2_external_requests_total", labels)
return promhelp.CounterValue(t, reg, "coderd_oauth2_external_requests_total", labels)
}
factory := promoauth.NewFactory(reg)
@ -58,7 +53,7 @@ func TestInstrument(t *testing.T) {
}
// 0 Requests before we start
require.Nil(t, metricValue(t, reg, metricname, labels), "no metrics at start")
require.Nil(t, promhelp.MetricValue(t, reg, metricname, labels), "no metrics at start")
noClientCtx := ctx
// This should never be done, but promoauth should not break the default client
@ -94,7 +89,7 @@ func TestInstrument(t *testing.T) {
// Verify the default client was not broken. This check is added because we
// extend the http.DefaultTransport. If a `.Clone()` is not done, this can be
// mis-used. It is cheap to run this quick check.
snapshot := registryDump(reg)
snapshot := promhelp.RegistryDump(reg)
req, err := http.NewRequestWithContext(ctx, http.MethodGet,
must[*url.URL](t)(idp.IssuerURL().Parse("/.well-known/openid-configuration")).String(), nil)
require.NoError(t, err)
@ -103,7 +98,7 @@ func TestInstrument(t *testing.T) {
require.NoError(t, err)
_ = resp.Body.Close()
require.NoError(t, compare(reg, snapshot), "http default client corrupted")
require.NoError(t, promhelp.Compare(reg, snapshot), "http default client corrupted")
}
func TestGithubRateLimits(t *testing.T) {
@ -214,37 +209,26 @@ func TestGithubRateLimits(t *testing.T) {
}
pass := true
if !c.ExpectNoMetrics {
pass = pass && assert.Equal(t, gaugeValue(t, reg, "coderd_oauth2_external_requests_rate_limit_total", labels), c.Limit, "limit")
pass = pass && assert.Equal(t, gaugeValue(t, reg, "coderd_oauth2_external_requests_rate_limit_remaining", labels), c.Remaining, "remaining")
pass = pass && assert.Equal(t, gaugeValue(t, reg, "coderd_oauth2_external_requests_rate_limit_used", labels), c.Used, "used")
pass = pass && assert.Equal(t, promhelp.GaugeValue(t, reg, "coderd_oauth2_external_requests_rate_limit_total", labels), c.Limit, "limit")
pass = pass && assert.Equal(t, promhelp.GaugeValue(t, reg, "coderd_oauth2_external_requests_rate_limit_remaining", labels), c.Remaining, "remaining")
pass = pass && assert.Equal(t, promhelp.GaugeValue(t, reg, "coderd_oauth2_external_requests_rate_limit_used", labels), c.Used, "used")
if !c.at.IsZero() {
until := c.Reset.Sub(c.at)
// Float accuracy is not great, so we allow a delta of 2
pass = pass && assert.InDelta(t, gaugeValue(t, reg, "coderd_oauth2_external_requests_rate_limit_reset_in_seconds", labels), int(until.Seconds()), 2, "reset in")
pass = pass && assert.InDelta(t, promhelp.GaugeValue(t, reg, "coderd_oauth2_external_requests_rate_limit_reset_in_seconds", labels), int(until.Seconds()), 2, "reset in")
}
} else {
pass = pass && assert.Nil(t, metricValue(t, reg, "coderd_oauth2_external_requests_rate_limit_total", labels), "not exists")
pass = pass && assert.Nil(t, promhelp.MetricValue(t, reg, "coderd_oauth2_external_requests_rate_limit_total", labels), "not exists")
}
// Helpful debugging
if !pass {
t.Log(registryDump(reg))
t.Log(promhelp.RegistryDump(reg))
}
})
}
}
func registryDump(reg *prometheus.Registry) string {
h := promhttp.HandlerFor(reg, promhttp.HandlerOpts{})
rec := httptest.NewRecorder()
req, _ := http.NewRequestWithContext(context.Background(), http.MethodGet, "/", nil)
h.ServeHTTP(rec, req)
resp := rec.Result()
data, _ := io.ReadAll(resp.Body)
_ = resp.Body.Close()
return string(data)
}
func must[V any](t *testing.T) func(v V, err error) V {
return func(v V, err error) V {
t.Helper()
@ -252,39 +236,3 @@ func must[V any](t *testing.T) func(v V, err error) V {
return v
}
}
func gaugeValue(t testing.TB, reg prometheus.Gatherer, metricName string, labels prometheus.Labels) int {
labeled := metricValue(t, reg, metricName, labels)
require.NotNilf(t, labeled, "metric %q with labels %v not found", metricName, labels)
return int(labeled.GetGauge().GetValue())
}
func counterValue(t testing.TB, reg prometheus.Gatherer, metricName string, labels prometheus.Labels) int {
labeled := metricValue(t, reg, metricName, labels)
require.NotNilf(t, labeled, "metric %q with labels %v not found", metricName, labels)
return int(labeled.GetCounter().GetValue())
}
func compare(reg prometheus.Gatherer, compare string) error {
return ptestutil.GatherAndCompare(reg, strings.NewReader(compare))
}
func metricValue(t testing.TB, reg prometheus.Gatherer, metricName string, labels prometheus.Labels) *io_prometheus_client.Metric {
metrics, err := reg.Gather()
require.NoError(t, err)
for _, m := range metrics {
if m.GetName() == metricName {
for _, labeled := range m.GetMetric() {
mLables := make(prometheus.Labels)
for _, v := range labeled.GetLabel() {
mLables[v.GetName()] = v.GetValue()
}
if maps.Equal(mLables, labels) {
return labeled
}
}
}
}
return nil
}


@ -467,7 +467,7 @@ func (api *API) postTemplateByOrganization(rw http.ResponseWriter, r *http.Reque
templateVersionAudit.New = newTemplateVersion
return nil
}, nil)
}, database.DefaultTXOptions().WithID("postTemplate"))
if err != nil {
httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{
Message: "Internal error inserting template.",


@ -735,9 +735,9 @@ func expectDB(t *testing.T, opts ...txExpect) *dbmock.MockStore {
// we expect to be run in a transaction; we use mTx to record the
// "in transaction" calls.
mDB.EXPECT().InTx(
gomock.Any(), gomock.Eq(&sql.TxOptions{Isolation: sql.LevelRepeatableRead}),
gomock.Any(), gomock.Eq(&database.TxOptions{Isolation: sql.LevelRepeatableRead}),
).
DoAndReturn(func(f func(database.Store) error, _ *sql.TxOptions) error {
DoAndReturn(func(f func(database.Store) error, _ *database.TxOptions) error {
err := f(mTx)
return err
})
@ -763,7 +763,7 @@ func withTemplate(mTx *dbmock.MockStore) {
// withInTx runs the given functions on the same db mock.
func withInTx(mTx *dbmock.MockStore) {
mTx.EXPECT().InTx(gomock.Any(), gomock.Any()).Times(1).DoAndReturn(
func(f func(store database.Store) error, _ *sql.TxOptions) error {
func(f func(store database.Store) error, _ *database.TxOptions) error {
return f(mTx)
},
)