chore: implement sane default pagination limit for audit logs (#13676)

* chore: implement sane default pagination limit for audit logs
This commit is contained in:
Steven Masley
2024-06-28 02:38:04 -10:00
committed by GitHub
parent 1a877716ca
commit 3cc86cf62d
10 changed files with 78 additions and 45 deletions

View File

@ -53,8 +53,8 @@ func (api *API) auditLogs(rw http.ResponseWriter, r *http.Request) {
})
return
}
filter.Offset = int32(page.Offset)
filter.Limit = int32(page.Limit)
filter.OffsetOpt = int32(page.Offset)
filter.LimitOpt = int32(page.Limit)
if filter.Username == "me" {
filter.UserID = apiKey.UserID

View File

@ -343,9 +343,6 @@ func TestAuditLogsFilter(t *testing.T) {
t.Parallel()
auditLogs, err := client.AuditLogs(ctx, codersdk.AuditLogsRequest{
SearchQuery: testCase.SearchQuery,
Pagination: codersdk.Pagination{
Limit: 25,
},
})
if testCase.ExpectedError {
require.Error(t, err, "expected error")

View File

@ -263,7 +263,7 @@ func (s *MethodTestSuite) TestAuditLogs() {
_ = dbgen.AuditLog(s.T(), db, database.AuditLog{})
_ = dbgen.AuditLog(s.T(), db, database.AuditLog{})
check.Args(database.GetAuditLogsOffsetParams{
Limit: 10,
LimitOpt: 10,
}).Asserts(rbac.ResourceAuditLog, policy.ActionRead)
}))
}

View File

@ -19,7 +19,7 @@ func TestGenerator(t *testing.T) {
t.Parallel()
db := dbmem.New()
_ = dbgen.AuditLog(t, db, database.AuditLog{})
logs := must(db.GetAuditLogsOffset(context.Background(), database.GetAuditLogsOffsetParams{Limit: 1}))
logs := must(db.GetAuditLogsOffset(context.Background(), database.GetAuditLogsOffsetParams{LimitOpt: 1}))
require.Len(t, logs, 1)
})

View File

@ -1965,12 +1965,17 @@ func (q *FakeQuerier) GetAuditLogsOffset(_ context.Context, arg database.GetAudi
q.mutex.RLock()
defer q.mutex.RUnlock()
logs := make([]database.GetAuditLogsOffsetRow, 0, arg.Limit)
if arg.LimitOpt == 0 {
// Default to 100 is set in the SQL query.
arg.LimitOpt = 100
}
logs := make([]database.GetAuditLogsOffsetRow, 0, arg.LimitOpt)
// q.auditLogs are already sorted by time DESC, so no need to sort after the fact.
for _, alog := range q.auditLogs {
if arg.Offset > 0 {
arg.Offset--
if arg.OffsetOpt > 0 {
arg.OffsetOpt--
continue
}
if arg.OrganizationID != uuid.Nil && arg.OrganizationID != alog.OrganizationID {
@ -2047,7 +2052,7 @@ func (q *FakeQuerier) GetAuditLogsOffset(_ context.Context, arg database.GetAudi
Count: 0,
})
if len(logs) >= int(arg.Limit) {
if len(logs) >= int(arg.LimitOpt) {
break
}
}

View File

@ -516,6 +516,29 @@ func TestDefaultOrg(t *testing.T) {
require.True(t, all[0].IsDefault, "first org should always be default")
}
func TestAuditLogDefaultLimit(t *testing.T) {
	t.Parallel()
	if testing.Short() {
		t.SkipNow()
	}

	// Set up a real database so the actual SQL query (with its
	// COALESCE-based default limit) is exercised, not the fake querier.
	sqlDB := testSQLDB(t)
	err := migrations.Up(sqlDB)
	require.NoError(t, err)
	db := database.New(sqlDB)

	// Seed more audit logs than the query's default page size so the
	// cap is observable.
	const numLogs = 110
	for i := 0; i < numLogs; i++ {
		dbgen.AuditLog(t, db, database.AuditLog{})
	}

	ctx := testutil.Context(t, testutil.WaitShort)
	rows, err := db.GetAuditLogsOffset(ctx, database.GetAuditLogsOffsetParams{})
	require.NoError(t, err)
	// The length should match the default limit of the SQL query.
	// Updating the sql query requires changing the number below to match.
	require.Len(t, rows, 100)
}
// TestReadCustomRoles tests the input params returns the correct set of roles.
func TestReadCustomRoles(t *testing.T) {
t.Parallel()

View File

@ -490,81 +490,82 @@ FROM
WHERE
-- Filter resource_type
CASE
WHEN $3 :: text != '' THEN
resource_type = $3 :: resource_type
WHEN $1 :: text != '' THEN
resource_type = $1 :: resource_type
ELSE true
END
-- Filter resource_id
AND CASE
WHEN $4 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
resource_id = $4
WHEN $2 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
resource_id = $2
ELSE true
END
-- Filter organization_id
AND CASE
WHEN $5 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
audit_logs.organization_id = $5
WHEN $3 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
audit_logs.organization_id = $3
ELSE true
END
-- Filter by resource_target
AND CASE
WHEN $6 :: text != '' THEN
resource_target = $6
WHEN $4 :: text != '' THEN
resource_target = $4
ELSE true
END
-- Filter action
AND CASE
WHEN $7 :: text != '' THEN
action = $7 :: audit_action
WHEN $5 :: text != '' THEN
action = $5 :: audit_action
ELSE true
END
-- Filter by user_id
AND CASE
WHEN $8 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
user_id = $8
WHEN $6 :: uuid != '00000000-0000-0000-0000-000000000000'::uuid THEN
user_id = $6
ELSE true
END
-- Filter by username
AND CASE
WHEN $9 :: text != '' THEN
user_id = (SELECT id FROM users WHERE lower(username) = lower($9) AND deleted = false)
WHEN $7 :: text != '' THEN
user_id = (SELECT id FROM users WHERE lower(username) = lower($7) AND deleted = false)
ELSE true
END
-- Filter by user_email
AND CASE
WHEN $10 :: text != '' THEN
users.email = $10
WHEN $8 :: text != '' THEN
users.email = $8
ELSE true
END
-- Filter by date_from
AND CASE
WHEN $11 :: timestamp with time zone != '0001-01-01 00:00:00Z' THEN
"time" >= $11
WHEN $9 :: timestamp with time zone != '0001-01-01 00:00:00Z' THEN
"time" >= $9
ELSE true
END
-- Filter by date_to
AND CASE
WHEN $12 :: timestamp with time zone != '0001-01-01 00:00:00Z' THEN
"time" <= $12
WHEN $10 :: timestamp with time zone != '0001-01-01 00:00:00Z' THEN
"time" <= $10
ELSE true
END
-- Filter by build_reason
AND CASE
WHEN $13::text != '' THEN
workspace_builds.reason::text = $13
WHEN $11::text != '' THEN
workspace_builds.reason::text = $11
ELSE true
END
ORDER BY
"time" DESC
LIMIT
$1
-- a limit of 0 means "no limit". The audit log table is unbounded
-- in size, and is expected to be quite large. Implement a default
-- limit of 100 to prevent accidental excessively large queries.
COALESCE(NULLIF($13 :: int, 0), 100)
OFFSET
$2
$12
`
type GetAuditLogsOffsetParams struct {
Limit int32 `db:"limit" json:"limit"`
Offset int32 `db:"offset" json:"offset"`
ResourceType string `db:"resource_type" json:"resource_type"`
ResourceID uuid.UUID `db:"resource_id" json:"resource_id"`
OrganizationID uuid.UUID `db:"organization_id" json:"organization_id"`
@ -576,6 +577,8 @@ type GetAuditLogsOffsetParams struct {
DateFrom time.Time `db:"date_from" json:"date_from"`
DateTo time.Time `db:"date_to" json:"date_to"`
BuildReason string `db:"build_reason" json:"build_reason"`
OffsetOpt int32 `db:"offset_opt" json:"offset_opt"`
LimitOpt int32 `db:"limit_opt" json:"limit_opt"`
}
type GetAuditLogsOffsetRow struct {
@ -614,8 +617,6 @@ type GetAuditLogsOffsetRow struct {
// ID.
func (q *sqlQuerier) GetAuditLogsOffset(ctx context.Context, arg GetAuditLogsOffsetParams) ([]GetAuditLogsOffsetRow, error) {
rows, err := q.db.QueryContext(ctx, getAuditLogsOffset,
arg.Limit,
arg.Offset,
arg.ResourceType,
arg.ResourceID,
arg.OrganizationID,
@ -627,6 +628,8 @@ func (q *sqlQuerier) GetAuditLogsOffset(ctx context.Context, arg GetAuditLogsOff
arg.DateFrom,
arg.DateTo,
arg.BuildReason,
arg.OffsetOpt,
arg.LimitOpt,
)
if err != nil {
return nil, err

View File

@ -116,9 +116,12 @@ WHERE
ORDER BY
"time" DESC
LIMIT
$1
-- a limit of 0 means "no limit". The audit log table is unbounded
-- in size, and is expected to be quite large. Implement a default
-- limit of 100 to prevent accidental excessively large queries.
COALESCE(NULLIF(@limit_opt :: int, 0), 100)
OFFSET
$2;
@offset_opt;
-- name: InsertAuditLog :one
INSERT INTO

View File

@ -17,8 +17,10 @@ func parsePagination(w http.ResponseWriter, r *http.Request) (p codersdk.Paginat
parser := httpapi.NewQueryParamParser()
params := codersdk.Pagination{
AfterID: parser.UUID(queryParams, uuid.Nil, "after_id"),
Limit: int(parser.PositiveInt32(queryParams, 0, "limit")),
Offset: int(parser.PositiveInt32(queryParams, 0, "offset")),
// A limit of 0 should be interpreted by the SQL query as "null" or
// "no limit". Do not make this value anything besides 0.
Limit: int(parser.PositiveInt32(queryParams, 0, "limit")),
Offset: int(parser.PositiveInt32(queryParams, 0, "offset")),
}
if len(parser.Errors) > 0 {
httpapi.Write(ctx, w, http.StatusBadRequest, codersdk.Response{