mirror of https://github.com/Infisical/infisical.git synced 2025-03-29 22:02:57 +00:00

Compare commits


1 Commit

Author SHA1 Message Date
3886b42df3 Revert "misc: audit log migration + special handing" 2024-10-08 15:21:56 +08:00
25 changed files with 86 additions and 491 deletions

@@ -1,2 +1 @@
DB_CONNECTION_URI=
AUDIT_LOGS_DB_CONNECTION_URI=

@@ -45,19 +45,13 @@
"test:e2e-coverage": "vitest run --coverage -c vitest.e2e.config.ts",
"generate:component": "tsx ./scripts/create-backend-file.ts",
"generate:schema": "tsx ./scripts/generate-schema-types.ts",
"auditlog-migration:latest": "knex --knexfile ./src/db/auditlog-knexfile.ts --client pg migrate:latest",
"auditlog-migration:up": "knex --knexfile ./src/db/auditlog-knexfile.ts --client pg migrate:up",
"auditlog-migration:down": "knex --knexfile ./src/db/auditlog-knexfile.ts --client pg migrate:down",
"auditlog-migration:list": "knex --knexfile ./src/db/auditlog-knexfile.ts --client pg migrate:list",
"auditlog-migration:status": "knex --knexfile ./src/db/auditlog-knexfile.ts --client pg migrate:status",
"auditlog-migration:rollback": "knex --knexfile ./src/db/auditlog-knexfile.ts migrate:rollback",
"migration:new": "tsx ./scripts/create-migration.ts",
"migration:up": "npm run auditlog-migration:up && knex --knexfile ./src/db/knexfile.ts --client pg migrate:up",
"migration:down": "npm run auditlog-migration:down && knex --knexfile ./src/db/knexfile.ts --client pg migrate:down",
"migration:list": "npm run auditlog-migration:list && knex --knexfile ./src/db/knexfile.ts --client pg migrate:list",
"migration:latest": "npm run auditlog-migration:latest && knex --knexfile ./src/db/knexfile.ts --client pg migrate:latest",
"migration:status": "npm run auditlog-migration:status && knex --knexfile ./src/db/knexfile.ts --client pg migrate:status",
"migration:rollback": "npm run auditlog-migration:rollback && knex --knexfile ./src/db/knexfile.ts migrate:rollback",
"migration:up": "knex --knexfile ./src/db/knexfile.ts --client pg migrate:up",
"migration:down": "knex --knexfile ./src/db/knexfile.ts --client pg migrate:down",
"migration:list": "knex --knexfile ./src/db/knexfile.ts --client pg migrate:list",
"migration:latest": "knex --knexfile ./src/db/knexfile.ts --client pg migrate:latest",
"migration:status": "knex --knexfile ./src/db/knexfile.ts --client pg migrate:status",
"migration:rollback": "knex --knexfile ./src/db/knexfile.ts migrate:rollback",
"seed:new": "tsx ./scripts/create-seed-file.ts",
"seed": "knex --knexfile ./src/db/knexfile.ts --client pg seed:run",
"db:reset": "npm run migration:rollback -- --all && npm run migration:latest"

@@ -90,7 +90,7 @@ const main = async () => {
.whereRaw("table_schema = current_schema()")
.select<{ tableName: string }[]>("table_name as tableName")
.orderBy("table_name")
).filter((el) => !el.tableName.includes("_migrations") && !el.tableName.includes("partitioned_audit_logs_"));
).filter((el) => !el.tableName.includes("_migrations"));
for (let i = 0; i < tables.length; i += 1) {
const { tableName } = tables[i];

@@ -170,9 +170,6 @@ import {
TOrgRoles,
TOrgRolesInsert,
TOrgRolesUpdate,
TPartitionedAuditLogs,
TPartitionedAuditLogsInsert,
TPartitionedAuditLogsUpdate,
TPkiAlerts,
TPkiAlertsInsert,
TPkiAlertsUpdate,
@@ -718,11 +715,6 @@ declare module "knex/types/tables" {
TAuditLogStreamsInsert,
TAuditLogStreamsUpdate
>;
[TableName.PartitionedAuditLog]: KnexOriginal.CompositeTableType<
TPartitionedAuditLogs,
TPartitionedAuditLogsInsert,
TPartitionedAuditLogsUpdate
>;
[TableName.GitAppInstallSession]: KnexOriginal.CompositeTableType<
TGitAppInstallSessions,
TGitAppInstallSessionsInsert,
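
Note: the removed `[TableName.PartitionedAuditLog]` entry belongs to knex's table-type augmentation; `CompositeTableType<Select, Insert, Update>` gives each operation its own row shape. A self-contained sketch of the pattern, with a hypothetical "users" table rather than anything from the diff:

import { Knex } from "knex";

type TUser = { id: string; email: string; createdAt: Date };
type TUserInsert = { email: string }; // id/createdAt are database-generated
type TUserUpdate = Partial<TUserInsert>;

declare module "knex/types/tables" {
  interface Tables {
    users: Knex.CompositeTableType<TUser, TUserInsert, TUserUpdate>;
  }
}

// knex("users").insert({ email: "a@b.c" }) now type-checks;
// knex("users").insert({ id: "x" }) is rejected at compile time.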

@@ -1,75 +0,0 @@
// eslint-disable-next-line
import "ts-node/register";
import dotenv from "dotenv";
import type { Knex } from "knex";
import path from "path";
// Update with your config settings. .
dotenv.config({
path: path.join(__dirname, "../../../.env.migration")
});
dotenv.config({
path: path.join(__dirname, "../../../.env")
});
if (!process.env.AUDIT_LOGS_DB_CONNECTION_URI && !process.env.AUDIT_LOGS_DB_HOST) {
console.info("Dedicated audit log database not found. No further migrations necessary");
process.exit(0);
}
console.info("Executing migration on audit log database...");
export default {
development: {
client: "postgres",
connection: {
connectionString: process.env.AUDIT_LOGS_DB_CONNECTION_URI,
host: process.env.AUDIT_LOGS_DB_HOST,
port: process.env.AUDIT_LOGS_DB_PORT,
user: process.env.AUDIT_LOGS_DB_USER,
database: process.env.AUDIT_LOGS_DB_NAME,
password: process.env.AUDIT_LOGS_DB_PASSWORD,
ssl: process.env.AUDIT_LOGS_DB_ROOT_CERT
? {
rejectUnauthorized: true,
ca: Buffer.from(process.env.AUDIT_LOGS_DB_ROOT_CERT, "base64").toString("ascii")
}
: false
},
pool: {
min: 2,
max: 10
},
seeds: {
directory: "./seeds"
},
migrations: {
tableName: "infisical_migrations"
}
},
production: {
client: "postgres",
connection: {
connectionString: process.env.AUDIT_LOGS_DB_CONNECTION_URI,
host: process.env.AUDIT_LOGS_DB_HOST,
port: process.env.AUDIT_LOGS_DB_PORT,
user: process.env.AUDIT_LOGS_DB_USER,
database: process.env.AUDIT_LOGS_DB_NAME,
password: process.env.AUDIT_LOGS_DB_PASSWORD,
ssl: process.env.AUDIT_LOGS_DB_ROOT_CERT
? {
rejectUnauthorized: true,
ca: Buffer.from(process.env.AUDIT_LOGS_DB_ROOT_CERT, "base64").toString("ascii")
}
: false
},
pool: {
min: 2,
max: 10
},
migrations: {
tableName: "infisical_migrations"
}
}
} as Knex.Config;
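
Note: this deleted knexfile exports an environment map guarded by an early `process.exit(0)`, so the auditlog-migration:* scripts were no-ops unless a dedicated audit-log database was configured. A sketch of how such a file is consumed — the trailing `as Knex.Config` annotation is loose, since the export really holds an environment map, which the knex CLI accepts and a programmatic caller can index the same way:

// Equivalent CLI call: knex --knexfile ./src/db/auditlog-knexfile.ts migrate:latest
import knex, { Knex } from "knex";
import configs from "./auditlog-knexfile";

const run = async () => {
  const env = process.env.NODE_ENV === "production" ? "production" : "development";
  // index the environment map the way the knex CLI does
  const db = knex((configs as unknown as Record<string, Knex.Config>)[env]);
  await db.migrate.latest();
  await db.destroy();
};

void run();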

@@ -1,2 +1,2 @@
export type { TDbClient } from "./instance";
export { initAuditLogDbConnection, initDbConnection } from "./instance";
export { initDbConnection } from "./instance";

@@ -70,45 +70,3 @@ export const initDbConnection = ({
return db;
};
export const initAuditLogDbConnection = ({
dbConnectionUri,
dbRootCert
}: {
dbConnectionUri: string;
dbRootCert?: string;
}) => {
// akhilmhdh: the default Knex is knex.Knex<any, any[]>. but when assigned with knex({<config>}) the value is knex.Knex<any, unknown[]>
// this was causing issue with files like `snapshot-dal` `findRecursivelySnapshots` this i am explicitly putting the any and unknown[]
// eslint-disable-next-line
const db: Knex<any, unknown[]> = knex({
client: "pg",
connection: {
connectionString: dbConnectionUri,
host: process.env.AUDIT_LOGS_DB_HOST,
// @ts-expect-error I have no clue why only for the port there is a type error
// eslint-disable-next-line
port: process.env.AUDIT_LOGS_DB_PORT,
user: process.env.AUDIT_LOGS_DB_USER,
database: process.env.AUDIT_LOGS_DB_NAME,
password: process.env.AUDIT_LOGS_DB_PASSWORD,
ssl: dbRootCert
? {
rejectUnauthorized: true,
ca: Buffer.from(dbRootCert, "base64").toString("ascii")
}
: false
}
});
// we add these overrides so that auditLogDb and the primary DB are interchangeable
db.primaryNode = () => {
return db;
};
db.replicaNode = () => {
return db;
};
return db;
};
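
Note: the deleted helper's primaryNode/replicaNode stubs are what let call sites such as `(tx || db.replicaNode())(...)` treat the single-connection audit-log client and the replication-aware primary client interchangeably. A simplified sketch of that trick (types illustrative, not from the diff):

import knex, { Knex } from "knex";

type TReplicatedKnex = Knex & { primaryNode: () => Knex; replicaNode: () => Knex };

export const makeSingleNodeClient = (connection: string): TReplicatedKnex => {
  const db = knex({ client: "pg", connection }) as TReplicatedKnex;
  // with no replicas, both "nodes" resolve to the one connection
  db.primaryNode = () => db;
  db.replicaNode = () => db;
  return db;
};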

@@ -1,48 +0,0 @@
import { Knex } from "knex";
import { TableName } from "../schemas";
export async function up(knex: Knex): Promise<void> {
if (await knex.schema.hasTable(TableName.AuditLog)) {
const doesProjectIdExist = await knex.schema.hasColumn(TableName.AuditLog, "projectId");
const doesOrgIdExist = await knex.schema.hasColumn(TableName.AuditLog, "orgId");
const doesProjectNameExist = await knex.schema.hasColumn(TableName.AuditLog, "projectName");
await knex.schema.alterTable(TableName.AuditLog, (t) => {
if (doesOrgIdExist) {
t.dropForeign("orgId");
}
if (doesProjectIdExist) {
t.dropForeign("projectId");
}
// add normalized field
if (!doesProjectNameExist) {
t.string("projectName");
}
});
}
}
export async function down(knex: Knex): Promise<void> {
const doesProjectIdExist = await knex.schema.hasColumn(TableName.AuditLog, "projectId");
const doesOrgIdExist = await knex.schema.hasColumn(TableName.AuditLog, "orgId");
const doesProjectNameExist = await knex.schema.hasColumn(TableName.AuditLog, "projectName");
if (await knex.schema.hasTable(TableName.AuditLog)) {
await knex.schema.alterTable(TableName.AuditLog, (t) => {
if (doesOrgIdExist) {
t.foreign("orgId").references("id").inTable(TableName.Organization).onDelete("CASCADE");
}
if (doesProjectIdExist) {
t.foreign("projectId").references("id").inTable(TableName.Project).onDelete("CASCADE");
}
// remove normalized field
if (doesProjectNameExist) {
t.dropColumn("projectName");
}
});
}
}
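
Note: this deleted migration drops the audit_logs foreign keys — presumably so audit rows can outlive their org/project and move to a dedicated database — and denormalizes projectName in their place, guarding every step with hasTable/hasColumn so it is idempotent. The guard pattern generalizes; a minimal sketch:

import { Knex } from "knex";

export async function addColumnIfMissing(knex: Knex, table: string, column: string) {
  if (!(await knex.schema.hasTable(table))) return;
  if (await knex.schema.hasColumn(table, column)) return; // already applied
  await knex.schema.alterTable(table, (t) => {
    t.string(column); // add the denormalized field only when absent
  });
}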

@@ -1,164 +0,0 @@
import { Knex } from "knex";
import { TableName } from "../schemas";
const formatPartitionDate = (date: Date) => {
const year = date.getFullYear();
const month = String(date.getMonth() + 1).padStart(2, "0");
const day = String(date.getDate()).padStart(2, "0");
return `${year}-${month}-${day}`;
};
const createAuditLogPartition = async (knex: Knex, startDate: Date, endDate: Date) => {
const startDateStr = formatPartitionDate(startDate);
const endDateStr = formatPartitionDate(endDate);
const partitionName = `${TableName.PartitionedAuditLog}_${startDateStr.replace(/-/g, "")}_${endDateStr.replace(
/-/g,
""
)}`;
await knex.schema.raw(
`CREATE TABLE ${partitionName} PARTITION OF ${TableName.PartitionedAuditLog} FOR VALUES FROM ('${startDateStr}') TO ('${endDateStr}')`
);
};
const isUsingDedicatedAuditLogDb = Boolean(process.env.AUDIT_LOGS_DB_CONNECTION_URI);
export async function up(knex: Knex): Promise<void> {
if (!isUsingDedicatedAuditLogDb && (await knex.schema.hasTable(TableName.AuditLog))) {
console.info("Dropping primary key of Audit Log table...");
await knex.schema.alterTable(TableName.AuditLog, (t) => {
// remove existing keys
t.dropPrimary();
});
}
// create a new partitioned table for audit logs
if (!(await knex.schema.hasTable(TableName.PartitionedAuditLog))) {
const createTableSql = knex.schema
.createTable(TableName.PartitionedAuditLog, (t) => {
t.uuid("id").defaultTo(knex.fn.uuid());
t.string("actor").notNullable();
t.jsonb("actorMetadata").notNullable();
t.string("ipAddress");
t.string("eventType").notNullable();
t.jsonb("eventMetadata");
t.string("userAgent");
t.string("userAgentType");
t.datetime("expiresAt");
t.timestamps(true, true, true);
t.uuid("orgId");
t.string("projectId");
t.string("projectName");
t.primary(["id", "createdAt"]);
})
.toString();
console.info("Creating partition table...");
await knex.schema.raw(`
${createTableSql} PARTITION BY RANGE ("createdAt");
`);
console.log("Adding indices...");
await knex.schema.alterTable(TableName.PartitionedAuditLog, (t) => {
t.index(["projectId", "createdAt"]);
t.index(["orgId", "createdAt"]);
t.index("expiresAt");
t.index("orgId");
t.index("projectId");
});
console.log("Adding GIN indices...");
await knex.raw(
`CREATE INDEX IF NOT EXISTS "audit_logs_actorMetadata_idx" ON ${TableName.PartitionedAuditLog} USING gin("actorMetadata" jsonb_path_ops)`
);
console.log("GIN index for actorMetadata done");
await knex.raw(
`CREATE INDEX IF NOT EXISTS "audit_logs_eventMetadata_idx" ON ${TableName.PartitionedAuditLog} USING gin("eventMetadata" jsonb_path_ops)`
);
console.log("GIN index for eventMetadata done");
// create default partition
console.log("Creating default partition...");
await knex.schema.raw(
`CREATE TABLE ${TableName.PartitionedAuditLog}_default PARTITION OF ${TableName.PartitionedAuditLog} DEFAULT`
);
const nextDate = new Date();
nextDate.setDate(nextDate.getDate() + 1);
const nextDateStr = formatPartitionDate(nextDate);
// attach existing audit log table as a partition ONLY if using the same DB
if (!isUsingDedicatedAuditLogDb) {
console.log("Attaching existing audit log table as a partition...");
await knex.schema.raw(`
ALTER TABLE ${TableName.AuditLog} ADD CONSTRAINT audit_log_old
CHECK ( "createdAt" < DATE '${nextDateStr}' );
ALTER TABLE ${TableName.PartitionedAuditLog} ATTACH PARTITION ${TableName.AuditLog}
FOR VALUES FROM (MINVALUE) TO ('${nextDateStr}' );
`);
}
// create partition from now until end of month
console.log("Creating audit log partitions ahead of time... next date:", nextDateStr);
await createAuditLogPartition(knex, nextDate, new Date(nextDate.getFullYear(), nextDate.getMonth() + 1));
// create partitions 4 years ahead
const partitionMonths = 4 * 12;
const partitionPromises: Promise<void>[] = [];
for (let x = 1; x <= partitionMonths; x += 1) {
partitionPromises.push(
createAuditLogPartition(
knex,
new Date(nextDate.getFullYear(), nextDate.getMonth() + x, 1),
new Date(nextDate.getFullYear(), nextDate.getMonth() + (x + 1), 1)
)
);
}
await Promise.all(partitionPromises);
console.log("Partition migration complete");
}
}
export async function down(knex: Knex): Promise<void> {
const partitionSearchResult = await knex.raw(`
SELECT inhrelid::regclass::text
FROM pg_inherits
WHERE inhparent::regclass::text = '${TableName.PartitionedAuditLog}'
AND inhrelid::regclass::text = '${TableName.AuditLog}'
`);
const isAuditLogAPartition = partitionSearchResult.rows.length > 0;
if (isAuditLogAPartition) {
// detach audit log from partition
console.log("Detaching original audit log table from new partition table...");
await knex.schema.raw(`
ALTER TABLE ${TableName.PartitionedAuditLog} DETACH PARTITION ${TableName.AuditLog};
ALTER TABLE ${TableName.AuditLog} DROP CONSTRAINT audit_log_old;
`);
// revert audit log modifications
console.log("Reverting changes made to the audit log table...");
if (await knex.schema.hasTable(TableName.AuditLog)) {
await knex.schema.alterTable(TableName.AuditLog, (t) => {
// we drop this first because adding to the partition results in a new primary key
t.dropPrimary();
// add back the original keys of the audit logs table
t.primary(["id"], {
constraintName: "audit_logs_pkey"
});
});
}
}
await knex.schema.dropTableIfExists(TableName.PartitionedAuditLog);
console.log("Partition rollback complete");
}
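
Note: two Postgres details carry this deleted migration. Every primary key on a partitioned table must include the partition key, which is why `t.primary(["id", "createdAt"])` replaces the plain id key dropped above; and attaching the legacy table with a matching CHECK constraint lets ATTACH PARTITION skip the full validation scan. A small sketch of the ahead-of-time monthly partition bounds (illustrative):

const monthlyBounds = (from: Date, count: number): Array<[Date, Date]> =>
  Array.from({ length: count }, (_, i): [Date, Date] => [
    new Date(from.getFullYear(), from.getMonth() + i, 1),
    new Date(from.getFullYear(), from.getMonth() + i + 1, 1) // Date rolls the year over
  ]);

// usage with the helper defined in the migration above:
//   for (const [start, end] of monthlyBounds(nextDate, 48)) {
//     await createAuditLogPartition(knex, start, end);
//   }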

@@ -55,7 +55,6 @@ export * from "./org-bots";
export * from "./org-memberships";
export * from "./org-roles";
export * from "./organizations";
export * from "./partitioned-audit-logs";
export * from "./pki-alerts";
export * from "./pki-collection-items";
export * from "./pki-collections";

@@ -90,7 +90,6 @@ export enum TableName {
OidcConfig = "oidc_configs",
LdapGroupMap = "ldap_group_maps",
AuditLog = "audit_logs",
PartitionedAuditLog = "partitioned_audit_logs",
AuditLogStream = "audit_log_streams",
GitAppInstallSession = "git_app_install_sessions",
GitAppOrg = "git_app_org",

@@ -1,29 +0,0 @@
// Code generated by automation script, DO NOT EDIT.
// Automated by pulling database and generating zod schema
// To update. Just run npm run generate:schema
// Written by akhilmhdh.
import { z } from "zod";
import { TImmutableDBKeys } from "./models";
export const PartitionedAuditLogsSchema = z.object({
id: z.string().uuid(),
actor: z.string(),
actorMetadata: z.unknown(),
ipAddress: z.string().nullable().optional(),
eventType: z.string(),
eventMetadata: z.unknown().nullable().optional(),
userAgent: z.string().nullable().optional(),
userAgentType: z.string().nullable().optional(),
expiresAt: z.date().nullable().optional(),
createdAt: z.date(),
updatedAt: z.date(),
orgId: z.string().uuid().nullable().optional(),
projectId: z.string().nullable().optional(),
projectName: z.string().nullable().optional()
});
export type TPartitionedAuditLogs = z.infer<typeof PartitionedAuditLogsSchema>;
export type TPartitionedAuditLogsInsert = Omit<z.input<typeof PartitionedAuditLogsSchema>, TImmutableDBKeys>;
export type TPartitionedAuditLogsUpdate = Partial<Omit<z.input<typeof PartitionedAuditLogsSchema>, TImmutableDBKeys>>;
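
Note: these generated zod schemas pair runtime validation with derived TS types (z.infer / z.input), which is how the DAL further down can call AuditLogsSchema.parse(doc) on raw rows. A trimmed-down sketch of the pattern:

import { z } from "zod";

const MiniAuditLogSchema = z.object({
  id: z.string().uuid(),
  eventType: z.string(),
  createdAt: z.date()
});

type TMiniAuditLog = z.infer<typeof MiniAuditLogSchema>;

const row: unknown = { id: "6f9619ff-8b86-4011-b42d-00c04fc964ff", eventType: "login", createdAt: new Date() };
const log: TMiniAuditLog = MiniAuditLogSchema.parse(row); // throws ZodError on shape mismatch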

@@ -1,6 +1,6 @@
import { z } from "zod";
import { PartitionedAuditLogsSchema, SecretSnapshotsSchema } from "@app/db/schemas";
import { AuditLogsSchema, SecretSnapshotsSchema } from "@app/db/schemas";
import { EventType, UserAgentType } from "@app/ee/services/audit-log/audit-log-types";
import { AUDIT_LOGS, PROJECTS } from "@app/lib/api-docs";
import { getLastMidnightDateISO, removeTrailingSlash } from "@app/lib/fn";
@@ -120,7 +120,7 @@ export const registerProjectRouter = async (server: FastifyZodProvider) => {
}),
response: {
200: z.object({
auditLogs: PartitionedAuditLogsSchema.omit({
auditLogs: AuditLogsSchema.omit({
eventMetadata: true,
eventType: true,
actor: true,

@@ -1,9 +1,8 @@
// weird commonjs-related error in the CI requires us to do the import like this
import knex from "knex";
import { Knex } from "knex";
import { TDbClient } from "@app/db";
import { TableName } from "@app/db/schemas";
import { DatabaseError, GatewayTimeoutError } from "@app/lib/errors";
import { AuditLogsSchema, TableName } from "@app/db/schemas";
import { DatabaseError } from "@app/lib/errors";
import { ormify, selectAllTableCols } from "@app/lib/knex";
import { logger } from "@app/lib/logger";
import { QueueName } from "@app/queue";
@@ -26,7 +25,7 @@ type TFindQuery = {
};
export const auditLogDALFactory = (db: TDbClient) => {
const auditLogOrm = ormify(db, TableName.PartitionedAuditLog);
const auditLogOrm = ormify(db, TableName.AuditLog);
const find = async (
{
@@ -47,7 +46,7 @@ export const auditLogDALFactory = (db: TDbClient) => {
eventType?: EventType[];
eventMetadata?: Record<string, string>;
},
tx?: knex.Knex
tx?: Knex
) => {
if (!orgId && !projectId) {
throw new Error("Either orgId or projectId must be provided");
@@ -55,13 +54,14 @@ export const auditLogDALFactory = (db: TDbClient) => {
try {
// Find statements
const sqlQuery = (tx || db.replicaNode())(TableName.PartitionedAuditLog)
const sqlQuery = (tx || db.replicaNode())(TableName.AuditLog)
.leftJoin(TableName.Project, `${TableName.AuditLog}.projectId`, `${TableName.Project}.id`)
// eslint-disable-next-line func-names
.where(function () {
if (orgId) {
void this.where(`${TableName.PartitionedAuditLog}.orgId`, orgId);
void this.where(`${TableName.Project}.orgId`, orgId).orWhere(`${TableName.AuditLog}.orgId`, orgId);
} else if (projectId) {
void this.where(`${TableName.PartitionedAuditLog}.projectId`, projectId);
void this.where(`${TableName.AuditLog}.projectId`, projectId);
}
});
@@ -71,20 +71,24 @@ export const auditLogDALFactory = (db: TDbClient) => {
// Select statements
void sqlQuery
.select(selectAllTableCols(TableName.PartitionedAuditLog))
.select(selectAllTableCols(TableName.AuditLog))
.select(
db.ref("name").withSchema(TableName.Project).as("projectName"),
db.ref("slug").withSchema(TableName.Project).as("projectSlug")
)
.limit(limit)
.offset(offset)
.orderBy(`${TableName.PartitionedAuditLog}.createdAt`, "desc");
.orderBy(`${TableName.AuditLog}.createdAt`, "desc");
// Special case: Filter by actor ID
if (actorId) {
void sqlQuery.whereRaw(`"actorMetadata" @> jsonb_build_object('userId', ?::text)`, [actorId]);
void sqlQuery.whereRaw(`"actorMetadata"->>'userId' = ?`, [actorId]);
}
// Special case: Filter by key/value pairs in eventMetadata field
if (eventMetadata && Object.keys(eventMetadata).length) {
Object.entries(eventMetadata).forEach(([key, value]) => {
void sqlQuery.whereRaw(`"eventMetadata" @> jsonb_build_object(?::text, ?::text)`, [key, value]);
void sqlQuery.whereRaw(`"eventMetadata"->>'${key}' = ?`, [value]);
});
}
@@ -100,30 +104,35 @@ export const auditLogDALFactory = (db: TDbClient) => {
// Filter by date range
if (startDate) {
void sqlQuery.where(`${TableName.PartitionedAuditLog}.createdAt`, ">=", startDate);
void sqlQuery.where(`${TableName.AuditLog}.createdAt`, ">=", startDate);
}
if (endDate) {
void sqlQuery.where(`${TableName.PartitionedAuditLog}.createdAt`, "<=", endDate);
void sqlQuery.where(`${TableName.AuditLog}.createdAt`, "<=", endDate);
}
const docs = await sqlQuery;
// we timeout long running queries to prevent DB resource issues (2 minutes)
const docs = await sqlQuery.timeout(1000 * 120);
return docs.map((doc) => {
// Our type system refuses to acknowledge that the project name and slug are present in the doc, due to the disjointed query structure above.
// This is a quick and dirty way to get around the types.
const projectDoc = doc as unknown as { projectName: string; projectSlug: string };
return docs;
return {
...AuditLogsSchema.parse(doc),
...(projectDoc?.projectSlug && {
project: {
name: projectDoc.projectName,
slug: projectDoc.projectSlug
}
})
};
});
} catch (error) {
if (error instanceof knex.KnexTimeoutError) {
throw new GatewayTimeoutError({
error,
message: "Failed to fetch audit logs due to timeout. Add more search filters."
});
}
throw new DatabaseError({ error });
}
};
// delete all audit log that have expired
const pruneAuditLog = async (tx?: knex.Knex) => {
const pruneAuditLog = async (tx?: Knex) => {
const AUDIT_LOG_PRUNE_BATCH_SIZE = 10000;
const MAX_RETRY_ON_FAILURE = 3;
@@ -135,13 +144,12 @@ export const auditLogDALFactory = (db: TDbClient) => {
logger.info(`${QueueName.DailyResourceCleanUp}: audit log started`);
do {
try {
const findExpiredLogSubQuery = (tx || db)(TableName.PartitionedAuditLog)
const findExpiredLogSubQuery = (tx || db)(TableName.AuditLog)
.where("expiresAt", "<", today)
.select("id")
.limit(AUDIT_LOG_PRUNE_BATCH_SIZE);
// eslint-disable-next-line no-await-in-loop
deletedAuditLogIds = await (tx || db)(TableName.PartitionedAuditLog)
deletedAuditLogIds = await (tx || db)(TableName.AuditLog)
.whereIn("id", findExpiredLogSubQuery)
.del()
.returning("id");
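
Note: the filter rewrite in the hunks above swaps jsonb containment (`@>`), which the dropped GIN jsonb_path_ops indexes could serve, for `->>` key extraction, which only uses an index if a matching expression index exists. Both styles side by side, as knex helpers (the expression index is an assumption, not in the diff):

import { Knex } from "knex";

// containment: can be served by the GIN jsonb_path_ops index from the dropped migration
export const byActorContainment = (qb: Knex.QueryBuilder, actorId: string) =>
  qb.whereRaw(`"actorMetadata" @> jsonb_build_object('userId', ?::text)`, [actorId]);

// key extraction: equivalent result, but only index-assisted given an expression
// index such as CREATE INDEX ... ON audit_logs ((("actorMetadata"->>'userId')))
export const byActorExtraction = (qb: Knex.QueryBuilder, actorId: string) =>
  qb.whereRaw(`"actorMetadata"->>'userId' = ?`, [actorId]);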

@@ -74,7 +74,6 @@ export const auditLogQueueServiceFactory = ({
actorMetadata: actor.metadata,
userAgent,
projectId,
projectName: project?.name,
ipAddress,
orgId,
eventType: event.type,

@@ -34,12 +34,6 @@ const envSchema = z
DB_CONNECTION_URI: zpStr(z.string().describe("Postgres database connection string")).default(
`postgresql://${process.env.DB_USER}:${process.env.DB_PASSWORD}@${process.env.DB_HOST}:${process.env.DB_PORT}/${process.env.DB_NAME}`
),
AUDIT_LOGS_DB_CONNECTION_URI: zpStr(
z.string().describe("Postgres database connection string for Audit logs").optional()
),
AUDIT_LOGS_DB_ROOT_CERT: zpStr(
z.string().describe("Postgres database base64-encoded CA cert for Audit logs").optional()
),
MAX_LEASE_LIMIT: z.coerce.number().default(10000),
DB_ROOT_CERT: zpStr(z.string().describe("Postgres database base64-encoded CA cert").optional()),
DB_HOST: zpStr(z.string().describe("Postgres database host").optional()),

@@ -23,18 +23,6 @@ export class InternalServerError extends Error {
}
}
export class GatewayTimeoutError extends Error {
name: string;
error: unknown;
constructor({ name, error, message }: { message?: string; name?: string; error?: unknown }) {
super(message || "Timeout error");
this.name = name || "GatewayTimeoutError";
this.error = error;
}
}
export class UnauthorizedError extends Error {
name: string;

@@ -1,7 +1,7 @@
import dotenv from "dotenv";
import path from "path";
import { initAuditLogDbConnection, initDbConnection } from "./db";
import { initDbConnection } from "./db";
import { keyStoreFactory } from "./keystore/keystore";
import { formatSmtpConfig, initEnvConfig, IS_PACKAGED } from "./lib/config/env";
import { isMigrationMode } from "./lib/fn";
@@ -25,13 +25,6 @@ const run = async () => {
}))
});
const auditLogDb = appCfg.AUDIT_LOGS_DB_CONNECTION_URI
? initAuditLogDbConnection({
dbConnectionUri: appCfg.AUDIT_LOGS_DB_CONNECTION_URI,
dbRootCert: appCfg.AUDIT_LOGS_DB_ROOT_CERT
})
: undefined;
// Case: App is running in packaged mode (binary), and migration mode is enabled.
// Run the migrations and exit the process after completion.
if (IS_PACKAGED && isMigrationMode()) {
@@ -53,7 +46,7 @@ const run = async () => {
const queue = queueServiceFactory(appCfg.REDIS_URL);
const keyStore = keyStoreFactory(appCfg.REDIS_URL);
const server = await main({ db, auditLogDb, smtp, logger, queue, keyStore });
const server = await main({ db, smtp, logger, queue, keyStore });
const bootstrap = await bootstrapCheck({ db });
// eslint-disable-next-line

@@ -30,7 +30,6 @@ import { fastifySwagger } from "./plugins/swagger";
import { registerRoutes } from "./routes";
type TMain = {
auditLogDb?: Knex;
db: Knex;
smtp: TSmtpService;
logger?: Logger;
@@ -39,7 +38,7 @@
};
// Run the server!
export const main = async ({ db, auditLogDb, smtp, logger, queue, keyStore }: TMain) => {
export const main = async ({ db, smtp, logger, queue, keyStore }: TMain) => {
const appCfg = getConfig();
const server = fastify({
logger: appCfg.NODE_ENV === "test" ? false : logger,
@@ -95,7 +94,7 @@ export const main = async ({ db, auditLogDb, smtp, logger, queue, keyStore }: TM
await server.register(maintenanceMode);
await server.register(registerRoutes, { smtp, queue, db, auditLogDb, keyStore });
await server.register(registerRoutes, { smtp, queue, db, keyStore });
if (appCfg.isProductionMode) {
await server.register(registerExternalNextjs, {

@@ -7,7 +7,6 @@ import {
BadRequestError,
DatabaseError,
ForbiddenRequestError,
GatewayTimeoutError,
InternalServerError,
NotFoundError,
ScimRequestError,
@@ -26,8 +25,7 @@ enum HttpStatusCodes {
Unauthorized = 401,
Forbidden = 403,
// eslint-disable-next-line @typescript-eslint/no-shadow
InternalServerError = 500,
GatewayTimeout = 504
InternalServerError = 500
}
export const fastifyErrHandler = fastifyPlugin(async (server: FastifyZodProvider) => {
@@ -49,10 +47,6 @@ export const fastifyErrHandler = fastifyPlugin(async (server: FastifyZodProvider
void res
.status(HttpStatusCodes.InternalServerError)
.send({ statusCode: HttpStatusCodes.InternalServerError, message: "Something went wrong", error: error.name });
} else if (error instanceof GatewayTimeoutError) {
void res
.status(HttpStatusCodes.GatewayTimeout)
.send({ statusCode: HttpStatusCodes.GatewayTimeout, message: error.message, error: error.name });
} else if (error instanceof ZodError) {
void res
.status(HttpStatusCodes.Unauthorized)
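
Note: the handler maps custom error classes to HTTP statuses with instanceof chains, so deleting GatewayTimeoutError also deletes its 504 branch and timeouts fall through to the generic 500. A stripped-down sketch of the mapping pattern:

class GatewayTimeoutError extends Error {
  constructor(message = "Timeout error") {
    super(message);
    this.name = "GatewayTimeoutError";
  }
}

const statusFor = (error: Error): number =>
  error instanceof GatewayTimeoutError ? 504 : 500; // everything else falls through to 500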

@@ -214,12 +214,11 @@ import { registerV3Routes } from "./v3";
export const registerRoutes = async (
server: FastifyZodProvider,
{
auditLogDb,
db,
smtp: smtpService,
queue: queueService,
keyStore
}: { auditLogDb?: Knex; db: Knex; smtp: TSmtpService; queue: TQueueServiceFactory; keyStore: TKeyStoreFactory }
}: { db: Knex; smtp: TSmtpService; queue: TQueueServiceFactory; keyStore: TKeyStoreFactory }
) => {
const appCfg = getConfig();
if (!appCfg.DISABLE_SECRET_SCANNING) {
@@ -284,7 +283,7 @@ export const registerRoutes = async (
const identityOidcAuthDAL = identityOidcAuthDALFactory(db);
const identityAzureAuthDAL = identityAzureAuthDALFactory(db);
const auditLogDAL = auditLogDALFactory(auditLogDb ?? db);
const auditLogDAL = auditLogDALFactory(db);
const auditLogStreamDAL = auditLogStreamDALFactory(db);
const trustedIpDAL = trustedIpDALFactory(db);
const telemetryDAL = telemetryDALFactory(db);

@@ -1,12 +1,12 @@
import { z } from "zod";
import {
AuditLogsSchema,
GroupsSchema,
IncidentContactsSchema,
OrganizationsSchema,
OrgMembershipsSchema,
OrgRolesSchema,
PartitionedAuditLogsSchema,
UsersSchema
} from "@app/db/schemas";
import { EventType, UserAgentType } from "@app/ee/services/audit-log/audit-log-types";
@@ -117,7 +117,7 @@ export const registerOrgRouter = async (server: FastifyZodProvider) => {
response: {
200: z.object({
auditLogs: PartitionedAuditLogsSchema.omit({
auditLogs: AuditLogsSchema.omit({
eventMetadata: true,
eventType: true,
actor: true,
@@ -125,6 +125,12 @@ export const registerOrgRouter = async (server: FastifyZodProvider) => {
})
.merge(
z.object({
project: z
.object({
name: z.string(),
slug: z.string()
})
.optional(),
event: z.object({
type: z.string(),
metadata: z.any()
@@ -162,7 +168,6 @@ export const registerOrgRouter = async (server: FastifyZodProvider) => {
actorAuthMethod: req.permission.authMethod,
actor: req.permission.type
});
return { auditLogs };
}
});
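
Note: the response schema composes `.omit()` (strip raw columns) with `.merge()` (add the reshaped project and event objects). A sketch of the composition with a toy base schema:

import { z } from "zod";

const Base = z.object({ id: z.string(), eventType: z.string(), eventMetadata: z.any() });

const Response = Base.omit({ eventType: true, eventMetadata: true }).merge(
  z.object({
    project: z.object({ name: z.string(), slug: z.string() }).optional(),
    event: z.object({ type: z.string(), metadata: z.any() })
  })
);

type TResponse = z.infer<typeof Response>; // { id: string; project?: {...}; event: {...} }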

@@ -1,7 +1,5 @@
import { useInfiniteQuery, UseInfiniteQueryOptions, useQuery } from "@tanstack/react-query";
import { AxiosError } from "axios";
import { createNotification } from "@app/components/notifications";
import { apiRequest } from "@app/config/request";
import { Actor, AuditLog, TGetAuditLogsFilter } from "./types";
@@ -30,37 +28,27 @@ export const useGetAuditLogs = (
return useInfiniteQuery({
queryKey: auditLogKeys.getAuditLogs(projectId, filters),
queryFn: async ({ pageParam }) => {
try {
const { data } = await apiRequest.get<{ auditLogs: AuditLog[] }>(
"/api/v1/organization/audit-logs",
{
params: {
...filters,
offset: pageParam,
startDate: filters?.startDate?.toISOString(),
endDate: filters?.endDate?.toISOString(),
...(filters.eventMetadata && Object.keys(filters.eventMetadata).length
? {
eventMetadata: Object.entries(filters.eventMetadata)
.map(([key, value]) => `${key}=${value}`)
.join(",")
}
: {}),
...(filters.eventType?.length ? { eventType: filters.eventType.join(",") } : {}),
...(projectId ? { projectId } : {})
}
const { data } = await apiRequest.get<{ auditLogs: AuditLog[] }>(
"/api/v1/organization/audit-logs",
{
params: {
...filters,
offset: pageParam,
startDate: filters?.startDate?.toISOString(),
endDate: filters?.endDate?.toISOString(),
...(filters.eventMetadata && Object.keys(filters.eventMetadata).length
? {
eventMetadata: Object.entries(filters.eventMetadata)
.map(([key, value]) => `${key}=${value}`)
.join(",")
}
: {}),
...(filters.eventType?.length ? { eventType: filters.eventType.join(",") } : {}),
...(projectId ? { projectId } : {})
}
);
return data.auditLogs;
} catch (error) {
if (error instanceof AxiosError) {
createNotification({
type: "error",
text: error.response?.data.message
});
}
return [];
}
);
return data.auditLogs;
},
getNextPageParam: (lastPage, pages) =>
lastPage.length !== 0 ? pages.length * filters.limit : undefined,
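
Note: getNextPageParam derives the next offset as pages-fetched × limit and returns undefined on an empty page to stop fetching; removing the try/catch above also stops failed requests from being swallowed as empty pages, letting react-query surface the error. A minimal sketch of the offset math (the limit of 20 is an assumed value):

const limit = 20;
const getNextPageParam = (lastPage: unknown[], pages: unknown[][]) =>
  lastPage.length !== 0 ? pages.length * limit : undefined;
// page 0 -> offset 0, page 1 -> offset 20, ...; an empty page ends fetching.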

@@ -886,5 +886,8 @@ export type AuditLog = {
userAgentType: UserAgentType;
createdAt: string;
updatedAt: string;
projectName?: string;
project?: {
name: string;
slug: string;
};
};

@@ -573,7 +573,7 @@ export const LogsTableRow = ({ auditLog, isOrgAuditLogs, showActorColumn }: Prop
<Tr className={`log-${auditLog.id} h-10 border-x-0 border-b border-t-0`}>
<Td>{formatDate(auditLog.createdAt)}</Td>
<Td>{`${eventToNameMap[auditLog.event.type]}`}</Td>
{isOrgAuditLogs && <Td>{auditLog?.projectName ?? "N/A"}</Td>}
{isOrgAuditLogs && <Td>{auditLog?.project?.name ?? "N/A"}</Td>}
{showActorColumn && renderActor(auditLog.actor)}
{renderSource()}
{renderMetadata(auditLog.event)}