Mirror of https://github.com/Infisical/infisical.git
Synced 2025-03-25 14:05:03 +00:00

Compare commits
23 Commits
secrets-ov...
infisical/
SHA1
---
7154b19703
9ce465b3e2
598e5c0be5
72f08a6b89
55d8762351
3c92ec4dc3
f2224262a4
23eac40740
4ae88c0447
7aecaad050
7adc103ed2
5bdbf37171
4f874734ab
eb6fd8259b
1766a44dd0
624c9ef8da
dfd4b13574
22b57b7a74
1ba0b9c204
110d0e95b0
a8c0bbb7ca
6af8a4fab8
43ecd31b74
.github/workflows
README.md
backend/src
    db
        migrations
            20250204025010_app-connections-and-secret-syncs-unique-constraint.ts
            20250205045509_increase-gcp-auth-limit.ts
        schemas
    ee/services/audit-log
    lib/error-codes
    server/routes
    services
        app-connection
        secret-folder
        secret-sync
docs
helm-charts
k8-operator
    config/samples/crd/infisicalsecret
    controllers/infisicalsecret
4
.github/workflows/helm_chart_release.yml → .github/workflows/helm-release-infisical-core.yml
vendored
@ -1,4 +1,4 @@
name: Release Helm Charts
name: Release Infisical Core Helm chart

on: [workflow_dispatch]

@ -17,6 +17,6 @@ jobs:
- name: Install Cloudsmith CLI
run: pip install --upgrade cloudsmith-cli
- name: Build and push helm package to Cloudsmith
run: cd helm-charts && sh upload-to-cloudsmith.sh
run: cd helm-charts && sh upload-infisical-core-helm-cloudsmith.sh
env:
CLOUDSMITH_API_KEY: ${{ secrets.CLOUDSMITH_API_KEY }}
@ -1,4 +1,4 @@
name: Release Docker image for K8 operator
name: Release image + Helm chart K8s Operator
on:
push:
tags:

@ -35,3 +35,18 @@ jobs:
tags: |
infisical/kubernetes-operator:latest
infisical/kubernetes-operator:${{ steps.extract_version.outputs.version }}

- name: Checkout
uses: actions/checkout@v2
- name: Install Helm
uses: azure/setup-helm@v3
with:
version: v3.10.0
- name: Install python
uses: actions/setup-python@v4
- name: Install Cloudsmith CLI
run: pip install --upgrade cloudsmith-cli
- name: Build and push helm package to Cloudsmith
run: cd helm-charts && sh upload-k8s-operator-cloudsmith.sh
env:
CLOUDSMITH_API_KEY: ${{ secrets.CLOUDSMITH_API_KEY }}
@ -125,7 +125,7 @@ Install pre commit hook to scan each commit before you push to your repository
infisical scan install --pre-commit-hook
```

Lean about Infisical's code scanning feature [here](https://infisical.com/docs/cli/scanning-overview)
Learn about Infisical's code scanning feature [here](https://infisical.com/docs/cli/scanning-overview)

## Open-source vs. paid
23
backend/src/db/migrations/20250204025010_app-connections-and-secret-syncs-unique-constraint.ts
Normal file
@ -0,0 +1,23 @@
import { Knex } from "knex";

import { TableName } from "@app/db/schemas";

export async function up(knex: Knex): Promise<void> {
  await knex.schema.alterTable(TableName.AppConnection, (t) => {
    t.unique(["orgId", "name"]);
  });

  await knex.schema.alterTable(TableName.SecretSync, (t) => {
    t.unique(["projectId", "name"]);
  });
}

export async function down(knex: Knex): Promise<void> {
  await knex.schema.alterTable(TableName.AppConnection, (t) => {
    t.dropUnique(["orgId", "name"]);
  });

  await knex.schema.alterTable(TableName.SecretSync, (t) => {
    t.dropUnique(["projectId", "name"]);
  });
}
36
backend/src/db/migrations/20250205045509_increase-gcp-auth-limit.ts
Normal file
@ -0,0 +1,36 @@
import { Knex } from "knex";
import { TableName } from "../schemas";

export async function up(knex: Knex): Promise<void> {
  const hasTable = await knex.schema.hasTable(TableName.IdentityGcpAuth);
  const hasAllowedProjectsColumn = await knex.schema.hasColumn(TableName.IdentityGcpAuth, "allowedProjects");
  const hasAllowedServiceAccountsColumn = await knex.schema.hasColumn(
    TableName.IdentityGcpAuth,
    "allowedServiceAccounts"
  );
  const hasAllowedZones = await knex.schema.hasColumn(TableName.IdentityGcpAuth, "allowedZones");
  if (hasTable) {
    await knex.schema.alterTable(TableName.IdentityGcpAuth, (t) => {
      if (hasAllowedProjectsColumn) t.string("allowedProjects", 2500).alter();
      if (hasAllowedServiceAccountsColumn) t.string("allowedServiceAccounts", 5000).alter();
      if (hasAllowedZones) t.string("allowedZones", 2500).alter();
    });
  }
}

export async function down(knex: Knex): Promise<void> {
  const hasTable = await knex.schema.hasTable(TableName.IdentityGcpAuth);
  const hasAllowedProjectsColumn = await knex.schema.hasColumn(TableName.IdentityGcpAuth, "allowedProjects");
  const hasAllowedServiceAccountsColumn = await knex.schema.hasColumn(
    TableName.IdentityGcpAuth,
    "allowedServiceAccounts"
  );
  const hasAllowedZones = await knex.schema.hasColumn(TableName.IdentityGcpAuth, "allowedZones");
  if (hasTable) {
    await knex.schema.alterTable(TableName.IdentityGcpAuth, (t) => {
      if (hasAllowedProjectsColumn) t.string("allowedProjects").alter();
      if (hasAllowedServiceAccountsColumn) t.string("allowedServiceAccounts").alter();
      if (hasAllowedZones) t.string("allowedZones").alter();
    });
  }
}
@ -17,9 +17,9 @@ export const IdentityGcpAuthsSchema = z.object({
  updatedAt: z.date(),
  identityId: z.string().uuid(),
  type: z.string(),
  allowedServiceAccounts: z.string(),
  allowedProjects: z.string(),
  allowedZones: z.string()
  allowedServiceAccounts: z.string().nullable().optional(),
  allowedProjects: z.string().nullable().optional(),
  allowedZones: z.string().nullable().optional()
});

export type TIdentityGcpAuths = z.infer<typeof IdentityGcpAuthsSchema>;
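For context on the schema change above, a small hedged illustration (field name reused for clarity only) of how appending `.nullable().optional()` changes what Zod accepts and infers:

```ts
import { z } from "zod";

// Before: the field must be present and must be a string
const Before = z.object({ allowedProjects: z.string() });
// After: the field may be a string, null, or omitted entirely
const After = z.object({ allowedProjects: z.string().nullable().optional() });

type TBefore = z.infer<typeof Before>; // { allowedProjects: string }
type TAfter = z.infer<typeof After>; // { allowedProjects?: string | null | undefined }

console.log(After.safeParse({ allowedProjects: null }).success); // true
console.log(After.safeParse({}).success); // true
console.log(Before.safeParse({}).success); // false
```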
@ -762,9 +762,9 @@ interface AddIdentityGcpAuthEvent {
  metadata: {
    identityId: string;
    type: string;
    allowedServiceAccounts: string;
    allowedProjects: string;
    allowedZones: string;
    allowedServiceAccounts?: string | null;
    allowedProjects?: string | null;
    allowedZones?: string | null;
    accessTokenTTL: number;
    accessTokenMaxTTL: number;
    accessTokenNumUsesLimit: number;

@ -784,9 +784,9 @@ interface UpdateIdentityGcpAuthEvent {
  metadata: {
    identityId: string;
    type?: string;
    allowedServiceAccounts?: string;
    allowedProjects?: string;
    allowedZones?: string;
    allowedServiceAccounts?: string | null;
    allowedProjects?: string | null;
    allowedZones?: string | null;
    accessTokenTTL?: number;
    accessTokenMaxTTL?: number;
    accessTokenNumUsesLimit?: number;
4
backend/src/lib/error-codes/database.ts
Normal file
@ -0,0 +1,4 @@
export enum DatabaseErrorCode {
  ForeignKeyViolation = "23503",
  UniqueViolation = "23505"
}
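These values are standard PostgreSQL SQLSTATE codes (23503 = foreign_key_violation, 23505 = unique_violation). A hedged sketch of the pattern the services below adopt — let the new unique constraint reject duplicates and translate the violation into a user-facing error — with hypothetical helper and wrapper names:

```ts
// Sketch only: `DatabaseError`/`BadRequestError` mimic the wrappers used in this codebase,
// and `insertAppConnection` is a made-up stand-in for a Knex insert that can trip the
// new ["orgId", "name"] unique index.
import { DatabaseErrorCode } from "./database";

class DatabaseError extends Error {
  constructor(readonly error: unknown) {
    super("database error");
  }
}
class BadRequestError extends Error {}

declare function insertAppConnection(orgId: string, name: string): Promise<void>;

export const createAppConnection = async (orgId: string, name: string) => {
  try {
    await insertAppConnection(orgId, name);
  } catch (err) {
    // The database enforced uniqueness; surface it as a 400 instead of a generic 500.
    if (err instanceof DatabaseError && (err.error as { code?: string })?.code === DatabaseErrorCode.UniqueViolation) {
      throw new BadRequestError(`An App Connection with the name "${name}" already exists`);
    }
    throw err;
  }
};
```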
1
backend/src/lib/error-codes/index.ts
Normal file
@ -0,0 +1 @@
export * from "./database";
@ -110,7 +110,6 @@ export const secretRawSchema = z.object({
  secretReminderNote: z.string().nullable().optional(),
  secretReminderRepeatDays: z.number().nullable().optional(),
  skipMultilineEncoding: z.boolean().default(false).nullable().optional(),
  metadata: z.unknown().nullable().optional(),
  createdAt: z.date(),
  updatedAt: z.date()
});
@ -3,6 +3,7 @@ import { ForbiddenError, subject } from "@casl/ability";
import { OrgPermissionAppConnectionActions, OrgPermissionSubjects } from "@app/ee/services/permission/org-permission";
import { TPermissionServiceFactory } from "@app/ee/services/permission/permission-service";
import { generateHash } from "@app/lib/crypto/encryption";
import { DatabaseErrorCode } from "@app/lib/error-codes";
import { BadRequestError, DatabaseError, NotFoundError } from "@app/lib/errors";
import { DiscriminativePick, OrgServiceActor } from "@app/lib/types";
import { AppConnection } from "@app/services/app-connection/app-connection-enums";
@ -144,54 +145,40 @@ export const appConnectionServiceFactory = ({
OrgPermissionSubjects.AppConnections
);

const appConnection = await appConnectionDAL.transaction(async (tx) => {
const isConflictingName = Boolean(
await appConnectionDAL.findOne(
{
name: params.name,
orgId: actor.orgId
},
tx
)
);
const validatedCredentials = await validateAppConnectionCredentials({
app,
credentials,
method,
orgId: actor.orgId
} as TAppConnectionConfig);

if (isConflictingName)
throw new BadRequestError({
message: `An App Connection with the name "${params.name}" already exists`
});
const encryptedCredentials = await encryptAppConnectionCredentials({
credentials: validatedCredentials,
orgId: actor.orgId,
kmsService
});

const validatedCredentials = await validateAppConnectionCredentials({
app,
credentials,
method,
orgId: actor.orgId
} as TAppConnectionConfig);

const encryptedCredentials = await encryptAppConnectionCredentials({
credentials: validatedCredentials,
try {
const connection = await appConnectionDAL.create({
orgId: actor.orgId,
kmsService
encryptedCredentials,
method,
app,
...params
});

const connection = await appConnectionDAL.create(
{
orgId: actor.orgId,
encryptedCredentials,
method,
app,
...params
},
tx
);

return {
...connection,
credentialsHash: generateHash(connection.encryptedCredentials),
credentials: validatedCredentials
};
});
} as TAppConnection;
} catch (err) {
if (err instanceof DatabaseError && (err.error as { code: string })?.code === DatabaseErrorCode.UniqueViolation) {
throw new BadRequestError({ message: `An App Connection with the name "${params.name}" already exists` });
}

return appConnection as TAppConnection;
throw err;
}
};

const updateAppConnection = async (
@ -215,72 +202,55 @@ export const appConnectionServiceFactory = ({
OrgPermissionSubjects.AppConnections
);

const updatedAppConnection = await appConnectionDAL.transaction(async (tx) => {
if (params.name && appConnection.name !== params.name) {
const isConflictingName = Boolean(
await appConnectionDAL.findOne(
{
name: params.name,
orgId: appConnection.orgId
},
tx
)
);
let encryptedCredentials: undefined | Buffer;

if (isConflictingName)
throw new BadRequestError({
message: `An App Connection with the name "${params.name}" already exists`
});
}
if (credentials) {
const { app, method } = appConnection as DiscriminativePick<TAppConnectionConfig, "app" | "method">;

let encryptedCredentials: undefined | Buffer;

if (credentials) {
const { app, method } = appConnection as DiscriminativePick<TAppConnectionConfig, "app" | "method">;

if (
!VALIDATE_APP_CONNECTION_CREDENTIALS_MAP[app].safeParse({
method,
credentials
}).success
)
throw new BadRequestError({
message: `Invalid credential format for ${
APP_CONNECTION_NAME_MAP[app]
} Connection with method ${getAppConnectionMethodName(method)}`
});

const validatedCredentials = await validateAppConnectionCredentials({
app,
orgId: actor.orgId,
credentials,
method
} as TAppConnectionConfig);

if (!validatedCredentials)
throw new BadRequestError({ message: "Unable to validate connection - check credentials" });

encryptedCredentials = await encryptAppConnectionCredentials({
credentials: validatedCredentials,
orgId: actor.orgId,
kmsService
if (
!VALIDATE_APP_CONNECTION_CREDENTIALS_MAP[app].safeParse({
method,
credentials
}).success
)
throw new BadRequestError({
message: `Invalid credential format for ${
APP_CONNECTION_NAME_MAP[app]
} Connection with method ${getAppConnectionMethodName(method)}`
});

const validatedCredentials = await validateAppConnectionCredentials({
app,
orgId: actor.orgId,
credentials,
method
} as TAppConnectionConfig);

if (!validatedCredentials)
throw new BadRequestError({ message: "Unable to validate connection - check credentials" });

encryptedCredentials = await encryptAppConnectionCredentials({
credentials: validatedCredentials,
orgId: actor.orgId,
kmsService
});
}

try {
const updatedConnection = await appConnectionDAL.updateById(connectionId, {
orgId: actor.orgId,
encryptedCredentials,
...params
});

return await decryptAppConnection(updatedConnection, kmsService);
} catch (err) {
if (err instanceof DatabaseError && (err.error as { code: string })?.code === DatabaseErrorCode.UniqueViolation) {
throw new BadRequestError({ message: `An App Connection with the name "${params.name}" already exists` });
}

const updatedConnection = await appConnectionDAL.updateById(
connectionId,
{
orgId: actor.orgId,
encryptedCredentials,
...params
},
tx
);

return updatedConnection;
});

return decryptAppConnection(updatedAppConnection, kmsService);
throw err;
}
};

const deleteAppConnection = async (app: AppConnection, connectionId: string, actor: OrgServiceActor) => {
@ -311,7 +281,10 @@ export const appConnectionServiceFactory = ({

return await decryptAppConnection(deletedAppConnection, kmsService);
} catch (err) {
if (err instanceof DatabaseError && (err.error as { code: string })?.code === "23503") {
if (
err instanceof DatabaseError &&
(err.error as { code: string })?.code === DatabaseErrorCode.ForeignKeyViolation
) {
throw new BadRequestError({
message:
"Cannot delete App Connection with existing connections. Remove all existing connections and try again."
@ -493,6 +493,7 @@ export const secretFolderDALFactory = (db: TDbClient) => {
db.ref("parents.environment")
)
.from(TableName.SecretFolder)
.where(`${TableName.SecretFolder}.isReserved`, false)
.join("parents", `${TableName.SecretFolder}.parentId`, "parents.id");
})
)
@ -123,47 +123,39 @@ export const secretSyncDALFactory = (
};

const create = async (data: Parameters<(typeof secretSyncOrm)["create"]>[0]) => {
try {
const secretSync = (await secretSyncOrm.transaction(async (tx) => {
const sync = await secretSyncOrm.create(data, tx);
const secretSync = (await secretSyncOrm.transaction(async (tx) => {
const sync = await secretSyncOrm.create(data, tx);

return baseSecretSyncQuery({
filter: { id: sync.id },
db,
tx
}).first();
}))!;
return baseSecretSyncQuery({
filter: { id: sync.id },
db,
tx
}).first();
}))!;

// TODO (scott): replace with cached folder path once implemented
const [folderWithPath] = secretSync.folderId
? await folderDAL.findSecretPathByFolderIds(secretSync.projectId, [secretSync.folderId])
: [];
return expandSecretSync(secretSync, folderWithPath);
} catch (error) {
throw new DatabaseError({ error, name: "Create - Secret Sync" });
}
// TODO (scott): replace with cached folder path once implemented
const [folderWithPath] = secretSync.folderId
? await folderDAL.findSecretPathByFolderIds(secretSync.projectId, [secretSync.folderId])
: [];
return expandSecretSync(secretSync, folderWithPath);
};

const updateById = async (syncId: string, data: Parameters<(typeof secretSyncOrm)["updateById"]>[1]) => {
try {
const secretSync = (await secretSyncOrm.transaction(async (tx) => {
const sync = await secretSyncOrm.updateById(syncId, data, tx);
const secretSync = (await secretSyncOrm.transaction(async (tx) => {
const sync = await secretSyncOrm.updateById(syncId, data, tx);

return baseSecretSyncQuery({
filter: { id: sync.id },
db,
tx
}).first();
}))!;
return baseSecretSyncQuery({
filter: { id: sync.id },
db,
tx
}).first();
}))!;

// TODO (scott): replace with cached folder path once implemented
const [folderWithPath] = secretSync.folderId
? await folderDAL.findSecretPathByFolderIds(secretSync.projectId, [secretSync.folderId])
: [];
return expandSecretSync(secretSync, folderWithPath);
} catch (error) {
throw new DatabaseError({ error, name: "Update by ID - Secret Sync" });
}
// TODO (scott): replace with cached folder path once implemented
const [folderWithPath] = secretSync.folderId
? await folderDAL.findSecretPathByFolderIds(secretSync.projectId, [secretSync.folderId])
: [];
return expandSecretSync(secretSync, folderWithPath);
};

const findOne = async (filter: Parameters<(typeof secretSyncOrm)["findOne"]>[0], tx?: Knex) => {
@ -8,7 +8,8 @@ import {
ProjectPermissionSub
} from "@app/ee/services/permission/project-permission";
import { KeyStorePrefixes, TKeyStoreFactory } from "@app/keystore/keystore";
import { BadRequestError, NotFoundError } from "@app/lib/errors";
import { DatabaseErrorCode } from "@app/lib/error-codes";
import { BadRequestError, DatabaseError, NotFoundError } from "@app/lib/errors";
import { OrgServiceActor } from "@app/lib/types";
import { TAppConnectionServiceFactory } from "@app/services/app-connection/app-connection-service";
import { TProjectBotServiceFactory } from "@app/services/project-bot/project-bot-service";
@ -197,37 +198,26 @@ export const secretSyncServiceFactory = ({
// validates permission to connect and app is valid for sync destination
await appConnectionService.connectAppConnectionById(destinationApp, params.connectionId, actor);

const secretSync = await secretSyncDAL.transaction(async (tx) => {
const isConflictingName = Boolean(
(
await secretSyncDAL.find(
{
name: params.name,
projectId
},
tx
)
).length
);

if (isConflictingName)
throw new BadRequestError({
message: `A Secret Sync with the name "${params.name}" already exists for the project with ID "${folder.projectId}"`
});

const sync = await secretSyncDAL.create({
try {
const secretSync = await secretSyncDAL.create({
folderId: folder.id,
...params,
...(params.isAutoSyncEnabled && { syncStatus: SecretSyncStatus.Pending }),
projectId
});

return sync;
});
if (secretSync.isAutoSyncEnabled) await secretSyncQueue.queueSecretSyncSyncSecretsById({ syncId: secretSync.id });

if (secretSync.isAutoSyncEnabled) await secretSyncQueue.queueSecretSyncSyncSecretsById({ syncId: secretSync.id });
return secretSync as TSecretSync;
} catch (err) {
if (err instanceof DatabaseError && (err.error as { code: string })?.code === DatabaseErrorCode.UniqueViolation) {
throw new BadRequestError({
message: `A Secret Sync with the name "${params.name}" already exists for the project with ID "${folder.projectId}"`
});
}

return secretSync as TSecretSync;
throw err;
}
};

const updateSecretSync = async (
@ -260,78 +250,65 @@ export const secretSyncServiceFactory = ({
message: `Secret sync with ID "${secretSync.id}" is not configured for ${SECRET_SYNC_NAME_MAP[destination]}`
});

const updatedSecretSync = await secretSyncDAL.transaction(async (tx) => {
let { folderId } = secretSync;
let { folderId } = secretSync;

if (params.connectionId) {
const destinationApp = SECRET_SYNC_CONNECTION_MAP[secretSync.destination as SecretSync];
if (params.connectionId) {
const destinationApp = SECRET_SYNC_CONNECTION_MAP[secretSync.destination as SecretSync];

// validates permission to connect and app is valid for sync destination
await appConnectionService.connectAppConnectionById(destinationApp, params.connectionId, actor);
}
// validates permission to connect and app is valid for sync destination
await appConnectionService.connectAppConnectionById(destinationApp, params.connectionId, actor);
}

if (
(secretPath && secretPath !== secretSync.folder?.path) ||
(environment && environment !== secretSync.environment?.slug)
) {
const updatedEnvironment = environment ?? secretSync.environment?.slug;
const updatedSecretPath = secretPath ?? secretSync.folder?.path;
if (
(secretPath && secretPath !== secretSync.folder?.path) ||
(environment && environment !== secretSync.environment?.slug)
) {
const updatedEnvironment = environment ?? secretSync.environment?.slug;
const updatedSecretPath = secretPath ?? secretSync.folder?.path;

if (!updatedEnvironment || !updatedSecretPath)
throw new BadRequestError({ message: "Must specify both source environment and secret path" });
if (!updatedEnvironment || !updatedSecretPath)
throw new BadRequestError({ message: "Must specify both source environment and secret path" });

ForbiddenError.from(permission).throwUnlessCan(
ProjectPermissionActions.Read,
subject(ProjectPermissionSub.Secrets, {
environment: updatedEnvironment,
secretPath: updatedSecretPath
})
);
ForbiddenError.from(permission).throwUnlessCan(
ProjectPermissionActions.Read,
subject(ProjectPermissionSub.Secrets, {
environment: updatedEnvironment,
secretPath: updatedSecretPath
})
);

const newFolder = await folderDAL.findBySecretPath(secretSync.projectId, updatedEnvironment, updatedSecretPath);
const newFolder = await folderDAL.findBySecretPath(secretSync.projectId, updatedEnvironment, updatedSecretPath);

if (!newFolder)
throw new BadRequestError({
message: `Could not find folder with path "${secretPath}" in environment "${environment}" for project with ID "${secretSync.projectId}"`
});
if (!newFolder)
throw new BadRequestError({
message: `Could not find folder with path "${secretPath}" in environment "${environment}" for project with ID "${secretSync.projectId}"`
});

folderId = newFolder.id;
}
folderId = newFolder.id;
}

if (params.name && secretSync.name !== params.name) {
const isConflictingName = Boolean(
(
await secretSyncDAL.find(
{
name: params.name,
projectId: secretSync.projectId
},
tx
)
).length
);
const isAutoSyncEnabled = params.isAutoSyncEnabled ?? secretSync.isAutoSyncEnabled;

if (isConflictingName)
throw new BadRequestError({
message: `A Secret Sync with the name "${params.name}" already exists for project with ID "${secretSync.projectId}"`
});
}

const isAutoSyncEnabled = params.isAutoSyncEnabled ?? secretSync.isAutoSyncEnabled;

const updatedSync = await secretSyncDAL.updateById(syncId, {
try {
const updatedSecretSync = await secretSyncDAL.updateById(syncId, {
...params,
...(isAutoSyncEnabled && folderId && { syncStatus: SecretSyncStatus.Pending }),
folderId
});

return updatedSync;
});
if (updatedSecretSync.isAutoSyncEnabled)
await secretSyncQueue.queueSecretSyncSyncSecretsById({ syncId: secretSync.id });

if (updatedSecretSync.isAutoSyncEnabled)
await secretSyncQueue.queueSecretSyncSyncSecretsById({ syncId: secretSync.id });
return updatedSecretSync as TSecretSync;
} catch (err) {
if (err instanceof DatabaseError && (err.error as { code: string })?.code === DatabaseErrorCode.UniqueViolation) {
throw new BadRequestError({
message: `A Secret Sync with the name "${params.name}" already exists for the project with ID "${secretSync.projectId}"`
});
}

return updatedSecretSync as TSecretSync;
throw err;
}
};

const deleteSecretSync = async (
@ -30,13 +30,13 @@ description: "How to sync secrets from Infisical to Azure App Configuration"

Press create integration to start syncing secrets to Azure App Configuration.

<Note>
<Warning>
The Azure App Configuration integration requires the following permissions to be set on the user / service principal
for Infisical to sync secrets to Azure App Configuration: `Read Key-Value`, `Write Key-Value`, `Delete Key-Value`.

Any role with these permissions would work such as the **App Configuration Data Owner** role. Alternatively, you can use the
**App Configuration Data Reader** role for read-only access or **App Configuration Data Contributor** role for read/write access.
</Note>
</Warning>

</Step>
<Step title="Additional Configuration">
@ -1,97 +0,0 @@
---
title: "Flows"
description: "Infisical's core flows have strong cryptographic underpinnings."
---

## Signup

When a user signs up for an account using email/password, they verify their email by correctly entering the 6-digit OTP code sent to it.

After this procedure, the user creates a password that is checked against strict requirements to ensure that it has sufficient entropy; this is critical because passwords have both authentication-related and cryptographic implications in Infisical. In accordance to the [secure remote password protocol (SRP)](https://en.wikipedia.org/wiki/Secure_Remote_Password_protocol), the password is used to generate a salt and X; this is kept handy on the client side.

Next, a few user-associated symmetric keys are generated for subsequent use:

- The password is transformed into a 256-bit symmetric key, called the generated key, using the [Argon2id](https://en.wikipedia.org/wiki/Argon2) key derivation function.
- A 256-bit symmetric key, called the protected key, is generated.
- A public-private key pair is generated.

The symmetric keys are used in sequence to encrypt the user’s private key:

- The protected key is used to encrypt the private key.
- The generated key is used to encrypt the protected key.

Finally, the encrypted private key, the protected key, salt, and X are sent to the Infisical API to be stored in the storage backend. Note that the top-level secret used to secure the user’s account and private key is their password. Therefore, it must be unknown to the Infisical API and strong by nature.
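The signup key chain described above can be sketched roughly as follows. This is a hedged illustration only: scrypt stands in for Argon2id to keep the example dependency-light, tweetnacl supplies the keypair and secretbox primitives, and the variable names are not Infisical's.

```ts
import nacl from "tweetnacl";
import { randomBytes, scryptSync } from "crypto";

const password = "correct horse battery staple";
const salt = randomBytes(16);

// 1. Derive the 256-bit "generated key" from the password (Argon2id in Infisical; scrypt here).
const generatedKey = new Uint8Array(scryptSync(password, salt, nacl.secretbox.keyLength));

// 2. Generate a random 256-bit "protected key" and an asymmetric keypair.
const protectedKey = nacl.randomBytes(nacl.secretbox.keyLength);
const keyPair = nacl.box.keyPair();

// 3. The protected key encrypts the private key; the generated key encrypts the protected key.
const privateKeyNonce = nacl.randomBytes(nacl.secretbox.nonceLength);
const encryptedPrivateKey = nacl.secretbox(keyPair.secretKey, privateKeyNonce, protectedKey);

const protectedKeyNonce = nacl.randomBytes(nacl.secretbox.nonceLength);
const encryptedProtectedKey = nacl.secretbox(protectedKey, protectedKeyNonce, generatedKey);

// Only the ciphertexts (plus salt/nonces and the SRP verifier) would be sent to the API;
// the password and the plaintext private key never leave the client.
console.log({ encryptedPrivateKey, encryptedProtectedKey });
```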
## Login

When a user logs in, they enter their password to authenticate with Infisical via SRP. If successful, the encrypted protected key and encrypted private key are returned to the client side.

The password is then used in reverse sequence to decrypt the private key:

- The password is transformed back into the generated key.
- The generated key is used to decrypt the encrypted protected key.
- The protected key is used to decrypt the encrypted private key.

The private key is stored on the client side and kept handy.

## Single sign-on

When a SSO authentication method like Google, GitHub, or SAML SSO is used to login or signup to Infisical, the process is identical to logging in with email/password except that it is contingent on first successfully logging in via the authentication provider. This means, for example, a user with Google SSO enabled must first log in with Google and then enter their password for Infisical to complete logging into the platform.

This approach implies that the user’s password assumes only the role of a master decryption key or secret. It also ensures that the authentication provider does not know this top-level secret, keeping the platform zero-knowledge as intended.

## Account recovery

When a user signs up for Infisical, they are issued a backup PDF containing a symmetric key that can be used to recover their account by decrypting a copy of that user’s private key; using the backup PDF is the only way to recover a user’s account in the event of a lockout - this is intentional by design of Infisical’s zero-knowledge architecture.

We strongly encourage all users to download, print, and keep their backup PDFs in a secure location.

## Secrets

In Infisical, secrets belong to environments in projects, and projects belong to organizations. Each project can be thought of as a vault and has its own symmetric key, called the project key. The project key is used to encrypt the secrets contained in that project.

Similar to each user’s private key, the project key is sensitive and must remain unknown to the server to preserve the zero-knowledge aspect of Infisical; knowledge of the project key would allow the server to decrypt the secrets of that project which would be undesirable if the server is compromised.

In order to preserve the zero-knowledge aspect of Infisical, each project key is encrypted on the client side before being sent to the server. More specifically, for each project, we make copies of its project key for each member of that project; each copy is encrypted under that member’s public key and only then sent off to the server for storage. A few relevant sequences:

- The initial member of a project generates its project key, encrypts it under their public key, and uploads it to the server for storage.
- When a new member is added to the project, an existing member of the project (e.g. the initial member) fetches their copy of the project key, decrypts that copy, encrypts it under the public key of the new member, and uploads it to the server for storage.
- When a member is removed from a project, their copy of the project key is hard deleted from the storage backend.

When dealing with secrets, this implies a specific sequence of decryption/encryption steps to fetch and create/update them. Assuming that we’re dealing with the Infisical Web UI, let’s start with fetching secrets which happens after the user logs in and selects a project:

- The user fetches encrypted secrets back to the client side.
- The user also fetches the encrypted project key, encrypted under their public key, for these secrets.
- The encrypted project key is decrypted by the user’s private key which is kept handy on the client side.
- The project key is finally used to decrypt the secrets belonging to the project.
- The secrets are displayed to the user in the Infisical Web UI.

Similarly, when a user creates/updates a secret, the reverse sequence is performed:

- The user fetches the encrypted project key, encrypted under the user’s public key.
- The project key is decrypted by the user’s private key which is kept handy on the client side.
- The user encrypts the new/updated secret under the project key.
- The user sends the new/updated secret to the server for storage.

These sequences are performed across various Infisical clients including the web UI, CLI, SDKs, and K8s operators when dealing with the Infisical API. They are also relevant in the implementations of Infisical’s versioning features like secret versions and snapshots.
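A rough sketch of the project-key sequences above, using tweetnacl's box (x25519-xsalsa20-poly1305) and secretbox primitives plus tweetnacl-util for string conversion. Variable names are illustrative; this is not Infisical's implementation.

```ts
import nacl from "tweetnacl";
import { decodeUTF8, encodeUTF8 } from "tweetnacl-util";

const existingMember = nacl.box.keyPair();
const newMember = nacl.box.keyPair();

// The project key is a random 256-bit symmetric key generated by the first member.
const projectKey = nacl.randomBytes(nacl.secretbox.keyLength);

// Existing member encrypts their copy of the project key under the new member's public key.
const shareNonce = nacl.randomBytes(nacl.box.nonceLength);
const encryptedProjectKey = nacl.box(projectKey, shareNonce, newMember.publicKey, existingMember.secretKey);

// The new member decrypts their copy client-side with their private key...
const receivedProjectKey = nacl.box.open(encryptedProjectKey, shareNonce, existingMember.publicKey, newMember.secretKey)!;

// ...and uses it to encrypt and decrypt the project's secrets.
const secretNonce = nacl.randomBytes(nacl.secretbox.nonceLength);
const ciphertext = nacl.secretbox(decodeUTF8("DATABASE_URL=postgres://..."), secretNonce, receivedProjectKey);
const plaintext = encodeUTF8(nacl.secretbox.open(ciphertext, secretNonce, receivedProjectKey)!);
console.log(plaintext);
```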
## Native integrations

Previously, we mentioned that Infisical is zero-knowledge; this is partly true because Infisical can be used this way. Under certain circumstances, however, a user can explicitly share their copy of the project key with the server to enable more advanced features like native integrations.

The way a project key is shared with Infisical is via an abstraction that we call a bot. Each project has a bot with a public-private key pair generated on the server; the private key of each bot is symmetrically encrypted by the root encryption key of the server. This implies a few things:

- The server may partake in the sharing of project keys via its own public-private keys bound to each project bot.
- The server root encryption key must be kept secure.

With that, let’s discuss native integrations. A native integrations is a connection between Infisical and a target platform like GitHub, GitLab, or Vercel that allows secrets to be synced from Infisical to the target platform using its API. Since native integrations require secrets to be sent over in plaintext, they require the server to have access to the secrets. The sequence for how integrations are implemented is fairly simple:

- A user explicitly shares copy of the project key with the server via the Infisical Web UI. In this step, the user fetches the public key of the bot assigned to that project, encrypts the project key under that public key, and sends it back to the server.
- The user selects a target platform to integrate with their project and enters details such as the source environment within the project to send secrets from as well as the project and environment in the target platform to sync secrets to.
- The user creates the integration, triggering the first sync wherein Infisical decrypts the project’s key, uses it to decrypt the secrets of that project, and sends the secrets to the target platform.
- Finally, on any subsequent mutations applied to the source environment of an active integration, Infisical automatically triggers a re-sync to the target platform. This keeps Infisical as a ground source-of-truth for a team’s secrets.

## Resources

- For in depth details, consult the code.
- To get started with Infisical, try out the [Getting Started](https://infisical.com/docs/documentation/getting-started/introduction) overview.
@ -6,24 +6,23 @@ description: "Read how Infisical works under the hood."
This section covers the internals of Infisical including its technical underpinnings, architecture, and security properties.

<Note>
Knowledge of this section is recommended but not required to use Infisical. However, if you're operating Infisical, we recommend understanding the internals.
Knowledge of this section is recommended but not required to use Infisical.
However, if you're operating Infisical, we recommend understanding the
internals.
</Note>

## Learn More

<CardGroup cols={2}>
<Card href="./components" title="Components" icon="boxes-stacked" color="#000000">
Learn about the fundamental parts of Infisical.
</Card>
<Card href="./flows" title="Flows" icon="bars-staggered" color="#000000">
Find out more about the structure of core user flows in Infisical.
</Card>
<Card
href="./security"
title="Security"
icon="shield"
href="./components"
title="Components"
icon="boxes-stacked"
color="#000000"
>
Learn about the fundamental parts of Infisical.
</Card>
<Card href="./security" title="Security" icon="shield" color="#000000">
Read about most common security-related topics and questions.
</Card>
<Card
@ -32,6 +31,8 @@ This section covers the internals of Infisical including its technical underpinn
icon="ticket"
color="#000000"
>
Learn best practices for utilizing Infisical service tokens. Please note that service tokens are now deprecated and will be removed entirely in the future.
Learn best practices for utilizing Infisical service tokens. Please note
that service tokens are now deprecated and will be removed entirely in the
future.
</Card>
</CardGroup>
@ -3,25 +3,25 @@ title: "Security"
description: "Infisical's security model includes many considerations and initiatives."
---

Given that Infisical is a secret management platform that manages sensitive data, the Infisical security model is very important.
The goal of Infisical's security model is to ensure the security and integrity of all of its managed data as well as all associated operations.
As a security infrastructure platform dealing with highly-sensitive data, Infisical follows a robust security model with the goal of ensuring the security and integrity of all its managed data and associated components.

This means that data at rest and in transit must be secure from eavesdropping or tampering. All clients must be authenticated and authorized to access data. Additionally, all interactions must be auditable and traced uniquely back to their source.
As part of the security model, data at rest and in transit must be secure from eavesdropping or tampering, clients must be authenticated and authorized to access data, and all operations in the platform are audited and can be traced back to their source.

This page documents security measures used by [Infisical](https://github.com/Infisical/infisical), the software, and [Infisical Cloud](https://infisical.com/), a separate managed service offering for the software.

## Threat model

Infisical’s threat model spans communication, storage, response mechanisms, failover strategies, and more.
Infisical’s (the software) threat model spans communication, storage, response mechanisms, and more.

- Eavesdropping on communications: Infisical ensures end-to-end encryption for all client interactions with the Infisical API.
- Eavesdropping on communications: Infisical secures client communication with the server and from the server to the storage backend.
- Tampering with data (at rest or in transit): Infisical implements data integrity checks to detect tampering. If inconsistencies are found, Infisical aborts transactions and raises alerts.
- Unauthorized access (lacking authentication/authorization): Infisical mandates rigorous authentication and authorization checks for all inbound requests; it also offers multi-factor authentication and role-based access controls.
- Actions without accountability: Infisical logs all project-level events, including policy updates, queries/mutations applied to secrets, and more. Every event is timestamped and information about actor, source (i.e. IP address, user-agent, etc.), and relevant metadata is included.
- Breach of data storage confidentiality: Infisical encrypts all stored secrets using proven cryptographic techniques such as AES-256-GCM for symmetric encryption.
- Loss of service availability or secret data due to failures: Infisical leverages the robust container orchestration capabilities of Kubernetes and the inherent high availability features of Bitnami MongoDB to ensure resilience and fault tolerance. By deploying multiple replicas of Infisical application on Kubernetes, operations can continue even if a single instance fails.
- Unauthorized access (lacking authentication/authorization): Infisical mandates rigorous authentication and authorization checks for all inbound requests; it also offers multi-factor authentication and role/attribute-based access controls.
- Actions without accountability: Infisical logs events, including policy updates, queries/mutations applied to secrets, certificates, and more. Every event is timestamped and information about actor, source (i.e. IP address, user-agent, etc.), and relevant metadata is included.
- Breach of data storage confidentiality: Infisical encrypts all stored secrets using proven cryptographic techniques for symmetric encryption.
- Unrecognized suspicious activities: Infisical monitors for any anomalous activities such as authentication attempts from previously unseen sources.
- Unidentified system vulnerabilities: Infisical undergoes penetration tests and vulnerability assessments twice a year; we act on findings to bolster the system's defense mechanisms.
- Unidentified system vulnerabilities: Infisical undergoes penetration tests and vulnerability assessments twice a year; we act on findings to bolster the system’s defense mechanisms.

That said, Infisical does not consider the following as part of its threat model:
Infisical (the software) does not consider the following as part of its threat model:

- Uncontrolled access to the storage mechanism: An attacker with unfettered access to the storage system can manipulate data in unpredictable ways, including erasing or tampering with stored secrets. Furthermore, the attacker could potentially implement state rollbacks to favor their objectives.
- Disclosure of secret presence: If an adversary gains read access to the storage backend, they might discern the existence of certain secrets, even if the actual contents remain encrypted and concealed.
@ -30,137 +30,74 @@ That said, Infisical does not consider the following as part of its threat model
- Breaches via compromised clients: If a system or application accessing Infisical is compromised, and its credentials to the platform are exposed, an attacker might gain access at the privilege level of that compromised entity.
- Configuration tampering by administrators: Any configuration data, whether supplied through admin interfaces or configuration files, needs scrutiny. If an attacker can manipulate these configurations, it poses risks to data confidentiality and integrity.
- Physical access to deployment infrastructure: An attacker with physical access to the servers or infrastructure where Infisical is deployed can potentially compromise the system in ways that are challenging to guard against, such as direct hardware tampering or booting from malicious media.
- Social engineering attacks on personnel: Attacks that target personnel, tricking them into divulging sensitive information or performing compromising actions, fall outside the platform's direct defensive purview.
- Social engineering attacks on personnel: Attacks that target personnel, tricking them into divulging sensitive information or performing compromising actions, fall outside the platform’s direct defensive purview.

It's essential to note that while these points fall outside the platform's direct threat model, they still form crucial considerations for an overarching security strategy.
Note that while these points fall outside the Infisical’s threat model, they remain considerations in the broader platform architecture.

## External threat overview

Infisical's architecture consists of various systems:
Infisical’s architecture consists of various systems which together we refer to as the Infisical platform:

- Infisical API
- Storage backend
- Redis
- Infisical Web UI
- Infisical clients
- Server: The Infisical API that serves requests.
- Clients: The Web UI and other applications that send requests to the server.
- Storage backend: PostgreSQL used by the server to persist data.
- Redis: Used by Infisical for caching, queueing and cron job scheduling.

The Infisical API requires that the Infisical Web UI and all Infisical clients are authenticated and authorized for every inbound request. If using [Infisical Cloud](https://app.infisical.com), all traffic is routed through [Cloudflare](https://www.cloudflare.com) which enforces TLS and requires a minimum of TLS 1.2.
The server requires clients to be authenticated and authorized for every inbound request. If using [Infisical Cloud](https://infisical.com/), all traffic is routed through [Cloudflare](https://www.cloudflare.com/) which enforces TLS and requires a minimum of TLS 1.2.

The Infisical API is untrusted by design when dealing with secrets. All secrets are encrypted/decrypted on the client-side before reaching the Infisical API by default; granting Infisical access to secrets afterward is optional and up to your organization.
The server mandates that each request includes a valid token (issued for a user or machine identity) used to identify the client before performing any actions on the platform. Clients without a valid token can only access login endpoints with the exception of a few intentionally unauthenticated endpoints. For tokens issued for machine identities, Infisical provides significant configuration, including support for native authentication methods (e.g. [AWS](https://infisical.com/docs/documentation/platform/identities/aws-auth), [Azure](https://infisical.com/docs/documentation/platform/identities/azure-auth), [Kubernetes](https://infisical.com/docs/documentation/platform/identities/kubernetes-auth), etc.); custom TTLs to restrict token lifespan; IP restrictions to enforce network-based access controls; and usage caps to limit the maximum number of times that a token can be used.

The storage backend used by Infisical is also untrusted by design. All sensitive data is encrypted either symmetrically with AES-256-GCM or asymmetrically with x25519-xsalsa20-poly1305 prior to entering the storage backend, depending on the context either on the client-side or server-side. Moreover, Infisical communicates with the storage backend over TLS to provide an added layer of security.
When accessing Infisical via web browser, JWT tokens are stored in browser memory and appended to outbound requests requiring authentication; refresh tokens are stored in HttpOnly cookies and included in requests as part of token renewal. Note also that Infisical utilizes the latest HTTP security headers and employs a strict Content-Security-Policy to mitigate XSS.

To mitigate abuse and enhance system stability, the server enforces configurable rate limiting on read, write, and secrets operations. This prevents excessive API requests from impacting system performance while ensuring fair usage across clients.

Once traffic enters the server, any sensitive data (e.g. secrets, certificates entering the server), where applicable, is encrypted using a 256-bit [Advanced Encryption Standard (AES)](https://en.wikipedia.org/wiki/Advanced_Encryption_Standard) cipher in the [Galois Counter Mode (GCM)](https://en.wikipedia.org/wiki/Galois/Counter_Mode) with 96-bit nonces prior to being persisted in the storage backend. Encryption is an integral part of Infisical’s platform-wide cryptographic architecture, which also supports seal-wrapping with external KMS and HSMs. Before responding to a client request, the server securely retrieves and decrypts requested data from the storage backend. Each decryption operation includes integrity verification to ensure data has not been altered or tampered with.
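For readers unfamiliar with the mode, a minimal sketch of AES-256-GCM with a 96-bit nonce using Node's built-in crypto module (not Infisical's actual code); the authentication tag checked on decryption is what provides the integrity verification mentioned above.

```ts
import { createCipheriv, createDecipheriv, randomBytes } from "crypto";

const dataKey = randomBytes(32); // 256-bit key
const nonce = randomBytes(12); // 96-bit nonce

const cipher = createCipheriv("aes-256-gcm", dataKey, nonce);
const ciphertext = Buffer.concat([cipher.update("my-secret-value", "utf8"), cipher.final()]);
const authTag = cipher.getAuthTag();

const decipher = createDecipheriv("aes-256-gcm", dataKey, nonce);
decipher.setAuthTag(authTag);
// Throws if the ciphertext or tag has been tampered with.
const plaintext = Buffer.concat([decipher.update(ciphertext), decipher.final()]).toString("utf8");
console.log(plaintext); // "my-secret-value"
```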
## Internal threat overview
|
||||
|
||||
Within Infisical, a critical security concern is an attacker gaining access to sensitive data that they are not permitted to, especially if they already has some degree of access to the system. There are currently two authentication methods categories used by clients for where we apply robust authentication and authorization logic.
|
||||
Within Infisical, an internal threat and critical security concern is an attacker gaining access to sensitive data that they are not permitted to, especially if they are able to authenticate with some degree of access to the system.
|
||||
|
||||
### JWT / API Key
|
||||
Before a client can perform any actions on the platform, it must authenticate with the server using a supported authentication method such as username-password, SAML, SSO, LDAP, AWS/GCP/Azure, OIDC, or Kubernetes authentication. A successful authentication results in the issuance of a client (JWT) token containing a reference to the user or machine identity bound to it.
|
||||
|
||||
This token category is used by users and included in requests made from the Infisical Web UI or elsewhere to the Infisical API.
|
||||
When a client uses the token to make authenticated requests against the server, Infisical validates the token and maps the bound-identity to access control policies that exist at the organization and project level, both types of namespaces within the platform. The access control policies are configured by operators of Infisical ahead of time and may involve role-based, attribute-based, and one-off “additional privilege” resource constraints. Given the robustness of the access control system, we recommend reading the full documentation for it.
|
||||
|
||||
Each token is authenticated against the API and mapped to an existing user in Infisical. If no existing user is found for the token, the request is rejected by the API. Each token assumes the permission set of the user that it is mapped to. For example, if a user corresponding to a token is not allowed access to a certain organization or project, then the token is also not be valid for any requests concerning those specific resources.
|
||||
For example, an operator of Infisical may define the following constraints to restrict client access to particular resources:
|
||||
|
||||
In the event of compromise, an attacker could use the token to impersonate the associated user and perform actions within the permission set of that user. While they could retrieve secrets for a project that the user is part of, they could not, however, decrypt secrets if the project follows Infisical's default zero-knowledge architecture. In any case, it would be critical for the user to invalidate this token and change their password immediately to prevent further unintended actions and consequences.
|
||||
|
||||
### Service token
|
||||
|
||||
This token category is provisioned by users for applications and infrastructure to perform secret operations against the Infisical API.
|
||||
|
||||
Each token is scoped to a project in Infisical and configurable with an expiration date and permission set (also known as **scopes**) for specific environment(s) and path(s) within them. For example, you may provision an application a service token to authenticate against the Infisical API and retrieve secrets from some `/environment-variables` path in the production environment of a project. If the token is tried for another project, environment, or path outside of its permission set, then it is rejected by the API.
|
||||
|
||||
It should also be noted that projects in Infisical can be configured to restrict service token access to specific IP addresses or CIDR ranges; this can be useful for limiting access to traffic coming from corporate networks.
|
||||
|
||||
In the event of compromise, an attacker could use a service token to access the secrets that it is provisioned for. It would be critical here for project administrator(s) to revoke the token immediately to prevent further unintended access to resources; it would also be advisable currently to transfer secrets to a new project where a new project key is created on the client-side.
|
||||
- Read and write access to a secret resource via an additional privilege attached to the bound-identity.
|
||||
- Read-only access to a secret resource via one or multiple roles attached to the bound-identity.
|
||||
- Read-only access to a secret resource via a group membership for which the associated bound-identity is part of; the group itself is assigned one or multiple roles with access to the secret resource.
|
||||
|
||||
## Cryptography
|
||||
|
||||
Infisical uses AES-256-GCM for symmetric encryption and x25519-xsalsa20-poly1305 for asymmetric encryption operations; asymmetric algorithms are implemented with the [TweetNaCl.js](https://tweetnacl.js.org/#/) library which has been well-audited and recommended for use by cybersecurity firm Cure53. Lastly, the secure remote password (SRP) implementation uses [jsrp](https://github.com/alax/jsrp) package for user authentication.
|
||||
All symmetric encryption operations, with the exception of those proxied through external KMS and HSM systems, in Infisical use a software-backed, 256-bit Advanced Encryption Standard (AES) cipher in the Galois Counter Mode (GCM) with 96-bit nonces — AES-256-GCM.
|
||||
|
||||
By default, Infisical employs a zero-knowledge-first approach to securely storing and sharing secrets.
|
||||
Infisical employs a multilayer approach to its encryption architecture with components that can be optionally linked to external KMS or HSM systems. At a high-level, a master key, backed by an operator-provided key, is used to encrypt (internal) “KMS” keys that are used to then encrypt data keys; the data keys are used to protect sensitive data stored in Infisical. The keys in the architecture are stored encrypted in the storage backend, retrieved, decrypted, and only then used as part of server operations when needed. Since server configuration is needed to decrypt any keys as part of the encryption architecture, accessing any sensitive data in Infisical requires access to both server configuration and data in the storage backend. Note that the platform’s encryption architecture has components that can be linked to external KMS and HSM systems; opting for these make the use of the software more FIPS aligned.
|
||||
|
||||
- Each secret belongs to a project and is symmetrically encrypted by that project's unique key. Each member of a project is shared a copy of the project key, encrypted under their public key, when they are first invited to join the project.
|
||||
Since these encryption operations occur on the client-side, the Infisical API is not able to view the value of any secret and the default zero-knowledge property of Infisical is retained; as you'd expect, it follows that decryption operations also occur on the client-side.
|
||||
- An exception to the zero-knowledge property occurs when a member of a project explicitly shares that project's unique key with Infisical. It is often necessary to share the project key with Infisical in order to use features like native integrations and secret rotation that wouldn't be possible to offer otherwise.
|
||||
To be specific:
|
||||
|
||||
## Infrastructure

- The architecture starts with a 256-bit master key that can be secured by a root key, which can either be a 128-bit key passed into the server by an operator of Infisical as an environment variable, or an external key from an HSM module such as [Thales Luna HSM](https://cpl.thalesgroup.com/encryption/data-protection-on-demand/services/luna-cloud-hsm) or [AWS CloudHSM](https://aws.amazon.com/cloudhsm/) linked via the relevant configuration parameters.
- The master key secures 256-bit keys in Infisical, henceforth referred to as KMS keys.
- Each organization in Infisical has its own KMS key and a separate data key; the KMS key is used to secure the data key, which encrypts organization-level data.
- Each project in Infisical has a designated KMS key and a separate data key; the KMS key is used to secure the data key, which encrypts project-level data. Note that a project KMS key can be substituted with an external key from a KMS such as [AWS KMS](https://infisical.com/docs/documentation/platform/kms-configuration/aws-kms), [AWS CloudHSM](https://infisical.com/docs/documentation/platform/kms-configuration/aws-hsm), or [GCP KMS](https://infisical.com/docs/documentation/platform/kms-configuration/gcp-kms); we recommend reading the full [documentation](https://infisical.com/docs/documentation/platform/kms-configuration/overview) on integrating with an external KMS. A conceptual sketch of this key hierarchy is shown below.

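To make the hierarchy above concrete, here is a minimal, self-contained TypeScript sketch of envelope encryption (a master key wraps a KMS key, which wraps a data key, which encrypts a secret); the helper and variable names are placeholders and this is a conceptual illustration, not Infisical's internal code.

```typescript
import { createCipheriv, createDecipheriv, randomBytes } from "crypto";

// Generic AES-256-GCM "wrap": encrypts one key (or any payload) under another key.
function wrap(payload: Buffer, kek: Buffer): Buffer {
  const nonce = randomBytes(12);
  const cipher = createCipheriv("aes-256-gcm", kek, nonce);
  const ct = Buffer.concat([cipher.update(payload), cipher.final()]);
  return Buffer.concat([nonce, cipher.getAuthTag(), ct]); // nonce || tag || ciphertext
}

function unwrap(blob: Buffer, kek: Buffer): Buffer {
  const nonce = blob.subarray(0, 12);
  const tag = blob.subarray(12, 28);
  const ct = blob.subarray(28);
  const decipher = createDecipheriv("aes-256-gcm", kek, nonce);
  decipher.setAuthTag(tag);
  return Buffer.concat([decipher.update(ct), decipher.final()]);
}

// Key hierarchy: master key -> KMS key -> data key -> secret value.
const masterKey = randomBytes(32); // in practice secured by an operator-provided root key or HSM
const projectKmsKey = randomBytes(32);
const projectDataKey = randomBytes(32);

const storedKmsKey = wrap(projectKmsKey, masterKey); // stored encrypted in the backend
const storedDataKey = wrap(projectDataKey, projectKmsKey);
const storedSecret = wrap(Buffer.from("DATABASE_URL=postgres://..."), projectDataKey);

// Reading a secret requires walking the chain back up.
const dataKey = unwrap(storedDataKey, unwrap(storedKmsKey, masterKey));
console.log(unwrap(storedSecret, dataKey).toString("utf8"));
```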
### High availability
## Infrastructure & High availability (Infisical Cloud)

Infisical Cloud uses a number of strategies to keep services running smoothly and ensure data stays available, even during failures; we document these strategies below:

- Multi-AZ AWS RDS: Infisical Cloud runs AWS Relational Database Service (RDS) with Multi-AZ deployments to improve availability and durability. This setup keeps a standby replica in a different Availability Zone (AZ) and automatically fails over if the primary instance goes down. Continuous backups and replication help protect data and minimize interruptions.
- Multi-AZ ElastiCache (Redis): For caching, Infisical Cloud runs Amazon ElastiCache (Redis) in a Multi-AZ setup. This means data is replicated across different AZs, so if one goes down, the system can automatically fail over to a healthy node. This helps keep response times low and ensures caching stays reliable.
- Multi-AZ ECS for Container Orchestration: Infisical Cloud runs on Amazon Elastic Container Service (ECS) across multiple availability zones, making sure containers stay available even if an AZ fails. If one zone has an issue, traffic automatically shifts to healthy instances in other zones, keeping downtime to a minimum.

Infisical Cloud utilizes several strategies to ensure high availability, leveraging AWS services to maintain continuous operation and data integrity.

#### Multi-AZ AWS RDS
## Cross-Region Replication for Disaster Recovery (Infisical Cloud)

Infisical Cloud uses AWS Relational Database Service (RDS) with Multi-AZ deployments.
This configuration ensures that the database service is highly available and durable.
AWS RDS automatically provisions and maintains a synchronous standby replica of the database in a different Availability Zone (AZ).
This setup facilitates immediate failover to the standby in the event of an AZ failure, thereby ensuring that database operations can continue with minimal interruption.
The continuous backup and replication to the standby instance safeguard data against loss and ensure its availability even during system failures.
To handle regional failures, Infisical Cloud keeps standby regions updated and ready to take over when needed.

#### Multi-AZ ECS for Container Orchestration
- ElastiCache (Redis): Data is replicated across regions using AWS Global Datastore, keeping cached data consistent and available even if a primary region goes down.
- RDS (PostgreSQL): Cross-region read replicas ensure database data is available in multiple locations, allowing for failover in case of a regional outage.

Infisical Cloud leverages Amazon Elastic Container Service (ECS) in a Multi-AZ configuration for container orchestration.
This arrangement enables the management and operation of containers across multiple availability zones, increasing the application's fault tolerance.
Should an AZ fail, traffic is seamlessly routed to an operational AZ, thus minimizing downtime and preserving service availability.

#### Standby Regions for Regional Failover

To withstand regional outages, secondary regions are kept in standby mode and maintained with up-to-date configurations and data, ready to take over in case the primary region fails.
The standby regions enable a rapid transition and service continuity with minimal disruption in the event of a complete regional failure, ensuring that Infisical Cloud services remain accessible.

### Snapshots

A snapshot is a complete copy of data in the storage backend at a point in time.

If using [Infisical Cloud](https://app.infisical.com), snapshots of MongoDB databases are taken regularly; this can be enabled on your own storage backend as well.

### Offline usage

Many teams and organizations use the [Infisical CLI](https://infisical.com/docs/cli/overview) to fetch secrets from Infisical and inject them back into their applications and infrastructure locally; the CLI has offline fallback capabilities.

If you have previously retrieved secrets for a specific project and environment, the `run/secret` command will utilize the saved secrets on subsequent fetch attempts, even when offline, to ensure that you always have access to your secrets.

## Platform

### Web application

Infisical utilizes the latest HTTP security headers and employs a strict Content-Security-Policy to mitigate XSS.

JWT tokens are stored in browser memory and appended to outbound requests requiring authentication; refresh tokens are stored in `HttpOnly` cookies and included in future requests to `/api/token` for JWT token renewal.
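As a rough, browser-side sketch of this pattern (not Infisical's actual frontend code; the response shape is assumed), the access token lives only in memory while the `HttpOnly` refresh cookie is sent automatically when calling the `/api/token` endpoint mentioned above:

```typescript
// Minimal sketch: access token lives only in memory; the HttpOnly refresh cookie
// is sent automatically by the browser because of `credentials: "include"`.
let accessToken: string | null = null;

async function refreshAccessToken(): Promise<string> {
  const res = await fetch("/api/token", {
    method: "POST",
    credentials: "include", // sends the HttpOnly refresh-token cookie
  });
  if (!res.ok) throw new Error("Session expired; please log in again.");
  const body = (await res.json()) as { token: string }; // assumed response shape
  accessToken = body.token;
  return accessToken;
}

async function authedFetch(url: string, init: RequestInit = {}): Promise<Response> {
  if (!accessToken) await refreshAccessToken();
  return fetch(url, {
    ...init,
    headers: { ...init.headers, Authorization: `Bearer ${accessToken}` },
  });
}
```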
### User authentication

Infisical supports several authentication methods including email/password, Google SSO, GitHub SSO, SAML 2.0 (Okta, Azure, JumpCloud), and OpenID Connect; Infisical also currently offers email-based 2FA, with authenticator app methods coming in Q1 2024.

Infisical uses the [secure remote password protocol](https://en.wikipedia.org/wiki/Secure_Remote_Password_protocol#:~:text=The%20SRP%20protocol%20has%20a,the%20user%20to%20the%20server), commonly found in other zero-knowledge platform architectures, for authentication.
Put simply, the protocol enables Infisical to validate a user's knowledge of their password without ever seeing it by constructing a mutual secret; we use this protocol because each user's password also seeds the generation, via a KDF, of a master encryption/decryption key for that user which the platform should not see.
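The sketch below shows the general idea of deriving a key from a password with a KDF on the client. It uses Node's built-in scrypt purely for illustration and makes no claim about the specific KDF or parameters Infisical uses.

```typescript
import { scryptSync, randomBytes, timingSafeEqual } from "crypto";

// Illustrative only: derive a 256-bit key from a password and a per-user salt.
// A key derived like this never needs to leave the client; only verifier material
// and ciphertexts produced with such keys reach the server.
function deriveMasterKey(password: string, salt: Buffer): Buffer {
  return scryptSync(password, salt, 32, { N: 2 ** 14, r: 8, p: 1 });
}

const salt = randomBytes(16);
const key1 = deriveMasterKey("correct horse battery staple", salt);
const key2 = deriveMasterKey("correct horse battery staple", salt);
console.log(timingSafeEqual(key1, key2)); // true: same password + salt -> same key
```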
Lastly, Infisical enforces strong password requirements according to the guidance set forth in [NIST Special Publication 800-63B](https://pages.nist.gov/800-63-3/sp800-63b.html#appA). Since passwords in Infisical also have cryptographic implications, Infisical validates each password on the client side to meet minimum length and entropy requirements; Infisical also checks each password against the [Have I Been Pwned (HIBP) API](https://haveibeenpwned.com), which covers around 700M breached passwords, in a privacy-preserving way.
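As a sketch of the privacy-preserving (k-anonymity) lookup pattern the HIBP Pwned Passwords API exposes, only the first five characters of the password's SHA-1 hash are sent; the server returns all suffixes in that range and the match is performed locally. This is a generic illustration, not Infisical's code.

```typescript
import { createHash } from "crypto";

// k-anonymity check against HIBP Pwned Passwords: only a 5-character hash prefix
// ever leaves the machine; the full password and full hash stay local.
async function isPasswordBreached(password: string): Promise<boolean> {
  const sha1 = createHash("sha1").update(password).digest("hex").toUpperCase();
  const prefix = sha1.slice(0, 5);
  const suffix = sha1.slice(5);

  const res = await fetch(`https://api.pwnedpasswords.com/range/${prefix}`);
  const body = await res.text(); // lines of "SUFFIX:COUNT"

  return body.split("\n").some((line) => line.split(":")[0].trim() === suffix);
}

isPasswordBreached("password123").then((breached) => console.log({ breached })); // likely true
```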
<Note>
Since Infisical's unique zero-knowledge architecture requires a master decryption key for every user account, users with Google SSO, GitHub SSO, or SAML 2.0 enabled must still enter a secret after the authentication step to access their secrets in Infisical. In practice, this implies stronger security since users must successfully authenticate with a single sign-on provider and provide a master decryption key to access the platform.

We strongly encourage users to generate and store their passwords / master decryption key in a password manager, such as 1Password, Bitwarden, or Dashlane.
</Note>

## Role-based access control (RBAC)

[Infisical's RBAC](https://infisical.com/docs/documentation/platform/role-based-access-controls) feature enables organization owners and administrators to manage fine-grained access policies for members of their organization in Infisical; with RBAC, administrators can define custom roles with permission sets that can be conveniently assigned to other members.

For example, you can define a role that grants read-only access to secrets in a specific project and environment; the role can then be assigned to members of the organization in Infisical.

### Audit logging

Infisical's audit logging feature spans 25+ event types, tracking everything from permission changes to queries and mutations applied to secrets, enabling security and compliance teams at enterprises to monitor information access during suspicious-activity investigations or incident reviews. Every event is timestamped and includes information about the actor, the source (i.e. IP address, user-agent, etc.), and relevant metadata.

### IP allowlisting

Infisical's IP allowlisting feature can be configured to restrict client access to specific IP addresses or CIDR ranges. This applies to any client using service tokens and can be useful, for example, for limiting access to traffic coming from corporate networks.

By default, each project is initialized with the `0.0.0.0/0` entry, representing all possible IPv4 addresses. For enhanced security, we strongly recommend replacing the default entry with your client IPs to tighten access to your secrets.
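To illustrate how a CIDR entry like `0.0.0.0/0` is evaluated (a generic sketch, not Infisical's implementation; the allowlist ranges are hypothetical), an IPv4 allowlist check reduces to comparing the network bits of the client address against each allowed range:

```typescript
// Convert dotted-quad IPv4 to a 32-bit unsigned integer.
function ipv4ToInt(ip: string): number {
  return ip.split(".").reduce((acc, octet) => (acc << 8) + Number(octet), 0) >>> 0;
}

// Does `ip` fall inside the CIDR range, e.g. "10.0.0.0/8"?
function inCidr(ip: string, cidr: string): boolean {
  const [network, prefixStr] = cidr.split("/");
  const prefix = Number(prefixStr);
  if (prefix === 0) return true; // "0.0.0.0/0" matches every IPv4 address
  const mask = (~0 << (32 - prefix)) >>> 0;
  return (ipv4ToInt(ip) & mask) === (ipv4ToInt(network) & mask);
}

const allowlist = ["203.0.113.0/24", "10.0.0.0/8"]; // hypothetical corporate ranges
console.log(allowlist.some((range) => inCidr("203.0.113.42", range))); // true
console.log(allowlist.some((range) => inCidr("198.51.100.7", range))); // false
```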
With standby regions and automated failovers in place, Infisical Cloud faces minimal service disruptions even during large-scale outages.

## Penetration testing

@ -179,7 +116,7 @@ Whether or not Infisical or your employees can access data in the Infisical inst

It should be noted that, even on Infisical Cloud, it is physically impossible for employees of Infisical to view the values of secrets if users have not explicitly granted Infisical access to their project (i.e. opted out of zero-knowledge).

Please email security@infisical.com if you have any specific inquiries about employee data access policies.
Please email security@infisical.com if you have any specific inquiries about employee data and security policies.

## Get in touch

@ -1039,7 +1039,6 @@
"internals/overview",
"internals/permissions",
"internals/components",
"internals/flows",
"internals/security",
"internals/service-tokens"
]
@ -13,9 +13,9 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: v0.8.9
version: v0.8.11
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "v0.8.9"
appVersion: "v0.8.11"
@ -32,7 +32,7 @@ controllerManager:
- ALL
image:
repository: infisical/kubernetes-operator
tag: v0.8.8
tag: v0.8.11
resources:
limits:
cpu: 500m
8
helm-charts/upload-infisical-core-helm-cloudsmith.sh
Executable file
8
helm-charts/upload-infisical-core-helm-cloudsmith.sh
Executable file
@ -0,0 +1,8 @@
cd "infisical-standalone-postgres"
helm dependency update
helm package .
for i in *.tgz; do
    [ -f "$i" ] || break
    cloudsmith push helm --republish infisical/helm-charts "$i"
done
cd ..
8
helm-charts/upload-k8s-operator-cloudsmith.sh
Executable file
8
helm-charts/upload-k8s-operator-cloudsmith.sh
Executable file
@ -0,0 +1,8 @@
cd secrets-operator
helm dependency update
helm package .
for i in *.tgz; do
    [ -f "$i" ] || break
    cloudsmith push helm --republish infisical/helm-charts "$i"
done
cd ..
@ -104,7 +104,7 @@ spec:
includeAllSecrets: true
data:
SSH_KEY: "{{ .KEY.SecretPath }} {{ .KEY.Value }}"
BINARY_KEY: "{{ toBase64DecodedString .BINARY_KEY_BASE64.Value }}"
BINARY_KEY: "{{ decodeBase64ToBytes .BINARY_KEY_BASE64.Value }}"
creationPolicy: "Orphan" ## Owner | Orphan
# secretType: kubernetes.io/dockerconfigjson
@ -156,12 +156,12 @@ func (r *InfisicalSecretReconciler) getInfisicalServiceAccountCredentialsFromKub
}

var infisicalSecretTemplateFunctions = template.FuncMap{
	"decodeBase64ToBytes": func(encodedString string) []byte {
	"decodeBase64ToBytes": func(encodedString string) string {
		decoded, err := base64.StdEncoding.DecodeString(encodedString)
		if err != nil {
			panic(fmt.Sprintf("Error: %v", err))
		}
		return decoded
		return string(decoded)
	},
}
@ -222,7 +222,6 @@ func (r *InfisicalSecretReconciler) createInfisicalManagedKubeSecret(ctx context
	}

	annotations[constants.SECRET_VERSION_ANNOTATION] = ETag

	// create a new secret as specified by the managed secret spec of CRD
	newKubeSecretInstance := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{