Mirror of https://github.com/Infisical/infisical.git (synced 2025-04-06 22:14:48 +00:00)

Compare commits: maidul-adi...oidc-guide (25 commits)

- 8e70731c4c
- 21c6700db2
- 619062033b
- 36973b1b5c
- 1ca578ee03
- 8a7f7ac9fd
- 049fd8e769
- 2c825616a6
- febbd4ade5
- 874dc01692
- b44b8bf647
- 258e561b84
- 5802638fc4
- e2e7004583
- 7826324435
- 2bfc1caec5
- 4b9e3e44e2
- b2a680ebd7
- b269bb81fe
- 5ca7ff4f2d
- ec12d57862
- 2d16f5f258
- 93912da528
- ffc5e61faa
- 70e68f4441
@@ -535,6 +535,107 @@ describe.each([{ auth: AuthMode.JWT }, { auth: AuthMode.IDENTITY_ACCESS_TOKEN }]
      );
    });

    test.each(secretTestCases)("Bulk upsert secrets in path $path", async ({ secret, path }) => {
      const updateSharedSecRes = await testServer.inject({
        method: "PATCH",
        url: `/api/v3/secrets/batch/raw`,
        headers: {
          authorization: `Bearer ${authToken}`
        },
        body: {
          workspaceId: seedData1.projectV3.id,
          environment: seedData1.environment.slug,
          secretPath: path,
          mode: "upsert",
          secrets: Array.from(Array(5)).map((_e, i) => ({
            secretKey: `BULK-${secret.key}-${i + 1}`,
            secretValue: "update-value",
            secretComment: secret.comment
          }))
        }
      });
      expect(updateSharedSecRes.statusCode).toBe(200);
      const updateSharedSecPayload = JSON.parse(updateSharedSecRes.payload);
      expect(updateSharedSecPayload).toHaveProperty("secrets");

      // bulk ones should exist
      const secrets = await getSecrets(seedData1.environment.slug, path);
      expect(secrets).toEqual(
        expect.arrayContaining(
          Array.from(Array(5)).map((_e, i) =>
            expect.objectContaining({
              secretKey: `BULK-${secret.key}-${i + 1}`,
              secretValue: "update-value",
              type: SecretType.Shared
            })
          )
        )
      );
      await Promise.all(
        Array.from(Array(5)).map((_e, i) => deleteSecret({ path, key: `BULK-${secret.key}-${i + 1}` }))
      );
    });

    test("Bulk upsert secrets in path multiple paths", async () => {
      const firstBatchSecrets = Array.from(Array(5)).map((_e, i) => ({
        secretKey: `BULK-KEY-${secretTestCases[0].secret.key}-${i + 1}`,
        secretValue: "update-value",
        secretComment: "comment",
        secretPath: secretTestCases[0].path
      }));
      const secondBatchSecrets = Array.from(Array(5)).map((_e, i) => ({
        secretKey: `BULK-KEY-${secretTestCases[1].secret.key}-${i + 1}`,
        secretValue: "update-value",
        secretComment: "comment",
        secretPath: secretTestCases[1].path
      }));
      const testSecrets = [...firstBatchSecrets, ...secondBatchSecrets];

      const updateSharedSecRes = await testServer.inject({
        method: "PATCH",
        url: `/api/v3/secrets/batch/raw`,
        headers: {
          authorization: `Bearer ${authToken}`
        },
        body: {
          workspaceId: seedData1.projectV3.id,
          environment: seedData1.environment.slug,
          mode: "upsert",
          secrets: testSecrets
        }
      });
      expect(updateSharedSecRes.statusCode).toBe(200);
      const updateSharedSecPayload = JSON.parse(updateSharedSecRes.payload);
      expect(updateSharedSecPayload).toHaveProperty("secrets");

      // bulk ones should exist
      const firstBatchSecretsOnInfisical = await getSecrets(seedData1.environment.slug, secretTestCases[0].path);
      expect(firstBatchSecretsOnInfisical).toEqual(
        expect.arrayContaining(
          firstBatchSecrets.map((el) =>
            expect.objectContaining({
              secretKey: el.secretKey,
              secretValue: "update-value",
              type: SecretType.Shared
            })
          )
        )
      );
      const secondBatchSecretsOnInfisical = await getSecrets(seedData1.environment.slug, secretTestCases[1].path);
      expect(secondBatchSecretsOnInfisical).toEqual(
        expect.arrayContaining(
          secondBatchSecrets.map((el) =>
            expect.objectContaining({
              secretKey: el.secretKey,
              secretValue: "update-value",
              type: SecretType.Shared
            })
          )
        )
      );
      await Promise.all(testSecrets.map((el) => deleteSecret({ path: el.secretPath, key: el.secretKey })));
    });

    test.each(secretTestCases)("Bulk delete secrets in path $path", async ({ secret, path }) => {
      await Promise.all(
        Array.from(Array(5)).map((_e, i) => createSecret({ ...secret, key: `BULK-${secret.key}-${i + 1}`, path }))
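For reference, the endpoint exercised by these tests can also be called directly. A minimal sketch, assuming an Infisical instance at app.infisical.com, a valid bearer token, and placeholder project and secret values of your own; with `"mode": "upsert"`, missing secrets are created instead of failing the whole batch:

```bash
# Hypothetical values throughout — substitute your own project ID,
# environment slug, and access token.
curl -X PATCH "https://app.infisical.com/api/v3/secrets/batch/raw" \
  -H "Authorization: Bearer <access-token>" \
  -H "Content-Type: application/json" \
  -d '{
    "workspaceId": "<project-id>",
    "environment": "dev",
    "secretPath": "/",
    "mode": "upsert",
    "secrets": [
      { "secretKey": "DB_USER", "secretValue": "postgres" },
      { "secretKey": "DB_PASS", "secretValue": "example" }
    ]
  }'
```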
@@ -352,6 +352,7 @@ interface CreateSecretBatchEvent {
  secrets: Array<{
    secretId: string;
    secretKey: string;
    secretPath?: string;
    secretVersion: number;
    secretMetadata?: TSecretMetadata;
  }>;

@@ -374,8 +375,14 @@ interface UpdateSecretBatchEvent {
  type: EventType.UPDATE_SECRETS;
  metadata: {
    environment: string;
    secretPath: string;
    secrets: Array<{ secretId: string; secretKey: string; secretVersion: number; secretMetadata?: TSecretMetadata }>;
    secretPath?: string;
    secrets: Array<{
      secretId: string;
      secretKey: string;
      secretVersion: number;
      secretMetadata?: TSecretMetadata;
      secretPath?: string;
    }>;
  };
}
@@ -721,7 +721,8 @@ export const RAW_SECRETS = {
  secretName: "The name of the secret to update.",
  secretComment: "Update comment to the secret.",
  environment: "The slug of the environment where the secret is located.",
  secretPath: "The path of the secret to update.",
  mode: "Defines how the system should handle missing secrets during an update.",
  secretPath: "The default path for secrets to update or upsert, if not provided in the secret details.",
  secretValue: "The new value of the secret.",
  skipMultilineEncoding: "Skip multiline encoding for the secret value.",
  type: "The type of the secret to update.",
@@ -20,6 +20,7 @@ import { ActorType, AuthMode } from "@app/services/auth/auth-type";
import { ProjectFilterType } from "@app/services/project/project-types";
import { ResourceMetadataSchema } from "@app/services/resource-metadata/resource-metadata-schema";
import { SecretOperations, SecretProtectionType } from "@app/services/secret/secret-types";
import { SecretUpdateMode } from "@app/services/secret-v2-bridge/secret-v2-bridge-types";
import { PostHogEventTypes } from "@app/services/telemetry/telemetry-types";

import { secretRawSchema } from "../sanitizedSchemas";

@@ -2030,6 +2031,11 @@ export const registerSecretRouter = async (server: FastifyZodProvider) => {
            .default("/")
            .transform(removeTrailingSlash)
            .describe(RAW_SECRETS.UPDATE.secretPath),
          mode: z
            .nativeEnum(SecretUpdateMode)
            .optional()
            .default(SecretUpdateMode.FailOnNotFound)
            .describe(RAW_SECRETS.UPDATE.mode),
          secrets: z
            .object({
              secretKey: SecretNameSchema.describe(RAW_SECRETS.UPDATE.secretName),

@@ -2037,6 +2043,12 @@ export const registerSecretRouter = async (server: FastifyZodProvider) => {
                .string()
                .transform((val) => (val.at(-1) === "\n" ? `${val.trim()}\n` : val.trim()))
                .describe(RAW_SECRETS.UPDATE.secretValue),
              secretPath: z
                .string()
                .trim()
                .transform(removeTrailingSlash)
                .optional()
                .describe(RAW_SECRETS.UPDATE.secretPath),
              secretComment: z.string().trim().optional().describe(RAW_SECRETS.UPDATE.secretComment),
              skipMultilineEncoding: z.boolean().optional().describe(RAW_SECRETS.UPDATE.skipMultilineEncoding),
              newSecretName: SecretNameSchema.optional().describe(RAW_SECRETS.UPDATE.newSecretName),

@@ -2073,7 +2085,8 @@ export const registerSecretRouter = async (server: FastifyZodProvider) => {
        environment,
        projectSlug,
        projectId: req.body.workspaceId,
        secrets: inputSecrets
        secrets: inputSecrets,
        mode: req.body.mode
      });
      if (secretOperation.type === SecretProtectionType.Approval) {
        return { approval: secretOperation.approval };

@@ -2092,15 +2105,39 @@ export const registerSecretRouter = async (server: FastifyZodProvider) => {
          metadata: {
            environment: req.body.environment,
            secretPath: req.body.secretPath,
            secrets: secrets.map((secret) => ({
              secretId: secret.id,
              secretKey: secret.secretKey,
              secretVersion: secret.version,
              secretMetadata: secretMetadataMap.get(secret.secretKey)
            }))
            secrets: secrets
              .filter((el) => el.version > 1)
              .map((secret) => ({
                secretId: secret.id,
                secretPath: secret.secretPath,
                secretKey: secret.secretKey,
                secretVersion: secret.version,
                secretMetadata: secretMetadataMap.get(secret.secretKey)
              }))
          }
        }
      });
      const createdSecrets = secrets.filter((el) => el.version === 1);
      if (createdSecrets.length) {
        await server.services.auditLog.createAuditLog({
          projectId: secrets[0].workspace,
          ...req.auditLogInfo,
          event: {
            type: EventType.CREATE_SECRETS,
            metadata: {
              environment: req.body.environment,
              secretPath: req.body.secretPath,
              secrets: createdSecrets.map((secret) => ({
                secretId: secret.id,
                secretPath: secret.secretPath,
                secretKey: secret.secretKey,
                secretVersion: secret.version,
                secretMetadata: secretMetadataMap.get(secret.secretKey)
              }))
            }
          }
        });
      }

      await server.services.telemetry.sendPostHogEvents({
        event: PostHogEventTypes.SecretUpdated,
@@ -1,7 +1,15 @@
import { ForbiddenError, PureAbility, subject } from "@casl/ability";
import { Knex } from "knex";
import { z } from "zod";

import { ActionProjectType, ProjectMembershipRole, SecretsV2Schema, SecretType, TableName } from "@app/db/schemas";
import {
  ActionProjectType,
  ProjectMembershipRole,
  SecretsV2Schema,
  SecretType,
  TableName,
  TSecretsV2
} from "@app/db/schemas";
import { TPermissionServiceFactory } from "@app/ee/services/permission/permission-service";
import { ProjectPermissionActions, ProjectPermissionSub } from "@app/ee/services/permission/project-permission";
import { TSecretApprovalPolicyServiceFactory } from "@app/ee/services/secret-approval-policy/secret-approval-policy-service";

@@ -36,6 +44,7 @@ import {
} from "./secret-v2-bridge-fns";
import {
  SecretOperations,
  SecretUpdateMode,
  TBackFillSecretReferencesDTO,
  TCreateManySecretDTO,
  TCreateSecretDTO,

@@ -103,12 +112,13 @@ export const secretV2BridgeServiceFactory = ({
  const $validateSecretReferences = async (
    projectId: string,
    permission: PureAbility,
    references: ReturnType<typeof getAllSecretReferences>["nestedReferences"]
    references: ReturnType<typeof getAllSecretReferences>["nestedReferences"],
    tx?: Knex
  ) => {
    if (!references.length) return;

    const uniqueReferenceEnvironmentSlugs = Array.from(new Set(references.map((el) => el.environment)));
    const referencesEnvironments = await projectEnvDAL.findBySlugs(projectId, uniqueReferenceEnvironmentSlugs);
    const referencesEnvironments = await projectEnvDAL.findBySlugs(projectId, uniqueReferenceEnvironmentSlugs, tx);
    if (referencesEnvironments.length !== uniqueReferenceEnvironmentSlugs.length)
      throw new BadRequestError({
        message: `Referenced environment not found. Missing ${diff(

@@ -122,36 +132,41 @@ export const secretV2BridgeServiceFactory = ({
      references.map((el) => ({
        secretPath: el.secretPath,
        envId: referencesEnvironmentGroupBySlug[el.environment][0].id
      }))
      })),
      tx
    );
    const referencesFolderGroupByPath = groupBy(referredFolders.filter(Boolean), (i) => `${i?.envId}-${i?.path}`);
    const referredSecrets = await secretDAL.find({
      $complex: {
        operator: "or",
        value: references.map((el) => {
          const folderId =
            referencesFolderGroupByPath[`${referencesEnvironmentGroupBySlug[el.environment][0].id}-${el.secretPath}`][0]
              ?.id;
          if (!folderId) throw new BadRequestError({ message: `Referenced path ${el.secretPath} doesn't exist` });
    const referredSecrets = await secretDAL.find(
      {
        $complex: {
          operator: "or",
          value: references.map((el) => {
            const folderId =
              referencesFolderGroupByPath[
                `${referencesEnvironmentGroupBySlug[el.environment][0].id}-${el.secretPath}`
              ][0]?.id;
            if (!folderId) throw new BadRequestError({ message: `Referenced path ${el.secretPath} doesn't exist` });

          return {
            operator: "and",
            value: [
              {
                operator: "eq",
                field: "folderId",
                value: folderId
              },
              {
                operator: "eq",
                field: `${TableName.SecretV2}.key` as "key",
                value: el.secretKey
              }
            ]
          };
        })
      }
    });
            return {
              operator: "and",
              value: [
                {
                  operator: "eq",
                  field: "folderId",
                  value: folderId
                },
                {
                  operator: "eq",
                  field: `${TableName.SecretV2}.key` as "key",
                  value: el.secretKey
                }
              ]
            };
          })
        }
      },
      { tx }
    );

    if (
      referredSecrets.length !==

@@ -1245,8 +1260,9 @@ export const secretV2BridgeServiceFactory = ({
    actorAuthMethod,
    environment,
    projectId,
    secretPath,
    secrets: inputSecrets
    secretPath: defaultSecretPath = "/",
    secrets: inputSecrets,
    mode: updateMode
  }: TUpdateManySecretDTO) => {
    const { permission } = await permissionService.getProjectPermission({
      actor,

@@ -1257,196 +1273,280 @@ export const secretV2BridgeServiceFactory = ({
      actionProjectType: ActionProjectType.SecretManager
    });

    const folder = await folderDAL.findBySecretPath(projectId, environment, secretPath);
    if (!folder)
    const secretsToUpdateGroupByPath = groupBy(inputSecrets, (el) => el.secretPath || defaultSecretPath);
    const projectEnvironment = await projectEnvDAL.findOne({ projectId, slug: environment });
    if (!projectEnvironment) {
      throw new NotFoundError({
        message: `Folder with path '${secretPath}' in environment with slug '${environment}' not found`,
        message: `Environment with slug '${environment}' in project with ID '${projectId}' not found`
      });
    }

    const folders = await folderDAL.findByManySecretPath(
      Object.keys(secretsToUpdateGroupByPath).map((el) => ({ envId: projectEnvironment.id, secretPath: el }))
    );
    if (folders.length !== Object.keys(secretsToUpdateGroupByPath).length)
      throw new NotFoundError({
        message: `Folder with path '${null}' in environment with slug '${environment}' not found`,
        name: "UpdateManySecret"
      });
    const folderId = folder.id;

    const secretsToUpdate = await secretDAL.find({
      folderId,
      $complex: {
        operator: "and",
        value: [
          {
            operator: "or",
            value: inputSecrets.map((el) => ({
              operator: "and",
              value: [
                {
                  operator: "eq",
                  field: `${TableName.SecretV2}.key` as "key",
                  value: el.secretKey
                },
                {
                  operator: "eq",
                  field: "type",
                  value: SecretType.Shared
                }
              ]
            }))
          }
        ]
      }
    });
    if (secretsToUpdate.length !== inputSecrets.length) {
      const secretsToUpdateNames = secretsToUpdate.map((secret) => secret.key);
      const invalidSecrets = inputSecrets.filter((secret) => !secretsToUpdateNames.includes(secret.secretKey));
      throw new NotFoundError({
        message: `Secret does not exist: ${invalidSecrets.map((el) => el.secretKey).join(",")}`
      });
    }
    const secretsToUpdateInDBGroupedByKey = groupBy(secretsToUpdate, (i) => i.key);

    secretsToUpdate.forEach((el) => {
      ForbiddenError.from(permission).throwUnlessCan(
        ProjectPermissionActions.Edit,
        subject(ProjectPermissionSub.Secrets, {
          environment,
          secretPath,
          secretName: el.key,
          secretTags: el.tags.map((i) => i.slug)
        })
      );
    });

    // get all tags
    const sanitizedTagIds = inputSecrets.flatMap(({ tagIds = [] }) => tagIds);
    const tags = sanitizedTagIds.length ? await secretTagDAL.findManyTagsById(projectId, sanitizedTagIds) : [];
    if (tags.length !== sanitizedTagIds.length) throw new NotFoundError({ message: "Tag not found" });
    const tagsGroupByID = groupBy(tags, (i) => i.id);

    // check again to avoid non authorized tags are removed
    inputSecrets.forEach((el) => {
      ForbiddenError.from(permission).throwUnlessCan(
        ProjectPermissionActions.Edit,
        subject(ProjectPermissionSub.Secrets, {
          environment,
          secretPath,
          secretName: el.secretKey,
          secretTags: (el.tagIds || []).map((i) => tagsGroupByID[i][0].slug)
        })
      );
    });

    // now find any secret that needs to update its name
    // same process as above
    const secretsWithNewName = inputSecrets.filter(({ newSecretName }) => Boolean(newSecretName));
    if (secretsWithNewName.length) {
      const secrets = await secretDAL.find({
        folderId,
        $complex: {
          operator: "and",
          value: [
            {
              operator: "or",
              value: secretsWithNewName.map((el) => ({
                operator: "and",
                value: [
                  {
                    operator: "eq",
                    field: `${TableName.SecretV2}.key` as "key",
                    value: el.secretKey
                  },
                  {
                    operator: "eq",
                    field: "type",
                    value: SecretType.Shared
                  }
                ]
              }))
            }
          ]
        }
      });
      if (secrets.length)
        throw new BadRequestError({
          message: `Secret with new name already exists: ${secretsWithNewName.map((el) => el.newSecretName).join(",")}`
        });

      secretsWithNewName.forEach((el) => {
        ForbiddenError.from(permission).throwUnlessCan(
          ProjectPermissionActions.Create,
          subject(ProjectPermissionSub.Secrets, {
            environment,
            secretPath,
            secretName: el.newSecretName as string,
            secretTags: (el.tagIds || []).map((i) => tagsGroupByID[i][0].slug)
          })
        );
      });
    }
    // now get all secret references made and validate the permission
    const secretReferencesGroupByInputSecretKey: Record<string, ReturnType<typeof getAllSecretReferences>> = {};
    const secretReferences: TSecretReference[] = [];
    inputSecrets.forEach((el) => {
      if (el.secretValue) {
        const references = getAllSecretReferences(el.secretValue);
        secretReferencesGroupByInputSecretKey[el.secretKey] = references;
        secretReferences.push(...references.nestedReferences);
        references.localReferences.forEach((localRefKey) => {
          secretReferences.push({ secretKey: localRefKey, secretPath, environment });
        });
      }
    });
    await $validateSecretReferences(projectId, permission, secretReferences);

    const { encryptor: secretManagerEncryptor, decryptor: secretManagerDecryptor } =
      await kmsService.createCipherPairWithDataKey({ type: KmsDataKey.SecretManager, projectId });

    const secrets = await secretDAL.transaction(async (tx) =>
      fnSecretBulkUpdate({
        folderId,
        orgId: actorOrgId,
        tx,
        inputSecrets: inputSecrets.map((el) => {
          const originalSecret = secretsToUpdateInDBGroupedByKey[el.secretKey][0];
          const encryptedValue =
            typeof el.secretValue !== "undefined"
              ? {
                  encryptedValue: secretManagerEncryptor({ plainText: Buffer.from(el.secretValue) }).cipherTextBlob,
                  references: secretReferencesGroupByInputSecretKey[el.secretKey]?.nestedReferences
                }
              : {};
    const updatedSecrets: Array<TSecretsV2 & { secretPath: string }> = [];
    await secretDAL.transaction(async (tx) => {
      for await (const folder of folders) {
        if (!folder) throw new NotFoundError({ message: "Folder not found" });

          return {
            filter: { id: originalSecret.id, type: SecretType.Shared },
            data: {
              reminderRepeatDays: el.secretReminderRepeatDays,
              encryptedComment: setKnexStringValue(
                el.secretComment,
                (value) => secretManagerEncryptor({ plainText: Buffer.from(value) }).cipherTextBlob
              ),
              reminderNote: el.secretReminderNote,
              skipMultilineEncoding: el.skipMultilineEncoding,
              key: el.newSecretName || el.secretKey,
              tags: el.tagIds,
              secretMetadata: el.secretMetadata,
              ...encryptedValue
        const folderId = folder.id;
        const secretPath = folder.path;
        let secretsToUpdate = secretsToUpdateGroupByPath[secretPath];
        const secretsToUpdateInDB = await secretDAL.find(
          {
            folderId,
            $complex: {
              operator: "and",
              value: [
                {
                  operator: "or",
                  value: secretsToUpdate.map((el) => ({
                    operator: "and",
                    value: [
                      {
                        operator: "eq",
                        field: `${TableName.SecretV2}.key` as "key",
                        value: el.secretKey
                      },
                      {
                        operator: "eq",
                        field: "type",
                        value: SecretType.Shared
                      }
                    ]
                  }))
                }
              ]
            }
          };
        }),
        secretDAL,
        secretVersionDAL,
        secretTagDAL,
        secretVersionTagDAL,
        resourceMetadataDAL
      })
    );
    await snapshotService.performSnapshot(folderId);
    await secretQueueService.syncSecrets({
      actor,
      actorId,
      secretPath,
      projectId,
      orgId: actorOrgId,
      environmentSlug: folder.environment.slug
    },
          { tx }
        );
        if (secretsToUpdateInDB.length !== secretsToUpdate.length && updateMode === SecretUpdateMode.FailOnNotFound)
          throw new NotFoundError({
            message: `Secret does not exist: ${diff(
              secretsToUpdate.map((el) => el.secretKey),
              secretsToUpdateInDB.map((el) => el.key)
            ).join(", ")} in path ${folder.path}`
          });

        const secretsToUpdateInDBGroupedByKey = groupBy(secretsToUpdateInDB, (i) => i.key);
        const secretsToCreate = secretsToUpdate.filter((el) => !secretsToUpdateInDBGroupedByKey?.[el.secretKey]);
        secretsToUpdate = secretsToUpdate.filter((el) => secretsToUpdateInDBGroupedByKey?.[el.secretKey]);

        secretsToUpdateInDB.forEach((el) => {
          ForbiddenError.from(permission).throwUnlessCan(
            ProjectPermissionActions.Edit,
            subject(ProjectPermissionSub.Secrets, {
              environment,
              secretPath,
              secretName: el.key,
              secretTags: el.tags.map((i) => i.slug)
            })
          );
        });

        // get all tags
        const sanitizedTagIds = secretsToUpdate.flatMap(({ tagIds = [] }) => tagIds);
        const tags = sanitizedTagIds.length ? await secretTagDAL.findManyTagsById(projectId, sanitizedTagIds, tx) : [];
        if (tags.length !== sanitizedTagIds.length) throw new NotFoundError({ message: "Tag not found" });
        const tagsGroupByID = groupBy(tags, (i) => i.id);

        // check create permission allowed in upsert mode
        if (updateMode === SecretUpdateMode.Upsert) {
          secretsToCreate.forEach((el) => {
            ForbiddenError.from(permission).throwUnlessCan(
              ProjectPermissionActions.Create,
              subject(ProjectPermissionSub.Secrets, {
                environment,
                secretPath,
                secretName: el.secretKey,
                secretTags: (el.tagIds || []).map((i) => tagsGroupByID[i][0].slug)
              })
            );
          });
        }

        // check again to avoid non authorized tags are removed
        secretsToUpdate.forEach((el) => {
          ForbiddenError.from(permission).throwUnlessCan(
            ProjectPermissionActions.Edit,
            subject(ProjectPermissionSub.Secrets, {
              environment,
              secretPath,
              secretName: el.secretKey,
              secretTags: (el.tagIds || []).map((i) => tagsGroupByID[i][0].slug)
            })
          );
        });

        // now find any secret that needs to update its name
        // same process as above
        const secretsWithNewName = secretsToUpdate.filter(({ newSecretName }) => Boolean(newSecretName));
        if (secretsWithNewName.length) {
          const secrets = await secretDAL.find(
            {
              folderId,
              $complex: {
                operator: "and",
                value: [
                  {
                    operator: "or",
                    value: secretsWithNewName.map((el) => ({
                      operator: "and",
                      value: [
                        {
                          operator: "eq",
                          field: `${TableName.SecretV2}.key` as "key",
                          value: el.secretKey
                        },
                        {
                          operator: "eq",
                          field: "type",
                          value: SecretType.Shared
                        }
                      ]
                    }))
                  }
                ]
              }
            },
            { tx }
          );
          if (secrets.length)
            throw new BadRequestError({
              message: `Secret with new name already exists: ${secretsWithNewName
                .map((el) => el.newSecretName)
                .join(", ")}`
            });

          secretsWithNewName.forEach((el) => {
            ForbiddenError.from(permission).throwUnlessCan(
              ProjectPermissionActions.Create,
              subject(ProjectPermissionSub.Secrets, {
                environment,
                secretPath,
                secretName: el.newSecretName as string,
                secretTags: (el.tagIds || []).map((i) => tagsGroupByID[i][0].slug)
              })
            );
          });
        }
        // now get all secret references made and validate the permission
        const secretReferencesGroupByInputSecretKey: Record<string, ReturnType<typeof getAllSecretReferences>> = {};
        const secretReferences: TSecretReference[] = [];
        secretsToUpdate.concat(SecretUpdateMode.Upsert === updateMode ? secretsToCreate : []).forEach((el) => {
          if (el.secretValue) {
            const references = getAllSecretReferences(el.secretValue);
            secretReferencesGroupByInputSecretKey[el.secretKey] = references;
            secretReferences.push(...references.nestedReferences);
            references.localReferences.forEach((localRefKey) => {
              secretReferences.push({ secretKey: localRefKey, secretPath, environment });
            });
          }
        });
        await $validateSecretReferences(projectId, permission, secretReferences, tx);

        const bulkUpdatedSecrets = await fnSecretBulkUpdate({
          folderId,
          orgId: actorOrgId,
          tx,
          inputSecrets: secretsToUpdate.map((el) => {
            const originalSecret = secretsToUpdateInDBGroupedByKey[el.secretKey][0];
            const encryptedValue =
              typeof el.secretValue !== "undefined"
                ? {
                    encryptedValue: secretManagerEncryptor({ plainText: Buffer.from(el.secretValue) }).cipherTextBlob,
                    references: secretReferencesGroupByInputSecretKey[el.secretKey]?.nestedReferences
                  }
                : {};

            return {
              filter: { id: originalSecret.id, type: SecretType.Shared },
              data: {
                reminderRepeatDays: el.secretReminderRepeatDays,
                encryptedComment: setKnexStringValue(
                  el.secretComment,
                  (value) => secretManagerEncryptor({ plainText: Buffer.from(value) }).cipherTextBlob
                ),
                reminderNote: el.secretReminderNote,
                skipMultilineEncoding: el.skipMultilineEncoding,
                key: el.newSecretName || el.secretKey,
                tags: el.tagIds,
                secretMetadata: el.secretMetadata,
                ...encryptedValue
              }
            };
          }),
          secretDAL,
          secretVersionDAL,
          secretTagDAL,
          secretVersionTagDAL,
          resourceMetadataDAL
        });
        updatedSecrets.push(...bulkUpdatedSecrets.map((el) => ({ ...el, secretPath: folder.path })));
        if (updateMode === SecretUpdateMode.Upsert) {
          const bulkInsertedSecrets = await fnSecretBulkInsert({
            inputSecrets: secretsToCreate.map((el) => {
              const references = secretReferencesGroupByInputSecretKey[el.secretKey]?.nestedReferences;

              return {
                version: 1,
                encryptedComment: setKnexStringValue(
                  el.secretComment,
                  (value) => secretManagerEncryptor({ plainText: Buffer.from(value) }).cipherTextBlob
                ),
                encryptedValue: el.secretValue
                  ? secretManagerEncryptor({ plainText: Buffer.from(el.secretValue) }).cipherTextBlob
                  : undefined,
                skipMultilineEncoding: el.skipMultilineEncoding,
                key: el.secretKey,
                tagIds: el.tagIds,
                references,
                secretMetadata: el.secretMetadata,
                type: SecretType.Shared
              };
            }),
            folderId,
            orgId: actorOrgId,
            secretDAL,
            resourceMetadataDAL,
            secretVersionDAL,
            secretTagDAL,
            secretVersionTagDAL,
            tx
          });
          updatedSecrets.push(...bulkInsertedSecrets.map((el) => ({ ...el, secretPath: folder.path })));
        }
      }
    });

    return secrets.map((el) =>
      reshapeBridgeSecret(projectId, environment, secretPath, {
    await Promise.allSettled(folders.map((el) => (el?.id ? snapshotService.performSnapshot(el.id) : undefined)));
    await Promise.allSettled(
      folders.map((el) =>
        el
          ? secretQueueService.syncSecrets({
              actor,
              actorId,
              secretPath: el.path,
              projectId,
              orgId: actorOrgId,
              environmentSlug: environment
            })
          : undefined
      )
    );

    return updatedSecrets.map((el) =>
      reshapeBridgeSecret(projectId, environment, el.secretPath, {
        ...el,
        value: el.encryptedValue ? secretManagerDecryptor({ cipherTextBlob: el.encryptedValue }).toString() : "",
        comment: el.encryptedComment ? secretManagerDecryptor({ cipherTextBlob: el.encryptedComment }).toString() : ""
@@ -23,6 +23,12 @@ export type TSecretReferenceDTO = {
  secretKey: string;
};

export enum SecretUpdateMode {
  Ignore = "ignore",
  Upsert = "upsert",
  FailOnNotFound = "failOnNotFound"
}

export type TGetSecretsDTO = {
  expandSecretReferences?: boolean;
  path: string;

@@ -113,6 +119,7 @@ export type TUpdateManySecretDTO = Omit<TProjectPermission, "projectId"> & {
  secretPath: string;
  projectId: string;
  environment: string;
  mode: SecretUpdateMode;
  secrets: {
    secretKey: string;
    newSecretName?: string;

@@ -123,6 +130,7 @@ export type TUpdateManySecretDTO = Omit<TProjectPermission, "projectId"> & {
    secretReminderRepeatDays?: number | null;
    secretReminderNote?: string | null;
    secretMetadata?: ResourceMetadataDTO;
    secretPath?: string;
  }[];
};
@@ -30,7 +30,10 @@ import { groupBy, pick } from "@app/lib/fn";
import { logger } from "@app/lib/logger";
import { alphaNumericNanoId } from "@app/lib/nanoid";
import { OrgServiceActor } from "@app/lib/types";
import { TGetSecretsRawByFolderMappingsDTO } from "@app/services/secret-v2-bridge/secret-v2-bridge-types";
import {
  SecretUpdateMode,
  TGetSecretsRawByFolderMappingsDTO
} from "@app/services/secret-v2-bridge/secret-v2-bridge-types";

import { ActorType } from "../auth/auth-type";
import { TProjectDALFactory } from "../project/project-dal";

@@ -2012,6 +2015,7 @@ export const secretServiceFactory = ({
    actorOrgId,
    actorAuthMethod,
    secretPath,
    mode = SecretUpdateMode.FailOnNotFound,
    secrets: inputSecrets = []
  }: TUpdateManySecretRawDTO) => {
    if (!projectSlug && !optionalProjectId)

@@ -2076,7 +2080,8 @@ export const secretServiceFactory = ({
      actorOrgId,
      actor,
      actorId,
      secrets: inputSecrets
      secrets: inputSecrets,
      mode
    });
    return { type: SecretProtectionType.Direct as const, secrets };
  }
@@ -17,6 +17,7 @@ import { TKmsServiceFactory } from "../kms/kms-service";
import { TResourceMetadataDALFactory } from "../resource-metadata/resource-metadata-dal";
import { ResourceMetadataDTO } from "../resource-metadata/resource-metadata-schema";
import { TSecretV2BridgeDALFactory } from "../secret-v2-bridge/secret-v2-bridge-dal";
import { SecretUpdateMode } from "../secret-v2-bridge/secret-v2-bridge-types";
import { TSecretVersionV2DALFactory } from "../secret-v2-bridge/secret-version-dal";
import { TSecretVersionV2TagDALFactory } from "../secret-v2-bridge/secret-version-tag-dal";

@@ -274,6 +275,7 @@ export type TUpdateManySecretRawDTO = Omit<TProjectPermission, "projectId"> & {
  projectId?: string;
  projectSlug?: string;
  environment: string;
  mode: SecretUpdateMode;
  secrets: {
    secretKey: string;
    newSecretName?: string;
@@ -4,9 +4,6 @@ title: "Backend development guide"

Suppose you're interested in implementing a new feature in Infisical's backend; let's call it "feature-x." Here are the general steps you should follow.

## Database schema migration
In order to run [schema migrations](https://en.wikipedia.org/wiki/Schema_migration#:~:text=A%20schema%20migration%20is%20performed,some%20newer%20or%20older%20version) you need to expose your database connection string. Create a `.env.migration` file to set the database connection URI for migration scripts, or alternatively, export the `DB_CONNECTION_URI` environment variable.
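For example, a minimal `.env.migration` file might look like the following sketch; the connection string is illustrative, so substitute your own Postgres user, password, host, and database name:

```
# .env.migration — example values only
DB_CONNECTION_URI=postgres://infisical:infisical@localhost:5432/infisical
```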
## Creating new database model
If your feature involves a change in the database, you need to first address this by generating the necessary database schemas.
@@ -0,0 +1,88 @@
---
title: "Terraform Cloud"
description: "How to authenticate with Infisical from Terraform Cloud using OIDC."
---

This guide will show you how to configure Terraform Cloud to inject a workload identity token and use it for OIDC-based authentication with the Infisical Terraform provider.
At a high level, we'll begin by creating a machine identity in Infisical, then move on to enabling and using the injected token in your Terraform runs.

<Steps>
<Step title="Create a Machine Identity in Infisical">
  Follow the instructions [in this documentation](/documentation/platform/identities/oidc-auth/general) to create a machine identity with OIDC auth. Use the following Infisical OIDC configuration values for Terraform Cloud:
  1. Set the OIDC Discovery URL to https://app.terraform.io.
  2. Set the Issuer to https://app.terraform.io.
  3. Configure the Audience to match the value you will use for **TFC_WORKLOAD_IDENTITY_AUDIENCE** in Terraform Cloud in the next step.

  To view all possible claims available from Terraform Cloud, visit [HashiCorp's documentation](https://developer.hashicorp.com/terraform/cloud-docs/workspaces/dynamic-provider-credentials/workload-identity-tokens#token-structure).
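  As a reference point when binding claims, Terraform Cloud's `sub` claim follows the shape sketched below (per the HashiCorp token-structure documentation linked above); the organization, project, workspace, and run-phase segments are placeholders for your own values:

  ```
  organization:<org-name>:project:<project-name>:workspace:<workspace-name>:run_phase:<plan-or-apply>
  ```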
</Step>
<Step title="Enable Workload Identity Token Injection in Terraform Cloud">

  <Tabs>
    <Tab title="Generate single token">
      1. **Navigate to your workspace** in Terraform Cloud.
      2. **Add a workspace variable** named `TFC_WORKLOAD_IDENTITY_AUDIENCE`:
         - **Key**: `TFC_WORKLOAD_IDENTITY_AUDIENCE`
         - **Value**: For example, `my-infisical-audience`
         - **Category**: Environment

      > **Important**:
      > - The presence of `TFC_WORKLOAD_IDENTITY_AUDIENCE` is required for Terraform Cloud to inject a token.
      > - If you are self-hosting HCP Terraform agents, ensure they are **v1.7.0 or above**.

      Once set, Terraform Cloud will inject a workload identity token into the run environment as `TFC_WORKLOAD_IDENTITY_TOKEN`.
    </Tab>
    <Tab title="(Optional) Generate Multiple Tokens">
      If you need multiple tokens (each with a different audience), create additional variables:

      ```
      TFC_WORKLOAD_IDENTITY_AUDIENCE_[YOUR_TAG_HERE]
      ```

      For example:
      - `TFC_WORKLOAD_IDENTITY_AUDIENCE_INFISICAL`
      - `TFC_WORKLOAD_IDENTITY_AUDIENCE_OTHER_SERVICE`

      Terraform Cloud will then inject:
      - `TFC_WORKLOAD_IDENTITY_TOKEN_INFISICAL`
      - `TFC_WORKLOAD_IDENTITY_TOKEN_OTHER_SERVICE`

      > **Note**:
      > - The `[YOUR_TAG_HERE]` segment can only contain letters, numbers, and underscores.
      > - You **cannot** use the reserved keyword `TYPE`.
      > - Generating multiple tokens requires **v1.12.0 or later** if you are self-hosting agents.
    </Tab>
  </Tabs>

  <Warning>
    If you are running on self-hosted HCP Terraform agents, you must use v1.7.0 or later to enable token injection. If you need to generate multiple tokens, you must use v1.12.0 or later.
  </Warning>
</Step>
<Step title="Configure the Infisical Provider">
  In your Terraform configuration, reference the injected token by name. For example:

  ```hcl
  provider "infisical" {
    host = "https://app.infisical.com"

    auth = {
      oidc = {
        identity_id = "<identity-id>"
        # This must match the environment variable Terraform injects:
        token_environment_variable_name = "TFC_WORKLOAD_IDENTITY_TOKEN"
      }
    }
  }
  ```

  - **`host`**: Defaults to `https://app.infisical.com`. Override if using a self-hosted Infisical instance.
  - **`identity_id`**: The OIDC identity ID from Infisical.
  - **`token_environment_variable_name`**: Must match the injected variable name from Terraform Cloud. If using a single token, use `TFC_WORKLOAD_IDENTITY_TOKEN`. If using multiple tokens, choose the one you want to use (e.g., `TFC_WORKLOAD_IDENTITY_TOKEN_INFISICAL`).
</Step>
<Step title="Validate Your Setup">
  1. Run a plan and apply in Terraform Cloud.
  2. Verify that the Infisical provider authenticates successfully. If you run into authentication errors, double-check that the Infisical identity has the correct roles and permissions in Infisical.
</Step>
</Steps>
@@ -1,102 +1,237 @@
---
title: "Terraform Provider"
description: "Learn how to fetch Secrets From Infisical With Terraform."
url: "https://registry.terraform.io/providers/Infisical/infisical/latest/docs"
title: "Terraform"
description: "Learn how to fetch secrets from Infisical with Terraform using both traditional data sources and ephemeral resources"
---
{/*
This guide provides step-by-step guidance on how to fetch secrets from Infisical using Terraform.

This guide demonstrates how to use Infisical to manage secrets in your Terraform infrastructure code, supporting both traditional data sources and ephemeral resources for enhanced security. It uses:

- Infisical (you can use [Infisical Cloud](https://app.infisical.com) or a [self-hosted instance of Infisical](https://infisical.com/docs/self-hosting/overview)) to store your secrets
- The [Terraform Provider](https://registry.terraform.io/providers/Infisical/infisical/latest) to fetch secrets for your infrastructure

## Prerequisites

- Basic understanding of Terraform
- Install [Terraform](https://www.terraform.io/downloads.html)
Before you begin, make sure you have:

## Steps
- [Terraform](https://www.terraform.io/downloads.html) installed (v1.10.0+ for ephemeral resources)
- An Infisical account with access to a project
- Basic understanding of Terraform and infrastructure as code

### 1. Define Required Providers
## Project Setup

Specify `infisical` in the `required_providers` block within the `terraform` block of your configuration file. If you would like to use a specific version of the provider, uncomment and replace `<latest version>` with the version of the Infisical provider that you want to use.
### Configure Provider

```hcl main.tf
First, specify the Infisical provider in your Terraform configuration:

```hcl
terraform {
  required_providers {
    infisical = {
      # version = <latest version>
      source = "infisical/infisical"
    }
  }
}
```

### 2. Configure the Infisical Provider
### Authentication

Set up the Infisical provider by specifying the `host` and `service_token`. Replace `<>` in `service_token` with your actual token. The `host` is only required if you are using a self-hosted instance of Infisical.
Configure the provider using one of these authentication methods:

```hcl main.tf
#### Machine Identity (Recommended)

Using a Machine Identity, you can authenticate your Terraform provider using either [OIDC Auth](https://infisical.com/docs/documentation/platform/identities/oidc-auth/general) or [Universal Auth](https://infisical.com/docs/documentation/platform/identities/universal-auth) methods.

```hcl
provider "infisical" {
  host = "https://app.infisical.com" # Only required if using a self-hosted instance of Infisical, default is https://app.infisical.com
  client_id = "<>"
  client_secret = "<>"
  service_token = "<>" # DEPRECATED, USE MACHINE IDENTITY AUTH INSTEAD
  host = "https://app.infisical.com" # Optional for cloud, required for self-hosted
  auth {
    universal { # or use oidc authentication method by providing an identity_id
      client_id     = var.infisical_client_id
      client_secret = var.infisical_client_secret
    }
  }
}
```
Learn more about [machine identities](/documentation/platform/identities/machine-identities).
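If you prefer the OIDC method (for example with Terraform Cloud, as covered in the dedicated guide), a minimal sketch of the provider block looks like this; the variable name is illustrative, and `identity_id` is the ID of your Infisical machine identity configured for OIDC auth:

```hcl
provider "infisical" {
  host = "https://app.infisical.com"
  auth {
    oidc {
      # ID of the Infisical machine identity configured for OIDC auth
      identity_id = var.infisical_identity_id
    }
  }
}
```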
|
||||
#### Service Token (Legacy)
|
||||
|
||||
<Warning>
|
||||
Machine Identity authentication is strongly recommended as the secure and modern method. Service tokens are considered legacy and will be deprecated in a future release.
|
||||
</Warning>
|
||||
|
||||
```hcl
|
||||
provider "infisical" {
|
||||
host = "https://app.infisical.com"
|
||||
service_token = var.infisical_service_token
|
||||
}
|
||||
```
|
||||
|
||||
## Using Secrets in Terraform
|
||||
|
||||
Infisical provides two methods to fetch and use secrets in your Terraform configurations:
|
||||
|
||||
### Method 1: Ephemeral Resources (Recommended)
|
||||
|
||||
Ephemeral resources, introduced in Terraform v1.10, provide enhanced security by ensuring sensitive values are never persisted in state files. This is the recommended approach for handling secrets in your infrastructure code.
|
||||
|
||||
```hcl
|
||||
# Fetch database credentials ephemerally
|
||||
ephemeral "infisical_secret" "db_creds" {
|
||||
name = "DB_CREDENTIALS"
|
||||
env_slug = "prod"
|
||||
workspace_id = var.infisical_workspace_id
|
||||
folder_path = "/database"
|
||||
}
|
||||
|
||||
# Use the credentials to configure a provider
|
||||
provider "postgresql" {
|
||||
host = data.aws_db_instance.example.address
|
||||
port = data.aws_db_instance.example.port
|
||||
username = jsondecode(ephemeral.infisical_secret.db_creds.value)["username"]
|
||||
password = jsondecode(ephemeral.infisical_secret.db_creds.value)["password"]
|
||||
}
|
||||
```
|
||||
|
||||
Key benefits:
|
||||
- Values are never stored in state files
|
||||
- Secrets are fetched on-demand during each Terraform operation
|
||||
- Perfect for GitOps workflows
|
||||
- Improved security posture for your infrastructure as code
|
||||
|
||||
### Method 2: Data Sources
|
||||
|
||||
For backwards compatibility or when working with older Terraform versions, you can use the traditional data source approach:
|
||||
|
||||
```hcl
|
||||
# Fetch all secrets in a folder
|
||||
data "infisical_secrets" "my_secrets" {
|
||||
env_slug = "dev"
|
||||
workspace_id = var.infisical_workspace_id
|
||||
folder_path = "/api"
|
||||
}
|
||||
|
||||
# Use individual secrets
|
||||
resource "aws_db_instance" "example" {
|
||||
username = data.infisical_secrets.my_secrets.secrets["DB_USER"]
|
||||
password = data.infisical_secrets.my_secrets.secrets["DB_PASS"]
|
||||
}
|
||||
```
|
||||
|
||||
<Warning>
|
||||
It is recommended to use Terraform variables to pass your service token dynamically to avoid hard coding it
|
||||
When using data sources, secret values are stored in Terraform's state file. Ensure your state file is properly secured.
|
||||
</Warning>
|
||||
|
||||
### 3. Fetch Infisical Secrets
|
||||
## Common Use Cases
|
||||
|
||||
Use the `infisical_secrets` data source to fetch your secrets. In this block, you must set the `env_slug` and `folder_path` to scope the secrets you want.
|
||||
### Secure Database Credential Management
|
||||
|
||||
`env_slug` is the slug of the environment name. This slug name can be found under the project settings page on the Infisical dashboard.
|
||||
|
||||
`folder_path` is the path to the folder in a given environment. The path `/` for root of the environment where as `/folder1` is the folder at the root of the environment.
|
||||
|
||||
```hcl main.tf
|
||||
data "infisical_secrets" "my-secrets" {
|
||||
env_slug = "dev"
|
||||
folder_path = "/some-folder/another-folder"
|
||||
workspace_id = "your-project-id"
|
||||
}
|
||||
```
|
||||
|
||||
### 4. Define Outputs
|
||||
|
||||
As an example, we are going to output your fetched secrets. Replace `SECRET-NAME` with the actual name of your secret.
|
||||
|
||||
For a single secret:
|
||||
|
||||
```hcl main.tf
|
||||
output "single-secret" {
|
||||
value = data.infisical_secrets.my-secrets.secrets["SECRET-NAME"]
|
||||
}
|
||||
```
|
||||
|
||||
For all secrets:
|
||||
Manage database credentials securely without exposing sensitive information in your state files:
|
||||
|
||||
```hcl
|
||||
output "all-secrets" {
|
||||
value = data.infisical_secrets.my-secrets.secrets
|
||||
# Fetch database credentials securely
|
||||
ephemeral "infisical_secret" "db_creds" {
|
||||
name = "DB_CREDENTIALS"
|
||||
env_slug = "prod"
|
||||
workspace_id = var.infisical_workspace_id
|
||||
folder_path = "/database"
|
||||
}
|
||||
|
||||
# Use the credentials in your database instance
|
||||
resource "aws_db_instance" "example" {
|
||||
identifier = "my-database"
|
||||
allocated_storage = 20
|
||||
engine = "postgres"
|
||||
engine_version = "14.0"
|
||||
instance_class = "db.t3.micro"
|
||||
|
||||
# Securely inject credentials from Infisical
|
||||
username = jsondecode(ephemeral.infisical_secret.db_creds.value)["username"]
|
||||
password = jsondecode(ephemeral.infisical_secret.db_creds.value)["password"]
|
||||
}
|
||||
```
|
||||
|
||||
### 5. Run Terraform
|
||||
### GitOps Workflow with OIDC
|
||||
|
||||
Once your configuration is complete, initialize your Terraform working directory:
|
||||
To eliminate the need for static credentials, you can authenticate your workflow using [OpenID Connect (OIDC)](https://infisical.com/docs/documentation/platform/identities/oidc-auth/general) through providers like the [Infisical Secrets GitHub Action](https://github.com/Infisical/secrets-action).
|
||||
Once authenticated, you can securely access secrets through the Infisical provider:
|
||||
|
||||
```bash
|
||||
$ terraform init
|
||||
```hcl
|
||||
provider "infisical" {
|
||||
# Auth credentials automatically injected from the environment
|
||||
}
|
||||
|
||||
# Fetch deployment credentials
|
||||
ephemeral "infisical_secret" "deploy_token" {
|
||||
name = "DEPLOY_TOKEN"
|
||||
env_slug = "prod"
|
||||
workspace_id = var.infisical_workspace_id
|
||||
folder_path = "/deployment"
|
||||
}
|
||||
```
|
||||
For detailed instructions on setting up OIDC authentication with GitHub Actions, refer to our [GitHub Actions OIDC guide](https://infisical.com/docs/documentation/platform/identities/oidc-auth/github).
|
||||
|
||||
Then, run the plan command to view the fetched secrets:
|
||||
## Best Practices
|
||||
|
||||
```bash
|
||||
$ terraform plan
|
||||
```
|
||||
1. **Use Ephemeral Resources**: Whenever possible, use ephemeral resources instead of data sources for improved security.
|
||||
|
||||
Terraform will now fetch your secrets from Infisical and display them as output according to your configuration.
|
||||
2. **Organize Secrets**: Structure your secrets in Infisical using folders to maintain clean separation:
|
||||
```hcl
|
||||
ephemeral "infisical_secret" "db_secret" {
|
||||
folder_path = "/databases/postgresql" # Organized by service
|
||||
# ...
|
||||
}
|
||||
```
|
||||
|
||||
## Conclusion
|
||||
3. **Variable Usage**: Use Terraform variables for workspace IDs and environment slugs:
|
||||
```hcl
|
||||
variable "environment" {
|
||||
description = "Environment (dev, staging, prod)"
|
||||
type = string
|
||||
}
|
||||
|
||||
You have now successfully set up and used the Infisical provider with Terraform to fetch secrets. For more information, visit the [Infisical documentation](https://registry.terraform.io/providers/Infisical/infisical/latest/docs). */}
|
||||
ephemeral "infisical_secret" "secret" {
|
||||
env_slug = var.environment
|
||||
# ...
|
||||
}
|
||||
```
|
||||
|
||||
4. **Error Handling**: Add lifecycle blocks for critical secrets:
|
||||
```hcl
|
||||
ephemeral "infisical_secret" "critical_secret" {
|
||||
# ...
|
||||
lifecycle {
|
||||
postcondition {
|
||||
condition = length(self.value) > 0
|
||||
error_message = "Critical secret must not be empty"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## FAQ
|
||||
|
||||
<AccordionGroup>
|
||||
<Accordion title="What happens if I'm using an older version of Terraform?">
|
||||
If you're using Terraform < v1.10.0, you'll need to use the data source approach.
|
||||
Consider upgrading to take advantage of the enhanced security features provided
|
||||
by ephemeral resources.
|
||||
</Accordion>
|
||||
<Accordion title="Can I mix ephemeral resources and data sources?">
|
||||
Yes, you can use both in the same configuration. However, we recommend using
|
||||
ephemeral resources for any sensitive values to ensure they're not stored in state.
|
||||
</Accordion>
|
||||
<Accordion title="How do I secure my state file when using data sources?">
|
||||
When using data sources, follow Terraform's best practices for state management:
|
||||
- Use remote state with encryption at rest
|
||||
- Implement proper access controls
|
||||
- Consider using state encryption
|
||||
- Treat the state like a secret
|
||||
|
||||
Better yet, use ephemeral resources to avoid storing sensitive values in state entirely.
|
||||
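    For instance, a remote backend with encryption at rest might look like the following sketch; the bucket name, key, and region are placeholders:

    ```hcl
    terraform {
      backend "s3" {
        bucket  = "my-terraform-state"          # placeholder bucket
        key     = "infisical/terraform.tfstate"
        region  = "us-east-1"
        encrypt = true                          # server-side encryption at rest
      }
    }
    ```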
  </Accordion>
</AccordionGroup>

See also:
- [Machine Identity setup guide](/documentation/platform/identities/machine-identities)
- [Terraform Provider Registry](https://registry.terraform.io/providers/Infisical/infisical/latest/docs)
- [GitOps Best Practices](https://www.infisical.com/blog/gitops-best-practices)
@@ -1,28 +1,34 @@
---
title: "Components"
description: "Infisical's components span multiple clients, an API, and a storage backend."
description: "Understand Infisical's core architectural components and how they work together."
---

## Infisical API
## Overview

The Infisical API (sometimes referred to as the **backend**) contains the core platform logic.
Infisical is architected around several key components that work in concert to provide a secure and streamlined secret management experience. These components span the client, API, and storage layers, ensuring that secrets are protected at every stage of their lifecycle.

## Storage backend
## 1. API (Backend)

Infisical relies on a storage backend to store data including users and secrets. Infisical's storage backend is Postgres.
Infisical exposes a well-documented [REST API](https://infisical.com/docs/api-reference/overview/introduction) that enables programmatic interaction with the platform, enabling a wide range of use cases.

## Redis
## 2. Storage Backend

Infisical uses [Redis](https://redis.com) to enable more complex workflows including a queuing system to manage long running asynchronous tasks, cron jobs, as well as reliable cache for frequently used resources.
Infisical relies on a robust storage backend to durably store secrets, users, and other platform data. Infisical's storage backend is [PostgreSQL](https://www.postgresql.org/).

## Infisical Web UI
## 3. Caching Layer

The Web UI is the browser-based portal that connects to the Infisical API.
Infisical uses [Redis](https://redis.com) to enable more complex workflows, including a queuing system to manage long-running asynchronous tasks and cron jobs, as well as a reliable cache for frequently used resources.

## Infisical clients
## 4. Clients

Clients are any application or infrastructure that connecting to the Infisical API using one of the below methods:
- Public API: Making API requests directly to the Infisical API.
- Client SDK: A platform-specific library with method abstractions for working with secrets. Currently, there are three official SDKs: [Node SDK](https://infisical.com/docs/sdks/languages/node), [Python SDK](https://infisical.com/docs/sdks/languages/python), and [Java SDK](https://infisical.com/docs/sdks/languages/java).
- CLI: A terminal-based interface for interacting with the Infisical API.
- Kubernetes Operator: This operator retrieves secrets from Infisical and securely store
Clients are interfaces through which users and applications interact with the Infisical API:

- **Web UI**: A browser-based portal providing a user-friendly interface for managing secrets, configurations, and performing administrative tasks.

- [**CLI**](https://infisical.com/docs/cli): A terminal-based tool for interacting with the Infisical API, enabling automation, scripting, and integration into CI/CD pipelines.

- **SDKs (Software Development Kits)**: Platform-specific libraries with method abstractions for working with secrets. Supported languages include [Node.js](https://infisical.com/docs/sdks/languages/node), [Python](https://infisical.com/docs/sdks/languages/python), [Java](https://infisical.com/docs/sdks/languages/java), [Golang](https://infisical.com/docs/sdks/languages/go), [Ruby](https://infisical.com/docs/sdks/languages/ruby) and [.NET](https://infisical.com/docs/sdks/languages/csharp).

- [**Kubernetes Operator**](https://infisical.com/docs/integrations/platforms/kubernetes): A Kubernetes-native component that facilitates the secure retrieval and management of secrets within a Kubernetes cluster. The operator supports multiple custom resource definitions (CRDs) for syncing secrets.

- [**Infisical Agent**](https://infisical.com/docs/integrations/platforms/infisical-agent): A daemon that automatically fetches and manages access tokens and secrets to be used in various client resources.
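As a concrete illustration of the Public API path, a request along the lines of the sketch below fetches secrets over REST. The endpoint shape and query parameters are assumptions based on the v3 raw-secrets routes used elsewhere in this changeset; substitute your own project ID, environment slug, and access token:

```bash
# Hypothetical example — adjust host, project ID, and token to your deployment
curl -s "https://app.infisical.com/api/v3/secrets/raw?workspaceId=<project-id>&environment=dev&secretPath=/" \
  -H "Authorization: Bearer <access-token>"
```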
@@ -232,7 +232,8 @@
          "documentation/platform/identities/oidc-auth/general",
          "documentation/platform/identities/oidc-auth/github",
          "documentation/platform/identities/oidc-auth/circleci",
          "documentation/platform/identities/oidc-auth/gitlab"
          "documentation/platform/identities/oidc-auth/gitlab",
          "documentation/platform/identities/oidc-auth/terraform-cloud"
        ]
      },
      "documentation/platform/mfa",

@@ -285,7 +286,7 @@
      "pages": [
        "self-hosting/overview",
        {
          "group": "Containerized installation methods",
          "group": "Installation methods",
          "pages": [
            "self-hosting/deployment-options/standalone-infisical",
            "self-hosting/deployment-options/docker-swarm",

@@ -293,12 +294,12 @@
            "self-hosting/deployment-options/kubernetes-helm"
          ]
        },
        "self-hosting/guides/upgrading-infisical",
        "self-hosting/configuration/envars",
        "self-hosting/configuration/requirements",
        {
          "group": "Guides",
          "pages": [
            "self-hosting/configuration/schema-migrations",
            "self-hosting/guides/mongo-to-postgres",
            "self-hosting/guides/custom-certificates"
          ]

@@ -633,7 +634,8 @@
          "api-reference/endpoints/oidc-auth/attach",
          "api-reference/endpoints/oidc-auth/retrieve",
          "api-reference/endpoints/oidc-auth/update",
          "api-reference/endpoints/oidc-auth/revoke"
          "api-reference/endpoints/oidc-auth/revoke",
          "integrations/frameworks/terraform-cloud"
        ]
      },
      {
@ -22,33 +22,34 @@ The actual resource requirements will vary in direct proportion to the operation
Infisical doesn't require file storage, as all persisted data is saved in the database.
However, its logs and metrics are saved to disk for later viewing. As a result, we recommend provisioning 1-2 GB of storage.

### CPU
### CPU and Memory (Per Container/Instance)

CPU requirements depend heavily on the volume of secret operations (reads and writes) you anticipate.
Processing large volumes of secrets frequently and consistently will require more CPU.
Infisical is stateless and scales horizontally by running across multiple containers/instances. Each instance typically does **not** need more than **2–4 CPU cores** and **4–8 GB** of memory.
If you need additional capacity, simply increase the **number** of containers/instances running in parallel.

Recommended minimum CPU hardware for different sizes of deployments:
| **Deployment Size** | **CPU (Cores, per container)** | **Memory (GB, per container)** | **Recommended Number of Containers** |
|---------------------|--------------------------------|--------------------------------|--------------------------------------|
| **Small**           | 2                              | 4                              | 2+                                   |
| **Medium**          | 2–4                            | 4–8                            | 5+                                   |
| **Large**           | 2–4                            | 4–8                            | 10+                                  |

- **small:** 2-4 cores is the **recommended** minimum
- **large:** 4-8 cores are suitable for larger deployments

### Memory Allocation

Memory needs depend on the expected workload, including factors like user activity, automation level, and the frequency of secret operations.

Recommended minimum memory hardware for different sizes of deployments:
- **small:** 4-8 GB is the **recommended** minimum
- **large:** 16-32 GB are suitable for larger deployments
> **Note:**
> - Adding more containers (horizontal scaling) is generally the best way to handle spikes in secret operations. A sketch of this follows the note.
> - If you prefer, you can increase CPU/memory on a single container (vertical scaling), but horizontal scaling is more flexible and resilient.
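As a concrete illustration of the horizontal-scaling note above, the sketch below scales out a hypothetical Compose service; the service name `infisical-backend` is an assumption, so substitute the name from your own deployment file.

```bash
# Minimal sketch: add capacity by running more containers in parallel.
# "infisical-backend" is a hypothetical service name; use your own.
docker compose up -d --scale infisical-backend=3
```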
## Database & caching layer

### Postgres

PostgreSQL is the only database supported by Infisical. Infisical has been extensively tested with Postgres version 16. We recommend using versions 14 and up for optimal compatibility.
The compute required for Postgres depends largely on the number of secret operations (reads and writes) you expect. The more frequently you read and write secrets, the more compute you will need.
Storage requirements are high because audit logs are stored in the database by default.

Recommended resource allocation based on deployment size:
- **small:** 2 vCPU / 8 GB RAM / 20 GB Disk
- **large:** 4 vCPU / 16 GB RAM / 100 GB Disk

Recommended resource allocation based on deployment size. You may require more resources if you have a large number of secrets or high transaction volume:
- **small:** 2 vCPU / 8 GB RAM / 100 GB Disk
- **medium:** 4 vCPU / 16 GB RAM / 200 GB Disk
- **large:** 8 vCPU / 32 GB RAM / 500 GB Disk

### Redis
@ -1,60 +0,0 @@
---
title: "Schema migration"
description: "Learn how to run Postgres schema migrations."
---

Running schema migrations is a requirement before deploying Infisical.
Each time you decide to upgrade your version of Infisical, it's necessary to run schema migrations for that specific version.
The guide below walks you through running schema migrations for Infisical manually, step by step.

### Prerequisites
- Docker installed on your machine
- An active PostgreSQL database
- Postgres database connection string

<Steps>
<Step title="Pull the Infisical Docker Image">
First, ensure you have the correct version of the Infisical Docker image. You can pull it from Docker Hub using the following command:
```bash
docker pull infisical/infisical:<version>
```
Replace `<version>` with the specific version number you intend to deploy. View available versions [here](https://hub.docker.com/r/infisical/infisical/tags).
</Step>

<Step title="Set Up the Environment Variable">
The Docker image requires a `DB_CONNECTION_URI` environment variable. This connection string should point to your PostgreSQL database. The format generally looks like this: `postgresql://username:password@host:port/database`.
</Step>

<Step title="Run the Migration">
To run the schema migration for the version of Infisical you want to deploy, use the following Docker command:

```bash
docker run --env DB_CONNECTION_URI=<your_connection_string> infisical/infisical:<version> npm run migration:latest
```
Replace `<your_connection_string>` with your actual PostgreSQL connection string, and `<version>` with the desired version number.
</Step>

<Step title="Verify the Migration">
After running the migration, it's good practice to check whether it succeeded. You can do this by checking the logs or accessing your database to ensure the schema has been updated accordingly (see the sketch after these steps).
</Step>
<Step title="Rollback If Needed">
If you need to roll back a migration by one step, use the following command:

```bash
docker run --env DB_CONNECTION_URI=<your_connection_string> infisical/infisical:<version> npm run migration:rollback
```
</Step>

<Step title="Repeat for Each Version">
It's important to run schema migrations for each version of Infisical you deploy. For instance, if you're updating from `infisical/infisical:1` to `infisical/infisical:2`, ensure you run the schema migrations for `infisical/infisical:2` before deploying it.
</Step>
</Steps>
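For the verification step above, one rough database-side check is to inspect the migrations bookkeeping table. The sketch below assumes a knex-style table named `infisical_migrations`; the table name is an assumption, so adjust it if your deployment differs.

```bash
# Minimal sketch: show the most recently applied migrations.
# "infisical_migrations" is an assumed knex-style table name; adjust if needed.
psql "$DB_CONNECTION_URI" -c \
  "SELECT name, migration_time FROM infisical_migrations ORDER BY migration_time DESC LIMIT 5;"
```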
<Tip>
In a production setting, we recommend a more structured approach to deploying migrations prior to upgrading Infisical. This can be accomplished via CI automation.
</Tip>

### Additional discussion
- Always back up your database before running migrations, especially in a production environment.
- Test the migration process in a staging environment before applying it to production.
- Keep track of the versions and their corresponding migrations to avoid any inconsistencies.
@ -157,23 +157,6 @@ The [Docker stack file](https://github.com/Infisical/infisical/tree/main/docker-
3lznscvk7k5t infisical_spolo2 replicated 1/1 ghcr.io/zalando/spilo-16:3.2-p2
v04ml7rz2j5q infisical_spolo3 replicated 1/1 ghcr.io/zalando/spilo-16:3.2-p2
```

<Note>
You'll notice that the service `infisical_infisical` will not be in a running state.
This is expected, as the database does not yet have the desired schemas.
Once the database schema migrations have been successfully applied, this issue should be resolved.
</Note>
</Step>

<Step title="Run schema migrations">
Run the schema migration to initialize the database. Follow the [guide here](/self-hosting/configuration/schema-migrations) to learn how.

To run the migrations, you'll need to connect to the Postgres instance deployed on your Docker swarm. The default Postgres credentials are defined in the Docker swarm: username `postgres`, password `postgres`, and database `postgres`.
We recommend changing these credentials and creating a separate DB for Infisical when deploying to production. A connection-string sketch follows.
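Putting the defaults above together, running the migration might look like the sketch below. It assumes the swarm publishes Postgres on `localhost:5432`, which may not match your setup.

```bash
# Minimal sketch: run migrations against the swarm's default Postgres.
# Assumes Postgres is reachable on localhost:5432; adjust host/port as needed.
export DB_CONNECTION_URI="postgresql://postgres:postgres@localhost:5432/postgres"
docker run --env DB_CONNECTION_URI="$DB_CONNECTION_URI" infisical/infisical:<version> npm run migration:latest
```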
<Info>
After running the schema migrations, be sure to update the `.env` file to have the correct `DB_CONNECTION_URI`.
</Info>
</Step>

<Step title="View service status">
@ -78,18 +78,6 @@ description: "Learn how to use Helm chart to install Infisical on your Kubernete
</Tabs>
</Step>

<Step title="Database schema migration">
Infisical relies on a relational database, which means that database schemas need to be migrated before the instance can become operational.

To automate this process, the chart includes an option named `infisical.autoDatabaseSchemaMigration`.
When this option is enabled, a deployment/upgrade will only occur _after_ a successful schema migration. A sketch of enabling it is shown below.

<Info>
If you are using in-cluster Postgres, you may notice the migration job failing initially.
This is expected, as it is waiting for the database to be in a ready state.
</Info>
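For illustration, the option can be toggled at install or upgrade time. The release name and chart reference below are assumptions; reuse whatever chart reference you used in the installation step.

```bash
# Minimal sketch: enable automatic schema migrations on upgrade.
# The release name and chart reference are assumptions; substitute your own.
helm upgrade --install infisical infisical-helm-charts/infisical-standalone \
  --set infisical.autoDatabaseSchemaMigration=true
```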
</Step>

<Step title="Routing traffic to Infisical">
By default, this chart uses Nginx as its Ingress controller to direct traffic to Infisical services.
@ -22,11 +22,6 @@ The following guide provides a detailed step-by-step walkthrough on how you can

Remember to replace `<version>` with the docker image tag of your choice.
</Step>
<Step title="Run Postgres schema migration">
Before you can start the instance of Infisical, you need to run the database schema migrations.
Follow the [step-by-step guide here](/self-hosting/configuration/schema-migrations) on running schema migrations for Infisical.

</Step>
<Step title="Start Infisical">
For a minimal installation of Infisical, you must configure `ENCRYPTION_KEY`, `AUTH_SECRET`, `DB_CONNECTION_URI`, `SITE_URL`, and `REDIS_URL`. [View all available configurations](/self-hosting/configuration/envars). A start-up sketch with these variables follows.
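To make the minimal configuration concrete, here is a hedged start-up sketch with those five variables. The port mapping and all values are placeholders/assumptions; generate real secrets and point the URIs at your own Postgres and Redis.

```bash
# Minimal sketch: start Infisical with the five required variables.
# All values and the port mapping are placeholders/assumptions.
docker run -d -p 80:8080 \
  -e ENCRYPTION_KEY="$(openssl rand -hex 16)" \
  -e AUTH_SECRET="$(openssl rand -base64 32)" \
  -e DB_CONNECTION_URI="postgresql://user:pass@db-host:5432/infisical" \
  -e REDIS_URL="redis://redis-host:6379" \
  -e SITE_URL="https://infisical.example.com" \
  infisical/infisical:<version>
```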
docs/self-hosting/guides/upgrading-infisical.mdx (new file, 57 lines)
@ -0,0 +1,57 @@
---
title: "Upgrade Infisical Instance"
description: "How to upgrade a self-hosted Infisical instance"
---

Keeping your Infisical instance up to date is key to making sure you receive the latest performance improvements, security patches, and feature updates.
We release updates approximately once a week; these may include new features, bug fixes, performance enhancements, and critical security patches.

Since secrets management is a critical component of your infrastructure, we aim to avoid disruptive changes that would impact fetching secrets in downstream clients.
If a release requires specific attention, a note will be attached to the corresponding [release](https://github.com/Infisical/infisical/releases) version.

During an upgrade, two key components are updated:

- **Infisical Application:** The core application code is updated.
- **PostgreSQL Database Schema:** Schema migrations run automatically to ensure your database remains in sync with the updated application.

> **Before You Upgrade:**
> **Always back up your database.** While our automated migration system is robust, having a backup ensures you can recover quickly in the event of an issue.

## Automated Schema Migrations

In previous versions (prior to `v0.111.0-postgres`), schema migrations had to be executed manually before starting the application.
Now, migrations run automatically during boot-up. This improvement streamlines the upgrade process, reduces manual steps, and minimizes the risk of inconsistencies between your database schema and application code.

### Benefits of Automated Migrations

- **Seamless Integration:**
  Migrations are now part of the boot-up process, removing the need for manual intervention.

- **Synchronous Upgrades:**
  In multi-instance deployments, one instance acquires a lock and performs the migration while the others wait. This ensures that if a migration fails, the rollout is halted to prevent inconsistencies.

- **Reduced Room for Error:**
  Automatic migrations help ensure that your database schema always remains in sync with your application code.

## Upgrade Steps

1. **Back Up Your Data:**
   - Ensure you have a complete backup of your Postgres database (a backup sketch follows these steps).
   - Verify that your backup is current and accessible.

2. **Select the Upgrade Version:**
   - Visit the [Infisical releases page](https://github.com/Infisical/infisical/releases) for a list of available versions.
   - Look for releases with the prefix `infisical/`, as there are other releases that are not related to the Infisical instance.

3. **Start the Upgrade Process:**
   - Launch the new version of Infisical. During startup, the application will automatically compare the current database schema with the updated schema in the code.
   - If any differences are detected, Infisical will apply the necessary migrations automatically.

4. **Multi-Instance Coordination:**
   - In environments with multiple instances, one instance will acquire a lock and perform the migration while the other instances wait.
   - Once the migration is complete, all instances will operate with the updated schema.

5. **Verify the Upgrade:**
   - Review the logs for any migration errors or warnings.
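As a minimal sketch for the backup in step 1, `pg_dump` against the same connection string works; the file name and format flags are one reasonable choice, not a prescribed procedure.

```bash
# Minimal sketch: back up the Infisical Postgres database before upgrading.
# Custom format allows selective restore with pg_restore.
pg_dump "$DB_CONNECTION_URI" --format=custom \
  --file="infisical-backup-$(date +%F).dump"
```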
@ -48,9 +48,3 @@ This ensures that if there is a failure in one availability zone, the working re

<Accordion title="Can Infisical run in an air-gapped environment without any internet access?" defaultOpen >
Yes, Infisical can function in an air-gapped environment. To do so, update your ECS task to use the publicly available AWS Elastic Container Registry (ECR) image instead of the default Docker Hub image. Additionally, it's necessary to configure VPC endpoints, which allows your system to access AWS ECR via a private network route instead of the internet, ensuring all connectivity remains within the secure, private network.
</Accordion>
<Accordion title="Since RDS is in a private subnet, how do you run the Postgres schema migrations?">
Since the Amazon RDS instance is housed within a private network to enhance security, it is not directly accessible from the internet. This means that in order to run the required [Postgres schema migrations](/self-hosting/configuration/schema-migrations), you need to connect to this RDS instance. There are several approaches you can take:
- To automate schema migrations, you may set up a CI/CD pipeline with access to the same RDS network to run the schema migrations before deploying to ECS. This ensures that if migrations fail, your Infisical instances continue to run.
- If you would like to run the migrations manually, consider using AWS Systems Manager Session Manager to reach the RDS instance within the VPC from your local machine (a port-forwarding sketch follows).
- If your organization already has mechanisms in place for secure access to the VPC, such as VPNs or Direct Connect, these can also be utilized for performing database migrations manually.
</Accordion>
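As a rough illustration of the Session Manager approach mentioned above, the sketch below forwards a local port to the RDS endpoint through an SSM-managed bastion instance. The instance ID and RDS hostname are placeholders.

```bash
# Minimal sketch: forward local port 5432 to RDS via an SSM-managed bastion.
# The instance ID and RDS endpoint are placeholders/assumptions.
aws ssm start-session \
  --target i-0123456789abcdef0 \
  --document-name AWS-StartPortForwardingSessionToRemoteHost \
  --parameters '{"host":["<rds-endpoint>"],"portNumber":["5432"],"localPortNumber":["5432"]}'
```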
@ -57,6 +57,10 @@ export const ProjectPermissionCan: FunctionComponent<Props<ProjectPermissionSet>
  const finalChild =
    typeof children === "function" ? children(isAllowed, ability as any) : children;

  if (!isAllowed && renderGuardBanner) {
    return <ProjectPermissionGuardBanner />;
  }

  if (!isAllowed && passThrough) {
    return <Tooltip content={label}>{finalChild}</Tooltip>;
  }
@ -65,10 +69,6 @@ export const ProjectPermissionCan: FunctionComponent<Props<ProjectPermissionSet>
    return <Tooltip content={allowedLabel}>{finalChild}</Tooltip>;
  }

  if (!isAllowed && renderGuardBanner) {
    return <ProjectPermissionGuardBanner />;
  }

  if (!isAllowed) return null;

  return finalChild;
@ -10,9 +10,9 @@ type Props = {
};

export const SecretSyncSelect = ({ onSelect }: Props) => {
  const { isLoading, data: secretSyncOptions } = useSecretSyncOptions();
  const { isPending, data: secretSyncOptions } = useSecretSyncOptions();

  if (isLoading) {
  if (isPending) {
    return (
      <div className="flex h-full flex-col items-center justify-center py-2.5">
        <Spinner size="lg" className="text-mineshaft-500" />
@ -23,7 +23,7 @@ export const SecretSyncConnectionField = ({ onChange: callback }: Props) => {
  const destination = watch("destination");
  const app = SECRET_SYNC_CONNECTION_MAP[destination];

  const { data: availableConnections, isLoading } = useListAvailableAppConnections(app);
  const { data: availableConnections, isPending } = useListAvailableAppConnections(app);

  const connectionName = APP_CONNECTION_MAP[app].name;

@ -54,7 +54,7 @@ export const SecretSyncConnectionField = ({ onChange: callback }: Props) => {
    onChange(newValue);
    if (callback) callback();
  }}
  isLoading={isLoading}
  isLoading={isPending}
  options={availableConnections}
  placeholder="Select connection..."
  getOptionLabel={(option) => option.name}
@ -323,12 +323,12 @@ export const MinimizedOrgSidebar = () => {
  <DropdownMenuContent align="start" side="right" className="p-1">
    <DropdownMenuLabel>Organization Options</DropdownMenuLabel>
    <Link to="/organization/access-management">
      <DropdownMenuItem icon={<FontAwesomeIcon icon={faUsers} />}>
      <DropdownMenuItem icon={<FontAwesomeIcon className="w-3" icon={faUsers} />}>
        Access Control
      </DropdownMenuItem>
    </Link>
    <Link to="/organization/app-connections">
      <DropdownMenuItem icon={<FontAwesomeIcon icon={faPlug} />}>
      <DropdownMenuItem icon={<FontAwesomeIcon className="w-3" icon={faPlug} />}>
        App Connections
      </DropdownMenuItem>
    </Link>
@ -336,18 +336,20 @@ export const MinimizedOrgSidebar = () => {
    window.location.origin.includes("https://eu.infisical.com") ||
    window.location.origin.includes("https://gamma.infisical.com")) && (
    <Link to="/organization/billing">
      <DropdownMenuItem icon={<FontAwesomeIcon icon={faMoneyBill} />}>
      <DropdownMenuItem
        icon={<FontAwesomeIcon className="w-3" icon={faMoneyBill} />}
      >
        Usage & Billing
      </DropdownMenuItem>
    </Link>
    )}
    <Link to="/organization/audit-logs">
      <DropdownMenuItem icon={<FontAwesomeIcon icon={faBook} />}>
      <DropdownMenuItem icon={<FontAwesomeIcon className="w-3" icon={faBook} />}>
        Audit Logs
      </DropdownMenuItem>
    </Link>
    <Link to="/organization/settings">
      <DropdownMenuItem icon={<FontAwesomeIcon icon={faCog} />}>
      <DropdownMenuItem icon={<FontAwesomeIcon className="w-3" icon={faCog} />}>
        Organization Settings
      </DropdownMenuItem>
    </Link>
@ -11,9 +11,9 @@ type Props = {
};

export const AppConnectionsSelect = ({ onSelect }: Props) => {
  const { isLoading, data: appConnectionOptions } = useAppConnectionOptions();
  const { isPending, data: appConnectionOptions } = useAppConnectionOptions();

  if (isLoading) {
  if (isPending) {
    return (
      <div className="flex h-full flex-col items-center justify-center py-2.5">
        <Spinner size="lg" className="text-mineshaft-500" />
@ -51,7 +51,7 @@ type AppConnectionFilters = {
};

export const AppConnectionsTable = () => {
  const { isLoading, data: appConnections = [] } = useListAppConnections();
  const { isPending, data: appConnections = [] } = useListAppConnections();

  const { popUp, handlePopUpOpen, handlePopUpToggle } = usePopUp([
    "deleteConnection",
@ -262,7 +262,7 @@ export const AppConnectionsTable = () => {
    </Tr>
  </THead>
  <TBody>
    {isLoading && (
    {isPending && (
      <TableSkeleton innerKey="app-connections-table" columns={4} key="app-connections" />
    )}
    {filteredAppConnections.slice(offset, perPage * page).map((connection) => (
@ -285,7 +285,7 @@ export const AppConnectionsTable = () => {
      onChangePerPage={setPerPage}
    />
  )}
  {!isLoading && !filteredAppConnections?.length && (
  {!isPending && !filteredAppConnections?.length && (
    <EmptyState
      title={
        appConnections.length
@ -8,6 +8,7 @@ import { ProjectPermissionCan } from "@app/components/permissions";
import { Badge, PageHeader, Tab, TabList, TabPanel, Tabs } from "@app/components/v2";
import { ROUTE_PATHS } from "@app/const/routes";
import { ProjectPermissionActions, ProjectPermissionSub, useWorkspace } from "@app/context";
import { ProjectPermissionSecretSyncActions } from "@app/context/ProjectPermissionContext/types";
import { IntegrationsListPageTabs } from "@app/types/integrations";

import {
@ -100,8 +101,7 @@ export const IntegrationsListPage = () => {
  <TabPanel value={IntegrationsListPageTabs.SecretSyncs}>
    <ProjectPermissionCan
      renderGuardBanner
      passThrough={false}
      I={ProjectPermissionActions.Read}
      I={ProjectPermissionSecretSyncActions.Read}
      a={ProjectPermissionSub.SecretSyncs}
    >
      <SecretSyncsTab />
@ -110,7 +110,6 @@ export const IntegrationsListPage = () => {
  <TabPanel value={IntegrationsListPageTabs.NativeIntegrations}>
    <ProjectPermissionCan
      renderGuardBanner
      passThrough={false}
      I={ProjectPermissionActions.Read}
      a={ProjectPermissionSub.Integrations}
    >
@ -3,7 +3,12 @@ import { zodValidator } from "@tanstack/zod-adapter";
import { z } from "zod";

import { workspaceKeys } from "@app/hooks/api";
import { fetchSecretSyncsByProjectId, secretSyncKeys } from "@app/hooks/api/secretSyncs";
import { TIntegration } from "@app/hooks/api/integrations/types";
import {
  fetchSecretSyncsByProjectId,
  secretSyncKeys,
  TSecretSync
} from "@app/hooks/api/secretSyncs";
import { fetchWorkspaceIntegrations } from "@app/hooks/api/workspace/queries";
import { IntegrationsListPageTabs } from "@app/types/integrations";

@ -20,10 +25,22 @@ export const Route = createFileRoute(
  validateSearch: zodValidator(IntegrationsListPageQuerySchema),
  beforeLoad: async ({ context, search, params: { projectId } }) => {
    if (!search.selectedTab) {
      const secretSyncs = await context.queryClient.ensureQueryData({
        queryKey: secretSyncKeys.list(projectId),
        queryFn: () => fetchSecretSyncsByProjectId(projectId)
      });
      let secretSyncs: TSecretSync[];

      try {
        secretSyncs = await context.queryClient.ensureQueryData({
          queryKey: secretSyncKeys.list(projectId),
          queryFn: () => fetchSecretSyncsByProjectId(projectId)
        });
      } catch {
        throw redirect({
          to: "/secret-manager/$projectId/integrations",
          params: {
            projectId
          },
          search: { selectedTab: IntegrationsListPageTabs.NativeIntegrations }
        });
      }

      if (secretSyncs.length) {
        throw redirect({
@ -35,10 +52,21 @@ export const Route = createFileRoute(
        });
      }

      const integrations = await context.queryClient.ensureQueryData({
        queryKey: workspaceKeys.getWorkspaceIntegrations(projectId),
        queryFn: () => fetchWorkspaceIntegrations(projectId)
      });
      let integrations: TIntegration[];
      try {
        integrations = await context.queryClient.ensureQueryData({
          queryKey: workspaceKeys.getWorkspaceIntegrations(projectId),
          queryFn: () => fetchWorkspaceIntegrations(projectId)
        });
      } catch {
        throw redirect({
          to: "/secret-manager/$projectId/integrations",
          params: {
            projectId
          },
          search: { selectedTab: IntegrationsListPageTabs.SecretSyncs }
        });
      }

      if (integrations.length) {
        throw redirect({
@ -88,7 +88,7 @@ export const OctopusDeployConfigurePage = () => {
  {
    integrationAuthId,
    spaceId: currentSpace?.Id,
    resourceId: currentResource.appId!,
    resourceId: currentResource?.appId || "",
    scope: currentScope
  },
  { enabled: Boolean(currentSpace && currentResource) }