Compare commits
117 Commits
feat/autom ... fix/overvi

Commit SHAs:
031c8d67b1, 5fe3c9868f, c936aa7157, 05005f4258, c179d7e5ae, c8553fba2b,
26a9d68823, af5b3aa171, f9a5b46365, d65deab0af, 61591742e4, 54b13a9daa,
4adf0aa1e2, 3d3ee746cf, 07e4358d00, 962dd5d919, 52bd1afb0a, d918dd8967,
e2e0f6a346, 326cb99732, 341b63c61c, 81b026865c, f50c72c033, e1046e2d56,
ed3fa8add1, d123283849, d7fd44b845, 3ffee049ee, 524462d7bc, 351e573fea,
f1bc26e2e5, 8aeb607f6e, e530b7a788, bf61090b5a, 106b068a51, 6f0a97a2fa,
5d604be091, 905cf47d90, 2c40d316f4, 32521523c1, 3a2e8939b1, e5947fcab9,
a6d9c74054, f7cf2bb78f, ff24e76a32, 6ac802b6c9, ff92e00503, b20474c505,
e19ffc91c6, 61eb66efca, 15999daa24, 82520a7f0a, af236ba892, ec31211bca,
0ecf6044d9, 6c512f47bf, c4b7d4618d, 003f2b003d, 33b135f02c, eed7cc6408,
440ada464f, 6b7abbbeb9, 3944e20a5b, 747b5ec68d, ed0dc324a3, 1c13ed54af,
9ad725fd6c, 9a954c8f15, 81a64d081c, 43804f62e6, 67089af17a, 8abfea0409,
ce4adccc80, dcd3b5df56, d83240749f, 36144d8c42, f6425480ca, a3e9392a2f,
633a2ae985, c487b2b34a, 8e20531b40, 8ead2aa774, 1b2128e3cc, ad6f285b59,
d4842dd273, 78f83cb478, e67a8f9c05, c8a871de7c, 64c0951df3, c185414a3c,
f9695741f1, b7c4b11260, ad110f490c, 81f3613393, a7fe79c046, 9eb89bb46d,
c4da1ce32d, add97c9b38, 768ba4f4dc, 18c32d872c, 1fd40ab6ab, 9d258f57ce,
45ccbaf4c9, 838c1af448, e53439d586, cc7d0d752f, b89212a0c9, d4c69d8e5d,
48943b4d78, fd1afc2cbe, 6905029455, 2ef77c737a, 0f31fa3128, 1da5a5f417,
94d7d2b029, e39d1a0530, 4c5f3859d6
.gitignore (8 changes, vendored)

@@ -1,5 +1,3 @@
 .direnv/

-# backend
-node_modules
 .env
@@ -28,6 +26,8 @@ node_modules
 /.pnp
 .pnp.js

+.env
+
 # testing
 coverage
 reports
@@ -63,12 +63,10 @@ yarn-error.log*

 # Editor specific
 .vscode/*
-**/.idea/*
+.idea/*

 frontend-build

 # cli
 .go/
 *.tgz
 cli/infisical-merge
 cli/test/infisical-merge
@@ -1,7 +0,0 @@
-import "@fastify/request-context";
-
-declare module "@fastify/request-context" {
-  interface RequestContextData {
-    reqId: string;
-  }
-}
backend/src/@types/fastify.d.ts (7 changes, vendored)

@@ -100,6 +100,13 @@ import { TWorkflowIntegrationServiceFactory } from "@app/services/workflow-integ
 declare module "@fastify/request-context" {
   interface RequestContextData {
     reqId: string;
+    identityAuthInfo?: {
+      identityId: string;
+      oidc?: {
+        claims: Record<string, string>;
+      };
+    };
+    identityPermissionMetadata?: Record<string, unknown>; // filled by permission service
   }
 }
@@ -85,7 +85,7 @@ export async function up(knex: Knex): Promise<void> {
   }

   if (await knex.schema.hasTable(TableName.DynamicSecret)) {
-    const doesGatewayColExist = await knex.schema.hasColumn(TableName.DynamicSecret, "gatewayId");
+    const doesGatewayColExist = await knex.schema.hasColumn(TableName.DynamicSecret, "projectGatewayId");
     await knex.schema.alterTable(TableName.DynamicSecret, (t) => {
       // not setting a foreign constraint so that cascade effects are not triggered
       if (!doesGatewayColExist) {
@@ -0,0 +1,21 @@
+import { Knex } from "knex";
+
+import { TableName } from "../schemas";
+
+export async function up(knex: Knex): Promise<void> {
+  const hasMappingField = await knex.schema.hasColumn(TableName.IdentityOidcAuth, "claimMetadataMapping");
+  if (!hasMappingField) {
+    await knex.schema.alterTable(TableName.IdentityOidcAuth, (t) => {
+      t.jsonb("claimMetadataMapping");
+    });
+  }
+}
+
+export async function down(knex: Knex): Promise<void> {
+  const hasMappingField = await knex.schema.hasColumn(TableName.IdentityOidcAuth, "claimMetadataMapping");
+  if (hasMappingField) {
+    await knex.schema.alterTable(TableName.IdentityOidcAuth, (t) => {
+      t.dropColumn("claimMetadataMapping");
+    });
+  }
+}
@@ -0,0 +1,23 @@
+import { Knex } from "knex";
+
+import { TableName } from "../schemas";
+
+export async function up(knex: Knex): Promise<void> {
+  const doesParentColumExist = await knex.schema.hasColumn(TableName.SecretFolder, "parentId");
+  const doesNameColumnExist = await knex.schema.hasColumn(TableName.SecretFolder, "name");
+  if (doesParentColumExist && doesNameColumnExist) {
+    await knex.schema.alterTable(TableName.SecretFolder, (t) => {
+      t.index(["parentId", "name"]);
+    });
+  }
+}
+
+export async function down(knex: Knex): Promise<void> {
+  const doesParentColumExist = await knex.schema.hasColumn(TableName.SecretFolder, "parentId");
+  const doesNameColumnExist = await knex.schema.hasColumn(TableName.SecretFolder, "name");
+  if (doesParentColumExist && doesNameColumnExist) {
+    await knex.schema.alterTable(TableName.SecretFolder, (t) => {
+      t.dropIndex(["parentId", "name"]);
+    });
+  }
+}
@@ -0,0 +1,19 @@
+import { Knex } from "knex";
+
+import { TableName } from "../schemas";
+
+export async function up(knex: Knex): Promise<void> {
+  const hasReviewerJwtCol = await knex.schema.hasColumn(
+    TableName.IdentityKubernetesAuth,
+    "encryptedKubernetesTokenReviewerJwt"
+  );
+  if (hasReviewerJwtCol) {
+    await knex.schema.alterTable(TableName.IdentityKubernetesAuth, (t) => {
+      t.binary("encryptedKubernetesTokenReviewerJwt").nullable().alter();
+    });
+  }
+}
+
+export async function down(): Promise<void> {
+  // we can't make it back to non nullable, it will fail
+}
@@ -28,7 +28,7 @@ export const IdentityKubernetesAuthsSchema = z.object({
   allowedNamespaces: z.string(),
   allowedNames: z.string(),
   allowedAudience: z.string(),
-  encryptedKubernetesTokenReviewerJwt: zodBuffer,
+  encryptedKubernetesTokenReviewerJwt: zodBuffer.nullable().optional(),
   encryptedKubernetesCaCertificate: zodBuffer.nullable().optional()
 });
@@ -26,7 +26,8 @@ export const IdentityOidcAuthsSchema = z.object({
   boundSubject: z.string().nullable().optional(),
   createdAt: z.date(),
   updatedAt: z.date(),
-  encryptedCaCertificate: zodBuffer.nullable().optional()
+  encryptedCaCertificate: zodBuffer.nullable().optional(),
+  claimMetadataMapping: z.unknown().nullable().optional()
 });

 export type TIdentityOidcAuths = z.infer<typeof IdentityOidcAuthsSchema>;
@@ -12,7 +12,6 @@ import { TImmutableDBKeys } from "./models";
 export const SecretSharingSchema = z.object({
   id: z.string().uuid(),
   encryptedValue: z.string().nullable().optional(),
-  type: z.string(),
   iv: z.string().nullable().optional(),
   tag: z.string().nullable().optional(),
   hashedHex: z.string().nullable().optional(),
@@ -27,7 +26,8 @@ export const SecretSharingSchema = z.object({
   lastViewedAt: z.date().nullable().optional(),
   password: z.string().nullable().optional(),
   encryptedSecret: zodBuffer.nullable().optional(),
-  identifier: z.string().nullable().optional()
+  identifier: z.string().nullable().optional(),
+  type: z.string().default("share")
 });

 export type TSecretSharing = z.infer<typeof SecretSharingSchema>;
@@ -1,8 +1,10 @@
 import { ForbiddenError } from "@casl/ability";
+import { requestContext } from "@fastify/request-context";

 import { ActionProjectType } from "@app/db/schemas";
 import { getConfig } from "@app/lib/config/env";
 import { BadRequestError } from "@app/lib/errors";
+import { ActorType } from "@app/services/auth/auth-type";

 import { OrgPermissionActions, OrgPermissionSubjects } from "../permission/org-permission";
 import { TPermissionServiceFactory } from "../permission/permission-service";
@@ -81,8 +83,12 @@ export const auditLogServiceFactory = ({
       if (!data.projectId && !data.orgId)
         throw new BadRequestError({ message: "Must specify either project id or org id" });
     }

-    return auditLogQueue.pushToLog(data);
+    const el = { ...data };
+    if (el.actor.type === ActorType.USER || el.actor.type === ActorType.IDENTITY) {
+      const permissionMetadata = requestContext.get("identityPermissionMetadata");
+      el.actor.metadata.permission = permissionMetadata;
+    }
+    return auditLogQueue.pushToLog(el);
   };

   return {
@@ -290,6 +290,7 @@ interface UserActorMetadata {
   userId: string;
   email?: string | null;
   username: string;
+  permission?: Record<string, unknown>;
 }

 interface ServiceActorMetadata {
@@ -300,6 +301,7 @@ interface ServiceActorMetadata {
 interface IdentityActorMetadata {
   identityId: string;
   name: string;
+  permission?: Record<string, unknown>;
 }

 interface ScimClientActorMetadata {}
@@ -978,6 +980,7 @@ interface AddIdentityOidcAuthEvent {
   boundIssuer: string;
   boundAudiences: string;
   boundClaims: Record<string, string>;
+  claimMetadataMapping: Record<string, string>;
   boundSubject: string;
   accessTokenTTL: number;
   accessTokenMaxTTL: number;
@@ -1002,6 +1005,7 @@ interface UpdateIdentityOidcAuthEvent {
   boundIssuer?: string;
   boundAudiences?: string;
   boundClaims?: Record<string, string>;
+  claimMetadataMapping?: Record<string, string>;
   boundSubject?: string;
   accessTokenTTL?: number;
   accessTokenMaxTTL?: number;
@@ -1,31 +1,51 @@
-import crypto from "node:crypto";
+import dns from "node:dns/promises";
+import net from "node:net";

 import { getConfig } from "@app/lib/config/env";
 import { BadRequestError } from "@app/lib/errors";
+import { isPrivateIp } from "@app/lib/ip/ipRange";
 import { getDbConnectionHost } from "@app/lib/knex";

-export const verifyHostInputValidity = (host: string, isGateway = false) => {
+export const verifyHostInputValidity = async (host: string, isGateway = false) => {
   const appCfg = getConfig();
-  const dbHost = appCfg.DB_HOST || getDbConnectionHost(appCfg.DB_CONNECTION_URI);
-  // no need for validation when it's dev
-  if (appCfg.NODE_ENV === "development") return;
+  // if (appCfg.NODE_ENV === "development") return; // incase you want to remove this check in dev

   if (host === "host.docker.internal") throw new BadRequestError({ message: "Invalid db host" });

-  if (
-    appCfg.isCloud &&
-    !isGateway &&
-    // localhost
-    // internal ips
-    (host.match(/^10\.\d+\.\d+\.\d+/) || host.match(/^192\.168\.\d+\.\d+/))
-  )
-    throw new BadRequestError({ message: "Invalid db host" });
-
-  if (
-    host === "localhost" ||
-    host === "127.0.0.1" ||
-    (dbHost?.length === host.length && crypto.timingSafeEqual(Buffer.from(dbHost || ""), Buffer.from(host)))
-  ) {
-    throw new BadRequestError({ message: "Invalid db host" });
-  }
+  const reservedHosts = [appCfg.DB_HOST || getDbConnectionHost(appCfg.DB_CONNECTION_URI)].concat(
+    (appCfg.DB_READ_REPLICAS || []).map((el) => getDbConnectionHost(el.DB_CONNECTION_URI)),
+    getDbConnectionHost(appCfg.REDIS_URL)
+  );
+
+  // get host db ip
+  const exclusiveIps: string[] = [];
+  for await (const el of reservedHosts) {
+    if (el) {
+      if (net.isIPv4(el)) {
+        exclusiveIps.push(el);
+      } else {
+        const resolvedIps = await dns.resolve4(el);
+        exclusiveIps.push(...resolvedIps);
+      }
+    }
+  }
+
+  const normalizedHost = host.split(":")[0];
+  const inputHostIps: string[] = [];
+  if (net.isIPv4(host)) {
+    inputHostIps.push(host);
+  } else {
+    if (normalizedHost === "localhost" || normalizedHost === "host.docker.internal") {
+      throw new BadRequestError({ message: "Invalid db host" });
+    }
+    const resolvedIps = await dns.resolve4(host);
+    inputHostIps.push(...resolvedIps);
+  }
+
+  if (!isGateway) {
+    const isInternalIp = inputHostIps.some((el) => isPrivateIp(el));
+    if (isInternalIp) throw new BadRequestError({ message: "Invalid db host" });
+  }
+
+  const isAppUsedIps = inputHostIps.some((el) => exclusiveIps.includes(el));
+  if (isAppUsedIps) throw new BadRequestError({ message: "Invalid db host" });
+  return inputHostIps;
 };
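With the validator now async and IP-returning, every caller is expected to pin its connection to the resolved address. A minimal sketch of the consuming pattern, where DynamicSecretExampleSchema is a hypothetical zod schema standing in for each provider's real one:

```ts
// Sketch of the caller-side pattern; the schema name is invented for illustration.
const validateProviderInputs = async (inputs: unknown) => {
  const providerInputs = await DynamicSecretExampleSchema.parseAsync(inputs);

  // Throws BadRequestError for localhost/docker-internal names, private or
  // reserved ranges, and any IP the app itself uses (DB, read replicas, Redis).
  const [hostIp] = await verifyHostInputValidity(providerInputs.host);

  // Connecting to the resolved IP rather than the raw hostname also avoids
  // the DNS answer changing between validation and connection.
  return { ...providerInputs, host: hostIp };
};
```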
@@ -13,6 +13,7 @@ import { customAlphabet } from "nanoid";
 import { z } from "zod";

 import { BadRequestError } from "@app/lib/errors";
+import { validateHandlebarTemplate } from "@app/lib/template/validate-handlebars";

 import { DynamicSecretAwsElastiCacheSchema, TDynamicProviderFns } from "./models";

@@ -144,6 +145,14 @@ export const AwsElastiCacheDatabaseProvider = (): TDynamicProviderFns => {
     // We can't return the parsed statements here because we need to use the handlebars template to generate the username and password, before we can use the parsed statements.
     CreateElastiCacheUserSchema.parse(JSON.parse(providerInputs.creationStatement));
     DeleteElasticCacheUserSchema.parse(JSON.parse(providerInputs.revocationStatement));
+    validateHandlebarTemplate("AWS ElastiCache creation", providerInputs.creationStatement, {
+      allowedExpressions: (val) => ["username", "password", "expiration"].includes(val)
+    });
+    if (providerInputs.revocationStatement) {
+      validateHandlebarTemplate("AWS ElastiCache revoke", providerInputs.revocationStatement, {
+        allowedExpressions: (val) => ["username"].includes(val)
+      });
+    }

     return providerInputs;
   };
@@ -3,9 +3,10 @@ import handlebars from "handlebars";
 import { customAlphabet } from "nanoid";
 import { z } from "zod";

-import { BadRequestError } from "@app/lib/errors";
 import { alphaNumericNanoId } from "@app/lib/nanoid";
+import { validateHandlebarTemplate } from "@app/lib/template/validate-handlebars";

+import { verifyHostInputValidity } from "../dynamic-secret-fns";
 import { DynamicSecretCassandraSchema, TDynamicProviderFns } from "./models";

 const generatePassword = (size = 48) => {
@@ -20,11 +21,20 @@ const generateUsername = () => {
 export const CassandraProvider = (): TDynamicProviderFns => {
   const validateProviderInputs = async (inputs: unknown) => {
     const providerInputs = await DynamicSecretCassandraSchema.parseAsync(inputs);
-    if (providerInputs.host === "localhost" || providerInputs.host === "127.0.0.1") {
-      throw new BadRequestError({ message: "Invalid db host" });
-    }
-
-    return providerInputs;
+    const [hostIp] = await verifyHostInputValidity(providerInputs.host);
+    validateHandlebarTemplate("Cassandra creation", providerInputs.creationStatement, {
+      allowedExpressions: (val) => ["username", "password", "expiration", "keyspace"].includes(val)
+    });
+    if (providerInputs.renewStatement) {
+      validateHandlebarTemplate("Cassandra renew", providerInputs.renewStatement, {
+        allowedExpressions: (val) => ["username", "expiration", "keyspace"].includes(val)
+      });
+    }
+    validateHandlebarTemplate("Cassandra revoke", providerInputs.revocationStatement, {
+      allowedExpressions: (val) => ["username"].includes(val)
+    });
+
+    return { ...providerInputs, host: hostIp };
   };

   const $getClient = async (providerInputs: z.infer<typeof DynamicSecretCassandraSchema>) => {
@@ -19,9 +19,8 @@ const generateUsername = () => {
 export const ElasticSearchProvider = (): TDynamicProviderFns => {
   const validateProviderInputs = async (inputs: unknown) => {
     const providerInputs = await DynamicSecretElasticSearchSchema.parseAsync(inputs);
-    verifyHostInputValidity(providerInputs.host);
-
-    return providerInputs;
+    const [hostIp] = await verifyHostInputValidity(providerInputs.host);
+    return { ...providerInputs, host: hostIp };
   };

   const $getClient = async (providerInputs: z.infer<typeof DynamicSecretElasticSearchSchema>) => {
@@ -19,8 +19,8 @@ const generateUsername = () => {
 export const MongoDBProvider = (): TDynamicProviderFns => {
   const validateProviderInputs = async (inputs: unknown) => {
     const providerInputs = await DynamicSecretMongoDBSchema.parseAsync(inputs);
-    verifyHostInputValidity(providerInputs.host);
-    return providerInputs;
+    const [hostIp] = await verifyHostInputValidity(providerInputs.host);
+    return { ...providerInputs, host: hostIp };
   };

   const $getClient = async (providerInputs: z.infer<typeof DynamicSecretMongoDBSchema>) => {
@@ -79,9 +79,8 @@ async function deleteRabbitMqUser({ axiosInstance, usernameToDelete }: TDeleteRa
 export const RabbitMqProvider = (): TDynamicProviderFns => {
   const validateProviderInputs = async (inputs: unknown) => {
     const providerInputs = await DynamicSecretRabbitMqSchema.parseAsync(inputs);
-    verifyHostInputValidity(providerInputs.host);
-
-    return providerInputs;
+    const [hostIp] = await verifyHostInputValidity(providerInputs.host);
+    return { ...providerInputs, host: hostIp };
   };

   const $getClient = async (providerInputs: z.infer<typeof DynamicSecretRabbitMqSchema>) => {
@@ -5,6 +5,7 @@ import { z } from "zod";

 import { BadRequestError } from "@app/lib/errors";
 import { alphaNumericNanoId } from "@app/lib/nanoid";
+import { validateHandlebarTemplate } from "@app/lib/template/validate-handlebars";

 import { verifyHostInputValidity } from "../dynamic-secret-fns";
 import { DynamicSecretRedisDBSchema, TDynamicProviderFns } from "./models";
@@ -51,8 +52,20 @@ const executeTransactions = async (connection: Redis, commands: string[]): Promi
 export const RedisDatabaseProvider = (): TDynamicProviderFns => {
   const validateProviderInputs = async (inputs: unknown) => {
     const providerInputs = await DynamicSecretRedisDBSchema.parseAsync(inputs);
-    verifyHostInputValidity(providerInputs.host);
-    return providerInputs;
+    const [hostIp] = await verifyHostInputValidity(providerInputs.host);
+    validateHandlebarTemplate("Redis creation", providerInputs.creationStatement, {
+      allowedExpressions: (val) => ["username", "password", "expiration"].includes(val)
+    });
+    if (providerInputs.renewStatement) {
+      validateHandlebarTemplate("Redis renew", providerInputs.renewStatement, {
+        allowedExpressions: (val) => ["username", "expiration"].includes(val)
+      });
+    }
+    validateHandlebarTemplate("Redis revoke", providerInputs.revocationStatement, {
+      allowedExpressions: (val) => ["username"].includes(val)
+    });
+
+    return { ...providerInputs, host: hostIp };
   };

   const $getClient = async (providerInputs: z.infer<typeof DynamicSecretRedisDBSchema>) => {
@@ -5,6 +5,7 @@ import { z } from "zod";

 import { BadRequestError } from "@app/lib/errors";
 import { alphaNumericNanoId } from "@app/lib/nanoid";
+import { validateHandlebarTemplate } from "@app/lib/template/validate-handlebars";

 import { verifyHostInputValidity } from "../dynamic-secret-fns";
 import { DynamicSecretSapAseSchema, TDynamicProviderFns } from "./models";
@@ -27,8 +28,16 @@ export const SapAseProvider = (): TDynamicProviderFns => {
   const validateProviderInputs = async (inputs: unknown) => {
     const providerInputs = await DynamicSecretSapAseSchema.parseAsync(inputs);

-    verifyHostInputValidity(providerInputs.host);
-    return providerInputs;
+    const [hostIp] = await verifyHostInputValidity(providerInputs.host);
+    validateHandlebarTemplate("SAP ASE creation", providerInputs.creationStatement, {
+      allowedExpressions: (val) => ["username", "password"].includes(val)
+    });
+    if (providerInputs.revocationStatement) {
+      validateHandlebarTemplate("SAP ASE revoke", providerInputs.revocationStatement, {
+        allowedExpressions: (val) => ["username"].includes(val)
+      });
+    }
+    return { ...providerInputs, host: hostIp };
   };

   const $getClient = async (providerInputs: z.infer<typeof DynamicSecretSapAseSchema>, useMaster?: boolean) => {
@@ -11,6 +11,7 @@ import { z } from "zod";

 import { BadRequestError } from "@app/lib/errors";
 import { alphaNumericNanoId } from "@app/lib/nanoid";
+import { validateHandlebarTemplate } from "@app/lib/template/validate-handlebars";

 import { verifyHostInputValidity } from "../dynamic-secret-fns";
 import { DynamicSecretSapHanaSchema, TDynamicProviderFns } from "./models";
@@ -28,8 +29,19 @@ export const SapHanaProvider = (): TDynamicProviderFns => {
   const validateProviderInputs = async (inputs: unknown) => {
     const providerInputs = await DynamicSecretSapHanaSchema.parseAsync(inputs);

-    verifyHostInputValidity(providerInputs.host);
-    return providerInputs;
+    const [hostIp] = await verifyHostInputValidity(providerInputs.host);
+    validateHandlebarTemplate("SAP Hana creation", providerInputs.creationStatement, {
+      allowedExpressions: (val) => ["username", "password", "expiration"].includes(val)
+    });
+    if (providerInputs.renewStatement) {
+      validateHandlebarTemplate("SAP Hana renew", providerInputs.renewStatement, {
+        allowedExpressions: (val) => ["username", "expiration"].includes(val)
+      });
+    }
+    validateHandlebarTemplate("SAP Hana revoke", providerInputs.revocationStatement, {
+      allowedExpressions: (val) => ["username"].includes(val)
+    });
+    return { ...providerInputs, host: hostIp };
   };

   const $getClient = async (providerInputs: z.infer<typeof DynamicSecretSapHanaSchema>) => {
@@ -5,6 +5,7 @@ import { z } from "zod";

 import { BadRequestError } from "@app/lib/errors";
 import { alphaNumericNanoId } from "@app/lib/nanoid";
+import { validateHandlebarTemplate } from "@app/lib/template/validate-handlebars";

 import { DynamicSecretSnowflakeSchema, TDynamicProviderFns } from "./models";

@@ -31,6 +32,18 @@ const getDaysToExpiry = (expiryDate: Date) => {
 export const SnowflakeProvider = (): TDynamicProviderFns => {
   const validateProviderInputs = async (inputs: unknown) => {
     const providerInputs = await DynamicSecretSnowflakeSchema.parseAsync(inputs);
+    validateHandlebarTemplate("Snowflake creation", providerInputs.creationStatement, {
+      allowedExpressions: (val) => ["username", "password", "expiration"].includes(val)
+    });
+    if (providerInputs.renewStatement) {
+      validateHandlebarTemplate("Snowflake renew", providerInputs.renewStatement, {
+        allowedExpressions: (val) => ["username", "expiration"].includes(val)
+      });
+    }
+    validateHandlebarTemplate("Snowflake revoke", providerInputs.revocationStatement, {
+      allowedExpressions: (val) => ["username"].includes(val)
+    });

     return providerInputs;
   };
@@ -5,6 +5,7 @@ import { z } from "zod";

 import { withGatewayProxy } from "@app/lib/gateway";
 import { alphaNumericNanoId } from "@app/lib/nanoid";
+import { validateHandlebarTemplate } from "@app/lib/template/validate-handlebars";

 import { TGatewayServiceFactory } from "../../gateway/gateway-service";
 import { verifyHostInputValidity } from "../dynamic-secret-fns";
@@ -117,8 +118,21 @@ type TSqlDatabaseProviderDTO = {
 export const SqlDatabaseProvider = ({ gatewayService }: TSqlDatabaseProviderDTO): TDynamicProviderFns => {
   const validateProviderInputs = async (inputs: unknown) => {
     const providerInputs = await DynamicSecretSqlDBSchema.parseAsync(inputs);
-    verifyHostInputValidity(providerInputs.host, Boolean(providerInputs.projectGatewayId));
-    return providerInputs;
+
+    const [hostIp] = await verifyHostInputValidity(providerInputs.host, Boolean(providerInputs.projectGatewayId));
+    validateHandlebarTemplate("SQL creation", providerInputs.creationStatement, {
+      allowedExpressions: (val) => ["username", "password", "expiration", "database"].includes(val)
+    });
+    if (providerInputs.renewStatement) {
+      validateHandlebarTemplate("SQL renew", providerInputs.renewStatement, {
+        allowedExpressions: (val) => ["username", "expiration", "database"].includes(val)
+      });
+    }
+    validateHandlebarTemplate("SQL revoke", providerInputs.revocationStatement, {
+      allowedExpressions: (val) => ["username", "database"].includes(val)
+    });
+
+    return { ...providerInputs, host: hostIp };
   };

   const $getClient = async (providerInputs: z.infer<typeof DynamicSecretSqlDBSchema>) => {
@@ -144,7 +158,8 @@ export const SqlDatabaseProvider = ({ gatewayService }: TSqlDatabaseProviderDTO)
           }
         : undefined
     },
-    acquireConnectionTimeout: EXTERNAL_REQUEST_TIMEOUT
+    acquireConnectionTimeout: EXTERNAL_REQUEST_TIMEOUT,
+    pool: { min: 0, max: 7 }
   });
   return db;
 };
@@ -5,6 +5,7 @@ import { ActionProjectType, TableName } from "@app/db/schemas";
 import { validatePermissionBoundary } from "@app/lib/casl/boundary";
 import { BadRequestError, ForbiddenRequestError, NotFoundError } from "@app/lib/errors";
 import { ms } from "@app/lib/ms";
+import { validateHandlebarTemplate } from "@app/lib/template/validate-handlebars";
 import { unpackPermissions } from "@app/server/routes/sanitizedSchema/permission";
 import { ActorType } from "@app/services/auth/auth-type";
 import { TIdentityProjectDALFactory } from "@app/services/identity-project/identity-project-dal";
@@ -86,6 +87,9 @@ export const identityProjectAdditionalPrivilegeV2ServiceFactory = ({
         message: "Failed to update more privileged identity",
         details: { missingPermissions: permissionBoundary.missingPermissions }
       });
+    validateHandlebarTemplate("Identity Additional Privilege Create", JSON.stringify(customPermission || []), {
+      allowedExpressions: (val) => val.includes("identity.")
+    });

     const existingSlug = await identityProjectAdditionalPrivilegeDAL.findOne({
       slug,
@@ -173,6 +177,10 @@ export const identityProjectAdditionalPrivilegeV2ServiceFactory = ({
         details: { missingPermissions: permissionBoundary.missingPermissions }
       });

+    validateHandlebarTemplate("Identity Additional Privilege Update", JSON.stringify(data.permissions || []), {
+      allowedExpressions: (val) => val.includes("identity.")
+    });
+
     if (data?.slug) {
       const existingSlug = await identityProjectAdditionalPrivilegeDAL.findOne({
         slug: data.slug,
@@ -5,6 +5,7 @@ import { ActionProjectType } from "@app/db/schemas";
 import { validatePermissionBoundary } from "@app/lib/casl/boundary";
 import { BadRequestError, ForbiddenRequestError, NotFoundError } from "@app/lib/errors";
 import { ms } from "@app/lib/ms";
+import { validateHandlebarTemplate } from "@app/lib/template/validate-handlebars";
 import { UnpackedPermissionSchema } from "@app/server/routes/sanitizedSchema/permission";
 import { ActorType } from "@app/services/auth/auth-type";
 import { TIdentityProjectDALFactory } from "@app/services/identity-project/identity-project-dal";
@@ -102,6 +103,10 @@ export const identityProjectAdditionalPrivilegeServiceFactory = ({
     });
     if (existingSlug) throw new BadRequestError({ message: "Additional privilege of provided slug exist" });

+    validateHandlebarTemplate("Identity Additional Privilege Create", JSON.stringify(customPermission || []), {
+      allowedExpressions: (val) => val.includes("identity.")
+    });
+
     const packedPermission = JSON.stringify(packRules(customPermission));
     if (!dto.isTemporary) {
       const additionalPrivilege = await identityProjectAdditionalPrivilegeDAL.create({
@@ -203,6 +208,9 @@ export const identityProjectAdditionalPrivilegeServiceFactory = ({
     }

     const isTemporary = typeof data?.isTemporary !== "undefined" ? data.isTemporary : identityPrivilege.isTemporary;
+    validateHandlebarTemplate("Identity Additional Privilege Update", JSON.stringify(data.permissions || []), {
+      allowedExpressions: (val) => val.includes("identity.")
+    });

     const packedPermission = data.permissions ? JSON.stringify(packRules(data.permissions)) : undefined;
     if (isTemporary) {
@@ -50,7 +50,7 @@ export type TLicenseServiceFactory = ReturnType<typeof licenseServiceFactory>;
 const LICENSE_SERVER_CLOUD_LOGIN = "/api/auth/v1/license-server-login";
 const LICENSE_SERVER_ON_PREM_LOGIN = "/api/auth/v1/license-login";

-const LICENSE_SERVER_CLOUD_PLAN_TTL = 30; // 30 second
+const LICENSE_SERVER_CLOUD_PLAN_TTL = 5 * 60; // 5 mins
 const FEATURE_CACHE_KEY = (orgId: string) => `infisical-cloud-plan-${orgId}`;

 export const licenseServiceFactory = ({
@@ -142,7 +142,10 @@ export const licenseServiceFactory = ({
     try {
       if (instanceType === InstanceType.Cloud) {
         const cachedPlan = await keyStore.getItem(FEATURE_CACHE_KEY(orgId));
-        if (cachedPlan) return JSON.parse(cachedPlan) as TFeatureSet;
+        if (cachedPlan) {
+          logger.info(`getPlan: plan fetched from cache [orgId=${orgId}] [projectId=${projectId}]`);
+          return JSON.parse(cachedPlan) as TFeatureSet;
+        }

         const org = await orgDAL.findOrgById(orgId);
         if (!org) throw new NotFoundError({ message: `Organization with ID '${orgId}' not found` });
@@ -170,6 +173,8 @@ export const licenseServiceFactory = ({
         JSON.stringify(onPremFeatures)
       );
       return onPremFeatures;
+    } finally {
+      logger.info(`getPlan: Process done for [orgId=${orgId}] [projectId=${projectId}]`);
     }
     return onPremFeatures;
   };
@@ -131,12 +131,12 @@ function validateOrgSSO(actorAuthMethod: ActorAuthMethod, isOrgSsoEnforced: TOrg
   }
 }

-const escapeHandlebarsMissingMetadata = (obj: Record<string, string>) => {
+const escapeHandlebarsMissingDict = (obj: Record<string, string>, key: string) => {
   const handler = {
     get(target: Record<string, string>, prop: string) {
-      if (!(prop in target)) {
+      if (!Object.hasOwn(target, prop)) {
         // eslint-disable-next-line no-param-reassign
-        target[prop] = `{{identity.metadata.${prop}}}`; // Add missing key as an "own" property
+        target[prop] = `{{${key}.${prop}}}`; // Add missing key as an "own" property
       }
       return target[prop];
     }
@@ -145,4 +145,4 @@ const escapeHandlebarsMissingMetadata = (obj: Record<string, string>) => {
   return new Proxy(obj, handler);
 };

-export { escapeHandlebarsMissingMetadata, isAuthMethodSaml, validateOrgSSO };
+export { escapeHandlebarsMissingDict, isAuthMethodSaml, validateOrgSSO };
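The get trap assigns the missing key as an own property before returning it, which matters because handlebars' lookupProperty reads the value first and only then checks hasOwnProperty. A small sketch of the resulting behavior (the expected output follows directly from the trap logic above):

```ts
import handlebars from "handlebars";

// Unknown keys echo back their own template expression, so unmatched
// placeholders survive compilation instead of collapsing to "".
const dict = escapeHandlebarsMissingDict({ team: "eng" }, "identity.metadata");
const tpl = handlebars.compile("{{identity.metadata.team}}/{{identity.metadata.region}}", { data: false });

// Expected: "eng/{{identity.metadata.region}}"
console.log(tpl({ identity: { metadata: dict } }));
```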
@@ -1,5 +1,6 @@
 import { createMongoAbility, MongoAbility, RawRuleOf } from "@casl/ability";
 import { PackRule, unpackRules } from "@casl/ability/extra";
+import { requestContext } from "@fastify/request-context";
 import { MongoQuery } from "@ucast/mongo2js";
 import handlebars from "handlebars";

@@ -22,7 +23,7 @@ import { TServiceTokenDALFactory } from "@app/services/service-token/service-tok

 import { orgAdminPermissions, orgMemberPermissions, orgNoAccessPermissions, OrgPermissionSet } from "./org-permission";
 import { TPermissionDALFactory } from "./permission-dal";
-import { escapeHandlebarsMissingMetadata, validateOrgSSO } from "./permission-fns";
+import { escapeHandlebarsMissingDict, validateOrgSSO } from "./permission-fns";
 import {
   TBuildOrgPermissionDTO,
   TBuildProjectPermissionDTO,
@@ -243,13 +244,13 @@ export const permissionServiceFactory = ({

     const rules = buildProjectPermissionRules(rolePermissions.concat(additionalPrivileges));
     const templatedRules = handlebars.compile(JSON.stringify(rules), { data: false });
-    const metadataKeyValuePair = escapeHandlebarsMissingMetadata(
-      objectify(
-        userProjectPermission.metadata,
-        (i) => i.key,
-        (i) => i.value
-      )
+    const unescapedMetadata = objectify(
+      userProjectPermission.metadata,
+      (i) => i.key,
+      (i) => i.value
     );
+    const metadataKeyValuePair = escapeHandlebarsMissingDict(unescapedMetadata, "identity.metadata");
+    requestContext.set("identityPermissionMetadata", { metadata: unescapedMetadata });
     const interpolateRules = templatedRules(
       {
         identity: {
@@ -317,20 +318,26 @@ export const permissionServiceFactory = ({

     const rules = buildProjectPermissionRules(rolePermissions.concat(additionalPrivileges));
     const templatedRules = handlebars.compile(JSON.stringify(rules), { data: false });
-    const metadataKeyValuePair = escapeHandlebarsMissingMetadata(
-      objectify(
-        identityProjectPermission.metadata,
-        (i) => i.key,
-        (i) => i.value
-      )
+    const unescapedIdentityAuthInfo = requestContext.get("identityAuthInfo");
+    const unescapedMetadata = objectify(
+      identityProjectPermission.metadata,
+      (i) => i.key,
+      (i) => i.value
     );
+    const identityAuthInfo =
+      unescapedIdentityAuthInfo?.identityId === identityId && unescapedIdentityAuthInfo
+        ? escapeHandlebarsMissingDict(unescapedIdentityAuthInfo as never, "identity.auth")
+        : {};
+    const metadataKeyValuePair = escapeHandlebarsMissingDict(unescapedMetadata, "identity.metadata");
+
+    requestContext.set("identityPermissionMetadata", { metadata: unescapedMetadata, auth: unescapedIdentityAuthInfo });
     const interpolateRules = templatedRules(
       {
         identity: {
           id: identityProjectPermission.identityId,
           username: identityProjectPermission.username,
-          metadata: metadataKeyValuePair
+          metadata: metadataKeyValuePair,
+          auth: identityAuthInfo
         }
       },
       { data: false }
@@ -424,12 +431,13 @@ export const permissionServiceFactory = ({

     const rules = buildProjectPermissionRules(rolePermissions.concat(additionalPrivileges));
     const templatedRules = handlebars.compile(JSON.stringify(rules), { data: false });
-    const metadataKeyValuePair = escapeHandlebarsMissingMetadata(
+    const metadataKeyValuePair = escapeHandlebarsMissingDict(
       objectify(
         userProjectPermission.metadata,
        (i) => i.key,
        (i) => i.value
-      )
+      ),
+      "identity.metadata"
     );
     const interpolateRules = templatedRules(
       {
@@ -469,14 +477,14 @@ export const permissionServiceFactory = ({

     const rules = buildProjectPermissionRules(rolePermissions.concat(additionalPrivileges));
     const templatedRules = handlebars.compile(JSON.stringify(rules), { data: false });
-    const metadataKeyValuePair = escapeHandlebarsMissingMetadata(
+    const metadataKeyValuePair = escapeHandlebarsMissingDict(
       objectify(
         identityProjectPermission.metadata,
         (i) => i.key,
         (i) => i.value
-      )
+      ),
+      "identity.metadata"
     );

     const interpolateRules = templatedRules(
       {
         identity: {
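The practical effect is that project permission rules can now reference OIDC claims at evaluation time through the new identity.auth path. An illustrative raw rule in CASL's shape; the claim name and the glob condition are invented for the example, not taken from this diff:

```ts
// Hypothetical templated rule; "department" is an invented claim that would
// arrive via claimMetadataMapping on the identity's OIDC auth method.
const rawRule = {
  action: "read",
  subject: "secrets",
  conditions: {
    secretPath: { $glob: "/{{identity.auth.oidc.claims.department}}/**" }
  }
};
```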
@@ -5,6 +5,7 @@ import { ActionProjectType, TableName } from "@app/db/schemas";
 import { validatePermissionBoundary } from "@app/lib/casl/boundary";
 import { BadRequestError, ForbiddenRequestError, NotFoundError } from "@app/lib/errors";
 import { ms } from "@app/lib/ms";
+import { validateHandlebarTemplate } from "@app/lib/template/validate-handlebars";
 import { UnpackedPermissionSchema } from "@app/server/routes/sanitizedSchema/permission";
 import { ActorType } from "@app/services/auth/auth-type";
 import { TProjectMembershipDALFactory } from "@app/services/project-membership/project-membership-dal";
@@ -92,6 +93,10 @@ export const projectUserAdditionalPrivilegeServiceFactory = ({
     if (existingSlug)
       throw new BadRequestError({ message: `Additional privilege with provided slug ${slug} already exists` });

+    validateHandlebarTemplate("User Additional Privilege Create", JSON.stringify(customPermission || []), {
+      allowedExpressions: (val) => val.includes("identity.")
+    });
+
     const packedPermission = JSON.stringify(packRules(customPermission));
     if (!dto.isTemporary) {
       const additionalPrivilege = await projectUserAdditionalPrivilegeDAL.create({
@@ -185,6 +190,10 @@ export const projectUserAdditionalPrivilegeServiceFactory = ({
       throw new BadRequestError({ message: `Additional privilege with provided slug ${dto.slug} already exists` });
     }

+    validateHandlebarTemplate("User Additional Privilege Update", JSON.stringify(dto.permissions || []), {
+      allowedExpressions: (val) => val.includes("identity.")
+    });
+
     const isTemporary = typeof dto?.isTemporary !== "undefined" ? dto.isTemporary : userPrivilege.isTemporary;

     const packedPermission = dto.permissions && JSON.stringify(packRules(dto.permissions));
@@ -8,10 +8,9 @@ import axios from "axios";
 import jmespath from "jmespath";
 import knex from "knex";

-import { getConfig } from "@app/lib/config/env";
-import { getDbConnectionHost } from "@app/lib/knex";
 import { alphaNumericNanoId } from "@app/lib/nanoid";

+import { verifyHostInputValidity } from "../../dynamic-secret/dynamic-secret-fns";
 import { TAssignOp, TDbProviderClients, TDirectAssignOp, THttpProviderFunction } from "../templates/types";
 import { TSecretRotationData, TSecretRotationDbFn } from "./secret-rotation-queue-types";

@@ -88,32 +87,14 @@ export const secretRotationDbFn = async ({
   variables,
   options
 }: TSecretRotationDbFn) => {
-  const appCfg = getConfig();
-
   const ssl = ca ? { rejectUnauthorized: false, ca } : undefined;
-  const isCloud = Boolean(appCfg.LICENSE_SERVER_KEY); // quick and dirty way to check if its cloud or not
-  const dbHost = appCfg.DB_HOST || getDbConnectionHost(appCfg.DB_CONNECTION_URI);
-
-  if (
-    isCloud &&
-    // internal ips
-    (host === "host.docker.internal" || host.match(/^10\.\d+\.\d+\.\d+/) || host.match(/^192\.168\.\d+\.\d+/))
-  )
-    throw new Error("Invalid db host");
-  if (
-    host === "localhost" ||
-    host === "127.0.0.1" ||
-    // database infisical uses
-    dbHost === host
-  )
-    throw new Error("Invalid db host");
-
+  const [hostIp] = await verifyHostInputValidity(host);
   const db = knex({
     client,
     connection: {
       database,
       port,
-      host,
+      host: hostIp,
       user: username,
       password,
       connectionTimeoutMillis: EXTERNAL_REQUEST_TIMEOUT,
@@ -244,7 +244,7 @@ export const KUBERNETES_AUTH = {
   kubernetesHost: "The host string, host:port pair, or URL to the base of the Kubernetes API server.",
   caCert: "The PEM-encoded CA cert for the Kubernetes API server.",
   tokenReviewerJwt:
-    "The long-lived service account JWT token for Infisical to access the TokenReview API to validate other service account JWT tokens submitted by applications/pods.",
+    "Optional JWT token for accessing Kubernetes TokenReview API. If provided, this long-lived token will be used to validate service account tokens during authentication. If omitted, the client's own JWT will be used instead, which requires the client to have the system:auth-delegator ClusterRole binding.",
   allowedNamespaces:
     "The comma-separated list of trusted namespaces that service accounts must belong to authenticate with Infisical.",
   allowedNames: "The comma-separated list of trusted service account names that can authenticate with Infisical.",
@@ -260,7 +260,7 @@ export const KUBERNETES_AUTH = {
   kubernetesHost: "The new host string, host:port pair, or URL to the base of the Kubernetes API server.",
   caCert: "The new PEM-encoded CA cert for the Kubernetes API server.",
   tokenReviewerJwt:
-    "The new long-lived service account JWT token for Infisical to access the TokenReview API to validate other service account JWT tokens submitted by applications/pods.",
+    "Optional JWT token for accessing Kubernetes TokenReview API. If provided, this long-lived token will be used to validate service account tokens during authentication. If omitted, the client's own JWT will be used instead, which requires the client to have the system:auth-delegator ClusterRole binding.",
   allowedNamespaces:
     "The new comma-separated list of trusted namespaces that service accounts must belong to authenticate with Infisical.",
   allowedNames: "The new comma-separated list of trusted service account names that can authenticate with Infisical.",
@@ -329,6 +329,7 @@ export const OIDC_AUTH = {
   boundIssuer: "The unique identifier of the identity provider issuing the JWT.",
   boundAudiences: "The list of intended recipients.",
   boundClaims: "The attributes that should be present in the JWT for it to be valid.",
+  claimMetadataMapping: "The attributes that should be present in the permission metadata from the JWT.",
   boundSubject: "The expected principal that is the subject of the JWT.",
   accessTokenTrustedIps: "The IPs or CIDR ranges that access tokens can be used from.",
   accessTokenTTL: "The lifetime for an access token in seconds.",
@@ -342,6 +343,7 @@ export const OIDC_AUTH = {
   boundIssuer: "The new unique identifier of the identity provider issuing the JWT.",
   boundAudiences: "The new list of intended recipients.",
   boundClaims: "The new attributes that should be present in the JWT for it to be valid.",
+  claimMetadataMapping: "The new attributes that should be present in the permission metadata from the JWT.",
   boundSubject: "The new expected principal that is the subject of the JWT.",
   accessTokenTrustedIps: "The new IPs or CIDR ranges that access tokens can be used from.",
   accessTokenTTL: "The new lifetime for an access token in seconds.",
@@ -629,7 +631,8 @@ export const FOLDERS = {
   workspaceId: "The ID of the project to list folders from.",
   environment: "The slug of the environment to list folders from.",
   path: "The path to list folders from.",
-  directory: "The directory to list folders from. (Deprecated in favor of path)"
+  directory: "The directory to list folders from. (Deprecated in favor of path)",
+  recursive: "Whether or not to fetch all folders from the specified base path, and all of its subdirectories."
 },
 GET_BY_ID: {
   folderId: "The ID of the folder to get details."
@@ -813,7 +816,8 @@ export const DASHBOARD = {
   search: "The text string to filter secret keys and folder names by.",
   includeSecrets: "Whether to include project secrets in the response.",
   includeFolders: "Whether to include project folders in the response.",
-  includeDynamicSecrets: "Whether to include dynamic project secrets in the response."
+  includeDynamicSecrets: "Whether to include dynamic project secrets in the response.",
+  includeImports: "Whether to include project secret imports in the response."
 },
 SECRET_DETAILS_LIST: {
   projectId: "The ID of the project to list secrets/folders from.",
@@ -1725,7 +1729,8 @@ export const SecretSyncs = {
 SYNC_OPTIONS: (destination: SecretSync) => {
   const destinationName = SECRET_SYNC_NAME_MAP[destination];
   return {
-    initialSyncBehavior: `Specify how Infisical should resolve the initial sync to the ${destinationName} destination.`
+    initialSyncBehavior: `Specify how Infisical should resolve the initial sync to the ${destinationName} destination.`,
+    disableSecretDeletion: `Enable this flag to prevent removal of secrets from the ${destinationName} destination when syncing.`
   };
 },
 ADDITIONAL_SYNC_OPTIONS: {
@@ -56,6 +56,7 @@ const envSchema = z
   // TODO(akhilmhdh): will be changed to one
   ENCRYPTION_KEY: zpStr(z.string().optional()),
   ROOT_ENCRYPTION_KEY: zpStr(z.string().optional()),
+  QUEUE_WORKERS_ENABLED: zodStrBool.default("true"),
   HTTPS_ENABLED: zodStrBool,
   // smtp options
   SMTP_HOST: zpStr(z.string().optional()),
@@ -93,6 +93,7 @@ export const pingGatewayAndVerify = async ({
+  let lastError: Error | null = null;
   const quicClient = await createQuicConnection(relayHost, relayPort, tlsOptions, identityId, orgId).catch((err) => {
     throw new BadRequestError({
       message: (err as Error)?.message,
      error: err as Error
    });
  });
backend/src/lib/ip/ipRange.ts (61 additions, new file)

@@ -0,0 +1,61 @@
+import { BlockList } from "node:net";
+
+import { BadRequestError } from "../errors";
+
+// Define BlockList instances for each range type
+const ipv4RangeLists: Record<string, BlockList> = {
+  unspecified: new BlockList(),
+  broadcast: new BlockList(),
+  multicast: new BlockList(),
+  linkLocal: new BlockList(),
+  loopback: new BlockList(),
+  carrierGradeNat: new BlockList(),
+  private: new BlockList(),
+  reserved: new BlockList()
+};
+
+// Add IPv4 CIDR ranges to each BlockList
+ipv4RangeLists.unspecified.addSubnet("0.0.0.0", 8);
+ipv4RangeLists.broadcast.addAddress("255.255.255.255");
+ipv4RangeLists.multicast.addSubnet("224.0.0.0", 4);
+ipv4RangeLists.linkLocal.addSubnet("169.254.0.0", 16);
+ipv4RangeLists.loopback.addSubnet("127.0.0.0", 8);
+ipv4RangeLists.carrierGradeNat.addSubnet("100.64.0.0", 10);
+
+// IPv4 Private ranges
+ipv4RangeLists.private.addSubnet("10.0.0.0", 8);
+ipv4RangeLists.private.addSubnet("172.16.0.0", 12);
+ipv4RangeLists.private.addSubnet("192.168.0.0", 16);
+
+// IPv4 Reserved ranges
+ipv4RangeLists.reserved.addSubnet("192.0.0.0", 24);
+ipv4RangeLists.reserved.addSubnet("192.0.2.0", 24);
+ipv4RangeLists.reserved.addSubnet("192.88.99.0", 24);
+ipv4RangeLists.reserved.addSubnet("198.18.0.0", 15);
+ipv4RangeLists.reserved.addSubnet("198.51.100.0", 24);
+ipv4RangeLists.reserved.addSubnet("203.0.113.0", 24);
+ipv4RangeLists.reserved.addSubnet("240.0.0.0", 4);
+
+/**
+ * Checks if an IP address (IPv4) is private or public
+ * inspired by: https://github.com/whitequark/ipaddr.js/blob/main/lib/ipaddr.js
+ */
+export const getIpRange = (ip: string): string => {
+  try {
+    const rangeLists = ipv4RangeLists;
+    // Check each range type
+    for (const rangeName in rangeLists) {
+      if (Object.hasOwn(rangeLists, rangeName)) {
+        if (rangeLists[rangeName].check(ip)) {
+          return rangeName;
+        }
+      }
+    }
+
+    // If no range matched, it's a public address
+    return "unicast";
+  } catch (error) {
+    throw new BadRequestError({ message: "Invalid IP address", error });
+  }
+};
+
+export const isPrivateIp = (ip: string) => getIpRange(ip) !== "unicast";
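Assuming the @app path alias resolves the same way it does elsewhere in the backend, the helper classifies addresses like this:

```ts
import { getIpRange, isPrivateIp } from "@app/lib/ip/ipRange";

console.log(getIpRange("10.1.2.3"));     // "private"
console.log(getIpRange("127.0.0.1"));    // "loopback"
console.log(getIpRange("100.64.0.1"));   // "carrierGradeNat"
console.log(getIpRange("8.8.8.8"));      // "unicast", i.e. public

// isPrivateIp treats anything that is not "unicast" as private, so loopback,
// link-local, CGNAT, and reserved ranges are all caught as well.
console.log(isPrivateIp("192.168.1.1")); // true
console.log(isPrivateIp("1.1.1.1"));     // false
```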
backend/src/lib/template/dot-access.ts (34 additions, new file)

@@ -0,0 +1,34 @@
+/**
+ * Safely retrieves a value from a nested object using dot notation path
+ */
+export const getStringValueByDot = (
+  obj: Record<string, unknown> | null | undefined,
+  path: string,
+  defaultValue?: string
+): string | undefined => {
+  // Handle null or undefined input
+  if (!obj) {
+    return defaultValue;
+  }
+
+  const parts = path.split(".");
+  let current: unknown = obj;
+
+  for (const part of parts) {
+    const isObject = typeof current === "object" && !Array.isArray(current) && current !== null;
+    if (!isObject) {
+      return defaultValue;
+    }
+    if (!Object.hasOwn(current as object, part)) {
+      // Check if the property exists as an own property
+      return defaultValue;
+    }
+    current = (current as Record<string, unknown>)[part];
+  }
+
+  if (typeof current !== "string") {
+    return defaultValue;
+  }
+
+  return current;
+};
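A quick sketch of the lookup semantics: only string leaves are returned; arrays, non-string leaves, missing paths, and missing objects all fall back to the default.

```ts
import { getStringValueByDot } from "@app/lib/template/dot-access";

const claims = { user: { org: { unit: "platform" } }, groups: ["a", "b"] };

getStringValueByDot(claims, "user.org.unit");         // "platform"
getStringValueByDot(claims, "user.org.missing", "-"); // "-" (absent key)
getStringValueByDot(claims, "groups", "-");           // "-" (array, not a string leaf)
getStringValueByDot(undefined, "user.org.unit", "-"); // "-" (no object at all)
```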
backend/src/lib/template/validate-handlebars.ts (21 additions, new file)

@@ -0,0 +1,21 @@
+import handlebars from "handlebars";
+
+import { BadRequestError } from "../errors";
+import { logger } from "../logger";
+
+type SanitizationArg = {
+  allowedExpressions?: (arg: string) => boolean;
+};
+
+export const validateHandlebarTemplate = (templateName: string, template: string, dto: SanitizationArg) => {
+  const parsedAst = handlebars.parse(template);
+  parsedAst.body.forEach((el) => {
+    if (el.type === "ContentStatement") return;
+    if (el.type === "MustacheStatement" && "path" in el) {
+      const { path } = el as { type: "MustacheStatement"; path: { type: "PathExpression"; original: string } };
+      if (path.type === "PathExpression" && dto?.allowedExpressions?.(path.original)) return;
+    }
+    logger.error(el, "Template sanitization failed");
+    throw new BadRequestError({ message: `Template sanitization failed: ${templateName}` });
+  });
+};
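The validator walks the parsed AST and accepts only plain content plus mustache expressions whose path passes the allow-list callback; anything else, including helper invocations whose path would not be allow-listed, is rejected. A usage sketch:

```ts
import { validateHandlebarTemplate } from "@app/lib/template/validate-handlebars";

// Passes: every expression is allow-listed.
validateHandlebarTemplate("demo", "CREATE USER {{username}} PASSWORD {{password}};", {
  allowedExpressions: (val) => ["username", "password"].includes(val)
});

// Throws BadRequestError: {{evil}} is not an approved expression.
validateHandlebarTemplate("demo", "DROP TABLE {{evil}};", {
  allowedExpressions: (val) => ["username"].includes(val)
});
```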
@@ -272,10 +272,13 @@ export const queueServiceFactory = (
       connection
     });

-    workerContainer[name] = new Worker<TQueueJobTypes[T]["payload"], void, TQueueJobTypes[T]["name"]>(name, jobFn, {
-      ...queueSettings,
-      connection
-    });
+    const appCfg = getConfig();
+    if (appCfg.QUEUE_WORKERS_ENABLED) {
+      workerContainer[name] = new Worker<TQueueJobTypes[T]["payload"], void, TQueueJobTypes[T]["name"]>(name, jobFn, {
+        ...queueSettings,
+        connection
+      });
+    }
   };

   const startPg = async <T extends QueueName>(
@@ -307,6 +310,11 @@ export const queueServiceFactory = (
     event: U,
     listener: WorkerListener<TQueueJobTypes[T]["payload"], void, TQueueJobTypes[T]["name"]>[U]
   ) => {
+    const appCfg = getConfig();
+    if (!appCfg.QUEUE_WORKERS_ENABLED) {
+      return;
+    }
+
     const worker = workerContainer[name];
     worker.on(event, listener);
   };
@@ -1,3 +1,4 @@
+import { requestContext } from "@fastify/request-context";
 import { FastifyRequest } from "fastify";
 import fp from "fastify-plugin";
 import jwt, { JwtPayload } from "jsonwebtoken";
@@ -141,6 +142,12 @@ export const injectIdentity = fp(async (server: FastifyZodProvider) => {
           authMethod: null,
           isInstanceAdmin: serverCfg?.adminIdentityIds?.includes(identity.identityId)
         };
+        if (token?.identityAuth?.oidc) {
+          requestContext.set("identityAuthInfo", {
+            identityId: identity.identityId,
+            oidc: token?.identityAuth?.oidc
+          });
+        }
         break;
       }
       case AuthMode.SERVICE_TOKEN: {
@@ -65,7 +65,7 @@ export const registerSecretScannerGhApp = async (server: FastifyZodProvider) =>
         payload: JSON.stringify(req.body),
         signature: signatureSHA256
       });
-      void res.send("ok");
+      return res.send("ok");
     }
   });
 }
@@ -34,7 +34,7 @@ export const registerServeUI = async (
         TELEMETRY_CAPTURING_ENABLED: appCfg.TELEMETRY_ENABLED
       };
       const js = `window.__INFISICAL_RUNTIME_ENV__ = Object.freeze(${JSON.stringify(config)});`;
-      void res.send(js);
+      return res.send(js);
     }
   });

@@ -57,7 +57,7 @@ export const registerServeUI = async (
         reply.callNotFound();
         return;
       }
-      void reply.sendFile("index.html");
+      return reply.sendFile("index.html");
     }
   });
 }
@@ -1,5 +1,6 @@
 import { CronJob } from "cron";
 import { Knex } from "knex";
+import { monitorEventLoopDelay } from "perf_hooks";
 import { z } from "zod";

 import { registerCertificateEstRouter } from "@app/ee/routes/est/certificate-est-router";
@@ -96,6 +97,7 @@ import { trustedIpDALFactory } from "@app/ee/services/trusted-ip/trusted-ip-dal"
 import { trustedIpServiceFactory } from "@app/ee/services/trusted-ip/trusted-ip-service";
 import { TKeyStoreFactory } from "@app/keystore/keystore";
 import { getConfig, TEnvConfig } from "@app/lib/config/env";
+import { logger } from "@app/lib/logger";
 import { TQueueServiceFactory } from "@app/queue";
 import { readLimit } from "@app/server/config/rateLimiter";
 import { accessTokenQueueServiceFactory } from "@app/services/access-token-queue/access-token-queue";
@@ -246,6 +248,9 @@ import { registerV1Routes } from "./v1";
 import { registerV2Routes } from "./v2";
 import { registerV3Routes } from "./v3";

+const histogram = monitorEventLoopDelay({ resolution: 20 });
+histogram.enable();
+
 export const registerRoutes = async (
   server: FastifyZodProvider,
   {
@@ -1630,6 +1635,18 @@ export const registerRoutes = async (
     const cfg = getConfig();
     const serverCfg = await getServerCfg();

+    const meanLagMs = histogram.mean / 1e6;
+    const maxLagMs = histogram.max / 1e6;
+    const p99LagMs = histogram.percentile(99) / 1e6;
+
+    logger.info(
+      `Event loop stats - Mean: ${meanLagMs.toFixed(2)}ms, Max: ${maxLagMs.toFixed(2)}ms, p99: ${p99LagMs.toFixed(
+        2
+      )}ms`
+    );
+
+    logger.info(`Raw event loop stats: ${JSON.stringify(histogram, null, 2)}`);
+
     // try {
     //   await db.raw("SELECT NOW()");
     // } catch (err) {
@@ -70,6 +70,19 @@ export const DefaultResponseErrorsSchema = {
   })
 };

+export const booleanSchema = z
+  .union([z.boolean(), z.string().trim()])
+  .transform((value) => {
+    if (typeof value === "string") {
+      // ie if not empty, 0 or false, return true
+      return Boolean(value) && Number(value) !== 0 && value.toLowerCase() !== "false";
+    }
+
+    return value;
+  })
+  .optional()
+  .default(true);
+
 export const sapPubSchema = SecretApprovalPoliciesSchema.merge(
   z.object({
     environment: z.object({
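Since the coercion rules are easy to misread, here is how the now-shared schema resolves typical querystring inputs; the values follow directly from the transform above:

```ts
booleanSchema.parse(true);      // true
booleanSchema.parse("false");   // false (case-insensitive match)
booleanSchema.parse("0");       // false (numeric zero)
booleanSchema.parse("");        // false (empty string)
booleanSchema.parse("yes");     // true (any other non-empty string)
booleanSchema.parse(undefined); // true (schema default)
```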
@@ -16,7 +16,12 @@ import { secretsLimit } from "@app/server/config/rateLimiter";
 import { getTelemetryDistinctId } from "@app/server/lib/telemetry";
 import { getUserAgentType } from "@app/server/plugins/audit-log";
 import { verifyAuth } from "@app/server/plugins/auth/verify-auth";
-import { SanitizedDynamicSecretSchema, SanitizedTagSchema, secretRawSchema } from "@app/server/routes/sanitizedSchemas";
+import {
+  booleanSchema,
+  SanitizedDynamicSecretSchema,
+  SanitizedTagSchema,
+  secretRawSchema
+} from "@app/server/routes/sanitizedSchemas";
 import { AuthMode } from "@app/services/auth/auth-type";
 import { ResourceMetadataSchema } from "@app/services/resource-metadata/resource-metadata-schema";
 import { SecretsOrderBy } from "@app/services/secret/secret-types";
@@ -24,20 +29,6 @@ import { PostHogEventTypes } from "@app/services/telemetry/telemetry-types";

 const MAX_DEEP_SEARCH_LIMIT = 500; // arbitrary limit to prevent excessive results

-// handle querystring boolean values
-const booleanSchema = z
-  .union([z.boolean(), z.string().trim()])
-  .transform((value) => {
-    if (typeof value === "string") {
-      // ie if not empty, 0 or false, return true
-      return Boolean(value) && Number(value) !== 0 && value.toLowerCase() !== "false";
-    }
-
-    return value;
-  })
-  .optional()
-  .default(true);
-
 const parseSecretPathSearch = (search?: string) => {
   if (!search)
     return {
@@ -109,6 +100,7 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
         search: z.string().trim().describe(DASHBOARD.SECRET_OVERVIEW_LIST.search).optional(),
         includeSecrets: booleanSchema.describe(DASHBOARD.SECRET_OVERVIEW_LIST.includeSecrets),
         includeFolders: booleanSchema.describe(DASHBOARD.SECRET_OVERVIEW_LIST.includeFolders),
+        includeImports: booleanSchema.describe(DASHBOARD.SECRET_OVERVIEW_LIST.includeImports),
         includeDynamicSecrets: booleanSchema.describe(DASHBOARD.SECRET_OVERVIEW_LIST.includeDynamicSecrets)
       }),
       response: {
@@ -124,9 +116,17 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
           })
             .array()
             .optional(),
+          imports: SecretImportsSchema.omit({ importEnv: true })
+            .extend({
+              importEnv: z.object({ name: z.string(), slug: z.string(), id: z.string() }),
+              environment: z.string()
+            })
+            .array()
+            .optional(),
           totalFolderCount: z.number().optional(),
           totalDynamicSecretCount: z.number().optional(),
           totalSecretCount: z.number().optional(),
+          totalImportCount: z.number().optional(),
           totalCount: z.number()
         })
       }
@@ -143,6 +143,7 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
        orderDirection,
        includeFolders,
        includeSecrets,
+       includeImports,
        includeDynamicSecrets
      } = req.query;

@@ -159,6 +160,7 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
      let remainingLimit = limit;
      let adjustedOffset = offset;

+     let imports: Awaited<ReturnType<typeof server.services.secretImport.getImportsMultiEnv>> | undefined;
      let folders: Awaited<ReturnType<typeof server.services.folder.getFoldersMultiEnv>> | undefined;
      let secrets: Awaited<ReturnType<typeof server.services.secret.getSecretsRawMultiEnv>> | undefined;
      let dynamicSecrets:
@@ -168,6 +170,53 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
      let totalFolderCount: number | undefined;
      let totalDynamicSecretCount: number | undefined;
      let totalSecretCount: number | undefined;
+     let totalImportCount: number | undefined;

+     if (includeImports) {
+       totalImportCount = await server.services.secretImport.getProjectImportMultiEnvCount({
+         actorId: req.permission.id,
+         actor: req.permission.type,
+         actorAuthMethod: req.permission.authMethod,
+         actorOrgId: req.permission.orgId,
+         projectId,
+         environments,
+         path: secretPath,
+         search
+       });
+
+       if (remainingLimit > 0 && totalImportCount > adjustedOffset) {
+         imports = await server.services.secretImport.getImportsMultiEnv({
+           actorId: req.permission.id,
+           actor: req.permission.type,
+           actorAuthMethod: req.permission.authMethod,
+           actorOrgId: req.permission.orgId,
+           projectId,
+           environments,
+           path: secretPath,
+           search,
+           limit: remainingLimit,
+           offset: adjustedOffset
+         });
+
+         await server.services.auditLog.createAuditLog({
+           ...req.auditLogInfo,
+           projectId: req.query.projectId,
+           event: {
+             type: EventType.GET_SECRET_IMPORTS,
+             metadata: {
+               environment: environments.join(","),
+               folderId: imports?.[0]?.folderId,
+               numberOfImports: imports.length
+             }
+           }
+         });
+
+         remainingLimit -= imports.length;
+         adjustedOffset = 0;
+       } else {
+         adjustedOffset = Math.max(0, adjustedOffset - totalImportCount);
+       }
+     }

      if (includeFolders) {
        // this is the unique count, ie duplicate folders across envs only count as 1
@@ -345,10 +394,13 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
        folders,
        dynamicSecrets,
        secrets,
+       imports,
        totalFolderCount,
        totalDynamicSecretCount,
+       totalImportCount,
        totalSecretCount,
-       totalCount: (totalFolderCount ?? 0) + (totalDynamicSecretCount ?? 0) + (totalSecretCount ?? 0)
+       totalCount:
+         (totalFolderCount ?? 0) + (totalDynamicSecretCount ?? 0) + (totalSecretCount ?? 0) + (totalImportCount ?? 0)
      };
    }
  });

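The remainingLimit/adjustedOffset bookkeeping above pages across several heterogeneous lists (imports, then folders, dynamic secrets, secrets) as if they were one combined list. A simplified sketch of the pattern, with generic names standing in for the actual service calls:

// Page across several ordered sources as one combined list.
// Each source exposes a total count plus an offset/limit fetch.
type Source<T> = {
  count: () => Promise<number>;
  fetch: (offset: number, limit: number) => Promise<T[]>;
};

async function pageAcross<T>(sources: Source<T>[], offset: number, limit: number): Promise<T[]> {
  const results: T[] = [];
  let remainingLimit = limit;
  let adjustedOffset = offset;

  for (const source of sources) {
    const total = await source.count();
    if (remainingLimit > 0 && total > adjustedOffset) {
      const rows = await source.fetch(adjustedOffset, remainingLimit);
      results.push(...rows);
      remainingLimit -= rows.length;
      adjustedOffset = 0; // later sources start from their beginning
    } else {
      // the requested window starts past this source; consume its count
      adjustedOffset = Math.max(0, adjustedOffset - total);
    }
  }
  return results;
}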
@@ -24,7 +24,7 @@ const IdentityKubernetesAuthResponseSchema = IdentityKubernetesAuthsSchema.pick(
   allowedAudience: true
 }).extend({
   caCert: z.string(),
-  tokenReviewerJwt: z.string()
+  tokenReviewerJwt: z.string().optional().nullable()
 });

 export const registerIdentityKubernetesRouter = async (server: FastifyZodProvider) => {
@@ -98,7 +98,7 @@ export const registerIdentityKubernetesRouter = async (server: FastifyZodProvide
       .object({
         kubernetesHost: z.string().trim().min(1).describe(KUBERNETES_AUTH.ATTACH.kubernetesHost),
         caCert: z.string().trim().default("").describe(KUBERNETES_AUTH.ATTACH.caCert),
-        tokenReviewerJwt: z.string().trim().min(1).describe(KUBERNETES_AUTH.ATTACH.tokenReviewerJwt),
+        tokenReviewerJwt: z.string().trim().optional().describe(KUBERNETES_AUTH.ATTACH.tokenReviewerJwt),
         allowedNamespaces: z.string().describe(KUBERNETES_AUTH.ATTACH.allowedNamespaces), // TODO: validation
         allowedNames: z.string().describe(KUBERNETES_AUTH.ATTACH.allowedNames),
         allowedAudience: z.string().describe(KUBERNETES_AUTH.ATTACH.allowedAudience),
@@ -195,7 +195,7 @@ export const registerIdentityKubernetesRouter = async (server: FastifyZodProvide
       .object({
         kubernetesHost: z.string().trim().min(1).optional().describe(KUBERNETES_AUTH.UPDATE.kubernetesHost),
         caCert: z.string().trim().optional().describe(KUBERNETES_AUTH.UPDATE.caCert),
-        tokenReviewerJwt: z.string().trim().min(1).optional().describe(KUBERNETES_AUTH.UPDATE.tokenReviewerJwt),
+        tokenReviewerJwt: z.string().trim().nullable().optional().describe(KUBERNETES_AUTH.UPDATE.tokenReviewerJwt),
         allowedNamespaces: z.string().optional().describe(KUBERNETES_AUTH.UPDATE.allowedNamespaces), // TODO: validation
         allowedNames: z.string().optional().describe(KUBERNETES_AUTH.UPDATE.allowedNames),
         allowedAudience: z.string().optional().describe(KUBERNETES_AUTH.UPDATE.allowedAudience),

@@ -24,6 +24,7 @@ const IdentityOidcAuthResponseSchema = IdentityOidcAuthsSchema.pick({
   boundIssuer: true,
   boundAudiences: true,
   boundClaims: true,
+  claimMetadataMapping: true,
   boundSubject: true,
   createdAt: true,
   updatedAt: true
@@ -105,6 +106,7 @@ export const registerIdentityOidcAuthRouter = async (server: FastifyZodProvider)
           boundIssuer: z.string().min(1).describe(OIDC_AUTH.ATTACH.boundIssuer),
           boundAudiences: validateOidcAuthAudiencesField.describe(OIDC_AUTH.ATTACH.boundAudiences),
           boundClaims: validateOidcBoundClaimsField.describe(OIDC_AUTH.ATTACH.boundClaims),
+          claimMetadataMapping: validateOidcBoundClaimsField.describe(OIDC_AUTH.ATTACH.claimMetadataMapping).optional(),
           boundSubject: z.string().optional().default("").describe(OIDC_AUTH.ATTACH.boundSubject),
           accessTokenTrustedIps: z
             .object({
@@ -163,6 +165,7 @@ export const registerIdentityOidcAuthRouter = async (server: FastifyZodProvider)
         boundIssuer: identityOidcAuth.boundIssuer,
         boundAudiences: identityOidcAuth.boundAudiences,
         boundClaims: identityOidcAuth.boundClaims as Record<string, string>,
+        claimMetadataMapping: identityOidcAuth.claimMetadataMapping as Record<string, string>,
         boundSubject: identityOidcAuth.boundSubject as string,
         accessTokenTTL: identityOidcAuth.accessTokenTTL,
         accessTokenMaxTTL: identityOidcAuth.accessTokenMaxTTL,
@@ -202,6 +205,7 @@ export const registerIdentityOidcAuthRouter = async (server: FastifyZodProvider)
           boundIssuer: z.string().min(1).describe(OIDC_AUTH.UPDATE.boundIssuer),
           boundAudiences: validateOidcAuthAudiencesField.describe(OIDC_AUTH.UPDATE.boundAudiences),
           boundClaims: validateOidcBoundClaimsField.describe(OIDC_AUTH.UPDATE.boundClaims),
+          claimMetadataMapping: validateOidcBoundClaimsField.describe(OIDC_AUTH.UPDATE.claimMetadataMapping).optional(),
           boundSubject: z.string().optional().default("").describe(OIDC_AUTH.UPDATE.boundSubject),
           accessTokenTrustedIps: z
             .object({
@@ -260,6 +264,7 @@ export const registerIdentityOidcAuthRouter = async (server: FastifyZodProvider)
         boundIssuer: identityOidcAuth.boundIssuer,
         boundAudiences: identityOidcAuth.boundAudiences,
         boundClaims: identityOidcAuth.boundClaims as Record<string, string>,
+        claimMetadataMapping: identityOidcAuth.claimMetadataMapping as Record<string, string>,
         boundSubject: identityOidcAuth.boundSubject as string,
         accessTokenTTL: identityOidcAuth.accessTokenTTL,
         accessTokenMaxTTL: identityOidcAuth.accessTokenMaxTTL,

@@ -9,6 +9,8 @@ import { readLimit, secretsLimit } from "@app/server/config/rateLimiter";
 import { verifyAuth } from "@app/server/plugins/auth/verify-auth";
 import { AuthMode } from "@app/services/auth/auth-type";

+import { booleanSchema } from "../sanitizedSchemas";
+
 export const registerSecretFolderRouter = async (server: FastifyZodProvider) => {
   server.route({
     url: "/",
@@ -347,11 +349,14 @@ export const registerSecretFolderRouter = async (server: FastifyZodProvider) =>
           .default("/")
           .transform(prefixWithSlash)
           .transform(removeTrailingSlash)
-          .describe(FOLDERS.LIST.directory)
+          .describe(FOLDERS.LIST.directory),
+        recursive: booleanSchema.default(false).describe(FOLDERS.LIST.recursive)
       }),
       response: {
         200: z.object({
-          folders: SecretFoldersSchema.array()
+          folders: SecretFoldersSchema.extend({
+            relativePath: z.string().optional()
+          }).array()
         })
       }
     },

@@ -7,4 +7,9 @@ export type TIdentityAccessTokenJwtPayload = {
   clientSecretId: string;
   identityAccessTokenId: string;
   authTokenType: string;
+  identityAuth: {
+    oidc?: {
+      claims: Record<string, string>;
+    };
+  };
 };

@@ -11,6 +11,7 @@ import { validatePermissionBoundary } from "@app/lib/casl/boundary";
 import { getConfig } from "@app/lib/config/env";
 import { BadRequestError, ForbiddenRequestError, NotFoundError, UnauthorizedError } from "@app/lib/errors";
 import { extractIPDetails, isValidIpOrCidr } from "@app/lib/ip";
+import { getStringValueByDot } from "@app/lib/template/dot-access";

 import { ActorType, AuthTokenType } from "../auth/auth-type";
 import { TIdentityOrgDALFactory } from "../identity/identity-org-dal";
@@ -178,8 +179,9 @@ export const identityJwtAuthServiceFactory = ({
     if (identityJwtAuth.boundClaims) {
       Object.keys(identityJwtAuth.boundClaims).forEach((claimKey) => {
         const claimValue = (identityJwtAuth.boundClaims as Record<string, string>)[claimKey];
+        const value = getStringValueByDot(tokenData, claimKey) || "";

-        if (!tokenData[claimKey]) {
+        if (!value) {
           throw new UnauthorizedError({
             message: `Access denied: token has no ${claimKey} field`
           });

@@ -84,6 +84,9 @@ export const identityKubernetesAuthServiceFactory = ({
       tokenReviewerJwt = decryptor({
         cipherTextBlob: identityKubernetesAuth.encryptedKubernetesTokenReviewerJwt
       }).toString();
+    } else {
+      // if no token reviewer is provided means the incoming token has to act as reviewer
+      tokenReviewerJwt = serviceAccountJwt;
     }

     const { data } = await axios
@@ -102,7 +105,8 @@ export const identityKubernetesAuthServiceFactory = ({
           "Content-Type": "application/json",
           Authorization: `Bearer ${tokenReviewerJwt}`
         },
-        signal: AbortSignal.timeout(10000),
+
+        timeout: 10000,
         // if ca cert, rejectUnauthorized: true
         httpsAgent: new https.Agent({
           ca: caCert,
@@ -290,7 +294,9 @@ export const identityKubernetesAuthServiceFactory = ({
           accessTokenTTL,
           accessTokenNumUsesLimit,
           accessTokenTrustedIps: JSON.stringify(reformattedAccessTokenTrustedIps),
-          encryptedKubernetesTokenReviewerJwt: encryptor({ plainText: Buffer.from(tokenReviewerJwt) }).cipherTextBlob,
+          encryptedKubernetesTokenReviewerJwt: tokenReviewerJwt
+            ? encryptor({ plainText: Buffer.from(tokenReviewerJwt) }).cipherTextBlob
+            : null,
           encryptedKubernetesCaCertificate: encryptor({ plainText: Buffer.from(caCert) }).cipherTextBlob
         },
         tx
@@ -386,10 +392,12 @@ export const identityKubernetesAuthServiceFactory = ({
       updateQuery.encryptedKubernetesCaCertificate = encryptor({ plainText: Buffer.from(caCert) }).cipherTextBlob;
     }

-    if (tokenReviewerJwt !== undefined) {
+    if (tokenReviewerJwt) {
       updateQuery.encryptedKubernetesTokenReviewerJwt = encryptor({
         plainText: Buffer.from(tokenReviewerJwt)
       }).cipherTextBlob;
+    } else if (tokenReviewerJwt === null) {
+      updateQuery.encryptedKubernetesTokenReviewerJwt = null;
     }

     const updatedKubernetesAuth = await identityKubernetesAuthDAL.updateById(identityKubernetesAuth.id, updateQuery);

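The update path above gives tokenReviewerJwt three states: a string re-encrypts, an explicit null clears the stored reviewer (so the incoming service-account token acts as its own reviewer), and undefined leaves the stored value untouched. A minimal sketch of that convention, using a hypothetical helper rather than the actual DAL:

// Tri-state optional field: undefined = keep, null = clear, value = replace.
type UpdatePatch = { tokenReviewerJwt?: string | null };

function buildUpdate(patch: UpdatePatch, encrypt: (plain: string) => Buffer) {
  const update: { encryptedTokenReviewerJwt?: Buffer | null } = {};
  if (patch.tokenReviewerJwt) {
    update.encryptedTokenReviewerJwt = encrypt(patch.tokenReviewerJwt);
  } else if (patch.tokenReviewerJwt === null) {
    update.encryptedTokenReviewerJwt = null; // fall back to the incoming token as reviewer
  }
  // undefined: field omitted, existing ciphertext preserved
  return update;
}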
@@ -9,7 +9,7 @@ export type TAttachKubernetesAuthDTO = {
   identityId: string;
   kubernetesHost: string;
   caCert: string;
-  tokenReviewerJwt: string;
+  tokenReviewerJwt?: string;
   allowedNamespaces: string;
   allowedNames: string;
   allowedAudience: string;
@@ -24,7 +24,7 @@ export type TUpdateKubernetesAuthDTO = {
   identityId: string;
   kubernetesHost?: string;
   caCert?: string;
-  tokenReviewerJwt?: string;
+  tokenReviewerJwt?: string | null;
   allowedNamespaces?: string;
   allowedNames?: string;
   allowedAudience?: string;

@@ -12,6 +12,7 @@ import { validatePermissionBoundary } from "@app/lib/casl/boundary";
 import { getConfig } from "@app/lib/config/env";
 import { BadRequestError, ForbiddenRequestError, NotFoundError, UnauthorizedError } from "@app/lib/errors";
 import { extractIPDetails, isValidIpOrCidr } from "@app/lib/ip";
+import { getStringValueByDot } from "@app/lib/template/dot-access";

 import { ActorType, AuthTokenType } from "../auth/auth-type";
 import { TIdentityOrgDALFactory } from "../identity/identity-org-dal";
@@ -78,7 +79,7 @@ export const identityOidcAuthServiceFactory = ({
     const { data: discoveryDoc } = await axios.get<{ jwks_uri: string }>(
       `${identityOidcAuth.oidcDiscoveryUrl}/.well-known/openid-configuration`,
       {
-        httpsAgent: requestAgent
+        httpsAgent: identityOidcAuth.oidcDiscoveryUrl.includes("https") ? requestAgent : undefined
       }
     );
     const jwksUri = discoveryDoc.jwks_uri;
@@ -92,7 +93,7 @@ export const identityOidcAuthServiceFactory = ({

     const client = new JwksClient({
       jwksUri,
-      requestAgent
+      requestAgent: identityOidcAuth.oidcDiscoveryUrl.includes("https") ? requestAgent : undefined
     });

     const { kid } = decodedToken.header;
@@ -109,7 +110,6 @@ export const identityOidcAuthServiceFactory = ({
         message: `Access denied: ${error.message}`
       });
     }
-
     throw error;
   }

@@ -136,10 +136,16 @@ export const identityOidcAuthServiceFactory = ({
     if (identityOidcAuth.boundClaims) {
       Object.keys(identityOidcAuth.boundClaims).forEach((claimKey) => {
         const claimValue = (identityOidcAuth.boundClaims as Record<string, string>)[claimKey];
+        const value = getStringValueByDot(tokenData, claimKey) || "";
+
+        if (!value) {
+          throw new UnauthorizedError({
+            message: `Access denied: token has no ${claimKey} field`
+          });
+        }
+
         // handle both single and multi-valued claims
-        if (
-          !claimValue.split(", ").some((claimEntry) => doesFieldValueMatchOidcPolicy(tokenData[claimKey], claimEntry))
-        ) {
+        if (!claimValue.split(", ").some((claimEntry) => doesFieldValueMatchOidcPolicy(value, claimEntry))) {
           throw new UnauthorizedError({
             message: "Access denied: OIDC claim not allowed."
           });
@@ -147,6 +153,20 @@ export const identityOidcAuthServiceFactory = ({
       });
     }

+    const filteredClaims: Record<string, string> = {};
+    if (identityOidcAuth.claimMetadataMapping) {
+      Object.keys(identityOidcAuth.claimMetadataMapping).forEach((permissionKey) => {
+        const claimKey = (identityOidcAuth.claimMetadataMapping as Record<string, string>)[permissionKey];
+        const value = getStringValueByDot(tokenData, claimKey) || "";
+        if (!value) {
+          throw new UnauthorizedError({
+            message: `Access denied: token has no ${claimKey} field`
+          });
+        }
+        filteredClaims[permissionKey] = value;
+      });
+    }
+
     const identityAccessToken = await identityOidcAuthDAL.transaction(async (tx) => {
       const newToken = await identityAccessTokenDAL.create(
         {
@@ -168,7 +188,12 @@ export const identityOidcAuthServiceFactory = ({
       {
         identityId: identityOidcAuth.identityId,
         identityAccessTokenId: identityAccessToken.id,
-        authTokenType: AuthTokenType.IDENTITY_ACCESS_TOKEN
+        authTokenType: AuthTokenType.IDENTITY_ACCESS_TOKEN,
+        identityAuth: {
+          oidc: {
+            claims: filteredClaims
+          }
+        }
       } as TIdentityAccessTokenJwtPayload,
       appCfg.AUTH_SECRET,
       // akhilmhdh: for non-expiry tokens you should not even set the value, including undefined. Even for undefined jsonwebtoken throws error
@@ -189,6 +214,7 @@ export const identityOidcAuthServiceFactory = ({
     boundIssuer,
     boundAudiences,
     boundClaims,
+    claimMetadataMapping,
     boundSubject,
     accessTokenTTL,
     accessTokenMaxTTL,
@@ -257,6 +283,7 @@ export const identityOidcAuthServiceFactory = ({
       boundIssuer,
       boundAudiences,
       boundClaims,
+      claimMetadataMapping,
       boundSubject,
       accessTokenMaxTTL,
       accessTokenTTL,
@@ -277,6 +304,7 @@ export const identityOidcAuthServiceFactory = ({
     boundIssuer,
     boundAudiences,
     boundClaims,
+    claimMetadataMapping,
     boundSubject,
     accessTokenTTL,
     accessTokenMaxTTL,
@@ -338,6 +366,7 @@ export const identityOidcAuthServiceFactory = ({
       boundIssuer,
      boundAudiences,
      boundClaims,
+     claimMetadataMapping,
      boundSubject,
      accessTokenMaxTTL,
      accessTokenTTL,

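The claimMetadataMapping flow above copies selected token claims, looked up by dot path, into the issued access token payload. A rough standalone sketch of the lookup-and-filter step, with a simplified dot-path helper standing in for getStringValueByDot:

// Simplified stand-in for getStringValueByDot: walk "a.b.c" through a nested object.
const getByDot = (obj: unknown, path: string): string | undefined =>
  path
    .split(".")
    .reduce<unknown>(
      (acc, key) => (acc && typeof acc === "object" ? (acc as Record<string, unknown>)[key] : undefined),
      obj
    ) as string | undefined;

// mapping: { permissionKey -> claim dot-path }, e.g. { team: "realm_access.team" }
function filterClaims(tokenData: Record<string, unknown>, mapping: Record<string, string>) {
  const filtered: Record<string, string> = {};
  for (const [permissionKey, claimPath] of Object.entries(mapping)) {
    const value = getByDot(tokenData, claimPath) || "";
    if (!value) throw new Error(`token has no ${claimPath} field`);
    filtered[permissionKey] = value;
  }
  return filtered;
}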
@@ -7,6 +7,7 @@ export type TAttachOidcAuthDTO = {
   boundIssuer: string;
   boundAudiences: string;
   boundClaims: Record<string, string>;
+  claimMetadataMapping?: Record<string, string>;
   boundSubject: string;
   accessTokenTTL: number;
   accessTokenMaxTTL: number;
@@ -22,6 +23,7 @@ export type TUpdateOidcAuthDTO = {
   boundIssuer?: string;
   boundAudiences?: string;
   boundClaims?: Record<string, string>;
+  claimMetadataMapping?: Record<string, string>;
   boundSubject?: string;
   accessTokenTTL?: number;
   accessTokenMaxTTL?: number;

@@ -69,9 +69,15 @@ export const identityUaServiceFactory = ({
       isClientSecretRevoked: false
     });

-    const validClientSecretInfo = clientSecrtInfo.find(({ clientSecretHash }) =>
-      bcrypt.compareSync(clientSecret, clientSecretHash)
-    );
+    let validClientSecretInfo: (typeof clientSecrtInfo)[0] | null = null;
+    for await (const info of clientSecrtInfo) {
+      const isMatch = await bcrypt.compare(clientSecret, info.clientSecretHash);
+      if (isMatch) {
+        validClientSecretInfo = info;
+        break;
+      }
+    }
+
     if (!validClientSecretInfo) throw new UnauthorizedError({ message: "Invalid credentials" });

     const { clientSecretTTL, clientSecretNumUses, clientSecretNumUsesLimit } = validClientSecretInfo;
@@ -104,7 +110,7 @@ export const identityUaServiceFactory = ({
     }

     const identityAccessToken = await identityUaDAL.transaction(async (tx) => {
-      const uaClientSecretDoc = await identityUaClientSecretDAL.incrementUsage(validClientSecretInfo.id, tx);
+      const uaClientSecretDoc = await identityUaClientSecretDAL.incrementUsage(validClientSecretInfo!.id, tx);
       const newToken = await identityAccessTokenDAL.create(
         {
           identityId: identityUa.identityId,

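The change above swaps bcrypt.compareSync inside Array.find for an explicit loop over the async bcrypt.compare, so password hashing no longer blocks the event loop and the scan still short-circuits on the first match. A minimal standalone sketch, assuming the bcrypt npm package:

import bcrypt from "bcrypt";

// Sequentially test a plaintext secret against candidate hashes without
// blocking the event loop; stop at the first match.
async function findMatching<T extends { clientSecretHash: string }>(
  secret: string,
  candidates: T[]
): Promise<T | null> {
  for (const candidate of candidates) {
    if (await bcrypt.compare(secret, candidate.clientSecretHash)) return candidate;
  }
  return null;
}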
@@ -923,16 +923,14 @@ const getAppsCodefresh = async ({ accessToken }: { accessToken: string }) => {
 /**
  * Return list of projects for Windmill integration
  */
-const getAppsWindmill = async ({ accessToken }: { accessToken: string }) => {
-  const { data } = await request.get<{ id: string; name: string }[]>(
-    `${IntegrationUrls.WINDMILL_API_URL}/workspaces/list`,
-    {
-      headers: {
-        Authorization: `Bearer ${accessToken}`,
-        "Accept-Encoding": "application/json"
-      }
-    }
-  );
+const getAppsWindmill = async ({ accessToken, url }: { accessToken: string; url?: string | null }) => {
+  const apiUrl = url ? `${url}/api` : IntegrationUrls.WINDMILL_API_URL;
+  const { data } = await request.get<{ id: string; name: string }[]>(`${apiUrl}/workspaces/list`, {
+    headers: {
+      Authorization: `Bearer ${accessToken}`,
+      "Accept-Encoding": "application/json"
+    }
+  });

   // check for write access of secrets in windmill workspaces
   const writeAccessCheck = data.map(async (app) => {
@@ -941,7 +939,7 @@ const getAppsWindmill = async ({ accessToken }: { accessToken: string }) => {
     const folderPath = "f/folder/variable";

     const { data: writeUser } = await request.post<object>(
-      `${IntegrationUrls.WINDMILL_API_URL}/w/${app.id}/variables/create`,
+      `${apiUrl}/w/${app.id}/variables/create`,
       {
         path: userPath,
         value: "variable",
@@ -957,7 +955,7 @@ const getAppsWindmill = async ({ accessToken }: { accessToken: string }) => {
     );

     const { data: writeFolder } = await request.post<object>(
-      `${IntegrationUrls.WINDMILL_API_URL}/w/${app.id}/variables/create`,
+      `${apiUrl}/w/${app.id}/variables/create`,
       {
         path: folderPath,
         value: "variable",
@@ -974,14 +972,14 @@ const getAppsWindmill = async ({ accessToken }: { accessToken: string }) => {

     // is write access is allowed then delete the created secrets from workspace
     if (writeUser && writeFolder) {
-      await request.delete(`${IntegrationUrls.WINDMILL_API_URL}/w/${app.id}/variables/delete/${userPath}`, {
+      await request.delete(`${apiUrl}/w/${app.id}/variables/delete/${userPath}`, {
         headers: {
           Authorization: `Bearer ${accessToken}`,
           "Accept-Encoding": "application/json"
         }
       });

-      await request.delete(`${IntegrationUrls.WINDMILL_API_URL}/w/${app.id}/variables/delete/${folderPath}`, {
+      await request.delete(`${apiUrl}/w/${app.id}/variables/delete/${folderPath}`, {
         headers: {
           Authorization: `Bearer ${accessToken}`,
           "Accept-Encoding": "application/json"
@@ -1316,7 +1314,8 @@ export const getApps = async ({

     case Integrations.WINDMILL:
       return getAppsWindmill({
-        accessToken
+        accessToken,
+        url
       });

     case Integrations.DIGITAL_OCEAN_APP_PLATFORM:

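The Windmill changes above thread an optional self-hosted base URL through every API call: when url is set, requests go to `${url}/api`, otherwise to the cloud default. A small sketch of the convention; WINDMILL_CLOUD_API_URL stands in for the IntegrationUrls.WINDMILL_API_URL constant and the cloud address shown is only illustrative:

const WINDMILL_CLOUD_API_URL = "https://app.windmill.dev/api"; // stand-in for IntegrationUrls.WINDMILL_API_URL

// Self-hosted Windmill instances expose the same API under <baseUrl>/api.
const windmillApiUrl = (url?: string | null) => (url ? `${url}/api` : WINDMILL_CLOUD_API_URL);

// e.g. listing workspaces against a self-hosted instance:
// GET `${windmillApiUrl("https://windmill.internal.example.com")}/workspaces/list`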
@@ -4127,10 +4127,10 @@ const syncSecretsWindmill = async ({
     is_secret: boolean;
     description?: string;
   }
-
+  const apiUrl = integration.url ? `${integration.url}/api` : IntegrationUrls.WINDMILL_API_URL;
   // get secrets stored in windmill workspace
   const res = (
-    await request.get<WindmillSecret[]>(`${IntegrationUrls.WINDMILL_API_URL}/w/${integration.appId}/variables/list`, {
+    await request.get<WindmillSecret[]>(`${apiUrl}/w/${integration.appId}/variables/list`, {
       headers: {
         Authorization: `Bearer ${accessToken}`,
         "Accept-Encoding": "application/json"
@@ -4146,7 +4146,6 @@ const syncSecretsWindmill = async ({

   // eslint-disable-next-line
   const pattern = new RegExp("^(u/|f/)[a-zA-Z0-9_-]+/([a-zA-Z0-9_-]+/)*[a-zA-Z0-9_-]*[^/]$");
-
   for await (const key of Object.keys(secrets)) {
     if ((key.startsWith("u/") || key.startsWith("f/")) && pattern.test(key)) {
       if (!(key in res)) {
@@ -4154,7 +4153,7 @@ const syncSecretsWindmill = async ({
         // -> create secret

         await request.post(
-          `${IntegrationUrls.WINDMILL_API_URL}/w/${integration.appId}/variables/create`,
+          `${apiUrl}/w/${integration.appId}/variables/create`,
           {
             path: key,
             value: secrets[key].value,
@@ -4171,7 +4170,7 @@ const syncSecretsWindmill = async ({
       } else {
         // -> update secret
         await request.post(
-          `${IntegrationUrls.WINDMILL_API_URL}/w/${integration.appId}/variables/update/${res[key].path}`,
+          `${apiUrl}/w/${integration.appId}/variables/update/${res[key].path}`,
           {
             path: key,
             value: secrets[key].value,
@@ -4192,16 +4191,13 @@ const syncSecretsWindmill = async ({
   for await (const key of Object.keys(res)) {
     if (!(key in secrets)) {
       // -> delete secret
-      await request.delete(
-        `${IntegrationUrls.WINDMILL_API_URL}/w/${integration.appId}/variables/delete/${res[key].path}`,
-        {
-          headers: {
-            Authorization: `Bearer ${accessToken}`,
-            "Content-Type": "application/json",
-            "Accept-Encoding": "application/json"
-          }
-        }
-      );
+      await request.delete(`${apiUrl}/w/${integration.appId}/variables/delete/${res[key].path}`, {
+        headers: {
+          Authorization: `Bearer ${accessToken}`,
+          "Content-Type": "application/json",
+          "Accept-Encoding": "application/json"
+        }
+      });
     }
   }
 };

@@ -9,6 +9,7 @@ import {
   ProjectPermissionSub
 } from "@app/ee/services/permission/project-permission";
 import { BadRequestError, NotFoundError } from "@app/lib/errors";
+import { validateHandlebarTemplate } from "@app/lib/template/validate-handlebars";
 import { UnpackedPermissionSchema } from "@app/server/routes/sanitizedSchema/permission";

 import { ActorAuthMethod } from "../auth/auth-type";
@@ -72,6 +73,9 @@ export const projectRoleServiceFactory = ({
       throw new BadRequestError({ name: "Create Role", message: "Project role with same slug already exists" });
     }

+    validateHandlebarTemplate("Project Role Create", JSON.stringify(data.permissions || []), {
+      allowedExpressions: (val) => val.includes("identity.")
+    });
     const role = await projectRoleDAL.create({
       ...data,
       projectId
@@ -134,7 +138,9 @@ export const projectRoleServiceFactory = ({
       if (existingRole && existingRole.id !== roleId)
         throw new BadRequestError({ name: "Update Role", message: "Project role with the same slug already exists" });
     }
-
+    validateHandlebarTemplate("Project Role Update", JSON.stringify(data.permissions || []), {
+      allowedExpressions: (val) => val.includes("identity.")
+    });
     const updatedRole = await projectRoleDAL.updateById(projectRole.id, {
       ...data,
       permissions: data.permissions ? data.permissions : undefined

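The validateHandlebarTemplate calls above reject permission documents whose template expressions reference anything other than identity.* attributes. A hedged sketch of what that guard allows and blocks; the permission shapes below are illustrative only, not the real schema:

// Permissions may embed handlebars expressions, but only identity.* lookups pass.
const allowed = [
  { subject: "secrets", conditions: { environment: "{{identity.metadata.environment}}" } }
];
const rejected = [
  { subject: "secrets", conditions: { environment: "{{user.email}}" } } // not identity.* -> validation error
];

// conceptually: every "{{...}}" expression found in JSON.stringify(permissions)
// must satisfy (val) => val.includes("identity.")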
@@ -1,7 +1,7 @@
 import { Knex } from "knex";

 import { TDbClient } from "@app/db";
-import { TableName, TProjectEnvironments, TSecretFolders, TSecretFoldersUpdate } from "@app/db/schemas";
+import { TableName, TSecretFolders, TSecretFoldersUpdate } from "@app/db/schemas";
 import { BadRequestError, DatabaseError } from "@app/lib/errors";
 import { groupBy, removeTrailingSlash } from "@app/lib/fn";
 import { ormify, selectAllTableCols } from "@app/lib/knex";
@@ -41,12 +41,12 @@ const sqlFindMultipleFolderByEnvPathQuery = (db: Knex, query: Array<{ envId: str
   void baseQb
     .select({
       depth: 1,
-      // latestFolderVerId: db.raw("NULL::uuid"),
       path: db.raw("'/'")
     })
     .from(TableName.SecretFolder)
     .where({
-      parentId: null
+      parentId: null,
+      name: "root"
     })
     .whereIn(
       "envId",
@@ -69,9 +69,7 @@ const sqlFindMultipleFolderByEnvPathQuery = (db: Knex, query: Array<{ envId: str
         .where((wb) =>
           formatedQuery.map(({ secretPath }) =>
             wb.orWhereRaw(
-              `depth = array_position(ARRAY[${secretPath.map(() => "?").join(",")}]::varchar[], ${
-                TableName.SecretFolder
-              }.name,depth)`,
+              `secret_folders.name = (ARRAY[${secretPath.map(() => "?").join(",")}]::varchar[])[depth]`,
               [...secretPath]
             )
           )
@@ -107,7 +105,6 @@ const sqlFindFolderByPathQuery = (db: Knex, projectId: string, environments: str
   void baseQb
     .select({
       depth: 1,
-      // latestFolderVerId: db.raw("NULL::uuid"),
       path: db.raw("'/'")
     })
     .from(TableName.SecretFolder)
@@ -117,6 +114,11 @@ const sqlFindFolderByPathQuery = (db: Knex, projectId: string, environments: str
       parentId: null
     })
     .whereIn(`${TableName.Environment}.slug`, environments)
+    .select(
+      db.ref("slug").withSchema(TableName.Environment).as("envSlug"),
+      db.ref("name").withSchema(TableName.Environment).as("envName"),
+      db.ref("projectId").withSchema(TableName.Environment)
+    )
     .select(selectAllTableCols(TableName.SecretFolder))
     .union(
       (qb) =>
@@ -128,21 +130,20 @@ const sqlFindFolderByPathQuery = (db: Knex, projectId: string, environments: str
             depth: db.raw("parent.depth + 1"),
             path: db.raw(
               "CONCAT((CASE WHEN parent.path = '/' THEN '' ELSE parent.path END),'/', secret_folders.name)"
-            )
+            ),
+            envSlug: db.ref("envSlug").withSchema("parent"),
+            envName: db.ref("envName").withSchema("parent"),
+            projectId: db.ref("projectId").withSchema("parent")
           })
           .select(selectAllTableCols(TableName.SecretFolder))
-          .whereRaw(
-            `depth = array_position(ARRAY[${pathSegments
-              .map(() => "?")
-              .join(",")}]::varchar[], secret_folders.name,depth)`,
-            [...pathSegments]
-          )
+          .whereRaw(`secret_folders.name = (ARRAY[${pathSegments.map(() => "?").join(",")}]::varchar[])[depth]`, [
+            ...pathSegments
+          ])
           .from(TableName.SecretFolder)
           .join("parent", "parent.id", `${TableName.SecretFolder}.parentId`)
     );
   })
     .from<TSecretFolders & { depth: number; path: string }>("parent")
-    .leftJoin<TProjectEnvironments>(TableName.Environment, `${TableName.Environment}.id`, "parent.envId")
     .select<
       (TSecretFolders & {
         depth: number;
@@ -152,13 +153,7 @@ const sqlFindFolderByPathQuery = (db: Knex, projectId: string, environments: str
         envName: string;
         projectId: string;
       })[]
-    >(
-      selectAllTableCols("parent" as TableName.SecretFolder),
-      db.ref("id").withSchema(TableName.Environment).as("envId"),
-      db.ref("slug").withSchema(TableName.Environment).as("envSlug"),
-      db.ref("name").withSchema(TableName.Environment).as("envName"),
-      db.ref("projectId").withSchema(TableName.Environment)
-    );
+    >(selectAllTableCols("parent" as TableName.SecretFolder));
 };

 const sqlFindSecretPathByFolderId = (db: Knex, projectId: string, folderIds: string[]) =>
@@ -220,19 +215,12 @@ export const secretFolderDALFactory = (db: TDbClient) => {
       throw new BadRequestError({
         message: "Invalid secret path. Only alphanumeric characters, dashes, and underscores are allowed."
       });
-
+    const formatedPath = removeTrailingSlash(path);
     try {
-      const folder = await sqlFindFolderByPathQuery(
-        tx || db.replicaNode(),
-        projectId,
-        [environment],
-        removeTrailingSlash(path)
-      )
-        .orderBy("depth", "desc")
+      const query = sqlFindFolderByPathQuery(tx || db.replicaNode(), projectId, [environment], formatedPath)
+        .where("path", formatedPath)
         .first();
-      if (folder && folder.path !== removeTrailingSlash(path)) {
-        return;
-      }
+      const folder = await query;
+      if (!folder) return;
       const { envId: id, envName: name, envSlug: slug, ...el } = folder;
       return { ...el, envId: id, environment: { id, name, slug } };
@@ -250,22 +238,13 @@ export const secretFolderDALFactory = (db: TDbClient) => {
       });

     try {
-      const pathDepth = removeTrailingSlash(path).split("/").filter(Boolean).length + 1;
-
+      const formatedPath = removeTrailingSlash(path);
       const folders = await sqlFindFolderByPathQuery(
         tx || db.replicaNode(),
         projectId,
         environments,
-        removeTrailingSlash(path)
-      )
-        .orderBy("depth", "desc")
-        .where("depth", pathDepth);
-
-      const firstFolder = folders[0];
-
-      if (firstFolder && firstFolder.path !== removeTrailingSlash(path)) {
-        return [];
-      }
+        formatedPath
+      ).where("path", removeTrailingSlash(path));

       return folders.map((folder) => {
         const { envId: id, envName: name, envSlug: slug, ...el } = folder;
@@ -323,7 +302,6 @@ export const secretFolderDALFactory = (db: TDbClient) => {
   const findSecretPathByFolderIds = async (projectId: string, folderIds: string[], tx?: Knex) => {
     try {
       const folders = await sqlFindSecretPathByFolderId(tx || db.replicaNode(), projectId, folderIds);
-
       // travelling all the way from leaf node to root contains real path
       const rootFolders = groupBy(
         folders.filter(({ parentId }) => parentId === null),

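Both recursive folder queries above replace the array_position(...) = depth predicate with direct array indexing: the folder's name must equal the path segment stored at that depth (Postgres arrays are 1-based), which states the constraint directly and pairs with the new name: "root" anchor on the base case. A minimal sketch of the new predicate as a knex whereRaw, assuming a query over secret_folders with a depth column:

import { Knex } from "knex";

// `segments` are the path segments, e.g. ["root", "app", "db"] for /app/db
const wherePathSegmentMatches = (qb: Knex.QueryBuilder, segments: string[]) =>
  qb.whereRaw(
    `secret_folders.name = (ARRAY[${segments.map(() => "?").join(",")}]::varchar[])[depth]`,
    segments
  );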
@@ -401,7 +401,8 @@ export const secretFolderServiceFactory = ({
     orderBy,
     orderDirection,
     limit,
-    offset
+    offset,
+    recursive
   }: TGetFolderDTO) => {
     // folder list is allowed to be read by anyone
     // permission to check does user has access
@@ -420,6 +421,17 @@ export const secretFolderServiceFactory = ({
     const parentFolder = await folderDAL.findBySecretPath(projectId, environment, secretPath);
     if (!parentFolder) return [];

+    if (recursive) {
+      const recursiveFolders = await folderDAL.findByEnvsDeep({ parentIds: [parentFolder.id] });
+      // remove the parent folder
+      return recursiveFolders
+        .filter((folder) => folder.id !== parentFolder.id)
+        .map((folder) => ({
+          ...folder,
+          relativePath: folder.path
+        }));
+    }
+
     const folders = await folderDAL.find(
       {
         envId: env.id,

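With the recursive flag above, the folder list endpoint returns the whole subtree under the requested directory, each entry carrying its path relative to that directory in relativePath. A hedged sketch of consuming the response; the endpoint shape and field set below are assumptions for illustration:

// GET /api/v1/folders?workspaceId=...&environment=dev&directory=/app&recursive=true
type FolderListItem = { id: string; name: string; relativePath?: string };

// relativePath is only present for recursive listings; flat listings omit it.
const byDepth = (folders: FolderListItem[]) =>
  [...folders].sort(
    (a, b) => (a.relativePath ?? "").split("/").length - (b.relativePath ?? "").split("/").length
  );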
@@ -45,6 +45,7 @@ export type TGetFolderDTO = {
   orderDirection?: OrderByDirection;
   limit?: number;
   offset?: number;
+  recursive?: boolean;
 } & TProjectPermission;

 export type TGetFolderByIdDTO = {

@@ -469,6 +469,58 @@ export const secretImportServiceFactory = ({
     return count;
   };

+  const getProjectImportMultiEnvCount = async ({
+    path: secretPath,
+    environments,
+    projectId,
+    actor,
+    actorId,
+    actorAuthMethod,
+    actorOrgId,
+    search
+  }: Omit<TGetSecretImportsDTO, "environment"> & { environments: string[] }) => {
+    const { permission } = await permissionService.getProjectPermission({
+      actor,
+      actorId,
+      projectId,
+      actorAuthMethod,
+      actorOrgId,
+      actionProjectType: ActionProjectType.SecretManager
+    });
+    const filteredEnvironments = [];
+    for (const environment of environments) {
+      if (
+        permission.can(
+          ProjectPermissionActions.Read,
+          subject(ProjectPermissionSub.SecretImports, { environment, secretPath })
+        )
+      ) {
+        filteredEnvironments.push(environment);
+      }
+    }
+    if (filteredEnvironments.length === 0) {
+      return 0;
+    }
+
+    for (const environment of filteredEnvironments) {
+      ForbiddenError.from(permission).throwUnlessCan(
+        ProjectPermissionActions.Read,
+        subject(ProjectPermissionSub.SecretImports, { environment, secretPath })
+      );
+    }
+
+    const folders = await folderDAL.findBySecretPathMultiEnv(projectId, environments, secretPath);
+    if (!folders?.length)
+      throw new NotFoundError({
+        message: `Folder with path '${secretPath}' not found on environments with slugs '${environments.join(", ")}'`
+      });
+    const counts = await Promise.all(
+      folders.map((folder) => secretImportDAL.getProjectImportCount({ folderId: folder.id, search }))
+    );
+
+    return counts.reduce((sum, count) => sum + count, 0);
+  };
+
   const getImports = async ({
     path: secretPath,
     environment,
@@ -688,6 +740,59 @@ export const secretImportServiceFactory = ({
     }));
   };

+  const getImportsMultiEnv = async ({
+    path: secretPath,
+    environments,
+    projectId,
+    actor,
+    actorId,
+    actorAuthMethod,
+    actorOrgId,
+    search,
+    limit,
+    offset
+  }: Omit<TGetSecretImportsDTO, "environment"> & { environments: string[] }) => {
+    const { permission } = await permissionService.getProjectPermission({
+      actor,
+      actorId,
+      projectId,
+      actorAuthMethod,
+      actorOrgId,
+      actionProjectType: ActionProjectType.SecretManager
+    });
+    const filteredEnvironments = [];
+    for (const environment of environments) {
+      if (
+        permission.can(
+          ProjectPermissionActions.Read,
+          subject(ProjectPermissionSub.SecretImports, { environment, secretPath })
+        )
+      ) {
+        filteredEnvironments.push(environment);
+      }
+    }
+    if (filteredEnvironments.length === 0) {
+      return [];
+    }
+
+    const folders = await folderDAL.findBySecretPathMultiEnv(projectId, filteredEnvironments, secretPath);
+    if (!folders?.length)
+      throw new NotFoundError({
+        message: `Folder with path '${secretPath}' not found on environments with slugs '${environments.join(", ")}'`
+      });
+
+    const secImportsArrays = await Promise.all(
+      folders.map(async (folder) => {
+        const imports = await secretImportDAL.find({ folderId: folder.id, search, limit, offset });
+        return imports.map((importItem) => ({
+          ...importItem,
+          environment: folder.environment.slug
+        }));
+      })
+    );
+
+    return secImportsArrays.flat();
+  };
+
   return {
     createImport,
     updateImport,
@@ -698,6 +803,8 @@ export const secretImportServiceFactory = ({
     getRawSecretsFromImports,
     resyncSecretImportReplication,
     getProjectImportCount,
-    fnSecretsFromImports
+    fnSecretsFromImports,
+    getProjectImportMultiEnvCount,
+    getImportsMultiEnv
   };
 };

@@ -382,6 +382,8 @@ export const AwsParameterStoreSyncFns = {
       }
     }

+    if (syncOptions.disableSecretDeletion) return;
+
     const parametersToDelete: AWS.SSM.Parameter[] = [];

     for (const entry of Object.entries(awsParameterStoreSecretsRecord)) {

@@ -396,6 +396,8 @@ export const AwsSecretsManagerSyncFns = {
       }
     }

+    if (syncOptions.disableSecretDeletion) return;
+
     for await (const secretKey of Object.keys(awsSecretsRecord)) {
       if (!(secretKey in secretMap) || !secretMap[secretKey].value) {
         try {

@@ -136,6 +136,8 @@ export const azureAppConfigurationSyncFactory = ({
       }
     }

+    if (secretSync.syncOptions.disableSecretDeletion) return;
+
     for await (const key of Object.keys(azureAppConfigSecrets)) {
       const azureSecret = azureAppConfigSecrets[key];
       if (

@@ -189,6 +189,8 @@ export const azureKeyVaultSyncFactory = ({ kmsService, appConnectionDAL }: TAzur
       });
     }

+    if (secretSync.syncOptions.disableSecretDeletion) return;
+
     for await (const deleteSecretKey of deleteSecrets.filter(
       (secret) => !setSecrets.find((setSecret) => setSecret.key === secret)
     )) {

@@ -112,6 +112,8 @@ export const databricksSyncFactory = ({ kmsService, appConnectionDAL }: TDatabri
       accessToken
     });

+    if (secretSync.syncOptions.disableSecretDeletion) return;
+
     for await (const secret of databricksSecretKeys) {
       if (!(secret.key in secretMap)) {
         await deleteDatabricksSecrets({

@@ -71,8 +71,16 @@ const getGcpSecrets = async (accessToken: string, secretSync: TGcpSyncWithCreden

       res[key] = Buffer.from(secretLatest.payload.data, "base64").toString("utf-8");
     } catch (error) {
-      // when a secret in GCP has no versions, we treat it as if it's a blank value
-      if (error instanceof AxiosError && error.response?.status === 404) {
+      // when a secret in GCP has no versions, or is disabled/destroyed, we treat it as if it's a blank value
+      if (
+        error instanceof AxiosError &&
+        (error.response?.status === 404 ||
+          (error.response?.status === 400 &&
+            // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access
+            error.response.data.error.status === "FAILED_PRECONDITION" &&
+            // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access,@typescript-eslint/no-unsafe-call
+            error.response.data.error.message.match(/(?:disabled|destroyed)/i)))
+      ) {
         res[key] = "";
       } else {
         throw new SecretSyncError({
@@ -147,6 +155,9 @@ export const GcpSyncFns = {
     for await (const key of Object.keys(gcpSecrets)) {
       try {
         if (!(key in secretMap) || !secretMap[key].value) {
+          // eslint-disable-next-line no-continue
+          if (secretSync.syncOptions.disableSecretDeletion) continue;
+
           // case: delete secret
           await request.delete(
             `${IntegrationUrls.GCP_SECRET_MANAGER_URL}/v1/projects/${destinationConfig.projectId}/secrets/${key}`,

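The widened catch above treats a disabled or destroyed GCP secret version like a missing one: a 400 FAILED_PRECONDITION whose message mentions disabled/destroyed now maps to an empty value instead of failing the sync. A condensed sketch of the discrimination, with the axios error shape as the diff assumes it:

import { AxiosError } from "axios";

// Treat "no versions" (404) and "disabled/destroyed" (400 FAILED_PRECONDITION)
// the same way: the secret exists but has no readable value.
const isBlankGcpSecret = (error: unknown): boolean =>
  error instanceof AxiosError &&
  (error.response?.status === 404 ||
    (error.response?.status === 400 &&
      error.response.data?.error?.status === "FAILED_PRECONDITION" &&
      /(?:disabled|destroyed)/i.test(String(error.response.data?.error?.message))));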
@@ -192,12 +192,6 @@ export const GithubSyncFns = {

     const publicKey = await getPublicKey(client, secretSync);

-    for await (const encryptedSecret of encryptedSecrets) {
-      if (!(encryptedSecret.name in secretMap)) {
-        await deleteSecret(client, secretSync, encryptedSecret);
-      }
-    }
-
     await sodium.ready.then(async () => {
       for await (const key of Object.keys(secretMap)) {
         // convert secret & base64 key to Uint8Array.
@@ -224,6 +218,14 @@ export const GithubSyncFns = {
         }
       }
     });
+
+    if (secretSync.syncOptions.disableSecretDeletion) return;
+
+    for await (const encryptedSecret of encryptedSecrets) {
+      if (!(encryptedSecret.name in secretMap)) {
+        await deleteSecret(client, secretSync, encryptedSecret);
+      }
+    }
   },
   getSecrets: async (secretSync: TGitHubSyncWithCredentials) => {
     throw new Error(`${SECRET_SYNC_NAME_MAP[secretSync.destination]} does not support importing secrets.`);

@@ -196,6 +196,8 @@ export const HumanitecSyncFns = {
       }
     }

+    if (secretSync.syncOptions.disableSecretDeletion) return;
+
     for await (const humanitecSecret of humanitecSecrets) {
       if (!secretMap[humanitecSecret.key]) {
         await deleteSecret(secretSync, humanitecSecret);

@@ -23,7 +23,8 @@ const BaseSyncOptionsSchema = <T extends AnyZodObject | undefined = undefined>({
     initialSyncBehavior: (canImportSecrets
       ? z.nativeEnum(SecretSyncInitialSyncBehavior)
       : z.literal(SecretSyncInitialSyncBehavior.OverwriteDestination)
-    ).describe(SecretSyncs.SYNC_OPTIONS(destination).initialSyncBehavior)
+    ).describe(SecretSyncs.SYNC_OPTIONS(destination).initialSyncBehavior),
+    disableSecretDeletion: z.boolean().optional().describe(SecretSyncs.SYNC_OPTIONS(destination).disableSecretDeletion)
   });

   const schema = merge ? baseSchema.merge(merge) : baseSchema;

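The recurring pattern across the sync functions above: every destination-side deletion pass is gated behind the new disableSecretDeletion sync option, which this schema change adds to the shared options, so a sync can still create and update without ever pruning. A minimal sketch of the gate with generic names:

type SyncOptions = { disableSecretDeletion?: boolean };

async function pruneDestination(
  syncOptions: SyncOptions,
  destinationKeys: string[],
  secretMap: Record<string, { value: string }>,
  deleteKey: (key: string) => Promise<void>
) {
  // upserts have already run; bail out before any destructive step
  if (syncOptions.disableSecretDeletion) return;

  for (const key of destinationKeys) {
    if (!(key in secretMap)) await deleteKey(key);
  }
}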
@@ -2,12 +2,6 @@ package api

 import "time"

-type Environment struct {
-	Name string `json:"name"`
-	Slug string `json:"slug"`
-	ID   string `json:"id"`
-}
-
 // Stores info for login one
 type LoginOneRequest struct {
 	Email string `json:"email"`
@@ -20,6 +14,7 @@ type LoginOneResponse struct {
 }

 // Stores info for login two
+
 type LoginTwoRequest struct {
 	Email       string `json:"email"`
 	ClientProof string `json:"clientProof"`
@@ -173,10 +168,9 @@ type Secret struct {
 }

 type Project struct {
-	ID           string        `json:"id"`
-	Name         string        `json:"name"`
-	Slug         string        `json:"slug"`
-	Environments []Environment `json:"environments"`
+	ID   string `json:"id"`
+	Name string `json:"name"`
+	Slug string `json:"slug"`
 }

 type RawSecret struct {

|
||||
)
|
||||
|
||||
var gatewayCmd = &cobra.Command{
|
||||
Use: "gateway",
|
||||
Short: "Run the Infisical gateway or manage its systemd service",
|
||||
Long: "Run the Infisical gateway in the foreground or manage its systemd service installation. Use 'gateway install' to set up the systemd service.",
|
||||
Example: `infisical gateway --token=<token>
|
||||
Use: "gateway",
|
||||
Short: "Run the Infisical gateway or manage its systemd service",
|
||||
Long: "Run the Infisical gateway in the foreground or manage its systemd service installation. Use 'gateway install' to set up the systemd service.",
|
||||
Example: `infisical gateway --token=<token>
|
||||
sudo infisical gateway install --token=<token> --domain=<domain>`,
|
||||
DisableFlagsInUseLine: true,
|
||||
Args: cobra.NoArgs,
|
||||
@@ -148,6 +148,28 @@ var gatewayInstallCmd = &cobra.Command{
|
||||
},
|
||||
}
|
||||
|
||||
var gatewayUninstallCmd = &cobra.Command{
|
||||
Use: "uninstall",
|
||||
Short: "Uninstall and remove systemd service for the gateway (requires sudo)",
|
||||
Long: "Uninstall and remove systemd service for the gateway. Must be run with sudo on Linux.",
|
||||
Example: "sudo infisical gateway uninstall",
|
||||
DisableFlagsInUseLine: true,
|
||||
Args: cobra.NoArgs,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
if runtime.GOOS != "linux" {
|
||||
util.HandleError(fmt.Errorf("systemd service installation is only supported on Linux"))
|
||||
}
|
||||
|
||||
if os.Geteuid() != 0 {
|
||||
util.HandleError(fmt.Errorf("systemd service installation requires root/sudo privileges"))
|
||||
}
|
||||
|
||||
if err := gateway.UninstallGatewaySystemdService(); err != nil {
|
||||
util.HandleError(err, "Failed to uninstall systemd service")
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
var gatewayRelayCmd = &cobra.Command{
|
||||
Example: `infisical gateway relay`,
|
||||
Short: "Used to run infisical gateway relay",
|
||||
@@ -183,6 +205,7 @@ func init() {
|
||||
gatewayRelayCmd.Flags().String("config", "", "Relay config yaml file path")
|
||||
|
||||
gatewayCmd.AddCommand(gatewayInstallCmd)
|
||||
gatewayCmd.AddCommand(gatewayUninstallCmd)
|
||||
gatewayCmd.AddCommand(gatewayRelayCmd)
|
||||
rootCmd.AddCommand(gatewayCmd)
|
||||
}
|
||||
|
@@ -15,9 +15,6 @@ import (
 	"syscall"
 	"time"

-	"github.com/Infisical/infisical-merge/packages/api"
-	"github.com/go-resty/resty/v2"
-
 	"github.com/Infisical/infisical-merge/packages/models"
 	"github.com/Infisical/infisical-merge/packages/util"
 	"github.com/fatih/color"
@@ -62,11 +59,11 @@ var runCmd = &cobra.Command{
 		return nil
 	},
 	Run: func(cmd *cobra.Command, args []string) {
-		environmentSlug, _ := cmd.Flags().GetString("env")
+		environmentName, _ := cmd.Flags().GetString("env")
 		if !cmd.Flags().Changed("env") {
 			environmentFromWorkspace := util.GetEnvFromWorkspaceFile()
 			if environmentFromWorkspace != "" {
-				environmentSlug = environmentFromWorkspace
+				environmentName = environmentFromWorkspace
 			}
 		}

@@ -139,20 +136,8 @@ var runCmd = &cobra.Command{
 			util.HandleError(err, "Unable to parse flag")
 		}

-		log.Debug().Msgf("Confirming selected environment is valid: %s", environmentSlug)
-
-		hasEnvironment, err := confirmProjectHasEnvironment(environmentSlug, projectId, token)
-		if err != nil {
-			util.HandleError(err, "Could not confirm project has environment")
-		}
-		if !hasEnvironment {
-			util.HandleError(fmt.Errorf("project does not have environment '%s'", environmentSlug))
-		}
-
-		log.Debug().Msgf("Project '%s' has environment '%s'", projectId, environmentSlug)
-
 		request := models.GetAllSecretsParameters{
-			Environment: environmentSlug,
+			Environment: environmentName,
 			WorkspaceId: projectId,
 			TagSlugs:    tagSlugs,
 			SecretsPath: secretsPath,
@@ -323,6 +308,7 @@ func waitForExitCommand(cmd *exec.Cmd) (int, error) {
 }

 func executeCommandWithWatchMode(commandFlag string, args []string, watchModeInterval int, request models.GetAllSecretsParameters, projectConfigDir string, secretOverriding bool, token *models.TokenDetails) {
+
 	var cmd *exec.Cmd
 	var err error
 	var lastSecretsFetch time.Time
@@ -453,53 +439,8 @@ func executeCommandWithWatchMode(commandFlag string, args []string, watchModeInt
 		}
 	}

-func confirmProjectHasEnvironment(environmentSlug, projectId string, token *models.TokenDetails) (bool, error) {
-	var accessToken string
-
-	if token != nil && (token.Type == util.SERVICE_TOKEN_IDENTIFIER || token.Type == util.UNIVERSAL_AUTH_TOKEN_IDENTIFIER) {
-		accessToken = token.Token
-	} else {
-		util.RequireLogin()
-		util.RequireLocalWorkspaceFile()
-
-		loggedInUserDetails, err := util.GetCurrentLoggedInUserDetails(true)
-		if err != nil {
-			util.HandleError(err, "Unable to authenticate")
-		}
-
-		if loggedInUserDetails.LoginExpired {
-			util.PrintErrorMessageAndExit("Your login session has expired, please run [infisical login] and try again")
-		}
-		accessToken = loggedInUserDetails.UserCredentials.JTWToken
-	}
-
-	if projectId == "" {
-		workspaceFile, err := util.GetWorkSpaceFromFile()
-		if err != nil {
-			util.HandleError(err, "Unable to get local project details")
-		}
-
-		projectId = workspaceFile.WorkspaceId
-	}
-
-	httpClient := resty.New()
-	httpClient.SetAuthToken(accessToken).
-		SetHeader("Accept", "application/json")
-
-	project, err := api.CallGetProjectById(httpClient, projectId)
-	if err != nil {
-		return false, err
-	}
-
-	for _, env := range project.Environments {
-		if env.Slug == environmentSlug {
-			return true, nil
-		}
-	}
-	return false, nil
-}
-
 func fetchAndFormatSecretsForShell(request models.GetAllSecretsParameters, projectConfigDir string, secretOverriding bool, token *models.TokenDetails) (models.InjectableEnvironmentResult, error) {

 	if token != nil && token.Type == util.SERVICE_TOKEN_IDENTIFIER {
 		request.InfisicalToken = token.Token
 	} else if token != nil && token.Type == util.UNIVERSAL_AUTH_TOKEN_IDENTIFIER {

|
||||
Short: "Used set secrets",
|
||||
Use: "set [secrets]",
|
||||
DisableFlagsInUseLine: true,
|
||||
Args: cobra.MinimumNArgs(1),
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if cmd.Flags().Changed("file") {
|
||||
if len(args) > 0 {
|
||||
return fmt.Errorf("secrets cannot be provided as command-line arguments when the --file option is used. Please choose either file-based or argument-based secret input")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return cobra.MinimumNArgs(1)(cmd, args)
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
token, err := util.GetInfisicalToken(cmd)
|
||||
if err != nil {
|
||||
@@ -177,13 +185,18 @@ var secretsSetCmd = &cobra.Command{
|
||||
util.HandleError(err, "Unable to parse secret type")
|
||||
}
|
||||
|
||||
file, err := cmd.Flags().GetString("file")
|
||||
if err != nil {
|
||||
util.HandleError(err, "Unable to parse flag")
|
||||
}
|
||||
|
||||
var secretOperations []models.SecretSetOperation
|
||||
if token != nil && (token.Type == util.SERVICE_TOKEN_IDENTIFIER || token.Type == util.UNIVERSAL_AUTH_TOKEN_IDENTIFIER) {
|
||||
if projectId == "" {
|
||||
util.PrintErrorMessageAndExit("When using service tokens or machine identities, you must set the --projectId flag")
|
||||
}
|
||||
|
||||
secretOperations, err = util.SetRawSecrets(args, secretType, environmentName, secretsPath, projectId, token)
|
||||
secretOperations, err = util.SetRawSecrets(args, secretType, environmentName, secretsPath, projectId, token, file)
|
||||
} else {
|
||||
if projectId == "" {
|
||||
workspaceFile, err := util.GetWorkSpaceFromFile()
|
||||
@@ -206,7 +219,7 @@ var secretsSetCmd = &cobra.Command{
|
||||
secretOperations, err = util.SetRawSecrets(args, secretType, environmentName, secretsPath, projectId, &models.TokenDetails{
|
||||
Type: "",
|
||||
Token: loggedInUserDetails.UserCredentials.JTWToken,
|
||||
})
|
||||
}, file)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
@@ -691,6 +704,7 @@ func init() {
|
||||
secretsSetCmd.Flags().String("projectId", "", "manually set the project ID to for setting secrets when using machine identity based auth")
|
||||
secretsSetCmd.Flags().String("path", "/", "set secrets within a folder path")
|
||||
secretsSetCmd.Flags().String("type", util.SECRET_TYPE_SHARED, "the type of secret to create: personal or shared")
|
||||
secretsSetCmd.Flags().String("file", "", "Load secrets from the specified file. File format: .env or YAML (comments: # or //). This option is mutually exclusive with command-line secrets arguments.")
|
||||
|
||||
secretsDeleteCmd.Flags().String("type", "personal", "the type of secret to delete: personal or shared (default: personal)")
|
||||
secretsDeleteCmd.Flags().String("token", "", "Fetch secrets using service token or machine identity access token")
|
||||
|
@@ -89,7 +89,7 @@ func (g *Gateway) ConnectWithRelay() error {
		turnClientCfg.Conn = turn.NewSTUNConn(conn)
	} else {
		log.Info().Msgf("Provided relay port %s. Using non TLS connection.", relayPort)
		conn, err := net.ListenPacket("udp4", turnAddr.String())
		conn, err := net.ListenPacket("udp4", "0.0.0.0:0")
		if err != nil {
			return fmt.Errorf("Failed to connect with relay server: %w", err)
		}
@@ -342,7 +342,9 @@ func (g *Gateway) registerRelayIsActive(ctx context.Context, errCh chan error) e
			case <-ticker.C:
				log.Debug().Msg("Performing relay connection health check")
				err := g.createPermissionForStaticIps(g.config.InfisicalStaticIp)
				if err != nil && !strings.Contains(err.Error(), "tls:") {
				// a "try again" error message can be sent by the server to avoid congestion
				// https://github.com/pion/turn/blob/master/internal/client/udp_conn.go#L382
				if err != nil && !strings.Contains(err.Error(), "try again") {
					failures++
					log.Warn().Err(err).Int("failures", failures).Msg("Failed to refresh TURN permissions")
					if failures >= maxFailures {
@@ -351,6 +353,7 @@ func (g *Gateway) registerRelayIsActive(ctx context.Context, errCh chan error) e
					}
					continue
				}
				failures = 0 // reset the counter after a successful health check
			}
		}
	}()
@@ -15,7 +15,8 @@ Description=Infisical Gateway Service
After=network.target

[Service]
Type=simple
Type=notify
NotifyAccess=all
EnvironmentFile=/etc/infisical/gateway.conf
ExecStart=infisical gateway
Restart=on-failure
@@ -50,8 +51,6 @@ func InstallGatewaySystemdService(token string, domain string) error {
	configContent := fmt.Sprintf("INFISICAL_UNIVERSAL_AUTH_ACCESS_TOKEN=%s\n", token)
	if domain != "" {
		configContent += fmt.Sprintf("INFISICAL_API_URL=%s\n", domain)
	} else {
		configContent += "INFISICAL_API_URL=\n"
	}

	configPath := filepath.Join(configDir, "gateway.conf")
@@ -60,11 +59,6 @@ func InstallGatewaySystemdService(token string, domain string) error {
	}

	servicePath := "/etc/systemd/system/infisical-gateway.service"
	if _, err := os.Stat(servicePath); err == nil {
		log.Info().Msg("Systemd service file already exists")
		return nil
	}

	if err := os.WriteFile(servicePath, []byte(systemdServiceTemplate), 0644); err != nil {
		return fmt.Errorf("failed to write systemd service file: %v", err)
	}
@@ -80,3 +74,48 @@ func InstallGatewaySystemdService(token string, domain string) error {

	return nil
}

func UninstallGatewaySystemdService() error {
	if runtime.GOOS != "linux" {
		log.Info().Msg("Skipping systemd service uninstallation - not on Linux")
		return nil
	}

	if os.Geteuid() != 0 {
		log.Info().Msg("Skipping systemd service uninstallation - not running as root/sudo")
		return nil
	}

	// Stop the service if it's running
	stopCmd := exec.Command("systemctl", "stop", "infisical-gateway")
	if err := stopCmd.Run(); err != nil {
		log.Warn().Msgf("Failed to stop service: %v", err)
	}

	// Disable the service
	disableCmd := exec.Command("systemctl", "disable", "infisical-gateway")
	if err := disableCmd.Run(); err != nil {
		log.Warn().Msgf("Failed to disable service: %v", err)
	}

	// Remove the service file
	servicePath := "/etc/systemd/system/infisical-gateway.service"
	if err := os.Remove(servicePath); err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("failed to remove systemd service file: %v", err)
	}

	// Remove the configuration file
	configPath := "/etc/infisical/gateway.conf"
	if err := os.Remove(configPath); err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("failed to remove config file: %v", err)
	}

	// Reload systemd to apply changes
	reloadCmd := exec.Command("systemctl", "daemon-reload")
	if err := reloadCmd.Run(); err != nil {
		return fmt.Errorf("failed to reload systemd: %v", err)
	}

	log.Info().Msg("Successfully uninstalled Infisical Gateway systemd service")
	return nil
}
@@ -245,8 +245,9 @@ func getCurrentBranch() (string, error) {
}

func AppendAPIEndpoint(address string) string {
	// if it's empty, return it as is
	// Ensure the address does not already end with "/api"
	if strings.HasSuffix(address, "/api") {
	if address == "" || strings.HasSuffix(address, "/api") {
		return address
	}
@@ -17,6 +17,7 @@ import (
	"github.com/go-resty/resty/v2"
	"github.com/rs/zerolog/log"
	"github.com/zalando/go-keyring"
	"gopkg.in/yaml.v3"
)

func GetPlainTextSecretsViaServiceToken(fullServiceToken string, environment string, secretPath string, includeImports bool, recursive bool, tagSlugs string, expandSecretReferences bool) ([]models.SingleEnvironmentVariable, error) {
@@ -232,6 +233,7 @@ func FilterSecretsByTag(plainTextSecrets []models.SingleEnvironmentVariable, tag

func GetAllEnvironmentVariables(params models.GetAllSecretsParameters, projectConfigFilePath string) ([]models.SingleEnvironmentVariable, error) {
	var secretsToReturn []models.SingleEnvironmentVariable
	// var serviceTokenDetails api.GetServiceTokenDetailsResponse
	var errorToReturn error

	if params.InfisicalToken == "" && params.UniversalAuthAccessToken == "" {
@@ -563,7 +565,99 @@ func GetPlainTextWorkspaceKey(authenticationToken string, receiverPrivateKey str
	return crypto.DecryptAsymmetric(encryptedWorkspaceKey, encryptedWorkspaceKeyNonce, encryptedWorkspaceKeySenderPublicKey, currentUsersPrivateKey), nil
}

func SetRawSecrets(secretArgs []string, secretType string, environmentName string, secretsPath string, projectId string, tokenDetails *models.TokenDetails) ([]models.SecretSetOperation, error) {
func parseSecrets(fileName string, content string) (map[string]string, error) {
	secrets := make(map[string]string)

	if strings.HasSuffix(fileName, ".yaml") || strings.HasSuffix(fileName, ".yml") {
		// Handle YAML secrets
		var yamlData map[string]interface{}
		if err := yaml.Unmarshal([]byte(content), &yamlData); err != nil {
			return nil, fmt.Errorf("failed to parse YAML file: %v", err)
		}

		for key, value := range yamlData {
			if strValue, ok := value.(string); ok {
				secrets[key] = strValue
			} else {
				return nil, fmt.Errorf("YAML secret '%s' must be a string", key)
			}
		}
	} else {
		// Handle .env files
		lines := strings.Split(content, "\n")

		for _, line := range lines {
			line = strings.TrimSpace(line)

			// Ignore empty lines and comments
			if line == "" || strings.HasPrefix(line, "#") || strings.HasPrefix(line, "//") {
				continue
			}

			// Ensure it's a valid key=value pair
			splitKeyValue := strings.SplitN(line, "=", 2)
			if len(splitKeyValue) != 2 {
				return nil, fmt.Errorf("invalid format, expected key=value in line: %s", line)
			}

			key, value := strings.TrimSpace(splitKeyValue[0]), strings.TrimSpace(splitKeyValue[1])

			// Handle quoted values
			if (strings.HasPrefix(value, `"`) && strings.HasSuffix(value, `"`)) ||
				(strings.HasPrefix(value, `'`) && strings.HasSuffix(value, `'`)) {
				value = value[1 : len(value)-1] // Remove surrounding quotes
			}

			secrets[key] = value
		}
	}

	return secrets, nil
}

func validateSecretKey(key string) error {
	if key == "" {
		return errors.New("secret keys cannot be empty")
	}
	if unicode.IsNumber(rune(key[0])) {
		return fmt.Errorf("secret key '%s' cannot start with a number", key)
	}
	if strings.Contains(key, " ") {
		return fmt.Errorf("secret key '%s' cannot contain spaces", key)
	}
	return nil
}

func SetRawSecrets(secretArgs []string, secretType string, environmentName string, secretsPath string, projectId string, tokenDetails *models.TokenDetails, file string) ([]models.SecretSetOperation, error) {
	if file != "" {
		content, err := os.ReadFile(file)
		if err != nil {
			if errors.Is(err, os.ErrNotExist) {
				PrintErrorMessageAndExit("File does not exist")
			}
			return nil, fmt.Errorf("unable to process file [err=%v]", err)
		}

		parsedSecrets, err := parseSecrets(file, string(content))
		if err != nil {
			PrintErrorMessageAndExit(fmt.Sprintf("error parsing secrets: %v", err))
		}

		// Validate the parsed secrets before appending them to the argument list
		for key, value := range parsedSecrets {
			if err := validateSecretKey(key); err != nil {
				PrintErrorMessageAndExit(err.Error())
			}
			if strings.TrimSpace(value) == "" {
				PrintErrorMessageAndExit(fmt.Sprintf("Secret key '%s' has an empty value", key))
			}
			secretArgs = append(secretArgs, fmt.Sprintf("%s=%s", key, value))
		}

		if len(secretArgs) == 0 {
			PrintErrorMessageAndExit("no valid secrets found in the file")
		}
	}

	if tokenDetails == nil {
		return nil, fmt.Errorf("unable to process set secret operations, token details are missing")
@@ -76,6 +76,7 @@ func TestUniversalAuth_SecretsGetWrongEnvironment(t *testing.T) {
	if err != nil {
		t.Fatalf("snapshot failed: %v", err)
	}

}

func TestUserAuth_SecretsGetAll(t *testing.T) {
@@ -219,6 +219,21 @@ $ infisical secrets set STRIPE_API_KEY=sjdgwkeudyjwe DOMAIN=example.com HASH=jeb
```

</Accordion>
<Accordion title="--file">
  Used to set secrets from a file, supporting both `.env` and `YAML` formats. The file path can be either absolute or relative to the current working directory.

  The file should contain secrets in the following formats:
  - `key=value` for `.env` files
  - `key: value` for YAML files

  Comments can be written using `# comment` or `// comment`. Empty lines are ignored during processing.

  ```bash
  # Example
  infisical secrets set --file="./.env"
  ```
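  As a concrete illustration, a minimal input file might look like this (contents are hypothetical; quoting and comments follow the rules above):

  ```bash
  # .env — example input for `infisical secrets set --file`
  STRIPE_API_KEY=sk_test_example
  DOMAIN="example.com"   # surrounding quotes are stripped from the value
  ```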
</Accordion>
</Accordion>

<Accordion title="infisical secrets delete">
@@ -0,0 +1,68 @@
---
title: "Machine identities"
description: "Learn how to set metadata and leverage authentication attributes for machine identities."
---

Machine identities can have metadata set manually, just like users. In addition, during the machine authentication process (e.g., via OIDC), extra attributes—called claims—are provided, which can be used in your ABAC policies.

#### Setting Metadata on Machine Identities

<Tabs>
  <Tab title="Manually Configure Metadata">
    <Steps>
      <Step title="Navigate to the Access Control page on the organization sidebar and select a machine identity.">
        <img src="/documentation/platform/access-controls/abac/images/add-metadata-on-machine-identity-1.png" />
      </Step>
      <Step title="On the machine identity page, click the pencil icon to edit the selected identity.">
        <img src="/documentation/platform/access-controls/abac/images/add-metadata-on-machine-identity-2.png" />
      </Step>
      <Step title="Add metadata via key-value pairs and update the machine identity.">
        <img src="/documentation/platform/access-controls/abac/images/add-metadata-on-machine-identity-3.png" />
      </Step>
    </Steps>
  </Tab>
</Tabs>

#### Accessing Attributes From Machine Identity Login

When machine identities authenticate, they may receive additional payloads/attributes from the service provider.
For methods like OIDC, these come as claims in the token and can be made available in your policies.

<Tabs>
  <Tab title="OIDC Login Attributes">
    1. Navigate to the Identity Authentication settings and select the OIDC Auth Method.
    2. In the **Advanced section**, locate the Claim Mapping configuration.
    3. Map the OIDC claims to permission attributes by specifying:
       - **Attribute Name:** The identifier to be used in your policies (e.g., department).
       - **Claim Path:** The dot notation path to the claim in the OIDC token (e.g., user.department).

    For example, if your OIDC provider returns:

    ```json
    {
      "sub": "machine456",
      "name": "Service A",
      "user": {
        "department": "engineering",
        "role": "service"
      }
    }
    ```

    You might map:

    - **department:** to `user.department`
    - **role:** to `user.role`

    Once configured, these attributes become available in your policies using the following format:

    ```
    {{ identity.auth.oidc.claims.<permission claim name> }}
    ```
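    For instance, with the mapping above, a folder-path permission could be scoped per department, mirroring the user-metadata pattern (the folder layout here is illustrative):

    ```
    /appA/{{ identity.auth.oidc.claims.department }}
    ```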
    <img src="/images/platform/access-controls/abac-policy-oidc-format.png" />
  </Tab>
  <Tab title="Other Authentication Method Attributes">
    At the moment, we only support OIDC claims. Payloads on other authentication methods are not yet accessible.
  </Tab>
</Tabs>
@@ -0,0 +1,39 @@
---
title: "User identities"
description: "How to set and use metadata attributes on user identities for ABAC."
---

User identities can have metadata attributes assigned directly. These attributes (such as location or department) are used to define dynamic access policies.

#### Setting Metadata on Users

<Tabs>
  <Tab title="Manually Configure Metadata">
    <Steps>
      <Step title="Navigate to the Access Control page on the organization sidebar and select a user.">
        <img src="/images/platform/access-controls/add-metadata-step1.png" />
      </Step>
      <Step title="On the User Page, click the pencil icon to edit the selected user.">
        <img src="/images/platform/access-controls/add-metadata-step2.png" />
      </Step>
      <Step title="Add metadata via key-value pairs and update the user identity.">
        <img src="/images/platform/access-controls/add-metadata-step3.png" />
      </Step>
    </Steps>
  </Tab>
  <Tab title="Automatically Populate Metadata">
    For organizations using SAML for **user logins**, Infisical automatically maps metadata attributes from SAML assertions to user identities on every login. This enables dynamic policies based on the user's SAML attributes.
  </Tab>
</Tabs>

#### Applying ABAC Policies with User Metadata

Attribute-based access controls are currently only available for policies defined on Secrets Manager projects.
You can set ABAC permissions to dynamically set access to environments, folders, secrets, and secret tags.

<img src="/images/platform/access-controls/example-abac-1.png" />

In your policies, metadata values are accessed as follows:

- **User ID:** `{{ identity.id }}` (always available)
- **Username:** `{{ identity.username }}` (always available)
- **Metadata Attributes:** `{{ identity.metadata.<metadata-key-name> }}` (available if set; see the example below)
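For example, a folder-path permission can reference a metadata attribute directly (a sketch; the folder layout is illustrative):

```
/appA/{{ identity.metadata.location }}
```

With this pattern, each user can only access the folder that matches their configured `location` attribute.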
@@ -0,0 +1,15 @@
---
title: "Overview"
description: "Learn the basics of ABAC for both users and machine identities."
---

Infisical's Attribute-based Access Controls (ABAC) enable dynamic, attribute-driven permissions for both users and machine identities. ABAC enforces fine-grained, context-aware access controls using metadata attributes—stored as key-value pairs—either attached to identities or provided during authentication.

<CardGroup cols={2}>
  <Card title="Users" icon="square-1" href="./managing-user-metadata">
    Manage user metadata manually or automatically via SAML logins.
  </Card>
  <Card title="Machine Identities" icon="square-2" href="./managing-machine-identity-attributes">
    Set metadata manually like users and access additional attributes provided during machine authentication (for example, OIDC claims).
  </Card>
</CardGroup>
@@ -1,65 +0,0 @@
---
title: "Attribute-based Access Controls"
description: "Learn how to use ABAC to manage permissions based on identity attributes."
---

Infisical's Attribute-based Access Controls (ABAC) allow for dynamic, attribute-driven permissions for both user and machine identities.
ABAC policies use metadata attributes—stored as key-value pairs on identities—to enforce fine-grained permissions that are context aware.

In ABAC, access controls are defined using metadata attributes, such as location or department, which can be set directly on user or machine identities.
During policy execution, these attributes are evaluated and determine whether the actor can access the requested resource or perform the requested operation.

## Project-level Permissions

Attribute-based access controls are currently available for policies defined on projects. You can set ABAC permissions to control access to environments, folders, secrets, and secret tags.

### Setting Metadata on Identities

<Tabs>
  <Tab title="Manually Configure Metadata">
    <Steps>
      <Step title="Navigate to the Access Control page on the organization sidebar and select an identity (user or machine).">
        <img src="/images/platform/access-controls/add-metadata-step1.png" />
      </Step>
      <Step title="On the Identity Page, click the pencil icon to edit the selected identity.">
        <img src="/images/platform/access-controls/add-metadata-step2.png" />
      </Step>
      <Step title="Add metadata via key-value pairs and update the identity.">
        <img src="/images/platform/access-controls/add-metadata-step3.png" />
      </Step>
    </Steps>
  </Tab>
  <Tab title="Automatically Populate Metadata">
    For organizations using SAML for login, Infisical automatically maps metadata attributes from SAML assertions to user identities.
    This makes it easy to create policies that dynamically adapt based on the SAML user's attributes.
  </Tab>
</Tabs>

## Defining ABAC Policies

<img src="/images/platform/access-controls/example-abac-1.png" />

ABAC policies make use of identity metadata to define dynamic permissions. Each attribute must start and end with double curly-brackets `{{ <attribute-name> }}`.
The following attributes are available within project permissions:

- **User ID**: `{{ identity.id }}`
- **Username**: `{{ identity.username }}`
- **Metadata Attributes**: `{{ identity.metadata.<metadata-key-name> }}`

During policy execution, these placeholders are replaced by their actual values prior to evaluation.

### Example Use Case

#### Location-based Access Control

Suppose you want to restrict access to secrets within a specific folder based on a user's geographic region.
You could assign a `location` attribute to each user (e.g., `identity.metadata.location`).
You could then structure your folders to align with this attribute and define permissions accordingly.

For example, a policy might restrict access to folders matching the user's location attribute in the following pattern:
```
/appA/{{ identity.metadata.location }}
```
Using this structure, users can only access folders that correspond to their configured `location` attribute.
Consequently, if a user's attribute changes due to relocation, no policies need to be changed for them to gain access to the folders associated with their new location.
@@ -18,7 +18,7 @@ To make sure that users and machine identities are only accessing the resources
<Card
  title="Attribute-based Access Control"
  href="./attribute-based-access-controls"
  href="/documentation/platform/access-controls/abac"
  icon="address-book"
  color="#000000"
>
@@ -9,20 +9,76 @@ description: "Track every event action performed within Infisical projects."
  If you're using Infisical Cloud, then it is available under the **Pro**
  and **Enterprise Tier** with varying retention periods. If you're self-hosting Infisical,
  then you should contact sales@infisical.com to purchase an enterprise license to use it.
</Info>

Infisical provides audit logs for security and compliance teams to monitor information access.
With the Audit Log functionality, teams can:

- **Track** 40+ different events;
- **Filter** audit logs by event, actor, source, date, or any combination of these filters;
- **Inspect** extensive metadata in the event of any suspicious activity or incident review.



## Audit Log Structure

Each log contains the following data:

- **Event**: The underlying action such as create, list, read, update, or delete secret(s).
- **Actor**: The entity responsible for performing or causing the event; this can be a user or service.
- **Timestamp**: The date and time at which the event occurred.
- **Source** (User agent + IP): The software (user agent) and network address (IP) from which the event was initiated.
- **Metadata**: Additional data to provide context for each event. For example, this could be the path from which a secret was fetched, etc.

| Field                     | Type     | Description                                               | Purpose                                                        |
| ------------------------- | -------- | --------------------------------------------------------- | -------------------------------------------------------------- |
| **event**                 | Object   | Contains details about the action performed               | Captures what happened                                          |
| event.type                | String   | The specific action that occurred (e.g., "create-secret") | Identifies the exact operation                                  |
| event.metadata            | Object   | Context-specific details about the event                  | Provides detailed information relevant to the specific action   |
| **actor**                 | Object   | Information about who performed the action                | Identifies the responsible entity                               |
| actor.type                | String   | Category of actor (user, service, identity, etc.)         | Distinguishes between human and non-human actors                |
| actor.metadata            | Object   | Details about the specific actor                          | Provides identity information                                   |
| actor.metadata.userId     | String   | Unique identifier for user actors                         | Links to specific user account                                  |
| actor.metadata.email      | String   | Email address for user actors                             | Email of the executing user                                     |
| actor.metadata.username   | String   | Username for user actors                                  | Username of the executing user                                  |
| actor.metadata.serviceId  | String   | Identifier for service actors                             | ID of the specific service token                                |
| actor.metadata.identityId | String   | Identifier for identity actors                            | ID of the specific identity                                     |
| actor.metadata.permission | Object   | Permission context for the action                         | Shows permission template data when the action was performed    |
| **orgId**                 | String   | Organization identifier                                   | Indicates which organization the action occurred in             |
| **projectId**             | String   | Project identifier                                        | Indicates which project the action affected                     |
| **ipAddress**             | String   | Source IP address                                         | Shows where the request originated from                         |
| **userAgent**             | String   | Client application information                            | Identifies browser or application used                          |
| **userAgentType**         | String   | Category of client (web, CLI, SDK, etc.)                  | Classifies the access method                                    |
| **timestamp**             | DateTime | When the action occurred                                  | Records the exact time of the event                             |

<Accordion title="Example Payload">
```json
{
  "id": "[UUID]",
  "ipAddress": "[IP_ADDRESS]",
  "userAgent": "[USER_AGENT_STRING]",
  "userAgentType": "web",
  "expiresAt": "[TIMESTAMP]",
  "createdAt": "[TIMESTAMP]",
  "updatedAt": "[TIMESTAMP]",
  "orgId": "[ORGANIZATION_UUID]",
  "projectId": "[PROJECT_UUID]",
  "projectName": "[PROJECT_NAME]",
  "event": {
    "type": "get-secrets",
    "metadata": {
      "secretPath": "[PATH]",
      "environment": "[ENVIRONMENT_NAME]",
      "numberOfSecrets": [NUMBER]
    }
  },
  "actor": {
    "type": "user",
    "metadata": {
      "email": "[EMAIL]",
      "userId": "[USER_UUID]",
      "username": "[USERNAME]",
      "permission": {
        "metadata": {},
        "auth": {}
      }
    }
  }
}
```
</Accordion>
@@ -37,7 +37,8 @@ then Infisical returns a short-lived access token that can be used to make authe
To be more specific:

1. The application deployed on Kubernetes retrieves its [service account credential](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#opt-out-of-api-credential-automounting) that is a JWT token at the `/var/run/secrets/kubernetes.io/serviceaccount/token` pod path.
2. The application sends the JWT token to Infisical at the `/api/v1/auth/kubernetes-auth/login` endpoint after which Infisical forwards the JWT token to the Kubernetes API Server at the [TokenReview API](https://kubernetes.io/docs/reference/kubernetes-api/authentication-resources/token-review-v1/) for verification and to obtain the service account information associated with the JWT token. Infisical is able to authenticate and interact with the TokenReview API by using a long-lived service account JWT token itself (referred to onward as the token reviewer JWT token).
2. The application sends the JWT token to Infisical at the `/api/v1/auth/kubernetes-auth/login` endpoint, after which Infisical forwards the JWT token to the Kubernetes API Server at the TokenReview API for verification and to obtain the service account information associated with the JWT token.
   Infisical is able to authenticate and interact with the TokenReview API either by using the long-lived JWT token set while configuring this authentication method or by using the incoming token itself. The JWT token mentioned in this context is referred to as the token reviewer JWT token.
3. Infisical checks the service account properties against set criteria such as **Allowed Service Account Names** and **Allowed Namespaces**.
4. If all is well, Infisical returns a short-lived access token that the application can use to make authenticated requests to the Infisical API.
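A minimal sketch of step 2 from inside a pod, assuming the standard mounted token path and a machine identity configured for Kubernetes Auth (the request field names are an assumption here; confirm them against the API reference):

```bash
# Read the pod's service account JWT and exchange it for an Infisical access token.
SA_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)

curl -s "https://app.infisical.com/api/v1/auth/kubernetes-auth/login" \
  -H "Content-Type: application/json" \
  -d "{\"identityId\": \"<your-machine-identity-id>\", \"jwt\": \"$SA_TOKEN\"}"
```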
@@ -53,6 +54,12 @@ In the following steps, we explore how to create and use identities for your app

<Steps>
  <Step title="Obtaining the token reviewer JWT for Infisical">
    <Tabs>
      <Tab title="Option 1: Reviewer JWT Token">

        <Note>
          **When to use this option**: Choose this approach when you want centralized authentication management. Only one service account needs special permissions, and your application service accounts remain unchanged.
        </Note>
        1.1. Start by creating a service account in your Kubernetes cluster that will be used by Infisical to authenticate with the Kubernetes API Server.

        ```yaml infisical-service-account.yaml
@@ -61,7 +68,6 @@ In the following steps, we explore how to create and use identities for your app
        metadata:
          name: infisical-auth
          namespace: default

        ```

        ```
@@ -121,7 +127,40 @@ In the following steps, we explore how to create and use identities for your app

        Keep this JWT token handy as you will need it for the **Token Reviewer JWT** field when configuring the Kubernetes Auth authentication method for the identity in step 2.

  </Step>
      </Tab>
      <Tab title="Option 2: Client JWT as Reviewer JWT Token">

        <Note>
          **When to use this option**: Choose this approach to eliminate long-lived tokens. This option simplifies Infisical configuration but requires each application service account to have elevated permissions.
        </Note>

        The self-validation method eliminates the need for a separate long-lived reviewer JWT by using the same token for both authentication and validation. Instead of creating a dedicated reviewer service account, you'll grant the necessary permissions to each application service account.

        For each service account that needs to authenticate with Infisical, add the `system:auth-delegator` role:

        ```yaml client-role-binding.yaml
        apiVersion: rbac.authorization.k8s.io/v1
        kind: ClusterRoleBinding
        metadata:
          name: infisical-client-binding-[your-app-name]
        roleRef:
          apiGroup: rbac.authorization.k8s.io
          kind: ClusterRole
          name: system:auth-delegator
        subjects:
          - kind: ServiceAccount
            name: [your-app-service-account]
            namespace: [your-app-namespace]
        ```

        ```
        kubectl apply -f client-role-binding.yaml
        ```

        When configuring Kubernetes Auth in Infisical, leave the **Token Reviewer JWT** field empty. Infisical will use the client's own token for validation.
      </Tab>
    </Tabs>
  </Step>

  <Step title="Creating an identity">
    To create an identity, head to your Organization Settings > Access Control > Machine Identities and press **Create identity**.
@@ -151,7 +190,8 @@ In the following steps, we explore how to create and use identities for your app
Here's some more guidance on each field:

- Kubernetes Host / Base Kubernetes API URL: The host string, host:port pair, or URL to the base of the Kubernetes API server. This can usually be obtained by running `kubectl cluster-info`.
- Token Reviewer JWT: A long-lived service account JWT token for Infisical to access the [TokenReview API](https://kubernetes.io/docs/reference/kubernetes-api/authentication-resources/token-review-v1/) to validate other service account JWT tokens submitted by applications/pods. This is the JWT token obtained from step 1.5.
- Token Reviewer JWT: A long-lived service account JWT token for Infisical to access the [TokenReview API](https://kubernetes.io/docs/reference/kubernetes-api/authentication-resources/token-review-v1/) to validate other service account JWT tokens submitted by applications/pods. This is the JWT token obtained from step 1.5 (Reviewer tab). If omitted, the client's own JWT will be used instead, which requires the client to have the `system:auth-delegator` ClusterRole binding; this is shown in step 1, option 2.
- Allowed Service Account Names: A comma-separated list of trusted service account names that are allowed to authenticate with Infisical.
- Allowed Namespaces: A comma-separated list of trusted namespaces that service accounts must belong to in order to authenticate with Infisical.
- Allowed Audience: An optional audience claim that the service account JWT token must have to authenticate with Infisical.
@@ -176,18 +216,19 @@ In the following steps, we explore how to create and use identities for your app
  </Step>
  <Step title="Accessing the Infisical API with the identity">
    To access the Infisical API as the identity, you should first make sure that the pod running your application is bound to a service account specified in the **Allowed Service Account Names** field of the identity's Kubernetes Auth authentication method configuration in step 2.

    Once bound, the pod will receive automatically mounted service account credentials, that is, a JWT token at the `/var/run/secrets/kubernetes.io/serviceaccount/token` path. This token should be used to authenticate with Infisical at the `/api/v1/auth/kubernetes-auth/login` endpoint.

    For information on how to configure service accounts for pods, refer to the guide [here](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/).

    We provide a code example below of how you might retrieve the JWT token and use it to authenticate with Infisical to gain access to the [Infisical API](/api-reference/overview/introduction).

    <Accordion
      title="Sample code for inside an application"
    >
      The shown example uses Node.js, but you can use any other language to retrieve the service account JWT token and use it to authenticate with Infisical.

      ```javascript
      const fs = require("fs");
      try {
        const tokenPath = "/var/run/secrets/kubernetes.io/serviceaccount/token";
@@ -237,15 +278,16 @@ In the following steps, we explore how to create and use identities for your app
    </Accordion>
    <Accordion title="Why is the Infisical API rejecting my access token?">
      There are a few reasons why this might happen:

      - The access token has expired.
      - The identity is insufficiently permissioned to interact with the resources you wish to access.
      - The client access token is being used from an untrusted IP.
    </Accordion>
    <Accordion title="What is access token renewal and TTL/Max TTL?">
      An identity access token can have a time-to-live (TTL) or incremental lifetime after which it expires.

      In certain cases, you may want to extend the lifespan of an access token; to do so, you must set a max TTL parameter.

      A token can be renewed any number of times, where each call to renew it can extend the token's lifetime by increments of the access token's TTL.
      Regardless of how frequently an access token is renewed, its lifespan remains bound to the maximum TTL determined at its creation.
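For renewal itself, a hedged sketch follows; the renew route and body shape are assumptions based on Infisical's token API naming, so verify them against the API reference before relying on this:

```bash
# Assumed renewal call: extends the token's lifetime by one TTL increment,
# up to the max TTL fixed at creation.
curl -s "https://app.infisical.com/api/v1/auth/token/renew" \
  -H "Content-Type: application/json" \
  -d '{"accessToken": "<your-access-token>"}'
```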
@@ -114,6 +114,13 @@ using the Universal Auth authentication method.
that is to exchange the **Client ID** and **Client Secret** of the identity for an access token
by making a request to the `/api/v1/auth/universal-auth/login` endpoint.

<Tip>
  Choose the correct base URL based on your region:

  - For Infisical Cloud US users: `https://app.infisical.com`
  - For Infisical Cloud EU users: `https://eu.infisical.com`
</Tip>

#### Sample request

```bash Request
@@ -66,7 +66,7 @@ For organizations that work with US government agencies, FIPS compliance is almo
<Step title="Configure HSM on Infisical">

  <Warning>
    Are you using Docker? If you are using Docker, please follow the instructions in the [Using HSMs with Docker](#using-hsms-with-docker) section.
    Are you using Docker or Kubernetes for your deployment? If so, please follow the instructions in the [Using HSMs in your Deployment](#using-hsms-in-your-deployment) section.
  </Warning>

  Configuring the HSM on Infisical requires setting a set of environment variables:
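For reference, these are the HSM-related variables used throughout the rest of this guide (shown as shell exports; the values are placeholders):

```bash
export HSM_LIB_PATH="/usr/safenet/lunaclient/libs/64/libCryptoki2.so"  # path to the PKCS#11 client library
export HSM_PIN="<your-hsm-device-pin>"
export HSM_SLOT=0                                                      # HSM device slot number
export HSM_KEY_LABEL="<your-key-label>"                                # AES key label; the HMAC key gets the _HMAC suffix
```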
@@ -94,165 +94,447 @@ For organizations that work with US government agencies, FIPS compliance is almo
</Steps>

## Using HSMs with Docker
When using Docker, you need to mount the path containing the HSM client files. This section covers how to configure your Infisical instance to use an HSM with Docker.
## Using HSMs In Your Deployment

<Tabs>
  <Tab title="Docker">
    When using Docker, you need to mount the path containing the HSM client files. This section covers how to configure your Infisical instance to use an HSM with Docker.

    <Tabs>
      <Tab title="Thales Luna Cloud HSM">
        <Steps>
          <Step title="Create HSM client folder">
            When using Docker, you are able to set your HSM library path to any location on your machine. In this example, we are going to be using `/etc/luna-docker`.

            ```bash
            mkdir /etc/luna-docker
            ```

            After [setting up your Luna Cloud HSM client](https://thalesdocs.com/gphsm/luna/7/docs/network/Content/install/client_install/add_dpod.htm), you should have a set of files, referred to as the HSM client. You don't need all the files, but for simplicity we recommend copying all the files from the client.

            A folder structure of a client folder will often look like this:
            ```
            partition-ca-certificate.pem
            partition-certificate.pem
            server-certificate.pem
            Chrystoki.conf
            /plugins
              libcloud.plugin
            /lock
            /libs
              /64
                libCryptoki2.so
            /jsp
              LunaProvider.jar
              /64
                libLunaAPI.so
            /etc
              openssl.cnf
            /bin
              /64
                ckdemo
                lunacm
                multitoken
                vtl
            ```

            The most important parts of the client folder are the `Chrystoki.conf` file and the `libs`, `plugins`, and `jsp` folders. You need to copy these files to the folder you created in the first step.

            ```bash
            cp -r /<path-to-where-your-luna-client-is-located> /etc/luna-docker
            ```
          </Step>

          <Step title="Update Chrystoki.conf">
            The `Chrystoki.conf` file is used to configure the HSM client. You need to update the `Chrystoki.conf` file to point to the correct file paths.

            In this example, we will be mounting the `/etc/luna-docker` folder to the Docker container under a different path. The path we will use in this example is `/usr/safenet/lunaclient`. This means `/etc/luna-docker` will be mounted to `/usr/safenet/lunaclient` in the Docker container.

            An example config file will look like this:

            ```Chrystoki.conf
            Chrystoki2 = {
              # This path points to the mounted path, /usr/safenet/lunaclient
              LibUNIX64 = /usr/safenet/lunaclient/libs/64/libCryptoki2.so;
            }

            Luna = {
              DefaultTimeOut = 500000;
              PEDTimeout1 = 100000;
              PEDTimeout2 = 200000;
              PEDTimeout3 = 20000;
              KeypairGenTimeOut = 2700000;
              CloningCommandTimeOut = 300000;
              CommandTimeOutPedSet = 720000;
            }

            CardReader = {
              LunaG5Slots = 0;
              RemoteCommand = 1;
            }

            Misc = {
              # Update the paths to point to the mounted path if your folder structure is different from the one mentioned in the previous step.
              PluginModuleDir = /usr/safenet/lunaclient/plugins;
              MutexFolder = /usr/safenet/lunaclient/lock;
              PE1746Enabled = 1;
              ToolsDir = /usr/bin;
            }

            Presentation = {
              ShowEmptySlots = no;
            }

            LunaSA Client = {
              ReceiveTimeout = 20000;
              # Update the paths to point to the mounted path if your folder structure is different from the one mentioned in the previous step.
              SSLConfigFile = /usr/safenet/lunaclient/etc/openssl.cnf;
              ClientPrivKeyFile = ./etc/ClientNameKey.pem;
              ClientCertFile = ./etc/ClientNameCert.pem;
              ServerCAFile = ./etc/CAFile.pem;
              NetClient = 1;
              TCPKeepAlive = 1;
            }

            REST = {
              AppLogLevel = error
              ServerName = <REDACTED>;
              ServerPort = 443;
              AuthTokenConfigURI = <REDACTED>;
              AuthTokenClientId = <REDACTED>;
              AuthTokenClientSecret = <REDACTED>;
              RestClient = 1;
              ClientTimeoutSec = 120;
              ClientPoolSize = 32;
              ClientEofRetryCount = 15;
              ClientConnectRetryCount = 900;
              ClientConnectIntervalMs = 1000;
            }
            XTC = {
              Enabled = 1;
              TimeoutSec = 600;
            }
            ```

            Save the file after updating the paths.
          </Step>

          <Step title="Run Docker">
            Running Docker with HSM encryption requires setting the HSM-related environment variables as mentioned previously in the [HSM setup instructions](#setup-instructions). You can set these environment variables in your Docker run command.

            We are setting the environment variables for Docker via the command line in this example, but you can also pass in a `.env` file to set these environment variables.

            <Warning>
              If no key is found with the provided key label, the HSM will create a new key with the provided label.
              Infisical depends on an AES and an HMAC key being present in the HSM. If these keys are not present, Infisical will create them. The AES key label will be the value of the `HSM_KEY_LABEL` environment variable, and the HMAC key label will be the value of the `HSM_KEY_LABEL` environment variable with the suffix `_HMAC`.
            </Warning>

            ```bash
            docker run -p 80:8080 \
              -v /etc/luna-docker:/usr/safenet/lunaclient \
              -e HSM_LIB_PATH="/usr/safenet/lunaclient/libs/64/libCryptoki2.so" \
              -e HSM_PIN="<your-hsm-device-pin>" \
              -e HSM_SLOT=<hsm-device-slot> \
              -e HSM_KEY_LABEL="<your-key-label>" \
              # The rest are unrelated to HSM setup...
              -e ENCRYPTION_KEY="<>" \
              -e AUTH_SECRET="<>" \
              -e DB_CONNECTION_URI="<>" \
              -e REDIS_URL="<>" \
              -e SITE_URL="<>" \
              infisical/infisical-fips:<version> # Replace <version> with the version you want to use
            ```

            We recommend reading further about [using Infisical with Docker](/self-hosting/deployment-options/standalone-infisical).
          </Step>
        </Steps>
        After following these steps, your Docker setup will be ready to use HSM encryption.
      </Tab>
    </Tabs>
  </Tab>
<Tab title="Kubernetes">
|
||||
When you are deploying Infisical with the [Kubernetes self-hosting option](/self-hosting/deployment-options/kubernetes-helm), you can still use HSM encryption, but you need to ensure that the HSM client files are present in the container.
|
||||
|
||||
<Tabs>
|
||||
<Tab title="Thales Luna Cloud HSM">
|
||||
<Note>
|
||||
This is only supported on helm chart version `1.4.1` and above. Please see the [Helm Chart Changelog](https://github.com/Infisical/infisical/blob/main/helm-charts/infisical-standalone-postgres/CHANGELOG.md#141-march-19-2025) for more information.
|
||||
</Note>
|
||||
|
||||
<Steps>
|
||||
<Step title="Create HSM client folder">
|
||||
When using Kubernetes, you need to mount the path containing the HSM client files. This section covers how to configure your Infisical instance to use an HSM with Kubernetes.
|
||||
|
||||
|
||||
```bash
|
||||
mkdir /etc/hsm-client
|
||||
```
|
||||
|
||||
After [setting up your Luna Cloud HSM client](https://thalesdocs.com/gphsm/luna/7/docs/network/Content/install/client_install/add_dpod.htm), you should have a set of files, referred to as the HSM client. You don't need all the files, but for simplicity we recommend copying all the files from the client.
|
||||
|
||||
A folder structure of a client folder will often look like this:
|
||||
```
|
||||
partition-ca-certificate.pem
|
||||
partition-certificate.pem
|
||||
server-certificate.pem
|
||||
Chrystoki.conf
|
||||
/plugins
|
||||
libcloud.plugin
|
||||
/lock
|
||||
/libs
|
||||
/64
|
||||
libCryptoki2.so
|
||||
/jsp
|
||||
LunaProvider.jar
|
||||
/64
|
||||
libLunaAPI.so
|
||||
/etc
|
||||
openssl.cnf
|
||||
/bin
|
||||
/64
|
||||
ckdemo
|
||||
lunacm
|
||||
multitoken
|
||||
vtl
|
||||
```
|
||||
|
||||
The most important parts of the client folder is the `Chrystoki.conf` file, and the `libs`, `plugins`, and `jsp` folders. You need to copy these files to the folder you created in the first step.
|
||||
|
||||
```bash
|
||||
cp -r /<path-to-where-your-hsm-client-is-located> /etc/hsm-client
|
||||
```
|
||||
</Step>
|
||||
<Step title="Update Chrystoki.conf">
|
||||
The `Chrystoki.conf` file is used to configure the HSM client. You need to update the `Chrystoki.conf` file to point to the correct file paths.
|
||||
|
||||
In this example, we will be mounting the `/etc/hsm-client` folder from the host to containers in our deployment's pods at the path `/hsm-client`. This means the contents of `/etc/hsm-client` on the host will be accessible at `/hsm-client` within the containers.
|
||||
|
||||
An example config file will look like this:
|
||||
|
||||
```Chrystoki.conf
|
||||
Chrystoki2 = {
|
||||
# This path points to the mounted path, /hsm-client
|
||||
LibUNIX64 = /hsm-client/libs/64/libCryptoki2.so;
|
||||
}
|
||||
|
||||
Luna = {
|
||||
DefaultTimeOut = 500000;
|
||||
PEDTimeout1 = 100000;
|
||||
PEDTimeout2 = 200000;
|
||||
PEDTimeout3 = 20000;
|
||||
KeypairGenTimeOut = 2700000;
|
||||
CloningCommandTimeOut = 300000;
|
||||
CommandTimeOutPedSet = 720000;
|
||||
}
|
||||
|
||||
CardReader = {
|
||||
LunaG5Slots = 0;
|
||||
RemoteCommand = 1;
|
||||
}
|
||||
|
||||
Misc = {
|
||||
# Update the paths to point to the mounted path if your folder structure is different from the one mentioned in the previous step.
|
||||
PluginModuleDir = /hsm-client/plugins;
|
||||
MutexFolder = /hsm-client/lock;
|
||||
PE1746Enabled = 1;
|
||||
ToolsDir = /usr/bin;
|
||||
|
||||
}
|
||||
|
||||
Presentation = {
|
||||
ShowEmptySlots = no;
|
||||
}
|
||||
|
||||
LunaSA Client = {
|
||||
ReceiveTimeout = 20000;
|
||||
# Update the paths to point to the mounted path if your folder structure is different from the one mentioned in the previous step.
|
||||
SSLConfigFile = /hsm-client/etc/openssl.cnf;
|
||||
ClientPrivKeyFile = ./etc/ClientNameKey.pem;
|
||||
ClientCertFile = ./etc/ClientNameCert.pem;
|
||||
ServerCAFile = ./etc/CAFile.pem;
|
||||
NetClient = 1;
|
||||
TCPKeepAlive = 1;
|
||||
}
|
||||
|
||||
|
||||
REST = {
|
||||
AppLogLevel = error
|
||||
ServerName = <REDACTED>;
|
||||
ServerPort = 443;
|
||||
AuthTokenConfigURI = <REDACTED>;
|
||||
AuthTokenClientId = <REDACTED>;
|
||||
AuthTokenClientSecret = <REDACTED>;
|
||||
RestClient = 1;
|
||||
ClientTimeoutSec = 120;
|
||||
ClientPoolSize = 32;
|
||||
ClientEofRetryCount = 15;
|
||||
ClientConnectRetryCount = 900;
|
||||
ClientConnectIntervalMs = 1000;
|
||||
}
|
||||
XTC = {
|
||||
Enabled = 1;
|
||||
TimeoutSec = 600;
|
||||
}
|
||||
```
|
||||
|
||||
Save the file after updating the paths.
|
||||
</Step>
|
||||
|
||||
<Step title="Creating Persistent Volume Claim (PVC)">
|
||||
You need to create a Persistent Volume Claim (PVC) to mount the HSM client files to the Infisical deployment.
|
||||
|
||||
|
||||
```bash
|
||||
kubectl apply -f - <<EOF
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: infisical-data-pvc
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 500Mi
|
||||
EOF
|
||||
```
|
||||
The above command will create a PVC named `infisical-data-pvc` with a storage size of `500Mi`. You can change the storage size if needed.
|
||||
|
||||
|
||||
Next we need to create a temporary pod with the PVC mounted as a volume, allowing us to copy the HSM client files into this mounted storage.
|
||||
|
||||
```bash
|
||||
kubectl apply -f - <<EOF
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: hsm-setup-pod
|
||||
spec:
|
||||
containers:
|
||||
- name: setup
|
||||
image: busybox
|
||||
command: ["/bin/sh", "-c", "sleep 3600"]
|
||||
volumeMounts:
|
||||
- name: hsm-data
|
||||
mountPath: /data
|
||||
volumes:
|
||||
- name: hsm-data
|
||||
persistentVolumeClaim:
|
||||
claimName: infisical-data-pvc
|
||||
EOF
|
||||
```
|
||||
|
||||
The above command will create a pod named `hsm-setup-pod` with a busybox image. The pod will sleep for 3600 seconds _(one hour)_, which is enough time to upload the HSM client files to the PVC.
|
||||
|
||||
Ensure that the pod is running and is healthy by running the following command:
|
||||
|
||||
```bash
|
||||
kubectl wait --for=condition=Ready pod/hsm-setup-pod --timeout=60s
|
||||
```
|
||||
|
||||
Next we need to copy the HSM client files into the PVC.
|
||||
|
||||
```bash
|
||||
kubectl exec hsm-setup-pod -- mkdir -p /data/ # Create the data directory
|
||||
kubectl cp ./hsm-client/ hsm-setup-pod:/data/ # Copy the HSM client files into the PVC
|
||||
kubectl exec hsm-setup-pod -- chmod -R 755 /data/ # Set the correct permissions for the HSM client files
|
||||
```
|
||||
|
||||
Finally, we are ready to delete the temporary pod, as we have successfully uploaded the HSM client files to the PVC. This step may take a few minutes to complete.
|
||||
|
||||
```bash
|
||||
kubectl delete pod hsm-setup-pod
|
||||
```
|
||||
</Step>
|
||||
|
||||
<Step title="Updating your environment variables">
|
||||
Next we need to update the environment variables used for the deployment. If you followed the [setup instructions for Kubernetes deployments](/self-hosting/deployment-options/kubernetes-helm), you should have a Kubernetes secret called `infisical-secrets`.
|
||||
We need to update the secret with the following environment variables:
|
||||
|
||||
- `HSM_LIB_PATH` - The path to the HSM client library _(mapped to `/hsm-client/libs/64/libCryptoki2.so`)_
|
||||
- `HSM_PIN` - The PIN for the HSM device that you created when setting up your Luna Cloud HSM client
|
||||
- `HSM_SLOT` - The slot number for the HSM device that you selected when setting up your Luna Cloud HSM client
|
||||
- `HSM_KEY_LABEL` - The label for the HSM key. If no key is found with the provided key label, the HSM will create a new key with the provided label.
|
||||
|
||||
The following is an example of the secret that you should update:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: infisical-secrets
|
||||
type: Opaque
|
||||
stringData:
|
||||
# ... Other environment variables ...
|
||||
HSM_LIB_PATH: "/hsm-client/libs/64/libCryptoki2.so" # If you followed this guide, this will be the path of the Luna Cloud HSM client
|
||||
HSM_PIN: "<your-hsm-device-pin>"
|
||||
HSM_SLOT: "<hsm-device-slot>"
|
||||
HSM_KEY_LABEL: "<your-key-label>"
|
||||
```

Save the file after updating the environment variables, then apply the secret changes:

```bash
kubectl apply -f ./secret-file-name.yaml
```
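To sanity-check that the new values were applied, you can decode one of the keys back out of the secret. A minimal example, assuming the secret name and key names from the snippet above:

```bash
# Decode HSM_SLOT from the applied secret to confirm the update landed.
kubectl get secret infisical-secrets -o jsonpath='{.data.HSM_SLOT}' | base64 -d
```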

</Step>

<Step title="Updating the Deployment">
After we've successfully configured the PVC and updated our environment variables, we are ready to update the deployment configuration so that the pods it creates can access the HSM client files.

We need to update the Docker image of the deployment to use `infisical/infisical-fips`. The `infisical/infisical-fips` image is functionally identical to the `infisical/infisical` image, but it is built with support for HSM encryption.

```yaml
# ... The rest of the values.yaml file ...

image:
  repository: infisical/infisical-fips # Very important: Must use "infisical/infisical-fips"
  tag: "v0.117.1-postgres"
  pullPolicy: IfNotPresent

extraVolumeMounts:
  - name: hsm-data
    mountPath: /hsm-client # The path we will mount the HSM client files to
    subPath: ./hsm-client

extraVolumes:
  - name: hsm-data
    persistentVolumeClaim:
      claimName: infisical-data-pvc # The PVC we created in the previous step

# ... The rest of the values.yaml file ...
```

</Step>

<Step title="Upgrading the Helm Chart">
After updating the values.yaml file, you need to upgrade the Helm chart for the changes to take effect.

```bash
helm upgrade --install infisical infisical-helm-charts/infisical-standalone --values /path/to/values.yaml
```
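Before restarting anything, you can confirm the release upgraded cleanly. A quick check, assuming the release name `infisical` from the command above:

```bash
# Show the release status and revision; STATUS should read "deployed".
helm status infisical
```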
</Step>

<Step title="Restarting the Deployment">
After upgrading the Helm chart, you need to restart the deployment for the changes to take effect.

```bash
kubectl rollout restart deployment/infisical-infisical
```
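The restart command returns immediately, so it helps to watch the rollout finish before verifying HSM encryption in the Server Admin Console. This assumes the deployment name matches the one used above:

```bash
# Block until the restarted pods are ready and the rollout completes.
kubectl rollout status deployment/infisical-infisical --timeout=300s
```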
</Step>
</Steps>

After following these steps, your Kubernetes setup will be ready to use HSM encryption.
</Tab>
</Tabs>
</Tab>
</Tabs>

## Disabling HSM Encryption

To disable HSM encryption, navigate to Infisical's Server Admin Console and set the KMS encryption strategy to `Software-based Encryption`. This will revert to the default software-based encryption.
BIN docs/images/platform/access-controls/abac-policies-by-auth.png (new file; 478 KiB)
BIN docs/images/platform/access-controls/abac-policy-oidc-format.png (new file; 544 KiB)
BIN (image updated; before: 1.6 MiB, after: 430 KiB)
@@ -6,7 +6,7 @@ description: "Learn how to fetch secrets from Infisical with Terraform using bot

This guide demonstrates how to use Infisical to manage secrets in your Terraform infrastructure code, supporting both traditional data sources and ephemeral resources for enhanced security. It uses:

- Infisical (you can use [Infisical Cloud](https://app.infisical.com) or a [self-hosted instance of Infisical](https://infisical.com/docs/self-hosting/overview)) to store your secrets
- The [Terraform Provider](https://registry.terraform.io/providers/Infisical/infisical/latest/docs) to fetch secrets for your infrastructure

## Prerequisites
docs/integrations/platforms/apache-airflow.mdx (new file)
@@ -0,0 +1,5 @@

---
title: "Apache Airflow"
description: "Learn how to use Infisical as your custom secrets backend in Apache Airflow."
url: "https://github.com/Infisical/airflow-provider-infisical?tab=readme-ov-file#airflow-infisical-provider"
---
@@ -43,8 +43,11 @@ description: "Learn how to configure an AWS Parameter Store Sync for Infisical."

- **KMS Key**: The AWS KMS key ID or alias to encrypt parameters with.
- **Tags**: Optional resource tags to add to parameters synced by Infisical.
- **Sync Secret Metadata as Resource Tags**: If enabled, metadata attached to secrets will be added as resource tags to parameters synced by Infisical.
  <Note>
    Manually configured tags from the **Tags** field will take precedence over secret metadata when tag keys conflict.
  </Note>
- **Auto-Sync Enabled**: If enabled, secrets will automatically be synced from the source location when changes occur. Disable to enforce manual syncing only.
- **Disable Secret Deletion**: If enabled, Infisical will not remove secrets from the sync destination. Enable this option if you intend to manage some secrets manually outside of Infisical.

6. Configure the **Details** of your Parameter Store Sync, then click **Next**.
   ![](/images/secret-syncs/aws-parameter-store/aws-parameter-store-details.png)
@@ -46,7 +46,11 @@ description: "Learn how to configure an AWS Secrets Manager Sync for Infisical."

- **KMS Key**: The AWS KMS key ID or alias to encrypt secrets with.
- **Tags**: Optional tags to add to secrets synced by Infisical.
- **Sync Secret Metadata as Tags**: If enabled, metadata attached to secrets will be added as tags to secrets synced by Infisical.
  <Note>
    Manually configured tags from the **Tags** field will take precedence over secret metadata when tag keys conflict.
  </Note>
- **Auto-Sync Enabled**: If enabled, secrets will automatically be synced from the source location when changes occur. Disable to enforce manual syncing only.
- **Disable Secret Deletion**: If enabled, Infisical will not remove secrets from the sync destination. Enable this option if you intend to manage some secrets manually outside of Infisical.

6. Configure the **Details** of your Secrets Manager Sync, then click **Next**.
   ![](/images/secret-syncs/aws-secrets-manager/secrets-manager-details.png)
@@ -6,7 +6,7 @@ description: "Learn how to configure an Azure App Configuration Sync for Infisic

**Prerequisites:**

- Set up and add secrets to [Infisical Cloud](https://app.infisical.com)
- Create an [Azure App Configuration Connection](/integrations/app-connections/azure-app-configuration)

<Note>
  The Azure App Configuration Secret Sync requires the following permissions to be set on the user / service principal
@@ -50,6 +50,7 @@ description: "Learn how to configure an Azure App Configuration Sync for Infisic

- **Import Secrets (Prioritize Azure App Configuration)**: Imports secrets from the destination endpoint before syncing, prioritizing values from Azure App Configuration over Infisical when keys conflict.

- **Auto-Sync Enabled**: If enabled, secrets will automatically be synced from the source location when changes occur. Disable to enforce manual syncing only.
- **Disable Secret Deletion**: If enabled, Infisical will not remove secrets from the sync destination. Enable this option if you intend to manage some secrets manually outside of Infisical.

6. Configure the **Details** of your Azure App Configuration Sync, then click **Next**.
   ![](/images/secret-syncs/azure-app-configuration/app-config-details.png)
@@ -6,7 +6,7 @@ description: "Learn how to configure a Azure Key Vault Sync for Infisical."

**Prerequisites:**

- Set up and add secrets to [Infisical Cloud](https://app.infisical.com)
- Create an [Azure Key Vault Connection](/integrations/app-connections/azure-key-vault)

<Note>
  The Azure Key Vault Secret Sync requires the following secrets permissions to be set on the user / service principal
@@ -52,6 +52,7 @@ description: "Learn how to configure a Azure Key Vault Sync for Infisical."

- **Import Secrets (Prioritize Infisical)**: Imports secrets from the destination endpoint before syncing, prioritizing values from Infisical over Azure Key Vault when keys conflict.
- **Import Secrets (Prioritize Azure Key Vault)**: Imports secrets from the destination endpoint before syncing, prioritizing values from Azure Key Vault over Infisical when keys conflict.
- **Auto-Sync Enabled**: If enabled, secrets will automatically be synced from the source location when changes occur. Disable to enforce manual syncing only.
- **Disable Secret Deletion**: If enabled, Infisical will not remove secrets from the sync destination. Enable this option if you intend to manage some secrets manually outside of Infisical.

6. Configure the **Details** of your Azure Key Vault Sync, then click **Next**.
   ![](/images/secret-syncs/azure-key-vault/key-vault-details.png)