Compare commits

...

98 Commits

Author SHA1 Message Date
Vlad Matsiiako
7503876ca0 Merge pull request #2683 from Infisical/blueprint-org-structure
added blueprint for org structure
2024-11-03 09:48:27 -08:00
Vladyslav Matsiiako
36b5a3dc90 fix typo 2024-11-03 09:40:33 -08:00
Vlad Matsiiako
dfe36f346f Merge pull request #2682 from cyberbohu/patch-1
Update overview.mdx
2024-11-03 09:29:56 -08:00
Vladyslav Matsiiako
b1b61842c6 added blueprint for org structure 2024-11-03 09:29:05 -08:00
cyberbohu
f9ca9b51b2 Update overview.mdx
spell check
2024-11-03 12:37:30 +01:00
Maidul Islam
7e7e6ade5c Update deployment-pipeline.yml 2024-11-02 13:19:50 -04:00
Maidul Islam
4010817916 Increase batch size and remove transation 2024-11-02 12:48:34 -04:00
Maidul Islam
eea367c3bc Merge pull request #2606 from Infisical/daniel/multiple-auth-methods
feat: multiple auth methods for identities
2024-11-02 12:17:37 -04:00
Maidul Islam
56567ee7c9 Update deployment-pipeline.yml 2024-11-02 11:31:37 -04:00
BlackMagiq
6b7bc2a3c4 Merge pull request #2677 from un/main
fix: minor typo
2024-11-01 13:12:07 -07:00
McPizza0
cb52568ebd fix: minor typo 2024-11-01 19:59:08 +01:00
Vlad Matsiiako
9d30fb3870 Merge pull request #2676 from scott-ray-wilson/oidc-default-org-docs
Docs: OIDC Default Org Support and OIDC/SAML Tip/Info Improvements
2024-11-01 10:46:15 -07:00
Scott Wilson
161ac5e097 docs: oidc added to default org description and improve oidc/smal info/tips 2024-11-01 10:38:57 -07:00
Maidul Islam
bb5b585cf6 Merge pull request #2674 from scott-ray-wilson/docs-update-api-base-url
Docs: Update OpenAPI Spec Servers
2024-11-01 00:54:51 -04:00
BlackMagiq
fa94191c40 Merge pull request #2673 from areifert/misc/make-azure-devops-variables-secret
Make synced Azure DevOps variables secret
2024-10-31 20:39:59 -07:00
Scott Wilson
6a5eabc411 docs: update urls for openapi docs 2024-10-31 19:51:26 -07:00
Maidul Islam
c956a0f91f Merge pull request #2667 from scott-ray-wilson/oidc-default-org-slug
Feature: OIDC Default Org
2024-10-31 21:56:53 -04:00
Scott Wilson
df7b55606e feature: oidc support for oidc and only display saml/oidc login if enforced 2024-10-31 15:13:13 -07:00
areifert
5f14b27f41 Make Azure DevOps variables secret 2024-10-31 13:29:43 -06:00
Scott Wilson
02b2395276 Merge pull request #2666 from scott-ray-wilson/snowflake-dynamic-secrets
Feature: Snowflake Dynamic Secrets
2024-10-31 11:58:00 -07:00
Scott Wilson
402fa2b0e0 fix: correct typo 2024-10-31 11:53:02 -07:00
Scott Wilson
3725241f52 improvement: improve error for leases 2024-10-31 10:39:43 -07:00
Scott Wilson
10b457a695 fix: correct early return for renew 2024-10-31 10:31:50 -07:00
Scott Wilson
3912e2082d fix: check that renew statement actually exists 2024-10-31 10:27:55 -07:00
Scott Wilson
7dd6eac20a improvement: address feedback 2024-10-31 10:24:30 -07:00
Sheen
5664e1ff26 Merge pull request #2670 from Infisical/feat/added-key-id-column
feat: added key id column
2024-11-01 01:14:46 +08:00
Sheen Capadngan
a27a428329 misc: added mint json changes 2024-11-01 00:47:06 +08:00
Sheen Capadngan
b196251c19 doc: add kubernetes encryption 2024-11-01 00:42:23 +08:00
Sheen Capadngan
b18d8d542f misc: add copy to clipboard 2024-11-01 00:22:21 +08:00
Scott Wilson
3c287600ab Merge pull request #2645 from scott-ray-wilson/secrets-quick-search
Feature: Secrets Dashboard Quick/Deep Search
2024-10-31 08:41:54 -07:00
Sheen Capadngan
759d11ff21 feat: added key id column 2024-10-31 19:05:53 +08:00
Maidul Islam
2bd817765c Merge pull request #2668 from Infisical/vmatsiiako-link-patch-1
Update audit-log-streams.mdx
2024-10-30 23:08:12 -04:00
Vlad Matsiiako
7aa9c5dd00 Update audit-log-streams.mdx 2024-10-30 19:55:32 -07:00
Scott Wilson
b693c035ce chore: remove dev value 2024-10-30 15:02:25 -07:00
Scott Wilson
c65a991943 fix: add missing type properties on client side 2024-10-30 15:01:21 -07:00
Scott Wilson
3a3811cb3c feature: snowflake dynamic secrets 2024-10-30 14:57:15 -07:00
Maidul Islam
332ca61f5d Merge pull request #2665 from akhilmhdh/fix/resolve-service-token
feat: fixed missing secret folder and import permission in service token
2024-10-30 15:50:59 -04:00
=
64f43e59d0 feat: fixed missing secret folder and import permission in service token 2024-10-31 01:17:41 +05:30
Maidul Islam
ccaf4c00af Merge pull request #2663 from Infisical/vmatsiiako-docs-link-patch-1
fix link in docs
2024-10-30 15:47:24 -04:00
Scott Wilson
e3ba1c59bf improvement: add search filter tooltip to quick search 2024-10-30 10:38:25 -07:00
Scott Wilson
ce0bc191d8 Merge pull request #2655 from Daemoen/daemoen/minor-grammar-correction
Daemoen/minor grammar correction
2024-10-30 09:07:51 -07:00
Vlad Matsiiako
489ccb8e15 fix link in docs 2024-10-29 21:57:50 -07:00
Maidul Islam
ae8f695b6f Update attribute-based-access-controls.mdx 2024-10-29 23:10:53 -04:00
Maidul Islam
19357d4bd7 Merge pull request #2662 from Infisical/vmatsiiako-docs-audit-patch-1
Update audit-log-streams.mdx
2024-10-29 23:07:16 -04:00
Vlad Matsiiako
776d0a0fe1 Update audit-log-streams.mdx 2024-10-29 20:06:27 -07:00
Maidul Islam
85dec28667 Merge pull request #2661 from Infisical/maidul-ABAC
abac docs
2024-10-29 18:40:25 -04:00
Scott Wilson
21ea7dd317 feature: deep search for secrets dashboard 2024-10-29 15:08:19 -07:00
Scott Wilson
57e214ef50 improvement: add back comma 2024-10-29 14:46:53 -07:00
Scott Wilson
1986fe9617 improvement: minor doc adjustment and add new page to sidebar 2024-10-29 14:45:38 -07:00
Scott Wilson
1309f30af9 Merge pull request #2660 from scott-ray-wilson/invite-link-expanded
Improvement: Display Full Invite Link in Insecure Context and Increase Default Pagination Size to 100
2024-10-29 13:32:09 -07:00
Maidul Islam
89a4fc91ca abac docs 2024-10-29 15:42:38 -04:00
Marc Mercer
af0ec2400d Reverting as not to affect logging consistency 2024-10-29 12:08:09 -07:00
Scott Wilson
770e73e40b improvement: adjust default pagination size and do not truncate invite links in insecure context 2024-10-29 11:10:51 -07:00
Sheen
39fdeabdea Merge pull request #2646 from Infisical/feat/sap-hana-dynamic-secrets
feat: SAP HANA dynamic secrets
2024-10-29 22:45:46 +08:00
Maidul Islam
25c26f2cde Merge pull request #2658 from Infisical/misc/add-missing-helm-updates-operator
misc: added helm related configs for operator
2024-10-29 09:53:17 -04:00
Sheen Capadngan
1ca8b9ba08 misc: install secret operator updates 2024-10-29 21:50:23 +08:00
Sheen Capadngan
14d9fe01e0 misc: updated chart 2024-10-29 21:46:17 +08:00
Sheen Capadngan
216810f289 misc: added helm related configs 2024-10-29 13:37:03 +08:00
Maidul Islam
f530b78eb8 Merge pull request #2652 from Infisical/feat/add-support-for-custom-ca
feat: add support for custom ca in k8 operator
2024-10-29 01:14:30 -04:00
Sheen Capadngan
c3809ed22b Merge branch 'feat/add-support-for-custom-ca' of https://github.com/Infisical/infisical into feat/add-support-for-custom-ca 2024-10-29 12:00:09 +08:00
Sheen Capadngan
9f85d8bba1 feat: added handling of empty ca 2024-10-29 11:59:41 +08:00
Maidul Islam
1056645ee3 fix small nit 2024-10-28 22:25:21 -04:00
Maidul Islam
5e9914b738 Merge pull request #2657 from Infisical/vmatsiiako--docs-patch-1
Update docker-swarm.mdx
2024-10-28 22:17:19 -04:00
Maidul Islam
1ea52e6a80 update chart version 2024-10-28 21:03:27 -04:00
Maidul Islam
20da697de8 rename change log file 2024-10-28 21:01:20 -04:00
Maidul Islam
16abf48081 add change log 2024-10-28 20:56:42 -04:00
Maidul Islam
e73ae485bc patch service account namespace 2024-10-28 20:32:38 -04:00
Maidul Islam
621f73e223 add support for variable init container img 2024-10-28 20:32:38 -04:00
Scott Wilson
93e69bd34e Merge pull request #2656 from scott-ray-wilson/insecure-context-banner
Feature: Display Warning Banner for Insecure Connection
2024-10-28 16:47:18 -07:00
Scott Wilson
e382135384 improvements: make banner full width and adjust icon/margins 2024-10-28 16:43:15 -07:00
Vlad Matsiiako
f2a554b5fd Update docker-swarm.mdx 2024-10-28 16:16:36 -07:00
Scott Wilson
df5bdf3773 feature: display warning banner for insecure context 2024-10-28 16:00:40 -07:00
Marc Mercer
8401048daf Final fix 2024-10-28 13:28:47 -07:00
Marc Mercer
335a87d856 Think I got them all 2024-10-28 13:20:36 -07:00
Marc Mercer
1add9dd965 Minor adjustments to grammar and consistency 2024-10-28 13:00:29 -07:00
Scott Wilson
df46daf93d Merge pull request #2654 from scott-ray-wilson/kms-doc-fix
Docs: Correct KMS API Docs for Decrypt Endpoint
2024-10-28 11:18:43 -07:00
Scott Wilson
f82f7ae8d0 fix: correct api constant reference for kms docs 2024-10-28 11:10:04 -07:00
Scott Wilson
8536a1c987 Merge pull request #2653 from scott-ray-wilson/fix-copy-shared-secret-link
Fix: Copy Shared Secret Link to Clipboard on Generate
2024-10-28 10:56:19 -07:00
Scott Wilson
b3cf43b46d fix: copy shared secret link to clipboard on generate 2024-10-28 10:25:09 -07:00
Sheen Capadngan
9d4dbb63ae misc: updated go-sdk version 2024-10-28 21:49:34 +08:00
Sheen Capadngan
9c6f23fba6 misc: documentation and samples 2024-10-28 17:45:49 +08:00
Sheen Capadngan
babe483ca9 feat: add support for custom ca in k8 operator 2024-10-28 17:03:56 +08:00
Maidul Islam
38ede687cd Merge pull request #2650 from Infisical/revert-2649-revert-2603-feat/secret-reference-path-way
"feat: secret reference graph for understanding how its pulled""
2024-10-27 22:04:52 -04:00
Maidul Islam
5f465c4832 update env of prod eu ci 2024-10-26 21:19:53 -04:00
Maidul Islam
a0618086b0 update sts endpoint for eu ci 2024-10-26 20:38:37 -04:00
Maidul Islam
9a9bb4ca43 update eu deployment job 2024-10-26 18:45:12 -04:00
Maidul Islam
b68ddfae1b wait for gamma to be fully deployed 2024-10-26 17:57:28 -04:00
Maidul Islam
7646670378 update ci job names 2024-10-26 17:46:27 -04:00
Maidul Islam
d18be0f74c fix deployment ci 2024-10-26 17:44:05 -04:00
Maidul Islam
ec96db3503 Add EU support in deployment 2024-10-26 17:41:08 -04:00
Scott Wilson
bed620aad0 fix: downgrade collapsible version 2024-10-26 11:12:38 -07:00
Maidul Islam
02d9dbb987 Revert "Revert "feat: secret reference graph for understanding how its pulled"" 2024-10-26 13:51:07 -04:00
Sheen Capadngan
3841394eb7 misc: migrated existing to new helper 2024-10-26 03:18:57 +08:00
Sheen Capadngan
3552119c7d misc: moved host validity check to helper 2024-10-25 22:49:57 +08:00
Sheen Capadngan
7a46725523 doc: added note for transaction 2024-10-25 22:45:06 +08:00
Sheen Capadngan
3bc39c6cec feat: add usage of ca 2024-10-25 21:29:47 +08:00
Sheen Capadngan
b5b1e57fe7 doc: sap hana 2024-10-25 21:19:22 +08:00
Sheen Capadngan
1a5f66fe46 feat: added support for sap hana dynamic secrets 2024-10-25 20:33:23 +08:00
157 changed files with 8446 additions and 1620 deletions


@@ -7,12 +7,12 @@ permissions:
jobs:
infisical-tests:
name: Run tests before deployment
name: Integration tests
# https://docs.github.com/en/actions/using-workflows/reusing-workflows#overview
uses: ./.github/workflows/run-backend-tests.yml
infisical-image:
name: Build backend image
name: Build
runs-on: ubuntu-latest
needs: [infisical-tests]
steps:
@@ -102,10 +102,10 @@ jobs:
task-definition: ${{ steps.render-web-container.outputs.task-definition }}
service: infisical-core-gamma-stage
cluster: infisical-gamma-stage
wait-for-service-stability: false
wait-for-service-stability: true
production-postgres-deployment:
name: Deploy to production
production-us:
name: US production deploy
runs-on: ubuntu-latest
needs: [gamma-deployment]
environment:
@@ -159,3 +159,54 @@ jobs:
service: infisical-core-platform
cluster: infisical-core-platform
wait-for-service-stability: true
production-eu:
name: EU production deploy
runs-on: ubuntu-latest
needs: [production-us]
environment:
name: production-eu
steps:
- uses: twingate/github-action@v1
with:
service-key: ${{ secrets.TWINGATE_SERVICE_KEY }}
- name: Configure AWS Credentials
uses: aws-actions/configure-aws-credentials@v4
with:
audience: sts.amazonaws.com
aws-region: eu-central-1
role-to-assume: arn:aws:iam::345594589636:role/gha-make-prod-deployment
- name: Checkout code
uses: actions/checkout@v2
- name: Setup Node.js environment
uses: actions/setup-node@v2
with:
node-version: "20"
- name: Change directory to backend and install dependencies
env:
DB_CONNECTION_URI: ${{ secrets.DB_CONNECTION_URI }}
run: |
cd backend
npm install
npm run migration:latest
- name: Save commit hashes for tag
id: commit
uses: pr-mpt/actions-commit-hash@v2
- name: Download task definition
run: |
aws ecs describe-task-definition --task-definition infisical-core-platform --query taskDefinition > task-definition.json
- name: Render Amazon ECS task definition
id: render-web-container
uses: aws-actions/amazon-ecs-render-task-definition@v1
with:
task-definition: task-definition.json
container-name: infisical-core-platform
image: infisical/staging_infisical:${{ steps.commit.outputs.short }}
environment-variables: "LOG_LEVEL=info"
- name: Deploy to Amazon ECS service
uses: aws-actions/amazon-ecs-deploy-task-definition@v1
with:
task-definition: ${{ steps.render-web-container.outputs.task-definition }}
service: infisical-core-platform
cluster: infisical-core-platform
wait-for-service-stability: true

backend/package-lock.json (generated, 2749 changes)

File diff suppressed because it is too large


@@ -161,6 +161,7 @@
"google-auth-library": "^9.9.0",
"googleapis": "^137.1.0",
"handlebars": "^4.7.8",
"hdb": "^0.19.10",
"ioredis": "^5.3.2",
"jmespath": "^0.16.0",
"jsonwebtoken": "^9.0.2",
@@ -195,6 +196,7 @@
"scim2-parse-filter": "^0.2.10",
"sjcl": "^1.0.8",
"smee-client": "^2.0.0",
"snowflake-sdk": "^1.14.0",
"tedious": "^18.2.1",
"tweetnacl": "^1.0.3",
"tweetnacl-util": "^0.15.1",

backend/src/@types/hdb.d.ts (new vendored file, 4 changes)

@@ -0,0 +1,4 @@
declare module "hdb" {
// eslint-disable-next-line @typescript-eslint/no-explicit-any -- Untyped, the function returns `any`.
function createClient(options): any;
}
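// This ambient declaration is what lets the backend `import hdb from "hdb"`
// (see the SAP HANA provider further below) without a missing-types compile
// error; every call on the module is deliberately typed as `any`.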


@@ -9,7 +9,7 @@ export async function up(knex: Knex): Promise<void> {
t.uuid("id", { primaryKey: true }).defaultTo(knex.fn.uuid());
t.string("integration").notNullable();
t.string("teamId"); // vercel-specific
t.string("url"); // for self hosted
t.string("url"); // for self-hosted
t.string("namespace"); // hashicorp specific
t.string("accountId"); // netlify
t.text("refreshCiphertext");
@@ -36,7 +36,7 @@ export async function up(knex: Knex): Promise<void> {
await knex.schema.createTable(TableName.Integration, (t) => {
t.uuid("id", { primaryKey: true }).defaultTo(knex.fn.uuid());
t.boolean("isActive").notNullable();
t.string("url"); // self hosted
t.string("url"); // self-hosted
t.string("app"); // name of app in provider
t.string("appId");
t.string("targetEnvironment");


@@ -2,7 +2,7 @@ import { Knex } from "knex";
import { TableName } from "../schemas";
const BATCH_SIZE = 10_000;
const BATCH_SIZE = 30_000;
export async function up(knex: Knex): Promise<void> {
const hasAuthMethodColumnAccessToken = await knex.schema.hasColumn(TableName.IdentityAccessToken, "authMethod");
@@ -71,3 +71,6 @@ export async function down(knex: Knex): Promise<void> {
});
}
}
const config = { transaction: false };
export { config };
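// Knex checks each migration module for an exported `config` object;
// `transaction: false` opts this migration out of the single wrapping
// transaction, so each 30k-row batch above commits as it runs instead
// of holding one long transaction over the whole backfill.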


@@ -0,0 +1,20 @@
import { getConfig } from "@app/lib/config/env";
import { BadRequestError } from "@app/lib/errors";
import { getDbConnectionHost } from "@app/lib/knex";
export const verifyHostInputValidity = (host: string) => {
const appCfg = getConfig();
const dbHost = appCfg.DB_HOST || getDbConnectionHost(appCfg.DB_CONNECTION_URI);
if (
appCfg.isCloud &&
// localhost
// internal ips
(host === "host.docker.internal" || host.match(/^10\.\d+\.\d+\.\d+/) || host.match(/^192\.168\.\d+\.\d+/))
)
throw new BadRequestError({ message: "Invalid db host" });
if (host === "localhost" || host === "127.0.0.1" || dbHost === host) {
throw new BadRequestError({ message: "Invalid db host" });
}
};
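// Behavior sketch (hosts illustrative): on cloud deployments the helper
// rejects "host.docker.internal" and the 10.x / 192.168.x internal ranges;
// "localhost", "127.0.0.1", and Infisical's own DB host are rejected
// everywhere; anything else passes.
// verifyHostInputValidity("localhost");      // throws BadRequestError
// verifyHostInputValidity("10.8.0.12");      // throws on cloud
// verifyHostInputValidity("db.example.com"); // ok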


@@ -9,7 +9,7 @@ import {
} from "@app/ee/services/permission/project-permission";
import { infisicalSymmetricDecrypt, infisicalSymmetricEncypt } from "@app/lib/crypto/encryption";
import { BadRequestError, NotFoundError } from "@app/lib/errors";
import { OrderByDirection } from "@app/lib/types";
import { OrderByDirection, ProjectServiceActor } from "@app/lib/types";
import { TProjectDALFactory } from "@app/services/project/project-dal";
import { TSecretFolderDALFactory } from "@app/services/secret-folder/secret-folder-dal";
@@ -22,6 +22,7 @@ import {
TDeleteDynamicSecretDTO,
TDetailsDynamicSecretDTO,
TGetDynamicSecretsCountDTO,
TListDynamicSecretsByFolderMappingsDTO,
TListDynamicSecretsDTO,
TListDynamicSecretsMultiEnvDTO,
TUpdateDynamicSecretDTO
@@ -454,8 +455,44 @@ export const dynamicSecretServiceFactory = ({
return dynamicSecretCfg;
};
const listDynamicSecretsByFolderIds = async (
{ folderMappings, filters, projectId }: TListDynamicSecretsByFolderMappingsDTO,
actor: ProjectServiceActor
) => {
const { permission } = await permissionService.getProjectPermission(
actor.type,
actor.id,
projectId,
actor.authMethod,
actor.orgId
);
const userAccessibleFolderMappings = folderMappings.filter(({ path, environment }) =>
permission.can(
ProjectPermissionDynamicSecretActions.ReadRootCredential,
subject(ProjectPermissionSub.DynamicSecrets, { environment, secretPath: path })
)
);
const groupedFolderMappings = new Map(userAccessibleFolderMappings.map((path) => [path.folderId, path]));
const dynamicSecrets = await dynamicSecretDAL.listDynamicSecretsByFolderIds({
folderIds: userAccessibleFolderMappings.map(({ folderId }) => folderId),
...filters
});
return dynamicSecrets.map((dynamicSecret) => {
const { environment, path } = groupedFolderMappings.get(dynamicSecret.folderId)!;
return {
...dynamicSecret,
environment,
path
};
});
};
// get dynamic secrets for multiple envs
const listDynamicSecretsByFolderIds = async ({
const listDynamicSecretsByEnvs = async ({
actorAuthMethod,
actorOrgId,
actorId,
@@ -521,9 +558,10 @@ export const dynamicSecretServiceFactory = ({
deleteByName,
getDetails,
listDynamicSecretsByEnv,
listDynamicSecretsByFolderIds,
listDynamicSecretsByEnvs,
getDynamicSecretCount,
getCountMultiEnv,
fetchAzureEntraIdUsers
fetchAzureEntraIdUsers,
listDynamicSecretsByFolderIds
};
};


@@ -48,17 +48,27 @@ export type TDetailsDynamicSecretDTO = {
projectSlug: string;
} & Omit<TProjectPermission, "projectId">;
export type TListDynamicSecretsDTO = {
path: string;
environmentSlug: string;
projectSlug?: string;
projectId?: string;
export type ListDynamicSecretsFilters = {
offset?: number;
limit?: number;
orderBy?: SecretsOrderBy;
orderDirection?: OrderByDirection;
search?: string;
} & Omit<TProjectPermission, "projectId">;
};
export type TListDynamicSecretsDTO = {
path: string;
environmentSlug: string;
projectSlug?: string;
projectId?: string;
} & ListDynamicSecretsFilters &
Omit<TProjectPermission, "projectId">;
export type TListDynamicSecretsByFolderMappingsDTO = {
projectId: string;
folderMappings: { folderId: string; path: string; environment: string }[];
filters: ListDynamicSecretsFilters;
};
export type TListDynamicSecretsMultiEnvDTO = Omit<
TListDynamicSecretsDTO,


@@ -2,10 +2,9 @@ import { Client as ElasticSearchClient } from "@elastic/elasticsearch";
import { customAlphabet } from "nanoid";
import { z } from "zod";
import { getConfig } from "@app/lib/config/env";
import { BadRequestError } from "@app/lib/errors";
import { alphaNumericNanoId } from "@app/lib/nanoid";
import { verifyHostInputValidity } from "../dynamic-secret-fns";
import { DynamicSecretElasticSearchSchema, ElasticSearchAuthTypes, TDynamicProviderFns } from "./models";
const generatePassword = () => {
@@ -19,23 +18,8 @@ const generateUsername = () => {
export const ElasticSearchProvider = (): TDynamicProviderFns => {
const validateProviderInputs = async (inputs: unknown) => {
const appCfg = getConfig();
const isCloud = Boolean(appCfg.LICENSE_SERVER_KEY); // quick and dirty way to check if its cloud or not
const providerInputs = await DynamicSecretElasticSearchSchema.parseAsync(inputs);
if (
isCloud &&
// localhost
// internal ips
(providerInputs.host === "host.docker.internal" ||
providerInputs.host.match(/^10\.\d+\.\d+\.\d+/) ||
providerInputs.host.match(/^192\.168\.\d+\.\d+/))
) {
throw new BadRequestError({ message: "Invalid db host" });
}
if (providerInputs.host === "localhost" || providerInputs.host === "127.0.0.1") {
throw new BadRequestError({ message: "Invalid db host" });
}
verifyHostInputValidity(providerInputs.host);
return providerInputs;
};


@@ -1,3 +1,5 @@
import { SnowflakeProvider } from "@app/ee/services/dynamic-secret/providers/snowflake";
import { AwsElastiCacheDatabaseProvider } from "./aws-elasticache";
import { AwsIamProvider } from "./aws-iam";
import { AzureEntraIDProvider } from "./azure-entra-id";
@@ -9,6 +11,7 @@ import { MongoAtlasProvider } from "./mongo-atlas";
import { MongoDBProvider } from "./mongo-db";
import { RabbitMqProvider } from "./rabbit-mq";
import { RedisDatabaseProvider } from "./redis";
import { SapHanaProvider } from "./sap-hana";
import { SqlDatabaseProvider } from "./sql-database";
export const buildDynamicSecretProviders = () => ({
@@ -22,5 +25,7 @@ export const buildDynamicSecretProviders = () => ({
[DynamicSecretProviders.ElasticSearch]: ElasticSearchProvider(),
[DynamicSecretProviders.RabbitMq]: RabbitMqProvider(),
[DynamicSecretProviders.AzureEntraID]: AzureEntraIDProvider(),
[DynamicSecretProviders.Ldap]: LdapProvider()
[DynamicSecretProviders.Ldap]: LdapProvider(),
[DynamicSecretProviders.SapHana]: SapHanaProvider(),
[DynamicSecretProviders.Snowflake]: SnowflakeProvider()
});


@@ -166,6 +166,27 @@ export const DynamicSecretMongoDBSchema = z.object({
)
});
export const DynamicSecretSapHanaSchema = z.object({
host: z.string().trim().toLowerCase(),
port: z.number(),
username: z.string().trim(),
password: z.string().trim(),
creationStatement: z.string().trim(),
revocationStatement: z.string().trim(),
renewStatement: z.string().trim().optional(),
ca: z.string().optional()
});
export const DynamicSecretSnowflakeSchema = z.object({
accountId: z.string().trim().min(1),
orgId: z.string().trim().min(1),
username: z.string().trim().min(1),
password: z.string().trim().min(1),
creationStatement: z.string().trim().min(1),
revocationStatement: z.string().trim().min(1),
renewStatement: z.string().trim().optional()
});
export const AzureEntraIDSchema = z.object({
tenantId: z.string().trim().min(1),
userId: z.string().trim().min(1),
@@ -196,7 +217,9 @@ export enum DynamicSecretProviders {
MongoDB = "mongo-db",
RabbitMq = "rabbit-mq",
AzureEntraID = "azure-entra-id",
Ldap = "ldap"
Ldap = "ldap",
SapHana = "sap-hana",
Snowflake = "snowflake"
}
export const DynamicSecretProviderSchema = z.discriminatedUnion("type", [
@@ -204,13 +227,15 @@ export const DynamicSecretProviderSchema = z.discriminatedUnion("type", [
z.object({ type: z.literal(DynamicSecretProviders.Cassandra), inputs: DynamicSecretCassandraSchema }),
z.object({ type: z.literal(DynamicSecretProviders.AwsIam), inputs: DynamicSecretAwsIamSchema }),
z.object({ type: z.literal(DynamicSecretProviders.Redis), inputs: DynamicSecretRedisDBSchema }),
z.object({ type: z.literal(DynamicSecretProviders.SapHana), inputs: DynamicSecretSapHanaSchema }),
z.object({ type: z.literal(DynamicSecretProviders.AwsElastiCache), inputs: DynamicSecretAwsElastiCacheSchema }),
z.object({ type: z.literal(DynamicSecretProviders.MongoAtlas), inputs: DynamicSecretMongoAtlasSchema }),
z.object({ type: z.literal(DynamicSecretProviders.ElasticSearch), inputs: DynamicSecretElasticSearchSchema }),
z.object({ type: z.literal(DynamicSecretProviders.MongoDB), inputs: DynamicSecretMongoDBSchema }),
z.object({ type: z.literal(DynamicSecretProviders.RabbitMq), inputs: DynamicSecretRabbitMqSchema }),
z.object({ type: z.literal(DynamicSecretProviders.AzureEntraID), inputs: AzureEntraIDSchema }),
z.object({ type: z.literal(DynamicSecretProviders.Ldap), inputs: LdapSchema })
z.object({ type: z.literal(DynamicSecretProviders.Ldap), inputs: LdapSchema }),
z.object({ type: z.literal(DynamicSecretProviders.Snowflake), inputs: DynamicSecretSnowflakeSchema })
]);
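// A payload accepted by the discriminated union above (values hypothetical;
// the {{...}} placeholders are handlebars variables filled at lease time):
// DynamicSecretProviderSchema.parse({
//   type: DynamicSecretProviders.Snowflake,
//   inputs: {
//     accountId: "ab12345",
//     orgId: "myorg",
//     username: "svc_infisical",
//     password: "********",
//     creationStatement: "CREATE USER {{username}} PASSWORD = '{{password}}' DAYS_TO_EXPIRY = {{expiration}};",
//     revocationStatement: "DROP USER {{username}};"
//   }
// });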
export type TDynamicProviderFns = {


@@ -2,10 +2,9 @@ import { MongoClient } from "mongodb";
import { customAlphabet } from "nanoid";
import { z } from "zod";
import { getConfig } from "@app/lib/config/env";
import { BadRequestError } from "@app/lib/errors";
import { alphaNumericNanoId } from "@app/lib/nanoid";
import { verifyHostInputValidity } from "../dynamic-secret-fns";
import { DynamicSecretMongoDBSchema, TDynamicProviderFns } from "./models";
const generatePassword = (size = 48) => {
@@ -19,22 +18,8 @@ const generateUsername = () => {
export const MongoDBProvider = (): TDynamicProviderFns => {
const validateProviderInputs = async (inputs: unknown) => {
const appCfg = getConfig();
const providerInputs = await DynamicSecretMongoDBSchema.parseAsync(inputs);
if (
appCfg.isCloud &&
// localhost
// internal ips
(providerInputs.host === "host.docker.internal" ||
providerInputs.host.match(/^10\.\d+\.\d+\.\d+/) ||
providerInputs.host.match(/^192\.168\.\d+\.\d+/))
)
throw new BadRequestError({ message: "Invalid db host" });
if (providerInputs.host === "localhost" || providerInputs.host === "127.0.0.1") {
throw new BadRequestError({ message: "Invalid db host" });
}
verifyHostInputValidity(providerInputs.host);
return providerInputs;
};


@@ -3,12 +3,11 @@ import https from "https";
import { customAlphabet } from "nanoid";
import { z } from "zod";
import { getConfig } from "@app/lib/config/env";
import { BadRequestError } from "@app/lib/errors";
import { removeTrailingSlash } from "@app/lib/fn";
import { logger } from "@app/lib/logger";
import { alphaNumericNanoId } from "@app/lib/nanoid";
import { verifyHostInputValidity } from "../dynamic-secret-fns";
import { DynamicSecretRabbitMqSchema, TDynamicProviderFns } from "./models";
const generatePassword = () => {
@@ -79,23 +78,8 @@ async function deleteRabbitMqUser({ axiosInstance, usernameToDelete }: TDeleteRa
export const RabbitMqProvider = (): TDynamicProviderFns => {
const validateProviderInputs = async (inputs: unknown) => {
const appCfg = getConfig();
const isCloud = Boolean(appCfg.LICENSE_SERVER_KEY); // quick and dirty way to check if its cloud or not
const providerInputs = await DynamicSecretRabbitMqSchema.parseAsync(inputs);
if (
isCloud &&
// localhost
// internal ips
(providerInputs.host === "host.docker.internal" ||
providerInputs.host.match(/^10\.\d+\.\d+\.\d+/) ||
providerInputs.host.match(/^192\.168\.\d+\.\d+/))
) {
throw new BadRequestError({ message: "Invalid db host" });
}
if (providerInputs.host === "localhost" || providerInputs.host === "127.0.0.1") {
throw new BadRequestError({ message: "Invalid db host" });
}
verifyHostInputValidity(providerInputs.host);
return providerInputs;
};


@@ -3,11 +3,10 @@ import { Redis } from "ioredis";
import { customAlphabet } from "nanoid";
import { z } from "zod";
import { getConfig } from "@app/lib/config/env";
import { BadRequestError } from "@app/lib/errors";
import { getDbConnectionHost } from "@app/lib/knex";
import { alphaNumericNanoId } from "@app/lib/nanoid";
import { verifyHostInputValidity } from "../dynamic-secret-fns";
import { DynamicSecretRedisDBSchema, TDynamicProviderFns } from "./models";
const generatePassword = () => {
@@ -51,22 +50,8 @@ const executeTransactions = async (connection: Redis, commands: string[]): Promi
export const RedisDatabaseProvider = (): TDynamicProviderFns => {
const validateProviderInputs = async (inputs: unknown) => {
const appCfg = getConfig();
const isCloud = Boolean(appCfg.LICENSE_SERVER_KEY); // quick and dirty way to check if its cloud or not
const dbHost = appCfg.DB_HOST || getDbConnectionHost(appCfg.DB_CONNECTION_URI);
const providerInputs = await DynamicSecretRedisDBSchema.parseAsync(inputs);
if (
isCloud &&
// localhost
// internal ips
(providerInputs.host === "host.docker.internal" ||
providerInputs.host.match(/^10\.\d+\.\d+\.\d+/) ||
providerInputs.host.match(/^192\.168\.\d+\.\d+/))
)
throw new BadRequestError({ message: "Invalid db host" });
if (providerInputs.host === "localhost" || providerInputs.host === "127.0.0.1" || dbHost === providerInputs.host)
throw new BadRequestError({ message: "Invalid db host" });
verifyHostInputValidity(providerInputs.host);
return providerInputs;
};


@@ -0,0 +1,174 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
/* eslint-disable @typescript-eslint/no-unsafe-member-access */
/* eslint-disable @typescript-eslint/no-unsafe-call */
/* eslint-disable @typescript-eslint/no-unsafe-return */
/* eslint-disable @typescript-eslint/no-unsafe-assignment */
import handlebars from "handlebars";
import hdb from "hdb";
import { customAlphabet } from "nanoid";
import { z } from "zod";
import { BadRequestError } from "@app/lib/errors";
import { alphaNumericNanoId } from "@app/lib/nanoid";
import { verifyHostInputValidity } from "../dynamic-secret-fns";
import { DynamicSecretSapHanaSchema, TDynamicProviderFns } from "./models";
const generatePassword = (size = 48) => {
const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
return customAlphabet(charset, 48)(size);
};
const generateUsername = () => {
return alphaNumericNanoId(32);
};
export const SapHanaProvider = (): TDynamicProviderFns => {
const validateProviderInputs = async (inputs: unknown) => {
const providerInputs = await DynamicSecretSapHanaSchema.parseAsync(inputs);
verifyHostInputValidity(providerInputs.host);
return providerInputs;
};
const getClient = async (providerInputs: z.infer<typeof DynamicSecretSapHanaSchema>) => {
const client = hdb.createClient({
host: providerInputs.host,
port: providerInputs.port,
user: providerInputs.username,
password: providerInputs.password,
...(providerInputs.ca
? {
ca: providerInputs.ca
}
: {})
});
await new Promise((resolve, reject) => {
client.connect((err: any) => {
if (err) {
return reject(err);
}
if (client.readyState) {
return resolve(true);
}
reject(new Error("SAP HANA client not ready"));
});
});
return client;
};
const validateConnection = async (inputs: unknown) => {
const providerInputs = await validateProviderInputs(inputs);
const client = await getClient(providerInputs);
const testResult: boolean = await new Promise((resolve, reject) => {
client.exec("SELECT 1 FROM DUMMY;", (err: any) => {
if (err) {
reject();
}
resolve(true);
});
});
return testResult;
};
const create = async (inputs: unknown, expireAt: number) => {
const providerInputs = await validateProviderInputs(inputs);
const username = generateUsername();
const password = generatePassword();
const expiration = new Date(expireAt).toISOString();
const client = await getClient(providerInputs);
const creationStatement = handlebars.compile(providerInputs.creationStatement, { noEscape: true })({
username,
password,
expiration
});
const queries = creationStatement.toString().split(";").filter(Boolean);
for await (const query of queries) {
await new Promise((resolve, reject) => {
client.exec(query, (err: any) => {
if (err) {
reject(
new BadRequestError({
message: err.message
})
);
}
resolve(true);
});
});
}
return { entityId: username, data: { DB_USERNAME: username, DB_PASSWORD: password } };
};
const revoke = async (inputs: unknown, username: string) => {
const providerInputs = await validateProviderInputs(inputs);
const client = await getClient(providerInputs);
const revokeStatement = handlebars.compile(providerInputs.revocationStatement)({ username });
const queries = revokeStatement.toString().split(";").filter(Boolean);
for await (const query of queries) {
await new Promise((resolve, reject) => {
client.exec(query, (err: any) => {
if (err) {
reject(
new BadRequestError({
message: err.message
})
);
}
resolve(true);
});
});
}
return { entityId: username };
};
const renew = async (inputs: unknown, username: string, expireAt: number) => {
const providerInputs = await validateProviderInputs(inputs);
const client = await getClient(providerInputs);
try {
const expiration = new Date(expireAt).toISOString();
const renewStatement = handlebars.compile(providerInputs.renewStatement)({ username, expiration });
const queries = renewStatement.toString().split(";").filter(Boolean);
for await (const query of queries) {
await new Promise((resolve, reject) => {
client.exec(query, (err: any) => {
if (err) {
reject(
new BadRequestError({
message: err.message
})
);
}
resolve(true);
});
});
}
} finally {
client.disconnect();
}
return { entityId: username };
};
return {
validateProviderInputs,
validateConnection,
create,
revoke,
renew
};
};
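For orientation: the provider's statements are handlebars templates over `username`, `password`, and an ISO-8601 `expiration`. A hypothetical SAP HANA creation template (an assumption for illustration, not taken from the diff):

const sapHanaCreationStatement =
  `CREATE USER {{username}} PASSWORD "{{password}}" VALID UNTIL '{{expiration}}';`;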


@@ -0,0 +1,174 @@
import handlebars from "handlebars";
import { customAlphabet } from "nanoid";
import snowflake from "snowflake-sdk";
import { z } from "zod";
import { BadRequestError } from "@app/lib/errors";
import { alphaNumericNanoId } from "@app/lib/nanoid";
import { DynamicSecretSnowflakeSchema, TDynamicProviderFns } from "./models";
// destroy client requires callback...
const noop = () => {};
const generatePassword = (size = 48) => {
const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_.~!*$#";
return customAlphabet(charset, 48)(size);
};
const generateUsername = () => {
return `infisical_${alphaNumericNanoId(32)}`; // username must start with alpha character, hence prefix
};
const getDaysToExpiry = (expiryDate: Date) => {
const start = new Date().getTime();
const end = new Date(expiryDate).getTime();
const diffTime = Math.abs(end - start);
return Math.ceil(diffTime / (1000 * 60 * 60 * 24));
};
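// e.g. an expiry 36 hours from now yields Math.ceil(1.5) = 2; Snowflake's
// DAYS_TO_EXPIRY only accepts whole days, hence the rounding up.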
export const SnowflakeProvider = (): TDynamicProviderFns => {
const validateProviderInputs = async (inputs: unknown) => {
const providerInputs = await DynamicSecretSnowflakeSchema.parseAsync(inputs);
return providerInputs;
};
const getClient = async (providerInputs: z.infer<typeof DynamicSecretSnowflakeSchema>) => {
const client = snowflake.createConnection({
account: `${providerInputs.orgId}-${providerInputs.accountId}`,
username: providerInputs.username,
password: providerInputs.password,
application: "Infisical"
});
await client.connectAsync(noop);
return client;
};
const validateConnection = async (inputs: unknown) => {
const providerInputs = await validateProviderInputs(inputs);
const client = await getClient(providerInputs);
let isValidConnection: boolean;
try {
isValidConnection = await Promise.race([
client.isValidAsync(),
new Promise((resolve) => {
setTimeout(resolve, 10000);
}).then(() => {
throw new BadRequestError({ message: "Unable to establish connection - verify credentials" });
})
]);
} finally {
client.destroy(noop);
}
return isValidConnection;
};
const create = async (inputs: unknown, expireAt: number) => {
const providerInputs = await validateProviderInputs(inputs);
const client = await getClient(providerInputs);
const username = generateUsername();
const password = generatePassword();
try {
const expiration = getDaysToExpiry(new Date(expireAt));
const creationStatement = handlebars.compile(providerInputs.creationStatement, { noEscape: true })({
username,
password,
expiration
});
await new Promise((resolve, reject) => {
client.execute({
sqlText: creationStatement,
complete(err) {
if (err) {
return reject(new BadRequestError({ name: "CreateLease", message: err.message }));
}
return resolve(true);
}
});
});
} finally {
client.destroy(noop);
}
return { entityId: username, data: { DB_USERNAME: username, DB_PASSWORD: password } };
};
const revoke = async (inputs: unknown, username: string) => {
const providerInputs = await validateProviderInputs(inputs);
const client = await getClient(providerInputs);
try {
const revokeStatement = handlebars.compile(providerInputs.revocationStatement)({ username });
await new Promise((resolve, reject) => {
client.execute({
sqlText: revokeStatement,
complete(err) {
if (err) {
return reject(new BadRequestError({ name: "RevokeLease", message: err.message }));
}
return resolve(true);
}
});
});
} finally {
client.destroy(noop);
}
return { entityId: username };
};
const renew = async (inputs: unknown, username: string, expireAt: number) => {
const providerInputs = await validateProviderInputs(inputs);
if (!providerInputs.renewStatement) return { entityId: username };
const client = await getClient(providerInputs);
try {
const expiration = getDaysToExpiry(new Date(expireAt));
const renewStatement = handlebars.compile(providerInputs.renewStatement)({
username,
expiration
});
await new Promise((resolve, reject) => {
client.execute({
sqlText: renewStatement,
complete(err) {
if (err) {
return reject(new BadRequestError({ name: "RenewLease", message: err.message }));
}
return resolve(true);
}
});
});
} finally {
client.destroy(noop);
}
return { entityId: username };
};
return {
validateProviderInputs,
validateConnection,
create,
revoke,
renew
};
};
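A lifecycle sketch of how a caller would drive these functions (names and timestamps hypothetical; `expireAt` is a Unix epoch in milliseconds):

const provider = SnowflakeProvider();
await provider.validateConnection(inputs);
const lease = await provider.create(inputs, Date.now() + 24 * 60 * 60 * 1000);
// -> { entityId: "<generated username>", data: { DB_USERNAME, DB_PASSWORD } }
await provider.renew(inputs, lease.entityId, Date.now() + 48 * 60 * 60 * 1000);
await provider.revoke(inputs, lease.entityId);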


@@ -3,11 +3,9 @@ import knex from "knex";
import { customAlphabet } from "nanoid";
import { z } from "zod";
import { getConfig } from "@app/lib/config/env";
import { BadRequestError } from "@app/lib/errors";
import { getDbConnectionHost } from "@app/lib/knex";
import { alphaNumericNanoId } from "@app/lib/nanoid";
import { verifyHostInputValidity } from "../dynamic-secret-fns";
import { DynamicSecretSqlDBSchema, SqlProviders, TDynamicProviderFns } from "./models";
const EXTERNAL_REQUEST_TIMEOUT = 10 * 1000;
@@ -29,27 +27,8 @@ const generateUsername = (provider: SqlProviders) => {
export const SqlDatabaseProvider = (): TDynamicProviderFns => {
const validateProviderInputs = async (inputs: unknown) => {
const appCfg = getConfig();
const isCloud = Boolean(appCfg.LICENSE_SERVER_KEY); // quick and dirty way to check if its cloud or not
const dbHost = appCfg.DB_HOST || getDbConnectionHost(appCfg.DB_CONNECTION_URI);
const providerInputs = await DynamicSecretSqlDBSchema.parseAsync(inputs);
if (
isCloud &&
// localhost
// internal ips
(providerInputs.host === "host.docker.internal" ||
providerInputs.host.match(/^10\.\d+\.\d+\.\d+/) ||
providerInputs.host.match(/^192\.168\.\d+\.\d+/))
)
throw new BadRequestError({ message: "Invalid db host" });
if (
providerInputs.host === "localhost" ||
providerInputs.host === "127.0.0.1" ||
// database infisical uses
dbHost === providerInputs.host
)
throw new BadRequestError({ message: "Invalid db host" });
verifyHostInputValidity(providerInputs.host);
return providerInputs;
};


@@ -129,7 +129,7 @@ export const licenseServiceFactory = ({
}
}
// this means this is self hosted oss version
// this means this is the self-hosted oss version
// else it would reach catch statement
isValidLicense = true;
} catch (error) {


@@ -694,31 +694,35 @@ export const buildServiceTokenProjectPermission = (
const canRead = permission.includes("read");
const { can, build } = new AbilityBuilder<MongoAbility<ProjectPermissionSet>>(createMongoAbility);
scopes.forEach(({ secretPath, environment }) => {
if (canWrite) {
// TODO: @Akhi
// @ts-expect-error type
can(ProjectPermissionActions.Edit, ProjectPermissionSub.Secrets, {
secretPath: { $glob: secretPath },
environment
});
// @ts-expect-error type
can(ProjectPermissionActions.Create, ProjectPermissionSub.Secrets, {
secretPath: { $glob: secretPath },
environment
});
// @ts-expect-error type
can(ProjectPermissionActions.Delete, ProjectPermissionSub.Secrets, {
secretPath: { $glob: secretPath },
environment
});
}
if (canRead) {
// @ts-expect-error type
can(ProjectPermissionActions.Read, ProjectPermissionSub.Secrets, {
secretPath: { $glob: secretPath },
environment
});
}
[ProjectPermissionSub.Secrets, ProjectPermissionSub.SecretImports, ProjectPermissionSub.SecretFolders].forEach(
(subject) => {
if (canWrite) {
// TODO: @Akhi
// @ts-expect-error type
can(ProjectPermissionActions.Edit, subject, {
secretPath: { $glob: secretPath },
environment
});
// @ts-expect-error type
can(ProjectPermissionActions.Create, subject, {
secretPath: { $glob: secretPath },
environment
});
// @ts-expect-error type
can(ProjectPermissionActions.Delete, subject, {
secretPath: { $glob: secretPath },
environment
});
}
if (canRead) {
// @ts-expect-error type
can(ProjectPermissionActions.Read, subject, {
secretPath: { $glob: secretPath },
environment
});
}
}
);
});
return build({ conditionsMatcher });


@@ -669,6 +669,12 @@ export const RAW_SECRETS = {
type: "The type of the secret to delete.",
projectSlug: "The slug of the project to delete the secret in.",
workspaceId: "The ID of the project where the secret is located."
},
GET_REFERENCE_TREE: {
secretName: "The name of the secret to get the reference tree for.",
workspaceId: "The ID of the project where the secret is located.",
environment: "The slug of the environment where the the secret is located.",
secretPath: "The folder path where the secret is located."
}
} as const;


@@ -57,3 +57,10 @@ export enum OrderByDirection {
ASC = "asc",
DESC = "desc"
}
export type ProjectServiceActor = {
type: ActorType;
id: string;
authMethod: ActorAuthMethod;
orgId: string;
};


@@ -15,8 +15,12 @@ export const fastifySwagger = fp(async (fastify) => {
},
servers: [
{
url: "https://app.infisical.com",
description: "Production server"
url: "https://us.infisical.com",
description: "Production server (US)"
},
{
url: "https://eu.infisical.com",
description: "Production server (EU)"
},
{
url: "http://localhost:8080",


@@ -29,6 +29,8 @@ export const registerAdminRouter = async (server: FastifyZodProvider) => {
}).extend({
isMigrationModeOn: z.boolean(),
defaultAuthOrgSlug: z.string().nullable(),
defaultAuthOrgAuthEnforced: z.boolean().nullish(),
defaultAuthOrgAuthMethod: z.string().nullish(),
isSecretScanningDisabled: z.boolean()
})
})


@@ -293,10 +293,10 @@ export const registerCmekRouter = async (server: FastifyZodProvider) => {
schema: {
description: "Decrypt data with KMS key",
params: z.object({
keyId: z.string().uuid().describe(KMS.ENCRYPT.keyId)
keyId: z.string().uuid().describe(KMS.DECRYPT.keyId)
}),
body: z.object({
ciphertext: base64Schema.describe(KMS.ENCRYPT.plaintext)
ciphertext: base64Schema.describe(KMS.DECRYPT.ciphertext)
}),
response: {
200: z.object({


@@ -20,6 +20,8 @@ import { AuthMode } from "@app/services/auth/auth-type";
import { SecretsOrderBy } from "@app/services/secret/secret-types";
import { PostHogEventTypes } from "@app/services/telemetry/telemetry-types";
const MAX_DEEP_SEARCH_LIMIT = 500; // arbitrary limit to prevent excessive results
// handle querystring boolean values
const booleanSchema = z
.union([z.boolean(), z.string().trim()])
@@ -34,6 +36,35 @@ const booleanSchema = z
.optional()
.default(true);
const parseSecretPathSearch = (search?: string) => {
if (!search)
return {
searchName: "",
searchPath: ""
};
if (!search.includes("/"))
return {
searchName: search,
searchPath: ""
};
if (search === "/")
return {
searchName: "",
searchPath: "/"
};
const [searchName, ...searchPathSegments] = search.split("/").reverse();
let searchPath = removeTrailingSlash(searchPathSegments.reverse().join("/").toLowerCase());
if (!searchPath.startsWith("/")) searchPath = `/${searchPath}`;
return {
searchName,
searchPath
};
};
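// Worked examples (illustrative inputs):
// parseSecretPathSearch(undefined)     -> { searchName: "",        searchPath: "" }
// parseSecretPathSearch("DB_")         -> { searchName: "DB_",     searchPath: "" }
// parseSecretPathSearch("/")           -> { searchName: "",        searchPath: "/" }
// parseSecretPathSearch("api/DB_HOST") -> { searchName: "DB_HOST", searchPath: "/api" }
// parseSecretPathSearch("/api/")       -> { searchName: "",        searchPath: "/api" }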
export const registerDashboardRouter = async (server: FastifyZodProvider) => {
server.route({
method: "GET",
@@ -134,7 +165,7 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
let folders: Awaited<ReturnType<typeof server.services.folder.getFoldersMultiEnv>> | undefined;
let secrets: Awaited<ReturnType<typeof server.services.secret.getSecretsRawMultiEnv>> | undefined;
let dynamicSecrets:
| Awaited<ReturnType<typeof server.services.dynamicSecret.listDynamicSecretsByFolderIds>>
| Awaited<ReturnType<typeof server.services.dynamicSecret.listDynamicSecretsByEnvs>>
| undefined;
let totalFolderCount: number | undefined;
@@ -218,7 +249,7 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
});
if (remainingLimit > 0 && totalDynamicSecretCount > adjustedOffset) {
dynamicSecrets = await server.services.dynamicSecret.listDynamicSecretsByFolderIds({
dynamicSecrets = await server.services.dynamicSecret.listDynamicSecretsByEnvs({
actor: req.permission.type,
actorId: req.permission.id,
actorAuthMethod: req.permission.authMethod,
@@ -633,4 +664,180 @@ export const registerDashboardRouter = async (server: FastifyZodProvider) => {
};
}
});
server.route({
method: "GET",
url: "/secrets-deep-search",
config: {
rateLimit: secretsLimit
},
schema: {
security: [
{
bearerAuth: []
}
],
querystring: z.object({
projectId: z.string().trim(),
environments: z.string().trim().transform(decodeURIComponent),
secretPath: z.string().trim().default("/").transform(removeTrailingSlash),
search: z.string().trim().optional(),
tags: z.string().trim().transform(decodeURIComponent).optional()
}),
response: {
200: z.object({
folders: SecretFoldersSchema.extend({ path: z.string() }).array().optional(),
dynamicSecrets: SanitizedDynamicSecretSchema.extend({ path: z.string(), environment: z.string() })
.array()
.optional(),
secrets: secretRawSchema
.extend({
secretPath: z.string().optional(),
tags: SecretTagsSchema.pick({
id: true,
slug: true,
color: true
})
.extend({ name: z.string() })
.array()
.optional()
})
.array()
.optional()
})
}
},
onRequest: verifyAuth([AuthMode.JWT]),
handler: async (req) => {
const { secretPath, projectId, search } = req.query;
const environments = req.query.environments.split(",").filter((env) => Boolean(env.trim()));
if (!environments.length) throw new BadRequestError({ message: "One or more environments required" });
const tags = req.query.tags?.split(",").filter((tag) => Boolean(tag.trim())) ?? [];
if (!search && !tags.length) throw new BadRequestError({ message: "Search or tags required" });
const searchHasTags = Boolean(tags.length);
const allFolders = await server.services.folder.getFoldersDeepByEnvs(
{
projectId,
environments,
secretPath
},
req.permission
);
const { searchName, searchPath } = parseSecretPathSearch(search);
const folderMappings = allFolders.map((folder) => ({
folderId: folder.id,
path: folder.path,
environment: folder.environment
}));
const sharedFilters = {
search: searchName,
limit: MAX_DEEP_SEARCH_LIMIT,
orderBy: SecretsOrderBy.Name
};
const secrets = await server.services.secret.getSecretsRawByFolderMappings(
{
projectId,
folderMappings,
filters: {
...sharedFilters,
tagSlugs: tags,
includeTagsInSearch: true
}
},
req.permission
);
const dynamicSecrets = searchHasTags
? []
: await server.services.dynamicSecret.listDynamicSecretsByFolderIds(
{
projectId,
folderMappings,
filters: sharedFilters
},
req.permission
);
for await (const environment of environments) {
const secretCountForEnv = secrets.filter((secret) => secret.environment === environment).length;
if (secretCountForEnv) {
await server.services.auditLog.createAuditLog({
projectId,
...req.auditLogInfo,
event: {
type: EventType.GET_SECRETS,
metadata: {
environment,
secretPath,
numberOfSecrets: secretCountForEnv
}
}
});
if (getUserAgentType(req.headers["user-agent"]) !== UserAgentType.K8_OPERATOR) {
await server.services.telemetry.sendPostHogEvents({
event: PostHogEventTypes.SecretPulled,
distinctId: getTelemetryDistinctId(req),
properties: {
numberOfSecrets: secretCountForEnv,
workspaceId: projectId,
environment,
secretPath,
channel: getUserAgentType(req.headers["user-agent"]),
...req.auditLogInfo
}
});
}
}
}
const sliceQuickSearch = <T>(array: T[]) => array.slice(0, 25);
return {
secrets: sliceQuickSearch(
searchPath ? secrets.filter((secret) => secret.secretPath.endsWith(searchPath)) : secrets
),
dynamicSecrets: sliceQuickSearch(
searchPath
? dynamicSecrets.filter((dynamicSecret) => dynamicSecret.path.endsWith(searchPath))
: dynamicSecrets
),
folders: searchHasTags
? []
: sliceQuickSearch(
allFolders.filter((folder) => {
const [folderName, ...folderPathSegments] = folder.path.split("/").reverse();
const folderPath = folderPathSegments.reverse().join("/").toLowerCase() || "/";
if (searchPath) {
if (searchPath === "/") {
// only show root folders if no folder name search
if (!searchName) return folderPath === searchPath;
// start partial match on root folders
return folderName.toLowerCase().startsWith(searchName.toLowerCase());
}
// support ending partial path match
return (
folderPath.endsWith(searchPath) && folderName.toLowerCase().startsWith(searchName.toLowerCase())
);
}
// no search path, "fuzzy" match all folders
return folderName.toLowerCase().includes(searchName.toLowerCase());
})
)
};
}
});
};


@@ -23,6 +23,18 @@ import { PostHogEventTypes } from "@app/services/telemetry/telemetry-types";
import { secretRawSchema } from "../sanitizedSchemas";
const SecretReferenceNode = z.object({
key: z.string(),
value: z.string().optional(),
environment: z.string(),
secretPath: z.string()
});
type TSecretReferenceNode = z.infer<typeof SecretReferenceNode> & { children: TSecretReferenceNode[] };
const SecretReferenceNodeTree: z.ZodType<TSecretReferenceNode> = SecretReferenceNode.extend({
children: z.lazy(() => SecretReferenceNodeTree.array())
});
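// z.lazy defers evaluation so the schema can reference itself; an accepted
// value looks like (data hypothetical):
// {
//   key: "DB_URL", value: "postgres://...", environment: "prod", secretPath: "/",
//   children: [
//     { key: "DB_HOST", environment: "prod", secretPath: "/db", children: [] }
//   ]
// }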
export const registerSecretRouter = async (server: FastifyZodProvider) => {
server.route({
method: "POST",
@@ -2102,6 +2114,58 @@ export const registerSecretRouter = async (server: FastifyZodProvider) => {
}
});
server.route({
method: "GET",
url: "/raw/:secretName/secret-reference-tree",
config: {
rateLimit: secretsLimit
},
schema: {
description: "Get secret reference tree",
security: [
{
bearerAuth: []
}
],
params: z.object({
secretName: z.string().trim().describe(RAW_SECRETS.GET_REFERENCE_TREE.secretName)
}),
querystring: z.object({
workspaceId: z.string().trim().describe(RAW_SECRETS.GET_REFERENCE_TREE.workspaceId),
environment: z.string().trim().describe(RAW_SECRETS.GET_REFERENCE_TREE.environment),
secretPath: z
.string()
.trim()
.default("/")
.transform(removeTrailingSlash)
.describe(RAW_SECRETS.GET_REFERENCE_TREE.secretPath)
}),
response: {
200: z.object({
tree: SecretReferenceNodeTree,
value: z.string().optional()
})
}
},
onRequest: verifyAuth([AuthMode.JWT]),
handler: async (req) => {
const { secretName } = req.params;
const { secretPath, environment, workspaceId } = req.query;
const { tree, value } = await server.services.secret.getSecretReferenceTree({
actorId: req.permission.id,
actor: req.permission.type,
actorAuthMethod: req.permission.authMethod,
actorOrgId: req.permission.orgId,
projectId: workspaceId,
secretName,
secretPath,
environment
});
return { tree, value };
}
});
server.route({
method: "POST",
url: "/backfill-secret-references",


@@ -1,9 +1,9 @@
import { ForbiddenError } from "@casl/ability";
import { FastifyRequest } from "fastify";
import { TPermissionServiceFactory } from "@app/ee/services/permission/permission-service";
import { ProjectPermissionCmekActions, ProjectPermissionSub } from "@app/ee/services/permission/project-permission";
import { BadRequestError, NotFoundError } from "@app/lib/errors";
import { ProjectServiceActor } from "@app/lib/types";
import {
TCmekDecryptDTO,
TCmekEncryptDTO,
@@ -23,7 +23,7 @@ type TCmekServiceFactoryDep = {
export type TCmekServiceFactory = ReturnType<typeof cmekServiceFactory>;
export const cmekServiceFactory = ({ kmsService, kmsDAL, permissionService }: TCmekServiceFactoryDep) => {
const createCmek = async ({ projectId, ...dto }: TCreateCmekDTO, actor: FastifyRequest["permission"]) => {
const createCmek = async ({ projectId, ...dto }: TCreateCmekDTO, actor: ProjectServiceActor) => {
const { permission } = await permissionService.getProjectPermission(
actor.type,
actor.id,
@@ -43,7 +43,7 @@ export const cmekServiceFactory = ({ kmsService, kmsDAL, permissionService }: TC
return cmek;
};
const updateCmekById = async ({ keyId, ...data }: TUpdabteCmekByIdDTO, actor: FastifyRequest["permission"]) => {
const updateCmekById = async ({ keyId, ...data }: TUpdabteCmekByIdDTO, actor: ProjectServiceActor) => {
const key = await kmsDAL.findById(keyId);
if (!key) throw new NotFoundError({ message: `Key with ID ${keyId} not found` });
@@ -65,7 +65,7 @@ export const cmekServiceFactory = ({ kmsService, kmsDAL, permissionService }: TC
return cmek;
};
const deleteCmekById = async (keyId: string, actor: FastifyRequest["permission"]) => {
const deleteCmekById = async (keyId: string, actor: ProjectServiceActor) => {
const key = await kmsDAL.findById(keyId);
if (!key) throw new NotFoundError({ message: `Key with ID ${keyId} not found` });
@@ -89,7 +89,7 @@ export const cmekServiceFactory = ({ kmsService, kmsDAL, permissionService }: TC
const listCmeksByProjectId = async (
{ projectId, ...filters }: TListCmeksByProjectIdDTO,
actor: FastifyRequest["permission"]
actor: ProjectServiceActor
) => {
const { permission } = await permissionService.getProjectPermission(
actor.type,
@@ -106,7 +106,7 @@ export const cmekServiceFactory = ({ kmsService, kmsDAL, permissionService }: TC
return { cmeks, totalCount };
};
const cmekEncrypt = async ({ keyId, plaintext }: TCmekEncryptDTO, actor: FastifyRequest["permission"]) => {
const cmekEncrypt = async ({ keyId, plaintext }: TCmekEncryptDTO, actor: ProjectServiceActor) => {
const key = await kmsDAL.findById(keyId);
if (!key) throw new NotFoundError({ message: `Key with ID ${keyId} not found` });
@@ -132,7 +132,7 @@ export const cmekServiceFactory = ({ kmsService, kmsDAL, permissionService }: TC
return cipherTextBlob.toString("base64");
};
const cmekDecrypt = async ({ keyId, ciphertext }: TCmekDecryptDTO, actor: FastifyRequest["permission"]) => {
const cmekDecrypt = async ({ keyId, ciphertext }: TCmekDecryptDTO, actor: ProjectServiceActor) => {
const key = await kmsDAL.findById(keyId);
if (!key) throw new NotFoundError({ message: `Key with ID ${keyId} not found` });


@@ -1,9 +1,9 @@
import { ForbiddenError } from "@casl/ability";
import { FastifyRequest } from "fastify";
import { TLicenseServiceFactory } from "@app/ee/services/license/license-service";
import { OrgPermissionActions, OrgPermissionSubjects } from "@app/ee/services/permission/org-permission";
import { TPermissionServiceFactory } from "@app/ee/services/permission/permission-service";
import { ProjectServiceActor } from "@app/lib/types";
import { constructGroupOrgMembershipRoleMappings } from "@app/services/external-group-org-role-mapping/external-group-org-role-mapping-fns";
import { TSyncExternalGroupOrgMembershipRoleMappingsDTO } from "@app/services/external-group-org-role-mapping/external-group-org-role-mapping-types";
import { TOrgRoleDALFactory } from "@app/services/org/org-role-dal";
@@ -25,7 +25,7 @@ export const externalGroupOrgRoleMappingServiceFactory = ({
permissionService,
orgRoleDAL
}: TExternalGroupOrgRoleMappingServiceFactoryDep) => {
const listExternalGroupOrgRoleMappings = async (actor: FastifyRequest["permission"]) => {
const listExternalGroupOrgRoleMappings = async (actor: ProjectServiceActor) => {
const { permission } = await permissionService.getOrgPermission(
actor.type,
actor.id,
@@ -46,7 +46,7 @@ export const externalGroupOrgRoleMappingServiceFactory = ({
const updateExternalGroupOrgRoleMappings = async (
dto: TSyncExternalGroupOrgMembershipRoleMappingsDTO,
actor: FastifyRequest["permission"]
actor: ProjectServiceActor
) => {
const { permission } = await permissionService.getOrgPermission(
actor.type,


@@ -2540,9 +2540,9 @@ const syncSecretsAzureDevops = async ({
const { groupId, groupName } = await getEnvGroupId(integration.app, integration.appId, integration.environment.name);
const variables: Record<string, { value: string }> = {};
const variables: Record<string, { value: string, isSecret: boolean }> = {};
for (const key of Object.keys(secrets)) {
variables[key] = { value: secrets[key].value };
variables[key] = { value: secrets[key].value, isSecret: true };
}
if (!groupId) {


@@ -291,7 +291,7 @@ export const orgServiceFactory = ({
}
if (authEnforced !== undefined) {
if (!plan?.samlSSO || !plan.oidcSSO)
if (!plan?.samlSSO && !plan.oidcSSO)
throw new BadRequestError({
message: "Failed to enforce/un-enforce SSO due to plan restriction. Upgrade plan to enforce/un-enforce SSO."
});
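// Note the operator flip above: the old `||` condition rejected any plan
// missing either entitlement; `&&` only rejects plans that have neither
// SAML nor OIDC, so a single SSO entitlement is enough to toggle enforcement.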


@@ -8,6 +8,8 @@ import { ormify, selectAllTableCols } from "@app/lib/knex";
import { OrderByDirection } from "@app/lib/types";
import { SecretsOrderBy } from "@app/services/secret/secret-types";
import { TFindFoldersDeepByParentIdsDTO } from "./secret-folder-types";
export const validateFolderName = (folderName: string) => {
const validNameRegex = /^[a-zA-Z0-9-_]+$/;
return validNameRegex.test(folderName);
@@ -444,6 +446,48 @@ export const secretFolderDALFactory = (db: TDbClient) => {
}
};
const findByEnvsDeep = async ({ parentIds }: TFindFoldersDeepByParentIdsDTO, tx?: Knex) => {
try {
const folders = await (tx || db.replicaNode())
.withRecursive("parents", (qb) =>
qb
.select(
selectAllTableCols(TableName.SecretFolder),
db.raw("0 as depth"),
db.raw(`'/' as path`),
db.ref(`${TableName.Environment}.slug`).as("environment")
)
.from(TableName.SecretFolder)
.join(TableName.Environment, `${TableName.SecretFolder}.envId`, `${TableName.Environment}.id`)
.whereIn(`${TableName.SecretFolder}.id`, parentIds)
.union((un) => {
void un
.select(
selectAllTableCols(TableName.SecretFolder),
db.raw("parents.depth + 1 as depth"),
db.raw(
`CONCAT(
CASE WHEN parents.path = '/' THEN '' ELSE parents.path END,
CASE WHEN ${TableName.SecretFolder}."parentId" is NULL THEN '' ELSE CONCAT('/', secret_folders.name) END
)`
),
db.ref("parents.environment")
)
.from(TableName.SecretFolder)
.join("parents", `${TableName.SecretFolder}.parentId`, "parents.id");
})
)
.select<(TSecretFolders & { path: string; depth: number; environment: string })[]>("*")
.from("parents")
.orderBy("depth")
.orderBy(`name`);
return folders;
} catch (error) {
throw new DatabaseError({ error, name: "FindByEnvsDeep" });
}
};
return {
...secretFolderOrm,
update,
@@ -454,6 +498,7 @@ export const secretFolderDALFactory = (db: TDbClient) => {
findSecretPathByFolderIds,
findClosestFolder,
findByProjectId,
findByMultiEnv
findByMultiEnv,
findByEnvsDeep
};
};

View File
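The recursive CTE in `findByEnvsDeep` above seeds the query with the given parent folders at depth 0 and path `/`, then repeatedly joins children onto the working set, extending the path and incrementing the depth at each level. As a rough in-memory equivalent (a sketch with a simplified folder shape, not the actual DAL types):

```ts
type Folder = { id: string; name: string; parentId: string | null; environment: string };

// Breadth-first walk mirroring the CTE: parents at depth 0, children at depth + 1.
const findByEnvsDeepInMemory = (folders: Folder[], parentIds: string[]) => {
  const results: (Folder & { depth: number; path: string })[] = [];
  let frontier = folders
    .filter((f) => parentIds.includes(f.id))
    .map((f) => ({ ...f, depth: 0, path: "/" }));
  while (frontier.length) {
    results.push(...frontier);
    frontier = frontier.flatMap((parent) =>
      folders
        .filter((f) => f.parentId === parent.id)
        .map((f) => ({
          ...f,
          depth: parent.depth + 1,
          // A root path of "/" contributes nothing, matching the CTE's CASE logic.
          path: `${parent.path === "/" ? "" : parent.path}/${f.name}`
        }))
    );
  }
  // Matches the query's ORDER BY depth, then name.
  return results.sort((a, b) => a.depth - b.depth || a.name.localeCompare(b.name));
};
```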

@@ -7,7 +7,7 @@ import { TPermissionServiceFactory } from "@app/ee/services/permission/permissio
import { ProjectPermissionActions, ProjectPermissionSub } from "@app/ee/services/permission/project-permission";
import { TSecretSnapshotServiceFactory } from "@app/ee/services/secret-snapshot/secret-snapshot-service";
import { BadRequestError, NotFoundError } from "@app/lib/errors";
import { OrderByDirection } from "@app/lib/types";
import { OrderByDirection, ProjectServiceActor } from "@app/lib/types";
import { TProjectDALFactory } from "../project/project-dal";
import { TProjectEnvDALFactory } from "../project-env/project-env-dal";
@@ -17,6 +17,7 @@ import {
TDeleteFolderDTO,
TGetFolderByIdDTO,
TGetFolderDTO,
TGetFoldersDeepByEnvsDTO,
TUpdateFolderDTO,
TUpdateManyFoldersDTO
} from "./secret-folder-types";
@@ -511,6 +512,30 @@ export const secretFolderServiceFactory = ({
};
};
const getFoldersDeepByEnvs = async (
{ projectId, environments, secretPath }: TGetFoldersDeepByEnvsDTO,
actor: ProjectServiceActor
) => {
// folder listing is readable by anyone in the project;
// this permission check verifies the user has access to the project
await permissionService.getProjectPermission(actor.type, actor.id, projectId, actor.authMethod, actor.orgId);
const envs = await projectEnvDAL.findBySlugs(projectId, environments);
if (!envs.length)
throw new NotFoundError({
message: `Environments '${environments.join(", ")}' not found`,
name: "GetFoldersDeep"
});
const parentFolders = await folderDAL.findBySecretPathMultiEnv(projectId, environments, secretPath);
if (!parentFolders.length) return [];
const folders = await folderDAL.findByEnvsDeep({ parentIds: parentFolders.map((parent) => parent.id) });
return folders;
};
return {
createFolder,
updateFolder,
@@ -519,6 +544,7 @@ export const secretFolderServiceFactory = ({
getFolders,
getFolderById,
getProjectFolderCount,
getFoldersMultiEnv
getFoldersMultiEnv,
getFoldersDeepByEnvs
};
};

View File

@@ -47,3 +47,13 @@ export type TGetFolderDTO = {
export type TGetFolderByIdDTO = {
id: string;
} & Omit<TProjectPermission, "projectId">;
export type TGetFoldersDeepByEnvsDTO = {
projectId: string;
environments: string[];
secretPath: string;
};
export type TFindFoldersDeepByParentIdsDTO = {
parentIds: string[];
};

View File

@@ -272,11 +272,11 @@ export const secretSharingServiceFactory = ({
? await secretSharingDAL.findById(sharedSecretId)
: await secretSharingDAL.findOne({ identifier: sharedSecretId });
const deletedSharedSecret = await secretSharingDAL.deleteById(sharedSecretId);
if (sharedSecret.orgId && sharedSecret.orgId !== orgId)
throw new ForbiddenRequestError({ message: "User does not have permission to delete shared secret" });
const deletedSharedSecret = await secretSharingDAL.deleteById(sharedSecretId);
return deletedSharedSecret;
};

View File
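The shared-secret hunk above is an ordering fix: `deleteById` used to run before the org ownership check, so a request that ultimately threw `ForbiddenRequestError` had already destroyed the record. A sketch of the corrected flow, assuming the DAL and error class from the surrounding service are in scope:

```ts
// Sketch only: authorization must pass before any destructive call.
const deleteSharedSecret = async (sharedSecretId: string, orgId: string) => {
  const sharedSecret = await secretSharingDAL.findById(sharedSecretId);
  if (sharedSecret.orgId && sharedSecret.orgId !== orgId)
    throw new ForbiddenRequestError({ message: "User does not have permission to delete shared secret" });
  // Only now is it safe to delete the record.
  return secretSharingDAL.deleteById(sharedSecretId);
};
```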

@@ -14,6 +14,7 @@ import {
} from "@app/lib/knex";
import { OrderByDirection } from "@app/lib/types";
import { SecretsOrderBy } from "@app/services/secret/secret-types";
import { TFindSecretsByFolderIdsFilter } from "@app/services/secret-v2-bridge/secret-v2-bridge-types";
export type TSecretV2BridgeDALFactory = ReturnType<typeof secretV2BridgeDALFactory>;
@@ -339,14 +340,7 @@ export const secretV2BridgeDALFactory = (db: TDbClient) => {
folderIds: string[],
userId?: string,
tx?: Knex,
filters?: {
limit?: number;
offset?: number;
orderBy?: SecretsOrderBy;
orderDirection?: OrderByDirection;
search?: string;
tagSlugs?: string[];
}
filters?: TFindSecretsByFolderIdsFilter
) => {
try {
// if the id is not a UUID, set userId to null (corner case: service token IDs are not UUIDs, kept for backwards compatibility from the Mongo era)
@@ -356,14 +350,20 @@ export const secretV2BridgeDALFactory = (db: TDbClient) => {
}
const query = (tx || db.replicaNode())(TableName.SecretV2)
.whereIn("folderId", folderIds)
.whereIn(`${TableName.SecretV2}.folderId`, folderIds)
.where((bd) => {
if (filters?.search) {
void bd.whereILike("key", `%${filters?.search}%`);
if (filters?.includeTagsInSearch) {
void bd
.whereILike(`${TableName.SecretV2}.key`, `%${filters?.search}%`)
.orWhereILike(`${TableName.SecretTag}.slug`, `%${filters?.search}%`);
} else {
void bd.whereILike(`${TableName.SecretV2}.key`, `%${filters?.search}%`);
}
}
})
.where((bd) => {
void bd.whereNull("userId").orWhere({ userId: userId || null });
void bd.whereNull(`${TableName.SecretV2}.userId`).orWhere({ userId: userId || null });
})
.leftJoin(
TableName.SecretV2JnTag,
@@ -385,7 +385,7 @@ export const secretV2BridgeDALFactory = (db: TDbClient) => {
.where((bd) => {
const slugs = filters?.tagSlugs?.filter(Boolean);
if (slugs && slugs.length > 0) {
void bd.whereIn("slug", slugs);
void bd.whereIn(`${TableName.SecretTag}.slug`, slugs);
}
})
.orderBy(

View File
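The DAL change above folds the inline filter object into the shared `TFindSecretsByFolderIdsFilter` type and adds `includeTagsInSearch`, which widens the `ILIKE` search from secret keys alone to keys or tag slugs. A hypothetical, abridged usage sketch (the full type, shown later in this compare, also carries ordering fields):

```ts
type TFindSecretsByFolderIdsFilter = {
  limit?: number;
  offset?: number;
  search?: string;
  tagSlugs?: string[];
  includeTagsInSearch?: boolean;
};

// "db" now matches secrets keyed like "DB_URL" as well as secrets
// carrying a tag slug such as "database".
const filters: TFindSecretsByFolderIdsFilter = {
  search: "db",
  includeTagsInSearch: true,
  limit: 25,
  offset: 0
};
// const rows = await secretDAL.findByFolderIds(folderIds, userId, undefined, filters);
```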

@@ -11,6 +11,8 @@ import { TSecretV2BridgeDALFactory } from "./secret-v2-bridge-dal";
import { TFnSecretBulkDelete, TFnSecretBulkInsert, TFnSecretBulkUpdate } from "./secret-v2-bridge-types";
const INTERPOLATION_SYNTAX_REG = /\${([^}]+)}/g;
// akhilmhdh: a JS regex with the global flag saves lastIndex state across .test() calls
const INTERPOLATION_SYNTAX_REG_NON_GLOBAL = /\${([^}]+)}/;
export const shouldUseSecretV2Bridge = (version: number) => version === 3;
@@ -376,6 +378,13 @@ const formatMultiValueEnv = (val?: string) => {
return `"${val.replace(/\n/g, "\\n")}"`;
};
type TSecretReferenceTraceNode = {
key: string;
value?: string;
environment: string;
secretPath: string;
children: TSecretReferenceTraceNode[];
};
type TInterpolateSecretArg = {
projectId: string;
decryptSecretValue: (encryptedValue?: Buffer | null) => string | undefined;
@@ -417,14 +426,21 @@ export const expandSecretReferencesFactory = ({
return secretCache[cacheKey][secretKey] || { value: "", tags: [] };
};
const recursivelyExpandSecret = async (dto: { value?: string; secretPath: string; environment: string }) => {
if (!dto.value) return "";
const recursivelyExpandSecret = async (dto: {
value?: string;
secretPath: string;
environment: string;
shouldStackTrace?: boolean;
}) => {
const stackTrace = { ...dto, key: "root", children: [] } as TSecretReferenceTraceNode;
const stack = [{ ...dto, depth: 0 }];
if (!dto.value) return { expandedValue: "", stackTrace };
const stack = [{ ...dto, depth: 0, trace: stackTrace }];
let expandedValue = dto.value;
while (stack.length) {
const { value, secretPath, environment, depth } = stack.pop()!;
const { value, secretPath, environment, depth, trace } = stack.pop()!;
// eslint-disable-next-line no-continue
if (depth > MAX_SECRET_REFERENCE_DEPTH) continue;
const refs = value?.match(INTERPOLATION_SYNTAX_REG);
@@ -437,6 +453,11 @@ export const expandSecretReferencesFactory = ({
// eslint-disable-next-line no-continue
if (!entities.length) continue;
let referencedSecretPath = "";
let referencedSecretKey = "";
let referencedSecretEnvironmentSlug = "";
let referencedSecretValue = "";
if (entities.length === 1) {
const [secretKey] = entities;
@@ -449,17 +470,11 @@ export const expandSecretReferencesFactory = ({
const cacheKey = getCacheUniqueKey(environment, secretPath);
secretCache[cacheKey][secretKey] = referredValue;
if (INTERPOLATION_SYNTAX_REG.test(referredValue.value)) {
stack.push({
value: referredValue.value,
secretPath,
environment,
depth: depth + 1
});
}
if (referredValue) {
expandedValue = expandedValue.replaceAll(interpolationSyntax, referredValue.value);
}
referencedSecretValue = referredValue.value;
referencedSecretKey = secretKey;
referencedSecretPath = secretPath;
referencedSecretEnvironmentSlug = environment;
} else {
const secretReferenceEnvironment = entities[0];
const secretReferencePath = path.join("/", ...entities.slice(1, entities.length - 1));
@@ -474,24 +489,42 @@ export const expandSecretReferencesFactory = ({
const cacheKey = getCacheUniqueKey(secretReferenceEnvironment, secretReferencePath);
secretCache[cacheKey][secretReferenceKey] = referedValue;
if (INTERPOLATION_SYNTAX_REG.test(referedValue.value)) {
stack.push({
value: referedValue.value,
secretPath: secretReferencePath,
environment: secretReferenceEnvironment,
depth: depth + 1
});
}
if (referedValue) {
expandedValue = expandedValue.replaceAll(interpolationSyntax, referedValue.value);
referencedSecretValue = referedValue.value;
referencedSecretKey = secretReferenceKey;
referencedSecretPath = secretReferencePath;
referencedSecretEnvironmentSlug = secretReferenceEnvironment;
}
const node = {
value: referencedSecretValue,
secretPath: referencedSecretPath,
environment: referencedSecretEnvironmentSlug,
depth: depth + 1,
trace
};
const shouldExpandMore = INTERPOLATION_SYNTAX_REG_NON_GLOBAL.test(referencedSecretValue);
if (dto.shouldStackTrace) {
const stackTraceNode = { ...node, children: [], key: referencedSecretKey, trace: null };
trace?.children.push(stackTraceNode);
// when stack tracing, this becomes a child node
if (shouldExpandMore) {
stack.push({ ...node, trace: stackTraceNode });
}
} else if (shouldExpandMore) {
// if no stack trace is needed, we just keep expanding from the root node
stack.push(node);
}
if (referencedSecretValue) {
expandedValue = expandedValue.replaceAll(interpolationSyntax, referencedSecretValue);
}
}
}
}
return expandedValue;
return { expandedValue, stackTrace };
};
const expandSecret = async (inputSecret: {
@@ -505,10 +538,21 @@ export const expandSecretReferencesFactory = ({
const shouldExpand = Boolean(inputSecret.value?.match(INTERPOLATION_SYNTAX_REG));
if (!shouldExpand) return inputSecret.value;
const expandedSecretValue = await recursivelyExpandSecret(inputSecret);
return inputSecret.skipMultilineEncoding ? formatMultiValueEnv(expandedSecretValue) : expandedSecretValue;
const { expandedValue } = await recursivelyExpandSecret(inputSecret);
return inputSecret.skipMultilineEncoding ? formatMultiValueEnv(expandedValue) : expandedValue;
};
return expandSecret;
const getExpandedSecretStackTrace = async (inputSecret: {
value?: string;
secretPath: string;
environment: string;
}) => {
const { stackTrace, expandedValue } = await recursivelyExpandSecret({ ...inputSecret, shouldStackTrace: true });
return { stackTrace, expandedValue };
};
return { expandSecretReferences: expandSecret, getExpandedSecretStackTrace };
};
export const reshapeBridgeSecret = (

View File
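One detail worth calling out from the diff above: `INTERPOLATION_SYNTAX_REG` carries the `g` flag, and a global JS regex keeps `lastIndex` state between `.test()` calls, which can produce false negatives on repeated tests. That is why the non-global copy is introduced for the `shouldExpandMore` check. A minimal repro of the underlying JavaScript behavior:

```ts
const GLOBAL_REG = /\${([^}]+)}/g;
const NON_GLOBAL_REG = /\${([^}]+)}/;

// The global regex resumes matching from lastIndex on the next .test() call.
console.log(GLOBAL_REG.test("${FOO}")); // true, lastIndex is now 6
console.log(GLOBAL_REG.test("${BAR}")); // false -- search resumed at index 6

// The non-global copy is stateless and safe for repeated membership tests.
console.log(NON_GLOBAL_REG.test("${FOO}")); // true
console.log(NON_GLOBAL_REG.test("${BAR}")); // true
```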

@@ -41,7 +41,9 @@ import {
TDeleteManySecretDTO,
TDeleteSecretDTO,
TGetASecretDTO,
TGetSecretReferencesTreeDTO,
TGetSecretsDTO,
TGetSecretsRawByFolderMappingsDTO,
TGetSecretVersionsDTO,
TMoveSecretsDTO,
TSecretReference,
@@ -651,6 +653,56 @@ export const secretV2BridgeServiceFactory = ({
return count;
};
const getSecretsByFolderMappings = async (
{ projectId, userId, filters, folderMappings }: TGetSecretsRawByFolderMappingsDTO,
projectPermission: Awaited<ReturnType<typeof permissionService.getProjectPermission>>["permission"]
) => {
const groupedFolderMappings = groupBy(folderMappings, (folderMapping) => folderMapping.folderId);
const secrets = await secretDAL.findByFolderIds(
folderMappings.map((folderMapping) => folderMapping.folderId),
userId,
undefined,
filters
);
const { decryptor: secretManagerDecryptor } = await kmsService.createCipherPairWithDataKey({
type: KmsDataKey.SecretManager,
projectId
});
const decryptedSecrets = secrets
.filter((el) =>
projectPermission.can(
ProjectPermissionActions.Read,
subject(ProjectPermissionSub.Secrets, {
environment: groupedFolderMappings[el.folderId][0].environment,
secretPath: groupedFolderMappings[el.folderId][0].path,
secretName: el.key,
secretTags: el.tags.map((i) => i.slug)
})
)
)
.map((secret) =>
reshapeBridgeSecret(
projectId,
groupedFolderMappings[secret.folderId][0].environment,
groupedFolderMappings[secret.folderId][0].path,
{
...secret,
value: secret.encryptedValue
? secretManagerDecryptor({ cipherTextBlob: secret.encryptedValue }).toString()
: "",
comment: secret.encryptedComment
? secretManagerDecryptor({ cipherTextBlob: secret.encryptedComment }).toString()
: ""
}
)
);
return decryptedSecrets;
};
// get secrets for multiple envs
const getSecretsMultiEnv = async ({
actorId,
@@ -677,59 +729,28 @@ export const secretV2BridgeServiceFactory = ({
ForbiddenError.from(permission).throwUnlessCan(ProjectPermissionActions.Read, ProjectPermissionSub.Secrets);
}
let paths: { folderId: string; path: string; environment: string }[] = [];
const folders = await folderDAL.findBySecretPathMultiEnv(projectId, environments, path);
if (!folders.length) {
return [];
}
paths = folders.map((folder) => ({ folderId: folder.id, path, environment: folder.environment.slug }));
const folderMappings = folders.map((folder) => ({
folderId: folder.id,
path,
environment: folder.environment.slug
}));
const groupedPaths = groupBy(paths, (p) => p.folderId);
const secrets = await secretDAL.findByFolderIds(
paths.map((p) => p.folderId),
actorId,
undefined,
params
const decryptedSecrets = await getSecretsByFolderMappings(
{
projectId,
folderMappings,
filters: params,
userId: actorId
},
permission
);
const { decryptor: secretManagerDecryptor } = await kmsService.createCipherPairWithDataKey({
type: KmsDataKey.SecretManager,
projectId
});
const decryptedSecrets = secrets
.filter((el) =>
permission.can(
ProjectPermissionActions.Read,
subject(ProjectPermissionSub.Secrets, {
environment: groupedPaths[el.folderId][0].environment,
secretPath: groupedPaths[el.folderId][0].path,
secretName: el.key,
secretTags: el.tags.map((i) => i.slug)
})
)
)
.map((secret) =>
reshapeBridgeSecret(
projectId,
groupedPaths[secret.folderId][0].environment,
groupedPaths[secret.folderId][0].path,
{
...secret,
value: secret.encryptedValue
? secretManagerDecryptor({ cipherTextBlob: secret.encryptedValue }).toString()
: "",
comment: secret.encryptedComment
? secretManagerDecryptor({ cipherTextBlob: secret.encryptedComment }).toString()
: ""
}
)
);
return decryptedSecrets;
};
@@ -815,7 +836,7 @@ export const secretV2BridgeServiceFactory = ({
})
);
const expandSecretReferences = expandSecretReferencesFactory({
const { expandSecretReferences } = expandSecretReferencesFactory({
projectId,
folderDAL,
secretDAL,
@@ -965,7 +986,7 @@ export const secretV2BridgeServiceFactory = ({
})
);
const expandSecretReferences = expandSecretReferencesFactory({
const { expandSecretReferences } = expandSecretReferencesFactory({
projectId,
folderDAL,
secretDAL,
@@ -1032,6 +1053,7 @@ export const secretV2BridgeServiceFactory = ({
value: secretValue,
skipMultilineEncoding: secret.skipMultilineEncoding
});
secretValue = expandedSecretValue || "";
}
@@ -1928,6 +1950,88 @@ export const secretV2BridgeServiceFactory = ({
};
};
const getSecretReferenceTree = async ({
environment,
secretPath,
projectId,
actor,
actorId,
actorOrgId,
secretName,
actorAuthMethod
}: TGetSecretReferencesTreeDTO) => {
const { permission } = await permissionService.getProjectPermission(
actor,
actorId,
projectId,
actorAuthMethod,
actorOrgId
);
ForbiddenError.from(permission).throwUnlessCan(
ProjectPermissionActions.Read,
subject(ProjectPermissionSub.Secrets, { environment, secretPath })
);
const folder = await folderDAL.findBySecretPath(projectId, environment, secretPath);
if (!folder)
throw new NotFoundError({
message: "Folder not found for the given environment slug & secret path",
name: "Create secret"
});
const folderId = folder.id;
const { decryptor: secretManagerDecryptor } = await kmsService.createCipherPairWithDataKey({
type: KmsDataKey.SecretManager,
projectId
});
const secret = await secretDAL.findOne({
folderId,
key: secretName,
type: SecretType.Shared
});
ForbiddenError.from(permission).throwUnlessCan(
ProjectPermissionActions.Read,
subject(ProjectPermissionSub.Secrets, {
environment,
secretPath,
secretName,
secretTags: (secret?.tags || []).map((el) => el.slug)
})
);
const secretValue = secret.encryptedValue
? secretManagerDecryptor({ cipherTextBlob: secret.encryptedValue }).toString()
: "";
const { getExpandedSecretStackTrace } = expandSecretReferencesFactory({
projectId,
folderDAL,
secretDAL,
decryptSecretValue: (value) => (value ? secretManagerDecryptor({ cipherTextBlob: value }).toString() : undefined),
canExpandValue: (expandEnvironment, expandSecretPath, expandSecretName, expandSecretTags) =>
permission.can(
ProjectPermissionActions.Read,
subject(ProjectPermissionSub.Secrets, {
environment: expandEnvironment,
secretPath: expandSecretPath,
secretName: expandSecretName,
secretTags: expandSecretTags
})
)
});
const { expandedValue, stackTrace } = await getExpandedSecretStackTrace({
environment,
secretPath,
value: secretValue
});
return { tree: stackTrace, value: expandedValue };
};
return {
createSecret,
deleteSecret,
@@ -1942,6 +2046,8 @@ export const secretV2BridgeServiceFactory = ({
moveSecrets,
getSecretsCount,
getSecretsCountMultiEnv,
getSecretsMultiEnv
getSecretsMultiEnv,
getSecretReferenceTree,
getSecretsByFolderMappings
};
};

View File
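To make the new `getSecretReferenceTree` return value concrete: the trace is a tree rooted at a synthetic `root` node holding the raw, unexpanded value, with one child per resolved reference. A hypothetical example, reusing the `TSecretReferenceTraceNode` type introduced earlier in this compare:

```ts
type TSecretReferenceTraceNode = {
  key: string;
  value?: string;
  environment: string;
  secretPath: string;
  children: TSecretReferenceTraceNode[];
};

// Hypothetical trace for DB_URL = "postgres://${DB_USER}:${DB_PASS}@host/db"
const tree: TSecretReferenceTraceNode = {
  key: "root",
  value: "postgres://${DB_USER}:${DB_PASS}@host/db",
  environment: "prod",
  secretPath: "/",
  children: [
    { key: "DB_USER", value: "admin", environment: "prod", secretPath: "/", children: [] },
    { key: "DB_PASS", value: "s3cret", environment: "prod", secretPath: "/", children: [] }
  ]
};
// The service returns { tree, value }, where value is the fully expanded string:
// "postgres://admin:s3cret@host/db"
```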

@@ -278,3 +278,27 @@ export type TAttachSecretTagsDTO = {
secretPath: string;
type: SecretType;
} & Omit<TProjectPermission, "projectId">;
export type TGetSecretReferencesTreeDTO = {
projectId: string;
secretName: string;
environment: string;
secretPath: string;
} & Omit<TProjectPermission, "projectId">;
export type TFindSecretsByFolderIdsFilter = {
limit?: number;
offset?: number;
orderBy?: SecretsOrderBy;
orderDirection?: OrderByDirection;
search?: string;
tagSlugs?: string[];
includeTagsInSearch?: boolean;
};
export type TGetSecretsRawByFolderMappingsDTO = {
projectId: string;
folderMappings: { folderId: string; path: string; environment: string }[];
userId: string;
filters: TFindSecretsByFolderIdsFilter;
};

View File

@@ -299,7 +299,7 @@ export const secretQueueFactory = ({
);
return content;
}
const expandSecretReferences = expandSecretReferencesFactory({
const { expandSecretReferences } = expandSecretReferencesFactory({
decryptSecretValue: dto.decryptor,
secretDAL: secretV2BridgeDAL,
folderDAL,

View File

@@ -27,6 +27,8 @@ import { BadRequestError, ForbiddenRequestError, NotFoundError } from "@app/lib/
import { groupBy, pick } from "@app/lib/fn";
import { logger } from "@app/lib/logger";
import { alphaNumericNanoId } from "@app/lib/nanoid";
import { ProjectServiceActor } from "@app/lib/types";
import { TGetSecretsRawByFolderMappingsDTO } from "@app/services/secret-v2-bridge/secret-v2-bridge-types";
import { ActorType } from "../auth/auth-type";
import { TProjectDALFactory } from "../project/project-dal";
@@ -38,6 +40,7 @@ import { TSecretImportDALFactory } from "../secret-import/secret-import-dal";
import { fnSecretsFromImports } from "../secret-import/secret-import-fns";
import { TSecretTagDALFactory } from "../secret-tag/secret-tag-dal";
import { TSecretV2BridgeServiceFactory } from "../secret-v2-bridge/secret-v2-bridge-service";
import { TGetSecretReferencesTreeDTO } from "../secret-v2-bridge/secret-v2-bridge-types";
import { TSecretDALFactory } from "./secret-dal";
import {
decryptSecretRaw,
@@ -1099,6 +1102,18 @@ export const secretServiceFactory = ({
return secrets;
};
const getSecretReferenceTree = async (dto: TGetSecretReferencesTreeDTO) => {
const { shouldUseSecretV2Bridge } = await projectBotService.getBotKey(dto.projectId);
if (!shouldUseSecretV2Bridge)
throw new BadRequestError({
message: "Project version does not support secret reference tree",
name: "SecretReferenceTreeNotSupported"
});
return secretV2BridgeService.getSecretReferenceTree(dto);
};
const getSecretsRaw = async ({
projectId,
path,
@@ -2832,6 +2847,27 @@ export const secretServiceFactory = ({
return { message: "Migrating project to new KMS architecture" };
};
const getSecretsRawByFolderMappings = async (
params: Omit<TGetSecretsRawByFolderMappingsDTO, "userId">,
actor: ProjectServiceActor
) => {
const { shouldUseSecretV2Bridge } = await projectBotService.getBotKey(params.projectId);
if (!shouldUseSecretV2Bridge) throw new BadRequestError({ message: "Project version not supported" });
const { permission } = await permissionService.getProjectPermission(
actor.type,
actor.id,
params.projectId,
actor.authMethod,
actor.orgId
);
const secrets = secretV2BridgeService.getSecretsByFolderMappings({ ...params, userId: actor.id }, permission);
return secrets;
};
return {
attachTags,
detachTags,
@@ -2857,6 +2893,8 @@ export const secretServiceFactory = ({
startSecretV2Migration,
getSecretsCount,
getSecretsCountMultiEnv,
getSecretsRawMultiEnv
getSecretsRawMultiEnv,
getSecretReferenceTree,
getSecretsRawByFolderMappings
};
};

View File

@@ -14,9 +14,31 @@ export const superAdminDALFactory = (db: TDbClient) => {
const config = await (tx || db)(TableName.SuperAdmin)
.where(`${TableName.SuperAdmin}.id`, id)
.leftJoin(TableName.Organization, `${TableName.SuperAdmin}.defaultAuthOrgId`, `${TableName.Organization}.id`)
.leftJoin(TableName.SamlConfig, (qb) => {
qb.on(`${TableName.SamlConfig}.orgId`, "=", `${TableName.Organization}.id`).andOn(
`${TableName.SamlConfig}.isActive`,
"=",
db.raw("true")
);
})
.leftJoin(TableName.OidcConfig, (qb) => {
qb.on(`${TableName.OidcConfig}.orgId`, "=", `${TableName.Organization}.id`).andOn(
`${TableName.OidcConfig}.isActive`,
"=",
db.raw("true")
);
})
.select(
db.ref("*").withSchema(TableName.SuperAdmin) as unknown as keyof TSuperAdmin,
db.ref("slug").withSchema(TableName.Organization).as("defaultAuthOrgSlug")
db.ref("slug").withSchema(TableName.Organization).as("defaultAuthOrgSlug"),
db.ref("authEnforced").withSchema(TableName.Organization).as("defaultAuthOrgAuthEnforced"),
db.raw(`
CASE
WHEN ${TableName.SamlConfig}."orgId" IS NOT NULL THEN 'saml'
WHEN ${TableName.OidcConfig}."orgId" IS NOT NULL THEN 'oidc'
ELSE NULL
END as "defaultAuthOrgAuthMethod"
`)
)
.first();
@@ -27,7 +49,11 @@ export const superAdminDALFactory = (db: TDbClient) => {
return {
...config,
defaultAuthOrgSlug: config?.defaultAuthOrgSlug || null
} as TSuperAdmin & { defaultAuthOrgSlug: string | null };
} as TSuperAdmin & {
defaultAuthOrgSlug: string | null;
defaultAuthOrgAuthEnforced?: boolean | null;
defaultAuthOrgAuthMethod?: string | null;
};
};
const updateById = async (id: string, data: TSuperAdminUpdate, tx?: Knex) => {

View File
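The SQL `CASE` above derives a `defaultAuthOrgAuthMethod` for the admin config, giving an active SAML config precedence over OIDC and falling back to `NULL` when neither join matched. The same precedence expressed as a hypothetical TypeScript helper:

```ts
// Sketch of the precedence encoded by the CASE expression (illustrative only).
const deriveDefaultAuthOrgAuthMethod = (
  hasActiveSamlConfig: boolean,
  hasActiveOidcConfig: boolean
): "saml" | "oidc" | null => {
  if (hasActiveSamlConfig) return "saml"; // SamlConfig join matched first
  if (hasActiveOidcConfig) return "oidc";
  return null; // no active SSO config on the default org
};
```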

@@ -29,7 +29,13 @@ type TSuperAdminServiceFactoryDep = {
export type TSuperAdminServiceFactory = ReturnType<typeof superAdminServiceFactory>;
// eslint-disable-next-line
export let getServerCfg: () => Promise<TSuperAdmin & { defaultAuthOrgSlug: string | null }>;
export let getServerCfg: () => Promise<
TSuperAdmin & {
defaultAuthOrgSlug: string | null;
defaultAuthOrgAuthEnforced?: boolean | null;
defaultAuthOrgAuthMethod?: string | null;
}
>;
const ADMIN_CONFIG_KEY = "infisical-admin-cfg";
const ADMIN_CONFIG_KEY_EXP = 60; // 60s

View File

@@ -48,7 +48,7 @@ export const telemetryQueueServiceFactory = ({
await keyStore.deleteItem(TELEMETRY_SECRET_OPERATIONS_KEY);
});
// every day at midnight a telemetry job executes on self hosted
// every day at midnight a telemetry job executes on self-hosted instances
// this sends telemetry information such as instance id, secrets operated, etc.
const startTelemetryCheck = async () => {
// this is a fast way to check whether it's cloud or not

View File

@@ -62,7 +62,7 @@ To opt into telemetry, you can set "TELEMETRY_ENABLED=true" within the environme
const sendPostHogEvents = async (event: TPostHogEvent) => {
if (postHog) {
const instanceType = licenseService.getInstanceType();
// capture posthog only when its cloud or signup event happens in self hosted
// capture posthog only when it's cloud or a signup event happens in self-hosted
if (instanceType === InstanceType.Cloud || event.event === PostHogEventTypes.UserSignedUp) {
postHog.capture({
event: event.event,

View File

@@ -429,7 +429,7 @@ func CallGetRawSecretsV3(httpClient *resty.Client, request GetRawSecretsV3Reques
(strings.Contains(response.String(), "bot_not_found_error") ||
strings.Contains(strings.ToLower(response.String()), "failed to find bot key") ||
strings.Contains(strings.ToLower(response.String()), "bot is not active")) {
return GetRawSecretsV3Response{}, fmt.Errorf(`Project with id %s is incompatible with your current CLI version. Upgrade your project by visiting the project settings page. If you're self hosting and project upgrade option isn't yet available, contact your administrator to upgrade your Infisical instance to the latest release.
return GetRawSecretsV3Response{}, fmt.Errorf(`Project with id %s is incompatible with your current CLI version. Upgrade your project by visiting the project settings page. If you're self-hosting and the project upgrade option isn't yet available, contact your administrator to upgrade your Infisical instance to the latest release.
`, request.WorkspaceId)
}

View File

@@ -236,7 +236,7 @@ var loginCmd = &cobra.Command{
}
//prompt user to select domain between Infisical cloud and self hosting
//prompt user to select domain between Infisical cloud and self-hosting
if domainQuery {
err = askForDomain()
if err != nil {
@@ -528,11 +528,11 @@ func DomainOverridePrompt() (bool, error) {
func askForDomain() error {
// query user to choose between Infisical cloud or self hosting
// query user to choose between Infisical cloud or self-hosting
const (
INFISICAL_CLOUD_US = "Infisical Cloud (US Region)"
INFISICAL_CLOUD_EU = "Infisical Cloud (EU Region)"
SELF_HOSTING = "Self Hosting"
SELF_HOSTING = "Self-Hosting"
ADD_NEW_DOMAIN = "Add a new domain"
)
@@ -609,7 +609,7 @@ func askForDomain() error {
return err
}
// Trimmed the '/' from the end of the self hosting url, and set the api & login url
// Trim the '/' from the end of the self-hosting URL, and set the api & login urls
domain = strings.TrimRight(domain, "/")
config.INFISICAL_URL = fmt.Sprintf("%s/api", domain)
config.INFISICAL_LOGIN_URL = fmt.Sprintf("%s/login", domain)

View File

@@ -167,7 +167,7 @@ For security and privacy concerns, we recommend you to configure your terminal t
#### Method 2: Export environment variable
You can point the CLI to the self hosted Infisical instance by exporting the environment variable `INFISICAL_API_URL` in your terminal.
You can point the CLI to the self-hosted Infisical instance by exporting the environment variable `INFISICAL_API_URL` in your terminal.
<Tabs>
<Tab title="Linux/MacOs">
@@ -197,7 +197,7 @@ For security and privacy concerns, we recommend you to configure your terminal t
#### Method 3: Set manually on every command
Another option to point the CLI to your self hosted Infisical instance is to set it via a flag on every command you run.
Another option to point the CLI to your self-hosted Infisical instance is to set it via a flag on every command you run.
```bash
# Example

View File

@@ -0,0 +1,71 @@
---
title: "Infisical Organizational Structure Blueprint"
sidebarTitle: "Organization Structure"
description: "Learn how to structure your projects, secrets, and other resources within Infisical."
---
Infisical is designed to provide comprehensive, centralized, and efficient management of secrets, certificates, and encryption keys within organizations. Below is an overview of Infisical's structured components, which developers and administrators can leverage for optimal project management and security posture.
### 1. Projects
- **Definition and Role**: [Projects](/documentation/platform/project) are the highest-level construct within an [organization](/documentation/platform/organization) in Infisical. They serve as the primary container for all functionalities.
- **Correspondence to Code Repositories**: Projects typically align with specific code repositories.
- **Functional Capabilities**: Each project encompasses features for managing secrets, certificates, and encryption keys, serving as the central hub for these resources.
### 2. Environments
- **Purpose**: Environments are designed for organizing and compartmentalizing secrets within projects.
- **Customization Options**: Environments can be tailored to align with existing infrastructure setups of any project. Default options include **Development**, **Staging**, and **Production**.
- **Structure**: Each environment inherently has a root level for storing secrets, but secrets can be organized further through [folders](/documentation/platform/folder) for better secret management.
### 3. Folders
- **Use Case**: Folders are available for more advanced organizational needs, allowing logical separation of secrets.
- **Typical Structure**: Folders can correspond to specific logical units, such as microservices or different layers of an application, providing refined control over secrets.
### 4. Imports
- **Purpose and Benefits**: To promote reusability and avoid redundancy, Infisical supports the use of imports. This allows secrets, folders, or entire environments to be referenced across multiple projects as needed.
- **Best Practice**: Utilizing [secret imports](/documentation/platform/secret-reference#secret-imports) or [references](/documentation/platform/secret-reference#secret-referencing) ensures consistency and minimizes manual overhead.
### 5. Approval Workflows
- **Importance**: Implementing approval workflows is recommended for organizations aiming to enhance efficiency and strengthen their security posture.
- **Types of Workflows**:
- **[Access Requests](/documentation/platform/access-controls/access-requests)**: This workflow allows developers to request access to sensitive resources. Such access can be configured for temporary use, a practice known as "just-in-time" access.
- **[Change Requests](/documentation/platform/pr-workflows)**: Facilitates reviews and approvals when changes are proposed for sensitive environments or specific folders, ensuring proper oversight.
### 6. Access Controls
Infisical's access control framework is unified for both human users and machine identities, ensuring consistent management across the board.
### 6.1 Roles
- **Two Role Types**:
- **Organization-Level Roles**: Provide broad access across the organization (e.g., ability to manage billing, configure settings, etc.).
- **Project-Level Roles**: Essential for configuring access to specific secrets and other sensitive assets within a project.
- **Granular Permissions**: While default roles are available, [custom roles](/documentation/platform/access-controls/role-based-access-controls#creating-custom-roles) can be created for more tailored access controls.
- **Admin Considerations**: Note that admin users are able to access all projects. This role should be assigned judiciously to prevent unintended overreach.
<Note>Project access is defined not via an organization-level role, but rather through specific project memberships of both human and machine identities. Admin roles bypass this by default. </Note>
### 6.2 Additional Privileges
[Additional privileges](/documentation/platform/access-controls/additional-privileges) can be assigned to users and machines on an ad-hoc basis for specific scenarios where roles alone are insufficient. If you find yourself using additional privileges frequently, consider creating custom roles instead. Additional privileges can be temporary or permanent.
### 6.3 Attribute-Based Access Control (ABAC)
[Attribute-based Access Controls](/documentation/platform/access-controls/attribute-based-access-controls) allow restrictions based on tags or attributes linked to secrets. These can be integrated with SAML assertions and other security frameworks for dynamic access management.
### 6.4 User Groups
- **Application**: Organizations should use user groups when they have many developers with the same level of access (e.g., separated by team, department, seniority, etc.).
- **Synchronization**: [User groups](/documentation/platform/groups) can be synced with an identity provider to maintain consistency and reduce manual management.
### **Implementation Note**
For larger-scale organizations, automating configurations through **Terraform** or other infrastructure-as-code (IaC) tools is advisable. Manual configurations may lead to errors, so leveraging IaC enhances reliability and consistency in managing Infisical's robust capabilities.
This structured approach ensures that Infisical's functionalities are fully leveraged, providing both flexibility and rigorous control over an organization's sensitive information and access needs.

View File

@@ -0,0 +1,65 @@
---
title: "Attribute-based Access Controls"
description: "Learn how to use ABAC to manage permissions based on identity attributes."
---
Infisical's Attribute-based Access Controls (ABAC) allow for dynamic, attribute-driven permissions for both user and machine identities.
ABAC policies use metadata attributes, stored as key-value pairs on identities, to enforce fine-grained permissions that are context-aware.
In ABAC, access controls are defined using metadata attributes, such as location or department, which can be set directly on user or machine identities.
During policy evaluation, these attributes determine whether the actor can access the requested resource or perform the requested operation.
## Project-level Permissions
Attribute-based access controls are currently available for policies defined on projects. You can set ABAC permissions to control access to environments, folders, secrets, and secret tags.
### Setting Metadata on Identities
<Tabs>
<Tab title="Manually Configure Metadata">
<Steps>
<Step title="Navigate to the Access Control page on the organization sidebar and select an identity (user or machine).">
<img src="/images/platform/access-controls/add-metadata-step1.png" />
</Step>
<Step title="On the Identity Page, click the pencil icon to edit the selected identity.">
<img src="/images/platform/access-controls/add-metadata-step2.png" />
</Step>
<Step title="Add metadata via key-value pairs and update the identity.">
<img src="/images/platform/access-controls/add-metadata-step3.png" />
</Step>
</Steps>
</Tab>
<Tab title="Automatically Populate Metadata">
For organizations using SAML for login, Infisical automatically maps metadata attributes from SAML assertions to user identities.
This makes it easy to create policies that dynamically adapt based on the SAML user's attributes.
</Tab>
</Tabs>
## Defining ABAC Policies
<img src="/images/platform/access-controls/example-abac-1.png" />
ABAC policies make use of identity metadata to define dynamic permissions. Each attribute reference must be wrapped in double curly brackets: `{{ <attribute-name> }}`.
The following attributes are available within project permissions:
- **User ID**: `{{ identity.id }}`
- **Username**: `{{ identity.username }}`
- **Metadata Attributes**: `{{ identity.metadata.<metadata-key-name> }}`
During policy execution, these placeholders are replaced by their actual values prior to evaluation.
### Example Use Case
#### Location-based Access Control
Suppose you want to restrict access to secrets within a specific folder based on a user's geographic region.
You could assign a `location` attribute to each user (e.g., `identity.metadata.location`).
You could then structure your folders to align with this attribute and define permissions accordingly.
For example, a policy might restrict access to folders matching the user's location attribute in the following pattern:
```
/appA/{{ identity.metadata.location }}
```
Using this structure, users can only access folders that correspond to their configured `location` attribute.
Consequently, if a user's `location` attribute changes due to relocation, no policies need to be updated for them to gain access to the folders associated with their new location.

View File
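To see how the location-based template in the ABAC document above would resolve, here is a minimal substitution sketch (illustrative only, not Infisical's actual policy engine):

```ts
// Minimal sketch of placeholder substitution, assuming metadata is a flat
// string map. Not the real evaluation code.
const resolvePolicyPath = (template: string, metadata: Record<string, string>) =>
  template.replace(
    /\{\{\s*identity\.metadata\.([\w-]+)\s*\}\}/g,
    (_match, key: string) => metadata[key] ?? ""
  );

resolvePolicyPath("/appA/{{ identity.metadata.location }}", { location: "us-east" });
// => "/appA/us-east"
```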

@@ -15,6 +15,15 @@ To make sure that users and machine identities are only accessing the resources
>
Manage user and machine identity permissions through predefined roles.
</Card>
<Card
title="Attribute-based Access Control"
href="./attribute-based-access-controls"
icon="address-book"
color="#000000"
>
Manage user and machine identity permissions based on their attributes.
</Card>
<Card
title="Additional Privileges"
href="./additional-privileges"

View File

@@ -33,7 +33,7 @@ Signup can be restricted to users matching one or more email domains, such as yo
### Default Organization
If you're using SAML/LDAP for only one organization on your instance, you can specify a default organization to use at login to skip requiring users to manually enter the organization slug.
If you're using SAML/LDAP/OIDC for only one organization on your instance, you can specify a default organization to use at login, so users aren't required to manually enter the organization slug.
### Trust Emails

View File

@@ -20,10 +20,10 @@ The logs are formatted in JSON, requiring your logging provider to support JSON-
<Steps>
<Step title="Navigate to Organization Settings in your sidebar." />
<Step title="Select Audit Log Streams Tab.">
![stream create](../../images/platform/audit-log-streams/stream-create.png)
![stream create](/images/platform/audit-log-streams/stream-create.png)
</Step>
<Step title="Click on Create">
![stream create](../../images/platform/audit-log-streams/stream-inputs.png)
![stream create](/images/platform/audit-log-streams/stream-inputs.png)
Provide the following values
<ParamField path="Endpoint URL" type="string" required>
@@ -35,7 +35,7 @@ The logs are formatted in JSON, requiring your logging provider to support JSON-
</Step>
</Steps>
![stream list](../../images/platform/audit-log-streams/stream-list.png)
![stream list](/images/platform/audit-log-streams/stream-list.png)
Your Audit Logs are now ready to be streamed.
## Example Providers
@@ -44,11 +44,11 @@ Your Audit Logs are now ready to be streamed.
<Steps>
<Step title="Select Connect Source">
![better stack connect source](../../images/platform/audit-log-streams/betterstack-create-source.png)
![better stack connect source](/images/platform/audit-log-streams/betterstack-create-source.png)
</Step>
<Step title="Provide a name and select platform"/>
<Step title="Provide Audit Log Stream inputs">
![better stack connect](../../images/platform/audit-log-streams/betterstack-source-details.png)
![better stack connect](/images/platform/audit-log-streams/betterstack-source-details.png)
1. Copy the **endpoint** from Better Stack to the **Endpoint URL** field.
2. Create a new header with key **Authorization** and set the value to **Bearer \<source token from betterstack\>**.
@@ -59,21 +59,21 @@ Your Audit Logs are now ready to be streamed.
<Steps>
<Step title="Navigate to API Keys section">
![api key create](../../images/platform/audit-log-streams/datadog-api-sidebar.png)
![api key create](/images/platform/audit-log-streams/datadog-api-sidebar.png)
</Step>
<Step title="Select New Key and provide a key name">
![api key form](../../images/platform/audit-log-streams/data-create-api-key.png)
![api key form](../../images/platform/audit-log-streams/data-dog-api-key.png)
![api key form](/images/platform/audit-log-streams/data-create-api-key.png)
![api key form](/images/platform/audit-log-streams/data-dog-api-key.png)
</Step>
<Step title="Find your Datadog region specific logging endpoint.">
![datadog url](../../images/platform/audit-log-streams/datadog-logging-endpoint.png)
![datadog url](/images/platform/audit-log-streams/datadog-logging-endpoint.png)
1. Navigate to the [Datadog Send Logs API documentation](https://docs.datadoghq.com/api/latest/logs/?code-lang=curl&site=us5#send-logs).
2. Pick your Datadog account region.
3. Obtain your Datadog logging endpoint URL.
</Step>
<Step title="Provide audit log stream inputs">
![datadog api key details](../../images/platform/audit-log-streams/datadog-source-details.png)
![datadog api key details](/images/platform/audit-log-streams/datadog-source-details.png)
1. Copy the **logging endpoint** from Datadog to the **Endpoint URL** field.
2. Copy the **API Key** from the previous step.

View File

@@ -0,0 +1,121 @@
---
title: "SAP HANA"
description: "Learn how to dynamically generate SAP HANA database account credentials."
---
The Infisical SAP HANA dynamic secret allows you to generate SAP HANA database credentials on demand.
## Prerequisites
- Infisical requires an SAP HANA database user in your instance with the necessary permissions. This user will facilitate the creation of new accounts as needed.
Ensure the user has privileges for creating and dropping users, and for granting permissions to roles, so that it can create dynamic secrets.
- The SAP HANA instance should be reachable by Infisical.
## Set up Dynamic Secrets with SAP HANA
<Steps>
<Step title="Open Secret Overview Dashboard">
Open the Secret Overview dashboard and select the environment in which you would like to add a dynamic secret.
</Step>
<Step title="Click on the 'Add Dynamic Secret' button">
![Add Dynamic Secret Button](../../../images/platform/dynamic-secrets/add-dynamic-secret-button.png)
</Step>
<Step title="Select SAP HANA">
![Dynamic Secret Modal](../../../images/platform/dynamic-secrets/dynamic-secret-modal-sap-hana.png)
</Step>
<Step title="Provide the inputs for dynamic secret parameters">
<ParamField path="Secret Name" type="string" required>
Name by which you want the secret to be referenced
</ParamField>
<ParamField path="Default TTL" type="string" required>
Default time-to-live for a generated secret (it is possible to modify this value when a secret is generated)
</ParamField>
<ParamField path="Max TTL" type="string" required>
Maximum time-to-live for a generated secret
</ParamField>
<ParamField path="Host" type="string" required>
SAP HANA Host
</ParamField>
<ParamField path="Port" type="number" required>
SAP HANA Port
</ParamField>
<ParamField path="User" type="string" required>
Username that will be used to create dynamic secrets
</ParamField>
<ParamField path="Password" type="string" required>
Password that will be used to create dynamic secrets
</ParamField>
<ParamField path="CA(SSL)" type="string">
A CA may be required for SSL if you are self-hosting SAP HANA
</ParamField>
![Dynamic Secret Setup Modal](../../../images/platform/dynamic-secrets/dynamic-secret-setup-modal-sap-hana.png)
</Step>
<Step title="(Optional) Modify SQL Statements">
If you want to provide specific privileges for the generated dynamic credentials, you can modify the SQL statement to your needs.
![Modify SQL Statements Modal](../../../images/platform/dynamic-secrets/modify-sap-hana-sql-statements.png)
<Warning>
Due to SAP HANA limitations, the attached SQL statements are not executed as a transaction.
</Warning>
</Step>
<Step title="Click 'Submit'">
After submitting the form, you will see a dynamic secret created in the dashboard.
<Note>
If this step fails, you may have to add the CA certificate.
</Note>
</Step>
<Step title="Generate dynamic secrets">
Once you've successfully configured the dynamic secret, you're ready to generate on-demand credentials.
To do this, simply click on the 'Generate' button which appears when hovering over the dynamic secret item.
Alternatively, you can initiate the creation of a new lease by selecting 'New Lease' from the dynamic secret lease list section.
![Dynamic Secret](/images/platform/dynamic-secrets/dynamic-secret-generate.png)
![Dynamic Secret](/images/platform/dynamic-secrets/dynamic-secret-lease-empty.png)
When generating these secrets, it's important to specify a Time-to-Live (TTL) duration. This will dictate how long the credentials are valid for.
![Provision Lease](/images/platform/dynamic-secrets/provision-lease.png)
<Tip>
Ensure that the TTL for the lease falls within the maximum TTL defined when configuring the dynamic secret in step 4.
</Tip>
Once you click the `Submit` button, a new secret lease will be generated and the credentials for it will be shown to you.
![Provision Lease](/images/platform/dynamic-secrets/lease-values.png)
</Step>
</Steps>
## Audit or Revoke Leases
Once you have created one or more leases, you will be able to access them by clicking on the respective dynamic secret item on the dashboard.
This will allow you to see the lease details and delete the lease ahead of its expiration time.
![Provision Lease](/images/platform/dynamic-secrets/lease-data.png)
## Renew Leases
To extend the life of the generated dynamic secret lease past its initial time to live, simply click on the **Renew** button as illustrated below.
![Provision Lease](/images/platform/dynamic-secrets/dynamic-secret-lease-renew.png)
<Warning>
Lease renewals cannot exceed the maximum TTL set when configuring the dynamic
secret.
</Warning>

View File

@@ -0,0 +1,124 @@
---
title: "Snowflake"
description: "Learn how to dynamically generate Snowflake user credentials."
---
Infisical's Snowflake dynamic secrets allow you to generate Snowflake user credentials on demand.
## Snowflake Prerequisites
<Note>
Infisical requires a Snowflake user in your account with the USERADMIN role. This user will act as a service account for Infisical and facilitate the creation of new users as needed.
</Note>
<Steps>
<Step title="Navigate to Snowflake's User Dashboard and press the '+ User' button">
![Snowflake User Dashboard](/images/platform/dynamic-secrets/snowflake/dynamic-secret-snowflake-users-page.png)
</Step>
<Step title="Create a Snowflake user with the USERADMIN role for Infisical">
<Warning>
Be sure to uncheck "Force user to change password on first time login"
</Warning>
![Snowflake Create Service User](/images/platform/dynamic-secrets/snowflake/dynamic-secret-snowflake-create-service-user.png)
</Step>
<Step title="Click on the Account Menu in the bottom left and take note of your Account and Organization identifiers">
![Snowflake Account And Organization Identifiers](/images/platform/dynamic-secrets/snowflake/dynamic-secret-snowflake-identifiers.png)
</Step>
</Steps>
## Set up Dynamic Secrets with Snowflake
<Steps>
<Step title="Open the Secret Overview Dashboard">
Open the Secret Overview dashboard and select the environment in which you would like to add a dynamic secret.
</Step>
<Step title="Click on the 'Add Dynamic Secret' button">
![Add Dynamic Secret Button](/images/platform/dynamic-secrets/add-dynamic-secret-button.png)
</Step>
<Step title="Select the Snowflake option in the grid list">
![Dynamic Secret Modal](/images/platform/dynamic-secrets/snowflake/dynamic-secret-snowflake-modal.png)
</Step>
<Step title="Provide the required parameters for the Snowflake dynamic secret">
<ParamField path="Secret Name" type="string" required>
The name you want to reference this secret by
</ParamField>
<ParamField path="Default TTL" type="string" required>
Default time-to-live for a generated secret (it is possible to modify this value when generating a secret)
</ParamField>
<ParamField path="Max TTL" type="string" required>
Maximum time-to-live for a generated secret
</ParamField>
<ParamField path="Account Identifier" type="string" required>
Snowflake account identifier
</ParamField>
<ParamField path="Organization Identifier" type="string" required>
Snowflake organization identifier
</ParamField>
<ParamField path="User" type="string" required>
Username of the Infisical Service User
</ParamField>
<ParamField path="Password" type="string" required>
Password of the Infisical Service User
</ParamField>
![Dynamic Secret Setup Modal](/images/platform/dynamic-secrets/snowflake/dynamic-secret-snowflake-setup-modal.png)
</Step>
<Step title="(Optional) Modify SQL Statements">
If you want to provide specific privileges for the generated dynamic credentials, you can modify the SQL
statement to your needs.
![Modify SQL Statements Modal](/images/platform/dynamic-secrets/snowflake/dynamic-secret-snowflake-sql-statements.png)
</Step>
<Step title="Click 'Submit'">
After submitting the form, you will see a dynamic secret created in the dashboard.
</Step>
<Step title="Generate dynamic secrets">
Once you've successfully configured the dynamic secret, you're ready to generate on-demand credentials.
To do this, simply click on the 'Generate' button which appears when hovering over the dynamic secret item.
Alternatively, you can initiate the creation of a new lease by selecting 'New Lease' from the dynamic secret
lease list section.
![Dynamic Secret](/images/platform/dynamic-secrets/dynamic-secret-generate.png)
![Dynamic Secret](/images/platform/dynamic-secrets/dynamic-secret-lease-empty.png)
When generating these secrets, it's important to specify a Time-to-Live (TTL) duration. This will dictate how
long the credentials are valid for.
![Provision Lease](/images/platform/dynamic-secrets/provision-lease.png)
<Tip>
Ensure that the TTL for the lease falls within the maximum TTL defined when configuring the dynamic secret in
step 4.
</Tip>
Once you click the `Submit` button, a new secret lease will be generated and the credentials for it will be
shown to you.
![Provision Lease](/images/platform/dynamic-secrets/lease-values.png)
</Step>
</Steps>
## Audit or Revoke Leases
Once you have created one or more leases, you will be able to access them by clicking on the respective dynamic secret item on the dashboard.
This will allow you to see the lease details and delete the lease ahead of its expiration time.
![Provision Lease](/images/platform/dynamic-secrets/lease-data.png)
## Renew Leases
To extend the life of the generated dynamic secret lease past its initial time to live, simply click on the **Renew** button as illustrated below.
![Provision Lease](/images/platform/dynamic-secrets/dynamic-secret-lease-renew.png)
<Warning>
Lease renewals cannot exceed the maximum TTL set when configuring the dynamic
secret.
</Warning>

View File

@@ -20,7 +20,7 @@ Key Features:
A typical workflow for using identities consists of four steps:
1. Creating the identity with a name and [role](/documentation/platform/role-based-access-controls) in Organization Access Control > Machine Identities.
1. Creating the identity with a name and [role](/documentation/platform/access-controls/role-based-access-controls) in Organization Access Control > Machine Identities.
This step also involves configuring an authentication method for it.
2. Adding the identity to the project(s) you want it to have access to.
3. Authenticating the identity with the Infisical API based on the configured authentication method on it and receiving a short-lived access token back.

View File

@@ -4,7 +4,7 @@ sidebarTitle: "Overview"
description: "Learn more about identities to interact with resources in Infisical."
---
To interact with secrets and resource with Infisical, it is important to undrestand the concept of identities.
To interact with secrets and resources in Infisical, it is important to understand the concept of identities.
Identities can be of two types:
- **People** (e.g., developers, platform engineers, administrators)
- **Machines** (e.g., machine entities for managing secrets in CI/CD pipelines, production applications, and more)

View File

@@ -0,0 +1,5 @@
---
title: "Kubernetes Encryption with KMS"
sidebarTitle: "Kubernetes Encryption"
url: "https://github.com/Infisical/k8-kms-plugin"
---

View File

@@ -1,6 +1,6 @@
---
title: "Key Management Service (KMS)"
sidebarTitle: "Key Management (KMS)"
sidebarTitle: "Overview"
description: "Learn how to manage and use cryptographic keys with Infisical."
---

View File

@@ -69,11 +69,18 @@ description: "Learn how to configure Auth0 OIDC for Infisical SSO."
</Step>
</Steps>
<Tip>
If you are only using one organization on your Infisical instance, you can configure a default organization in the [Server Admin Console](../admin-panel/server-admin#default-organization) to expedite OIDC login.
</Tip>
<Note>
If you're configuring OIDC SSO on a self-hosted instance of Infisical, make
sure to set the `AUTH_SECRET` and `SITE_URL` environment variable for it to
work: - `AUTH_SECRET`: A secret key used for signing and verifying JWT. This
work:
<div class="height:1px;"/>
- `AUTH_SECRET`: A secret key used for signing and verifying JWT. This
can be a random 32-byte base64 string generated with `openssl rand -base64
32`. - `SITE_URL`: The URL of your self-hosted instance of Infisical - should
be an absolute URL including the protocol (e.g. https://app.infisical.com)
32`.
<div class="height:1px;"/>
- `SITE_URL`: The absolute URL of your self-hosted instance of Infisical including the protocol (e.g. https://app.infisical.com)
</Note>

View File

@@ -109,12 +109,20 @@ description: "Learn how to configure Microsoft Entra ID for Infisical SSO."
</Step>
</Steps>
<Tip>
If you are only using one organization on your Infisical instance, you can configure a default organization in the [Server Admin Console](../admin-panel/server-admin#default-organization) to expedite SAML login.
</Tip>
<Note>
If you're configuring SAML SSO on a self-hosted instance of Infisical, make sure to
set the `AUTH_SECRET` and `SITE_URL` environment variable for it to work:
- `AUTH_SECRET`: A secret key used for signing and verifying JWT. This can be a random 32-byte base64 string generated with `openssl rand -base64 32`.
- `SITE_URL`: The URL of your self-hosted instance of Infisical - should be an absolute URL including the protocol (e.g. https://app.infisical.com)
If you're configuring SAML SSO on a self-hosted instance of Infisical, make
sure to set the `AUTH_SECRET` and `SITE_URL` environment variable for it to
work:
<div class="height:1px;"/>
- `AUTH_SECRET`: A secret key used for signing and verifying JWT. This
can be a random 32-byte base64 string generated with `openssl rand -base64
32`.
<div class="height:1px;"/>
- `SITE_URL`: The absolute URL of your self-hosted instance of Infisical including the protocol (e.g. https://app.infisical.com)
</Note>
<Note>

View File

@@ -20,11 +20,11 @@ Prerequisites:
<Steps>
<Step title="Setup Identity Provider">
1.1. Register your application with the IdP to obtain a **Client ID** and **Client Secret**. These credentials are used by Infisical to authenticate with your IdP.
1.2. Configure **Redirect URL** to be `https://app.infisical.com/api/v1/sso/oidc/callback`. If you're self-hosting Infisical, replace the domain with your own.
1.3. Configure the scopes needed by Infisical (email, profile, openid) and ensure that they are mapped to the ID token claims.
1.4. Access the IdP's OIDC discovery document (usually located at `https://<idp-domain>/.well-known/openid-configuration`). This document contains important endpoints such as authorization, token, userinfo, and keys.
</Step>
<Step title="Finish configuring OIDC in Infisical">
@@ -70,11 +70,19 @@ Prerequisites:
</Steps>
<Tip>
If you are only using one organization on your Infisical instance, you can configure a default organization in the [Server Admin Console](../admin-panel/server-admin#default-organization) to expedite OIDC login.
</Tip>
<Note>
If you're configuring OIDC SSO on a self-hosted instance of Infisical, make
sure to set the `AUTH_SECRET` and `SITE_URL` environment variable for it to
work: - `AUTH_SECRET`: A secret key used for signing and verifying JWT. This
work:
<div class="height:1px;"/>
- `AUTH_SECRET`: A secret key used for signing and verifying JWT. This
can be a random 32-byte base64 string generated with `openssl rand -base64
32`. - `SITE_URL`: The URL of your self-hosted instance of Infisical - should
be an absolute URL including the protocol (e.g. https://app.infisical.com)
32`.
<div class="height:1px;"/>
- `SITE_URL`: The absolute URL of your self-hosted instance of Infisical including the protocol (e.g. https://app.infisical.com)
</Note>

View File

@@ -85,13 +85,20 @@ description: "Learn how to configure Google SAML for Infisical SSO."
</Steps>
<Tip>
If you are only using one organization on your Infisical instance, you can configure a default organization in the [Server Admin Console](../admin-panel/server-admin#default-organization) to expedite SAML login.
</Tip>
<Note>
If you're configuring SAML SSO on a self-hosted instance of Infisical, make sure to set the `AUTH_SECRET` and `SITE_URL` environment variables for it to work:
<div class="height:1px;"/>
- `AUTH_SECRET`: A secret key used for signing and verifying JWT. This can be a random 32-byte base64 string generated with `openssl rand -base64 32`.
<div class="height:1px;"/>
- `SITE_URL`: The absolute URL of your self-hosted instance of Infisical including the protocol (e.g. https://app.infisical.com)
</Note>
References:

View File

@@ -89,10 +89,18 @@ description: "Learn how to configure JumpCloud SAML for Infisical SSO."
</Step>
</Steps>
<Tip>
If you are only using one organization on your Infisical instance, you can configure a default organization in the [Server Admin Console](../admin-panel/server-admin#default-organization) to expedite SAML login.
</Tip>
<Note>
If you're configuring SAML SSO on a self-hosted instance of Infisical, make sure to set the `AUTH_SECRET` and `SITE_URL` environment variables for it to work:
<div class="height:1px;"/>
- `AUTH_SECRET`: A secret key used for signing and verifying JWT. This can be a random 32-byte base64 string generated with `openssl rand -base64 32`.
<div class="height:1px;"/>
- `SITE_URL`: The absolute URL of your self-hosted instance of Infisical including the protocol (e.g. https://app.infisical.com)
</Note>

View File

@@ -95,11 +95,18 @@ description: "Learn how to configure Keycloak OIDC for Infisical SSO."
</Step>
</Steps>
<Tip>
If you are only using one organization on your Infisical instance, you can configure a default organization in the [Server Admin Console](../admin-panel/server-admin#default-organization) to expedite OIDC login.
</Tip>
<Note>
If you're configuring OIDC SSO on a self-hosted instance of Infisical, make sure to set the `AUTH_SECRET` and `SITE_URL` environment variables for it to work:
<div class="height:1px;"/>
- `AUTH_SECRET`: A secret key used for signing and verifying JWT. This can be a random 32-byte base64 string generated with `openssl rand -base64 32`.
<div class="height:1px;"/>
- `SITE_URL`: The absolute URL of your self-hosted instance of Infisical including the protocol (e.g. https://app.infisical.com)
</Note>

View File

@@ -130,10 +130,18 @@ description: "Learn how to configure Keycloak SAML for Infisical SSO."
</Step>
</Steps>
<Tip>
If you are only using one organization on your Infisical instance, you can configure a default organization in the [Server Admin Console](../admin-panel/server-admin#default-organization) to expedite SAML login.
</Tip>
<Note>
If you're configuring SAML SSO on a self-hosted instance of Infisical, make sure to set the `AUTH_SECRET` and `SITE_URL` environment variables for it to work:
<div class="height:1px;"/>
- `AUTH_SECRET`: A secret key used for signing and verifying JWT. This can be a random 32-byte base64 string generated with `openssl rand -base64 32`.
<div class="height:1px;"/>
- `SITE_URL`: The absolute URL of your self-hosted instance of Infisical including the protocol (e.g. https://app.infisical.com)
</Note>

View File

@@ -98,11 +98,18 @@ description: "Learn how to configure Okta SAML 2.0 for Infisical SSO."
</Step>
</Steps>
<Tip>
If you are only using one organization on your Infisical instance, you can configure a default organization in the [Server Admin Console](../admin-panel/server-admin#default-organization) to expedite SAML login.
</Tip>
<Note>
If you're configuring SAML SSO on a self-hosted instance of Infisical, make sure to set the `AUTH_SECRET` and `SITE_URL` environment variables for it to work:
<div class="height:1px;"/>
- `AUTH_SECRET`: A secret key used for signing and verifying JWT. This can be a random 32-byte base64 string generated with `openssl rand -base64 32`.
<div class="height:1px;"/>
- `SITE_URL`: The absolute URL of your self-hosted instance of Infisical including the protocol (e.g. https://app.infisical.com)
</Note>

13 binary image files added (not shown in this diff); sizes range from 122 KiB to 933 KiB.
View File

@@ -33,7 +33,7 @@ Set up the Infisical provider by specifying the `host` and `service_token`. Repl
```hcl main.tf
provider "infisical" {
host = "https://app.infisical.com" # Only required if using a self-hosted instance of Infisical, default is https://app.infisical.com
client_id = "<>"
client_secret = "<>"
service_token = "<>" # DEPRECATED, USE MACHINE IDENTITY AUTH INSTEAD

View File

@@ -10,7 +10,9 @@ It uses an `InfisicalSecret` resource to specify authentication and storage meth
The operator continuously updates secrets and can also reload dependent deployments automatically.
<Note>
If you are already using the External Secrets operator, you can view the integration documentation for it [here](https://external-secrets.io/latest/provider/infisical/).
</Note>
## Install Operator
@@ -31,7 +33,7 @@ The operator can be installed via [Helm](https://helm.sh) or [kubectl](https://git
To select a specific version, view the application versions [here](https://hub.docker.com/r/infisical/kubernetes-operator/tags) and chart versions [here](https://cloudsmith.io/~infisical/repos/helm-charts/packages/detail/helm/secrets-operator/#versions)
```bash
helm install --generate-name infisical-helm-charts/secrets-operator
```
```bash
@@ -61,115 +63,112 @@ Once you apply the manifest, the operator will be installed in `infisical-operat
Once you have installed the operator to your cluster, you'll need to create an `InfisicalSecret` custom resource definition (CRD).
```yaml example-infisical-secret-crd.yaml
apiVersion: secrets.infisical.com/v1alpha1
kind: InfisicalSecret
metadata:
  name: infisicalsecret-sample
  labels:
    label-to-be-passed-to-managed-secret: sample-value
  annotations:
    example.com/annotation-to-be-passed-to-managed-secret: "sample-value"
spec:
  hostAPI: https://app.infisical.com/api
  resyncInterval: 10
  authentication:
    # Make sure to only have 1 authentication method defined, serviceToken/universalAuth.
    # If you have multiple authentication methods defined, it may cause issues.

    # (Deprecated) Service Token Auth
    serviceToken:
      serviceTokenSecretReference:
        secretName: service-token
        secretNamespace: default
      secretsScope:
        envSlug: <env-slug>
        secretsPath: <secrets-path>
        recursive: true

    # Universal Auth
    universalAuth:
      secretsScope:
        projectSlug: new-ob-em
        envSlug: dev # "dev", "staging", "prod", etc.
        secretsPath: "/" # Root is "/"
        recursive: true # Whether or not to use recursive mode (fetches all secrets in an environment from a given secret path, and all folders inside the path) / defaults to false
      credentialsRef:
        secretName: universal-auth-credentials
        secretNamespace: default

    # Native Kubernetes Auth
    kubernetesAuth:
      identityId: <machine-identity-id>
      serviceAccountRef:
        name: <service-account-name>
        namespace: <service-account-namespace>
      # secretsScope is identical to the secrets scope in the universalAuth field in this sample.
      secretsScope:
        projectSlug: your-project-slug
        envSlug: prod
        secretsPath: "/path"
        recursive: true

    # AWS IAM Auth
    awsIamAuth:
      identityId: <your-machine-identity-id>
      # secretsScope is identical to the secrets scope in the universalAuth field in this sample.
      secretsScope:
        projectSlug: your-project-slug
        envSlug: prod
        secretsPath: "/path"
        recursive: true

    # Azure Auth
    azureAuth:
      identityId: <your-machine-identity-id>
      resource: https://management.azure.com/&client_id=CLIENT_ID # (Optional) This is the Azure resource that you want to access. For example, "https://management.azure.com/". If no value is provided, it will default to "https://management.azure.com/"
      # secretsScope is identical to the secrets scope in the universalAuth field in this sample.
      secretsScope:
        projectSlug: your-project-slug
        envSlug: prod
        secretsPath: "/path"
        recursive: true

    # GCP ID Token Auth
    gcpIdTokenAuth:
      identityId: <your-machine-identity-id>
      # secretsScope is identical to the secrets scope in the universalAuth field in this sample.
      secretsScope:
        projectSlug: your-project-slug
        envSlug: prod
        secretsPath: "/path"
        recursive: true

    # GCP IAM Auth
    gcpIamAuth:
      identityId: <your-machine-identity-id>
      # secretsScope is identical to the secrets scope in the universalAuth field in this sample.
      secretsScope:
        projectSlug: your-project-slug
        envSlug: prod
        secretsPath: "/path"
        recursive: true

  managedSecretReference:
    secretName: managed-secret
    secretNamespace: default
    creationPolicy: "Orphan" ## Owner | Orphan
    # secretType: kubernetes.io/dockerconfigjson
```
### InfisicalSecret CRD properties
<Accordion title="hostAPI">
If you are fetching secrets from a self-hosted instance of Infisical, set the value of `hostAPI` to
`https://your-self-hosted-instance.com/api`.
When `hostAPI` is not defined, the operator fetches secrets from Infisical Cloud.
@@ -193,6 +192,31 @@ When `hostAPI` is not defined the operator fetches secrets from Infisical Cloud.
available on paid plans. Default re-sync interval is every 1 minute.
</Accordion>
<Accordion title="tls">
This block defines the TLS settings to use for connecting to the Infisical
instance.
</Accordion>
<Accordion title="tls.caRef">
This block defines the reference to the CA certificate to use for connecting
to the Infisical instance with SSL/TLS.
</Accordion>
<Accordion title="tls.caRef.secretName">
The name of the Kubernetes secret containing the CA certificate to use for
connecting to the Infisical instance with SSL/TLS.
</Accordion>
<Accordion title="tls.caRef.secretNamespace">
The namespace of the Kubernetes secret containing the CA certificate to use
for connecting to the Infisical instance with SSL/TLS.
</Accordion>
<Accordion title="tls.caRef.key">
The name of the key in the Kubernetes secret which contains the value of the
CA certificate to use for connecting to the Infisical instance with SSL/TLS.
</Accordion>
<Accordion title="authentication">
This block defines the method that will be used to authenticate with Infisical so that secrets can be fetched.
@@ -222,8 +246,6 @@ When `hostAPI` is not defined the operator fetches secrets from Infisical Cloud.
</Steps>
<Info>
Make sure to also populate the `secretsScope` field with the project slug
_`projectSlug`_, environment slug _`envSlug`_, and secrets path _`secretsPath`_.
@@ -365,15 +387,15 @@ spec:
</Step>
<Step title="Add your identity ID & service account to your InfisicalSecret resource">
Once you have created your machine identity and added it to your project(s), you will need to add the identity ID to your InfisicalSecret resource.
In the `authentication.kubernetesAuth.identityId` field, add the identity ID of the machine identity you created.
See the example below for more details.
</Step>
<Step title="Add your Kubernetes service account token to the InfisicalSecret resource">
Add the service account details from the previous steps under `authentication.kubernetesAuth.serviceAccountRef`.
Here you will need to enter the name and namespace of the service account.
The example below shows a complete InfisicalSecret resource with all required fields defined.
</Step>
</Steps>
@@ -539,8 +561,6 @@ spec:
</Accordion>
<Accordion title="authentication.gcpIamAuth">
The GCP IAM machine identity authentication method is used to authenticate with Infisical. The identity ID is stored in a field in the InfisicalSecret resource. This authentication method can be used both within and outside GCP environments.
@@ -877,6 +897,42 @@ spec:
</Accordion>
### Connecting to instances with private/self-signed certificate
To connect to Infisical instances behind a private/self-signed certificate, you can configure the TLS settings in the `InfisicalSecret` CRD
to point to a CA certificate stored in a Kubernetes secret resource.
```yaml
---
spec:
  hostAPI: https://app.infisical.com/api
  resyncInterval: 10
  tls:
    caRef:
      secretName: custom-ca-certificate
      secretNamespace: default
      key: ca.crt
  authentication:
---
```
The definition file of the Kubernetes secret for the CA certificate can be structured like the following:
```yaml
apiVersion: v1
kind: Secret
metadata:
  name: custom-ca-certificate
type: Opaque
stringData:
  ca.crt: |
    -----BEGIN CERTIFICATE-----
    MIIEZzCCA0+gAwIBAgIUDk9+HZcMHppiNy0TvoBg8/aMEqIwDQYJKoZIhvcNAQEL
    ...
    BQAwDTELMAkGA1UEChMCUEgwHhcNMjQxMDI1MTU0MjAzWhcNMjUxMDI1MjE0MjAz
    -----END CERTIFICATE-----
```
## Auto redeployment
Deployments using managed secrets don't reload automatically on updates, so they may use outdated secrets unless manually redeployed.
@@ -889,6 +945,7 @@ To enable auto redeployment you simply have to add the following annotation to t
```yaml
secrets.infisical.com/auto-reload: "true"
```
<Accordion title="Deployment example with auto redeploy enabled">
```yaml
apiVersion: apps/v1

View File

@@ -82,7 +82,8 @@
"documentation/guides/node",
"documentation/guides/python",
"documentation/guides/nextjs-vercel",
"documentation/guides/microsoft-power-apps"
"documentation/guides/microsoft-power-apps",
"documentation/guides/organization-structure"
]
}
]
@@ -113,7 +114,13 @@
"documentation/platform/pki/alerting"
]
},
"documentation/platform/kms",
{
"group": "Key Management (KMS)",
"pages": [
"documentation/platform/kms/overview",
"documentation/platform/kms/kubernetes-encryption"
]
},
{
"group": "KMS Configuration",
"pages": [
@@ -135,11 +142,11 @@
"pages": [
"documentation/platform/access-controls/overview",
"documentation/platform/access-controls/role-based-access-controls",
"documentation/platform/access-controls/attribute-based-access-controls",
"documentation/platform/access-controls/additional-privileges",
"documentation/platform/access-controls/temporary-access",
"documentation/platform/access-controls/access-requests",
"documentation/platform/pr-workflows",
"documentation/platform/audit-log-streams",
"documentation/platform/groups"
]
},
@@ -178,7 +185,9 @@
"documentation/platform/dynamic-secrets/mongo-atlas",
"documentation/platform/dynamic-secrets/mongo-db",
"documentation/platform/dynamic-secrets/azure-entra-id",
"documentation/platform/dynamic-secrets/ldap"
"documentation/platform/dynamic-secrets/ldap",
"documentation/platform/dynamic-secrets/sap-hana",
"documentation/platform/dynamic-secrets/snowflake"
]
},
{

View File

@@ -2,7 +2,7 @@
title: "Docker Compose"
description: "Read how to run Infisical with Docker Compose template."
---
This self-hosting guide will walk you through the steps to self-host Infisical using Docker Compose.
## Prerequisites
- [Docker](https://docs.docker.com/engine/install/)
@@ -79,4 +79,4 @@ docker-compose -f docker-compose.prod.yml up
Your Infisical instance should now be running on port `80`. To access your instance, visit `http://localhost:80`.
![self-hosted sign up](/images/self-hosting/applicable-to-all/selfhost-signup.png)

View File

@@ -1,11 +1,11 @@
---
title: "Docker Swarm"
description: "How to self Infisical with Docker Swarm (HA)."
description: "How to self-host Infisical with Docker Swarm (HA)."
---
# Self-Hosting Infisical with Docker Swarm
This guide will provide step-by-step instructions on how to self-host Infisical using Docker Swarm. This is particularly helpful for those wanting to self-host Infisical on-premises while still maintaining high availability (HA) for the core Infisical components.
The guide will demonstrate a setup with three nodes, ensuring that the cluster can tolerate the failure of one node while remaining fully operational.
## Docker Swarm
@@ -198,7 +198,7 @@ The [Docker stack file](https://github.com/Infisical/infisical/tree/main/docker-
</Step>
<Step title="Initialize Infisical">
![self-hosting sign up](/images/self-hosting/applicable-to-all/selfhost-signup.png)
Once all expected services are up and running, visit `<NODE-IP>:8080` of any node in the swarm. This will take you to the Infisical configuration page.
</Step>

View File

@@ -53,7 +53,7 @@ The following guide provides a detailed step-by-step walkthrough on how you can
Once the container is running, verify the installation by opening your web browser and navigating to `http://localhost:80`.
![self-hosted sign up](/images/self-hosting/applicable-to-all/selfhost-signup.png)
</Step>
</Steps>

View File

@@ -5,7 +5,7 @@ description: "Find out how to activate Infisical Enterprise edition (EE) feature
While most features in Infisical are free to use, others are paid and require purchasing an enterprise license to use them.
This guide walks through how you can use these paid features on a self-hosted instance of Infisical.
<Steps>
<Step title="Purchase a license">

View File

@@ -3,11 +3,11 @@ title: "FAQ"
description: "Frequently Asked Questions about self-hosting Infisical."
---
Frequently asked questions about self-hosted instances of Infisical can be found on this page.
If you can't find the answer you are looking for, please create an issue on our [GitHub repository](https://github.com/Infisical/infisical) or join our [Slack community](https://infisical.com/slack) for additional support.
<Accordion title="When I refresh any page, it logs me out" defaultOpen="true">
This issue is typically seen when you haven't set up SSL for your self-hosted instance of Infisical. When SSL is not enabled, you can't receive secure cookies, preventing session data from being saved.
To fix this, we highly recommend that you set up SSL for your instance.
However, in the event you choose to use Infisical without SSL, you can do so by setting the `HTTPS_ENABLED` environment variable to `"false"` for the backend application.

View File

@@ -26,6 +26,7 @@
"@radix-ui/react-accordion": "^1.1.2",
"@radix-ui/react-alert-dialog": "^1.0.5",
"@radix-ui/react-checkbox": "^1.0.4",
"@radix-ui/react-collapsible": "^1.0.3",
"@radix-ui/react-dialog": "^1.0.5",
"@radix-ui/react-dropdown-menu": "^2.0.6",
"@radix-ui/react-hover-card": "^1.0.7",
@@ -4931,6 +4932,7 @@
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/@radix-ui/react-collapsible/-/react-collapsible-1.0.3.tgz",
"integrity": "sha512-UBmVDkmR6IvDsloHVN+3rtx4Mi5TFvylYXpluuv0f37dtaz3H99bp8No0LGXRigVpl3UAT4l9j6bIchh42S/Gg==",
"license": "MIT",
"dependencies": {
"@babel/runtime": "^7.13.10",
"@radix-ui/primitive": "1.0.1",

View File

@@ -39,6 +39,7 @@
"@radix-ui/react-accordion": "^1.1.2",
"@radix-ui/react-alert-dialog": "^1.0.5",
"@radix-ui/react-checkbox": "^1.0.4",
"@radix-ui/react-collapsible": "^1.0.3",
"@radix-ui/react-dialog": "^1.0.5",
"@radix-ui/react-dropdown-menu": "^2.0.6",
"@radix-ui/react-hover-card": "^1.0.7",

View File

@@ -16,6 +16,7 @@ export type CheckboxProps = Omit<
checkIndicatorBg?: string | undefined;
isError?: boolean;
isIndeterminate?: boolean;
containerClassName?: string;
};
export const Checkbox = ({
@@ -28,10 +29,11 @@ export const Checkbox = ({
checkIndicatorBg,
isError,
isIndeterminate,
containerClassName,
...props
}: CheckboxProps): JSX.Element => {
return (
<div className="flex items-center font-inter text-bunker-300">
<div className={twMerge("flex items-center font-inter text-bunker-300", containerClassName)}>
<CheckboxPrimitive.Root
className={twMerge(
"flex h-4 w-4 flex-shrink-0 items-center justify-center rounded border border-mineshaft-400 bg-mineshaft-600 shadow transition-all hover:bg-mineshaft-500",

View File

@@ -0,0 +1,5 @@
export const reverseTruncate = (text: string, maxLength = 42) => {
  // Only truncate when the text actually exceeds maxLength (<= keeps exact-length strings intact)
  if (text.length <= maxLength) return text;
  return `...${text.substring(text.length - maxLength + 3)}`;
};
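A quick usage sketch (values are illustrative): the helper keeps the tail of the string visible, which suits secret paths where the trailing segments matter most.
```ts
const longPath = "a".repeat(50) + "/secret"; // 57 characters
reverseTruncate(longPath); // "..." followed by the last 39 characters (42 total)
reverseTruncate("SHORT_KEY"); // short strings are returned unchanged
```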

View File

@@ -19,6 +19,8 @@ export type TServerConfig = {
isSecretScanningDisabled: boolean;
defaultAuthOrgSlug: string | null;
defaultAuthOrgId: string | null;
defaultAuthOrgAuthMethod?: string | null;
defaultAuthOrgAuthEnforced?: boolean | null;
enabledLoginMethods: LoginMethod[];
};
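A hypothetical sketch of how a login page might consume these new fields; the helper and the exact auth-method strings are assumptions, not code from this changeset:
```ts
// Hypothetical helper: show only the enforced SSO method for the default org.
const shouldShowOnlySsoLogin = (config: TServerConfig): boolean =>
  Boolean(config.defaultAuthOrgId) &&
  Boolean(config.defaultAuthOrgAuthEnforced) &&
  (config.defaultAuthOrgAuthMethod === "saml" || config.defaultAuthOrgAuthMethod === "oidc");
```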

View File

@@ -1 +1,5 @@
export {
  useGetProjectSecretsDetails,
  useGetProjectSecretsOverview,
  useGetProjectSecretsQuickSearch
} from "./queries";

View File

@@ -10,12 +10,15 @@ import {
DashboardProjectSecretsOverview,
DashboardProjectSecretsOverviewResponse,
DashboardSecretsOrderBy,
TDashboardProjectSecretsQuickSearch,
TDashboardProjectSecretsQuickSearchResponse,
TGetDashboardProjectSecretsDetailsDTO,
TGetDashboardProjectSecretsOverviewDTO,
TGetDashboardProjectSecretsQuickSearchDTO
} from "@app/hooks/api/dashboard/types";
import { OrderByDirection } from "@app/hooks/api/generic/types";
import { mergePersonalSecrets } from "@app/hooks/api/secrets/queries";
import { groupBy, unique } from "@app/lib/fn/array";
export const dashboardKeys = {
all: () => ["dashboard"] as const,
@@ -42,8 +45,18 @@ export const dashboardKeys = {
}: TGetDashboardProjectSecretsDetailsDTO) =>
[
...dashboardKeys.getDashboardSecrets({ projectId, secretPath }),
"secrets-details",
environment,
params
] as const,
getProjectSecretsQuickSearch: ({
projectId,
secretPath,
...params
}: TGetDashboardProjectSecretsQuickSearchDTO) =>
[
...dashboardKeys.getDashboardSecrets({ projectId, secretPath }),
"quick-search",
params
] as const
};
@@ -256,3 +269,101 @@ export const useGetProjectSecretsDetails = (
keepPreviousData: true
});
};
export const fetchProjectSecretsQuickSearch = async ({
environments,
tags,
...params
}: TGetDashboardProjectSecretsQuickSearchDTO) => {
const { data } = await apiRequest.get<TDashboardProjectSecretsQuickSearchResponse>(
"/api/v1/dashboard/secrets-deep-search",
{
params: {
...params,
environments: encodeURIComponent(environments.join(",")),
tags: encodeURIComponent(
Object.entries(tags)
// eslint-disable-next-line @typescript-eslint/no-unused-vars
.filter(([_, enabled]) => enabled)
.map(([tag]) => tag)
.join(",")
)
}
}
);
return data;
};
export const useGetProjectSecretsQuickSearch = (
{
projectId,
secretPath,
search = "",
environments,
tags
}: TGetDashboardProjectSecretsQuickSearchDTO,
options?: Omit<
UseQueryOptions<
TDashboardProjectSecretsQuickSearchResponse,
unknown,
TDashboardProjectSecretsQuickSearch,
ReturnType<typeof dashboardKeys.getProjectSecretsQuickSearch>
>,
"queryKey" | "queryFn"
>
) => {
return useQuery({
...options,
enabled:
Boolean(search?.trim() || Object.values(tags).length) &&
(options?.enabled ?? true) &&
Boolean(environments.length),
queryKey: dashboardKeys.getProjectSecretsQuickSearch({
secretPath,
search,
projectId,
environments,
tags
}),
queryFn: () =>
fetchProjectSecretsQuickSearch({
secretPath,
search,
projectId,
environments,
tags
}),
onError: (error) => {
if (axios.isAxiosError(error)) {
const serverResponse = error.response?.data as { message: string };
createNotification({
title: "Error fetching secrets deep search",
type: "error",
text: serverResponse.message
});
}
},
select: useCallback((data: Awaited<ReturnType<typeof fetchProjectSecretsQuickSearch>>) => {
const { secrets, folders, dynamicSecrets } = data;
const groupedFolders = groupBy(folders, (folder) => folder.path);
const groupedSecrets = groupBy(
mergePersonalSecrets(secrets),
(secret) => `${secret.path === "/" ? "" : secret.path}/${secret.key}`
);
const groupedDynamicSecrets = groupBy(
dynamicSecrets,
(dynamicSecret) =>
`${dynamicSecret.path === "/" ? "" : dynamicSecret.path}/${dynamicSecret.name}`
);
return {
folders: groupedFolders,
secrets: groupedSecrets,
dynamicSecrets: groupedDynamicSecrets
};
}, []),
keepPreviousData: true
});
};
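A minimal usage sketch for the hook; the component context, variable names, and slugs below are illustrative, not taken from this changeset:
```ts
// Hypothetical usage: the query only fires once a search term or tag filter is
// present and at least one environment is selected (mirroring the hook's `enabled` logic).
const { data, isLoading } = useGetProjectSecretsQuickSearch({
  projectId: currentWorkspaceId,
  secretPath: "/",
  search: debouncedSearchTerm,
  environments: ["dev", "staging"],
  tags: { "api-keys": true }
});
// Results are grouped by "<path>/<name>" keys:
// data?.secrets, data?.folders, data?.dynamicSecrets
```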

View File

@@ -69,3 +69,23 @@ export type TGetDashboardProjectSecretsDetailsDTO = Omit<
includeImports?: boolean;
tags: Record<string, boolean>;
};
export type TDashboardProjectSecretsQuickSearchResponse = {
folders: (TSecretFolder & { environment: string; path: string })[];
dynamicSecrets: (TDynamicSecret & { environment: string; path: string })[];
secrets: SecretV3Raw[];
};
export type TDashboardProjectSecretsQuickSearch = {
folders: Record<string, TDashboardProjectSecretsQuickSearchResponse["folders"]>;
secrets: Record<string, SecretV3RawSanitized[]>;
dynamicSecrets: Record<string, TDashboardProjectSecretsQuickSearchResponse["dynamicSecrets"]>;
};
export type TGetDashboardProjectSecretsQuickSearchDTO = {
projectId: string;
secretPath: string;
tags: Record<string, boolean>;
search: string;
environments: string[];
};

View File

@@ -26,7 +26,9 @@ export enum DynamicSecretProviders {
MongoDB = "mongo-db",
RabbitMq = "rabbit-mq",
AzureEntraId = "azure-entra-id",
Ldap = "ldap"
Ldap = "ldap",
SapHana = "sap-hana",
Snowflake = "snowflake"
}
export enum SqlProviders {
@@ -189,7 +191,7 @@ export type TDynamicSecretProvider =
applicationId: string;
clientSecret: string;
};
}
| {
type: DynamicSecretProviders.Ldap;
inputs: {
@@ -201,9 +203,32 @@ export type TDynamicSecretProvider =
revocationLdif: string;
rollbackLdif?: string;
};
}
| {
type: DynamicSecretProviders.SapHana;
inputs: {
host: string;
port: number;
username: string;
password: string;
creationStatement: string;
revocationStatement: string;
renewStatement?: string;
ca?: string | undefined;
};
}
| {
type: DynamicSecretProviders.Snowflake;
inputs: {
orgId: string;
accountId: string;
username: string;
password: string;
creationStatement: string;
revocationStatement: string;
renewStatement?: string;
};
};
export type TCreateDynamicSecretDTO = {
projectSlug: string;
provider: TDynamicSecretProvider;

View File

@@ -8,4 +8,8 @@ export {
useUpdateSecretBatch,
useUpdateSecretV3
} from "./mutations";
export {
  useGetProjectSecrets,
  useGetProjectSecretsAllEnv,
  useGetSecretReferenceTree,
  useGetSecretVersion
} from "./queries";

View File

@@ -17,14 +17,17 @@ import {
SecretVersions,
TGetProjectSecretsAllEnvDTO,
TGetProjectSecretsDTO,
TGetProjectSecretsKey,
TGetSecretReferenceTreeDTO,
TSecretReferenceTraceNode
} from "./types";
export const secretKeys = {
// this is also used in secretSnapshot part
getProjectSecret: ({ workspaceId, environment, secretPath }: TGetProjectSecretsKey) =>
[{ workspaceId, environment, secretPath }, "secrets"] as const,
getSecretVersion: (secretId: string) => [{ secretId }, "secret-versions"] as const,
getSecretReferenceTree: (dto: TGetSecretReferenceTreeDTO) => ["secret-reference-tree", dto]
};
export const fetchProjectSecrets = async ({
@@ -63,7 +66,8 @@ export const mergePersonalSecrets = (rawSecrets: SecretV3Raw[]) => {
createdAt: el.createdAt,
updatedAt: el.updatedAt,
version: el.version,
skipMultilineEncoding: el.skipMultilineEncoding,
path: el.secretPath
};
if (el.type === SecretType.Personal) {
@@ -227,3 +231,33 @@ export const useGetSecretVersion = (dto: GetSecretVersionsDTO) =>
return data.sort((a, b) => b.createdAt.localeCompare(a.createdAt));
}, [])
});
const fetchSecretReferenceTree = async ({
secretPath,
projectId,
secretKey,
environmentSlug
}: TGetSecretReferenceTreeDTO) => {
const { data } = await apiRequest.get<{ tree: TSecretReferenceTraceNode; value: string }>(
`/api/v3/secrets/raw/${secretKey}/secret-reference-tree`,
{
params: {
secretPath,
workspaceId: projectId,
environment: environmentSlug
}
}
);
return data;
};
export const useGetSecretReferenceTree = (dto: TGetSecretReferenceTreeDTO) =>
useQuery({
enabled:
Boolean(dto.environmentSlug) &&
Boolean(dto.secretPath) &&
Boolean(dto.projectId) &&
Boolean(dto.secretKey),
queryKey: secretKeys.getSecretReferenceTree(dto),
queryFn: () => fetchSecretReferenceTree(dto)
});
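A brief usage sketch (identifier values are illustrative): the query stays disabled until all four parameters are non-empty, per the `enabled` guard above.
```ts
const { data } = useGetSecretReferenceTree({
  projectId: currentWorkspaceId, // hypothetical variable
  environmentSlug: "dev",
  secretPath: "/",
  secretKey: "DATABASE_URL"
});
// data?.tree is the root TSecretReferenceTraceNode; data?.value is the resolved value
```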

View File

@@ -29,7 +29,7 @@ export type EncryptedSecret = {
tags: WsTag[];
};
// both personal and shared secret stitched together for dashboard
export type SecretV3RawSanitized = {
id: string;
version: number;
@@ -42,6 +42,7 @@ export type SecretV3RawSanitized = {
createdAt: string;
updatedAt: string;
env: string;
path?: string;
valueOverride?: string;
idOverride?: string;
overrideAction?: string;
@@ -57,6 +58,7 @@ export type SecretV3Raw = {
version: number;
type: string;
secretKey: string;
secretPath: string;
secretValue?: string;
secretComment?: string;
secretReminderNote?: string;
@@ -210,3 +212,18 @@ export type TMoveSecretsDTO = {
secretIds: string[];
shouldOverwrite: boolean;
};
export type TGetSecretReferenceTreeDTO = {
secretKey: string;
secretPath: string;
environmentSlug: string;
projectId: string;
};
export type TSecretReferenceTraceNode = {
key: string;
value?: string;
environment: string;
secretPath: string;
children: TSecretReferenceTraceNode[];
};
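Since the node type is recursive, a small sketch of walking the trace may help; the `flattenTrace` helper below is hypothetical, not part of this changeset:
```ts
// Hypothetical helper: render each referenced secret as an indented line.
const flattenTrace = (node: TSecretReferenceTraceNode, depth = 0): string[] => [
  `${"  ".repeat(depth)}${node.environment}:${node.secretPath}/${node.key}`,
  ...node.children.flatMap((child) => flattenTrace(child, depth + 1))
];
```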

Some files were not shown because too many files have changed in this diff.