mirror of
https://github.com/Infisical/infisical.git
synced 2025-08-18 20:42:41 +00:00
Compare commits
160 Commits
fix/oauth-... ... secrets-mi
SHA1 (author and date columns did not survive the mirror view):
43752e1888 bd72129d8c bf10b2f58a d24f5a57a8 166104e523 a7847f177c 48e5f550e9 4a4a7fd325
91b8ed8015 6cf978b593 68fbb399fc 97366f6e95 c83d4af7a3 c35c937c63 b10752acb5 eb9b75d930
273a7b9657 a3b6fa9a53 f60dd528e8 8ffef1da8e f352f98374 91a76f50ca ea4bb0a062 3d6be7b1b2
12558e8614 987f87e562 4d06d5cbb0 bad934de48 90b93fbd15 c2db2a0bc7 b0d24de008 0473fb0ddb
4ccb5dc9b0 930425d5dc f77a53bd8e 4bd61e5607 aa4dbfa073 b479406ba0 7cf9d933da ca2825ba95
b8fa4d5255 0d3cb2d41a e0d19d7b65 f5a0d8be78 c7ae7be493 18881749fd fa54c406dc 1a2eef3ba6
0c562150f5 6fde132804 799721782a 86d430f911 7c28ee844e d5390fcafc 1b40f5d475 3cec1b4021
97b2c534a7 d71362ccc3 e4d90eb055 55607a4886 385c75c543 f16dca45d9 118c28df54 249b2933da
272336092d 6f05a6d82c 84ebdb8503 b464941fbc 77e8d8a86d c61dd1ee6e 9db8573e72 ce8653e908
fd4cdc2769 90a1cc9330 78bfd0922a 458dcd31c1 372537f0b6 e173ff3828 2baadf60d1 e13fc93bac
6b14fbcce2 86fbe5cc24 3f7862a345 9661458469 c7c1eb0f5f a1e48a1795 d14e80b771 0264d37d9b
11a1604e14 f788dee398 88120ed45e d6a377416d dbbd58ffb7 5d2beb3604 ec65e0e29c b819848058
1b0ef540fe 4496241002 52e32484ce 8b497699d4 be73f62226 102620ff09 994ee88852 770e25b895
fcf3bdb440 89c11b5541 5f764904e2 1a75384dba 50f434cd80 d879cfd90c ca1f5eaca3 364027a88a
ca110d11b0 4e8f404f16 22abb78f48 24f11406e1 d5d67c82b2 35cfcf1f0f 368e00ea71 2c8cfeb826
23237dd055 70d22f90ec e10aec3170 0b11dcd627 d88a473b47 4f52400887 34eb9f475a 902a0b0c56
d1e8ae3c98 5c9243d691 35d1eabf49 b6902160ce fbfc51ee93 9e6294786f 9d92ffce95 9193418f8b
847c50d2d4 352ef050c3 b6b9fb6ef5 7e94791635 eedc5f533e 676ebaf3c2 adb3185042 93445d96b3
6100086338 389e2e1fb7 88fcbcadd4 2d68f9aa16 e694293ebe ef6f5ecc4b 56f5249925 df5b3fa8dc
035ac0fe8d c12408eb81 13194296c6 1b32de5c5b 522795871e 5c63955fde d7f3892b73 33af2fb2b8
.github/workflows/build-docker-image-to-prod.yml (vendored, 123 lines deleted)
@@ -1,123 +0,0 @@
name: Release production images (frontend, backend)
on:
  push:
    tags:
      - "infisical/v*.*.*"
      - "!infisical/v*.*.*-postgres"

jobs:
  backend-image:
    name: Build backend image
    runs-on: ubuntu-latest
    steps:
      - name: Extract version from tag
        id: extract_version
        run: echo "::set-output name=version::${GITHUB_REF_NAME#infisical/}"
      - name: ☁️ Checkout source
        uses: actions/checkout@v3
      - name: 📦 Install dependencies to test all dependencies
        run: npm ci --only-production
        working-directory: backend
      # - name: 🧪 Run tests
      #   run: npm run test:ci
      #   working-directory: backend
      - name: Save commit hashes for tag
        id: commit
        uses: pr-mpt/actions-commit-hash@v2
      - name: 🔧 Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: 🐋 Login to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Set up Depot CLI
        uses: depot/setup-action@v1
      - name: 📦 Build backend and export to Docker
        uses: depot/build-push-action@v1
        with:
          project: 64mmf0n610
          token: ${{ secrets.DEPOT_PROJECT_TOKEN }}
          load: true
          context: backend
          tags: infisical/infisical:test
          platforms: linux/amd64,linux/arm64
      - name: ⏻ Spawn backend container and dependencies
        run: |
          docker compose -f .github/resources/docker-compose.be-test.yml up --wait --quiet-pull
      - name: 🧪 Test backend image
        run: |
          ./.github/resources/healthcheck.sh infisical-backend-test
      - name: ⏻ Shut down backend container and dependencies
        run: |
          docker compose -f .github/resources/docker-compose.be-test.yml down
      - name: 🏗️ Build backend and push
        uses: depot/build-push-action@v1
        with:
          project: 64mmf0n610
          token: ${{ secrets.DEPOT_PROJECT_TOKEN }}
          push: true
          context: backend
          tags: |
            infisical/backend:${{ steps.commit.outputs.short }}
            infisical/backend:latest
            infisical/backend:${{ steps.extract_version.outputs.version }}
          platforms: linux/amd64,linux/arm64

  frontend-image:
    name: Build frontend image
    runs-on: ubuntu-latest
    steps:
      - name: Extract version from tag
        id: extract_version
        run: echo "::set-output name=version::${GITHUB_REF_NAME#infisical/}"
      - name: ☁️ Checkout source
        uses: actions/checkout@v3
      - name: Save commit hashes for tag
        id: commit
        uses: pr-mpt/actions-commit-hash@v2
      - name: 🔧 Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: 🐋 Login to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Set up Depot CLI
        uses: depot/setup-action@v1
      - name: 📦 Build frontend and export to Docker
        uses: depot/build-push-action@v1
        with:
          load: true
          token: ${{ secrets.DEPOT_PROJECT_TOKEN }}
          project: 64mmf0n610
          context: frontend
          tags: infisical/frontend:test
          platforms: linux/amd64,linux/arm64
          build-args: |
            POSTHOG_API_KEY=${{ secrets.PUBLIC_POSTHOG_API_KEY }}
            NEXT_INFISICAL_PLATFORM_VERSION=${{ steps.extract_version.outputs.version }}
      - name: ⏻ Spawn frontend container
        run: |
          docker run -d --rm --name infisical-frontend-test infisical/frontend:test
      - name: 🧪 Test frontend image
        run: |
          ./.github/resources/healthcheck.sh infisical-frontend-test
      - name: ⏻ Shut down frontend container
        run: |
          docker stop infisical-frontend-test
      - name: 🏗️ Build frontend and push
        uses: depot/build-push-action@v1
        with:
          project: 64mmf0n610
          push: true
          token: ${{ secrets.DEPOT_PROJECT_TOKEN }}
          context: frontend
          tags: |
            infisical/frontend:${{ steps.commit.outputs.short }}
            infisical/frontend:latest
            infisical/frontend:${{ steps.extract_version.outputs.version }}
          platforms: linux/amd64,linux/arm64
          build-args: |
            POSTHOG_API_KEY=${{ secrets.PUBLIC_POSTHOG_API_KEY }}
            NEXT_INFISICAL_PLATFORM_VERSION=${{ steps.extract_version.outputs.version }}
.github/workflows/nightly-tag-generation.yml (vendored, new file, 82 lines)
@@ -0,0 +1,82 @@
name: Generate Nightly Tag

on:
  schedule:
    - cron: '0 0 * * *' # Run daily at midnight UTC
  workflow_dispatch: # Allow manual triggering for testing

permissions:
  contents: write

jobs:
  create-nightly-tag:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0 # Fetch all history for tags
          token: ${{ secrets.GO_RELEASER_GITHUB_TOKEN }}

      - name: Configure Git
        run: |
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"

      - name: Generate nightly tag
        run: |
          # Get the latest infisical production tag
          LATEST_STABLE_TAG=$(git tag --list | grep "^v[0-9].*$" | grep -v "nightly" | sort -V | tail -n1)

          if [ -z "$LATEST_STABLE_TAG" ]; then
            echo "No infisical production tags found, using v0.1.0"
            LATEST_STABLE_TAG="v0.1.0"
          fi

          echo "Latest production tag: $LATEST_STABLE_TAG"

          # Get current date in YYYYMMDD format
          DATE=$(date +%Y%m%d)

          # Base nightly tag name
          BASE_TAG="${LATEST_STABLE_TAG}-nightly-${DATE}"

          # Check if this exact tag already exists
          if git tag --list | grep -q "^${BASE_TAG}$"; then
            echo "Base tag ${BASE_TAG} already exists, finding next increment"

            # Find existing tags for this date and get the highest increment
            EXISTING_TAGS=$(git tag --list | grep "^${BASE_TAG}" | grep -E '\.[0-9]+$' || true)

            if [ -z "$EXISTING_TAGS" ]; then
              # No incremental tags exist, create .1
              NIGHTLY_TAG="${BASE_TAG}.1"
            else
              # Find the highest increment
              HIGHEST_INCREMENT=$(echo "$EXISTING_TAGS" | sed "s|^${BASE_TAG}\.||" | sort -n | tail -n1)
              NEXT_INCREMENT=$((HIGHEST_INCREMENT + 1))
              NIGHTLY_TAG="${BASE_TAG}.${NEXT_INCREMENT}"
            fi
          else
            # Base tag doesn't exist, use it
            NIGHTLY_TAG="$BASE_TAG"
          fi

          echo "Generated nightly tag: $NIGHTLY_TAG"
          echo "NIGHTLY_TAG=$NIGHTLY_TAG" >> $GITHUB_ENV
          echo "LATEST_PRODUCTION_TAG=$LATEST_STABLE_TAG" >> $GITHUB_ENV

          git tag "$NIGHTLY_TAG"
          git push origin "$NIGHTLY_TAG"
          echo "✅ Created and pushed nightly tag: $NIGHTLY_TAG"

      - name: Create GitHub Release
        uses: softprops/action-gh-release@v2
        with:
          tag_name: ${{ env.NIGHTLY_TAG }}
          name: ${{ env.NIGHTLY_TAG }}
          draft: false
          prerelease: true
          generate_release_notes: true
          make_latest: false
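The tag-selection and increment logic in the script above is self-contained string and date manipulation. A minimal TypeScript sketch of the same scheme, for illustration only (the function name and inputs are hypothetical, not part of this change):

// Sketch of the nightly tag naming used by the workflow above:
// <latest stable>-nightly-<YYYYMMDD>, with a numeric ".N" suffix when
// the base tag for the day is already taken. `existingTags` stands in
// for the output of `git tag --list`.
function nextNightlyTag(latestStable: string, date: string, existingTags: string[]): string {
  const base = `${latestStable}-nightly-${date}`;
  if (!existingTags.includes(base)) return base; // base tag free: use it

  // Collect existing ".N" increments for this date and bump the highest.
  const increments = existingTags
    .filter((t) => t.startsWith(`${base}.`))
    .map((t) => Number(t.slice(base.length + 1)))
    .filter((n) => Number.isInteger(n) && n > 0);

  const next = increments.length ? Math.max(...increments) + 1 : 1;
  return `${base}.${next}`;
}

// nextNightlyTag("v1.2.3", "20250818", [])                          -> "v1.2.3-nightly-20250818"
// nextNightlyTag("v1.2.3", "20250818", ["v1.2.3-nightly-20250818"]) -> "v1.2.3-nightly-20250818.1"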
@@ -2,7 +2,9 @@ name: Release standalone docker image
 on:
   push:
     tags:
       - "infisical/v*.*.*-postgres"
+      - "v*.*.*"
+      - "v*.*.*-nightly-*"
+      - "v*.*.*-nightly-*.*"

 jobs:
   infisical-tests:
@@ -17,7 +19,7 @@ jobs:
     steps:
       - name: Extract version from tag
         id: extract_version
-        run: echo "::set-output name=version::${GITHUB_REF_NAME#infisical/}"
+        run: echo "::set-output name=version::${GITHUB_REF_NAME}"
       - name: ☁️ Checkout source
         uses: actions/checkout@v3
         with:
@@ -53,7 +55,7 @@ jobs:
           push: true
           context: .
           tags: |
             infisical/infisical:latest-postgres
+            infisical/infisical:latest
             infisical/infisical:${{ steps.commit.outputs.short }}
             infisical/infisical:${{ steps.extract_version.outputs.version }}
           platforms: linux/amd64,linux/arm64
@@ -69,7 +71,7 @@ jobs:
     steps:
       - name: Extract version from tag
         id: extract_version
-        run: echo "::set-output name=version::${GITHUB_REF_NAME#infisical/}"
+        run: echo "::set-output name=version::${GITHUB_REF_NAME}"
       - name: ☁️ Checkout source
         uses: actions/checkout@v3
         with:
@@ -105,7 +107,7 @@ jobs:
           push: true
           context: .
           tags: |
             infisical/infisical-fips:latest-postgres
+            infisical/infisical-fips:latest
             infisical/infisical-fips:${{ steps.commit.outputs.short }}
             infisical/infisical-fips:${{ steps.extract_version.outputs.version }}
           platforms: linux/amd64,linux/arm64
@@ -44,10 +44,7 @@ jobs:

       - name: Generate Helm Chart
         working-directory: k8-operator
-        run: make helm
-
-      - name: Update Helm Chart Version
-        run: ./k8-operator/scripts/update-version.sh ${{ steps.extract_version.outputs.version }}
+        run: make helm VERSION=${{ steps.extract_version.outputs.version }}

       - name: Debug - Check file changes
         run: |
.github/workflows/run-backend-tests.yml (vendored, 15 lines changed)
@@ -16,6 +16,16 @@ jobs:
     runs-on: ubuntu-latest
     timeout-minutes: 15
     steps:
+
+
+      - name: Free up disk space
+        run: |
+          sudo rm -rf /usr/share/dotnet
+          sudo rm -rf /opt/ghc
+          sudo rm -rf "/usr/local/share/boost"
+          sudo rm -rf "$AGENT_TOOLSDIRECTORY"
+          docker system prune -af
+
       - name: ☁️ Checkout source
         uses: actions/checkout@v3
       - uses: KengoTODA/actions-setup-docker-compose@v1
@@ -34,6 +44,8 @@ jobs:
         working-directory: backend
       - name: Start postgres and redis
         run: touch .env && docker compose -f docker-compose.dev.yml up -d db redis
+      - name: Start Secret Rotation testing databases
+        run: docker compose -f docker-compose.e2e-dbs.yml up -d --wait --wait-timeout 300
       - name: Run unit test
         run: npm run test:unit
         working-directory: backend
@@ -41,6 +53,9 @@ jobs:
         run: npm run test:e2e
         working-directory: backend
         env:
+          E2E_TEST_ORACLE_DB_19_HOST: ${{ secrets.E2E_TEST_ORACLE_DB_19_HOST }}
+          E2E_TEST_ORACLE_DB_19_USERNAME: ${{ secrets.E2E_TEST_ORACLE_DB_19_USERNAME }}
+          E2E_TEST_ORACLE_DB_19_PASSWORD: ${{ secrets.E2E_TEST_ORACLE_DB_19_PASSWORD }}
           REDIS_URL: redis://172.17.0.1:6379
           DB_CONNECTION_URI: postgres://infisical:infisical@172.17.0.1:5432/infisical?sslmode=disable
           AUTH_SECRET: something-random
@@ -50,3 +50,4 @@ docs/integrations/app-connections/zabbix.mdx:generic-api-key:91
 docs/integrations/app-connections/bitbucket.mdx:generic-api-key:123
 docs/integrations/app-connections/railway.mdx:generic-api-key:156
 .github/workflows/validate-db-schemas.yml:generic-api-key:21
+k8-operator/config/samples/universalAuthIdentitySecret.yaml:generic-api-key:8
@@ -1,34 +0,0 @@
|
||||
import { TQueueServiceFactory } from "@app/queue";
|
||||
|
||||
export const mockQueue = (): TQueueServiceFactory => {
|
||||
const queues: Record<string, unknown> = {};
|
||||
const workers: Record<string, unknown> = {};
|
||||
const job: Record<string, unknown> = {};
|
||||
const events: Record<string, unknown> = {};
|
||||
|
||||
return {
|
||||
queue: async (name, jobData) => {
|
||||
job[name] = jobData;
|
||||
},
|
||||
queuePg: async () => {},
|
||||
schedulePg: async () => {},
|
||||
initialize: async () => {},
|
||||
shutdown: async () => undefined,
|
||||
stopRepeatableJob: async () => true,
|
||||
start: (name, jobFn) => {
|
||||
queues[name] = jobFn;
|
||||
workers[name] = jobFn;
|
||||
},
|
||||
startPg: async () => {},
|
||||
listen: (name, event) => {
|
||||
events[name] = event;
|
||||
},
|
||||
getRepeatableJobs: async () => [],
|
||||
getDelayedJobs: async () => [],
|
||||
clearQueue: async () => {},
|
||||
stopJobById: async () => {},
|
||||
stopJobByIdPg: async () => {},
|
||||
stopRepeatableJobByJobId: async () => true,
|
||||
stopRepeatableJobByKey: async () => true
|
||||
};
|
||||
};
|
backend/e2e-test/routes/v3/secret-rotations.spec.ts (new file, 726 lines)
@@ -0,0 +1,726 @@
/* eslint-disable no-promise-executor-return */
/* eslint-disable no-await-in-loop */
import knex from "knex";
import { v4 as uuidv4 } from "uuid";

import { seedData1 } from "@app/db/seed-data";

enum SecretRotationType {
  OracleDb = "oracledb",
  MySQL = "mysql",
  Postgres = "postgres"
}

type TGenericSqlCredentials = {
  host: string;
  port: number;
  username: string;
  password: string;
  database: string;
};

type TSecretMapping = {
  username: string;
  password: string;
};

type TDatabaseUserCredentials = {
  username: string;
};

const formatSqlUsername = (username: string) => `${username}_${uuidv4().slice(0, 8).replace(/-/g, "").toUpperCase()}`;

const getSecretValue = async (secretKey: string) => {
  const passwordSecret = await testServer.inject({
    url: `/api/v3/secrets/raw/${secretKey}`,
    method: "GET",
    query: {
      workspaceId: seedData1.projectV3.id,
      environment: seedData1.environment.slug
    },
    headers: {
      authorization: `Bearer ${jwtAuthToken}`
    }
  });

  expect(passwordSecret.statusCode).toBe(200);
  expect(passwordSecret.json().secret).toBeDefined();

  const passwordSecretJson = JSON.parse(passwordSecret.payload);

  return passwordSecretJson.secret.secretValue as string;
};

const deleteSecretRotation = async (id: string, type: SecretRotationType) => {
  const res = await testServer.inject({
    method: "DELETE",
    query: {
      deleteSecrets: "true",
      revokeGeneratedCredentials: "true"
    },
    url: `/api/v2/secret-rotations/${type}-credentials/${id}`,
    headers: {
      authorization: `Bearer ${jwtAuthToken}`
    }
  });

  expect(res.statusCode).toBe(200);
};

const deleteAppConnection = async (id: string, type: SecretRotationType) => {
  const res = await testServer.inject({
    method: "DELETE",
    url: `/api/v1/app-connections/${type}/${id}`,
    headers: {
      authorization: `Bearer ${jwtAuthToken}`
    }
  });

  expect(res.statusCode).toBe(200);
};

const createOracleDBAppConnection = async (credentials: TGenericSqlCredentials) => {
  const createOracleDBAppConnectionReqBody = {
    credentials: {
      database: credentials.database,
      host: credentials.host,
      username: credentials.username,
      password: credentials.password,
      port: credentials.port,
      sslEnabled: true,
      sslRejectUnauthorized: true
    },
    name: `oracle-db-${uuidv4()}`,
    description: "Test OracleDB App Connection",
    gatewayId: null,
    isPlatformManagedCredentials: false,
    method: "username-and-password"
  };

  const res = await testServer.inject({
    method: "POST",
    url: `/api/v1/app-connections/oracledb`,
    headers: {
      authorization: `Bearer ${jwtAuthToken}`
    },
    body: createOracleDBAppConnectionReqBody
  });

  const json = JSON.parse(res.payload);

  expect(res.statusCode).toBe(200);
  expect(json.appConnection).toBeDefined();

  return json.appConnection.id as string;
};

const createMySQLAppConnection = async (credentials: TGenericSqlCredentials) => {
  const createMySQLAppConnectionReqBody = {
    name: `mysql-test-${uuidv4()}`,
    description: "test-mysql",
    gatewayId: null,
    method: "username-and-password",
    credentials: {
      host: credentials.host,
      port: credentials.port,
      database: credentials.database,
      username: credentials.username,
      password: credentials.password,
      sslEnabled: false,
      sslRejectUnauthorized: true
    }
  };

  const res = await testServer.inject({
    method: "POST",
    url: `/api/v1/app-connections/mysql`,
    headers: {
      authorization: `Bearer ${jwtAuthToken}`
    },
    body: createMySQLAppConnectionReqBody
  });

  const json = JSON.parse(res.payload);

  expect(res.statusCode).toBe(200);
  expect(json.appConnection).toBeDefined();

  return json.appConnection.id as string;
};

const createPostgresAppConnection = async (credentials: TGenericSqlCredentials) => {
  const createPostgresAppConnectionReqBody = {
    credentials: {
      host: credentials.host,
      port: credentials.port,
      database: credentials.database,
      username: credentials.username,
      password: credentials.password,
      sslEnabled: false,
      sslRejectUnauthorized: true
    },
    name: `postgres-test-${uuidv4()}`,
    description: "test-postgres",
    gatewayId: null,
    method: "username-and-password"
  };

  const res = await testServer.inject({
    method: "POST",
    url: `/api/v1/app-connections/postgres`,
    headers: {
      authorization: `Bearer ${jwtAuthToken}`
    },
    body: createPostgresAppConnectionReqBody
  });

  const json = JSON.parse(res.payload);

  expect(res.statusCode).toBe(200);
  expect(json.appConnection).toBeDefined();

  return json.appConnection.id as string;
};

const createOracleInfisicalUsers = async (
  credentials: TGenericSqlCredentials,
  userCredentials: TDatabaseUserCredentials[]
) => {
  const client = knex({
    client: "oracledb",
    connection: {
      database: credentials.database,
      port: credentials.port,
      host: credentials.host,
      user: credentials.username,
      password: credentials.password,
      connectionTimeoutMillis: 10000,
      ssl: {
        // @ts-expect-error - this is a valid property for the ssl object
        sslServerDNMatch: true
      }
    }
  });

  for await (const { username } of userCredentials) {
    // check if user exists, and if it does, don't create it
    const existingUser = await client.raw(`SELECT * FROM all_users WHERE username = '${username}'`);

    if (!existingUser.length) {
      await client.raw(`CREATE USER ${username} IDENTIFIED BY "temporary_password"`);
    }
    await client.raw(`GRANT ALL PRIVILEGES TO ${username} WITH ADMIN OPTION`);
  }

  await client.destroy();
};

const createMySQLInfisicalUsers = async (
  credentials: TGenericSqlCredentials,
  userCredentials: TDatabaseUserCredentials[]
) => {
  const client = knex({
    client: "mysql2",
    connection: {
      database: credentials.database,
      port: credentials.port,
      host: credentials.host,
      user: credentials.username,
      password: credentials.password,
      connectionTimeoutMillis: 10000
    }
  });

  // Fix: Ensure root has GRANT OPTION privileges
  try {
    await client.raw("GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;");
    await client.raw("FLUSH PRIVILEGES;");
  } catch (error) {
    // Ignore if already has privileges
  }

  for await (const { username } of userCredentials) {
    // check if user exists, and if it does, don't create it
    const existingUser = await client.raw(`SELECT * FROM mysql.user WHERE user = '${username}'`);

    if (!existingUser[0].length) {
      await client.raw(`CREATE USER '${username}'@'%' IDENTIFIED BY 'temporary_password';`);
    }

    await client.raw(`GRANT ALL PRIVILEGES ON \`${credentials.database}\`.* TO '${username}'@'%';`);
    await client.raw("FLUSH PRIVILEGES;");
  }

  await client.destroy();
};

const createPostgresInfisicalUsers = async (
  credentials: TGenericSqlCredentials,
  userCredentials: TDatabaseUserCredentials[]
) => {
  const client = knex({
    client: "pg",
    connection: {
      database: credentials.database,
      port: credentials.port,
      host: credentials.host,
      user: credentials.username,
      password: credentials.password,
      connectionTimeoutMillis: 10000
    }
  });

  for await (const { username } of userCredentials) {
    // check if user exists, and if it does, don't create it
    const existingUser = await client.raw("SELECT * FROM pg_catalog.pg_user WHERE usename = ?", [username]);

    if (!existingUser.rows.length) {
      await client.raw(`CREATE USER "${username}" WITH PASSWORD 'temporary_password'`);
    }

    await client.raw("GRANT ALL PRIVILEGES ON DATABASE ?? TO ??", [credentials.database, username]);
  }

  await client.destroy();
};

const createOracleDBSecretRotation = async (
  appConnectionId: string,
  credentials: TGenericSqlCredentials,
  userCredentials: TDatabaseUserCredentials[],
  secretMapping: TSecretMapping
) => {
  const now = new Date();
  const rotationTime = new Date(now.getTime() - 2 * 60 * 1000); // 2 minutes ago

  await createOracleInfisicalUsers(credentials, userCredentials);

  const createOracleDBSecretRotationReqBody = {
    parameters: userCredentials.reduce(
      (acc, user, index) => {
        acc[`username${index + 1}`] = user.username;
        return acc;
      },
      {} as Record<string, string>
    ),
    secretsMapping: {
      username: secretMapping.username,
      password: secretMapping.password
    },
    name: `test-oracle-${uuidv4()}`,
    description: "Test OracleDB Secret Rotation",
    secretPath: "/",
    isAutoRotationEnabled: true,
    rotationInterval: 5, // 5 seconds for testing
    rotateAtUtc: {
      hours: rotationTime.getUTCHours(),
      minutes: rotationTime.getUTCMinutes()
    },
    connectionId: appConnectionId,
    environment: seedData1.environment.slug,
    projectId: seedData1.projectV3.id
  };

  const res = await testServer.inject({
    method: "POST",
    url: `/api/v2/secret-rotations/oracledb-credentials`,
    headers: {
      authorization: `Bearer ${jwtAuthToken}`
    },
    body: createOracleDBSecretRotationReqBody
  });

  expect(res.statusCode).toBe(200);
  expect(res.json().secretRotation).toBeDefined();

  return res;
};

const createMySQLSecretRotation = async (
  appConnectionId: string,
  credentials: TGenericSqlCredentials,
  userCredentials: TDatabaseUserCredentials[],
  secretMapping: TSecretMapping
) => {
  const now = new Date();
  const rotationTime = new Date(now.getTime() - 2 * 60 * 1000); // 2 minutes ago

  await createMySQLInfisicalUsers(credentials, userCredentials);

  const createMySQLSecretRotationReqBody = {
    parameters: userCredentials.reduce(
      (acc, user, index) => {
        acc[`username${index + 1}`] = user.username;
        return acc;
      },
      {} as Record<string, string>
    ),
    secretsMapping: {
      username: secretMapping.username,
      password: secretMapping.password
    },
    name: `test-mysql-rotation-${uuidv4()}`,
    description: "Test MySQL Secret Rotation",
    secretPath: "/",
    isAutoRotationEnabled: true,
    rotationInterval: 5,
    rotateAtUtc: {
      hours: rotationTime.getUTCHours(),
      minutes: rotationTime.getUTCMinutes()
    },
    connectionId: appConnectionId,
    environment: seedData1.environment.slug,
    projectId: seedData1.projectV3.id
  };

  const res = await testServer.inject({
    method: "POST",
    url: `/api/v2/secret-rotations/mysql-credentials`,
    headers: {
      authorization: `Bearer ${jwtAuthToken}`
    },
    body: createMySQLSecretRotationReqBody
  });

  expect(res.statusCode).toBe(200);
  expect(res.json().secretRotation).toBeDefined();

  return res;
};

const createPostgresSecretRotation = async (
  appConnectionId: string,
  credentials: TGenericSqlCredentials,
  userCredentials: TDatabaseUserCredentials[],
  secretMapping: TSecretMapping
) => {
  const now = new Date();
  const rotationTime = new Date(now.getTime() - 2 * 60 * 1000); // 2 minutes ago

  await createPostgresInfisicalUsers(credentials, userCredentials);

  const createPostgresSecretRotationReqBody = {
    parameters: userCredentials.reduce(
      (acc, user, index) => {
        acc[`username${index + 1}`] = user.username;
        return acc;
      },
      {} as Record<string, string>
    ),
    secretsMapping: {
      username: secretMapping.username,
      password: secretMapping.password
    },
    name: `test-postgres-rotation-${uuidv4()}`,
    description: "Test Postgres Secret Rotation",
    secretPath: "/",
    isAutoRotationEnabled: true,
    rotationInterval: 5,
    rotateAtUtc: {
      hours: rotationTime.getUTCHours(),
      minutes: rotationTime.getUTCMinutes()
    },
    connectionId: appConnectionId,
    environment: seedData1.environment.slug,
    projectId: seedData1.projectV3.id
  };

  const res = await testServer.inject({
    method: "POST",
    url: `/api/v2/secret-rotations/postgres-credentials`,
    headers: {
      authorization: `Bearer ${jwtAuthToken}`
    },
    body: createPostgresSecretRotationReqBody
  });

  expect(res.statusCode).toBe(200);
  expect(res.json().secretRotation).toBeDefined();

  return res;
};

describe("Secret Rotations", async () => {
  const testCases = [
    {
      type: SecretRotationType.MySQL,
      name: "MySQL (8.4.6) Secret Rotation",
      dbCredentials: {
        database: "mysql-test",
        host: "127.0.0.1",
        username: "root",
        password: "mysql-test",
        port: 3306
      },
      secretMapping: {
        username: formatSqlUsername("MYSQL_USERNAME"),
        password: formatSqlUsername("MYSQL_PASSWORD")
      },
      userCredentials: [
        {
          username: formatSqlUsername("MYSQL_USER_1")
        },
        {
          username: formatSqlUsername("MYSQL_USER_2")
        }
      ]
    },
    {
      type: SecretRotationType.MySQL,
      name: "MySQL (8.0.29) Secret Rotation",
      dbCredentials: {
        database: "mysql-test",
        host: "127.0.0.1",
        username: "root",
        password: "mysql-test",
        port: 3307
      },
      secretMapping: {
        username: formatSqlUsername("MYSQL_USERNAME"),
        password: formatSqlUsername("MYSQL_PASSWORD")
      },
      userCredentials: [
        {
          username: formatSqlUsername("MYSQL_USER_1")
        },
        {
          username: formatSqlUsername("MYSQL_USER_2")
        }
      ]
    },
    {
      type: SecretRotationType.MySQL,
      name: "MySQL (5.7.31) Secret Rotation",
      dbCredentials: {
        database: "mysql-test",
        host: "127.0.0.1",
        username: "root",
        password: "mysql-test",
        port: 3308
      },
      secretMapping: {
        username: formatSqlUsername("MYSQL_USERNAME"),
        password: formatSqlUsername("MYSQL_PASSWORD")
      },
      userCredentials: [
        {
          username: formatSqlUsername("MYSQL_USER_1")
        },
        {
          username: formatSqlUsername("MYSQL_USER_2")
        }
      ]
    },
    {
      type: SecretRotationType.OracleDb,
      name: "OracleDB (23.8) Secret Rotation",
      dbCredentials: {
        database: "FREEPDB1",
        host: "127.0.0.1",
        username: "system",
        password: "pdb-password",
        port: 1521
      },
      secretMapping: {
        username: formatSqlUsername("ORACLEDB_USERNAME"),
        password: formatSqlUsername("ORACLEDB_PASSWORD")
      },
      userCredentials: [
        {
          username: formatSqlUsername("INFISICAL_USER_1")
        },
        {
          username: formatSqlUsername("INFISICAL_USER_2")
        }
      ]
    },
    {
      type: SecretRotationType.OracleDb,
      name: "OracleDB (19.3) Secret Rotation",
      skippable: true,
      dbCredentials: {
        password: process.env.E2E_TEST_ORACLE_DB_19_PASSWORD!,
        host: process.env.E2E_TEST_ORACLE_DB_19_HOST!,
        username: process.env.E2E_TEST_ORACLE_DB_19_USERNAME!,
        port: 1521,
        database: "ORCLPDB1"
      },
      secretMapping: {
        username: formatSqlUsername("ORACLEDB_USERNAME"),
        password: formatSqlUsername("ORACLEDB_PASSWORD")
      },
      userCredentials: [
        {
          username: formatSqlUsername("INFISICAL_USER_1")
        },
        {
          username: formatSqlUsername("INFISICAL_USER_2")
        }
      ]
    },
    {
      type: SecretRotationType.Postgres,
      name: "Postgres (17) Secret Rotation",
      dbCredentials: {
        database: "postgres-test",
        host: "127.0.0.1",
        username: "postgres-test",
        password: "postgres-test",
        port: 5433
      },
      secretMapping: {
        username: formatSqlUsername("POSTGRES_USERNAME"),
        password: formatSqlUsername("POSTGRES_PASSWORD")
      },
      userCredentials: [
        {
          username: formatSqlUsername("INFISICAL_USER_1")
        },
        {
          username: formatSqlUsername("INFISICAL_USER_2")
        }
      ]
    },
    {
      type: SecretRotationType.Postgres,
      name: "Postgres (16) Secret Rotation",
      dbCredentials: {
        database: "postgres-test",
        host: "127.0.0.1",
        username: "postgres-test",
        password: "postgres-test",
        port: 5434
      },
      secretMapping: {
        username: formatSqlUsername("POSTGRES_USERNAME"),
        password: formatSqlUsername("POSTGRES_PASSWORD")
      },
      userCredentials: [
        {
          username: formatSqlUsername("INFISICAL_USER_1")
        },
        {
          username: formatSqlUsername("INFISICAL_USER_2")
        }
      ]
    },
    {
      type: SecretRotationType.Postgres,
      name: "Postgres (10.12) Secret Rotation",
      dbCredentials: {
        database: "postgres-test",
        host: "127.0.0.1",
        username: "postgres-test",
        password: "postgres-test",
        port: 5435
      },
      secretMapping: {
        username: formatSqlUsername("POSTGRES_USERNAME"),
        password: formatSqlUsername("POSTGRES_PASSWORD")
      },
      userCredentials: [
        {
          username: formatSqlUsername("INFISICAL_USER_1")
        },
        {
          username: formatSqlUsername("INFISICAL_USER_2")
        }
      ]
    }
  ] as {
    skippable?: boolean;
    type: SecretRotationType;
    name: string;
    dbCredentials: TGenericSqlCredentials;
    secretMapping: TSecretMapping;
    userCredentials: TDatabaseUserCredentials[];
  }[];

  const createAppConnectionMap = {
    [SecretRotationType.OracleDb]: createOracleDBAppConnection,
    [SecretRotationType.MySQL]: createMySQLAppConnection,
    [SecretRotationType.Postgres]: createPostgresAppConnection
  };

  const createRotationMap = {
    [SecretRotationType.OracleDb]: createOracleDBSecretRotation,
    [SecretRotationType.MySQL]: createMySQLSecretRotation,
    [SecretRotationType.Postgres]: createPostgresSecretRotation
  };

  const appConnectionIds: { id: string; type: SecretRotationType }[] = [];
  const secretRotationIds: { id: string; type: SecretRotationType }[] = [];

  afterAll(async () => {
    for (const { id, type } of secretRotationIds) {
      await deleteSecretRotation(id, type);
    }

    for (const { id, type } of appConnectionIds) {
      await deleteAppConnection(id, type);
    }
  });

  testCases.forEach(({ skippable, dbCredentials, secretMapping, userCredentials, type, name }) => {
    const shouldSkip = () => {
      if (skippable) {
        if (type === SecretRotationType.OracleDb) {
          if (!process.env.E2E_TEST_ORACLE_DB_19_HOST) {
            return true;
          }
        }
      }

      return false;
    };

    if (shouldSkip()) {
      test.skip(`Skipping Secret Rotation for ${type} (${name}) because E2E_TEST_ORACLE_DB_19_HOST is not set`);
    } else {
      test.concurrent(
        `Create secret rotation for ${name}`,
        async () => {
          const appConnectionId = await createAppConnectionMap[type](dbCredentials);

          if (appConnectionId) {
            appConnectionIds.push({ id: appConnectionId, type });
          }

          const res = await createRotationMap[type](appConnectionId, dbCredentials, userCredentials, secretMapping);

          const resJson = JSON.parse(res.payload);

          if (resJson.secretRotation) {
            secretRotationIds.push({ id: resJson.secretRotation.id, type });
          }

          const startSecretValue = await getSecretValue(secretMapping.password);
          expect(startSecretValue).toBeDefined();

          let attempts = 0;
          while (attempts < 60) {
            const currentSecretValue = await getSecretValue(secretMapping.password);

            if (currentSecretValue !== startSecretValue) {
              break;
            }

            attempts += 1;
            await new Promise((resolve) => setTimeout(resolve, 2_500));
          }

          if (attempts >= 60) {
            throw new Error("Secret rotation failed to rotate after 60 attempts");
          }

          const finalSecretValue = await getSecretValue(secretMapping.password);
          expect(finalSecretValue).not.toBe(startSecretValue);
        },
        {
          timeout: 300_000
        }
      );
    }
  });
});
@@ -18,6 +18,7 @@ import { keyStoreFactory } from "@app/keystore/keystore";
 import { initializeHsmModule } from "@app/ee/services/hsm/hsm-fns";
 import { buildRedisFromConfig } from "@app/lib/config/redis";
 import { superAdminDALFactory } from "@app/services/super-admin/super-admin-dal";
+import { bootstrapCheck } from "@app/server/boot-strap-check";

 dotenv.config({ path: path.join(__dirname, "../../.env.test"), debug: true });
 export default {
@@ -63,6 +64,8 @@ export default {
     const queue = queueServiceFactory(envCfg, { dbConnectionUrl: envCfg.DB_CONNECTION_URI });
     const keyStore = keyStoreFactory(envCfg);

+    await queue.initialize();
+
     const hsmModule = initializeHsmModule(envCfg);
     hsmModule.initialize();

@@ -78,9 +81,13 @@ export default {
       envConfig: envCfg
     });

+    await bootstrapCheck({ db });
+
     // @ts-expect-error type
     globalThis.testServer = server;
     // @ts-expect-error type
+    globalThis.testQueue = queue;
+    // @ts-expect-error type
     globalThis.testSuperAdminDAL = superAdminDAL;
     // @ts-expect-error type
     globalThis.jwtAuthToken = crypto.jwt().sign(
@@ -105,6 +112,8 @@ export default {
     // custom setup
     return {
       async teardown() {
+        // @ts-expect-error type
+        await globalThis.testQueue.shutdown();
         // @ts-expect-error type
         await globalThis.testServer.close();
         // @ts-expect-error type
@@ -112,7 +121,9 @@ export default {
         // @ts-expect-error type
         delete globalThis.testSuperAdminDAL;
         // @ts-expect-error type
-        delete globalThis.jwtToken;
+        delete globalThis.jwtAuthToken;
+        // @ts-expect-error type
+        delete globalThis.testQueue;
         // called after all tests with this env have been run
         await db.migrate.rollback(
           {
@@ -2,7 +2,7 @@
 import { Knex } from "knex";

 import { chunkArray } from "@app/lib/fn";
-import { logger } from "@app/lib/logger";
+import { initLogger, logger } from "@app/lib/logger";

 import { TableName } from "../schemas";
 import { TReminders, TRemindersInsert } from "../schemas/reminders";
@@ -107,5 +107,6 @@ export async function up(knex: Knex): Promise<void> {
 }

 export async function down(): Promise<void> {
+  initLogger();
   logger.info("Rollback not implemented for secret reminders fix migration");
 }
@@ -0,0 +1,19 @@
import { Knex } from "knex";

import { TableName } from "../schemas/models";

export async function up(knex: Knex): Promise<void> {
  if (!(await knex.schema.hasColumn(TableName.AccessApprovalPolicy, "maxTimePeriod"))) {
    await knex.schema.alterTable(TableName.AccessApprovalPolicy, (t) => {
      t.string("maxTimePeriod").nullable(); // Ex: 1h - Null is permanent
    });
  }
}

export async function down(knex: Knex): Promise<void> {
  if (await knex.schema.hasColumn(TableName.AccessApprovalPolicy, "maxTimePeriod")) {
    await knex.schema.alterTable(TableName.AccessApprovalPolicy, (t) => {
      t.dropColumn("maxTimePeriod");
    });
  }
}
@@ -0,0 +1,38 @@
import { Knex } from "knex";

import { TableName } from "@app/db/schemas";

export async function up(knex: Knex): Promise<void> {
  const hasEditNoteCol = await knex.schema.hasColumn(TableName.AccessApprovalRequest, "editNote");
  const hasEditedByUserId = await knex.schema.hasColumn(TableName.AccessApprovalRequest, "editedByUserId");

  if (!hasEditNoteCol || !hasEditedByUserId) {
    await knex.schema.alterTable(TableName.AccessApprovalRequest, (t) => {
      if (!hasEditedByUserId) {
        t.uuid("editedByUserId").nullable();
        t.foreign("editedByUserId").references("id").inTable(TableName.Users).onDelete("SET NULL");
      }

      if (!hasEditNoteCol) {
        t.string("editNote").nullable();
      }
    });
  }
}

export async function down(knex: Knex): Promise<void> {
  const hasEditNoteCol = await knex.schema.hasColumn(TableName.AccessApprovalRequest, "editNote");
  const hasEditedByUserId = await knex.schema.hasColumn(TableName.AccessApprovalRequest, "editedByUserId");

  if (hasEditNoteCol || hasEditedByUserId) {
    await knex.schema.alterTable(TableName.AccessApprovalRequest, (t) => {
      if (hasEditedByUserId) {
        t.dropColumn("editedByUserId");
      }

      if (hasEditNoteCol) {
        t.dropColumn("editNote");
      }
    });
  }
}
@@ -17,7 +17,8 @@ export const AccessApprovalPoliciesSchema = z.object({
   updatedAt: z.date(),
   enforcementLevel: z.string().default("hard"),
   deletedAt: z.date().nullable().optional(),
-  allowedSelfApprovals: z.boolean().default(true)
+  allowedSelfApprovals: z.boolean().default(true),
+  maxTimePeriod: z.string().nullable().optional()
 });

 export type TAccessApprovalPolicies = z.infer<typeof AccessApprovalPoliciesSchema>;
@@ -20,7 +20,9 @@ export const AccessApprovalRequestsSchema = z.object({
   requestedByUserId: z.string().uuid(),
   note: z.string().nullable().optional(),
   privilegeDeletedAt: z.date().nullable().optional(),
-  status: z.string().default("pending")
+  status: z.string().default("pending"),
+  editedByUserId: z.string().uuid().nullable().optional(),
+  editNote: z.string().nullable().optional()
 });

 export type TAccessApprovalRequests = z.infer<typeof AccessApprovalRequestsSchema>;
@@ -3,12 +3,32 @@ import { z } from "zod";

 import { ApproverType, BypasserType } from "@app/ee/services/access-approval-policy/access-approval-policy-types";
 import { removeTrailingSlash } from "@app/lib/fn";
+import { ms } from "@app/lib/ms";
 import { EnforcementLevel } from "@app/lib/types";
 import { readLimit, writeLimit } from "@app/server/config/rateLimiter";
 import { verifyAuth } from "@app/server/plugins/auth/verify-auth";
 import { sapPubSchema } from "@app/server/routes/sanitizedSchemas";
 import { AuthMode } from "@app/services/auth/auth-type";

+const maxTimePeriodSchema = z
+  .string()
+  .trim()
+  .nullish()
+  .transform((val, ctx) => {
+    if (val === undefined) return undefined;
+    if (!val || val === "permanent") return null;
+    const parsedMs = ms(val);
+
+    if (typeof parsedMs !== "number" || parsedMs <= 0) {
+      ctx.addIssue({
+        code: z.ZodIssueCode.custom,
+        message: "Invalid time period format or value. Must be a positive duration (e.g., '1h', '30m', '2d')."
+      });
+      return z.NEVER;
+    }
+    return val;
+  });
+
 export const registerAccessApprovalPolicyRouter = async (server: FastifyZodProvider) => {
   server.route({
     url: "/",
@@ -71,7 +91,8 @@ export const registerAccessApprovalPolicyRouter = async (server: FastifyZodProvider) => {
           .optional(),
         approvals: z.number().min(1).default(1),
         enforcementLevel: z.nativeEnum(EnforcementLevel).default(EnforcementLevel.Hard),
-        allowedSelfApprovals: z.boolean().default(true)
+        allowedSelfApprovals: z.boolean().default(true),
+        maxTimePeriod: maxTimePeriodSchema
       })
       .refine(
         (val) => Boolean(val.environment) || Boolean(val.environments),
@@ -124,7 +145,8 @@ export const registerAccessApprovalPolicyRouter = async (server: FastifyZodProvider) => {
               .array()
               .nullable()
               .optional(),
-            bypassers: z.object({ type: z.nativeEnum(BypasserType), id: z.string().nullable().optional() }).array()
+            bypassers: z.object({ type: z.nativeEnum(BypasserType), id: z.string().nullable().optional() }).array(),
+            maxTimePeriod: z.string().nullable().optional()
           })
           .array()
           .nullable()
@@ -233,7 +255,8 @@ export const registerAccessApprovalPolicyRouter = async (server: FastifyZodProvider) => {
             stepNumber: z.number().int()
           })
           .array()
-          .optional()
+          .optional(),
+        maxTimePeriod: maxTimePeriodSchema
       }),
       response: {
         200: z.object({
@@ -314,7 +337,8 @@ export const registerAccessApprovalPolicyRouter = async (server: FastifyZodProvider) => {
             })
             .array()
             .nullable()
-            .optional()
+            .optional(),
+          maxTimePeriod: z.string().nullable().optional()
         })
       })
     }
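For illustration, a self-contained sketch of how this transform behaves, assuming the public zod and ms packages in place of the project's internal wrappers:

import { z } from "zod";
import ms from "ms";

// Replica of maxTimePeriodSchema: undefined passes through untouched,
// "" / "permanent" normalize to null (no limit), anything else must
// parse as a positive ms-style duration string.
const maxTimePeriod = z
  .string()
  .trim()
  .nullish()
  .transform((val, ctx) => {
    if (val === undefined) return undefined;
    if (!val || val === "permanent") return null;
    const parsedMs = ms(val);
    if (typeof parsedMs !== "number" || parsedMs <= 0) {
      ctx.addIssue({ code: z.ZodIssueCode.custom, message: "Invalid time period" });
      return z.NEVER;
    }
    return val;
  });

maxTimePeriod.parse("1h");        // "1h"
maxTimePeriod.parse("permanent"); // null
maxTimePeriod.parse("-5m");       // throws ZodError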
@@ -2,6 +2,7 @@ import { z } from "zod";

 import { AccessApprovalRequestsReviewersSchema, AccessApprovalRequestsSchema, UsersSchema } from "@app/db/schemas";
 import { ApprovalStatus } from "@app/ee/services/access-approval-request/access-approval-request-types";
+import { ms } from "@app/lib/ms";
 import { writeLimit } from "@app/server/config/rateLimiter";
 import { verifyAuth } from "@app/server/plugins/auth/verify-auth";
 import { AuthMode } from "@app/services/auth/auth-type";
@@ -26,7 +27,23 @@ export const registerAccessApprovalRequestRouter = async (server: FastifyZodProvider) => {
       body: z.object({
         permissions: z.any().array(),
         isTemporary: z.boolean(),
-        temporaryRange: z.string().optional(),
+        temporaryRange: z
+          .string()
+          .optional()
+          .transform((val, ctx) => {
+            if (!val || val === "permanent") return undefined;
+
+            const parsedMs = ms(val);
+
+            if (typeof parsedMs !== "number" || parsedMs <= 0) {
+              ctx.addIssue({
+                code: z.ZodIssueCode.custom,
+                message: "Invalid time period format or value. Must be a positive duration (e.g., '1h', '30m', '2d')."
+              });
+              return z.NEVER;
+            }
+            return val;
+          }),
         note: z.string().max(255).optional()
       }),
       querystring: z.object({
@@ -128,7 +145,8 @@ export const registerAccessApprovalRequestRouter = async (server: FastifyZodProvider) => {
           envId: z.string(),
           enforcementLevel: z.string(),
           deletedAt: z.date().nullish(),
-          allowedSelfApprovals: z.boolean()
+          allowedSelfApprovals: z.boolean(),
+          maxTimePeriod: z.string().nullable().optional()
         }),
         reviewers: z
           .object({
@@ -189,4 +207,47 @@ export const registerAccessApprovalRequestRouter = async (server: FastifyZodProvider) => {
       return { review };
     }
   });
+
+  server.route({
+    url: "/:requestId",
+    method: "PATCH",
+    schema: {
+      params: z.object({
+        requestId: z.string().trim()
+      }),
+      body: z.object({
+        temporaryRange: z.string().transform((val, ctx) => {
+          const parsedMs = ms(val);
+
+          if (typeof parsedMs !== "number" || parsedMs <= 0) {
+            ctx.addIssue({
+              code: z.ZodIssueCode.custom,
+              message: "Invalid time period format or value. Must be a positive duration (e.g., '1h', '30m', '2d')."
+            });
+            return z.NEVER;
+          }
+          return val;
+        }),
+        editNote: z.string().max(255)
+      }),
+      response: {
+        200: z.object({
+          approval: AccessApprovalRequestsSchema
+        })
+      }
+    },
+    onRequest: verifyAuth([AuthMode.JWT]),
+    handler: async (req) => {
+      const { request } = await server.services.accessApprovalRequest.updateAccessApprovalRequest({
+        actor: req.permission.type,
+        actorId: req.permission.id,
+        actorAuthMethod: req.permission.authMethod,
+        actorOrgId: req.permission.orgId,
+        temporaryRange: req.body.temporaryRange,
+        editNote: req.body.editNote,
+        requestId: req.params.requestId
+      });
+      return { approval: request };
+    }
+  });
 };
@@ -56,6 +56,7 @@ export interface TAccessApprovalPolicyDALFactory
       allowedSelfApprovals: boolean;
       secretPath: string;
       deletedAt?: Date | null | undefined;
+      maxTimePeriod?: string | null;
       projectId: string;
       bypassers: (
         | {
@@ -96,6 +97,7 @@ export interface TAccessApprovalPolicyDALFactory
       allowedSelfApprovals: boolean;
       secretPath: string;
       deletedAt?: Date | null | undefined;
+      maxTimePeriod?: string | null;
       environments: {
         id: string;
         name: string;
@@ -141,6 +143,7 @@ export interface TAccessApprovalPolicyDALFactory
         allowedSelfApprovals: boolean;
         secretPath: string;
         deletedAt?: Date | null | undefined;
+        maxTimePeriod?: string | null;
       }
     | undefined
   >;
@@ -100,7 +100,8 @@ export const accessApprovalPolicyServiceFactory = ({
     environments,
     enforcementLevel,
     allowedSelfApprovals,
-    approvalsRequired
+    approvalsRequired,
+    maxTimePeriod
   }) => {
     const project = await projectDAL.findProjectBySlug(projectSlug, actorOrgId);
     if (!project) throw new NotFoundError({ message: `Project with slug '${projectSlug}' not found` });
@@ -219,7 +220,8 @@ export const accessApprovalPolicyServiceFactory = ({
           secretPath,
           name,
           enforcementLevel,
-          allowedSelfApprovals
+          allowedSelfApprovals,
+          maxTimePeriod
         },
         tx
       );
@@ -318,7 +320,8 @@ export const accessApprovalPolicyServiceFactory = ({
     enforcementLevel,
     allowedSelfApprovals,
     approvalsRequired,
-    environments
+    environments,
+    maxTimePeriod
   }: TUpdateAccessApprovalPolicy) => {
     const groupApprovers = approvers.filter((approver) => approver.type === ApproverType.Group);

@@ -461,7 +464,8 @@ export const accessApprovalPolicyServiceFactory = ({
           secretPath,
           name,
           enforcementLevel,
-          allowedSelfApprovals
+          allowedSelfApprovals,
+          maxTimePeriod
         },
         tx
       );
@@ -41,6 +41,7 @@ export type TCreateAccessApprovalPolicy = {
   enforcementLevel: EnforcementLevel;
   allowedSelfApprovals: boolean;
   approvalsRequired?: { numberOfApprovals: number; stepNumber: number }[];
+  maxTimePeriod?: string | null;
 } & Omit<TProjectPermission, "projectId">;

 export type TUpdateAccessApprovalPolicy = {
@@ -60,6 +61,7 @@ export type TUpdateAccessApprovalPolicy = {
   allowedSelfApprovals: boolean;
   approvalsRequired?: { numberOfApprovals: number; stepNumber: number }[];
   environments?: string[];
+  maxTimePeriod?: string | null;
 } & Omit<TProjectPermission, "projectId">;

 export type TDeleteAccessApprovalPolicy = {
@@ -104,7 +106,8 @@ export interface TAccessApprovalPolicyServiceFactory {
     environment,
     enforcementLevel,
     allowedSelfApprovals,
-    approvalsRequired
+    approvalsRequired,
+    maxTimePeriod
   }: TCreateAccessApprovalPolicy) => Promise<{
     environment: {
       name: string;
@@ -135,6 +138,7 @@ export interface TAccessApprovalPolicyServiceFactory {
     allowedSelfApprovals: boolean;
     secretPath: string;
     deletedAt?: Date | null | undefined;
+    maxTimePeriod?: string | null;
   }>;
   deleteAccessApprovalPolicy: ({
     policyId,
@@ -159,6 +163,7 @@ export interface TAccessApprovalPolicyServiceFactory {
     allowedSelfApprovals: boolean;
     secretPath: string;
     deletedAt?: Date | null | undefined;
+    maxTimePeriod?: string | null;
     environment: {
       id: string;
       name: string;
@@ -185,7 +190,8 @@ export interface TAccessApprovalPolicyServiceFactory {
     enforcementLevel,
     allowedSelfApprovals,
     approvalsRequired,
-    environments
+    environments,
+    maxTimePeriod
   }: TUpdateAccessApprovalPolicy) => Promise<{
     environment: {
       id: string;
@@ -208,6 +214,7 @@ export interface TAccessApprovalPolicyServiceFactory {
     allowedSelfApprovals: boolean;
     secretPath?: string | null | undefined;
     deletedAt?: Date | null | undefined;
+    maxTimePeriod?: string | null;
   }>;
   getAccessApprovalPolicyByProjectSlug: ({
     actorId,
@@ -242,6 +249,7 @@ export interface TAccessApprovalPolicyServiceFactory {
     allowedSelfApprovals: boolean;
     secretPath: string;
     deletedAt?: Date | null | undefined;
+    maxTimePeriod?: string | null;
     environment: {
       id: string;
       name: string;
@@ -298,6 +306,7 @@ export interface TAccessApprovalPolicyServiceFactory {
     allowedSelfApprovals: boolean;
     secretPath: string;
     deletedAt?: Date | null | undefined;
+    maxTimePeriod?: string | null;
     environment: {
       id: string;
       name: string;
@@ -63,6 +63,7 @@ export interface TAccessApprovalRequestDALFactory extends Omit<TOrmify<TableName
enforcementLevel: string;
allowedSelfApprovals: boolean;
deletedAt: Date | null | undefined;
maxTimePeriod?: string | null;
};
projectId: string;
environments: string[];
@@ -161,6 +162,7 @@ export interface TAccessApprovalRequestDALFactory extends Omit<TOrmify<TableName
allowedSelfApprovals: boolean;
envId: string;
deletedAt: Date | null | undefined;
maxTimePeriod?: string | null;
};
projectId: string;
environment: string;
@@ -297,7 +299,8 @@ export const accessApprovalRequestDALFactory = (db: TDbClient): TAccessApprovalR
db.ref("enforcementLevel").withSchema(TableName.AccessApprovalPolicy).as("policyEnforcementLevel"),
db.ref("allowedSelfApprovals").withSchema(TableName.AccessApprovalPolicy).as("policyAllowedSelfApprovals"),
db.ref("envId").withSchema(TableName.AccessApprovalPolicy).as("policyEnvId"),
db.ref("deletedAt").withSchema(TableName.AccessApprovalPolicy).as("policyDeletedAt")
db.ref("deletedAt").withSchema(TableName.AccessApprovalPolicy).as("policyDeletedAt"),
db.ref("maxTimePeriod").withSchema(TableName.AccessApprovalPolicy).as("policyMaxTimePeriod")
)
.select(db.ref("approverUserId").withSchema(TableName.AccessApprovalPolicyApprover))
.select(db.ref("sequence").withSchema(TableName.AccessApprovalPolicyApprover).as("approverSequence"))
@@ -364,7 +367,8 @@ export const accessApprovalRequestDALFactory = (db: TDbClient): TAccessApprovalR
enforcementLevel: doc.policyEnforcementLevel,
allowedSelfApprovals: doc.policyAllowedSelfApprovals,
envId: doc.policyEnvId,
deletedAt: doc.policyDeletedAt
deletedAt: doc.policyDeletedAt,
maxTimePeriod: doc.policyMaxTimePeriod
},
requestedByUser: {
userId: doc.requestedByUserId,
@@ -574,7 +578,8 @@ export const accessApprovalRequestDALFactory = (db: TDbClient): TAccessApprovalR
tx.ref("enforcementLevel").withSchema(TableName.AccessApprovalPolicy).as("policyEnforcementLevel"),
tx.ref("allowedSelfApprovals").withSchema(TableName.AccessApprovalPolicy).as("policyAllowedSelfApprovals"),
tx.ref("approvals").withSchema(TableName.AccessApprovalPolicy).as("policyApprovals"),
tx.ref("deletedAt").withSchema(TableName.AccessApprovalPolicy).as("policyDeletedAt")
tx.ref("deletedAt").withSchema(TableName.AccessApprovalPolicy).as("policyDeletedAt"),
tx.ref("maxTimePeriod").withSchema(TableName.AccessApprovalPolicy).as("policyMaxTimePeriod")
);

const findById: TAccessApprovalRequestDALFactory["findById"] = async (id, tx) => {
@@ -595,7 +600,8 @@ export const accessApprovalRequestDALFactory = (db: TDbClient): TAccessApprovalR
secretPath: el.policySecretPath,
enforcementLevel: el.policyEnforcementLevel,
allowedSelfApprovals: el.policyAllowedSelfApprovals,
deletedAt: el.policyDeletedAt
deletedAt: el.policyDeletedAt,
maxTimePeriod: el.policyMaxTimePeriod
},
requestedByUser: {
userId: el.requestedByUserId,
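The repeated db.ref(...).withSchema(...).as(...) calls above are Knex column references: withSchema qualifies the column with the policy table's name so joined columns don't collide, and as gives it a policy-prefixed alias that the row mapper later reads back (doc.policyDeletedAt, doc.policyMaxTimePeriod). A minimal sketch of the same pattern, with illustrative table names:

import knex from "knex";

const db = knex({ client: "pg", connection: process.env.DB_URL });

// Illustrative only: select two policy columns, aliased so they can sit
// next to request columns in the same flat result row.
async function findRequestsWithPolicy() {
  return db("access_approval_requests")
    .join("access_approval_policies", "access_approval_policies.id", "access_approval_requests.policyId")
    .select(
      "access_approval_requests.*",
      db.ref("deletedAt").withSchema("access_approval_policies").as("policyDeletedAt"),
      db.ref("maxTimePeriod").withSchema("access_approval_policies").as("policyMaxTimePeriod")
    );
}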
@@ -54,7 +54,7 @@ type TSecretApprovalRequestServiceFactoryDep = {
accessApprovalPolicyDAL: Pick<TAccessApprovalPolicyDALFactory, "findOne" | "find" | "findLastValidPolicy">;
accessApprovalRequestReviewerDAL: Pick<
TAccessApprovalRequestReviewerDALFactory,
"create" | "find" | "findOne" | "transaction"
"create" | "find" | "findOne" | "transaction" | "delete"
>;
groupDAL: Pick<TGroupDALFactory, "findAllGroupPossibleMembers">;
projectMembershipDAL: Pick<TProjectMembershipDALFactory, "findById">;
@@ -156,6 +156,15 @@ export const accessApprovalRequestServiceFactory = ({
throw new BadRequestError({ message: "The policy linked to this request has been deleted" });
}

// Check if the requested time falls under policy.maxTimePeriod
if (policy.maxTimePeriod) {
if (!temporaryRange || ms(temporaryRange) > ms(policy.maxTimePeriod)) {
throw new BadRequestError({
message: `Requested access time range is limited to ${policy.maxTimePeriod} by policy`
});
}
}

const approverIds: string[] = [];
const approverGroupIds: string[] = [];
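The new guard compares two human-readable duration strings by converting both to milliseconds. A minimal sketch of the same check, assuming the ms package (the service's ms helper behaves the same way for these inputs):

import ms from "ms";

// A permanent request (no temporaryRange) or a range longer than the
// policy ceiling is rejected.
const exceedsPolicy = (temporaryRange: string | undefined, maxTimePeriod: string): boolean =>
  !temporaryRange || ms(temporaryRange) > ms(maxTimePeriod);

exceedsPolicy("4h", "1d"); // false: 14_400_000 <= 86_400_000
exceedsPolicy("2d", "1d"); // true: the requested range is over the ceiling
exceedsPolicy(undefined, "1d"); // true: unbounded access is not allowed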
@@ -292,6 +301,155 @@ export const accessApprovalRequestServiceFactory = ({
return { request: approval };
};

const updateAccessApprovalRequest: TAccessApprovalRequestServiceFactory["updateAccessApprovalRequest"] = async ({
temporaryRange,
actorId,
actor,
actorOrgId,
actorAuthMethod,
editNote,
requestId
}) => {
const cfg = getConfig();

const accessApprovalRequest = await accessApprovalRequestDAL.findById(requestId);
if (!accessApprovalRequest) {
throw new NotFoundError({ message: `Access request with ID '${requestId}' not found` });
}

const { policy, requestedByUser } = accessApprovalRequest;
if (policy.deletedAt) {
throw new BadRequestError({
message: "The policy associated with this access request has been deleted."
});
}

const { membership, hasRole } = await permissionService.getProjectPermission({
actor,
actorId,
projectId: accessApprovalRequest.projectId,
actorAuthMethod,
actorOrgId,
actionProjectType: ActionProjectType.SecretManager
});

if (!membership) {
throw new ForbiddenRequestError({ message: "You are not a member of this project" });
}

const isApprover = policy.approvers.find((approver) => approver.userId === actorId);

if (!hasRole(ProjectMembershipRole.Admin) && !isApprover) {
throw new ForbiddenRequestError({ message: "You are not authorized to modify this request" });
}

const project = await projectDAL.findById(accessApprovalRequest.projectId);

if (!project) {
throw new NotFoundError({
message: `The project associated with this access request was not found. [projectId=${accessApprovalRequest.projectId}]`
});
}

if (accessApprovalRequest.status !== ApprovalStatus.PENDING) {
throw new BadRequestError({ message: "The request has been closed" });
}

const editedByUser = await userDAL.findById(actorId);

if (!editedByUser) throw new NotFoundError({ message: "Editing user not found" });

if (accessApprovalRequest.isTemporary && accessApprovalRequest.temporaryRange) {
if (ms(temporaryRange) > ms(accessApprovalRequest.temporaryRange)) {
throw new BadRequestError({ message: "Updated access duration must be less than current access duration" });
}
}

const { envSlug, secretPath, accessTypes } = verifyRequestedPermissions({
permissions: accessApprovalRequest.permissions
});

const approval = await accessApprovalRequestDAL.transaction(async (tx) => {
const approvalRequest = await accessApprovalRequestDAL.updateById(
requestId,
{
temporaryRange,
isTemporary: true,
editNote,
editedByUserId: actorId
},
tx
);

// reset review progress
await accessApprovalRequestReviewerDAL.delete(
{
requestId
},
tx
);

const requesterFullName = `${requestedByUser.firstName} ${requestedByUser.lastName}`;
const editorFullName = `${editedByUser.firstName} ${editedByUser.lastName}`;
const approvalUrl = `${cfg.SITE_URL}/projects/secret-management/${project.id}/approval`;

await triggerWorkflowIntegrationNotification({
input: {
notification: {
type: TriggerFeature.ACCESS_REQUEST_UPDATED,
payload: {
projectName: project.name,
requesterFullName,
isTemporary: true,
requesterEmail: requestedByUser.email as string,
secretPath,
environment: envSlug,
permissions: accessTypes,
approvalUrl,
editNote,
editorEmail: editedByUser.email as string,
editorFullName
}
},
projectId: project.id
},
dependencies: {
projectDAL,
projectSlackConfigDAL,
kmsService,
microsoftTeamsService,
projectMicrosoftTeamsConfigDAL
}
});

await smtpService.sendMail({
recipients: policy.approvers
.filter((approver) => Boolean(approver.email) && approver.userId !== editedByUser.id)
.map((approver) => approver.email!),
subjectLine: "Access Approval Request Updated",
substitutions: {
projectName: project.name,
requesterFullName,
requesterEmail: requestedByUser.email,
isTemporary: true,
expiresIn: msFn(ms(temporaryRange || ""), { long: true }),
secretPath,
environment: envSlug,
permissions: accessTypes,
approvalUrl,
editNote,
editorFullName,
editorEmail: editedByUser.email
},
template: SmtpTemplates.AccessApprovalRequestUpdated
});

return approvalRequest;
});

return { request: approval };
};

const listApprovalRequests: TAccessApprovalRequestServiceFactory["listApprovalRequests"] = async ({
projectSlug,
authorUserId,
@@ -641,6 +799,7 @@ export const accessApprovalRequestServiceFactory = ({

return {
createAccessApprovalRequest,
updateAccessApprovalRequest,
listApprovalRequests,
reviewAccessRequest,
getCount
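For the notification email, the same duration string is round-tripped: parsed to milliseconds and then formatted long-form (the service appears to import the ms formatter under the alias msFn). A sketch with the ms package:

import ms from "ms";

const temporaryRange = "4h"; // illustrative value
const expiresIn = ms(ms(temporaryRange), { long: true }); // "4 hours"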
@@ -30,6 +30,12 @@ export type TCreateAccessApprovalRequestDTO = {
note?: string;
} & Omit<TProjectPermission, "projectId">;

export type TUpdateAccessApprovalRequestDTO = {
requestId: string;
temporaryRange: string;
editNote: string;
} & Omit<TProjectPermission, "projectId">;

export type TListApprovalRequestsDTO = {
projectSlug: string;
authorUserId?: string;
@@ -54,6 +60,23 @@ export interface TAccessApprovalRequestServiceFactory {
privilegeDeletedAt?: Date | null | undefined;
};
}>;
updateAccessApprovalRequest: (arg: TUpdateAccessApprovalRequestDTO) => Promise<{
request: {
status: string;
id: string;
createdAt: Date;
updatedAt: Date;
policyId: string;
isTemporary: boolean;
requestedByUserId: string;
privilegeId?: string | null | undefined;
requestedBy?: string | null | undefined;
temporaryRange?: string | null | undefined;
permissions?: unknown;
note?: string | null | undefined;
privilegeDeletedAt?: Date | null | undefined;
};
}>;
listApprovalRequests: (arg: TListApprovalRequestsDTO) => Promise<{
requests: {
policy: {
@@ -82,6 +105,7 @@ export interface TAccessApprovalRequestServiceFactory {
allowedSelfApprovals: boolean;
envId: string;
deletedAt: Date | null | undefined;
maxTimePeriod?: string | null;
};
projectId: string;
environment: string;
@@ -1,8 +1,6 @@
import { AxiosError, RawAxiosRequestHeaders } from "axios";

import { ProjectType, SecretKeyEncoding } from "@app/db/schemas";
import { TEventBusService } from "@app/ee/services/event/event-bus-service";
import { TopicName, toPublishableEvent } from "@app/ee/services/event/types";
import { SecretKeyEncoding } from "@app/db/schemas";
import { request } from "@app/lib/config/request";
import { crypto } from "@app/lib/crypto/cryptography";
import { logger } from "@app/lib/logger";
@@ -22,7 +20,6 @@ type TAuditLogQueueServiceFactoryDep = {
queueService: TQueueServiceFactory;
projectDAL: Pick<TProjectDALFactory, "findById">;
licenseService: Pick<TLicenseServiceFactory, "getPlan">;
eventBusService: TEventBusService;
};

export type TAuditLogQueueServiceFactory = {
@@ -38,8 +35,7 @@ export const auditLogQueueServiceFactory = async ({
queueService,
projectDAL,
licenseService,
auditLogStreamDAL,
eventBusService
auditLogStreamDAL
}: TAuditLogQueueServiceFactoryDep): Promise<TAuditLogQueueServiceFactory> => {
const pushToLog = async (data: TCreateAuditLogDTO) => {
await queueService.queue<QueueName.AuditLog>(QueueName.AuditLog, QueueJobs.AuditLog, data, {
@@ -145,16 +141,6 @@ export const auditLogQueueServiceFactory = async ({
)
);
}

const publishable = toPublishableEvent(event);

if (publishable) {
await eventBusService.publish(TopicName.CoreServers, {
type: ProjectType.SecretManager,
source: "infiscal",
data: publishable.data
});
}
});

return {
@@ -9,7 +9,7 @@ import { getDbConnectionHost } from "@app/lib/knex";
export const verifyHostInputValidity = async (host: string, isGateway = false) => {
const appCfg = getConfig();

if (appCfg.isDevelopmentMode) return [host];
if (appCfg.isDevelopmentMode || appCfg.isTestMode) return [host];

if (isGateway) return [host];
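The change widens the early-return so test mode, like local development, skips host validation entirely. A minimal sketch of the guard ordering, with the config shape assumed from the call site:

type AppCfg = { isDevelopmentMode: boolean; isTestMode: boolean };

const resolveHosts = (host: string, isGateway: boolean, appCfg: AppCfg): string[] => {
  if (appCfg.isDevelopmentMode || appCfg.isTestMode) return [host]; // trusted as-is
  if (isGateway) return [host]; // gateway hosts also bypass validation
  throw new Error("full DNS/IP validation elided in this sketch");
};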
@@ -15,6 +15,7 @@ import { z } from "zod";
import { CustomAWSHasher } from "@app/lib/aws/hashing";
import { crypto } from "@app/lib/crypto";
import { BadRequestError } from "@app/lib/errors";
import { sanitizeString } from "@app/lib/fn";
import { validateHandlebarTemplate } from "@app/lib/template/validate-handlebars";

import { DynamicSecretAwsElastiCacheSchema, TDynamicProviderFns } from "./models";
@@ -170,14 +171,29 @@ export const AwsElastiCacheDatabaseProvider = (): TDynamicProviderFns => {
};
const validateConnection = async (inputs: unknown) => {
const providerInputs = await validateProviderInputs(inputs);
await ElastiCacheUserManager(
{
accessKeyId: providerInputs.accessKeyId,
secretAccessKey: providerInputs.secretAccessKey
},
providerInputs.region
).verifyCredentials(providerInputs.clusterName);
return true;
try {
await ElastiCacheUserManager(
{
accessKeyId: providerInputs.accessKeyId,
secretAccessKey: providerInputs.secretAccessKey
},
providerInputs.region
).verifyCredentials(providerInputs.clusterName);
return true;
} catch (err) {
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [
providerInputs.accessKeyId,
providerInputs.secretAccessKey,
providerInputs.clusterName,
providerInputs.region
]
});
throw new BadRequestError({
message: `Failed to connect with provider: ${sanitizedErrorMessage}`
});
}
};

const create = async (data: {
@@ -206,21 +222,37 @@ export const AwsElastiCacheDatabaseProvider = (): TDynamicProviderFns => {

const parsedStatement = CreateElastiCacheUserSchema.parse(JSON.parse(creationStatement));

await ElastiCacheUserManager(
{
accessKeyId: providerInputs.accessKeyId,
secretAccessKey: providerInputs.secretAccessKey
},
providerInputs.region
).createUser(parsedStatement, providerInputs.clusterName);
try {
await ElastiCacheUserManager(
{
accessKeyId: providerInputs.accessKeyId,
secretAccessKey: providerInputs.secretAccessKey
},
providerInputs.region
).createUser(parsedStatement, providerInputs.clusterName);

return {
entityId: leaseUsername,
data: {
DB_USERNAME: leaseUsername,
DB_PASSWORD: leasePassword
}
};
return {
entityId: leaseUsername,
data: {
DB_USERNAME: leaseUsername,
DB_PASSWORD: leasePassword
}
};
} catch (err) {
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [
leaseUsername,
leasePassword,
providerInputs.accessKeyId,
providerInputs.secretAccessKey,
providerInputs.clusterName
]
});
throw new BadRequestError({
message: `Failed to create lease from provider: ${sanitizedErrorMessage}`
});
}
};

const revoke = async (inputs: unknown, entityId: string) => {
@@ -229,15 +261,25 @@ export const AwsElastiCacheDatabaseProvider = (): TDynamicProviderFns => {
const revokeStatement = handlebars.compile(providerInputs.revocationStatement)({ username: entityId });
const parsedStatement = DeleteElasticCacheUserSchema.parse(JSON.parse(revokeStatement));

await ElastiCacheUserManager(
{
accessKeyId: providerInputs.accessKeyId,
secretAccessKey: providerInputs.secretAccessKey
},
providerInputs.region
).deleteUser(parsedStatement);
try {
await ElastiCacheUserManager(
{
accessKeyId: providerInputs.accessKeyId,
secretAccessKey: providerInputs.secretAccessKey
},
providerInputs.region
).deleteUser(parsedStatement);

return { entityId };
return { entityId };
} catch (err) {
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [entityId, providerInputs.accessKeyId, providerInputs.secretAccessKey, providerInputs.clusterName]
});
throw new BadRequestError({
message: `Failed to revoke lease from provider: ${sanitizedErrorMessage}`
});
}
};

const renew = async (_inputs: unknown, entityId: string) => {
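sanitizeString (imported from @app/lib/fn) is the recurring piece in every catch block of this and the following providers: it redacts credentials from an upstream error message before the error is rethrown to the caller. A minimal sketch of what such a helper might do; the real implementation may differ:

const redact = ({ unsanitizedString, tokens }: { unsanitizedString: string; tokens: string[] }) =>
  tokens.filter(Boolean).reduce(
    (msg, token) => msg.split(token).join("[REDACTED]"),
    unsanitizedString
  );

// Usage: an AWS error that echoes the access key back becomes safe to surface.
redact({
  unsanitizedString: "InvalidClientTokenId: AKIA123 is not valid",
  tokens: ["AKIA123"]
}); // "InvalidClientTokenId: [REDACTED] is not valid"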
@@ -23,6 +23,7 @@ import { CustomAWSHasher } from "@app/lib/aws/hashing";
import { getConfig } from "@app/lib/config/env";
import { crypto } from "@app/lib/crypto/cryptography";
import { BadRequestError, UnauthorizedError } from "@app/lib/errors";
import { sanitizeString } from "@app/lib/fn";
import { alphaNumericNanoId } from "@app/lib/nanoid";

import { AwsIamAuthType, DynamicSecretAwsIamSchema, TDynamicProviderFns } from "./models";
@@ -118,22 +119,39 @@ export const AwsIamProvider = (): TDynamicProviderFns => {

const validateConnection = async (inputs: unknown, { projectId }: { projectId: string }) => {
const providerInputs = await validateProviderInputs(inputs);
const client = await $getClient(providerInputs, projectId);
const isConnected = await client
.send(new GetUserCommand({}))
.then(() => true)
.catch((err) => {
const message = (err as Error)?.message;
if (
(providerInputs.method === AwsIamAuthType.AssumeRole || providerInputs.method === AwsIamAuthType.IRSA) &&
// assume role will throw an error asking to provider username, but if so this has access in aws correctly
message.includes("Must specify userName when calling with non-User credentials")
) {
return true;
}
throw err;
try {
const client = await $getClient(providerInputs, projectId);
const isConnected = await client
.send(new GetUserCommand({}))
.then(() => true)
.catch((err) => {
const message = (err as Error)?.message;
if (
(providerInputs.method === AwsIamAuthType.AssumeRole || providerInputs.method === AwsIamAuthType.IRSA) &&
// assume role will throw an error asking to provider username, but if so this has access in aws correctly
message.includes("Must specify userName when calling with non-User credentials")
) {
return true;
}
throw err;
});
return isConnected;
} catch (err) {
const sensitiveTokens = [];
if (providerInputs.method === AwsIamAuthType.AccessKey) {
sensitiveTokens.push(providerInputs.accessKey, providerInputs.secretAccessKey);
}
if (providerInputs.method === AwsIamAuthType.AssumeRole) {
sensitiveTokens.push(providerInputs.roleArn);
}
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: sensitiveTokens
});
return isConnected;
throw new BadRequestError({
message: `Failed to connect with provider: ${sanitizedErrorMessage}`
});
}
};

const create = async (data: {
@@ -162,62 +180,81 @@ export const AwsIamProvider = (): TDynamicProviderFns => {
awsTags.push(...additionalTags);
}

const createUserRes = await client.send(
new CreateUserCommand({
Path: awsPath,
PermissionsBoundary: permissionBoundaryPolicyArn || undefined,
Tags: awsTags,
UserName: username
})
);

if (!createUserRes.User) throw new BadRequestError({ message: "Failed to create AWS IAM User" });
if (userGroups) {
await Promise.all(
userGroups
.split(",")
.filter(Boolean)
.map((group) =>
client.send(new AddUserToGroupCommand({ UserName: createUserRes?.User?.UserName, GroupName: group }))
)
);
}
if (policyArns) {
await Promise.all(
policyArns
.split(",")
.filter(Boolean)
.map((policyArn) =>
client.send(new AttachUserPolicyCommand({ UserName: createUserRes?.User?.UserName, PolicyArn: policyArn }))
)
);
}
if (policyDocument) {
await client.send(
new PutUserPolicyCommand({
UserName: createUserRes.User.UserName,
PolicyName: `infisical-dynamic-policy-${alphaNumericNanoId(4)}`,
PolicyDocument: policyDocument
try {
const createUserRes = await client.send(
new CreateUserCommand({
Path: awsPath,
PermissionsBoundary: permissionBoundaryPolicyArn || undefined,
Tags: awsTags,
UserName: username
})
);
}

const createAccessKeyRes = await client.send(
new CreateAccessKeyCommand({
UserName: createUserRes.User.UserName
})
);
if (!createAccessKeyRes.AccessKey)
throw new BadRequestError({ message: "Failed to create AWS IAM User access key" });

return {
entityId: username,
data: {
ACCESS_KEY: createAccessKeyRes.AccessKey.AccessKeyId,
SECRET_ACCESS_KEY: createAccessKeyRes.AccessKey.SecretAccessKey,
USERNAME: username
if (!createUserRes.User) throw new BadRequestError({ message: "Failed to create AWS IAM User" });
if (userGroups) {
await Promise.all(
userGroups
.split(",")
.filter(Boolean)
.map((group) =>
client.send(new AddUserToGroupCommand({ UserName: createUserRes?.User?.UserName, GroupName: group }))
)
);
}
};
if (policyArns) {
await Promise.all(
policyArns
.split(",")
.filter(Boolean)
.map((policyArn) =>
client.send(
new AttachUserPolicyCommand({ UserName: createUserRes?.User?.UserName, PolicyArn: policyArn })
)
)
);
}
if (policyDocument) {
await client.send(
new PutUserPolicyCommand({
UserName: createUserRes.User.UserName,
PolicyName: `infisical-dynamic-policy-${alphaNumericNanoId(4)}`,
PolicyDocument: policyDocument
})
);
}

const createAccessKeyRes = await client.send(
new CreateAccessKeyCommand({
UserName: createUserRes.User.UserName
})
);
if (!createAccessKeyRes.AccessKey)
throw new BadRequestError({ message: "Failed to create AWS IAM User access key" });

return {
entityId: username,
data: {
ACCESS_KEY: createAccessKeyRes.AccessKey.AccessKeyId,
SECRET_ACCESS_KEY: createAccessKeyRes.AccessKey.SecretAccessKey,
USERNAME: username
}
};
} catch (err) {
const sensitiveTokens = [username];
if (providerInputs.method === AwsIamAuthType.AccessKey) {
sensitiveTokens.push(providerInputs.accessKey, providerInputs.secretAccessKey);
}
if (providerInputs.method === AwsIamAuthType.AssumeRole) {
sensitiveTokens.push(providerInputs.roleArn);
}
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: sensitiveTokens
});
throw new BadRequestError({
message: `Failed to create lease from provider: ${sanitizedErrorMessage}`
});
}
};

const revoke = async (inputs: unknown, entityId: string, metadata: { projectId: string }) => {
@@ -278,8 +315,25 @@ export const AwsIamProvider = (): TDynamicProviderFns => {
)
);

await client.send(new DeleteUserCommand({ UserName: username }));
return { entityId: username };
try {
await client.send(new DeleteUserCommand({ UserName: username }));
return { entityId: username };
} catch (err) {
const sensitiveTokens = [username];
if (providerInputs.method === AwsIamAuthType.AccessKey) {
sensitiveTokens.push(providerInputs.accessKey, providerInputs.secretAccessKey);
}
if (providerInputs.method === AwsIamAuthType.AssumeRole) {
sensitiveTokens.push(providerInputs.roleArn);
}
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: sensitiveTokens
});
throw new BadRequestError({
message: `Failed to revoke lease from provider: ${sanitizedErrorMessage}`
});
}
};

const renew = async (_inputs: unknown, entityId: string) => {
@@ -2,6 +2,7 @@ import axios from "axios";
import { customAlphabet } from "nanoid";

import { BadRequestError } from "@app/lib/errors";
import { sanitizeString } from "@app/lib/fn";

import { AzureEntraIDSchema, TDynamicProviderFns } from "./models";

@@ -51,45 +52,82 @@ export const AzureEntraIDProvider = (): TDynamicProviderFns & {

const validateConnection = async (inputs: unknown) => {
const providerInputs = await validateProviderInputs(inputs);
const data = await $getToken(providerInputs.tenantId, providerInputs.applicationId, providerInputs.clientSecret);
return data.success;
try {
const data = await $getToken(providerInputs.tenantId, providerInputs.applicationId, providerInputs.clientSecret);
return data.success;
} catch (err) {
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [providerInputs.clientSecret, providerInputs.applicationId, providerInputs.tenantId]
});
throw new BadRequestError({
message: `Failed to connect with provider: ${sanitizedErrorMessage}`
});
}
};

const create = async ({ inputs }: { inputs: unknown }) => {
const providerInputs = await validateProviderInputs(inputs);
const data = await $getToken(providerInputs.tenantId, providerInputs.applicationId, providerInputs.clientSecret);
if (!data.success) {
throw new BadRequestError({ message: "Failed to authorize to Microsoft Entra ID" });
}

const password = generatePassword();

const response = await axios.patch(
`${MSFT_GRAPH_API_URL}/users/${providerInputs.userId}`,
{
passwordProfile: {
forceChangePasswordNextSignIn: false,
password
}
},
{
headers: {
"Content-Type": "application/json",
Authorization: `Bearer ${data.token}`
}
try {
const data = await $getToken(providerInputs.tenantId, providerInputs.applicationId, providerInputs.clientSecret);
if (!data.success) {
throw new BadRequestError({ message: "Failed to authorize to Microsoft Entra ID" });
}
);
if (response.status !== 204) {
throw new BadRequestError({ message: "Failed to update password" });
}

return { entityId: providerInputs.userId, data: { email: providerInputs.email, password } };
const response = await axios.patch(
`${MSFT_GRAPH_API_URL}/users/${providerInputs.userId}`,
{
passwordProfile: {
forceChangePasswordNextSignIn: false,
password
}
},
{
headers: {
"Content-Type": "application/json",
Authorization: `Bearer ${data.token}`
}
}
);
if (response.status !== 204) {
throw new BadRequestError({ message: "Failed to update password" });
}

return { entityId: providerInputs.userId, data: { email: providerInputs.email, password } };
} catch (err) {
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [
providerInputs.clientSecret,
providerInputs.applicationId,
providerInputs.userId,
providerInputs.email,
password
]
});
throw new BadRequestError({
message: `Failed to create lease from provider: ${sanitizedErrorMessage}`
});
}
};

const revoke = async (inputs: unknown, entityId: string) => {
// Creates a new password
await create({ inputs });
return { entityId };
const providerInputs = await validateProviderInputs(inputs);
try {
// Creates a new password
await create({ inputs });
return { entityId };
} catch (err) {
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [providerInputs.clientSecret, providerInputs.applicationId, entityId]
});
throw new BadRequestError({
message: `Failed to revoke lease from provider: ${sanitizedErrorMessage}`
});
}
};

const fetchAzureEntraIdUsers = async (tenantId: string, applicationId: string, clientSecret: string) => {
@@ -3,6 +3,8 @@ import handlebars from "handlebars";
import { customAlphabet } from "nanoid";
import { z } from "zod";

import { BadRequestError } from "@app/lib/errors";
import { sanitizeString } from "@app/lib/fn";
import { alphaNumericNanoId } from "@app/lib/nanoid";
import { validateHandlebarTemplate } from "@app/lib/template/validate-handlebars";

@@ -71,9 +73,24 @@ export const CassandraProvider = (): TDynamicProviderFns => {
const providerInputs = await validateProviderInputs(inputs);
const client = await $getClient(providerInputs);

const isConnected = await client.execute("SELECT * FROM system_schema.keyspaces").then(() => true);
await client.shutdown();
return isConnected;
try {
const isConnected = await client.execute("SELECT * FROM system_schema.keyspaces").then(() => true);
await client.shutdown();
return isConnected;
} catch (err) {
const tokens = [providerInputs.password, providerInputs.username];
if (providerInputs.keyspace) {
tokens.push(providerInputs.keyspace);
}
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens
});
await client.shutdown();
throw new BadRequestError({
message: `Failed to connect with provider: ${sanitizedErrorMessage}`
});
}
};

const create = async (data: {
@@ -89,23 +106,39 @@ export const CassandraProvider = (): TDynamicProviderFns => {
const username = generateUsername(usernameTemplate, identity);
const password = generatePassword();
const { keyspace } = providerInputs;
const expiration = new Date(expireAt).toISOString();

const creationStatement = handlebars.compile(providerInputs.creationStatement, { noEscape: true })({
username,
password,
expiration,
keyspace
});
try {
const expiration = new Date(expireAt).toISOString();

const queries = creationStatement.toString().split(";").filter(Boolean);
for (const query of queries) {
// eslint-disable-next-line
await client.execute(query);
const creationStatement = handlebars.compile(providerInputs.creationStatement, { noEscape: true })({
username,
password,
expiration,
keyspace
});

const queries = creationStatement.toString().split(";").filter(Boolean);
for (const query of queries) {
// eslint-disable-next-line
await client.execute(query);
}
await client.shutdown();

return { entityId: username, data: { DB_USERNAME: username, DB_PASSWORD: password } };
} catch (err) {
const tokens = [username, password];
if (keyspace) {
tokens.push(keyspace);
}
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens
});
await client.shutdown();
throw new BadRequestError({
message: `Failed to create lease from provider: ${sanitizedErrorMessage}`
});
}
await client.shutdown();

return { entityId: username, data: { DB_USERNAME: username, DB_PASSWORD: password } };
};

const revoke = async (inputs: unknown, entityId: string) => {
@@ -115,14 +148,29 @@ export const CassandraProvider = (): TDynamicProviderFns => {
const username = entityId;
const { keyspace } = providerInputs;

const revokeStatement = handlebars.compile(providerInputs.revocationStatement)({ username, keyspace });
const queries = revokeStatement.toString().split(";").filter(Boolean);
for (const query of queries) {
// eslint-disable-next-line
await client.execute(query);
try {
const revokeStatement = handlebars.compile(providerInputs.revocationStatement)({ username, keyspace });
const queries = revokeStatement.toString().split(";").filter(Boolean);
for (const query of queries) {
// eslint-disable-next-line
await client.execute(query);
}
await client.shutdown();
return { entityId: username };
} catch (err) {
const tokens = [username];
if (keyspace) {
tokens.push(keyspace);
}
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens
});
await client.shutdown();
throw new BadRequestError({
message: `Failed to revoke lease from provider: ${sanitizedErrorMessage}`
});
}
await client.shutdown();
return { entityId: username };
};

const renew = async (inputs: unknown, entityId: string, expireAt: number) => {
@@ -130,21 +178,36 @@ export const CassandraProvider = (): TDynamicProviderFns => {
if (!providerInputs.renewStatement) return { entityId };

const client = await $getClient(providerInputs);

const expiration = new Date(expireAt).toISOString();
const { keyspace } = providerInputs;

const renewStatement = handlebars.compile(providerInputs.renewStatement)({
username: entityId,
keyspace,
expiration
});
const queries = renewStatement.toString().split(";").filter(Boolean);
for await (const query of queries) {
await client.execute(query);
try {
const expiration = new Date(expireAt).toISOString();

const renewStatement = handlebars.compile(providerInputs.renewStatement)({
username: entityId,
keyspace,
expiration
});
const queries = renewStatement.toString().split(";").filter(Boolean);
for await (const query of queries) {
await client.execute(query);
}
await client.shutdown();
return { entityId };
} catch (err) {
const tokens = [entityId];
if (keyspace) {
tokens.push(keyspace);
}
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens
});
await client.shutdown();
throw new BadRequestError({
message: `Failed to renew lease from provider: ${sanitizedErrorMessage}`
});
}
await client.shutdown();
return { entityId };
};

return {
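The Cassandra statements above are handlebars templates: compiled with noEscape so quote characters in CQL are not HTML-escaped, rendered with the lease's values, then split on ";" into individual queries. A sketch with a hypothetical creation statement:

import handlebars from "handlebars";

const template = `CREATE ROLE {{username}} WITH PASSWORD = '{{password}}' AND LOGIN = true;
GRANT SELECT ON KEYSPACE {{keyspace}} TO {{username}};`; // hypothetical statement

const rendered = handlebars.compile(template, { noEscape: true })({
  username: "inf_user_1",
  password: "s3cret",
  keyspace: "app"
});
const queries = rendered.split(";").map((q) => q.trim()).filter(Boolean); // two executable CQL queries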
backend/src/ee/services/dynamic-secret/providers/couchbase.ts (new file, 289 lines)
@@ -0,0 +1,289 @@
import crypto from "node:crypto";

import axios from "axios";
import RE2 from "re2";

import { BadRequestError } from "@app/lib/errors";
import { sanitizeString } from "@app/lib/fn";
import { alphaNumericNanoId } from "@app/lib/nanoid";
import { blockLocalAndPrivateIpAddresses } from "@app/lib/validator/validate-url";

import { DynamicSecretCouchbaseSchema, PasswordRequirements, TDynamicProviderFns } from "./models";
import { compileUsernameTemplate } from "./templateUtils";

type TCreateCouchbaseUser = {
name: string;
password: string;
access: {
privileges: string[];
resources: {
buckets: {
name: string;
scopes?: {
name: string;
collections?: string[];
}[];
}[];
};
}[];
};

type CouchbaseUserResponse = {
id: string;
uuid?: string;
};

const sanitizeCouchbaseUsername = (username: string): string => {
// Couchbase username restrictions:
// - Cannot contain: ) ( > < , ; : " \ / ] [ ? = } {
// - Cannot begin with @ character

const forbiddenCharsPattern = new RE2('[\\)\\(><,;:"\\\\\\[\\]\\?=\\}\\{]', "g");
let sanitized = forbiddenCharsPattern.replace(username, "-");

const leadingAtPattern = new RE2("^@+");
sanitized = leadingAtPattern.replace(sanitized, "");

if (!sanitized || sanitized.length === 0) {
return alphaNumericNanoId(12);
}

return sanitized;
};
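// Illustrative examples (not part of the source file):
//   sanitizeCouchbaseUsername("@dev:user(1)") === "dev-user-1-"
//   sanitizeCouchbaseUsername("@@@") returns a random 12-character fallback id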

/**
* Normalizes bucket configuration to handle wildcard (*) access consistently.
*
* Key behaviors:
* - If "*" appears anywhere (string or array), grants access to ALL buckets, scopes, and collections
*
* @param buckets - Either a string or array of bucket configurations
* @returns Normalized bucket resources for Couchbase API
*/
const normalizeBucketConfiguration = (
buckets:
| string
| Array<{
name: string;
scopes?: Array<{
name: string;
collections?: string[];
}>;
}>
) => {
if (typeof buckets === "string") {
// Simple string format - either "*" or comma-separated bucket names
const bucketNames = buckets
.split(",")
.map((bucket) => bucket.trim())
.filter((bucket) => bucket.length > 0);

// If "*" is present anywhere, grant access to all buckets, scopes, and collections
if (bucketNames.includes("*") || buckets === "*") {
return [{ name: "*" }];
}
return bucketNames.map((bucketName) => ({ name: bucketName }));
}

// Array of bucket objects with scopes and collections
// Check if any bucket is "*" - if so, grant access to all buckets, scopes, and collections
const hasWildcardBucket = buckets.some((bucket) => bucket.name === "*");

if (hasWildcardBucket) {
return [{ name: "*" }];
}

return buckets.map((bucket) => ({
name: bucket.name,
scopes: bucket.scopes?.map((scope) => ({
name: scope.name,
collections: scope.collections || []
}))
}));
};
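// Illustrative examples (not part of the source file):
//   normalizeBucketConfiguration("bucket-a, bucket-b") -> [{ name: "bucket-a" }, { name: "bucket-b" }]
//   normalizeBucketConfiguration("*") -> [{ name: "*" }]
//   normalizeBucketConfiguration([{ name: "*" }, { name: "other" }]) -> [{ name: "*" }]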

const generateUsername = (usernameTemplate?: string | null, identity?: { name: string }) => {
const randomUsername = alphaNumericNanoId(12);
if (!usernameTemplate) return sanitizeCouchbaseUsername(randomUsername);

const compiledUsername = compileUsernameTemplate({
usernameTemplate,
randomUsername,
identity
});

return sanitizeCouchbaseUsername(compiledUsername);
};

const generatePassword = (requirements?: PasswordRequirements): string => {
const {
length = 12,
required = { lowercase: 1, uppercase: 1, digits: 1, symbols: 1 },
allowedSymbols = "!@#$%^()_+-=[]{}:,?/~`"
} = requirements || {};

const lowercase = "abcdefghijklmnopqrstuvwxyz";
const uppercase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
const digits = "0123456789";
const symbols = allowedSymbols;

let password = "";
let remaining = length;

// Add required characters
for (let i = 0; i < required.lowercase; i += 1) {
password += lowercase[crypto.randomInt(lowercase.length)];
remaining -= 1;
}
for (let i = 0; i < required.uppercase; i += 1) {
password += uppercase[crypto.randomInt(uppercase.length)];
remaining -= 1;
}
for (let i = 0; i < required.digits; i += 1) {
password += digits[crypto.randomInt(digits.length)];
remaining -= 1;
}
for (let i = 0; i < required.symbols; i += 1) {
password += symbols[crypto.randomInt(symbols.length)];
remaining -= 1;
}

// Fill remaining with random characters from all sets
const allChars = lowercase + uppercase + digits + symbols;
for (let i = 0; i < remaining; i += 1) {
password += allChars[crypto.randomInt(allChars.length)];
}

// Shuffle the password
return password
.split("")
.sort(() => crypto.randomInt(3) - 1)
.join("");
};
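// Illustrative example (not part of the source file): with no argument,
// the defaults above produce a 12-character password containing at least
// one lowercase, one uppercase, one digit, and one symbol from allowedSymbols,
// e.g. generatePassword() -> "q7Dk)x2mPa!c" (output is random per call).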

const couchbaseApiRequest = async (
method: string,
url: string,
apiKey: string,
data?: unknown
): Promise<CouchbaseUserResponse> => {
await blockLocalAndPrivateIpAddresses(url);

try {
const response = await axios({
method: method.toLowerCase() as "get" | "post" | "put" | "delete",
url,
headers: {
Authorization: `Bearer ${apiKey}`,
"Content-Type": "application/json"
},
data: data || undefined,
timeout: 30000
});

return response.data as CouchbaseUserResponse;
} catch (err) {
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [apiKey]
});
throw new BadRequestError({
message: `Failed to connect with provider: ${sanitizedErrorMessage}`
});
}
};

export const CouchbaseProvider = (): TDynamicProviderFns => {
const validateProviderInputs = async (inputs: object) => {
const providerInputs = DynamicSecretCouchbaseSchema.parse(inputs);

await blockLocalAndPrivateIpAddresses(providerInputs.url);

return providerInputs;
};

const validateConnection = async (inputs: unknown): Promise<boolean> => {
try {
const providerInputs = await validateProviderInputs(inputs as object);

// Test connection by trying to get organization info
const url = `${providerInputs.url}/v4/organizations/${providerInputs.orgId}`;
await couchbaseApiRequest("GET", url, providerInputs.auth.apiKey);

return true;
} catch (error) {
throw new BadRequestError({
message: `Failed to connect to Couchbase: ${error instanceof Error ? error.message : "Unknown error"}`
});
}
};

const create = async ({
inputs,
usernameTemplate,
identity
}: {
inputs: unknown;
usernameTemplate?: string | null;
identity?: { name: string };
}) => {
const providerInputs = await validateProviderInputs(inputs as object);

const username = generateUsername(usernameTemplate, identity);

const password = generatePassword(providerInputs.passwordRequirements);

const createUserUrl = `${providerInputs.url}/v4/organizations/${providerInputs.orgId}/projects/${providerInputs.projectId}/clusters/${providerInputs.clusterId}/users`;

const bucketResources = normalizeBucketConfiguration(providerInputs.buckets);

const userData: TCreateCouchbaseUser = {
name: username,
password,
access: [
{
privileges: providerInputs.roles,
resources: {
buckets: bucketResources
}
}
]
};

const response = await couchbaseApiRequest("POST", createUserUrl, providerInputs.auth.apiKey, userData);

const userUuid = response?.id || response?.uuid || username;

return {
entityId: userUuid,
data: {
username,
password
}
};
};

const revoke = async (inputs: unknown, entityId: string) => {
const providerInputs = await validateProviderInputs(inputs as object);

const deleteUserUrl = `${providerInputs.url}/v4/organizations/${providerInputs.orgId}/projects/${providerInputs.projectId}/clusters/${providerInputs.clusterId}/users/${encodeURIComponent(entityId)}`;

await couchbaseApiRequest("DELETE", deleteUserUrl, providerInputs.auth.apiKey);

return { entityId };
};

const renew = async (_inputs: unknown, entityId: string) => {
// Couchbase Cloud API doesn't support renewing user credentials
// The user remains valid until explicitly deleted
return { entityId };
};

return {
validateProviderInputs,
validateConnection,
create,
revoke,
renew
};
};
@@ -2,6 +2,8 @@ import { Client as ElasticSearchClient } from "@elastic/elasticsearch";
import { customAlphabet } from "nanoid";
import { z } from "zod";

import { BadRequestError } from "@app/lib/errors";
import { sanitizeString } from "@app/lib/fn";
import { alphaNumericNanoId } from "@app/lib/nanoid";

import { verifyHostInputValidity } from "../dynamic-secret-fns";
@@ -63,12 +65,24 @@ export const ElasticSearchProvider = (): TDynamicProviderFns => {
const providerInputs = await validateProviderInputs(inputs);
const connection = await $getClient(providerInputs);

const infoResponse = await connection
.info()
.then(() => true)
.catch(() => false);

return infoResponse;
try {
const infoResponse = await connection.info().then(() => true);
return infoResponse;
} catch (err) {
const tokens = [];
if (providerInputs.auth.type === ElasticSearchAuthTypes.ApiKey) {
tokens.push(providerInputs.auth.apiKey, providerInputs.auth.apiKeyId);
} else {
tokens.push(providerInputs.auth.username, providerInputs.auth.password);
}
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens
});
throw new BadRequestError({
message: `Failed to connect with provider: ${sanitizedErrorMessage}`
});
}
};

const create = async (data: { inputs: unknown; usernameTemplate?: string | null; identity?: { name: string } }) => {
@@ -79,27 +93,49 @@ export const ElasticSearchProvider = (): TDynamicProviderFns => {
const username = generateUsername(usernameTemplate, identity);
const password = generatePassword();

await connection.security.putUser({
username,
password,
full_name: "Managed by Infisical.com",
roles: providerInputs.roles
});
try {
await connection.security.putUser({
username,
password,
full_name: "Managed by Infisical.com",
roles: providerInputs.roles
});

await connection.close();
return { entityId: username, data: { DB_USERNAME: username, DB_PASSWORD: password } };
await connection.close();
return { entityId: username, data: { DB_USERNAME: username, DB_PASSWORD: password } };
} catch (err) {
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [username, password]
});
await connection.close();
throw new BadRequestError({
message: `Failed to create lease from provider: ${sanitizedErrorMessage}`
});
}
};

const revoke = async (inputs: unknown, entityId: string) => {
const providerInputs = await validateProviderInputs(inputs);
const connection = await $getClient(providerInputs);

await connection.security.deleteUser({
username: entityId
});
try {
await connection.security.deleteUser({
username: entityId
});

await connection.close();
return { entityId };
await connection.close();
return { entityId };
} catch (err) {
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [entityId]
});
await connection.close();
throw new BadRequestError({
message: `Failed to revoke lease from provider: ${sanitizedErrorMessage}`
});
}
};

const renew = async (_inputs: unknown, entityId: string) => {
@@ -3,6 +3,7 @@ import { GetAccessTokenResponse } from "google-auth-library/build/src/auth/oauth

import { getConfig } from "@app/lib/config/env";
import { BadRequestError, InternalServerError } from "@app/lib/errors";
import { sanitizeString } from "@app/lib/fn";
import { alphaNumericNanoId } from "@app/lib/nanoid";

import { DynamicSecretGcpIamSchema, TDynamicProviderFns } from "./models";
@@ -65,8 +66,18 @@ export const GcpIamProvider = (): TDynamicProviderFns => {

const validateConnection = async (inputs: unknown) => {
const providerInputs = await validateProviderInputs(inputs);
await $getToken(providerInputs.serviceAccountEmail, 10);
return true;
try {
await $getToken(providerInputs.serviceAccountEmail, 10);
return true;
} catch (err) {
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [providerInputs.serviceAccountEmail]
});
throw new BadRequestError({
message: `Failed to connect with provider: ${sanitizedErrorMessage}`
});
}
};

const create = async (data: { inputs: unknown; expireAt: number }) => {
@@ -74,13 +85,23 @@ export const GcpIamProvider = (): TDynamicProviderFns => {

const providerInputs = await validateProviderInputs(inputs);

const now = Math.floor(Date.now() / 1000);
const ttl = Math.max(Math.floor(expireAt / 1000) - now, 0);
try {
const now = Math.floor(Date.now() / 1000);
const ttl = Math.max(Math.floor(expireAt / 1000) - now, 0);

const token = await $getToken(providerInputs.serviceAccountEmail, ttl);
const entityId = alphaNumericNanoId(32);
const token = await $getToken(providerInputs.serviceAccountEmail, ttl);
const entityId = alphaNumericNanoId(32);

return { entityId, data: { SERVICE_ACCOUNT_EMAIL: providerInputs.serviceAccountEmail, TOKEN: token } };
return { entityId, data: { SERVICE_ACCOUNT_EMAIL: providerInputs.serviceAccountEmail, TOKEN: token } };
} catch (err) {
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [providerInputs.serviceAccountEmail]
});
throw new BadRequestError({
message: `Failed to create lease from provider: ${sanitizedErrorMessage}`
});
}
};

const revoke = async (_inputs: unknown, entityId: string) => {
@@ -89,10 +110,21 @@ export const GcpIamProvider = (): TDynamicProviderFns => {
};

const renew = async (inputs: unknown, entityId: string, expireAt: number) => {
// To renew a token it must be re-created
const data = await create({ inputs, expireAt });
try {
// To renew a token it must be re-created
const data = await create({ inputs, expireAt });

return { ...data, entityId };
return { ...data, entityId };
} catch (err) {
const providerInputs = await validateProviderInputs(inputs);
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [providerInputs.serviceAccountEmail]
});
throw new BadRequestError({
message: `Failed to renew lease from provider: ${sanitizedErrorMessage}`
});
}
};

return {
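The TTL math above converts an absolute expireAt deadline (epoch milliseconds) into the relative lifetime in seconds that the token call expects, clamped at zero for already-expired leases. A sketch:

const ttlSeconds = (expireAt: number, nowMs: number = Date.now()): number =>
  Math.max(Math.floor(expireAt / 1000) - Math.floor(nowMs / 1000), 0);

ttlSeconds(Date.now() + 3_600_000); // ~3600, a one-hour lease
ttlSeconds(Date.now() - 1_000);     // 0, already expired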
@@ -3,6 +3,7 @@ import jwt from "jsonwebtoken";

import { crypto } from "@app/lib/crypto";
import { BadRequestError, InternalServerError } from "@app/lib/errors";
import { sanitizeString } from "@app/lib/fn";
import { alphaNumericNanoId } from "@app/lib/nanoid";
import { IntegrationUrls } from "@app/services/integration-auth/integration-list";

@@ -89,26 +90,46 @@ export const GithubProvider = (): TDynamicProviderFns => {

const validateConnection = async (inputs: unknown) => {
const providerInputs = await validateProviderInputs(inputs);
await $generateGitHubInstallationAccessToken(providerInputs);
return true;
try {
await $generateGitHubInstallationAccessToken(providerInputs);
return true;
} catch (err) {
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [providerInputs.privateKey, String(providerInputs.appId), String(providerInputs.installationId)]
});
throw new BadRequestError({
message: `Failed to connect with provider: ${sanitizedErrorMessage}`
});
}
};

const create = async (data: { inputs: unknown }) => {
const { inputs } = data;
const providerInputs = await validateProviderInputs(inputs);

const ghTokenData = await $generateGitHubInstallationAccessToken(providerInputs);
const entityId = alphaNumericNanoId(32);
try {
const ghTokenData = await $generateGitHubInstallationAccessToken(providerInputs);
const entityId = alphaNumericNanoId(32);

return {
entityId,
data: {
TOKEN: ghTokenData.token,
EXPIRES_AT: ghTokenData.expires_at,
PERMISSIONS: ghTokenData.permissions,
REPOSITORY_SELECTION: ghTokenData.repository_selection
}
};
return {
entityId,
data: {
TOKEN: ghTokenData.token,
EXPIRES_AT: ghTokenData.expires_at,
PERMISSIONS: ghTokenData.permissions,
REPOSITORY_SELECTION: ghTokenData.repository_selection
}
};
} catch (err) {
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [providerInputs.privateKey, String(providerInputs.appId), String(providerInputs.installationId)]
});
throw new BadRequestError({
message: `Failed to create lease from provider: ${sanitizedErrorMessage}`
});
}
};

const revoke = async () => {
@@ -5,6 +5,7 @@ import { AwsElastiCacheDatabaseProvider } from "./aws-elasticache";
import { AwsIamProvider } from "./aws-iam";
import { AzureEntraIDProvider } from "./azure-entra-id";
import { CassandraProvider } from "./cassandra";
import { CouchbaseProvider } from "./couchbase";
import { ElasticSearchProvider } from "./elastic-search";
import { GcpIamProvider } from "./gcp-iam";
import { GithubProvider } from "./github";
@@ -46,5 +47,6 @@ export const buildDynamicSecretProviders = ({
[DynamicSecretProviders.Kubernetes]: KubernetesProvider({ gatewayService }),
[DynamicSecretProviders.Vertica]: VerticaProvider({ gatewayService }),
[DynamicSecretProviders.GcpIam]: GcpIamProvider(),
[DynamicSecretProviders.Github]: GithubProvider()
[DynamicSecretProviders.Github]: GithubProvider(),
[DynamicSecretProviders.Couchbase]: CouchbaseProvider()
});
@@ -2,7 +2,8 @@ import axios, { AxiosError } from "axios";
import handlebars from "handlebars";
import https from "https";

import { BadRequestError, InternalServerError } from "@app/lib/errors";
import { BadRequestError } from "@app/lib/errors";
import { sanitizeString } from "@app/lib/fn";
import { GatewayHttpProxyActions, GatewayProxyProtocol, withGatewayProxy } from "@app/lib/gateway";
import { alphaNumericNanoId } from "@app/lib/nanoid";
import { blockLocalAndPrivateIpAddresses } from "@app/lib/validator";
@@ -356,8 +357,12 @@ export const KubernetesProvider = ({ gatewayService }: TKubernetesProviderDTO):
errorMessage = (error.response?.data as { message: string }).message;
}

throw new InternalServerError({
message: `Failed to validate connection: ${errorMessage}`
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: errorMessage,
tokens: [providerInputs.clusterToken || ""]
});
throw new BadRequestError({
message: `Failed to connect with provider: ${sanitizedErrorMessage}`
});
}
};
@@ -602,8 +607,12 @@ export const KubernetesProvider = ({ gatewayService }: TKubernetesProviderDTO):
errorMessage = (error.response?.data as { message: string }).message;
}

throw new InternalServerError({
message: `Failed to create dynamic secret: ${errorMessage}`
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: errorMessage,
tokens: [providerInputs.clusterToken || ""]
});
throw new BadRequestError({
message: `Failed to create lease from provider: ${sanitizedErrorMessage}`
});
}
};
@@ -683,50 +692,65 @@ export const KubernetesProvider = ({ gatewayService }: TKubernetesProviderDTO):
};

if (providerInputs.credentialType === KubernetesCredentialType.Dynamic) {
const rawUrl =
providerInputs.authMethod === KubernetesAuthMethod.Gateway
? GATEWAY_AUTH_DEFAULT_URL
: providerInputs.url || "";
try {
const rawUrl =
providerInputs.authMethod === KubernetesAuthMethod.Gateway
? GATEWAY_AUTH_DEFAULT_URL
: providerInputs.url || "";

const url = new URL(rawUrl);
const k8sGatewayHost = url.hostname;
const k8sPort = url.port ? Number(url.port) : 443;
const k8sHost = `${url.protocol}//${url.hostname}`;
const url = new URL(rawUrl);
const k8sGatewayHost = url.hostname;
const k8sPort = url.port ? Number(url.port) : 443;
const k8sHost = `${url.protocol}//${url.hostname}`;

const httpsAgent =
providerInputs.ca && providerInputs.sslEnabled
? new https.Agent({
ca: providerInputs.ca,
rejectUnauthorized: true
})
: undefined;
const httpsAgent =
providerInputs.ca && providerInputs.sslEnabled
? new https.Agent({
ca: providerInputs.ca,
rejectUnauthorized: true
})
: undefined;

if (providerInputs.gatewayId) {
if (providerInputs.authMethod === KubernetesAuthMethod.Gateway) {
await $gatewayProxyWrapper(
{
gatewayId: providerInputs.gatewayId,
targetHost: k8sHost,
targetPort: k8sPort,
httpsAgent,
reviewTokenThroughGateway: true
},
serviceAccountDynamicCallback
);
if (providerInputs.gatewayId) {
if (providerInputs.authMethod === KubernetesAuthMethod.Gateway) {
await $gatewayProxyWrapper(
{
gatewayId: providerInputs.gatewayId,
targetHost: k8sHost,
targetPort: k8sPort,
httpsAgent,
reviewTokenThroughGateway: true
},
serviceAccountDynamicCallback
);
} else {
await $gatewayProxyWrapper(
{
gatewayId: providerInputs.gatewayId,
targetHost: k8sGatewayHost,
targetPort: k8sPort,
httpsAgent,
reviewTokenThroughGateway: false
},
serviceAccountDynamicCallback
);
}
} else {
await $gatewayProxyWrapper(
{
gatewayId: providerInputs.gatewayId,
targetHost: k8sGatewayHost,
targetPort: k8sPort,
httpsAgent,
reviewTokenThroughGateway: false
},
serviceAccountDynamicCallback
);
await serviceAccountDynamicCallback(k8sHost, k8sPort, httpsAgent);
}
} else {
await serviceAccountDynamicCallback(k8sHost, k8sPort, httpsAgent);
} catch (error) {
let errorMessage = error instanceof Error ? error.message : "Unknown error";
if (axios.isAxiosError(error) && (error.response?.data as { message: string })?.message) {
errorMessage = (error.response?.data as { message: string }).message;
}

const sanitizedErrorMessage = sanitizeString({
unsanitizedString: errorMessage,
tokens: [entityId, providerInputs.clusterToken || ""]
});
throw new BadRequestError({
message: `Failed to revoke lease from provider: ${sanitizedErrorMessage}`
});
}
}
@@ -6,6 +6,7 @@ import RE2 from "re2";
import { z } from "zod";

import { BadRequestError } from "@app/lib/errors";
import { sanitizeString } from "@app/lib/fn";
import { alphaNumericNanoId } from "@app/lib/nanoid";

import { LdapCredentialType, LdapSchema, TDynamicProviderFns } from "./models";
@@ -91,8 +92,18 @@ export const LdapProvider = (): TDynamicProviderFns => {

const validateConnection = async (inputs: unknown) => {
const providerInputs = await validateProviderInputs(inputs);
const client = await $getClient(providerInputs);
return client.connected;
try {
const client = await $getClient(providerInputs);
return client.connected;
} catch (err) {
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [providerInputs.bindpass, providerInputs.binddn]
});
throw new BadRequestError({
message: `Failed to connect with provider: ${sanitizedErrorMessage}`
});
}
};

const executeLdif = async (client: ldapjs.Client, ldif_file: string) => {
@@ -205,11 +216,11 @@ export const LdapProvider = (): TDynamicProviderFns => {
if (providerInputs.credentialType === LdapCredentialType.Static) {
const dnRegex = new RE2("^dn:\\s*(.+)", "m");
const dnMatch = dnRegex.exec(providerInputs.rotationLdif);
const username = dnMatch?.[1];
if (!username) throw new BadRequestError({ message: "Username not found from Ldif" });
const password = generatePassword();

if (dnMatch) {
const username = dnMatch[1];
const password = generatePassword();

const generatedLdif = generateLDIF({ username, password, ldifTemplate: providerInputs.rotationLdif });

try {
@@ -217,7 +228,11 @@ export const LdapProvider = (): TDynamicProviderFns => {

return { entityId: username, data: { DN_ARRAY: dnArray, USERNAME: username, PASSWORD: password } };
} catch (err) {
throw new BadRequestError({ message: (err as Error).message });
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [username, password, providerInputs.binddn, providerInputs.bindpass]
});
throw new BadRequestError({ message: sanitizedErrorMessage });
}
} else {
throw new BadRequestError({
@@ -238,7 +253,11 @@ export const LdapProvider = (): TDynamicProviderFns => {
const rollbackLdif = generateLDIF({ username, password, ldifTemplate: providerInputs.rollbackLdif });
await executeLdif(client, rollbackLdif);
}
throw new BadRequestError({ message: (err as Error).message });
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [username, password, providerInputs.binddn, providerInputs.bindpass]
});
throw new BadRequestError({ message: sanitizedErrorMessage });
}
}
};
@@ -262,7 +281,11 @@ export const LdapProvider = (): TDynamicProviderFns => {

return { entityId: username, data: { DN_ARRAY: dnArray, USERNAME: username, PASSWORD: password } };
} catch (err) {
throw new BadRequestError({ message: (err as Error).message });
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [username, password, providerInputs.binddn, providerInputs.bindpass]
});
throw new BadRequestError({ message: sanitizedErrorMessage });
}
} else {
throw new BadRequestError({
@@ -278,7 +301,7 @@ export const LdapProvider = (): TDynamicProviderFns => {
return { entityId };
};

const renew = async (inputs: unknown, entityId: string) => {
const renew = async (_inputs: unknown, entityId: string) => {
// No renewal necessary
return { entityId };
};
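The Static branch above derives the lease username from the first "dn:" line of the rotation LDIF via RE2. A small illustrative run of that same pattern (the LDIF content here is hypothetical):

import RE2 from "re2";

// "m" makes ^ match at each line start; (.+) captures the remainder of that line.
const rotationLdif = "dn: cn=svc-rotate,ou=users,dc=example,dc=com\nchangetype: modify";
const dnMatch = new RE2("^dn:\\s*(.+)", "m").exec(rotationLdif);
const username = dnMatch?.[1]; // "cn=svc-rotate,ou=users,dc=example,dc=com"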
@@ -505,6 +505,91 @@ export const DynamicSecretGithubSchema = z.object({
.describe("The private key generated for your GitHub App.")
});

export const DynamicSecretCouchbaseSchema = z.object({
url: z.string().url().trim().min(1).describe("Couchbase Cloud API URL"),
orgId: z.string().trim().min(1).describe("Organization ID"),
projectId: z.string().trim().min(1).describe("Project ID"),
clusterId: z.string().trim().min(1).describe("Cluster ID"),
roles: z.array(z.string().trim().min(1)).min(1).describe("Roles to assign to the user"),
buckets: z
.union([
z
.string()
.trim()
.min(1)
.default("*")
.refine((val) => {
if (val.includes(",")) {
const buckets = val
.split(",")
.map((b) => b.trim())
.filter((b) => b.length > 0);
if (buckets.includes("*") && buckets.length > 1) {
return false;
}
}
return true;
}, "Cannot combine '*' with other bucket names"),
z
.array(
z.object({
name: z.string().trim().min(1).describe("Bucket name"),
scopes: z
.array(
z.object({
name: z.string().trim().min(1).describe("Scope name"),
collections: z.array(z.string().trim().min(1)).optional().describe("Collection names")
})
)
.optional()
.describe("Scopes within the bucket")
})
)
.refine((buckets) => {
const hasWildcard = buckets.some((bucket) => bucket.name === "*");
if (hasWildcard && buckets.length > 1) {
return false;
}
return true;
}, "Cannot combine '*' bucket with other buckets")
])
.default("*")
.describe(
"Bucket configuration: '*' for all buckets, scopes, and collections or array of bucket objects with specific scopes and collections"
),
passwordRequirements: z
.object({
length: z.number().min(8, "Password must be at least 8 characters").max(128),
required: z
.object({
lowercase: z.number().min(1, "At least 1 lowercase character required"),
uppercase: z.number().min(1, "At least 1 uppercase character required"),
digits: z.number().min(1, "At least 1 digit required"),
symbols: z.number().min(1, "At least 1 special character required")
})
.refine((data) => {
const total = Object.values(data).reduce((sum, count) => sum + count, 0);
return total <= 128;
}, "Sum of required characters cannot exceed 128"),
allowedSymbols: z
.string()
.refine((symbols) => {
const forbiddenChars = ["<", ">", ";", ".", "*", "&", "|", "£"];
return !forbiddenChars.some((char) => symbols?.includes(char));
}, "Cannot contain: < > ; . * & | £")
.optional()
})
.refine((data) => {
const total = Object.values(data.required).reduce((sum, count) => sum + count, 0);
return total <= data.length;
}, "Sum of required characters cannot exceed the total length")
.optional()
.describe("Password generation requirements for Couchbase"),
auth: z.object({
apiKey: z.string().trim().min(1).describe("Couchbase Cloud API Key")
})
});

export enum DynamicSecretProviders {
SqlDatabase = "sql-database",
Cassandra = "cassandra",
@@ -524,7 +609,8 @@ export enum DynamicSecretProviders {
Kubernetes = "kubernetes",
Vertica = "vertica",
GcpIam = "gcp-iam",
Github = "github"
Github = "github",
Couchbase = "couchbase"
}

export const DynamicSecretProviderSchema = z.discriminatedUnion("type", [
@@ -546,7 +632,8 @@ export const DynamicSecretProviderSchema = z.discriminatedUnion("type", [
z.object({ type: z.literal(DynamicSecretProviders.Kubernetes), inputs: DynamicSecretKubernetesSchema }),
z.object({ type: z.literal(DynamicSecretProviders.Vertica), inputs: DynamicSecretVerticaSchema }),
z.object({ type: z.literal(DynamicSecretProviders.GcpIam), inputs: DynamicSecretGcpIamSchema }),
z.object({ type: z.literal(DynamicSecretProviders.Github), inputs: DynamicSecretGithubSchema })
z.object({ type: z.literal(DynamicSecretProviders.Github), inputs: DynamicSecretGithubSchema }),
z.object({ type: z.literal(DynamicSecretProviders.Couchbase), inputs: DynamicSecretCouchbaseSchema })
]);

export type TDynamicProviderFns = {
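The buckets refinements above reject any configuration that mixes the "*" wildcard with named buckets, in both the string and the array form. A quick illustration of what the schema accepts and rejects (values are illustrative; assumes DynamicSecretCouchbaseSchema is imported from this models file):

// Accepted: comma-separated bucket names without a wildcard.
const ok = DynamicSecretCouchbaseSchema.safeParse({
  url: "https://cloudapi.cloud.couchbase.com",
  orgId: "org-123",
  projectId: "proj-456",
  clusterId: "cluster-789",
  roles: ["read"],
  buckets: "bucket1,bucket2",
  auth: { apiKey: "example-api-key" }
});
// ok.success === true

// Rejected: "*" combined with a named bucket trips the refinement
// "Cannot combine '*' with other bucket names".
const bad = DynamicSecretCouchbaseSchema.safeParse({
  url: "https://cloudapi.cloud.couchbase.com",
  orgId: "org-123",
  projectId: "proj-456",
  clusterId: "cluster-789",
  roles: ["read"],
  buckets: "*,bucket1",
  auth: { apiKey: "example-api-key" }
});
// bad.success === false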
@@ -3,6 +3,8 @@ import { customAlphabet } from "nanoid";
import { z } from "zod";

import { createDigestAuthRequestInterceptor } from "@app/lib/axios/digest-auth";
import { BadRequestError } from "@app/lib/errors";
import { sanitizeString } from "@app/lib/fn";
import { alphaNumericNanoId } from "@app/lib/nanoid";

import { DynamicSecretMongoAtlasSchema, TDynamicProviderFns } from "./models";
@@ -49,19 +51,25 @@ export const MongoAtlasProvider = (): TDynamicProviderFns => {
const providerInputs = await validateProviderInputs(inputs);
const client = await $getClient(providerInputs);

const isConnected = await client({
method: "GET",
url: `v2/groups/${providerInputs.groupId}/databaseUsers`,
params: { itemsPerPage: 1 }
})
.then(() => true)
.catch((error) => {
if ((error as AxiosError).response) {
throw new Error(JSON.stringify((error as AxiosError).response?.data));
}
throw error;
try {
const isConnected = await client({
method: "GET",
url: `v2/groups/${providerInputs.groupId}/databaseUsers`,
params: { itemsPerPage: 1 }
}).then(() => true);
return isConnected;
} catch (error) {
const errorMessage = (error as AxiosError).response
? JSON.stringify((error as AxiosError).response?.data)
: (error as Error)?.message;
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: errorMessage,
tokens: [providerInputs.adminPublicKey, providerInputs.adminPrivateKey, providerInputs.groupId]
});
return isConnected;
throw new BadRequestError({
message: `Failed to connect with provider: ${sanitizedErrorMessage}`
});
}
};

const create = async (data: {
@@ -77,25 +85,39 @@ export const MongoAtlasProvider = (): TDynamicProviderFns => {
const username = generateUsername(usernameTemplate, identity);
const password = generatePassword();
const expiration = new Date(expireAt).toISOString();
await client({
method: "POST",
url: `/v2/groups/${providerInputs.groupId}/databaseUsers`,
data: {
roles: providerInputs.roles,
scopes: providerInputs.scopes,
deleteAfterDate: expiration,
username,
password,
databaseName: "admin",
groupId: providerInputs.groupId
}
}).catch((error) => {
if ((error as AxiosError).response) {
throw new Error(JSON.stringify((error as AxiosError).response?.data));
}
throw error;
});
return { entityId: username, data: { DB_USERNAME: username, DB_PASSWORD: password } };
try {
await client({
method: "POST",
url: `/v2/groups/${providerInputs.groupId}/databaseUsers`,
data: {
roles: providerInputs.roles,
scopes: providerInputs.scopes,
deleteAfterDate: expiration,
username,
password,
databaseName: "admin",
groupId: providerInputs.groupId
}
});
return { entityId: username, data: { DB_USERNAME: username, DB_PASSWORD: password } };
} catch (error) {
const errorMessage = (error as AxiosError).response
? JSON.stringify((error as AxiosError).response?.data)
: (error as Error)?.message;
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: errorMessage,
tokens: [
username,
password,
providerInputs.adminPublicKey,
providerInputs.adminPrivateKey,
providerInputs.groupId
]
});
throw new BadRequestError({
message: `Failed to create lease from provider: ${sanitizedErrorMessage}`
});
}
};

const revoke = async (inputs: unknown, entityId: string) => {
@@ -111,15 +133,23 @@ export const MongoAtlasProvider = (): TDynamicProviderFns => {
throw err;
});
if (isExisting) {
await client({
method: "DELETE",
url: `/v2/groups/${providerInputs.groupId}/databaseUsers/admin/${username}`
}).catch((error) => {
if ((error as AxiosError).response) {
throw new Error(JSON.stringify((error as AxiosError).response?.data));
}
throw error;
});
try {
await client({
method: "DELETE",
url: `/v2/groups/${providerInputs.groupId}/databaseUsers/admin/${username}`
});
} catch (error) {
const errorMessage = (error as AxiosError).response
? JSON.stringify((error as AxiosError).response?.data)
: (error as Error)?.message;
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: errorMessage,
tokens: [username, providerInputs.adminPublicKey, providerInputs.adminPrivateKey, providerInputs.groupId]
});
throw new BadRequestError({
message: `Failed to revoke lease from provider: ${sanitizedErrorMessage}`
});
}
}

return { entityId: username };
@@ -132,21 +162,29 @@ export const MongoAtlasProvider = (): TDynamicProviderFns => {
const username = entityId;
const expiration = new Date(expireAt).toISOString();

await client({
method: "PATCH",
url: `/v2/groups/${providerInputs.groupId}/databaseUsers/admin/${username}`,
data: {
deleteAfterDate: expiration,
databaseName: "admin",
groupId: providerInputs.groupId
}
}).catch((error) => {
if ((error as AxiosError).response) {
throw new Error(JSON.stringify((error as AxiosError).response?.data));
}
throw error;
});
return { entityId: username };
try {
await client({
method: "PATCH",
url: `/v2/groups/${providerInputs.groupId}/databaseUsers/admin/${username}`,
data: {
deleteAfterDate: expiration,
databaseName: "admin",
groupId: providerInputs.groupId
}
});
return { entityId: username };
} catch (error) {
const errorMessage = (error as AxiosError).response
? JSON.stringify((error as AxiosError).response?.data)
: (error as Error)?.message;
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: errorMessage,
tokens: [username, providerInputs.adminPublicKey, providerInputs.adminPrivateKey, providerInputs.groupId]
});
throw new BadRequestError({
message: `Failed to renew lease from provider: ${sanitizedErrorMessage}`
});
}
};

return {
@@ -2,6 +2,8 @@ import { MongoClient } from "mongodb";
import { customAlphabet } from "nanoid";
import { z } from "zod";

import { BadRequestError } from "@app/lib/errors";
import { sanitizeString } from "@app/lib/fn";
import { alphaNumericNanoId } from "@app/lib/nanoid";

import { verifyHostInputValidity } from "../dynamic-secret-fns";
@@ -51,13 +53,24 @@ export const MongoDBProvider = (): TDynamicProviderFns => {
const providerInputs = await validateProviderInputs(inputs);
const client = await $getClient(providerInputs);

const isConnected = await client
.db(providerInputs.database)
.command({ ping: 1 })
.then(() => true);
try {
const isConnected = await client
.db(providerInputs.database)
.command({ ping: 1 })
.then(() => true);

await client.close();
return isConnected;
await client.close();
return isConnected;
} catch (err) {
await client.close();
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [providerInputs.password, providerInputs.username, providerInputs.database, providerInputs.host]
});
throw new BadRequestError({
message: `Failed to connect with provider: ${sanitizedErrorMessage}`
});
}
};

const create = async (data: { inputs: unknown; usernameTemplate?: string | null; identity?: { name: string } }) => {
@@ -68,16 +81,27 @@ export const MongoDBProvider = (): TDynamicProviderFns => {
const username = generateUsername(usernameTemplate, identity);
const password = generatePassword();

const db = client.db(providerInputs.database);
try {
const db = client.db(providerInputs.database);

await db.command({
createUser: username,
pwd: password,
roles: providerInputs.roles
});
await client.close();
await db.command({
createUser: username,
pwd: password,
roles: providerInputs.roles
});
await client.close();

return { entityId: username, data: { DB_USERNAME: username, DB_PASSWORD: password } };
return { entityId: username, data: { DB_USERNAME: username, DB_PASSWORD: password } };
} catch (err) {
await client.close();
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [username, password, providerInputs.password, providerInputs.username, providerInputs.database]
});
throw new BadRequestError({
message: `Failed to create lease from provider: ${sanitizedErrorMessage}`
});
}
};

const revoke = async (inputs: unknown, entityId: string) => {
@@ -86,13 +110,24 @@ export const MongoDBProvider = (): TDynamicProviderFns => {

const username = entityId;

const db = client.db(providerInputs.database);
await db.command({
dropUser: username
});
await client.close();
try {
const db = client.db(providerInputs.database);
await db.command({
dropUser: username
});
await client.close();

return { entityId: username };
return { entityId: username };
} catch (err) {
await client.close();
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [username, providerInputs.password, providerInputs.username, providerInputs.database]
});
throw new BadRequestError({
message: `Failed to revoke lease from provider: ${sanitizedErrorMessage}`
});
}
};

const renew = async (_inputs: unknown, entityId: string) => {
@@ -3,6 +3,8 @@ import https from "https";
import { customAlphabet } from "nanoid";
import { z } from "zod";

import { BadRequestError } from "@app/lib/errors";
import { sanitizeString } from "@app/lib/fn";
import { logger } from "@app/lib/logger";
import { alphaNumericNanoId } from "@app/lib/nanoid";

@@ -110,11 +112,19 @@ export const RabbitMqProvider = (): TDynamicProviderFns => {

const validateConnection = async (inputs: unknown) => {
const providerInputs = await validateProviderInputs(inputs);
const connection = await $getClient(providerInputs);

const infoResponse = await connection.get("/whoami").then(() => true);

return infoResponse;
try {
const connection = await $getClient(providerInputs);
const infoResponse = await connection.get("/whoami").then(() => true);
return infoResponse;
} catch (err) {
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [providerInputs.password, providerInputs.username, providerInputs.host]
});
throw new BadRequestError({
message: `Failed to connect with provider: ${sanitizedErrorMessage}`
});
}
};

const create = async (data: { inputs: unknown; usernameTemplate?: string | null; identity?: { name: string } }) => {
@@ -125,26 +135,44 @@ export const RabbitMqProvider = (): TDynamicProviderFns => {
const username = generateUsername(usernameTemplate, identity);
const password = generatePassword();

await createRabbitMqUser({
axiosInstance: connection,
virtualHost: providerInputs.virtualHost,
createUser: {
password,
username,
tags: [...(providerInputs.tags ?? []), "infisical-user"]
}
});

return { entityId: username, data: { DB_USERNAME: username, DB_PASSWORD: password } };
try {
await createRabbitMqUser({
axiosInstance: connection,
virtualHost: providerInputs.virtualHost,
createUser: {
password,
username,
tags: [...(providerInputs.tags ?? []), "infisical-user"]
}
});
return { entityId: username, data: { DB_USERNAME: username, DB_PASSWORD: password } };
} catch (err) {
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [username, password, providerInputs.password, providerInputs.username]
});
throw new BadRequestError({
message: `Failed to create lease from provider: ${sanitizedErrorMessage}`
});
}
};

const revoke = async (inputs: unknown, entityId: string) => {
const providerInputs = await validateProviderInputs(inputs);
const connection = await $getClient(providerInputs);

await deleteRabbitMqUser({ axiosInstance: connection, usernameToDelete: entityId });

return { entityId };
try {
await deleteRabbitMqUser({ axiosInstance: connection, usernameToDelete: entityId });
return { entityId };
} catch (err) {
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [entityId, providerInputs.password, providerInputs.username]
});
throw new BadRequestError({
message: `Failed to revoke lease from provider: ${sanitizedErrorMessage}`
});
}
};

const renew = async (_inputs: unknown, entityId: string) => {
@@ -4,6 +4,7 @@ import { customAlphabet } from "nanoid";
import { z } from "zod";

import { BadRequestError } from "@app/lib/errors";
import { sanitizeString } from "@app/lib/fn";
import { alphaNumericNanoId } from "@app/lib/nanoid";
import { validateHandlebarTemplate } from "@app/lib/template/validate-handlebars";

@@ -112,14 +113,27 @@ export const RedisDatabaseProvider = (): TDynamicProviderFns => {

const validateConnection = async (inputs: unknown) => {
const providerInputs = await validateProviderInputs(inputs);
const connection = await $getClient(providerInputs);

const pingResponse = await connection
.ping()
.then(() => true)
.catch(() => false);

return pingResponse;
let connection;
try {
connection = await $getClient(providerInputs);
const pingResponse = await connection.ping().then(() => true);
await connection.quit();
return pingResponse;
} catch (err) {
if (connection) await connection.quit();
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [
providerInputs.password || "",
providerInputs.username,
providerInputs.host,
String(providerInputs.port)
]
});
throw new BadRequestError({
message: `Failed to connect with provider: ${sanitizedErrorMessage}`
});
}
};

const create = async (data: {
@@ -144,10 +158,20 @@ export const RedisDatabaseProvider = (): TDynamicProviderFns => {

const queries = creationStatement.toString().split(";").filter(Boolean);

await executeTransactions(connection, queries);

await connection.quit();
return { entityId: username, data: { DB_USERNAME: username, DB_PASSWORD: password } };
try {
await executeTransactions(connection, queries);
await connection.quit();
return { entityId: username, data: { DB_USERNAME: username, DB_PASSWORD: password } };
} catch (err) {
await connection.quit();
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [username, password, providerInputs.password || "", providerInputs.username]
});
throw new BadRequestError({
message: `Failed to create lease from provider: ${sanitizedErrorMessage}`
});
}
};

const revoke = async (inputs: unknown, entityId: string) => {
@@ -159,10 +183,20 @@ export const RedisDatabaseProvider = (): TDynamicProviderFns => {
const revokeStatement = handlebars.compile(providerInputs.revocationStatement)({ username });
const queries = revokeStatement.toString().split(";").filter(Boolean);

await executeTransactions(connection, queries);

await connection.quit();
return { entityId: username };
try {
await executeTransactions(connection, queries);
await connection.quit();
return { entityId: username };
} catch (err) {
await connection.quit();
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [username, providerInputs.password || "", providerInputs.username]
});
throw new BadRequestError({
message: `Failed to revoke lease from provider: ${sanitizedErrorMessage}`
});
}
};

const renew = async (inputs: unknown, entityId: string, expireAt: number) => {
@@ -176,13 +210,23 @@ export const RedisDatabaseProvider = (): TDynamicProviderFns => {

const renewStatement = handlebars.compile(providerInputs.renewStatement)({ username, expiration });

if (renewStatement) {
const queries = renewStatement.toString().split(";").filter(Boolean);
await executeTransactions(connection, queries);
try {
if (renewStatement) {
const queries = renewStatement.toString().split(";").filter(Boolean);
await executeTransactions(connection, queries);
}
await connection.quit();
return { entityId: username };
} catch (err) {
await connection.quit();
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [username, providerInputs.password || "", providerInputs.username]
});
throw new BadRequestError({
message: `Failed to renew lease from provider: ${sanitizedErrorMessage}`
});
}

await connection.quit();
return { entityId: username };
};

return {
@@ -4,6 +4,7 @@ import odbc from "odbc";
import { z } from "zod";

import { BadRequestError } from "@app/lib/errors";
import { sanitizeString } from "@app/lib/fn";
import { alphaNumericNanoId } from "@app/lib/nanoid";
import { validateHandlebarTemplate } from "@app/lib/template/validate-handlebars";

@@ -67,25 +68,41 @@ export const SapAseProvider = (): TDynamicProviderFns => {

const validateConnection = async (inputs: unknown) => {
const providerInputs = await validateProviderInputs(inputs);
const masterClient = await $getClient(providerInputs, true);
const client = await $getClient(providerInputs);
let masterClient;
let client;
try {
masterClient = await $getClient(providerInputs, true);
client = await $getClient(providerInputs);

const [resultFromMasterDatabase] = await masterClient.query<{ version: string }>("SELECT @@VERSION AS version");
const [resultFromSelectedDatabase] = await client.query<{ version: string }>("SELECT @@VERSION AS version");
const [resultFromMasterDatabase] = await masterClient.query<{ version: string }>("SELECT @@VERSION AS version");
const [resultFromSelectedDatabase] = await client.query<{ version: string }>("SELECT @@VERSION AS version");

if (!resultFromSelectedDatabase.version) {
if (!resultFromSelectedDatabase.version) {
throw new BadRequestError({
message: "Failed to validate SAP ASE connection, version query failed"
});
}

if (resultFromMasterDatabase.version !== resultFromSelectedDatabase.version) {
throw new BadRequestError({
message: "Failed to validate SAP ASE connection (master), version mismatch"
});
}

await masterClient.close();
await client.close();
return true;
} catch (err) {
if (masterClient) await masterClient.close();
if (client) await client.close();
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [providerInputs.password, providerInputs.username, providerInputs.host, providerInputs.database]
});
throw new BadRequestError({
message: "Failed to validate SAP ASE connection, version query failed"
message: `Failed to connect with provider: ${sanitizedErrorMessage}`
});
}

if (resultFromMasterDatabase.version !== resultFromSelectedDatabase.version) {
throw new BadRequestError({
message: "Failed to validate SAP ASE connection (master), version mismatch"
});
}

return true;
};

const create = async (data: { inputs: unknown; usernameTemplate?: string | null; identity?: { name: string } }) => {
@@ -105,16 +122,26 @@ export const SapAseProvider = (): TDynamicProviderFns => {

const queries = creationStatement.trim().replaceAll("\n", "").split(";").filter(Boolean);

for await (const query of queries) {
// If it's an adduser query, we need to first call sp_addlogin on the MASTER database.
// If not done, then the newly created user won't be able to authenticate.
await (query.startsWith(SapCommands.CreateLogin) ? masterClient : client).query(query);
try {
for await (const query of queries) {
// If it's an adduser query, we need to first call sp_addlogin on the MASTER database.
// If not done, then the newly created user won't be able to authenticate.
await (query.startsWith(SapCommands.CreateLogin) ? masterClient : client).query(query);
}
await masterClient.close();
await client.close();
return { entityId: username, data: { DB_USERNAME: username, DB_PASSWORD: password } };
} catch (err) {
await masterClient.close();
await client.close();
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [username, password, providerInputs.password, providerInputs.username, providerInputs.database]
});
throw new BadRequestError({
message: `Failed to create lease from provider: ${sanitizedErrorMessage}`
});
}

await masterClient.close();
await client.close();

return { entityId: username, data: { DB_USERNAME: username, DB_PASSWORD: password } };
};

const revoke = async (inputs: unknown, username: string) => {
@@ -140,14 +167,24 @@ export const SapAseProvider = (): TDynamicProviderFns => {
}
}

for await (const query of queries) {
await (query.startsWith(SapCommands.DropLogin) ? masterClient : client).query(query);
try {
for await (const query of queries) {
await (query.startsWith(SapCommands.DropLogin) ? masterClient : client).query(query);
}
await masterClient.close();
await client.close();
return { entityId: username };
} catch (err) {
await masterClient.close();
await client.close();
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [username, providerInputs.password, providerInputs.username, providerInputs.database]
});
throw new BadRequestError({
message: `Failed to revoke lease from provider: ${sanitizedErrorMessage}`
});
}

await masterClient.close();
await client.close();

return { entityId: username };
};

const renew = async (_: unknown, username: string) => {
@@ -10,6 +10,7 @@ import { customAlphabet } from "nanoid";
import { z } from "zod";

import { BadRequestError } from "@app/lib/errors";
import { sanitizeString } from "@app/lib/fn";
import { alphaNumericNanoId } from "@app/lib/nanoid";
import { validateHandlebarTemplate } from "@app/lib/template/validate-handlebars";

@@ -83,19 +84,26 @@ export const SapHanaProvider = (): TDynamicProviderFns => {

const validateConnection = async (inputs: unknown) => {
const providerInputs = await validateProviderInputs(inputs);
const client = await $getClient(providerInputs);

const testResult = await new Promise<boolean>((resolve, reject) => {
client.exec("SELECT 1 FROM DUMMY;", (err: any) => {
if (err) {
reject();
}

resolve(true);
try {
const client = await $getClient(providerInputs);
const testResult = await new Promise<boolean>((resolve, reject) => {
client.exec("SELECT 1 FROM DUMMY;", (err: any) => {
if (err) {
return reject(err);
}
resolve(true);
});
});
});

return testResult;
return testResult;
} catch (err) {
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [providerInputs.password, providerInputs.username, providerInputs.host]
});
throw new BadRequestError({
message: `Failed to connect with provider: ${sanitizedErrorMessage}`
});
}
};

const create = async (data: {
@@ -119,18 +127,22 @@ export const SapHanaProvider = (): TDynamicProviderFns => {
});

const queries = creationStatement.toString().split(";").filter(Boolean);
for await (const query of queries) {
await new Promise((resolve, reject) => {
client.exec(query, (err: any) => {
if (err) {
reject(
new BadRequestError({
message: err.message
})
);
}
resolve(true);
try {
for await (const query of queries) {
await new Promise((resolve, reject) => {
client.exec(query, (err: any) => {
if (err) return reject(err);
resolve(true);
});
});
}
} catch (err) {
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [username, password, providerInputs.password, providerInputs.username]
});
throw new BadRequestError({
message: `Failed to create lease from provider: ${sanitizedErrorMessage}`
});
}

@@ -142,18 +154,24 @@ export const SapHanaProvider = (): TDynamicProviderFns => {
const client = await $getClient(providerInputs);
const revokeStatement = handlebars.compile(providerInputs.revocationStatement)({ username });
const queries = revokeStatement.toString().split(";").filter(Boolean);
for await (const query of queries) {
await new Promise((resolve, reject) => {
client.exec(query, (err: any) => {
if (err) {
reject(
new BadRequestError({
message: err.message
})
);
}
resolve(true);
try {
for await (const query of queries) {
await new Promise((resolve, reject) => {
client.exec(query, (err: any) => {
if (err) {
reject(err);
}
resolve(true);
});
});
}
} catch (err) {
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [username, providerInputs.password, providerInputs.username]
});
throw new BadRequestError({
message: `Failed to revoke lease from provider: ${sanitizedErrorMessage}`
});
}

@@ -174,16 +192,20 @@ export const SapHanaProvider = (): TDynamicProviderFns => {
await new Promise((resolve, reject) => {
client.exec(query, (err: any) => {
if (err) {
reject(
new BadRequestError({
message: err.message
})
);
reject(err);
}
resolve(true);
});
});
}
} catch (err) {
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [entityId, providerInputs.password, providerInputs.username]
});
throw new BadRequestError({
message: `Failed to renew lease from provider: ${sanitizedErrorMessage}`
});
} finally {
client.disconnect();
}
@@ -4,6 +4,7 @@ import snowflake from "snowflake-sdk";
import { z } from "zod";

import { BadRequestError } from "@app/lib/errors";
import { sanitizeString } from "@app/lib/fn";
import { alphaNumericNanoId } from "@app/lib/nanoid";
import { validateHandlebarTemplate } from "@app/lib/template/validate-handlebars";

@@ -69,12 +70,10 @@ export const SnowflakeProvider = (): TDynamicProviderFns => {

const validateConnection = async (inputs: unknown) => {
const providerInputs = await validateProviderInputs(inputs);
const client = await $getClient(providerInputs);

let isValidConnection: boolean;

let client;
try {
isValidConnection = await Promise.race([
client = await $getClient(providerInputs);
const isValidConnection = await Promise.race([
client.isValidAsync(),
new Promise((resolve) => {
setTimeout(resolve, 10000);
@@ -82,11 +81,18 @@ export const SnowflakeProvider = (): TDynamicProviderFns => {
throw new BadRequestError({ message: "Unable to establish connection - verify credentials" });
})
]);
return isValidConnection;
} catch (err) {
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [providerInputs.password, providerInputs.username, providerInputs.accountId, providerInputs.orgId]
});
throw new BadRequestError({
message: `Failed to connect with provider: ${sanitizedErrorMessage}`
});
} finally {
client.destroy(noop);
if (client) client.destroy(noop);
}

return isValidConnection;
};

const create = async (data: {
@@ -116,13 +122,19 @@ export const SnowflakeProvider = (): TDynamicProviderFns => {
sqlText: creationStatement,
complete(err) {
if (err) {
return reject(new BadRequestError({ name: "CreateLease", message: err.message }));
return reject(err);
}

return resolve(true);
}
});
});
} catch (err) {
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error).message,
tokens: [username, password, providerInputs.password, providerInputs.username]
});
throw new BadRequestError({ message: `Failed to create lease from provider: ${sanitizedErrorMessage}` });
} finally {
client.destroy(noop);
}
@@ -143,13 +155,19 @@ export const SnowflakeProvider = (): TDynamicProviderFns => {
sqlText: revokeStatement,
complete(err) {
if (err) {
return reject(new BadRequestError({ name: "RevokeLease", message: err.message }));
return reject(err);
}

return resolve(true);
}
});
});
} catch (err) {
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error).message,
tokens: [username, providerInputs.password, providerInputs.username]
});
throw new BadRequestError({ message: `Failed to revoke lease from provider: ${sanitizedErrorMessage}` });
} finally {
client.destroy(noop);
}
@@ -175,13 +193,19 @@ export const SnowflakeProvider = (): TDynamicProviderFns => {
sqlText: renewStatement,
complete(err) {
if (err) {
return reject(new BadRequestError({ name: "RenewLease", message: err.message }));
return reject(err);
}

return resolve(true);
}
});
});
} catch (err) {
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error).message,
tokens: [entityId, providerInputs.password, providerInputs.username]
});
throw new BadRequestError({ message: `Failed to renew lease from provider: ${sanitizedErrorMessage}` });
} finally {
client.destroy(noop);
}
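The Snowflake validateConnection above races client.isValidAsync() against a 10-second timer before treating the connection as failed. The same guard can be factored into a small reusable helper; a sketch (withTimeout is our name, not part of this diff, and it rejects on timeout rather than resolving and then throwing):

// Hypothetical helper: race a promise against a timer that rejects with a clear error.
const withTimeout = <T>(promise: Promise<T>, ms: number, message: string): Promise<T> =>
  Promise.race([
    promise,
    new Promise<T>((_, reject) => {
      setTimeout(() => reject(new Error(message)), ms);
    })
  ]);

// Usage mirroring the check above:
// const isValidConnection = await withTimeout(client.isValidAsync(), 10000, "Unable to establish connection - verify credentials");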
@@ -3,6 +3,8 @@ import knex from "knex";
import { z } from "zod";

import { crypto } from "@app/lib/crypto/cryptography";
import { BadRequestError } from "@app/lib/errors";
import { sanitizeString } from "@app/lib/fn";
import { GatewayProxyProtocol, withGatewayProxy } from "@app/lib/gateway";
import { alphaNumericNanoId } from "@app/lib/nanoid";
import { validateHandlebarTemplate } from "@app/lib/template/validate-handlebars";
@@ -212,8 +214,19 @@ export const SqlDatabaseProvider = ({ gatewayService }: TSqlDatabaseProviderDTO)
// oracle needs from keyword
const testStatement = providerInputs.client === SqlProviders.Oracle ? "SELECT 1 FROM DUAL" : "SELECT 1";

isConnected = await db.raw(testStatement).then(() => true);
await db.destroy();
try {
isConnected = await db.raw(testStatement).then(() => true);
} catch (err) {
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [providerInputs.username]
});
throw new BadRequestError({
message: `Failed to connect with provider: ${sanitizedErrorMessage}`
});
} finally {
await db.destroy();
}
};

if (providerInputs.gatewayId) {
@@ -233,13 +246,13 @@ export const SqlDatabaseProvider = ({ gatewayService }: TSqlDatabaseProviderDTO)
const { inputs, expireAt, usernameTemplate, identity } = data;

const providerInputs = await validateProviderInputs(inputs);
const { database } = providerInputs;
const username = generateUsername(providerInputs.client, usernameTemplate, identity);

const password = generatePassword(providerInputs.client, providerInputs.passwordRequirements);
const gatewayCallback = async (host = providerInputs.host, port = providerInputs.port) => {
const db = await $getClient({ ...providerInputs, port, host });
try {
const { database } = providerInputs;
const expiration = new Date(expireAt).toISOString();

const creationStatement = handlebars.compile(providerInputs.creationStatement, { noEscape: true })({
@@ -256,6 +269,14 @@ export const SqlDatabaseProvider = ({ gatewayService }: TSqlDatabaseProviderDTO)
await tx.raw(query);
}
});
} catch (err) {
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [username, password, database]
});
throw new BadRequestError({
message: `Failed to create lease from provider: ${sanitizedErrorMessage}`
});
} finally {
await db.destroy();
}
@@ -283,6 +304,14 @@ export const SqlDatabaseProvider = ({ gatewayService }: TSqlDatabaseProviderDTO)
await tx.raw(query);
}
});
} catch (err) {
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [username, database]
});
throw new BadRequestError({
message: `Failed to revoke lease from provider: ${sanitizedErrorMessage}`
});
} finally {
await db.destroy();
}
@@ -319,6 +348,14 @@ export const SqlDatabaseProvider = ({ gatewayService }: TSqlDatabaseProviderDTO)
}
});
}
} catch (err) {
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [database]
});
throw new BadRequestError({
message: `Failed to renew lease from provider: ${sanitizedErrorMessage}`
});
} finally {
await db.destroy();
}
@@ -1,6 +1,8 @@
import { authenticator } from "otplib";
import { HashAlgorithms } from "otplib/core";

import { BadRequestError } from "@app/lib/errors";
import { sanitizeString } from "@app/lib/fn";
import { alphaNumericNanoId } from "@app/lib/nanoid";

import { DynamicSecretTotpSchema, TDynamicProviderFns, TotpConfigType } from "./models";
@@ -12,62 +14,84 @@ export const TotpProvider = (): TDynamicProviderFns => {
return providerInputs;
};

const validateConnection = async () => {
return true;
const validateConnection = async (inputs: unknown) => {
try {
await validateProviderInputs(inputs);
return true;
} catch (err) {
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: []
});
throw new BadRequestError({
message: `Failed to connect with provider: ${sanitizedErrorMessage}`
});
}
};

const create = async (inputs: unknown) => {
const providerInputs = await validateProviderInputs(inputs);
const create = async (data: { inputs: unknown }) => {
const { inputs } = data;
try {
const providerInputs = await validateProviderInputs(inputs);

const entityId = alphaNumericNanoId(32);
const authenticatorInstance = authenticator.clone();
const entityId = alphaNumericNanoId(32);
const authenticatorInstance = authenticator.clone();

let secret: string;
let period: number | null | undefined;
let digits: number | null | undefined;
let algorithm: HashAlgorithms | null | undefined;
let secret: string;
let period: number | null | undefined;
let digits: number | null | undefined;
let algorithm: HashAlgorithms | null | undefined;

if (providerInputs.configType === TotpConfigType.URL) {
const urlObj = new URL(providerInputs.url);
secret = urlObj.searchParams.get("secret") as string;
const periodFromUrl = urlObj.searchParams.get("period");
const digitsFromUrl = urlObj.searchParams.get("digits");
const algorithmFromUrl = urlObj.searchParams.get("algorithm");
if (providerInputs.configType === TotpConfigType.URL) {
const urlObj = new URL(providerInputs.url);
secret = urlObj.searchParams.get("secret") as string;
const periodFromUrl = urlObj.searchParams.get("period");
const digitsFromUrl = urlObj.searchParams.get("digits");
const algorithmFromUrl = urlObj.searchParams.get("algorithm");

if (periodFromUrl) {
period = +periodFromUrl;
if (periodFromUrl) {
period = +periodFromUrl;
}

if (digitsFromUrl) {
digits = +digitsFromUrl;
}

if (algorithmFromUrl) {
algorithm = algorithmFromUrl.toLowerCase() as HashAlgorithms;
}
} else {
secret = providerInputs.secret;
period = providerInputs.period;
digits = providerInputs.digits;
algorithm = providerInputs.algorithm as unknown as HashAlgorithms;
}

if (digitsFromUrl) {
digits = +digitsFromUrl;
if (digits) {
authenticatorInstance.options = { digits };
}

if (algorithmFromUrl) {
algorithm = algorithmFromUrl.toLowerCase() as HashAlgorithms;
if (algorithm) {
authenticatorInstance.options = { algorithm };
}
} else {
secret = providerInputs.secret;
period = providerInputs.period;
digits = providerInputs.digits;
algorithm = providerInputs.algorithm as unknown as HashAlgorithms;
}

if (digits) {
authenticatorInstance.options = { digits };
}
if (period) {
authenticatorInstance.options = { step: period };
}

if (algorithm) {
authenticatorInstance.options = { algorithm };
return {
entityId,
data: { TOTP: authenticatorInstance.generate(secret), TIME_REMAINING: authenticatorInstance.timeRemaining() }
};
} catch (err) {
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: []
});
throw new BadRequestError({
message: `Failed to create lease from provider: ${sanitizedErrorMessage}`
});
}

if (period) {
authenticatorInstance.options = { step: period };
}

return {
entityId,
data: { TOTP: authenticatorInstance.generate(secret), TIME_REMAINING: authenticatorInstance.timeRemaining() }
};
};

const revoke = async (_inputs: unknown, entityId: string) => {
@@ -4,6 +4,7 @@ import { z } from "zod";

import { crypto } from "@app/lib/crypto/cryptography";
import { BadRequestError } from "@app/lib/errors";
import { sanitizeString } from "@app/lib/fn";
import { GatewayProxyProtocol, withGatewayProxy } from "@app/lib/gateway";
import { logger } from "@app/lib/logger";
import { alphaNumericNanoId } from "@app/lib/nanoid";
@@ -275,6 +276,14 @@ export const VerticaProvider = ({ gatewayService }: TVerticaProviderDTO): TDynam
await client.raw(trimmedQuery);
}
}
} catch (err) {
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [username, password, providerInputs.username, providerInputs.password]
});
throw new BadRequestError({
message: `Failed to create lease from provider: ${sanitizedErrorMessage}`
});
} finally {
if (client) await client.destroy();
}
@@ -339,6 +348,14 @@ export const VerticaProvider = ({ gatewayService }: TVerticaProviderDTO): TDynam
await client.raw(trimmedQuery);
}
}
} catch (err) {
const sanitizedErrorMessage = sanitizeString({
unsanitizedString: (err as Error)?.message,
tokens: [username, providerInputs.username, providerInputs.password]
});
throw new BadRequestError({
message: `Failed to revoke lease from provider: ${sanitizedErrorMessage}`
});
} finally {
if (client) await client.destroy();
}
@@ -3,7 +3,7 @@ import { z } from "zod";

import { logger } from "@app/lib/logger";

import { EventSchema, TopicName } from "./types";
import { BusEventSchema, TopicName } from "./types";

export const eventBusFactory = (redis: Redis) => {
const publisher = redis.duplicate();

@@ -28,7 +28,7 @@ export const eventBusFactory = (redis: Redis) => {
* @param topic - The topic to publish the event to.
* @param event - The event data to publish.
*/
const publish = async <T extends z.input<typeof EventSchema>>(topic: TopicName, event: T) => {
const publish = async <T extends z.input<typeof BusEventSchema>>(topic: TopicName, event: T) => {
const json = JSON.stringify(event);

return publisher.publish(topic, json, (err) => {

@@ -44,7 +44,7 @@ export const eventBusFactory = (redis: Redis) => {
* @template T - The type of the event data, which should match the schema defined in EventSchema.
* @returns A function that can be called to unsubscribe from the event bus.
*/
const subscribe = <T extends z.infer<typeof EventSchema>>(fn: (data: T) => Promise<void> | void) => {
const subscribe = <T extends z.infer<typeof BusEventSchema>>(fn: (data: T) => Promise<void> | void) => {
// Not using async await cause redis client's `on` method does not expect async listeners.
const listener = (channel: string, message: string) => {
try {
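These hunks rename `EventSchema` to `BusEventSchema` at the publish/subscribe boundary. A self-contained sketch of the underlying pattern — schema-validated pub/sub over duplicated Redis connections — assuming `ioredis` and `zod`; the channel and event names mirror the diff, everything else is illustrative:

```ts
import { Redis } from "ioredis";
import { z } from "zod";

const BusEventSchema = z.object({
  event: z.enum(["secret:create", "secret:update", "secret:delete", "secret:import-mutation"]),
  payload: z.unknown()
});

const main = async () => {
  const publisher = new Redis();
  const subscriber = publisher.duplicate(); // a subscribing connection cannot issue regular commands

  await subscriber.subscribe("infisical::core-servers");
  subscriber.on("message", (_channel, message) => {
    const parsed = BusEventSchema.safeParse(JSON.parse(message));
    if (parsed.success) console.log("bus event:", parsed.data.event);
  });

  await publisher.publish(
    "infisical::core-servers",
    JSON.stringify({ event: "secret:update", payload: { secretKey: "DB_URL" } })
  );
};

void main();
```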
@@ -7,7 +7,7 @@ import { logger } from "@app/lib/logger";

import { TEventBusService } from "./event-bus-service";
import { createEventStreamClient, EventStreamClient, IEventStreamClientOpts } from "./event-sse-stream";
import { EventData, RegisteredEvent, toBusEventName } from "./types";
import { BusEvent, RegisteredEvent } from "./types";

const AUTH_REFRESH_INTERVAL = 60 * 1000;
const HEART_BEAT_INTERVAL = 15 * 1000;

@@ -69,8 +69,8 @@ export const sseServiceFactory = (bus: TEventBusService, redis: Redis) => {
}
};

function filterEventsForClient(client: EventStreamClient, event: EventData, registered: RegisteredEvent[]) {
const eventType = toBusEventName(event.data.eventType);
function filterEventsForClient(client: EventStreamClient, event: BusEvent, registered: RegisteredEvent[]) {
const eventType = event.data.event;
const match = registered.find((r) => r.event === eventType);
if (!match) return;
@@ -12,7 +12,7 @@ import { KeyStorePrefixes } from "@app/keystore/keystore";
import { conditionsMatcher } from "@app/lib/casl";
import { logger } from "@app/lib/logger";

import { EventData, RegisteredEvent } from "./types";
import { BusEvent, RegisteredEvent } from "./types";

export const getServerSentEventsHeaders = () =>
({

@@ -55,7 +55,7 @@ export type EventStreamClient = {
id: string;
stream: Readable;
open: () => Promise<void>;
send: (data: EventMessage | EventData) => void;
send: (data: EventMessage | BusEvent) => void;
ping: () => Promise<void>;
refresh: () => Promise<void>;
close: () => void;

@@ -73,15 +73,12 @@ export function createEventStreamClient(redis: Redis, options: IEventStreamClien
return {
subject: options.type,
action: "subscribe",
conditions: {
eventType: r.event,
...(hasConditions
? {
environment: r.conditions?.environmentSlug ?? "",
secretPath: { $glob: secretPath }
}
: {})
}
conditions: hasConditions
? {
environment: r.conditions?.environmentSlug ?? "",
secretPath: { $glob: secretPath }
}
: undefined
};
});

@@ -98,7 +95,7 @@ export function createEventStreamClient(redis: Redis, options: IEventStreamClien
// We will manually push data to the stream
stream._read = () => {};

const send = (data: EventMessage | EventData) => {
const send = (data: EventMessage | BusEvent) => {
const chunk = serializeSseEvent(data);
if (!stream.push(chunk)) {
logger.debug("Backpressure detected: dropped manual event");
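The `send` helper above pushes serialized SSE frames into a manually driven `Readable` and drops frames when the internal buffer is full. A minimal self-contained sketch of that pattern (the frame format below is illustrative, not the project's `serializeSseEvent`):

```ts
import { Readable } from "node:stream";

// Manual-push mode: the consumer drives the stream, we push frames in.
const stream = new Readable({ read() {} });

const send = (data: unknown) => {
  const chunk = `data: ${JSON.stringify(data)}\n\n`; // one SSE frame
  if (!stream.push(chunk)) {
    console.debug("Backpressure detected: dropped manual event");
  }
};

stream.on("data", (c: Buffer) => process.stdout.write(c));
send({ event: "secret:update", payload: { secretKey: "DB_URL" } });
```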
@@ -1,7 +1,8 @@
import { z } from "zod";

import { ProjectType } from "@app/db/schemas";
import { Event, EventType } from "@app/ee/services/audit-log/audit-log-types";

import { ProjectPermissionSecretEventActions } from "../permission/project-permission";

export enum TopicName {
CoreServers = "infisical::core-servers"

@@ -10,84 +11,44 @@ export enum TopicName {
export enum BusEventName {
CreateSecret = "secret:create",
UpdateSecret = "secret:update",
DeleteSecret = "secret:delete"
DeleteSecret = "secret:delete",
ImportMutation = "secret:import-mutation"
}

type PublisableEventTypes =
| EventType.CREATE_SECRET
| EventType.CREATE_SECRETS
| EventType.DELETE_SECRET
| EventType.DELETE_SECRETS
| EventType.UPDATE_SECRETS
| EventType.UPDATE_SECRET;

export function toBusEventName(input: EventType) {
switch (input) {
case EventType.CREATE_SECRET:
case EventType.CREATE_SECRETS:
return BusEventName.CreateSecret;
case EventType.UPDATE_SECRET:
case EventType.UPDATE_SECRETS:
return BusEventName.UpdateSecret;
case EventType.DELETE_SECRET:
case EventType.DELETE_SECRETS:
return BusEventName.DeleteSecret;
default:
return null;
}
}

const isBulkEvent = (event: Event): event is Extract<Event, { metadata: { secrets: Array<unknown> } }> => {
return event.type.endsWith("-secrets"); // Feels so wrong
};

export const toPublishableEvent = (event: Event) => {
const name = toBusEventName(event.type);

if (!name) return null;

const e = event as Extract<Event, { type: PublisableEventTypes }>;

if (isBulkEvent(e)) {
return {
name,
isBulk: true,
data: {
eventType: e.type,
payload: e.metadata.secrets.map((s) => ({
environment: e.metadata.environment,
secretPath: e.metadata.secretPath,
...s
}))
}
} as const;
}

return {
name,
isBulk: false,
data: {
eventType: e.type,
payload: {
...e.metadata,
environment: e.metadata.environment
}
export const Mappings = {
BusEventToAction(input: BusEventName) {
switch (input) {
case BusEventName.CreateSecret:
return ProjectPermissionSecretEventActions.SubscribeCreated;
case BusEventName.DeleteSecret:
return ProjectPermissionSecretEventActions.SubscribeDeleted;
case BusEventName.ImportMutation:
return ProjectPermissionSecretEventActions.SubscribeImportMutations;
case BusEventName.UpdateSecret:
return ProjectPermissionSecretEventActions.SubscribeUpdated;
default:
throw new Error("Unknown bus event name");
}
} as const;
}
};

export const EventName = z.nativeEnum(BusEventName);

const EventSecretPayload = z.object({
secretPath: z.string().optional(),
secretId: z.string(),
secretPath: z.string().optional(),
secretKey: z.string(),
environment: z.string()
});

const EventImportMutationPayload = z.object({
secretPath: z.string(),
environment: z.string()
});

export type EventSecret = z.infer<typeof EventSecretPayload>;

export const EventSchema = z.object({
export const BusEventSchema = z.object({
datacontenttype: z.literal("application/json").optional().default("application/json"),
type: z.nativeEnum(ProjectType),
source: z.string(),

@@ -95,25 +56,38 @@ export const EventSchema = z.object({
.string()
.optional()
.default(() => new Date().toISOString()),
data: z.discriminatedUnion("eventType", [
data: z.discriminatedUnion("event", [
z.object({
specversion: z.number().optional().default(1),
eventType: z.enum([EventType.CREATE_SECRET, EventType.UPDATE_SECRET, EventType.DELETE_SECRET]),
payload: EventSecretPayload
event: z.enum([BusEventName.CreateSecret, BusEventName.DeleteSecret, BusEventName.UpdateSecret]),
payload: z.union([EventSecretPayload, EventSecretPayload.array()])
}),
z.object({
specversion: z.number().optional().default(1),
eventType: z.enum([EventType.CREATE_SECRETS, EventType.UPDATE_SECRETS, EventType.DELETE_SECRETS]),
payload: EventSecretPayload.array()
event: z.enum([BusEventName.ImportMutation]),
payload: z.union([EventImportMutationPayload, EventImportMutationPayload.array()])
})
// Add more event types as needed
])
});

export type EventData = z.infer<typeof EventSchema>;
export type BusEvent = z.infer<typeof BusEventSchema>;

type PublishableEventPayload = z.input<typeof BusEventSchema>["data"];
type PublishableSecretEvent = Extract<
PublishableEventPayload,
{ event: Exclude<BusEventName, BusEventName.ImportMutation> }
>["payload"];

export type PublishableEvent = {
created?: PublishableSecretEvent;
updated?: PublishableSecretEvent;
deleted?: PublishableSecretEvent;
importMutation?: Extract<PublishableEventPayload, { event: BusEventName.ImportMutation }>["payload"];
};

export const EventRegisterSchema = z.object({
event: EventName,
event: z.nativeEnum(BusEventName),
conditions: z
.object({
secretPath: z.string().optional().default("/"),
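The schema change above swaps the discriminator key from `eventType` to `event` and folds bulk payloads into unions with arrays. A tiny self-contained illustration of how `z.discriminatedUnion` dispatches on that key (event names come from the diff, payload shapes are trimmed down; note this relies on a zod version that accepts `z.enum` as a discriminator, which the diff itself assumes):

```ts
import { z } from "zod";

const schema = z.discriminatedUnion("event", [
  z.object({
    event: z.enum(["secret:create", "secret:update", "secret:delete"]),
    payload: z.union([z.object({ secretKey: z.string() }), z.object({ secretKey: z.string() }).array()])
  }),
  z.object({
    event: z.literal("secret:import-mutation"),
    payload: z.object({ secretPath: z.string(), environment: z.string() })
  })
]);

// Dispatches to the first branch and accepts the bulk (array) form.
console.log(schema.parse({ event: "secret:update", payload: [{ secretKey: "DB_URL" }] }));
```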
@@ -31,7 +31,7 @@ export const getDefaultOnPremFeatures = () => {
caCrl: false,
sshHostGroups: false,
enterpriseSecretSyncs: false,
enterpriseAppConnections: false,
enterpriseAppConnections: true,
machineIdentityAuthTemplates: false
};
};
@@ -161,8 +161,7 @@ const buildAdminPermissionRules = () => {
ProjectPermissionSecretActions.ReadValue,
ProjectPermissionSecretActions.Create,
ProjectPermissionSecretActions.Edit,
ProjectPermissionSecretActions.Delete,
ProjectPermissionSecretActions.Subscribe
ProjectPermissionSecretActions.Delete
],
ProjectPermissionSub.Secrets
);

@@ -266,8 +265,7 @@ const buildMemberPermissionRules = () => {
ProjectPermissionSecretActions.ReadValue,
ProjectPermissionSecretActions.Edit,
ProjectPermissionSecretActions.Create,
ProjectPermissionSecretActions.Delete,
ProjectPermissionSecretActions.Subscribe
ProjectPermissionSecretActions.Delete
],
ProjectPermissionSub.Secrets
);
@@ -36,8 +36,7 @@ export enum ProjectPermissionSecretActions {
ReadValue = "readValue",
Create = "create",
Edit = "edit",
Delete = "delete",
Subscribe = "subscribe"
Delete = "delete"
}

export enum ProjectPermissionCmekActions {

@@ -158,6 +157,13 @@ export enum ProjectPermissionSecretScanningConfigActions {
Update = "update-configs"
}

export enum ProjectPermissionSecretEventActions {
SubscribeCreated = "subscribe-on-created",
SubscribeUpdated = "subscribe-on-updated",
SubscribeDeleted = "subscribe-on-deleted",
SubscribeImportMutations = "subscribe-on-import-mutations"
}

export enum ProjectPermissionSub {
Role = "role",
Member = "member",

@@ -197,7 +203,8 @@ export enum ProjectPermissionSub {
Kmip = "kmip",
SecretScanningDataSources = "secret-scanning-data-sources",
SecretScanningFindings = "secret-scanning-findings",
SecretScanningConfigs = "secret-scanning-configs"
SecretScanningConfigs = "secret-scanning-configs",
SecretEvents = "secret-events"
}

export type SecretSubjectFields = {

@@ -205,7 +212,13 @@ export type SecretSubjectFields = {
secretPath: string;
secretName?: string;
secretTags?: string[];
eventType?: string;
};

export type SecretEventSubjectFields = {
environment: string;
secretPath: string;
secretName?: string;
secretTags?: string[];
};

export type SecretFolderSubjectFields = {

@@ -344,7 +357,11 @@ export type ProjectPermissionSet =
| [ProjectPermissionCommitsActions, ProjectPermissionSub.Commits]
| [ProjectPermissionSecretScanningDataSourceActions, ProjectPermissionSub.SecretScanningDataSources]
| [ProjectPermissionSecretScanningFindingActions, ProjectPermissionSub.SecretScanningFindings]
| [ProjectPermissionSecretScanningConfigActions, ProjectPermissionSub.SecretScanningConfigs];
| [ProjectPermissionSecretScanningConfigActions, ProjectPermissionSub.SecretScanningConfigs]
| [
ProjectPermissionSecretEventActions,
ProjectPermissionSub.SecretEvents | (ForcedSubject<ProjectPermissionSub.SecretEvents> & SecretEventSubjectFields)
];

const SECRET_PATH_MISSING_SLASH_ERR_MSG = "Invalid Secret Path; it must start with a '/'";
const SECRET_PATH_PERMISSION_OPERATOR_SCHEMA = z.union([

@@ -877,7 +894,16 @@ export const ProjectPermissionV2Schema = z.discriminatedUnion("subject", [
"When specified, only matching conditions will be allowed to access given resource."
).optional()
}),

z.object({
subject: z.literal(ProjectPermissionSub.SecretEvents).describe("The entity this permission pertains to."),
inverted: z.boolean().optional().describe("Whether rule allows or forbids."),
action: CASL_ACTION_SCHEMA_NATIVE_ENUM(ProjectPermissionSecretEventActions).describe(
"Describe what action an entity can take."
),
conditions: SecretSyncConditionV2Schema.describe(
"When specified, only matching conditions will be allowed to access given resource."
).optional()
}),
...GeneralPermissionSchema
]);
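The new `SecretEvents` subject and `subscribe-on-*` actions plug into the project's CASL-based permission checks. A minimal sketch with `@casl/ability`; the subject and action strings are the enum values from the diff, while the glob matching used by the real service is omitted here — plain field equality stands in for it:

```ts
import { AbilityBuilder, createMongoAbility, subject } from "@casl/ability";

const { can, build } = new AbilityBuilder(createMongoAbility);
can("subscribe-on-updated", "secret-events", { environment: "dev", secretPath: "/app" });
const ability = build();

console.log(
  ability.can("subscribe-on-updated", subject("secret-events", { environment: "dev", secretPath: "/app" }))
); // true
console.log(
  ability.can("subscribe-on-updated", subject("secret-events", { environment: "prod", secretPath: "/app" }))
); // false: condition fields must match
```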
@@ -952,13 +952,39 @@ export const secretApprovalRequestServiceFactory = ({
if (!folder) {
throw new NotFoundError({ message: `Folder with ID '${folderId}' not found in project with ID '${projectId}'` });
}

const { secrets } = mergeStatus;

await secretQueueService.syncSecrets({
projectId,
orgId: actorOrgId,
secretPath: folder.path,
environmentSlug: folder.environmentSlug,
actorId,
actor
actor,
event: {
created: secrets.created.map((el) => ({
environment: folder.environmentSlug,
secretPath: folder.path,
secretId: el.id,
// @ts-expect-error - not present on V1 secrets
secretKey: el.key as string
})),
updated: secrets.updated.map((el) => ({
environment: folder.environmentSlug,
secretPath: folder.path,
secretId: el.id,
// @ts-expect-error - not present on V1 secrets
secretKey: el.key as string
})),
deleted: secrets.deleted.map((el) => ({
environment: folder.environmentSlug,
secretPath: folder.path,
secretId: el.id,
// @ts-expect-error - not present on V1 secrets
secretKey: el.key as string
}))
}
});

if (isSoftEnforcement) {
@@ -2,6 +2,7 @@ import { AxiosError } from "axios";

import { getConfig } from "@app/lib/config/env";
import { BadRequestError } from "@app/lib/errors";
import { logger } from "@app/lib/logger";
import { KmsDataKey } from "@app/services/kms/kms-types";

import { AUTH0_CLIENT_SECRET_ROTATION_LIST_OPTION } from "./auth0-client-secret";

@@ -13,9 +14,11 @@ import { MYSQL_CREDENTIALS_ROTATION_LIST_OPTION } from "./mysql-credentials";
import { OKTA_CLIENT_SECRET_ROTATION_LIST_OPTION } from "./okta-client-secret";
import { ORACLEDB_CREDENTIALS_ROTATION_LIST_OPTION } from "./oracledb-credentials";
import { POSTGRES_CREDENTIALS_ROTATION_LIST_OPTION } from "./postgres-credentials";
import { TSecretRotationV2DALFactory } from "./secret-rotation-v2-dal";
import { SecretRotation, SecretRotationStatus } from "./secret-rotation-v2-enums";
import { TSecretRotationV2ServiceFactoryDep } from "./secret-rotation-v2-service";
import { TSecretRotationV2ServiceFactory, TSecretRotationV2ServiceFactoryDep } from "./secret-rotation-v2-service";
import {
TSecretRotationRotateSecretsJobPayload,
TSecretRotationV2,
TSecretRotationV2GeneratedCredentials,
TSecretRotationV2ListItem,

@@ -74,6 +77,10 @@ export const getNextUtcRotationInterval = (rotateAtUtc?: TSecretRotationV2["rota
const appCfg = getConfig();

if (appCfg.isRotationDevelopmentMode) {
if (appCfg.isTestMode) {
// if its test mode, it should always rotate
return new Date(Date.now() + 365 * 24 * 60 * 60 * 1000); // Current time + 1 year
}
return getNextUTCMinuteInterval(rotateAtUtc);
}

@@ -263,3 +270,51 @@ export const throwOnImmutableParameterUpdate = (
// do nothing
}
};

export const rotateSecretsFns = async ({
job,
secretRotationV2DAL,
secretRotationV2Service
}: {
job: {
data: TSecretRotationRotateSecretsJobPayload;
id: string;
retryCount: number;
retryLimit: number;
};
secretRotationV2DAL: Pick<TSecretRotationV2DALFactory, "findById">;
secretRotationV2Service: Pick<TSecretRotationV2ServiceFactory, "rotateGeneratedCredentials">;
}) => {
const { rotationId, queuedAt, isManualRotation } = job.data;
const { retryCount, retryLimit } = job;

const logDetails = `[rotationId=${rotationId}] [jobId=${job.id}] retryCount=[${retryCount}/${retryLimit}]`;

try {
const secretRotation = await secretRotationV2DAL.findById(rotationId);

if (!secretRotation) throw new Error(`Secret rotation ${rotationId} not found`);

if (!secretRotation.isAutoRotationEnabled) {
logger.info(`secretRotationV2Queue: Skipping Rotation - Auto-Rotation Disabled Since Queue ${logDetails}`);
}

if (new Date(secretRotation.lastRotatedAt).getTime() >= new Date(queuedAt).getTime()) {
// rotated since being queued, skip rotation
logger.info(`secretRotationV2Queue: Skipping Rotation - Rotated Since Queue ${logDetails}`);
return;
}

await secretRotationV2Service.rotateGeneratedCredentials(secretRotation, {
jobId: job.id,
shouldSendNotification: true,
isFinalAttempt: retryCount === retryLimit,
isManualRotation
});

logger.info(`secretRotationV2Queue: Secrets Rotated ${logDetails}`);
} catch (error) {
logger.error(error, `secretRotationV2Queue: Failed to Rotate Secrets ${logDetails}`);
throw error;
}
};
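`rotateSecretsFns` centralizes the worker body so the queue handler and the test-mode path share one implementation. Its core idempotency guard — skip when the rotation already ran after the job was queued — in isolation (the row shape below is assumed from the fields the diff reads):

```ts
type RotationRow = { lastRotatedAt: string; isAutoRotationEnabled: boolean };

const shouldRotate = (rotation: RotationRow, queuedAt: string) =>
  rotation.isAutoRotationEnabled &&
  new Date(rotation.lastRotatedAt).getTime() < new Date(queuedAt).getTime();

console.log(
  shouldRotate(
    { lastRotatedAt: "2025-01-01T00:00:00Z", isAutoRotationEnabled: true },
    "2025-01-02T00:00:00Z"
  )
); // true: the job was queued after the last rotation, so rotate
```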
@@ -1,9 +1,12 @@
import { v4 as uuidv4 } from "uuid";

import { ProjectMembershipRole } from "@app/db/schemas";
import { TSecretRotationV2DALFactory } from "@app/ee/services/secret-rotation-v2/secret-rotation-v2-dal";
import { SecretRotation } from "@app/ee/services/secret-rotation-v2/secret-rotation-v2-enums";
import {
getNextUtcRotationInterval,
getSecretRotationRotateSecretJobOptions
getSecretRotationRotateSecretJobOptions,
rotateSecretsFns
} from "@app/ee/services/secret-rotation-v2/secret-rotation-v2-fns";
import { SECRET_ROTATION_NAME_MAP } from "@app/ee/services/secret-rotation-v2/secret-rotation-v2-maps";
import { TSecretRotationV2ServiceFactory } from "@app/ee/services/secret-rotation-v2/secret-rotation-v2-service";

@@ -63,14 +66,34 @@ export const secretRotationV2QueueServiceFactory = async ({
rotation.lastRotatedAt
).toISOString()}] [rotateAt=${new Date(rotation.nextRotationAt!).toISOString()}]`
);
await queueService.queuePg(
QueueJobs.SecretRotationV2RotateSecrets,
{
rotationId: rotation.id,
queuedAt: currentTime
},
getSecretRotationRotateSecretJobOptions(rotation)
);

const data = {
rotationId: rotation.id,
queuedAt: currentTime
} as TSecretRotationRotateSecretsJobPayload;

if (appCfg.isTestMode) {
logger.warn("secretRotationV2Queue: Manually rotating secrets for test mode");
await rotateSecretsFns({
job: {
id: uuidv4(),
data,
retryCount: 0,
retryLimit: 0
},
secretRotationV2DAL,
secretRotationV2Service
});
} else {
await queueService.queuePg(
QueueJobs.SecretRotationV2RotateSecrets,
{
rotationId: rotation.id,
queuedAt: currentTime
},
getSecretRotationRotateSecretJobOptions(rotation)
);
}
}
} catch (error) {
logger.error(error, "secretRotationV2Queue: Queue Rotations Error:");

@@ -87,38 +110,14 @@ export const secretRotationV2QueueServiceFactory = async ({
await queueService.startPg<QueueName.SecretRotationV2>(
QueueJobs.SecretRotationV2RotateSecrets,
async ([job]) => {
const { rotationId, queuedAt, isManualRotation } = job.data as TSecretRotationRotateSecretsJobPayload;
const { retryCount, retryLimit } = job;

const logDetails = `[rotationId=${rotationId}] [jobId=${job.id}] retryCount=[${retryCount}/${retryLimit}]`;

try {
const secretRotation = await secretRotationV2DAL.findById(rotationId);

if (!secretRotation) throw new Error(`Secret rotation ${rotationId} not found`);

if (!secretRotation.isAutoRotationEnabled) {
logger.info(`secretRotationV2Queue: Skipping Rotation - Auto-Rotation Disabled Since Queue ${logDetails}`);
}

if (new Date(secretRotation.lastRotatedAt).getTime() >= new Date(queuedAt).getTime()) {
// rotated since being queued, skip rotation
logger.info(`secretRotationV2Queue: Skipping Rotation - Rotated Since Queue ${logDetails}`);
return;
}

await secretRotationV2Service.rotateGeneratedCredentials(secretRotation, {
jobId: job.id,
shouldSendNotification: true,
isFinalAttempt: retryCount === retryLimit,
isManualRotation
});

logger.info(`secretRotationV2Queue: Secrets Rotated ${logDetails}`);
} catch (error) {
logger.error(error, `secretRotationV2Queue: Failed to Rotate Secrets ${logDetails}`);
throw error;
}
await rotateSecretsFns({
job: {
...job,
data: job.data as TSecretRotationRotateSecretsJobPayload
},
secretRotationV2DAL,
secretRotationV2Service
});
},
{
batchSize: 1,
@@ -58,9 +58,9 @@ export function scanDirectory(inputPath: string, outputPath: string, configPath?
});
}

export function scanFile(inputPath: string): Promise<void> {
export function scanFile(inputPath: string, configPath?: string): Promise<void> {
return new Promise((resolve, reject) => {
const command = `infisical scan --exit-code=77 --source "${inputPath}" --no-git`;
const command = `infisical scan --exit-code=77 --source "${inputPath}" --no-git ${configPath ? `-c ${configPath}` : ""}`;
exec(command, (error) => {
if (error && error.code === 77) {
reject(error);

@@ -166,6 +166,20 @@ export const parseScanErrorMessage = (err: unknown): string => {
: `${errorMessage.substring(0, MAX_MESSAGE_LENGTH - 3)}...`;
};

const generateSecretValuePolicyConfiguration = (entropy: number): string => `
# Extend default configuration to preserve existing rules
[extend]
useDefault = true

# Add custom high-entropy rule
[[rules]]
id = "high-entropy"
description = "Will scan for high entropy secrets"
regex = '''.*'''
entropy = ${entropy}
keywords = []
`;

export const scanSecretPolicyViolations = async (
projectId: string,
secretPath: string,

@@ -188,14 +202,25 @@ export const scanSecretPolicyViolations = async (

const tempFolder = await createTempFolder();
try {
const configPath = join(tempFolder, "infisical-scan.toml");

const secretPolicyConfiguration = generateSecretValuePolicyConfiguration(
appCfg.PARAMS_FOLDER_SECRET_DETECTION_ENTROPY
);

await writeTextToFile(configPath, secretPolicyConfiguration);

const scanPromises = secrets
.filter((secret) => !ignoreValues.includes(secret.secretValue))
.map(async (secret) => {
const secretFilePath = join(tempFolder, `${crypto.nativeCrypto.randomUUID()}.txt`);
await writeTextToFile(secretFilePath, `${secret.secretKey}=${secret.secretValue}`);
const secretKeyValueFilePath = join(tempFolder, `${crypto.nativeCrypto.randomUUID()}.txt`);
const secretValueOnlyFilePath = join(tempFolder, `${crypto.nativeCrypto.randomUUID()}.txt`);
await writeTextToFile(secretKeyValueFilePath, `${secret.secretKey}=${secret.secretValue}`);
await writeTextToFile(secretValueOnlyFilePath, secret.secretValue);

try {
await scanFile(secretFilePath);
await scanFile(secretKeyValueFilePath);
await scanFile(secretValueOnlyFilePath, configPath);
} catch (error) {
throw new BadRequestError({
message: `Secret value detected in ${secret.secretKey}. Please add this instead to the designated secrets path in the project.`,
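`scanFile` shells out to the Infisical CLI and treats exit code 77 as "leak found". A standalone sketch of the same invocation; the flags (`--exit-code=77`, `--source`, `--no-git`, `-c`) are taken from the diff, while the file paths are hypothetical placeholders:

```ts
import { exec } from "node:child_process";

// Hypothetical paths; in the service these live in a per-request temp folder.
const source = "/tmp/value.txt";
const configPath = "/tmp/infisical-scan.toml";

exec(`infisical scan --exit-code=77 --source "${source}" --no-git -c ${configPath}`, (error) => {
  if (error && error.code === 77) {
    console.log("secret detected"); // the scanner exited with the sentinel leak code
  } else if (error) {
    throw error; // some other CLI failure
  } else {
    console.log("no secrets detected");
  }
});
```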
@@ -79,6 +79,7 @@ const envSchema = z
QUEUE_WORKER_PROFILE: z.nativeEnum(QueueWorkerProfile).default(QueueWorkerProfile.All),
HTTPS_ENABLED: zodStrBool,
ROTATION_DEVELOPMENT_MODE: zodStrBool.default("false").optional(),
DAILY_RESOURCE_CLEAN_UP_DEVELOPMENT_MODE: zodStrBool.default("false").optional(),
// smtp options
SMTP_HOST: zpStr(z.string().optional()),
SMTP_IGNORE_TLS: zodStrBool.default("false"),

@@ -215,6 +216,7 @@ const envSchema = z
return JSON.parse(val) as { secretPath: string; projectId: string }[];
})
),
PARAMS_FOLDER_SECRET_DETECTION_ENTROPY: z.coerce.number().optional().default(3.7),

// HSM
HSM_LIB_PATH: zpStr(z.string().optional()),

@@ -346,7 +348,11 @@ const envSchema = z
isSmtpConfigured: Boolean(data.SMTP_HOST),
isRedisConfigured: Boolean(data.REDIS_URL || data.REDIS_SENTINEL_HOSTS),
isDevelopmentMode: data.NODE_ENV === "development",
isRotationDevelopmentMode: data.NODE_ENV === "development" && data.ROTATION_DEVELOPMENT_MODE,
isTestMode: data.NODE_ENV === "test",
isRotationDevelopmentMode:
(data.NODE_ENV === "development" && data.ROTATION_DEVELOPMENT_MODE) || data.NODE_ENV === "test",
isDailyResourceCleanUpDevelopmentMode:
data.NODE_ENV === "development" && data.DAILY_RESOURCE_CLEAN_UP_DEVELOPMENT_MODE,
isProductionMode: data.NODE_ENV === "production" || IS_PACKAGED,
isRedisSentinelMode: Boolean(data.REDIS_SENTINEL_HOSTS),
REDIS_SENTINEL_HOSTS: data.REDIS_SENTINEL_HOSTS?.trim()
@@ -19,3 +19,17 @@ export const prefixWithSlash = (str: string) => {
const vowelRegex = new RE2(/^[aeiou]/i);

export const startsWithVowel = (str: string) => vowelRegex.test(str);

const pickWordsRegex = new RE2(/(\W+)/);
export const sanitizeString = (dto: { unsanitizedString: string; tokens: string[] }) => {
const words = dto.unsanitizedString.split(pickWordsRegex);

const redactionSet = new Set(dto.tokens.filter(Boolean));
const sanitizedWords = words.map((el) => {
if (redactionSet.has(el)) {
return "[REDACTED]";
}
return el;
});
return sanitizedWords.join("");
};
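Splitting on the captured group `(\W+)` keeps the separators in the result array, so rejoining reproduces the original string with only exact token matches swapped out. A worked example, re-declaring the helper locally so it runs standalone (`re2` usage mirrors the diff):

```ts
import RE2 from "re2";

// Local copy of the sanitizer above, for illustration.
const pickWordsRegex = new RE2(/(\W+)/);
const sanitizeString = (dto: { unsanitizedString: string; tokens: string[] }) => {
  const words = dto.unsanitizedString.split(pickWordsRegex); // words and separators interleaved
  const redactionSet = new Set(dto.tokens.filter(Boolean));
  return words.map((el) => (redactionSet.has(el) ? "[REDACTED]" : el)).join("");
};

console.log(
  sanitizeString({
    unsanitizedString: "login failed for user admin with password hunter2",
    tokens: ["hunter2", "admin"]
  })
);
// -> "login failed for user [REDACTED] with password [REDACTED]"
```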
@@ -20,7 +20,10 @@ export const triggerWorkflowIntegrationNotification = async (dto: TTriggerWorkfl
const slackConfig = await projectSlackConfigDAL.getIntegrationDetailsByProject(projectId);

if (slackConfig) {
if (notification.type === TriggerFeature.ACCESS_REQUEST) {
if (
notification.type === TriggerFeature.ACCESS_REQUEST ||
notification.type === TriggerFeature.ACCESS_REQUEST_UPDATED
) {
const targetChannelIds = slackConfig.accessRequestChannels?.split(", ") || [];
if (targetChannelIds.length && slackConfig.isAccessRequestNotificationEnabled) {
await sendSlackNotification({

@@ -50,7 +53,10 @@ export const triggerWorkflowIntegrationNotification = async (dto: TTriggerWorkfl
}

if (microsoftTeamsConfig) {
if (notification.type === TriggerFeature.ACCESS_REQUEST) {
if (
notification.type === TriggerFeature.ACCESS_REQUEST ||
notification.type === TriggerFeature.ACCESS_REQUEST_UPDATED
) {
if (microsoftTeamsConfig.isAccessRequestNotificationEnabled && microsoftTeamsConfig.accessRequestChannels) {
const { success, data } = validateMicrosoftTeamsChannelsSchema.safeParse(
microsoftTeamsConfig.accessRequestChannels
@@ -6,7 +6,8 @@ import { TProjectSlackConfigDALFactory } from "@app/services/slack/project-slack

export enum TriggerFeature {
SECRET_APPROVAL = "secret-approval",
ACCESS_REQUEST = "access-request"
ACCESS_REQUEST = "access-request",
ACCESS_REQUEST_UPDATED = "access-request-updated"
}

export type TNotification =

@@ -34,6 +35,22 @@ export type TNotification =
approvalUrl: string;
note?: string;
};
}
| {
type: TriggerFeature.ACCESS_REQUEST_UPDATED;
payload: {
requesterFullName: string;
requesterEmail: string;
isTemporary: boolean;
secretPath: string;
environment: string;
projectName: string;
permissions: string[];
approvalUrl: string;
editNote?: string;
editorFullName?: string;
editorEmail?: string;
};
};

export type TTriggerWorkflowNotificationDTO = {
@@ -560,8 +560,7 @@ export const registerRoutes = async (
queueService,
projectDAL,
licenseService,
auditLogStreamDAL,
eventBusService
auditLogStreamDAL
});

const auditLogService = auditLogServiceFactory({ auditLogDAL, permissionService, auditLogQueue });

@@ -849,8 +848,6 @@ export const registerRoutes = async (
projectDAL,
permissionService,
projectUserMembershipRoleDAL,
projectBotDAL,
projectKeyDAL,
projectMembershipDAL
});

@@ -1123,7 +1120,9 @@ export const registerRoutes = async (
resourceMetadataDAL,
folderCommitService,
secretSyncQueue,
reminderService
reminderService,
eventBusService,
licenseService
});

const projectService = projectServiceFactory({

@@ -1974,7 +1973,7 @@ export const registerRoutes = async (

await telemetryQueue.startTelemetryCheck();
await telemetryQueue.startAggregatedEventsJob();
await dailyResourceCleanUp.startCleanUp();
await dailyResourceCleanUp.init();
await dailyReminderQueueService.startDailyRemindersJob();
await dailyReminderQueueService.startSecretReminderMigrationJob();
await dailyExpiringPkiItemAlert.startSendingAlerts();

@@ -583,16 +583,7 @@ export const registerAdminRouter = async (server: FastifyZodProvider) => {
email: z.string().email().trim(),
password: z.string().trim(),
firstName: z.string().trim(),
lastName: z.string().trim().optional(),
protectedKey: z.string().trim(),
protectedKeyIV: z.string().trim(),
protectedKeyTag: z.string().trim(),
publicKey: z.string().trim(),
encryptedPrivateKey: z.string().trim(),
encryptedPrivateKeyIV: z.string().trim(),
encryptedPrivateKeyTag: z.string().trim(),
salt: z.string().trim(),
verifier: z.string().trim()
lastName: z.string().trim().optional()
}),
response: {
200: z.object({
@@ -5,8 +5,8 @@ import { z } from "zod";

import { ActionProjectType, ProjectType } from "@app/db/schemas";
import { getServerSentEventsHeaders } from "@app/ee/services/event/event-sse-stream";
import { EventRegisterSchema } from "@app/ee/services/event/types";
import { ProjectPermissionSecretActions, ProjectPermissionSub } from "@app/ee/services/permission/project-permission";
import { EventRegisterSchema, Mappings } from "@app/ee/services/event/types";
import { ProjectPermissionSub } from "@app/ee/services/permission/project-permission";
import { ApiDocsTags, EventSubscriptions } from "@app/lib/api-docs";
import { BadRequestError, ForbiddenRequestError, RateLimitError } from "@app/lib/errors";
import { readLimit } from "@app/server/config/rateLimiter";

@@ -82,21 +82,19 @@ export const registerEventRouter = async (server: FastifyZodProvider) => {
req.body.register.forEach((r) => {
const fields = {
environment: r.conditions?.environmentSlug ?? "",
secretPath: r.conditions?.secretPath ?? "/",
eventType: r.event
secretPath: r.conditions?.secretPath ?? "/"
};

const allowed = info.permission.can(
ProjectPermissionSecretActions.Subscribe,
subject(ProjectPermissionSub.Secrets, fields)
);
const action = Mappings.BusEventToAction(r.event);

const allowed = info.permission.can(action, subject(ProjectPermissionSub.SecretEvents, fields));

if (!allowed) {
throw new ForbiddenRequestError({
name: "PermissionDenied",
message: `You are not allowed to subscribe on secrets`,
message: `You are not allowed to subscribe on ${ProjectPermissionSub.SecretEvents}`,
details: {
event: fields.eventType,
action,
environmentSlug: fields.environment,
secretPath: fields.secretPath
}
@@ -478,4 +478,30 @@ export const registerIdentityRouter = async (server: FastifyZodProvider) => {
return { identityMemberships };
}
});

server.route({
method: "GET",
url: "/details",
config: {
rateLimit: readLimit
},
schema: {
response: {
200: z.object({
identityDetails: z.object({
organization: z.object({
id: z.string(),
name: z.string(),
slug: z.string()
})
})
})
}
},
onRequest: verifyAuth([AuthMode.IDENTITY_ACCESS_TOKEN], { requireOrg: false }),
handler: async (req) => {
const organization = await server.services.org.findIdentityOrganization(req.permission.id);
return { identityDetails: { organization } };
}
});
};
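A hedged sketch of calling the new route from a client. The base URL and mount prefix (`/api/v1/identities`) are assumptions based on the router's name, not confirmed by this diff:

```ts
// Hypothetical client call; base URL and mount path are assumptions.
const getIdentityDetails = async () => {
  const res = await fetch("https://app.infisical.com/api/v1/identities/details", {
    headers: { Authorization: `Bearer ${process.env.IDENTITY_ACCESS_TOKEN}` }
  });
  const { identityDetails } = (await res.json()) as {
    identityDetails: { organization: { id: string; name: string; slug: string } };
  };
  return identityDetails.organization;
};

void getIdentityDetails().then((org) => console.log(org.slug));
```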
@@ -45,7 +45,7 @@ export const registerSecretFolderRouter = async (server: FastifyZodProvider) =>
.transform(removeTrailingSlash)
.describe(FOLDERS.CREATE.path)
.optional(),
// backward compatiability with cli
// backward compatibility with cli
directory: z
.string()
.trim()

@@ -58,7 +58,9 @@ export const registerSecretFolderRouter = async (server: FastifyZodProvider) =>
}),
response: {
200: z.object({
folder: SecretFoldersSchema
folder: SecretFoldersSchema.extend({
path: z.string()
})
})
}
},

@@ -130,7 +132,7 @@ export const registerSecretFolderRouter = async (server: FastifyZodProvider) =>
.transform(removeTrailingSlash)
.describe(FOLDERS.UPDATE.path)
.optional(),
// backward compatiability with cli
// backward compatibility with cli
directory: z
.string()
.trim()

@@ -143,7 +145,9 @@ export const registerSecretFolderRouter = async (server: FastifyZodProvider) =>
}),
response: {
200: z.object({
folder: SecretFoldersSchema
folder: SecretFoldersSchema.extend({
path: z.string()
})
})
}
},

@@ -359,7 +363,7 @@ export const registerSecretFolderRouter = async (server: FastifyZodProvider) =>
.transform(removeTrailingSlash)
.describe(FOLDERS.LIST.path)
.optional(),
// backward compatiability with cli
// backward compatibility with cli
directory: z
.string()
.trim()
@@ -283,6 +283,14 @@ export const registerProjectRouter = async (server: FastifyZodProvider) => {
rateLimit: readLimit
},
schema: {
hide: false,
tags: [ApiDocsTags.Projects],
description: "Get project details by slug",
security: [
{
bearerAuth: []
}
],
params: z.object({
slug: slugSchema({ max: 36 }).describe("The slug of the project to get.")
}),
@@ -142,16 +142,27 @@ export const getGitHubAppAuthToken = async (appConnection: TGitHubConnection) =>
return token;
};

const parseGitHubLinkHeader = (linkHeader: string | undefined): Record<string, string> => {
if (!linkHeader) return {};

const links: Record<string, string> = {};
const segments = linkHeader.split(",");
const re = new RE2(/<([^>]+)>;\s*rel="([^"]+)"/);

for (const segment of segments) {
const match = re.exec(segment.trim());
if (match) {
const url = match[1];
const rel = match[2];
links[rel] = url;
}
}
return links;
};

function extractNextPageUrl(linkHeader: string | undefined): string | null {
if (!linkHeader) return null;

const links = linkHeader.split(",");
const nextLink = links.find((link) => link.includes('rel="next"'));

if (!nextLink) return null;

const match = new RE2(/<([^>]+)>/).exec(nextLink);
return match ? match[1] : null;
const links = parseGitHubLinkHeader(linkHeader);
return links.next || null;
}

export const makePaginatedGitHubRequest = async <T, R = T[]>(

@@ -164,27 +175,83 @@ export const makePaginatedGitHubRequest = async <T, R = T[]>(

const token =
method === GitHubConnectionMethod.OAuth ? credentials.accessToken : await getGitHubAppAuthToken(appConnection);
let url: string | null = `https://${await getGitHubInstanceApiUrl(appConnection)}${path}`;

const baseUrl = `https://${await getGitHubInstanceApiUrl(appConnection)}${path}`;
const initialUrlObj = new URL(baseUrl);
initialUrlObj.searchParams.set("per_page", "100");

let results: T[] = [];
let i = 0;
const maxIterations = 1000;

while (url && i < 1000) {
// eslint-disable-next-line no-await-in-loop
const response: AxiosResponse<R> = await requestWithGitHubGateway<R>(appConnection, gatewayService, {
url,
method: "GET",
headers: {
Accept: "application/vnd.github+json",
Authorization: `Bearer ${token}`,
"X-GitHub-Api-Version": "2022-11-28"
}
});
// Make initial request to get link header
const firstResponse: AxiosResponse<R> = await requestWithGitHubGateway<R>(appConnection, gatewayService, {
url: initialUrlObj.toString(),
method: "GET",
headers: {
Accept: "application/vnd.github+json",
Authorization: `Bearer ${token}`,
"X-GitHub-Api-Version": "2022-11-28"
}
});

const items = dataMapper ? dataMapper(response.data) : (response.data as unknown as T[]);
results = results.concat(items);
const firstPageItems = dataMapper ? dataMapper(firstResponse.data) : (firstResponse.data as unknown as T[]);
results = results.concat(firstPageItems);

url = extractNextPageUrl(response.headers.link as string | undefined);
i += 1;
const linkHeader = parseGitHubLinkHeader(firstResponse.headers.link as string | undefined);
const lastPageUrl = linkHeader.last;

// If there's a last page URL, get its page number and concurrently fetch every page starting from 2 to last
if (lastPageUrl) {
const lastPageParam = new URL(lastPageUrl).searchParams.get("page");
const totalPages = lastPageParam ? parseInt(lastPageParam, 10) : 1;

const pageRequests: Promise<AxiosResponse<R>>[] = [];

for (let pageNum = 2; pageNum <= totalPages && pageNum - 1 < maxIterations; pageNum += 1) {
const pageUrlObj = new URL(initialUrlObj.toString());
pageUrlObj.searchParams.set("page", pageNum.toString());

pageRequests.push(
requestWithGitHubGateway<R>(appConnection, gatewayService, {
url: pageUrlObj.toString(),
method: "GET",
headers: {
Accept: "application/vnd.github+json",
Authorization: `Bearer ${token}`,
"X-GitHub-Api-Version": "2022-11-28"
}
})
);
}
const responses = await Promise.all(pageRequests);

for (const response of responses) {
const items = dataMapper ? dataMapper(response.data) : (response.data as unknown as T[]);
results = results.concat(items);
}
} else {
// Fallback in case last link isn't present
let url: string | null = extractNextPageUrl(firstResponse.headers.link as string | undefined);
let i = 1;

while (url && i < maxIterations) {
// eslint-disable-next-line no-await-in-loop
const response: AxiosResponse<R> = await requestWithGitHubGateway<R>(appConnection, gatewayService, {
url,
method: "GET",
headers: {
Accept: "application/vnd.github+json",
Authorization: `Bearer ${token}`,
"X-GitHub-Api-Version": "2022-11-28"
}
});

const items = dataMapper ? dataMapper(response.data) : (response.data as unknown as T[]);
results = results.concat(items);

url = extractNextPageUrl(response.headers.link as string | undefined);
i += 1;
}
}

return results;
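The pagination rewrite keys off GitHub's `Link` response header to discover the last page and fan out requests concurrently. Here is the parser from the hunk lifted into a runnable form (`re2` as in the diff; the sample header value is illustrative):

```ts
import RE2 from "re2";

// Standalone version of the Link-header parser above.
const parseGitHubLinkHeader = (linkHeader: string | undefined): Record<string, string> => {
  if (!linkHeader) return {};
  const links: Record<string, string> = {};
  const re = new RE2(/<([^>]+)>;\s*rel="([^"]+)"/);
  for (const segment of linkHeader.split(",")) {
    const match = re.exec(segment.trim());
    if (match) links[match[2]] = match[1]; // rel -> url
  }
  return links;
};

const header = `<https://api.github.com/repos?page=2>; rel="next", <https://api.github.com/repos?page=9>; rel="last"`;
console.log(parseGitHubLinkHeader(header).last); // https://api.github.com/repos?page=9
```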
@@ -462,6 +462,54 @@ export const buildTeamsPayload = (notification: TNotification) => {
};
}

case TriggerFeature.ACCESS_REQUEST_UPDATED: {
const { payload } = notification;

const adaptiveCard = {
type: "AdaptiveCard",
$schema: "http://adaptivecards.io/schemas/adaptive-card.json",
version: "1.5",
body: [
{
type: "TextBlock",
text: "Updated access approval request pending for review",
weight: "Bolder",
size: "Large"
},
{
type: "TextBlock",
text: `${payload.editorFullName} (${payload.editorEmail}) has updated the ${
payload.isTemporary ? "temporary" : "permanent"
} access request from ${payload.requesterFullName} (${payload.requesterEmail}) to ${payload.secretPath} in the ${payload.environment} environment of ${payload.projectName}.`,
wrap: true
},
{
type: "TextBlock",
text: `The following permissions are requested: ${payload.permissions.join(", ")}`,
wrap: true
},
payload.editNote
? {
type: "TextBlock",
text: `**Editor Note**: ${payload.editNote}`,
wrap: true
}
: null
].filter(Boolean),
actions: [
{
type: "Action.OpenUrl",
title: "View request in Infisical",
url: payload.approvalUrl
}
]
};

return {
adaptiveCard
};
}

default: {
throw new BadRequestError({
message: "Teams notification type not supported."
@@ -6,8 +6,6 @@ import { TPermissionServiceFactory } from "@app/ee/services/permission/permissio
import { BadRequestError, NotFoundError } from "@app/lib/errors";

import { TProjectDALFactory } from "../project/project-dal";
import { TProjectBotDALFactory } from "../project-bot/project-bot-dal";
import { TProjectKeyDALFactory } from "../project-key/project-key-dal";
import { TProjectMembershipDALFactory } from "../project-membership/project-membership-dal";
import { TProjectUserMembershipRoleDALFactory } from "../project-membership/project-user-membership-role-dal";
import { SmtpTemplates, TSmtpService } from "../smtp/smtp-service";

@@ -20,8 +18,6 @@ type TOrgAdminServiceFactoryDep = {
TProjectMembershipDALFactory,
"findOne" | "create" | "transaction" | "delete" | "findAllProjectMembers"
>;
projectKeyDAL: Pick<TProjectKeyDALFactory, "findLatestProjectKey" | "create">;
projectBotDAL: Pick<TProjectBotDALFactory, "findOne">;
projectUserMembershipRoleDAL: Pick<TProjectUserMembershipRoleDALFactory, "create" | "delete">;
smtpService: Pick<TSmtpService, "sendMail">;
};

@@ -32,8 +28,6 @@ export const orgAdminServiceFactory = ({
permissionService,
projectDAL,
projectMembershipDAL,
projectKeyDAL,
projectBotDAL,
projectUserMembershipRoleDAL,
smtpService
}: TOrgAdminServiceFactoryDep) => {

@@ -119,28 +113,6 @@ export const orgAdminServiceFactory = ({
return { isExistingMember: true, membership: projectMembership };
}

// missing membership thus add admin back as admin to project
const ghostUser = await projectDAL.findProjectGhostUser(projectId);
if (!ghostUser) {
throw new NotFoundError({
message: `Project owner of project with ID '${projectId}' not found`
});
}

const ghostUserLatestKey = await projectKeyDAL.findLatestProjectKey(ghostUser.id, projectId);
if (!ghostUserLatestKey) {
throw new NotFoundError({
message: `Project owner's latest key of project with ID '${projectId}' not found`
});
}

const bot = await projectBotDAL.findOne({ projectId });
if (!bot) {
throw new NotFoundError({
message: `Project bot for project with ID '${projectId}' not found`
});
}

const updatedMembership = await projectMembershipDAL.transaction(async (tx) => {
const newProjectMembership = await projectMembershipDAL.create(
{
@@ -630,6 +630,25 @@ export const orgDALFactory = (db: TDbClient) => {
}
};

const findIdentityOrganization = async (
identityId: string
): Promise<{ id: string; name: string; slug: string; role: string }> => {
try {
const org = await db
.replicaNode()(TableName.IdentityOrgMembership)
.where({ identityId })
.join(TableName.Organization, `${TableName.IdentityOrgMembership}.orgId`, `${TableName.Organization}.id`)
.select(db.ref("id").withSchema(TableName.Organization).as("id"))
.select(db.ref("name").withSchema(TableName.Organization).as("name"))
.select(db.ref("slug").withSchema(TableName.Organization).as("slug"))
.select(db.ref("role").withSchema(TableName.IdentityOrgMembership).as("role"));

return org?.[0];
} catch (error) {
throw new DatabaseError({ error, name: "Find identity organization" });
}
};

return withTransaction(db, {
...orgOrm,
findOrgByProjectId,

@@ -652,6 +671,7 @@ export const orgDALFactory = (db: TDbClient) => {
updateMembershipById,
deleteMembershipById,
deleteMembershipsById,
updateMembership
updateMembership,
findIdentityOrganization
});
};
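For readers less familiar with knex, a standalone sketch of the join above. The table names are assumptions (the real values live behind the `TableName` enum), and the replica-node indirection is dropped:

```ts
import knex from "knex";

const db = knex({ client: "pg", connection: process.env.DATABASE_URL });

// Join the identity's org membership to the organizations table and pull selected columns.
const findIdentityOrganization = async (identityId: string) =>
  db("identity_org_memberships") // assumed table name
    .where({ identityId })
    .join("organizations", "identity_org_memberships.orgId", "organizations.id") // assumed table name
    .select(
      db.ref("id").withSchema("organizations"),
      db.ref("name").withSchema("organizations"),
      db.ref("slug").withSchema("organizations"),
      db.ref("role").withSchema("identity_org_memberships")
    )
    .first();
```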
@@ -198,6 +198,15 @@ export const orgServiceFactory = ({
// Filter out orgs where the membership object is an invitation
return orgs.filter((org) => org.userStatus !== "invited");
};

/*
* Get the organization an identity is part of
* */
const findIdentityOrganization = async (identityId: string) => {
const org = await orgDAL.findIdentityOrganization(identityId);

return org;
};
/*
* Get all workspace members
* */

@@ -1403,6 +1412,7 @@ export const orgServiceFactory = ({
findOrganizationById,
findAllOrgMembers,
findAllOrganizationOfUser,
findIdentityOrganization,
inviteUserToOrganization,
verifyUserToOrg,
updateOrg,
@@ -177,6 +177,18 @@ export const projectEnvServiceFactory = ({
}
}

const envs = await projectEnvDAL.find({ projectId });
const project = await projectDAL.findById(projectId);
const plan = await licenseService.getPlan(project.orgId);
if (plan.environmentLimit !== null && envs.length > plan.environmentLimit) {
// case: limit imposed on number of environments allowed
// case: number of environments used exceeds the number of environments allowed
throw new BadRequestError({
message:
"Failed to update environment due to environment limit exceeded. To update an environment, please upgrade your plan or remove unused environments."
});
}

const env = await projectEnvDAL.transaction(async (tx) => {
if (position) {
const existingEnvWithPosition = await projectEnvDAL.findOne({ projectId, position }, tx);
@@ -1,5 +1,6 @@
import { TAuditLogDALFactory } from "@app/ee/services/audit-log/audit-log-dal";
import { TSnapshotDALFactory } from "@app/ee/services/secret-snapshot/snapshot-dal";
import { getConfig } from "@app/lib/config/env";
import { logger } from "@app/lib/logger";
import { QueueJobs, QueueName, TQueueServiceFactory } from "@app/queue";

@@ -41,32 +42,19 @@ export const dailyResourceCleanUpQueueServiceFactory = ({
serviceTokenService,
orgService
}: TDailyResourceCleanUpQueueServiceFactoryDep) => {
queueService.start(QueueName.DailyResourceCleanUp, async () => {
logger.info(`${QueueName.DailyResourceCleanUp}: queue task started`);
await identityAccessTokenDAL.removeExpiredTokens();
await identityUniversalAuthClientSecretDAL.removeExpiredClientSecrets();
await secretSharingDAL.pruneExpiredSharedSecrets();
await secretSharingDAL.pruneExpiredSecretRequests();
await snapshotDAL.pruneExcessSnapshots();
await secretVersionDAL.pruneExcessVersions();
await secretVersionV2DAL.pruneExcessVersions();
await secretFolderVersionDAL.pruneExcessVersions();
await serviceTokenService.notifyExpiringTokens();
await orgService.notifyInvitedUsers();
await auditLogDAL.pruneAuditLog();
logger.info(`${QueueName.DailyResourceCleanUp}: queue task completed`);
});
const appCfg = getConfig();

// we do a repeat cron job in utc timezone at 12 Midnight each day
const startCleanUp = async () => {
// TODO(akhilmhdh): remove later
if (appCfg.isDailyResourceCleanUpDevelopmentMode) {
logger.warn("Daily Resource Clean Up is in development mode.");
}

const init = async () => {
await queueService.stopRepeatableJob(
QueueName.AuditLogPrune,
QueueJobs.AuditLogPrune,
{ pattern: "0 0 * * *", utc: true },
QueueName.AuditLogPrune // just a job id
);
// clear previous job
await queueService.stopRepeatableJob(
QueueName.DailyResourceCleanUp,
QueueJobs.DailyResourceCleanUp,

@@ -74,18 +62,43 @@ export const dailyResourceCleanUpQueueServiceFactory = ({
QueueName.DailyResourceCleanUp // just a job id
);

await queueService.queue(QueueName.DailyResourceCleanUp, QueueJobs.DailyResourceCleanUp, undefined, {
delay: 5000,
jobId: QueueName.DailyResourceCleanUp,
repeat: { pattern: "0 0 * * *", utc: true }
});
await queueService.startPg<QueueName.DailyResourceCleanUp>(
QueueJobs.DailyResourceCleanUp,
async () => {
try {
logger.info(`${QueueName.DailyResourceCleanUp}: queue task started`);
await identityAccessTokenDAL.removeExpiredTokens();
await identityUniversalAuthClientSecretDAL.removeExpiredClientSecrets();
await secretSharingDAL.pruneExpiredSharedSecrets();
await secretSharingDAL.pruneExpiredSecretRequests();
await snapshotDAL.pruneExcessSnapshots();
await secretVersionDAL.pruneExcessVersions();
await secretVersionV2DAL.pruneExcessVersions();
await secretFolderVersionDAL.pruneExcessVersions();
await serviceTokenService.notifyExpiringTokens();
await orgService.notifyInvitedUsers();
await auditLogDAL.pruneAuditLog();
logger.info(`${QueueName.DailyResourceCleanUp}: queue task completed`);
} catch (error) {
logger.error(error, `${QueueName.DailyResourceCleanUp}: resource cleanup failed`);
throw error;
}
},
{
batchSize: 1,
workerCount: 1,
pollingIntervalSeconds: 1
}
);
await queueService.schedulePg(
QueueJobs.DailyResourceCleanUp,
appCfg.isDailyResourceCleanUpDevelopmentMode ? "*/5 * * * *" : "0 0 * * *",
undefined,
{ tz: "UTC" }
);
};

queueService.listen(QueueName.DailyResourceCleanUp, "failed", (_, err) => {
logger.error(err, `${QueueName.DailyResourceCleanUp}: resource cleanup failed`);
});

return {
startCleanUp
init
};
};
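The cleanup job moves from a repeatable in-memory queue job to the Postgres-backed queue with a cron schedule (every five minutes in development mode, midnight UTC otherwise). If `startPg`/`schedulePg` wrap pg-boss — an assumption, not confirmed by this diff — the underlying pattern looks roughly like:

```ts
import PgBoss from "pg-boss";

const main = async () => {
  const boss = new PgBoss(process.env.DATABASE_URL as string);
  await boss.start();

  // Register the worker, then attach a cron schedule to the same job name.
  await boss.work("daily-resource-cleanup", async () => {
    console.log("cleanup started");
  });
  await boss.schedule("daily-resource-cleanup", "0 0 * * *", undefined, { tz: "UTC" });
};

void main();
```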
@@ -238,8 +238,16 @@ export const secretFolderServiceFactory = ({
return doc;
});

const [folderWithFullPath] = await folderDAL.findSecretPathByFolderIds(projectId, [folder.id]);

if (!folderWithFullPath) {
throw new NotFoundError({
message: `Failed to retrieve path for folder with ID '${folder.id}'`
});
}

await snapshotService.performSnapshot(folder.parentId as string);
return folder;
return { ...folder, path: folderWithFullPath.path };
};

const updateManyFolders = async ({

@@ -496,8 +504,27 @@ export const secretFolderServiceFactory = ({
return doc;
});

const foldersWithFullPaths = await folderDAL.findSecretPathByFolderIds(projectId, [newFolder.id, folder.id]);

const newFolderWithFullPath = foldersWithFullPaths.find((f) => f?.id === newFolder.id);
if (!newFolderWithFullPath) {
throw new NotFoundError({
message: `Failed to retrieve path for folder with ID '${newFolder.id}'`
});
}

const folderWithFullPath = foldersWithFullPaths.find((f) => f?.id === folder.id);
if (!folderWithFullPath) {
throw new NotFoundError({
message: `Failed to retrieve path for folder with ID '${folder.id}'`
});
}

await snapshotService.performSnapshot(newFolder.parentId as string);
return { folder: newFolder, old: folder };
return {
folder: { ...newFolder, path: newFolderWithFullPath.path },
old: { ...folder, path: folderWithFullPath.path }
};
};

const $checkFolderPolicy = async ({
@@ -181,7 +181,13 @@ export const secretImportServiceFactory = ({
projectId,
environmentSlug: environment,
actorId,
actor
actor,
event: {
importMutation: {
secretPath,
environment
}
}
});
}

@@ -356,7 +362,13 @@ export const secretImportServiceFactory = ({
projectId,
environmentSlug: environment,
actor,
actorId
actorId,
event: {
importMutation: {
secretPath,
environment
}
}
});

await secretV2BridgeDAL.invalidateSecretCacheByProjectId(projectId);
@@ -1,4 +1,5 @@
import AWS, { AWSError } from "aws-sdk";
import handlebars from "handlebars";

import { getAwsConnectionConfig } from "@app/services/app-connection/aws/aws-connection-fns";
import { SecretSyncError } from "@app/services/secret-sync/secret-sync-errors";
@@ -34,18 +35,51 @@ const sleep = async () =>
    setTimeout(resolve, 1000);
  });

const getParametersByPath = async (ssm: AWS.SSM, path: string): Promise<TAWSParameterStoreRecord> => {
const getFullPath = ({ path, keySchema, environment }: { path: string; keySchema?: string; environment: string }) => {
  if (!keySchema || !keySchema.includes("/")) return path;

  if (keySchema.startsWith("/")) {
    throw new SecretSyncError({ message: `Key schema cannot contain leading '/'`, shouldRetry: false });
  }

  const keySchemaSegments = handlebars
    .compile(keySchema)({
      environment,
      secretKey: "{{secretKey}}"
    })
    .split("/");

  const pathSegments = keySchemaSegments.slice(0, keySchemaSegments.length - 1);

  if (pathSegments.some((segment) => segment.includes("{{secretKey}}"))) {
    throw new SecretSyncError({
      message: "Key schema cannot contain '/' after {{secretKey}}",
      shouldRetry: false
    });
  }

  return `${path}${pathSegments.join("/")}/`;
};
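In effect, every key-schema segment before the final `{{secretKey}}` segment gets appended to the destination path. A minimal sketch of that mapping, with plain string substitution standing in for handlebars (the helper name is hypothetical):

```ts
// Hypothetical mirror of getFullPath, for illustration only.
const resolveFullPath = (path: string, keySchema: string, environment: string): string => {
  if (!keySchema.includes("/")) return path;
  // Substitute the environment, keep {{secretKey}} as-is, then split on '/'.
  const segments = keySchema.replaceAll("{{environment}}", environment).split("/");
  const pathSegments = segments.slice(0, -1); // everything before the {{secretKey}} segment
  return `${path}${pathSegments.join("/")}/`;
};

// resolveFullPath("/infisical/", "{{environment}}/app/{{secretKey}}", "prod")
// => "/infisical/prod/app/"
```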
const getParametersByPath = async (
  ssm: AWS.SSM,
  path: string,
  keySchema: string | undefined,
  environment: string
): Promise<TAWSParameterStoreRecord> => {
  const awsParameterStoreSecretsRecord: TAWSParameterStoreRecord = {};
  let hasNext = true;
  let nextToken: string | undefined;
  let attempt = 0;

  const fullPath = getFullPath({ path, keySchema, environment });

  while (hasNext) {
    try {
      // eslint-disable-next-line no-await-in-loop
      const parameters = await ssm
        .getParametersByPath({
          Path: path,
          Path: fullPath,
          Recursive: false,
          WithDecryption: true,
          MaxResults: BATCH_SIZE,
@@ -59,7 +93,7 @@ const getParametersByPath = async (ssm: AWS.SSM, path: string): Promise<TAWSPara
      parameters.Parameters.forEach((parameter) => {
        if (parameter.Name) {
          // no leading slash if path is '/'
          const secKey = path.length > 1 ? parameter.Name.substring(path.length) : parameter.Name;
          const secKey = fullPath.length > 1 ? parameter.Name.substring(path.length) : parameter.Name;
          awsParameterStoreSecretsRecord[secKey] = parameter;
        }
      });
@@ -83,12 +117,19 @@ const getParametersByPath = async (ssm: AWS.SSM, path: string): Promise<TAWSPara
  return awsParameterStoreSecretsRecord;
};

const getParameterMetadataByPath = async (ssm: AWS.SSM, path: string): Promise<TAWSParameterStoreMetadataRecord> => {
const getParameterMetadataByPath = async (
  ssm: AWS.SSM,
  path: string,
  keySchema: string | undefined,
  environment: string
): Promise<TAWSParameterStoreMetadataRecord> => {
  const awsParameterStoreMetadataRecord: TAWSParameterStoreMetadataRecord = {};
  let hasNext = true;
  let nextToken: string | undefined;
  let attempt = 0;

  const fullPath = getFullPath({ path, keySchema, environment });

  while (hasNext) {
    try {
      // eslint-disable-next-line no-await-in-loop
@@ -100,7 +141,7 @@ const getParameterMetadataByPath = async (ssm: AWS.SSM, path: string): Promise<T
          {
            Key: "Path",
            Option: "OneLevel",
            Values: [path]
            Values: [fullPath]
          }
        ]
      })
@@ -112,7 +153,7 @@ const getParameterMetadataByPath = async (ssm: AWS.SSM, path: string): Promise<T
      parameters.Parameters.forEach((parameter) => {
        if (parameter.Name) {
          // no leading slash if path is '/'
          const secKey = path.length > 1 ? parameter.Name.substring(path.length) : parameter.Name;
          const secKey = fullPath.length > 1 ? parameter.Name.substring(path.length) : parameter.Name;
          awsParameterStoreMetadataRecord[secKey] = parameter;
        }
      });
@@ -298,9 +339,19 @@ export const AwsParameterStoreSyncFns = {

    const ssm = await getSSM(secretSync);

    const awsParameterStoreSecretsRecord = await getParametersByPath(ssm, destinationConfig.path);
    const awsParameterStoreSecretsRecord = await getParametersByPath(
      ssm,
      destinationConfig.path,
      syncOptions.keySchema,
      environment!.slug
    );

    const awsParameterStoreMetadataRecord = await getParameterMetadataByPath(ssm, destinationConfig.path);
    const awsParameterStoreMetadataRecord = await getParameterMetadataByPath(
      ssm,
      destinationConfig.path,
      syncOptions.keySchema,
      environment!.slug
    );

    const { shouldManageTags, awsParameterStoreTagsRecord } = await getParameterStoreTagsRecord(
      ssm,
@@ -400,22 +451,32 @@ export const AwsParameterStoreSyncFns = {
    await deleteParametersBatch(ssm, parametersToDelete);
  },
  getSecrets: async (secretSync: TAwsParameterStoreSyncWithCredentials): Promise<TSecretMap> => {
    const { destinationConfig } = secretSync;
    const { destinationConfig, syncOptions, environment } = secretSync;

    const ssm = await getSSM(secretSync);

    const awsParameterStoreSecretsRecord = await getParametersByPath(ssm, destinationConfig.path);
    const awsParameterStoreSecretsRecord = await getParametersByPath(
      ssm,
      destinationConfig.path,
      syncOptions.keySchema,
      environment!.slug
    );

    return Object.fromEntries(
      Object.entries(awsParameterStoreSecretsRecord).map(([key, value]) => [key, { value: value.Value ?? "" }])
    );
  },
  removeSecrets: async (secretSync: TAwsParameterStoreSyncWithCredentials, secretMap: TSecretMap) => {
    const { destinationConfig } = secretSync;
    const { destinationConfig, syncOptions, environment } = secretSync;

    const ssm = await getSSM(secretSync);

    const awsParameterStoreSecretsRecord = await getParametersByPath(ssm, destinationConfig.path);
    const awsParameterStoreSecretsRecord = await getParametersByPath(
      ssm,
      destinationConfig.path,
      syncOptions.keySchema,
      environment!.slug
    );

    const parametersToDelete: AWS.SSM.Parameter[] = [];

@@ -386,7 +386,15 @@ export const secretV2BridgeServiceFactory = ({
        actorId,
        actor,
        projectId,
        environmentSlug: folder.environment.slug
        environmentSlug: folder.environment.slug,
        event: {
          created: {
            secretId: secret.id,
            environment: folder.environment.slug,
            secretKey: secret.key,
            secretPath
          }
        }
      });
    }

@@ -616,7 +624,15 @@ export const secretV2BridgeServiceFactory = ({
        actor,
        projectId,
        orgId: actorOrgId,
        environmentSlug: folder.environment.slug
        environmentSlug: folder.environment.slug,
        event: {
          updated: {
            secretId: secret.id,
            environment: folder.environment.slug,
            secretKey: secret.key,
            secretPath
          }
        }
      });
    }

@@ -728,7 +744,15 @@ export const secretV2BridgeServiceFactory = ({
        actor,
        projectId,
        orgId: actorOrgId,
        environmentSlug: folder.environment.slug
        environmentSlug: folder.environment.slug,
        event: {
          deleted: {
            secretId: secretToDelete.id,
            environment: folder.environment.slug,
            secretKey: secretToDelete.key,
            secretPath
          }
        }
      });
    }

@@ -1708,7 +1732,15 @@ export const secretV2BridgeServiceFactory = ({
      secretPath,
      projectId,
      orgId: actorOrgId,
      environmentSlug: folder.environment.slug
      environmentSlug: folder.environment.slug,
      event: {
        created: newSecrets.map((el) => ({
          secretId: el.id,
          secretKey: el.key,
          secretPath,
          environment: folder.environment.slug
        }))
      }
    });

    return newSecrets.map((el) => {
@@ -2075,7 +2107,15 @@ export const secretV2BridgeServiceFactory = ({
            secretPath: el.path,
            projectId,
            orgId: actorOrgId,
            environmentSlug: environment
            environmentSlug: environment,
            event: {
              updated: updatedSecrets.map((sec) => ({
                secretId: sec.id,
                secretKey: sec.key,
                secretPath: sec.secretPath,
                environment
              }))
            }
          })
        : undefined
    )
@@ -2214,7 +2254,15 @@ export const secretV2BridgeServiceFactory = ({
      secretPath,
      projectId,
      orgId: actorOrgId,
      environmentSlug: folder.environment.slug
      environmentSlug: folder.environment.slug,
      event: {
        deleted: secretsDeleted.map((el) => ({
          secretId: el.id,
          secretKey: el.key,
          secretPath,
          environment: folder.environment.slug
        }))
      }
    });

    const { decryptor: secretManagerDecryptor } = await kmsService.createCipherPairWithDataKey({
@@ -2751,7 +2799,13 @@ export const secretV2BridgeServiceFactory = ({
        secretPath: destinationFolder.path,
        environmentSlug: destinationFolder.environment.slug,
        actorId,
        actor
        actor,
        event: {
          importMutation: {
            secretPath: sourceFolder.path,
            environment: sourceFolder.environment.slug
          }
        }
      });
    }

@@ -2763,7 +2817,13 @@ export const secretV2BridgeServiceFactory = ({
        secretPath: sourceFolder.path,
        environmentSlug: sourceFolder.environment.slug,
        actorId,
        actor
        actor,
        event: {
          importMutation: {
            secretPath: sourceFolder.path,
            environment: sourceFolder.environment.slug
          }
        }
      });
    }

@@ -5,6 +5,7 @@ import { Knex } from "knex";

import {
  ProjectMembershipRole,
  ProjectType,
  ProjectUpgradeStatus,
  ProjectVersion,
  SecretType,
@@ -12,6 +13,9 @@ import {
  TSecretVersionsV2
} from "@app/db/schemas";
import { Actor, EventType, TAuditLogServiceFactory } from "@app/ee/services/audit-log/audit-log-types";
import { TEventBusService } from "@app/ee/services/event/event-bus-service";
import { BusEventName, PublishableEvent, TopicName } from "@app/ee/services/event/types";
import { TLicenseServiceFactory } from "@app/ee/services/license/license-service";
import { TSecretApprovalRequestDALFactory } from "@app/ee/services/secret-approval-request/secret-approval-request-dal";
import { TSecretRotationDALFactory } from "@app/ee/services/secret-rotation/secret-rotation-dal";
import { TSnapshotDALFactory } from "@app/ee/services/secret-snapshot/snapshot-dal";
@@ -111,6 +115,8 @@ type TSecretQueueFactoryDep = {
  folderCommitService: Pick<TFolderCommitServiceFactory, "createCommit">;
  secretSyncQueue: Pick<TSecretSyncQueueFactory, "queueSecretSyncsSyncSecretsByPath">;
  reminderService: Pick<TReminderServiceFactory, "createReminderInternal" | "deleteReminderBySecretId">;
  eventBusService: TEventBusService;
  licenseService: Pick<TLicenseServiceFactory, "getPlan">;
};

export type TGetSecrets = {
@@ -172,7 +178,9 @@ export const secretQueueFactory = ({
  resourceMetadataDAL,
  secretSyncQueue,
  folderCommitService,
  reminderService
  reminderService,
  eventBusService,
  licenseService
}: TSecretQueueFactoryDep) => {
  const integrationMeter = opentelemetry.metrics.getMeter("Integrations");
  const errorHistogram = integrationMeter.createHistogram("integration_secret_sync_errors", {
@@ -534,17 +542,70 @@ export const secretQueueFactory = ({
    });
  };

  const publishEvents = async (event: PublishableEvent) => {
    if (event.created) {
      await eventBusService.publish(TopicName.CoreServers, {
        type: ProjectType.SecretManager,
        source: "infisical",
        data: {
          event: BusEventName.CreateSecret,
          payload: event.created
        }
      });
    }

    if (event.updated) {
      await eventBusService.publish(TopicName.CoreServers, {
        type: ProjectType.SecretManager,
        source: "infisical",
        data: {
          event: BusEventName.UpdateSecret,
          payload: event.updated
        }
      });
    }

    if (event.deleted) {
      await eventBusService.publish(TopicName.CoreServers, {
        type: ProjectType.SecretManager,
        source: "infisical",
        data: {
          event: BusEventName.DeleteSecret,
          payload: event.deleted
        }
      });
    }

    if (event.importMutation) {
      await eventBusService.publish(TopicName.CoreServers, {
        type: ProjectType.SecretManager,
        source: "infisical",
        data: {
          event: BusEventName.ImportMutation,
          payload: event.importMutation
        }
      });
    }
  };
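A hypothetical call site, for orientation: one `PublishableEvent` may carry several kinds at once, and `publishEvents` emits a separate bus message per populated field (all payload values below are placeholders; the shapes follow the event payloads built earlier in this diff):

```ts
// Placeholder values; one bus message is published per populated field.
await publishEvents({
  created: {
    secretId: "sec_123",
    secretKey: "DATABASE_URL",
    secretPath: "/",
    environment: "dev"
  },
  importMutation: { secretPath: "/", environment: "dev" }
});
```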
  const syncSecrets = async <T extends boolean = false>({
    // separate de-dupe queue for integration sync and replication sync
    _deDupeQueue: deDupeQueue = {},
    _depth: depth = 0,
    _deDupeReplicationQueue: deDupeReplicationQueue = {},
    event,
    ...dto
  }: TSyncSecretsDTO<T>) => {
  }: TSyncSecretsDTO<T> & { event?: PublishableEvent }) => {
    logger.info(
      `syncSecrets: syncing project secrets where [projectId=${dto.projectId}] [environment=${dto.environmentSlug}] [path=${dto.secretPath}]`
    );

    const plan = await licenseService.getPlan(dto.orgId);

    if (event && plan.eventSubscriptions) {
      await publishEvents(event);
    }

    const deDuplicationKey = uniqueSecretQueueKey(dto.environmentSlug, dto.secretPath);
    if (
      !dto.excludeReplication
@@ -565,7 +626,7 @@ export const secretQueueFactory = ({
        _deDupeQueue: deDupeQueue,
        _deDupeReplicationQueue: deDupeReplicationQueue,
        _depth: depth
      } as TSyncSecretsDTO,
      } as unknown as TSyncSecretsDTO,
      {
        removeOnFail: true,
        removeOnComplete: true,
@@ -689,6 +750,7 @@ export const secretQueueFactory = ({
      isManual,
      projectId,
      secretPath,

      depth = 1,
      deDupeQueue = {}
    } = job.data as TIntegrationSyncPayload;
@@ -738,7 +800,13 @@ export const secretQueueFactory = ({
            environmentSlug: foldersGroupedById[folderId][0]?.environmentSlug as string,
            _deDupeQueue: deDupeQueue,
            _depth: depth + 1,
            excludeReplication: true
            excludeReplication: true,
            event: {
              importMutation: {
                secretPath: foldersGroupedById[folderId][0]?.path as string,
                environment: foldersGroupedById[folderId][0]?.environmentSlug as string
              }
            }
          })
        )
      );
@@ -791,7 +859,13 @@ export const secretQueueFactory = ({
            environmentSlug: referencedFoldersGroupedById[folderId][0]?.environmentSlug as string,
            _deDupeQueue: deDupeQueue,
            _depth: depth + 1,
            excludeReplication: true
            excludeReplication: true,
            event: {
              importMutation: {
                secretPath: referencedFoldersGroupedById[folderId][0]?.path as string,
                environment: referencedFoldersGroupedById[folderId][0]?.environmentSlug as string
              }
            }
          })
        )
      );
@@ -115,6 +115,44 @@ User Note: ${payload.note}`
        payloadBlocks
      };
    }
    case TriggerFeature.ACCESS_REQUEST_UPDATED: {
      const { payload } = notification;
      const messageBody = `${payload.editorFullName} (${payload.editorEmail}) has updated the ${
        payload.isTemporary ? "temporary" : "permanent"
      } access request from ${payload.requesterFullName} (${payload.requesterEmail}) to ${payload.secretPath} in the ${payload.environment} environment of ${payload.projectName}.

The following permissions are requested: ${payload.permissions.join(", ")}

View the request and approve or deny it <${payload.approvalUrl}|here>.${
        payload.editNote
          ? `
Editor Note: ${payload.editNote}`
          : ""
      }`;

      const payloadBlocks = [
        {
          type: "header",
          text: {
            type: "plain_text",
            text: "Updated access approval request pending for review",
            emoji: true
          }
        },
        {
          type: "section",
          text: {
            type: "mrkdwn",
            text: messageBody
          }
        }
      ];

      return {
        payloadMessage: messageBody,
        payloadBlocks
      };
    }
    default: {
      throw new BadRequestError({
        message: "Slack notification type not supported."
@@ -0,0 +1,95 @@
import { Heading, Section, Text } from "@react-email/components";
import React from "react";

import { BaseButton } from "./BaseButton";
import { BaseEmailWrapper, BaseEmailWrapperProps } from "./BaseEmailWrapper";
import { BaseLink } from "./BaseLink";

interface AccessApprovalRequestUpdatedTemplateProps
  extends Omit<BaseEmailWrapperProps, "title" | "preview" | "children"> {
  projectName: string;
  requesterFullName: string;
  requesterEmail: string;
  isTemporary: boolean;
  secretPath: string;
  environment: string;
  expiresIn: string;
  permissions: string[];
  editNote: string;
  editorFullName: string;
  editorEmail: string;
  approvalUrl: string;
}

export const AccessApprovalRequestUpdatedTemplate = ({
  projectName,
  siteUrl,
  requesterFullName,
  requesterEmail,
  isTemporary,
  secretPath,
  environment,
  expiresIn,
  permissions,
  editNote,
  editorEmail,
  editorFullName,
  approvalUrl
}: AccessApprovalRequestUpdatedTemplateProps) => {
  return (
    <BaseEmailWrapper
      title="Access Approval Request Update"
      preview="An access approval request was updated and requires your review."
      siteUrl={siteUrl}
    >
      <Heading className="text-black text-[18px] leading-[28px] text-center font-normal p-0 mx-0">
        An access approval request was updated and is pending your review for the project <strong>{projectName}</strong>
      </Heading>
      <Section className="px-[24px] mb-[28px] mt-[36px] pt-[12px] pb-[8px] border border-solid border-gray-200 rounded-md bg-gray-50">
        <Text className="text-black text-[14px] leading-[24px]">
          <strong>{editorFullName}</strong> (<BaseLink href={`mailto:${editorEmail}`}>{editorEmail}</BaseLink>) has
          updated the access request submitted by <strong>{requesterFullName}</strong> (
          <BaseLink href={`mailto:${requesterEmail}`}>{requesterEmail}</BaseLink>) for <strong>{secretPath}</strong> in
          the <strong>{environment}</strong> environment.
        </Text>

        {isTemporary && (
          <Text className="text-[14px] text-red-600 leading-[24px]">
            <strong>This access will expire {expiresIn} after approval.</strong>
          </Text>
        )}
        <Text className="text-[14px] leading-[24px] mb-[4px]">
          <strong>The following permissions are requested:</strong>
        </Text>
        {permissions.map((permission) => (
          <Text key={permission} className="text-[14px] my-[2px] leading-[24px]">
            - {permission}
          </Text>
        ))}
        <Text className="text-[14px] text-slate-700 leading-[24px]">
          <strong className="text-black">Editor Note:</strong> "{editNote}"
        </Text>
      </Section>
      <Section className="text-center">
        <BaseButton href={approvalUrl}>Review Request</BaseButton>
      </Section>
    </BaseEmailWrapper>
  );
};

export default AccessApprovalRequestUpdatedTemplate;

AccessApprovalRequestUpdatedTemplate.PreviewProps = {
  requesterFullName: "Abigail Williams",
  requesterEmail: "abigail@infisical.com",
  isTemporary: true,
  secretPath: "/api/secrets",
  environment: "Production",
  siteUrl: "https://infisical.com",
  projectName: "Example Project",
  expiresIn: "1 day",
  permissions: ["Read Secret", "Delete Project", "Create Dynamic Secret"],
  editNote: "Too permissive, they only need 3 days",
  editorEmail: "john@infisical.com",
  editorFullName: "John Smith"
} as AccessApprovalRequestUpdatedTemplateProps;
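To eyeball the template outside the SMTP pipeline, one option is rendering it with `@react-email/render`, the companion package to `@react-email/components` used above (a sketch; the exact `render` signature varies between versions, with newer releases returning a promise):

```tsx
import { render } from "@react-email/render";

// Render the template to an HTML string using its own PreviewProps.
const html = await render(
  <AccessApprovalRequestUpdatedTemplate {...AccessApprovalRequestUpdatedTemplate.PreviewProps} />
);
console.log(html.slice(0, 120));
```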
@@ -1,4 +1,5 @@
export * from "./AccessApprovalRequestTemplate";
export * from "./AccessApprovalRequestUpdatedTemplate";
export * from "./EmailMfaTemplate";
export * from "./EmailVerificationTemplate";
export * from "./ExternalImportFailedTemplate";

@@ -8,6 +8,7 @@ import { logger } from "@app/lib/logger";

import {
  AccessApprovalRequestTemplate,
  AccessApprovalRequestUpdatedTemplate,
  EmailMfaTemplate,
  EmailVerificationTemplate,
  ExternalImportFailedTemplate,
@@ -54,6 +55,7 @@ export enum SmtpTemplates {
  EmailMfa = "emailMfa",
  UnlockAccount = "unlockAccount",
  AccessApprovalRequest = "accessApprovalRequest",
  AccessApprovalRequestUpdated = "accessApprovalRequestUpdated",
  AccessSecretRequestBypassed = "accessSecretRequestBypassed",
  SecretApprovalRequestNeedsReview = "secretApprovalRequestNeedsReview",
  // HistoricalSecretList = "historicalSecretLeakIncident", not used anymore?
@@ -96,6 +98,7 @@ const EmailTemplateMap: Record<SmtpTemplates, React.FC<any>> = {
  [SmtpTemplates.SignupEmailVerification]: SignupEmailVerificationTemplate,
  [SmtpTemplates.EmailMfa]: EmailMfaTemplate,
  [SmtpTemplates.AccessApprovalRequest]: AccessApprovalRequestTemplate,
  [SmtpTemplates.AccessApprovalRequestUpdated]: AccessApprovalRequestUpdatedTemplate,
  [SmtpTemplates.EmailVerification]: EmailVerificationTemplate,
  [SmtpTemplates.ExternalImportFailed]: ExternalImportFailedTemplate,
  [SmtpTemplates.ExternalImportStarted]: ExternalImportStartedTemplate,
@@ -11,7 +11,6 @@ import {
  validateOverrides
} from "@app/lib/config/env";
import { crypto } from "@app/lib/crypto/cryptography";
import { generateUserSrpKeys, getUserPrivateKey } from "@app/lib/crypto/srp";
import { BadRequestError, NotFoundError } from "@app/lib/errors";
import { logger } from "@app/lib/logger";
import { TIdentityDALFactory } from "@app/services/identity/identity-dal";
@@ -465,43 +464,15 @@ export const superAdminServiceFactory = ({
    return updatedServerCfg;
  };

  const adminSignUp = async ({
    lastName,
    firstName,
    email,
    salt,
    password,
    verifier,
    publicKey,
    protectedKey,
    protectedKeyIV,
    protectedKeyTag,
    encryptedPrivateKey,
    encryptedPrivateKeyIV,
    encryptedPrivateKeyTag,
    ip,
    userAgent
  }: TAdminSignUpDTO) => {
  const adminSignUp = async ({ lastName, firstName, email, password, ip, userAgent }: TAdminSignUpDTO) => {
    const appCfg = getConfig();

    const sanitizedEmail = email.trim().toLowerCase();
    const existingUser = await userDAL.findOne({ username: sanitizedEmail });
    if (existingUser) throw new BadRequestError({ name: "Admin sign up", message: "User already exists" });

    const privateKey = await getUserPrivateKey(password, {
      encryptionVersion: 2,
      salt,
      protectedKey,
      protectedKeyIV,
      protectedKeyTag,
      encryptedPrivateKey,
      iv: encryptedPrivateKeyIV,
      tag: encryptedPrivateKeyTag
    });

    const hashedPassword = await crypto.hashing().createHash(password, appCfg.SALT_ROUNDS);

    const { iv, tag, ciphertext, encoding } = crypto.encryption().symmetric().encryptWithRootEncryptionKey(privateKey);
    const userInfo = await userDAL.transaction(async (tx) => {
      const newUser = await userDAL.create(
        {
@@ -519,25 +490,13 @@ export const superAdminServiceFactory = ({
      );
      const userEnc = await userDAL.createUserEncryption(
        {
          salt,
          encryptionVersion: 2,
          protectedKey,
          protectedKeyIV,
          protectedKeyTag,
          publicKey,
          encryptedPrivateKey,
          iv: encryptedPrivateKeyIV,
          tag: encryptedPrivateKeyTag,
          verifier,
          userId: newUser.id,
          hashedPassword,
          serverEncryptedPrivateKey: ciphertext,
          serverEncryptedPrivateKeyIV: iv,
          serverEncryptedPrivateKeyTag: tag,
          serverEncryptedPrivateKeyEncoding: encoding
          hashedPassword
        },
        tx
      );

      return { user: newUser, enc: userEnc };
    });

@@ -587,26 +546,14 @@ export const superAdminServiceFactory = ({
        },
        tx
      );
      const { tag, encoding, ciphertext, iv } = crypto.encryption().symmetric().encryptWithRootEncryptionKey(password);
      const encKeys = await generateUserSrpKeys(sanitizedEmail, password);

      const hashedPassword = await crypto.hashing().createHash(password, appCfg.SALT_ROUNDS);

      const userEnc = await userDAL.createUserEncryption(
        {
          userId: newUser.id,
          encryptionVersion: 2,
          protectedKey: encKeys.protectedKey,
          protectedKeyIV: encKeys.protectedKeyIV,
          protectedKeyTag: encKeys.protectedKeyTag,
          publicKey: encKeys.publicKey,
          encryptedPrivateKey: encKeys.encryptedPrivateKey,
          iv: encKeys.encryptedPrivateKeyIV,
          tag: encKeys.encryptedPrivateKeyTag,
          salt: encKeys.salt,
          verifier: encKeys.verifier,
          serverEncryptedPrivateKeyEncoding: encoding,
          serverEncryptedPrivateKeyTag: tag,
          serverEncryptedPrivateKeyIV: iv,
          serverEncryptedPrivateKey: ciphertext
          hashedPassword
        },
        tx
      );

@@ -3,17 +3,8 @@ import { TEnvConfig } from "@app/lib/config/env";

export type TAdminSignUpDTO = {
  email: string;
  password: string;
  publicKey: string;
  salt: string;
  lastName?: string;
  verifier: string;
  firstName: string;
  protectedKey: string;
  protectedKeyIV: string;
  protectedKeyTag: string;
  encryptedPrivateKey: string;
  encryptedPrivateKeyIV: string;
  encryptedPrivateKeyTag: string;
  ip: string;
  userAgent: string;
};
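After this change the DTO carries only basic identity fields, since the SRP key material is now derived server-side rather than supplied by the client. A sketch of a valid payload under the slimmed-down type (all values are placeholders):

```ts
const signUpDto: TAdminSignUpDTO = {
  email: "admin@example.com",
  password: "a-strong-passphrase", // placeholder; hashed and used to derive keys server-side
  firstName: "Ada",
  lastName: "Lovelace", // optional
  ip: "203.0.113.7",
  userAgent: "Mozilla/5.0"
};
```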
@@ -5,7 +5,10 @@ export default defineConfig({
  test: {
    globals: true,
    env: {
      NODE_ENV: "test"
      NODE_ENV: "test",
      E2E_TEST_ORACLE_DB_19_HOST: process.env.E2E_TEST_ORACLE_DB_19_HOST!,
      E2E_TEST_ORACLE_DB_19_USERNAME: process.env.E2E_TEST_ORACLE_DB_19_USERNAME!,
      E2E_TEST_ORACLE_DB_19_PASSWORD: process.env.E2E_TEST_ORACLE_DB_19_PASSWORD!
    },
    environment: "./e2e-test/vitest-environment-knex.ts",
    include: ["./e2e-test/**/*.spec.ts"],
docker-compose.e2e-dbs.yml (new file, 157 lines)
@@ -0,0 +1,157 @@
version: '3.8'

services:
  # Oracle Databases
  oracle-db-23.8:
    image: container-registry.oracle.com/database/free:23.8.0.0
    container_name: oracle-db-23.8
    ports:
      - "1521:1521"
    environment:
      - ORACLE_PDB=pdb
      - ORACLE_PWD=pdb-password
    volumes:
      - oracle-data-23.8:/opt/oracle/oradata
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "sqlplus", "-L", "system/pdb-password@//localhost:1521/FREEPDB1", "<<<", "SELECT 1 FROM DUAL;"]
      interval: 10s
      timeout: 10s
      retries: 30
      start_period: 30s

  # MySQL Databases
  mysql-8.4.6:
    image: mysql:8.4.6
    container_name: mysql-8.4.6
    ports:
      - "3306:3306"
    environment:
      - MYSQL_ROOT_PASSWORD=mysql-test
      - MYSQL_DATABASE=mysql-test
      - MYSQL_ROOT_HOST=%
      - MYSQL_USER=mysql-test
      - MYSQL_PASSWORD=mysql-test
    volumes:
      - mysql-data-8.4.6:/var/lib/mysql
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-u", "mysql-test", "-pmysql-test"]
      interval: 10s
      timeout: 10s
      retries: 30
      start_period: 30s

  mysql-8.0.29:
    image: mysql:8.0.29
    container_name: mysql-8.0.29
    ports:
      - "3307:3306"
    environment:
      - MYSQL_ROOT_PASSWORD=mysql-test
      - MYSQL_DATABASE=mysql-test
      - MYSQL_ROOT_HOST=%
      - MYSQL_USER=mysql-test
      - MYSQL_PASSWORD=mysql-test
    volumes:
      - mysql-data-8.0.29:/var/lib/mysql
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-u", "mysql-test", "-pmysql-test"]
      interval: 10s
      timeout: 10s
      retries: 30
      start_period: 30s

  mysql-5.7.31:
    image: mysql:5.7.31
    container_name: mysql-5.7.31
    platform: linux/amd64
    ports:
      - "3308:3306"
    environment:
      - MYSQL_ROOT_PASSWORD=mysql-test
      - MYSQL_DATABASE=mysql-test
      - MYSQL_ROOT_HOST=%
      - MYSQL_USER=mysql-test
      - MYSQL_PASSWORD=mysql-test
    volumes:
      - mysql-data-5.7.31:/var/lib/mysql
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "mysqladmin", "ping", "-h", "localhost", "-u", "mysql-test", "-pmysql-test"]
      interval: 10s
      timeout: 10s
      retries: 30
      start_period: 30s

  # PostgreSQL Databases
  postgres-17:
    image: postgres:17
    platform: linux/amd64
    container_name: postgres-17
    ports:
      - "5433:5432"
    environment:
      - POSTGRES_DB=postgres-test
      - POSTGRES_USER=postgres-test
      - POSTGRES_PASSWORD=postgres-test
    volumes:
      - postgres-data-17:/var/lib/postgresql/data
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres-test -d postgres-test"]
      interval: 10s
      timeout: 10s
      retries: 30
      start_period: 30s

  postgres-16:
    image: postgres:16
    platform: linux/amd64
    container_name: postgres-16
    ports:
      - "5434:5432"
    environment:
      - POSTGRES_DB=postgres-test
      - POSTGRES_USER=postgres-test
      - POSTGRES_PASSWORD=postgres-test
    volumes:
      - postgres-data-16:/var/lib/postgresql/data
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres-test -d postgres-test"]
      interval: 10s
      timeout: 10s
      retries: 30
      start_period: 30s

  postgres-10.12:
    image: postgres:10.12
    platform: linux/amd64
    container_name: postgres-10.12
    ports:
      - "5435:5432"
    environment:
      - POSTGRES_DB=postgres-test
      - POSTGRES_USER=postgres-test
      - POSTGRES_PASSWORD=postgres-test
    volumes:
      - postgres-data-10.12:/var/lib/postgresql/data
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres-test -d postgres-test"]
      interval: 10s
      timeout: 10s
      retries: 30
      start_period: 30s

volumes:
  oracle-data-23.8:
  mysql-data-8.4.6:
  mysql-data-8.0.29:
  mysql-data-5.7.31:
  postgres-data-17:
  postgres-data-16:
  postgres-data-10.12:
@@ -0,0 +1,4 @@
---
title: "Get Project By Slug"
openapi: "GET /api/v2/workspace/{slug}"
---
@@ -41,8 +41,6 @@
        "group": "Platform Reference",
        "pages": [
          "documentation/platform/organization",
          "documentation/platform/event-subscriptions",
          "documentation/platform/folder",
          {
            "group": "Projects",
            "pages": [
@@ -145,6 +143,7 @@
            }
          ]
        },
        "documentation/platform/event-subscriptions",
        {
          "group": "Workflow Integrations",
          "pages": [
@@ -303,6 +302,7 @@
        },
        "self-hosting/guides/upgrading-infisical",
        "self-hosting/configuration/envars",
        "self-hosting/guides/releases",
        "self-hosting/configuration/requirements",
        {
          "group": "Guides",
@@ -388,8 +388,37 @@
        "group": "Secrets Management",
        "pages": [
          "documentation/platform/secrets-mgmt/overview",
          {
            "group": "Concepts",
            "pages": [
              "documentation/platform/secrets-mgmt/concepts/secrets-mgmt",
              "documentation/platform/secrets-mgmt/concepts/access-control",
              "documentation/platform/secrets-mgmt/concepts/secrets-delivery",
              "documentation/platform/secrets-mgmt/concepts/secrets-rotation",
              "documentation/platform/secrets-mgmt/concepts/dynamic-secrets"
            ]
          },
          {
            "group": "Guides",
            "pages": [
              "documentation/guides/introduction",
              "documentation/guides/local-development",
              "documentation/guides/node",
              "documentation/guides/python",
              "documentation/guides/nextjs-vercel",
              "documentation/guides/microsoft-power-apps"
            ]
          }
        ]
      },
      {
        "group": "Product Reference",
        "pages": [
          "documentation/platform/secrets-mgmt/project",
          "documentation/platform/folder",
          "documentation/platform/secret-versioning",
          "documentation/platform/pit-recovery",
          "documentation/platform/secret-reference",
          {
            "group": "Secret Rotation",
            "pages": [
@@ -413,6 +442,7 @@
              "documentation/platform/dynamic-secrets/aws-iam",
              "documentation/platform/dynamic-secrets/azure-entra-id",
              "documentation/platform/dynamic-secrets/cassandra",
              "documentation/platform/dynamic-secrets/couchbase",
              "documentation/platform/dynamic-secrets/elastic-search",
              "documentation/platform/dynamic-secrets/gcp-iam",
              "documentation/platform/dynamic-secrets/github",
@@ -433,17 +463,7 @@
              "documentation/platform/dynamic-secrets/vertica"
            ]
          },
          {
            "group": "Guides",
            "pages": [
              "documentation/guides/introduction",
              "documentation/guides/local-development",
              "documentation/guides/node",
              "documentation/guides/python",
              "documentation/guides/nextjs-vercel",
              "documentation/guides/microsoft-power-apps"
            ]
          }
          "documentation/platform/webhooks"
        ]
      },
      {
@@ -466,7 +486,7 @@
        "group": "Agent",
        "pages": [
          "integrations/platforms/infisical-agent",
          "integrations/platforms/docker-swarm-with-agent",
          "integrations/platforms/docker-swarm-with-agent",
          "integrations/platforms/ecs-with-agent"
        ]
      },
@@ -635,14 +655,21 @@
      "item": "Secrets Scanning",
      "groups": [
        {
          "group": "Secret Scanning",
          "group": "Secrets Scanning",
          "pages": [
            "documentation/platform/secret-scanning/overview"
            "documentation/platform/secret-scanning/overview",
            {
              "group": "Concepts",
              "pages": [
                "documentation/platform/secret-scanning/concepts/secret-scanning"
              ]
            }
          ]
        },
        {
          "group": "Datasources",
          "group": "Product Reference",
          "pages": [
            "documentation/platform/secret-scanning/usage",
            "documentation/platform/secret-scanning/bitbucket",
            "documentation/platform/secret-scanning/github",
            "documentation/platform/secret-scanning/gitlab"
@@ -682,6 +709,18 @@
        "group": "Infisical SSH",
        "pages": [
          "documentation/platform/ssh/overview",
          {
            "group": "Concepts",
            "pages": [
              "documentation/platform/ssh/concepts/ssh-certificates"
            ]
          }
        ]
      },
      {
        "group": "Platform Reference",
        "pages": [
          "documentation/platform/ssh/usage",
          "documentation/platform/ssh/host-groups"
        ]
      }
@@ -961,6 +1000,7 @@
      {
        "group": "Projects",
        "pages": [
          "api-reference/endpoints/workspaces/get-workspace-by-slug",
          "api-reference/endpoints/workspaces/create-workspace",
          "api-reference/endpoints/workspaces/delete-workspace",
          "api-reference/endpoints/workspaces/get-workspace",
docs/documentation/platform/dynamic-secrets/couchbase.mdx (new file, 259 lines)
@@ -0,0 +1,259 @@
---
title: "Couchbase"
description: "Learn how to dynamically generate Couchbase Database user credentials."
---

The Infisical Couchbase dynamic secret allows you to generate Couchbase Cloud Database user credentials on demand based on configured roles and bucket access permissions.

## Prerequisite

Create an API Key in your Couchbase Cloud following the [official documentation](https://docs.couchbase.com/cloud/get-started/create-account.html#create-api-key).

<Info>The API Key must have permission to manage database users in your Couchbase Cloud organization and project.</Info>

## Set up Dynamic Secrets with Couchbase

<Steps>
  <Step title="Open Secret Overview Dashboard">
    Open the Secret Overview dashboard and select the environment in which you would like to add a dynamic secret.
  </Step>
  <Step title="Click on the 'Add Dynamic Secret' button">
    ![Add Dynamic Secret Button](../../../images/platform/dynamic-secrets/add-dynamic-secret-button.png)
  </Step>
  <Step title="Select Couchbase">
    ![Dynamic Secret Modal](../../../images/platform/dynamic-secrets/dynamic-secret-modal-couchbase.png)
  </Step>
  <Step title="Provide the inputs for dynamic secret parameters">
    <ParamField path="Secret Name" type="string" required>
      Name by which you want the secret to be referenced
    </ParamField>

    <ParamField path="Default TTL" type="string" required>
      Default time-to-live for a generated secret (it is possible to modify this value after a secret is generated)
    </ParamField>

    <ParamField path="Max TTL" type="string" required>
      Maximum time-to-live for a generated secret
    </ParamField>

    <ParamField path="URL" type="string" required default="https://cloudapi.cloud.couchbase.com">
      The Couchbase Cloud API URL
    </ParamField>

    <ParamField path="Organization ID" type="string" required>
      Your Couchbase Cloud organization ID
    </ParamField>

    <ParamField path="Project ID" type="string" required>
      Your Couchbase Cloud project ID
    </ParamField>

    <ParamField path="Cluster ID" type="string" required>
      Your Couchbase Cloud cluster ID where users will be created
    </ParamField>

    <ParamField path="Roles" type="array" required>
      Database credential roles to assign to the generated user. Available options:
      - **read**: Read access to bucket data (alias for data_reader)
      - **write**: Read and write access to bucket data (alias for data_writer)
    </ParamField>

    <ParamField path="Bucket Access" type="string" required default="*">
      Specify bucket access configuration:
      - Use `*` for access to all buckets
      - Use comma-separated bucket names (e.g., `bucket1,bucket2,bucket3`) for specific buckets
      - Use Advanced Bucket Configuration for granular scope and collection access
    </ParamField>

    <ParamField path="API Key" type="string" required>
      Your Couchbase Cloud API Key for authentication
    </ParamField>

    ![Dynamic Secret Setup Modal](../../../images/platform/dynamic-secrets/dynamic-secret-setup-modal-couchbase.png)

  </Step>
  <Step title="(Optional) Advanced Configuration">

    ![Advanced Configuration](../../../images/platform/dynamic-secrets/advanced-option-couchbase.png)

    <ParamField path="Advanced Bucket Configuration" type="boolean" default="false">
      Enable advanced bucket configuration to specify granular access to buckets, scopes, and collections
    </ParamField>

    When Advanced Bucket Configuration is enabled, you can configure:

    <ParamField path="Buckets" type="array">
      List of buckets with optional scope and collection specifications:
      - **Bucket Name**: Name of the bucket (e.g., travel-sample)
      - **Scopes**: Optional array of scopes within the bucket
        - **Scope Name**: Name of the scope (e.g., inventory, _default)
        - **Collections**: Optional array of collection names within the scope
    </ParamField>

    <ParamField path="Username Template" type="string" default="{{randomUsername}}">
      Specifies a template for generating usernames. This field allows customization of how usernames are automatically created.

      Allowed template variables are:
      - `{{randomUsername}}`: Random username string
      - `{{unixTimestamp}}`: Current Unix timestamp
      - `{{identity.name}}`: Name of the identity that is generating the secret
      - `{{random N}}`: Random string of N characters

      Allowed template functions are:
      - `truncate`: Truncates a string to a specified length
      - `replace`: Replaces a substring with another value

      Examples:
      ```
      {{randomUsername}} // infisical-3POnzeFyK9gW2nioK0q2gMjr6CZqsRiX
      {{unixTimestamp}} // 17490641580
      {{identity.name}} // testuser
      {{random 5}} // x9k2m
      {{truncate identity.name 4}} // test
      {{replace identity.name 'user' 'replace'}} // testreplace
      ```
    </ParamField>

    <ParamField path="Password Configuration" type="object">
      Optional password generation requirements for Couchbase users:

      <ParamField path="Password Length" type="number" default="12" min="8" max="128">
        Length of the generated password
      </ParamField>

      <ParamField path="Character Requirements" type="object">
        Minimum required character counts:
        - **Lowercase Count**: Minimum lowercase letters (default: 1)
        - **Uppercase Count**: Minimum uppercase letters (default: 1)
        - **Digit Count**: Minimum digits (default: 1)
        - **Symbol Count**: Minimum special characters (default: 1)
      </ParamField>

      <ParamField path="Allowed Symbols" type="string" default="!@#$%^()_+-=[]{}:,?/~`">
        Special characters allowed in passwords. Cannot contain: `< > ; . * & | £`
      </ParamField>

      <Info>
        Couchbase password requirements: minimum 8 characters, maximum 128 characters, at least 1 uppercase, 1 lowercase, 1 digit, and 1 special character. Cannot contain: `< > ; . * & | £`
      </Info>
    </ParamField>

  </Step>

  <Step title="Click 'Submit'">
    After submitting the form, you will see a dynamic secret created in the dashboard.

    <Note>
      If this step fails, you may need to verify your Couchbase Cloud API key permissions and organization/project/cluster IDs.
    </Note>

    ![Dynamic Secret](../../../images/platform/dynamic-secrets/dynamic-secret-couchbase.png)

  </Step>
  <Step title="Generate dynamic secrets">
    Once you've successfully configured the dynamic secret, you're ready to generate on-demand credentials.
    To do this, simply click on the 'Generate' button which appears when hovering over the dynamic secret item.
    Alternatively, you can initiate the creation of a new lease by selecting 'New Lease' from the dynamic secret lease list section.

    ![Dynamic Secret Generate](../../../images/platform/dynamic-secrets/dynamic-secret-generate.png)
    ![Dynamic Secret Lease Empty](../../../images/platform/dynamic-secrets/dynamic-secret-lease-empty.png)

    When generating these secrets, it's important to specify a Time-to-Live (TTL) duration. This will dictate how long the credentials are valid for.

    ![Provision Lease](../../../images/platform/dynamic-secrets/provision-lease.png)

    <Tip>
      Ensure that the TTL for the lease falls within the maximum TTL defined when configuring the dynamic secret.
    </Tip>

    Once you click the `Submit` button, a new secret lease will be generated and the credentials for it will be shown to you.

    ![Dynamic Secret Lease](../../../images/platform/dynamic-secrets/dynamic-secret-lease-couchbase.png)

  </Step>
</Steps>

## Advanced Bucket Configuration Examples

The advanced bucket configuration allows you to specify granular access control:

### Example 1: Specific Bucket Access
```json
[
  {
    "name": "travel-sample"
  }
]
```

### Example 2: Bucket with Specific Scopes
```json
[
  {
    "name": "travel-sample",
    "scopes": [
      {
        "name": "inventory"
      },
      {
        "name": "_default"
      }
    ]
  }
]
```

### Example 3: Bucket with Scopes and Collections
```json
[
  {
    "name": "travel-sample",
    "scopes": [
      {
        "name": "inventory",
        "collections": ["airport", "airline"]
      },
      {
        "name": "_default",
        "collections": ["users"]
      }
    ]
  }
]
```

## Audit or Revoke Leases

Once you have created one or more leases, you will be able to access them by clicking on the respective dynamic secret item on the dashboard.
This will allow you to see the expiration time of the lease or delete a lease before its set time to live.

![Dynamic Secret Lease Data](../../../images/platform/dynamic-secrets/lease-data.png)

## Renew Leases

To extend the life of the generated dynamic secret leases past their initial time to live, simply click on the **Renew** button as illustrated below.
![Dynamic Secret Lease Renew](../../../images/platform/dynamic-secrets/dynamic-secret-lease-renew.png)

<Warning>
  Lease renewals cannot exceed the maximum TTL set when configuring the dynamic secret.
</Warning>

## Couchbase Roles and Permissions

The Couchbase dynamic secret integration supports the following database credential roles:

- **read**: Provides read-only access to bucket data
- **write**: Provides read and write access to bucket data

<Note>
  These roles are specifically for database credentials and are different from Couchbase's administrative roles. They provide data-level access to buckets, scopes, and collections based on your configuration.
</Note>

## Troubleshooting

### Common Issues

1. **Invalid API Key**: Ensure your Couchbase Cloud API key has the necessary permissions to manage database users
2. **Invalid Organization/Project/Cluster IDs**: Verify that the provided IDs exist and are accessible with your API key
3. **Role Permission Errors**: Make sure you're using only the supported database credential roles (read, write)
4. **Bucket Access Issues**: Ensure the specified buckets exist in your cluster and are accessible
@@ -0,0 +1,20 @@
---
title: "Secrets Scanning"
description: "Learn what secret scanning is and why it matters for building secure systems."
---

## What is Secret Scanning?

_Secret scanning_ is the process of monitoring code and related systems for exposed secrets — such as API keys, database credentials, and authentication tokens — that may have been accidentally committed or leaked.

As teams grow and development accelerates, it becomes easy for secrets to slip into version control, CI/CD pipelines, or shared files. Left undetected, secrets can fall into the wrong hands and give attackers direct access to production systems, third-party services, or internal APIs.

A secret scanning solution helps teams proactively identify and respond to these risks before they result in compromise. Rather than relying on manual review, secret scanning automates detection through pattern matching, entropy analysis, and contextual rules that surface secrets across your infrastructure and repositories.

## Secret Scanning in Infisical

Infisical Secret Scanning continuously monitors your source code and connected systems for exposed credentials. It integrates with platforms like [GitHub](/documentation/platform/secret-scanning/github), [GitLab](/documentation/platform/secret-scanning/gitlab), and [Bitbucket](/documentation/platform/secret-scanning/bitbucket) to scan codebases in real-time, detecting leaks as they happen and notifying administrators when action is needed.

Findings are surfaced with detailed context — including file location, commit metadata, and rule match — and can be tracked through their lifecycle using status labels like `Resolved`, `False Positive`, or `Ignored`. Teams can configure rules, exclusions, and thresholds to reduce noise and tailor detection to their environment.

In addition to real-time monitoring, Infisical supports both full repository scans and lightweight diff scans, as well as local pre-commit scanning via the [Infisical CLI](/cli/commands/scan). This allows teams to prevent secret leaks before they ever reach production.
@@ -1,230 +1,17 @@
---
title: "Secret Scanning"
sidebarTitle: "Overview"
description: "Scan and prevent secret leaks in your code repositories"
description: "Learn how to detect and respond to exposed secrets in code."
---

## Introduction
Infisical Secret Scanning helps teams detect leaked credentials — such as API keys, database passwords, and tokens — across source code and developer systems. It allows organizations to proactively catch exposed secrets before they can be exploited, and respond quickly when incidents occur.

Monitor and detect exposed secrets across your data sources, including code repositories, with Infisical Secret Scanning.
Secret Scanning works across both cloud-connected repositories and local developer environments. It integrates with data sources like [GitHub](/documentation/platform/secret-scanning/github), [GitLab](/documentation/platform/secret-scanning/gitlab), and [Bitbucket](/documentation/platform/secret-scanning/bitbucket) to monitor repositories for exposed secrets in real time, and provides a CLI ([`infisical scan`](/cli/commands/scan)) for scanning local directories, Git history, or CI pipelines before changes are pushed.

For additional security, we recommend using our [CLI Secret Scanner](/cli/scanning-overview#automatically-scan-changes-before-you-commit) to check for exposed secrets before pushing your code changes.
Core capabilities include:

<Note>
  Secret Scanning is a paid feature.
  If you're using Infisical Cloud, then it is available under the **Enterprise Tier**. If you're self-hosting Infisical,
  then you should contact team@infisical.com to purchase an enterprise license to use it.
</Note>

## How Secret Scanning Works

Secret Scanning consists of several components that enable you to quickly respond to secret leaks:

- **Scanner Engine**: The core component that analyzes your code and detects potential secrets using pattern matching and entropy analysis
- **Real-time Monitoring**: Provides continuous surveillance of your repositories for immediate detection of exposed secrets
- **Alert System**: Notifies organization admins via email when secrets are detected
- **Risk Management**: Allows tracking and managing detected secrets with different status options
- **Data Sources**: Integrates with various data sources and version control systems
- **Customizable Rules**: Supports ignore patterns and custom configurations to reduce false positives

These components work together to provide comprehensive secret detection and incident response capabilities.

### Data Sources

Data sources are configured integrations with external platforms, such as a GitHub organization or a GitLab group, that establish secure connections for scanning purposes using [App Connections](/integrations/app-connections/overview).

A data source acts as a secure intermediary between the external system and the scanner engine. It manages a collection of scannable resources (such as repositories) and handles the authentication and communication required for scanning operations.

![Secret Scanning Data Sources](/images/platform/secret-scanning/secret-scanning-data-sources.png)

### Resources

Resources are the atomic, scannable units, such as a repository, that can be monitored for secret exposure. Resources are added automatically when a data source is scanned and updated when scanning events are triggered, such as when a user pushes changes to GitHub.

Each resource maintains its own scanning history and status, allowing for granular monitoring and management of secret scanning across your organization.

![Secret Scanning Resources](/images/platform/secret-scanning/secret-scanning-resources.png)

### Scans

Scans can be initiated in two ways:

1. **Full Scan** - Manually triggered scan that comprehensively checks either all resources associated with a data source or a single selected resource.

2. **Diff Scan** - Automatically executed when **Auto-Scan** is enabled on a data source. This scan type specifically focuses on updates to existing resources.

All scan activities can be monitored in real-time through the Infisical UI, which displays:
- Current scan status
- Timestamp of the scan
- Resource(s) being scanned
- Detection results (whether any secrets were found)

![Secret Scanning Scans](/images/platform/secret-scanning/secret-scanning-scans.png)

### Findings

Findings are automatically generated when secret leaks are detected during scanning operations. Each finding contains comprehensive information including:
- The specific scanning rule that identified the leak
- File location and line number where the secret was found
- Resource-specific details (e.g., commit hash and author for Git repositories)

Findings are initially marked as **Unresolved** and can be updated to one of the following statuses with additional remarks:
- **Resolved** - The issue has been addressed
- **False Positive** - The detection was incorrect
- **Ignore** - The finding can be safely disregarded

These status options help teams effectively track and manage the lifecycle of detected secret leaks.

![Secret Scanning Findings](/images/platform/secret-scanning/secret-scanning-findings.png)

### Configuration

You can configure custom scanning rules and exceptions by updating your project's scanning configuration via the UI or API.

The configuration options allow you to:
- Define custom scanning patterns and rules
- Set up ignore patterns to reduce false positives
- Specify file path exclusions
- Configure entropy thresholds for secret detection (see the sketch below)
- Add allowlists for known safe patterns
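For intuition on the entropy threshold mentioned above, a minimal sketch of the Shannon entropy measure these rules rely on (random-looking strings score high; dictionary words score low):

```ts
// Shannon entropy in bits per character: -sum(p * log2(p)) over character frequencies.
const shannonEntropy = (s: string): number => {
  const counts = new Map<string, number>();
  for (const ch of s) counts.set(ch, (counts.get(ch) ?? 0) + 1);
  let entropy = 0;
  for (const count of counts.values()) {
    const p = count / s.length;
    entropy -= p * Math.log2(p);
  }
  return entropy;
};

// shannonEntropy("password")                          // 2.75 — below a 3.5 threshold
// shannonEntropy("sk_live_4eC39HqLyjWDarjtT1zdp7dc")  // 4.75 — above it
```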
|
||||
|
||||
For detailed configuration options, expand the example configuration below.
|
||||
|
||||
<Accordion title="Example Configuration">
```toml
# Title for the configuration file
title = "Some title"

# This configuration is the foundation that can be expanded. If there are any overlapping rules
# between this base and the expanded configuration, the rules in this base will take priority.
# Another aspect of extending configurations is the ability to link multiple files, up to a depth of 2.
# "Allowlist" arrays get appended and may have repeated elements.
# "useDefault" and "path" cannot be used simultaneously. Please choose one.
[extend]
# useDefault will extend the base configuration with the default config:
# https://raw.githubusercontent.com/Infisical/infisical/main/cli/config/infisical-scan.toml
useDefault = true
# or you can supply a path to a configuration. The path is relative to where the infisical CLI
# was invoked, not the location of the base config.
path = "common_config.toml"

# An array of tables that define instructions
# on how to detect secrets
[[rules]]

# Unique identifier for this rule
id = "some-identifier-for-rule"

# Short human-readable description of the rule.
description = "awesome rule 1"

# Golang regular expression used to detect secrets. Note that Golang's regex engine
# does not support lookaheads.
regex = '''one-go-style-regex-for-this-rule'''

# Golang regular expression used to match paths. This can be used as a standalone rule or it can be used
# in conjunction with a valid `regex` entry.
path = '''a-file-path-regex'''

# Array of strings used for metadata and reporting purposes.
tags = ["tag","another tag"]

# A regex match may have many groups; this specifies which group the secret is contained in,
# and which group has its entropy checked if `entropy` is set.
secretGroup = 3

# Float representing the minimum Shannon entropy a regex group must have to be considered a secret.
# Shannon entropy measures how random data is. Since secrets are usually composed of many random characters, they typically have high entropy.
entropy = 3.5

# Keywords are used for pre-regex check filtering.
# If a rule has keywords but the text fragment being scanned doesn't contain at least one of its keywords, the fragment will be skipped and not processed further.
# Ideally these values should either be part of the identifier or unique strings specific to the rule's regex.
# (introduced in v8.6.0)
keywords = [
    "auth",
    "password",
    "token",
]

# You can include an allowlist table for a single rule to reduce false positives or ignore commits
# with known/rotated secrets
[rules.allowlist]
description = "ignore commit A"
commits = [ "commit-A", "commit-B"]
paths = [
    '''go\.mod''',
    '''go\.sum'''
]
# note: (rule) regexTarget defaults to checking the _Secret_ in the finding.
# If regexTarget is not specified, then _Secret_ will be used.
# Acceptable values for regexTarget are "match" and "line".
regexTarget = "match"
regexes = [
    '''process''',
    '''getenv''',
]
# note: stopwords target the extracted secret, not the entire regex match.
# If the extracted secret is found in the stopwords list, the finding will be skipped (i.e., not included in the report).
stopwords = [
    '''client''',
    '''endpoint''',
]

# This is a global allowlist, which has a higher order of precedence than rule-specific allowlists.
# If a commit listed in the `commits` field below is encountered, then that commit will be skipped and no
# secrets will be detected for said commit. The same logic applies for regexes and paths.
[allowlist]
description = "global allow list"
commits = [ "commit-A", "commit-B", "commit-C"]
paths = [
    '''gitleaks\.toml''',
    '''(.*?)(jpg|gif|doc)'''
]

# note: (global) regexTarget defaults to checking the _Secret_ in the finding.
# If regexTarget is not specified, then _Secret_ will be used.
# Acceptable values for regexTarget are "match" and "line".
regexTarget = "match"

regexes = [
    '''219-09-9999''',
    '''078-05-1120''',
    '''(9[0-9]{2}|666)-\d{2}-\d{4}''',
]
# note: stopwords target the extracted secret, not the entire regex match.
# If the extracted secret is found in the stopwords list, the finding will be skipped (i.e., not included in the report).
stopwords = [
    '''client''',
    '''endpoint''',
]
```

</Accordion>

## Ignoring Known Secrets

If you're intentionally committing a test secret that the secret scanner might flag, you can instruct Infisical to overlook that secret with the methods listed below.

### infisical-scan:ignore

To ignore a secret contained in a line of code, simply add `infisical-scan:ignore` at the end of the line as a comment in the relevant programming language.

```js example.js
function helloWorld() {
  console.log("8dyfuiRyq=vVc3RRr_edRk-fK__JItpZ"); // infisical-scan:ignore
}
```

### .infisicalignore

An alternative method to exclude specific findings involves creating a .infisicalignore file at your repository's root.
You can then add the fingerprints of the findings you wish to exclude. The [Infisical scan](/cli/scanning-overview) report provides a unique Fingerprint for each secret found.
By incorporating these Fingerprints into the .infisicalignore file, Infisical will skip the corresponding secret findings in subsequent scans.

```.ignore .infisicalignore
bea0ff6e05a4de73a5db625d4ae181a015b50855:frontend/components/utilities/attemptLogin.js:stripe-access-token:147
bea0ff6e05a4de73a5db625d4ae181a015b50855:backend/src/json/integrations.json:generic-api-key:5
1961b92340e5d2613acae528b886c842427ce5d0:frontend/components/utilities/attemptLogin.js:stripe-access-token:148
```

- Integrated Scanning Across Environments: Monitor secrets in real time across connected repositories like GitHub, GitLab, and Bitbucket, or scan locally using the `infisical scan` CLI.
- Detection Engine: Identify potential secrets using pattern matching, entropy analysis, and custom rules tailored to your codebase and workflows.
- Flexible Scan Modes: Run full scans manually or configure automatic diff scans triggered by new commits. CLI scans support Git history, file directories, or staged changes in CI pipelines.
- Findings and Lifecycle Management: Track detected secrets with context like file path, commit hash, and scanning rule. Findings can be resolved, ignored, or marked as false positives, with full visibility into scan results over time.
- Custom Configuration and Noise Reduction: Fine-tune scanning behavior with custom patterns, ignore rules (`infisical-scan:ignore`, `.infisicalignore`), entropy thresholds, and excluded paths to reduce false positives.
236
docs/documentation/platform/secret-scanning/usage.mdx
Normal file
@@ -0,0 +1,236 @@
---
title: "Usage"
description: "Learn what secret scanning is and why it matters for building secure systems."
---

## Introduction

Monitor and detect exposed secrets across your data sources, including code repositories, with Infisical Secret Scanning.

For additional security, we recommend using our [CLI Secret Scanner](/cli/scanning-overview#automatically-scan-changes-before-you-commit) to check for exposed secrets before pushing your code changes.
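
If your repository already runs Node-based git hooks (for example via husky), a minimal pre-commit script can perform that check automatically. The sketch below assumes the `infisical scan git-changes --staged -v` invocation described in the linked CLI scanning docs; the script name is illustrative.

```js scan-staged.js
// Pre-commit hook: scan only the staged changes so a secret is caught
// before it ever lands in git history.
const { execSync } = require("node:child_process");

try {
  execSync("infisical scan git-changes --staged -v", { stdio: "inherit" });
} catch {
  // A non-zero exit code from the scanner means a potential secret was found.
  console.error("Potential secret detected; commit aborted.");
  process.exit(1);
}
```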

<Note>
  Secret Scanning is a paid feature. If you're using Infisical Cloud, it is
  available under the **Enterprise Tier**. If you're self-hosting Infisical,
  contact team@infisical.com to purchase an enterprise license to use it.
</Note>

## How Secret Scanning Works

Secret Scanning consists of several components that enable you to quickly respond to secret leaks:

- **Scanner Engine**: The core component that analyzes your code and detects potential secrets using pattern matching and entropy analysis
- **Real-time Monitoring**: Provides continuous surveillance of your repositories for immediate detection of exposed secrets
- **Alert System**: Notifies organization admins via email when secrets are detected
- **Risk Management**: Allows tracking and managing detected secrets with different status options
- **Data Sources**: Integrates with various data sources and version control systems
- **Customizable Rules**: Supports ignore patterns and custom configurations to reduce false positives

These components work together to provide comprehensive secret detection and incident response capabilities.
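
To make the detection flow concrete, here is an illustrative sketch of how a rule's keywords act as a cheap pre-filter before its (more expensive) regex runs. This is conceptual only, not Infisical's actual engine, and the rule shown is made up:

```js
// Conceptual model of a detection rule: keywords gate the regex check.
const rule = {
  id: "generic-api-key",
  keywords: ["api_key"],
  regex: /api_key\s*=\s*["']([A-Za-z0-9_\-=]{16,})["']/g,
};

function scan(fragment) {
  // Keyword pre-filter: skip fragments that cannot possibly match the rule.
  if (!rule.keywords.some((k) => fragment.toLowerCase().includes(k))) return [];
  // Pattern match: capture group 1 holds the candidate secret.
  return [...fragment.matchAll(rule.regex)].map((m) => ({
    rule: rule.id,
    secret: m[1],
  }));
}

console.log(scan(`api_key = "8dyfuiRyq=vVc3RRr_edRk-fK__JItpZ"`));
// -> [ { rule: "generic-api-key", secret: "8dyfuiRyq=vVc3RRr_edRk-fK__JItpZ" } ]
```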

### Data Sources

Data sources are configured integrations with external platforms, such as a GitHub organization or a GitLab group, that establish secure connections for scanning purposes using [App Connections](/integrations/app-connections/overview).

A data source acts as a secure intermediary between the external system and the scanner engine. It manages a collection of scannable resources (such as repositories) and handles the authentication and communication required for scanning operations.

### Resources

Resources are the atomic, scannable units, such as a repository, that can be monitored for secret exposure. Resources are added automatically when a data source is scanned and updated when scanning events are triggered, such as when a user pushes changes to GitHub.

Each resource maintains its own scanning history and status, allowing for granular monitoring and management of secret scanning across your organization.

### Scans

Scans can be initiated in two ways:

1. **Full Scan** - A manually triggered scan that comprehensively checks either all resources associated with a data source or a single selected resource.

2. **Diff Scan** - Automatically executed when **Auto-Scan** is enabled on a data source. This scan type focuses specifically on updates to existing resources.

All scan activities can be monitored in real time through the Infisical UI, which displays:

- Current scan status
- Timestamp of the scan
- Resource(s) being scanned
- Detection results (whether any secrets were found)

### Findings

Findings are automatically generated when secret leaks are detected during scanning operations. Each finding contains comprehensive information, including:

- The specific scanning rule that identified the leak
- File location and line number where the secret was found
- Resource-specific details (e.g., commit hash and author for Git repositories)

Findings are initially marked as **Unresolved** and can be updated to one of the following statuses with additional remarks:

- **Resolved** - The issue has been addressed
- **False Positive** - The detection was incorrect
- **Ignore** - The finding can be safely disregarded

These status options help teams effectively track and manage the lifecycle of detected secret leaks.

### Configuration

You can configure custom scanning rules and exceptions by updating your project's scanning configuration via the UI or API.

The configuration options allow you to:

- Define custom scanning patterns and rules
- Set up ignore patterns to reduce false positives
- Specify file path exclusions
- Configure entropy thresholds for secret detection
- Add allowlists for known safe patterns

For detailed configuration options, expand the example configuration below.

<Accordion title="Example Configuration">
```toml
# Title for the configuration file
title = "Some title"

# This configuration is the foundation that can be expanded. If there are any overlapping rules
# between this base and the expanded configuration, the rules in this base will take priority.
# Another aspect of extending configurations is the ability to link multiple files, up to a depth of 2.
# "Allowlist" arrays get appended and may have repeated elements.
# "useDefault" and "path" cannot be used simultaneously. Please choose one.
[extend]
# useDefault will extend the base configuration with the default config:
# https://raw.githubusercontent.com/Infisical/infisical/main/cli/config/infisical-scan.toml
useDefault = true
# or you can supply a path to a configuration. The path is relative to where the infisical CLI
# was invoked, not the location of the base config.
path = "common_config.toml"

# An array of tables that define instructions
# on how to detect secrets
[[rules]]

# Unique identifier for this rule
id = "some-identifier-for-rule"

# Short human-readable description of the rule.
description = "awesome rule 1"

# Golang regular expression used to detect secrets. Note that Golang's regex engine
# does not support lookaheads.
regex = '''one-go-style-regex-for-this-rule'''

# Golang regular expression used to match paths. This can be used as a standalone rule or it can be used
# in conjunction with a valid `regex` entry.
path = '''a-file-path-regex'''

# Array of strings used for metadata and reporting purposes.
tags = ["tag","another tag"]

# A regex match may have many groups; this specifies which group the secret is contained in,
# and which group has its entropy checked if `entropy` is set.
secretGroup = 3

# Float representing the minimum Shannon entropy a regex group must have to be considered a secret.
# Shannon entropy measures how random data is. Since secrets are usually composed of many random characters, they typically have high entropy.
entropy = 3.5

# Keywords are used for pre-regex check filtering.
# If a rule has keywords but the text fragment being scanned doesn't contain at least one of its keywords, the fragment will be skipped and not processed further.
# Ideally these values should either be part of the identifier or unique strings specific to the rule's regex.
# (introduced in v8.6.0)
keywords = [
    "auth",
    "password",
    "token",
]

# You can include an allowlist table for a single rule to reduce false positives or ignore commits
# with known/rotated secrets
[rules.allowlist]
description = "ignore commit A"
commits = [ "commit-A", "commit-B"]
paths = [
    '''go\.mod''',
    '''go\.sum'''
]
# note: (rule) regexTarget defaults to checking the _Secret_ in the finding.
# If regexTarget is not specified, then _Secret_ will be used.
# Acceptable values for regexTarget are "match" and "line".
regexTarget = "match"
regexes = [
    '''process''',
    '''getenv''',
]
# note: stopwords target the extracted secret, not the entire regex match.
# If the extracted secret is found in the stopwords list, the finding will be skipped (i.e., not included in the report).
stopwords = [
    '''client''',
    '''endpoint''',
]

# This is a global allowlist, which has a higher order of precedence than rule-specific allowlists.
# If a commit listed in the `commits` field below is encountered, then that commit will be skipped and no
# secrets will be detected for said commit. The same logic applies for regexes and paths.
[allowlist]
description = "global allow list"
commits = [ "commit-A", "commit-B", "commit-C"]
paths = [
    '''gitleaks\.toml''',
    '''(.*?)(jpg|gif|doc)'''
]

# note: (global) regexTarget defaults to checking the _Secret_ in the finding.
# If regexTarget is not specified, then _Secret_ will be used.
# Acceptable values for regexTarget are "match" and "line".
regexTarget = "match"

regexes = [
    '''219-09-9999''',
    '''078-05-1120''',
    '''(9[0-9]{2}|666)-\d{2}-\d{4}''',
]
# note: stopwords target the extracted secret, not the entire regex match.
# If the extracted secret is found in the stopwords list, the finding will be skipped (i.e., not included in the report).
stopwords = [
    '''client''',
    '''endpoint''',
]
```

</Accordion>
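
The `entropy` field above is a Shannon-entropy threshold measured in bits per character. As a rough intuition for why a value like 3.5 separates human-chosen strings from random tokens, here is a small sketch; the helper mirrors the standard formula, not the scanner's exact internals:

```js
// Shannon entropy in bits per character: H = -sum(p_i * log2(p_i)).
function shannonEntropy(s) {
  const counts = {};
  for (const ch of s) counts[ch] = (counts[ch] ?? 0) + 1;
  return Object.values(counts).reduce(
    (h, n) => h - (n / s.length) * Math.log2(n / s.length),
    0
  );
}

console.log(shannonEntropy("aaaaaaaa"));                         // 0: a repeated character carries no surprise
console.log(shannonEntropy("password-dev-1"));                   // ~3.4: human-chosen, just under the 3.5 threshold
console.log(shannonEntropy("8dyfuiRyq=vVc3RRr_edRk-fK__JItpZ")); // ~4.6: typical for a random token
```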

## Ignoring Known Secrets

If you're intentionally committing a test secret that the secret scanner might flag, you can instruct Infisical to overlook that secret with the methods listed below.

### infisical-scan:ignore

To ignore a secret contained in a line of code, simply add `infisical-scan:ignore` at the end of the line as a comment in the relevant programming language.

```js example.js
function helloWorld() {
  console.log("8dyfuiRyq=vVc3RRr_edRk-fK__JItpZ"); // infisical-scan:ignore
}
```

### .infisicalignore

An alternative method to exclude specific findings involves creating a .infisicalignore file at your repository's root.
You can then add the fingerprints of the findings you wish to exclude. The [Infisical scan](/cli/scanning-overview) report provides a unique Fingerprint for each secret found.
By incorporating these Fingerprints into the .infisicalignore file, Infisical will skip the corresponding secret findings in subsequent scans.

```.ignore .infisicalignore
bea0ff6e05a4de73a5db625d4ae181a015b50855:frontend/components/utilities/attemptLogin.js:stripe-access-token:147
bea0ff6e05a4de73a5db625d4ae181a015b50855:backend/src/json/integrations.json:generic-api-key:5
1961b92340e5d2613acae528b886c842427ce5d0:frontend/components/utilities/attemptLogin.js:stripe-access-token:148
```
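
Each fingerprint follows the pattern `<commit>:<file path>:<rule id>:<line>`, as seen in the entries above. If you need to work with fingerprints in your own tooling, a small sketch for splitting one apart could look like this (the helper name is illustrative):

```js
// Split "<commit>:<file path>:<rule id>:<line>" into its parts. The file path
// may itself contain ":" on some systems, so peel fields off both ends.
function parseFingerprint(fingerprint) {
  const parts = fingerprint.split(":");
  const line = Number(parts.pop());
  const ruleId = parts.pop();
  const commit = parts.shift();
  return { commit, file: parts.join(":"), ruleId, line };
}

console.log(
  parseFingerprint(
    "bea0ff6e05a4de73a5db625d4ae181a015b50855:frontend/components/utilities/attemptLogin.js:stripe-access-token:147"
  )
);
// -> { commit: "bea0ff6e...", file: "frontend/components/utilities/attemptLogin.js",
//      ruleId: "stripe-access-token", line: 147 }
```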
@@ -0,0 +1,33 @@

---
title: "Scoping Secrets"
description: "Learn how access to secrets is controlled in Infisical."
---

## Secret Hierarchy

Every secret in Infisical is scoped to an environment and a path.

- An environment separates where secrets are used, such as `development`, `staging`, or `production`.
- A path is an (optional) namespace within an environment that groups related secrets, such as `/postgres`, `/redis`, or per-service paths like `/service-a`.

This structure makes it easy to organize secrets by team, service, or environment, and sets the foundation for controlling who can access what.
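
As a rough mental model, every secret is addressed by the triple of environment, path, and key. The shape below is purely illustrative (it is not an Infisical API), but it captures how scoping composes:

```js
// Purely illustrative: a secret's "address" in the hierarchy described above.
const secretRef = {
  environment: "production",
  path: "/postgres",
  key: "DB_PASSWORD",
};

// Access-control questions then reduce to: which identities may read or write
// values under a given (environment, path) prefix?
console.log(`${secretRef.environment}:${secretRef.path}/${secretRef.key}`);
// -> "production:/postgres/DB_PASSWORD"
```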

## Access Control

Access control determines who (or what) can access a secret and under what conditions. Without clear policies, even securely stored secrets can be misused or exposed.

To control access to secrets, you configure role-based permissions at the project level. These permissions determine which environments and paths a user or machine identity with that role can access. For example, an engineer might have a role that allows them to read secrets in the `development` environment but not those in the `production` environment.

This model follows the [principle of least privilege](https://en.wikipedia.org/wiki/Principle_of_least_privilege): each user or machine identity has access only to the secrets it needs, and nothing more.

## Advanced Capabilities

Beyond basic role assignments, Infisical includes additional access control mechanisms for more advanced use cases:

- Access approvals: Users can request access to specific environments or paths. Access can be temporary and reviewed before it is granted, reducing long-term exposure.

- Secret change approvals: Updates to sensitive secrets can require approval before taking effect. This adds control in environments where unreviewed changes pose risk.

- Attribute-based access control (ABAC): Permissions can be matched against metadata on a user or machine identity, such as team, service, or environment, enabling dynamic access rules without manual role changes.

All access and approval actions are logged, so it’s always possible to trace who accessed what, when, and under what conditions.
@@ -0,0 +1,22 @@

---
title: "Dynamic Secrets"
description: "Learn what dynamic secrets are, why they're useful, and how Infisical enables them."
---

## What is a Dynamic Secret?

A _dynamic secret_ is a time-bound credential generated on demand for a specific user or system. Unlike _static secrets_, which are created and stored ahead of time, or rotated credentials, which are periodically replaced, dynamic secrets don’t exist until they’re requested, and they automatically expire shortly after use.

Each secret is unique to the identity that requested it, reducing the risk of reuse, long-term exposure, or accidental leaks. Because they are short-lived and tightly scoped, dynamic secrets are well suited for high-security environments, automated systems, and ephemeral workloads where access needs to be both temporary and auditable.

By limiting the lifespan and visibility of credentials, dynamic secrets offer a strong alternative to managing long-lived secrets manually.

## Dynamic Secrets in Infisical

Infisical generates dynamic secrets in real time when a user or machine identity requests access. Each secret is uniquely scoped to the requesting identity, issued just in time, valid only for a limited duration, and automatically revoked after it expires.

Because they are short-lived and identity-specific, dynamic secrets reduce the risk of credential reuse, accidental exposure, or long-term persistence across environments.
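
Conceptually, consuming a dynamic secret looks like a lease: request it, use it, and let it expire (or revoke it early). The helper names below are hypothetical and illustrate only the lifecycle, not Infisical's actual API:

```js
// Hypothetical sketch of a dynamic-secret lease lifecycle (not the real API).
async function withDynamicCredential(requestLease, use) {
  // Issued on demand, bound to this identity, valid only for the TTL.
  const lease = await requestLease({ ttlSeconds: 300 });
  try {
    return await use(lease.credential);
  } finally {
    // The lease expires on its own; revoking early shrinks the window further.
    await lease.revoke();
  }
}
```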

When supported for a given integration, using dynamic secrets is strongly recommended. Infisical currently supports dynamic secret templates for commonly used systems including [PostgreSQL](/documentation/platform/dynamic-secrets/postgresql), [MySQL](/documentation/platform/dynamic-secrets/mysql), [Microsoft SQL Server](/documentation/platform/dynamic-secrets/mssql), [MongoDB Atlas](/documentation/platform/dynamic-secrets/mongo-db), [Redis](/documentation/platform/dynamic-secrets/redis), [AWS IAM](/documentation/platform/dynamic-secrets/aws-iam), [GCP IAM](/documentation/platform/dynamic-secrets/gcp-iam), [Azure Entra ID](/documentation/platform/dynamic-secrets/azure-entra-id), and more.

To learn more, refer to the [dynamic secrets documentation](/documentation/platform/dynamic-secrets/overview).
@@ -0,0 +1,96 @@

---
title: "Fetching Secrets"
description: "Learn how to deliver secrets from Infisical into the systems, applications, and environments that need them."
---

Once secrets are stored and scoped in Infisical, the next step is delivering them securely to the systems and applications that need them.

Infisical supports many delivery methods to match a wide range of environments, from [local development](/documentation/platform/secrets-mgmt/concepts/secrets-delivery#local-development%2C-scripts%2C-and-one-off-tasks) to [Kubernetes workloads](/documentation/platform/secrets-mgmt/concepts/secrets-delivery#kubernetes-workloads), [CI/CD pipelines](/documentation/platform/secrets-mgmt/concepts/secrets-delivery#ci%2Fcd-pipelines), [infrastructure-as-code tools](/documentation/platform/secrets-mgmt/concepts/secrets-delivery#infrastructure-as-code-and-automation-tools), and more.

The table below provides a quick overview of which delivery method may be suitable based on your environment and how secrets are consumed:

| Use Case / Environment | Recommended Method(s) | Consumes Secrets As | Notes |
| --- | --- | --- | --- |
| Local development or scripting | [Infisical CLI](/cli/overview) | Environment variables | Easiest way to inject secrets during local dev or debugging |
| Application code fetching at runtime | [SDKs](/sdks/overview), [HTTP API](/api-reference/overview/introduction) | In-memory / API call | Full control in app code; supports dynamic or ephemeral fetching |
| VMs, containers, or CI jobs needing preloaded secrets | [Infisical Agent](/integrations/platforms/infisical-agent) | Env vars or files | Good for non-interactive workloads; avoids inline secret fetch |
| GitHub Actions | [Secrets Action](https://github.com/Infisical/secrets-action), [Secret Syncs](/integrations/secret-syncs/github) | Env vars or files | Use Action for dynamic fetch; use Syncs to preload into GitHub |
| GitLab CI, Jenkins, other CI | [Infisical CLI](/cli/overview), [Infisical Agent](/integrations/platforms/infisical-agent), [Secret Syncs](/integrations/secret-syncs/gitlab) | Env vars or files | Choose based on timing: fetch at runtime vs. pre-populate ahead |
| Kubernetes (declarative secrets) | [Kubernetes Operator](/integrations/platforms/kubernetes/overview) | Kubernetes Secrets | Syncs from Infisical into native Kubernetes Secrets |
| Kubernetes (ESO-based workflows) | [External Secrets Operator (ESO)](https://external-secrets.io/latest/provider/infisical/) | Kubernetes Secrets | Reuses existing ESO setup; Infisical acts as a provider |
| Kubernetes (file-based, no K8s secrets) | [Kubernetes Agent Injector](/integrations/platforms/kubernetes-injector) | Mounted files | Injects secrets via init container into volume at pod startup |
| Kubernetes (file-based, with rotation) | [Kubernetes CSI Provider](/integrations/platforms/kubernetes-csi) | Mounted files | Uses CSI driver to mount secrets as files with automatic rotation |
| Image builds (VMs or containers) | [Packer Plugin](/integrations/frameworks/packer) | Env vars or files | Inject secrets at image build time |
| Ansible automation | [Ansible Collection](/integrations/platforms/ansible) | Variables | Runtime secret fetching in playbooks using lookup plugin |
| Terraform / Pulumi | [Terraform Provider](/integrations/frameworks/terraform), [Pulumi](/integrations/frameworks/pulumi) | Inputs / ephemeral resources | Use ephemeral for security; avoids storing secrets in state |
| Third-party platforms (GitHub, AWS, etc.) | [Secret Syncs](/integrations/secret-syncs/overview) | Preloaded secrets | Push secrets to platforms that can't fetch directly from Infisical |

From here, you can explore the delivery method that best matches your environment:

## Local Development, Scripts, and One-Off Tasks

For local development, one-off scripts, or basic automation, the [Infisical CLI](/cli/overview) is a quick and flexible option.

Instead of using a `.env` file, you can use [`infisical run`](/cli/commands/run) to inject secrets as environment variables directly into your development process, which provides a cleaner and more secure workflow. You can also use [`infisical secrets`](/cli/commands/secrets#infisical-secrets) to perform CRUD operations on secrets from the command line, which works well for debugging, local tooling, and lightweight scripting.

To learn more, refer to the [CLI quickstart](/cli/usage).
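
For example, a script started via `infisical run` reads the injected values straight from its environment; the variable name below is a placeholder for whatever secrets your project defines:

```js index.js
// Run as: infisical run --env=dev -- node index.js
// The CLI injects the project's secrets as environment variables for the child process.
console.log("STRIPE_KEY present:", Boolean(process.env.STRIPE_KEY));
```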

## Applications and Services

When secrets need to be accessed directly from within application code, Infisical provides SDKs for [Node.js](https://github.com/Infisical/node-sdk-v2), [Python](https://github.com/Infisical/python-sdk-official), [Go](/sdks/languages/go), [Java](https://github.com/Infisical/java-sdk), [.NET](https://github.com/Infisical/infisical-dotnet-sdk), [C++](https://github.com/Infisical/infisical-cpp-sdk), [Rust](https://github.com/Infisical/rust-sdk), and [Ruby](/sdks/languages/ruby).

These SDKs let services fetch secrets at runtime or startup. For unsupported languages, or if you prefer direct integration, you can use the fully documented [HTTP API](/api-reference/overview/introduction) to fetch secrets within your application logic.

This approach gives you fine-grained control but also requires managing authentication and caching yourself.
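
As a sketch of what runtime fetching can look like with the Node.js SDK: the flow below follows the SDK's universal-auth login pattern, the client id/secret and project id are placeholders, and the exact method names and response shape should be verified against the SDK version you install.

```js
// Minimal sketch: authenticate with a machine identity, then list secrets.
import { InfisicalSDK } from "@infisical/sdk";

const client = new InfisicalSDK();

await client.auth().universalAuth.login({
  clientId: process.env.INFISICAL_CLIENT_ID,
  clientSecret: process.env.INFISICAL_CLIENT_SECRET,
});

const { secrets } = await client.secrets().listSecrets({
  environment: "dev",
  projectId: "<your-project-id>", // placeholder
});

console.log(secrets.map((s) => s.secretKey));
```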

## Virtual Machines (VMs), Containers, and CI Environments

For systems that shouldn’t fetch secrets themselves, such as production VMs, Docker containers, or CI jobs, the [Infisical Agent](/integrations/platforms/infisical-agent) can sync secrets into the local environment on their behalf.

The agent runs as a lightweight background process and supports injecting secrets into files, environment variables, or application config formats. It's especially useful for non-interactive workloads that expect secrets to already exist at runtime.

You can run the agent as a standalone binary, as a Docker sidecar, or embedded in automation scripts. It works well in environments like:

- VMs that need secrets provisioned at startup.
- [Docker Swarm](/integrations/platforms/docker-swarm-with-agent) services using shared volumes.
- [ECS tasks](/integrations/platforms/ecs-with-agent) using EFS for shared secret delivery.
## CI/CD Pipelines

For CI/CD pipelines, the right method depends on the platform.

- On GitHub Actions, the [Infisical Secrets Action](https://github.com/Infisical/secrets-action) provides a native integration that injects secrets as environment variables or `.env` files during workflows. It supports authentication via [AWS IAM](/documentation/platform/identities/aws-auth), [OIDC](/documentation/platform/identities/oidc-auth/github), or [Universal Auth](/documentation/platform/identities/universal-auth) using a Machine Identity.
- On other CI platforms like GitLab CI, CircleCI, or Jenkins, the CLI or Agent may be used, depending on whether secrets are consumed at runtime or during setup.

Some CI/CD systems also support [Secret Syncs](/integrations/secret-syncs/overview) as an alternative. Instead of fetching secrets dynamically, you can configure Infisical to forward secrets into [GitHub Actions](/integrations/secret-syncs/github), [GitLab CI](/integrations/secret-syncs/gitlab), and similar platforms ahead of time, allowing them to be used as native environment secrets during jobs.

## Kubernetes Workloads

Infisical supports multiple options for delivering secrets into Kubernetes, each designed to match different operational models and consumption patterns:

- [Infisical Kubernetes Operator](/integrations/platforms/kubernetes/overview): A set of CRDs that sync secrets from Infisical into Kubernetes Secrets, push secrets from Kubernetes back to Infisical, and manage dynamic secrets with automatic leases. Best suited for teams using declarative workflows that want to treat secrets as part of infrastructure code.

- [External Secrets Operator (ESO)](https://external-secrets.io/latest/provider/infisical/): Enables syncing Infisical secrets into Kubernetes by defining ExternalSecret resources. Ideal if your team already uses ESO as a centralized way to fetch secrets from multiple providers.

- [Infisical Agent Injector](/integrations/platforms/kubernetes-injector): A Kubernetes mutating admission webhook that injects an init container into your pods. The injected container syncs secrets from Infisical into a shared volume at startup, making them available as files. Useful for workloads that expect file-based secrets and where avoiding Kubernetes Secrets entirely is preferred.

- [Infisical CSI Provider](/integrations/platforms/kubernetes-csi): Integrates with the Kubernetes Secrets Store CSI Driver to mount secrets as files in pods. Supports automatic rotation and fine-grained control over how secrets are structured and updated. Suitable for environments that require file-based secret delivery with rotation, without persisting Kubernetes Secrets.

These methods provide secure, declarative integrations with Kubernetes-native workflows.

## Forwarding to Third-Party Platforms

In some cases, secrets must be delivered into systems that can’t fetch them from Infisical directly.

[Secret Syncs](/integrations/secret-syncs/overview) allow you to forward secrets to platforms such as [GitHub](/integrations/secret-syncs/github), [GitLab](/integrations/secret-syncs/gitlab), [AWS Secrets Manager](/integrations/secret-syncs/aws-secrets-manager), [Vercel](/integrations/secret-syncs/vercel), and more.

This is useful when external systems require secrets to be available ahead of time or expect them in a specific location.

## Infrastructure-as-Code and Automation Tools

Infisical integrates with common IaC and automation tools to help you securely inject secrets into your infrastructure provisioning workflows:

- [Terraform](/integrations/frameworks/terraform): Use the official Infisical Terraform provider to fetch secrets either as ephemeral resources (never written to state files) or as traditional data sources. Ideal for managing cloud infrastructure while keeping secrets secure and version-safe.
- [Pulumi](/integrations/frameworks/pulumi): Integrate Infisical into Pulumi projects using the Terraform Bridge, allowing you to fetch and manage secrets in TypeScript, Go, Python, or C# without changing your existing workflows.
- [Ansible](/integrations/platforms/ansible): Retrieve secrets from Infisical at runtime using the official Ansible Collection and lookup plugin. Works well for dynamic configuration during playbook execution.
- [Packer](/integrations/frameworks/packer): Inject secrets into VM or container images at build time using the Infisical Packer Plugin, useful for provisioning base images that require secure configuration values.
@@ -0,0 +1,18 @@

---
title: "Secrets Management"
description: "Learn what secrets management is and why it matters for building secure systems."
---

## What is a Secret?

A _secret_ is a confidential value used by an application, such as a database credential, API key, or other configuration value.

In most cases, secrets allow applications to access systems and control how they behave across the development cycle; for example, an application might use a database password stored in an environment variable like `DB_PASSWORD` to connect to production data. These secrets must be kept secure to protect infrastructure and data.
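
In code, that pattern is as simple as reading the variable at startup and failing fast when it is missing (`DB_PASSWORD` as in the example above):

```js
// Read the credential from the environment instead of hardcoding it in source.
const dbPassword = process.env.DB_PASSWORD;
if (!dbPassword) {
  throw new Error("DB_PASSWORD is not set");
}
// ...pass dbPassword to your database client of choice.
```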

## What is Secrets Management?

As infrastructure scales and systems become more distributed, [secrets sprawl](https://infisical.com/blog/what-is-secret-sprawl). Without consistent security practices, secrets get hardcoded in source code, exposed in environment variables, left unrotated for long periods, and scattered across systems without clear visibility into who can access them.

To solve secret sprawl, organizations rely on [secrets management](https://infisical.com/blog/what-is-secrets-management): the practice of centralizing secrets and managing them through well-defined workflows. This includes secure storage, fine-grained access controls, automatic rotation, audit logging, and support for dynamic, short-lived credentials.

A consistent approach makes it easier to keep secrets safe, reduce risk, and operate reliably across environments.
@@ -0,0 +1,18 @@

---
title: "Secrets Rotation"
description: "Learn what secrets rotation is, why it matters, and how Infisical enables it."
---

## What is Secrets Rotation?

Secrets rotation is the process of regularly replacing credentials like API keys, database passwords, and tokens to reduce the risk of long-term exposure. Even if a secret is compromised, frequent rotation limits how long it can be used.

Without rotation, secrets often go unchanged for months or years, hardcoded in codebases, embedded in CI pipelines, or shared across environments. Over time, this increases the risk of leaks, misuse, and operational blind spots.

## Secrets Rotation in Infisical

Infisical automates rotation using a rolling lifecycle model: new credentials are issued on a fixed schedule while previous ones remain temporarily valid, giving systems time to update without disruption. Each secret moves through three phases (active, inactive, and eventually revoked), so applications continue to function smoothly throughout the rotation process.
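
To illustrate the rolling model, the sketch below keeps the newest credential active, the previous one temporarily usable, and everything older revoked; the data shapes are hypothetical:

```js
// Hypothetical sketch of the rolling lifecycle: index 0 is active, index 1 is
// inactive (still accepted), and everything older is revoked.
const phases = ["active", "inactive", "revoked"];

function rotate(history, freshCredential) {
  return [freshCredential, ...history].map((credential, i) => ({
    ...credential,
    status: phases[Math.min(i, 2)],
  }));
}

let creds = [];
creds = rotate(creds, { value: "v1" }); // [v1:active]
creds = rotate(creds, { value: "v2" }); // [v2:active, v1:inactive]
creds = rotate(creds, { value: "v3" }); // [v3:active, v2:inactive, v1:revoked]
```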

When rotation is applicable for a given secret type, using it is strongly recommended. Infisical supports configuring automatic rotation for a growing set of use cases including [PostgreSQL](/documentation/platform/secret-rotation/postgres-credentials), [MySQL](/documentation/platform/secret-rotation/mysql-credentials), [Microsoft SQL Server](/documentation/platform/secret-rotation/mssql-credentials), [OracleDB](/documentation/platform/secret-rotation/oracledb-credentials), [LDAP](/documentation/platform/secret-rotation/ldap-password), [AWS IAM users](/documentation/platform/secret-rotation/aws-iam-user-secret), [Azure](/documentation/platform/secret-rotation/azure-client-secret) and [Okta](/documentation/platform/secret-rotation/okta-client-secret) client secrets, and more.

To learn more, refer to the [secrets rotation documentation](/documentation/platform/secret-rotation/overview).
@@ -12,6 +12,6 @@ Core capabilities include:

- Secret Stores: Secure, versioned storage scoped by [project](/documentation/platform/secrets-mgmt/project), [environment](/documentation/platform/secrets-mgmt/project#project-environments), and [path](/documentation/platform/folder).
- [Access Control](/documentation/platform/access-controls/overview): Fine-grained, identity-aware permissions for users and machines
- Secret Delivery: Access secrets via [CLI](/cli/overview), [SDKs](/sdks/overview) (Go, Node.js, Python, etc.), [HTTP API](/api-reference/overview/introduction), [agents](/integrations/platforms/infisical-agent), [Kubernetes Operator](/integrations/platforms/kubernetes/overview), [External Secrets Operator (ESO)](https://external-secrets.io/latest/provider/infisical), and more.
- Lifecycle Automation: Automate [secret rotation](/documentation/platform/secret-rotation/overview), generate [dynamic secrets](/documentation/platform/dynamic-secrets/overview), and enforce [approval-based workflows](/documentation/platform/pr-workflows).
- [Secret Delivery](/documentation/platform/secrets-mgmt/concepts/secrets-delivery): Access secrets via [CLI](/cli/overview), [SDKs](/sdks/overview) (Go, Node.js, Python, etc.), [HTTP API](/api-reference/overview/introduction), [agents](/integrations/platforms/infisical-agent), [Kubernetes Operator](/integrations/platforms/kubernetes/overview), [External Secrets Operator (ESO)](https://external-secrets.io/latest/provider/infisical), and more.
- Lifecycle Automation: Automate [secret rotation](/documentation/platform/secrets-mgmt/concepts/secrets-rotation), generate [dynamic secrets](/documentation/platform/secrets-mgmt/concepts/dynamic-secrets), and enforce [approval-based workflows](/documentation/platform/pr-workflows).
- [Secrets Syncs](/integrations/secret-syncs/overview): Push secrets to external services like [GitHub](/integrations/secret-syncs/github), [GitLab](/integrations/secret-syncs/gitlab), [AWS Secrets Manager](/integrations/secret-syncs/aws-secrets-manager), [Vercel](/integrations/secret-syncs/vercel), and more.
@@ -0,0 +1,26 @@

---
title: "SSH Certificates"
description: "Learn what SSH certificates are, why they're useful, and how they enable secure, scalable infrastructure access."
---

SSH access is ubiquitous: it's how engineers, scripts, and platforms across the world remotely administer Linux systems. As teams and systems grow, however, managing access with static SSH keys becomes brittle, and issues like key sprawl, unclear boundaries, and poor revocation hygiene start to emerge.

_SSH certificates_ offer an alternative approach to securing and managing access at scale.

## What is an SSH Certificate?

An _SSH certificate_ is a short-lived, signed credential that proves a user or host’s identity. Unlike static SSH keys, which are distributed and managed manually, SSH certificates rely on a centralized certificate authority (CA) to vouch for identities.

There are two types of SSH certificates:

- User certificates: Issued to users to authenticate with remote hosts
- Host certificates: Issued to hosts so clients can verify they're trusted

Because certificates are time-bound and centrally managed, they’re easier to audit, revoke, and scale across infrastructure.

## SSH with Infisical

Infisical SSH gives you a secure, scalable way to manage infrastructure access using SSH certificates, without the overhead of running your own certificate authority, wiring trust across hosts, or building issuance workflows from scratch.

It replaces long-lived SSH keys with short-lived, identity-bound certificates and handles all the moving parts for you: operating CAs, configuring trust between users and hosts, and issuing certificates on demand. With Infisical SSH, you can register a host with [`infisical ssh add-host`](/docs/cli/commands/ssh#infisical-ssh-add-host), then connect with [`infisical ssh connect`](/docs/cli/commands/ssh#infisical-ssh-connect); that’s all it takes.

The result is centralized, auditable SSH access that’s easy to use and built to scale with your infrastructure.