mirror of https://github.com/Infisical/infisical.git synced 2025-03-28 15:29:21 +00:00

Compare commits


157 Commits

Author SHA1 Message Date
045debeaf3 misc: added unhandled rejection handler 2025-03-08 00:29:23 +08:00
3fb8ad2fac misc: add uncaught exception handler 2025-03-08 00:22:27 +08:00
58ebebb162 Merge pull request from Infisical/feat/addActorToVersionHistory
Add actor to secret version history
2025-03-07 08:06:24 -03:00
b7640f2d03 Lint fixes 2025-03-06 17:36:09 -03:00
2ee4d68fd0 Fix case for multiple projects messing with the joins 2025-03-06 17:04:01 -03:00
3ca931acf1 Add condition to query to only retrieve the actual project id 2025-03-06 16:38:49 -03:00
8e311658d4 Improve query to only use one to retrieve all information 2025-03-06 15:15:52 -03:00
9116acd37b Fix linter issues 2025-03-06 13:07:03 -03:00
0513307d98 Improve code quality 2025-03-06 12:55:10 -03:00
efc3b6d474 Remove secret_version_v1 changes 2025-03-06 11:31:26 -03:00
07e1d1b130 Merge branch 'main' into feat/addActorToVersionHistory 2025-03-06 10:56:54 -03:00
7f76779124 Fix frontend type errors 2025-03-06 09:17:55 -03:00
30bcf1f204 Fix linter and type issues, made a small fix for secret rotation platform events 2025-03-06 09:10:13 -03:00
706feafbf2 revert featureset changes 2025-03-06 00:20:08 -05:00
fc4e3f1f72 update relay health check 2025-03-05 23:50:11 -05:00
dcd5f20325 add example 2025-03-05 22:20:13 -05:00
58f3e116a3 add example 2025-03-05 22:19:56 -05:00
7bc5aad8ec fix infinite loop 2025-03-05 22:14:09 -05:00
a16dc3aef6 add windows stub to fix build issue 2025-03-05 18:29:29 -05:00
da7746c639 use forked pion 2025-03-05 17:54:23 -05:00
cd5b6da541 Merge branch 'main' into feat/addActorToVersionHistory 2025-03-05 17:53:57 -03:00
2dda7180a9 Fix linter issue 2025-03-05 17:36:00 -03:00
30ccfbfc8e Add actor to secret version history 2025-03-05 17:20:57 -03:00
aa76924ee6 fix import 2025-03-05 14:48:36 -05:00
d8f679e72d Merge pull request from Infisical/revert-3128-daniel/view-secret-value-permission
Revert "feat(api/secrets): view secret value permission"
2025-03-05 14:15:16 -05:00
bf6cfbac7a Revert "feat(api/secrets): view secret value permission" 2025-03-05 14:15:02 -05:00
8e82813894 Merge pull request from Infisical/daniel/view-secret-value-permission
feat(api/secrets): view secret value permission
2025-03-05 22:57:25 +04:00
df21a1fb81 fix: types 2025-03-05 22:47:40 +04:00
bdbb6346cb fix: permission error instead of not found error on single secret import 2025-03-05 22:47:40 +04:00
ea9da6d2a8 fix: view secret value (requested changes) 2025-03-05 22:47:40 +04:00
3c2c70912f Update secret-service.ts 2025-03-05 22:47:40 +04:00
b607429b99 chore: minor ui improvements 2025-03-05 22:47:40 +04:00
16c1516979 fix: move permissions 2025-03-05 22:47:40 +04:00
f5dbbaf1fd Update SecretEditRow.tsx 2025-03-05 22:47:40 +04:00
2a292455ef chore: minor ui improvements 2025-03-05 22:47:40 +04:00
4d040706a9 Update SecretDetailSidebar.tsx 2025-03-05 22:47:40 +04:00
5183f76397 fix: pathing 2025-03-05 22:47:40 +04:00
4b3efb43b0 fix: view secret value permission (requested changes) 2025-03-05 22:47:40 +04:00
96046726b2 Update 20250218020306_backfill-secret-permissions-with-readvalue.ts 2025-03-05 22:47:40 +04:00
a86a951acc Update secret-snapshot-service.ts 2025-03-05 22:47:40 +04:00
5e70860160 fix: ui bug 2025-03-05 22:47:40 +04:00
abbd427ee2 minor lint fixes 2025-03-05 22:47:40 +04:00
8fd5fdbc6a chore: minor changes 2025-03-05 22:47:40 +04:00
77e1ccc8d7 fix: view secret value permission (requested changes) 2025-03-05 22:47:40 +04:00
711cc438f6 chore: better error 2025-03-05 22:47:40 +04:00
8447190bf8 fix: coderabbit requested changes 2025-03-05 22:47:40 +04:00
12b447425b chore: further cleanup 2025-03-05 22:47:40 +04:00
9cb1a31287 fix: allow Viewer role to read value 2025-03-05 22:47:40 +04:00
b00413817d fix: add service token read value permissions 2025-03-05 22:47:40 +04:00
2a8bd74e88 Update 20250218020306_backfill-secret-permissions-with-readvalue.ts 2025-03-05 22:47:40 +04:00
f28f4f7561 fix: requested changes 2025-03-05 22:47:40 +04:00
f0b05c683b fix: service token creation 2025-03-05 22:47:40 +04:00
3e8f02a4f9 Update service-token.spec.ts 2025-03-05 22:47:40 +04:00
50ee60a3ea Update service-token.spec.ts 2025-03-05 22:47:40 +04:00
21bdecdf2a Update secret-v2-bridge-service.ts 2025-03-05 22:47:40 +04:00
bf09461416 Update secret-v2-bridge-service.ts 2025-03-05 22:47:40 +04:00
1ff615913c fix: bulk secret create 2025-03-05 22:47:40 +04:00
281cedf1a2 fix: updated migration to support additional privileges 2025-03-05 22:47:39 +04:00
a8d847f139 chore: remove logs 2025-03-05 22:47:39 +04:00
2a0c0590f1 fix: cleanup and bug fixes 2025-03-05 22:47:39 +04:00
2e6d525d27 chore: cleanup 2025-03-05 22:47:39 +04:00
7fd4249d00 fix: frontend requested changes 2025-03-05 22:47:39 +04:00
90cfc44592 fix: personal secret support without read value permission 2025-03-05 22:47:39 +04:00
8c403780c2 chore: lint & ts 2025-03-05 22:47:39 +04:00
b69c091f2f Update 20250218020306_backfill-secret-permissions-with-readvalue.ts 2025-03-05 22:47:39 +04:00
4a66395ce6 feat(api): view secret value, WIP 2025-03-05 22:47:39 +04:00
8c18753e3f Merge pull request from Infisical/daniel/fix-breaking-check
fix: breaking changes check
2025-03-05 22:45:56 +04:00
85c5d69c36 chore: remove breaking change test 2025-03-05 22:42:29 +04:00
94fe577046 chore: test breaking change 2025-03-05 22:38:35 +04:00
a0a579834c fix: check docs endpoint instead of status 2025-03-05 22:36:43 +04:00
b5575f4c20 fix api endpoint 2025-03-05 22:31:01 +04:00
f98f212ecf Update check-api-for-breaking-changes.yml 2025-03-05 22:23:49 +04:00
b331a4a708 fix: breaking changes check 2025-03-05 22:17:16 +04:00
e351a16b5a Merge pull request from Infisical/feat/add-secret-approval-review-comment
feat: add secret approval review comment
2025-03-05 12:24:59 -05:00
2cfca823f2 Merge pull request from akhilmhdh/feat/connector
feat: added ca to cli
2025-03-05 10:13:27 -05:00
a8398a7009 feat: added ca to cli 2025-03-05 20:00:45 +05:30
8c054cedfc misc: added section for approval and rejections 2025-03-05 22:30:26 +08:00
24d4f8100c Merge pull request from akhilmhdh/feat/connector
feat: fixed cli issues in gateway
2025-03-05 08:26:04 -05:00
08f23e2d3c remove background context 2025-03-05 08:24:56 -05:00
d1ad605ac4 misc: address nit 2025-03-05 21:19:41 +08:00
9dd5857ff5 misc: minor UI 2025-03-05 19:32:26 +08:00
babbacdc96 feat: add secret approval review comment 2025-03-05 19:25:56 +08:00
76427f43f7 feat: fixed cli issues in gateway 2025-03-05 16:16:07 +05:30
3badcea95b added permission refresh and main context 2025-03-05 01:07:36 -05:00
1a4c0fe8d9 make heartbeat method simple + fix import 2025-03-04 23:21:26 -05:00
04f6864abc Merge pull request from Infisical/improve-secret-scanning-setup
Improvement: Clear Secret Scanning Query Params after Setup
2025-03-05 04:05:38 +04:00
fcbe0f59d2 Merge pull request from Infisical/daniel/fix-vercel-custom-envs
fix: vercel integration custom envs
2025-03-04 13:45:48 -08:00
e95b6fdeaa cleanup 2025-03-05 01:36:06 +04:00
5391bcd3b2 fix: vercel integration custom envs 2025-03-05 01:33:58 +04:00
48fd9e2a56 Merge pull request from akhilmhdh/feat/connector
feat: quick fix for quic
2025-03-04 15:52:48 -05:00
7b5926d865 feat: quick fix for quic 2025-03-05 02:14:00 +05:30
034123bcdf Merge pull request from Infisical/feat/grantServerAdminAccessToUsers
Allow server admins to grant server admin access to other users
2025-03-04 15:25:09 -05:00
f3786788fd Improve UserPanelTable, moved from useState to handlePopUpOpen 2025-03-04 16:54:28 -03:00
c406f6d78d Update release_build_infisical_cli.yml 2025-03-04 14:52:01 -05:00
eb66295dd4 Update release_build_infisical_cli.yml 2025-03-04 14:41:44 -05:00
798215e84c Update release_build_infisical_cli.yml 2025-03-04 14:36:39 -05:00
53f7491441 Update UpgradePlanModal message to show relevant message on user actions 2025-03-04 16:30:22 -03:00
53f6ab118b Merge pull request from akhilmhdh/feat/connector
Add QUIC to gateway
2025-03-04 14:06:42 -05:00
0f5a1b13a6 fix: lint and typecheck 2025-03-05 00:33:28 +05:30
5c606fe45f improvement: replace window reload with query refetch 2025-03-04 10:39:40 -08:00
bbf60169eb Update Server Admin Console documentation and add a fix for endpoint /admin-access 2025-03-04 15:29:34 -03:00
e004be22e3 feat: updated docker image and resolved build error 2025-03-04 23:58:31 +05:30
016cb4a7ba feat: completed gateway in quic mode 2025-03-04 23:55:40 +05:30
9bfc2a5dd2 feat: updated gateway to quic 2025-03-04 23:55:40 +05:30
72dbef97fb improvement: clear query params after setup to avoid false error messages 2025-03-04 10:14:56 -08:00
f376eaae13 Merge pull request from Infisical/feat/addFolderDescription
Add descriptions to secret folders
2025-03-04 14:56:43 -03:00
026f883d21 Merge pull request from Infisical/misc/replaced-otel-auto-instrumentation-with-manual
misc: replaced otel auto instrumentation with manual
2025-03-04 12:24:14 -05:00
e42f860261 misc: removed host metrics 2025-03-05 01:20:06 +08:00
08ec8c9b73 Fix linter issue and remove background colors from dropdown list 2025-03-04 13:58:34 -03:00
1512d4f496 Fix folder empty description issue and added icon to display it 2025-03-04 13:44:40 -03:00
9f7b42ad91 misc: replaced otel auto instrumentation with manual 2025-03-05 00:16:15 +08:00
3045477c32 Merge pull request from Infisical/bitbucket-workspace-select-fix
Fix: Address Bitbucket Configuration UI Bug Preventing Workspace Selection
2025-03-05 01:14:09 +09:00
be4adc2759 Allow server admins to grant server admin access to other users 2025-03-04 12:38:27 -03:00
4eba80905a Lint fixes 2025-03-04 10:44:26 -03:00
b023bc7442 Type fixes 2025-03-04 10:26:23 -03:00
a0029ab469 Add descriptions to secret folders 2025-03-04 10:11:20 -03:00
53605c3880 improvement: address feedback 2025-03-03 15:11:48 -08:00
e5bca5b5df Merge pull request from Infisical/remove-mention-of-affixes-for-secret-syncs
Documentation: Remove Secret Sync Affix Options Reference
2025-03-03 14:51:56 -08:00
4091bc19e9 Merge pull request from Infisical/fix/secretReminderSubmitOnModalClose
Save Secret Reminder from Modal
2025-03-03 15:25:42 -05:00
23bd048bb9 Fix delete secret reminder notification 2025-03-03 17:20:44 -03:00
17a4674821 Fix success notification message on reminder updates 2025-03-03 17:04:02 -03:00
ec9631107d Type fixes 2025-03-03 16:36:14 -03:00
3fa450b9a7 Fix for secrets reminder modal, now saving the reminder on modal close 2025-03-03 16:13:03 -03:00
3b9c62c366 Merge pull request from Infisical/daniel/secret-requests
feat(secret-sharing): secret requests
2025-03-04 04:04:39 +09:00
cb3d171d48 documentation: remove reference to secret affixes in secret syncs overview (temp) 2025-03-03 10:59:31 -08:00
c29841fbcf Merge pull request from Infisical/misc/updated-notices-doc
misc: updated notices doc
2025-03-03 13:57:06 -05:00
fcccf1bd8d misc: updated notices doc 2025-03-04 02:46:25 +08:00
4382825162 fix: address ui preventing from selecting non-default workspace 2025-03-03 10:16:15 -08:00
f80ef1dcc8 Merge pull request from Infisical/misc/add-datadog-profiler
misc: add datadog profiler
2025-03-04 01:54:07 +08:00
7abf3e3642 misc: re-added dd-trace 2025-03-04 01:51:58 +08:00
82ef35bd08 Merge remote-tracking branch 'origin/main' into misc/add-datadog-profiler 2025-03-04 01:51:13 +08:00
4eb668b5a5 misc: uninstalled dd-trace 2025-03-04 01:50:57 +08:00
18edea9f26 Merge pull request from Infisical/misc/gov-banner-and-consent-reqs
misc: add instance banner and consent support
2025-03-04 01:46:54 +08:00
787c091948 requested changes 2025-03-03 21:44:40 +04:00
ff269b1063 Update RequestedSecretsRow.tsx 2025-03-03 21:14:40 +04:00
ca0636cb25 minor fixes 2025-03-03 21:14:40 +04:00
b995358b7e fix: type fixes 2025-03-03 21:14:40 +04:00
7aaf0f4ed3 feat(secret-sharing): secret requests 2025-03-03 21:14:40 +04:00
68646bcdf8 doc: added docs 2025-03-04 00:36:42 +08:00
9989ceb6d1 misc: addressed comments 2025-03-03 23:55:11 +08:00
95d7ba5f22 misc: add datadog profiler 2025-03-03 22:39:55 +08:00
2aa6fdf983 Merge pull request from akoullick1/patch-10
Update spending-money.mdx
2025-03-02 17:47:01 -08:00
be5a32a5d6 Merge pull request from akoullick1/patch-9
Update onboarding.mdx
2025-03-02 17:45:57 -08:00
f009cd329b Update spending-money.mdx 2025-03-02 15:56:44 -08:00
e2778864e2 Update onboarding.mdx 2025-03-02 15:50:35 -08:00
ea7375b2c6 Merge pull request from akhilmhdh/fix/migration-dev
feat: added dev migration commands
2025-03-01 09:26:45 +09:00
d42566c335 Merge pull request from Infisical/fix-secret-approval-generation-when-new-key-name-with-tags
Fix: Use New Secret Key for Approval Policy Generation for Tag Resolution
2025-03-01 02:57:56 +09:00
45cbd9f006 feat: added dev migration commands 2025-02-28 15:37:51 +05:30
8580602ea7 Merge pull request from Infisical/feat/add-auto-redeploy-daemonset-and-statefulset
feat: add auto redeploy for daemonset and statefulset
2025-02-28 17:00:52 +09:00
7ff75cdfab Merge pull request from thomas-infisical/remove-service-token-deprecation
docs: remove service token deprecation warning
2025-02-28 13:43:57 +09:00
bd8c8871c0 fix: use new secret key value if present for tags when resolving update for secret approval 2025-02-28 13:38:42 +09:00
d5aa13b277 Merge pull request from Infisical/increase-secret-reminder-note-max-length
Improvement: Increase Secret v2 Reminder Note Max Length
2025-02-28 13:12:55 +09:00
428dc5d371 misc: add rbac/permissions for daemonsets and statefulsets 2025-02-28 13:01:45 +09:00
31dc36d4e2 misc: updated helm version 2025-02-27 16:31:00 +09:00
51f29e5357 feat: add auto redeploy for daemonset and statefulset 2025-02-27 16:26:43 +09:00
ce4c5d8ea1 misc: add instance banner and consent support 2025-02-26 23:58:45 +09:00
1c2b4e91ba docs: remove service token deprecation warning 2025-02-26 13:38:36 +09:00
146 changed files with 7312 additions and 1936 deletions
.env.example
.github/workflows
Dockerfile.fips.standalone-infisical
Dockerfile.standalone-infisical
backend
Dockerfile
Dockerfile.dev
package-lock.json
package.json
src
db
ee
lib
api-docs
config
gateway
telemetry
main.ts
server/routes
services
cli
company/handbook
docs
api-reference/overview
documentation/platform/admin-panel
images/platform/admin-panels
integrations
frontend
helm-charts/secrets-operator
k8-operator
config/rbac
controllers/infisicalsecret
kubectl-install
packages/controllerhelpers

@ -112,4 +112,11 @@ INF_APP_CONNECTION_GCP_SERVICE_ACCOUNT_CREDENTIAL=
# azure app connection
INF_APP_CONNECTION_AZURE_CLIENT_ID=
INF_APP_CONNECTION_AZURE_CLIENT_SECRET=
# datadog
SHOULD_USE_DATADOG_TRACER=
DATADOG_PROFILING_ENABLED=
DATADOG_ENV=
DATADOG_SERVICE=
DATADOG_HOSTNAME=

@ -32,10 +32,23 @@ jobs:
run: touch .env && docker compose -f docker-compose.dev.yml up -d db redis
- name: Start the server
run: |
echo "SECRET_SCANNING_GIT_APP_ID=793712" >> .env
echo "SECRET_SCANNING_PRIVATE_KEY=some-random" >> .env
echo "SECRET_SCANNING_WEBHOOK_SECRET=some-random" >> .env
docker run --name infisical-api -d -p 4000:4000 -e DB_CONNECTION_URI=$DB_CONNECTION_URI -e REDIS_URL=$REDIS_URL -e JWT_AUTH_SECRET=$JWT_AUTH_SECRET -e ENCRYPTION_KEY=$ENCRYPTION_KEY --env-file .env --entrypoint '/bin/sh' infisical-api -c "npm run migration:latest && ls && node dist/main.mjs"
echo "SECRET_SCANNING_GIT_APP_ID=793712" >> .env
echo "SECRET_SCANNING_PRIVATE_KEY=some-random" >> .env
echo "SECRET_SCANNING_WEBHOOK_SECRET=some-random" >> .env
echo "Examining built image:"
docker image inspect infisical-api | grep -A 5 "Entrypoint"
docker run --name infisical-api -d -p 4000:4000 \
-e DB_CONNECTION_URI=$DB_CONNECTION_URI \
-e REDIS_URL=$REDIS_URL \
-e JWT_AUTH_SECRET=$JWT_AUTH_SECRET \
-e ENCRYPTION_KEY=$ENCRYPTION_KEY \
--env-file .env \
infisical-api
echo "Container status right after creation:"
docker ps -a | grep infisical-api
env:
REDIS_URL: redis://172.17.0.1:6379
DB_CONNECTION_URI: postgres://infisical:infisical@172.17.0.1:5432/infisical?sslmode=disable
@ -43,27 +56,39 @@ jobs:
ENCRYPTION_KEY: 4bnfe4e407b8921c104518903515b218
- uses: actions/setup-go@v5
with:
go-version: '1.21.5'
go-version: "1.21.5"
- name: Wait for container to be stable and check logs
run: |
SECONDS=0
HEALTHY=0
while [ $SECONDS -lt 60 ]; do
if docker ps | grep infisical-api | grep -q healthy; then
echo "Container is healthy."
HEALTHY=1
# Check if container is running
if docker ps | grep infisical-api; then
# Try to access the API endpoint
if curl -s -f http://localhost:4000/api/docs/json > /dev/null 2>&1; then
echo "API endpoint is responding. Container seems healthy."
HEALTHY=1
break
fi
else
echo "Container is not running!"
docker ps -a | grep infisical-api
break
fi
echo "Waiting for container to be healthy... ($SECONDS seconds elapsed)"
docker logs infisical-api
sleep 2
SECONDS=$((SECONDS+2))
sleep 5
SECONDS=$((SECONDS+5))
done
if [ $HEALTHY -ne 1 ]; then
echo "Container did not become healthy in time"
echo "Container status:"
docker ps -a | grep infisical-api
echo "Container logs (if any):"
docker logs infisical-api || echo "No logs available"
echo "Container inspection:"
docker inspect infisical-api | grep -A 5 "State"
exit 1
fi
- name: Install openapi-diff
@ -71,7 +96,8 @@ jobs:
- name: Running OpenAPI Spec diff action
run: oasdiff breaking https://app.infisical.com/api/docs/json http://localhost:4000/api/docs/json --fail-on ERR
- name: cleanup
if: always()
run: |
docker compose -f "docker-compose.dev.yml" down
docker stop infisical-api
docker remove infisical-api
docker stop infisical-api || true
docker rm infisical-api || true

@ -26,7 +26,7 @@ jobs:
CLI_TESTS_INFISICAL_VAULT_FILE_PASSPHRASE: ${{ secrets.CLI_TESTS_INFISICAL_VAULT_FILE_PASSPHRASE }}
npm-release:
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
env:
working-directory: ./npm
needs:
@ -83,7 +83,7 @@ jobs:
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
goreleaser:
runs-on: ubuntu-20.04
runs-on: ubuntu-latest
needs: [cli-integration-tests]
steps:
- uses: actions/checkout@v3
@ -103,11 +103,12 @@ jobs:
go-version: ">=1.19.3"
cache: true
cache-dependency-path: cli/go.sum
- name: libssl1.1 => libssl1.0-dev for OSXCross
- name: Setup for libssl1.0-dev
run: |
echo 'deb http://security.ubuntu.com/ubuntu bionic-security main' | sudo tee -a /etc/apt/sources.list
sudo apt update && apt-cache policy libssl1.0-dev
sudo apt-get install libssl1.0-dev
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 3B4FE6ACC0B21F32
sudo apt update
sudo apt-get install -y libssl1.0-dev
- name: OSXCross for CGO Support
run: |
mkdir ../../osxcross

@ -161,6 +161,9 @@ COPY --from=backend-runner /app /backend
COPY --from=frontend-runner /app ./backend/frontend-build
ARG INFISICAL_PLATFORM_VERSION
ENV INFISICAL_PLATFORM_VERSION $INFISICAL_PLATFORM_VERSION
ENV PORT 8080
ENV HOST=0.0.0.0
ENV HTTPS_ENABLED false

@ -3,13 +3,10 @@ ARG POSTHOG_API_KEY=posthog-api-key
ARG INTERCOM_ID=intercom-id
ARG CAPTCHA_SITE_KEY=captcha-site-key
FROM node:20-alpine AS base
FROM node:20-slim AS base
FROM base AS frontend-dependencies
# Check https://github.com/nodejs/docker-node/tree/b4117f9333da4138b03a546ec926ef50a31506c3#nodealpine to understand why libc6-compat might be needed.
RUN apk add --no-cache libc6-compat
WORKDIR /app
COPY frontend/package.json frontend/package-lock.json ./
@ -45,8 +42,8 @@ RUN npm run build
FROM base AS frontend-runner
WORKDIR /app
RUN addgroup --system --gid 1001 nodejs
RUN adduser --system --uid 1001 non-root-user
RUN groupadd --system --gid 1001 nodejs
RUN useradd --system --uid 1001 --gid nodejs non-root-user
COPY --from=frontend-builder --chown=non-root-user:nodejs /app/dist ./
@ -56,21 +53,23 @@ USER non-root-user
## BACKEND
##
FROM base AS backend-build
RUN addgroup --system --gid 1001 nodejs \
&& adduser --system --uid 1001 non-root-user
WORKDIR /app
# Install all required dependencies for build
RUN apk --update add \
RUN apt-get update && apt-get install -y \
python3 \
make \
g++ \
unixodbc \
freetds \
freetds-bin \
unixodbc-dev \
libc-dev \
freetds-dev
freetds-dev \
&& rm -rf /var/lib/apt/lists/*
RUN groupadd --system --gid 1001 nodejs
RUN useradd --system --uid 1001 --gid nodejs non-root-user
COPY backend/package*.json ./
RUN npm ci --only-production
@ -86,18 +85,19 @@ FROM base AS backend-runner
WORKDIR /app
# Install all required dependencies for runtime
RUN apk --update add \
RUN apt-get update && apt-get install -y \
python3 \
make \
g++ \
unixodbc \
freetds \
freetds-bin \
unixodbc-dev \
libc-dev \
freetds-dev
freetds-dev \
&& rm -rf /var/lib/apt/lists/*
# Configure ODBC
RUN printf "[FreeTDS]\nDescription = FreeTDS Driver\nDriver = /usr/lib/libtdsodbc.so\nSetup = /usr/lib/libtdsodbc.so\nFileUsage = 1\n" > /etc/odbcinst.ini
RUN printf "[FreeTDS]\nDescription = FreeTDS Driver\nDriver = /usr/lib/x86_64-linux-gnu/odbc/libtdsodbc.so\nSetup = /usr/lib/x86_64-linux-gnu/odbc/libtdsS.so\nFileUsage = 1\n" > /etc/odbcinst.ini
COPY backend/package*.json ./
RUN npm ci --only-production
@ -109,34 +109,36 @@ RUN mkdir frontend-build
# Production stage
FROM base AS production
RUN apk add --upgrade --no-cache ca-certificates
RUN apk add --no-cache bash curl && curl -1sLf \
'https://dl.cloudsmith.io/public/infisical/infisical-cli/setup.alpine.sh' | bash \
&& apk add infisical=0.31.1 && apk add --no-cache git
WORKDIR /
# Install all required runtime dependencies
RUN apk --update add \
RUN apt-get update && apt-get install -y \
ca-certificates \
bash \
curl \
git \
python3 \
make \
g++ \
unixodbc \
freetds \
freetds-bin \
unixodbc-dev \
libc-dev \
freetds-dev \
bash \
curl \
git \
openssh
wget \
openssh-client \
&& rm -rf /var/lib/apt/lists/*
# Install Infisical CLI
RUN curl -1sLf 'https://dl.cloudsmith.io/public/infisical/infisical-cli/setup.deb.sh' | bash \
&& apt-get update && apt-get install -y infisical=0.31.1 \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /
# Configure ODBC in production
RUN printf "[FreeTDS]\nDescription = FreeTDS Driver\nDriver = /usr/lib/libtdsodbc.so\nSetup = /usr/lib/libtdsodbc.so\nFileUsage = 1\n" > /etc/odbcinst.ini
RUN printf "[FreeTDS]\nDescription = FreeTDS Driver\nDriver = /usr/lib/x86_64-linux-gnu/odbc/libtdsodbc.so\nSetup = /usr/lib/x86_64-linux-gnu/odbc/libtdsS.so\nFileUsage = 1\n" > /etc/odbcinst.ini
# Setup user permissions
RUN addgroup --system --gid 1001 nodejs \
&& adduser --system --uid 1001 non-root-user
RUN groupadd --system --gid 1001 nodejs \
&& useradd --system --uid 1001 --gid nodejs non-root-user
# Give non-root-user permission to update SSL certs
RUN chown -R non-root-user /etc/ssl/certs
@ -154,11 +156,11 @@ ENV INTERCOM_ID=$INTERCOM_ID
ARG CAPTCHA_SITE_KEY
ENV CAPTCHA_SITE_KEY=$CAPTCHA_SITE_KEY
COPY --from=backend-runner /app /backend
COPY --from=frontend-runner /app ./backend/frontend-build
ARG INFISICAL_PLATFORM_VERSION
ENV INFISICAL_PLATFORM_VERSION $INFISICAL_PLATFORM_VERSION
ENV PORT 8080
ENV HOST=0.0.0.0
@ -166,6 +168,7 @@ ENV HTTPS_ENABLED false
ENV NODE_ENV production
ENV STANDALONE_BUILD true
ENV STANDALONE_MODE true
WORKDIR /backend
ENV TELEMETRY_ENABLED true

@ -1,23 +1,22 @@
# Build stage
FROM node:20-alpine AS build
FROM node:20-slim AS build
WORKDIR /app
# Required for pkcs11js
RUN apk --update add \
python3 \
make \
g++ \
openssh
RUN apt-get update && apt-get install -y \
python3 \
make \
g++ \
openssh-client
# install dependencies for TDS driver (required for SAP ASE dynamic secrets)
RUN apk add --no-cache \
# Install dependencies for TDS driver (required for SAP ASE dynamic secrets)
RUN apt-get install -y \
unixodbc \
freetds \
freetds-bin \
freetds-dev \
unixodbc-dev \
libc-dev \
freetds-dev
libc-dev
COPY package*.json ./
RUN npm ci --only-production
@ -26,36 +25,36 @@ COPY . .
RUN npm run build
# Production stage
FROM node:20-alpine
FROM node:20-slim
WORKDIR /app
ENV npm_config_cache /home/node/.npm
COPY package*.json ./
RUN apk --update add \
python3 \
make \
g++
RUN apt-get update && apt-get install -y \
python3 \
make \
g++
# install dependencies for TDS driver (required for SAP ASE dynamic secrets)
RUN apk add --no-cache \
# Install dependencies for TDS driver (required for SAP ASE dynamic secrets)
RUN apt-get install -y \
unixodbc \
freetds \
freetds-bin \
freetds-dev \
unixodbc-dev \
libc-dev \
freetds-dev
libc-dev
RUN printf "[FreeTDS]\nDescription = FreeTDS Driver\nDriver = /usr/lib/libtdsodbc.so\nSetup = /usr/lib/libtdsodbc.so\nFileUsage = 1\n" > /etc/odbcinst.ini
RUN printf "[FreeTDS]\nDescription = FreeTDS Driver\nDriver = /usr/lib/x86_64-linux-gnu/odbc/libtdsodbc.so\nSetup = /usr/lib/x86_64-linux-gnu/odbc/libtdsodbc.so\nFileUsage = 1\n" > /etc/odbcinst.ini
RUN npm ci --only-production && npm cache clean --force
COPY --from=build /app .
RUN apk add --no-cache bash curl && curl -1sLf \
'https://dl.cloudsmith.io/public/infisical/infisical-cli/setup.alpine.sh' | bash \
&& apk add infisical=0.8.1 && apk add --no-cache git
# Install Infisical CLI
RUN apt-get install -y curl bash && \
curl -1sLf 'https://dl.cloudsmith.io/public/infisical/infisical-cli/setup.deb.sh' | bash && \
apt-get update && apt-get install -y infisical=0.8.1 git
HEALTHCHECK --interval=10s --timeout=3s --start-period=10s \
CMD node healthcheck.js

@ -1,4 +1,4 @@
FROM node:20-alpine
FROM node:20-slim
# ? Setup a test SoftHSM module. In production a real HSM is used.
@ -7,32 +7,32 @@ ARG SOFTHSM2_VERSION=2.5.0
ENV SOFTHSM2_VERSION=${SOFTHSM2_VERSION} \
SOFTHSM2_SOURCES=/tmp/softhsm2
# install build dependencies including python3 (required for pkcs11js and partially TDS driver)
RUN apk --update add \
alpine-sdk \
autoconf \
automake \
git \
libtool \
openssl-dev \
python3 \
make \
g++ \
openssh
# Install build dependencies including python3 (required for pkcs11js and partially TDS driver)
RUN apt-get update && apt-get install -y \
build-essential \
autoconf \
automake \
git \
libtool \
libssl-dev \
python3 \
make \
g++ \
openssh-client \
curl \
pkg-config
# install dependencies for TDS driver (required for SAP ASE dynamic secrets)
RUN apk add --no-cache \
# Install dependencies for TDS driver (required for SAP ASE dynamic secrets)
RUN apt-get install -y \
unixodbc \
freetds \
unixodbc-dev \
libc-dev \
freetds-dev
freetds-dev \
freetds-bin \
tdsodbc
RUN printf "[FreeTDS]\nDescription = FreeTDS Driver\nDriver = /usr/lib/x86_64-linux-gnu/odbc/libtdsodbc.so\nSetup = /usr/lib/x86_64-linux-gnu/odbc/libtdsodbc.so\nFileUsage = 1\n" > /etc/odbcinst.ini
RUN printf "[FreeTDS]\nDescription = FreeTDS Driver\nDriver = /usr/lib/libtdsodbc.so\nSetup = /usr/lib/libtdsodbc.so\nFileUsage = 1\n" > /etc/odbcinst.ini
# build and install SoftHSM2
# Build and install SoftHSM2
RUN git clone https://github.com/opendnssec/SoftHSMv2.git ${SOFTHSM2_SOURCES}
WORKDIR ${SOFTHSM2_SOURCES}
@ -45,16 +45,18 @@ RUN git checkout ${SOFTHSM2_VERSION} -b ${SOFTHSM2_VERSION} \
WORKDIR /root
RUN rm -fr ${SOFTHSM2_SOURCES}
# install pkcs11-tool
RUN apk --update add opensc
# Install pkcs11-tool
RUN apt-get install -y opensc
RUN softhsm2-util --init-token --slot 0 --label "auth-app" --pin 1234 --so-pin 0000
RUN mkdir -p /etc/softhsm2/tokens && \
softhsm2-util --init-token --slot 0 --label "auth-app" --pin 1234 --so-pin 0000
# ? App setup
RUN apk add --no-cache bash curl && curl -1sLf \
'https://dl.cloudsmith.io/public/infisical/infisical-cli/setup.alpine.sh' | bash \
&& apk add infisical=0.8.1 && apk add --no-cache git
# Install Infisical CLI
RUN curl -1sLf 'https://dl.cloudsmith.io/public/infisical/infisical-cli/setup.deb.sh' | bash && \
apt-get update && \
apt-get install -y infisical=0.8.1
WORKDIR /app

backend/package-lock.json (generated): 2239 lines changed

File diff suppressed because it is too large

@ -60,6 +60,13 @@
"migration:status": "npm run auditlog-migration:status && knex --knexfile ./dist/db/knexfile.mjs --client pg migrate:status",
"migration:rollback": "npm run auditlog-migration:rollback && knex --knexfile ./dist/db/knexfile.mjs migrate:rollback",
"migration:unlock": "npm run auditlog-migration:unlock && knex --knexfile ./dist/db/knexfile.mjs migrate:unlock",
"migration:up-dev": "knex --knexfile ./src/db/knexfile.ts --client pg migrate:up",
"migration:down-dev": "knex --knexfile ./src/db/knexfile.ts --client pg migrate:down",
"migration:list-dev": "knex --knexfile ./src/db/knexfile.ts --client pg migrate:list",
"migration:latest-dev": "knex --knexfile ./src/db/knexfile.ts --client pg migrate:latest",
"migration:status-dev": "knex --knexfile ./src/db/knexfile.ts --client pg migrate:status",
"migration:rollback-dev": "knex --knexfile ./src/db/knexfile.ts migrate:rollback",
"migration:unlock-dev": "knex --knexfile ./src/db/knexfile.ts migrate:unlock",
"migrate:org": "tsx ./scripts/migrate-organization.ts",
"seed:new": "tsx ./scripts/create-seed-file.ts",
"seed": "knex --knexfile ./dist/db/knexfile.ts --client pg seed:run",
@ -138,6 +145,7 @@
"@fastify/swagger": "^8.14.0",
"@fastify/swagger-ui": "^2.1.0",
"@google-cloud/kms": "^4.5.0",
"@infisical/quic": "^1.0.8",
"@node-saml/passport-saml": "^4.0.4",
"@octokit/auth-app": "^7.1.1",
"@octokit/plugin-retry": "^5.0.5",
@ -145,10 +153,10 @@
"@octokit/webhooks-types": "^7.3.1",
"@octopusdeploy/api-client": "^3.4.1",
"@opentelemetry/api": "^1.9.0",
"@opentelemetry/auto-instrumentations-node": "^0.53.0",
"@opentelemetry/exporter-metrics-otlp-proto": "^0.55.0",
"@opentelemetry/exporter-prometheus": "^0.55.0",
"@opentelemetry/instrumentation": "^0.55.0",
"@opentelemetry/instrumentation-http": "^0.57.2",
"@opentelemetry/resources": "^1.28.0",
"@opentelemetry/sdk-metrics": "^1.28.0",
"@opentelemetry/semantic-conventions": "^1.27.0",
@ -169,6 +177,7 @@
"cassandra-driver": "^4.7.2",
"connect-redis": "^7.1.1",
"cron": "^3.1.7",
"dd-trace": "^5.40.0",
"dotenv": "^16.4.1",
"fastify": "^4.28.1",
"fastify-plugin": "^4.5.1",
@ -177,6 +186,7 @@
"handlebars": "^4.7.8",
"hdb": "^0.19.10",
"ioredis": "^5.3.2",
"isomorphic-dompurify": "^2.22.0",
"jmespath": "^0.16.0",
"jsonwebtoken": "^9.0.2",
"jsrp": "^0.2.4",

@ -39,7 +39,7 @@ export default {
},
migrations: {
tableName: "infisical_migrations",
loadExtensions: [".mjs"]
loadExtensions: [".mjs", ".ts"]
}
},
production: {
@ -64,7 +64,7 @@ export default {
},
migrations: {
tableName: "infisical_migrations",
loadExtensions: [".mjs"]
loadExtensions: [".mjs", ".ts"]
}
}
} as Knex.Config;

@ -0,0 +1,25 @@
import { Knex } from "knex";
import { SecretSharingType } from "@app/services/secret-sharing/secret-sharing-types";
import { TableName } from "../schemas";
export async function up(knex: Knex): Promise<void> {
const hasSharingTypeColumn = await knex.schema.hasColumn(TableName.SecretSharing, "type");
await knex.schema.alterTable(TableName.SecretSharing, (table) => {
if (!hasSharingTypeColumn) {
table.string("type", 32).defaultTo(SecretSharingType.Share).notNullable();
}
});
}
export async function down(knex: Knex): Promise<void> {
const hasSharingTypeColumn = await knex.schema.hasColumn(TableName.SecretSharing, "type");
await knex.schema.alterTable(TableName.SecretSharing, (table) => {
if (hasSharingTypeColumn) {
table.dropColumn("type");
}
});
}

@ -0,0 +1,31 @@
import { Knex } from "knex";
import { TableName } from "../schemas";
export async function up(knex: Knex): Promise<void> {
const hasAuthConsentContentCol = await knex.schema.hasColumn(TableName.SuperAdmin, "authConsentContent");
const hasPageFrameContentCol = await knex.schema.hasColumn(TableName.SuperAdmin, "pageFrameContent");
if (await knex.schema.hasTable(TableName.SuperAdmin)) {
await knex.schema.alterTable(TableName.SuperAdmin, (t) => {
if (!hasAuthConsentContentCol) {
t.text("authConsentContent");
}
if (!hasPageFrameContentCol) {
t.text("pageFrameContent");
}
});
}
}
export async function down(knex: Knex): Promise<void> {
const hasAuthConsentContentCol = await knex.schema.hasColumn(TableName.SuperAdmin, "authConsentContent");
const hasPageFrameContentCol = await knex.schema.hasColumn(TableName.SuperAdmin, "pageFrameContent");
await knex.schema.alterTable(TableName.SuperAdmin, (t) => {
if (hasAuthConsentContentCol) {
t.dropColumn("authConsentContent");
}
if (hasPageFrameContentCol) {
t.dropColumn("pageFrameContent");
}
});
}

@ -0,0 +1,23 @@
import { Knex } from "knex";
import { TableName } from "@app/db/schemas";
export async function up(knex: Knex): Promise<void> {
const hasProjectDescription = await knex.schema.hasColumn(TableName.SecretFolder, "description");
if (!hasProjectDescription) {
await knex.schema.alterTable(TableName.SecretFolder, (t) => {
t.string("description");
});
}
}
export async function down(knex: Knex): Promise<void> {
const hasProjectDescription = await knex.schema.hasColumn(TableName.SecretFolder, "description");
if (hasProjectDescription) {
await knex.schema.alterTable(TableName.SecretFolder, (t) => {
t.dropColumn("description");
});
}
}

@ -0,0 +1,19 @@
import { Knex } from "knex";
import { TableName } from "../schemas";
export async function up(knex: Knex): Promise<void> {
if (!(await knex.schema.hasColumn(TableName.SecretApprovalRequestReviewer, "comment"))) {
await knex.schema.alterTable(TableName.SecretApprovalRequestReviewer, (t) => {
t.string("comment");
});
}
}
export async function down(knex: Knex): Promise<void> {
if (await knex.schema.hasColumn(TableName.SecretApprovalRequestReviewer, "comment")) {
await knex.schema.alterTable(TableName.SecretApprovalRequestReviewer, (t) => {
t.dropColumn("comment");
});
}
}

@ -0,0 +1,45 @@
import { Knex } from "knex";
import { TableName } from "@app/db/schemas";
export async function up(knex: Knex): Promise<void> {
if (await knex.schema.hasTable(TableName.SecretVersionV2)) {
const hasSecretVersionV2UserActorId = await knex.schema.hasColumn(TableName.SecretVersionV2, "userActorId");
const hasSecretVersionV2IdentityActorId = await knex.schema.hasColumn(TableName.SecretVersionV2, "identityActorId");
const hasSecretVersionV2ActorType = await knex.schema.hasColumn(TableName.SecretVersionV2, "actorType");
await knex.schema.alterTable(TableName.SecretVersionV2, (t) => {
if (!hasSecretVersionV2UserActorId) {
t.uuid("userActorId");
t.foreign("userActorId").references("id").inTable(TableName.Users);
}
if (!hasSecretVersionV2IdentityActorId) {
t.uuid("identityActorId");
t.foreign("identityActorId").references("id").inTable(TableName.Identity);
}
if (!hasSecretVersionV2ActorType) {
t.string("actorType");
}
});
}
}
export async function down(knex: Knex): Promise<void> {
if (await knex.schema.hasTable(TableName.SecretVersionV2)) {
const hasSecretVersionV2UserActorId = await knex.schema.hasColumn(TableName.SecretVersionV2, "userActorId");
const hasSecretVersionV2IdentityActorId = await knex.schema.hasColumn(TableName.SecretVersionV2, "identityActorId");
const hasSecretVersionV2ActorType = await knex.schema.hasColumn(TableName.SecretVersionV2, "actorType");
await knex.schema.alterTable(TableName.SecretVersionV2, (t) => {
if (hasSecretVersionV2UserActorId) {
t.dropColumn("userActorId");
}
if (hasSecretVersionV2IdentityActorId) {
t.dropColumn("identityActorId");
}
if (hasSecretVersionV2ActorType) {
t.dropColumn("actorType");
}
});
}
}

@ -13,7 +13,8 @@ export const SecretApprovalRequestsReviewersSchema = z.object({
requestId: z.string().uuid(),
createdAt: z.date(),
updatedAt: z.date(),
reviewerUserId: z.string().uuid()
reviewerUserId: z.string().uuid(),
comment: z.string().nullable().optional()
});
export type TSecretApprovalRequestsReviewers = z.infer<typeof SecretApprovalRequestsReviewersSchema>;

@ -15,7 +15,8 @@ export const SecretFoldersSchema = z.object({
updatedAt: z.date(),
envId: z.string().uuid(),
parentId: z.string().uuid().nullable().optional(),
isReserved: z.boolean().default(false).nullable().optional()
isReserved: z.boolean().default(false).nullable().optional(),
description: z.string().nullable().optional()
});
export type TSecretFolders = z.infer<typeof SecretFoldersSchema>;

@ -12,6 +12,7 @@ import { TImmutableDBKeys } from "./models";
export const SecretSharingSchema = z.object({
id: z.string().uuid(),
encryptedValue: z.string().nullable().optional(),
type: z.string(),
iv: z.string().nullable().optional(),
tag: z.string().nullable().optional(),
hashedHex: z.string().nullable().optional(),

@ -25,7 +25,10 @@ export const SecretVersionsV2Schema = z.object({
folderId: z.string().uuid(),
userId: z.string().uuid().nullable().optional(),
createdAt: z.date(),
updatedAt: z.date()
updatedAt: z.date(),
userActorId: z.string().uuid().nullable().optional(),
identityActorId: z.string().uuid().nullable().optional(),
actorType: z.string().nullable().optional()
});
export type TSecretVersionsV2 = z.infer<typeof SecretVersionsV2Schema>;

@ -23,7 +23,9 @@ export const SuperAdminSchema = z.object({
defaultAuthOrgId: z.string().uuid().nullable().optional(),
enabledLoginMethods: z.string().array().nullable().optional(),
encryptedSlackClientId: zodBuffer.nullable().optional(),
encryptedSlackClientSecret: zodBuffer.nullable().optional()
encryptedSlackClientSecret: zodBuffer.nullable().optional(),
authConsentContent: z.string().nullable().optional(),
pageFrameContent: z.string().nullable().optional()
});
export type TSuperAdmin = z.infer<typeof SuperAdminSchema>;

@ -159,7 +159,8 @@ export const registerSecretApprovalRequestRouter = async (server: FastifyZodProv
id: z.string()
}),
body: z.object({
status: z.enum([ApprovalStatus.APPROVED, ApprovalStatus.REJECTED])
status: z.enum([ApprovalStatus.APPROVED, ApprovalStatus.REJECTED]),
comment: z.string().optional()
}),
response: {
200: z.object({
@ -175,8 +176,25 @@ export const registerSecretApprovalRequestRouter = async (server: FastifyZodProv
actorAuthMethod: req.permission.authMethod,
actorOrgId: req.permission.orgId,
approvalId: req.params.id,
status: req.body.status
status: req.body.status,
comment: req.body.comment
});
await server.services.auditLog.createAuditLog({
...req.auditLogInfo,
orgId: req.permission.orgId,
projectId: review.projectId,
event: {
type: EventType.SECRET_APPROVAL_REQUEST_REVIEW,
metadata: {
secretApprovalRequestId: review.requestId,
reviewedBy: review.reviewerUserId,
status: review.status as ApprovalStatus,
comment: review.comment || ""
}
}
});
return { review };
}
});
@ -235,7 +253,6 @@ export const registerSecretApprovalRequestRouter = async (server: FastifyZodProv
const tagSchema = SecretTagsSchema.pick({
id: true,
slug: true,
name: true,
color: true
})
.array()
@ -268,7 +285,7 @@ export const registerSecretApprovalRequestRouter = async (server: FastifyZodProv
environment: z.string(),
statusChangedByUser: approvalRequestUser.optional(),
committerUser: approvalRequestUser,
reviewers: approvalRequestUser.extend({ status: z.string() }).array(),
reviewers: approvalRequestUser.extend({ status: z.string(), comment: z.string().optional() }).array(),
secretPath: z.string(),
commits: secretRawSchema
.omit({ _id: true, environment: true, workspace: true, type: true, version: true })

@ -35,7 +35,6 @@ export const registerSnapshotRouter = async (server: FastifyZodProvider) => {
tags: SecretTagsSchema.pick({
id: true,
slug: true,
name: true,
color: true
}).array()
})

@ -22,6 +22,7 @@ import {
} from "@app/services/secret-sync/secret-sync-types";
import { KmipPermission } from "../kmip/kmip-enum";
import { ApprovalStatus } from "../secret-approval-request/secret-approval-request-types";
export type TListProjectAuditLogDTO = {
filter: {
@ -165,6 +166,7 @@ export enum EventType {
SECRET_APPROVAL_REQUEST = "secret-approval-request",
SECRET_APPROVAL_CLOSED = "secret-approval-closed",
SECRET_APPROVAL_REOPENED = "secret-approval-reopened",
SECRET_APPROVAL_REQUEST_REVIEW = "secret-approval-request-review",
SIGN_SSH_KEY = "sign-ssh-key",
ISSUE_SSH_CREDS = "issue-ssh-creds",
CREATE_SSH_CA = "create-ssh-certificate-authority",
@ -250,6 +252,7 @@ export enum EventType {
UPDATE_APP_CONNECTION = "update-app-connection",
DELETE_APP_CONNECTION = "delete-app-connection",
CREATE_SHARED_SECRET = "create-shared-secret",
CREATE_SECRET_REQUEST = "create-secret-request",
DELETE_SHARED_SECRET = "delete-shared-secret",
READ_SHARED_SECRET = "read-shared-secret",
GET_SECRET_SYNCS = "get-secret-syncs",
@ -1141,6 +1144,7 @@ interface CreateFolderEvent {
folderId: string;
folderName: string;
folderPath: string;
description?: string;
};
}
@ -1312,6 +1316,16 @@ interface SecretApprovalRequest {
};
}
interface SecretApprovalRequestReview {
type: EventType.SECRET_APPROVAL_REQUEST_REVIEW;
metadata: {
secretApprovalRequestId: string;
reviewedBy: string;
status: ApprovalStatus;
comment: string;
};
}
interface SignSshKey {
type: EventType.SIGN_SSH_KEY;
metadata: {
@ -2020,6 +2034,15 @@ interface CreateSharedSecretEvent {
};
}
interface CreateSecretRequestEvent {
type: EventType.CREATE_SECRET_REQUEST;
metadata: {
id: string;
accessType: string;
name?: string;
};
}
interface DeleteSharedSecretEvent {
type: EventType.DELETE_SHARED_SECRET;
metadata: {
@ -2470,4 +2493,6 @@ export type Event =
| KmipOperationActivateEvent
| KmipOperationRevokeEvent
| KmipOperationLocateEvent
| KmipOperationRegisterEvent;
| KmipOperationRegisterEvent
| CreateSecretRequestEvent
| SecretApprovalRequestReview;

@ -86,7 +86,7 @@ export const SqlDatabaseProvider = ({ gatewayService }: TSqlDatabaseProviderDTO)
tlsOptions: {
ca: relayDetails.certChain,
cert: relayDetails.certificate,
key: relayDetails.privateKey
key: relayDetails.privateKey.toString()
}
}
);

@ -474,7 +474,7 @@ export const gatewayServiceFactory = ({
relayHost,
relayPort: Number(relayPort),
tlsOptions: {
key: privateKey,
key: privateKey.toString(),
ca: `${gatewayCaCert.toString("pem")}\n${rootCaCert.toString("pem")}`.trim(),
cert: clientCert.toString("pem")
},

@ -100,6 +100,7 @@ export const secretApprovalRequestDALFactory = (db: TDbClient) => {
tx.ref("lastName").withSchema("committerUser").as("committerUserLastName"),
tx.ref("reviewerUserId").withSchema(TableName.SecretApprovalRequestReviewer),
tx.ref("status").withSchema(TableName.SecretApprovalRequestReviewer).as("reviewerStatus"),
tx.ref("comment").withSchema(TableName.SecretApprovalRequestReviewer).as("reviewerComment"),
tx.ref("email").withSchema("secretApprovalReviewerUser").as("reviewerEmail"),
tx.ref("username").withSchema("secretApprovalReviewerUser").as("reviewerUsername"),
tx.ref("firstName").withSchema("secretApprovalReviewerUser").as("reviewerFirstName"),
@ -162,8 +163,10 @@ export const secretApprovalRequestDALFactory = (db: TDbClient) => {
reviewerEmail: email,
reviewerLastName: lastName,
reviewerUsername: username,
reviewerFirstName: firstName
}) => (userId ? { userId, status, email, firstName, lastName, username } : undefined)
reviewerFirstName: firstName,
reviewerComment: comment
}) =>
userId ? { userId, status, email, firstName, lastName, username, comment: comment ?? "" } : undefined
},
{
key: "approverUserId",

@ -320,6 +320,7 @@ export const secretApprovalRequestServiceFactory = ({
approvalId,
actor,
status,
comment,
actorId,
actorAuthMethod,
actorOrgId
@ -372,15 +373,18 @@ export const secretApprovalRequestServiceFactory = ({
return secretApprovalRequestReviewerDAL.create(
{
status,
comment,
requestId: secretApprovalRequest.id,
reviewerUserId: actorId
},
tx
);
}
return secretApprovalRequestReviewerDAL.updateById(review.id, { status }, tx);
return secretApprovalRequestReviewerDAL.updateById(review.id, { status, comment }, tx);
});
return reviewStatus;
return { ...reviewStatus, projectId: secretApprovalRequest.projectId };
};
const updateApprovalStatus = async ({
@ -1294,7 +1298,7 @@ export const secretApprovalRequestServiceFactory = ({
secretMetadata
}) => {
const secretId = updatingSecretsGroupByKey[secretKey][0].id;
if (tagIds?.length) commitTagIds[secretKey] = tagIds;
if (tagIds?.length) commitTagIds[newSecretName ?? secretKey] = tagIds;
return {
...latestSecretVersions[secretId],
secretMetadata,

@ -80,6 +80,7 @@ export type TStatusChangeDTO = {
export type TReviewRequestDTO = {
approvalId: string;
status: ApprovalStatus;
comment?: string;
} & Omit<TProjectPermission, "projectId">;
export type TApprovalRequestCountDTO = TProjectPermission;

@ -13,6 +13,7 @@ import { NotFoundError } from "@app/lib/errors";
import { logger } from "@app/lib/logger";
import { alphaNumericNanoId } from "@app/lib/nanoid";
import { QueueJobs, QueueName, TQueueServiceFactory } from "@app/queue";
import { ActorType } from "@app/services/auth/auth-type";
import { TKmsServiceFactory } from "@app/services/kms/kms-service";
import { KmsDataKey } from "@app/services/kms/kms-types";
import { TProjectBotServiceFactory } from "@app/services/project-bot/project-bot-service";
@ -332,6 +333,7 @@ export const secretRotationQueueFactory = ({
await secretVersionV2BridgeDAL.insertMany(
updatedSecrets.map(({ id, updatedAt, createdAt, ...el }) => ({
...el,
actorType: ActorType.PLATFORM,
secretId: id
})),
tx

@ -7,6 +7,7 @@ import { decryptSymmetric128BitHexKeyUTF8 } from "@app/lib/crypto";
import { InternalServerError, NotFoundError } from "@app/lib/errors";
import { groupBy } from "@app/lib/fn";
import { logger } from "@app/lib/logger";
import { ActorType } from "@app/services/auth/auth-type";
import { TKmsServiceFactory } from "@app/services/kms/kms-service";
import { KmsDataKey } from "@app/services/kms/kms-types";
import { TProjectBotServiceFactory } from "@app/services/project-bot/project-bot-service";
@ -370,7 +371,21 @@ export const secretSnapshotServiceFactory = ({
const secrets = await secretV2BridgeDAL.insertMany(
rollbackSnaps.flatMap(({ secretVersions, folderId }) =>
secretVersions.map(
({ latestSecretVersion, version, updatedAt, createdAt, secretId, envId, id, tags, ...el }) => ({
({
latestSecretVersion,
version,
updatedAt,
createdAt,
secretId,
envId,
id,
tags,
// exclude the bottom fields from the secret - they are for versioning only.
userActorId,
identityActorId,
actorType,
...el
}) => ({
...el,
id: secretId,
version: deletedTopLevelSecsGroupById[secretId] ? latestSecretVersion + 1 : latestSecretVersion,
@ -401,8 +416,18 @@ export const secretSnapshotServiceFactory = ({
})),
tx
);
const userActorId = actor === ActorType.USER ? actorId : undefined;
const identityActorId = actor !== ActorType.USER ? actorId : undefined;
const actorType = actor || ActorType.PLATFORM;
const secretVersions = await secretVersionV2BridgeDAL.insertMany(
secrets.map(({ id, updatedAt, createdAt, ...el }) => ({ ...el, secretId: id })),
secrets.map(({ id, updatedAt, createdAt, ...el }) => ({
...el,
secretId: id,
userActorId,
identityActorId,
actorType
})),
tx
);
await secretVersionV2TagBridgeDAL.insertMany(

@ -6,7 +6,6 @@ export const sanitizedSshCertificate = SshCertificatesSchema.pick({
sshCertificateTemplateId: true,
serialNumber: true,
certType: true,
publicKey: true,
principals: true,
keyId: true,
notBefore: true,

@ -638,7 +638,8 @@ export const FOLDERS = {
environment: "The slug of the environment to create the folder in.",
name: "The name of the folder to create.",
path: "The path of the folder to create.",
directory: "The directory of the folder to create. (Deprecated in favor of path)"
directory: "The directory of the folder to create. (Deprecated in favor of path)",
description: "An optional description label for the folder."
},
UPDATE: {
folderId: "The ID of the folder to update.",
@ -647,7 +648,8 @@ export const FOLDERS = {
path: "The path of the folder to update.",
directory: "The new directory of the folder to update. (Deprecated in favor of path)",
projectSlug: "The slug of the project where the folder is located.",
workspaceId: "The ID of the project where the folder is located."
workspaceId: "The ID of the project where the folder is located.",
description: "An optional description label for the folder."
},
DELETE: {
folderIdOrName: "The ID or name of the folder to delete.",

@ -24,6 +24,7 @@ const databaseReadReplicaSchema = z
const envSchema = z
.object({
INFISICAL_PLATFORM_VERSION: zpStr(z.string().optional()),
PORT: z.coerce.number().default(IS_PACKAGED ? 8080 : 4000),
DISABLE_SECRET_SCANNING: z
.enum(["true", "false"])
@ -216,6 +217,13 @@ const envSchema = z
INF_APP_CONNECTION_AZURE_CLIENT_ID: zpStr(z.string().optional()),
INF_APP_CONNECTION_AZURE_CLIENT_SECRET: zpStr(z.string().optional()),
// datadog
SHOULD_USE_DATADOG_TRACER: zodStrBool.default("false"),
DATADOG_PROFILING_ENABLED: zodStrBool.default("false"),
DATADOG_ENV: zpStr(z.string().optional().default("prod")),
DATADOG_SERVICE: zpStr(z.string().optional().default("infisical-core")),
DATADOG_HOSTNAME: zpStr(z.string().optional()),
/* CORS ----------------------------------------------------------------------------- */
CORS_ALLOWED_ORIGINS: zpStr(

@ -1,6 +1,8 @@
/* eslint-disable no-await-in-loop */
import crypto from "node:crypto";
import net from "node:net";
import tls from "node:tls";
import quicDefault, * as quicModule from "@infisical/quic";
import { BadRequestError } from "../errors";
import { logger } from "../logger";
@ -8,34 +10,73 @@ import { logger } from "../logger";
const DEFAULT_MAX_RETRIES = 3;
const DEFAULT_RETRY_DELAY = 1000; // 1 second
const createTLSConnection = (relayHost: string, relayPort: number, tlsOptions: tls.TlsOptions = {}) => {
return new Promise<tls.TLSSocket>((resolve, reject) => {
// @ts-expect-error this is resolved in next connect
const socket = new tls.TLSSocket(null, {
rejectUnauthorized: true,
...tlsOptions
});
const quic = quicDefault || quicModule;
const cleanup = () => {
socket.removeAllListeners();
socket.end();
};
socket.once("error", (err) => {
cleanup();
reject(err);
});
socket.connect(relayPort, relayHost, () => {
resolve(socket);
});
const parseSubjectDetails = (data: string) => {
const values: Record<string, string> = {};
data.split("\n").forEach((el) => {
const [key, value] = el.split("=");
values[key.trim()] = value.trim();
});
return values;
};
type TTlsOption = { ca: string; cert: string; key: string };
const createQuicConnection = async (
relayHost: string,
relayPort: number,
tlsOptions: TTlsOption,
identityId: string,
orgId: string
) => {
const client = await quic.QUICClient.createQUICClient({
host: relayHost,
port: relayPort,
config: {
ca: tlsOptions.ca,
cert: tlsOptions.cert,
key: tlsOptions.key,
applicationProtos: ["infisical-gateway"],
verifyPeer: true,
verifyCallback: async (certs) => {
if (!certs || certs.length === 0) return quic.native.CryptoError.CertificateRequired;
const serverCertificate = new crypto.X509Certificate(Buffer.from(certs[0]));
const caCertificate = new crypto.X509Certificate(tlsOptions.ca);
const isValidServerCertificate = serverCertificate.checkIssued(caCertificate);
if (!isValidServerCertificate) return quic.native.CryptoError.BadCertificate;
const subjectDetails = parseSubjectDetails(serverCertificate.subject);
if (subjectDetails.OU !== "Gateway" || subjectDetails.CN !== identityId || subjectDetails.O !== orgId) {
return quic.native.CryptoError.CertificateUnknown;
}
if (new Date() > new Date(serverCertificate.validTo) || new Date() < new Date(serverCertificate.validFrom)) {
return quic.native.CryptoError.CertificateExpired;
}
const formatedRelayHost =
process.env.NODE_ENV === "development" ? relayHost.replace("host.docker.internal", "127.0.0.1") : relayHost;
if (!serverCertificate.checkIP(formatedRelayHost)) return quic.native.CryptoError.BadCertificate;
},
maxIdleTimeout: 90000,
keepAliveIntervalTime: 30000
},
crypto: {
ops: {
randomBytes: async (data) => {
crypto.getRandomValues(new Uint8Array(data));
}
}
}
});
return client;
};
type TPingGatewayAndVerifyDTO = {
relayHost: string;
relayPort: number;
tlsOptions: tls.TlsOptions;
tlsOptions: TTlsOption;
maxRetries?: number;
identityId: string;
orgId: string;
@ -44,56 +85,44 @@ type TPingGatewayAndVerifyDTO = {
export const pingGatewayAndVerify = async ({
relayHost,
relayPort,
tlsOptions = {},
tlsOptions,
maxRetries = DEFAULT_MAX_RETRIES,
identityId,
orgId
}: TPingGatewayAndVerifyDTO) => {
let lastError: Error | null = null;
const quicClient = await createQuicConnection(relayHost, relayPort, tlsOptions, identityId, orgId).catch((err) => {
throw new BadRequestError({
error: err as Error
});
});
for (let attempt = 1; attempt <= maxRetries; attempt += 1) {
try {
const socket = await createTLSConnection(relayHost, relayPort, tlsOptions);
socket.setTimeout(2000);
const stream = quicClient.connection.newStream("bidi");
const pingWriter = stream.writable.getWriter();
await pingWriter.write(Buffer.from("PING\n"));
pingWriter.releaseLock();
const pingResult = await new Promise((resolve, reject) => {
socket.once("timeout", () => {
socket.destroy();
reject(new Error("Timeout"));
// Read PONG response
const reader = stream.readable.getReader();
const { value, done } = await reader.read();
if (done) {
throw new BadRequestError({
message: "Gateway closed before receiving PONG"
});
socket.once("close", () => {
socket.destroy();
}
const response = Buffer.from(value).toString();
if (response !== "PONG\n" && response !== "PONG") {
throw new BadRequestError({
message: `Failed to Ping. Unexpected response: ${response}`
});
}
socket.once("end", () => {
socket.destroy();
});
socket.once("error", (err) => {
reject(err);
});
socket.write(Buffer.from("PING\n"), () => {
socket.once("data", (data) => {
const response = (data as string).toString();
const certificate = socket.getPeerCertificate();
if (certificate.subject.CN !== identityId || certificate.subject.O !== orgId) {
throw new BadRequestError({
message: `Invalid gateway. Certificate not found for ${identityId} in organization ${orgId}`
});
}
if (response === "PONG") {
resolve(true);
} else {
reject(new Error(`Unexpected response: ${response}`));
}
});
});
});
socket.end();
return pingResult;
reader.releaseLock();
return;
} catch (err) {
lastError = err as Error;
@ -102,6 +131,8 @@ export const pingGatewayAndVerify = async ({
setTimeout(resolve, DEFAULT_RETRY_DELAY);
});
}
} finally {
await quicClient.destroy();
}
}
@ -114,76 +145,125 @@ export const pingGatewayAndVerify = async ({
interface TProxyServer {
server: net.Server;
port: number;
cleanup: () => void;
cleanup: () => Promise<void>;
}
const setupProxyServer = ({
const setupProxyServer = async ({
targetPort,
targetHost,
tlsOptions = {},
tlsOptions,
relayHost,
relayPort
relayPort,
identityId,
orgId
}: {
targetHost: string;
targetPort: number;
relayPort: number;
relayHost: string;
tlsOptions: tls.TlsOptions;
tlsOptions: TTlsOption;
identityId: string;
orgId: string;
}): Promise<TProxyServer> => {
const quicClient = await createQuicConnection(relayHost, relayPort, tlsOptions, identityId, orgId).catch((err) => {
throw new BadRequestError({
error: err as Error
});
});
return new Promise((resolve, reject) => {
const server = net.createServer();
// eslint-disable-next-line @typescript-eslint/no-misused-promises
server.on("connection", async (clientSocket) => {
server.on("connection", async (clientConn) => {
try {
const targetSocket = await createTLSConnection(relayHost, relayPort, tlsOptions);
clientConn.setKeepAlive(true, 30000); // 30 seconds
clientConn.setNoDelay(true);
targetSocket.write(Buffer.from(`FORWARD-TCP ${targetHost}:${targetPort}\n`), () => {
clientSocket.on("data", (data) => {
const flushed = targetSocket.write(data);
if (!flushed) {
clientSocket.pause();
targetSocket.once("drain", () => {
clientSocket.resume();
});
}
});
const stream = quicClient.connection.newStream("bidi");
// Send FORWARD-TCP command
const forwardWriter = stream.writable.getWriter();
await forwardWriter.write(Buffer.from(`FORWARD-TCP ${targetHost}:${targetPort}\n`));
forwardWriter.releaseLock();
/* eslint-disable @typescript-eslint/no-misused-promises */
// Set up bidirectional copy
const setupCopy = async () => {
// Client to QUIC
// eslint-disable-next-line
(async () => {
try {
const writer = stream.writable.getWriter();
targetSocket.on("data", (data) => {
const flushed = clientSocket.write(data as string);
if (!flushed) {
targetSocket.pause();
clientSocket.once("drain", () => {
targetSocket.resume();
// Create a handler for client data
clientConn.on("data", async (chunk) => {
await writer.write(chunk);
});
// Handle client connection close
clientConn.on("end", async () => {
await writer.close();
});
clientConn.on("error", async (err) => {
await writer.abort(err);
});
} catch (err) {
clientConn.destroy();
}
});
})();
// QUIC to Client
void (async () => {
try {
const reader = stream.readable.getReader();
let reading = true;
while (reading) {
const { value, done } = await reader.read();
if (done) {
reading = false;
clientConn.end(); // Close client connection when QUIC stream ends
break;
}
// Write data to TCP client
const canContinue = clientConn.write(Buffer.from(value));
// Handle backpressure
if (!canContinue) {
await new Promise((res) => {
clientConn.once("drain", res);
});
}
}
} catch (err) {
clientConn.destroy();
}
})();
};
await setupCopy();
//
// Handle connection closure
clientConn.on("close", async () => {
await stream.destroy();
});
const cleanup = () => {
clientSocket?.unpipe();
clientSocket?.end();
targetSocket?.unpipe();
targetSocket?.end();
const cleanup = async () => {
clientConn?.destroy();
await stream.destroy();
};
clientSocket.on("error", (err) => {
clientConn.on("error", (err) => {
logger.error(err, "Client socket error");
cleanup();
void cleanup();
reject(err);
});
targetSocket.on("error", (err) => {
logger.error(err, "Target socket error");
cleanup();
reject(err);
});
clientSocket.on("end", cleanup);
targetSocket.on("end", cleanup);
clientConn.on("end", cleanup);
} catch (err) {
logger.error(err, "Failed to establish target connection:");
clientSocket.end();
clientConn.end();
reject(err);
}
});
@ -192,6 +272,12 @@ const setupProxyServer = ({
reject(err);
});
server.on("close", async () => {
await quicClient?.destroy();
});
/* eslint-enable */
server.listen(0, () => {
const address = server.address();
if (!address || typeof address === "string") {
@ -204,8 +290,9 @@ const setupProxyServer = ({
resolve({
server,
port: address.port,
cleanup: () => {
cleanup: async () => {
server.close();
await quicClient?.destroy();
}
});
});
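The copy loops above apply TCP backpressure by waiting for the socket's drain event before accepting more data from the QUIC stream; a minimal standalone sketch of that pattern (the helper name and socket are illustrative, not part of this change):

import net from "node:net";

// Write one chunk and, if the kernel buffer is full, wait for "drain" before continuing.
const writeWithBackpressure = async (socket: net.Socket, chunk: Buffer): Promise<void> => {
  const canContinue = socket.write(chunk);
  if (!canContinue) {
    await new Promise<void>((resolve) => {
      socket.once("drain", () => resolve());
    });
  }
};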
@ -217,8 +304,7 @@ interface ProxyOptions {
targetPort: number;
relayHost: string;
relayPort: number;
tlsOptions?: tls.TlsOptions;
maxRetries?: number;
tlsOptions: TTlsOption;
identityId: string;
orgId: string;
}
@ -227,30 +313,19 @@ export const withGatewayProxy = async (
callback: (port: number) => Promise<void>,
options: ProxyOptions
): Promise<void> => {
const {
relayHost,
relayPort,
const { relayHost, relayPort, targetHost, targetPort, tlsOptions, identityId, orgId } = options;
// Setup the proxy server
const { port, cleanup } = await setupProxyServer({
targetHost,
targetPort,
tlsOptions = {},
maxRetries = DEFAULT_MAX_RETRIES,
identityId,
orgId
} = options;
// First, try to ping the gateway
await pingGatewayAndVerify({
relayHost,
relayPort,
relayHost,
tlsOptions,
maxRetries,
identityId,
orgId
});
// Setup the proxy server
const { port, cleanup } = await setupProxyServer({ targetHost, targetPort, relayPort, relayHost, tlsOptions });
try {
// Execute the callback with the allocated port
await callback(port);
@ -259,6 +334,6 @@ export const withGatewayProxy = async (
throw new BadRequestError({ message: (err as Error)?.message });
} finally {
// Ensure cleanup happens regardless of success or failure
cleanup();
await cleanup();
}
};
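For orientation, a hedged sketch of how a caller might drive the reworked helper; the target and relay values and the tlsOptions shape are placeholders assumed for illustration, not taken from this diff.

// Tunnel a single connectivity check through the gateway relay over QUIC.
await withGatewayProxy(
  async (localPort) => {
    // Anything dialing 127.0.0.1:localPort is forwarded to targetHost:targetPort.
    await verifyHostAndPortReachable("127.0.0.1", localPort); // hypothetical helper
  },
  {
    targetHost: "10.0.0.12", // placeholder
    targetPort: 5432, // placeholder
    relayHost: "relay.example.com", // placeholder
    relayPort: 5349, // placeholder
    tlsOptions: { ca: relayCa, cert: clientCert, key: clientKey }, // assumed TTlsOption shape
    identityId,
    orgId
  }
);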

@ -1,11 +1,12 @@
import opentelemetry, { diag, DiagConsoleLogger, DiagLogLevel } from "@opentelemetry/api";
import { getNodeAutoInstrumentations } from "@opentelemetry/auto-instrumentations-node";
import { OTLPMetricExporter } from "@opentelemetry/exporter-metrics-otlp-proto";
import { PrometheusExporter } from "@opentelemetry/exporter-prometheus";
import { registerInstrumentations } from "@opentelemetry/instrumentation";
import { HttpInstrumentation } from "@opentelemetry/instrumentation-http";
import { Resource } from "@opentelemetry/resources";
import { AggregationTemporality, MeterProvider, PeriodicExportingMetricReader } from "@opentelemetry/sdk-metrics";
import { ATTR_SERVICE_NAME, ATTR_SERVICE_VERSION } from "@opentelemetry/semantic-conventions";
import tracer from "dd-trace";
import dotenv from "dotenv";
import { initEnvConfig } from "../config/env";
@ -69,7 +70,7 @@ const initTelemetryInstrumentation = ({
opentelemetry.metrics.setGlobalMeterProvider(meterProvider);
registerInstrumentations({
instrumentations: [getNodeAutoInstrumentations()]
instrumentations: [new HttpInstrumentation()]
});
};
@ -86,6 +87,17 @@ const setupTelemetry = () => {
exportType: appCfg.OTEL_EXPORT_TYPE
});
}
if (appCfg.SHOULD_USE_DATADOG_TRACER) {
console.log("Initializing Datadog tracer");
tracer.init({
profiling: appCfg.DATADOG_PROFILING_ENABLED,
version: appCfg.INFISICAL_PLATFORM_VERSION,
env: appCfg.DATADOG_ENV,
service: appCfg.DATADOG_SERVICE,
hostname: appCfg.DATADOG_HOSTNAME
});
}
};
void setupTelemetry();

@ -83,6 +83,14 @@ const run = async () => {
process.exit(0);
});
process.on("uncaughtException", (error) => {
logger.error(error, "CRITICAL ERROR: Uncaught Exception");
});
process.on("unhandledRejection", (error) => {
logger.error(error, "CRITICAL ERROR: Unhandled Promise Rejection");
});
await server.listen({
port: envConfig.PORT,
host: envConfig.HOST,

@ -1096,7 +1096,9 @@ export const registerRoutes = async (
permissionService,
secretSharingDAL,
orgDAL,
kmsService
kmsService,
smtpService,
userDAL
});
const accessApprovalPolicyService = accessApprovalPolicyServiceFactory({

@ -111,7 +111,16 @@ export const secretRawSchema = z.object({
secretReminderRepeatDays: z.number().nullable().optional(),
skipMultilineEncoding: z.boolean().default(false).nullable().optional(),
createdAt: z.date(),
updatedAt: z.date()
updatedAt: z.date(),
actor: z
.object({
actorId: z.string().nullable().optional(),
actorType: z.string().nullable().optional(),
name: z.string().nullable().optional(),
membershipId: z.string().nullable().optional()
})
.optional()
.nullable()
});
export const ProjectPermissionSchema = z.object({

@ -1,3 +1,4 @@
import DOMPurify from "isomorphic-dompurify";
import { z } from "zod";
import { OrganizationsSchema, SuperAdminSchema, UsersSchema } from "@app/db/schemas";
@ -72,7 +73,21 @@ export const registerAdminRouter = async (server: FastifyZodProvider) => {
message: "At least one login method should be enabled."
}),
slackClientId: z.string().optional(),
slackClientSecret: z.string().optional()
slackClientSecret: z.string().optional(),
authConsentContent: z
.string()
.trim()
.refine((content) => DOMPurify.sanitize(content) === content, {
message: "Auth consent content contains unsafe HTML."
})
.optional(),
pageFrameContent: z
.string()
.trim()
.refine((content) => DOMPurify.sanitize(content) === content, {
message: "Page frame content contains unsafe HTML."
})
.optional()
}),
response: {
200: z.object({
@ -196,6 +211,27 @@ export const registerAdminRouter = async (server: FastifyZodProvider) => {
}
});
server.route({
method: "PATCH",
url: "/user-management/users/:userId/admin-access",
config: {
rateLimit: writeLimit
},
schema: {
params: z.object({
userId: z.string()
})
},
onRequest: (req, res, done) => {
verifyAuth([AuthMode.JWT])(req, res, () => {
verifySuperAdmin(req, res, done);
});
},
handler: async (req) => {
await server.services.superAdmin.grantServerAdminAccessToUser(req.params.userId);
}
});
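A hedged sketch of invoking the new endpoint from a script; the /api/v1/admin prefix is an assumption about where this router is mounted and is not confirmed by this diff.

// Requires a JWT belonging to an existing server admin (verifySuperAdmin runs before the handler).
const grantServerAdmin = async (baseUrl: string, token: string, userId: string) => {
  const res = await fetch(`${baseUrl}/api/v1/admin/user-management/users/${userId}/admin-access`, {
    method: "PATCH",
    headers: { Authorization: `Bearer ${token}` }
  });
  if (!res.ok) throw new Error(`Failed to grant server admin access: ${res.status}`);
};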
server.route({
method: "GET",
url: "/encryption-strategies",

@ -37,6 +37,7 @@ import { registerProjectMembershipRouter } from "./project-membership-router";
import { registerProjectRouter } from "./project-router";
import { registerSecretFolderRouter } from "./secret-folder-router";
import { registerSecretImportRouter } from "./secret-import-router";
import { registerSecretRequestsRouter } from "./secret-requests-router";
import { registerSecretSharingRouter } from "./secret-sharing-router";
import { registerSecretTagRouter } from "./secret-tag-router";
import { registerSlackRouter } from "./slack-router";
@ -110,7 +111,15 @@ export const registerV1Routes = async (server: FastifyZodProvider) => {
await server.register(registerIntegrationAuthRouter, { prefix: "/integration-auth" });
await server.register(registerWebhookRouter, { prefix: "/webhooks" });
await server.register(registerIdentityRouter, { prefix: "/identities" });
await server.register(registerSecretSharingRouter, { prefix: "/secret-sharing" });
await server.register(
async (secretSharingRouter) => {
await secretSharingRouter.register(registerSecretSharingRouter, { prefix: "/shared" });
await secretSharingRouter.register(registerSecretRequestsRouter, { prefix: "/requests" });
},
{ prefix: "/secret-sharing" }
);
await server.register(registerUserEngagementRouter, { prefix: "/user-engagement" });
await server.register(registerDashboardRouter, { prefix: "/dashboard" });
await server.register(registerCmekRouter, { prefix: "/kms" });

@ -47,7 +47,8 @@ export const registerSecretFolderRouter = async (server: FastifyZodProvider) =>
.default("/")
.transform(prefixWithSlash)
.transform(removeTrailingSlash)
.describe(FOLDERS.CREATE.directory)
.describe(FOLDERS.CREATE.directory),
description: z.string().optional().nullable().describe(FOLDERS.CREATE.description)
}),
response: {
200: z.object({
@ -65,7 +66,8 @@ export const registerSecretFolderRouter = async (server: FastifyZodProvider) =>
actorOrgId: req.permission.orgId,
...req.body,
projectId: req.body.workspaceId,
path
path,
description: req.body.description
});
await server.services.auditLog.createAuditLog({
...req.auditLogInfo,
@ -76,7 +78,8 @@ export const registerSecretFolderRouter = async (server: FastifyZodProvider) =>
environment: req.body.environment,
folderId: folder.id,
folderName: folder.name,
folderPath: path
folderPath: path,
...(req.body.description ? { description: req.body.description } : {})
}
}
});
@ -125,7 +128,8 @@ export const registerSecretFolderRouter = async (server: FastifyZodProvider) =>
.default("/")
.transform(prefixWithSlash)
.transform(removeTrailingSlash)
.describe(FOLDERS.UPDATE.directory)
.describe(FOLDERS.UPDATE.directory),
description: z.string().optional().nullable().describe(FOLDERS.UPDATE.description)
}),
response: {
200: z.object({
@ -196,7 +200,8 @@ export const registerSecretFolderRouter = async (server: FastifyZodProvider) =>
.default("/")
.transform(prefixWithSlash)
.transform(removeTrailingSlash)
.describe(FOLDERS.UPDATE.path)
.describe(FOLDERS.UPDATE.path),
description: z.string().optional().nullable().describe(FOLDERS.UPDATE.description)
})
.array()
.min(1)

@ -0,0 +1,270 @@
import { z } from "zod";
import { SecretSharingSchema } from "@app/db/schemas";
import { EventType } from "@app/ee/services/audit-log/audit-log-types";
import { SecretSharingAccessType } from "@app/lib/types";
import { readLimit, writeLimit } from "@app/server/config/rateLimiter";
import { getTelemetryDistinctId } from "@app/server/lib/telemetry";
import { verifyAuth } from "@app/server/plugins/auth/verify-auth";
import { AuthMode } from "@app/services/auth/auth-type";
import { SecretSharingType } from "@app/services/secret-sharing/secret-sharing-types";
import { PostHogEventTypes } from "@app/services/telemetry/telemetry-types";
export const registerSecretRequestsRouter = async (server: FastifyZodProvider) => {
server.route({
method: "GET",
url: "/:id",
config: {
rateLimit: readLimit
},
schema: {
params: z.object({
id: z.string()
}),
response: {
200: z.object({
secretRequest: SecretSharingSchema.omit({
encryptedSecret: true,
tag: true,
iv: true,
encryptedValue: true
}).extend({
isSecretValueSet: z.boolean(),
requester: z.object({
organizationName: z.string(),
firstName: z.string().nullish(),
lastName: z.string().nullish(),
username: z.string()
})
})
})
}
},
handler: async (req) => {
const secretRequest = await req.server.services.secretSharing.getSecretRequestById({
id: req.params.id,
actorOrgId: req.permission?.orgId,
actor: req.permission?.type,
actorId: req.permission?.id,
actorAuthMethod: req.permission?.authMethod
});
return { secretRequest };
}
});
server.route({
method: "POST",
url: "/:id/set-value",
config: {
rateLimit: writeLimit
},
schema: {
params: z.object({
id: z.string()
}),
body: z.object({
secretValue: z.string()
}),
response: {
200: z.object({
secretRequest: SecretSharingSchema.omit({
encryptedSecret: true,
tag: true,
iv: true,
encryptedValue: true
})
})
}
},
handler: async (req) => {
const secretRequest = await req.server.services.secretSharing.setSecretRequestValue({
id: req.params.id,
actorOrgId: req.permission?.orgId,
actor: req.permission?.type,
actorId: req.permission?.id,
actorAuthMethod: req.permission?.authMethod,
secretValue: req.body.secretValue
});
return { secretRequest };
}
});
server.route({
method: "POST",
url: "/:id/reveal-value",
config: {
rateLimit: writeLimit
},
schema: {
params: z.object({
id: z.string()
}),
response: {
200: z.object({
secretRequest: SecretSharingSchema.omit({
encryptedSecret: true,
tag: true,
iv: true,
encryptedValue: true
}).extend({
secretValue: z.string()
})
})
}
},
onRequest: verifyAuth([AuthMode.JWT]),
handler: async (req) => {
const secretRequest = await req.server.services.secretSharing.revealSecretRequestValue({
id: req.params.id,
actorOrgId: req.permission.orgId,
orgId: req.permission.orgId,
actor: req.permission.type,
actorId: req.permission.id,
actorAuthMethod: req.permission.authMethod
});
return { secretRequest };
}
});
server.route({
method: "DELETE",
url: "/:id",
config: {
rateLimit: writeLimit
},
schema: {
params: z.object({
id: z.string()
}),
response: {
200: z.object({
secretRequest: SecretSharingSchema.omit({
encryptedSecret: true,
tag: true,
iv: true,
encryptedValue: true
})
})
}
},
onRequest: verifyAuth([AuthMode.JWT]),
handler: async (req) => {
const secretRequest = await req.server.services.secretSharing.deleteSharedSecretById({
actorOrgId: req.permission.orgId,
actorAuthMethod: req.permission.authMethod,
actorId: req.permission.id,
sharedSecretId: req.params.id,
orgId: req.permission.orgId,
actor: req.permission.type,
type: SecretSharingType.Request
});
await server.services.telemetry.sendPostHogEvents({
event: PostHogEventTypes.SecretRequestDeleted,
distinctId: getTelemetryDistinctId(req),
properties: {
secretRequestId: req.params.id,
organizationId: req.permission.orgId,
...req.auditLogInfo
}
});
return { secretRequest };
}
});
server.route({
method: "GET",
url: "/",
config: {
rateLimit: readLimit
},
schema: {
querystring: z.object({
offset: z.coerce.number().min(0).max(100).default(0),
limit: z.coerce.number().min(1).max(100).default(25)
}),
response: {
200: z.object({
secrets: z.array(SecretSharingSchema),
totalCount: z.number()
})
}
},
onRequest: verifyAuth([AuthMode.JWT]),
handler: async (req) => {
const { secrets, totalCount } = await req.server.services.secretSharing.getSharedSecrets({
actor: req.permission.type,
actorId: req.permission.id,
actorAuthMethod: req.permission.authMethod,
actorOrgId: req.permission.orgId,
type: SecretSharingType.Request,
...req.query
});
return {
secrets,
totalCount
};
}
});
server.route({
method: "POST",
url: "/",
config: {
rateLimit: writeLimit
},
schema: {
body: z.object({
name: z.string().max(50).optional(),
expiresAt: z.string(),
accessType: z.nativeEnum(SecretSharingAccessType).default(SecretSharingAccessType.Organization)
}),
response: {
200: z.object({
id: z.string()
})
}
},
onRequest: verifyAuth([AuthMode.JWT]),
handler: async (req) => {
const shareRequest = await req.server.services.secretSharing.createSecretRequest({
actor: req.permission.type,
actorId: req.permission.id,
orgId: req.permission.orgId,
actorAuthMethod: req.permission.authMethod,
actorOrgId: req.permission.orgId,
...req.body
});
await server.services.auditLog.createAuditLog({
orgId: req.permission.orgId,
...req.auditLogInfo,
event: {
type: EventType.CREATE_SECRET_REQUEST,
metadata: {
accessType: req.body.accessType,
name: req.body.name,
id: shareRequest.id
}
}
});
await server.services.telemetry.sendPostHogEvents({
event: PostHogEventTypes.SecretRequestCreated,
distinctId: getTelemetryDistinctId(req),
properties: {
secretRequestId: shareRequest.id,
organizationId: req.permission.orgId,
secretRequestName: req.body.name,
...req.auditLogInfo
}
});
return { id: shareRequest.id };
}
});
};
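To tie the routes above together, a hedged sketch of the request lifecycle over HTTP (create, set value, reveal); the base path follows the /secret-sharing/requests registration earlier in this diff, but the /api/v1 prefix and auth header are assumptions.

const base = "https://app.example.com/api/v1/secret-sharing/requests"; // assumed prefix
const authHeaders = (token: string) => ({ Authorization: `Bearer ${token}`, "Content-Type": "application/json" });

// 1. Requester creates the request and shares the returned id.
const createRequest = async (token: string): Promise<string> => {
  const res = await fetch(base, {
    method: "POST",
    headers: authHeaders(token),
    body: JSON.stringify({ name: "Prod DB password", expiresAt: new Date(Date.now() + 86_400_000).toISOString() })
  });
  return ((await res.json()) as { id: string }).id;
};

// 2. Respondent fills in the value (triggers the SecretRequestCompleted email).
const setValue = (id: string, token: string, secretValue: string) =>
  fetch(`${base}/${id}/set-value`, { method: "POST", headers: authHeaders(token), body: JSON.stringify({ secretValue }) });

// 3. Requester reveals the value (only the creating user may do this).
const reveal = async (id: string, token: string): Promise<string> => {
  const res = await fetch(`${base}/${id}/reveal-value`, { method: "POST", headers: authHeaders(token) });
  return ((await res.json()) as { secretRequest: { secretValue: string } }).secretRequest.secretValue;
};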

@ -11,6 +11,7 @@ import {
} from "@app/server/config/rateLimiter";
import { verifyAuth } from "@app/server/plugins/auth/verify-auth";
import { AuthMode } from "@app/services/auth/auth-type";
import { SecretSharingType } from "@app/services/secret-sharing/secret-sharing-types";
export const registerSecretSharingRouter = async (server: FastifyZodProvider) => {
server.route({
@ -38,6 +39,7 @@ export const registerSecretSharingRouter = async (server: FastifyZodProvider) =>
actorId: req.permission.id,
actorAuthMethod: req.permission.authMethod,
actorOrgId: req.permission.orgId,
type: SecretSharingType.Share,
...req.query
});
@ -211,7 +213,8 @@ export const registerSecretSharingRouter = async (server: FastifyZodProvider) =>
orgId: req.permission.orgId,
actorAuthMethod: req.permission.authMethod,
actorOrgId: req.permission.orgId,
sharedSecretId
sharedSecretId,
type: SecretSharingType.Share
});
await server.services.auditLog.createAuditLog({

@ -772,6 +772,10 @@ export const importDataIntoInfisicalFn = async ({
secretVersionDAL,
secretTagDAL,
secretVersionTagDAL,
actor: {
type: actor,
actorId
},
tx
});
}

@ -134,7 +134,15 @@ const getAppsHeroku = async ({ accessToken }: { accessToken: string }) => {
* Return list of names of apps for Vercel integration
* This is re-used for getting custom environments for Vercel
*/
export const getAppsVercel = async ({ accessToken, teamId }: { teamId?: string | null; accessToken: string }) => {
export const getAppsVercel = async ({
accessToken,
teamId,
includeCustomEnvironments
}: {
teamId?: string | null;
accessToken: string;
includeCustomEnvironments?: boolean;
}) => {
const apps: Array<{ name: string; appId: string; customEnvironments: Array<{ slug: string; id: string }> }> = [];
const limit = "20";
@ -145,12 +153,6 @@ export const getAppsVercel = async ({ accessToken, teamId }: { teamId?: string |
projects: {
name: string;
id: string;
customEnvironments?: {
id: string;
type: string;
description: string;
slug: string;
}[];
}[];
pagination: {
count: number;
@ -159,6 +161,20 @@ export const getAppsVercel = async ({ accessToken, teamId }: { teamId?: string |
};
}
const getProjectCustomEnvironments = async (projectId: string) => {
const { data } = await request.get<{ environments: { id: string; slug: string }[] }>(
`${IntegrationUrls.VERCEL_API_URL}/v9/projects/${projectId}/custom-environments`,
{
headers: {
Authorization: `Bearer ${accessToken}`,
"Accept-Encoding": "application/json"
}
}
);
return data.environments;
};
while (hasMorePages) {
const params: { [key: string]: string } = {
limit
@ -180,17 +196,38 @@ export const getAppsVercel = async ({ accessToken, teamId }: { teamId?: string |
}
});
data.projects.forEach((a) => {
apps.push({
name: a.name,
appId: a.id,
customEnvironments:
a.customEnvironments?.map((env) => ({
slug: env.slug,
id: env.id
})) ?? []
if (includeCustomEnvironments) {
const projectsWithCustomEnvironments = await Promise.all(
data.projects.map(async (a) => {
const customEnvironments = await getProjectCustomEnvironments(a.id);
return {
...a,
customEnvironments
};
})
);
projectsWithCustomEnvironments.forEach((a) => {
apps.push({
name: a.name,
appId: a.id,
customEnvironments:
a.customEnvironments?.map((env) => ({
slug: env.slug,
id: env.id
})) ?? []
});
});
});
} else {
data.projects.forEach((a) => {
apps.push({
name: a.name,
appId: a.id,
customEnvironments: []
});
});
}
next = data.pagination.next;

@ -1851,6 +1851,7 @@ export const integrationAuthServiceFactory = ({
const { accessToken } = await getIntegrationAccessToken(integrationAuth, shouldUseSecretV2Bridge, botKey);
const vercelApps = await getAppsVercel({
includeCustomEnvironments: true,
accessToken,
teamId
});
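A short sketch contrasting the two call sites; opting in adds one /v9/projects/:id/custom-environments request per project, so the plain app listing stays at a single paginated call per page. Values are placeholders.

// App picker: names and IDs only, no extra per-project requests.
const apps = await getAppsVercel({ accessToken, teamId });

// Environment sync (as above): fetch custom environments for every project.
const appsWithEnvs = await getAppsVercel({ accessToken, teamId, includeCustomEnvironments: true });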

@ -20,7 +20,7 @@ type TDailyResourceCleanUpQueueServiceFactoryDep = {
secretDAL: Pick<TSecretDALFactory, "pruneSecretReminders">;
secretFolderVersionDAL: Pick<TSecretFolderVersionDALFactory, "pruneExcessVersions">;
snapshotDAL: Pick<TSnapshotDALFactory, "pruneExcessSnapshots">;
secretSharingDAL: Pick<TSecretSharingDALFactory, "pruneExpiredSharedSecrets">;
secretSharingDAL: Pick<TSecretSharingDALFactory, "pruneExpiredSharedSecrets" | "pruneExpiredSecretRequests">;
queueService: TQueueServiceFactory;
};
@ -45,6 +45,7 @@ export const dailyResourceCleanUpQueueServiceFactory = ({
await identityAccessTokenDAL.removeExpiredTokens();
await identityUniversalAuthClientSecretDAL.removeExpiredClientSecrets();
await secretSharingDAL.pruneExpiredSharedSecrets();
await secretSharingDAL.pruneExpiredSecretRequests();
await snapshotDAL.pruneExcessSnapshots();
await secretVersionDAL.pruneExcessVersions();
await secretVersionV2DAL.pruneExcessVersions();

@ -50,7 +50,8 @@ export const secretFolderServiceFactory = ({
actorOrgId,
name,
environment,
path: secretPath
path: secretPath,
description
}: TCreateFolderDTO) => {
const { permission } = await permissionService.getProjectPermission({
actor,
@ -121,7 +122,10 @@ export const secretFolderServiceFactory = ({
}
}
const doc = await folderDAL.create({ name, envId: env.id, version: 1, parentId: parentFolderId }, tx);
const doc = await folderDAL.create(
{ name, envId: env.id, version: 1, parentId: parentFolderId, description },
tx
);
await folderVersionDAL.create(
{
name: doc.name,
@ -170,7 +174,7 @@ export const secretFolderServiceFactory = ({
const result = await folderDAL.transaction(async (tx) =>
Promise.all(
folders.map(async (newFolder) => {
const { environment, path: secretPath, id, name } = newFolder;
const { environment, path: secretPath, id, name, description } = newFolder;
const parentFolder = await folderDAL.findBySecretPath(project.id, environment, secretPath);
if (!parentFolder) {
@ -217,7 +221,7 @@ export const secretFolderServiceFactory = ({
const [doc] = await folderDAL.update(
{ envId: env.id, id: folder.id, parentId: parentFolder.id },
{ name },
{ name, description },
tx
);
await folderVersionDAL.create(
@ -259,7 +263,8 @@ export const secretFolderServiceFactory = ({
name,
environment,
path: secretPath,
id
id,
description
}: TUpdateFolderDTO) => {
const { permission } = await permissionService.getProjectPermission({
actor,
@ -312,7 +317,7 @@ export const secretFolderServiceFactory = ({
const newFolder = await folderDAL.transaction(async (tx) => {
const [doc] = await folderDAL.update(
{ envId: env.id, id: folder.id, parentId: parentFolder.id, isReserved: false },
{ name },
{ name, description },
tx
);
await folderVersionDAL.create(

@ -9,6 +9,7 @@ export type TCreateFolderDTO = {
environment: string;
path: string;
name: string;
description?: string | null;
} & TProjectPermission;
export type TUpdateFolderDTO = {
@ -16,6 +17,7 @@ export type TUpdateFolderDTO = {
path: string;
id: string;
name: string;
description?: string | null;
} & TProjectPermission;
export type TUpdateManyFoldersDTO = {
@ -25,6 +27,7 @@ export type TUpdateManyFoldersDTO = {
path: string;
id: string;
name: string;
description?: string | null;
}[];
} & Omit<TProjectPermission, "projectId">;

@ -2,17 +2,61 @@ import { Knex } from "knex";
import { TDbClient } from "@app/db";
import { TableName, TSecretSharing } from "@app/db/schemas";
import { DatabaseError } from "@app/lib/errors";
import { DatabaseError, NotFoundError } from "@app/lib/errors";
import { ormify, selectAllTableCols } from "@app/lib/knex";
import { logger } from "@app/lib/logger";
import { QueueName } from "@app/queue";
import { SecretSharingType } from "./secret-sharing-types";
export type TSecretSharingDALFactory = ReturnType<typeof secretSharingDALFactory>;
export const secretSharingDALFactory = (db: TDbClient) => {
const sharedSecretOrm = ormify(db, TableName.SecretSharing);
const countAllUserOrgSharedSecrets = async ({ orgId, userId }: { orgId: string; userId: string }) => {
const getSecretRequestById = async (id: string) => {
const repDb = db.replicaNode();
const secretRequest = await repDb(TableName.SecretSharing)
.leftJoin(TableName.Organization, `${TableName.Organization}.id`, `${TableName.SecretSharing}.orgId`)
.leftJoin(TableName.Users, `${TableName.Users}.id`, `${TableName.SecretSharing}.userId`)
.where(`${TableName.SecretSharing}.id`, id)
.where(`${TableName.SecretSharing}.type`, SecretSharingType.Request)
.select(
repDb.ref("name").withSchema(TableName.Organization).as("orgName"),
repDb.ref("firstName").withSchema(TableName.Users).as("requesterFirstName"),
repDb.ref("lastName").withSchema(TableName.Users).as("requesterLastName"),
repDb.ref("username").withSchema(TableName.Users).as("requesterUsername")
)
.select(selectAllTableCols(TableName.SecretSharing))
.first();
if (!secretRequest) {
throw new NotFoundError({
message: `Secret request with ID '${id}' not found`
});
}
return {
...secretRequest,
requester: {
organizationName: secretRequest.orgName,
firstName: secretRequest.requesterFirstName,
lastName: secretRequest.requesterLastName,
username: secretRequest.requesterUsername
}
};
};
const countAllUserOrgSharedSecrets = async ({
orgId,
userId,
type
}: {
orgId: string;
userId: string;
type: SecretSharingType;
}) => {
try {
interface CountResult {
count: string;
@ -22,6 +66,7 @@ export const secretSharingDALFactory = (db: TDbClient) => {
.replicaNode()(TableName.SecretSharing)
.where(`${TableName.SecretSharing}.orgId`, orgId)
.where(`${TableName.SecretSharing}.userId`, userId)
.where(`${TableName.SecretSharing}.type`, type)
.count("*")
.first();
@ -38,6 +83,7 @@ export const secretSharingDALFactory = (db: TDbClient) => {
const docs = await (tx || db)(TableName.SecretSharing)
.where("expiresAt", "<", today)
.andWhere("encryptedValue", "<>", "")
.andWhere("type", SecretSharingType.Share)
.update({
encryptedValue: "",
tag: "",
@ -50,6 +96,26 @@ export const secretSharingDALFactory = (db: TDbClient) => {
}
};
const pruneExpiredSecretRequests = async (tx?: Knex) => {
logger.info(`${QueueName.DailyResourceCleanUp}: pruning expired secret requests started`);
try {
const today = new Date();
const docs = await (tx || db)(TableName.SecretSharing)
.whereNotNull("expiresAt")
.andWhere("expiresAt", "<", today)
.andWhere("encryptedSecret", null)
.andWhere("type", SecretSharingType.Request)
.delete();
logger.info(`${QueueName.DailyResourceCleanUp}: pruning expired secret requests completed`);
return docs;
} catch (error) {
throw new DatabaseError({ error, name: "pruneExpiredSecretRequests" });
}
};
const findActiveSharedSecrets = async (filters: Partial<TSecretSharing>, tx?: Knex) => {
try {
const now = new Date();
@ -57,6 +123,7 @@ export const secretSharingDALFactory = (db: TDbClient) => {
.where(filters)
.andWhere("expiresAt", ">", now)
.andWhere("encryptedValue", "<>", "")
.andWhere("type", SecretSharingType.Share)
.select(selectAllTableCols(TableName.SecretSharing))
.orderBy("expiresAt", "asc");
} catch (error) {
@ -86,7 +153,9 @@ export const secretSharingDALFactory = (db: TDbClient) => {
...sharedSecretOrm,
countAllUserOrgSharedSecrets,
pruneExpiredSharedSecrets,
pruneExpiredSecretRequests,
softDeleteById,
findActiveSharedSecrets
findActiveSharedSecrets,
getSecretRequestById
};
};

@ -4,26 +4,36 @@ import bcrypt from "bcrypt";
import { TSecretSharing } from "@app/db/schemas";
import { TPermissionServiceFactory } from "@app/ee/services/permission/permission-service";
import { getConfig } from "@app/lib/config/env";
import { BadRequestError, ForbiddenRequestError, NotFoundError, UnauthorizedError } from "@app/lib/errors";
import { SecretSharingAccessType } from "@app/lib/types";
import { isUuidV4 } from "@app/lib/validator";
import { TKmsServiceFactory } from "../kms/kms-service";
import { TOrgDALFactory } from "../org/org-dal";
import { SmtpTemplates, TSmtpService } from "../smtp/smtp-service";
import { TUserDALFactory } from "../user/user-dal";
import { TSecretSharingDALFactory } from "./secret-sharing-dal";
import {
SecretSharingType,
TCreatePublicSharedSecretDTO,
TCreateSecretRequestDTO,
TCreateSharedSecretDTO,
TDeleteSharedSecretDTO,
TGetActiveSharedSecretByIdDTO,
TGetSharedSecretsDTO
TGetSecretRequestByIdDTO,
TGetSharedSecretsDTO,
TRevealSecretRequestValueDTO,
TSetSecretRequestValueDTO
} from "./secret-sharing-types";
type TSecretSharingServiceFactoryDep = {
permissionService: Pick<TPermissionServiceFactory, "getOrgPermission">;
secretSharingDAL: TSecretSharingDALFactory;
orgDAL: TOrgDALFactory;
userDAL: TUserDALFactory;
kmsService: TKmsServiceFactory;
smtpService: TSmtpService;
};
export type TSecretSharingServiceFactory = ReturnType<typeof secretSharingServiceFactory>;
@ -32,7 +42,9 @@ export const secretSharingServiceFactory = ({
permissionService,
secretSharingDAL,
orgDAL,
kmsService
kmsService,
smtpService,
userDAL
}: TSecretSharingServiceFactoryDep) => {
const $validateSharedSecretExpiry = (expiresAt: string) => {
if (new Date(expiresAt) < new Date()) {
@ -75,7 +87,6 @@ export const secretSharingServiceFactory = ({
}
const encryptWithRoot = kmsService.encryptWithRootKey();
const encryptedSecret = encryptWithRoot(Buffer.from(secretValue));
const id = crypto.randomBytes(32).toString("hex");
@ -88,6 +99,7 @@ export const secretSharingServiceFactory = ({
encryptedValue: null,
encryptedSecret,
name,
type: SecretSharingType.Share,
password: hashedPassword,
expiresAt: new Date(expiresAt),
expiresAfterViews,
@ -101,6 +113,191 @@ export const secretSharingServiceFactory = ({
return { id: idToReturn };
};
const createSecretRequest = async ({
actor,
accessType,
expiresAt,
name,
actorId,
orgId,
actorAuthMethod,
actorOrgId
}: TCreateSecretRequestDTO) => {
const { permission } = await permissionService.getOrgPermission(actor, actorId, orgId, actorAuthMethod, actorOrgId);
if (!permission) throw new ForbiddenRequestError({ name: "User is not a part of the specified organization" });
$validateSharedSecretExpiry(expiresAt);
const newSecretRequest = await secretSharingDAL.create({
type: SecretSharingType.Request,
userId: actorId,
orgId,
name,
encryptedSecret: null,
accessType,
expiresAt: new Date(expiresAt)
});
return { id: newSecretRequest.id };
};
const revealSecretRequestValue = async ({
id,
actor,
actorId,
actorOrgId,
orgId,
actorAuthMethod
}: TRevealSecretRequestValueDTO) => {
const secretRequest = await secretSharingDAL.getSecretRequestById(id);
if (!secretRequest) {
throw new NotFoundError({ message: `Secret request with ID '${id}' not found` });
}
const { permission } = await permissionService.getOrgPermission(actor, actorId, orgId, actorAuthMethod, actorOrgId);
if (!permission) throw new ForbiddenRequestError({ name: "User is not a part of the specified organization" });
if (secretRequest.userId !== actorId || secretRequest.orgId !== orgId) {
throw new ForbiddenRequestError({ name: "User does not have permission to access this secret request" });
}
if (!secretRequest.encryptedSecret) {
throw new BadRequestError({ message: "Secret request has no value set" });
}
const decryptWithRoot = kmsService.decryptWithRootKey();
const decryptedSecret = decryptWithRoot(secretRequest.encryptedSecret);
return { ...secretRequest, secretValue: decryptedSecret.toString() };
};
const getSecretRequestById = async ({
id,
actor,
actorId,
actorAuthMethod,
actorOrgId
}: TGetSecretRequestByIdDTO) => {
const secretRequest = await secretSharingDAL.getSecretRequestById(id);
if (!secretRequest) {
throw new NotFoundError({ message: `Secret request with ID '${id}' not found` });
}
if (secretRequest.accessType === SecretSharingAccessType.Organization) {
if (!secretRequest.orgId) {
throw new BadRequestError({ message: "No organization ID present on secret request" });
}
if (!actorOrgId) {
throw new UnauthorizedError();
}
const { permission } = await permissionService.getOrgPermission(
actor,
actorId,
secretRequest.orgId,
actorAuthMethod,
actorOrgId
);
if (!permission) throw new ForbiddenRequestError({ name: "User is not a part of the specified organization" });
}
if (secretRequest.expiresAt && secretRequest.expiresAt < new Date()) {
throw new ForbiddenRequestError({
message: "Access denied: Secret request has expired"
});
}
return {
...secretRequest,
isSecretValueSet: Boolean(secretRequest.encryptedSecret)
};
};
const setSecretRequestValue = async ({
id,
actor,
actorId,
actorAuthMethod,
actorOrgId,
secretValue
}: TSetSecretRequestValueDTO) => {
const appCfg = getConfig();
const secretRequest = await secretSharingDAL.getSecretRequestById(id);
if (!secretRequest) {
throw new NotFoundError({ message: `Secret request with ID '${id}' not found` });
}
let respondentUsername: string | undefined;
if (secretRequest.accessType === SecretSharingAccessType.Organization) {
if (!secretRequest.orgId) {
throw new BadRequestError({ message: "No organization ID present on secret request" });
}
if (!actorOrgId) {
throw new UnauthorizedError();
}
const { permission } = await permissionService.getOrgPermission(
actor,
actorId,
secretRequest.orgId,
actorAuthMethod,
actorOrgId
);
if (!permission) throw new ForbiddenRequestError({ name: "User is not a part of the specified organization" });
const user = await userDAL.findById(actorId);
if (!user) {
throw new NotFoundError({ message: `User with ID '${actorId}' not found` });
}
respondentUsername = user.username;
}
if (secretRequest.encryptedSecret) {
throw new BadRequestError({ message: "Secret request already has a value set" });
}
if (secretValue.length > 10_000) {
throw new BadRequestError({ message: "Shared secret value too long" });
}
if (secretRequest.expiresAt && secretRequest.expiresAt < new Date()) {
throw new ForbiddenRequestError({
message: "Access denied: Secret request has expired"
});
}
const encryptWithRoot = kmsService.encryptWithRootKey();
const encryptedSecret = encryptWithRoot(Buffer.from(secretValue));
const request = await secretSharingDAL.transaction(async (tx) => {
const updatedRequest = await secretSharingDAL.updateById(id, { encryptedSecret }, tx);
await smtpService.sendMail({
recipients: [secretRequest.requesterUsername],
subjectLine: "Secret Request Completed",
substitutions: {
name: secretRequest.name,
respondentUsername,
secretRequestUrl: `${appCfg.SITE_URL}/organization/secret-sharing?selectedTab=request-secret`
},
template: SmtpTemplates.SecretRequestCompleted
});
return updatedRequest;
});
return request;
};
const createPublicSharedSecret = async ({
password,
secretValue,
@ -121,6 +318,7 @@ export const secretSharingServiceFactory = ({
encryptedValue: null,
iv: null,
tag: null,
type: SecretSharingType.Share,
encryptedSecret,
password: hashedPassword,
expiresAt: new Date(expiresAt),
@ -137,7 +335,8 @@ export const secretSharingServiceFactory = ({
actorAuthMethod,
actorOrgId,
offset,
limit
limit,
type
}: TGetSharedSecretsDTO) => {
if (!actorOrgId) throw new ForbiddenRequestError();
@ -153,14 +352,16 @@ export const secretSharingServiceFactory = ({
const secrets = await secretSharingDAL.find(
{
userId: actorId,
orgId: actorOrgId
orgId: actorOrgId,
type
},
{ offset, limit, sort: [["createdAt", "desc"]] }
);
const count = await secretSharingDAL.countAllUserOrgSharedSecrets({
orgId: actorOrgId,
userId: actorId
userId: actorId,
type
});
return {
@ -187,9 +388,11 @@ export const secretSharingServiceFactory = ({
const sharedSecret = isUuidV4(sharedSecretId)
? await secretSharingDAL.findOne({
id: sharedSecretId,
type: SecretSharingType.Share,
hashedHex
})
: await secretSharingDAL.findOne({
type: SecretSharingType.Share,
identifier: Buffer.from(sharedSecretId, "base64url").toString("hex")
});
@ -254,7 +457,7 @@ export const secretSharingServiceFactory = ({
secret: {
...sharedSecret,
...(decryptedSecretValue && {
secretValue: Buffer.from(decryptedSecretValue).toString()
secretValue: decryptedSecretValue.toString()
}),
orgName:
sharedSecret.accessType === SecretSharingAccessType.Organization && orgId === sharedSecret.orgId
@ -270,11 +473,17 @@ export const secretSharingServiceFactory = ({
if (!permission) throw new ForbiddenRequestError({ name: "User does not belong to the specified organization" });
const sharedSecret = isUuidV4(sharedSecretId)
? await secretSharingDAL.findById(sharedSecretId)
: await secretSharingDAL.findOne({ identifier: sharedSecretId });
? await secretSharingDAL.findOne({ id: sharedSecretId, type: deleteSharedSecretInput.type })
: await secretSharingDAL.findOne({ identifier: sharedSecretId, type: deleteSharedSecretInput.type });
if (sharedSecret.orgId && sharedSecret.orgId !== orgId)
if (sharedSecret.userId !== actorId) {
throw new ForbiddenRequestError({
message: "User does not have permission to delete shared secret"
});
}
if (sharedSecret.orgId && sharedSecret.orgId !== orgId) {
throw new ForbiddenRequestError({ message: "User does not have permission to delete shared secret" });
}
const deletedSharedSecret = await secretSharingDAL.deleteById(sharedSecretId);
@ -286,6 +495,11 @@ export const secretSharingServiceFactory = ({
createPublicSharedSecret,
getSharedSecrets,
deleteSharedSecretById,
getSharedSecretById
getSharedSecretById,
createSecretRequest,
getSecretRequestById,
setSecretRequestValue,
revealSecretRequestValue
};
};

@ -1,8 +1,14 @@
import { SecretSharingAccessType, TGenericPermission } from "@app/lib/types";
import { SecretSharingAccessType, TGenericPermission, TOrgPermission } from "@app/lib/types";
import { ActorAuthMethod, ActorType } from "../auth/auth-type";
export enum SecretSharingType {
Share = "share",
Request = "request"
}
export type TGetSharedSecretsDTO = {
type: SecretSharingType;
offset: number;
limit: number;
} & TGenericPermission;
@ -39,6 +45,26 @@ export type TValidateActiveSharedSecretDTO = TGetActiveSharedSecretByIdDTO & {
export type TCreateSharedSecretDTO = TSharedSecretPermission & TCreatePublicSharedSecretDTO;
export type TCreateSecretRequestDTO = {
name?: string;
accessType: SecretSharingAccessType;
expiresAt: string;
} & TOrgPermission;
export type TRevealSecretRequestValueDTO = {
id: string;
} & TOrgPermission;
export type TGetSecretRequestByIdDTO = {
id: string;
} & Omit<TOrgPermission, "orgId">;
export type TSetSecretRequestValueDTO = {
id: string;
secretValue: string;
} & Omit<TOrgPermission, "orgId">;
export type TDeleteSharedSecretDTO = {
sharedSecretId: string;
type: SecretSharingType;
} & TSharedSecretPermission;

@ -5,6 +5,7 @@ import { ForbiddenRequestError, NotFoundError } from "@app/lib/errors";
import { groupBy } from "@app/lib/fn";
import { logger } from "@app/lib/logger";
import { ActorType } from "../auth/auth-type";
import { TProjectEnvDALFactory } from "../project-env/project-env-dal";
import { ResourceMetadataDTO } from "../resource-metadata/resource-metadata-schema";
import { TSecretFolderDALFactory } from "../secret-folder/secret-folder-dal";
@ -62,6 +63,7 @@ export const fnSecretBulkInsert = async ({
resourceMetadataDAL,
secretTagDAL,
secretVersionTagDAL,
actor,
tx
}: TFnSecretBulkInsert) => {
const sanitizedInputSecrets = inputSecrets.map(
@ -90,6 +92,10 @@ export const fnSecretBulkInsert = async ({
})
);
const userActorId = actor && actor.type === ActorType.USER ? actor.actorId : undefined;
const identityActorId = actor && actor.type !== ActorType.USER ? actor.actorId : undefined;
const actorType = actor?.type || ActorType.PLATFORM;
const newSecrets = await secretDAL.insertMany(
sanitizedInputSecrets.map((el) => ({ ...el, folderId })),
tx
@ -106,6 +112,9 @@ export const fnSecretBulkInsert = async ({
sanitizedInputSecrets.map((el) => ({
...el,
folderId,
userActorId,
identityActorId,
actorType,
secretId: newSecretGroupedByKeyName[el.key][0].id
})),
tx
@ -157,8 +166,13 @@ export const fnSecretBulkUpdate = async ({
secretVersionDAL,
secretTagDAL,
secretVersionTagDAL,
resourceMetadataDAL
resourceMetadataDAL,
actor
}: TFnSecretBulkUpdate) => {
const userActorId = actor && actor?.type === ActorType.USER ? actor?.actorId : undefined;
const identityActorId = actor && actor?.type !== ActorType.USER ? actor?.actorId : undefined;
const actorType = actor?.type || ActorType.PLATFORM;
const sanitizedInputSecrets = inputSecrets.map(
({
filter,
@ -216,7 +230,10 @@ export const fnSecretBulkUpdate = async ({
encryptedValue,
reminderRepeatDays,
folderId,
secretId
secretId,
userActorId,
identityActorId,
actorType
})
),
tx
@ -616,6 +633,12 @@ export const reshapeBridgeSecret = (
secret: Omit<TSecretsV2, "encryptedValue" | "encryptedComment"> & {
value: string;
comment: string;
userActorName?: string | null;
identityActorName?: string | null;
userActorId?: string | null;
identityActorId?: string | null;
membershipId?: string | null;
actorType?: string | null;
tags?: {
id: string;
slug: string;
@ -636,6 +659,14 @@ export const reshapeBridgeSecret = (
_id: secret.id,
id: secret.id,
user: secret.userId,
actor: secret.actorType
? {
actorType: secret.actorType,
actorId: secret.userActorId || secret.identityActorId,
name: secret.identityActorName || secret.userActorName,
membershipId: secret.membershipId
}
: undefined,
tags: secret.tags,
skipMultilineEncoding: secret.skipMultilineEncoding,
secretReminderRepeatDays: secret.reminderRepeatDays,

@ -301,6 +301,10 @@ export const secretV2BridgeServiceFactory = ({
secretVersionDAL,
secretTagDAL,
secretVersionTagDAL,
actor: {
type: actor,
actorId
},
tx
})
);
@ -483,6 +487,10 @@ export const secretV2BridgeServiceFactory = ({
secretVersionDAL,
secretTagDAL,
secretVersionTagDAL,
actor: {
type: actor,
actorId
},
tx
})
);
@ -1230,6 +1238,10 @@ export const secretV2BridgeServiceFactory = ({
secretVersionDAL,
secretTagDAL,
secretVersionTagDAL,
actor: {
type: actor,
actorId
},
tx
})
);
@ -1490,6 +1502,10 @@ export const secretV2BridgeServiceFactory = ({
secretVersionDAL,
secretTagDAL,
secretVersionTagDAL,
actor: {
type: actor,
actorId
},
resourceMetadataDAL
});
updatedSecrets.push(...bulkUpdatedSecrets.map((el) => ({ ...el, secretPath: folder.path })));
@ -1522,6 +1538,10 @@ export const secretV2BridgeServiceFactory = ({
secretVersionDAL,
secretTagDAL,
secretVersionTagDAL,
actor: {
type: actor,
actorId
},
tx
});
updatedSecrets.push(...bulkInsertedSecrets.map((el) => ({ ...el, secretPath: folder.path })));
@ -1689,14 +1709,19 @@ export const secretV2BridgeServiceFactory = ({
type: KmsDataKey.SecretManager,
projectId: folder.projectId
});
const secretVersions = await secretVersionDAL.find({ secretId }, { offset, limit, sort: [["createdAt", "desc"]] });
return secretVersions.map((el) =>
reshapeBridgeSecret(folder.projectId, folder.environment.envSlug, "/", {
const secretVersions = await secretVersionDAL.findVersionsBySecretIdWithActors(secretId, folder.projectId, {
offset,
limit,
sort: [["createdAt", "desc"]]
});
return secretVersions.map((el) => {
return reshapeBridgeSecret(folder.projectId, folder.environment.envSlug, "/", {
...el,
value: el.encryptedValue ? secretManagerDecryptor({ cipherTextBlob: el.encryptedValue }).toString() : "",
comment: el.encryptedComment ? secretManagerDecryptor({ cipherTextBlob: el.encryptedComment }).toString() : ""
})
);
});
});
};
// this is a backfilling API for secret references
@ -1956,6 +1981,10 @@ export const secretV2BridgeServiceFactory = ({
secretTagDAL,
resourceMetadataDAL,
secretVersionTagDAL,
actor: {
type: actor,
actorId
},
inputSecrets: locallyCreatedSecrets.map((doc) => {
return {
type: doc.type,
@ -1982,6 +2011,10 @@ export const secretV2BridgeServiceFactory = ({
tx,
secretTagDAL,
secretVersionTagDAL,
actor: {
type: actor,
actorId
},
inputSecrets: locallyUpdatedSecrets.map((doc) => {
return {
filter: {

@ -168,6 +168,10 @@ export type TFnSecretBulkInsert = {
secretVersionDAL: Pick<TSecretVersionV2DALFactory, "insertMany">;
secretTagDAL: Pick<TSecretTagDALFactory, "saveTagsToSecretV2">;
secretVersionTagDAL: Pick<TSecretVersionV2TagDALFactory, "insertMany">;
actor?: {
type: string;
actorId: string;
};
};
type TRequireReferenceIfValue =
@ -192,6 +196,10 @@ export type TFnSecretBulkUpdate = {
secretVersionDAL: Pick<TSecretVersionV2DALFactory, "insertMany">;
secretTagDAL: Pick<TSecretTagDALFactory, "saveTagsToSecretV2" | "deleteTagsToSecretV2">;
secretVersionTagDAL: Pick<TSecretVersionV2TagDALFactory, "insertMany">;
actor?: {
type: string;
actorId: string;
};
tx?: Knex;
};

@ -1,9 +1,10 @@
/* eslint-disable @typescript-eslint/no-unsafe-assignment */
import { Knex } from "knex";
import { TDbClient } from "@app/db";
import { TableName, TSecretVersionsV2, TSecretVersionsV2Update } from "@app/db/schemas";
import { BadRequestError, DatabaseError } from "@app/lib/errors";
import { ormify, selectAllTableCols } from "@app/lib/knex";
import { ormify, selectAllTableCols, TFindOpt } from "@app/lib/knex";
import { logger } from "@app/lib/logger";
import { QueueName } from "@app/queue";
@ -119,11 +120,67 @@ export const secretVersionV2BridgeDALFactory = (db: TDbClient) => {
logger.info(`${QueueName.DailyResourceCleanUp}: pruning secret version v2 completed`);
};
const findVersionsBySecretIdWithActors = async (
secretId: string,
projectId: string,
{ offset, limit, sort = [["createdAt", "desc"]] }: TFindOpt<TSecretVersionsV2> = {},
tx?: Knex
) => {
try {
const query = (tx || db)(TableName.SecretVersionV2)
.leftJoin(TableName.Users, `${TableName.Users}.id`, `${TableName.SecretVersionV2}.userActorId`)
.leftJoin(
TableName.ProjectMembership,
`${TableName.ProjectMembership}.userId`,
`${TableName.SecretVersionV2}.userActorId`
)
.leftJoin(TableName.Identity, `${TableName.Identity}.id`, `${TableName.SecretVersionV2}.identityActorId`)
.where((qb) => {
void qb.where(`${TableName.SecretVersionV2}.secretId`, secretId);
void qb.where(`${TableName.ProjectMembership}.projectId`, projectId);
})
.orWhere((qb) => {
void qb.where(`${TableName.SecretVersionV2}.secretId`, secretId);
void qb.whereNull(`${TableName.ProjectMembership}.projectId`);
})
.select(
selectAllTableCols(TableName.SecretVersionV2),
`${TableName.Users}.username as userActorName`,
`${TableName.Identity}.name as identityActorName`,
`${TableName.ProjectMembership}.id as membershipId`
);
if (limit) void query.limit(limit);
if (offset) void query.offset(offset);
if (sort) {
void query.orderBy(
sort.map(([column, order, nulls]) => ({
column: `${TableName.SecretVersionV2}.${column as string}`,
order,
nulls
}))
);
}
const docs: Array<
TSecretVersionsV2 & {
userActorName: string | undefined | null;
identityActorName: string | undefined | null;
membershipId: string | undefined | null;
}
> = await query;
return docs;
} catch (error) {
throw new DatabaseError({ error, name: "FindVersionsBySecretIdWithActors" });
}
};
return {
...secretVersionV2Orm,
pruneExcessVersions,
findLatestVersionMany,
bulkUpdate,
findLatestVersionByFolderId
findLatestVersionByFolderId,
findVersionsBySecretIdWithActors
};
};
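A hedged usage sketch of the new helper and the joined actor columns; values are placeholders, and the actor fields may be null for versions written before actors were recorded.

const versions = await secretVersionDAL.findVersionsBySecretIdWithActors(secretId, projectId, {
  offset: 0,
  limit: 20,
  sort: [["createdAt", "desc"]]
});

for (const v of versions) {
  // userActorName / identityActorName / membershipId come from the left joins above.
  const actorName = v.identityActorName ?? v.userActorName ?? "platform";
  logger.info(`${v.createdAt.toISOString()} changed by ${actorName} (membership: ${v.membershipId ?? "n/a"})`);
}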

@ -579,6 +579,7 @@ export const fnSecretBulkInsert = async ({
[`${TableName.Secret}Id` as const]: newSecretGroupByBlindIndex[secretBlindIndex as string][0].id
}))
);
const secretVersions = await secretVersionDAL.insertMany(
sanitizedInputSecrets.map((el) => ({
...el,

@ -39,7 +39,8 @@ export enum SmtpTemplates {
SecretSyncFailed = "secretSyncFailed.handlebars",
ExternalImportSuccessful = "externalImportSuccessful.handlebars",
ExternalImportFailed = "externalImportFailed.handlebars",
ExternalImportStarted = "externalImportStarted.handlebars"
ExternalImportStarted = "externalImportStarted.handlebars",
SecretRequestCompleted = "secretRequestCompleted.handlebars"
}
export enum SmtpHost {

@ -0,0 +1,33 @@
<html>
<head>
<meta charset="utf-8" />
<meta http-equiv="x-ua-compatible" content="ie=edge" />
<title>Secret Request Completed</title>
</head>
<body>
<h2>Infisical</h2>
<h2>A secret has been shared with you</h2>
{{#if name}}
<p>Secret request name: {{name}}</p>
{{/if}}
{{#if respondentUsername}}
<p>Shared by: {{respondentUsername}}</p>
{{/if}}
<br />
<br />
<p>
You can access the secret by clicking the link below.
</p>
<p>
<a href="{{secretRequestUrl}}">Access Secret</a>
</p>
{{emailFooter}}
</body>
</html>

@ -291,6 +291,15 @@ export const superAdminServiceFactory = ({
return user;
};
const grantServerAdminAccessToUser = async (userId: string) => {
if (!licenseService.onPremFeatures?.instanceUserManagement) {
throw new BadRequestError({
message: "Failed to grant server admin access to user due to plan restriction. Upgrade to Infisical's Pro plan."
});
}
await userDAL.updateById(userId, { superAdmin: true });
};
const getAdminSlackConfig = async () => {
const serverCfg = await serverCfgDAL.findById(ADMIN_CONFIG_DB_UUID);
@ -381,6 +390,7 @@ export const superAdminServiceFactory = ({
deleteUser,
getAdminSlackConfig,
updateRootEncryptionStrategy,
getConfiguredEncryptionStrategies
getConfiguredEncryptionStrategies,
grantServerAdminAccessToUser
};
};

@ -13,7 +13,9 @@ export enum PostHogEventTypes {
IntegrationCreated = "Integration Created",
MachineIdentityCreated = "Machine Identity Created",
UserOrgInvitation = "User Org Invitation",
TelemetryInstanceStats = "Self Hosted Instance Stats"
TelemetryInstanceStats = "Self Hosted Instance Stats",
SecretRequestCreated = "Secret Request Created",
SecretRequestDeleted = "Secret Request Deleted"
}
export type TSecretModifiedEvent = {
@ -120,6 +122,23 @@ export type TTelemetryInstanceStatsEvent = {
};
};
export type TSecretRequestCreatedEvent = {
event: PostHogEventTypes.SecretRequestCreated;
properties: {
secretRequestId: string;
organizationId: string;
secretRequestName?: string;
};
};
export type TSecretRequestDeletedEvent = {
event: PostHogEventTypes.SecretRequestDeleted;
properties: {
secretRequestId: string;
organizationId: string;
};
};
export type TPostHogEvent = { distinctId: string } & (
| TSecretModifiedEvent
| TAdminInitEvent
@ -130,4 +149,6 @@ export type TPostHogEvent = { distinctId: string } & (
| TIntegrationCreatedEvent
| TProjectCreateEvent
| TTelemetryInstanceStatsEvent
| TSecretRequestCreatedEvent
| TSecretRequestDeletedEvent
);

@ -0,0 +1,8 @@
public_ip: 127.0.0.1
auth_secret: example-auth-secret
realm: infisical.org
# set port 5349 for tls
# port: 5349
# tls_private_key_path: /full-path
# tls_ca_path: /full-path
# tls_cert_path: /full-path

@ -0,0 +1,8 @@
public_ip: 127.0.0.1
auth_secret: changeThisOnProduction
realm: infisical.org
# set port 5349 for tls
# port: 5349
# tls_private_key_path: /full-path
# tls_ca_path: /full-path
# tls_cert_path: /full-path

@ -1,6 +1,8 @@
module github.com/Infisical/infisical-merge
go 1.21
go 1.23.0
toolchain go1.23.5
require (
github.com/bradleyjkemp/cupaloy/v2 v2.8.0
@ -21,12 +23,14 @@ require (
github.com/pion/logging v0.2.3
github.com/pion/turn/v4 v4.0.0
github.com/posthog/posthog-go v0.0.0-20221221115252-24dfed35d71a
github.com/quic-go/quic-go v0.50.0
github.com/rs/cors v1.11.0
github.com/rs/zerolog v1.26.1
github.com/spf13/cobra v1.6.1
github.com/spf13/viper v1.8.1
github.com/stretchr/testify v1.9.0
golang.org/x/crypto v0.33.0
github.com/stretchr/testify v1.10.0
golang.org/x/crypto v0.35.0
golang.org/x/sys v0.30.0
golang.org/x/term v0.29.0
gopkg.in/yaml.v2 v2.4.0
)
@ -58,13 +62,15 @@ require (
github.com/dvsekhvalnov/jose2go v1.6.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.4.9 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/errors v0.20.2 // indirect
github.com/go-openapi/strfmt v0.21.3 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/pprof v0.0.0-20250302191652-9094ed2288e7 // indirect
github.com/google/s2a-go v0.1.7 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
github.com/googleapis/gax-go/v2 v2.12.5 // indirect
@ -82,6 +88,7 @@ require (
github.com/muesli/mango-pflag v0.1.0 // indirect
github.com/muesli/termenv v0.15.2 // indirect
github.com/oklog/ulid v1.3.1 // indirect
github.com/onsi/ginkgo/v2 v2.22.2 // indirect
github.com/pelletier/go-toml v1.9.3 // indirect
github.com/pion/dtls/v3 v3.0.4 // indirect
github.com/pion/randutil v0.1.0 // indirect
@ -103,17 +110,20 @@ require (
go.opentelemetry.io/otel v1.24.0 // indirect
go.opentelemetry.io/otel/metric v1.24.0 // indirect
go.opentelemetry.io/otel/trace v1.24.0 // indirect
golang.org/x/net v0.33.0 // indirect
go.uber.org/mock v0.5.0 // indirect
golang.org/x/exp v0.0.0-20250228200357-dead58393ab7 // indirect
golang.org/x/mod v0.23.0 // indirect
golang.org/x/net v0.35.0 // indirect
golang.org/x/oauth2 v0.21.0 // indirect
golang.org/x/sync v0.11.0 // indirect
golang.org/x/sys v0.30.0 // indirect
golang.org/x/text v0.22.0 // indirect
golang.org/x/time v0.6.0 // indirect
golang.org/x/tools v0.30.0 // indirect
google.golang.org/api v0.188.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240708141625-4ad9e859172b // indirect
google.golang.org/grpc v1.64.1 // indirect
google.golang.org/protobuf v1.34.2 // indirect
google.golang.org/protobuf v1.36.1 // indirect
gopkg.in/ini.v1 v1.62.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
@ -129,3 +139,5 @@ require (
)
replace github.com/zalando/go-keyring => github.com/Infisical/go-keyring v1.0.2
replace github.com/pion/turn/v4 => github.com/Infisical/turn/v4 v4.0.1

@ -49,6 +49,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Infisical/go-keyring v1.0.2 h1:dWOkI/pB/7RocfSJgGXbXxLDcVYsdslgjEPmVhb+nl8=
github.com/Infisical/go-keyring v1.0.2/go.mod h1:LWOnn/sw9FxDW/0VY+jHFAfOFEe03xmwBVSfJnBowto=
github.com/Infisical/turn/v4 v4.0.1 h1:omdelNsnFfzS5cu86W5OBR68by68a8sva4ogR0lQQnw=
github.com/Infisical/turn/v4 v4.0.1/go.mod h1:pMMKP/ieNAG/fN5cZiN4SDuyKsXtNTr0ccN7IToA1zs=
github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0=
github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
@ -144,8 +146,8 @@ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/errors v0.20.2 h1:dxy7PGTqEh94zj2E3h1cUmQQWiM1+aeCROfAr02EmK8=
@ -154,6 +156,8 @@ github.com/go-openapi/strfmt v0.21.3 h1:xwhj5X6CjXEZZHMWy1zKJxvW9AfHC9pkyUjLvHtK
github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg=
github.com/go-resty/resty/v2 v2.16.5 h1:hBKqmWrr7uRc3euHVqmh1HTHcKn99Smr7o5spptdhTM=
github.com/go-resty/resty/v2 v2.16.5/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
@ -222,6 +226,8 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20250302191652-9094ed2288e7 h1:+J3r2e8+RsmN3vKfo75g0YSY61ms37qzPglu4p0sGro=
github.com/google/pprof v0.0.0-20250302191652-9094ed2288e7/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
@ -342,6 +348,10 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWb
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU=
github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk=
github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ=
github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
@ -357,8 +367,6 @@ github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw=
github.com/pion/stun/v3 v3.0.0/go.mod h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU=
github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0=
github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo=
github.com/pion/turn/v4 v4.0.0 h1:qxplo3Rxa9Yg1xXDxxH8xaqcyGUtbHYw4QSCvmFWvhM=
github.com/pion/turn/v4 v4.0.0/go.mod h1:MuPDkm15nYSklKpN8vWJ9W2M0PlyQZqYt1McGuxG7mA=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@ -369,6 +377,8 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr
github.com/posthog/posthog-go v0.0.0-20221221115252-24dfed35d71a h1:Ey0XWvrg6u6hyIn1Kd/jCCmL+bMv9El81tvuGBbxZGg=
github.com/posthog/posthog-go v0.0.0-20221221115252-24dfed35d71a/go.mod h1:oa2sAs9tGai3VldabTV0eWejt/O4/OOD7azP8GaikqU=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/quic-go/quic-go v0.50.0 h1:3H/ld1pa3CYhkcc20TPIyG1bNsdhn9qZBGN3b9/UyUo=
github.com/quic-go/quic-go v0.50.0/go.mod h1:Vim6OmUvlYdwBhXP9ZVrtGmCMWa3wEqhq3NgYrI8b4E=
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
@ -415,8 +425,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
@ -461,6 +471,8 @@ go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8p
go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@ -472,8 +484,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus=
golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M=
golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs=
golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -484,6 +496,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20250228200357-dead58393ab7 h1:aWwlzYV971S4BXRS9AmqwDLAD85ouC6X+pocatKY58c=
golang.org/x/exp v0.0.0-20250228200357-dead58393ab7/go.mod h1:BHOTPb3L19zxehTsLoJXVaTktb06DFgmdW6Wb9s8jqk=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@ -509,6 +523,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM=
golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -547,8 +563,8 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8=
golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -697,6 +713,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY=
golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -811,8 +829,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

@ -109,12 +109,38 @@ var gatewayCmd = &cobra.Command{
},
}
var gatewayRelayCmd = &cobra.Command{
Example: `infisical gateway relay`,
Short: "Used to run infisical gateway relay",
Use: "relay",
DisableFlagsInUseLine: true,
Args: cobra.NoArgs,
Run: func(cmd *cobra.Command, args []string) {
relayConfigFilePath, err := cmd.Flags().GetString("config")
if err != nil {
util.HandleError(err, "Unable to parse flag")
}
if relayConfigFilePath == "" {
util.HandleError(fmt.Errorf("Missing config file"))
}
gatewayRelay, err := gateway.NewGatewayRelay(relayConfigFilePath)
if err != nil {
util.HandleError(err, "Failed to initialize gateway")
}
err = gatewayRelay.Run()
if err != nil {
util.HandleError(err, "Failed to start gateway")
}
},
}
func init() {
gatewayCmd.SetHelpFunc(func(command *cobra.Command, strings []string) {
command.Flags().MarkHidden("domain")
command.Parent().HelpFunc()(command, strings)
})
gatewayCmd.Flags().String("token", "", "Connect with Infisical using machine identity access token")
gatewayRelayCmd.Flags().String("config", "", "Relay config yaml file path")
gatewayCmd.AddCommand(gatewayRelayCmd)
rootCmd.AddCommand(gatewayCmd)
}

@ -3,20 +3,49 @@ package gateway
import (
"bufio"
"bytes"
"context"
"errors"
"io"
"net"
"strings"
"sync"
"github.com/quic-go/quic-go"
"github.com/rs/zerolog/log"
)
func handleConnection(conn net.Conn) {
defer conn.Close()
log.Info().Msgf("New connection from: %s", conn.RemoteAddr().String())
func handleConnection(ctx context.Context, quicConn quic.Connection) {
log.Info().Msgf("New connection from: %s", quicConn.RemoteAddr().String())
// Use WaitGroup to track all streams
var wg sync.WaitGroup
for {
// Accept the first stream, which we'll use for commands
stream, err := quicConn.AcceptStream(ctx)
if err != nil {
log.Printf("Failed to accept QUIC stream: %v", err)
break
}
wg.Add(1)
go func(stream quic.Stream) {
defer wg.Done()
defer stream.Close()
handleStream(stream, quicConn)
}(stream)
}
wg.Wait()
log.Printf("All streams closed for connection: %s", quicConn.RemoteAddr().String())
}
func handleStream(stream quic.Stream, quicConn quic.Connection) {
streamID := stream.StreamID()
log.Printf("New stream %d from: %s", streamID, quicConn.RemoteAddr().String())
// Use buffered reader for better handling of fragmented data
reader := bufio.NewReader(conn)
reader := bufio.NewReader(stream)
defer stream.Close()
for {
msg, err := reader.ReadBytes('\n')
if err != nil {
@ -39,6 +68,7 @@ func handleConnection(conn net.Conn) {
return
}
defer destTarget.Close()
log.Info().Msgf("Starting secure transmission between %s->%s", quicConn.LocalAddr().String(), destTarget.LocalAddr().String())
// Handle buffered data
buffered := reader.Buffered()
@ -56,10 +86,11 @@ func handleConnection(conn net.Conn) {
}
}
CopyData(conn, destTarget)
CopyDataFromQuicToTcp(stream, destTarget)
log.Info().Msgf("Ending secure transmission between %s->%s", quicConn.LocalAddr().String(), destTarget.LocalAddr().String())
return
case "PING":
if _, err := conn.Write([]byte("PONG")); err != nil {
if _, err := stream.Write([]byte("PONG\n")); err != nil {
log.Error().Msgf("Error writing PONG response: %v", err)
}
return
@ -74,34 +105,38 @@ type CloseWrite interface {
CloseWrite() error
}
func CopyData(src, dst net.Conn) {
func CopyDataFromQuicToTcp(quicStream quic.Stream, tcpConn net.Conn) {
// Create a WaitGroup to wait for both copy operations
var wg sync.WaitGroup
wg.Add(2)
copyAndClose := func(dst, src net.Conn, done chan<- bool) {
// Start copying from QUIC stream to TCP
go func() {
defer wg.Done()
_, err := io.Copy(dst, src)
if err != nil && !errors.Is(err, io.EOF) {
log.Error().Msgf("Copy error: %v", err)
if _, err := io.Copy(tcpConn, quicStream); err != nil {
log.Error().Msgf("Error copying quic->postgres: %v", err)
}
// Signal we're done writing
done <- true
// Half close the connection if possible
if c, ok := dst.(CloseWrite); ok {
c.CloseWrite()
if e, ok := tcpConn.(CloseWrite); ok {
log.Debug().Msg("Closing TCP write end")
e.CloseWrite()
} else {
log.Debug().Msg("TCP connection does not support CloseWrite")
}
}
}()
done1 := make(chan bool, 1)
done2 := make(chan bool, 1)
go copyAndClose(dst, src, done1)
go copyAndClose(src, dst, done2)
// Start copying from TCP to QUIC stream
go func() {
defer wg.Done()
if _, err := io.Copy(quicStream, tcpConn); err != nil {
log.Debug().Msgf("Error copying postgres->quic: %v", err)
}
// Close the write side of the QUIC stream
if err := quicStream.Close(); err != nil && !strings.Contains(err.Error(), "close called for canceled stream") {
log.Error().Msgf("Error closing QUIC stream write: %v", err)
}
}()
// Wait for both copies to complete
<-done1
<-done2
wg.Wait()
}

@ -6,15 +6,19 @@ import (
"crypto/x509"
"fmt"
"net"
"os"
"strings"
"sync"
"time"
"github.com/Infisical/infisical-merge/packages/api"
"github.com/Infisical/infisical-merge/packages/systemd"
"github.com/go-resty/resty/v2"
"github.com/pion/logging"
"github.com/pion/turn/v4"
"github.com/rs/zerolog/log"
"github.com/quic-go/quic-go"
)
type GatewayConfig struct {
@ -56,13 +60,12 @@ func (g *Gateway) ConnectWithRelay() error {
if relayPort == "5349" {
log.Info().Msgf("Provided relay port %s. Using TLS", relayPort)
conn, err = tls.Dial("tcp", relayDetails.TurnServerAddress, &tls.Config{
InsecureSkipVerify: false,
ServerName: relayAddress,
ServerName: relayAddress,
})
} else {
log.Info().Msgf("Provided relay port %s. Using non TLS connection.", relayPort)
peerAddr, err := net.ResolveTCPAddr("tcp", relayDetails.TurnServerAddress)
if err != nil {
peerAddr, errPeer := net.ResolveTCPAddr("tcp", relayDetails.TurnServerAddress)
if errPeer != nil {
return fmt.Errorf("Failed to parse turn server address: %w", err)
}
conn, err = net.DialTCP("tcp", nil, peerAddr)
@ -74,6 +77,10 @@ func (g *Gateway) ConnectWithRelay() error {
// Start a new TURN Client and wrap our net.Conn in a STUNConn
// This allows us to simulate datagram based communication over a net.Conn
logger := logging.NewDefaultLoggerFactory()
if os.Getenv("LOG_LEVEL") == "debug" {
logger.DefaultLogLevel = logging.LogLevelDebug
}
cfg := &turn.ClientConfig{
STUNServerAddr: relayDetails.TurnServerAddress,
TURNServerAddr: relayDetails.TurnServerAddress,
@ -81,7 +88,7 @@ func (g *Gateway) ConnectWithRelay() error {
Username: relayDetails.TurnServerUsername,
Password: relayDetails.TurnServerPassword,
Realm: relayDetails.TurnServerRealm,
LoggerFactory: logging.NewDefaultLoggerFactory(),
LoggerFactory: logger,
}
client, err := turn.NewClient(cfg)
@ -95,10 +102,6 @@ func (g *Gateway) ConnectWithRelay() error {
TurnServerAddress: relayDetails.TurnServerAddress,
InfisicalStaticIp: relayDetails.InfisicalStaticIp,
}
// if port not specific allow all port
if relayDetails.InfisicalStaticIp != "" && !strings.Contains(relayDetails.InfisicalStaticIp, ":") {
g.config.InfisicalStaticIp = g.config.InfisicalStaticIp + ":0"
}
g.client = client
return nil
@ -116,20 +119,20 @@ func (g *Gateway) Listen(ctx context.Context) error {
// Allocate a relay socket on the TURN server. On success, it
// will return a net.PacketConn which represents the remote
// socket.
relayNonTlsConn, err := g.client.AllocateTCP()
relayUdpConnection, err := g.client.Allocate()
if err != nil {
return fmt.Errorf("Failed to allocate relay connection: %w", err)
}
log.Info().Msg(relayNonTlsConn.Addr().String())
log.Info().Msg(relayUdpConnection.LocalAddr().String())
defer func() {
if closeErr := relayNonTlsConn.Close(); closeErr != nil {
if closeErr := relayUdpConnection.Close(); closeErr != nil {
log.Error().Msgf("Failed to close connection: %s", closeErr)
}
}()
gatewayCert, err := api.CallExchangeRelayCertV1(g.httpClient, api.ExchangeRelayCertRequestV1{
RelayAddress: relayNonTlsConn.Addr().String(),
RelayAddress: relayUdpConnection.LocalAddr().String(),
})
if err != nil {
return err
@ -140,49 +143,54 @@ func (g *Gateway) Listen(ctx context.Context) error {
g.config.Certificate = gatewayCert.Certificate
g.config.CertificateChain = gatewayCert.CertificateChain
errCh := make(chan error, 1)
shutdownCh := make(chan bool, 1)
if g.config.InfisicalStaticIp != "" {
log.Info().Msgf("Found static ip from Infisical: %s. Creating permission IP lifecycle", g.config.InfisicalStaticIp)
peerAddr, err := net.ResolveTCPAddr("tcp", g.config.InfisicalStaticIp)
if err != nil {
return fmt.Errorf("Failed to parse infisical static ip: %w", err)
}
g.registerPermissionLifecycle(func() error {
err := relayNonTlsConn.CreatePermissions(peerAddr)
return err
}, shutdownCh)
if err = g.createPermissionForStaticIps(g.config.InfisicalStaticIp); err != nil {
return err
}
g.registerHeartBeat(ctx, errCh)
cert, err := tls.X509KeyPair([]byte(gatewayCert.Certificate), []byte(gatewayCert.PrivateKey))
if err != nil {
return fmt.Errorf("failed to parse cert: %s", err)
return fmt.Errorf("failed to parse cert: %w", err)
}
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM([]byte(gatewayCert.CertificateChain))
relayConn := tls.NewListener(relayNonTlsConn, &tls.Config{
// Setup QUIC server
tlsConfig := &tls.Config{
Certificates: []tls.Certificate{cert},
MinVersion: tls.VersionTLS12,
ClientCAs: caCertPool,
ClientAuth: tls.RequireAndVerifyClientCert,
})
NextProtos: []string{"infisical-gateway"},
}
// Setup QUIC listener on the relayConn
quicConfig := &quic.Config{
EnableDatagrams: true,
MaxIdleTimeout: 10 * time.Second,
KeepAlivePeriod: 2 * time.Second,
}
g.registerRelayIsActive(ctx, errCh)
quicListener, err := quic.Listen(relayUdpConnection, tlsConfig, quicConfig)
if err != nil {
return fmt.Errorf("Failed to listen for QUIC: %w", err)
}
defer quicListener.Close()
log.Printf("Listener started on %s", quicListener.Addr())
errCh := make(chan error, 1)
log.Info().Msg("Gateway started successfully")
g.registerHeartBeat(errCh, shutdownCh)
g.registerRelayIsActive(relayNonTlsConn.Addr().String(), errCh, shutdownCh)
// Create a WaitGroup to track active connections
var wg sync.WaitGroup
go func() {
for {
if relayDeadlineConn, ok := relayConn.(*net.TCPListener); ok {
relayDeadlineConn.SetDeadline(time.Now().Add(1 * time.Second))
}
select {
case <-ctx.Done():
return
@ -190,75 +198,53 @@ func (g *Gateway) Listen(ctx context.Context) error {
return
default:
// Accept new relay connection
conn, err := relayConn.Accept()
quicConn, err := quicListener.Accept(context.Background())
if err != nil {
// Check if it's a timeout error (which we expect due to our deadline)
if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
continue
}
if !strings.Contains(err.Error(), "data contains incomplete STUN or TURN frame") {
log.Error().Msgf("Failed to accept connection: %v", err)
}
log.Printf("Failed to accept QUIC connection: %v", err)
continue
}
tlsConn, ok := conn.(*tls.Conn)
if !ok {
log.Error().Msg("Failed to convert to TLS connection")
conn.Close()
continue
}
// Set a deadline for the handshake to prevent hanging
tlsConn.SetDeadline(time.Now().Add(10 * time.Second))
err = tlsConn.Handshake()
// Clear the deadline after handshake
tlsConn.SetDeadline(time.Time{})
if err != nil {
log.Error().Msgf("TLS handshake failed: %v", err)
conn.Close()
continue
}
// Get connection state which contains certificate information
state := tlsConn.ConnectionState()
if len(state.PeerCertificates) > 0 {
organizationUnit := state.PeerCertificates[0].Subject.OrganizationalUnit
commonName := state.PeerCertificates[0].Subject.CommonName
tlsState := quicConn.ConnectionState().TLS
if len(tlsState.PeerCertificates) > 0 {
organizationUnit := tlsState.PeerCertificates[0].Subject.OrganizationalUnit
commonName := tlsState.PeerCertificates[0].Subject.CommonName
if organizationUnit[0] != "gateway-client" || commonName != "cloud" {
log.Error().Msgf("Client certificate verification failed. Received %s, %s", organizationUnit, commonName)
conn.Close()
errMsg := fmt.Sprintf("Client certificate verification failed. Received %s, %s", organizationUnit, commonName)
log.Error().Msg(errMsg)
quicConn.CloseWithError(1, errMsg)
continue
}
}
// Handle the connection in a goroutine
wg.Add(1)
go func(c net.Conn) {
go func(c quic.Connection) {
defer wg.Done()
defer c.Close()
defer c.CloseWithError(0, "connection closed")
// Monitor parent context to close this connection when needed
go func() {
select {
case <-ctx.Done():
c.Close() // Force close connection when context is canceled
c.CloseWithError(0, "connection closed") // Force close connection when context is canceled
case <-shutdownCh:
c.Close() // Force close connection when accepting loop is done
c.CloseWithError(0, "connection closed") // Force close connection when accepting loop is done
}
}()
handleConnection(c)
}(conn)
handleConnection(ctx, c)
}(quicConn)
}
}
}()
// make this compatible with systemd notify mode
systemd.SdNotify(false, systemd.SdNotifyReady)
select {
case <-ctx.Done():
log.Info().Msg("Shutting down gateway...")
case err = <-errCh:
log.Error().Err(err).Msg("Gateway error occurred")
}
// Signal the accept loop to stop
@ -281,70 +267,107 @@ func (g *Gateway) Listen(ctx context.Context) error {
return err
}
func (g *Gateway) registerHeartBeat(errCh chan error, done chan bool) {
ticker := time.NewTicker(1 * time.Hour)
func (g *Gateway) registerHeartBeat(ctx context.Context, errCh chan error) {
ticker := time.NewTicker(30 * time.Minute)
defer ticker.Stop()
go func() {
time.Sleep(10 * time.Second)
log.Info().Msg("Registering first heart beat")
err := api.CallGatewayHeartBeatV1(g.httpClient)
if err != nil {
log.Error().Msgf("Failed to register heartbeat: %s", err)
}
for {
select {
case <-done:
ticker.Stop()
return
case <-ticker.C:
log.Info().Msg("Registering heart beat")
err := api.CallGatewayHeartBeatV1(g.httpClient)
if err := api.CallGatewayHeartBeatV1(g.httpClient); err != nil {
errCh <- err
} else {
log.Info().Msg("Gateway is reachable by Infisical")
}
}
}()
}
func (g *Gateway) registerPermissionLifecycle(permissionFn func() error, done chan bool) {
ticker := time.NewTicker(3 * time.Minute)
go func() {
// wait for 5 mins
permissionFn()
log.Printf("Created permission for incoming connections")
for {
select {
case <-done:
ticker.Stop()
case <-ctx.Done():
return
case <-ticker.C:
permissionFn()
}
}
}()
}
func (g *Gateway) registerRelayIsActive(serverAddr string, errCh chan error, done chan bool) {
ticker := time.NewTicker(10 * time.Second)
func (g *Gateway) createPermissionForStaticIps(staticIps string) error {
if staticIps == "" {
return fmt.Errorf("Missing Infisical static ips for permission")
}
splittedIps := strings.Split(staticIps, ",")
resolvedIps := make([]net.Addr, 0)
for _, ip := range splittedIps {
ip = strings.TrimSpace(ip)
if ip == "" {
continue
}
// if port not specific allow all port
if !strings.Contains(ip, ":") {
ip = ip + ":0"
}
peerAddr, err := net.ResolveUDPAddr("udp", ip)
if err != nil {
return fmt.Errorf("Failed to resolve static ip for permission: %w", err)
}
resolvedIps = append(resolvedIps, peerAddr)
}
if err := g.client.CreatePermission(resolvedIps...); err != nil {
return fmt.Errorf("Failed to set ip permission: %w", err)
}
return nil
}
func (g *Gateway) registerRelayIsActive(ctx context.Context, errCh chan error) error {
ticker := time.NewTicker(15 * time.Second)
maxFailures := 3
failures := 0
log.Info().Msg("Starting relay connection health check")
go func() {
time.Sleep(5 * time.Second)
for {
select {
case <-done:
ticker.Stop()
case <-ctx.Done():
log.Info().Msg("Stopping relay connection health check")
return
case <-ticker.C:
conn, err := net.Dial("tcp", serverAddr)
if err != nil {
errCh <- err
return
}
if conn != nil {
conn.Close()
}
func() {
log.Debug().Msg("Performing relay connection health check")
if g.client == nil {
failures++
log.Warn().Int("failures", failures).Msg("TURN client is nil")
if failures >= maxFailures {
errCh <- fmt.Errorf("relay connection check failed: TURN client is nil")
}
return
}
// we try to refresh permissions - this is a lightweight operation
// that will fail immediately if the UDP connection is broken. good for health check
log.Debug().Msg("Refreshing TURN permissions to verify connection")
if err := g.createPermissionForStaticIps(g.config.InfisicalStaticIp); err != nil {
failures++
log.Warn().Err(err).Int("failures", failures).Msg("Failed to refresh TURN permissions")
if failures >= maxFailures {
errCh <- fmt.Errorf("relay connection check failed: %w", err)
}
return
}
log.Debug().Msg("Successfully refreshed TURN permissions - connection is healthy")
if failures > 0 {
log.Info().Int("previous_failures", failures).Msg("Relay connection restored")
failures = 0
}
}()
}
}
}()
return nil
}

@ -0,0 +1,190 @@
//go:build !windows
// +build !windows
package gateway
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net"
"os"
"os/signal"
"runtime"
"strconv"
"syscall"
udplistener "github.com/Infisical/infisical-merge/packages/gateway/udp_listener"
"github.com/Infisical/infisical-merge/packages/systemd"
"github.com/pion/logging"
"github.com/pion/turn/v4"
"github.com/rs/zerolog/log"
"gopkg.in/yaml.v2"
)
var (
errMissingTlsCert = errors.New("Missing TLS files")
)
type GatewayRelay struct {
Config *GatewayRelayConfig
}
type GatewayRelayConfig struct {
PublicIP string `yaml:"public_ip"`
Port int `yaml:"port"`
Realm string `yaml:"realm"`
AuthSecret string `yaml:"auth_secret"`
RelayMinPort uint16 `yaml:"relay_min_port"`
RelayMaxPort uint16 `yaml:"relay_max_port"`
TlsCertPath string `yaml:"tls_cert_path"`
TlsPrivateKeyPath string `yaml:"tls_private_key_path"`
TlsCaPath string `yaml:"tls_ca_path"`
tls tls.Certificate
tlsCa string
isTlsEnabled bool
}
func NewGatewayRelay(configFilePath string) (*GatewayRelay, error) {
cfgFile, err := os.ReadFile(configFilePath)
if err != nil {
return nil, err
}
var cfg GatewayRelayConfig
if err := yaml.Unmarshal(cfgFile, &cfg); err != nil {
return nil, err
}
if cfg.PublicIP == "" {
return nil, fmt.Errorf("Missing public ip")
}
if cfg.AuthSecret == "" {
return nil, fmt.Errorf("Missing auth secret")
}
if cfg.Realm == "" {
cfg.Realm = "infisical.org"
}
if cfg.RelayMinPort == 0 {
cfg.RelayMinPort = 49152
}
if cfg.RelayMaxPort == 0 {
cfg.RelayMaxPort = 65535
}
if cfg.Port == 0 {
cfg.Port = 3478
} else if cfg.Port == 5349 {
if cfg.TlsCertPath == "" || cfg.TlsPrivateKeyPath == "" {
return nil, errMissingTlsCert
}
cert, err := tls.LoadX509KeyPair(cfg.TlsCertPath, cfg.TlsPrivateKeyPath)
if err != nil {
return nil, fmt.Errorf("Failed to read load server tls key pair: %w", err)
}
if cfg.TlsCaPath != "" {
ca, err := os.ReadFile(cfg.TlsCaPath)
if err != nil {
return nil, fmt.Errorf("Failed to read tls ca: %w", err)
}
cfg.tlsCa = string(ca)
}
cfg.tls = cert
cfg.isTlsEnabled = true
}
return &GatewayRelay{
Config: &cfg,
}, nil
}
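For reference, a relay config file passed via `infisical gateway relay --config <path>` maps onto the yaml tags above. The following is a minimal sketch with illustrative values, relying on the defaults applied in NewGatewayRelay (realm, relay port range) and including the TLS paths only because port 5349 requires them:
```yaml
# Illustrative relay config; public_ip and auth_secret are the only strictly required fields.
public_ip: 203.0.113.10
port: 5349
auth_secret: <turn-auth-secret>
realm: infisical.org
relay_min_port: 49152
relay_max_port: 65535
tls_cert_path: /etc/infisical/relay.crt
tls_private_key_path: /etc/infisical/relay.key
tls_ca_path: /etc/infisical/ca.crt
```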
func (g *GatewayRelay) Run() error {
addr, err := net.ResolveTCPAddr("tcp", "0.0.0.0:"+strconv.Itoa(g.Config.Port))
if err != nil {
return fmt.Errorf("Failed to parse server address: %s", err)
}
// LongTermTURNRESTAuthHandler takes a pion LeveledLogger. This allows you to intercept messages
// and process them yourself.
logger := logging.NewDefaultLeveledLoggerForScope("lt-creds", logging.LogLevelTrace, os.Stdout)
// Create `threadNum` TCP listeners to pass into pion/turn
// pion/turn itself doesn't allocate any sockets, but lets the user pass them in;
// this allows us to add logging, storage or modify inbound/outbound traffic
// The listeners share the same local address:port by setting SO_REUSEPORT, and the kernel
// will load-balance incoming connections across them
listenerConfig := udplistener.SetupListenerConfig()
publicIP := g.Config.PublicIP
relayAddressGenerator := &turn.RelayAddressGeneratorPortRange{
RelayAddress: net.ParseIP(publicIP), // Claim that we are listening on IP passed by user
Address: "0.0.0.0", // But actually be listening on every interface
MinPort: g.Config.RelayMinPort,
MaxPort: g.Config.RelayMaxPort,
}
threadNum := runtime.NumCPU()
listenerConfigs := make([]turn.ListenerConfig, threadNum)
var connAddress string
for i := 0; i < threadNum; i++ {
conn, listErr := listenerConfig.Listen(context.Background(), addr.Network(), addr.String())
if listErr != nil {
return fmt.Errorf("Failed to allocate TCP listener at %s:%s %s", addr.Network(), addr.String(), listErr)
}
listenerConfigs[i] = turn.ListenerConfig{
RelayAddressGenerator: relayAddressGenerator,
}
if g.Config.isTlsEnabled {
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM([]byte(g.Config.tlsCa))
listenerConfigs[i].Listener = tls.NewListener(conn, &tls.Config{
Certificates: []tls.Certificate{g.Config.tls},
ClientCAs: caCertPool,
})
} else {
listenerConfigs[i].Listener = conn
}
connAddress = conn.Addr().String()
}
loggerF := logging.NewDefaultLoggerFactory()
loggerF.DefaultLogLevel = logging.LogLevelDebug
server, err := turn.NewServer(turn.ServerConfig{
Realm: g.Config.Realm,
AuthHandler: turn.LongTermTURNRESTAuthHandler(g.Config.AuthSecret, logger),
// ListenerConfigs is a list of TCP/TLS listeners and the configuration around them
ListenerConfigs: listenerConfigs,
LoggerFactory: loggerF,
})
if err != nil {
return fmt.Errorf("Failed to start server: %w", err)
}
log.Info().Msgf("Relay listening on %s\n", connAddress)
// make this compatible with systemd notify mode
systemd.SdNotify(false, systemd.SdNotifyReady)
// Block until user sends SIGINT or SIGTERM
sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
<-sigs
if err = server.Close(); err != nil {
return fmt.Errorf("Failed to close server: %w", err)
}
return nil
}

@ -0,0 +1,37 @@
//go:build windows
// +build windows
package gateway
import (
"errors"
)
var (
errMissingTlsCert = errors.New("Missing TLS files")
errWindowsNotSupported = errors.New("Relay is not supported on Windows")
)
type GatewayRelay struct {
Config *GatewayRelayConfig
}
type GatewayRelayConfig struct {
PublicIP string
Port int
Realm string
AuthSecret string
RelayMinPort uint16
RelayMaxPort uint16
TlsCertPath string
TlsPrivateKeyPath string
TlsCaPath string
}
func NewGatewayRelay(configFilePath string) (*GatewayRelay, error) {
return nil, errWindowsNotSupported
}
func (g *GatewayRelay) Run() error {
return errWindowsNotSupported
}

@ -0,0 +1,26 @@
//go:build !windows
// +build !windows
package udplistener
import (
"net"
"syscall"
"golang.org/x/sys/unix"
// other imports
)
func SetupListenerConfig() *net.ListenConfig {
return &net.ListenConfig{
Control: func(network, address string, conn syscall.RawConn) error {
var operr error
if err := conn.Control(func(fd uintptr) {
operr = syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEPORT, 1)
}); err != nil {
return err
}
return operr
},
}
}

@ -0,0 +1,18 @@
//go:build windows
// +build windows
package udplistener
import (
"fmt"
"net"
"syscall"
)
func SetupListenerConfig() *net.ListenConfig {
return &net.ListenConfig{
Control: func(network, address string, conn syscall.RawConn) error {
return fmt.Errorf("Infisical relay not supported for windows.")
},
}
}

@ -0,0 +1,84 @@
// Copyright 2014 Docker, Inc.
// Copyright 2015-2018 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Package daemon provides a Go implementation of the sd_notify protocol.
// It can be used to inform systemd of service start-up completion, watchdog
// events, and other status changes.
//
// https://www.freedesktop.org/software/systemd/man/sd_notify.html#Description
package systemd
import (
"net"
"os"
)
const (
// SdNotifyReady tells the service manager that service startup is finished
// or the service finished loading its configuration.
SdNotifyReady = "READY=1"
// SdNotifyStopping tells the service manager that the service is beginning
// its shutdown.
SdNotifyStopping = "STOPPING=1"
// SdNotifyReloading tells the service manager that this service is
// reloading its configuration. Note that you must call SdNotifyReady when
// it completed reloading.
SdNotifyReloading = "RELOADING=1"
// SdNotifyWatchdog tells the service manager to update the watchdog
// timestamp for the service.
SdNotifyWatchdog = "WATCHDOG=1"
)
// SdNotify sends a message to the init daemon. It is common to ignore the error.
// If `unsetEnvironment` is true, the environment variable `NOTIFY_SOCKET`
// will be unconditionally unset.
//
// It returns one of the following:
// (false, nil) - notification not supported (i.e. NOTIFY_SOCKET is unset)
// (false, err) - notification supported, but failure happened (e.g. error connecting to NOTIFY_SOCKET or while sending data)
// (true, nil) - notification supported, data has been sent
func SdNotify(unsetEnvironment bool, state string) (bool, error) {
socketAddr := &net.UnixAddr{
Name: os.Getenv("NOTIFY_SOCKET"),
Net: "unixgram",
}
// NOTIFY_SOCKET not set
if socketAddr.Name == "" {
return false, nil
}
if unsetEnvironment {
if err := os.Unsetenv("NOTIFY_SOCKET"); err != nil {
return false, err
}
}
conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr)
// Error connecting to NOTIFY_SOCKET
if err != nil {
return false, err
}
defer conn.Close()
if _, err = conn.Write([]byte(state)); err != nil {
return false, err
}
return true, nil
}

@ -12,18 +12,15 @@ Plus, our team is remote-first and spread across the globe (from San Francisco t
## Onboarding buddy
Every new joiner has an onboarding buddy who should ideally be in the same timezone. The onboarding buddy should be able to help with any questions that pop up during the first few weeks. Of course, everyone is available to help, but it's good to have a dedicated person that you can go to with any questions.
Every new joiner at Infisical will have an onboarding buddy—a teammate in a similar time zone who's there to help you settle in. They are your go-to person for any questions that come up. Of course, everyone on the team is happy to help, but it's always nice to have a dedicated person who's there for you. Don't hesitate to reach out to your buddy if you're unsure about something or need a hand! Your onboarding buddy will set up regular syncs for your first two months—ideally at least 2-3 times a week.
If you're joining the engineering team, your onboarding buddy will:
1. Walk you through Infisical's development process and share any best practices to keep in mind when tackling tickets.
2. Be your go-to person if you are blocked or need to think through your sprint task.
3. Help you ship something small on day one!
## Onboarding Checklist
1. Join the weekly all-hands meeting. It typically happens on Mondays at 8:30am PT.
2. Ship something together on day one even if tiny! It feels great to hit the ground running, with a development environment all ready to go.
3. Check out the [Areas of Responsibility (AoR) Table](https://docs.google.com/spreadsheets/d/1RnXlGFg83Sgu0dh7ycuydsSobmFfI3A0XkGw7vrVxEI/edit?usp=sharing). This is helpful to know who you can ask about particular areas of Infisical. Feel free to add yourself to the areas you'd be most interested in diving into.
4. Read the [Infisical Strategy Doc](https://docs.google.com/document/d/1uV9IaahYwbZ5OuzDTFdQMSa1P0mpMOnetGB-xqf4G40).
5. Update your LinkedIn profile with one of [Infisical's official banners](https://drive.google.com/drive/u/0/folders/1oSNWjbpRl9oNYwxM_98IqzKs9fAskrb2) (if you want to). You can also coordinate your social posts in the #marketing Slack channel, so that we can boost it from Infisical's official social media accounts.
6. Over the first few weeks, feel free to schedule 1:1s with folks on the team to get to know them a bit better.
7. Change your Slack username in the users channel to `[NAME] (Infisical)`.
8. Go through the [technical overview](https://infisical.com/docs/internals/overview) of Infisical.
9. Request a company credit card (Maidul will be able to help with that).
Your hiring manager will send you an onboarding checklist doc for your first day.

@ -24,6 +24,9 @@ Make sure you keep copies for all receipts. If you expense something on a compan
You should default to using your company card in all cases - it has no transaction fees. If using your personal card is unavoidable, please reach out to Maidul to get it reimbursed manually.
## Training
For engineers, you're welcome to take an approved Udemy course. Please reach out to Maidul. For the GTM team, you may buy a book a month if it's relevant to your work.
# Equipment
@ -55,4 +58,4 @@ For any equipment related questions, please reach out to Maidul.
## Brex
We use Brex as our primary credit card provider. Don't have a company card yet? Reach out to Maidul.
We use Brex as our primary credit card provider. Don't have a company card yet? Reach out to Maidul.

@ -11,13 +11,6 @@ To interact with the Infisical API, you will need to obtain an access token. Fol
**FAQ**
<AccordionGroup>
<Accordion title="What happened to the Service Token and API Key authentication modes?">
The Service Token and API Key authentication modes are being deprecated in favor of [Identities](/documentation/platform/identity).
We expect to make a deprecation notice in the coming months alongside a larger deprecation initiative planned for Q1/Q2 2024.
With identities, we're improving significantly over the shortcomings of Service Tokens and API Keys. Amongst many differences, identities provide broader access to the Infisical API, use the same role-based
permission system as users, and come with many more configurable security measures.
</Accordion>
<Accordion title="Why can I not create, read, update, or delete an identity?">
There are a few reasons for why this might happen:

@ -7,21 +7,22 @@ The Server Admin Console provides **server administrators** with the ability to
customize settings and manage users for their entire Infisical instance.
<Note>
The first user to set up an account on your Infisical instance is designated as the server administrator by default.
The first user to set up an account on your Infisical instance is designated as
the server administrator by default.
</Note>
## Accessing the Server Admin Console
On the sidebar, tap on your initials to access the settings dropdown and press the **Server Admin Console** option.
![Access Server Admin Console](/images/platform/admin-panels/access-server-admin-panel.png)
## General Tab
Configure general settings for your instance.
![General Settings](/images/platform/admin-panels/admin-panel-general.png)
![General Settings 1](/images/platform/admin-panels/admin-panel-general-1.png)
### Allow User Signups
@ -39,6 +40,22 @@ If you're using SAML/LDAP/OIDC for only one organization on your instance, you c
By default, users signing up through SAML/LDAP/OIDC will still need to verify their email address to prevent email spoofing. This requirement can be skipped by enabling the switch to trust logins through the respective method.
### Notices
Auth consent content is displayed to users on the login page. It can be used to display important information, such as a maintenance message or a new feature announcement. Both HTML and Markdown formatting are supported, allowing for customized styling like below:
```
**You are entering a confidential website**
```
```html
<div style="font-weight: bold;">You are entering a confidential website</div>
```
![Auth Consent Usage](/images/platform/admin-panels/auth-consent-usage.png)
Page frame content is displayed as a header and footer in ALL protected pages. Like the auth consent content, both HTML and Markdown formatting are supported here as well.
![Page Frame Usage](/images/platform/admin-panels/page-frame-usage.png)
## Authentication Tab
@ -46,24 +63,23 @@ From this tab, you can configure which login methods are enabled for your instan
![Authentication Settings](/images/platform/admin-panels/admin-panel-auths.png)
## Rate Limit Tab
This tab allows you to set various rate limits for your Infisical instance. You do not need to redeploy when making changes to rate limits as these will be propagated automatically.
![Rate Limit Settings](/images/platform/admin-panels/admin-panel-rate-limits.png)
<Note>
Note that rate limit configuration is a paid feature. Please contact sales@infisical.com to purchase a license for its use.
Note that rate limit configuration is a paid feature. Please contact
sales@infisical.com to purchase a license for its use.
</Note>
## User Management Tab
From this tab, you can view all the users who have signed up for your instance. You can search for users using the search bar and remove them from your instance by pressing the **X** button on their respective row.
From this tab, you can view all the users who have signed up for your instance. You can search for users using the search bar and remove them from your instance by clicking on the three dots icon on the right. Additionally, the Server Admin can grant server administrator access to other users through this menu.
![User Management](/images/platform/admin-panels/admin-panel-users.png)
<Note>
Note that rate limit configuration is a paid feature. Please contact sales@infisical.com to purchase a license for its use.
Note that rate limit configuration is a paid feature. Please contact
sales@infisical.com to purchase a license for its use.
</Note>

Binary file not shown. (After: 488 KiB)

Binary file not shown. (Before: 151 KiB, After: 288 KiB)

Binary file not shown. (After: 393 KiB)

Binary file not shown. (After: 365 KiB)

@ -3,18 +3,21 @@ sidebarTitle: "InfisicalDynamicSecret CRD"
title: "Using the InfisicalDynamicSecret CRD"
description: "Learn how to generate dynamic secret leases in Infisical and sync them to your Kubernetes cluster."
---
## Overview
The **InfisicalDynamicSecret** CRD allows you to easily create and manage dynamic secret leases in Infisical and automatically sync them to your Kubernetes cluster as native **Kubernetes Secret** resources.
This means any Pod, Deployment, or other Kubernetes resource can make use of dynamic secrets from Infisical just like any other K8s secret.
## Overview
The **InfisicalDynamicSecret** CRD allows you to easily create and manage dynamic secret leases in Infisical and automatically sync them to your Kubernetes cluster as native **Kubernetes Secret** resources.
This means any Pod, Deployment, or other Kubernetes resource can make use of dynamic secrets from Infisical just like any other K8s secret.
This CRD offers the following features:
- **Generate a dynamic secret lease** in Infisical and track its lifecycle.
- **Write** the dynamic secret from Infisical to your cluster as native Kubernetes secret.
- **Automatically rotate** the dynamic secret value before it expires to make sure your cluster always has valid credentials.
- **Optionally trigger redeployments** of any workloads that consume the secret if you enable auto-reload.
### Prerequisites
- A project within Infisical.
- A [machine identity](/docs/documentation/platform/identities/overview) ready for use in Infisical that has permissions to create dynamic secret leases in the project.
- You have already configured a dynamic secret in Infisical.
@ -77,16 +80,19 @@ spec:
```
Apply the InfisicalDynamicSecret CRD to your cluster.
```bash
kubectl apply -f dynamic-secret-crd.yaml
```
After applying the InfisicalDynamicSecret CRD, you should notice that the dynamic secret lease has been created in Infisical and synced to your Kubernetes cluster. You can verify that the lease has been created by doing:
```bash
kubectl get secret <managed-secret-name> -o yaml
```
After getting the secret, you should see that the secret has data that contains the lease credentials.
```yaml
apiVersion: v1
data:
@ -102,7 +108,7 @@ kind: Secret
If you are fetching secrets from a self-hosted instance of Infisical, set the value of `hostAPI` to
`https://your-self-hosted-instance.com/api`
When `hostAPI` is not defined the operator fetches secrets from Infisical Cloud.
When `hostAPI` is not defined the operator fetches secrets from Infisical Cloud.
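For example, a self-hosted setup could set the field like this (the hostname is illustrative):
```yaml
spec:
  hostAPI: https://your-self-hosted-instance.com/api
```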
<Accordion title="Advanced use case">
If you have installed your Infisical instance within the same cluster as the Infisical operator, you can optionally access the Infisical backend's service directly without having to route through the public internet.
@ -120,36 +126,45 @@ kind: Secret
<Accordion title="leaseTTL">
The `leaseTTL` is a string-formatted duration that defines the time the lease should last for the dynamic secret.
The format of the field is `[duration][unit]` where `duration` is a number and `unit` is a string representing the unit of time.
The format of the field is `[duration][unit]` where `duration` is a number and `unit` is a string representing the unit of time.
The following units are supported:
- `s` for seconds (must be at least 5 seconds)
- `m` for minutes
- `h` for hours
- `d` for days
The following units are supported:
<Note>
The lease duration can be at most 1 day (24 hours), and the TTL must be less than the max TTL defined on the dynamic secret.
</Note>
</Accordion>
- `s` for seconds (must be at least 5 seconds)
- `m` for minutes
- `h` for hours
- `d` for days
<Note>
The lease duration can be at most 1 day (24 hours), and the TTL must be less than the max TTL defined on the dynamic secret.
</Note>
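For instance, assuming `leaseTTL` sits at the top level of the spec as in the full example above, a one-hour lease would be expressed as:
```yaml
spec:
  leaseTTL: 1h
```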
</Accordion>
<Accordion title="managedSecretReference">
The `managedSecretReference` field is used to define the Kubernetes secret where the dynamic secret lease should be stored. The required fields are `secretName` and `secretNamespace`.
```yaml
spec:
managedSecretReference:
secretName: <secret-name>
secretNamespace: default
```
```yaml
spec:
managedSecretReference:
secretName: <secret-name>
secretNamespace: default
```
<Accordion title="managedSecretReference.secretName">
The name of the Kubernetes secret where the dynamic secret lease should be stored.
</Accordion>
{" "}
<Accordion title="managedSecretReference.secretNamespace">
The namespace of the Kubernetes secret where the dynamic secret lease should be stored.
</Accordion>
<Accordion title="managedSecretReference.secretName">
The name of the Kubernetes secret where the dynamic secret lease should be
stored.
</Accordion>
{" "}
<Accordion title="managedSecretReference.secretNamespace">
The namespace of the Kubernetes secret where the dynamic secret lease should
be stored.
</Accordion>
<Accordion title="managedSecretReference.creationPolicy">
Creation polices allow you to control whether or not owner references should be added to the managed Kubernetes secret that is generated by the Infisical operator.
@ -165,32 +180,36 @@ kind: Secret
</Tip>
This field is optional.
</Accordion>
<Accordion title="managedSecretReference.secretType">
Override the default Opaque type for managed secrets with this field. Useful for creating kubernetes.io/dockerconfigjson secrets.
This field is optional.
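A sketch of how this could look, assuming the field sits alongside `secretName` and `secretNamespace`:
```yaml
spec:
  managedSecretReference:
    secretName: managed-secret
    secretNamespace: default
    secretType: kubernetes.io/dockerconfigjson
```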
</Accordion>
</Accordion>
<Accordion title="leaseRevocationPolicy">
The field is optional and will default to `None` if not defined.
The field is optional and will default to `None` if not defined.
The lease revocation policy defines what the operator should do with the leases created by the operator, when the InfisicalDynamicSecret CRD is deleted.
The lease revocation policy defines what the operator should do with the leases created by the operator, when the InfisicalDynamicSecret CRD is deleted.
Valid values are `None` and `Revoke`.
Valid values are `None` and `Revoke`.
Behavior of each policy:
- `None`: The operator will not revoke the leases it created when the InfisicalDynamicSecret CRD is deleted; the leases will remain active until they expire on their own.
- `Revoke`: The operator will revoke the leases created by the operator when the InfisicalDynamicSecret CRD is deleted.
Behavior of each policy:
- `None`: The operator will not revoke the leases it created when the InfisicalDynamicSecret CRD is deleted; the leases will remain active until they expire on their own.
- `Revoke`: The operator will revoke the leases created by the operator when the InfisicalDynamicSecret CRD is deleted.
```yaml
spec:
leaseRevocationPolicy: Revoke
```
```yaml
spec:
leaseRevocationPolicy: Revoke
```
</Accordion>
<Accordion title="dynamicSecret">
@ -205,29 +224,37 @@ kind: Secret
secretsPath: <secrets-path>
```
<Accordion title="dynamicSecret.secretName">
The name of the dynamic secret.
</Accordion>
{" "}
<Accordion title="dynamicSecret.projectId">
The project ID of where the dynamic secret is stored in Infisical.
</Accordion>
<Accordion title="dynamicSecret.secretName">
The name of the dynamic secret.
</Accordion>
<Accordion title="dynamicSecret.environmentSlug">
The environment slug of where the dynamic secret is stored in Infisical.
</Accordion>
{" "}
<Accordion title="dynamicSecret.secretsPath">
The path of where the dynamic secret is stored in Infisical. The root path is `/`.
</Accordion>
<Accordion title="dynamicSecret.projectId">
The project ID of where the dynamic secret is stored in Infisical.
</Accordion>
{" "}
<Accordion title="dynamicSecret.environmentSlug">
The environment slug of where the dynamic secret is stored in Infisical.
</Accordion>
{" "}
<Accordion title="dynamicSecret.secretsPath">
The path of where the dynamic secret is stored in Infisical. The root path is
`/`.
</Accordion>
</Accordion>
<Accordion title="authentication">
The `authentication` field dictates which authentication method to use when pushing secrets to Infisical.
The available authentication methods are `universalAuth`, `kubernetesAuth`, `awsIamAuth`, `azureAuth`, `gcpIdTokenAuth`, and `gcpIamAuth`.
The `authentication` field dictates which authentication method to use when pushing secrets to Infisical.
The available authentication methods are `universalAuth`, `kubernetesAuth`, `awsIamAuth`, `azureAuth`, `gcpIdTokenAuth`, and `gcpIamAuth`.
<Accordion title="universalAuth">
The universal authentication method is one of the easiest ways to get started with Infisical. Universal Auth works anywhere and is not tied to any specific cloud provider.
@ -246,7 +273,7 @@ kind: Secret
spec:
universalAuth:
credentialsRef:
secretName: <secret-name>
secretName: <secret-name>
secretNamespace: <secret-namespace>
```
@ -282,6 +309,7 @@ kind: Secret
name: <secret-name>
namespace: <secret-namespace>
```
</Accordion>
<Accordion title="awsIamAuth">
@ -316,12 +344,12 @@ kind: Secret
azureAuth:
identityId: <machine-identity-id>
```
</Accordion>
<Accordion title="gcpIamAuth">
The GCP IAM machine identity authentication method is used to authenticate with Infisical. The identity ID is stored in a field in the InfisicalSecret resource. This authentication method can be used both within and outside GCP environments.
[Read more about GCP Auth](/documentation/platform/identities/gcp-auth).
Valid fields:
- `identityId`: The identity ID of the machine identity you created.
- `serviceAccountKeyFilePath`: The path to the GCP service account key file.
@ -334,6 +362,7 @@ kind: Secret
identityId: <machine-identity-id>
serviceAccountKeyFilePath: </path-to-service-account-key-file.json>
```
</Accordion>
<Accordion title="gcpIdTokenAuth">
The GCP ID Token machine identity authentication method is used to authenticate with Infisical. The identity ID is stored in a field in the InfisicalSecret resource. This authentication method can only be used within GCP environments.
@ -349,11 +378,11 @@ kind: Secret
gcpIdTokenAuth:
identityId: <machine-identity-id>
```
</Accordion>
</Accordion>
<Accordion title="tls">
This block defines the TLS settings to use for connecting to the Infisical
instance.
@ -376,11 +405,11 @@ kind: Secret
secretNamespace: default
key: ca.crt
```
</Accordion>
</Accordion>
### Applying the InfisicalDynamicSecret CRD to your cluster
Once you have configured the `InfisicalDynamicSecret` CRD with the required fields, you can apply it to your cluster. After applying, you should notice that a lease has been created in Infisical and synced to your Kubernetes cluster.
@ -396,7 +425,7 @@ To address this, we've added functionality to automatically redeploy your deploy
#### Enabling auto redeploy
To enable auto redeployment you simply have to add the following annotation to the deployment that consumes a managed secret
To enable auto redeployment, you simply have to add the following annotation to the Deployment, StatefulSet, or DaemonSet that consumes a managed secret.
```yaml
secrets.infisical.com/auto-reload: "true"

@ -547,12 +547,14 @@ The `managedSecretReference` field is deprecated and will be removed in a future
Replace it with `managedKubeSecretReferences`, which now accepts an array of references to support multiple managed secrets in a single InfisicalSecret CRD.
Example:
```yaml
managedKubeSecretReferences:
- secretName: managed-secret
secretNamespace: default
creationPolicy: "Orphan"
managedKubeSecretReferences:
- secretName: managed-secret
secretNamespace: default
creationPolicy: "Orphan"
```
</Note>
<Accordion title="managedKubeSecretReferences">
@ -666,13 +668,13 @@ The example below assumes that the `BINARY_KEY_BASE64` secret is stored as a bas
The resulting managed secret will contain the decoded value of `BINARY_KEY_BASE64`.
```yaml
managedKubeSecretReferences:
secretName: managed-secret
secretNamespace: default
template:
includeAllSecrets: true
data:
BINARY_KEY: "{{ decodeBase64ToBytes .BINARY_KEY_BASE64.Value }}"
managedKubeSecretReferences:
secretName: managed-secret
secretNamespace: default
template:
includeAllSecrets: true
data:
BINARY_KEY: "{{ decodeBase64ToBytes .BINARY_KEY_BASE64.Value }}"
```
</Accordion>
@ -866,7 +868,7 @@ To address this, we added functionality to automatically redeploy your deploymen
#### Enabling auto redeploy
To enable auto redeployment, you simply have to add the following annotation to the Deployment, StatefulSet, or DaemonSet that consumes a managed secret.
To enable auto redeployment you simply have to add the following annotation to the deployment, statefulset, or daemonset that consumes a managed secret.
```yaml
secrets.infisical.com/auto-reload: "true"
@ -948,4 +950,4 @@ metadata:
type: Opaque
```
</Accordion>
</Accordion>

@ -75,7 +75,7 @@ via the UI or API for the third-party service you intend to sync secrets to.
2. <strong>Create Secret Sync:</strong> Configure a Secret Sync in the desired project by specifying the following parameters via the UI or API:
- <strong>Source:</strong> The project environment and folder path you wish to retrieve secrets from.
- <strong>Destination:</strong> The App Connection to utilize and the destination endpoint to deploy secrets to. These can vary between services.
- <strong>Options:</strong> Customize how secrets should be synced. Examples include adding a suffix or prefix to your secrets, or importing secrets from the destination on the initial sync.
- <strong>Options:</strong> Customize how secrets should be synced, such as whether or not secrets should be imported from the destination on the initial sync.
<Note>
Secret Syncs are the source of truth for connected third-party services. Any secret,

File diff suppressed because it is too large

@ -58,6 +58,7 @@
"classnames": "^2.5.1",
"cva": "npm:class-variance-authority@^0.7.1",
"date-fns": "^4.1.0",
"dompurify": "^3.2.4",
"file-saver": "^2.0.5",
"framer-motion": "^11.14.1",
"i18next": "^24.1.0",
@ -79,9 +80,11 @@
"react-hook-form": "^7.54.0",
"react-i18next": "^15.2.0",
"react-icons": "^5.4.0",
"react-markdown": "^10.0.1",
"react-select": "^5.9.0",
"react-toastify": "^10.0.6",
"redaxios": "^0.5.1",
"rehype-raw": "^7.0.0",
"tailwind-merge": "^2.5.5",
"tweetnacl": "^1.0.3",
"tweetnacl-util": "^0.15.1",

@ -0,0 +1,20 @@
import ReactMarkdown from "react-markdown";
import DOMPurify from "dompurify";
import rehypeRaw from "rehype-raw";
import { useServerConfig } from "@app/context";
export const Banner = () => {
const { config } = useServerConfig();
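// pageFrameContent comes from the server config; it is sanitized with DOMPurify before rehype-raw renders any embedded HTML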
// eslint-disable-next-line react/no-danger-with-children
return config.pageFrameContent ? (
<div className="h-[3vh] w-full text-center">
<ReactMarkdown rehypePlugins={[rehypeRaw]}>
{DOMPurify.sanitize(config.pageFrameContent)}
</ReactMarkdown>
</div>
) : (
<div />
);
};

@ -25,6 +25,7 @@ export const publicPaths = [
"/login/sso",
"/admin/signup",
"/shared/secret/[id]",
"/secret-request/secret/[id]",
"/share-secret"
];
@ -59,7 +60,8 @@ export const leaveConfirmDefaultMessage =
export enum SessionStorageKeys {
CLI_TERMINAL_TOKEN = "CLI_TERMINAL_TOKEN",
ORG_LOGIN_SUCCESS_REDIRECT_URL = "ORG_LOGIN_SUCCESS_REDIRECT_URL"
ORG_LOGIN_SUCCESS_REDIRECT_URL = "ORG_LOGIN_SUCCESS_REDIRECT_URL",
AUTH_CONSENT = "AUTH_CONSENT"
}
export const secretTagsColors = [

@ -21,6 +21,10 @@ export const ROUTE_PATHS = Object.freeze({
"/organization/secret-scanning",
"/_authenticate/_inject-org-details/_org-layout/organization/secret-scanning"
),
SecretSharing: setRoute(
"/organization/secret-sharing",
"/_authenticate/_inject-org-details/_org-layout/organization/secret-sharing"
),
SettingsPage: setRoute(
"/organization/settings",
"/_authenticate/_inject-org-details/_org-layout/organization/settings"
@ -285,6 +289,10 @@ export const ROUTE_PATHS = Object.freeze({
)
},
Public: {
ViewSharedSecretByIDPage: setRoute("/shared/secret/$secretId", "/shared/secret/$secretId")
ViewSharedSecretByIDPage: setRoute("/shared/secret/$secretId", "/shared/secret/$secretId"),
ViewSecretRequestByIDPage: setRoute(
"/secret-request/secret/$secretRequestId",
"/secret-request/secret/$secretRequestId"
)
}
});

@ -3,7 +3,8 @@ export {
useCreateAdminUser,
useUpdateAdminSlackConfig,
useUpdateServerConfig,
useUpdateServerEncryptionStrategy
useUpdateServerEncryptionStrategy,
useAdminGrantServerAdminAccess
} from "./mutation";
export {
useAdminGetUsers,

@ -70,6 +70,21 @@ export const useAdminDeleteUser = () => {
});
};
export const useAdminGrantServerAdminAccess = () => {
const queryClient = useQueryClient();
return useMutation({
mutationFn: async (userId: string) => {
await apiRequest.patch(`/api/v1/admin/user-management/users/${userId}/admin-access`);
return {};
},
onSuccess: () => {
queryClient.invalidateQueries({
queryKey: [adminStandaloneKeys.getUsers]
});
}
});
};
export const useUpdateAdminSlackConfig = () => {
const queryClient = useQueryClient();
return useMutation<AdminSlackConfig, object, TUpdateAdminSlackConfigDTO>({

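A hedged usage sketch for the new `useAdminGrantServerAdminAccess` hook above (the component, its props, and the import path are illustrative assumptions; only the hook and the PATCH endpoint come from the diff):
```tsx
// Assumed import path, for illustration only
import { useAdminGrantServerAdminAccess } from "@app/hooks/api";

export const GrantServerAdminButton = ({ userId }: { userId: string }) => {
  // Wraps PATCH /api/v1/admin/user-management/users/:userId/admin-access
  const { mutateAsync: grantServerAdminAccess } = useAdminGrantServerAdminAccess();

  return (
    <button type="button" onClick={() => grantServerAdminAccess(userId)}>
      Grant server admin access
    </button>
  );
};
```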
@ -22,6 +22,8 @@ export type TServerConfig = {
defaultAuthOrgAuthMethod?: string | null;
defaultAuthOrgAuthEnforced?: boolean | null;
enabledLoginMethods: LoginMethod[];
authConsentContent?: string;
pageFrameContent?: string;
};
export type TCreateAdminUserDTO = {

@ -122,6 +122,7 @@ export const eventToNameMap: { [K in EventType]: string } = {
"OIDC group membership mapping assigned user to groups",
[EventType.OIDC_GROUP_MEMBERSHIP_MAPPING_REMOVE_USER]:
"OIDC group membership mapping removed user from groups",
[EventType.SECRET_APPROVAL_REQUEST_REVIEW]: "Review Secret Approval Request",
[EventType.CREATE_KMIP_CLIENT]: "Create KMIP client",
[EventType.UPDATE_KMIP_CLIENT]: "Update KMIP client",
[EventType.DELETE_KMIP_CLIENT]: "Delete KMIP client",

@ -150,5 +150,6 @@ export enum EventType {
KMIP_OPERATION_ACTIVATE = "kmip-operation-activate",
KMIP_OPERATION_REVOKE = "kmip-operation-revoke",
KMIP_OPERATION_LOCATE = "kmip-operation-locate",
KMIP_OPERATION_REGISTER = "kmip-operation-register"
KMIP_OPERATION_REGISTER = "kmip-operation-register",
SECRET_APPROVAL_REQUEST_REVIEW = "secret-approval-request-review"
}

@ -13,9 +13,10 @@ export const useUpdateSecretApprovalReviewStatus = () => {
const queryClient = useQueryClient();
return useMutation<object, object, TUpdateSecretApprovalReviewStatusDTO>({
mutationFn: async ({ id, status }) => {
mutationFn: async ({ id, status, comment }) => {
const { data } = await apiRequest.post(`/api/v1/secret-approval-requests/${id}/review`, {
status
status,
comment
});
return data;
},

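A sketch of calling the extended review mutation now that it accepts an optional `comment` (the component, the request id, the import path, and the `ApprovalStatus.APPROVED` member name are assumptions; `id`, `status`, and `comment` come from the updated DTO):
```tsx
// Assumed import path and enum member name, for illustration only
import { ApprovalStatus, useUpdateSecretApprovalReviewStatus } from "@app/hooks/api";

export const ApproveWithCommentButton = ({ requestId }: { requestId: string }) => {
  const { mutateAsync: updateReviewStatus } = useUpdateSecretApprovalReviewStatus();

  const onApprove = () =>
    updateReviewStatus({
      id: requestId,
      status: ApprovalStatus.APPROVED,
      comment: "Checked the new value against staging" // optional reviewer comment
    });

  return (
    <button type="button" onClick={onApprove}>
      Approve with comment
    </button>
  );
};
```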
@ -44,6 +44,7 @@ export type TSecretApprovalRequest = {
reviewers: {
userId: string;
status: ApprovalStatus;
comment: string;
email: string;
firstName: string;
lastName: string;
@ -114,6 +115,7 @@ export type TGetSecretApprovalRequestDetails = {
export type TUpdateSecretApprovalReviewStatusDTO = {
status: ApprovalStatus;
comment?: string;
id: string;
};

@ -148,12 +148,13 @@ export const useUpdateFolder = () => {
const queryClient = useQueryClient();
return useMutation<object, object, TUpdateFolderDTO>({
mutationFn: async ({ path = "/", folderId, name, environment, projectId }) => {
mutationFn: async ({ path = "/", folderId, name, environment, projectId, description }) => {
const { data } = await apiRequest.patch(`/api/v1/folders/${folderId}`, {
name,
environment,
workspaceId: projectId,
path
path,
description
});
return data;
},

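A sketch showing the new optional `description` field being passed to `useUpdateFolder` (the ids, names, and import path are placeholders; the field names match the mutation above):
```tsx
// Assumed import path, for illustration only
import { useUpdateFolder } from "@app/hooks/api";

export const FolderDescriptionForm = () => {
  const { mutateAsync: updateFolder } = useUpdateFolder();

  const onSave = () =>
    updateFolder({
      projectId: "project-id", // placeholder
      environment: "dev",
      path: "/",
      folderId: "folder-id", // placeholder
      name: "backend",
      description: "Secrets consumed by the backend service" // newly supported field
    });

  return (
    <button type="button" onClick={onSave}>
      Save description
    </button>
  );
};
```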
@ -5,6 +5,7 @@ export enum ReservedFolders {
export type TSecretFolder = {
id: string;
name: string;
description?: string;
};
export type TGetProjectFoldersDTO = {
@ -24,6 +25,7 @@ export type TCreateFolderDTO = {
environment: string;
name: string;
path?: string;
description?: string | null;
};
export type TUpdateFolderDTO = {
@ -32,6 +34,7 @@ export type TUpdateFolderDTO = {
name: string;
folderId: string;
path?: string;
description?: string | null;
};
export type TDeleteFolderDTO = {
@ -49,5 +52,6 @@ export type TUpdateFolderBatchDTO = {
environment: string;
id: string;
path?: string;
description?: string | null;
}[];
};

@ -5,8 +5,13 @@ import { apiRequest } from "@app/config/request";
import { secretSharingKeys } from "./queries";
import {
TCreatedSharedSecret,
TCreateSecretRequestRequestDTO,
TCreateSharedSecretRequest,
TDeleteSharedSecretRequest,
TDeleteSecretRequestDTO,
TDeleteSharedSecretRequestDTO,
TRevealedSecretRequest,
TRevealSecretRequestValueRequest,
TSetSecretRequestValueRequest,
TSharedSecret
} from "./types";
@ -15,7 +20,7 @@ export const useCreateSharedSecret = () => {
return useMutation({
mutationFn: async (inputData: TCreateSharedSecretRequest) => {
const { data } = await apiRequest.post<TCreatedSharedSecret>(
"/api/v1/secret-sharing",
"/api/v1/secret-sharing/shared",
inputData
);
return data;
@ -30,7 +35,7 @@ export const useCreatePublicSharedSecret = () => {
return useMutation({
mutationFn: async (inputData: TCreateSharedSecretRequest) => {
const { data } = await apiRequest.post<TCreatedSharedSecret>(
"/api/v1/secret-sharing/public",
"/api/v1/secret-sharing/shared/public",
inputData
);
return data;
@ -40,12 +45,50 @@ export const useCreatePublicSharedSecret = () => {
});
};
export const useCreateSecretRequest = () => {
const queryClient = useQueryClient();
return useMutation({
mutationFn: async (inputData: TCreateSecretRequestRequestDTO) => {
const { data } = await apiRequest.post<TCreatedSharedSecret>(
"/api/v1/secret-sharing/requests",
inputData
);
return data;
},
onSuccess: () =>
queryClient.invalidateQueries({ queryKey: secretSharingKeys.allSecretRequests() })
});
};
export const useSetSecretRequestValue = () => {
return useMutation({
mutationFn: async (inputData: TSetSecretRequestValueRequest) => {
const { data } = await apiRequest.post<TSharedSecret>(
`/api/v1/secret-sharing/requests/${inputData.id}/set-value`,
inputData
);
return data;
}
});
};
export const useRevealSecretRequestValue = () => {
return useMutation({
mutationFn: async (inputData: TRevealSecretRequestValueRequest) => {
const { data } = await apiRequest.post<TRevealedSecretRequest>(
`/api/v1/secret-sharing/requests/${inputData.id}/reveal-value`,
inputData
);
return data.secretRequest;
}
});
};
export const useDeleteSharedSecret = () => {
const queryClient = useQueryClient();
return useMutation<TSharedSecret, { message: string }, { sharedSecretId: string }>({
mutationFn: async ({ sharedSecretId }: TDeleteSharedSecretRequest) => {
mutationFn: async ({ sharedSecretId }: TDeleteSharedSecretRequestDTO) => {
const { data } = await apiRequest.delete<TSharedSecret>(
`/api/v1/secret-sharing/${sharedSecretId}`
`/api/v1/secret-sharing/shared/${sharedSecretId}`
);
return data;
},
@ -53,3 +96,19 @@ export const useDeleteSharedSecret = () => {
queryClient.invalidateQueries({ queryKey: secretSharingKeys.allSharedSecrets() })
});
};
export const useDeleteSecretRequest = () => {
const queryClient = useQueryClient();
return useMutation<TSharedSecret, unknown, TDeleteSecretRequestDTO>({
mutationFn: async ({ secretRequestId }: TDeleteSecretRequestDTO) => {
const { data } = await apiRequest.delete<TSharedSecret>(
`/api/v1/secret-sharing/requests/${secretRequestId}`
);
return data;
},
onSuccess: () => {
queryClient.invalidateQueries({ queryKey: secretSharingKeys.allSecretRequests() });
}
});
};
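A hedged sketch of consuming the new secret-request mutations (the component, ids, and import path are placeholders; only the `id` and `secretRequestId` parameters are taken from the hooks above, and any other DTO fields are omitted):
```tsx
// Assumed import path, for illustration only
import { useDeleteSecretRequest, useRevealSecretRequestValue } from "@app/hooks/api";

export const SecretRequestActions = ({ secretRequestId }: { secretRequestId: string }) => {
  // POST /api/v1/secret-sharing/requests/:id/reveal-value
  const { mutateAsync: revealSecretRequestValue } = useRevealSecretRequestValue();
  // DELETE /api/v1/secret-sharing/requests/:id
  const { mutateAsync: deleteSecretRequest } = useDeleteSecretRequest();

  const onReveal = async () => {
    const secretRequest = await revealSecretRequestValue({ id: secretRequestId });
    // eslint-disable-next-line no-console
    console.log(secretRequest);
  };

  return (
    <>
      <button type="button" onClick={onReveal}>
        Reveal value
      </button>
      <button type="button" onClick={() => deleteSecretRequest({ secretRequestId })}>
        Delete request
      </button>
    </>
  );
};
```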

@ -2,16 +2,20 @@ import { useQuery } from "@tanstack/react-query";
import { apiRequest } from "@app/config/request";
import { TSharedSecret, TViewSharedSecretResponse } from "./types";
import { TGetSecretRequestByIdResponse, TSharedSecret, TViewSharedSecretResponse } from "./types";
export const secretSharingKeys = {
allSharedSecrets: () => ["sharedSecrets"] as const,
specificSharedSecrets: ({ offset, limit }: { offset: number; limit: number }) =>
[...secretSharingKeys.allSharedSecrets(), { offset, limit }] as const,
allSecretRequests: () => ["secretRequests"] as const,
specificSecretRequests: ({ offset, limit }: { offset: number; limit: number }) =>
[...secretSharingKeys.allSecretRequests(), { offset, limit }] as const,
getSecretById: (arg: { id: string; hashedHex: string | null; password?: string }) => [
"shared-secret",
arg
]
],
getSecretRequestById: (arg: { id: string }) => ["secret-request", arg] as const
};
export const useGetSharedSecrets = ({
@ -30,7 +34,7 @@ export const useGetSharedSecrets = ({
});
const { data } = await apiRequest.get<{ secrets: TSharedSecret[]; totalCount: number }>(
"/api/v1/secret-sharing/",
"/api/v1/secret-sharing/shared",
{
params
}
@ -40,6 +44,29 @@ export const useGetSharedSecrets = ({
});
};
export const useGetSecretRequests = ({
offset = 0,
limit = 25
}: {
offset: number;
limit: number;
}) => {
return useQuery({
queryKey: secretSharingKeys.specificSecretRequests({ offset, limit }),
queryFn: async () => {
const { data } = await apiRequest.get<{ secrets: TSharedSecret[]; totalCount: number }>(
"/api/v1/secret-sharing/requests",
{
params: {
offset: String(offset),
limit: String(limit)
}
}
);
return data;
}
});
};
export const useGetActiveSharedSecretById = ({
sharedSecretId,
hashedHex,
@ -53,7 +80,7 @@ export const useGetActiveSharedSecretById = ({
queryKey: secretSharingKeys.getSecretById({ id: sharedSecretId, hashedHex, password }),
queryFn: async () => {
const { data } = await apiRequest.post<TViewSharedSecretResponse>(
`/api/v1/secret-sharing/public/${sharedSecretId}`,
`/api/v1/secret-sharing/shared/public/${sharedSecretId}`,
{
...(hashedHex && { hashedHex }),
password
@ -65,3 +92,16 @@ export const useGetActiveSharedSecretById = ({
enabled: Boolean(sharedSecretId)
});
};
export const useGetSecretRequestById = ({ secretRequestId }: { secretRequestId: string }) => {
return useQuery({
queryKey: secretSharingKeys.getSecretRequestById({ id: secretRequestId }),
queryFn: async () => {
const { data } = await apiRequest.get<TGetSecretRequestByIdResponse>(
`/api/v1/secret-sharing/requests/${secretRequestId}`
);
return data.secretRequest;
}
});
};
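A sketch of paginating the new secret-request list query (the component markup and import path are illustrative, and the page size of 25 mirrors the hook's default; only `useGetSecretRequests` and its `{ offset, limit }` arguments come from above):
```tsx
import { useState } from "react";

// Assumed import path, for illustration only
import { useGetSecretRequests } from "@app/hooks/api";

export const SecretRequestsList = () => {
  const [page, setPage] = useState(0);
  const { data, isLoading } = useGetSecretRequests({ offset: page * 25, limit: 25 });

  if (isLoading || !data) return <div>Loading...</div>;

  return (
    <div>
      <p>{data.totalCount} secret requests</p>
      <button type="button" onClick={() => setPage((p) => p + 1)}>
        Next page
      </button>
    </div>
  );
};
```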
