Compare commits
229 Commits
daniel/cli ... misc/impro
f957b9d970
c08fcc6f5e
06c103c10a
b6a73459a8
536f51f6ba
a9b72b2da3
a3552d00d1
fdd67c89b3
79e9b1b2ae
b4f1bec1a9
ab79342743
1957531ac4
61ae0e2fc7
87b571d6ff
1e6af8ad8f
a771ddf859
c4cd6909bb
49642480d3
b667dccc0d
fdda247120
ee8a88d062
33349839cd
8f3883c7d4
38cfb7fd41
a331eb8dc4
2dcb409d3b
39bcb73f3d
52189111d7
f369761920
8eb22630b6
d650fd68c0
387c899193
37882e6344
68a1aa6f46
fa18ca41ac
8485fdc1cd
49ae2386c0
f2b1f3f0e7
69aa20e35c
524c7ae78f
e13f7a7486
1867fb2fc4
5dd144b97b
b1b430e003
fb09980413
3b36cb8b3d
be6a98d0bb
f8e1ed09d2
5c71116be6
07cc4fd1ab
74bdbc0724
a0d5c67456
db4f4d8f28
d6f6f51d16
79a0f3d701
46912c4c3c
6636377cb5
26320ddce4
f5964040d7
dcaa7f1fce
a4119ee1bb
74f866715f
667f696d26
5f3938c33d
07845ad6af
17fa72be13
bf3e93460a
306709cde6
c41518c822
f0f2905789
212a7b49f0
22e3fcb43c
93b65a1534
039882e78b
f0f51089fe
447141ab1f
d2ba436338
ad0d281629
c8638479a8
8aa75484f3
66d70f5a25
8e7cf5f9ac
f9f79cb69e
4235be4be9
5c3f2e66fd
a37b3ccede
d64eb4b901
6e882aa46e
bf4db0a9ff
3a3e3a7afc
cdba78b51d
0c324e804c
47aca3f3e2
31ef1a2183
66a6f9de71
6333eccc4a
0af2b113df
63a7941047
edeac08cb5
019b0ae09a
1d00bb0a64
d96f1320ed
50dbefeb48
56ac2c6780
c2f16da411
8223aee2ef
5bd2af9621
b3df6ce6b5
e12eb5347d
83a4426d31
3fd1fbc355
306d2b4bd9
c2c66af1f9
7ae65478aa
b1594e65c6
0bce5b1daa
207db93483
972f6a4887
6e1bece9d9
63e8bc1845
4f92663b66
a66a6790c0
bde853d280
acda627236
875afbb4d6
56f50a18dc
801c438d05
baba411502
4c20ac6564
4e8556dec2
2d7b9ec1e4
8bb9ed4394
e4246ae85f
f24067542f
a7f5a61f37
b5fd7698d8
61c3102573
d6a5bf9d50
70f63b3190
2b0670a409
cc25639157
5ff30aed10
656ec4bf16
0bac9a8e02
5142e6e5f6
5c0e5a8ae0
49c735caf9
b4de2ea85d
8b8baf1ef2
2a89b872c5
2d2d9a5987
a20a60850b
35e38c23dd
b79e61c86b
e555d3129d
a41883137c
c414bf6c39
9b782a9da6
497c0cf63d
93761f37ea
68e530e5d2
20b1cdf909
4bae65cc55
6da5f12855
7a242c4976
b01d381993
1ac18fcf0c
8d5ef5f4d9
35b5253853
99d59a38d5
9ab1fce0e0
9992fbf3dd
3ca596d4af
5e0d64525f
8bcf936b91
1a2508d91a
e81a77652f
1c95b3abe7
1f3c72b997
e55b981cea
49d4e67e07
a54d156bf0
f3fc898232
c61602370e
5178663797
f04f3aee25
e5333e2718
f27d9f8cee
cbd568b714
b330c5570d
d222bbf131
961c6391a8
d68d7df0f8
c44c7810ce
b7893a6a72
7a3d425b0e
bd570bd02f
246b8728a4
00415e1a87
ad354c106e
26778d92d3
b135ba263c
9b7ef55ad7
872f8bdad8
80b0dc6895
2d51445dd9
20898c00c6
2200bd646e
fb69236f47
918734b26b
729c75112b
738e8cfc5c
6daeed68a0
31a499c9cd
358ca3decd
0899fdb7d5
f9957e111c
1193e33890
ec64753795
c908310f6e
ee2b8a594a
3ae27e088f
393c0c9e90
5e453ab8a6
273c78c0a5
1bcc742466
1fc9e60254
126e385046
2f932ad103
26  .github/resources/rename_migration_files.py  vendored  new file
@@ -0,0 +1,26 @@
import os
from datetime import datetime, timedelta


def rename_migrations():
    migration_folder = "./backend/src/db/migrations"
    with open("added_files.txt", "r") as file:
        changed_files = file.readlines()

    # Find the latest file among the changed files
    latest_timestamp = datetime.now()  # utc time
    for file_path in changed_files:
        file_path = file_path.strip()
        # each new file bump by 1s
        latest_timestamp = latest_timestamp + timedelta(seconds=1)

        new_filename = os.path.join(migration_folder, latest_timestamp.strftime("%Y%m%d%H%M%S") + f"_{file_path.split('_')[1]}")
        old_filename = os.path.join(migration_folder, file_path)
        os.rename(old_filename, new_filename)
        print(f"Renamed {old_filename} to {new_filename}")

    if len(changed_files) == 0:
        print("No new files added to migration folder")


if __name__ == "__main__":
    rename_migrations()
38  .github/workflows/build-patroni-docker-img.yml  vendored  new file
@@ -0,0 +1,38 @@
name: Build patroni
on: [workflow_dispatch]

jobs:
  patroni-image:
    name: Build patroni
    runs-on: ubuntu-latest
    steps:
      - name: ☁️ Checkout source
        uses: actions/checkout@v3
        with:
          repository: 'zalando/patroni'
      - name: Save commit hashes for tag
        id: commit
        uses: pr-mpt/actions-commit-hash@v2
      - name: 🔧 Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: 🐋 Login to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Set up Depot CLI
        uses: depot/setup-action@v1
      - name: 🏗️ Build backend and push to docker hub
        uses: depot/build-push-action@v1
        with:
          project: 64mmf0n610
          token: ${{ secrets.DEPOT_PROJECT_TOKEN }}
          push: true
          context: .
          file: Dockerfile
          tags: |
            infisical/patroni:${{ steps.commit.outputs.short }}
            infisical/patroni:latest
          platforms: linux/amd64,linux/arm64
@@ -1,6 +1,8 @@
name: Build and release CLI

on:
  workflow_dispatch:

  push:
    # run only against tags
    tags:
@@ -14,6 +16,12 @@ jobs:
  cli-integration-tests:
    name: Run tests before deployment
    uses: ./.github/workflows/run-cli-tests.yml
    secrets:
      CLI_TESTS_UA_CLIENT_ID: ${{ secrets.CLI_TESTS_UA_CLIENT_ID }}
      CLI_TESTS_UA_CLIENT_SECRET: ${{ secrets.CLI_TESTS_UA_CLIENT_SECRET }}
      CLI_TESTS_SERVICE_TOKEN: ${{ secrets.CLI_TESTS_SERVICE_TOKEN }}
      CLI_TESTS_PROJECT_ID: ${{ secrets.CLI_TESTS_PROJECT_ID }}
      CLI_TESTS_ENV_SLUG: ${{ secrets.CLI_TESTS_ENV_SLUG }}

  goreleaser:
    runs-on: ubuntu-20.04
13  .github/workflows/run-cli-tests.yml  vendored
@@ -6,7 +6,20 @@ on:
    paths:
      - "cli/**"

  workflow_dispatch:

  workflow_call:
    secrets:
      CLI_TESTS_UA_CLIENT_ID:
        required: true
      CLI_TESTS_UA_CLIENT_SECRET:
        required: true
      CLI_TESTS_SERVICE_TOKEN:
        required: true
      CLI_TESTS_PROJECT_ID:
        required: true
      CLI_TESTS_ENV_SLUG:
        required: true

jobs:
  test:
48  .github/workflows/update-be-new-migration-latest-timestamp.yml  vendored  new file
@@ -0,0 +1,48 @@
name: Rename Migrations

on:
  pull_request:
    types: [closed]
    paths:
      - 'backend/src/db/migrations/**'

jobs:
  rename:
    runs-on: ubuntu-latest
    if: github.event.pull_request.merged == true

    steps:
      - name: Check out repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Get list of newly added files in migration folder
        run: |
          git diff --name-status HEAD^ HEAD backend/src/db/migrations | grep '^A' | cut -f2 | xargs -n1 basename > added_files.txt
          if [ ! -s added_files.txt ]; then
            echo "No new files added. Skipping"
            echo "SKIP_RENAME=true" >> $GITHUB_ENV
          fi

      - name: Script to rename migrations
        if: env.SKIP_RENAME != 'true'
        run: python .github/resources/rename_migration_files.py

      - name: Commit and push changes
        if: env.SKIP_RENAME != 'true'
        run: |
          git config user.name github-actions
          git config user.email github-actions@github.com
          git add ./backend/src/db/migrations
          rm added_files.txt
          git commit -m "chore: renamed new migration files to latest timestamp (gh-action)"

      - name: Create Pull Request
        if: env.SKIP_RENAME != 'true'
        uses: peter-evans/create-pull-request@v6
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          commit-message: 'chore: renamed new migration files to latest UTC (gh-action)'
          title: 'GH Action: rename new migration file timestamp'
          branch-suffix: timestamp
README.md
@@ -76,7 +76,7 @@ Check out the [Quickstart Guides](https://infisical.com/docs/getting-started/int

 | Use Infisical Cloud | Deploy Infisical on premise |
 | ------------------- | --------------------------- |
-| The fastest and most reliable way to <br> get started with Infisical is signing up <br> for free to [Infisical Cloud](https://app.infisical.com/login). | <a href="https://infisical.com/docs/self-hosting/deployment-options/aws-ec2"><img src=".github/images/deploy-to-aws.png" width="150" width="300" /></a> <a href="https://infisical.com/docs/self-hosting/deployment-options/digital-ocean-marketplace" alt="Deploy to DigitalOcean"> <img width="217" alt="Deploy to DO" src="https://www.deploytodo.com/do-btn-blue.svg"/> </a> <br> View all [deployment options](https://infisical.com/docs/self-hosting/overview) |
+| The fastest and most reliable way to <br> get started with Infisical is signing up <br> for free to [Infisical Cloud](https://app.infisical.com/login). | <br> View all [deployment options](https://infisical.com/docs/self-hosting/overview) |

 ### Run Infisical locally
117  backend/package-lock.json  generated
@@ -45,6 +45,7 @@
        "jsonwebtoken": "^9.0.2",
        "jsrp": "^0.2.4",
        "knex": "^3.0.1",
+       "ldapjs": "^3.0.7",
        "libsodium-wrappers": "^0.7.13",
        "lodash.isequal": "^4.5.0",
        "ms": "^2.1.3",
@@ -2510,6 +2511,83 @@
          "@jridgewell/sourcemap-codec": "^1.4.10"
        }
      },
+     "node_modules/@ldapjs/asn1": {
+       "version": "2.0.0",
+       "resolved": "https://registry.npmjs.org/@ldapjs/asn1/-/asn1-2.0.0.tgz",
+       "integrity": "sha512-G9+DkEOirNgdPmD0I8nu57ygQJKOOgFEMKknEuQvIHbGLwP3ny1mY+OTUYLCbCaGJP4sox5eYgBJRuSUpnAddA=="
+     },
+     "node_modules/@ldapjs/attribute": {
+       "version": "1.0.0",
+       "resolved": "https://registry.npmjs.org/@ldapjs/attribute/-/attribute-1.0.0.tgz",
+       "integrity": "sha512-ptMl2d/5xJ0q+RgmnqOi3Zgwk/TMJYG7dYMC0Keko+yZU6n+oFM59MjQOUht5pxJeS4FWrImhu/LebX24vJNRQ==",
+       "dependencies": {
+         "@ldapjs/asn1": "2.0.0",
+         "@ldapjs/protocol": "^1.2.1",
+         "process-warning": "^2.1.0"
+       }
+     },
+     "node_modules/@ldapjs/change": {
+       "version": "1.0.0",
+       "resolved": "https://registry.npmjs.org/@ldapjs/change/-/change-1.0.0.tgz",
+       "integrity": "sha512-EOQNFH1RIku3M1s0OAJOzGfAohuFYXFY4s73wOhRm4KFGhmQQ7MChOh2YtYu9Kwgvuq1B0xKciXVzHCGkB5V+Q==",
+       "dependencies": {
+         "@ldapjs/asn1": "2.0.0",
+         "@ldapjs/attribute": "1.0.0"
+       }
+     },
+     "node_modules/@ldapjs/controls": {
+       "version": "2.1.0",
+       "resolved": "https://registry.npmjs.org/@ldapjs/controls/-/controls-2.1.0.tgz",
+       "integrity": "sha512-2pFdD1yRC9V9hXfAWvCCO2RRWK9OdIEcJIos/9cCVP9O4k72BY1bLDQQ4KpUoJnl4y/JoD4iFgM+YWT3IfITWw==",
+       "dependencies": {
+         "@ldapjs/asn1": "^1.2.0",
+         "@ldapjs/protocol": "^1.2.1"
+       }
+     },
+     "node_modules/@ldapjs/controls/node_modules/@ldapjs/asn1": {
+       "version": "1.2.0",
+       "resolved": "https://registry.npmjs.org/@ldapjs/asn1/-/asn1-1.2.0.tgz",
+       "integrity": "sha512-KX/qQJ2xxzvO2/WOvr1UdQ+8P5dVvuOLk/C9b1bIkXxZss8BaR28njXdPgFCpj5aHaf1t8PmuVnea+N9YG9YMw=="
+     },
+     "node_modules/@ldapjs/dn": {
+       "version": "1.1.0",
+       "resolved": "https://registry.npmjs.org/@ldapjs/dn/-/dn-1.1.0.tgz",
+       "integrity": "sha512-R72zH5ZeBj/Fujf/yBu78YzpJjJXG46YHFo5E4W1EqfNpo1UsVPqdLrRMXeKIsJT3x9dJVIfR6OpzgINlKpi0A==",
+       "dependencies": {
+         "@ldapjs/asn1": "2.0.0",
+         "process-warning": "^2.1.0"
+       }
+     },
+     "node_modules/@ldapjs/filter": {
+       "version": "2.1.1",
+       "resolved": "https://registry.npmjs.org/@ldapjs/filter/-/filter-2.1.1.tgz",
+       "integrity": "sha512-TwPK5eEgNdUO1ABPBUQabcZ+h9heDORE4V9WNZqCtYLKc06+6+UAJ3IAbr0L0bYTnkkWC/JEQD2F+zAFsuikNw==",
+       "dependencies": {
+         "@ldapjs/asn1": "2.0.0",
+         "@ldapjs/protocol": "^1.2.1",
+         "process-warning": "^2.1.0"
+       }
+     },
+     "node_modules/@ldapjs/messages": {
+       "version": "1.3.0",
+       "resolved": "https://registry.npmjs.org/@ldapjs/messages/-/messages-1.3.0.tgz",
+       "integrity": "sha512-K7xZpXJ21bj92jS35wtRbdcNrwmxAtPwy4myeh9duy/eR3xQKvikVycbdWVzkYEAVE5Ce520VXNOwCHjomjCZw==",
+       "dependencies": {
+         "@ldapjs/asn1": "^2.0.0",
+         "@ldapjs/attribute": "^1.0.0",
+         "@ldapjs/change": "^1.0.0",
+         "@ldapjs/controls": "^2.1.0",
+         "@ldapjs/dn": "^1.1.0",
+         "@ldapjs/filter": "^2.1.1",
+         "@ldapjs/protocol": "^1.2.1",
+         "process-warning": "^2.2.0"
+       }
+     },
+     "node_modules/@ldapjs/protocol": {
+       "version": "1.2.1",
+       "resolved": "https://registry.npmjs.org/@ldapjs/protocol/-/protocol-1.2.1.tgz",
+       "integrity": "sha512-O89xFDLW2gBoZWNXuXpBSM32/KealKCTb3JGtJdtUQc7RjAk8XzrRgyz02cPAwGKwKPxy0ivuC7UP9bmN87egQ=="
+     },
      "node_modules/@lukeed/ms": {
        "version": "2.0.1",
        "resolved": "https://registry.npmjs.org/@lukeed/ms/-/ms-2.0.1.tgz",
@@ -9304,15 +9382,7 @@
        "node": ">=0.8.0"
      }
    },
-   "node_modules/ldapauth-fork/node_modules/lru-cache": {
-     "version": "7.18.3",
-     "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.18.3.tgz",
-     "integrity": "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==",
-     "engines": {
-       "node": ">=12"
-     }
-   },
-   "node_modules/ldapjs": {
+   "node_modules/ldapauth-fork/node_modules/ldapjs": {
      "version": "2.3.3",
      "resolved": "https://registry.npmjs.org/ldapjs/-/ldapjs-2.3.3.tgz",
      "integrity": "sha512-75QiiLJV/PQqtpH+HGls44dXweviFwQ6SiIK27EqzKQ5jU/7UFrl2E5nLdQ3IYRBzJ/AVFJI66u0MZ0uofKYwg==",
@@ -9330,6 +9400,35 @@
        "node": ">=10.13.0"
      }
    },
+   "node_modules/ldapauth-fork/node_modules/lru-cache": {
+     "version": "7.18.3",
+     "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.18.3.tgz",
+     "integrity": "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==",
+     "engines": {
+       "node": ">=12"
+     }
+   },
+   "node_modules/ldapjs": {
+     "version": "3.0.7",
+     "resolved": "https://registry.npmjs.org/ldapjs/-/ldapjs-3.0.7.tgz",
+     "integrity": "sha512-1ky+WrN+4CFMuoekUOv7Y1037XWdjKpu0xAPwSP+9KdvmV9PG+qOKlssDV6a+U32apwxdD3is/BZcWOYzN30cg==",
+     "dependencies": {
+       "@ldapjs/asn1": "^2.0.0",
+       "@ldapjs/attribute": "^1.0.0",
+       "@ldapjs/change": "^1.0.0",
+       "@ldapjs/controls": "^2.1.0",
+       "@ldapjs/dn": "^1.1.0",
+       "@ldapjs/filter": "^2.1.1",
+       "@ldapjs/messages": "^1.3.0",
+       "@ldapjs/protocol": "^1.2.1",
+       "abstract-logging": "^2.0.1",
+       "assert-plus": "^1.0.0",
+       "backoff": "^2.5.0",
+       "once": "^1.4.0",
+       "vasync": "^2.2.1",
+       "verror": "^1.10.1"
+     }
+   },
    "node_modules/leven": {
      "version": "2.1.0",
      "resolved": "https://registry.npmjs.org/leven/-/leven-2.1.0.tgz",

backend/package.json
@@ -106,6 +106,7 @@
    "jsonwebtoken": "^9.0.2",
    "jsrp": "^0.2.4",
    "knex": "^3.0.1",
+   "ldapjs": "^3.0.7",
    "libsodium-wrappers": "^0.7.13",
    "lodash.isequal": "^4.5.0",
    "ms": "^2.1.3",
2  backend/src/@types/fastify.d.ts  vendored
@@ -3,6 +3,7 @@ import "fastify";
import { TUsers } from "@app/db/schemas";
import { TAuditLogServiceFactory } from "@app/ee/services/audit-log/audit-log-service";
import { TCreateAuditLogDTO } from "@app/ee/services/audit-log/audit-log-types";
+import { TAuditLogStreamServiceFactory } from "@app/ee/services/audit-log-stream/audit-log-stream-service";
import { TDynamicSecretServiceFactory } from "@app/ee/services/dynamic-secret/dynamic-secret-service";
import { TDynamicSecretLeaseServiceFactory } from "@app/ee/services/dynamic-secret-lease/dynamic-secret-lease-service";
import { TGroupServiceFactory } from "@app/ee/services/group/group-service";
@@ -120,6 +121,7 @@ declare module "fastify" {
      scim: TScimServiceFactory;
      ldap: TLdapConfigServiceFactory;
      auditLog: TAuditLogServiceFactory;
+     auditLogStream: TAuditLogStreamServiceFactory;
      secretScanning: TSecretScanningServiceFactory;
      license: TLicenseServiceFactory;
      trustedIp: TTrustedIpServiceFactory;
12  backend/src/@types/knex.d.ts  vendored
@@ -7,6 +7,9 @@ import {
  TApiKeysUpdate,
  TAuditLogs,
  TAuditLogsInsert,
+ TAuditLogStreams,
+ TAuditLogStreamsInsert,
+ TAuditLogStreamsUpdate,
  TAuditLogsUpdate,
  TAuthTokens,
  TAuthTokenSessions,
@@ -74,6 +77,9 @@ import {
  TLdapConfigs,
  TLdapConfigsInsert,
  TLdapConfigsUpdate,
+ TLdapGroupMaps,
+ TLdapGroupMapsInsert,
+ TLdapGroupMapsUpdate,
  TOrganizations,
  TOrganizationsInsert,
  TOrganizationsUpdate,
@@ -398,8 +404,14 @@ declare module "knex/types/tables" {
    >;
    [TableName.SamlConfig]: Knex.CompositeTableType<TSamlConfigs, TSamlConfigsInsert, TSamlConfigsUpdate>;
    [TableName.LdapConfig]: Knex.CompositeTableType<TLdapConfigs, TLdapConfigsInsert, TLdapConfigsUpdate>;
+   [TableName.LdapGroupMap]: Knex.CompositeTableType<TLdapGroupMaps, TLdapGroupMapsInsert, TLdapGroupMapsUpdate>;
    [TableName.OrgBot]: Knex.CompositeTableType<TOrgBots, TOrgBotsInsert, TOrgBotsUpdate>;
    [TableName.AuditLog]: Knex.CompositeTableType<TAuditLogs, TAuditLogsInsert, TAuditLogsUpdate>;
+   [TableName.AuditLogStream]: Knex.CompositeTableType<
+     TAuditLogStreams,
+     TAuditLogStreamsInsert,
+     TAuditLogStreamsUpdate
+   >;
    [TableName.GitAppInstallSession]: Knex.CompositeTableType<
      TGitAppInstallSessions,
      TGitAppInstallSessionsInsert,
@@ -0,0 +1,34 @@
import { Knex } from "knex";

import { TableName } from "../schemas";
import { createOnUpdateTrigger, dropOnUpdateTrigger } from "../utils";

export async function up(knex: Knex): Promise<void> {
  if (!(await knex.schema.hasTable(TableName.LdapGroupMap))) {
    await knex.schema.createTable(TableName.LdapGroupMap, (t) => {
      t.uuid("id", { primaryKey: true }).defaultTo(knex.fn.uuid());
      t.uuid("ldapConfigId").notNullable();
      t.foreign("ldapConfigId").references("id").inTable(TableName.LdapConfig).onDelete("CASCADE");
      t.string("ldapGroupCN").notNullable();
      t.uuid("groupId").notNullable();
      t.foreign("groupId").references("id").inTable(TableName.Groups).onDelete("CASCADE");
      t.unique(["ldapGroupCN", "groupId", "ldapConfigId"]);
    });
  }

  await createOnUpdateTrigger(knex, TableName.LdapGroupMap);

  await knex.schema.alterTable(TableName.LdapConfig, (t) => {
    t.string("groupSearchBase").notNullable().defaultTo("");
    t.string("groupSearchFilter").notNullable().defaultTo("");
  });
}

export async function down(knex: Knex): Promise<void> {
  await knex.schema.dropTableIfExists(TableName.LdapGroupMap);
  await dropOnUpdateTrigger(knex, TableName.LdapGroupMap);
  await knex.schema.alterTable(TableName.LdapConfig, (t) => {
    t.dropColumn("groupSearchBase");
    t.dropColumn("groupSearchFilter");
  });
}
@@ -0,0 +1,15 @@
import { Knex } from "knex";

import { TableName } from "../schemas";

export async function up(knex: Knex): Promise<void> {
  await knex.schema.alterTable(TableName.LdapConfig, (t) => {
    t.string("searchFilter").notNullable().defaultTo("");
  });
}

export async function down(knex: Knex): Promise<void> {
  await knex.schema.alterTable(TableName.LdapConfig, (t) => {
    t.dropColumn("searchFilter");
  });
}
28  backend/src/db/migrations/20240429154610_audit-log-index.ts  new file
@@ -0,0 +1,28 @@
import { Knex } from "knex";

import { TableName } from "../schemas";

export async function up(knex: Knex): Promise<void> {
  const doesOrgIdExist = await knex.schema.hasColumn(TableName.AuditLog, "orgId");
  const doesProjectIdExist = await knex.schema.hasColumn(TableName.AuditLog, "projectId");
  const doesCreatedAtExist = await knex.schema.hasColumn(TableName.AuditLog, "createdAt");
  if (await knex.schema.hasTable(TableName.AuditLog)) {
    await knex.schema.alterTable(TableName.AuditLog, (t) => {
      if (doesProjectIdExist && doesCreatedAtExist) t.index(["projectId", "createdAt"]);
      if (doesOrgIdExist && doesCreatedAtExist) t.index(["orgId", "createdAt"]);
    });
  }
}

export async function down(knex: Knex): Promise<void> {
  const doesOrgIdExist = await knex.schema.hasColumn(TableName.AuditLog, "orgId");
  const doesProjectIdExist = await knex.schema.hasColumn(TableName.AuditLog, "projectId");
  const doesCreatedAtExist = await knex.schema.hasColumn(TableName.AuditLog, "createdAt");

  if (await knex.schema.hasTable(TableName.AuditLog)) {
    await knex.schema.alterTable(TableName.AuditLog, (t) => {
      if (doesProjectIdExist && doesCreatedAtExist) t.dropIndex(["projectId", "createdAt"]);
      if (doesOrgIdExist && doesCreatedAtExist) t.dropIndex(["orgId", "createdAt"]);
    });
  }
}
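The composite indexes above target the audit-log list queries. As a sketch (the exact query is not part of this diff; function and variable names here are illustrative), the access pattern they serve looks like:

    import { Knex } from "knex";

    // Hypothetical listing query: filter by orgId, newest first. The
    // ["orgId", "createdAt"] index added above covers this filter + sort;
    // the ["projectId", "createdAt"] index covers the project-scoped variant.
    async function listRecentOrgAuditLogs(knex: Knex, orgId: string) {
      return knex("audit_logs") // TableName.AuditLog maps to "audit_logs" (see models.ts below)
        .where({ orgId })
        .orderBy("createdAt", "desc")
        .limit(20);
    }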
28  backend/src/db/migrations/20240503101144_audit-log-stream.ts  new file
@@ -0,0 +1,28 @@
import { Knex } from "knex";

import { TableName } from "../schemas";
import { createOnUpdateTrigger, dropOnUpdateTrigger } from "../utils";

export async function up(knex: Knex): Promise<void> {
  if (!(await knex.schema.hasTable(TableName.AuditLogStream))) {
    await knex.schema.createTable(TableName.AuditLogStream, (t) => {
      t.uuid("id", { primaryKey: true }).defaultTo(knex.fn.uuid());
      t.string("url").notNullable();
      t.text("encryptedHeadersCiphertext");
      t.text("encryptedHeadersIV");
      t.text("encryptedHeadersTag");
      t.string("encryptedHeadersAlgorithm");
      t.string("encryptedHeadersKeyEncoding");
      t.uuid("orgId").notNullable();
      t.foreign("orgId").references("id").inTable(TableName.Organization).onDelete("CASCADE");
      t.timestamps(true, true, true);
    });
  }

  await createOnUpdateTrigger(knex, TableName.AuditLogStream);
}

export async function down(knex: Knex): Promise<void> {
  await dropOnUpdateTrigger(knex, TableName.AuditLogStream);
  await knex.schema.dropTableIfExists(TableName.AuditLogStream);
}
25  backend/src/db/schemas/audit-log-streams.ts  new file
@@ -0,0 +1,25 @@
// Code generated by automation script, DO NOT EDIT.
// Automated by pulling database and generating zod schema
// To update. Just run npm run generate:schema
// Written by akhilmhdh.

import { z } from "zod";

import { TImmutableDBKeys } from "./models";

export const AuditLogStreamsSchema = z.object({
  id: z.string().uuid(),
  url: z.string(),
  encryptedHeadersCiphertext: z.string().nullable().optional(),
  encryptedHeadersIV: z.string().nullable().optional(),
  encryptedHeadersTag: z.string().nullable().optional(),
  encryptedHeadersAlgorithm: z.string().nullable().optional(),
  encryptedHeadersKeyEncoding: z.string().nullable().optional(),
  orgId: z.string().uuid(),
  createdAt: z.date(),
  updatedAt: z.date()
});

export type TAuditLogStreams = z.infer<typeof AuditLogStreamsSchema>;
export type TAuditLogStreamsInsert = Omit<z.input<typeof AuditLogStreamsSchema>, TImmutableDBKeys>;
export type TAuditLogStreamsUpdate = Partial<Omit<z.input<typeof AuditLogStreamsSchema>, TImmutableDBKeys>>;
backend/src/db/schemas/index.ts
@@ -1,4 +1,5 @@
export * from "./api-keys";
+export * from "./audit-log-streams";
export * from "./audit-logs";
export * from "./auth-token-sessions";
export * from "./auth-tokens";
@@ -22,6 +23,7 @@ export * from "./incident-contacts";
export * from "./integration-auths";
export * from "./integrations";
export * from "./ldap-configs";
+export * from "./ldap-group-maps";
export * from "./models";
export * from "./org-bots";
export * from "./org-memberships";
backend/src/db/schemas/ldap-configs.ts
@@ -23,7 +23,10 @@ export const LdapConfigsSchema = z.object({
  caCertIV: z.string(),
  caCertTag: z.string(),
  createdAt: z.date(),
- updatedAt: z.date()
+ updatedAt: z.date(),
+ groupSearchBase: z.string().default(""),
+ groupSearchFilter: z.string().default(""),
+ searchFilter: z.string().default("")
});

export type TLdapConfigs = z.infer<typeof LdapConfigsSchema>;
19  backend/src/db/schemas/ldap-group-maps.ts  new file
@@ -0,0 +1,19 @@
// Code generated by automation script, DO NOT EDIT.
// Automated by pulling database and generating zod schema
// To update. Just run npm run generate:schema
// Written by akhilmhdh.

import { z } from "zod";

import { TImmutableDBKeys } from "./models";

export const LdapGroupMapsSchema = z.object({
  id: z.string().uuid(),
  ldapConfigId: z.string().uuid(),
  ldapGroupCN: z.string(),
  groupId: z.string().uuid()
});

export type TLdapGroupMaps = z.infer<typeof LdapGroupMapsSchema>;
export type TLdapGroupMapsInsert = Omit<z.input<typeof LdapGroupMapsSchema>, TImmutableDBKeys>;
export type TLdapGroupMapsUpdate = Partial<Omit<z.input<typeof LdapGroupMapsSchema>, TImmutableDBKeys>>;
backend/src/db/schemas/models.ts
@@ -60,7 +60,9 @@ export enum TableName {
  SecretRotationOutput = "secret_rotation_outputs",
  SamlConfig = "saml_configs",
  LdapConfig = "ldap_configs",
+ LdapGroupMap = "ldap_group_maps",
  AuditLog = "audit_logs",
+ AuditLogStream = "audit_log_streams",
  GitAppInstallSession = "git_app_install_sessions",
  GitAppOrg = "git_app_org",
  SecretScanningGitRisk = "secret_scanning_git_risks",
215  backend/src/ee/routes/v1/audit-log-stream-router.ts  new file
@@ -0,0 +1,215 @@
import { z } from "zod";

import { AUDIT_LOG_STREAMS } from "@app/lib/api-docs";
import { readLimit } from "@app/server/config/rateLimiter";
import { verifyAuth } from "@app/server/plugins/auth/verify-auth";
import { SanitizedAuditLogStreamSchema } from "@app/server/routes/sanitizedSchemas";
import { AuthMode } from "@app/services/auth/auth-type";

export const registerAuditLogStreamRouter = async (server: FastifyZodProvider) => {
  server.route({
    method: "POST",
    url: "/",
    config: {
      rateLimit: readLimit
    },
    schema: {
      description: "Create an Audit Log Stream.",
      security: [
        {
          bearerAuth: []
        }
      ],
      body: z.object({
        url: z.string().min(1).describe(AUDIT_LOG_STREAMS.CREATE.url),
        headers: z
          .object({
            key: z.string().min(1).trim().describe(AUDIT_LOG_STREAMS.CREATE.headers.key),
            value: z.string().min(1).trim().describe(AUDIT_LOG_STREAMS.CREATE.headers.value)
          })
          .describe(AUDIT_LOG_STREAMS.CREATE.headers.desc)
          .array()
          .optional()
      }),
      response: {
        200: z.object({
          auditLogStream: SanitizedAuditLogStreamSchema
        })
      }
    },
    onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]),
    handler: async (req) => {
      const auditLogStream = await server.services.auditLogStream.create({
        actorId: req.permission.id,
        actor: req.permission.type,
        actorOrgId: req.permission.orgId,
        actorAuthMethod: req.permission.authMethod,
        url: req.body.url,
        headers: req.body.headers
      });

      return { auditLogStream };
    }
  });

  server.route({
    method: "PATCH",
    url: "/:id",
    config: {
      rateLimit: readLimit
    },
    schema: {
      description: "Update an Audit Log Stream by ID.",
      security: [
        {
          bearerAuth: []
        }
      ],
      params: z.object({
        id: z.string().describe(AUDIT_LOG_STREAMS.UPDATE.id)
      }),
      body: z.object({
        url: z.string().optional().describe(AUDIT_LOG_STREAMS.UPDATE.url),
        headers: z
          .object({
            key: z.string().min(1).trim().describe(AUDIT_LOG_STREAMS.UPDATE.headers.key),
            value: z.string().min(1).trim().describe(AUDIT_LOG_STREAMS.UPDATE.headers.value)
          })
          .describe(AUDIT_LOG_STREAMS.UPDATE.headers.desc)
          .array()
          .optional()
      }),
      response: {
        200: z.object({
          auditLogStream: SanitizedAuditLogStreamSchema
        })
      }
    },
    onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]),
    handler: async (req) => {
      const auditLogStream = await server.services.auditLogStream.updateById({
        actorId: req.permission.id,
        actor: req.permission.type,
        actorOrgId: req.permission.orgId,
        actorAuthMethod: req.permission.authMethod,
        id: req.params.id,
        url: req.body.url,
        headers: req.body.headers
      });

      return { auditLogStream };
    }
  });

  server.route({
    method: "DELETE",
    url: "/:id",
    config: {
      rateLimit: readLimit
    },
    schema: {
      description: "Delete an Audit Log Stream by ID.",
      security: [
        {
          bearerAuth: []
        }
      ],
      params: z.object({
        id: z.string().describe(AUDIT_LOG_STREAMS.DELETE.id)
      }),
      response: {
        200: z.object({
          auditLogStream: SanitizedAuditLogStreamSchema
        })
      }
    },
    onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]),
    handler: async (req) => {
      const auditLogStream = await server.services.auditLogStream.deleteById({
        actorId: req.permission.id,
        actor: req.permission.type,
        actorOrgId: req.permission.orgId,
        actorAuthMethod: req.permission.authMethod,
        id: req.params.id
      });

      return { auditLogStream };
    }
  });

  server.route({
    method: "GET",
    url: "/:id",
    config: {
      rateLimit: readLimit
    },
    schema: {
      description: "Get an Audit Log Stream by ID.",
      security: [
        {
          bearerAuth: []
        }
      ],
      params: z.object({
        id: z.string().describe(AUDIT_LOG_STREAMS.GET_BY_ID.id)
      }),
      response: {
        200: z.object({
          auditLogStream: SanitizedAuditLogStreamSchema.extend({
            headers: z
              .object({
                key: z.string(),
                value: z.string()
              })
              .array()
              .optional()
          })
        })
      }
    },
    onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]),
    handler: async (req) => {
      const auditLogStream = await server.services.auditLogStream.getById({
        actorId: req.permission.id,
        actor: req.permission.type,
        actorOrgId: req.permission.orgId,
        actorAuthMethod: req.permission.authMethod,
        id: req.params.id
      });

      return { auditLogStream };
    }
  });

  server.route({
    method: "GET",
    url: "/",
    config: {
      rateLimit: readLimit
    },
    schema: {
      description: "List Audit Log Streams.",
      security: [
        {
          bearerAuth: []
        }
      ],
      response: {
        200: z.object({
          auditLogStreams: SanitizedAuditLogStreamSchema.array()
        })
      }
    },
    onRequest: verifyAuth([AuthMode.JWT, AuthMode.IDENTITY_ACCESS_TOKEN]),
    handler: async (req) => {
      const auditLogStreams = await server.services.auditLogStream.list({
        actorId: req.permission.id,
        actor: req.permission.type,
        actorOrgId: req.permission.orgId,
        actorAuthMethod: req.permission.authMethod
      });

      return { auditLogStreams };
    }
  });
};
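Usage sketch (not part of the diff): assuming these routes are mounted at /api/v1/audit-log-streams, as in the route registration below, creating a stream from a client could look like this; the base URL, token, and sink values are assumptions:

    // Hypothetical client call against the POST "/" route above.
    const res = await fetch("https://app.infisical.com/api/v1/audit-log-streams", {
      method: "POST",
      headers: {
        Authorization: `Bearer ${token}`, // JWT or identity access token (see verifyAuth above)
        "Content-Type": "application/json"
      },
      body: JSON.stringify({
        url: "https://logs.example.com/ingest", // your log sink
        headers: [{ key: "X-API-Key", value: "sink-secret" }] // optional; stored encrypted server-side
      })
    });
    const { auditLogStream } = (await res.json()) as { auditLogStream: { id: string; url: string } };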
backend/src/ee/routes/v1/index.ts
@@ -1,3 +1,4 @@
+import { registerAuditLogStreamRouter } from "./audit-log-stream-router";
import { registerDynamicSecretLeaseRouter } from "./dynamic-secret-lease-router";
import { registerDynamicSecretRouter } from "./dynamic-secret-router";
import { registerGroupRouter } from "./group-router";
@@ -55,6 +56,7 @@ export const registerV1EERoutes = async (server: FastifyZodProvider) => {
  await server.register(registerSecretRotationRouter, { prefix: "/secret-rotations" });
  await server.register(registerSecretVersionRouter, { prefix: "/secret" });
  await server.register(registerGroupRouter, { prefix: "/groups" });
+ await server.register(registerAuditLogStreamRouter, { prefix: "/audit-log-streams" });
  await server.register(
    async (privilegeRouter) => {
      await privilegeRouter.register(registerUserAdditionalPrivilegeRouter, { prefix: "/users" });
backend/src/ee/routes/v1/ldap-router.ts
@@ -14,7 +14,9 @@ import { FastifyRequest } from "fastify";
import LdapStrategy from "passport-ldapauth";
import { z } from "zod";

-import { LdapConfigsSchema } from "@app/db/schemas";
+import { LdapConfigsSchema, LdapGroupMapsSchema } from "@app/db/schemas";
+import { TLDAPConfig } from "@app/ee/services/ldap-config/ldap-config-types";
+import { isValidLdapFilter, searchGroups } from "@app/ee/services/ldap-config/ldap-fns";
import { getConfig } from "@app/lib/config/env";
import { logger } from "@app/lib/logger";
import { readLimit, writeLimit } from "@app/server/config/rateLimiter";
@@ -50,20 +52,38 @@ export const registerLdapRouter = async (server: FastifyZodProvider) => {
      // eslint-disable-next-line
      async (req: IncomingMessage, user, cb) => {
        try {
          const ldapConfig = (req as unknown as FastifyRequest).ldapConfig as TLDAPConfig;

          let groups: { dn: string; cn: string }[] | undefined;
          if (ldapConfig.groupSearchBase) {
            const groupFilter = "(|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}}))";
            const groupSearchFilter = (ldapConfig.groupSearchFilter || groupFilter)
              .replace(/{{\.Username}}/g, user.uid)
              .replace(/{{\.UserDN}}/g, user.dn);

            if (!isValidLdapFilter(groupSearchFilter)) {
              throw new Error("Generated LDAP search filter is invalid.");
            }

            groups = await searchGroups(ldapConfig, groupSearchFilter, ldapConfig.groupSearchBase);
          }

          const { isUserCompleted, providerAuthToken } = await server.services.ldap.ldapLogin({
            ldapConfigId: ldapConfig.id,
            externalId: user.uidNumber,
            username: user.uid,
-           firstName: user.givenName,
-           lastName: user.sn,
+           firstName: user.givenName ?? user.cn ?? "",
+           lastName: user.sn ?? "",
            emails: user.mail ? [user.mail] : [],
+           groups,
            relayState: ((req as unknown as FastifyRequest).body as { RelayState?: string }).RelayState,
            orgId: (req as unknown as FastifyRequest).ldapConfig.organization
          });

          return cb(null, { isUserCompleted, providerAuthToken });
-       } catch (err) {
-         logger.error(err);
-         return cb(err, false);
+       } catch (error) {
+         logger.error(error);
+         return cb(error, false);
        }
      }
    )
@@ -117,6 +137,9 @@ export const registerLdapRouter = async (server: FastifyZodProvider) => {
          bindDN: z.string(),
          bindPass: z.string(),
          searchBase: z.string(),
+         searchFilter: z.string(),
+         groupSearchBase: z.string(),
+         groupSearchFilter: z.string(),
          caCert: z.string()
        })
      }
@@ -148,6 +171,12 @@ export const registerLdapRouter = async (server: FastifyZodProvider) => {
        bindDN: z.string().trim(),
        bindPass: z.string().trim(),
        searchBase: z.string().trim(),
+       searchFilter: z.string().trim().default("(uid={{username}})"),
+       groupSearchBase: z.string().trim(),
+       groupSearchFilter: z
+         .string()
+         .trim()
+         .default("(|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}}))"),
        caCert: z.string().trim().default("")
      }),
      response: {
@@ -183,6 +212,9 @@ export const registerLdapRouter = async (server: FastifyZodProvider) => {
          bindDN: z.string().trim(),
          bindPass: z.string().trim(),
          searchBase: z.string().trim(),
+         searchFilter: z.string().trim(),
+         groupSearchBase: z.string().trim(),
+         groupSearchFilter: z.string().trim(),
          caCert: z.string().trim()
        })
        .partial()
@@ -204,4 +236,134 @@ export const registerLdapRouter = async (server: FastifyZodProvider) => {
      return ldap;
    }
  });

  server.route({
    method: "GET",
    url: "/config/:configId/group-maps",
    config: {
      rateLimit: readLimit
    },
    onRequest: verifyAuth([AuthMode.JWT]),
    schema: {
      params: z.object({
        configId: z.string().trim()
      }),
      response: {
        200: z.array(
          z.object({
            id: z.string(),
            ldapConfigId: z.string(),
            ldapGroupCN: z.string(),
            group: z.object({
              id: z.string(),
              name: z.string(),
              slug: z.string()
            })
          })
        )
      }
    },
    handler: async (req) => {
      const ldapGroupMaps = await server.services.ldap.getLdapGroupMaps({
        actor: req.permission.type,
        actorId: req.permission.id,
        orgId: req.permission.orgId,
        actorAuthMethod: req.permission.authMethod,
        actorOrgId: req.permission.orgId,
        ldapConfigId: req.params.configId
      });
      return ldapGroupMaps;
    }
  });

  server.route({
    method: "POST",
    url: "/config/:configId/group-maps",
    config: {
      rateLimit: readLimit
    },
    onRequest: verifyAuth([AuthMode.JWT]),
    schema: {
      params: z.object({
        configId: z.string().trim()
      }),
      body: z.object({
        ldapGroupCN: z.string().trim(),
        groupSlug: z.string().trim()
      }),
      response: {
        200: LdapGroupMapsSchema
      }
    },
    handler: async (req) => {
      const ldapGroupMap = await server.services.ldap.createLdapGroupMap({
        actor: req.permission.type,
        actorId: req.permission.id,
        orgId: req.permission.orgId,
        actorAuthMethod: req.permission.authMethod,
        actorOrgId: req.permission.orgId,
        ldapConfigId: req.params.configId,
        ...req.body
      });
      return ldapGroupMap;
    }
  });

  server.route({
    method: "DELETE",
    url: "/config/:configId/group-maps/:groupMapId",
    config: {
      rateLimit: readLimit
    },
    onRequest: verifyAuth([AuthMode.JWT]),
    schema: {
      params: z.object({
        configId: z.string().trim(),
        groupMapId: z.string().trim()
      }),
      response: {
        200: LdapGroupMapsSchema
      }
    },
    handler: async (req) => {
      const ldapGroupMap = await server.services.ldap.deleteLdapGroupMap({
        actor: req.permission.type,
        actorId: req.permission.id,
        orgId: req.permission.orgId,
        actorAuthMethod: req.permission.authMethod,
        actorOrgId: req.permission.orgId,
        ldapConfigId: req.params.configId,
        ldapGroupMapId: req.params.groupMapId
      });
      return ldapGroupMap;
    }
  });

  server.route({
    method: "POST",
    url: "/config/:configId/test-connection",
    config: {
      rateLimit: readLimit
    },
    onRequest: verifyAuth([AuthMode.JWT]),
    schema: {
      params: z.object({
        configId: z.string().trim()
      }),
      response: {
        200: z.boolean()
      }
    },
    handler: async (req) => {
      const result = await server.services.ldap.testLDAPConnection({
        actor: req.permission.type,
        actorId: req.permission.id,
        orgId: req.permission.orgId,
        actorAuthMethod: req.permission.authMethod,
        actorOrgId: req.permission.orgId,
        ldapConfigId: req.params.configId
      });
      return result;
    }
  });
};
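For clarity, a small sketch of how the {{.Username}} and {{.UserDN}} placeholders in the group search filter above expand at login time; it mirrors the .replace() calls in the strategy, with made-up user values:

    // Illustration only: expanding the default group filter for an example user.
    const template = "(|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}}))";
    const expanded = template
      .replace(/{{\.Username}}/g, "jdoe") // user.uid
      .replace(/{{\.UserDN}}/g, "cn=jdoe,ou=people,dc=example,dc=com"); // user.dn
    // expanded === "(|(memberUid=jdoe)(member=cn=jdoe,ou=people,dc=example,dc=com)(uniqueMember=cn=jdoe,ou=people,dc=example,dc=com))"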
backend/src/ee/services/audit-log-stream/audit-log-stream-dal.ts
@@ -0,0 +1,11 @@
import { TDbClient } from "@app/db";
import { TableName } from "@app/db/schemas";
import { ormify } from "@app/lib/knex";

export type TAuditLogStreamDALFactory = ReturnType<typeof auditLogStreamDALFactory>;

export const auditLogStreamDALFactory = (db: TDbClient) => {
  const orm = ormify(db, TableName.AuditLogStream);

  return orm;
};
backend/src/ee/services/audit-log-stream/audit-log-stream-service.ts
@@ -0,0 +1,233 @@
import { ForbiddenError } from "@casl/ability";
import { RawAxiosRequestHeaders } from "axios";

import { SecretKeyEncoding } from "@app/db/schemas";
import { request } from "@app/lib/config/request";
import { infisicalSymmetricDecrypt, infisicalSymmetricEncypt } from "@app/lib/crypto/encryption";
import { BadRequestError } from "@app/lib/errors";
import { validateLocalIps } from "@app/lib/validator";

import { AUDIT_LOG_STREAM_TIMEOUT } from "../audit-log/audit-log-queue";
import { TLicenseServiceFactory } from "../license/license-service";
import { OrgPermissionActions, OrgPermissionSubjects } from "../permission/org-permission";
import { TPermissionServiceFactory } from "../permission/permission-service";
import { TAuditLogStreamDALFactory } from "./audit-log-stream-dal";
import {
  LogStreamHeaders,
  TCreateAuditLogStreamDTO,
  TDeleteAuditLogStreamDTO,
  TGetDetailsAuditLogStreamDTO,
  TListAuditLogStreamDTO,
  TUpdateAuditLogStreamDTO
} from "./audit-log-stream-types";

type TAuditLogStreamServiceFactoryDep = {
  auditLogStreamDAL: TAuditLogStreamDALFactory;
  permissionService: Pick<TPermissionServiceFactory, "getOrgPermission">;
  licenseService: Pick<TLicenseServiceFactory, "getPlan">;
};

export type TAuditLogStreamServiceFactory = ReturnType<typeof auditLogStreamServiceFactory>;

export const auditLogStreamServiceFactory = ({
  auditLogStreamDAL,
  permissionService,
  licenseService
}: TAuditLogStreamServiceFactoryDep) => {
  const create = async ({
    url,
    actor,
    headers = [],
    actorId,
    actorOrgId,
    actorAuthMethod
  }: TCreateAuditLogStreamDTO) => {
    if (!actorOrgId) throw new BadRequestError({ message: "Missing org id from token" });

    const plan = await licenseService.getPlan(actorOrgId);
    if (!plan.auditLogStreams)
      throw new BadRequestError({
        message: "Failed to create audit log streams due to plan restriction. Upgrade plan to create streams."
      });

    const { permission } = await permissionService.getOrgPermission(
      actor,
      actorId,
      actorOrgId,
      actorAuthMethod,
      actorOrgId
    );
    ForbiddenError.from(permission).throwUnlessCan(OrgPermissionActions.Create, OrgPermissionSubjects.Settings);

    validateLocalIps(url);

    const totalStreams = await auditLogStreamDAL.find({ orgId: actorOrgId });
    if (totalStreams.length >= plan.auditLogStreamLimit) {
      throw new BadRequestError({
        message:
          "Failed to create audit log streams due to plan limit reached. Kindly contact Infisical to add more streams."
      });
    }

    // testing connection first
    const streamHeaders: RawAxiosRequestHeaders = { "Content-Type": "application/json" };
    if (headers.length)
      headers.forEach(({ key, value }) => {
        streamHeaders[key] = value;
      });
    await request
      .post(
        url,
        { ping: "ok" },
        {
          headers: streamHeaders,
          // request timeout
          timeout: AUDIT_LOG_STREAM_TIMEOUT,
          // connection timeout
          signal: AbortSignal.timeout(AUDIT_LOG_STREAM_TIMEOUT)
        }
      )
      .catch((err) => {
        throw new Error(`Failed to connect with the source ${(err as Error)?.message}`);
      });
    const encryptedHeaders = headers ? infisicalSymmetricEncypt(JSON.stringify(headers)) : undefined;
    const logStream = await auditLogStreamDAL.create({
      orgId: actorOrgId,
      url,
      ...(encryptedHeaders
        ? {
            encryptedHeadersCiphertext: encryptedHeaders.ciphertext,
            encryptedHeadersIV: encryptedHeaders.iv,
            encryptedHeadersTag: encryptedHeaders.tag,
            encryptedHeadersAlgorithm: encryptedHeaders.algorithm,
            encryptedHeadersKeyEncoding: encryptedHeaders.encoding
          }
        : {})
    });
    return logStream;
  };

  const updateById = async ({
    id,
    url,
    actor,
    headers = [],
    actorId,
    actorOrgId,
    actorAuthMethod
  }: TUpdateAuditLogStreamDTO) => {
    if (!actorOrgId) throw new BadRequestError({ message: "Missing org id from token" });

    const plan = await licenseService.getPlan(actorOrgId);
    if (!plan.auditLogStreams)
      throw new BadRequestError({
        message: "Failed to update audit log streams due to plan restriction. Upgrade plan to create streams."
      });

    const logStream = await auditLogStreamDAL.findById(id);
    if (!logStream) throw new BadRequestError({ message: "Audit log stream not found" });

    const { orgId } = logStream;
    const { permission } = await permissionService.getOrgPermission(actor, actorId, orgId, actorAuthMethod, actorOrgId);
    ForbiddenError.from(permission).throwUnlessCan(OrgPermissionActions.Edit, OrgPermissionSubjects.Settings);

    if (url) validateLocalIps(url);

    // testing connection first
    const streamHeaders: RawAxiosRequestHeaders = { "Content-Type": "application/json" };
    if (headers.length)
      headers.forEach(({ key, value }) => {
        streamHeaders[key] = value;
      });

    await request
      .post(
        url || logStream.url,
        { ping: "ok" },
        {
          headers: streamHeaders,
          // request timeout
          timeout: AUDIT_LOG_STREAM_TIMEOUT,
          // connection timeout
          signal: AbortSignal.timeout(AUDIT_LOG_STREAM_TIMEOUT)
        }
      )
      .catch((err) => {
        throw new Error(`Failed to connect with the source ${(err as Error)?.message}`);
      });

    const encryptedHeaders = headers ? infisicalSymmetricEncypt(JSON.stringify(headers)) : undefined;
    const updatedLogStream = await auditLogStreamDAL.updateById(id, {
      url,
      ...(encryptedHeaders
        ? {
            encryptedHeadersCiphertext: encryptedHeaders.ciphertext,
            encryptedHeadersIV: encryptedHeaders.iv,
            encryptedHeadersTag: encryptedHeaders.tag,
            encryptedHeadersAlgorithm: encryptedHeaders.algorithm,
            encryptedHeadersKeyEncoding: encryptedHeaders.encoding
          }
        : {})
    });
    return updatedLogStream;
  };

  const deleteById = async ({ id, actor, actorId, actorOrgId, actorAuthMethod }: TDeleteAuditLogStreamDTO) => {
    if (!actorOrgId) throw new BadRequestError({ message: "Missing org id from token" });

    const logStream = await auditLogStreamDAL.findById(id);
    if (!logStream) throw new BadRequestError({ message: "Audit log stream not found" });

    const { orgId } = logStream;
    const { permission } = await permissionService.getOrgPermission(actor, actorId, orgId, actorAuthMethod, actorOrgId);
    ForbiddenError.from(permission).throwUnlessCan(OrgPermissionActions.Delete, OrgPermissionSubjects.Settings);

    const deletedLogStream = await auditLogStreamDAL.deleteById(id);
    return deletedLogStream;
  };

  const getById = async ({ id, actor, actorId, actorOrgId, actorAuthMethod }: TGetDetailsAuditLogStreamDTO) => {
    const logStream = await auditLogStreamDAL.findById(id);
    if (!logStream) throw new BadRequestError({ message: "Audit log stream not found" });

    const { orgId } = logStream;
    const { permission } = await permissionService.getOrgPermission(actor, actorId, orgId, actorAuthMethod, actorOrgId);
    ForbiddenError.from(permission).throwUnlessCan(OrgPermissionActions.Read, OrgPermissionSubjects.Settings);

    const headers =
      logStream?.encryptedHeadersCiphertext && logStream?.encryptedHeadersIV && logStream?.encryptedHeadersTag
        ? (JSON.parse(
            infisicalSymmetricDecrypt({
              tag: logStream.encryptedHeadersTag,
              iv: logStream.encryptedHeadersIV,
              ciphertext: logStream.encryptedHeadersCiphertext,
              keyEncoding: logStream.encryptedHeadersKeyEncoding as SecretKeyEncoding
            })
          ) as LogStreamHeaders[])
        : undefined;

    return { ...logStream, headers };
  };

  const list = async ({ actor, actorId, actorOrgId, actorAuthMethod }: TListAuditLogStreamDTO) => {
    const { permission } = await permissionService.getOrgPermission(
      actor,
      actorId,
      actorOrgId,
      actorAuthMethod,
      actorOrgId
    );
    ForbiddenError.from(permission).throwUnlessCan(OrgPermissionActions.Read, OrgPermissionSubjects.Settings);

    const logStreams = await auditLogStreamDAL.find({ orgId: actorOrgId });
    return logStreams;
  };

  return {
    create,
    updateById,
    deleteById,
    getById,
    list
  };
};
backend/src/ee/services/audit-log-stream/audit-log-stream-types.ts
@@ -0,0 +1,27 @@
import { TOrgPermission } from "@app/lib/types";

export type LogStreamHeaders = {
  key: string;
  value: string;
};

export type TCreateAuditLogStreamDTO = Omit<TOrgPermission, "orgId"> & {
  url: string;
  headers?: LogStreamHeaders[];
};

export type TUpdateAuditLogStreamDTO = Omit<TOrgPermission, "orgId"> & {
  id: string;
  url?: string;
  headers?: LogStreamHeaders[];
};

export type TDeleteAuditLogStreamDTO = Omit<TOrgPermission, "orgId"> & {
  id: string;
};

export type TListAuditLogStreamDTO = Omit<TOrgPermission, "orgId">;

export type TGetDetailsAuditLogStreamDTO = Omit<TOrgPermission, "orgId"> & {
  id: string;
};
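For orientation, a hedged sketch of how these DTOs feed the service factory above; the composition-root wiring shown here is an assumption, not part of this diff:

    // Hypothetical wiring: the real server assembles these factories in its own bootstrap.
    const auditLogStreamService = auditLogStreamServiceFactory({
      auditLogStreamDAL: auditLogStreamDALFactory(db), // from audit-log-stream-dal.ts above
      permissionService, // anything exposing getOrgPermission
      licenseService // anything exposing getPlan
    });

    // A create call carries the actor fields from the authenticated request
    // plus the stream definition itself (TCreateAuditLogStreamDTO):
    const stream = await auditLogStreamService.create({
      actor: req.permission.type,
      actorId: req.permission.id,
      actorOrgId: req.permission.orgId,
      actorAuthMethod: req.permission.authMethod,
      url: "https://logs.example.com/ingest", // example sink
      headers: [{ key: "Authorization", value: "Bearer sink-token" }]
    });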
backend/src/ee/services/audit-log/audit-log-queue.ts
@@ -1,13 +1,21 @@
+import { RawAxiosRequestHeaders } from "axios";
+
+import { SecretKeyEncoding } from "@app/db/schemas";
+import { request } from "@app/lib/config/request";
+import { infisicalSymmetricDecrypt } from "@app/lib/crypto/encryption";
import { logger } from "@app/lib/logger";
import { QueueJobs, QueueName, TQueueServiceFactory } from "@app/queue";
import { TProjectDALFactory } from "@app/services/project/project-dal";

+import { TAuditLogStreamDALFactory } from "../audit-log-stream/audit-log-stream-dal";
+import { LogStreamHeaders } from "../audit-log-stream/audit-log-stream-types";
import { TLicenseServiceFactory } from "../license/license-service";
import { TAuditLogDALFactory } from "./audit-log-dal";
import { TCreateAuditLogDTO } from "./audit-log-types";

type TAuditLogQueueServiceFactoryDep = {
  auditLogDAL: TAuditLogDALFactory;
+ auditLogStreamDAL: Pick<TAuditLogStreamDALFactory, "find">;
  queueService: TQueueServiceFactory;
  projectDAL: Pick<TProjectDALFactory, "findById">;
  licenseService: Pick<TLicenseServiceFactory, "getPlan">;
@@ -15,11 +23,15 @@ type TAuditLogQueueServiceFactoryDep = {

export type TAuditLogQueueServiceFactory = ReturnType<typeof auditLogQueueServiceFactory>;

+// keep this timeout at 5s; it must be fast, or else the queue will take time to finish
+// the audit log queue is crowded and thus needs to be fast
+export const AUDIT_LOG_STREAM_TIMEOUT = 5 * 1000;
export const auditLogQueueServiceFactory = ({
  auditLogDAL,
  queueService,
  projectDAL,
- licenseService
+ licenseService,
+ auditLogStreamDAL
}: TAuditLogQueueServiceFactoryDep) => {
  const pushToLog = async (data: TCreateAuditLogDTO) => {
    await queueService.queue(QueueName.AuditLog, QueueJobs.AuditLog, data, {
@@ -47,7 +59,7 @@ export const auditLogQueueServiceFactory = ({
    // skip inserting if audit log retention is 0, meaning it's not supported
    if (ttl === 0) return;

-   await auditLogDAL.create({
+   const auditLog = await auditLogDAL.create({
      actor: actor.type,
      actorMetadata: actor.metadata,
      userAgent,
@@ -59,6 +71,46 @@ export const auditLogQueueServiceFactory = ({
      eventMetadata: event.metadata,
      userAgentType
    });

    const logStreams = orgId ? await auditLogStreamDAL.find({ orgId }) : [];
    await Promise.allSettled(
      logStreams.map(
        async ({
          url,
          encryptedHeadersTag,
          encryptedHeadersIV,
          encryptedHeadersKeyEncoding,
          encryptedHeadersCiphertext
        }) => {
          const streamHeaders =
            encryptedHeadersIV && encryptedHeadersCiphertext && encryptedHeadersTag
              ? (JSON.parse(
                  infisicalSymmetricDecrypt({
                    keyEncoding: encryptedHeadersKeyEncoding as SecretKeyEncoding,
                    iv: encryptedHeadersIV,
                    tag: encryptedHeadersTag,
                    ciphertext: encryptedHeadersCiphertext
                  })
                ) as LogStreamHeaders[])
              : [];

          const headers: RawAxiosRequestHeaders = { "Content-Type": "application/json" };

          if (streamHeaders.length)
            streamHeaders.forEach(({ key, value }) => {
              headers[key] = value;
            });

          return request.post(url, auditLog, {
            headers,
            // request timeout
            timeout: AUDIT_LOG_STREAM_TIMEOUT,
            // connection timeout
            signal: AbortSignal.timeout(AUDIT_LOG_STREAM_TIMEOUT)
          });
        }
      )
    );
  });

  queueService.start(QueueName.AuditLogPrune, async () => {
backend/src/ee/services/dynamic-secret/providers/aws-iam.ts (new file, 194 lines)
@@ -0,0 +1,194 @@
import {
  AddUserToGroupCommand,
  AttachUserPolicyCommand,
  CreateAccessKeyCommand,
  CreateUserCommand,
  DeleteAccessKeyCommand,
  DeleteUserCommand,
  DeleteUserPolicyCommand,
  DetachUserPolicyCommand,
  GetUserCommand,
  IAMClient,
  ListAccessKeysCommand,
  ListAttachedUserPoliciesCommand,
  ListGroupsForUserCommand,
  ListUserPoliciesCommand,
  PutUserPolicyCommand,
  RemoveUserFromGroupCommand
} from "@aws-sdk/client-iam";
import { z } from "zod";

import { BadRequestError } from "@app/lib/errors";
import { alphaNumericNanoId } from "@app/lib/nanoid";

import { DynamicSecretAwsIamSchema, TDynamicProviderFns } from "./models";

const generateUsername = () => {
  return alphaNumericNanoId(32);
};

export const AwsIamProvider = (): TDynamicProviderFns => {
  const validateProviderInputs = async (inputs: unknown) => {
    const providerInputs = await DynamicSecretAwsIamSchema.parseAsync(inputs);
    return providerInputs;
  };

  const getClient = async (providerInputs: z.infer<typeof DynamicSecretAwsIamSchema>) => {
    const client = new IAMClient({
      region: providerInputs.region,
      credentials: {
        accessKeyId: providerInputs.accessKey,
        secretAccessKey: providerInputs.secretAccessKey
      }
    });

    return client;
  };

  const validateConnection = async (inputs: unknown) => {
    const providerInputs = await validateProviderInputs(inputs);
    const client = await getClient(providerInputs);

    const isConnected = await client.send(new GetUserCommand({})).then(() => true);
    return isConnected;
  };

  const create = async (inputs: unknown) => {
    const providerInputs = await validateProviderInputs(inputs);
    const client = await getClient(providerInputs);

    const username = generateUsername();
    const { policyArns, userGroups, policyDocument, awsPath, permissionBoundaryPolicyArn } = providerInputs;
    const createUserRes = await client.send(
      new CreateUserCommand({
        Path: awsPath,
        PermissionsBoundary: permissionBoundaryPolicyArn || undefined,
        Tags: [{ Key: "createdBy", Value: "infisical-dynamic-secret" }],
        UserName: username
      })
    );
    if (!createUserRes.User) throw new BadRequestError({ message: "Failed to create AWS IAM User" });
    if (userGroups) {
      await Promise.all(
        userGroups
          .split(",")
          .filter(Boolean)
          .map((group) =>
            client.send(new AddUserToGroupCommand({ UserName: createUserRes?.User?.UserName, GroupName: group }))
          )
      );
    }
    if (policyArns) {
      await Promise.all(
        policyArns
          .split(",")
          .filter(Boolean)
          .map((policyArn) =>
            client.send(new AttachUserPolicyCommand({ UserName: createUserRes?.User?.UserName, PolicyArn: policyArn }))
          )
      );
    }
    if (policyDocument) {
      await client.send(
        new PutUserPolicyCommand({
          UserName: createUserRes.User.UserName,
          PolicyName: `infisical-dynamic-policy-${alphaNumericNanoId(4)}`,
          PolicyDocument: policyDocument
        })
      );
    }

    const createAccessKeyRes = await client.send(
      new CreateAccessKeyCommand({
        UserName: createUserRes.User.UserName
      })
    );
    if (!createAccessKeyRes.AccessKey)
      throw new BadRequestError({ message: "Failed to create AWS IAM User access key" });

    return {
      entityId: username,
      data: {
        ACCESS_KEY: createAccessKeyRes.AccessKey.AccessKeyId,
        SECRET_ACCESS_KEY: createAccessKeyRes.AccessKey.SecretAccessKey,
        USERNAME: username
      }
    };
  };

  const revoke = async (inputs: unknown, entityId: string) => {
    const providerInputs = await validateProviderInputs(inputs);
    const client = await getClient(providerInputs);

    const username = entityId;

    // remove user from groups
    const userGroups = await client.send(new ListGroupsForUserCommand({ UserName: username }));
    await Promise.all(
      (userGroups.Groups || []).map(({ GroupName }) =>
        client.send(
          new RemoveUserFromGroupCommand({
            GroupName,
            UserName: username
          })
        )
      )
    );

    // remove user access keys
    const userAccessKeys = await client.send(new ListAccessKeysCommand({ UserName: username }));
    await Promise.all(
      (userAccessKeys.AccessKeyMetadata || []).map(({ AccessKeyId }) =>
        client.send(
          new DeleteAccessKeyCommand({
            AccessKeyId,
            UserName: username
          })
        )
      )
    );

    // remove user inline policies
    const userInlinePolicies = await client.send(new ListUserPoliciesCommand({ UserName: username }));
    await Promise.all(
      (userInlinePolicies.PolicyNames || []).map((policyName) =>
        client.send(
          new DeleteUserPolicyCommand({
            PolicyName: policyName,
            UserName: username
          })
        )
      )
    );

    // remove user attached policies
    const userAttachedPolicies = await client.send(new ListAttachedUserPoliciesCommand({ UserName: username }));
    await Promise.all(
      (userAttachedPolicies.AttachedPolicies || []).map((policy) =>
        client.send(
          new DetachUserPolicyCommand({
            PolicyArn: policy.PolicyArn,
            UserName: username
          })
        )
      )
    );

    await client.send(new DeleteUserCommand({ UserName: username }));
    return { entityId: username };
  };

  const renew = async (_inputs: unknown, entityId: string) => {
    // do nothing
    const username = entityId;
    return { entityId: username };
  };

  return {
    validateProviderInputs,
    validateConnection,
    create,
    revoke,
    renew
  };
};
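For orientation, the provider above implements the TDynamicProviderFns contract consumed by the dynamic-secret service. A rough sketch of how a caller might drive the lease lifecycle — the input values are placeholders, and in the real backend these calls are made by the dynamic-secret service rather than directly:

const provider = AwsIamProvider();

const inputs = {
  accessKey: "<AWS access key>", // placeholder credentials
  secretAccessKey: "<AWS secret key>",
  region: "us-east-1",
  policyArns: "arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess"
};

if (await provider.validateConnection(inputs)) {
  const lease = await provider.create(inputs); // provisions a tagged IAM user + access key
  console.log(lease.data.ACCESS_KEY); // short-lived credentials handed to the client
  await provider.revoke(inputs, lease.entityId); // detaches policies, deletes keys, deletes the user
}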
@@ -1,8 +1,10 @@
import { AwsIamProvider } from "./aws-iam";
import { CassandraProvider } from "./cassandra";
import { DynamicSecretProviders } from "./models";
import { SqlDatabaseProvider } from "./sql-database";

export const buildDynamicSecretProviders = () => ({
  [DynamicSecretProviders.SqlDatabase]: SqlDatabaseProvider(),
-  [DynamicSecretProviders.Cassandra]: CassandraProvider()
+  [DynamicSecretProviders.Cassandra]: CassandraProvider(),
+  [DynamicSecretProviders.AwsIam]: AwsIamProvider()
});
@@ -8,38 +8,51 @@ export enum SqlProviders {

export const DynamicSecretSqlDBSchema = z.object({
  client: z.nativeEnum(SqlProviders),
-  host: z.string().toLowerCase(),
+  host: z.string().trim().toLowerCase(),
  port: z.number(),
-  database: z.string(),
-  username: z.string(),
-  password: z.string(),
-  creationStatement: z.string(),
-  revocationStatement: z.string(),
-  renewStatement: z.string().optional(),
+  database: z.string().trim(),
+  username: z.string().trim(),
+  password: z.string().trim(),
+  creationStatement: z.string().trim(),
+  revocationStatement: z.string().trim(),
+  renewStatement: z.string().trim().optional(),
  ca: z.string().optional()
});

export const DynamicSecretCassandraSchema = z.object({
-  host: z.string().toLowerCase(),
+  host: z.string().trim().toLowerCase(),
  port: z.number(),
-  localDataCenter: z.string().min(1),
-  keyspace: z.string().optional(),
-  username: z.string(),
-  password: z.string(),
-  creationStatement: z.string(),
-  revocationStatement: z.string(),
-  renewStatement: z.string().optional(),
+  localDataCenter: z.string().trim().min(1),
+  keyspace: z.string().trim().optional(),
+  username: z.string().trim(),
+  password: z.string().trim(),
+  creationStatement: z.string().trim(),
+  revocationStatement: z.string().trim(),
+  renewStatement: z.string().trim().optional(),
  ca: z.string().optional()
});

export const DynamicSecretAwsIamSchema = z.object({
  accessKey: z.string().trim().min(1),
  secretAccessKey: z.string().trim().min(1),
  region: z.string().trim().min(1),
  awsPath: z.string().trim().optional(),
  permissionBoundaryPolicyArn: z.string().trim().optional(),
  policyDocument: z.string().trim().optional(),
  userGroups: z.string().trim().optional(),
  policyArns: z.string().trim().optional()
});

export enum DynamicSecretProviders {
  SqlDatabase = "sql-database",
-  Cassandra = "cassandra"
+  Cassandra = "cassandra",
+  AwsIam = "aws-iam"
}

export const DynamicSecretProviderSchema = z.discriminatedUnion("type", [
  z.object({ type: z.literal(DynamicSecretProviders.SqlDatabase), inputs: DynamicSecretSqlDBSchema }),
-  z.object({ type: z.literal(DynamicSecretProviders.Cassandra), inputs: DynamicSecretCassandraSchema })
+  z.object({ type: z.literal(DynamicSecretProviders.Cassandra), inputs: DynamicSecretCassandraSchema }),
+  z.object({ type: z.literal(DynamicSecretProviders.AwsIam), inputs: DynamicSecretAwsIamSchema })
]);

export type TDynamicProviderFns = {
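The recurring change in this hunk is inserting .trim() ahead of the other string refinements. Order matters in zod because each method operates on the output of the previous one; a small standalone illustration (not part of the PR):

import { z } from "zod";

const host = z.string().trim().toLowerCase();
host.parse("  DB.Example.COM "); // => "db.example.com"

// .trim() before .min(1) rejects whitespace-only input:
z.string().trim().min(1).safeParse("   ").success; // => false
// whereas .min(1) alone would accept it:
z.string().min(1).safeParse("   ").success; // => true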
@@ -22,10 +22,6 @@ const addAcceptedUsersToGroup = async ({
  projectBotDAL,
  tx
}: TAddUsersToGroup) => {
-  console.log("addAcceptedUsersToGroup args: ", {
-    userIds,
-    group
-  });
  const users = await userDAL.findUserEncKeyByUserIdsBatch(
    {
      userIds
@@ -2,6 +2,9 @@ import { ForbiddenError } from "@casl/ability";
import jwt from "jsonwebtoken";

import { OrgMembershipRole, OrgMembershipStatus, SecretKeyEncoding, TLdapConfigsUpdate } from "@app/db/schemas";
import { TGroupDALFactory } from "@app/ee/services/group/group-dal";
import { addUsersToGroupByUserIds, removeUsersFromGroupByUserIds } from "@app/ee/services/group/group-fns";
import { TUserGroupMembershipDALFactory } from "@app/ee/services/group/user-group-membership-dal";
import { getConfig } from "@app/lib/config/env";
import {
  decryptSymmetric,
@@ -13,8 +16,12 @@ import {
} from "@app/lib/crypto/encryption";
import { BadRequestError } from "@app/lib/errors";
import { AuthMethod, AuthTokenType } from "@app/services/auth/auth-type";
import { TGroupProjectDALFactory } from "@app/services/group-project/group-project-dal";
import { TOrgBotDALFactory } from "@app/services/org/org-bot-dal";
import { TOrgDALFactory } from "@app/services/org/org-dal";
import { TProjectDALFactory } from "@app/services/project/project-dal";
import { TProjectBotDALFactory } from "@app/services/project-bot/project-bot-dal";
import { TProjectKeyDALFactory } from "@app/services/project-key/project-key-dal";
import { TUserDALFactory } from "@app/services/user/user-dal";
import { normalizeUsername } from "@app/services/user/user-fns";
import { TUserAliasDALFactory } from "@app/services/user-alias/user-alias-dal";
@@ -23,16 +30,40 @@ import { TLicenseServiceFactory } from "../license/license-service";
import { OrgPermissionActions, OrgPermissionSubjects } from "../permission/org-permission";
import { TPermissionServiceFactory } from "../permission/permission-service";
import { TLdapConfigDALFactory } from "./ldap-config-dal";
-import { TCreateLdapCfgDTO, TGetLdapCfgDTO, TLdapLoginDTO, TUpdateLdapCfgDTO } from "./ldap-config-types";
+import {
+  TCreateLdapCfgDTO,
+  TCreateLdapGroupMapDTO,
+  TDeleteLdapGroupMapDTO,
+  TGetLdapCfgDTO,
+  TGetLdapGroupMapsDTO,
+  TLdapLoginDTO,
+  TTestLdapConnectionDTO,
+  TUpdateLdapCfgDTO
+} from "./ldap-config-types";
import { testLDAPConfig } from "./ldap-fns";
import { TLdapGroupMapDALFactory } from "./ldap-group-map-dal";

type TLdapConfigServiceFactoryDep = {
-  ldapConfigDAL: TLdapConfigDALFactory;
+  ldapConfigDAL: Pick<TLdapConfigDALFactory, "create" | "update" | "findOne">;
+  ldapGroupMapDAL: Pick<TLdapGroupMapDALFactory, "find" | "create" | "delete" | "findLdapGroupMapsByLdapConfigId">;
  orgDAL: Pick<
    TOrgDALFactory,
    "createMembership" | "updateMembershipById" | "findMembership" | "findOrgById" | "findOne" | "updateById"
  >;
  orgBotDAL: Pick<TOrgBotDALFactory, "findOne" | "create" | "transaction">;
-  userDAL: Pick<TUserDALFactory, "create" | "findOne" | "transaction" | "updateById">;
  groupDAL: Pick<TGroupDALFactory, "find" | "findOne">;
  groupProjectDAL: Pick<TGroupProjectDALFactory, "find">;
  projectKeyDAL: Pick<TProjectKeyDALFactory, "find" | "findLatestProjectKey" | "insertMany" | "delete">;
  projectDAL: Pick<TProjectDALFactory, "findProjectGhostUser">;
  projectBotDAL: Pick<TProjectBotDALFactory, "findOne">;
  userGroupMembershipDAL: Pick<
    TUserGroupMembershipDALFactory,
    "find" | "transaction" | "insertMany" | "filterProjectsByUserMembership" | "delete"
  >;
+  userDAL: Pick<
+    TUserDALFactory,
+    "create" | "findOne" | "transaction" | "updateById" | "findUserEncKeyByUserIdsBatch" | "find"
+  >;
  userAliasDAL: Pick<TUserAliasDALFactory, "create" | "findOne">;
  permissionService: Pick<TPermissionServiceFactory, "getOrgPermission">;
  licenseService: Pick<TLicenseServiceFactory, "getPlan">;
@@ -42,8 +73,15 @@ export type TLdapConfigServiceFactory = ReturnType<typeof ldapConfigServiceFacto

export const ldapConfigServiceFactory = ({
  ldapConfigDAL,
  ldapGroupMapDAL,
  orgDAL,
  orgBotDAL,
  groupDAL,
  groupProjectDAL,
  projectKeyDAL,
  projectDAL,
  projectBotDAL,
  userGroupMembershipDAL,
  userDAL,
  userAliasDAL,
  permissionService,
@@ -60,6 +98,9 @@ export const ldapConfigServiceFactory = ({
    bindDN,
    bindPass,
    searchBase,
    searchFilter,
    groupSearchBase,
    groupSearchFilter,
    caCert
  }: TCreateLdapCfgDTO) => {
    const { permission } = await permissionService.getOrgPermission(actor, actorId, orgId, actorAuthMethod, actorOrgId);
@@ -135,6 +176,9 @@ export const ldapConfigServiceFactory = ({
      bindPassIV,
      bindPassTag,
      searchBase,
      searchFilter,
      groupSearchBase,
      groupSearchFilter,
      encryptedCACert,
      caCertIV,
      caCertTag
@@ -154,6 +198,9 @@ export const ldapConfigServiceFactory = ({
    bindDN,
    bindPass,
    searchBase,
    searchFilter,
    groupSearchBase,
    groupSearchFilter,
    caCert
  }: TUpdateLdapCfgDTO) => {
    const { permission } = await permissionService.getOrgPermission(actor, actorId, orgId, actorAuthMethod, actorOrgId);
@@ -169,7 +216,10 @@ export const ldapConfigServiceFactory = ({
    const updateQuery: TLdapConfigsUpdate = {
      isActive,
      url,
-      searchBase
+      searchBase,
+      searchFilter,
+      groupSearchBase,
+      groupSearchFilter
    };

    const orgBot = await orgBotDAL.findOne({ orgId });
@@ -271,6 +321,9 @@ export const ldapConfigServiceFactory = ({
      bindDN,
      bindPass,
      searchBase: ldapConfig.searchBase,
      searchFilter: ldapConfig.searchFilter,
      groupSearchBase: ldapConfig.groupSearchBase,
      groupSearchFilter: ldapConfig.groupSearchFilter,
      caCert
    };
  };
@@ -304,8 +357,8 @@ export const ldapConfigServiceFactory = ({
      bindDN: ldapConfig.bindDN,
      bindCredentials: ldapConfig.bindPass,
      searchBase: ldapConfig.searchBase,
-      searchFilter: "(uid={{username}})",
-      searchAttributes: ["uid", "uidNumber", "givenName", "sn", "mail"],
+      searchFilter: ldapConfig.searchFilter || "(uid={{username}})",
+      // searchAttributes: ["uid", "uidNumber", "givenName", "sn", "mail"],
      ...(ldapConfig.caCert !== ""
        ? {
            tlsOptions: {
@@ -320,7 +373,17 @@ export const ldapConfigServiceFactory = ({
    return { opts, ldapConfig };
  };

-  const ldapLogin = async ({ externalId, username, firstName, lastName, emails, orgId, relayState }: TLdapLoginDTO) => {
+  const ldapLogin = async ({
+    ldapConfigId,
+    externalId,
+    username,
+    firstName,
+    lastName,
+    emails,
+    groups,
+    orgId,
+    relayState
+  }: TLdapLoginDTO) => {
    const appCfg = getConfig();
    let userAlias = await userAliasDAL.findOne({
      externalId,
@@ -394,7 +457,84 @@ export const ldapConfigServiceFactory = ({
      });
    }

-    const user = await userDAL.findOne({ id: userAlias.userId });
+    const user = await userDAL.transaction(async (tx) => {
      const newUser = await userDAL.findOne({ id: userAlias.userId }, tx);
      if (groups) {
        const ldapGroupIdsToBePartOf = (
          await ldapGroupMapDAL.find({
            ldapConfigId,
            $in: {
              ldapGroupCN: groups.map((group) => group.cn)
            }
          })
        ).map((groupMap) => groupMap.groupId);

        const groupsToBePartOf = await groupDAL.find({
          orgId,
          $in: {
            id: ldapGroupIdsToBePartOf
          }
        });
        const toBePartOfGroupIdsSet = new Set(groupsToBePartOf.map((groupToBePartOf) => groupToBePartOf.id));

        const allLdapGroupMaps = await ldapGroupMapDAL.find({
          ldapConfigId
        });

        const ldapGroupIdsCurrentlyPartOf = (
          await userGroupMembershipDAL.find({
            userId: newUser.id,
            $in: {
              groupId: allLdapGroupMaps.map((groupMap) => groupMap.groupId)
            }
          })
        ).map((userGroupMembership) => userGroupMembership.groupId);

        const userGroupMembershipGroupIdsSet = new Set(ldapGroupIdsCurrentlyPartOf);

        for await (const group of groupsToBePartOf) {
          if (!userGroupMembershipGroupIdsSet.has(group.id)) {
            // add user to group that they should be part of
            await addUsersToGroupByUserIds({
              group,
              userIds: [newUser.id],
              userDAL,
              userGroupMembershipDAL,
              orgDAL,
              groupProjectDAL,
              projectKeyDAL,
              projectDAL,
              projectBotDAL,
              tx
            });
          }
        }

        const groupsCurrentlyPartOf = await groupDAL.find({
          orgId,
          $in: {
            id: ldapGroupIdsCurrentlyPartOf
          }
        });

        for await (const group of groupsCurrentlyPartOf) {
          if (!toBePartOfGroupIdsSet.has(group.id)) {
            // remove user from group that they should no longer be part of
            await removeUsersFromGroupByUserIds({
              group,
              userIds: [newUser.id],
              userDAL,
              userGroupMembershipDAL,
              groupProjectDAL,
              projectKeyDAL,
              tx
            });
          }
        }
      }

      return newUser;
    });

    const isUserCompleted = Boolean(user.isAccepted);

@@ -424,6 +564,116 @@ export const ldapConfigServiceFactory = ({
    return { isUserCompleted, providerAuthToken };
  };
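The transaction above is a set reconciliation: it computes the groups the user should belong to (the login's LDAP group CNs resolved through the group maps) and the groups they currently belong to among LDAP-mapped groups, then adds and removes the difference. Because the current-membership query is filtered to groups that appear in the LDAP group maps, manually assigned groups are never touched. The set arithmetic, distilled (group IDs invented):

// the reconciliation reduces to two set differences:
const shouldBeIn = new Set(["eng", "ops"]); // LDAP CNs -> mapped group IDs
const currentlyIn = new Set(["ops", "sales"]); // memberships among LDAP-mapped groups only

const toAdd = [...shouldBeIn].filter((g) => !currentlyIn.has(g)); // ["eng"]
const toRemove = [...currentlyIn].filter((g) => !shouldBeIn.has(g)); // ["sales"]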
  const getLdapGroupMaps = async ({
    ldapConfigId,
    actor,
    actorId,
    orgId,
    actorAuthMethod,
    actorOrgId
  }: TGetLdapGroupMapsDTO) => {
    const { permission } = await permissionService.getOrgPermission(actor, actorId, orgId, actorAuthMethod, actorOrgId);
    ForbiddenError.from(permission).throwUnlessCan(OrgPermissionActions.Read, OrgPermissionSubjects.Ldap);

    const ldapConfig = await ldapConfigDAL.findOne({
      id: ldapConfigId,
      orgId
    });

    if (!ldapConfig) throw new BadRequestError({ message: "Failed to find organization LDAP data" });

    const groupMaps = await ldapGroupMapDAL.findLdapGroupMapsByLdapConfigId(ldapConfigId);

    return groupMaps;
  };

  const createLdapGroupMap = async ({
    ldapConfigId,
    ldapGroupCN,
    groupSlug,
    actor,
    actorId,
    orgId,
    actorAuthMethod,
    actorOrgId
  }: TCreateLdapGroupMapDTO) => {
    const { permission } = await permissionService.getOrgPermission(actor, actorId, orgId, actorAuthMethod, actorOrgId);
    ForbiddenError.from(permission).throwUnlessCan(OrgPermissionActions.Create, OrgPermissionSubjects.Ldap);

    const plan = await licenseService.getPlan(orgId);
    if (!plan.ldap)
      throw new BadRequestError({
        message: "Failed to create LDAP group map due to plan restriction. Upgrade plan to create LDAP group map."
      });

    const ldapConfig = await ldapConfigDAL.findOne({
      id: ldapConfigId,
      orgId
    });
    if (!ldapConfig) throw new BadRequestError({ message: "Failed to find organization LDAP data" });

    const group = await groupDAL.findOne({ slug: groupSlug, orgId });
    if (!group) throw new BadRequestError({ message: "Failed to find group" });

    const groupMap = await ldapGroupMapDAL.create({
      ldapConfigId,
      ldapGroupCN,
      groupId: group.id
    });

    return groupMap;
  };

  const deleteLdapGroupMap = async ({
    ldapConfigId,
    ldapGroupMapId,
    actor,
    actorId,
    orgId,
    actorAuthMethod,
    actorOrgId
  }: TDeleteLdapGroupMapDTO) => {
    const { permission } = await permissionService.getOrgPermission(actor, actorId, orgId, actorAuthMethod, actorOrgId);
    ForbiddenError.from(permission).throwUnlessCan(OrgPermissionActions.Delete, OrgPermissionSubjects.Ldap);

    const plan = await licenseService.getPlan(orgId);
    if (!plan.ldap)
      throw new BadRequestError({
        message: "Failed to delete LDAP group map due to plan restriction. Upgrade plan to delete LDAP group map."
      });

    const ldapConfig = await ldapConfigDAL.findOne({
      id: ldapConfigId,
      orgId
    });

    if (!ldapConfig) throw new BadRequestError({ message: "Failed to find organization LDAP data" });

    const [deletedGroupMap] = await ldapGroupMapDAL.delete({
      ldapConfigId: ldapConfig.id,
      id: ldapGroupMapId
    });

    return deletedGroupMap;
  };

  const testLDAPConnection = async ({ actor, actorId, orgId, actorAuthMethod, actorOrgId }: TTestLdapConnectionDTO) => {
    const { permission } = await permissionService.getOrgPermission(actor, actorId, orgId, actorAuthMethod, actorOrgId);
    ForbiddenError.from(permission).throwUnlessCan(OrgPermissionActions.Create, OrgPermissionSubjects.Ldap);

    const plan = await licenseService.getPlan(orgId);
    if (!plan.ldap)
      throw new BadRequestError({
        message: "Failed to test LDAP connection due to plan restriction. Upgrade plan to test the LDAP connection."
      });

    const ldapConfig = await getLdapCfg({
      orgId
    });

    return testLDAPConfig(ldapConfig);
  };

  return {
    createLdapCfg,
    updateLdapCfg,
@@ -431,6 +681,10 @@ export const ldapConfigServiceFactory = ({
    getLdapCfg,
    // getLdapPassportOpts,
    ldapLogin,
-    bootLdap
+    bootLdap,
+    getLdapGroupMaps,
+    createLdapGroupMap,
+    deleteLdapGroupMap,
+    testLDAPConnection
  };
};
@@ -1,5 +1,18 @@
import { TOrgPermission } from "@app/lib/types";

export type TLDAPConfig = {
  id: string;
  organization: string;
  isActive: boolean;
  url: string;
  bindDN: string;
  bindPass: string;
  searchBase: string;
  groupSearchBase: string;
  groupSearchFilter: string;
  caCert: string;
};

export type TCreateLdapCfgDTO = {
  orgId: string;
  isActive: boolean;
@@ -7,6 +20,9 @@ export type TCreateLdapCfgDTO = {
  bindDN: string;
  bindPass: string;
  searchBase: string;
  searchFilter: string;
  groupSearchBase: string;
  groupSearchFilter: string;
  caCert: string;
} & TOrgPermission;

@@ -18,6 +34,9 @@ export type TUpdateLdapCfgDTO = {
  bindDN: string;
  bindPass: string;
  searchBase: string;
  searchFilter: string;
  groupSearchBase: string;
  groupSearchFilter: string;
  caCert: string;
}> &
  TOrgPermission;
@@ -27,11 +46,35 @@ export type TGetLdapCfgDTO = {
} & TOrgPermission;

export type TLdapLoginDTO = {
  ldapConfigId: string;
  externalId: string;
  username: string;
  firstName: string;
  lastName: string;
  emails: string[];
  orgId: string;
  groups?: {
    dn: string;
    cn: string;
  }[];
  relayState?: string;
};

export type TGetLdapGroupMapsDTO = {
  ldapConfigId: string;
} & TOrgPermission;

export type TCreateLdapGroupMapDTO = {
  ldapConfigId: string;
  ldapGroupCN: string;
  groupSlug: string;
} & TOrgPermission;

export type TDeleteLdapGroupMapDTO = {
  ldapConfigId: string;
  ldapGroupMapId: string;
} & TOrgPermission;

export type TTestLdapConnectionDTO = {
  ldapConfigId: string;
} & TOrgPermission;
backend/src/ee/services/ldap-config/ldap-fns.ts (new file, 119 lines)
@@ -0,0 +1,119 @@
import ldapjs from "ldapjs";

import { logger } from "@app/lib/logger";

import { TLDAPConfig } from "./ldap-config-types";

export const isValidLdapFilter = (filter: string) => {
  try {
    ldapjs.parseFilter(filter);
    return true;
  } catch (error) {
    logger.error("Invalid LDAP filter");
    logger.error(error);
    return false;
  }
};

/**
 * Test the LDAP configuration by attempting to bind to the LDAP server
 * @param ldapConfig - The LDAP configuration to test
 * @returns {Boolean} isConnected - Whether or not the connection was successful
 */
export const testLDAPConfig = async (ldapConfig: TLDAPConfig): Promise<boolean> => {
  return new Promise((resolve) => {
    const ldapClient = ldapjs.createClient({
      url: ldapConfig.url,
      bindDN: ldapConfig.bindDN,
      bindCredentials: ldapConfig.bindPass,
      ...(ldapConfig.caCert !== ""
        ? {
            tlsOptions: {
              ca: [ldapConfig.caCert]
            }
          }
        : {})
    });

    ldapClient.on("error", (err) => {
      logger.error("LDAP client error:", err);
      logger.error(err);
      resolve(false);
    });

    ldapClient.bind(ldapConfig.bindDN, ldapConfig.bindPass, (err) => {
      if (err) {
        logger.error("Error binding to LDAP");
        logger.error(err);
        ldapClient.unbind();
        resolve(false);
      } else {
        logger.info("Successfully connected and bound to LDAP.");
        ldapClient.unbind();
        resolve(true);
      }
    });
  });
};

/**
 * Search for groups in the LDAP server
 * @param ldapConfig - The LDAP configuration to use
 * @param filter - The filter to use when searching for groups
 * @param base - The base to search from
 * @returns
 */
export const searchGroups = async (
  ldapConfig: TLDAPConfig,
  filter: string,
  base: string
): Promise<{ dn: string; cn: string }[]> => {
  return new Promise((resolve, reject) => {
    const ldapClient = ldapjs.createClient({
      url: ldapConfig.url,
      bindDN: ldapConfig.bindDN,
      bindCredentials: ldapConfig.bindPass,
      ...(ldapConfig.caCert !== ""
        ? {
            tlsOptions: {
              ca: [ldapConfig.caCert]
            }
          }
        : {})
    });

    ldapClient.search(
      base,
      {
        filter,
        scope: "sub"
      },
      (err, res) => {
        if (err) {
          ldapClient.unbind();
          return reject(err);
        }

        const groups: { dn: string; cn: string }[] = [];

        res.on("searchEntry", (entry) => {
          const dn = entry.dn.toString();
          const regex = /cn=([^,]+)/;
          const match = dn.match(regex);
          // parse the cn from the dn
          const cn = (match && match[1]) as string;

          groups.push({ dn, cn });
        });
        res.on("error", (error) => {
          ldapClient.unbind();
          reject(error);
        });
        res.on("end", () => {
          ldapClient.unbind();
          resolve(groups);
        });
      }
    );
  });
};
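Both helpers wrap ldapjs's callback API in promises; note that testLDAPConfig resolves false rather than rejecting, so callers get a plain boolean. A hypothetical call to searchGroups, with an assumed filter and search base:

// assumes an already-loaded TLDAPConfig; filter and base are illustrative values
const groups = await searchGroups(
  ldapConfig,
  "(objectClass=posixGroup)",
  "ou=groups,dc=example,dc=com"
);
// => e.g. [{ dn: "cn=engineering,ou=groups,dc=example,dc=com", cn: "engineering" }]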
backend/src/ee/services/ldap-config/ldap-group-map-dal.ts (new file, 41 lines)
@@ -0,0 +1,41 @@
import { TDbClient } from "@app/db";
import { TableName } from "@app/db/schemas";
import { DatabaseError } from "@app/lib/errors";
import { ormify, selectAllTableCols } from "@app/lib/knex";

export type TLdapGroupMapDALFactory = ReturnType<typeof ldapGroupMapDALFactory>;

export const ldapGroupMapDALFactory = (db: TDbClient) => {
  const ldapGroupMapOrm = ormify(db, TableName.LdapGroupMap);

  const findLdapGroupMapsByLdapConfigId = async (ldapConfigId: string) => {
    try {
      const docs = await db(TableName.LdapGroupMap)
        .where(`${TableName.LdapGroupMap}.ldapConfigId`, ldapConfigId)
        .join(TableName.Groups, `${TableName.LdapGroupMap}.groupId`, `${TableName.Groups}.id`)
        .select(selectAllTableCols(TableName.LdapGroupMap))
        .select(
          db.ref("id").withSchema(TableName.Groups).as("groupId"),
          db.ref("name").withSchema(TableName.Groups).as("groupName"),
          db.ref("slug").withSchema(TableName.Groups).as("groupSlug")
        );

      return docs.map((doc) => {
        return {
          id: doc.id,
          ldapConfigId: doc.ldapConfigId,
          ldapGroupCN: doc.ldapGroupCN,
          group: {
            id: doc.groupId,
            name: doc.groupName,
            slug: doc.groupSlug
          }
        };
      });
    } catch (error) {
      throw new DatabaseError({ error, name: "findGroupMaps" });
    }
  };

  return { ...ldapGroupMapOrm, findLdapGroupMapsByLdapConfigId };
};
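For reference, the join plus mapping above yields rows shaped roughly like this (values invented for illustration):

const exampleGroupMap = {
  id: "map-uuid",
  ldapConfigId: "ldap-config-uuid",
  ldapGroupCN: "engineering",
  group: { id: "group-uuid", name: "Engineering", slug: "engineering" }
};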
@@ -24,6 +24,8 @@ export const getDefaultOnPremFeatures = (): TFeatureSet => ({
  customAlerts: false,
  auditLogs: false,
  auditLogsRetentionDays: 0,
  auditLogStreams: false,
  auditLogStreamLimit: 3,
  samlSSO: false,
  scim: false,
  ldap: false,
@@ -40,6 +40,8 @@ export type TFeatureSet = {
  customAlerts: false;
  auditLogs: false;
  auditLogsRetentionDays: 0;
  auditLogStreams: false;
  auditLogStreamLimit: 3;
  samlSSO: false;
  scim: false;
  ldap: false;
@@ -340,11 +340,12 @@ export const samlConfigServiceFactory = ({
          orgId,
          inviteEmail: email,
          role: OrgMembershipRole.Member,
-          status: OrgMembershipStatus.Accepted
+          status: user.isAccepted ? OrgMembershipStatus.Accepted : OrgMembershipStatus.Invited // if user is fully completed, then set status to accepted, otherwise set it to invited so we can update it later
        },
        tx
      );
-    } else if (orgMembership.status === OrgMembershipStatus.Invited) {
+      // Only update the membership to Accepted if the user account is already completed.
+    } else if (orgMembership.status === OrgMembershipStatus.Invited && user.isAccepted) {
      await orgDAL.updateMembershipById(
        orgMembership.id,
        {
@@ -495,7 +495,11 @@ export const secretApprovalRequestServiceFactory = ({
    await projectDAL.checkProjectUpgradeStatus(projectId);

    const folder = await folderDAL.findBySecretPath(projectId, environment, secretPath);
-    if (!folder) throw new BadRequestError({ message: "Folder not found", name: "GenSecretApproval" });
+    if (!folder)
+      throw new BadRequestError({
+        message: "Folder not found for the given environment slug & secret path",
+        name: "GenSecretApproval"
+      });
    const folderId = folder.id;

    const blindIndexCfg = await secretBlindIndexDAL.findOne({ projectId });
@@ -272,6 +272,7 @@ export const SECRETS = {

export const RAW_SECRETS = {
  LIST: {
    expand: "Whether or not to expand secret references",
    recursive:
      "Whether or not to fetch all secrets from the specified base path, and all of its subdirectories. Note, the max depth is 20 deep.",
    workspaceId: "The ID of the project to list secrets from.",
@@ -614,3 +615,29 @@ export const INTEGRATION = {
    integrationId: "The ID of the integration object."
  }
};

export const AUDIT_LOG_STREAMS = {
  CREATE: {
    url: "The HTTP URL to push logs to.",
    headers: {
      desc: "The HTTP headers attached to the external provider requests.",
      key: "The HTTP header key name.",
      value: "The HTTP header value."
    }
  },
  UPDATE: {
    id: "The ID of the audit log stream to update.",
    url: "The HTTP URL to push logs to.",
    headers: {
      desc: "The HTTP headers attached to the external provider requests.",
      key: "The HTTP header key name.",
      value: "The HTTP header value."
    }
  },
  DELETE: {
    id: "The ID of the audit log stream to delete."
  },
  GET_BY_ID: {
    id: "The ID of the audit log stream to get details of."
  }
};
@@ -119,6 +119,7 @@ const envSchema = z
  })
  .transform((data) => ({
    ...data,
    isCloud: Boolean(data.LICENSE_SERVER_KEY),
    isSmtpConfigured: Boolean(data.SMTP_HOST),
    isRedisConfigured: Boolean(data.REDIS_URL),
    isDevelopmentMode: data.NODE_ENV === "development",
@@ -17,7 +17,7 @@ export type TOrgPermission = {
  actorId: string;
  orgId: string;
  actorAuthMethod: ActorAuthMethod;
-  actorOrgId: string | undefined;
+  actorOrgId: string;
};

export type TProjectPermission = {
@@ -1 +1,2 @@
export { isDisposableEmail } from "./validate-email";
export { validateLocalIps } from "./validate-url";

backend/src/lib/validator/validate-url.ts (new file, 18 lines)
@@ -0,0 +1,18 @@
import { getConfig } from "../config/env";
import { BadRequestError } from "../errors";

export const validateLocalIps = (url: string) => {
  const validUrl = new URL(url);
  const appCfg = getConfig();
  // on cloud, local IPs are not allowed
  if (
    appCfg.isCloud &&
    (validUrl.host === "host.docker.internal" ||
      validUrl.host.match(/^10\.\d+\.\d+\.\d+/) ||
      validUrl.host.match(/^192\.168\.\d+\.\d+/))
  )
    throw new BadRequestError({ message: "Local IPs not allowed as URL" });

  if (validUrl.host === "localhost" || validUrl.host === "127.0.0.1")
    throw new BadRequestError({ message: "Localhost not allowed" });
};
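Illustrative behavior of validateLocalIps (URLs invented):

validateLocalIps("https://logs.example.com/ingest"); // passes silently
validateLocalIps("http://localhost/hook"); // throws BadRequestError regardless of deployment
validateLocalIps("http://192.168.1.10/hook"); // throws only when appCfg.isCloud is true

One caveat worth noting: URL.host includes the port, so a URL like http://localhost:3000 yields host "localhost:3000" and slips past the literal comparisons above; URL.hostname would match.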
@@ -36,7 +36,7 @@ export const writeLimit: RateLimitOptions = {
export const secretsLimit: RateLimitOptions = {
  // secrets, folders, secret imports
  timeWindow: 60 * 1000,
-  max: 600,
+  max: 1000,
  keyGenerator: (req) => req.realIp
};
@@ -5,6 +5,8 @@ import { registerV1EERoutes } from "@app/ee/routes/v1";
import { auditLogDALFactory } from "@app/ee/services/audit-log/audit-log-dal";
import { auditLogQueueServiceFactory } from "@app/ee/services/audit-log/audit-log-queue";
import { auditLogServiceFactory } from "@app/ee/services/audit-log/audit-log-service";
import { auditLogStreamDALFactory } from "@app/ee/services/audit-log-stream/audit-log-stream-dal";
import { auditLogStreamServiceFactory } from "@app/ee/services/audit-log-stream/audit-log-stream-service";
import { dynamicSecretDALFactory } from "@app/ee/services/dynamic-secret/dynamic-secret-dal";
import { dynamicSecretServiceFactory } from "@app/ee/services/dynamic-secret/dynamic-secret-service";
import { buildDynamicSecretProviders } from "@app/ee/services/dynamic-secret/providers";
@@ -18,6 +20,7 @@ import { identityProjectAdditionalPrivilegeDALFactory } from "@app/ee/services/i
import { identityProjectAdditionalPrivilegeServiceFactory } from "@app/ee/services/identity-project-additional-privilege/identity-project-additional-privilege-service";
import { ldapConfigDALFactory } from "@app/ee/services/ldap-config/ldap-config-dal";
import { ldapConfigServiceFactory } from "@app/ee/services/ldap-config/ldap-config-service";
import { ldapGroupMapDALFactory } from "@app/ee/services/ldap-config/ldap-group-map-dal";
import { licenseDALFactory } from "@app/ee/services/license/license-dal";
import { licenseServiceFactory } from "@app/ee/services/license/license-service";
import { permissionDALFactory } from "@app/ee/services/permission/permission-dal";
@@ -192,6 +195,7 @@ export const registerRoutes = async (
  const identityUaClientSecretDAL = identityUaClientSecretDALFactory(db);

  const auditLogDAL = auditLogDALFactory(db);
  const auditLogStreamDAL = auditLogStreamDALFactory(db);
  const trustedIpDAL = trustedIpDALFactory(db);
  const telemetryDAL = telemetryDALFactory(db);

@@ -200,6 +204,7 @@ export const registerRoutes = async (
  const samlConfigDAL = samlConfigDALFactory(db);
  const scimDAL = scimDALFactory(db);
  const ldapConfigDAL = ldapConfigDALFactory(db);
  const ldapGroupMapDAL = ldapGroupMapDALFactory(db);
  const sapApproverDAL = secretApprovalPolicyApproverDALFactory(db);
  const secretApprovalPolicyDAL = secretApprovalPolicyDALFactory(db);
  const secretApprovalRequestDAL = secretApprovalRequestDALFactory(db);
@@ -241,9 +246,15 @@ export const registerRoutes = async (
    auditLogDAL,
    queueService,
    projectDAL,
-    licenseService
+    licenseService,
+    auditLogStreamDAL
  });
  const auditLogService = auditLogServiceFactory({ auditLogDAL, permissionService, auditLogQueue });
  const auditLogStreamService = auditLogStreamServiceFactory({
    licenseService,
    permissionService,
    auditLogStreamDAL
  });
  const sapService = secretApprovalPolicyServiceFactory({
    projectMembershipDAL,
    projectEnvDAL,
@@ -300,8 +311,15 @@ export const registerRoutes = async (

  const ldapService = ldapConfigServiceFactory({
    ldapConfigDAL,
    ldapGroupMapDAL,
    orgDAL,
    orgBotDAL,
    groupDAL,
    groupProjectDAL,
    projectKeyDAL,
    projectDAL,
    projectBotDAL,
    userGroupMembershipDAL,
    userDAL,
    userAliasDAL,
    permissionService,
@@ -706,6 +724,7 @@ export const registerRoutes = async (
    saml: samlService,
    ldap: ldapService,
    auditLog: auditLogService,
    auditLogStream: auditLogStreamService,
    secretScanning: secretScanningService,
    license: licenseService,
    trustedIp: trustedIpService,
@@ -69,3 +69,10 @@ export const SanitizedDynamicSecretSchema = DynamicSecretsSchema.omit({
  keyEncoding: true,
  algorithm: true
});

export const SanitizedAuditLogStreamSchema = z.object({
  id: z.string(),
  url: z.string(),
  createdAt: z.date(),
  updatedAt: z.date()
});
@@ -1,3 +1,4 @@
import slugify from "@sindresorhus/slugify";
import { z } from "zod";

import { ProjectEnvironmentsSchema } from "@app/db/schemas";
@@ -26,7 +27,13 @@ export const registerProjectEnvRouter = async (server: FastifyZodProvider) => {
      }),
      body: z.object({
        name: z.string().trim().describe(ENVIRONMENTS.CREATE.name),
-        slug: z.string().trim().describe(ENVIRONMENTS.CREATE.slug)
+        slug: z
+          .string()
+          .trim()
+          .refine((v) => slugify(v) === v, {
+            message: "Slug must be a valid slug"
+          })
+          .describe(ENVIRONMENTS.CREATE.slug)
      }),
      response: {
        200: z.object({
@@ -84,7 +91,14 @@ export const registerProjectEnvRouter = async (server: FastifyZodProvider) => {
        id: z.string().trim().describe(ENVIRONMENTS.UPDATE.id)
      }),
      body: z.object({
-        slug: z.string().trim().optional().describe(ENVIRONMENTS.UPDATE.slug),
+        slug: z
+          .string()
+          .trim()
+          .optional()
+          .refine((v) => !v || slugify(v) === v, {
+            message: "Slug must be a valid slug"
+          })
+          .describe(ENVIRONMENTS.UPDATE.slug),
        name: z.string().trim().optional().describe(ENVIRONMENTS.UPDATE.name),
        position: z.number().optional().describe(ENVIRONMENTS.UPDATE.position)
      }),
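The refine accepts a value only when slugifying it is a no-op, i.e. the input is already a valid slug; a quick standalone illustration:

import slugify from "@sindresorhus/slugify";

slugify("staging-eu") === "staging-eu"; // true  -> accepted
slugify("Staging EU") === "Staging EU"; // false ("staging-eu") -> rejected with "Slug must be a valid slug"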
@@ -76,6 +76,7 @@ export const registerOrgRouter = async (server: FastifyZodProvider) => {
        .object({
          id: z.string(),
          name: z.string(),
          slug: z.string(),
          organization: z.string(),
          environments: z
            .object({
@@ -166,6 +166,11 @@ export const registerSecretRouter = async (server: FastifyZodProvider) => {
        workspaceSlug: z.string().trim().optional().describe(RAW_SECRETS.LIST.workspaceSlug),
        environment: z.string().trim().optional().describe(RAW_SECRETS.LIST.environment),
        secretPath: z.string().trim().default("/").transform(removeTrailingSlash).describe(RAW_SECRETS.LIST.secretPath),
        expandSecretReferences: z
          .enum(["true", "false"])
          .default("false")
          .transform((value) => value === "true")
          .describe(RAW_SECRETS.LIST.expand),
        recursive: z
          .enum(["true", "false"])
          .default("false")
@@ -233,6 +238,7 @@ export const registerSecretRouter = async (server: FastifyZodProvider) => {
        actor: req.permission.type,
        actorOrgId: req.permission.orgId,
        environment,
        expandSecretReferences: req.query.expandSecretReferences,
        actorAuthMethod: req.permission.authMethod,
        projectId: workspaceId,
        path: secretPath,
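The new expandSecretReferences query param follows the same boolean-query-param pattern as recursive below it: query strings arrive as text, so the enum constrains the raw value and the transform yields a real boolean. A standalone illustration:

import { z } from "zod";

const boolQuery = z.enum(["true", "false"]).default("false").transform((v) => v === "true");

boolQuery.parse(undefined); // => false (default applies before the transform)
boolQuery.parse("true"); // => true
boolQuery.safeParse("yes").success; // => false (anything but "true"/"false" is rejected)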
@@ -191,7 +191,7 @@ export const authLoginServiceFactory = ({
      const decodedProviderToken = validateProviderAuthToken(providerAuthToken, email);

      authMethod = decodedProviderToken.authMethod;
-      if (isAuthMethodSaml(authMethod) && decodedProviderToken.orgId) {
+      if ((isAuthMethodSaml(authMethod) || authMethod === AuthMethod.LDAP) && decodedProviderToken.orgId) {
        organizationId = decodedProviderToken.orgId;
      }
    }
@@ -4,6 +4,7 @@ import { OrgMembershipStatus } from "@app/db/schemas";
import { convertPendingGroupAdditionsToGroupMemberships } from "@app/ee/services/group/group-fns";
import { TUserGroupMembershipDALFactory } from "@app/ee/services/group/user-group-membership-dal";
import { TLicenseServiceFactory } from "@app/ee/services/license/license-service";
import { isAuthMethodSaml } from "@app/ee/services/permission/permission-fns";
import { getConfig } from "@app/lib/config/env";
import { BadRequestError } from "@app/lib/errors";
import { isDisposableEmail } from "@app/lib/validator";
@@ -81,7 +82,7 @@ export const authSignupServiceFactory = ({
    await smtpService.sendMail({
      template: SmtpTemplates.EmailVerification,
      subjectLine: "Infisical confirmation code",
-      recipients: [email],
+      recipients: [user.email as string],
      substitutions: {
        code: token
      }
@@ -139,9 +140,11 @@ export const authSignupServiceFactory = ({
      throw new Error("Failed to complete account for complete user");
    }

-    let organizationId;
+    let organizationId: string | null = null;
+    let authMethod: AuthMethod | null = null;
    if (providerAuthToken) {
-      const { orgId } = validateProviderAuthToken(providerAuthToken, user.username);
+      const { orgId, authMethod: userAuthMethod } = validateProviderAuthToken(providerAuthToken, user.username);
+      authMethod = userAuthMethod;
      organizationId = orgId;
    } else {
      validateSignUpAuthorization(authorization, user.id);
@@ -165,6 +168,26 @@ export const authSignupServiceFactory = ({
        },
        tx
      );
      // If it's SAML Auth and the organization ID is present, we should check if the user has a pending invite for this org, and accept it
      if (isAuthMethodSaml(authMethod) && organizationId) {
        const [pendingOrgMembership] = await orgDAL.findMembership({
          inviteEmail: email,
          userId: user.id,
          status: OrgMembershipStatus.Invited,
          orgId: organizationId
        });

        if (pendingOrgMembership) {
          await orgDAL.updateMembershipById(
            pendingOrgMembership.id,
            {
              status: OrgMembershipStatus.Accepted
            },
            tx
          );
        }
      }

      return { info: us, key: userEncKey };
    });
@@ -566,20 +566,32 @@ export const integrationAuthServiceFactory = ({
      }
    });
    const kms = new AWS.KMS();

    const aliases = await kms.listAliases({}).promise();
-    const keys = await kms.listKeys({}).promise();
-    const response = keys
-      .Keys!.map((key) => {
-        const keyAlias = aliases.Aliases!.find((alias) => key.KeyId === alias.TargetKeyId);
-        if (!keyAlias?.AliasName?.includes("alias/aws/") || keyAlias?.AliasName?.includes("alias/aws/secretsmanager")) {
-          return { id: String(key.KeyId), alias: String(keyAlias?.AliasName || key.KeyId) };
-        }
-        return { id: "null", alias: "null" };
-      })
-      .filter((elem) => elem.id !== "null");
-
-    return response;
+    const keyAliases = aliases.Aliases!.filter((alias) => {
+      if (!alias.TargetKeyId) return false;
+
+      if (integrationAuth.integration === Integrations.AWS_PARAMETER_STORE && alias.AliasName === "alias/aws/ssm")
+        return true;
+
+      if (
+        integrationAuth.integration === Integrations.AWS_SECRET_MANAGER &&
+        alias.AliasName === "alias/aws/secretsmanager"
+      )
+        return true;
+
+      if (alias.AliasName?.includes("alias/aws/")) return false;
+      return alias.TargetKeyId;
+    });
+
+    const keysWithAliases = keyAliases.map((alias) => {
+      return {
+        id: alias.TargetKeyId!,
+        alias: alias.AliasName!
+      };
+    });
+
+    return keysWithAliases;
  };

  const getQoveryProjects = async ({
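The rewrite inverts the old approach: rather than listing every KMS key and mapping each back to an alias, it filters the alias list directly. The selection rules, distilled into a standalone predicate (the parameter names are stand-ins for the fields used above):

const isSelectable = (aliasName: string, hasTargetKey: boolean, service: "ssm" | "secretsmanager") => {
  if (!hasTargetKey) return false; // alias not pointing at a key
  if (aliasName === `alias/aws/${service}`) return true; // the integration's own AWS-managed default key
  if (aliasName.includes("alias/aws/")) return false; // AWS-managed keys for other services are irrelevant
  return true; // customer-managed aliases are always offered
};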
@@ -458,7 +458,7 @@ const syncSecretsAWSParameterStore = async ({
  });
  ssm.config.update(config);

-  const metadata = z.record(z.any()).parse(integration.metadata);
+  const metadata = z.record(z.any()).parse(integration.metadata || {});

  const params = {
    Path: integration.path as string,
@@ -477,24 +477,29 @@ const syncSecretsAWSParameterStore = async ({
    }),
    {} as Record<string, AWS.SSM.Parameter>
  );

  // Identify secrets to create
  await Promise.all(
    Object.keys(secrets).map(async (key) => {
      if (!(key in awsParameterStoreSecretsObj)) {
        // case: secret does not exist in AWS parameter store
        // -> create secret
-        await ssm
-          .putParameter({
-            Name: `${integration.path}${key}`,
-            Type: "SecureString",
-            Value: secrets[key].value,
-            // Overwrite: true,
-            Tags: metadata.secretAWSTag
-              ? metadata.secretAWSTag.map((tag: { key: string; value: string }) => ({ Key: tag.key, Value: tag.value }))
-              : []
-          })
-          .promise();
+        if (secrets[key].value) {
+          await ssm
+            .putParameter({
+              Name: `${integration.path}${key}`,
+              Type: "SecureString",
+              Value: secrets[key].value,
+              ...(metadata.kmsKeyId && { KeyId: metadata.kmsKeyId }),
+              // Overwrite: true,
+              Tags: metadata.secretAWSTag
+                ? metadata.secretAWSTag.map((tag: { key: string; value: string }) => ({
+                    Key: tag.key,
+                    Value: tag.value
+                  }))
+                : []
+            })
+            .promise();
+        }
        // case: secret exists in AWS parameter store
      } else if (awsParameterStoreSecretsObj[key].Value !== secrets[key].value) {
        // case: secret value doesn't match one in AWS parameter store
@@ -544,7 +549,7 @@ const syncSecretsAWSSecretManager = async ({
}) => {
  let secretsManager;
  const secKeyVal = getSecretKeyValuePair(secrets);
-  const metadata = z.record(z.any()).parse(integration.metadata);
+  const metadata = z.record(z.any()).parse(integration.metadata || {});
  try {
    if (!accessId) return;

@@ -567,7 +572,6 @@ const syncSecretsAWSSecretManager = async ({
    if (awsSecretManagerSecret?.SecretString) {
      awsSecretManagerSecretObj = JSON.parse(awsSecretManagerSecret.SecretString);
    }
-
    if (!isEqual(awsSecretManagerSecretObj, secKeyVal)) {
      await secretsManager.send(
        new UpdateSecretCommand({
@@ -582,7 +586,7 @@ const syncSecretsAWSSecretManager = async ({
      new CreateSecretCommand({
        Name: integration.app as string,
        SecretString: JSON.stringify(secKeyVal),
-        KmsKeyId: metadata.kmsKeyId ? metadata.kmsKeyId : null,
+        ...(metadata.kmsKeyId && { KmsKeyId: metadata.kmsKeyId }),
        Tags: metadata.secretAWSTag
          ? metadata.secretAWSTag.map((tag: { key: string; value: string }) => ({ Key: tag.key, Value: tag.value }))
          : []
@@ -1,33 +1,66 @@
-import { SecretType, TSecretImports } from "@app/db/schemas";
+import { SecretType, TSecretImports, TSecrets } from "@app/db/schemas";
import { groupBy } from "@app/lib/fn";

import { TSecretDALFactory } from "../secret/secret-dal";
import { TSecretFolderDALFactory } from "../secret-folder/secret-folder-dal";
import { TSecretImportDALFactory } from "./secret-import-dal";

type TSecretImportSecrets = {
  secretPath: string;
  environment: string;
  environmentInfo: {
    id: string;
    slug: string;
    name: string;
  };
  folderId: string | undefined;
  importFolderId: string;
  secrets: (TSecrets & { workspace: string; environment: string; _id: string })[];
};

const LEVEL_BREAK = 10;
const getImportUniqKey = (envSlug: string, path: string) => `${envSlug}=${path}`;
export const fnSecretsFromImports = async ({
-  allowedImports,
+  allowedImports: possibleCyclicImports,
  folderDAL,
-  secretDAL
+  secretDAL,
+  secretImportDAL,
+  depth = 0,
+  cyclicDetector = new Set()
}: {
  allowedImports: (Omit<TSecretImports, "importEnv"> & {
    importEnv: { id: string; slug: string; name: string };
  })[];
  folderDAL: Pick<TSecretFolderDALFactory, "findByManySecretPath">;
  secretDAL: Pick<TSecretDALFactory, "find">;
  secretImportDAL: Pick<TSecretImportDALFactory, "findByFolderIds">;
  depth?: number;
  cyclicDetector?: Set<string>;
}) => {
-  const importedFolders = await folderDAL.findByManySecretPath(
-    allowedImports.map(({ importEnv, importPath }) => ({
-      envId: importEnv.id,
-      secretPath: importPath
-    }))
-  );
-  const folderIds = importedFolders.map((el) => el?.id).filter(Boolean) as string[];
-  if (!folderIds.length) {
+  // avoid recursing deeper than the allowed depth
+  if (depth >= LEVEL_BREAK) return [];
+
+  const allowedImports = possibleCyclicImports.filter(
+    ({ importPath, importEnv }) => !cyclicDetector.has(getImportUniqKey(importEnv.slug, importPath))
+  );
+
+  const importedFolders = (
+    await folderDAL.findByManySecretPath(
+      allowedImports.map(({ importEnv, importPath }) => ({
+        envId: importEnv.id,
+        secretPath: importPath
+      }))
+    )
+  ).filter(Boolean); // remove undefined ones
+  if (!importedFolders.length) {
    return [];
  }

  const importedFolderIds = importedFolders.map((el) => el?.id) as string[];
  const importedFolderGroupBySourceImport = groupBy(importedFolders, (i) => `${i?.envId}-${i?.path}`);
  const importedSecrets = await secretDAL.find(
    {
-      $in: { folderId: folderIds },
+      $in: { folderId: importedFolderIds },
      type: SecretType.Shared
    },
    {
@@ -35,18 +68,50 @@ export const fnSecretsFromImports = async ({
    }
  );

-  const importedSecsGroupByFolderId = groupBy(importedSecrets, (i) => i.folderId);
-  return allowedImports.map(({ importPath, importEnv }, i) => ({
-    secretPath: importPath,
-    environment: importEnv.slug,
-    environmentInfo: importEnv,
-    folderId: importedFolders?.[i]?.id,
-    // this will ensure for cases when secrets are empty. Could be due to a missing folder for a path or empty secrets inside a given path
-    secrets: (importedSecsGroupByFolderId?.[importedFolders?.[i]?.id as string] || []).map((item) => ({
-      ...item,
-      workspace: "", // This field should not be used, it's only here to keep the older Python SDK versions backwards compatible with the new Postgres backend.
-      _id: item.id // The old Python SDK depends on the _id field being returned. We return this to keep the older Python SDK versions backwards compatible with the new Postgres backend.
-    }))
-  }));
+  const importedSecretsGroupByFolderId = groupBy(importedSecrets, (i) => i.folderId);
+
+  allowedImports.forEach(({ importPath, importEnv }) => {
+    cyclicDetector.add(getImportUniqKey(importEnv.slug, importPath));
+  });
+  // now we need to check recursively deeper imports made inside other imports
+  // we go level-wise, meaning we take all imports of a tree level and then go deeper level by level
+  const deeperImports = await secretImportDAL.findByFolderIds(importedFolderIds);
+  let secretsFromDeeperImports: TSecretImportSecrets[] = [];
+  if (deeperImports.length) {
+    secretsFromDeeperImports = await fnSecretsFromImports({
+      allowedImports: deeperImports,
+      secretImportDAL,
+      folderDAL,
+      secretDAL,
+      depth: depth + 1,
+      cyclicDetector
+    });
+  }
+  const secretsFromdeeperImportGroupedByFolderId = groupBy(secretsFromDeeperImports, (i) => i.importFolderId);
+
+  const secrets = allowedImports.map(({ importPath, importEnv, id, folderId }, i) => {
+    const sourceImportFolder = importedFolderGroupBySourceImport[`${importEnv.id}-${importPath}`][0];
+    const folderDeeperImportSecrets =
+      secretsFromdeeperImportGroupedByFolderId?.[sourceImportFolder?.id || ""]?.[0]?.secrets || [];
+
+    return {
+      secretPath: importPath,
+      environment: importEnv.slug,
+      environmentInfo: importEnv,
+      folderId: importedFolders?.[i]?.id,
+      id,
+      importFolderId: folderId,
+      // this will ensure for cases when secrets are empty. Could be due to a missing folder for a path or empty secrets inside a given path
+      secrets: (importedSecretsGroupByFolderId?.[importedFolders?.[i]?.id as string] || [])
+        .map((item) => ({
+          ...item,
+          environment: importEnv.slug,
+          workspace: "", // This field should not be used, it's only here to keep the older Python SDK versions backwards compatible with the new Postgres backend.
+          _id: item.id // The old Python SDK depends on the _id field being returned. We return this to keep the older Python SDK versions backwards compatible with the new Postgres backend.
+        }))
+        .concat(folderDeeperImportSecrets)
+    };
+  });
+
+  return secrets;
};
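Two guards make the recursion above safe: a hard depth cap (LEVEL_BREAK) and a visited set keyed by environment slug plus import path (cyclicDetector), threaded through the recursive calls so an import chain that loops back on itself is filtered out instead of revisited. The guard pattern in isolation, as a generic sketch:

// visited set plus a hard depth cap, applied to an arbitrary import graph
const walk = (node: string, edges: Map<string, string[]>, seen = new Set<string>(), depth = 0): string[] => {
  if (depth >= 10 || seen.has(node)) return []; // runaway depth or a cycle
  seen.add(node);
  const children = edges.get(node) || [];
  return [node, ...children.flatMap((c) => walk(c, edges, seen, depth + 1))];
};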
@@ -290,7 +290,7 @@ export const secretImportServiceFactory = ({
        })
      )
    );
-    return fnSecretsFromImports({ allowedImports, folderDAL, secretDAL });
+    return fnSecretsFromImports({ allowedImports, folderDAL, secretDAL, secretImportDAL });
  };

  return {
@@ -575,7 +575,11 @@ export const createManySecretsRawFnFactory = ({
  await projectDAL.checkProjectUpgradeStatus(projectId);

  const folder = await folderDAL.findBySecretPath(projectId, environment, secretPath);
-  if (!folder) throw new BadRequestError({ message: "Folder not found", name: "Create secret" });
+  if (!folder)
+    throw new BadRequestError({
+      message: "Folder not found for the given environment slug & secret path",
+      name: "Create secret"
+    });
  const folderId = folder.id;

  const blindIndexCfg = await secretBlindIndexDAL.findOne({ projectId });
@@ -680,7 +684,11 @@ export const updateManySecretsRawFnFactory = ({
  await projectDAL.checkProjectUpgradeStatus(projectId);

  const folder = await folderDAL.findBySecretPath(projectId, environment, secretPath);
-  if (!folder) throw new BadRequestError({ message: "Folder not found", name: "Update secret" });
+  if (!folder)
+    throw new BadRequestError({
+      message: "Folder not found for the given environment slug & secret path",
+      name: "Update secret"
+    });
  const folderId = folder.id;

  const blindIndexCfg = await secretBlindIndexDAL.findOne({ projectId });
@ -318,7 +318,7 @@ export const secretQueueFactory = ({
|
||||
});
|
||||
|
||||
// add the imported secrets to the current folder secrets
|
||||
content = { ...content, ...importedSecrets };
|
||||
content = { ...importedSecrets, ...content };
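// spreading `content` last means secrets defined directly in the folder now override imported ones on key conflicts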
}
}

@@ -27,6 +27,7 @@ import {
fnSecretBlindIndexCheck,
fnSecretBulkInsert,
fnSecretBulkUpdate,
interpolateSecrets,
recursivelyGetSecretPaths
} from "./secret-fns";
import { TSecretQueueFactory } from "./secret-queue";
@@ -182,7 +183,11 @@ export const secretServiceFactory = ({
await projectDAL.checkProjectUpgradeStatus(projectId);

const folder = await folderDAL.findBySecretPath(projectId, environment, path);
if (!folder) throw new BadRequestError({ message: "Folder not found", name: "Create secret" });
if (!folder)
throw new BadRequestError({
message: "Folder not found for the given environment slug & secret path",
name: "Create secret"
});
const folderId = folder.id;

const blindIndexCfg = await secretBlindIndexDAL.findOne({ projectId });
@@ -278,7 +283,11 @@ export const secretServiceFactory = ({
}

const folder = await folderDAL.findBySecretPath(projectId, environment, path);
if (!folder) throw new BadRequestError({ message: "Folder not found", name: "Create secret" });
if (!folder)
throw new BadRequestError({
message: "Folder not found for the given environment slug & secret path",
name: "Create secret"
});
const folderId = folder.id;

const blindIndexCfg = await secretBlindIndexDAL.findOne({ projectId });
@@ -394,7 +403,11 @@ export const secretServiceFactory = ({
await projectDAL.checkProjectUpgradeStatus(projectId);

const folder = await folderDAL.findBySecretPath(projectId, environment, path);
if (!folder) throw new BadRequestError({ message: "Folder not found", name: "Create secret" });
if (!folder)
throw new BadRequestError({
message: "Folder not found for the given environment slug & secret path",
name: "Create secret"
});
const folderId = folder.id;

const blindIndexCfg = await secretBlindIndexDAL.findOne({ projectId });
@@ -513,7 +526,8 @@ export const secretServiceFactory = ({
const importedSecrets = await fnSecretsFromImports({
allowedImports,
secretDAL,
folderDAL
folderDAL,
secretImportDAL
});

return {
@@ -562,7 +576,11 @@ export const secretServiceFactory = ({
subject(ProjectPermissionSub.Secrets, { environment, secretPath: path })
);
const folder = await folderDAL.findBySecretPath(projectId, environment, path);
if (!folder) throw new BadRequestError({ message: "Folder not found", name: "Create secret" });
if (!folder)
throw new BadRequestError({
message: "Folder not found for the given environment slug & secret path",
name: "Create secret"
});
const folderId = folder.id;

const secretBlindIndex = await interalGenSecBlindIndexByName(projectId, secretName);
@@ -614,7 +632,8 @@ export const secretServiceFactory = ({
const importedSecrets = await fnSecretsFromImports({
allowedImports,
secretDAL,
folderDAL
folderDAL,
secretImportDAL
});
for (let i = importedSecrets.length - 1; i >= 0; i -= 1) {
for (let j = 0; j < importedSecrets[i].secrets.length; j += 1) {
@@ -658,7 +677,11 @@ export const secretServiceFactory = ({
await projectDAL.checkProjectUpgradeStatus(projectId);

const folder = await folderDAL.findBySecretPath(projectId, environment, path);
if (!folder) throw new BadRequestError({ message: "Folder not found", name: "Create secret" });
if (!folder)
throw new BadRequestError({
message: "Folder not found for the given environment slug & secret path",
name: "Create secret"
});
const folderId = folder.id;

const blindIndexCfg = await secretBlindIndexDAL.findOne({ projectId });
@@ -727,7 +750,11 @@ export const secretServiceFactory = ({
await projectDAL.checkProjectUpgradeStatus(projectId);

const folder = await folderDAL.findBySecretPath(projectId, environment, path);
if (!folder) throw new BadRequestError({ message: "Folder not found", name: "Update secret" });
if (!folder)
throw new BadRequestError({
message: "Folder not found for the given environment slug & secret path",
name: "Update secret"
});
const folderId = folder.id;

const blindIndexCfg = await secretBlindIndexDAL.findOne({ projectId });
@@ -813,7 +840,11 @@ export const secretServiceFactory = ({
await projectDAL.checkProjectUpgradeStatus(projectId);

const folder = await folderDAL.findBySecretPath(projectId, environment, path);
if (!folder) throw new BadRequestError({ message: "Folder not found", name: "Create secret" });
if (!folder)
throw new BadRequestError({
message: "Folder not found for the given environment slug & secret path",
name: "Create secret"
});
const folderId = folder.id;

const blindIndexCfg = await secretBlindIndexDAL.findOne({ projectId });
@@ -855,6 +886,7 @@ export const secretServiceFactory = ({
actorAuthMethod,
environment,
includeImports,
expandSecretReferences,
recursive
}: TGetSecretsRawDTO) => {
const botKey = await projectBotService.getBotKey(projectId);
@@ -872,17 +904,66 @@ export const secretServiceFactory = ({
recursive
});

return {
secrets: secrets.map((el) => decryptSecretRaw(el, botKey)),
imports: (imports || [])?.map(({ secrets: importedSecrets, ...el }) => ({
...el,
secrets: importedSecrets.map((sec) =>
decryptSecretRaw(
{ ...sec, environment: el.environment, workspace: projectId, secretPath: el.secretPath },
botKey
)
const decryptedSecrets = secrets.map((el) => decryptSecretRaw(el, botKey));
const decryptedImports = (imports || [])?.map(({ secrets: importedSecrets, ...el }) => ({
...el,
secrets: importedSecrets.map((sec) =>
decryptSecretRaw(
{ ...sec, environment: el.environment, workspace: projectId, secretPath: el.secretPath },
botKey
)
}))
)
}));

if (expandSecretReferences) {
const expandSecrets = interpolateSecrets({
folderDAL,
projectId,
secretDAL,
secretEncKey: botKey
});

const batchSecretsExpand = async (
secretBatch: {
secretKey: string;
secretValue: string;
secretComment?: string;
}[]
) => {
const secretRecord: Record<
string,
{
value: string;
comment?: string;
skipMultilineEncoding?: boolean;
}
> = {};

secretBatch.forEach((decryptedSecret) => {
secretRecord[decryptedSecret.secretKey] = {
value: decryptedSecret.secretValue,
comment: decryptedSecret.secretComment
};
});

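// expandSecrets resolves secret references by mutating secretRecord in place; the resolved values are read back below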
await expandSecrets(secretRecord);

secretBatch.forEach((decryptedSecret, index) => {
// eslint-disable-next-line no-param-reassign
secretBatch[index].secretValue = secretRecord[decryptedSecret.secretKey].value;
});
};

// expand secrets
await batchSecretsExpand(decryptedSecrets);

// expand imports by batch
await Promise.all(decryptedImports.map((decryptedImport) => batchSecretsExpand(decryptedImport.secrets)));
}

return {
secrets: decryptedSecrets,
imports: decryptedImports
};
};

@@ -138,6 +138,7 @@ export type TDeleteBulkSecretDTO = {
} & TProjectPermission;

export type TGetSecretsRawDTO = {
expandSecretReferences?: boolean;
path: string;
environment: string;
includeImports?: boolean;

@@ -22,10 +22,6 @@ var folderCmd = &cobra.Command{
var getCmd = &cobra.Command{
Use: "get",
Short: "Get folders in a directory",
PersistentPreRun: func(cmd *cobra.Command, args []string) {
util.RequireLocalWorkspaceFile()
util.RequireLogin()
},
Run: func(cmd *cobra.Command, args []string) {

environmentName, _ := cmd.Flags().GetString("env")

@@ -2,7 +2,6 @@ package util

import (
"fmt"
"os"
"strings"

"github.com/Infisical/infisical-merge/packages/api"
@@ -13,13 +12,11 @@ import (

func GetAllFolders(params models.GetAllFoldersParameters) ([]models.SingleFolder, error) {

if params.InfisicalToken == "" {
params.InfisicalToken = os.Getenv(INFISICAL_TOKEN_NAME)
}

var foldersToReturn []models.SingleFolder
var folderErr error
if params.InfisicalToken == "" && params.UniversalAuthAccessToken == "" {
RequireLogin()
RequireLocalWorkspaceFile()

log.Debug().Msg("GetAllFolders: Trying to fetch folders using logged in details")

company/documentation/getting-started/introduction.mdx (new file, 97 lines)
@@ -0,0 +1,97 @@
---
title: "What is Infisical?"
sidebarTitle: "What is Infisical?"
description: "An introduction to the Infisical secret management platform."
---

Infisical is an [open-source](https://github.com/infisical/infisical) secret management platform for developers.
It provides capabilities for storing, managing, and syncing application configuration and secrets like API keys, database
credentials, and certificates across infrastructure. In addition, Infisical prevents secret leaks to git and enables secure
sharing of secrets among engineers.

Start managing secrets securely with [Infisical Cloud](https://app.infisical.com) or learn how to [host Infisical](/self-hosting/overview) yourself.

<CardGroup cols={2}>
<Card
title="Infisical Cloud"
href="https://app.infisical.com/signup"
icon="cloud"
color="#000000"
>
Get started with Infisical Cloud in just a few minutes.
</Card>
<Card
href="/self-hosting/overview"
title="Self-hosting"
icon="server"
color="#000000"
>
Self-host Infisical on your own infrastructure.
</Card>
</CardGroup>

## Why Infisical?

Infisical helps developers achieve secure centralized secret management and provides all the tools to easily manage secrets in various environments and infrastructure components. In particular, here are some of the most common points that developers mention after adopting Infisical:
- Streamlined **local development** processes (switching .env files to [Infisical CLI](/cli/commands/run) and removing secrets from developer machines).
- **Best-in-class developer experience** with an easy-to-use [Web Dashboard](/documentation/platform/project).
- Simple secret management inside **[CI/CD pipelines](/integrations/cicd/githubactions)** and staging environments.
- Secure and compliant secret management practices in **[production environments](/sdks/overview)**.
- **Facilitated workflows** around [secret change management](/documentation/platform/pr-workflows), [access requests](/documentation/platform/access-controls/access-requests), [temporary access provisioning](/documentation/platform/access-controls/temporary-access), and more.
- **Improved security posture** thanks to [secret scanning](/cli/scanning-overview), [granular access control policies](/documentation/platform/access-controls/overview), [automated secret rotation](https://infisical.com/docs/documentation/platform/secret-rotation/overview), and [dynamic secrets](/documentation/platform/dynamic-secrets/overview) capabilities.

## How does Infisical work?

To make secret management effortless and secure, Infisical structures its secret management workflows around a few core concepts, defined below.

**Identities** in Infisical are users or machines which have a certain set of roles and permissions assigned to them. Such identities are able to manage secrets in various **Clients** throughout the entire infrastructure. To do that, identities have to verify themselves through one of the available **Authentication Methods**.

As a result, the 3 main concepts that are important to understand are listed below (a minimal authentication sketch follows the list):
- **[Identities](/documentation/platform/identities/overview)**: users or machines with a set of permissions assigned to them.
- **[Clients](/integrations/platforms/kubernetes)**: Infisical-developed tools for managing secrets in various infrastructure components (e.g., [Kubernetes Operator](/integrations/platforms/kubernetes), [Infisical Agent](/integrations/platforms/infisical-agent), [CLI](/cli/usage), [SDKs](/sdks/overview), [API](/api-reference/overview/introduction), [Web Dashboard](/documentation/platform/organization)).
- **[Authentication Methods](/documentation/platform/identities/universal-auth)**: ways for Identities to authenticate inside different clients (e.g., SAML SSO for the Web Dashboard, Universal Auth for the Infisical Agent, etc.).
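
As a rough sketch of how these concepts fit together, a machine identity (an Identity) can authenticate through the CLI (a Client) using Universal Auth (an Authentication Method); the client ID, client secret, and project ID below are placeholders:

```bash
# Authenticate as a machine identity and capture its access token;
# --plain prints only the token, --silent suppresses update notices.
export INFISICAL_TOKEN=$(infisical login --method=universal-auth --client-id=<identity-client-id> --client-secret=<identity-client-secret> --silent --plain)

# Subsequent CLI commands now act as that identity.
infisical secrets --projectId=<project-id>
```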

## How to get started with Infisical?

Depending on your use case, it might be helpful to look into some of the resources and guides provided below.

<CardGroup cols={2}>
<Card href="../../cli/overview" title="Command Line Interface (CLI)" icon="square-terminal" color="#000000">
Inject secrets into any application process/environment.
</Card>
<Card
title="SDKs"
href="/documentation/getting-started/sdks"
icon="boxes-stacked"
color="#000000"
>
Fetch secrets with any programming language on demand.
</Card>
<Card href="../../integrations/platforms/docker-intro" title="Docker" icon="docker" color="#000000">
Inject secrets into Docker containers.
</Card>
<Card
href="../../integrations/platforms/kubernetes"
title="Kubernetes"
icon="server"
color="#000000"
>
Fetch and save secrets as native Kubernetes secrets.
</Card>
<Card
href="/documentation/getting-started/api"
title="REST API"
icon="cloud"
color="#000000"
>
Fetch secrets via HTTP request.
</Card>
<Card
href="/integrations/overview"
title="Native Integrations"
icon="clouds"
color="#000000"
>
Explore integrations for GitHub, Vercel, AWS, and more.
</Card>
</CardGroup>

company/favicon.png (new binary file, 1.9 KiB)
company/logo/dark.svg (new file, 6.8 KiB)
company/logo/light.svg (new file, 6.8 KiB)

company/mint.json (new file, 80 lines)
@@ -0,0 +1,80 @@
{
"name": "Infisical",
"openapi": "https://app.infisical.com/api/docs/json",
"logo": {
"dark": "/logo/dark.svg",
"light": "/logo/light.svg",
"href": "https://infisical.com"
},
"favicon": "/favicon.png",
"colors": {
"primary": "#26272b",
"light": "#97b31d",
"dark": "#A1B659",
"ultraLight": "#E7F256",
"ultraDark": "#8D9F4C",
"background": {
"light": "#ffffff",
"dark": "#0D1117"
},
"anchors": {
"from": "#000000",
"to": "#707174"
}
},
"modeToggle": {
"default": "light",
"isHidden": true
},
"feedback": {
"suggestEdit": true,
"raiseIssue": true,
"thumbsRating": true
},
"api": {
"baseUrl": ["https://app.infisical.com", "http://localhost:8080"]
},
"topbarLinks": [
{
"name": "Log In",
"url": "https://app.infisical.com/login"
}
],
"topbarCtaButton": {
"name": "Start for Free",
"url": "https://app.infisical.com/signup"
},
"tabs": [
{
"name": "Integrations",
"url": "integrations"
},
{
"name": "CLI",
"url": "cli"
},
{
"name": "API Reference",
"url": "api-reference"
},
{
"name": "SDKs",
"url": "sdks"
},
{
"name": "Changelog",
"url": "changelog"
}
],
"navigation": [
{
"group": "Getting Started",
"pages": [
"documentation/getting-started/introduction"
]
}
],
"integrations": {
"intercom": "hsg644ru"
}
}

company/style.css (new file, 142 lines)
@@ -0,0 +1,142 @@
#navbar .max-w-8xl {
max-width: 100%;
border-bottom: 1px solid #ebebeb;
background-color: #fcfcfc;
}

.max-w-8xl {
/* background-color: #f5f5f5; */
}

#sidebar {
left: 0;
padding-left: 48px;
padding-right: 30px;
border-right: 1px;
border-color: #cdd64b;
background-color: #fcfcfc;
border-right: 1px solid #ebebeb;
}

#sidebar .relative .sticky {
opacity: 0;
}

#sidebar li > div.mt-2 {
border-radius: 0;
padding: 5px;
}

#sidebar li > a.mt-2 {
border-radius: 0;
padding: 5px;
}

#sidebar li > a.leading-6 {
border-radius: 0;
padding: 0px;
}

/* #sidebar ul > div.mt-12 {
padding-top: 30px;
position: relative;
}

#sidebar ul > div.mt-12 h5 {
position: absolute;
left: -12px;
top: -0px;
} */

#header {
border-left: 1px solid #26272b;
padding-left: 16px;
padding-right: 16px;
background-color: #f5f5f5;
padding-bottom: 10px;
padding-top: 10px;
}

#content-area .mt-8 .block {
border-radius: 0;
border-width: 1px;
border-color: #ebebeb;
}

#content-area .mt-8 .rounded-xl {
border-radius: 0;
}

#content-area .mt-8 .rounded-lg {
border-radius: 0;
}

#content-area .mt-6 .rounded-xl {
border-radius: 0;
}

#content-area .mt-6 .rounded-lg {
border-radius: 0;
}

#content-area .mt-6 .rounded-md {
border-radius: 0;
}

#content-area .mt-8 .rounded-md {
border-radius: 0;
}

#content-area div.my-4 {
border-radius: 0;
border-width: 1px;
}

#content-area div.flex-1 {
/* text-transform: uppercase; */
opacity: 0.8;
font-weight: 400;
}

#content-area button {
border-radius: 0;
}

#content-area a {
border-radius: 0;
}

#content-area .not-prose {
border-radius: 0;
}

/* .eyebrow {
text-transform: uppercase;
font-weight: 400;
color: red;
} */

#content-container {
/* background-color: #f5f5f5; */
margin-top: 2rem;
}

#topbar-cta-button .group .absolute {
background-color: black;
border-radius: 0px;
}

/* #topbar-cta-button .group .absolute:hover {
background-color: white;
border-radius: 0px;
} */

#topbar-cta-button .group .flex {
margin-top: 5px;
margin-bottom: 5px;
font-size: medium;
}

.flex-1 .flex .items-center {
/* background-color: #f5f5f5; */
}

docker-swarm/.env-example (new file, 59 lines)
@@ -0,0 +1,59 @@
# Keys
# Required key for platform encryption/decryption ops
# THIS IS A SAMPLE ENCRYPTION KEY AND SHOULD NEVER BE USED FOR PRODUCTION
ENCRYPTION_KEY=6c1fe4e407b8911c104518103505b218

# JWT
# Required secrets to sign JWT tokens
# THIS IS A SAMPLE AUTH_SECRET KEY AND SHOULD NEVER BE USED FOR PRODUCTION
AUTH_SECRET=5lrMXKKWCVocS/uerPsl7V+TX/aaUaI7iDkgl3tSmLE=

DB_CONNECTION_URI=postgres://infisical:infisical@haproxy:5433/infisical?sslmode=no-verify
# Redis
REDIS_URL=redis://:123456@haproxy:6379

# Website URL
# Required
SITE_URL=http://localhost:8080

# Mail/SMTP
SMTP_HOST=
SMTP_PORT=
SMTP_NAME=
SMTP_USERNAME=
SMTP_PASSWORD=

# Integration
# Optional only if integration is used
CLIENT_ID_HEROKU=
CLIENT_ID_VERCEL=
CLIENT_ID_NETLIFY=
CLIENT_ID_GITHUB=
CLIENT_ID_GITLAB=
CLIENT_ID_BITBUCKET=
CLIENT_SECRET_HEROKU=
CLIENT_SECRET_VERCEL=
CLIENT_SECRET_NETLIFY=
CLIENT_SECRET_GITHUB=
CLIENT_SECRET_GITLAB=
CLIENT_SECRET_BITBUCKET=
CLIENT_SLUG_VERCEL=

# Sentry (optional) for monitoring errors
SENTRY_DSN=

# Infisical Cloud-specific configs
# Ignore - Not applicable for self-hosted version
POSTHOG_HOST=
POSTHOG_PROJECT_API_KEY=

# SSO-specific variables
CLIENT_ID_GOOGLE_LOGIN=
CLIENT_SECRET_GOOGLE_LOGIN=

CLIENT_ID_GITHUB_LOGIN=
CLIENT_SECRET_GITHUB_LOGIN=

CLIENT_ID_GITLAB_LOGIN=
CLIENT_SECRET_GITLAB_LOGIN=
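
# NOTE: the sample ENCRYPTION_KEY and AUTH_SECRET values above are placeholders.
# A sketch for generating your own values, assuming the same formats as the samples
# (16 random bytes hex-encoded, 32 random bytes base64-encoded):
#   openssl rand -hex 16     # -> ENCRYPTION_KEY
#   openssl rand -base64 32  # -> AUTH_SECRET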

docker-swarm/haproxy.cfg (new file, 78 lines)
@@ -0,0 +1,78 @@
global
maxconn 10000
log stdout format raw local0

defaults
log global
mode tcp
retries 3
timeout client 30m
timeout connect 10s
timeout server 30m
timeout check 5s

listen stats
mode http
bind *:7000
stats enable
stats uri /

resolvers hostdns
nameserver dns 127.0.0.11:53
resolve_retries 3
timeout resolve 1s
timeout retry 1s
hold valid 5s

frontend postgres_master
bind *:5433
default_backend postgres_master_backend

frontend postgres_replicas
bind *:5434
default_backend postgres_replica_backend

backend postgres_master_backend
option httpchk GET /master
http-check expect status 200
default-server inter 3s fall 3 rise 2 on-marked-down shutdown-sessions
server postgres-1 postgres-1:5432 check port 8008 resolvers hostdns
server postgres-2 postgres-2:5432 check port 8008 resolvers hostdns
server postgres-3 postgres-3:5432 check port 8008 resolvers hostdns

backend postgres_replica_backend
option httpchk GET /replica
http-check expect status 200
default-server inter 3s fall 3 rise 2 on-marked-down shutdown-sessions
server postgres-1 postgres-1:5432 check port 8008 resolvers hostdns
server postgres-2 postgres-2:5432 check port 8008 resolvers hostdns
server postgres-3 postgres-3:5432 check port 8008 resolvers hostdns

frontend redis_master_frontend
bind *:6379
default_backend redis_master_backend

backend redis_master_backend
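# probe each node below over TCP and route traffic only to the one currently reporting role:master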
option tcp-check
tcp-check send AUTH\ 123456\r\n
tcp-check expect string +OK
tcp-check send PING\r\n
tcp-check expect string +PONG
tcp-check send info\ replication\r\n
tcp-check expect string role:master
tcp-check send QUIT\r\n
tcp-check expect string +OK
server redis_master redis_replica0:6379 check inter 1s
server redis_replica1 redis_replica1:6379 check inter 1s
server redis_replica2 redis_replica2:6379 check inter 1s

frontend infisical_frontend
bind *:8080
default_backend infisical_backend

backend infisical_backend
option httpchk GET /api/status
http-check expect status 200
server infisical infisical:8080 check inter 1s

docker-swarm/stack.yaml (new file, 261 lines)
@@ -0,0 +1,261 @@
version: "3"

services:
  haproxy:
    image: haproxy:latest
    ports:
      - '7001:7000'
      - '5002:5433' # Postgres master
      - '5003:5434' # Postgres read
      - '6379:6379'
      - '8080:8080'
    networks:
      - infisical
    configs:
      - source: haproxy-config
        target: /usr/local/etc/haproxy/haproxy.cfg
    deploy:
      mode: global

  infisical:
    container_name: infisical-backend
    image: infisical/infisical:v0.60.1-postgres
    env_file: .env
    networks:
      - infisical
    secrets:
      - env_file
    deploy:
      replicas: 5

  etcd1:
    image: ghcr.io/zalando/spilo-16:3.2-p2
    networks:
      - infisical
    environment:
      ETCD_UNSUPPORTED_ARCH: arm64
    container_name: demo-etcd1
    deploy:
      placement:
        constraints:
          - node.labels.name == node1
    hostname: etcd1
    command: |
      etcd --name etcd1
      --listen-client-urls http://0.0.0.0:2379
      --listen-peer-urls=http://0.0.0.0:2380
      --advertise-client-urls http://etcd1:2379
      --initial-cluster=etcd1=http://etcd1:2380,etcd2=http://etcd2:2380,etcd3=http://etcd3:2380
      --initial-advertise-peer-urls=http://etcd1:2380
      --initial-cluster-state=new

  etcd2:
    image: ghcr.io/zalando/spilo-16:3.2-p2
    networks:
      - infisical
    environment:
      ETCD_UNSUPPORTED_ARCH: arm64
    container_name: demo-etcd2
    hostname: etcd2
    deploy:
      placement:
        constraints:
          - node.labels.name == node2
    command: |
      etcd --name etcd2
      --listen-client-urls http://0.0.0.0:2379
      --listen-peer-urls=http://0.0.0.0:2380
      --advertise-client-urls http://etcd2:2379
      --initial-cluster=etcd1=http://etcd1:2380,etcd2=http://etcd2:2380,etcd3=http://etcd3:2380
      --initial-advertise-peer-urls=http://etcd2:2380
      --initial-cluster-state=new

  etcd3:
    image: ghcr.io/zalando/spilo-16:3.2-p2
    networks:
      - infisical
    environment:
      ETCD_UNSUPPORTED_ARCH: arm64
    container_name: demo-etcd3
    hostname: etcd3
    deploy:
      placement:
        constraints:
          - node.labels.name == node3
    command: |
      etcd --name etcd3
      --listen-client-urls http://0.0.0.0:2379
      --listen-peer-urls=http://0.0.0.0:2380
      --advertise-client-urls http://etcd3:2379
      --initial-cluster=etcd1=http://etcd1:2380,etcd2=http://etcd2:2380,etcd3=http://etcd3:2380
      --initial-advertise-peer-urls=http://etcd3:2380
      --initial-cluster-state=new

  spolo1:
    image: ghcr.io/zalando/spilo-16:3.2-p2
    container_name: postgres-1
    networks:
      - infisical
    hostname: postgres-1
    environment:
      ETCD_HOSTS: etcd1:2379,etcd2:2379,etcd3:2379
      PGPASSWORD_SUPERUSER: "postgres"
      PGUSER_SUPERUSER: "postgres"
      SCOPE: infisical
    volumes:
      - postgres_data1:/home/postgres/pgdata
    deploy:
      placement:
        constraints:
          - node.labels.name == node1

  spolo2:
    image: ghcr.io/zalando/spilo-16:3.2-p2
    container_name: postgres-2
    networks:
      - infisical
    hostname: postgres-2
    environment:
      ETCD_HOSTS: etcd1:2379,etcd2:2379,etcd3:2379
      PGPASSWORD_SUPERUSER: "postgres"
      PGUSER_SUPERUSER: "postgres"
      SCOPE: infisical
    volumes:
      - postgres_data2:/home/postgres/pgdata
    deploy:
      placement:
        constraints:
          - node.labels.name == node2

  spolo3:
    image: ghcr.io/zalando/spilo-16:3.2-p2
    container_name: postgres-3
    networks:
      - infisical
    hostname: postgres-3
    environment:
      ETCD_HOSTS: etcd1:2379,etcd2:2379,etcd3:2379
      PGPASSWORD_SUPERUSER: "postgres"
      PGUSER_SUPERUSER: "postgres"
      SCOPE: infisical
    volumes:
      - postgres_data3:/home/postgres/pgdata
    deploy:
      placement:
        constraints:
          - node.labels.name == node3

  redis_replica0:
    image: bitnami/redis:6.2.10
    environment:
      - REDIS_REPLICATION_MODE=master
      - REDIS_PASSWORD=123456
    networks:
      - infisical
    deploy:
      placement:
        constraints:
          - node.labels.name == node1

  redis_replica1:
    image: bitnami/redis:6.2.10
    environment:
      - REDIS_REPLICATION_MODE=slave
      - REDIS_MASTER_HOST=redis_replica0
      - REDIS_MASTER_PORT_NUMBER=6379
      - REDIS_MASTER_PASSWORD=123456
      - REDIS_PASSWORD=123456
    networks:
      - infisical
    deploy:
      placement:
        constraints:
          - node.labels.name == node2

  redis_replica2:
    image: bitnami/redis:6.2.10
    environment:
      - REDIS_REPLICATION_MODE=slave
      - REDIS_MASTER_HOST=redis_replica0
      - REDIS_MASTER_PORT_NUMBER=6379
      - REDIS_MASTER_PASSWORD=123456
      - REDIS_PASSWORD=123456
    networks:
      - infisical
    deploy:
      placement:
        constraints:
          - node.labels.name == node3

  redis_sentinel1:
    image: bitnami/redis-sentinel:6.2.10
    environment:
      - REDIS_SENTINEL_QUORUM=2
      - REDIS_SENTINEL_DOWN_AFTER_MILLISECONDS=5000
      - REDIS_SENTINEL_FAILOVER_TIMEOUT=60000
      - REDIS_SENTINEL_PORT_NUMBER=26379
      - REDIS_MASTER_HOST=redis_replica1
      - REDIS_MASTER_PORT_NUMBER=6379
      - REDIS_MASTER_PASSWORD=123456
    networks:
      - infisical
    deploy:
      placement:
        constraints:
          - node.labels.name == node1

  redis_sentinel2:
    image: bitnami/redis-sentinel:6.2.10
    environment:
      - REDIS_SENTINEL_QUORUM=2
      - REDIS_SENTINEL_DOWN_AFTER_MILLISECONDS=5000
      - REDIS_SENTINEL_FAILOVER_TIMEOUT=60000
      - REDIS_SENTINEL_PORT_NUMBER=26379
      - REDIS_MASTER_HOST=redis_replica1
      - REDIS_MASTER_PORT_NUMBER=6379
      - REDIS_MASTER_PASSWORD=123456
    networks:
      - infisical
    deploy:
      placement:
        constraints:
          - node.labels.name == node2

  redis_sentinel3:
    image: bitnami/redis-sentinel:6.2.10
    environment:
      - REDIS_SENTINEL_QUORUM=2
      - REDIS_SENTINEL_DOWN_AFTER_MILLISECONDS=5000
      - REDIS_SENTINEL_FAILOVER_TIMEOUT=60000
      - REDIS_SENTINEL_PORT_NUMBER=26379
      - REDIS_MASTER_HOST=redis_replica1
      - REDIS_MASTER_PORT_NUMBER=6379
      - REDIS_MASTER_PASSWORD=123456
    networks:
      - infisical
    deploy:
      placement:
        constraints:
          - node.labels.name == node3

networks:
  infisical:

volumes:
  postgres_data1:
  postgres_data2:
  postgres_data3:
  postgres_data4:
  redis0:
  redis1:
  redis2:

configs:
  haproxy-config:
    file: ./haproxy.cfg

secrets:
  env_file:
    file: .env
@@ -4,7 +4,7 @@ openapi: "GET /api/v2/service-token"
---

<Warning>
This endpoint will be deprecated in the near future with the removal of service tokens in Q1/Q2 2024.
This endpoint is deprecated and will be removed in the future.

We recommend switching to using [identities](/documentation/platform/identities/overview) if your client supports it.
We recommend switching to using [Machine Identities](/documentation/platform/identities/machine-identities).
</Warning>

@@ -16,36 +16,48 @@ Export environment variables from the platform into a file format.
<Accordion title="infisical export" defaultOpen="true">
Use this command to export environment variables from the platform into raw file formats

```bash
$ infisical export

# Export variables to a .env file
infisical export > .env

# Export variables to a .env file (with export keyword)
infisical export --format=dotenv-export > .env

# Export variables to a CSV file
infisical export --format=csv > secrets.csv

# Export variables to a JSON file
infisical export --format=json > secrets.json

# Export variables to a YAML file
infisical export --format=yaml > secrets.yaml

# Render secrets using a custom template file
infisical export --template=<path to template>
```

### Environment variables

<Accordion title="INFISICAL_TOKEN">
Used to fetch secrets via a [service token](/documentation/platform/token) as opposed to logged-in credentials. Simply export this variable in the terminal before running this command.
Used to fetch secrets via a [machine identity](/documentation/platform/identities/machine-identities) as opposed to logged-in credentials. Simply export this variable in the terminal before running this command.

```bash
# Example
export INFISICAL_TOKEN=st.63e03c4a97cb4a747186c71e.ed5b46a34c078a8f94e8228f4ab0ff97.4f7f38034811995997d72badf44b42ec
export INFISICAL_TOKEN=$(infisical login --method=universal-auth --client-id=<identity-client-id> --client-secret=<identity-client-secret> --silent --plain) # --plain flag will output only the token, so it can be fed to an environment variable. --silent will disable any update messages.
```

<Info>
Alternatively, you may use service tokens.

Please note, however, that service tokens are being deprecated in favor of [machine identities](/documentation/platform/identities/machine-identities). They will be removed in the future in accordance with the deprecation notice and timeline stated [here](https://infisical.com/blog/deprecating-api-keys).
```bash
# Example
export INFISICAL_TOKEN=<service-token>
```

</Info>
</Accordion>

<Accordion title="INFISICAL_DISABLE_UPDATE_CHECK">
@@ -54,16 +66,18 @@ Export environment variables from the platform into a file format.
To use, simply export this variable in the terminal before running this command.

```bash
# Example
export INFISICAL_DISABLE_UPDATE_CHECK=true
```

</Accordion>

### Flags

<Accordion title="--template">
The `--template` flag specifies the path to the template file used for rendering secrets. When using templates, you can omit the other format flags.

```text my-template-file
{{$secrets := secret "<infisical-project-id>" "<environment-slug>" "<folder-path>"}}
{{$length := len $secrets}}
{{- "{"}}
@@ -73,24 +87,26 @@ Export environment variables from the platform into a file format.
{{- end }}
{{- end }}
{{ "}" -}}
```

```bash
# Example
infisical export --template="/path/to/template/file"
```

</Accordion>
<Accordion title="--env">
Used to set the environment that secrets are pulled from.

```bash
# Example
infisical export --env=prod
```

Note: this flag only accepts environment slug names, not the fully qualified name. To view the slug name of an environment, visit the project settings page.

Default value: `dev`

</Accordion>

<Accordion title="--projectId">
@@ -98,28 +114,32 @@ Export environment variables from the platform into a file format.
This flag allows you to override this behavior by explicitly defining the project to fetch your secrets from.

```bash
# Example

infisical export --projectId=XXXXXXXXXXXXXX
```

</Accordion>

<Accordion title="--expand">
Parse shell parameter expansions in your secrets (e.g., `${DOMAIN}`)
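
A sketch of turning expansion off, assuming the boolean flag syntax used by the CLI's other flags:

```bash
# Example: export secrets without resolving ${...} references
infisical export --expand=false
```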

Default value: `true`

</Accordion>

<Accordion title="--format">
Format of the output file. Accepted values: `dotenv`, `dotenv-export`, `csv`, `json`, and `yaml`

Default value: `dotenv`

</Accordion>

<Accordion title="--secret-overriding">
Prioritizes personal secrets with the same name over shared secrets

Default value: `true`

</Accordion>

<Accordion title="--path">
@@ -129,19 +149,21 @@ Export environment variables from the platform into a file format.
# Example
infisical export --path="/path/to/folder" --env=dev
```

</Accordion>

<Accordion title="--tags">
When working with tags, you can use this flag to filter and retrieve only secrets that are associated with a specific tag(s).

```bash
# Example
infisical export --tags=tag1,tag2,tag3
```

Note: you must reference the tag by its slug name, not its fully qualified name. Go to project settings to view all tag slugs.

By default, all secrets are fetched

</Accordion>

</Accordion>

@@ -11,6 +11,7 @@ description: "The command that injects your secrets into local environment"
# Example
infisical run [options] -- npm run dev
```

</Tab>

<Tab title="Chained commands">
@@ -20,6 +21,7 @@ description: "The command that injects your secrets into local environment"
# Example
infisical run [options] --command "npm run bootstrap && npm run dev start; other-bash-command"
```

</Tab>
</Tabs>

@@ -27,27 +29,38 @@ description: "The command that injects your secrets into local environment"

Inject secrets from Infisical into your application process.

## Subcommands & flags

<Accordion title="infisical run" defaultOpen="true">
Use this command to inject secrets into your application's process

```bash
$ infisical run -- <your application command>

# Example
$ infisical run -- npm run dev
```

### Environment variables

<Accordion title="INFISICAL_TOKEN">
Used to fetch secrets via a [service token](/documentation/platform/token) as opposed to logged-in credentials. Simply export this variable in the terminal before running this command.
Used to fetch secrets via a [machine identity](/documentation/platform/identities/machine-identities) as opposed to logged-in credentials. Simply export this variable in the terminal before running this command.

```bash
# Example
export INFISICAL_TOKEN=st.63e03c4a97cb4a747186c71e.ed5b46a34c078a8f94e8228f4ab0ff97.4f7f38034811995997d72badf44b42ec
export INFISICAL_TOKEN=$(infisical login --method=universal-auth --client-id=<identity-client-id> --client-secret=<identity-client-secret> --silent --plain) # --plain flag will output only the token, so it can be fed to an environment variable. --silent will disable any update messages.
```

<Info>
Alternatively, you may use service tokens.

Please note, however, that service tokens are being deprecated in favor of [machine identities](/documentation/platform/identities/machine-identities). They will be removed in the future in accordance with the deprecation notice and timeline stated [here](https://infisical.com/blog/deprecating-api-keys).
```bash
# Example
export INFISICAL_TOKEN=<service-token>
```

</Info>
</Accordion>

<Accordion title="INFISICAL_DISABLE_UPDATE_CHECK">
@@ -56,71 +69,90 @@ Inject secrets from Infisical into your application process.
To use, simply export this variable in the terminal before running this command.

```bash
# Example
export INFISICAL_DISABLE_UPDATE_CHECK=true
```

</Accordion>

### Flags

<Accordion title="--project-config-dir">
Explicitly set the directory where the .infisical.json resides. This is useful for some monorepo setups.

```bash
# Example
infisical run --project-config-dir=/some-dir -- printenv
```

</Accordion>

<Accordion title="--command">
Pass secrets into multiple commands at once

```bash
# Example
infisical run --command="npm run build && npm run dev; more-commands..."
```

</Accordion>

<Accordion title="--projectId">
The project ID to fetch secrets from. This is required when using a machine identity to authenticate.

```bash
# Example
infisical run --projectId=<project-id> -- npm run dev
```

</Accordion>

<Accordion title="--token">
If you are using a [service token](/documentation/platform/token) to authenticate, you can pass the token as a flag
If you are using a [machine identity](/documentation/platform/identities/machine-identities) to authenticate, you can pass the token as a flag

```bash
# Example
infisical run --token="st.63e03c4a97cb4a747186c71e.ed5b46a34c078a8f94e8228f4ab0ff97.4f7f38034811995997d72badf44b42ec" -- npm run start
infisical run --token="<universal-auth-access-token>" --projectId=<project-id> -- npm run start
```

You may also expose the token to the CLI by setting the environment variable `INFISICAL_TOKEN` before executing the run command. This will have the same effect as setting the token with the `--token` flag.
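
For instance, a minimal sketch using the same placeholders as the examples above:

```bash
# Example
export INFISICAL_TOKEN=<universal-auth-access-token>
infisical run --projectId=<project-id> -- npm run start
```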

</Accordion>

<Accordion title="--expand">
Turn on or off the shell parameter expansion in your secrets. If you have used shell parameters in your secret(s), activating this feature will populate them before injecting them into your application process.

Default value: `true`

</Accordion>

<Accordion title="--env">
This is used to specify the environment from which secrets should be retrieved. The accepted values are the environment slugs defined for your project, such as `dev`, `staging`, `test`, and `prod`.
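
For example:

```bash
# Example
infisical run --env=staging -- npm run dev
```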

Default value: `dev`
</Accordion>

<Accordion title="--secret-overriding">
Prioritizes personal secrets with the same name over shared secrets

Default value: `true`

</Accordion>

<Accordion title="--tags">
When working with tags, you can use this flag to filter and retrieve only secrets that are associated with a specific tag(s).

```bash
# Example
infisical run --tags=tag1,tag2,tag3 -- npm run dev
```

Note: you must reference the tag by its slug name, not its fully qualified name. Go to project settings to view all tag slugs.

By default, all secrets are fetched

</Accordion>

<Accordion title="--path">

@@ -23,13 +23,23 @@ $ infisical secrets
### Environment variables

<Accordion title="INFISICAL_TOKEN">
Used to fetch secrets via a [service token](/documentation/platform/token) as opposed to logged-in credentials. Simply export this variable in the terminal before running this command.
Used to fetch secrets via a [machine identity](/documentation/platform/identities/machine-identities) as opposed to logged-in credentials. Simply export this variable in the terminal before running this command.

```bash
# Example
export INFISICAL_TOKEN=st.63e03c4a97cb4a747186c71e.ed5b46a34c078a8f94e8228f4ab0ff97.4f7f38034811995997d72badf44b42ec
export INFISICAL_TOKEN=$(infisical login --method=universal-auth --client-id=<identity-client-id> --client-secret=<identity-client-secret> --silent --plain) # --plain flag will output only the token, so it can be fed to an environment variable. --silent will disable any update messages.
```

<Info>
Alternatively, you may use service tokens.

Please note, however, that service tokens are being deprecated in favor of [machine identities](/documentation/platform/identities/machine-identities). They will be removed in the future in accordance with the deprecation notice and timeline stated [here](https://infisical.com/blog/deprecating-api-keys).
```bash
# Example
export INFISICAL_TOKEN=<service-token>
```
</Info>

</Accordion>

<Accordion title="INFISICAL_DISABLE_UPDATE_CHECK">
@@ -53,6 +63,16 @@ $ infisical secrets

</Accordion>

<Accordion title="--projectId">
The project ID to fetch secrets from. This is required when using a machine identity to authenticate.

```bash
# Example
infisical secrets --projectId=<project-id>
```

</Accordion>

<Accordion title="--env">
Used to select the environment on which actions should be taken
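
For example:

```bash
# Example
infisical secrets --env=prod
```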

@@ -186,7 +206,7 @@ $ infisical secrets folders
</Accordion>

<Accordion title="--token">
Fetch folders using the Infisical service token
Fetch folders using a [machine identity](/documentation/platform/identities/machine-identities) access token.

Default value: ``
</Accordion>

@@ -3,37 +3,47 @@ title: "infisical service-token"
description: "Manage Infisical service tokens"
---

<Warning>
This command is deprecated and will be removed in the near future. Please
switch to using [Machine
Identities](/documentation/platform/identities/machine-identities) for
authenticating with Infisical.
</Warning>

```bash
infisical service-token create --scope=dev:/global --scope=dev:/backend --access-level=read --access-level=write
```

## Description
The Infisical `service-token` command allows you to manage service tokens for a given Infisical project.
With this command, you can create, view, and delete service tokens.

<Accordion title="service-token create" defaultOpen="true">
Use this command to create a service token

```bash
$ infisical service-token create --scope=dev:/backend/** --access-level=read --access-level=write
```

### Flags
<Accordion title="--scope">
```bash
infisical service-token create --scope=dev:/global --scope=dev:/backend/** --access-level=read
```

Use the scope flag to define which environments and paths your service token should be authorized to access.

The value of your scope flag should be in the following format: `<environment slug>:<path>`.
Here, `environment slug` refers to the slug name of the environment, and `path` indicates the folder path where your secrets are stored.

For specifying multiple scopes, you can use multiple --scope flags.

<Info>
The `path` can be a Glob pattern
</Info>

</Accordion>

<Accordion title="--projectId">
@@ -41,8 +51,9 @@ With this command, you can create, view, and delete service tokens.
infisical service-token create --scope=dev:/global --access-level=read --projectId=63cefb15c8d3175601cfa989
```

The project ID you'd like to create the service token for.
By default, the CLI will attempt to use the linked Infisical project in `.infisical.json` generated by the `infisical init` command.

</Accordion>
<Accordion title="--name">
```bash
@@ -52,6 +63,7 @@ With this command, you can create, view, and delete service tokens.
Service token name

Default: `Service token generated via CLI`

</Accordion>
<Accordion title="--expiry-seconds">
```bash
@@ -61,6 +73,7 @@ With this command, you can create, view, and delete service tokens.
Set the service token's expiration time in seconds from now. To never expire, set to zero.

Default: `1 day`

</Accordion>
<Accordion title="--access-level">
```bash
@@ -68,6 +81,7 @@ With this command, you can create, view, and delete service tokens.
```

The type of access the service token should have. Can be `read` and/or `write`

</Accordion>
<Accordion title="--token-only">
```bash
@@ -77,5 +91,6 @@ With this command, you can create, view, and delete service tokens.
When true, only the service token will be printed

Default: `false`

</Accordion>
</Accordion>

@@ -1,22 +0,0 @@
---
title: "Infisical Token"
description: "How to use an Infisical service token within the CLI."
---

Prerequisite: [Infisical Token and How to Generate One](/documentation/platform/token).

It's possible to use the CLI to sync environment variables without manually entering login credentials by using a service token from the prerequisite link above.

## Feeding Infisical Token to the CLI

The CLI looks out for an environment variable called `INFISICAL_TOKEN`, which you can set depending on where you run the CLI. If `INFISICAL_TOKEN` is detected by the CLI, it will authenticate and retrieve the environment variables which the token is authorized for.

A common use-case is to use the Infisical Token to fetch environment variables with Docker. More specifically, a token can be passed to a container as an environment variable for the CLI to authenticate and pull its corresponding secrets. Check out the integration guides for that:

- [Docker](../../integrations/platforms/docker)
- [Docker Compose](../../integrations/platforms/docker-compose)

<Info>
Once the token is expired, the CLI using it will no longer be able to make
requests with it.
</Info>
@ -1,141 +1,125 @@
|
||||
---
|
||||
title: "Quick usage"
|
||||
title: "Quickstart"
|
||||
description: "Manage secrets with Infisical CLI"
|
||||
---
|
||||
|
||||
The CLI is designed for a variety of applications, ranging from local secret management to CI/CD and production scenarios.
|
||||
The distinguishing factor, however, is the authentication method used.
|
||||
The CLI is designed for a variety of secret management applications ranging from local development to CI/CD and production scenarios.
|
||||
|
||||
<Tabs>
|
||||
<Tab title="Local development only">
|
||||
To use the Infisical CLI in your local development environment, simply run the command below and follow the interactive guide.
|
||||
<Tab title="Local development">
|
||||
In the following steps, we explore how to use the Infisical CLI to fetch back environment variables from Infisical
|
||||
and inject them into your local development process.
|
||||
|
||||
<Steps>
|
||||
<Step title="Log in with the CLI">
|
||||
Start by running the `infisical login` command to authenticate with Infisical.
|
||||
|
||||
```bash
|
||||
infisical login
|
||||
```
|
||||
<Note>
|
||||
If you are in a containerized environment such as WSL 2 or Codespaces, run `infisical login -i` to avoid browser based login
|
||||
</Note>
|
||||
</Step>
|
||||
<Step title="Initialize Infisical for your project">
|
||||
Next, navigate to your project and initialize Infisical.
|
||||
|
||||
```bash
|
||||
# navigate to your project
|
||||
cd /path/to/project
|
||||
|
||||
```bash
|
||||
infisical login
|
||||
```
|
||||
# initialize infisical
|
||||
infisical init
|
||||
```
|
||||
|
||||
<Note>
|
||||
If you are in a containerized environment such as WSL 2 or Codespaces, run `infisical login -i` to avoid browser based login
|
||||
</Note>
|
||||
The `infisical init` command creates a `.infisical.json` file, containing [local project settings](./project-config), at the location where the command is executed.
|
||||
|
||||
## Initialize Infisical for your project
|
||||
<Note>
|
||||
The `.infisical.json` file does not contain any sensitive data, so you may commit it to your git repository.
|
||||
</Note>
|
||||
</Step>
|
||||
<Step title="Inject environment variables">
|
||||
Finally, pass environment variables from Infisical into your application.
|
||||
|
||||
<Tabs>
|
||||
<Tab title="Feed secrets to your application">
|
||||
```bash
|
||||
infisical run --env=dev --path=/apps/firefly -- [your application start command] # e.g. npm run dev
|
||||
|
||||
|
||||
# example with node (nodemon)
|
||||
infisical run --env=staging --path=/apps/spotify -- nodemon index.js
|
||||
|
||||
# example with flask
|
||||
infisical run --env=prod --path=/apps/backend -- flask run
|
||||
|
||||
# example with spring boot - maven
|
||||
infisical run --env=dev --path=/apps/ -- ./mvnw spring-boot:run --quiet
|
||||
```
|
||||
|
||||
</Tab>
|
||||
<Tab title="Feed secrets via custom aliases (advanced)">
|
||||
Custom aliases can utilize secrets from Infisical. Suppose there is a custom alias `yd` in `custom.sh` that runs `yarn dev` and needs the secrets provided by Infisical.
|
||||
```bash
|
||||
#!/bin/sh
|
||||
|
||||
yd() {
|
||||
yarn dev
|
||||
}
|
||||
```
|
||||
|
||||
To make the secrets available from Infisical to `yd`, you can run the following command:
|
||||
|
||||
```bash
|
||||
infisical run --env=prod --path=/apps/reddit --command="source custom.sh && yd"
|
||||
```
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
View all available options for the `run` command [here](./commands/run).
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
|
||||
</Tab>
|
||||
|
||||
<Tab title="Staging, production & all other use case">
|
||||
To use Infisical for non-local development scenarios, please create a [service token](../documentation/platform/token). The service token will allow you to authenticate and interact with Infisical.
|
||||
Once you have created a service token with the required permissions, you'll need to feed the token to the CLI.
|
||||
<Tab title="Staging, production & all other use cases">
|
||||
In the following steps, we explore how to use the Infisical CLI in a non-local development scenario
|
||||
to fetch back environment variables and export them to a file.
|
||||
<Steps>
|
||||
<Step title="Create a machine identity and obtain credentials for it">
|
||||
Follow the steps listed [here](/documentation/platform/identities/universal-auth) to create a machine identity and obtain a **client ID** and **client secret** for it.
|
||||
</Step>
|
||||
<Step title="Obtain a machine identity access token">
|
||||
Run the following command to authenticate with Infisical using the **client ID** and **client secret** credentials from step 1 and set the `INFISICAL_TOKEN` environment variable to the retrieved access token.
|
||||
|
||||
```bash
|
||||
export INFISICAL_TOKEN=$(infisical login --method=universal-auth --client-id=<identity-client-id> --client-secret=<identity-client-secret> --silent --plain) # --plain flag will output only the token, so it can be fed to an environment variable. --silent will disable any update messages.
|
||||
```
|
||||
|
||||
The CLI is configured to look out for the `INFISICAL_TOKEN` environment variable, so going forward any command used will be authenticated.

Alternatively, assuming you have an access token on hand, you can also pass it directly to the CLI using the `--token` flag in conjunction with other CLI commands:

```bash
infisical export --token=<access-token>
infisical secrets --token=<access-token>
infisical run --token=<access-token> -- npm run dev
```
|
||||
<Info>
|
||||
Keep in mind that the machine identity access token has a limited lifetime. It is recommended to use it only for the duration of the task at hand.
|
||||
You can [refresh the token](./commands/token) if needed.
|
||||
</Info>
|
||||
</Step>
|
||||
<Step title="Export environment variables back into a file">
|
||||
Finally, export the environment variables from Infisical to a file of choice.
|
||||
|
||||
|
||||
|
||||
```bash
|
||||
# export variables to a .env file (with export keyword)
|
||||
infisical export --format=dotenv-export > .env
|
||||
|
||||
# export variables to a YAML file
|
||||
infisical export --format=yaml > secrets.yaml
|
||||
```
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
|
||||
## Inject environment variables
|
||||
<Tabs>
|
||||
<Tab title="Feed secrets to your application">
|
||||
```bash
|
||||
infisical run --env=dev --path=/apps/firefly -- [your application start command]
|
||||
|
||||
# example with node (nodemon)
|
||||
infisical run --env=staging --path=/apps/spotify -- nodemon index.js
|
||||
|
||||
# example with flask
|
||||
infisical run --env=prod --path=/apps/backend -- flask run
|
||||
|
||||
# example with spring boot - maven
|
||||
infisical run --env=dev --path=/apps/ -- ./mvnw spring-boot:run --quiet
|
||||
```
|
||||
</Tab>
|
||||
<Tab title="Feed secrets via custom aliases (advanced)">
|
||||
Custom aliases can utilize secrets from Infisical. Suppose there is a custom alias `yd` in `custom.sh` that runs `yarn dev` and needs the secrets provided by Infisical.
|
||||
```bash
|
||||
#!/bin/sh
|
||||
|
||||
yd() {
|
||||
yarn dev
|
||||
}
|
||||
```
|
||||
|
||||
To make the secrets available from Infisical to `yd`, you can run the following command:
|
||||
|
||||
```bash
|
||||
infisical run --env=prod --path=/apps/reddit --command="source custom.sh && yd"
|
||||
```
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
View all available options for the `run` command [here](./commands/run).
|
||||
|
||||
## Connect CLI to self-hosted Infisical
|
||||
|
||||
<Accordion title="Optional: point CLI to self-hosted">
|
||||
The CLI is set to connect to Infisical Cloud by default, but if you're running your own instance of Infisical, you can direct the CLI to it using one of the methods provided below.
|
||||
|
||||
#### Method 1: Use the updated CLI
|
||||
Beginning with CLI version v0.4.0, you can choose between logging in through Infisical Cloud or your own self-hosted instance. Simply execute the `infisical login` command and follow the on-screen instructions.
|
||||
|
||||
#### Method 2: Export environment variable
|
||||
You can point the CLI to the self-hosted Infisical instance by exporting the environment variable `INFISICAL_API_URL` in your terminal.
|
||||
|
||||
<Tabs>
|
||||
<Tab title="Linux/macOS">
|
||||
```bash
|
||||
# Set backend host
|
||||
export INFISICAL_API_URL="https://your-self-hosted-infisical.com/api"
|
||||
|
||||
# Remove backend host
|
||||
unset INFISICAL_API_URL
|
||||
```
|
||||
</Tab>
|
||||
<Tab title="Windows PowerShell">
|
||||
```bash
|
||||
# Set backend host
|
||||
setx INFISICAL_API_URL "https://your-self-hosted-infisical.com/api"
|
||||
|
||||
# Remove backend host
|
||||
setx INFISICAL_API_URL ""
|
||||
|
||||
# NOTE: Once set or removed, please restart PowerShell for the change to take effect
|
||||
```
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
#### Method 3: Set manually on every command
|
||||
Another option to point the CLI to your self-hosted Infisical instance is to set it via a flag on every command you run.
|
||||
|
||||
```bash
|
||||
# Example
|
||||
infisical <any-command> --domain="https://your-self-hosted-infisical.com/api"
|
||||
```
|
||||
</Accordion>
|
||||
|
||||
## History
|
||||
|
||||
Your terminal keeps a history of the commands you run. When you set Infisical secrets directly from your terminal, those commands (including the secret values) will remain in that history.
|
||||
@ -143,30 +127,101 @@ Your terminal keeps a history with the commands you run. When you create Infisic
|
||||
For security and privacy reasons, we recommend you configure your terminal to ignore those specific Infisical commands.
|
||||
|
||||
<Accordion title="Ignore commands">
|
||||
<Tabs>
|
||||
<Tab title="Unix/Linux">
|
||||
<Tip>
|
||||
`$HOME/.profile` is pretty common, but you could place it under `$HOME/.profile.d/infisical.sh` or any profile file run at login.
|
||||
</Tip>
|
||||
|
||||
|
||||
```bash
|
||||
cat <<EOF >> $HOME/.profile && source $HOME/.profile
|
||||
|
||||
# Ignoring specific Infisical CLI commands
|
||||
DEFAULT_HISTIGNORE=$HISTIGNORE
|
||||
export HISTIGNORE="*infisical secrets set*:$DEFAULT_HISTIGNORE"
|
||||
EOF
|
||||
```
|
||||
|
||||
</Tab>
|
||||
<Tab title="Windows">
|
||||
If you're on WSL, then you can use the Unix/Linux method.
|
||||
|
||||
<Tip>
|
||||
Here's some [documentation](https://superuser.com/a/1658331) on how to clear the terminal history in PowerShell and CMD.
|
||||
</Tip>
|
||||
|
||||
</Tab>
|
||||
|
||||
</Tabs>
|
||||
</Accordion>
|
||||
|
||||
## FAQ
|
||||
|
||||
<AccordionGroup>
|
||||
<Accordion title="Can I connect the CLI to my self-hosted Infisical instance?">
|
||||
Yes. The CLI is set to connect to Infisical Cloud by default, but if you're running your own instance of Infisical, you can direct the CLI to it using one of the methods provided below.
|
||||
|
||||
#### Method 1: Use the updated CLI
|
||||
|
||||
Beginning with CLI version v0.4.0, you can choose between logging in through Infisical Cloud or your own self-hosted instance. Simply execute the `infisical login` command and follow the on-screen instructions.
|
||||
|
||||
#### Method 2: Export environment variable
|
||||
|
||||
You can point the CLI to the self-hosted Infisical instance by exporting the environment variable `INFISICAL_API_URL` in your terminal.
|
||||
|
||||
<Tabs>
|
||||
<Tab title="Linux/macOS">
|
||||
```bash
# set backend host
export INFISICAL_API_URL="https://your-self-hosted-infisical.com/api"

# remove backend host
unset INFISICAL_API_URL
```
</Tab>
<Tab title="Windows PowerShell">
```bash
# set backend host
setx INFISICAL_API_URL "https://your-self-hosted-infisical.com/api"

# remove backend host
setx INFISICAL_API_URL ""

# NOTE: Once set or removed, please restart PowerShell for the change to take effect
```
</Tab>
</Tabs>
|
||||
|
||||
#### Method 3: Set manually on every command
|
||||
|
||||
Another option to point the CLI to your self-hosted Infisical instance is to set it via a flag on every command you run.
|
||||
|
||||
```bash
|
||||
# Example
|
||||
infisical <any-command> --domain="https://your-self-hosted-infisical.com/api"
|
||||
```
|
||||
|
||||
</Accordion>
|
||||
<Accordion title="Can I use the CLI with service tokens?">
|
||||
Yes. Please note, however, that service tokens are being deprecated in favor of [machine identities](/documentation/platform/identities/machine-identities). They will be removed in the future in accordance with the deprecation notice and timeline stated [here](https://infisical.com/blog/deprecating-api-keys).
|
||||
|
||||
To use Infisical for non-local development scenarios, please create a service token. The service token will allow you to authenticate and interact with Infisical. Once you have created a service token with the required permissions, you'll need to feed the token to the CLI.
|
||||
|
||||
```bash
|
||||
infisical export --token=<service-token>
|
||||
infisical secrets --token=<service-token>
|
||||
infisical run --token=<service-token> -- npm run dev
|
||||
```
|
||||
|
||||
#### Pass via shell environment variable
|
||||
The CLI is configured to look for an environment variable named `INFISICAL_TOKEN`. If set, it’ll attempt to use it for authentication.
|
||||
|
||||
```bash
|
||||
export INFISICAL_TOKEN=<service-token>
|
||||
```
|
||||
|
||||
</Accordion>
|
||||
</AccordionGroup>
|
||||
|
@ -1,87 +0,0 @@
|
||||
---
|
||||
title: "Kubernetes"
|
||||
---
|
||||
|
||||
The Infisical Secrets Operator fetches secrets from Infisical and saves them as Kubernetes secrets using the custom `InfisicalSecret` resource to define authentication and storage methods.
|
||||
The operator updates secrets continuously and can reload dependent deployments automatically on secret changes.
|
||||
|
||||
Prerequisites:
|
||||
|
||||
- Connected to your cluster via kubectl
|
||||
- Have a project with secrets ready in [Infisical Cloud](https://app.infisical.com).
|
||||
- Create an [Infisical Token](/documentation/platform/token) scoped to an environment in your project in Infisical.
|
||||
|
||||
## Installation
|
||||
|
||||
Follow the instructions for either [Helm](https://helm.sh/) or [kubectl](https://github.com/kubernetes/kubectl) to install the Infisical Secrets Operator.
|
||||
|
||||
<Tabs>
|
||||
<Tab title="Helm">
|
||||
Install the Infisical Helm repository
|
||||
|
||||
```console
|
||||
helm repo add infisical-helm-charts 'https://dl.cloudsmith.io/public/infisical/helm-charts/helm/charts/'
|
||||
|
||||
helm repo update
|
||||
```
|
||||
|
||||
Install the Helm chart
|
||||
```console
|
||||
helm install --generate-name infisical-helm-charts/secrets-operator
|
||||
```
|
||||
|
||||
</Tab>
|
||||
<Tab title="Kubectl">
|
||||
The operator will be installed in the `infisical-operator-system` namespace.
|
||||
```
|
||||
kubectl apply -f https://raw.githubusercontent.com/Infisical/infisical/main/k8-operator/kubectl-install/install-secrets-operator.yaml
|
||||
```
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
|
||||
## Usage
|
||||
|
||||
**Step 1: Create Kubernetes secret containing service token**
|
||||
|
||||
Once you have generated the service token, create a Kubernetes secret containing it by running the command below.
|
||||
|
||||
```bash
|
||||
kubectl create secret generic service-token --from-literal=infisicalToken=<your-service-token-here>
|
||||
```
|
||||
|
||||
**Step 2: Fill out the InfisicalSecrets CRD and apply it to your cluster**
|
||||
|
||||
```yaml infisical-secrets-config.yaml
|
||||
apiVersion: secrets.infisical.com/v1alpha1
|
||||
kind: InfisicalSecret
|
||||
metadata:
|
||||
# Name of this InfisicalSecret resource
|
||||
name: infisicalsecret-sample
|
||||
spec:
|
||||
# The host that should be used to pull secrets from. If left empty, the value specified in Global configuration will be used
|
||||
hostAPI: https://app.infisical.com/api
|
||||
resyncInterval:
|
||||
authentication:
|
||||
serviceToken:
|
||||
serviceTokenSecretReference:
|
||||
secretName: service-token
|
||||
secretNamespace: option
|
||||
secretsScope:
|
||||
envSlug: dev
|
||||
secretsPath: "/"
|
||||
managedSecretReference:
|
||||
secretName: managed-secret # <-- the name of kubernetes secret that will be created
|
||||
secretNamespace: default # <-- where the kubernetes secret should be created
|
||||
```
|
||||
|
||||
```
|
||||
kubectl apply -f infisical-secrets-config.yaml
|
||||
```
|
||||
|
||||
You should now see a new Kubernetes secret automatically created in the namespace you defined in the `managedSecretReference` property above.
|
||||
|
||||
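For illustration, a dependent workload can then consume the managed secret like any other Kubernetes secret. The deployment below is a minimal, hypothetical sketch that reuses the `managed-secret` name from the example config:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-app # hypothetical workload name
spec:
  replicas: 1
  selector:
    matchLabels:
      app: example-app
  template:
    metadata:
      labels:
        app: example-app
    spec:
      containers:
        - name: example-app
          image: example-app:latest # placeholder image
          envFrom:
            # expose every key of the operator-managed secret as an environment variable
            - secretRef:
                name: managed-secret
```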
See also:
|
||||
|
||||
- [Documentation for the Infisical Kubernetes Operator](../../integrations/platforms/kubernetes)
|
||||
|
82
docs/documentation/platform/audit-log-streams.mdx
Normal file
@ -0,0 +1,82 @@
|
||||
---
|
||||
title: "Audit Log Streams"
|
||||
description: "Learn how to stream Infisical Audit Logs to external logging providers."
|
||||
---
|
||||
|
||||
<Info>
|
||||
Audit Log Streams is a paid feature.
|
||||
|
||||
If you're using Infisical Cloud, then it is available under the **Enterprise Tier**. If you're self-hosting Infisical,
|
||||
then you should contact team@infisical.com to purchase an enterprise license to use it.
|
||||
</Info>
|
||||
|
||||
Infisical Audit Log Streaming enables you to transmit your organization's Audit Logs to external logging providers for monitoring and analysis.
|
||||
|
||||
The logs are formatted in JSON, requiring your logging provider to support JSON-based log parsing.
|
||||
|
||||
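For illustration only, a streamed entry might have a shape like the one below; the field names here are hypothetical, and the actual schema is defined by Infisical's audit log events:

```json
{
  "event": "secrets.read",
  "actor": "machine-identity",
  "timestamp": "2024-01-01T12:00:00Z"
}
```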
|
||||
## Overview
|
||||
|
||||
<Steps>
|
||||
<Step title="Navigate to Organization Settings in your sidebar." />
|
||||
<Step title="Select Audit Log Streams Tab.">
|
||||

|
||||
</Step>
|
||||
<Step title="Click on Create">
|
||||

|
||||
|
||||
Provide the following values:
|
||||
<ParamField path="Endpoint URL" type="string" required>
|
||||
The HTTPS endpoint URL of the logging provider that collects the JSON stream.
|
||||
</ParamField>
|
||||
<ParamField path="Headers" type="string" >
|
||||
The HTTP headers for the logging provider for identification and authentication.
|
||||
</ParamField>
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||

|
||||
Your Audit Logs are now ready to be streamed.
|
||||
|
||||
## Example Providers
|
||||
|
||||
### Better Stack
|
||||
|
||||
<Steps>
|
||||
<Step title="Select Connect Source">
|
||||

|
||||
</Step>
|
||||
<Step title="Provide a name and select platform"/>
|
||||
<Step title="Provide Audit Log Stream inputs">
|
||||

|
||||
|
||||
1. Copy the **endpoint** from Better Stack to the **Endpoint URL** field.
|
||||
2. Create a new header with key **Authorization** and set the value to **Bearer \<source token from betterstack\>**.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
### Datadog
|
||||
|
||||
<Steps>
|
||||
<Step title="Navigate to API Keys section">
|
||||

|
||||
</Step>
|
||||
<Step title="Select New Key and provide a key name">
|
||||

|
||||

|
||||
</Step>
|
||||
<Step title="Find your Datadog region specific logging endpoint.">
|
||||

|
||||
|
||||
1. Navigate to the [Datadog Send Logs API documentation](https://docs.datadoghq.com/api/latest/logs/?code-lang=curl&site=us5#send-logs).
|
||||
2. Pick your Datadog account region.
|
||||
3. Obtain your Datadog logging endpoint URL.
|
||||
</Step>
|
||||
<Step title="Provide audit log stream inputs">
|
||||

|
||||
|
||||
1. Copy the **logging endpoint** from Datadog to the **Endpoint URL** field.
|
||||
2. Copy the **API Key** from the previous step.
|
||||
3. Create a new header with key **DD-API-KEY** and set the value to the **API Key**.
|
||||
</Step>
|
||||
</Steps>
|
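As an optional sanity check of the endpoint and header before saving the stream, you can send a hand-rolled test log; the region-specific domain and key below are placeholders taken from the steps above:

```bash
# placeholder endpoint and key; use the values obtained in the steps above
curl -X POST "https://http-intake.logs.<datadog-region-domain>/api/v2/logs" \
  -H "DD-API-KEY: <api-key>" \
  -H "Content-Type: application/json" \
  -d '[{"message": "audit log stream test"}]'
```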
151
docs/documentation/platform/dynamic-secrets/aws-iam.mdx
Normal file
@ -0,0 +1,151 @@
|
||||
---
|
||||
title: "AWS IAM"
|
||||
description: "How to dynamically generate AWS IAM Users."
|
||||
---
|
||||
|
||||
The Infisical AWS IAM dynamic secret allows you to generate AWS IAM Users on demand based on a configured AWS policy.
|
||||
|
||||
## Prerequisite
|
||||
|
||||
Infisical needs an initial AWS IAM user with the required permissions to create additional IAM users. This IAM user will be responsible for managing the lifecycle of new IAM users.
|
||||
|
||||
<Accordion title="Managing AWS IAM User minimum permission policy">
|
||||
|
||||
```json
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"iam:AttachUserPolicy",
|
||||
"iam:CreateAccessKey",
|
||||
"iam:CreateUser",
|
||||
"iam:DeleteAccessKey",
|
||||
"iam:DeleteUser",
|
||||
"iam:DeleteUserPolicy",
|
||||
"iam:DetachUserPolicy",
|
||||
"iam:GetUser",
|
||||
"iam:ListAccessKeys",
|
||||
"iam:ListAttachedUserPolicies",
|
||||
"iam:ListGroupsForUser",
|
||||
"iam:ListUserPolicies",
|
||||
"iam:PutUserPolicy",
|
||||
"iam:AddUserToGroup",
|
||||
"iam:RemoveUserFromGroup"
|
||||
],
|
||||
"Resource": ["*"]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
To minimize the managing user's access, you can attach a resource in the format
|
||||
|
||||
> arn:aws:iam::\<account-id\>:user/\<aws-scope-path\>
|
||||
|
||||
Replace **\<account-id\>** with your AWS account ID and **\<aws-scope-path\>** with a path to scope the managed users' access.
|
||||
|
||||
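For example, with a hypothetical account ID `123456789012` and scope path `infisical/`, the `Resource` entry above would become:

```json
"Resource": ["arn:aws:iam::123456789012:user/infisical/*"]
```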
</Accordion>
|
||||
|
||||
## Set up Dynamic Secrets with AWS IAM
|
||||
|
||||
<Steps>
|
||||
<Step title="Secret Overview Dashboard">
|
||||
Navigate to the Secret Overview dashboard and select the environment to which you would like to add a dynamic secret.
|
||||
</Step>
|
||||
<Step title="Click on the 'Add Dynamic Secret' button">
|
||||

|
||||
</Step>
|
||||
<Step title="Select AWS IAM">
|
||||

|
||||
</Step>
|
||||
<Step title="Provide the inputs for dynamic secret parameters">
|
||||
<ParamField path="Secret Name" type="string" required>
|
||||
Name by which you want the secret to be referenced
|
||||
</ParamField>
|
||||
|
||||
<ParamField path="Default TTL" type="string" required>
|
||||
Default time-to-live for a generated secret (it is possible to modify this value when a secret is generated)
|
||||
</ParamField>
|
||||
|
||||
<ParamField path="Max TTL" type="string" required>
|
||||
Maximum time-to-live for a generated secret
|
||||
</ParamField>
|
||||
|
||||
<ParamField path="AWS Access Key" type="string" required>
|
||||
The managing AWS IAM User Access Key
|
||||
</ParamField>
|
||||
|
||||
<ParamField path="AWS Secret Key" type="string" required>
|
||||
The managing AWS IAM User Secret Key
|
||||
</ParamField>
|
||||
|
||||
<ParamField path="AWS IAM Path" type="string">
|
||||
[IAM AWS Path](https://aws.amazon.com/blogs/security/optimize-aws-administration-with-iam-paths/) to scope created IAM User resource access.
|
||||
</ParamField>
|
||||
|
||||
<ParamField path="AWS Region" type="string" required>
|
||||
The AWS data center region.
|
||||
</ParamField>
|
||||
|
||||
<ParamField path="IAM User Permission Boundary" type="string" required>
|
||||
The IAM Policy ARN of the [AWS Permissions Boundary](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) to attach to IAM users created in the role.
|
||||
</ParamField>
|
||||
|
||||
<ParamField path="AWS IAM Groups" type="string">
|
||||
The AWS IAM groups that should be assigned to the created users. Multiple values can be provided by separating them with commas
|
||||
</ParamField>
|
||||
|
||||
<ParamField path="AWS Policy ARNs" type="string">
|
||||
The AWS IAM managed policies that should be attached to the created users. Multiple values can be provided by separating them with commas
|
||||
</ParamField>
|
||||
|
||||
<ParamField path="AWS IAM Policy Document" type="string">
|
||||
The AWS IAM inline policy that should be attached to the created users. Multiple values can be provided by separating them with commas
|
||||
</ParamField>
|
||||
|
||||

|
||||
|
||||
</Step>
|
||||
<Step title="Click 'Submit'">
|
||||
After submitting the form, you will see a dynamic secret created in the dashboard.
|
||||
|
||||

|
||||
</Step>
|
||||
<Step title="Generate dynamic secrets">
|
||||
Once you've successfully configured the dynamic secret, you're ready to generate on-demand credentials.
|
||||
To do this, simply click on the 'Generate' button which appears when hovering over the dynamic secret item.
|
||||
Alternatively, you can initiate the creation of a new lease by selecting 'New Lease' from the dynamic secret lease list section.
|
||||
|
||||

|
||||

|
||||
|
||||
When generating these secrets, it's important to specify a Time-to-Live (TTL) duration. This will dictate how long the credentials are valid for.
|
||||
|
||||

|
||||
|
||||
<Tip>
|
||||
Ensure that the TTL for the lease falls within the maximum TTL defined when configuring the dynamic secret in step 4.
|
||||
</Tip>
|
||||
|
||||
|
||||
Once you click the `Submit` button, a new secret lease will be generated and the credentials for it will be shown to you.
|
||||
|
||||

|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
## Audit or Revoke Leases
|
||||
Once you have created one or more leases, you will be able to access them by clicking on the respective dynamic secret item on the dashboard.
|
||||
This will allow you to see the lease details and delete the lease ahead of its expiration time.
|
||||
|
||||

|
||||
|
||||
## Renew Leases
|
||||
To extend the life of the generated dynamic secret lease past its initial time to live, simply click on **Renew** as illustrated below.
|
||||

|
||||
|
||||
<Warning>
|
||||
Lease renewals cannot exceed the maximum TTL set when configuring the dynamic secret
|
||||
</Warning>
|
@ -1,13 +1,14 @@
|
||||
---
|
||||
title: "Overview"
|
||||
title: "Dynamic Secrets"
|
||||
sidebarTitle: "Overview"
|
||||
description: "Learn how to generate secrets dynamically on-demand."
|
||||
---
|
||||
|
||||
## Introduction
|
||||
|
||||
Contrary to static key-value secrets, which require manual input of data into the secure Infisical storage, dynamic secrets are generated on-demand upon access.
|
||||
Contrary to static key-value secrets, which require manual input of data into the secure Infisical storage, **dynamic secrets are generated on-demand upon access**.
|
||||
|
||||
Dynamic secrets are unique to every identity using them. Such secrets are generated only at the moment they are retrieved, eliminating the possibility of theft or reuse by another identity. Thanks to Infisical's integrated revocation capabilities, dynamic secrets can be promptly invalidated post-use, significantly reducing their lifespan.
|
||||
**Dynamic secrets are unique to every identity using them**. Such secrets are generated only at the moment they are retrieved, eliminating the possibility of theft or reuse by another identity. Thanks to Infisical's integrated revocation capabilities, dynamic secrets can be promptly invalidated post-use, significantly reducing their lifespan.
|
||||
|
||||
## Benefits of Dynamic Secrets
|
||||
|
||||
@ -23,8 +24,12 @@ This approach offers several advantages in terms of security and management:
|
||||
|
||||
- **Scalability**: Dynamic secret management systems can scale more effectively to handle a large number of services and applications, as they automate much of the overhead associated with manual secret management.
|
||||
|
||||
Dynamic secrets are particularly useful in environments with stringent security requirements, such as cloud environments, distributed systems, and microservices architectures, where they help to manage database credentials, API keys, service tokens, and other types of secrets.
|
||||
Dynamic secrets are particularly useful in environments with stringent security requirements, such as cloud environments, distributed systems, and microservices architectures, where they help to manage database credentials, API keys, tokens, and other types of secrets.
|
||||
|
||||
## Infisical Dynamic Secret Templates
|
||||
|
||||
1. [PostgreSQL](./postgresql)
|
||||
2. [MySQL](./mysql)
|
||||
3. [Cassandra](./cassandra)
|
||||
4. [Oracle](./oracle)
|
||||
5. [AWS IAM](./aws-iam)
|
||||
|
@ -1,5 +1,5 @@
|
||||
---
|
||||
title: Machine Identities
|
||||
title: Machine Identities
|
||||
description: "Learn how to use Machine Identities to programmatically interact with Infisical."
|
||||
---
|
||||
|
||||
@ -21,18 +21,17 @@ Key Features:
|
||||
A typical workflow for using identities consists of four steps:
|
||||
|
||||
1. Creating the identity with a name and [role](/documentation/platform/role-based-access-controls) in Organization Access Control > Machine Identities.
|
||||
This step also involves configuring an authentication method for it such as [Universal Auth](/documentation/platform/identities/universal-auth).
|
||||
This step also involves configuring an authentication method for it such as [Universal Auth](/documentation/platform/identities/universal-auth).
|
||||
2. Adding the identity to the project(s) you want it to have access to.
|
||||
3. Authenticating the identity with the Infisical API based on the configured authentication method on it and receiving a short-lived access token back.
|
||||
4. Authenticating subsequent requests with the Infisical API using the short-lived access token.
|
||||
|
||||
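As a sketch of steps 3 and 4, assuming Universal Auth against the default cloud instance (endpoint per the Infisical API reference; all values are placeholders):

```bash
# step 3: exchange the client credentials for a short-lived access token
curl -s -X POST "https://app.infisical.com/api/v1/auth/universal-auth/login" \
  -H "Content-Type: application/json" \
  -d '{"clientId": "<client-id>", "clientSecret": "<client-secret>"}'

# step 4: use the returned access token as a Bearer token on subsequent requests
curl -s "https://app.infisical.com/api/<endpoint>" \
  -H "Authorization: Bearer <access-token>"
```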
|
||||
<Note>
|
||||
Currently, identities can only be used to make authenticated requests to the Infisical API, SDKs, Terraform, Kubernetes Operator, and Infisical Agent. They do not work with clients such as the CLI, Ansible lookup plugin, etc.
|
||||
|
||||
Machine Identity support for the rest of the clients is planned to be released in the current quarter.
|
||||
</Note>
|
||||
Machine Identity support for the rest of the clients is planned to be released in the current quarter.
|
||||
|
||||
</Note>
|
||||
|
||||
## Authentication Methods
|
||||
|
||||
@ -43,8 +42,16 @@ To interact with various resources in Infisical, Machine Identities are able to
|
||||
## FAQ
|
||||
|
||||
<AccordionGroup>
|
||||
<Accordion title="Can I use machine identities with the CLI?">
|
||||
|
||||
Yes - identities can be used with the CLI.
|
||||
|
||||
You can learn more about how to do this in the CLI quickstart [here](/cli/usage).
|
||||
|
||||
</Accordion>
|
||||
|
||||
<Accordion title="What is the difference between an identity and service token?">
|
||||
A service token is a project-level authentication method that is being phased out in favor of identities.
|
||||
A service token is a project-level authentication method that is being deprecated in favor of identities. The service token method will be removed in the future in accordance with the deprecation notice and timeline stated [here](https://infisical.com/blog/deprecating-api-keys).
|
||||
|
||||
Amongst many differences, identities provide broader access over the Infisical API, utilize the same
|
||||
permission system as user identities, and come with a significantly larger number of configurable authentication and security features.
|
||||
|
@ -17,7 +17,6 @@ Upon being added to an organization and projects, users assume a certain set of
|
||||
|
||||
To interact with various resources in Infisical, users are able to utilize a number of authentication methods:
|
||||
- **Email & Password**: the most common authentication method that is used for authentication into Web Dashboard and Infisical CLI. It is recommended to utilize [Multi-factor Authentication](/documentation/platform/mfa) in addition to it.
|
||||
- **Service Tokens**: Service tokens allow users to authenticate into the CLI and other clients under their own identity. For the majority of use cases, this is not a recommended approach. Instead, it is often a good idea to utilize [Machine Identities](./machine-identities) with [Universal Authentication](/documentation/platform/identities/universal-auth).
|
||||
- **SSO**: Infisical natively integrates with a number of SSO identity providers like [Google](/documentation/platform/sso/google), [GitHub](/documentation/platform/sso/github), and [GitLab](/documentation/platform/sso/gitlab).
|
||||
- **SAML SSO**: It is also possible to set up SAML SSO integration with identity providers like [Okta](/documentation/platform/sso/okta), [Microsoft Entra ID](/documentation/platform/sso/azure) (formerly known as Azure AD), [JumpCloud](/documentation/platform/sso/jumpcloud), [Google](/documentation/platform/sso/google-saml), and more.
|
||||
- **LDAP**: For organizations with more advanced needs, Infisical also provides user authentication with [LDAP](/documentation/platform/ldap/overview) that includes a number of LDAP providers.
|
||||
|
@ -1,38 +0,0 @@
|
||||
---
|
||||
title: "IP Allowlisting"
|
||||
description: "Restrict access to your secrets in Infisical using trusted IPs"
|
||||
---
|
||||
|
||||
<Warning>
|
||||
IP allowlisting at the project level is being replaced with IP allowlisting at the token level, now available with the Service Token V3 authentication method.
|
||||
|
||||
Instead of providing trusted IPs (specific IPs and CIDR ranges) to be applied across all service tokens,
|
||||
you can now specify trusted IPs at the token-level.
|
||||
|
||||
</Warning>
|
||||
<Info>
|
||||
Note that IP Allowlisting is a paid feature.
|
||||
|
||||
If you're using Infisical Cloud, then it is available under the **Pro Tier**. If you're self-hosting Infisical,
|
||||
then you should contact sales@infisical.com to purchase an enterprise license to use it.
|
||||
</Info>
|
||||
|
||||
Projects in Infisical can be configured to restrict client access to specific IP addresses or CIDR ranges. This applies to any client using service tokens and
|
||||
can be useful, for example, for limiting access to traffic coming from corporate networks.
|
||||
|
||||
By default, each project is initialized with the `0.0.0.0/0` entry, representing all possible IPv4 addresses.
|
||||
For enhanced security, we strongly recommend replacing the default entry with your client IPs to tighten access to your secrets.
|
||||
|
||||
<Note>
|
||||
You must be a project `admin` to manage your project's IP whitelist.
|
||||
</Note>
|
||||
|
||||

|
||||
|
||||
## Creating a trusted IP entry
|
||||
|
||||
To create a trusted IP entry, head over to the **IP Whitelist** tab in your project. When creating an entry,
|
||||
you can specify either a specific IP address like `192.0.2.1` or a CIDR range like `2001:db8::/32`; both IPv4 and IPv6
|
||||
formats are accepted.
|
||||
|
||||

|
@ -1,36 +0,0 @@
|
||||
---
|
||||
title: "LDAP"
|
||||
description: "Log in to Infisical with LDAP"
|
||||
---
|
||||
|
||||
<Info>
|
||||
LDAP is a paid feature.
|
||||
|
||||
If you're using Infisical Cloud, then it is available under the **Enterprise Tier**. If you're self-hosting Infisical,
|
||||
then you should contact sales@infisical.com to purchase an enterprise license to use it.
|
||||
</Info>
|
||||
|
||||
You can configure your organization in Infisical to have members authenticate with the platform via [LDAP](https://en.wikipedia.org/wiki/Lightweight_Directory_Access_Protocol).
|
||||
|
||||
<Steps>
|
||||
<Step title="Prepare the LDAP configuration in Infisical">
|
||||
In Infisical, head to your Organization Settings > Authentication > LDAP Configuration and select **Set up LDAP**.
|
||||
|
||||
Next, input your LDAP server settings.
|
||||
|
||||

|
||||
|
||||
Here's some guidance for each field:
|
||||
|
||||
- URL: The LDAP server to connect to such as `ldap://ldap.your-org.com`, `ldaps://ldap.myorg.com:636` (for connection over SSL/TLS), etc.
|
||||
- Bind DN: The distinguished name of the object to bind when performing the user search such as `cn=infisical,ou=Users,dc=acme,dc=com`.
|
||||
- Bind Pass: The password to use along with `Bind DN` when performing the user search.
|
||||
- Search Base / User DN: Base DN under which to perform user search such as `ou=Users,dc=example,dc=com`
|
||||
- CA Certificate: The CA certificate to use when verifying the LDAP server certificate.
|
||||
</Step>
|
||||
<Step title="Enable LDAP in Infisical">
|
||||
Enabling LDAP allows members in your organization to log into Infisical via LDAP.
|
||||
|
||||

|
||||
</Step>
|
||||
</Steps>
|
@ -4,16 +4,17 @@ description: "Learn how to log in to Infisical with LDAP."
|
||||
---
|
||||
|
||||
<Info>
|
||||
LDAP is a paid feature.
|
||||
If you're using Infisical Cloud, then it is available under the **Enterprise Tier**. If you're self-hosting Infisical,
|
||||
then you should contact sales@infisical.com to purchase an enterprise license to use it.
|
||||
LDAP is a paid feature. If you're using Infisical Cloud, then it is available
|
||||
under the **Enterprise Tier**. If you're self-hosting Infisical, then you
|
||||
should contact sales@infisical.com to purchase an enterprise license to use
|
||||
it.
|
||||
</Info>
|
||||
|
||||
You can configure your organization in Infisical to have members authenticate with the platform via [LDAP](https://en.wikipedia.org/wiki/Lightweight_Directory_Access_Protocol).
|
||||
|
||||
<Steps>
|
||||
<Step title="Prepare the LDAP configuration in Infisical">
|
||||
In Infisical, head to your Organization Settings > Authentication > LDAP Configuration and select **Set up LDAP**.
|
||||
In Infisical, head to your Organization Settings > Security > LDAP and select **Manage**.
|
||||
|
||||
Next, input your LDAP server settings.
|
||||
|
||||
@ -24,11 +25,50 @@ You can configure your organization in Infisical to have members authenticate wi
|
||||
- URL: The LDAP server to connect to such as `ldap://ldap.your-org.com`, `ldaps://ldap.myorg.com:636` (for connection over SSL/TLS), etc.
|
||||
- Bind DN: The distinguished name of the object to bind when performing the user search such as `cn=infisical,ou=Users,dc=acme,dc=com`.
|
||||
- Bind Pass: The password to use along with `Bind DN` when performing the user search.
|
||||
- Search Base / User DN: Base DN under which to perform user search such as `ou=Users,dc=example,dc=com`
|
||||
- User Search Base / User DN: Base DN under which to perform user search such as `ou=Users,dc=acme,dc=com`.
|
||||
- User Search Filter (optional): Template used to construct the LDAP user search filter such as `(uid={{username}})`; use literal `{{username}}` to have the given username used in the search. The default is `(uid={{username}})` which is compatible with several common directory schemas.
|
||||
- Group Search Base / Group DN (optional): LDAP search base to use for group membership search such as `ou=Groups,dc=acme,dc=com`.
|
||||
- Group Filter (optional): Template used when constructing the group membership query such as `(&(objectClass=posixGroup)(memberUid={{.Username}}))`. The template can access the following context variables: [`UserDN`, `UserName`]. The default is `(|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}}))` which is compatible with several common directory schemas.
|
||||
- CA Certificate: The CA certificate to use when verifying the LDAP server certificate.
|
||||
|
||||
<Note>
|
||||
The **Group Search Base / Group DN** and **Group Filter** fields are both required if you wish to sync LDAP groups to Infisical.
|
||||
</Note>
|
||||
|
||||
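Optionally, you can sanity-check the bind settings locally with `ldapsearch` before saving; the values below mirror the examples above and assume the OpenLDAP client tools are installed:

```bash
# bind as the configured Bind DN and look up a test user
ldapsearch -H ldap://ldap.your-org.com \
  -D "cn=infisical,ou=Users,dc=acme,dc=com" \
  -w '<bind-pass>' \
  -b "ou=Users,dc=acme,dc=com" \
  "(uid=<username>)"
```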
</Step>
|
||||
<Step title="Test the LDAP connection">
|
||||
Once you've filled out the LDAP configuration, you can test that part of the configuration is correct by pressing the **Test Connection** button.
|
||||
|
||||
Infisical will attempt to bind to the LDAP server using the provided **URL**, **Bind DN**, and **Bind Pass**. If the operation is successful, then Infisical will display a success message; if not, then Infisical will display an error message and provide a fuller error in the server logs.
|
||||
|
||||

|
||||
</Step>
|
||||
|
||||
<Step title="Define mappings from LDAP groups to groups in Infisical">
|
||||
In order to sync LDAP groups to Infisical, head to the **LDAP Group Mappings** section to define mappings from LDAP groups to groups in Infisical.
|
||||
|
||||

|
||||
|
||||
Group mappings ensure that users who log into Infisical via LDAP are added to or removed from the Infisical group(s) that correspond to the LDAP group(s) they are a member of.
|
||||
|
||||

|
||||
|
||||
Each group mapping consists of two parts:
|
||||
- LDAP Group CN: The common name of the LDAP group to map.
|
||||
- Infisical Group: The Infisical group to map the LDAP group to.
|
||||
|
||||
For example, suppose you want to automatically add a user who is part of the LDAP group with CN `Engineers` to the Infisical group `Engineers` when the user sets up their account with Infisical.
|
||||
|
||||
In this case, you would specify a mapping from the LDAP group with CN `Engineers` to the Infisical group `Engineers`.
|
||||
Now when the user logs into Infisical via LDAP, Infisical will check the LDAP groups that the user is a part of whilst referencing the group mappings you created earlier. Since the user is a member of the LDAP group with CN `Engineers`, they will be added to the Infisical group `Engineers`.
|
||||
In the future, if the user is no longer part of the LDAP group with CN `Engineers`, they will be removed from the Infisical group `Engineers` upon their next login.
|
||||
<Note>
|
||||
Prior to defining any group mappings, ensure that you've created the Infisical groups that you want to map the LDAP groups to.
|
||||
You can read more about creating (user) groups in Infisical [here](/documentation/platform/groups).
|
||||
</Note>
|
||||
</Step>
|
||||
<Step title="Enable LDAP in Infisical">
|
||||
Enabling LDAP allows members in your organization to log into Infisical via LDAP.
|
||||

|
||||
</Step>
|
||||
</Steps>
|
||||
</Steps>
|
||||
|
@ -4,9 +4,10 @@ description: "Learn how to configure JumpCloud LDAP for authenticating into Infi
|
||||
---
|
||||
|
||||
<Info>
|
||||
LDAP is a paid feature.
|
||||
If you're using Infisical Cloud, then it is available under the **Enterprise Tier**. If you're self-hosting Infisical,
|
||||
then you should contact sales@infisical.com to purchase an enterprise license to use it.
|
||||
LDAP is a paid feature. If you're using Infisical Cloud, then it is available
|
||||
under the **Enterprise Tier**. If you're self-hosting Infisical, then you
|
||||
should contact sales@infisical.com to purchase an enterprise license to use
|
||||
it.
|
||||
</Info>
|
||||
|
||||
<Steps>
|
||||
@ -17,13 +18,12 @@ description: "Learn how to configure JumpCloud LDAP for authenticating into Infi
|
||||
When creating the user, input their **First Name**, **Last Name**, **Username** (required), **Company Email** (required), and **Description**.
|
||||
Also, create a password for the user.
|
||||
|
||||
Next, under User Security Settings and Permissions > Permission Settings, check the box next to **Enable as LDAP Bind DN**.
|
||||
Next, under User Security Settings and Permissions > Permission Settings, check the box next to **Enable as LDAP Bind DN**.
|
||||
|
||||

|
||||
|
||||
</Step>
|
||||
<Step title="Prepare the LDAP configuration in Infisical">
|
||||
In Infisical, head to your Organization Settings > Authentication > LDAP Configuration and select **Set up LDAP**.
|
||||
In Infisical, head to your Organization Settings > Security > LDAP and select **Manage**.
|
||||
|
||||
Next, input your JumpCloud LDAP server settings.
|
||||
|
||||
@ -34,21 +34,57 @@ description: "Learn how to configure JumpCloud LDAP for authenticating into Infi
|
||||
- URL: The LDAP server to connect to (`ldaps://ldap.jumpcloud.com:636`).
|
||||
- Bind DN: The distinguished name of the object to bind when performing the user search (`uid=<ldap-user-username>,ou=Users,o=<your-org-id>,dc=jumpcloud,dc=com`).
|
||||
- Bind Pass: The password to use along with `Bind DN` when performing the user search.
|
||||
- Search Base / User DN: Base DN under which to perform user search (`ou=Users,o=<your-org-id>,dc=jumpcloud,dc=com`).
|
||||
- User Search Base / User DN: Base DN under which to perform user search (`ou=Users,o=<your-org-id>,dc=jumpcloud,dc=com`).
|
||||
- User Search Filter (optional): Template used to construct the LDAP user search filter (`(uid={{username}})`).
|
||||
- Group Search Base / Group DN (optional): LDAP search base to use for group membership search (`ou=Users,o=<your-org-id>,dc=jumpcloud,dc=com`).
|
||||
- Group Filter (optional): Template used when constructing the group membership query (`(&(objectClass=groupOfNames)(member=uid={{.Username}},ou=Users,o=<your-org-id>,dc=jumpcloud,dc=com))`)
|
||||
- CA Certificate: The CA certificate to use when verifying the LDAP server certificate (instructions to obtain the certificate for JumpCloud [here](https://jumpcloud.com/support/connect-to-ldap-with-tls-ssl)).
|
||||
|
||||
<Tip>
|
||||
When filling out the **Bind DN** and **Bind Pass** fields, refer to the username and password of the user created in Step 1.
|
||||
|
||||
Also, for the **Bind DN** and **Search Base / User DN** fields, you'll want to use the organization ID that appears
|
||||
Also, for the **Bind DN** and **Search Base / User DN** fields, you'll want to use the organization ID that appears
|
||||
in your LDAP instance **ORG DN**.
|
||||
</Tip>
|
||||
</Step>
|
||||
<Step title="Test the LDAP connection">
|
||||
Once you've filled out the LDAP configuration, you can test that part of the configuration is correct by pressing the **Test Connection** button.
|
||||
|
||||
Infisical will attempt to bind to the LDAP server using the provided **URL**, **Bind DN**, and **Bind Pass**. If the operation is successful, then Infisical will display a success message; if not, then Infisical will display an error message and provide a fuller error in the server logs.
|
||||
|
||||

|
||||
</Step>
|
||||
<Step title="Define mappings from LDAP groups to groups in Infisical">
|
||||
In order to sync LDAP groups to Infisical, head to the **LDAP Group Mappings** section to define mappings from LDAP groups to groups in Infisical.
|
||||
|
||||

|
||||
|
||||
Group mappings ensure that users who log into Infisical via LDAP are added to or removed from the Infisical group(s) that correspond to the LDAP group(s) they are a member of.
|
||||
|
||||

|
||||
|
||||
Each group mapping consists of two parts:
|
||||
- LDAP Group CN: The common name of the LDAP group to map.
|
||||
- Infisical Group: The Infisical group to map the LDAP group to.
|
||||
|
||||
For example, suppose you want to automatically add a user who is part of the LDAP group with CN `Engineers` to the Infisical group `Engineers` when the user sets up their account with Infisical.
|
||||
|
||||
In this case, you would specify a mapping from the LDAP group with CN `Engineers` to the Infisical group `Engineers`.
|
||||
Now when the user logs into Infisical via LDAP, Infisical will check the LDAP groups that the user is a part of whilst referencing the group mappings you created earlier. Since the user is a member of the LDAP group with CN `Engineers`, they will be added to the Infisical group `Engineers`.
|
||||
In the future, if the user is no longer part of the LDAP group with CN `Engineers`, they will be removed from the Infisical group `Engineers` upon their next login.
|
||||
<Note>
|
||||
Prior to defining any group mappings, ensure that you've created the Infisical groups that you want to map the LDAP groups to.
|
||||
You can read more about creating (user) groups in Infisical [here](/documentation/platform/groups).
|
||||
</Note>
|
||||
|
||||
</Step>
|
||||
<Step title="Enable LDAP in Infisical">
|
||||
Enabling LDAP allows members in your organization to log into Infisical via LDAP.
|
||||

|
||||
</Step>
|
||||
|
||||
</Steps>
|
||||
|
||||
Resources:
|
||||
- [JumpCloud Cloud LDAP Guide](https://jumpcloud.com/support/use-cloud-ldap)
|
||||
|
||||
- [JumpCloud Cloud LDAP Guide](https://jumpcloud.com/support/use-cloud-ldap)
|
||||
|
@ -19,7 +19,7 @@ This means that updating the value of a base secret propagates directly to other
|
||||
|
||||

|
||||
|
||||
Since secret referencing works by reconstructing values back on the client side, the client, be it a user or service token, fetching back secrets
|
||||
Since secret referencing works by reconstructing values back on the client side, the client, be it a user, service token, or a machine identity, fetching back secrets
|
||||
must be permissioned to access all base and dependent secrets.
|
||||
|
||||
For example, to access some secret `A` whose values depend on secrets `B` and `C` from different scopes, a client must have `read` access to the scopes of secrets `A`, `B`, and `C`.
|
||||
|
@ -5,7 +5,7 @@ description: "Learn how secret versioning works in Infisical."
|
||||
|
||||
Every time a secret change is performed, a new version of the same secret is created.
|
||||
|
||||
Such versions can be accessed visually by opening up the [secret sidebar](/documentation/platform/project#drawer) (as seen below) or [retrived via API](/api-reference/endpoints/secrets/read)
|
||||
Such versions can be accessed visually by opening up the [secret sidebar](/documentation/platform/project#drawer) (as seen below) or [retrieved via API](/api-reference/endpoints/secrets/read)
|
||||
by specifying the `version` query parameter.
|
||||
|
||||
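For instance, a hypothetical request against the linked read-secret endpoint might look like the following; the path and query parameters are placeholders, so consult the API reference for the exact set:

```bash
# placeholder values; see the linked API reference for the required parameters
curl -s "https://app.infisical.com/api/v3/secrets/raw/<secret-name>?version=2&environment=dev&workspaceId=<project-id>" \
  -H "Authorization: Bearer <access-token>"
```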

|
||||
|
@ -3,6 +3,13 @@ title: "Service Token"
|
||||
description: "Infisical service tokens allow users to programmatically interact with Infisical."
|
||||
---
|
||||
|
||||
<Warning>
|
||||
Service tokens are being deprecated in favor of [machine identities](/documentation/platform/identities/machine-identities).
|
||||
|
||||
They will be removed in the future in accordance with the deprecation notice and timeline stated [here](https://infisical.com/blog/deprecating-api-keys).
|
||||
|
||||
</Warning>
|
||||
|
||||
Service tokens are authentication credentials that services can use to access designated endpoints in the Infisical API to manage project resources like secrets.
|
||||
Each service token can be provisioned scoped access to select environment(s) and path(s) within them.
|
||||
|
||||
@ -17,8 +24,8 @@ Service Token (ST) is the current widely-used authentication method for managing
|
||||
Here's a few pointers to get you acquainted with it:
|
||||
|
||||
- When you create a ST, you get a token prefixed with `st`. The part after the last `.` delimiter is a symmetric key; everything
|
||||
before it is an access token. When authenticating with the Infisical API, it is important to send in only the access token portion
|
||||
of the token.
|
||||
before it is an access token. When authenticating with the Infisical API, it is important to send in only the access token portion
|
||||
of the token.
|
||||
- ST supports expiration; it gets deleted automatically upon expiration.
|
||||
- ST supports provisioning `read` and/or `write` permissions broadly applied to all accessible environment(s) and path(s).
|
||||
- ST is not editable.
|
||||
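To illustrate the token anatomy described above, here is a small shell sketch (the token value is a fake placeholder):

```bash
# fake placeholder token for illustration
SERVICE_TOKEN="st.xxxxxxxx.yyyyyyyy.zzzzzzzz"

ACCESS_TOKEN="${SERVICE_TOKEN%.*}"   # everything before the last '.' -- send this to the API
SYMMETRIC_KEY="${SERVICE_TOKEN##*.}" # the part after the last '.' -- keep it client-side
```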
@ -35,7 +42,7 @@ the token access to. Here's some guidance for each field:
|
||||
- Name: A friendly name for the token.
|
||||
- Scopes: The environment(s) and path(s) the token should have access to.
|
||||
- Permissions: You can indicate whether or not the token should have `read/write` access to the paths.
|
||||
Also, note that Infisical supports [glob patterns](https://www.malikbrowne.com/blog/a-beginners-guide-glob-patterns/) when defining access scopes to path(s).
|
||||
Also, note that Infisical supports [glob patterns](https://www.malikbrowne.com/blog/a-beginners-guide-glob-patterns/) when defining access scopes to path(s).
|
||||
- Expiration: The time when this token should be rendered inactive.
|
||||
|
||||

|
||||
@ -44,28 +51,31 @@ In the above screenshot, you can see that we are creating a token token with `re
|
||||
of the `/common` path within the development environment of the project; the token expires in 6 months and can be used from any IP address.
|
||||
|
||||
<Note>
|
||||
For a deeper understanding of service tokens, it is recommended to read [this guide](https://infisical.com/docs/internals/service-tokens).
|
||||
For a deeper understanding of service tokens, it is recommended to read [this
|
||||
guide](https://infisical.com/docs/internals/service-tokens).
|
||||
</Note>
|
||||
|
||||
**FAQ**
|
||||
|
||||
<AccordionGroup>
|
||||
<Accordion title="Why is the Infisical API rejecting my service token?">
There are a few reasons for why this might happen:

- The service token has expired.
- The service token is insufficiently permissioned to interact with the secrets in the given environment and path.
- You are attempting to access a `/raw` secrets endpoint that requires your project to disable E2EE.
- (If using ST V3) The service token has not been activated yet.
- (If using ST V3) The service token is being used from an untrusted IP.
</Accordion>
<Accordion title="Can you provide examples for using glob patterns?">
1. `/**`: This pattern matches all folders at any depth in the directory structure. For example, it would match folders like `/folder1/`, `/folder1/subfolder/`, and so on.

2. `/*`: This pattern matches all immediate subfolders in the current directory. It does not match any folders at a deeper level. For example, it would match folders like `/folder1/`, `/folder2/`, but not `/folder1/subfolder/`.

3. `/*/*`: This pattern matches all subfolders at a depth of two levels in the current directory. It does not match any folders at a shallower or deeper level. For example, it would match folders like `/folder1/subfolder/`, `/folder2/subfolder/`, but not `/folder1/` or `/folder1/subfolder/subsubfolder/`.

4. `/folder1/*`: This pattern matches all immediate subfolders within the `/folder1/` directory. It does not match any folders outside of `/folder1/`, nor does it match any subfolders within those immediate subfolders. For example, it would match folders like `/folder1/subfolder1/`, `/folder1/subfolder2/`, but not `/folder2/subfolder/`.
</Accordion>
|
||||
</AccordionGroup>
|
||||
|
After Width: | Height: | Size: 58 KiB |
Before Width: | Height: | Size: 181 KiB After Width: | Height: | Size: 131 KiB |
Before Width: | Height: | Size: 199 KiB After Width: | Height: | Size: 160 KiB |
BIN
docs/images/integrations/jenkins/jenkins_10_identity.png
Normal file
After Width: | Height: | Size: 268 KiB |
BIN
docs/images/integrations/jenkins/jenkins_11_identity.png
Normal file
After Width: | Height: | Size: 87 KiB |
BIN
docs/images/integrations/jenkins/jenkins_4_identity_id.png
Normal file
After Width: | Height: | Size: 184 KiB |
BIN
docs/images/integrations/jenkins/jenkins_4_identity_secret.png
Normal file
After Width: | Height: | Size: 186 KiB |
BIN
docs/images/integrations/jenkins/jenkins_5_identity.png
Normal file
After Width: | Height: | Size: 210 KiB |
BIN
docs/images/integrations/jenkins/jenkins_9_identity.png
Normal file
After Width: | Height: | Size: 229 KiB |
After Width: | Height: | Size: 126 KiB |