Mirror of https://github.com/Infisical/infisical.git (synced 2025-03-29 22:02:57 +00:00)

Compare commits: daniel/cli...daniel/cli (55 commits)
Commit SHAs:
de21b44486
519d6f98a2
973ed37018
e008fb26a2
34543ef127
83107f56bb
35071af478
eb5f71cb05
9cf1dd38a6
144a563609
ca0062f049
2ed9aa888e
8c7d329f8f
a0aa06e2f5
1dd0167ac8
55aea364da
afee47ab45
9387d9aaac
2b215a510c
89ff6a6c93
3bcf406688
580b86cde8
7a20251261
ae63898d5e
d4d3c2b10f
0e3cc4fdeb
4050e56e60
e453ddf937
3f68807179
ba42aca069
22c589e2cf
943945f6d7
b598dd3d47
ad6d18a905
46a91515b1
b79ce8a880
d31d98b5e0
cb6cbafcae
bcb3eaab74
12d5fb1043
8bf09789d6
7ab8db0471
6b473d2b36
7581b33b3b
be74f4d34c
e973a62753
94fa294455
be63e538d7
02e423f52c
3cb226908b
ba37b1c083
d23b39abba
e2967f5e61
97afc4ff51
c47a91715f
112  .github/workflows/release_build_infisical_cli.yml  (vendored)
@@ -1,60 +1,64 @@
name: Build and release CLI

on:
  push:
    # run only against tags
    tags:
      - "infisical-cli/v*.*.*"

permissions:
  contents: write
  # packages: write
  # issues: write

jobs:
+  cli-integration-tests:
+    name: Run tests before deployment
+    uses: ./.github/workflows/run-cli-tests.yml

  goreleaser:
    runs-on: ubuntu-20.04
+    needs: [cli-integration-tests]
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: 🐋 Login to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: 🔧 Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - run: git fetch --force --tags
      - run: echo "Ref name ${{github.ref_name}}"
      - uses: actions/setup-go@v3
        with:
          go-version: ">=1.19.3"
          cache: true
          cache-dependency-path: cli/go.sum
      - name: libssl1.1 => libssl1.0-dev for OSXCross
        run: |
          echo 'deb http://security.ubuntu.com/ubuntu bionic-security main' | sudo tee -a /etc/apt/sources.list
          sudo apt update && apt-cache policy libssl1.0-dev
          sudo apt-get install libssl1.0-dev
      - name: OSXCross for CGO Support
        run: |
          mkdir ../../osxcross
          git clone https://github.com/plentico/osxcross-target.git ../../osxcross/target
      - uses: goreleaser/goreleaser-action@v4
        with:
          distribution: goreleaser-pro
          version: latest
          args: release --clean
        env:
          GITHUB_TOKEN: ${{ secrets.GO_RELEASER_GITHUB_TOKEN }}
          POSTHOG_API_KEY_FOR_CLI: ${{ secrets.POSTHOG_API_KEY_FOR_CLI }}
          FURY_TOKEN: ${{ secrets.FURYPUSHTOKEN }}
          AUR_KEY: ${{ secrets.AUR_KEY }}
          GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}
      - uses: actions/setup-python@v4
      - run: pip install --upgrade cloudsmith-cli
      - name: Publish to CloudSmith
        run: sh cli/upload_to_cloudsmith.sh
        env:
          CLOUDSMITH_API_KEY: ${{ secrets.CLOUDSMITH_API_KEY }}
34  .github/workflows/run-cli-tests.yml  (vendored, new file)
@@ -0,0 +1,34 @@
name: Go CLI Tests

on:
  pull_request:
    types: [opened, synchronize]
    paths:
      - "cli/**"

  workflow_call:

jobs:
  test:
    defaults:
      run:
        working-directory: ./cli
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4
      - name: Setup Go
        uses: actions/setup-go@v4
        with:
          go-version: "1.21.x"
      - name: Install dependencies
        run: go get .
      - name: Test with the Go CLI
        env:
          CLI_TESTS_UA_CLIENT_ID: ${{ secrets.CLI_TESTS_UA_CLIENT_ID }}
          CLI_TESTS_UA_CLIENT_SECRET: ${{ secrets.CLI_TESTS_UA_CLIENT_SECRET }}
          CLI_TESTS_SERVICE_TOKEN: ${{ secrets.CLI_TESTS_SERVICE_TOKEN }}
          CLI_TESTS_PROJECT_ID: ${{ secrets.CLI_TESTS_PROJECT_ID }}
          CLI_TESTS_ENV_SLUG: ${{ secrets.CLI_TESTS_ENV_SLUG }}

        run: go test -v -count=1 ./test
2  .gitignore  (vendored)
@@ -67,3 +67,5 @@ yarn-error.log*
frontend-build

*.tgz
+cli/infisical-merge
+cli/test/infisical-merge
@@ -23,16 +23,17 @@ module.exports = {
  root: true,
  overrides: [
    {
-      files: ["./e2e-test/**/*"],
+      files: ["./e2e-test/**/*", "./src/db/migrations/**/*"],
      rules: {
        "@typescript-eslint/no-unsafe-member-access": "off",
        "@typescript-eslint/no-unsafe-assignment": "off",
        "@typescript-eslint/no-unsafe-argument": "off",
        "@typescript-eslint/no-unsafe-return": "off",
-        "@typescript-eslint/no-unsafe-call": "off",
+        "@typescript-eslint/no-unsafe-call": "off"
      }
    }
  ],

  rules: {
    "@typescript-eslint/no-empty-function": "off",
    "@typescript-eslint/no-unsafe-enum-comparison": "off",
@@ -0,0 +1,47 @@
import { Knex } from "knex";

import { ProjectMembershipRole, TableName } from "../schemas";

export async function up(knex: Knex): Promise<void> {
  const doesProjectRoleFieldExist = await knex.schema.hasColumn(TableName.ProjectMembership, "role");
  const doesProjectRoleIdFieldExist = await knex.schema.hasColumn(TableName.ProjectMembership, "roleId");
  await knex.schema.alterTable(TableName.ProjectMembership, (t) => {
    if (doesProjectRoleFieldExist) t.dropColumn("roleId");
    if (doesProjectRoleIdFieldExist) t.dropColumn("role");
  });

  const doesIdentityProjectRoleFieldExist = await knex.schema.hasColumn(TableName.IdentityProjectMembership, "role");
  const doesIdentityProjectRoleIdFieldExist = await knex.schema.hasColumn(
    TableName.IdentityProjectMembership,
    "roleId"
  );
  await knex.schema.alterTable(TableName.IdentityProjectMembership, (t) => {
    if (doesIdentityProjectRoleFieldExist) t.dropColumn("roleId");
    if (doesIdentityProjectRoleIdFieldExist) t.dropColumn("role");
  });
}

export async function down(knex: Knex): Promise<void> {
  const doesProjectRoleFieldExist = await knex.schema.hasColumn(TableName.ProjectMembership, "role");
  const doesProjectRoleIdFieldExist = await knex.schema.hasColumn(TableName.ProjectMembership, "roleId");
  await knex.schema.alterTable(TableName.ProjectMembership, (t) => {
    if (!doesProjectRoleFieldExist) t.string("role").defaultTo(ProjectMembershipRole.Member);
    if (!doesProjectRoleIdFieldExist) {
      t.uuid("roleId");
      t.foreign("roleId").references("id").inTable(TableName.ProjectRoles);
    }
  });

  const doesIdentityProjectRoleFieldExist = await knex.schema.hasColumn(TableName.IdentityProjectMembership, "role");
  const doesIdentityProjectRoleIdFieldExist = await knex.schema.hasColumn(
    TableName.IdentityProjectMembership,
    "roleId"
  );
  await knex.schema.alterTable(TableName.IdentityProjectMembership, (t) => {
    if (!doesIdentityProjectRoleFieldExist) t.string("role").defaultTo(ProjectMembershipRole.Member);
    if (!doesIdentityProjectRoleIdFieldExist) {
      t.uuid("roleId");
      t.foreign("roleId").references("id").inTable(TableName.ProjectRoles);
    }
  });
}
@@ -9,8 +9,6 @@ import { TImmutableDBKeys } from "./models";

export const IdentityProjectMembershipsSchema = z.object({
  id: z.string().uuid(),
-  role: z.string(),
-  roleId: z.string().uuid().nullable().optional(),
  projectId: z.string(),
  identityId: z.string().uuid(),
  createdAt: z.date(),
@@ -9,12 +9,10 @@ import { TImmutableDBKeys } from "./models";

export const ProjectMembershipsSchema = z.object({
  id: z.string().uuid(),
-  role: z.string(),
  createdAt: z.date(),
  updatedAt: z.date(),
  userId: z.string().uuid(),
-  projectId: z.string(),
-  roleId: z.string().uuid().nullable().optional()
+  projectId: z.string()
});

export type TProjectMemberships = z.infer<typeof ProjectMembershipsSchema>;
@@ -33,8 +33,7 @@ export async function seed(knex: Knex): Promise<void> {
  const projectMembership = await knex(TableName.ProjectMembership)
    .insert({
      projectId: project.id,
-      userId: seedData1.id,
-      role: ProjectMembershipRole.Admin
+      userId: seedData1.id
    })
    .returning("*");
  await knex(TableName.ProjectUserMembershipRole).insert({
@@ -78,8 +78,7 @@ export async function seed(knex: Knex): Promise<void> {
  const identityProjectMembership = await knex(TableName.IdentityProjectMembership)
    .insert({
      identityId: seedData1.machineIdentity.id,
-      projectId: seedData1.project.id,
-      role: ProjectMembershipRole.Admin
+      projectId: seedData1.project.id
    })
    .returning("*");

@@ -72,7 +72,6 @@ export const permissionDALFactory = (db: TDbClient) => {
      .select(selectAllTableCols(TableName.GroupProjectMembershipRole))
      .select(
        db.ref("id").withSchema(TableName.GroupProjectMembership).as("membershipId"),
        // TODO(roll-forward-migration): remove this field when we drop this in next migration after a week
        db.ref("createdAt").withSchema(TableName.GroupProjectMembership).as("membershipCreatedAt"),
        db.ref("updatedAt").withSchema(TableName.GroupProjectMembership).as("membershipUpdatedAt"),
        db.ref("projectId").withSchema(TableName.GroupProjectMembership),

@@ -105,7 +104,6 @@ export const permissionDALFactory = (db: TDbClient) => {
      .select(selectAllTableCols(TableName.ProjectUserMembershipRole))
      .select(
        db.ref("id").withSchema(TableName.ProjectMembership).as("membershipId"),
        // TODO(roll-forward-migration): remove this field when we drop this in next migration after a week
        db.ref("createdAt").withSchema(TableName.ProjectMembership).as("membershipCreatedAt"),
        db.ref("updatedAt").withSchema(TableName.ProjectMembership).as("membershipUpdatedAt"),
        db.ref("projectId").withSchema(TableName.ProjectMembership),

@@ -131,11 +129,10 @@ export const permissionDALFactory = (db: TDbClient) => {
    const permission = sqlNestRelationships({
      data: docs,
      key: "projectId",
-      parentMapper: ({ orgId, orgAuthEnforced, membershipId, membershipCreatedAt, membershipUpdatedAt, role }) => ({
+      parentMapper: ({ orgId, orgAuthEnforced, membershipId, membershipCreatedAt, membershipUpdatedAt }) => ({
        orgId,
        orgAuthEnforced,
        userId,
-        role,
        id: membershipId,
        projectId,
        createdAt: membershipCreatedAt,

@@ -179,18 +176,10 @@ export const permissionDALFactory = (db: TDbClient) => {
      ? sqlNestRelationships({
          data: groupDocs,
          key: "projectId",
-          parentMapper: ({
-            orgId,
-            orgAuthEnforced,
-            membershipId,
-            membershipCreatedAt,
-            membershipUpdatedAt,
-            role
-          }) => ({
+          parentMapper: ({ orgId, orgAuthEnforced, membershipId, membershipCreatedAt, membershipUpdatedAt }) => ({
            orgId,
            orgAuthEnforced,
            userId,
-            role,
            id: membershipId,
            projectId,
            createdAt: membershipCreatedAt,

@@ -270,7 +259,6 @@ export const permissionDALFactory = (db: TDbClient) => {
      .select(
        db.ref("id").withSchema(TableName.IdentityProjectMembership).as("membershipId"),
        db.ref("orgId").withSchema(TableName.Project).as("orgId"), // Now you can select orgId from Project
-        db.ref("role").withSchema(TableName.IdentityProjectMembership).as("oldRoleField"),
        db.ref("createdAt").withSchema(TableName.IdentityProjectMembership).as("membershipCreatedAt"),
        db.ref("updatedAt").withSchema(TableName.IdentityProjectMembership).as("membershipUpdatedAt"),
        db.ref("slug").withSchema(TableName.ProjectRoles).as("customRoleSlug"),

@@ -299,11 +287,10 @@ export const permissionDALFactory = (db: TDbClient) => {
    const permission = sqlNestRelationships({
      data: docs,
      key: "membershipId",
-      parentMapper: ({ membershipId, membershipCreatedAt, membershipUpdatedAt, oldRoleField, orgId }) => ({
+      parentMapper: ({ membershipId, membershipCreatedAt, membershipUpdatedAt, orgId }) => ({
        id: membershipId,
        identityId,
        projectId,
-        role: oldRoleField,
        createdAt: membershipCreatedAt,
        updatedAt: membershipUpdatedAt,
        orgId,
@@ -35,31 +35,28 @@ export const registerProjectMembershipRouter = async (server: FastifyZodProvider
      }),
      response: {
        200: z.object({
-          memberships: ProjectMembershipsSchema.omit({ role: true })
-            .merge(
+          memberships: ProjectMembershipsSchema.extend({
            user: UsersSchema.pick({
              email: true,
              firstName: true,
              lastName: true,
              id: true
            }).merge(UserEncryptionKeysSchema.pick({ publicKey: true })),
            roles: z.array(
              z.object({
            user: UsersSchema.pick({
              email: true,
              firstName: true,
              lastName: true,
              id: true
            }).merge(UserEncryptionKeysSchema.pick({ publicKey: true })),
            roles: z.array(
              z.object({
                id: z.string(),
                role: z.string(),
                customRoleId: z.string().optional().nullable(),
                customRoleName: z.string().optional().nullable(),
                customRoleSlug: z.string().optional().nullable(),
                isTemporary: z.boolean(),
                temporaryMode: z.string().optional().nullable(),
                temporaryRange: z.string().nullable().optional(),
                temporaryAccessStartTime: z.date().nullable().optional(),
                temporaryAccessEndTime: z.date().nullable().optional()
              })
            )
                id: z.string(),
                role: z.string(),
                customRoleId: z.string().optional().nullable(),
                customRoleName: z.string().optional().nullable(),
                customRoleSlug: z.string().optional().nullable(),
                isTemporary: z.boolean(),
                temporaryMode: z.string().optional().nullable(),
                temporaryRange: z.string().nullable().optional(),
                temporaryAccessStartTime: z.date().nullable().optional(),
                temporaryAccessEndTime: z.date().nullable().optional()
              })
            )
          })
+            .omit({ createdAt: true, updatedAt: true })
+            .array()
        })
@@ -70,32 +70,29 @@ export const registerProjectRouter = async (server: FastifyZodProvider) => {
      }),
      response: {
        200: z.object({
-          users: ProjectMembershipsSchema.omit({ role: true })
-            .merge(
+          users: ProjectMembershipsSchema.extend({
            user: UsersSchema.pick({
              email: true,
              username: true,
              firstName: true,
              lastName: true,
              id: true
            }).merge(UserEncryptionKeysSchema.pick({ publicKey: true })),
            roles: z.array(
              z.object({
            user: UsersSchema.pick({
              username: true,
              email: true,
              firstName: true,
              lastName: true,
              id: true
            }).merge(UserEncryptionKeysSchema.pick({ publicKey: true })),
            roles: z.array(
              z.object({
                id: z.string(),
                role: z.string(),
                customRoleId: z.string().optional().nullable(),
                customRoleName: z.string().optional().nullable(),
                customRoleSlug: z.string().optional().nullable(),
                isTemporary: z.boolean(),
                temporaryMode: z.string().optional().nullable(),
                temporaryRange: z.string().nullable().optional(),
                temporaryAccessStartTime: z.date().nullable().optional(),
                temporaryAccessEndTime: z.date().nullable().optional()
              })
            )
                id: z.string(),
                role: z.string(),
                customRoleId: z.string().optional().nullable(),
                customRoleName: z.string().optional().nullable(),
                customRoleSlug: z.string().optional().nullable(),
                isTemporary: z.boolean(),
                temporaryMode: z.string().optional().nullable(),
                temporaryRange: z.string().nullable().optional(),
                temporaryAccessStartTime: z.date().nullable().optional(),
                temporaryAccessEndTime: z.date().nullable().optional()
              })
            )
          })
+            .omit({ createdAt: true, updatedAt: true })
+            .array()
        })
@@ -93,9 +93,7 @@ export const identityProjectServiceFactory = ({
    const identityProjectMembership = await identityProjectDAL.create(
      {
        identityId,
-        projectId: project.id,
-        role: isCustomRole ? ProjectMembershipRole.Custom : role,
-        roleId: customRole?.id
+        projectId: project.id
      },
      tx
    );
@@ -232,8 +232,7 @@ export const projectQueueFactory = ({
    const projectMembership = await projectMembershipDAL.create(
      {
        projectId: project.id,
-        userId: ghostUser.user.id,
-        role: ProjectMembershipRole.Admin
+        userId: ghostUser.user.id
      },
      tx
    );
@@ -141,8 +141,7 @@ export const projectServiceFactory = ({
    const projectMembership = await projectMembershipDAL.create(
      {
        userId: ghostUser.user.id,
-        projectId: project.id,
-        role: ProjectMembershipRole.Admin
+        projectId: project.id
      },
      tx
    );

@@ -244,8 +243,7 @@ export const projectServiceFactory = ({
    const userProjectMembership = await projectMembershipDAL.create(
      {
        projectId: project.id,
-        userId: user.id,
-        role: projectAdmin.projectRole
+        userId: user.id
      },
      tx
    );

@@ -302,9 +300,7 @@ export const projectServiceFactory = ({
    const identityProjectMembership = await identityProjectDAL.create(
      {
        identityId: actorId,
-        projectId: project.id,
-        role: isCustomRole ? ProjectMembershipRole.Custom : ProjectMembershipRole.Admin,
-        roleId: customRole?.id
+        projectId: project.id
      },
      tx
    );
@@ -29,6 +29,7 @@ require (
require (
	github.com/alessio/shellescape v1.4.1 // indirect
	github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef // indirect
+	github.com/bradleyjkemp/cupaloy/v2 v2.8.0 // indirect
	github.com/chzyer/readline v1.5.1 // indirect
	github.com/danieljoos/wincred v1.2.0 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
@@ -51,6 +51,8 @@ github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGL
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
+github.com/bradleyjkemp/cupaloy/v2 v2.8.0 h1:any4BmKE+jGIaMpnU8YgH/I2LPiLBufr6oMMlVBbn9M=
+github.com/bradleyjkemp/cupaloy/v2 v2.8.0/go.mod h1:bm7JXdkRd4BHJk9HpwqAI8BoAY1lps46Enkdqw6aRX0=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/charmbracelet/lipgloss v0.5.0 h1:lulQHuVeodSgDez+3rGiuxlPVXSnhth442DATR2/8t8=
github.com/charmbracelet/lipgloss v0.5.0/go.mod h1:EZLha/HbzEt7cYqdFPovlqy5FZPj0xFhg5SaqxScmgs=

@@ -324,6 +326,7 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44=
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
@@ -512,16 +512,23 @@ func CallUniversalAuthRefreshAccessToken(httpClient *resty.Client, request Unive

func CallGetRawSecretsV3(httpClient *resty.Client, request GetRawSecretsV3Request) (GetRawSecretsV3Response, error) {
	var getRawSecretsV3Response GetRawSecretsV3Response
-	response, err := httpClient.
+	req := httpClient.
		R().
		SetResult(&getRawSecretsV3Response).
		SetHeader("User-Agent", USER_AGENT).
		SetBody(request).
		SetQueryParam("workspaceId", request.WorkspaceId).
		SetQueryParam("environment", request.Environment).
-		SetQueryParam("secretPath", request.SecretPath).
-		SetQueryParam("include_imports", "false").
-		Get(fmt.Sprintf("%v/v3/secrets/raw", config.INFISICAL_URL))
+		SetQueryParam("secretPath", request.SecretPath)
+
+	if request.IncludeImport {
+		req.SetQueryParam("include_imports", "true")
+	}
+	if request.Recursive {
+		req.SetQueryParam("recursive", "true")
+	}
+
+	response, err := req.Get(fmt.Sprintf("%v/v3/secrets/raw", config.INFISICAL_URL))

	if err != nil {
		return GetRawSecretsV3Response{}, fmt.Errorf("CallGetRawSecretsV3: Unable to complete api request [err=%w]", err)
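For orientation, a hedged sketch of how a caller might drive the reworked request: IncludeImport and Recursive are plain booleans on GetRawSecretsV3Request that the function above translates into include_imports and recursive query parameters. The import paths and the authentication setup on the resty client are assumptions, not part of this diff.

```go
package sketch

import (
	"fmt"

	"github.com/go-resty/resty/v2"

	// Assumed import path for the CLI's api package; adjust to the real module layout.
	"github.com/Infisical/infisical-merge/packages/api"
)

// fetchRawSecretsSketch is illustrative only; building an authenticated resty
// client (access token, base headers) is assumed to happen elsewhere.
func fetchRawSecretsSketch(httpClient *resty.Client, workspaceId, env, secretPath string) error {
	request := api.GetRawSecretsV3Request{
		WorkspaceId:   workspaceId,
		Environment:   env,
		SecretPath:    secretPath,
		IncludeImport: true, // emitted as include_imports=true
		Recursive:     true, // emitted as recursive=true
	}

	res, err := api.CallGetRawSecretsV3(httpClient, request)
	if err != nil {
		return err
	}

	// Imports is now a typed slice of api.ImportedRawSecretV3 (see the models
	// change below) instead of []any, so imported folders can be walked directly.
	for _, imp := range res.Imports {
		fmt.Printf("import %s (%s): %d secrets\n", imp.SecretPath, imp.Environment, len(imp.Secrets))
	}
	return nil
}
```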
@@ -371,6 +371,22 @@ type ImportedSecretV3 struct {
	Secrets []EncryptedSecretV3 `json:"secrets"`
}

+type ImportedRawSecretV3 struct {
+	SecretPath  string `json:"secretPath"`
+	Environment string `json:"environment"`
+	FolderId    string `json:"folderId"`
+	Secrets     []struct {
+		ID            string `json:"id"`
+		Workspace     string `json:"workspace"`
+		Environment   string `json:"environment"`
+		Version       int    `json:"version"`
+		Type          string `json:"type"`
+		SecretKey     string `json:"secretKey"`
+		SecretValue   string `json:"secretValue"`
+		SecretComment string `json:"secretComment"`
+	} `json:"secrets"`
+}
+
type GetEncryptedSecretsV3Response struct {
	Secrets         []EncryptedSecretV3 `json:"secrets"`
	ImportedSecrets []ImportedSecretV3  `json:"imports,omitempty"`

@@ -542,6 +558,6 @@ type GetRawSecretsV3Response struct {
		SecretValue   string `json:"secretValue"`
		SecretComment string `json:"secretComment"`
	} `json:"secrets"`
-	Imports []any `json:"imports"`
+	Imports []ImportedRawSecretV3 `json:"imports"`
	ETag string
}
@@ -118,6 +118,8 @@ var exportCmd = &cobra.Command{
			secrets = util.ExpandSecrets(secrets, authParams, "")
		}
		secrets = util.FilterSecretsByTag(secrets, tagSlugs)
+		secrets = util.SortSecretsByKeys(secrets)
+
		output, err = formatEnvs(secrets, format)
		if err != nil {
			util.HandleError(err)
@@ -116,6 +116,9 @@ var secretsCmd = &cobra.Command{
			secrets = util.ExpandSecrets(secrets, authParams, "")
		}

+		// Sort the secrets by key so we can create a consistent output
+		secrets = util.SortSecretsByKeys(secrets)
+
		visualize.PrintAllSecretDetails(secrets)
		Telemetry.CaptureEvent("cli-command:secrets", posthog.NewProperties().Set("secretCount", len(secrets)).Set("version", util.CLI_VERSION))
	},
@@ -8,6 +8,7 @@ import (
	"os"
	"os/exec"
	"path"
+	"sort"
	"strings"
	"time"

@@ -53,6 +54,14 @@ func GetBase64DecodedSymmetricEncryptionDetails(key string, cipher string, IV st
	}, nil
}

+// Helper function to sort the secrets by key so we can create a consistent output
+func SortSecretsByKeys(secrets []models.SingleEnvironmentVariable) []models.SingleEnvironmentVariable {
+	sort.Slice(secrets, func(i, j int) bool {
+		return secrets[i].Key < secrets[j].Key
+	})
+	return secrets
+}
+
func IsSecretEnvironmentValid(env string) bool {
	if env == "prod" || env == "dev" || env == "test" || env == "staging" {
		return true
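As a side note, a minimal sketch of what this helper buys the test suite: sorting by key makes the CLI's table and export output deterministic, which is what the cupaloy snapshots further down rely on. The import paths for the CLI's models and util packages are assumed here and may differ from the actual module layout.

```go
package main

import (
	"fmt"

	// Assumed import paths; adjust to the CLI's real module layout.
	"github.com/Infisical/infisical-merge/packages/models"
	"github.com/Infisical/infisical-merge/packages/util"
)

func main() {
	secrets := []models.SingleEnvironmentVariable{
		{Key: "TEST-SECRET-2", Value: "test-value-2"},
		{Key: "FOLDER-SECRET-1", Value: "folder-value-1"},
		{Key: "TEST-SECRET-1", Value: "test-value-1"},
	}

	// Keys come back alphabetically, so repeated runs of `infisical secrets`
	// or `infisical export` produce byte-identical output for snapshotting.
	for _, s := range util.SortSecretsByKeys(secrets) {
		fmt.Println(s.Key)
	}
}
```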
@@ -186,12 +186,12 @@ func GetPlainTextSecretsViaMachineIdentity(accessToken string, workspaceId strin
		plainTextSecrets = append(plainTextSecrets, models.SingleEnvironmentVariable{Key: secret.SecretKey, Value: secret.SecretValue, Type: secret.Type, WorkspaceId: secret.Workspace})
	}

-	// if includeImports {
-	// 	plainTextSecrets, err = InjectImportedSecret(plainTextWorkspaceKey, plainTextSecrets, encryptedSecrets.ImportedSecrets)
-	// 	if err != nil {
-	// 		return nil, err
-	// 	}
-	// }
+	if includeImports {
+		plainTextSecrets, err = InjectRawImportedSecret(plainTextSecrets, rawSecrets.Imports)
+		if err != nil {
+			return models.PlaintextSecretResult{}, err
+		}
+	}

	return models.PlaintextSecretResult{
		Secrets: plainTextSecrets,

@@ -252,6 +252,36 @@ func InjectImportedSecret(plainTextWorkspaceKey []byte, secrets []models.SingleE
	return secrets, nil
}

+func InjectRawImportedSecret(secrets []models.SingleEnvironmentVariable, importedSecrets []api.ImportedRawSecretV3) ([]models.SingleEnvironmentVariable, error) {
+	if importedSecrets == nil {
+		return secrets, nil
+	}
+
+	hasOverriden := make(map[string]bool)
+	for _, sec := range secrets {
+		hasOverriden[sec.Key] = true
+	}
+
+	for i := len(importedSecrets) - 1; i >= 0; i-- {
+		importSec := importedSecrets[i]
+		plainTextImportedSecrets := importSec.Secrets
+
+		for _, sec := range plainTextImportedSecrets {
+			if _, ok := hasOverriden[sec.SecretKey]; !ok {
+				secrets = append(secrets, models.SingleEnvironmentVariable{
+					Key:         sec.SecretKey,
+					WorkspaceId: sec.Workspace,
+					Value:       sec.SecretValue,
+					Type:        sec.Type,
+					ID:          sec.ID,
+				})
+				hasOverriden[sec.SecretKey] = true
+			}
+		}
+	}
+	return secrets, nil
+}
+
func FilterSecretsByTag(plainTextSecrets []models.SingleEnvironmentVariable, tagSlugs string) []models.SingleEnvironmentVariable {
	if tagSlugs == "" {
		return plainTextSecrets
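A small, self-contained illustration of the merge rule that InjectRawImportedSecret implements, using plain maps rather than the production types: secrets fetched directly from the requested path always win, and when the same key appears in several imports, the import nearer the end of the list is folded in first and therefore takes precedence. This is a simplified model for clarity, not the function itself.

```go
package main

import "fmt"

// mergeSketch mirrors the precedence logic of InjectRawImportedSecret with
// simplified types: direct secrets are never overridden, and each imported
// key is only taken once, walking the imports from the last entry backwards.
func mergeSketch(direct map[string]string, imports []map[string]string) map[string]string {
	merged := make(map[string]string, len(direct))
	seen := make(map[string]bool, len(direct))

	for k, v := range direct {
		merged[k] = v
		seen[k] = true
	}

	for i := len(imports) - 1; i >= 0; i-- {
		for k, v := range imports[i] {
			if !seen[k] {
				merged[k] = v
				seen[k] = true
			}
		}
	}
	return merged
}

func main() {
	direct := map[string]string{"TEST-SECRET-1": "test-value-1"}
	imports := []map[string]string{
		{"STAGING-SECRET-1": "staging-value-1", "SHARED": "first"},
		{"SHARED": "second"},
	}
	// TEST-SECRET-1 keeps its direct value; SHARED resolves to "second".
	fmt.Println(mergeSketch(direct, imports))
}
```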
@@ -0,0 +1,5 @@
STAGING-SECRET-1='staging-value-1'
STAGING-SECRET-2='staging-value-2'
TEST-SECRET-1='test-value-1'
TEST-SECRET-2='test-value-2'
TEST-SECRET-3='test-value-3'

@@ -0,0 +1,3 @@
TEST-SECRET-1='test-value-1'
TEST-SECRET-2='test-value-2'
TEST-SECRET-3='test-value-3'

@@ -0,0 +1,7 @@
┌─────────────────┬────────────────┬─────────────┐
│ SECRET NAME │ SECRET VALUE │ SECRET TYPE │
├─────────────────┼────────────────┼─────────────┤
│ TEST-SECRET-1 │ test-value-1 │ shared │
│ TEST-SECRET-2 │ test-value-2 │ shared │
│ FOLDER-SECRET-1 │ folder-value-1 │ shared │
└─────────────────┴────────────────┴─────────────┘

@@ -0,0 +1,7 @@
┌──────────────────┬─────────────────┬─────────────┐
│ SECRET NAME │ SECRET VALUE │ SECRET TYPE │
├──────────────────┼─────────────────┼─────────────┤
│ TEST-SECRET-1 │ test-value-1 │ shared │
│ STAGING-SECRET-2 │ staging-value-2 │ shared │
│ FOLDER-SECRET-1 │ folder-value-1 │ shared │
└──────────────────┴─────────────────┴─────────────┘

@@ -0,0 +1,8 @@
┌─────────────────┬────────────────┬─────────────┐
│ SECRET NAME │ SECRET VALUE │ SECRET TYPE │
├─────────────────┼────────────────┼─────────────┤
│ TEST-SECRET-1 │ test-value-1 │ shared │
│ TEST-SECRET-2 │ test-value-2 │ shared │
│ FOLDER-SECRET-1 │ folder-value-1 │ shared │
│ DOES-NOT-EXIST │ *not found* │ *not found* │
└─────────────────┴────────────────┴─────────────┘

@@ -0,0 +1,2 @@
Injecting 6 Infisical secrets into your application process
hello world

@@ -0,0 +1,2 @@
Injecting 5 Infisical secrets into your application process
hello world

@@ -0,0 +1,2 @@
Injecting 3 Infisical secrets into your application process
hello world

@@ -0,0 +1,10 @@
┌──────────────────┬─────────────────┬─────────────┐
│ SECRET NAME │ SECRET VALUE │ SECRET TYPE │
├──────────────────┼─────────────────┼─────────────┤
│ FOLDER-SECRET-1 │ folder-value-1 │ shared │
│ STAGING-SECRET-1 │ staging-value-1 │ shared │
│ STAGING-SECRET-2 │ staging-value-2 │ shared │
│ TEST-SECRET-1 │ test-value-1 │ shared │
│ TEST-SECRET-2 │ test-value-2 │ shared │
│ TEST-SECRET-3 │ test-value-3 │ shared │
└──────────────────┴─────────────────┴─────────────┘

@@ -0,0 +1,7 @@
┌───────────────┬──────────────┬─────────────┐
│ SECRET NAME │ SECRET VALUE │ SECRET TYPE │
├───────────────┼──────────────┼─────────────┤
│ TEST-SECRET-1 │ test-value-1 │ shared │
│ TEST-SECRET-2 │ test-value-2 │ shared │
│ TEST-SECRET-3 │ test-value-3 │ shared │
└───────────────┴──────────────┴─────────────┘

@@ -0,0 +1,5 @@
STAGING-SECRET-1='staging-value-1'
STAGING-SECRET-2='staging-value-2'
TEST-SECRET-1='test-value-1'
TEST-SECRET-2='test-value-2'
TEST-SECRET-3='test-value-3'

@@ -0,0 +1,3 @@
TEST-SECRET-1='test-value-1'
TEST-SECRET-2='test-value-2'
TEST-SECRET-3='test-value-3'

@@ -0,0 +1,7 @@
┌─────────────────┬────────────────┬─────────────┐
│ SECRET NAME │ SECRET VALUE │ SECRET TYPE │
├─────────────────┼────────────────┼─────────────┤
│ TEST-SECRET-1 │ test-value-1 │ shared │
│ TEST-SECRET-2 │ test-value-2 │ shared │
│ FOLDER-SECRET-1 │ folder-value-1 │ shared │
└─────────────────┴────────────────┴─────────────┘

@@ -0,0 +1,7 @@
┌──────────────────┬─────────────────┬─────────────┐
│ SECRET NAME │ SECRET VALUE │ SECRET TYPE │
├──────────────────┼─────────────────┼─────────────┤
│ TEST-SECRET-1 │ test-value-1 │ shared │
│ STAGING-SECRET-2 │ staging-value-2 │ shared │
│ FOLDER-SECRET-1 │ folder-value-1 │ shared │
└──────────────────┴─────────────────┴─────────────┘

@@ -0,0 +1,8 @@
┌─────────────────┬────────────────┬─────────────┐
│ SECRET NAME │ SECRET VALUE │ SECRET TYPE │
├─────────────────┼────────────────┼─────────────┤
│ TEST-SECRET-1 │ test-value-1 │ shared │
│ TEST-SECRET-2 │ test-value-2 │ shared │
│ FOLDER-SECRET-1 │ folder-value-1 │ shared │
│ DOES-NOT-EXIST │ *not found* │ *not found* │
└─────────────────┴────────────────┴─────────────┘

@@ -0,0 +1,2 @@
Injecting 6 Infisical secrets into your application process
hello world

@@ -0,0 +1,2 @@
Injecting 5 Infisical secrets into your application process
hello world

@@ -0,0 +1,2 @@
Injecting 3 Infisical secrets into your application process
hello world

@@ -0,0 +1,10 @@
┌──────────────────┬─────────────────┬─────────────┐
│ SECRET NAME │ SECRET VALUE │ SECRET TYPE │
├──────────────────┼─────────────────┼─────────────┤
│ FOLDER-SECRET-1 │ folder-value-1 │ shared │
│ STAGING-SECRET-1 │ staging-value-1 │ shared │
│ STAGING-SECRET-2 │ staging-value-2 │ shared │
│ TEST-SECRET-1 │ test-value-1 │ shared │
│ TEST-SECRET-2 │ test-value-2 │ shared │
│ TEST-SECRET-3 │ test-value-3 │ shared │
└──────────────────┴─────────────────┴─────────────┘

@@ -0,0 +1,7 @@
┌───────────────┬──────────────┬─────────────┐
│ SECRET NAME │ SECRET VALUE │ SECRET TYPE │
├───────────────┼──────────────┼─────────────┤
│ TEST-SECRET-1 │ test-value-1 │ shared │
│ TEST-SECRET-2 │ test-value-2 │ shared │
│ TEST-SECRET-3 │ test-value-3 │ shared │
└───────────────┴──────────────┴─────────────┘

@@ -0,0 +1,4 @@
error: CallGetRawSecretsV3: Unsuccessful response [GET https://app.infisical.com/api/v3/secrets/raw?environment=invalid-env&include_imports=true&recursive=true&secretPath=%2F&workspaceId=bef697d4-849b-4a75-b284-0922f87f8ba2] [status-code=500] [response={"statusCode":500,"error":"Internal Server Error","message":"'invalid-env' environment not found in project with ID bef697d4-849b-4a75-b284-0922f87f8ba2"}]


If this issue continues, get support at https://infisical.com/slack
73  cli/test/export_test.go  (new file)
@@ -0,0 +1,73 @@
package tests

import (
	"testing"

	"github.com/bradleyjkemp/cupaloy/v2"
)

func TestUniversalAuth_ExportSecretsWithImports(t *testing.T) {
	MachineIdentityLoginCmd(t)
	SetupCli(t)

	output, err := ExecuteCliCommand(FORMATTED_CLI_NAME, "export", "--token", creds.UAAccessToken, "--projectId", creds.ProjectID, "--env", creds.EnvSlug, "--silent")

	if err != nil {
		t.Fatalf("error running CLI command: %v", err)
	}

	// Use cupaloy to snapshot test the output
	err = cupaloy.Snapshot(output)
	if err != nil {
		t.Fatalf("snapshot failed: %v", err)
	}
}

func TestServiceToken_ExportSecretsWithImports(t *testing.T) {
	SetupCli(t)

	output, err := ExecuteCliCommand(FORMATTED_CLI_NAME, "export", "--token", creds.ServiceToken, "--projectId", creds.ProjectID, "--env", creds.EnvSlug, "--silent")

	if err != nil {
		t.Fatalf("error running CLI command: %v", err)
	}

	// Use cupaloy to snapshot test the output
	err = cupaloy.Snapshot(output)
	if err != nil {
		t.Fatalf("snapshot failed: %v", err)
	}
}

func TestUniversalAuth_ExportSecretsWithoutImports(t *testing.T) {
	MachineIdentityLoginCmd(t)
	SetupCli(t)

	output, err := ExecuteCliCommand(FORMATTED_CLI_NAME, "export", "--token", creds.UAAccessToken, "--projectId", creds.ProjectID, "--env", creds.EnvSlug, "--silent", "--include-imports=false")

	if err != nil {
		t.Fatalf("error running CLI command: %v", err)
	}

	// Use cupaloy to snapshot test the output
	err = cupaloy.Snapshot(output)
	if err != nil {
		t.Fatalf("snapshot failed: %v", err)
	}
}

func TestServiceToken_ExportSecretsWithoutImports(t *testing.T) {
	SetupCli(t)

	output, err := ExecuteCliCommand(FORMATTED_CLI_NAME, "export", "--token", creds.ServiceToken, "--projectId", creds.ProjectID, "--env", creds.EnvSlug, "--silent", "--include-imports=false")

	if err != nil {
		t.Fatalf("error running CLI command: %v", err)
	}

	// Use cupaloy to snapshot test the output
	err = cupaloy.Snapshot(output)
	if err != nil {
		t.Fatalf("snapshot failed: %v", err)
	}
}
64  cli/test/helper.go  (new file)
@@ -0,0 +1,64 @@
package tests

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
	"testing"
)

const (
	CLI_NAME = "infisical-merge"
)

var (
	FORMATTED_CLI_NAME = fmt.Sprintf("./%s", CLI_NAME)
)

type Credentials struct {
	ClientID      string
	ClientSecret  string
	UAAccessToken string
	ServiceToken  string
	ProjectID     string
	EnvSlug       string
}

var creds = Credentials{
	UAAccessToken: "",
	ClientID:      os.Getenv("CLI_TESTS_UA_CLIENT_ID"),
	ClientSecret:  os.Getenv("CLI_TESTS_UA_CLIENT_SECRET"),
	ServiceToken:  os.Getenv("CLI_TESTS_SERVICE_TOKEN"),
	ProjectID:     os.Getenv("CLI_TESTS_PROJECT_ID"),
	EnvSlug:       os.Getenv("CLI_TESTS_ENV_SLUG"),
}

func ExecuteCliCommand(command string, args ...string) (string, error) {
	cmd := exec.Command(command, args...)
	output, err := cmd.CombinedOutput()
	if err != nil {
		return strings.TrimSpace(string(output)), err
	}
	return strings.TrimSpace(string(output)), nil
}

func SetupCli(t *testing.T) {

	if creds.ClientID == "" || creds.ClientSecret == "" || creds.ServiceToken == "" || creds.ProjectID == "" || creds.EnvSlug == "" {
		panic("Missing required environment variables")
	}

	// check if the CLI is already built, if not build it
	alreadyBuilt := false
	if _, err := os.Stat(FORMATTED_CLI_NAME); err == nil {
		alreadyBuilt = true
	}

	if !alreadyBuilt {
		if err := exec.Command("go", "build", "../.").Run(); err != nil {
			t.Fatal(err)
		}
	}

}
29  cli/test/login_test.go  (new file)
@@ -0,0 +1,29 @@
package tests

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func MachineIdentityLoginCmd(t *testing.T) {
	SetupCli(t)

	if creds.UAAccessToken != "" {
		return
	}

	jwtPattern := `^[A-Za-z0-9-_]+\.[A-Za-z0-9-_]+\.[A-Za-z0-9-_]*$`

	output, err := ExecuteCliCommand(FORMATTED_CLI_NAME, "login", "--method=universal-auth", "--client-id", creds.ClientID, "--client-secret", creds.ClientSecret, "--plain", "--silent")

	if err != nil {
		t.Fatalf("error running CLI command: %v", err)
	}

	assert.Regexp(t, jwtPattern, output)

	creds.UAAccessToken = output

	// We can't use snapshot testing here because the output will be different every time
}
120  cli/test/run_test.go  (new file)
@@ -0,0 +1,120 @@
package tests

import (
	"bytes"
	"testing"

	"github.com/bradleyjkemp/cupaloy/v2"
)

func TestServiceToken_RunCmdRecursiveAndImports(t *testing.T) {
	SetupCli(t)

	output, err := ExecuteCliCommand(FORMATTED_CLI_NAME, "run", "--token", creds.ServiceToken, "--projectId", creds.ProjectID, "--env", creds.EnvSlug, "--recursive", "--silent", "--", "echo", "hello world")

	if err != nil {
		t.Fatalf("error running CLI command: %v", err)
	}

	output = string(bytes.Split([]byte(output), []byte("INF"))[1])

	// Use cupaloy to snapshot test the output
	err = cupaloy.Snapshot(output)
	if err != nil {
		t.Fatalf("snapshot failed: %v", err)
	}
}
func TestServiceToken_RunCmdWithImports(t *testing.T) {
	SetupCli(t)

	output, err := ExecuteCliCommand(FORMATTED_CLI_NAME, "run", "--token", creds.ServiceToken, "--projectId", creds.ProjectID, "--env", creds.EnvSlug, "--silent", "--", "echo", "hello world")

	if err != nil {
		t.Fatalf("error running CLI command: %v", err)
	}

	output = string(bytes.Split([]byte(output), []byte("INF"))[1])

	// Use cupaloy to snapshot test the output
	err = cupaloy.Snapshot(output)
	if err != nil {
		t.Fatalf("snapshot failed: %v", err)
	}
}

func TestUniversalAuth_RunCmdRecursiveAndImports(t *testing.T) {
	MachineIdentityLoginCmd(t)
	SetupCli(t)

	output, err := ExecuteCliCommand(FORMATTED_CLI_NAME, "run", "--token", creds.UAAccessToken, "--projectId", creds.ProjectID, "--env", creds.EnvSlug, "--recursive", "--silent", "--", "echo", "hello world")

	if err != nil {
		t.Fatalf("error running CLI command: %v", err)
	}

	output = string(bytes.Split([]byte(output), []byte("INF"))[1])

	// Use cupaloy to snapshot test the output
	err = cupaloy.Snapshot(output)
	if err != nil {
		t.Fatalf("snapshot failed: %v", err)
	}
}

func TestUniversalAuth_RunCmdWithImports(t *testing.T) {
	MachineIdentityLoginCmd(t)
	SetupCli(t)

	output, err := ExecuteCliCommand(FORMATTED_CLI_NAME, "run", "--token", creds.UAAccessToken, "--projectId", creds.ProjectID, "--env", creds.EnvSlug, "--silent", "--", "echo", "hello world")

	if err != nil {
		t.Fatalf("error running CLI command: %v", err)
	}

	// remove the first few characters from the output because we don't care about the time, and it will change every time
	output = string(bytes.Split([]byte(output), []byte("INF"))[1])

	// Use cupaloy to snapshot test the output
	err = cupaloy.Snapshot(output)
	if err != nil {
		t.Fatalf("snapshot failed: %v", err)
	}
}

func TestUniversalAuth_RunCmdWithoutImports(t *testing.T) {
	MachineIdentityLoginCmd(t)
	SetupCli(t)

	output, err := ExecuteCliCommand(FORMATTED_CLI_NAME, "run", "--token", creds.UAAccessToken, "--projectId", creds.ProjectID, "--env", creds.EnvSlug, "--silent", "--include-imports=false", "--", "echo", "hello world")

	if err != nil {
		t.Fatalf("error running CLI command: %v", err)
	}

	output = string(bytes.Split([]byte(output), []byte("INF"))[1])

	// Use cupaloy to snapshot test the output
	err = cupaloy.Snapshot(output)
	if err != nil {
		t.Fatalf("snapshot failed: %v", err)
	}
}

func TestServiceToken_RunCmdWithoutImports(t *testing.T) {
	SetupCli(t)

	output, err := ExecuteCliCommand(FORMATTED_CLI_NAME, "run", "--token", creds.ServiceToken, "--projectId", creds.ProjectID, "--env", creds.EnvSlug, "--silent", "--include-imports=false", "--", "echo", "hello world")

	if err != nil {
		t.Fatalf("error running CLI command: %v", err)
	}

	// Remove everything before "INF" because it's not relevant to the test
	output = string(bytes.Split([]byte(output), []byte("INF"))[1])

	// Use cupaloy to snapshot test the output
	err = cupaloy.Snapshot(output)
	if err != nil {
		t.Fatalf("snapshot failed: %v", err)
	}
}
106  cli/test/secrets_by_name_test.go  (new file)
@@ -0,0 +1,106 @@
package tests

import (
	"testing"

	"github.com/bradleyjkemp/cupaloy/v2"
)

func TestServiceToken_GetSecretsByNameRecursive(t *testing.T) {
	SetupCli(t)

	output, err := ExecuteCliCommand(FORMATTED_CLI_NAME, "secrets", "get", "TEST-SECRET-1", "TEST-SECRET-2", "FOLDER-SECRET-1", "--token", creds.ServiceToken, "--projectId", creds.ProjectID, "--env", creds.EnvSlug, "--recursive", "--silent")

	if err != nil {
		t.Fatalf("error running CLI command: %v", err)
	}

	// Use cupaloy to snapshot test the output
	err = cupaloy.Snapshot(output)
	if err != nil {
		t.Fatalf("snapshot failed: %v", err)
	}
}

func TestServiceToken_GetSecretsByNameWithNotFoundSecret(t *testing.T) {
	SetupCli(t)

	output, err := ExecuteCliCommand(FORMATTED_CLI_NAME, "secrets", "get", "TEST-SECRET-1", "TEST-SECRET-2", "FOLDER-SECRET-1", "DOES-NOT-EXIST", "--token", creds.ServiceToken, "--projectId", creds.ProjectID, "--env", creds.EnvSlug, "--recursive", "--silent")

	if err != nil {
		t.Fatalf("error running CLI command: %v", err)
	}

	// Use cupaloy to snapshot test the output
	err = cupaloy.Snapshot(output)
	if err != nil {
		t.Fatalf("snapshot failed: %v", err)
	}
}

func TestServiceToken_GetSecretsByNameWithImports(t *testing.T) {
	SetupCli(t)

	output, err := ExecuteCliCommand(FORMATTED_CLI_NAME, "secrets", "get", "TEST-SECRET-1", "STAGING-SECRET-2", "FOLDER-SECRET-1", "--token", creds.ServiceToken, "--projectId", creds.ProjectID, "--env", creds.EnvSlug, "--recursive", "--silent")

	if err != nil {
		t.Fatalf("error running CLI command: %v", err)
	}

	// Use cupaloy to snapshot test the output
	err = cupaloy.Snapshot(output)
	if err != nil {
		t.Fatalf("snapshot failed: %v", err)
	}
}

func TestUniversalAuth_GetSecretsByNameRecursive(t *testing.T) {
	MachineIdentityLoginCmd(t)
	SetupCli(t)

	output, err := ExecuteCliCommand(FORMATTED_CLI_NAME, "secrets", "get", "TEST-SECRET-1", "TEST-SECRET-2", "FOLDER-SECRET-1", "--token", creds.UAAccessToken, "--projectId", creds.ProjectID, "--env", creds.EnvSlug, "--recursive", "--silent")

	if err != nil {
		t.Fatalf("error running CLI command: %v", err)
	}

	// Use cupaloy to snapshot test the output
	err = cupaloy.Snapshot(output)
	if err != nil {
		t.Fatalf("snapshot failed: %v", err)
	}
}

func TestUniversalAuth_GetSecretsByNameWithNotFoundSecret(t *testing.T) {
	MachineIdentityLoginCmd(t)
	SetupCli(t)

	output, err := ExecuteCliCommand(FORMATTED_CLI_NAME, "secrets", "get", "TEST-SECRET-1", "TEST-SECRET-2", "FOLDER-SECRET-1", "DOES-NOT-EXIST", "--token", creds.UAAccessToken, "--projectId", creds.ProjectID, "--env", creds.EnvSlug, "--recursive", "--silent")

	if err != nil {
		t.Fatalf("error running CLI command: %v", err)
	}

	// Use cupaloy to snapshot test the output
	err = cupaloy.Snapshot(output)
	if err != nil {
		t.Fatalf("snapshot failed: %v", err)
	}
}

func TestUniversalAuth_GetSecretsByNameWithImports(t *testing.T) {
	MachineIdentityLoginCmd(t)
	SetupCli(t)

	output, err := ExecuteCliCommand(FORMATTED_CLI_NAME, "secrets", "get", "TEST-SECRET-1", "STAGING-SECRET-2", "FOLDER-SECRET-1", "--token", creds.UAAccessToken, "--projectId", creds.ProjectID, "--env", creds.EnvSlug, "--recursive", "--silent")

	if err != nil {
		t.Fatalf("error running CLI command: %v", err)
	}

	// Use cupaloy to snapshot test the output
	err = cupaloy.Snapshot(output)
	if err != nil {
		t.Fatalf("snapshot failed: %v", err)
	}
}
87  cli/test/secrets_test.go  (new file)
@@ -0,0 +1,87 @@
package tests

import (
	"testing"

	"github.com/bradleyjkemp/cupaloy/v2"
)

func TestServiceToken_SecretsGetWithImportsAndRecursiveCmd(t *testing.T) {
	SetupCli(t)

	output, err := ExecuteCliCommand(FORMATTED_CLI_NAME, "secrets", "--token", creds.ServiceToken, "--projectId", creds.ProjectID, "--env", creds.EnvSlug, "--recursive", "--silent")

	if err != nil {
		t.Fatalf("error running CLI command: %v", err)
	}

	// Use cupaloy to snapshot test the output
	err = cupaloy.Snapshot(output)
	if err != nil {
		t.Fatalf("snapshot failed: %v", err)
	}
}

func TestServiceToken_SecretsGetWithoutImportsAndWithoutRecursiveCmd(t *testing.T) {
	SetupCli(t)

	output, err := ExecuteCliCommand(FORMATTED_CLI_NAME, "secrets", "--token", creds.ServiceToken, "--projectId", creds.ProjectID, "--env", creds.EnvSlug, "--include-imports=false", "--silent")

	if err != nil {
		t.Fatalf("error running CLI command: %v", err)
	}

	// Use cupaloy to snapshot test the output
	err = cupaloy.Snapshot(output)
	if err != nil {
		t.Fatalf("snapshot failed: %v", err)
	}
}

func TestUniversalAuth_SecretsGetWithImportsAndRecursiveCmd(t *testing.T) {
	SetupCli(t)
	MachineIdentityLoginCmd(t)

	output, err := ExecuteCliCommand(FORMATTED_CLI_NAME, "secrets", "--token", creds.UAAccessToken, "--projectId", creds.ProjectID, "--env", creds.EnvSlug, "--recursive", "--silent")

	if err != nil {
		t.Fatalf("error running CLI command: %v", err)
	}

	// Use cupaloy to snapshot test the output
	err = cupaloy.Snapshot(output)
	if err != nil {
		t.Fatalf("snapshot failed: %v", err)
	}
}

func TestUniversalAuth_SecretsGetWithoutImportsAndWithoutRecursiveCmd(t *testing.T) {
	SetupCli(t)
	MachineIdentityLoginCmd(t)

	output, err := ExecuteCliCommand(FORMATTED_CLI_NAME, "secrets", "--token", creds.UAAccessToken, "--projectId", creds.ProjectID, "--env", creds.EnvSlug, "--include-imports=false", "--silent")

	if err != nil {
		t.Fatalf("error running CLI command: %v", err)
	}

	// Use cupaloy to snapshot test the output
	err = cupaloy.Snapshot(output)
	if err != nil {
		t.Fatalf("snapshot failed: %v", err)
	}
}

func TestUniversalAuth_SecretsGetWrongEnvironment(t *testing.T) {
	SetupCli(t)
	MachineIdentityLoginCmd(t)

	output, _ := ExecuteCliCommand(FORMATTED_CLI_NAME, "secrets", "--token", creds.UAAccessToken, "--projectId", creds.ProjectID, "--env", "invalid-env", "--recursive", "--silent")

	// Use cupaloy to snapshot test the output
	err := cupaloy.Snapshot(output)
	if err != nil {
		t.Fatalf("snapshot failed: %v", err)
	}

}
@@ -66,6 +66,8 @@ services:
    environment:
      - DB_CONNECTION_URI=postgres://infisical:infisical@db/infisical?sslmode=disable
    command: npm run migration:latest
+    volumes:
+      - ./backend/src:/app/src

  backend:
    container_name: infisical-dev-api
@@ -1,5 +1,5 @@
---
-title: "Email and Pasword"
+title: "Email and Password"
description: "Learn how to authenticate into Infisical with email and password."
---

@@ -9,6 +9,6 @@ It is currently possible to use the **Email and Password** auth method to authen

Every **Email and Password** is accompanied by an emergency kit given to users during signup. If the password is lost or forgotten, emergency kit is only way to retrieve the access to your account. It is possible to generate a new emergency kit with the following steps:
1. Open the `Personal Settings` menu.


2. Scroll down to the `Emergency Kit` section.
3. Enter your current password and click `Save`.
3. Enter your current password and click `Save`.
Binary file not shown (new image, 100 KiB).
@@ -212,7 +212,8 @@
    {
      "group": "Reference architectures",
      "pages": [
-        "self-hosting/reference-architectures/aws-ecs"
+        "self-hosting/reference-architectures/aws-ecs",
+        "self-hosting/reference-architectures/on-premise"
      ]
    },
    "self-hosting/ee",
@@ -1,21 +0,0 @@
---
title: "AWS EC2"
description: "Learn to install Infisical on EC2 using Cloud Formation template"
---

<iframe width="560" height="315" src="https://www.youtube.com/embed/jR-gM7vIY2c" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" allowfullscreen></iframe>
This deployment option will use AWS Cloudformation to auto deploy an instance of Infisical on a single EC2 via Docker Compose.

**Resources that will be provisioned**
- 1 EC2 instance
- 1 DocumentDB cluster
- 1 DocumentDB instance
- Security groups

<Info>
Once installation is complete, you will have to create the first account. No default account is provided.
</Info>

<a href="https://us-east-1.console.aws.amazon.com/cloudformation/home?region=us-east-1#/stacks/create/review?templateURL=https://ec2-instance-cloudformation.s3.amazonaws.com/infisical-ec2-deployment.template&stackName=infisical">
<img width="200" src="../../images/deploy-aws-button.png" />
</a>
@@ -1,66 +0,0 @@
---
title: "AWS Lightsail"
description: "Deploy Infisical with AWS Lightsail"
---

Prerequisites:
- Have an account with [Amazon Web Services (AWS)](https://aws.amazon.com/)

<Steps>
  <Step title="Create a container service in AWS Lightsail">
    1.1. In AWS, navigate to the **Lightsail** service and press **Create container service** under the **Containers** tab.

    1.2. In the **Container service location** section, select the AWS region that's closest to your infrastructure.

    Afterwards, in the **Container service capacity** section, set the power level and scale to fit your needs; you may opt for the default setting
    and adjust accordingly in the future.

    1.3. In the **Set up your first deployment** section, select the **Specify a custom deployment** option. Give the container a friendly name like **infisical** and fill in your intended [Infisical public Docker image](https://hub.docker.com/r/infisical/infisical) in the **Image** field; this will pull the image from Docker Hub.

    For example, in order to opt for Infisical `v0.43.4`, you would input: `infisical/infisical:v0.43.4`.

    1.4. Running Infisical requires a few environment variables to be set for the container service.
    At minimum, Infisical requires that you set the variables `ENCRYPTION_KEY`, `AUTH_SECRET`, `MONGO_URL`, and `REDIS_URL`
    which you can read more about [here](/self-hosting/configuration/envars).

    In the **Environment variables** section, fill in the required environment variables.

    <Note>
      To use more features like emailing and single sign-on, you can set additional configuration options [here](/self-hosting/configuration/envars).
    </Note>

    Also, under the **Open ports** section, add an entry for port `8080` and protocol `HTTP` since Infisical listens on port `8080`.

    1.5. In the **Public endpoint** section, select the container from the previous steps from the dropdown; this will make the container accessible over the public internet.

    1.6. Finally, in the **Identify your service** section, give the container service a unique name like infisical and press **Create container service**.
  </Step>
  <Step title="Navigate to your deployed instance of Infisical">
    On the newly-created container service page, wait for the **Status** to turn to **Running** and check out the **Public domain** of the container service; you can access your instance of Infisical by this URL.
  </Step>
</Steps>

<AccordionGroup>
  <Accordion title="Do you have any recommendations for deploying Infisical with AWS Lightsail?">
    Yes, here are a few that come to mind:
    - In step 1.3, we recommend pinning the Docker image to a specific [version of Infisical](https://hub.docker.com/r/infisical/infisical/tags)
    instead of referring to the `latest` tag to avoid any unexpected version-to-version migration issues.

    We're working on putting together a fuller list of deployment best practices as well as minimum resource configuration requirements for running Infisical so stay tuned!
  </Accordion>
</AccordionGroup>
@ -1,71 +0,0 @@
---
title: "Azure App Services"
description: "Deploy Infisical with Azure App Service"
---

Prerequisites:
- Have an account with [Microsoft Azure](https://azure.microsoft.com/en-us)

<Steps>
<Step title="Create a Web App in Azure App Services">
1.1. In Azure, navigate to the **App Services** solution and press **Create > Web App**.




1.2. In the **Basics** section, specify the **Subscription** and **Resource group** to manage the deployed resource.

Also, give the container a friendly name like Infisical and specify a **Region** for it to be deployed to.


1.3. In the **Docker** section, select the **Single Container** option under **Options** and specify **Docker Hub** as the image source.

Next, under the **Docker hub options** sub-section, select the **Public** option under **Access Type** and fill in your intended [Infisical public Docker image](https://hub.docker.com/r/infisical/infisical) in the **Image and tag** field; this will pull the image from Docker Hub.

For example, in order to opt for Infisical `v0.43.4`, you would input: `infisical/infisical:v0.43.4`.


1.4. Finally, in the **Review + create** section, double check the information from the previous steps and press **Create** to create the Azure app service.


1.5. Next, wait a minute or two on the deployment overview page for the app to be created. Once the deployment is complete, press **Go to resource** to head to the **App Service dashboard** for the newly-created app.


1.6. Running Infisical requires a few environment variables to be set for the Azure app service.
At minimum, Infisical requires that you set the variables `ENCRYPTION_KEY`, `AUTH_SECRET`, `MONGO_URL`, and `REDIS_URL`
which you can read more about [here](/self-hosting/configuration/envars).

<Note>
To use more features like emailing and single sign-on, you can set additional configuration options [here](/self-hosting/configuration/envars).
</Note>

Additionally, you must set the variable `WEBSITES_PORT=8080` since Infisical listens on port `8080`.

In the **Settings > Configuration** section of the newly-created app service, fill in the required environment variables.


</Step>
<Step title="Navigate to your deployed instance of Infisical">
In the **Overview** section, check out the **Default domain** for your instance of Infisical; you can visit the instance at this URL.


</Step>
</Steps>

<AccordionGroup>
<Accordion title="Do you have any recommendations for deploying Infisical with Azure App Services?">
Yes, here are a few that come to mind:
- In step 1.3, we recommend pinning the Docker image to a specific [version of Infisical](https://hub.docker.com/r/infisical/infisical/tags)
instead of referring to the `latest` tag to avoid any unexpected version-to-version migration issues.
- In step 1.2, we recommend selecting a **Region** option that is closest to your infrastructure/clients to reduce latency.

We're working on putting together a fuller list of deployment best practices as well as minimum resource configuration requirements for running Infisical so stay tuned!
</Accordion>
</AccordionGroup>
@ -1,88 +0,0 @@
---
title: "Azure Container Instances"
description: "Deploy Infisical with Azure Container Instances"
---

Prerequisites:
- Have an account with [Microsoft Azure](https://azure.microsoft.com/en-us)

<Note>
This guide goes over how to deploy an instance of Infisical with Azure Container Instances without TLS/SSL configuration.

There are various options for enabling TLS/SSL with Azure Container Instances more suitable for production, including:
- [Enabling a TLS endpoint in a sidecar container](https://learn.microsoft.com/en-us/azure/container-instances/container-instances-container-group-ssl).
- [Enabling automatic HTTPS with Caddy in a sidecar container](https://learn.microsoft.com/en-us/azure/container-instances/container-instances-container-group-automatic-ssl).
- Using Azure Function Proxies, Application Gateway, etc.

For a simpler deployment experience with complete TLS/SSL setup, you may try [deploying Infisical with Azure App Services](/self-hosting/deployment-options/azure-app-services).
</Note>

<Steps>
<Step title="Create a container instance in Azure Container Instances">
1.1. In Azure, navigate to the **Container Instances** solution and press **Create**.




1.2. In the **Basics** section, specify the **Subscription** and **Resource group** to manage the deployed resource.

Also, give the container a friendly name like Infisical and specify a **Region** for it to be deployed to.


Next, select the **Public** option under **Image type** and fill in your intended [Infisical public Docker image](https://hub.docker.com/r/infisical/infisical) in the **Image** field; this will pull the image from Docker Hub.

For example, in order to opt for Infisical `v0.43.4`, you would input: `infisical/infisical:v0.43.4`.


<Note>
Depending on your use-case and requirements, you may find it helpful to further configure your Azure container instance.

For example, you may want to adjust the **Region** option to specify which region to deploy the container for your
instance of Infisical to minimize distance and therefore latency between the instance and your infrastructure.
</Note>

1.3. In the **Networking** section, select the **Public** option under **Networking type**; this will make the container accessible over the public internet.

Next, under the **Ports** section, add an entry for port `8080` and protocol `TCP` since Infisical listens on port `8080`.


1.4. Running Infisical requires a few environment variables to be set for the Azure container instance.
At minimum, Infisical requires that you set the variables `ENCRYPTION_KEY`, `AUTH_SECRET`, `MONGO_URL`, and `REDIS_URL`
which you can read more about [here](/self-hosting/configuration/envars).

In the **Advanced** section, fill in the required environment variables.

<Note>
To use more features like emailing and single sign-on, you can set additional configuration options [here](/self-hosting/configuration/envars).
</Note>



1.5. Finally, in the **Review + create** section, double check the information from the previous steps and press **Create** to create the Azure container instance.


</Step>
<Step title="Navigate to your deployed instance of Infisical">
Head to the **Overview** page of the newly-created container instance to view its **IP address (Public)**; you can access your instance of Infisical at this IP address on port `8080`.

For example, in the image below, the IP address of the sample deployed container instance is `4.255.87.109`; the instance would be accessible in the browser by heading to `4.255.87.109:8080`.


</Step>
</Steps>

<AccordionGroup>
<Accordion title="Do you have any recommendations for deploying Infisical with Azure Container Instances?">
Yes, here are a few that come to mind:
- In step 1.2, we recommend pinning the Docker image to a specific [version of Infisical](https://hub.docker.com/r/infisical/infisical/tags) instead of referring to the `latest` tag to avoid any unexpected version-to-version migration issues.
- In step 1.2, we recommend selecting a **Region** option that is closest to your infrastructure/clients to reduce latency.
- Enable TLS/SSL with Azure Container Instances. There are various options for doing so, including [enabling a TLS endpoint in a sidecar container](https://learn.microsoft.com/en-us/azure/container-instances/container-instances-container-group-ssl), [enabling automatic HTTPS with Caddy in a sidecar container](https://learn.microsoft.com/en-us/azure/container-instances/container-instances-container-group-automatic-ssl), and using Azure Function Proxies, Application Gateway, etc.

We're working on putting together a fuller list of deployment best practices as well as minimum resource configuration requirements for running Infisical, so stay tuned!
</Accordion>
</AccordionGroup>
@ -1,27 +0,0 @@
---
title: "Digital Ocean"
description: "Learn to install Infisical on Digital Ocean"
---

Infisical can be deployed on a Kubernetes cluster with a single click through our Digital Ocean marketplace application.
Starting the installation creates a Kubernetes cluster and then installs Infisical onto that cluster.

This automated deployment method uses the same process under the hood as the manual [Kubernetes installation guide](./kubernetes-helm).

### Initiate the installation

To start the process, click the following button and follow the instructions there.

<a href="https://docs.digitalocean.com/products/marketplace/catalog/infisical/">
    <img src="../../images/do-k8-install-btn.png" width="300"/>
</a>

### Access Infisical Web
Once the installation finishes, head to the `Networking` section via the sidebar and select `Load Balancers`.
Within this section, you'll find the newly created load balancer for Infisical. You can access Infisical at the IP address allocated to that load balancer.

### Adjusting configurations
If you need to either upgrade or downgrade Infisical, or modify environment variables to alter its functionality, refer to our [Kubernetes installation](./kubernetes-helm) page for detailed instructions.

Because Digital Ocean deploys the same Helm application as described in our [Kubernetes installation](./kubernetes-helm) guide, you can use that guide to implement the required changes.
It's important to note that any modifications require familiarity with the Helm package manager.
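
As a rough illustration of what such a change looks like, upgrading or reconfiguring the marketplace deployment typically boils down to a `helm upgrade` against the existing release; the release, namespace, and chart names below are placeholders, so take the exact values from the [Kubernetes installation](./kubernetes-helm) guide.

```
# Sketch of adjusting the deployed release with Helm (release, namespace, and chart names are placeholders)
helm repo update
helm upgrade --install infisical infisical-helm-charts/infisical \
  --namespace infisical \
  --values values.yaml
```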
@ -1,108 +0,0 @@
---
title: "Fly.io"
description: "Deploy Infisical with Fly.io"
---

Prerequisites:
- Have an account with [Fly.io](https://fly.io/)
- Have installed the [Fly.io CLI](https://fly.io/docs/hands-on/install-flyctl/)

<Steps>
<Step title="Create an app with Fly.io">
In your terminal, run the following command from the source directory of your project to create a new Fly.io app
with a `fly.toml` configuration file:

```
fly launch
```
</Step>
<Step title="Edit the fly.toml configuration file">
Add a **build** section to the `fly.toml` file to specify the [Infisical public Docker image](https://hub.docker.com/r/infisical/infisical):

```
[build]
image = "infisical/infisical:v0.43.4"
```

Afterwards, your `fly.toml` file should look similar to:

```
app = "infisical"
primary_region = "lax"

[http_service]
internal_port = 8080
force_https = true
auto_stop_machines = true
auto_start_machines = true
min_machines_running = 0
processes = ["app"]

[[vm]]
cpu_kind = "shared"
cpus = 1
memory_mb = 1024

[build]
image = "infisical/infisical:v0.43.4"
```

<Note>
Depending on your use-case and requirements, you may find it helpful to further configure your `fly.toml` file
with options [here](https://fly.io/docs/reference/configuration/).

For example, you may want to adjust the `primary_region` option to specify which [region](https://fly.io/docs/reference/regions/) to create the new machine for your
instance of Infisical to minimize distance and therefore latency between the instance and your infrastructure.
</Note>

</Step>
<Step title="Set secrets for your Fly.io app">
Running Infisical requires a few environment variables to be set on the Fly.io machine.
At minimum, Infisical requires that you set the variables `ENCRYPTION_KEY`, `AUTH_SECRET`, `MONGO_URL`, and `REDIS_URL`
which you can read more about [here](/self-hosting/configuration/envars).

For this step, we recommend setting the variables as Fly.io [app secrets](https://fly.io/docs/reference/secrets/), which
are made available to the app as environment variables. You can set the variables either via the Fly.io CLI or project [dashboard](https://fly.io/dashboard).

<Tabs>
<Tab title="CLI">
Run the following command (with each `VALUE` replaced) in the source directory of your project to set the required variables:

```
flyctl secrets set ENCRYPTION_KEY=VALUE AUTH_SECRET=VALUE MONGO_URL=VALUE REDIS_URL=VALUE...
```
</Tab>
<Tab title="Dashboard">
In Fly.io, head to your Project > Secrets and add the required variables.


</Tab>
</Tabs>

<Note>
To use more features like emailing and single sign-on, you can set additional configuration options [here](/self-hosting/configuration/envars).
</Note>
</Step>
<Step title="Deploy the Fly.io app">
Finally, run the following command in the source directory of your project to deploy your Infisical instance on Fly.io
with the updated `fly.toml` configuration file from step 2 and secrets from step 3:

```
fly deploy
```
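
After the deploy command completes, it can be worth confirming that the machines are healthy before visiting the app; this optional sketch uses standard Fly.io CLI commands.

```
# Check the state of the deployed machines and recent application logs
fly status
fly logs
```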
</Step>
</Steps>

<AccordionGroup>
<Accordion title="Do you have any recommendations for deploying Infisical with Fly.io?">
Yes, here are a few that come to mind:
- In step 2, we recommend pinning the Docker image to a specific [version of Infisical](https://hub.docker.com/r/infisical/infisical/tags)
instead of referring to the `latest` tag to avoid any unexpected version-to-version migration issues.
- In step 2, we recommend selecting a `primary_region` option that is closest to your infrastructure/clients to reduce latency; a full list of regions supported by Fly.io can be found [here](https://fly.io/docs/reference/regions/).

We're working on putting together a fuller list of deployment best practices as well as minimum resource configuration requirements for running Infisical so stay tuned!
</Accordion>
</AccordionGroup>

Resources:
- [Fly.io documentation](https://fly.io/docs/)
@ -1,67 +0,0 @@
---
title: "GCP Cloud Run"
description: "Deploy Infisical with GCP Cloud Run"
---

Prerequisites:
- Have an account with [Google Cloud Platform (GCP)](https://cloud.google.com/)

<Steps>
<Step title="Create a project in GCP">
In GCP, create a new project and give it a friendly name like Infisical.




</Step>
<Step title="Create a service in GCP Cloud Run">
2.1. Inside the GCP project, navigate to the **Cloud Run** product and create a new service.




2.2. In the service creation form, select the **Deploy one revision from an existing container image** option and fill in your intended [Infisical public Docker image](https://hub.docker.com/r/infisical/infisical) in the container image URL.

For example, in order to opt for Infisical `v0.43.4`, you would input: `docker.io/infisical/infisical:v0.43.4`.



2.3. Running Infisical requires a few environment variables to be set for the GCP Cloud Run service.
At minimum, Infisical requires that you set the variables `ENCRYPTION_KEY`, `AUTH_SECRET`, `MONGO_URL`, and `REDIS_URL`
which you can read more about [here](/self-hosting/configuration/envars).

For this step, fill in the required environment variables in the Edit Container > Variables & Secrets > Environment variables section.

<Note>
To use more features like emailing and single sign-on, you can set additional configuration options [here](/self-hosting/configuration/envars).
</Note>



<Note>
Depending on your use-case and requirements, you may find it helpful to further configure your GCP Cloud Run service.

For example, you may want to adjust the **Region** option to specify which region to deploy the underlying container for your
instance of Infisical to minimize distance and therefore latency between the instance and your infrastructure.
</Note>

Finally, press **Create** to finish setting up the GCP Cloud Run service.
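
If you prefer to script this step, a roughly equivalent deployment can be done with the `gcloud` CLI; the region and environment variable values below are placeholders you would replace with your own.

```
# Deploy Infisical to Cloud Run from the CLI (sketch; substitute your own values)
gcloud run deploy infisical \
  --image docker.io/infisical/infisical:v0.43.4 \
  --port 8080 \
  --region us-central1 \
  --allow-unauthenticated \
  --set-env-vars "ENCRYPTION_KEY=<value>,AUTH_SECRET=<value>,MONGO_URL=<value>,REDIS_URL=<value>"
```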
</Step>
<Step title="Navigate to your deployed instance of Infisical">
Head to the **Service details** of the newly-created service to view its URL; you can access your instance of Infisical by clicking on the URL.


</Step>
</Steps>

<AccordionGroup>
<Accordion title="Do you have any recommendations for deploying Infisical with GCP Cloud Run?">
Yes, here are a few that come to mind:
- In step 2, we recommend pinning the Docker image to a specific [version of Infisical](https://hub.docker.com/r/infisical/infisical/tags)
instead of referring to the `latest` tag to avoid any unexpected version-to-version migration issues.
- In step 2, we recommend selecting a **Region** option that is closest to your infrastructure/clients to reduce latency.

We're working on putting together a fuller list of deployment best practices as well as minimum resource configuration requirements for running Infisical so stay tuned!
</Accordion>
</AccordionGroup>
@ -1,61 +0,0 @@
---
title: "Railway"
description: "Deploy Infisical with Railway"
---

Prerequisites:
- Have an account with [Railway](https://railway.app/)

<Steps>
<Step title="Deploy the Infisical template with Railway">
1.1. In Railway, create a new project and select **Deploy a template > Infisical**.








1.2. At minimum, Infisical requires that you set the variables `ENCRYPTION_KEY`, `AUTH_SECRET`, `MONGO_URL`, and `REDIS_URL`
which you can read more about [here](/self-hosting/configuration/envars).

By default, the Infisical template on Railway pre-configures environment variables on each service in the deployment but requires you to supply two yourself: one for the MongoDB service and one for the Redis service.

On the MongoDB service, supply a value for the `MONGO_INITDB_ROOT_PASSWORD` variable.



On the Redis service, supply a value for the `REDIS_PASSWORD` variable.




<Note>
To use more features like emailing and single sign-on, you can set additional configuration options on the Infisical service [here](/self-hosting/configuration/envars).
</Note>

Finally, press **Deploy** to create the project and deploy the services within it.




</Step>
<Step title="Navigate to your deployed instance of Infisical">
Head to the newly-created Infisical service to view its URL under Networking > Public Networking; you can access your instance of Infisical by clicking on the URL.


</Step>
</Steps>

<AccordionGroup>
<Accordion title="Do you have any recommendations for deploying Infisical with Railway?">
Yes, here are a few that come to mind:
- While the Infisical template on Railway uses the `latest` tag to get the latest version of Infisical, we recommend creating a Railway deployment that pins the Docker image to a specific [version of Infisical](https://hub.docker.com/r/infisical/infisical/tags) to avoid any unexpected version-to-version migration issues.
- We recommend selecting **Deployment region** options for your Railway service deployments to be closest to your infrastructure/clients to reduce latency.

We're working on putting together a fuller list of deployment best practices as well as minimum resource configuration requirements for running Infisical, so stay tuned!
</Accordion>
</AccordionGroup>
@ -1,21 +0,0 @@
---
title: "Render.com"
description: "Learn to install Infisical on Render.com"
---

**Prerequisites**
- An account at Render.com
- A document database instance

Deploying on Render is one of the quickest ways to have Infisical running in production.
Before you start the deployment, you will need to obtain a document database connection string. This will be used for the `MONGO_URL` environment variable required during installation.

You can create a document database using services such as [MongoDB](https://www.mongodb.com/), [AWS DocumentDB](https://aws.amazon.com/documentdb/), and others. Once done, click the link below to start deployment.

### **[Deploy to Render](https://render.com/deploy?repo=https://github.com/Infisical/infisical)**

<Info>
Once installation is complete, you will have to create the first account. No default account is provided.
</Info>
70
docs/self-hosting/reference-architectures/on-premise.mdx
Normal file
@ -0,0 +1,70 @@
---
title: "On-premise"
description: "Reference architecture for self-hosting Infisical on premise"
---

Deploying Infisical on-premise with high availability requires deep knowledge in areas like networking, container orchestration, and database management.
This guide presents a reference architecture that outlines how to achieve such a deployment effectively.
For organizations that do not have the necessary resources or expertise, we recommend opting for managed, dedicated Infisical instances or engaging professional services to mitigate the complexities.

## System Overview


The architecture above utilizes a combination of Kubernetes for orchestrating stateless components and virtual machines (VMs) or bare metal for stateful components.
The infrastructure spans multiple data centers for redundancy and load distribution, enhancing availability and disaster recovery capabilities.
You may duplicate the architecture in multiple data centers and join them via Consul to increase availability. This way, if one data center goes down, the remaining active data centers take over its workloads.

### Stateful vs stateless workloads

To reduce the challenges of managing state within Kubernetes, including storage provisioning, persistent volume management, and intricate data backup and recovery processes, we strongly recommend deploying stateful components on virtual machines (VMs) or bare metal.
As depicted in the architecture, Infisical is intentionally deployed on Kubernetes to leverage its strengths in managing stateless applications.
Being stateless, Infisical fully benefits from Kubernetes' features like horizontal scaling, self-healing, and rolling updates and rollbacks.

## Core Components

### Kubernetes Cluster
Infisical is deployed on a Kubernetes cluster, which allows for container management, auto-scaling, and self-healing capabilities.
A load balancer sits in front of the Kubernetes cluster, directing traffic and ensuring even load distribution across the application nodes.
This is the entry point through which all other services interact with Infisical.


### Consul as the Networking Backbone
Consul is a critical component in the reference architecture, serving as a unified service networking layer that links and controls services across different environments and data centers.
It functions as the common communication channel between data centers for stateless applications on Kubernetes and stateful services such as databases on dedicated VMs or bare metal.


### Postgres with Patroni
The database layer is powered by Postgres, with [Patroni](https://patroni.readthedocs.io/en/latest/) providing automated management to create a high availability setup. Patroni leverages Consul for several critical operations:

- **Redundancy:** By managing a cluster of one primary and multiple secondary Postgres nodes, the architecture ensures redundancy.
The primary node handles all the write operations, and secondary nodes handle read operations and are prepared to step up in case of primary failure.

- **Failover and Service Discovery:** Consul is integrated with Patroni for service discovery and health checks.
When Patroni detects that the primary node is unhealthy, it uses Consul to elect a new primary node from the secondaries, thereby ensuring that the database service remains available; a discovery sketch is shown after this list.

- **Data Center Awareness:** Patroni configured with Consul is aware of the multi-data center setup and can handle failover across data centers if necessary, which further enhances the system's availability.
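
Because Patroni registers the Postgres cluster in Consul, clients and operators can resolve the current primary through Consul's DNS interface. The sketch below assumes Consul's default DNS port and a hypothetical Patroni scope of `infisical-postgres`; substitute the scope name you actually configure.

```
# Resolve the current Postgres primary via Consul DNS (service name is a placeholder)
dig @127.0.0.1 -p 8600 master.infisical-postgres.service.consul

# List the services Consul currently knows about
consul catalog services
```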

### Redis with Redis Sentinel
For caching and message brokering:

- Redis is deployed with a primary-replica setup.
- Redis Sentinel monitors the Redis nodes, providing automatic failover and service discovery (see the sketch after this list).
- Write operations go to the primary node, and replicas serve read operations, ensuring data integrity and availability.
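
To see how applications discover the current Redis primary through Sentinel, the following is a minimal sketch; the Sentinel port is the standard default, while the host and master name (`mymaster`) are assumptions that depend on your Sentinel configuration.

```
# Ask a Sentinel node which Redis instance is currently the primary (host and master name are placeholders)
redis-cli -h sentinel.example.internal -p 26379 SENTINEL get-master-addr-by-name mymaster
```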

## Multi data center deployment
Infisical can be deployed across a number of data centers to increase both performance and resiliency to disaster scenarios.
For mission-critical deployments of Infisical, we recommend deploying across at least 3 data centers to reduce downtime in the event of a complete data center failure.

### Data Center A
Data Center A houses the primary nodes of both Postgres and Redis, which handle all write operations. The secondary nodes and replicas serve as hot standbys for failover. Consul servers maintain the state of the cluster, elect a leader, and facilitate service discovery.

### $n^{th}$ data center
The $n^{th}$ data center acts as a performance and disaster recovery site, featuring a mesh gateway that enables cross-data center service discovery and configuration. It houses additional secondary nodes for Postgres and Redis replicas, which are ready to be promoted in case the primary data center fails. Additionally, this data center can reduce the latency of applications that need to interact with Infisical, particularly if those applications or services are geographically closer to this data center.

## Considerations

The complexity of an on-premise deployment scales with the level of availability required. This reference architecture provides a robust framework for organizations aiming for high availability and disaster resilience. However, it's important to recognize that this is not a one-size-fits-all solution.

Organizations with less stringent Recovery Time Objectives (RTO) might find that [simpler deployment methods](/self-hosting/deployment-options/docker-compose) using tools such as Docker Compose are adequate. Such setups can still provide a reasonable level of service continuity without the complexities involved in managing a multi-data center environment with Kubernetes, Consul, and other high-availability components.

Ultimately, the choice of architecture should be guided by a thorough analysis of business needs, available resources, and expertise.