mirror of https://github.com/Infisical/infisical.git
synced 2025-04-17 19:37:38 +00:00

Compare commits: 24 commits (docker-swa ... infisical/)

Commits:
9992fbf3dd
3ca596d4af
1c95b3abe7
1f3c72b997
e55b981cea
49d4e67e07
a54d156bf0
f3fc898232
c61602370e
5178663797
f04f3aee25
e5333e2718
f27d9f8cee
cbd568b714
b330c5570d
d222bbf131
961c6391a8
d68d7df0f8
c44c7810ce
b7893a6a72
7a3d425b0e
bd570bd02f
b94ffb8a82
2d51445dd9
@@ -1,6 +1,8 @@
name: Build and release CLI

on:
  workflow_dispatch:

  push:
    # run only against tags
    tags:

@@ -14,6 +16,12 @@ jobs:
  cli-integration-tests:
    name: Run tests before deployment
    uses: ./.github/workflows/run-cli-tests.yml
    secrets:
      CLI_TESTS_UA_CLIENT_ID: ${{ secrets.CLI_TESTS_UA_CLIENT_ID }}
      CLI_TESTS_UA_CLIENT_SECRET: ${{ secrets.CLI_TESTS_UA_CLIENT_SECRET }}
      CLI_TESTS_SERVICE_TOKEN: ${{ secrets.CLI_TESTS_SERVICE_TOKEN }}
      CLI_TESTS_PROJECT_ID: ${{ secrets.CLI_TESTS_PROJECT_ID }}
      CLI_TESTS_ENV_SLUG: ${{ secrets.CLI_TESTS_ENV_SLUG }}

  goreleaser:
    runs-on: ubuntu-20.04
.github/workflows/run-cli-tests.yml (13 changes, vendored)

@@ -6,7 +6,20 @@ on:
    paths:
      - "cli/**"

  workflow_dispatch:

  workflow_call:
    secrets:
      CLI_TESTS_UA_CLIENT_ID:
        required: true
      CLI_TESTS_UA_CLIENT_SECRET:
        required: true
      CLI_TESTS_SERVICE_TOKEN:
        required: true
      CLI_TESTS_PROJECT_ID:
        required: true
      CLI_TESTS_ENV_SLUG:
        required: true

jobs:
  test:
backend/package-lock.json (117 changes, generated)

@@ -45,6 +45,7 @@
        "jsonwebtoken": "^9.0.2",
        "jsrp": "^0.2.4",
        "knex": "^3.0.1",
        "ldapjs": "^3.0.7",
        "libsodium-wrappers": "^0.7.13",
        "lodash.isequal": "^4.5.0",
        "ms": "^2.1.3",

@@ -2510,6 +2511,83 @@
        "@jridgewell/sourcemap-codec": "^1.4.10"
      }
    },
    "node_modules/@ldapjs/asn1": {
      "version": "2.0.0",
      "resolved": "https://registry.npmjs.org/@ldapjs/asn1/-/asn1-2.0.0.tgz",
      "integrity": "sha512-G9+DkEOirNgdPmD0I8nu57ygQJKOOgFEMKknEuQvIHbGLwP3ny1mY+OTUYLCbCaGJP4sox5eYgBJRuSUpnAddA=="
    },
    "node_modules/@ldapjs/attribute": {
      "version": "1.0.0",
      "resolved": "https://registry.npmjs.org/@ldapjs/attribute/-/attribute-1.0.0.tgz",
      "integrity": "sha512-ptMl2d/5xJ0q+RgmnqOi3Zgwk/TMJYG7dYMC0Keko+yZU6n+oFM59MjQOUht5pxJeS4FWrImhu/LebX24vJNRQ==",
      "dependencies": {
        "@ldapjs/asn1": "2.0.0",
        "@ldapjs/protocol": "^1.2.1",
        "process-warning": "^2.1.0"
      }
    },
    "node_modules/@ldapjs/change": {
      "version": "1.0.0",
      "resolved": "https://registry.npmjs.org/@ldapjs/change/-/change-1.0.0.tgz",
      "integrity": "sha512-EOQNFH1RIku3M1s0OAJOzGfAohuFYXFY4s73wOhRm4KFGhmQQ7MChOh2YtYu9Kwgvuq1B0xKciXVzHCGkB5V+Q==",
      "dependencies": {
        "@ldapjs/asn1": "2.0.0",
        "@ldapjs/attribute": "1.0.0"
      }
    },
    "node_modules/@ldapjs/controls": {
      "version": "2.1.0",
      "resolved": "https://registry.npmjs.org/@ldapjs/controls/-/controls-2.1.0.tgz",
      "integrity": "sha512-2pFdD1yRC9V9hXfAWvCCO2RRWK9OdIEcJIos/9cCVP9O4k72BY1bLDQQ4KpUoJnl4y/JoD4iFgM+YWT3IfITWw==",
      "dependencies": {
        "@ldapjs/asn1": "^1.2.0",
        "@ldapjs/protocol": "^1.2.1"
      }
    },
    "node_modules/@ldapjs/controls/node_modules/@ldapjs/asn1": {
      "version": "1.2.0",
      "resolved": "https://registry.npmjs.org/@ldapjs/asn1/-/asn1-1.2.0.tgz",
      "integrity": "sha512-KX/qQJ2xxzvO2/WOvr1UdQ+8P5dVvuOLk/C9b1bIkXxZss8BaR28njXdPgFCpj5aHaf1t8PmuVnea+N9YG9YMw=="
    },
    "node_modules/@ldapjs/dn": {
      "version": "1.1.0",
      "resolved": "https://registry.npmjs.org/@ldapjs/dn/-/dn-1.1.0.tgz",
      "integrity": "sha512-R72zH5ZeBj/Fujf/yBu78YzpJjJXG46YHFo5E4W1EqfNpo1UsVPqdLrRMXeKIsJT3x9dJVIfR6OpzgINlKpi0A==",
      "dependencies": {
        "@ldapjs/asn1": "2.0.0",
        "process-warning": "^2.1.0"
      }
    },
    "node_modules/@ldapjs/filter": {
      "version": "2.1.1",
      "resolved": "https://registry.npmjs.org/@ldapjs/filter/-/filter-2.1.1.tgz",
      "integrity": "sha512-TwPK5eEgNdUO1ABPBUQabcZ+h9heDORE4V9WNZqCtYLKc06+6+UAJ3IAbr0L0bYTnkkWC/JEQD2F+zAFsuikNw==",
      "dependencies": {
        "@ldapjs/asn1": "2.0.0",
        "@ldapjs/protocol": "^1.2.1",
        "process-warning": "^2.1.0"
      }
    },
    "node_modules/@ldapjs/messages": {
      "version": "1.3.0",
      "resolved": "https://registry.npmjs.org/@ldapjs/messages/-/messages-1.3.0.tgz",
      "integrity": "sha512-K7xZpXJ21bj92jS35wtRbdcNrwmxAtPwy4myeh9duy/eR3xQKvikVycbdWVzkYEAVE5Ce520VXNOwCHjomjCZw==",
      "dependencies": {
        "@ldapjs/asn1": "^2.0.0",
        "@ldapjs/attribute": "^1.0.0",
        "@ldapjs/change": "^1.0.0",
        "@ldapjs/controls": "^2.1.0",
        "@ldapjs/dn": "^1.1.0",
        "@ldapjs/filter": "^2.1.1",
        "@ldapjs/protocol": "^1.2.1",
        "process-warning": "^2.2.0"
      }
    },
    "node_modules/@ldapjs/protocol": {
      "version": "1.2.1",
      "resolved": "https://registry.npmjs.org/@ldapjs/protocol/-/protocol-1.2.1.tgz",
      "integrity": "sha512-O89xFDLW2gBoZWNXuXpBSM32/KealKCTb3JGtJdtUQc7RjAk8XzrRgyz02cPAwGKwKPxy0ivuC7UP9bmN87egQ=="
    },
    "node_modules/@lukeed/ms": {
      "version": "2.0.1",
      "resolved": "https://registry.npmjs.org/@lukeed/ms/-/ms-2.0.1.tgz",

@@ -9304,15 +9382,7 @@
        "node": ">=0.8.0"
      }
    },
    "node_modules/ldapauth-fork/node_modules/lru-cache": {
      "version": "7.18.3",
      "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.18.3.tgz",
      "integrity": "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==",
      "engines": {
        "node": ">=12"
      }
    },
    "node_modules/ldapjs": {
    "node_modules/ldapauth-fork/node_modules/ldapjs": {
      "version": "2.3.3",
      "resolved": "https://registry.npmjs.org/ldapjs/-/ldapjs-2.3.3.tgz",
      "integrity": "sha512-75QiiLJV/PQqtpH+HGls44dXweviFwQ6SiIK27EqzKQ5jU/7UFrl2E5nLdQ3IYRBzJ/AVFJI66u0MZ0uofKYwg==",

@@ -9330,6 +9400,35 @@
        "node": ">=10.13.0"
      }
    },
    "node_modules/ldapauth-fork/node_modules/lru-cache": {
      "version": "7.18.3",
      "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-7.18.3.tgz",
      "integrity": "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==",
      "engines": {
        "node": ">=12"
      }
    },
    "node_modules/ldapjs": {
      "version": "3.0.7",
      "resolved": "https://registry.npmjs.org/ldapjs/-/ldapjs-3.0.7.tgz",
      "integrity": "sha512-1ky+WrN+4CFMuoekUOv7Y1037XWdjKpu0xAPwSP+9KdvmV9PG+qOKlssDV6a+U32apwxdD3is/BZcWOYzN30cg==",
      "dependencies": {
        "@ldapjs/asn1": "^2.0.0",
        "@ldapjs/attribute": "^1.0.0",
        "@ldapjs/change": "^1.0.0",
        "@ldapjs/controls": "^2.1.0",
        "@ldapjs/dn": "^1.1.0",
        "@ldapjs/filter": "^2.1.1",
        "@ldapjs/messages": "^1.3.0",
        "@ldapjs/protocol": "^1.2.1",
        "abstract-logging": "^2.0.1",
        "assert-plus": "^1.0.0",
        "backoff": "^2.5.0",
        "once": "^1.4.0",
        "vasync": "^2.2.1",
        "verror": "^1.10.1"
      }
    },
    "node_modules/leven": {
      "version": "2.1.0",
      "resolved": "https://registry.npmjs.org/leven/-/leven-2.1.0.tgz",

@@ -106,6 +106,7 @@
    "jsonwebtoken": "^9.0.2",
    "jsrp": "^0.2.4",
    "knex": "^3.0.1",
    "ldapjs": "^3.0.7",
    "libsodium-wrappers": "^0.7.13",
    "lodash.isequal": "^4.5.0",
    "ms": "^2.1.3",
backend/src/@types/knex.d.ts (4 changes, vendored)

@@ -74,6 +74,9 @@ import {
  TLdapConfigs,
  TLdapConfigsInsert,
  TLdapConfigsUpdate,
  TLdapGroupMaps,
  TLdapGroupMapsInsert,
  TLdapGroupMapsUpdate,
  TOrganizations,
  TOrganizationsInsert,
  TOrganizationsUpdate,

@@ -398,6 +401,7 @@ declare module "knex/types/tables" {
    >;
    [TableName.SamlConfig]: Knex.CompositeTableType<TSamlConfigs, TSamlConfigsInsert, TSamlConfigsUpdate>;
    [TableName.LdapConfig]: Knex.CompositeTableType<TLdapConfigs, TLdapConfigsInsert, TLdapConfigsUpdate>;
    [TableName.LdapGroupMap]: Knex.CompositeTableType<TLdapGroupMaps, TLdapGroupMapsInsert, TLdapGroupMapsUpdate>;
    [TableName.OrgBot]: Knex.CompositeTableType<TOrgBots, TOrgBotsInsert, TOrgBotsUpdate>;
    [TableName.AuditLog]: Knex.CompositeTableType<TAuditLogs, TAuditLogsInsert, TAuditLogsUpdate>;
    [TableName.GitAppInstallSession]: Knex.CompositeTableType<
@@ -0,0 +1,34 @@
import { Knex } from "knex";

import { TableName } from "../schemas";
import { createOnUpdateTrigger, dropOnUpdateTrigger } from "../utils";

export async function up(knex: Knex): Promise<void> {
  if (!(await knex.schema.hasTable(TableName.LdapGroupMap))) {
    await knex.schema.createTable(TableName.LdapGroupMap, (t) => {
      t.uuid("id", { primaryKey: true }).defaultTo(knex.fn.uuid());
      t.uuid("ldapConfigId").notNullable();
      t.foreign("ldapConfigId").references("id").inTable(TableName.LdapConfig).onDelete("CASCADE");
      t.string("ldapGroupCN").notNullable();
      t.uuid("groupId").notNullable();
      t.foreign("groupId").references("id").inTable(TableName.Groups).onDelete("CASCADE");
      t.unique(["ldapGroupCN", "groupId", "ldapConfigId"]);
    });
  }

  await createOnUpdateTrigger(knex, TableName.LdapGroupMap);

  await knex.schema.alterTable(TableName.LdapConfig, (t) => {
    t.string("groupSearchBase").notNullable().defaultTo("");
    t.string("groupSearchFilter").notNullable().defaultTo("");
  });
}

export async function down(knex: Knex): Promise<void> {
  await knex.schema.dropTableIfExists(TableName.LdapGroupMap);
  await dropOnUpdateTrigger(knex, TableName.LdapGroupMap);
  await knex.schema.alterTable(TableName.LdapConfig, (t) => {
    t.dropColumn("groupSearchBase");
    t.dropColumn("groupSearchFilter");
  });
}
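As a rough illustration of how a migration like the one above would be applied, here is a minimal sketch using knex's programmatic migration API. The connection settings and migration directory are assumptions; the repo wires up its own configuration elsewhere.

import knex from "knex";

// Assumed connection settings; Infisical's real config lives elsewhere in the repo.
const db = knex({
  client: "pg",
  connection: process.env.DB_CONNECTION_URI,
  migrations: { directory: "./src/db/migrations", extension: "ts" }
});

// Applies all pending migrations, including the LdapGroupMap migration above.
await db.migrate.latest();
await db.destroy();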
@@ -22,6 +22,7 @@ export * from "./incident-contacts";
export * from "./integration-auths";
export * from "./integrations";
export * from "./ldap-configs";
export * from "./ldap-group-maps";
export * from "./models";
export * from "./org-bots";
export * from "./org-memberships";
@@ -23,7 +23,9 @@ export const LdapConfigsSchema = z.object({
  caCertIV: z.string(),
  caCertTag: z.string(),
  createdAt: z.date(),
  updatedAt: z.date()
  updatedAt: z.date(),
  groupSearchBase: z.string().default(""),
  groupSearchFilter: z.string().default("")
});

export type TLdapConfigs = z.infer<typeof LdapConfigsSchema>;
backend/src/db/schemas/ldap-group-maps.ts (19 changes, new file)

@@ -0,0 +1,19 @@
// Code generated by automation script, DO NOT EDIT.
// Automated by pulling database and generating zod schema
// To update. Just run npm run generate:schema
// Written by akhilmhdh.

import { z } from "zod";

import { TImmutableDBKeys } from "./models";

export const LdapGroupMapsSchema = z.object({
  id: z.string().uuid(),
  ldapConfigId: z.string().uuid(),
  ldapGroupCN: z.string(),
  groupId: z.string().uuid()
});

export type TLdapGroupMaps = z.infer<typeof LdapGroupMapsSchema>;
export type TLdapGroupMapsInsert = Omit<z.input<typeof LdapGroupMapsSchema>, TImmutableDBKeys>;
export type TLdapGroupMapsUpdate = Partial<Omit<z.input<typeof LdapGroupMapsSchema>, TImmutableDBKeys>>;
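For illustration, the generated schema validates rows like the following; the UUID values here are made up.

import { LdapGroupMapsSchema } from "./ldap-group-maps";

// Hypothetical row; parse() throws if any field fails validation (e.g. a non-UUID id).
const groupMap = LdapGroupMapsSchema.parse({
  id: "11111111-1111-4111-8111-111111111111",
  ldapConfigId: "22222222-2222-4222-8222-222222222222",
  ldapGroupCN: "Engineers",
  groupId: "33333333-3333-4333-8333-333333333333"
});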
@@ -60,6 +60,7 @@ export enum TableName {
  SecretRotationOutput = "secret_rotation_outputs",
  SamlConfig = "saml_configs",
  LdapConfig = "ldap_configs",
  LdapGroupMap = "ldap_group_maps",
  AuditLog = "audit_logs",
  GitAppInstallSession = "git_app_install_sessions",
  GitAppOrg = "git_app_org",
@@ -14,7 +14,9 @@ import { FastifyRequest } from "fastify";
import LdapStrategy from "passport-ldapauth";
import { z } from "zod";

import { LdapConfigsSchema } from "@app/db/schemas";
import { LdapConfigsSchema, LdapGroupMapsSchema } from "@app/db/schemas";
import { TLDAPConfig } from "@app/ee/services/ldap-config/ldap-config-types";
import { searchGroups } from "@app/ee/services/ldap-config/ldap-fns";
import { getConfig } from "@app/lib/config/env";
import { logger } from "@app/lib/logger";
import { readLimit, writeLimit } from "@app/server/config/rateLimiter";

@@ -50,20 +52,33 @@ export const registerLdapRouter = async (server: FastifyZodProvider) => {
      // eslint-disable-next-line
      async (req: IncomingMessage, user, cb) => {
        try {
          const ldapConfig = (req as unknown as FastifyRequest).ldapConfig as TLDAPConfig;

          const groupFilter = "(|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}}))";
          const searchFilter =
            ldapConfig.groupSearchFilter ||
            groupFilter.replace("{{.Username}}", user.uid).replace("{{.UserDN}}", user.dn);

          const shouldProcessGroups = ldapConfig.groupSearchFilter && ldapConfig.groupSearchBase;

          const { isUserCompleted, providerAuthToken } = await server.services.ldap.ldapLogin({
            ldapConfigId: ldapConfig.id,
            externalId: user.uidNumber,
            username: user.uid,
            firstName: user.givenName,
            lastName: user.sn,
            firstName: user.givenName ?? user.cn ?? "",
            lastName: user.sn ?? "",
            emails: user.mail ? [user.mail] : [],
            groups: shouldProcessGroups
              ? await searchGroups(ldapConfig, searchFilter, ldapConfig.groupSearchBase)
              : undefined,
            relayState: ((req as unknown as FastifyRequest).body as { RelayState?: string }).RelayState,
            orgId: (req as unknown as FastifyRequest).ldapConfig.organization
          });

          return cb(null, { isUserCompleted, providerAuthToken });
        } catch (err) {
          logger.error(err);
          return cb(err, false);
        } catch (error) {
          logger.error(error);
          return cb(error, false);
        }
      }
    )

@@ -117,6 +132,8 @@ export const registerLdapRouter = async (server: FastifyZodProvider) => {
            bindDN: z.string(),
            bindPass: z.string(),
            searchBase: z.string(),
            groupSearchBase: z.string(),
            groupSearchFilter: z.string(),
            caCert: z.string()
          })
        }

@@ -148,6 +165,8 @@ export const registerLdapRouter = async (server: FastifyZodProvider) => {
        bindDN: z.string().trim(),
        bindPass: z.string().trim(),
        searchBase: z.string().trim(),
        groupSearchBase: z.string().trim().default(""),
        groupSearchFilter: z.string().trim().default(""),
        caCert: z.string().trim().default("")
      }),
      response: {

@@ -183,6 +202,8 @@ export const registerLdapRouter = async (server: FastifyZodProvider) => {
          bindDN: z.string().trim(),
          bindPass: z.string().trim(),
          searchBase: z.string().trim(),
          groupSearchBase: z.string().trim(),
          groupSearchFilter: z.string().trim(),
          caCert: z.string().trim()
        })
        .partial()

@@ -204,4 +225,106 @@ export const registerLdapRouter = async (server: FastifyZodProvider) => {
      return ldap;
    }
  });

  server.route({
    method: "GET",
    url: "/config/:configId/group-maps",
    config: {
      rateLimit: readLimit
    },
    onRequest: verifyAuth([AuthMode.JWT]),
    schema: {
      params: z.object({
        configId: z.string().trim()
      }),
      response: {
        200: z.array(
          z.object({
            id: z.string(),
            ldapConfigId: z.string(),
            ldapGroupCN: z.string(),
            group: z.object({
              id: z.string(),
              name: z.string(),
              slug: z.string()
            })
          })
        )
      }
    },
    handler: async (req) => {
      const ldapGroupMaps = await server.services.ldap.getLdapGroupMaps({
        actor: req.permission.type,
        actorId: req.permission.id,
        orgId: req.permission.orgId,
        actorAuthMethod: req.permission.authMethod,
        actorOrgId: req.permission.orgId,
        ldapConfigId: req.params.configId
      });
      return ldapGroupMaps;
    }
  });

  server.route({
    method: "POST",
    url: "/config/:configId/group-maps",
    config: {
      rateLimit: readLimit
    },
    onRequest: verifyAuth([AuthMode.JWT]),
    schema: {
      params: z.object({
        configId: z.string().trim()
      }),
      body: z.object({
        ldapGroupCN: z.string().trim(),
        groupSlug: z.string().trim()
      }),
      response: {
        200: LdapGroupMapsSchema
      }
    },
    handler: async (req) => {
      const ldapGroupMap = await server.services.ldap.createLdapGroupMap({
        actor: req.permission.type,
        actorId: req.permission.id,
        orgId: req.permission.orgId,
        actorAuthMethod: req.permission.authMethod,
        actorOrgId: req.permission.orgId,
        ldapConfigId: req.params.configId,
        ...req.body
      });
      return ldapGroupMap;
    }
  });

  server.route({
    method: "DELETE",
    url: "/config/:configId/group-maps/:groupMapId",
    config: {
      rateLimit: readLimit
    },
    onRequest: verifyAuth([AuthMode.JWT]),
    schema: {
      params: z.object({
        configId: z.string().trim(),
        groupMapId: z.string().trim()
      }),
      response: {
        200: LdapGroupMapsSchema
      }
    },
    handler: async (req) => {
      const ldapGroupMap = await server.services.ldap.deleteLdapGroupMap({
        actor: req.permission.type,
        actorId: req.permission.id,
        orgId: req.permission.orgId,
        actorAuthMethod: req.permission.authMethod,
        actorOrgId: req.permission.orgId,
        ldapConfigId: req.params.configId,
        ldapGroupMapId: req.params.groupMapId
      });
      return ldapGroupMap;
    }
  });
};
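A hedged sketch of how a client might call the new group-map endpoints defined above. The base URL, route prefix, and token are placeholders; the prefix under which this router is mounted is not shown in this diff, so it is an assumption.

// Hypothetical values; only the "/config/:configId/group-maps" path comes from the diff.
const BASE_URL = "https://app.infisical.com";
const ROUTE_PREFIX = "/api/v1/ldap"; // assumption: actual mount point not shown here
const TOKEN = process.env.INFISICAL_JWT ?? "";
const configId = "11111111-1111-4111-8111-111111111111";

const res = await fetch(`${BASE_URL}${ROUTE_PREFIX}/config/${configId}/group-maps`, {
  method: "POST",
  headers: { Authorization: `Bearer ${TOKEN}`, "Content-Type": "application/json" },
  body: JSON.stringify({ ldapGroupCN: "Engineers", groupSlug: "engineers" })
});
const groupMap = await res.json(); // shape validated server-side against LdapGroupMapsSchema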
|
@ -22,10 +22,6 @@ const addAcceptedUsersToGroup = async ({
|
||||
projectBotDAL,
|
||||
tx
|
||||
}: TAddUsersToGroup) => {
|
||||
console.log("addAcceptedUsersToGroup args: ", {
|
||||
userIds,
|
||||
group
|
||||
});
|
||||
const users = await userDAL.findUserEncKeyByUserIdsBatch(
|
||||
{
|
||||
userIds
|
||||
|
@@ -2,6 +2,9 @@ import { ForbiddenError } from "@casl/ability";
import jwt from "jsonwebtoken";

import { OrgMembershipRole, OrgMembershipStatus, SecretKeyEncoding, TLdapConfigsUpdate } from "@app/db/schemas";
import { TGroupDALFactory } from "@app/ee/services/group/group-dal";
import { addUsersToGroupByUserIds, removeUsersFromGroupByUserIds } from "@app/ee/services/group/group-fns";
import { TUserGroupMembershipDALFactory } from "@app/ee/services/group/user-group-membership-dal";
import { getConfig } from "@app/lib/config/env";
import {
  decryptSymmetric,

@@ -13,8 +16,12 @@
} from "@app/lib/crypto/encryption";
import { BadRequestError } from "@app/lib/errors";
import { AuthMethod, AuthTokenType } from "@app/services/auth/auth-type";
import { TGroupProjectDALFactory } from "@app/services/group-project/group-project-dal";
import { TOrgBotDALFactory } from "@app/services/org/org-bot-dal";
import { TOrgDALFactory } from "@app/services/org/org-dal";
import { TProjectDALFactory } from "@app/services/project/project-dal";
import { TProjectBotDALFactory } from "@app/services/project-bot/project-bot-dal";
import { TProjectKeyDALFactory } from "@app/services/project-key/project-key-dal";
import { TUserDALFactory } from "@app/services/user/user-dal";
import { normalizeUsername } from "@app/services/user/user-fns";
import { TUserAliasDALFactory } from "@app/services/user-alias/user-alias-dal";

@@ -23,16 +30,38 @@ import { TLicenseServiceFactory } from "../license/license-service";
import { OrgPermissionActions, OrgPermissionSubjects } from "../permission/org-permission";
import { TPermissionServiceFactory } from "../permission/permission-service";
import { TLdapConfigDALFactory } from "./ldap-config-dal";
import { TCreateLdapCfgDTO, TGetLdapCfgDTO, TLdapLoginDTO, TUpdateLdapCfgDTO } from "./ldap-config-types";
import {
  TCreateLdapCfgDTO,
  TCreateLdapGroupMapDTO,
  TDeleteLdapGroupMapDTO,
  TGetLdapCfgDTO,
  TGetLdapGroupMapsDTO,
  TLdapLoginDTO,
  TUpdateLdapCfgDTO
} from "./ldap-config-types";
import { TLdapGroupMapDALFactory } from "./ldap-group-map-dal";

type TLdapConfigServiceFactoryDep = {
  ldapConfigDAL: TLdapConfigDALFactory;
  ldapConfigDAL: Pick<TLdapConfigDALFactory, "create" | "update" | "findOne">;
  ldapGroupMapDAL: Pick<TLdapGroupMapDALFactory, "find" | "create" | "delete" | "findLdapGroupMapsByLdapConfigId">;
  orgDAL: Pick<
    TOrgDALFactory,
    "createMembership" | "updateMembershipById" | "findMembership" | "findOrgById" | "findOne" | "updateById"
  >;
  orgBotDAL: Pick<TOrgBotDALFactory, "findOne" | "create" | "transaction">;
  userDAL: Pick<TUserDALFactory, "create" | "findOne" | "transaction" | "updateById">;
  groupDAL: Pick<TGroupDALFactory, "find" | "findOne">;
  groupProjectDAL: Pick<TGroupProjectDALFactory, "find">;
  projectKeyDAL: Pick<TProjectKeyDALFactory, "find" | "findLatestProjectKey" | "insertMany" | "delete">;
  projectDAL: Pick<TProjectDALFactory, "findProjectGhostUser">;
  projectBotDAL: Pick<TProjectBotDALFactory, "findOne">;
  userGroupMembershipDAL: Pick<
    TUserGroupMembershipDALFactory,
    "find" | "transaction" | "insertMany" | "filterProjectsByUserMembership" | "delete"
  >;
  userDAL: Pick<
    TUserDALFactory,
    "create" | "findOne" | "transaction" | "updateById" | "findUserEncKeyByUserIdsBatch" | "find"
  >;
  userAliasDAL: Pick<TUserAliasDALFactory, "create" | "findOne">;
  permissionService: Pick<TPermissionServiceFactory, "getOrgPermission">;
  licenseService: Pick<TLicenseServiceFactory, "getPlan">;

@@ -42,8 +71,15 @@ export type TLdapConfigServiceFactory = ReturnType<typeof ldapConfigServiceFacto

export const ldapConfigServiceFactory = ({
  ldapConfigDAL,
  ldapGroupMapDAL,
  orgDAL,
  orgBotDAL,
  groupDAL,
  groupProjectDAL,
  projectKeyDAL,
  projectDAL,
  projectBotDAL,
  userGroupMembershipDAL,
  userDAL,
  userAliasDAL,
  permissionService,

@@ -60,6 +96,8 @@
    bindDN,
    bindPass,
    searchBase,
    groupSearchBase,
    groupSearchFilter,
    caCert
  }: TCreateLdapCfgDTO) => {
    const { permission } = await permissionService.getOrgPermission(actor, actorId, orgId, actorAuthMethod, actorOrgId);

@@ -135,6 +173,8 @@
      bindPassIV,
      bindPassTag,
      searchBase,
      groupSearchBase,
      groupSearchFilter,
      encryptedCACert,
      caCertIV,
      caCertTag

@@ -154,6 +194,8 @@
    bindDN,
    bindPass,
    searchBase,
    groupSearchBase,
    groupSearchFilter,
    caCert
  }: TUpdateLdapCfgDTO) => {
    const { permission } = await permissionService.getOrgPermission(actor, actorId, orgId, actorAuthMethod, actorOrgId);

@@ -169,7 +211,9 @@
    const updateQuery: TLdapConfigsUpdate = {
      isActive,
      url,
      searchBase
      searchBase,
      groupSearchBase,
      groupSearchFilter
    };

    const orgBot = await orgBotDAL.findOne({ orgId });

@@ -271,6 +315,8 @@
      bindDN,
      bindPass,
      searchBase: ldapConfig.searchBase,
      groupSearchBase: ldapConfig.groupSearchBase,
      groupSearchFilter: ldapConfig.groupSearchFilter,
      caCert
    };
  };

@@ -305,7 +351,7 @@
      bindCredentials: ldapConfig.bindPass,
      searchBase: ldapConfig.searchBase,
      searchFilter: "(uid={{username}})",
      searchAttributes: ["uid", "uidNumber", "givenName", "sn", "mail"],
      // searchAttributes: ["uid", "uidNumber", "givenName", "sn", "mail"],
      ...(ldapConfig.caCert !== ""
        ? {
            tlsOptions: {

@@ -320,7 +366,17 @@
    return { opts, ldapConfig };
  };

  const ldapLogin = async ({ externalId, username, firstName, lastName, emails, orgId, relayState }: TLdapLoginDTO) => {
  const ldapLogin = async ({
    ldapConfigId,
    externalId,
    username,
    firstName,
    lastName,
    emails,
    groups,
    orgId,
    relayState
  }: TLdapLoginDTO) => {
    const appCfg = getConfig();
    let userAlias = await userAliasDAL.findOne({
      externalId,

@@ -394,7 +450,84 @@
      });
    }

    const user = await userDAL.findOne({ id: userAlias.userId });
    const user = await userDAL.transaction(async (tx) => {
      const newUser = await userDAL.findOne({ id: userAlias.userId }, tx);
      if (groups) {
        const ldapGroupIdsToBePartOf = (
          await ldapGroupMapDAL.find({
            ldapConfigId,
            $in: {
              ldapGroupCN: groups.map((group) => group.cn)
            }
          })
        ).map((groupMap) => groupMap.groupId);

        const groupsToBePartOf = await groupDAL.find({
          orgId,
          $in: {
            id: ldapGroupIdsToBePartOf
          }
        });
        const toBePartOfGroupIdsSet = new Set(groupsToBePartOf.map((groupToBePartOf) => groupToBePartOf.id));

        const allLdapGroupMaps = await ldapGroupMapDAL.find({
          ldapConfigId
        });

        const ldapGroupIdsCurrentlyPartOf = (
          await userGroupMembershipDAL.find({
            userId: newUser.id,
            $in: {
              groupId: allLdapGroupMaps.map((groupMap) => groupMap.groupId)
            }
          })
        ).map((userGroupMembership) => userGroupMembership.groupId);

        const userGroupMembershipGroupIdsSet = new Set(ldapGroupIdsCurrentlyPartOf);

        for await (const group of groupsToBePartOf) {
          if (!userGroupMembershipGroupIdsSet.has(group.id)) {
            // add user to group that they should be part of
            await addUsersToGroupByUserIds({
              group,
              userIds: [newUser.id],
              userDAL,
              userGroupMembershipDAL,
              orgDAL,
              groupProjectDAL,
              projectKeyDAL,
              projectDAL,
              projectBotDAL,
              tx
            });
          }
        }

        const groupsCurrentlyPartOf = await groupDAL.find({
          orgId,
          $in: {
            id: ldapGroupIdsCurrentlyPartOf
          }
        });

        for await (const group of groupsCurrentlyPartOf) {
          if (!toBePartOfGroupIdsSet.has(group.id)) {
            // remove user from group that they should no longer be part of
            await removeUsersFromGroupByUserIds({
              group,
              userIds: [newUser.id],
              userDAL,
              userGroupMembershipDAL,
              groupProjectDAL,
              projectKeyDAL,
              tx
            });
          }
        }
      }

      return newUser;
    });

    const isUserCompleted = Boolean(user.isAccepted);

@@ -424,6 +557,99 @@
    return { isUserCompleted, providerAuthToken };
  };

  const getLdapGroupMaps = async ({
    ldapConfigId,
    actor,
    actorId,
    orgId,
    actorAuthMethod,
    actorOrgId
  }: TGetLdapGroupMapsDTO) => {
    const { permission } = await permissionService.getOrgPermission(actor, actorId, orgId, actorAuthMethod, actorOrgId);
    ForbiddenError.from(permission).throwUnlessCan(OrgPermissionActions.Read, OrgPermissionSubjects.Ldap);

    const ldapConfig = await ldapConfigDAL.findOne({
      id: ldapConfigId,
      orgId
    });

    if (!ldapConfig) throw new BadRequestError({ message: "Failed to find organization LDAP data" });

    const groupMaps = await ldapGroupMapDAL.findLdapGroupMapsByLdapConfigId(ldapConfigId);

    return groupMaps;
  };

  const createLdapGroupMap = async ({
    ldapConfigId,
    ldapGroupCN,
    groupSlug,
    actor,
    actorId,
    orgId,
    actorAuthMethod,
    actorOrgId
  }: TCreateLdapGroupMapDTO) => {
    const { permission } = await permissionService.getOrgPermission(actor, actorId, orgId, actorAuthMethod, actorOrgId);
    ForbiddenError.from(permission).throwUnlessCan(OrgPermissionActions.Create, OrgPermissionSubjects.Ldap);

    const plan = await licenseService.getPlan(orgId);
    if (!plan.ldap)
      throw new BadRequestError({
        message: "Failed to create LDAP group map due to plan restriction. Upgrade plan to create LDAP group map."
      });

    const ldapConfig = await ldapConfigDAL.findOne({
      id: ldapConfigId,
      orgId
    });
    if (!ldapConfig) throw new BadRequestError({ message: "Failed to find organization LDAP data" });

    const group = await groupDAL.findOne({ slug: groupSlug, orgId });
    if (!group) throw new BadRequestError({ message: "Failed to find group" });

    const groupMap = await ldapGroupMapDAL.create({
      ldapConfigId,
      ldapGroupCN,
      groupId: group.id
    });

    return groupMap;
  };

  const deleteLdapGroupMap = async ({
    ldapConfigId,
    ldapGroupMapId,
    actor,
    actorId,
    orgId,
    actorAuthMethod,
    actorOrgId
  }: TDeleteLdapGroupMapDTO) => {
    const { permission } = await permissionService.getOrgPermission(actor, actorId, orgId, actorAuthMethod, actorOrgId);
    ForbiddenError.from(permission).throwUnlessCan(OrgPermissionActions.Delete, OrgPermissionSubjects.Ldap);

    const plan = await licenseService.getPlan(orgId);
    if (!plan.ldap)
      throw new BadRequestError({
        message: "Failed to delete LDAP group map due to plan restriction. Upgrade plan to delete LDAP group map."
      });

    const ldapConfig = await ldapConfigDAL.findOne({
      id: ldapConfigId,
      orgId
    });

    if (!ldapConfig) throw new BadRequestError({ message: "Failed to find organization LDAP data" });

    const [deletedGroupMap] = await ldapGroupMapDAL.delete({
      ldapConfigId: ldapConfig.id,
      id: ldapGroupMapId
    });

    return deletedGroupMap;
  };

  return {
    createLdapCfg,
    updateLdapCfg,

@@ -431,6 +657,9 @@
    getLdapCfg,
    // getLdapPassportOpts,
    ldapLogin,
    bootLdap
    bootLdap,
    getLdapGroupMaps,
    createLdapGroupMap,
    deleteLdapGroupMap
  };
};
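Distilled from the transaction above, the group-sync step is a set reconciliation between the groups a user should be in (per the LDAP group maps) and the mapped groups they are currently in. A minimal sketch, with the DAL lookups replaced by plain arrays of group IDs:

// Minimal sketch of the reconciliation performed inside ldapLogin.
const reconcileGroups = (shouldBeIn: string[], currentlyIn: string[]) => {
  const target = new Set(shouldBeIn);
  const current = new Set(currentlyIn);
  return {
    toAdd: shouldBeIn.filter((id) => !current.has(id)), // handled by addUsersToGroupByUserIds
    toRemove: currentlyIn.filter((id) => !target.has(id)) // handled by removeUsersFromGroupByUserIds
  };
};

// Example: user mapped into ["g1", "g2"] but currently in ["g2", "g3"].
console.log(reconcileGroups(["g1", "g2"], ["g2", "g3"])); // { toAdd: ["g1"], toRemove: ["g3"] }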
@@ -1,5 +1,18 @@
import { TOrgPermission } from "@app/lib/types";

export type TLDAPConfig = {
  id: string;
  organization: string;
  isActive: boolean;
  url: string;
  bindDN: string;
  bindPass: string;
  searchBase: string;
  groupSearchBase: string;
  groupSearchFilter: string;
  caCert: string;
};

export type TCreateLdapCfgDTO = {
  orgId: string;
  isActive: boolean;

@@ -7,6 +20,8 @@ export type TCreateLdapCfgDTO = {
  bindDN: string;
  bindPass: string;
  searchBase: string;
  groupSearchBase: string;
  groupSearchFilter: string;
  caCert: string;
} & TOrgPermission;

@@ -18,6 +33,8 @@ export type TUpdateLdapCfgDTO = {
  bindDN: string;
  bindPass: string;
  searchBase: string;
  groupSearchBase: string;
  groupSearchFilter: string;
  caCert: string;
}> &
  TOrgPermission;

@@ -27,11 +44,31 @@ export type TGetLdapCfgDTO = {
} & TOrgPermission;

export type TLdapLoginDTO = {
  ldapConfigId: string;
  externalId: string;
  username: string;
  firstName: string;
  lastName: string;
  emails: string[];
  orgId: string;
  groups?: {
    dn: string;
    cn: string;
  }[];
  relayState?: string;
};

export type TGetLdapGroupMapsDTO = {
  ldapConfigId: string;
} & TOrgPermission;

export type TCreateLdapGroupMapDTO = {
  ldapConfigId: string;
  ldapGroupCN: string;
  groupSlug: string;
} & TOrgPermission;

export type TDeleteLdapGroupMapDTO = {
  ldapConfigId: string;
  ldapGroupMapId: string;
} & TOrgPermission;
backend/src/ee/services/ldap-config/ldap-fns.ts (72 changes, new file)

@@ -0,0 +1,72 @@
import ldapjs from "ldapjs";

import { logger } from "@app/lib/logger";

import { TLDAPConfig } from "./ldap-config-types";

export const searchGroups = async (
  ldapConfig: TLDAPConfig,
  filter: string,
  base: string
): Promise<{ dn: string; cn: string }[]> => {
  return new Promise((resolve, reject) => {
    const ldapClient = ldapjs.createClient({
      url: ldapConfig.url,
      bindDN: ldapConfig.bindDN,
      bindCredentials: ldapConfig.bindPass,
      ...(ldapConfig.caCert !== ""
        ? {
            tlsOptions: {
              ca: [ldapConfig.caCert]
            }
          }
        : {})
    });

    ldapClient.search(
      base,
      {
        filter,
        scope: "sub"
      },
      (err, res) => {
        if (err) {
          ldapClient.unbind((unbindError) => {
            if (unbindError) {
              logger.error("Error unbinding LDAP client:", unbindError);
            }
          });
          return reject(err);
        }

        const groups: { dn: string; cn: string }[] = [];

        res.on("searchEntry", (entry) => {
          const dn = entry.dn.toString();
          const regex = /cn=([^,]+)/;
          const match = dn.match(regex);
          // parse the cn from the dn
          const cn = (match && match[1]) as string;

          groups.push({ dn, cn });
        });
        res.on("error", (error) => {
          ldapClient.unbind((unbindError) => {
            if (unbindError) {
              logger.error("Error unbinding LDAP client:", unbindError);
            }
          });
          reject(error);
        });
        res.on("end", () => {
          ldapClient.unbind((unbindError) => {
            if (unbindError) {
              logger.error("Error unbinding LDAP client:", unbindError);
            }
          });
          resolve(groups);
        });
      }
    );
  });
};
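For illustration only, here is a sketch of calling searchGroups with the example group filter and group search base from the docs later in this diff; the config values are placeholders, not a real deployment.

import { searchGroups } from "./ldap-fns";
import { TLDAPConfig } from "./ldap-config-types";

// Placeholder config; real values come from the organization's stored LDAP configuration.
const ldapConfig = {
  id: "example-config-id",
  organization: "example-org-id",
  isActive: true,
  url: "ldaps://ldap.acme.com:636",
  bindDN: "cn=infisical,ou=Users,dc=acme,dc=com",
  bindPass: "REDACTED",
  searchBase: "ou=Users,dc=acme,dc=com",
  groupSearchBase: "ou=Groups,dc=acme,dc=com",
  groupSearchFilter: "(objectClass=posixGroup)",
  caCert: ""
} satisfies TLDAPConfig;

const groups = await searchGroups(ldapConfig, ldapConfig.groupSearchFilter, ldapConfig.groupSearchBase);
// e.g. [{ dn: "cn=Engineers,ou=Groups,dc=acme,dc=com", cn: "Engineers" }]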
backend/src/ee/services/ldap-config/ldap-group-map-dal.ts (41 changes, new file)

@@ -0,0 +1,41 @@
import { TDbClient } from "@app/db";
import { TableName } from "@app/db/schemas";
import { DatabaseError } from "@app/lib/errors";
import { ormify, selectAllTableCols } from "@app/lib/knex";

export type TLdapGroupMapDALFactory = ReturnType<typeof ldapGroupMapDALFactory>;

export const ldapGroupMapDALFactory = (db: TDbClient) => {
  const ldapGroupMapOrm = ormify(db, TableName.LdapGroupMap);

  const findLdapGroupMapsByLdapConfigId = async (ldapConfigId: string) => {
    try {
      const docs = await db(TableName.LdapGroupMap)
        .where(`${TableName.LdapGroupMap}.ldapConfigId`, ldapConfigId)
        .join(TableName.Groups, `${TableName.LdapGroupMap}.groupId`, `${TableName.Groups}.id`)
        .select(selectAllTableCols(TableName.LdapGroupMap))
        .select(
          db.ref("id").withSchema(TableName.Groups).as("groupId"),
          db.ref("name").withSchema(TableName.Groups).as("groupName"),
          db.ref("slug").withSchema(TableName.Groups).as("groupSlug")
        );

      return docs.map((doc) => {
        return {
          id: doc.id,
          ldapConfigId: doc.ldapConfigId,
          ldapGroupCN: doc.ldapGroupCN,
          group: {
            id: doc.groupId,
            name: doc.groupName,
            slug: doc.groupSlug
          }
        };
      });
    } catch (error) {
      throw new DatabaseError({ error, name: "findGroupMaps" });
    }
  };

  return { ...ldapGroupMapOrm, findLdapGroupMapsByLdapConfigId };
};
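Roughly, the join above produces flat rows that the map step reshapes into the nested payload returned by the GET group-maps route; sketched below with a made-up row.

// Made-up flattened row, as the join would return it.
const doc = {
  id: "map-1",
  ldapConfigId: "cfg-1",
  ldapGroupCN: "Engineers",
  groupId: "grp-1",
  groupName: "Engineers",
  groupSlug: "engineers"
};

// Reshaped into the nested response shape used by the route schema.
const reshaped = {
  id: doc.id,
  ldapConfigId: doc.ldapConfigId,
  ldapGroupCN: doc.ldapGroupCN,
  group: { id: doc.groupId, name: doc.groupName, slug: doc.groupSlug }
};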
@@ -18,6 +18,7 @@ import { identityProjectAdditionalPrivilegeDALFactory } from "@app/ee/services/i
import { identityProjectAdditionalPrivilegeServiceFactory } from "@app/ee/services/identity-project-additional-privilege/identity-project-additional-privilege-service";
import { ldapConfigDALFactory } from "@app/ee/services/ldap-config/ldap-config-dal";
import { ldapConfigServiceFactory } from "@app/ee/services/ldap-config/ldap-config-service";
import { ldapGroupMapDALFactory } from "@app/ee/services/ldap-config/ldap-group-map-dal";
import { licenseDALFactory } from "@app/ee/services/license/license-dal";
import { licenseServiceFactory } from "@app/ee/services/license/license-service";
import { permissionDALFactory } from "@app/ee/services/permission/permission-dal";

@@ -200,6 +201,7 @@ export const registerRoutes = async (
  const samlConfigDAL = samlConfigDALFactory(db);
  const scimDAL = scimDALFactory(db);
  const ldapConfigDAL = ldapConfigDALFactory(db);
  const ldapGroupMapDAL = ldapGroupMapDALFactory(db);
  const sapApproverDAL = secretApprovalPolicyApproverDALFactory(db);
  const secretApprovalPolicyDAL = secretApprovalPolicyDALFactory(db);
  const secretApprovalRequestDAL = secretApprovalRequestDALFactory(db);

@@ -300,8 +302,15 @@

  const ldapService = ldapConfigServiceFactory({
    ldapConfigDAL,
    ldapGroupMapDAL,
    orgDAL,
    orgBotDAL,
    groupDAL,
    groupProjectDAL,
    projectKeyDAL,
    projectDAL,
    projectBotDAL,
    userGroupMembershipDAL,
    userDAL,
    userAliasDAL,
    permissionService,
@@ -191,7 +191,7 @@ export const authLoginServiceFactory = ({
    const decodedProviderToken = validateProviderAuthToken(providerAuthToken, email);

    authMethod = decodedProviderToken.authMethod;
    if (isAuthMethodSaml(authMethod) && decodedProviderToken.orgId) {
    if ((isAuthMethodSaml(authMethod) || authMethod === AuthMethod.LDAP) && decodedProviderToken.orgId) {
      organizationId = decodedProviderToken.orgId;
    }
  }
@@ -22,10 +22,6 @@ var folderCmd = &cobra.Command{
var getCmd = &cobra.Command{
	Use:   "get",
	Short: "Get folders in a directory",
	PersistentPreRun: func(cmd *cobra.Command, args []string) {
		util.RequireLocalWorkspaceFile()
		util.RequireLogin()
	},
	Run: func(cmd *cobra.Command, args []string) {

		environmentName, _ := cmd.Flags().GetString("env")
@@ -7,6 +7,7 @@ import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"os"
	"regexp"
	"sort"
	"strings"

@@ -204,8 +205,10 @@ var secretsSetCmd = &cobra.Command{
	// decrypt workspace key
	plainTextEncryptionKey := crypto.DecryptAsymmetric(encryptedWorkspaceKey, encryptedWorkspaceKeyNonce, encryptedWorkspaceKeySenderPublicKey, currentUsersPrivateKey)

	infisicalTokenEnv := os.Getenv(util.INFISICAL_TOKEN_NAME)

	// pull current secrets
	secrets, err := util.GetAllEnvironmentVariables(models.GetAllSecretsParameters{Environment: environmentName, SecretsPath: secretsPath}, "")
	secrets, err := util.GetAllEnvironmentVariables(models.GetAllSecretsParameters{Environment: environmentName, SecretsPath: secretsPath, InfisicalToken: infisicalTokenEnv}, "")
	if err != nil {
		util.HandleError(err, "unable to retrieve secrets")
	}
@@ -2,7 +2,6 @@ package util

import (
	"fmt"
	"os"
	"strings"

	"github.com/Infisical/infisical-merge/packages/api"

@@ -13,13 +12,11 @@
func GetAllFolders(params models.GetAllFoldersParameters) ([]models.SingleFolder, error) {

	if params.InfisicalToken == "" {
		params.InfisicalToken = os.Getenv(INFISICAL_TOKEN_NAME)
	}

	var foldersToReturn []models.SingleFolder
	var folderErr error
	if params.InfisicalToken == "" && params.UniversalAuthAccessToken == "" {
		RequireLogin()
		RequireLocalWorkspaceFile()

		log.Debug().Msg("GetAllFolders: Trying to fetch folders using logged in details")
@@ -307,10 +307,6 @@ func FilterSecretsByTag(plainTextSecrets []models.SingleEnvironmentVariable, tag
}

func GetAllEnvironmentVariables(params models.GetAllSecretsParameters, projectConfigFilePath string) ([]models.SingleEnvironmentVariable, error) {
	if params.InfisicalToken == "" {
		params.InfisicalToken = os.Getenv(INFISICAL_TOKEN_NAME)
	}

	isConnected := CheckIsConnectedToInternet()
	var secretsToReturn []models.SingleEnvironmentVariable
	// var serviceTokenDetails api.GetServiceTokenDetailsResponse
@@ -1,36 +0,0 @@
---
title: "LDAP"
description: "Log in to Infisical with LDAP"
---

<Info>
  LDAP is a paid feature.

  If you're using Infisical Cloud, then it is available under the **Enterprise Tier**. If you're self-hosting Infisical,
  then you should contact sales@infisical.com to purchase an enterprise license to use it.
</Info>

You can configure your organization in Infisical to have members authenticate with the platform via [LDAP](https://en.wikipedia.org/wiki/Lightweight_Directory_Access_Protocol).

<Steps>
  <Step title="Prepare the LDAP configuration in Infisical">
    In Infisical, head to your Organization Settings > Authentication > LDAP Configuration and select **Set up LDAP**.

    Next, input your LDAP server settings.

    

    Here's some guidance for each field:

    - URL: The LDAP server to connect to such as `ldap://ldap.your-org.com`, `ldaps://ldap.myorg.com:636` (for connection over SSL/TLS), etc.
    - Bind DN: The distinguished name of object to bind when performing the user search such as `cn=infisical,ou=Users,dc=acme,dc=com`.
    - Bind Pass: The password to use along with `Bind DN` when performing the user search.
    - Search Base / User DN: Base DN under which to perform user search such as `ou=Users,dc=example,dc=com`
    - CA Certificate: The CA certificate to use when verifying the LDAP server certificate.
  </Step>
  <Step title="Enable LDAP in Infisical">
    Enabling LDAP allows members in your organization to log into Infisical via LDAP.

    
  </Step>
</Steps>
@@ -4,16 +4,17 @@ description: "Learn how to log in to Infisical with LDAP."
---

<Info>
  LDAP is a paid feature.
  If you're using Infisical Cloud, then it is available under the **Enterprise Tier**. If you're self-hosting Infisical,
  then you should contact sales@infisical.com to purchase an enterprise license to use it.
  LDAP is a paid feature. If you're using Infisical Cloud, then it is available
  under the **Enterprise Tier**. If you're self-hosting Infisical, then you
  should contact sales@infisical.com to purchase an enterprise license to use
  it.
</Info>

You can configure your organization in Infisical to have members authenticate with the platform via [LDAP](https://en.wikipedia.org/wiki/Lightweight_Directory_Access_Protocol)

<Steps>
  <Step title="Prepare the LDAP configuration in Infisical">
    In Infisical, head to your Organization Settings > Authentication > LDAP Configuration and select **Set up LDAP**.
    In Infisical, head to your Organization Settings > Security > LDAP and select **Manage**.

    Next, input your LDAP server settings.

@@ -24,11 +25,41 @@ You can configure your organization in Infisical to have members authenticate wi
    - URL: The LDAP server to connect to such as `ldap://ldap.your-org.com`, `ldaps://ldap.myorg.com:636` (for connection over SSL/TLS), etc.
    - Bind DN: The distinguished name of object to bind when performing the user search such as `cn=infisical,ou=Users,dc=acme,dc=com`.
    - Bind Pass: The password to use along with `Bind DN` when performing the user search.
    - Search Base / User DN: Base DN under which to perform user search such as `ou=Users,dc=example,dc=com`
    - Search Base / User DN: Base DN under which to perform user search such as `ou=Users,dc=acme,dc=com`
    - Group Search Base / Group DN (optional): LDAP search base to use for group membership search such as `ou=Groups,dc=acme,dc=com`.
    - Group Filter (optional): Template used when constructing the group membership query such as `(objectClass=posixGroup)`. The template can access the following context variables: [`UserDN`, `UserUID`, `UserName`]. The default is `(|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}}))` which is compatible with several common directory schemas.
    - CA Certificate: The CA certificate to use when verifying the LDAP server certificate.

    <Note>
      The **Group Search Base / Group DN** and **Group Filter** fields are both required if you wish to sync LDAP groups to Infisical.
    </Note>

  </Step>
  <Step title="Define mappings from LDAP groups to groups in Infisical">
    In order to sync LDAP groups to Infisical, head to the **LDAP Group Mappings** section to define mappings from LDAP groups to groups in Infisical.

    

    Group mappings ensure that users who log into Infisical via LDAP are added to or removed from the Infisical group(s) that corresponds to the LDAP group(s) they are a member of.

    

    Each group mapping consists of two parts:
    - LDAP Group CN: The common name of the LDAP group to map.
    - Infisical Group: The Infisical group to map the LDAP group to.

    For example, suppose you want to automatically add a user who is part of the LDAP group with CN `Engineers` to the Infisical group `Engineers` when the user sets up their account with Infisical.

    In this case, you would specify a mapping from the LDAP group with CN `Engineers` to the Infisical group `Engineers`.
    Now when the user logs into Infisical via LDAP, Infisical will check the LDAP groups that the user is a part of whilst referencing the group mappings you created earlier. Since the user is a member of the LDAP group with CN `Engineers`, they will be added to the Infisical group `Engineers`.
    In the future, if the user is no longer part of the LDAP group with CN `Engineers`, they will be removed from the Infisical group `Engineers` upon their next login.
    <Note>
      Prior to defining any group mappings, ensure that you've created the Infisical groups that you want to map the LDAP groups to.
      You can read more about creating (user) groups in Infisical [here](/documentation/platform/groups).
    </Note>
  </Step>
  <Step title="Enable LDAP in Infisical">
    Enabling LDAP allows members in your organization to log into Infisical via LDAP.
    
  </Step>
</Steps>
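To make the Group Filter templating concrete, here is a small sketch of the substitution performed by the backend (mirroring the router change earlier in this diff); the user values are made up.

// Default template and the substitution applied when no custom group filter is set.
const defaultTemplate = "(|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}}))";
const user = { uid: "jdoe", dn: "uid=jdoe,ou=Users,dc=acme,dc=com" }; // made-up user
const searchFilter = defaultTemplate
  .replace("{{.Username}}", user.uid)
  .replace("{{.UserDN}}", user.dn);
// Note: String.replace with a string pattern swaps only the first occurrence,
// so the second {{.UserDN}} placeholder stays as-is, exactly as in the backend code above.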
|
@ -4,9 +4,10 @@ description: "Learn how to configure JumpCloud LDAP for authenticating into Infi
|
||||
---
|
||||
|
||||
<Info>
|
||||
LDAP is a paid feature.
|
||||
If you're using Infisical Cloud, then it is available under the **Enterprise Tier**. If you're self-hosting Infisical,
|
||||
then you should contact sales@infisical.com to purchase an enterprise license to use it.
|
||||
LDAP is a paid feature. If you're using Infisical Cloud, then it is available
|
||||
under the **Enterprise Tier**. If you're self-hosting Infisical, then you
|
||||
should contact sales@infisical.com to purchase an enterprise license to use
|
||||
it.
|
||||
</Info>
|
||||
|
||||
<Steps>
|
||||
@ -17,13 +18,13 @@ description: "Learn how to configure JumpCloud LDAP for authenticating into Infi
|
||||
When creating the user, input their **First Name**, **Last Name**, **Username** (required), **Company Email** (required), and **Description**.
|
||||
Also, create a password for the user.
|
||||
|
||||
Next, under User Security Settings and Permissions > Permission Settings, check the box next to **Enable as LDAP Bind DN**.
|
||||
Next, under User Security Settings and Permissions > Permission Settings, check the box next to **Enable as LDAP Bind DN**.
|
||||
|
||||

|
||||
|
||||
</Step>
|
||||
<Step title="Prepare the LDAP configuration in Infisical">
|
||||
In Infisical, head to your Organization Settings > Authentication > LDAP Configuration and select **Set up LDAP**.
|
||||
In Infisical, head to your Organization Settings > Security > LDAP and select **Manage**.
|
||||
|
||||
Next, input your JumpCloud LDAP server settings.
|
||||
|
||||
@ -35,20 +36,48 @@ description: "Learn how to configure JumpCloud LDAP for authenticating into Infi
|
||||
- Bind DN: The distinguished name of object to bind when performing the user search (`uid=<ldap-user-username>,ou=Users,o=<your-org-id>,dc=jumpcloud,dc=com`).
|
||||
- Bind Pass: The password to use along with `Bind DN` when performing the user search.
|
||||
- Search Base / User DN: Base DN under which to perform user search (`ou=Users,o=<your-org-id>,dc=jumpcloud,dc=com`).
|
||||
- Group Search Base / Group DN (optional): LDAP search base to use for group membership search (`ou=Users,o=<your-org-id>,dc=jumpcloud,dc=com`).
|
||||
- Group Filter (optional): Template used when constructing the group membership query (`(objectClass=groupOfNames)`).
|
||||
- CA Certificate: The CA certificate to use when verifying the LDAP server certificate (instructions to obtain the certificate for JumpCloud [here](https://jumpcloud.com/support/connect-to-ldap-with-tls-ssl)).
|
||||
|
||||
<Tip>
|
||||
When filling out the **Bind DN** and **Bind Pass** fields, refer to the username and password of the user created in Step 1.
|
||||
|
||||
Also, for the **Bind DN** and **Search Base / User DN** fields, you'll want to use the organization ID that appears
|
||||
Also, for the **Bind DN** and **Search Base / User DN** fields, you'll want to use the organization ID that appears
|
||||
in your LDAP instance **ORG DN**.
|
||||
</Tip>
|
||||
</Step>
|
||||
<Step title="Define mappings from LDAP groups to groups in Infisical">
|
||||
In order to sync LDAP groups to Infisical, head to the **LDAP Group Mappings** section to define mappings from LDAP groups to groups in Infisical.
|
||||
|
||||

|
||||
|
||||
Group mappings ensure that users who log into Infisical via LDAP are added to or removed from the Infisical group(s) that corresponds to the LDAP group(s) they are a member of.
|
||||
|
||||

|
||||
|
||||
Each group mapping consists of two parts:
|
||||
- LDAP Group CN: The common name of the LDAP group to map.
|
||||
- Infisical Group: The Infisical group to map the LDAP group to.
|
||||
|
||||
For example, suppose you want to automatically add a user who is part of the LDAP group with CN `Engineers` to the Infisical group `Engineers` when the user sets up their account with Infisical.
|
||||
|
||||
In this case, you would specify a mapping from the LDAP group with CN `Engineers` to the Infisical group `Engineers`.
|
||||
Now when the user logs into Infisical via LDAP, Infisical will check the LDAP groups that the user is a part of whilst referencing the group mappings you created earlier. Since the user is a member of the LDAP group with CN `Engineers`, they will be added to the Infisical group `Engineers`.
|
||||
In the future, if the user is no longer part of the LDAP group with CN `Engineers`, they will be removed from the Infisical group `Engineers` upon their next login.
|
||||
<Note>
|
||||
Prior to defining any group mappings, ensure that you've created the Infisical groups that you want to map the LDAP groups to.
|
||||
You can read more about creating (user) groups in Infisical [here](/documentation/platform/groups).
|
||||
</Note>
|
||||
|
||||
</Step>
|
||||
<Step title="Enable LDAP in Infisical">
|
||||
Enabling LDAP allows members in your organization to log into Infisical via LDAP.
|
||||

|
||||
</Step>
|
||||
|
||||
</Steps>
|
||||
|
||||
Resources:
|
||||
- [JumpCloud Cloud LDAP Guide](https://jumpcloud.com/support/use-cloud-ldap)
|
||||
|
||||
- [JumpCloud Cloud LDAP Guide](https://jumpcloud.com/support/use-cloud-ldap)
|
||||
|
Binary file not shown. Before: 427 KiB. After: 439 KiB.

docs/images/platform/ldap/ldap-group-mappings-section.png (BIN, new file)
Binary file not shown. After: 721 KiB.

docs/images/platform/ldap/ldap-group-mappings-table.png (BIN, new file)
Binary file not shown. After: 456 KiB.

Binary file not shown. Before: 537 KiB. After: 599 KiB.
@@ -201,7 +201,6 @@
      "group": "Installation methods",
      "pages": [
        "self-hosting/deployment-options/standalone-infisical",
        "self-hosting/deployment-options//docker-swarm",
        "self-hosting/deployment-options/docker-compose",
        "self-hosting/deployment-options/kubernetes-helm"
      ]

@@ -217,7 +216,8 @@
    {
      "group": "Reference architectures",
      "pages": [
        "self-hosting/reference-architectures/aws-ecs"
        "self-hosting/reference-architectures/aws-ecs",
        "self-hosting/reference-architectures/on-premise"
      ]
    },
    "self-hosting/ee",
@@ -12,7 +12,7 @@ to run a functional instance of Infisical.
<Warning>
  This Docker Compose configuration is not designed for high-availability production scenarios.
  It includes just the essential components needed to set up an Infisical proof of concept (POC).
  To run Infisical in a highly available manner, give the [Docker Swarm guide](/self-hosting/deployment-options/docker-swarm).
  Additional configuration is required to enhance data redundancy and ensure higher availability for production environments.
</Warning>

## Verify prerequisites
@ -1,571 +0,0 @@
---
title: "Docker Swarm"
description: "How to self-host Infisical with Docker Swarm (HA)."
---

# Self-Hosting Infisical with Docker Swarm

This guide provides step-by-step instructions on how to self-host Infisical using Docker Swarm. It is particularly helpful for those wanting to self-host Infisical on premise while still maintaining high availability (HA) for the core Infisical components.
The guide demonstrates a setup with three nodes, ensuring that the cluster can tolerate the failure of one node while remaining fully operational.

## Docker Swarm

[Docker Swarm](https://docs.docker.com/engine/swarm/) is a native clustering and orchestration solution for Docker containers.
It simplifies the deployment and management of containerized applications across multiple nodes, making it a great choice for self-hosting Infisical.

Unlike Kubernetes, which requires a deep understanding of its ecosystem, Docker Swarm will feel familiar to anyone accustomed to Docker and Docker Compose.
For this reason, we suggest teams use Docker Swarm to deploy Infisical in a highly available and fault-tolerant manner.
## Prerequisites

- Understanding of Docker Swarm
- Bare/virtual machines with Docker installed on each VM.
- Docker Swarm initialized on the VMs.

## Core Components for High Availability

The provided Docker stack includes the following core components to achieve high availability:

1. **Spilo**: [Spilo](https://github.com/zalando/spilo) is used to run PostgreSQL with [Patroni](https://github.com/zalando/patroni) for HA and automatic failover. It utilizes etcd for leader election of the PostgreSQL instances.

2. **Redis**: Redis is used for caching and is set up with Redis Sentinel for HA. The stack includes three Redis replicas and three Redis Sentinel instances for monitoring and failover.

3. **Infisical**: Infisical is stateless, allowing for easy scaling and replication across multiple nodes.

4. **HAProxy**: HAProxy is used as a load balancer to distribute traffic to the PostgreSQL and Redis instances. It is configured to perform health checks and route requests to the appropriate backend services.
## Node Failure Tolerance

To ensure Infisical is highly available and fault tolerant, it's important to choose an appropriate number of nodes for the cluster.
The following table shows the relationship between the number of nodes and the maximum number of nodes that can be down while the cluster continues to function:

| Total Nodes | Max Nodes Down | Min Nodes Required |
| ----------- | -------------- | ------------------ |
| 1           | 0              | 1                  |
| 2           | 0              | 2                  |
| 3           | 1              | 2                  |
| 4           | 1              | 3                  |
| 5           | 2              | 3                  |
| 6           | 2              | 4                  |
| 7           | 3              | 4                  |

The formula for calculating the minimum number of nodes required is `floor(n/2) + 1`, where `n` is the total number of nodes. For example, with `n = 5`, `floor(5/2) + 1 = 3` nodes must stay up, so the cluster can tolerate two simultaneous node failures.

This guide demonstrates a setup with three nodes, which allows for one node to be down while the cluster remains operational. This fault tolerance applies to the following components:

- Redis Sentinel: With three Sentinel instances, one instance can be down, and the remaining two can still form a quorum to make decisions.
- Redis: With three Redis instances (one master and two replicas), one instance can be down, and the remaining two can continue to provide caching services.
- PostgreSQL: With three PostgreSQL instances managed by Patroni and etcd, one instance can be down, and the remaining two can maintain data consistency and availability.
- Manager Nodes: In a Docker Swarm cluster with three manager nodes, one manager node can be down, and the remaining two can continue to manage the cluster. For the sake of simplicity, the example in this guide only contains one manager node.

It's important to note that while the cluster can tolerate the failure of one node in a three-node setup, it's recommended to have a minimum of three nodes to ensure high availability.
With two nodes, the failure of a single node can result in a loss of quorum and potential downtime.
## Docker Deployment Stack

<Tabs>
<Tab title="Docker Swarm stack">
```yaml infisical-stack.yaml
version: "3"

services:
  haproxy:
    image: haproxy:latest
    ports:
      - '7001:7000'
      - '5002:5433'
      - '5003:5434'
      - '6379:6379'
      - '8080:8080'
    networks:
      - infisical
    configs:
      - source: haproxy-config
        target: /usr/local/etc/haproxy/haproxy.cfg
    deploy:
      placement:
        constraints:
          - node.labels.name == node1

  infisical:
    container_name: infisical-backend
    image: infisical/infisical:v0.60.0-postgres
    env_file: .env
    ports:
      - 80:8080
    environment:
      - NODE_ENV=production
    networks:
      - infisical
    secrets:
      - env_file

  etcd1:
    image: ghcr.io/zalando/spilo-16:3.2-p2
    networks:
      - infisical
    environment:
      ETCD_UNSUPPORTED_ARCH: arm64
    container_name: demo-etcd1
    deploy:
      placement:
        constraints:
          - node.labels.name == node1
    hostname: etcd1
    command: |
      etcd --name etcd1
      --listen-client-urls http://0.0.0.0:2379
      --listen-peer-urls=http://0.0.0.0:2380
      --advertise-client-urls http://etcd1:2379
      --initial-cluster=etcd1=http://etcd1:2380,etcd2=http://etcd2:2380,etcd3=http://etcd3:2380
      --initial-advertise-peer-urls=http://etcd1:2380
      --initial-cluster-state=new

  etcd2:
    image: ghcr.io/zalando/spilo-16:3.2-p2
    networks:
      - infisical
    environment:
      ETCD_UNSUPPORTED_ARCH: arm64
    container_name: demo-etcd2
    hostname: etcd2
    deploy:
      placement:
        constraints:
          - node.labels.name == node2
    command: |
      etcd --name etcd2
      --listen-client-urls http://0.0.0.0:2379
      --listen-peer-urls=http://0.0.0.0:2380
      --advertise-client-urls http://etcd2:2379
      --initial-cluster=etcd1=http://etcd1:2380,etcd2=http://etcd2:2380,etcd3=http://etcd3:2380
      --initial-advertise-peer-urls=http://etcd2:2380
      --initial-cluster-state=new

  etcd3:
    image: ghcr.io/zalando/spilo-16:3.2-p2
    networks:
      - infisical
    environment:
      ETCD_UNSUPPORTED_ARCH: arm64
    container_name: demo-etcd3
    hostname: etcd3
    deploy:
      placement:
        constraints:
          - node.labels.name == node3
    command: |
      etcd --name etcd3
      --listen-client-urls http://0.0.0.0:2379
      --listen-peer-urls=http://0.0.0.0:2380
      --advertise-client-urls http://etcd3:2379
      --initial-cluster=etcd1=http://etcd1:2380,etcd2=http://etcd2:2380,etcd3=http://etcd3:2380
      --initial-advertise-peer-urls=http://etcd3:2380
      --initial-cluster-state=new

  spolo1:
    image: ghcr.io/zalando/spilo-16:3.2-p2
    container_name: postgres-1
    networks:
      - infisical
    hostname: postgres-1
    environment:
      ETCD_HOSTS: etcd1:2379,etcd2:2379,etcd3:2379
      PGPASSWORD_SUPERUSER: "postgres"
      PGUSER_SUPERUSER: "postgres"
      SCOPE: infisical
    volumes:
      - postgres_data1:/home/postgres/pgdata
    deploy:
      placement:
        constraints:
          - node.labels.name == node1

  spolo2:
    image: ghcr.io/zalando/spilo-16:3.2-p2
    container_name: postgres-2
    networks:
      - infisical
    hostname: postgres-2
    environment:
      ETCD_HOSTS: etcd1:2379,etcd2:2379,etcd3:2379
      PGPASSWORD_SUPERUSER: "postgres"
      PGUSER_SUPERUSER: "postgres"
      SCOPE: infisical
    volumes:
      - postgres_data2:/home/postgres/pgdata
    deploy:
      placement:
        constraints:
          - node.labels.name == node2

  spolo3:
    image: ghcr.io/zalando/spilo-16:3.2-p2
    container_name: postgres-3
    networks:
      - infisical
    hostname: postgres-3
    environment:
      ETCD_HOSTS: etcd1:2379,etcd2:2379,etcd3:2379
      PGPASSWORD_SUPERUSER: "postgres"
      PGUSER_SUPERUSER: "postgres"
      SCOPE: infisical
    volumes:
      - postgres_data3:/home/postgres/pgdata
    deploy:
      placement:
        constraints:
          - node.labels.name == node3

  redis_replica0:
    image: bitnami/redis:6.2.10
    environment:
      - REDIS_REPLICATION_MODE=master
      - REDIS_PASSWORD=123456
    networks:
      - infisical
    deploy:
      placement:
        constraints:
          - node.labels.name == node1

  redis_replica1:
    image: bitnami/redis:6.2.10
    environment:
      - REDIS_REPLICATION_MODE=slave
      - REDIS_MASTER_HOST=redis_replica0
      - REDIS_MASTER_PORT_NUMBER=6379
      - REDIS_MASTER_PASSWORD=123456
      - REDIS_PASSWORD=123456
    networks:
      - infisical
    deploy:
      placement:
        constraints:
          - node.labels.name == node2

  redis_replica2:
    image: bitnami/redis:6.2.10
    environment:
      - REDIS_REPLICATION_MODE=slave
      - REDIS_MASTER_HOST=redis_replica0
      - REDIS_MASTER_PORT_NUMBER=6379
      - REDIS_MASTER_PASSWORD=123456
      - REDIS_PASSWORD=123456
    networks:
      - infisical
    deploy:
      placement:
        constraints:
          - node.labels.name == node3

  redis_sentinel1:
    image: bitnami/redis-sentinel:6.2.10
    environment:
      - REDIS_SENTINEL_QUORUM=2
      - REDIS_SENTINEL_DOWN_AFTER_MILLISECONDS=5000
      - REDIS_SENTINEL_FAILOVER_TIMEOUT=60000
      - REDIS_SENTINEL_PORT_NUMBER=26379
      - REDIS_MASTER_HOST=redis_replica1
      - REDIS_MASTER_PORT_NUMBER=6379
      - REDIS_MASTER_PASSWORD=123456
    networks:
      - infisical
    deploy:
      placement:
        constraints:
          - node.labels.name == node1

  redis_sentinel2:
    image: bitnami/redis-sentinel:6.2.10
    environment:
      - REDIS_SENTINEL_QUORUM=2
      - REDIS_SENTINEL_DOWN_AFTER_MILLISECONDS=5000
      - REDIS_SENTINEL_FAILOVER_TIMEOUT=60000
      - REDIS_SENTINEL_PORT_NUMBER=26379
      - REDIS_MASTER_HOST=redis_replica1
      - REDIS_MASTER_PORT_NUMBER=6379
      - REDIS_MASTER_PASSWORD=123456
    networks:
      - infisical
    deploy:
      placement:
        constraints:
          - node.labels.name == node2

  redis_sentinel3:
    image: bitnami/redis-sentinel:6.2.10
    environment:
      - REDIS_SENTINEL_QUORUM=2
      - REDIS_SENTINEL_DOWN_AFTER_MILLISECONDS=5000
      - REDIS_SENTINEL_FAILOVER_TIMEOUT=60000
      - REDIS_SENTINEL_PORT_NUMBER=26379
      - REDIS_MASTER_HOST=redis_replica1
      - REDIS_MASTER_PORT_NUMBER=6379
      - REDIS_MASTER_PASSWORD=123456
    networks:
      - infisical
    deploy:
      placement:
        constraints:
          - node.labels.name == node3

networks:
  infisical:

volumes:
  postgres_data1:
  postgres_data2:
  postgres_data3:
  postgres_data4:
  redis0:
  redis1:
  redis2:

configs:
  haproxy-config:
    file: ./haproxy.cfg

secrets:
  env_file:
    file: .env
```
</Tab>
<Tab title="HA Proxy config">
```text haproxy.cfg
global
    maxconn 10000
    log stdout format raw local0

defaults
    log global
    mode tcp
    retries 3
    timeout client 30m
    timeout connect 10s
    timeout server 30m
    timeout check 5s

listen stats
    mode http
    bind *:7000
    stats enable
    stats uri /

resolvers hostdns
    nameserver dns 127.0.0.11:53
    resolve_retries 3
    timeout resolve 1s
    timeout retry 1s
    hold valid 5s

frontend master
    bind *:5433
    default_backend master_backend

frontend replicas
    bind *:5434
    default_backend replica_backend

backend master_backend
    option httpchk GET /master
    http-check expect status 200
    default-server inter 3s fall 3 rise 2 on-marked-down shutdown-sessions
    server postgres-1 postgres-1:5432 check port 8008 resolvers hostdns
    server postgres-2 postgres-2:5432 check port 8008 resolvers hostdns
    server postgres-3 postgres-3:5432 check port 8008 resolvers hostdns

backend replica_backend
    option httpchk GET /replica
    http-check expect status 200
    default-server inter 3s fall 3 rise 2 on-marked-down shutdown-sessions
    server postgres-1 postgres-1:5432 check port 8008 resolvers hostdns
    server postgres-2 postgres-2:5432 check port 8008 resolvers hostdns
    server postgres-3 postgres-3:5432 check port 8008 resolvers hostdns

frontend redis_frontend
    bind *:6379
    default_backend redis_backend

backend redis_backend
    option tcp-check
    tcp-check send AUTH\ 123456\r\n
    tcp-check expect string +OK
    tcp-check send PING\r\n
    tcp-check expect string +PONG
    tcp-check send info\ replication\r\n
    tcp-check expect string role:master
    tcp-check send QUIT\r\n
    tcp-check expect string +OK
    server redis_master redis_replica0:6379 check inter 1s
    server redis_replica1 redis_replica1:6379 check inter 1s
    server redis_replica2 redis_replica2:6379 check inter 1s

frontend infisical_frontend
    bind *:8080
    default_backend infisical_backend

backend infisical_backend
    option httpchk GET /api/status
    http-check expect status 200
    server infisical infisical:8080 check inter 1s
```
</Tab>
<Tab title=".example-env">
```env .env
# Keys
# Required key for platform encryption/decryption ops
# THIS IS A SAMPLE ENCRYPTION KEY AND SHOULD NEVER BE USED FOR PRODUCTION
ENCRYPTION_KEY=6c1fe4e407b8911c104518103505b218

# JWT
# Required secrets to sign JWT tokens
# THIS IS A SAMPLE AUTH_SECRET KEY AND SHOULD NEVER BE USED FOR PRODUCTION
AUTH_SECRET=5lrMXKKWCVocS/uerPsl7V+TX/aaUaI7iDkgl3tSmLE=

DB_CONNECTION_URI=postgres://infisical:infisical@haproxy:5433/infisical?sslmode=no-verify

# Redis
REDIS_URL=redis://:123456@haproxy:6379

# Website URL
# Required
SITE_URL=http://localhost:8080

# Mail/SMTP
SMTP_HOST=
SMTP_PORT=
SMTP_NAME=
SMTP_USERNAME=
SMTP_PASSWORD=

# Integration
# Optional only if integration is used
CLIENT_ID_HEROKU=
CLIENT_ID_VERCEL=
CLIENT_ID_NETLIFY=
CLIENT_ID_GITHUB=
CLIENT_ID_GITLAB=
CLIENT_ID_BITBUCKET=
CLIENT_SECRET_HEROKU=
CLIENT_SECRET_VERCEL=
CLIENT_SECRET_NETLIFY=
CLIENT_SECRET_GITHUB=
CLIENT_SECRET_GITLAB=
CLIENT_SECRET_BITBUCKET=
CLIENT_SLUG_VERCEL=

# Sentry (optional) for monitoring errors
SENTRY_DSN=

# Infisical Cloud-specific configs
# Ignore - Not applicable for self-hosted version
POSTHOG_HOST=
POSTHOG_PROJECT_API_KEY=

# SSO-specific variables
CLIENT_ID_GOOGLE_LOGIN=
CLIENT_SECRET_GOOGLE_LOGIN=

CLIENT_ID_GITHUB_LOGIN=
CLIENT_SECRET_GITHUB_LOGIN=

CLIENT_ID_GITLAB_LOGIN=
CLIENT_SECRET_GITLAB_LOGIN=
```
</Tab>
</Tabs>
The provided Docker stack YAML file defines the services and their configurations for deploying Infisical with high availability. The main components of this stack are as follows:

1. **HAProxy**: The HAProxy service fronts PostgreSQL (5433 for the master, 5434 for replicas), the Redis master (6379), and the Infisical backend (8080); note that some of these are published on different host ports in the stack file (for example, host port 5002 maps to the master frontend on 5433). It uses a config file (`haproxy.cfg`) to define the load balancing and health check rules.

2. **Infisical**: The Infisical backend service is deployed with a PostgreSQL-compatible image (pinned to `v0.60.0-postgres` in the stack above). It is connected to the `infisical` network and uses secrets for environment variables.

3. **etcd**: Three etcd instances (etcd1, etcd2, etcd3) are deployed, one on each node, to provide distributed key-value storage for leader election and configuration management.

4. **Spilo**: Three Spilo instances (spolo1, spolo2, spolo3) are deployed, one on each node, to run PostgreSQL with Patroni for high availability. They are connected to the `infisical` network and use persistent volumes for data storage.

5. **Redis**: Three Redis instances (redis_replica0, redis_replica1, redis_replica2) are deployed, one on each node, with redis_replica0 acting as the master. They are connected to the `infisical` network.

6. **Redis Sentinel**: Three Redis Sentinel instances (redis_sentinel1, redis_sentinel2, redis_sentinel3) are deployed, one on each node, to monitor and manage the Redis instances. They are connected to the `infisical` network.
## HAProxy Configuration

The HAProxy configuration file (`haproxy.cfg`) defines the load balancing and health check rules for the PostgreSQL and Redis instances.

1. **Stats**: This section enables the HAProxy statistics dashboard, bound to port 7000 inside the container (published on host port 7001 in the stack above).

2. **Resolvers**: This section defines the DNS resolver for service discovery, using the Docker embedded DNS server.

3. **Frontend**: There are separate frontend sections for the PostgreSQL master (port 5433), PostgreSQL replicas (port 5434), Redis (port 6379), and the Infisical backend (port 8080). Each frontend binds to the respective port and defines the default backend.

4. **Backend**: The backend sections define the servers and health check rules for each service (a manual probe sketch follows this list).
   - For PostgreSQL, there are separate backends for the master and replicas. The health check is performed using an HTTP request to the `/master` or `/replica` endpoint, expecting a 200 status code.
   - For Redis, the backend uses a TCP health check with authentication and expects the role to be `master` for the Redis master instance.
   - For the Infisical backend, the health check is performed using an HTTP request to the `/api/status` endpoint, expecting a 200 status code.
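You can exercise the same probes by hand to confirm they behave as described. This is a minimal sketch, assuming you run the Patroni probes from a container attached to the `infisical` overlay network and query the stats page from the HAProxy node itself:

```
# Patroni's REST API on port 8008 backs the Postgres checks:
# /master returns 200 only on the current primary, /replica only on replicas.
curl -i http://postgres-1:8008/master
curl -i http://postgres-1:8008/replica

# The stats dashboard binds to 7000 in the container and is published on host port 7001.
curl -i http://localhost:7001/
```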
## Setting Up Docker Nodes

1. Initialize Docker Swarm on one of the VMs by running the following command:

   ```
   docker swarm init --advertise-addr <MANAGER_NODE_IP>
   ```

   Replace `<MANAGER_NODE_IP>` with the IP address of the VM that will serve as the manager node. Remember to copy the join token returned by this init command.

2. On the other VMs, join the Docker Swarm by running the command provided by the manager node:

   ```
   docker swarm join --token <JOIN_TOKEN> <MANAGER_NODE_IP>:2377
   ```

   Replace `<JOIN_TOKEN>` with the token provided by the manager node during initialization.

3. Label the nodes with `node.labels.name` to specify their roles. For example:

   ```
   docker node update --label-add name=node1 <NODE1_ID>
   docker node update --label-add name=node2 <NODE2_ID>
   docker node update --label-add name=node3 <NODE3_ID>
   ```

   Replace `<NODE1_ID>`, `<NODE2_ID>`, and `<NODE3_ID>` with the respective node IDs. To view the list of nodes and their IDs, run `docker node ls` on the manager node; a label verification sketch follows this list.
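To find the node IDs and confirm the labels took effect, the standard Docker CLI formatting flags are handy. The snippet below is illustrative and not part of the original stack:

```
# List node IDs, hostnames, and roles (run on the manager)
docker node ls --format '{{.ID}}  {{.Hostname}}  {{.ManagerStatus}}'

# Verify the label on a given node
docker node inspect <NODE1_ID> --format '{{ .Spec.Labels.name }}'
```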
## Deploying the Docker Stack

1. Copy the provided Docker stack YAML file and the HAProxy configuration file to the manager node.

2. Deploy the stack using the following command:

   ```
   docker stack deploy -c infisical-stack.yaml infisical
   ```

   This command deploys the stack with the specified configuration.

3. Run the [schema migration](/self-hosting/configuration/schema-migrations) to initialize the database.
   To connect to the Postgres database, use the following default credentials: username `postgres` and password `postgres`. (Rollout and connectivity sketches follow this list.)
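After deploying, you can watch the rollout with the stock Swarm commands. A hedged sketch; note that Swarm prefixes service names with the stack name, so the backend service becomes `infisical_infisical`:

```
# Each service should eventually reach its desired replica count
docker stack services infisical

# If a service is stuck at 0/1, inspect its tasks for the underlying error
docker service ps infisical_infisical --no-trunc
```

For the migration step, one way to confirm the database is reachable through HAProxy's master frontend is a quick `psql` probe. This assumes `psql` is installed on a host that can reach the HAProxy node; the stack publishes the master frontend (container port 5433) on host port 5002:

```
psql "postgres://postgres:postgres@<MANAGER_NODE_IP>:5002/postgres" -c 'SELECT 1;'
```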
## Scaling and Resilience

To further scale and make the system more resilient, you can add more nodes to the Docker Swarm and update the stack configuration accordingly:

1. Add new VMs and join them to the Docker Swarm as worker nodes (a join-token sketch follows this list).

2. Update the Docker stack YAML file to include the new nodes in the `deploy` section of the relevant services, specifying the appropriate `node.labels.name` constraints.

3. Update the HAProxy configuration file (`haproxy.cfg`) to include the new nodes in the backend sections for PostgreSQL and Redis.

4. Redeploy the updated stack using the `docker stack deploy` command.
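The join command for step 1 can be printed at any time on a manager node; shown here as a convenience, not as part of the original guide:

```
# Prints the full `docker swarm join --token ...` command for new worker nodes
docker swarm join-token worker
```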
Note that the database containers (PostgreSQL) are stateful and cannot simply be replicated. Instead, one database instance is deployed per node to ensure data consistency and avoid conflicts.

<Check>Once all services are running as expected, you may visit the IP address of the node where HAProxy was deployed. This should take you to the Infisical installation wizard.</Check>
@ -22,7 +22,7 @@ description: "Learn how to use Helm chart to install Infisical on your Kubernete
</Step>
<Step title="Select Infisical version">
By default, the Infisical version set in your helm chart will likely be outdated.
Choose the latest Infisical docker image tag from here [here](https://hub.docker.com/r/infisical/infisical/tags).
Choose the latest Infisical docker image tag from [here](https://hub.docker.com/r/infisical/infisical/tags).

```yaml values.yaml
70 docs/self-hosting/reference-architectures/on-premise.mdx Normal file
@ -0,0 +1,70 @@
---
title: "On-premise"
description: "Reference architecture for self-hosting Infisical on premise"
---

Deploying Infisical on-premise with high availability requires deep knowledge in areas like networking, container orchestration, and database management.
This guide presents a reference architecture that outlines how to achieve such a deployment effectively.
For organizations that do not have the necessary resources or expertise, we recommend opting for managed, dedicated Infisical instances or engaging professional services to mitigate the complexities.

## System Overview

The architecture utilizes a combination of Kubernetes for orchestrating stateless components and virtual machines (VMs) or bare metal for stateful components.
The infrastructure spans multiple data centers for redundancy and load distribution, enhancing availability and disaster recovery capabilities.
You may duplicate the architecture in multiple data centers and join them via Consul to increase availability; this way, if one data center goes down, the remaining active data centers take over its workloads.

### Stateful vs stateless workloads

To reduce the challenges of managing state within Kubernetes, including storage provisioning, persistent volume management, and intricate data backup and recovery processes, we strongly recommend deploying stateful components on virtual machines (VMs) or bare metal.
As depicted in the architecture, Infisical is intentionally deployed on Kubernetes to leverage its strengths in managing stateless applications.
Being stateless, Infisical fully benefits from Kubernetes features like horizontal scaling, self-healing, and rolling updates and rollbacks.

## Core Components

### Kubernetes Cluster
Infisical is deployed on a Kubernetes cluster, which allows for container management, auto-scaling, and self-healing capabilities.
A load balancer sits in front of the Kubernetes cluster, directing traffic and ensuring even load distribution across the application nodes.
This is the entry point where all other services will interact with Infisical.

### Consul as the Networking Backbone
Consul is a critical component in the reference architecture, serving as a unified service networking layer that links and controls services across different environments and data centers.
It functions as the common communication channel between data centers for stateless applications on Kubernetes and stateful services such as databases on dedicated VMs or bare metal.
### Postgres with Patroni
The database layer is powered by Postgres, with [Patroni](https://patroni.readthedocs.io/en/latest/) providing automated management to create a high availability setup. Patroni leverages Consul for several critical operations:

- **Redundancy:** By managing a cluster of one primary and multiple secondary Postgres nodes, the architecture ensures redundancy. The primary node handles all write operations, and secondary nodes handle read operations and are prepared to step up in case of primary failure.

- **Failover and Service Discovery:** Consul is integrated with Patroni for service discovery and health checks. When Patroni detects that the primary node is unhealthy, it uses Consul to elect a new primary node from the secondaries, thereby ensuring that the database service remains available.

- **Data Center Awareness:** Patroni configured with Consul is aware of the multi-data center setup and can handle failover across data centers if necessary, which further enhances the system's availability.
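Because Patroni can register the cluster in Consul, the current primary is observable straight from Consul's HTTP API. A sketch under the assumption that service registration is enabled and the service is named after your Patroni `scope` (placeholder below):

```
# List passing instances of the Patroni-registered service; the role tags
# indicate which node is currently the primary
curl -s 'http://127.0.0.1:8500/v1/health/service/<patroni-scope>?passing'
```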
### Redis with Redis Sentinel
For caching and message brokering:

- Redis is deployed with a primary-replica setup.
- Redis Sentinel monitors the Redis nodes, providing automatic failover and service discovery.
- Write operations go to the primary node, and replicas serve read operations, ensuring data integrity and availability.
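Clients and operators can always discover the current Redis primary by asking any Sentinel. This uses Sentinel's standard command set; the host and master name below are placeholders for your deployment:

```
# Returns the IP and port of the primary that Sentinel currently tracks
redis-cli -h <sentinel-host> -p 26379 SENTINEL get-master-addr-by-name <master-name>
```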
## Multi data center deployment
Infisical can be deployed across a number of data centers to increase both performance and resiliency to disaster scenarios.
For mission-critical deployments of Infisical, we recommend deploying it across at least 3 data centers to reduce downtime in the event of a complete data center malfunction.

### Data Center A
Data Center A houses the primary nodes of both Postgres and Redis, which handle all write operations. The secondary nodes and replicas serve as hot standbys for failover. Consul servers maintain the state of the cluster, elect a leader, and facilitate service discovery.

### $n^{th}$ data center
The $n^{th}$ data center acts as a performance and disaster recovery site, featuring a mesh gateway that enables cross-data center service discovery and configuration. It houses additional secondary nodes for Postgres and Redis replicas, which are ready to be promoted in case the primary data center fails. Additionally, this data center can reduce the latency of applications that need to interact with Infisical, particularly if those applications or services are geographically closer to this data center.

## Considerations

The complexity of an on-premise deployment scales with the level of availability required. This reference architecture provides a robust framework for organizations aiming for high availability and disaster resilience. However, it's important to recognize that this is not a one-size-fits-all solution.

Organizations with less stringent Recovery Time Objectives (RTO) might find that [simpler deployment methods](/self-hosting/deployment-options/docker-compose) using tools such as Docker Compose are adequate. Such setups can still provide a reasonable level of service continuity without the complexities involved in managing a multi-data center environment with Kubernetes, Consul, and other high-availability components.

Ultimately, the choice of architecture should be guided by a thorough analysis of business needs, available resources, and expertise.
@ -1 +1,6 @@
export { useCreateLDAPConfig, useGetLDAPConfig, useUpdateLDAPConfig } from "./queries";
export {
  useCreateLDAPConfig,
  useCreateLDAPGroupMapping,
  useDeleteLDAPGroupMapping,
  useUpdateLDAPConfig
} from "./mutations";
export { useGetLDAPConfig, useGetLDAPGroupMaps } from "./queries";
138 frontend/src/hooks/api/ldapConfig/mutations.tsx Normal file
@ -0,0 +1,138 @@
import { useMutation, useQueryClient } from "@tanstack/react-query";

import { apiRequest } from "@app/config/request";

import { ldapConfigKeys } from "./queries";

export const useCreateLDAPConfig = () => {
  const queryClient = useQueryClient();
  return useMutation({
    mutationFn: async ({
      organizationId,
      isActive,
      url,
      bindDN,
      bindPass,
      searchBase,
      groupSearchBase,
      groupSearchFilter,
      caCert
    }: {
      organizationId: string;
      isActive: boolean;
      url: string;
      bindDN: string;
      bindPass: string;
      searchBase: string;
      groupSearchBase: string;
      groupSearchFilter: string;
      caCert?: string;
    }) => {
      const { data } = await apiRequest.post("/api/v1/ldap/config", {
        organizationId,
        isActive,
        url,
        bindDN,
        bindPass,
        searchBase,
        groupSearchBase,
        groupSearchFilter,
        caCert
      });

      return data;
    },
    onSuccess(_, dto) {
      queryClient.invalidateQueries(ldapConfigKeys.getLDAPConfig(dto.organizationId));
    }
  });
};

export const useUpdateLDAPConfig = () => {
  const queryClient = useQueryClient();
  return useMutation({
    mutationFn: async ({
      organizationId,
      isActive,
      url,
      bindDN,
      bindPass,
      searchBase,
      groupSearchBase,
      groupSearchFilter,
      caCert
    }: {
      organizationId: string;
      isActive?: boolean;
      url?: string;
      bindDN?: string;
      bindPass?: string;
      searchBase?: string;
      groupSearchBase?: string;
      groupSearchFilter?: string;
      caCert?: string;
    }) => {
      const { data } = await apiRequest.patch("/api/v1/ldap/config", {
        organizationId,
        isActive,
        url,
        bindDN,
        bindPass,
        searchBase,
        groupSearchBase,
        groupSearchFilter,
        caCert
      });

      return data;
    },
    onSuccess(_, dto) {
      queryClient.invalidateQueries(ldapConfigKeys.getLDAPConfig(dto.organizationId));
    }
  });
};

export const useCreateLDAPGroupMapping = () => {
  const queryClient = useQueryClient();
  return useMutation({
    mutationFn: async ({
      ldapConfigId,
      ldapGroupCN,
      groupSlug
    }: {
      ldapConfigId: string;
      ldapGroupCN: string;
      groupSlug: string;
    }) => {
      const { data } = await apiRequest.post(`/api/v1/ldap/config/${ldapConfigId}/group-maps`, {
        ldapGroupCN,
        groupSlug
      });
      return data;
    },
    onSuccess(_, { ldapConfigId }) {
      queryClient.invalidateQueries(ldapConfigKeys.getLDAPGroupMaps(ldapConfigId));
    }
  });
};

export const useDeleteLDAPGroupMapping = () => {
  const queryClient = useQueryClient();
  return useMutation({
    mutationFn: async ({
      ldapConfigId,
      ldapGroupMapId
    }: {
      ldapConfigId: string;
      ldapGroupMapId: string;
    }) => {
      const { data } = await apiRequest.delete(
        `/api/v1/ldap/config/${ldapConfigId}/group-maps/${ldapGroupMapId}`
      );
      return data;
    },
    onSuccess(_, { ldapConfigId }) {
      queryClient.invalidateQueries(ldapConfigKeys.getLDAPGroupMaps(ldapConfigId));
    }
  });
};
@ -1,9 +1,12 @@
import { useMutation, useQuery, useQueryClient } from "@tanstack/react-query";
import { useQuery } from "@tanstack/react-query";

import { apiRequest } from "@app/config/request";

const ldapConfigKeys = {
  getLDAPConfig: (orgId: string) => [{ orgId }, "organization-ldap"] as const
import { LDAPGroupMap } from "./types";

export const ldapConfigKeys = {
  getLDAPConfig: (orgId: string) => [{ orgId }, "organization-ldap"] as const,
  getLDAPGroupMaps: (ldapConfigId: string) => [{ ldapConfigId }, "ldap-group-maps"] as const
};

export const useGetLDAPConfig = (organizationId: string) => {
@ -18,78 +21,18 @@ export const useGetLDAPConfig = (organizationId: string) => {
  });
};

export const useCreateLDAPConfig = () => {
  const queryClient = useQueryClient();
  return useMutation({
    mutationFn: async ({
      organizationId,
      isActive,
      url,
      bindDN,
      bindPass,
      searchBase,
      caCert
    }: {
      organizationId: string;
      isActive: boolean;
      url: string;
      bindDN: string;
      bindPass: string;
      searchBase: string;
      caCert?: string;
    }) => {
      const { data } = await apiRequest.post("/api/v1/ldap/config", {
        organizationId,
        isActive,
        url,
        bindDN,
        bindPass,
        searchBase,
        caCert
      });
export const useGetLDAPGroupMaps = (ldapConfigId: string) => {
  return useQuery({
    queryKey: ldapConfigKeys.getLDAPGroupMaps(ldapConfigId),
    queryFn: async () => {
      if (!ldapConfigId) return [];

      const { data } = await apiRequest.get<LDAPGroupMap[]>(
        `/api/v1/ldap/config/${ldapConfigId}/group-maps`
      );

      return data;
    },
    onSuccess(_, dto) {
      queryClient.invalidateQueries(ldapConfigKeys.getLDAPConfig(dto.organizationId));
    }
  });
};

export const useUpdateLDAPConfig = () => {
  const queryClient = useQueryClient();
  return useMutation({
    mutationFn: async ({
      organizationId,
      isActive,
      url,
      bindDN,
      bindPass,
      searchBase,
      caCert
    }: {
      organizationId: string;
      isActive?: boolean;
      url?: string;
      bindDN?: string;
      bindPass?: string;
      searchBase?: string;
      caCert?: string;
    }) => {
      const { data } = await apiRequest.patch("/api/v1/ldap/config", {
        organizationId,
        isActive,
        url,
        bindDN,
        bindPass,
        searchBase,
        caCert
      });

      return data;
    },
    onSuccess(_, dto) {
      queryClient.invalidateQueries(ldapConfigKeys.getLDAPConfig(dto.organizationId));
    }
    enabled: true
  });
};
10 frontend/src/hooks/api/ldapConfig/types.ts Normal file
@ -0,0 +1,10 @@
export type LDAPGroupMap = {
  id: string;
  ldapConfigId: string;
  ldapGroupCN: string;
  group: {
    id: string;
    name: string;
    slug: string;
  };
};
@ -0,0 +1,256 @@
import { Controller, useForm } from "react-hook-form";
import { faUsers, faXmark } from "@fortawesome/free-solid-svg-icons";
import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
import { zodResolver } from "@hookform/resolvers/zod";
import { z } from "zod";

import { createNotification } from "@app/components/notifications";
import {
  Button,
  DeleteActionModal,
  EmptyState,
  FormControl,
  IconButton,
  Input,
  Modal,
  ModalContent,
  Select,
  SelectItem,
  Table,
  TableContainer,
  TableSkeleton,
  TBody,
  Td,
  Th,
  THead,
  Tr
} from "@app/components/v2";
import { useOrganization } from "@app/context";
import {
  useCreateLDAPGroupMapping,
  useDeleteLDAPGroupMapping,
  useGetLDAPConfig,
  useGetLDAPGroupMaps,
  useGetOrganizationGroups
} from "@app/hooks/api";
import { UsePopUpState } from "@app/hooks/usePopUp";

const schema = z.object({
  ldapGroupCN: z.string().min(1, "LDAP Group CN is required"),
  groupSlug: z.string().min(1, "Group Slug is required")
});

export type TFormData = z.infer<typeof schema>;

type Props = {
  popUp: UsePopUpState<["ldapGroupMap", "deleteLdapGroupMap"]>;
  handlePopUpOpen: (
    popUpName: keyof UsePopUpState<["deleteLdapGroupMap"]>,
    data?: {
      ldapGroupMapId: string;
      ldapGroupCN: string;
    }
  ) => void;
  handlePopUpToggle: (
    popUpName: keyof UsePopUpState<["ldapGroupMap", "deleteLdapGroupMap"]>,
    state?: boolean
  ) => void;
};

export const LDAPGroupMapModal = ({ popUp, handlePopUpOpen, handlePopUpToggle }: Props) => {
  const { currentOrg } = useOrganization();

  const { data: ldapConfig } = useGetLDAPConfig(currentOrg?.id ?? "");
  const { data: groups } = useGetOrganizationGroups(currentOrg?.id ?? "");
  const { data: groupMaps, isLoading } = useGetLDAPGroupMaps(ldapConfig?.id ?? "");
  const { mutateAsync: createLDAPGroupMapping, isLoading: createIsLoading } =
    useCreateLDAPGroupMapping();
  const { mutateAsync: deleteLDAPGroupMapping } = useDeleteLDAPGroupMapping();

  const { control, handleSubmit, reset } = useForm<TFormData>({
    resolver: zodResolver(schema),
    defaultValues: {
      ldapGroupCN: "",
      groupSlug: ""
    }
  });

  const onFormSubmit = async ({ groupSlug, ldapGroupCN }: TFormData) => {
    try {
      if (!ldapConfig) return;

      await createLDAPGroupMapping({
        ldapConfigId: ldapConfig.id,
        groupSlug,
        ldapGroupCN
      });

      reset();

      createNotification({
        text: `Successfully added LDAP group mapping for ${ldapGroupCN}`,
        type: "success"
      });
    } catch (err) {
      console.error(err);
      createNotification({
        text: `Failed to add LDAP group mapping for ${ldapGroupCN}`,
        type: "error"
      });
    }
  };

  const onDeleteGroupMapSubmit = async ({
    ldapConfigId,
    ldapGroupMapId,
    ldapGroupCN
  }: {
    ldapConfigId: string;
    ldapGroupMapId: string;
    ldapGroupCN: string;
  }) => {
    try {
      await deleteLDAPGroupMapping({
        ldapConfigId,
        ldapGroupMapId
      });

      handlePopUpToggle("deleteLdapGroupMap", false);

      createNotification({
        text: `Successfully deleted LDAP group mapping ${ldapGroupCN}`,
        type: "success"
      });
    } catch (err) {
      console.error(err);
      createNotification({
        text: `Failed to delete LDAP group mapping ${ldapGroupCN}`,
        type: "error"
      });
    }
  };

  return (
    <Modal
      isOpen={popUp?.ldapGroupMap?.isOpen}
      onOpenChange={(isOpen) => {
        handlePopUpToggle("ldapGroupMap", isOpen);
        reset();
      }}
    >
      <ModalContent title="Manage LDAP Group Mappings">
        <h2 className="mb-4">New Group Mapping</h2>
        <form onSubmit={handleSubmit(onFormSubmit)} className="mb-8">
          <div className="flex">
            <Controller
              control={control}
              name="ldapGroupCN"
              render={({ field, fieldState: { error } }) => (
                <FormControl
                  label="LDAP Group CN"
                  errorText={error?.message}
                  isError={Boolean(error)}
                >
                  <Input {...field} placeholder="Engineering" />
                </FormControl>
              )}
            />
            <Controller
              control={control}
              name="groupSlug"
              defaultValue=""
              render={({ field: { onChange, ...field }, fieldState: { error } }) => (
                <FormControl
                  label="Infisical Group"
                  errorText={error?.message}
                  isError={Boolean(error)}
                  className="ml-4 w-full"
                >
                  <div className="flex">
                    <Select
                      defaultValue={field.value}
                      {...field}
                      onValueChange={(e) => onChange(e)}
                      className="w-full"
                    >
                      {(groups || []).map(({ name, id, slug }) => (
                        <SelectItem value={slug} key={`internal-group-${id}`}>
                          {name}
                        </SelectItem>
                      ))}
                    </Select>
                    <Button className="ml-4" size="sm" type="submit" isLoading={createIsLoading}>
                      Add mapping
                    </Button>
                  </div>
                </FormControl>
              )}
            />
          </div>
        </form>
        <h2 className="mb-4">Group Mappings</h2>
        <TableContainer>
          <Table>
            <THead>
              <Tr>
                <Th>LDAP Group CN</Th>
                <Th>Infisical Group</Th>
                <Th className="w-5" />
              </Tr>
            </THead>
            <TBody>
              {isLoading && <TableSkeleton columns={3} innerKey="ldap-group-maps" />}
              {!isLoading &&
                groupMaps?.map(({ id, ldapGroupCN, group }) => {
                  return (
                    <Tr className="h-10 items-center" key={`ldap-group-map-${id}`}>
                      <Td>{ldapGroupCN}</Td>
                      <Td>{group.name}</Td>
                      <Td>
                        <IconButton
                          onClick={() => {
                            handlePopUpOpen("deleteLdapGroupMap", {
                              ldapGroupMapId: id,
                              ldapGroupCN
                            });
                          }}
                          size="lg"
                          colorSchema="danger"
                          variant="plain"
                          ariaLabel="update"
                        >
                          <FontAwesomeIcon icon={faXmark} />
                        </IconButton>
                      </Td>
                    </Tr>
                  );
                })}
            </TBody>
          </Table>
          {groupMaps?.length === 0 && (
            <EmptyState title="No LDAP group mappings found" icon={faUsers} />
          )}
        </TableContainer>
        <DeleteActionModal
          isOpen={popUp.deleteLdapGroupMap.isOpen}
          title={`Are you sure you want to delete the group mapping for ${
            (popUp?.deleteLdapGroupMap?.data as { ldapGroupCN: string })?.ldapGroupCN || ""
          }?`}
          onChange={(isOpen) => handlePopUpToggle("deleteLdapGroupMap", isOpen)}
          deleteKey="confirm"
          onDeleteApproved={() => {
            const deleteLdapGroupMapData = popUp?.deleteLdapGroupMap?.data as {
              ldapGroupMapId: string;
              ldapGroupCN: string;
            };
            return onDeleteGroupMapSubmit({
              ldapConfigId: ldapConfig?.id ?? "",
              ldapGroupMapId: deleteLdapGroupMapData.ldapGroupMapId,
              ldapGroupCN: deleteLdapGroupMapData.ldapGroupCN
            });
          }}
        />
      </ModalContent>
    </Modal>
  );
};
@ -14,6 +14,8 @@ const LDAPFormSchema = z.object({
  bindDN: z.string().default(""),
  bindPass: z.string().default(""),
  searchBase: z.string().default(""),
  groupSearchBase: z.string().default(""),
  groupSearchFilter: z.string().default(""),
  caCert: z.string().optional()
});
@ -27,7 +29,7 @@ type Props = {

export const LDAPModal = ({ popUp, handlePopUpClose, handlePopUpToggle }: Props) => {
  const { currentOrg } = useOrganization();

  const { mutateAsync: createMutateAsync, isLoading: createIsLoading } = useCreateLDAPConfig();
  const { mutateAsync: updateMutateAsync, isLoading: updateIsLoading } = useUpdateLDAPConfig();
  const { data } = useGetLDAPConfig(currentOrg?.id ?? "");
@ -43,12 +45,22 @@ export const LDAPModal = ({ popUp, handlePopUpClose, handlePopUpToggle }: Props)
        bindDN: data?.bindDN ?? "",
        bindPass: data?.bindPass ?? "",
        searchBase: data?.searchBase ?? "",
        groupSearchBase: data?.groupSearchBase ?? "",
        groupSearchFilter: data?.groupSearchFilter ?? "",
        caCert: data?.caCert ?? ""
      });
    }
  }, [data]);

  const onSSOModalSubmit = async ({ url, bindDN, bindPass, searchBase, caCert }: TLDAPFormData) => {
  const onSSOModalSubmit = async ({
    url,
    bindDN,
    bindPass,
    searchBase,
    groupSearchBase,
    groupSearchFilter,
    caCert
  }: TLDAPFormData) => {
    try {
      if (!currentOrg) return;

@ -60,6 +72,8 @@ export const LDAPModal = ({ popUp, handlePopUpClose, handlePopUpToggle }: Props)
        bindDN,
        bindPass,
        searchBase,
        groupSearchBase,
        groupSearchFilter,
        caCert
      });
    } else {
@ -70,6 +84,8 @@ export const LDAPModal = ({ popUp, handlePopUpClose, handlePopUpToggle }: Props)
        bindDN,
        bindPass,
        searchBase,
        groupSearchBase,
        groupSearchFilter,
        caCert
      });
    }
@ -139,6 +155,32 @@ export const LDAPModal = ({ popUp, handlePopUpClose, handlePopUpToggle }: Props)
        </FormControl>
      )}
    />
    <Controller
      control={control}
      name="groupSearchBase"
      render={({ field, fieldState: { error } }) => (
        <FormControl
          label="Group Search Base / Group DN (Optional)"
          errorText={error?.message}
          isError={Boolean(error)}
        >
          <Input {...field} placeholder="ou=groups,dc=acme,dc=com" />
        </FormControl>
      )}
    />
    <Controller
      control={control}
      name="groupSearchFilter"
      render={({ field, fieldState: { error } }) => (
        <FormControl
          label="Group Filter (Optional)"
          errorText={error?.message}
          isError={Boolean(error)}
        >
          <Input {...field} placeholder="(objectClass=posixGroup)" />
        </FormControl>
      )}
    />
    <Controller
      control={control}
      name="caCert"
@ -10,16 +10,20 @@ import {
import { useCreateLDAPConfig, useGetLDAPConfig, useUpdateLDAPConfig } from "@app/hooks/api";
import { usePopUp } from "@app/hooks/usePopUp";

import { LDAPGroupMapModal } from "./LDAPGroupMapModal";
import { LDAPModal } from "./LDAPModal";

export const OrgLDAPSection = (): JSX.Element => {
  const { currentOrg } = useOrganization();
  const { subscription } = useSubscription();

  const { data } = useGetLDAPConfig(currentOrg?.id ?? "");

  const { mutateAsync } = useUpdateLDAPConfig();
  const { popUp, handlePopUpOpen, handlePopUpClose, handlePopUpToggle } = usePopUp([
    "addLDAP",
    "ldapGroupMap",
    "deleteLdapGroupMap",
    "upgradePlan"
  ] as const);

@ -63,7 +67,9 @@ export const OrgLDAPSection = (): JSX.Element => {
      url: "",
      bindDN: "",
      bindPass: "",
      searchBase: ""
      searchBase: "",
      groupSearchBase: "",
      groupSearchFilter: ""
    });
  }

@ -76,6 +82,15 @@ export const OrgLDAPSection = (): JSX.Element => {
    }
  };

  const openLDAPGroupMapModal = () => {
    if (!subscription?.ldap) {
      handlePopUpOpen("upgradePlan");
      return;
    }

    handlePopUpOpen("ldapGroupMap");
  };

  return (
    <>
      <hr className="border-mineshaft-600" />
@ -92,6 +107,25 @@ export const OrgLDAPSection = (): JSX.Element => {
        </div>
        <p className="text-sm text-mineshaft-300">Manage LDAP authentication configuration</p>
      </div>
      <div className="py-4">
        <div className="mb-2 flex items-center justify-between">
          <h2 className="text-md text-mineshaft-100">LDAP Group Mappings</h2>
          <OrgPermissionCan I={OrgPermissionActions.Create} a={OrgPermissionSubjects.Ldap}>
            {(isAllowed) => (
              <Button
                onClick={openLDAPGroupMapModal}
                colorSchema="secondary"
                isDisabled={!isAllowed}
              >
                Manage
              </Button>
            )}
          </OrgPermissionCan>
        </div>
        <p className="text-sm text-mineshaft-300">
          Manage how LDAP groups are mapped to internal groups in Infisical
        </p>
      </div>
      {data && (
        <div className="py-4">
          <div className="mb-2 flex items-center justify-between">
@ -119,6 +153,11 @@ export const OrgLDAPSection = (): JSX.Element => {
        handlePopUpClose={handlePopUpClose}
        handlePopUpToggle={handlePopUpToggle}
      />
      <LDAPGroupMapModal
        popUp={popUp}
        handlePopUpOpen={handlePopUpOpen}
        handlePopUpToggle={handlePopUpToggle}
      />
      <UpgradePlanModal
        isOpen={popUp.upgradePlan.isOpen}
        onOpenChange={(isOpen) => handlePopUpToggle("upgradePlan", isOpen)}