Mirror of https://github.com/Infisical/infisical.git (synced 2025-07-25 14:07:47 +00:00)
Compare commits: 26 commits, improve-au ... daniel/dep
Commits (SHA1):
7622cac07e, a101602e0a, ca63a7baa7, ff4f15c437, d6c2715852, fc386c0cbc, 263a88379f, 95a4661787, 7e9c846ba3, c141b916d3, 1ae375188b, 22b954b657, 9efeb8926f, 389bbfcade, 0b8427a004, 8a470772e3, 853f3c40bc, fed44f328d, a1d00f2c41, 95a68f2c2d, db7c0c45f6, 82bca03162, 560cd81a1c, 6eae98c1d4, c2ddb7e2fe, 356afd18c4
@@ -0,0 +1,25 @@
+import { Knex } from "knex";
+
+import { TableName } from "../schemas";
+
+export async function up(knex: Knex): Promise<void> {
+  if (await knex.schema.hasTable(TableName.OrgMembership)) {
+    const doesUserIdExist = await knex.schema.hasColumn(TableName.OrgMembership, "userId");
+    const doesOrgIdExist = await knex.schema.hasColumn(TableName.OrgMembership, "orgId");
+    await knex.schema.alterTable(TableName.OrgMembership, (t) => {
+      t.boolean("isActive").notNullable().defaultTo(true);
+      if (doesUserIdExist && doesOrgIdExist) t.index(["userId", "orgId"]);
+    });
+  }
+}
+
+export async function down(knex: Knex): Promise<void> {
+  if (await knex.schema.hasTable(TableName.OrgMembership)) {
+    const doesUserIdExist = await knex.schema.hasColumn(TableName.OrgMembership, "userId");
+    const doesOrgIdExist = await knex.schema.hasColumn(TableName.OrgMembership, "orgId");
+    await knex.schema.alterTable(TableName.OrgMembership, (t) => {
+      t.dropColumn("isActive");
+      if (doesUserIdExist && doesOrgIdExist) t.dropIndex(["userId", "orgId"]);
+    });
+  }
+}
@@ -17,7 +17,8 @@ export const OrgMembershipsSchema = z.object({
   userId: z.string().uuid().nullable().optional(),
   orgId: z.string().uuid(),
   roleId: z.string().uuid().nullable().optional(),
-  projectFavorites: z.string().array().nullable().optional()
+  projectFavorites: z.string().array().nullable().optional(),
+  isActive: z.boolean()
 });

 export type TOrgMemberships = z.infer<typeof OrgMembershipsSchema>;
@@ -29,7 +29,8 @@ export async function seed(knex: Knex): Promise<void> {
       role: OrgMembershipRole.Admin,
       orgId: org.id,
       status: OrgMembershipStatus.Accepted,
-      userId: user.id
+      userId: user.id,
+      isActive: true
     }
   ]);
 }
@@ -39,7 +39,7 @@ export const registerExternalKmsRouter = async (server: FastifyZodProvider) => {
      },
      schema: {
        body: z.object({
-         slug: z.string().min(1).trim().optional(),
+         slug: z.string().min(1).trim().toLowerCase().optional(),
          description: z.string().min(1).trim().optional(),
          provider: ExternalKmsInputSchema
        }),
@@ -75,7 +75,7 @@ export const registerExternalKmsRouter = async (server: FastifyZodProvider) => {
          id: z.string().trim().min(1)
        }),
        body: z.object({
-         slug: z.string().min(1).trim().optional(),
+         slug: z.string().min(1).trim().toLowerCase().optional(),
          description: z.string().min(1).trim().optional(),
          provider: ExternalKmsInputUpdateSchema
        }),
@@ -186,7 +186,13 @@ export const registerScimRouter = async (server: FastifyZodProvider) => {
            })
          ),
          displayName: z.string().trim(),
-         active: z.boolean()
+         active: z.boolean(),
+         groups: z.array(
+           z.object({
+             value: z.string().trim(),
+             display: z.string().trim()
+           })
+         )
        })
      }
    },
@@ -344,7 +350,12 @@ export const registerScimRouter = async (server: FastifyZodProvider) => {
          schemas: z.array(z.string()),
          id: z.string().trim(),
          displayName: z.string().trim(),
-         members: z.array(z.any()).length(0),
+         members: z.array(
+           z.object({
+             value: z.string(),
+             display: z.string()
+           })
+         ),
          meta: z.object({
            resourceType: z.string().trim()
          })
@@ -417,7 +428,7 @@ export const registerScimRouter = async (server: FastifyZodProvider) => {
          displayName: z.string().trim(),
          members: z.array(
            z.object({
-             value: z.string(), // infisical orgMembershipId
+             value: z.string(),
              display: z.string()
            })
          )
@@ -572,7 +583,13 @@ export const registerScimRouter = async (server: FastifyZodProvider) => {
            })
          ),
          displayName: z.string().trim(),
-         active: z.boolean()
+         active: z.boolean(),
+         groups: z.array(
+           z.object({
+             value: z.string().trim(),
+             display: z.string().trim()
+           })
+         )
        })
      }
    },
@@ -52,7 +52,7 @@ export const externalKmsServiceFactory = ({
      actorOrgId
    );
    ForbiddenError.from(permission).throwUnlessCan(OrgPermissionActions.Edit, OrgPermissionSubjects.Settings);
-   const kmsSlug = slug ? slugify(slug) : slugify(alphaNumericNanoId(32));
+   const kmsSlug = slug ? slugify(slug) : slugify(alphaNumericNanoId(8).toLowerCase());

    let sanitizedProviderInput = "";
    switch (provider.type) {
@@ -162,11 +162,60 @@ export const userGroupMembershipDALFactory = (db: TDbClient) => {
     }
   };

+  const findGroupMembershipsByUserIdInOrg = async (userId: string, orgId: string) => {
+    try {
+      const docs = await db
+        .replicaNode()(TableName.UserGroupMembership)
+        .join(TableName.Groups, `${TableName.UserGroupMembership}.groupId`, `${TableName.Groups}.id`)
+        .join(TableName.OrgMembership, `${TableName.UserGroupMembership}.userId`, `${TableName.OrgMembership}.userId`)
+        .join(TableName.Users, `${TableName.UserGroupMembership}.userId`, `${TableName.Users}.id`)
+        .where(`${TableName.UserGroupMembership}.userId`, userId)
+        .where(`${TableName.Groups}.orgId`, orgId)
+        .select(
+          db.ref("id").withSchema(TableName.UserGroupMembership),
+          db.ref("groupId").withSchema(TableName.UserGroupMembership),
+          db.ref("name").withSchema(TableName.Groups).as("groupName"),
+          db.ref("id").withSchema(TableName.OrgMembership).as("orgMembershipId"),
+          db.ref("firstName").withSchema(TableName.Users).as("firstName"),
+          db.ref("lastName").withSchema(TableName.Users).as("lastName")
+        );
+
+      return docs;
+    } catch (error) {
+      throw new DatabaseError({ error, name: "Find group memberships by user id in org" });
+    }
+  };
+
+  const findGroupMembershipsByGroupIdInOrg = async (groupId: string, orgId: string) => {
+    try {
+      const docs = await db
+        .replicaNode()(TableName.UserGroupMembership)
+        .join(TableName.Groups, `${TableName.UserGroupMembership}.groupId`, `${TableName.Groups}.id`)
+        .join(TableName.OrgMembership, `${TableName.UserGroupMembership}.userId`, `${TableName.OrgMembership}.userId`)
+        .join(TableName.Users, `${TableName.UserGroupMembership}.userId`, `${TableName.Users}.id`)
+        .where(`${TableName.Groups}.id`, groupId)
+        .where(`${TableName.Groups}.orgId`, orgId)
+        .select(
+          db.ref("id").withSchema(TableName.UserGroupMembership),
+          db.ref("groupId").withSchema(TableName.UserGroupMembership),
+          db.ref("name").withSchema(TableName.Groups).as("groupName"),
+          db.ref("id").withSchema(TableName.OrgMembership).as("orgMembershipId"),
+          db.ref("firstName").withSchema(TableName.Users).as("firstName"),
+          db.ref("lastName").withSchema(TableName.Users).as("lastName")
+        );
+      return docs;
+    } catch (error) {
+      throw new DatabaseError({ error, name: "Find group memberships by group id in org" });
+    }
+  };
+
   return {
     ...userGroupMembershipOrm,
     filterProjectsByUserMembership,
     findUserGroupMembershipsInProject,
     findGroupMembersNotInProject,
-    deletePendingUserGroupMembershipsByUserIds
+    deletePendingUserGroupMembershipsByUserIds,
+    findGroupMembershipsByUserIdInOrg,
+    findGroupMembershipsByGroupIdInOrg
   };
 };
@@ -449,7 +449,8 @@ export const ldapConfigServiceFactory = ({
            userId: userAlias.userId,
            orgId,
            role: OrgMembershipRole.Member,
-           status: OrgMembershipStatus.Accepted
+           status: OrgMembershipStatus.Accepted,
+           isActive: true
          },
          tx
        );
@@ -534,7 +535,8 @@ export const ldapConfigServiceFactory = ({
            inviteEmail: email,
            orgId,
            role: OrgMembershipRole.Member,
-           status: newUser.isAccepted ? OrgMembershipStatus.Accepted : OrgMembershipStatus.Invited // if user is fully completed, then set status to accepted, otherwise set it to invited so we can update it later
+           status: newUser.isAccepted ? OrgMembershipStatus.Accepted : OrgMembershipStatus.Invited, // if user is fully completed, then set status to accepted, otherwise set it to invited so we can update it later
+           isActive: true
          },
          tx
        );
@@ -193,7 +193,8 @@ export const oidcConfigServiceFactory = ({
            inviteEmail: email,
            orgId,
            role: OrgMembershipRole.Member,
-           status: foundUser.isAccepted ? OrgMembershipStatus.Accepted : OrgMembershipStatus.Invited // if user is fully completed, then set status to accepted, otherwise set it to invited so we can update it later
+           status: foundUser.isAccepted ? OrgMembershipStatus.Accepted : OrgMembershipStatus.Invited, // if user is fully completed, then set status to accepted, otherwise set it to invited so we can update it later
+           isActive: true
          },
          tx
        );
@@ -266,7 +267,8 @@ export const oidcConfigServiceFactory = ({
            inviteEmail: email,
            orgId,
            role: OrgMembershipRole.Member,
-           status: newUser.isAccepted ? OrgMembershipStatus.Accepted : OrgMembershipStatus.Invited // if user is fully completed, then set status to accepted, otherwise set it to invited so we can update it later
+           status: newUser.isAccepted ? OrgMembershipStatus.Accepted : OrgMembershipStatus.Invited, // if user is fully completed, then set status to accepted, otherwise set it to invited so we can update it later
+           isActive: true
          },
          tx
        );
@@ -370,7 +370,8 @@ export const samlConfigServiceFactory = ({
            inviteEmail: email,
            orgId,
            role: OrgMembershipRole.Member,
-           status: foundUser.isAccepted ? OrgMembershipStatus.Accepted : OrgMembershipStatus.Invited // if user is fully completed, then set status to accepted, otherwise set it to invited so we can update it later
+           status: foundUser.isAccepted ? OrgMembershipStatus.Accepted : OrgMembershipStatus.Invited, // if user is fully completed, then set status to accepted, otherwise set it to invited so we can update it later
+           isActive: true
          },
          tx
        );
@@ -457,7 +458,8 @@ export const samlConfigServiceFactory = ({
            inviteEmail: email,
            orgId,
            role: OrgMembershipRole.Member,
-           status: newUser.isAccepted ? OrgMembershipStatus.Accepted : OrgMembershipStatus.Invited // if user is fully completed, then set status to accepted, otherwise set it to invited so we can update it later
+           status: newUser.isAccepted ? OrgMembershipStatus.Accepted : OrgMembershipStatus.Invited, // if user is fully completed, then set status to accepted, otherwise set it to invited so we can update it later
+           isActive: true
          },
          tx
        );
@@ -32,12 +32,19 @@ export const parseScimFilter = (filterToParse: string | undefined) => {
   return { [attributeName]: parsedValue.replace(/"/g, "") };
 };

+export function extractScimValueFromPath(path: string): string | null {
+  const regex = /members\[value eq "([^"]+)"\]/;
+  const match = path.match(regex);
+  return match ? match[1] : null;
+}
+
 export const buildScimUser = ({
   orgMembershipId,
   username,
   email,
   firstName,
   lastName,
+  groups = [],
   active
 }: {
   orgMembershipId: string;
@@ -45,6 +52,10 @@ export const buildScimUser = ({
   email?: string | null;
   firstName: string;
   lastName: string;
+  groups?: {
+    value: string;
+    display: string;
+  }[];
   active: boolean;
 }): TScimUser => {
   const scimUser = {
@@ -67,7 +78,7 @@ export const buildScimUser = ({
         ]
       : [],
     active,
-    groups: [],
+    groups,
     meta: {
       resourceType: "User",
       location: null
@@ -9,6 +9,7 @@ import { TUserGroupMembershipDALFactory } from "@app/ee/services/group/user-grou
 import { TScimDALFactory } from "@app/ee/services/scim/scim-dal";
 import { getConfig } from "@app/lib/config/env";
 import { BadRequestError, ScimRequestError, UnauthorizedError } from "@app/lib/errors";
+import { logger } from "@app/lib/logger";
 import { alphaNumericNanoId } from "@app/lib/nanoid";
 import { TOrgPermission } from "@app/lib/types";
 import { AuthTokenType } from "@app/services/auth/auth-type";
@@ -30,7 +31,14 @@ import { UserAliasType } from "@app/services/user-alias/user-alias-types";
 import { TLicenseServiceFactory } from "../license/license-service";
 import { OrgPermissionActions, OrgPermissionSubjects } from "../permission/org-permission";
 import { TPermissionServiceFactory } from "../permission/permission-service";
-import { buildScimGroup, buildScimGroupList, buildScimUser, buildScimUserList, parseScimFilter } from "./scim-fns";
+import {
+  buildScimGroup,
+  buildScimGroupList,
+  buildScimUser,
+  buildScimUserList,
+  extractScimValueFromPath,
+  parseScimFilter
+} from "./scim-fns";
 import {
   TCreateScimGroupDTO,
   TCreateScimTokenDTO,
@@ -44,6 +52,7 @@ import {
   TListScimUsers,
   TListScimUsersDTO,
   TReplaceScimUserDTO,
+  TScimGroup,
   TScimTokenJwtPayload,
   TUpdateScimGroupNamePatchDTO,
   TUpdateScimGroupNamePutDTO,
@@ -61,7 +70,7 @@ type TScimServiceFactoryDep = {
     TOrgDALFactory,
     "createMembership" | "findById" | "findMembership" | "deleteMembershipById" | "transaction" | "updateMembershipById"
   >;
-  orgMembershipDAL: Pick<TOrgMembershipDALFactory, "find" | "findOne" | "create" | "updateById">;
+  orgMembershipDAL: Pick<TOrgMembershipDALFactory, "find" | "findOne" | "create" | "updateById" | "findById">;
   projectDAL: Pick<TProjectDALFactory, "find" | "findProjectGhostUser">;
   projectMembershipDAL: Pick<TProjectMembershipDALFactory, "find" | "delete" | "findProjectMembershipsByUserId">;
   groupDAL: Pick<
@@ -71,7 +80,13 @@ type TScimServiceFactoryDep = {
   groupProjectDAL: Pick<TGroupProjectDALFactory, "find">;
   userGroupMembershipDAL: Pick<
     TUserGroupMembershipDALFactory,
-    "find" | "transaction" | "insertMany" | "filterProjectsByUserMembership" | "delete"
+    | "find"
+    | "transaction"
+    | "insertMany"
+    | "filterProjectsByUserMembership"
+    | "delete"
+    | "findGroupMembershipsByUserIdInOrg"
+    | "findGroupMembershipsByGroupIdInOrg"
   >;
   projectKeyDAL: Pick<TProjectKeyDALFactory, "find" | "findLatestProjectKey" | "insertMany" | "delete">;
   projectBotDAL: Pick<TProjectBotDALFactory, "findOne">;
@@ -197,14 +212,14 @@ export const scimServiceFactory = ({
      findOpts
    );

-   const scimUsers = users.map(({ id, externalId, username, firstName, lastName, email }) =>
+   const scimUsers = users.map(({ id, externalId, username, firstName, lastName, email, isActive }) =>
      buildScimUser({
        orgMembershipId: id ?? "",
        username: externalId ?? username,
        firstName: firstName ?? "",
        lastName: lastName ?? "",
        email,
-       active: true
+       active: isActive
      })
    );

@@ -240,13 +255,22 @@ export const scimServiceFactory = ({
        status: 403
      });

+   const groupMembershipsInOrg = await userGroupMembershipDAL.findGroupMembershipsByUserIdInOrg(
+     membership.userId,
+     orgId
+   );
+
    return buildScimUser({
      orgMembershipId: membership.id,
      username: membership.externalId ?? membership.username,
      email: membership.email ?? "",
      firstName: membership.firstName as string,
      lastName: membership.lastName as string,
-     active: true
+     active: membership.isActive,
+     groups: groupMembershipsInOrg.map((group) => ({
+       value: group.groupId,
+       display: group.groupName
+     }))
    });
  };

@@ -296,7 +320,8 @@ export const scimServiceFactory = ({
          inviteEmail: email,
          orgId,
          role: OrgMembershipRole.Member,
-         status: user.isAccepted ? OrgMembershipStatus.Accepted : OrgMembershipStatus.Invited // if user is fully completed, then set status to accepted, otherwise set it to invited so we can update it later
+         status: user.isAccepted ? OrgMembershipStatus.Accepted : OrgMembershipStatus.Invited, // if user is fully completed, then set status to accepted, otherwise set it to invited so we can update it later
+         isActive: true
        },
        tx
      );
@@ -364,7 +389,8 @@ export const scimServiceFactory = ({
          inviteEmail: email,
          orgId,
          role: OrgMembershipRole.Member,
-         status: user.isAccepted ? OrgMembershipStatus.Accepted : OrgMembershipStatus.Invited // if user is fully completed, then set status to accepted, otherwise set it to invited so we can update it later
+         status: user.isAccepted ? OrgMembershipStatus.Accepted : OrgMembershipStatus.Invited, // if user is fully completed, then set status to accepted, otherwise set it to invited so we can update it later
+         isActive: true
        },
        tx
      );
@@ -401,7 +427,7 @@ export const scimServiceFactory = ({
      firstName: createdUser.firstName as string,
      lastName: createdUser.lastName as string,
      email: createdUser.email ?? "",
-     active: true
+     active: createdOrgMembership.isActive
    });
  };

@@ -445,14 +471,8 @@ export const scimServiceFactory = ({
    });

    if (!active) {
-     await deleteOrgMembershipFn({
-       orgMembershipId: membership.id,
-       orgId: membership.orgId,
-       orgDAL,
-       projectMembershipDAL,
-       projectKeyDAL,
-       userAliasDAL,
-       licenseService
+     await orgMembershipDAL.updateById(membership.id, {
+       isActive: false
      });
    }

@@ -491,17 +511,14 @@ export const scimServiceFactory = ({
        status: 403
      });

-   if (!active) {
-     await deleteOrgMembershipFn({
-       orgMembershipId: membership.id,
-       orgId: membership.orgId,
-       orgDAL,
-       projectMembershipDAL,
-       projectKeyDAL,
-       userAliasDAL,
-       licenseService
-     });
-   }
+   await orgMembershipDAL.updateById(membership.id, {
+     isActive: active
+   });
+
+   const groupMembershipsInOrg = await userGroupMembershipDAL.findGroupMembershipsByUserIdInOrg(
+     membership.userId,
+     orgId
+   );

    return buildScimUser({
      orgMembershipId: membership.id,
@@ -509,7 +526,11 @@ export const scimServiceFactory = ({
      email: membership.email,
      firstName: membership.firstName as string,
      lastName: membership.lastName as string,
-     active
+     active,
+     groups: groupMembershipsInOrg.map((group) => ({
+       value: group.groupId,
+       display: group.groupName
+     }))
    });
  };

@@ -577,13 +598,20 @@ export const scimServiceFactory = ({
      }
    );

-   const scimGroups = groups.map((group) =>
-     buildScimGroup({
+   const scimGroups: TScimGroup[] = [];
+
+   for await (const group of groups) {
+     const members = await userGroupMembershipDAL.findGroupMembershipsByGroupIdInOrg(group.id, orgId);
+     const scimGroup = buildScimGroup({
        groupId: group.id,
        name: group.name,
-       members: [] // does this need to be populated?
-     })
-   );
+       members: members.map((member) => ({
+         value: member.orgMembershipId,
+         display: `${member.firstName ?? ""} ${member.lastName ?? ""}`
+       }))
+     });
+     scimGroups.push(scimGroup);
+   }

    return buildScimGroupList({
      scimGroups,
@@ -860,28 +888,43 @@ export const scimServiceFactory = ({
          break;
        }
        case "add": {
-         const orgMemberships = await orgMembershipDAL.find({
-           $in: {
-             id: operation.value.map((member) => member.value)
-           }
-         });
-
-         await addUsersToGroupByUserIds({
-           group,
-           userIds: orgMemberships.map((membership) => membership.userId as string),
-           userDAL,
-           userGroupMembershipDAL,
-           orgDAL,
-           groupProjectDAL,
-           projectKeyDAL,
-           projectDAL,
-           projectBotDAL
-         });
+         try {
+           const orgMemberships = await orgMembershipDAL.find({
+             $in: {
+               id: operation.value.map((member) => member.value)
+             }
+           });
+
+           await addUsersToGroupByUserIds({
+             group,
+             userIds: orgMemberships.map((membership) => membership.userId as string),
+             userDAL,
+             userGroupMembershipDAL,
+             orgDAL,
+             groupProjectDAL,
+             projectKeyDAL,
+             projectDAL,
+             projectBotDAL
+           });
+         } catch {
+           logger.info("Repeat SCIM user-group add operation");
+         }

          break;
        }
        case "remove": {
-         // TODO
+         const orgMembershipId = extractScimValueFromPath(operation.path);
+         if (!orgMembershipId) throw new ScimRequestError({ detail: "Invalid path value", status: 400 });
+         const orgMembership = await orgMembershipDAL.findById(orgMembershipId);
+         if (!orgMembership) throw new ScimRequestError({ detail: "Org Membership Not Found", status: 400 });
+         await removeUsersFromGroupByUserIds({
+           group,
+           userIds: [orgMembership.userId as string],
+           userDAL,
+           userGroupMembershipDAL,
+           groupProjectDAL,
+           projectKeyDAL
+         });
          break;
        }
        default: {
@@ -893,10 +936,15 @@ export const scimServiceFactory = ({
      }
    }

+   const members = await userGroupMembershipDAL.findGroupMembershipsByGroupIdInOrg(group.id, orgId);
+
    return buildScimGroup({
      groupId: group.id,
      name: group.name,
-     members: []
+     members: members.map((member) => ({
+       value: member.orgMembershipId,
+       display: `${member.firstName ?? ""} ${member.lastName ?? ""}`
+     }))
    });
  };

@@ -158,7 +158,10 @@ export type TScimUser = {
     type: string;
   }[];
   active: boolean;
-  groups: string[];
+  groups: {
+    value: string;
+    display: string;
+  }[];
   meta: {
     resourceType: string;
     location: null;
@@ -345,7 +345,7 @@ export const registerRoutes = async (
     permissionService,
     secretApprovalPolicyDAL
   });
-  const tokenService = tokenServiceFactory({ tokenDAL: authTokenDAL, userDAL });
+  const tokenService = tokenServiceFactory({ tokenDAL: authTokenDAL, userDAL, orgMembershipDAL });

   const samlService = samlConfigServiceFactory({
     permissionService,
@@ -4,7 +4,8 @@ import bcrypt from "bcrypt";

 import { TAuthTokens, TAuthTokenSessions } from "@app/db/schemas";
 import { getConfig } from "@app/lib/config/env";
-import { UnauthorizedError } from "@app/lib/errors";
+import { ForbiddenRequestError, UnauthorizedError } from "@app/lib/errors";
+import { TOrgMembershipDALFactory } from "@app/services/org-membership/org-membership-dal";

 import { AuthModeJwtTokenPayload } from "../auth/auth-type";
 import { TUserDALFactory } from "../user/user-dal";
@@ -14,6 +15,7 @@ import { TCreateTokenForUserDTO, TIssueAuthTokenDTO, TokenType, TValidateTokenFo
 type TAuthTokenServiceFactoryDep = {
   tokenDAL: TTokenDALFactory;
   userDAL: Pick<TUserDALFactory, "findById" | "transaction">;
+  orgMembershipDAL: Pick<TOrgMembershipDALFactory, "findOne">;
 };

 export type TAuthTokenServiceFactory = ReturnType<typeof tokenServiceFactory>;
@@ -67,7 +69,7 @@ export const getTokenConfig = (tokenType: TokenType) => {
   }
 };

-export const tokenServiceFactory = ({ tokenDAL, userDAL }: TAuthTokenServiceFactoryDep) => {
+export const tokenServiceFactory = ({ tokenDAL, userDAL, orgMembershipDAL }: TAuthTokenServiceFactoryDep) => {
   const createTokenForUser = async ({ type, userId, orgId }: TCreateTokenForUserDTO) => {
     const { token, ...tkCfg } = getTokenConfig(type);
     const appCfg = getConfig();
@@ -154,6 +156,16 @@ export const tokenServiceFactory = ({ tokenDAL, userDAL }: TAuthTokenServiceFact
     const user = await userDAL.findById(session.userId);
     if (!user || !user.isAccepted) throw new UnauthorizedError({ name: "Token user not found" });

+    if (token.organizationId) {
+      const orgMembership = await orgMembershipDAL.findOne({
+        userId: user.id,
+        orgId: token.organizationId
+      });
+
+      if (!orgMembership) throw new ForbiddenRequestError({ message: "User not member of organization" });
+      if (!orgMembership.isActive) throw new ForbiddenRequestError({ message: "User not active in organization" });
+    }
+
     return { user, tokenVersionId: token.tokenVersionId, orgId: token.organizationId };
   };

@@ -56,15 +56,18 @@ export const kmsServiceFactory = ({
     const cipher = symmetricCipherService(SymmetricEncryption.AES_GCM_256);
     const kmsKeyMaterial = randomSecureBytes(32);
     const encryptedKeyMaterial = cipher.encrypt(kmsKeyMaterial, ROOT_ENCRYPTION_KEY);
-    const sanitizedSlug = slug ? slugify(slug) : slugify(alphaNumericNanoId(32));
+    const sanitizedSlug = slug ? slugify(slug) : slugify(alphaNumericNanoId(8).toLowerCase());
     const dbQuery = async (db: Knex) => {
-      const kmsDoc = await kmsDAL.create({
-        slug: sanitizedSlug,
-        orgId,
-        isReserved
-      });
+      const kmsDoc = await kmsDAL.create(
+        {
+          slug: sanitizedSlug,
+          orgId,
+          isReserved
+        },
+        db
+      );

-      const { encryptedKey, ...doc } = await internalKmsDAL.create(
+      await internalKmsDAL.create(
         {
           version: 1,
           encryptedKey: encryptedKeyMaterial,
@@ -73,7 +76,7 @@ export const kmsServiceFactory = ({
         },
         db
       );
-      return doc;
+      return kmsDoc;
     };
     if (tx) return dbQuery(tx);
     const doc = await kmsDAL.transaction(async (tx2) => dbQuery(tx2));
@@ -74,6 +74,7 @@ export const orgDALFactory = (db: TDbClient) => {
         db.ref("role").withSchema(TableName.OrgMembership),
         db.ref("roleId").withSchema(TableName.OrgMembership),
         db.ref("status").withSchema(TableName.OrgMembership),
+        db.ref("isActive").withSchema(TableName.OrgMembership),
         db.ref("email").withSchema(TableName.Users),
         db.ref("username").withSchema(TableName.Users),
         db.ref("firstName").withSchema(TableName.Users),
@@ -204,7 +204,8 @@ export const orgServiceFactory = ({
       orgId,
       userId: user.id,
       role: OrgMembershipRole.Admin,
-      status: OrgMembershipStatus.Accepted
+      status: OrgMembershipStatus.Accepted,
+      isActive: true
     };

     await orgDAL.createMembership(createMembershipData, tx);
@@ -308,7 +309,8 @@ export const orgServiceFactory = ({
          userId,
          orgId: org.id,
          role: OrgMembershipRole.Admin,
-         status: OrgMembershipStatus.Accepted
+         status: OrgMembershipStatus.Accepted,
+         isActive: true
        },
        tx
      );
@@ -457,7 +459,8 @@ export const orgServiceFactory = ({
          inviteEmail: inviteeEmail,
          orgId,
          role: OrgMembershipRole.Member,
-         status: OrgMembershipStatus.Invited
+         status: OrgMembershipStatus.Invited,
+         isActive: true
        },
        tx
      );
@@ -488,7 +491,8 @@ export const orgServiceFactory = ({
          orgId,
          userId: user.id,
          role: OrgMembershipRole.Member,
-         status: OrgMembershipStatus.Invited
+         status: OrgMembershipStatus.Invited,
+         isActive: true
        },
        tx
      );
@@ -10,7 +10,7 @@ require (
 	github.com/fatih/semgroup v1.2.0
 	github.com/gitleaks/go-gitdiff v0.8.0
 	github.com/h2non/filetype v1.1.3
-	github.com/infisical/go-sdk v0.2.0
+	github.com/infisical/go-sdk v0.3.0
 	github.com/mattn/go-isatty v0.0.14
 	github.com/muesli/ansi v0.0.0-20221106050444-61f0cd9a192a
 	github.com/muesli/mango-cobra v1.2.0
@@ -263,8 +263,8 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:
 github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
 github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/infisical/go-sdk v0.2.0 h1:n1/KNdYpeQavSqVwC9BfeV8VRzf3N2X9zO1tzQOSj5Q=
-github.com/infisical/go-sdk v0.2.0/go.mod h1:vHTDVw3k+wfStXab513TGk1n53kaKF2xgLqpw/xvtl4=
+github.com/infisical/go-sdk v0.3.0 h1:Ls71t227F4CWVQWdStcwv8WDyfHe8eRlyAuMRNHsmlQ=
+github.com/infisical/go-sdk v0.3.0/go.mod h1:vHTDVw3k+wfStXab513TGk1n53kaKF2xgLqpw/xvtl4=
 github.com/jedib0t/go-pretty v4.3.0+incompatible h1:CGs8AVhEKg/n9YbUenWmNStRW2PHJzaeDodcfvRAbIo=
 github.com/jedib0t/go-pretty v4.3.0+incompatible/go.mod h1:XemHduiw8R651AF9Pt4FwCTKeG3oo7hrHJAoznj9nag=
 github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -122,6 +122,21 @@ func handleAwsIamAuthLogin(cmd *cobra.Command, infisicalClient infisicalSdk.Infi
 	return infisicalClient.Auth().AwsIamAuthLogin(identityId)
 }

+func handleOidcAuthLogin(cmd *cobra.Command, infisicalClient infisicalSdk.InfisicalClientInterface) (credential infisicalSdk.MachineIdentityCredential, e error) {
+
+	identityId, err := util.GetCmdFlagOrEnv(cmd, "machine-identity-id", util.INFISICAL_MACHINE_IDENTITY_ID_NAME)
+	if err != nil {
+		return infisicalSdk.MachineIdentityCredential{}, err
+	}
+
+	jwt, err := util.GetCmdFlagOrEnv(cmd, "oidc-jwt", util.INFISICAL_OIDC_AUTH_JWT_NAME)
+	if err != nil {
+		return infisicalSdk.MachineIdentityCredential{}, err
+	}
+
+	return infisicalClient.Auth().OidcAuthLogin(identityId, jwt)
+}
+
 func formatAuthMethod(authMethod string) string {
 	return strings.ReplaceAll(authMethod, "-", " ")
 }
@@ -257,6 +272,7 @@ var loginCmd = &cobra.Command{
 			util.AuthStrategy.GCP_ID_TOKEN_AUTH: handleGcpIdTokenAuthLogin,
 			util.AuthStrategy.GCP_IAM_AUTH:      handleGcpIamAuthLogin,
 			util.AuthStrategy.AWS_IAM_AUTH:      handleAwsIamAuthLogin,
+			util.AuthStrategy.OIDC_AUTH:         handleOidcAuthLogin,
 		}

 		credential, err := authStrategies[strategy](cmd, infisicalClient)
@@ -456,6 +472,7 @@ func init() {
 	loginCmd.Flags().String("machine-identity-id", "", "machine identity id for kubernetes, azure, gcp-id-token, gcp-iam, and aws-iam auth methods")
 	loginCmd.Flags().String("service-account-token-path", "", "service account token path for kubernetes auth")
 	loginCmd.Flags().String("service-account-key-file-path", "", "service account key file path for GCP IAM auth")
+	loginCmd.Flags().String("oidc-jwt", "", "JWT for OIDC authentication")
 }

 func DomainOverridePrompt() (bool, error) {
@@ -616,7 +633,7 @@ func getFreshUserCredentials(email string, password string) (*api.GetLoginOneV2R
 	loginTwoResponseResult, err := api.CallLogin2V2(httpClient, api.GetLoginTwoV2Request{
 		Email:       email,
 		ClientProof: hex.EncodeToString(srpM1),
 		Password:    password,
 	})

 	if err != nil {
@@ -9,6 +9,7 @@ var AuthStrategy = struct {
 	GCP_ID_TOKEN_AUTH AuthStrategyType
 	GCP_IAM_AUTH      AuthStrategyType
 	AWS_IAM_AUTH      AuthStrategyType
+	OIDC_AUTH         AuthStrategyType
 }{
 	UNIVERSAL_AUTH:  "universal-auth",
 	KUBERNETES_AUTH: "kubernetes",
@@ -16,6 +17,7 @@ var AuthStrategy = struct {
 	GCP_ID_TOKEN_AUTH: "gcp-id-token",
 	GCP_IAM_AUTH:      "gcp-iam",
 	AWS_IAM_AUTH:      "aws-iam",
+	OIDC_AUTH:         "oidc-auth",
 }

 var AVAILABLE_AUTH_STRATEGIES = []AuthStrategyType{
@@ -25,6 +27,7 @@ var AVAILABLE_AUTH_STRATEGIES = []AuthStrategyType{
 	AuthStrategy.GCP_ID_TOKEN_AUTH,
 	AuthStrategy.GCP_IAM_AUTH,
 	AuthStrategy.AWS_IAM_AUTH,
+	AuthStrategy.OIDC_AUTH,
 }

 func IsAuthMethodValid(authMethod string, allowUserAuth bool) (isValid bool, strategy AuthStrategyType) {
@@ -19,6 +19,9 @@ const (
 	// GCP Auth
 	INFISICAL_GCP_IAM_SERVICE_ACCOUNT_KEY_FILE_PATH_NAME = "INFISICAL_GCP_IAM_SERVICE_ACCOUNT_KEY_FILE_PATH"

+	// OIDC Auth
+	INFISICAL_OIDC_AUTH_JWT_NAME = "INFISICAL_OIDC_AUTH_JWT"
+
 	// Generic env variable used for auth methods that require a machine identity ID
 	INFISICAL_MACHINE_IDENTITY_ID_NAME = "INFISICAL_MACHINE_IDENTITY_ID"

@@ -8,7 +8,8 @@ infisical login
 ```

 ### Description
-The CLI uses authentication to verify your identity. When you enter the correct email and password for your account, a token is generated and saved in your system Keyring to allow you to make future interactions with the CLI.
+
+The CLI uses authentication to verify your identity. When you enter the correct email and password for your account, a token is generated and saved in your system Keyring to allow you to make future interactions with the CLI.

 To change where the login credentials are stored, visit the [vaults command](./vault).

@@ -17,12 +18,12 @@ If you have added multiple users, you can switch between the users by using the
 <Info>
   When you authenticate with **any other method than `user`**, an access token will be printed to the console upon successful login. This token can be used to authenticate with the Infisical API and the CLI by passing it in the `--token` flag when applicable.

   Use flag `--plain` along with `--silent` to print only the token in plain text when using a machine identity auth method.

 </Info>

 ### Flags

 The login command supports a number of flags that you can use for different authentication methods. Below is a list of all the flags that can be used with the login command.

 <AccordionGroup>
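As a minimal illustration of the `--plain`/`--silent` combination described in the hunk above (assuming a universal-auth machine identity; the client ID and secret values are placeholders):

```bash
# Print only the access token and keep it for subsequent CLI/API calls
export INFISICAL_TOKEN=$(infisical login --method=universal-auth --client-id=<client-id> --client-secret=<client-secret> --plain --silent)
```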
@@ -52,6 +53,7 @@ The login command supports a number of flags that you can use for different auth
     <Tip>
       The `client-id` flag can be substituted with the `INFISICAL_UNIVERSAL_AUTH_CLIENT_ID` environment variable.
     </Tip>
+
   </Accordion>
   <Accordion title="--client-secret">
     ```bash
@@ -63,6 +65,7 @@ The login command supports a number of flags that you can use for different auth
     <Tip>
       The `client-secret` flag can be substituted with the `INFISICAL_UNIVERSAL_AUTH_CLIENT_SECRET` environment variable.
     </Tip>
+
   </Accordion>
   <Accordion title="--machine-identity-id">
     ```bash
@@ -75,6 +78,7 @@ The login command supports a number of flags that you can use for different auth
     <Tip>
       The `machine-identity-id` flag can be substituted with the `INFISICAL_MACHINE_IDENTITY_ID` environment variable.
     </Tip>
+
   </Accordion>
   <Accordion title="--service-account-token-path">
     ```bash
@@ -88,6 +92,7 @@ The login command supports a number of flags that you can use for different auth
     <Tip>
       The `service-account-token-path` flag can be substituted with the `INFISICAL_KUBERNETES_SERVICE_ACCOUNT_TOKEN_PATH` environment variable.
     </Tip>
+
   </Accordion>
   <Accordion title="--service-account-key-file-path">
     ```bash
@@ -100,9 +105,23 @@ The login command supports a number of flags that you can use for different auth
     <Tip>
       The `service-account-key-path` flag can be substituted with the `INFISICAL_GCP_IAM_SERVICE_ACCOUNT_KEY_FILE_PATH` environment variable.
     </Tip>
+
   </Accordion>
 </AccordionGroup>

+<Accordion title="--oidc-jwt">
+  ```bash
+  infisical login --oidc-jwt=<oidc-jwt-token>
+  ```
+
+  #### Description
+  The JWT provided by an identity provider for OIDC authentication.
+
+  <Tip>
+    The `oidc-jwt` flag can be substituted with the `INFISICAL_OIDC_AUTH_JWT` environment variable.
+  </Tip>
+
+</Accordion>
+
 ### Authentication Methods

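A hedged sketch of the environment-variable substitution mentioned in the tip above (the JWT and machine identity ID values are placeholders):

```bash
# Supply the OIDC JWT via the environment instead of the --oidc-jwt flag
export INFISICAL_OIDC_AUTH_JWT=<oidc-jwt-token>
infisical login --method=oidc-auth --machine-identity-id=<machine-identity-id>
```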
@@ -121,6 +140,7 @@ The Infisical CLI supports multiple authentication methods. Below are the availa
         Your machine identity client secret.
       </ParamField>
     </Expandable>
+
   </ParamField>

   <Steps>
@@ -134,6 +154,7 @@ The Infisical CLI supports multiple authentication methods. Below are the availa
       infisical login --method=universal-auth --client-id=<client-id> --client-secret=<client-secret>
       ```
     </Step>
+
   </Steps>
 </Accordion>
 <Accordion title="Native Kubernetes">
@@ -148,6 +169,7 @@ The Infisical CLI supports multiple authentication methods. Below are the availa
         Path to the Kubernetes service account token to use. Default: `/var/run/secrets/kubernetes.io/serviceaccount/token`.
       </ParamField>
     </Expandable>
+
   </ParamField>

   <Steps>
@@ -162,6 +184,7 @@ The Infisical CLI supports multiple authentication methods. Below are the availa
       infisical login --method=kubernetes --machine-identity-id=<machine-identity-id> --service-account-token-path=<service-account-token-path>
       ```
     </Step>
+
   </Steps>

 </Accordion>
@@ -213,6 +236,7 @@ The Infisical CLI supports multiple authentication methods. Below are the availa
       ```
     </Step>
   </Steps>
+
 </Accordion>
 <Accordion title="GCP IAM">
   The GCP IAM method is used to authenticate with Infisical with a GCP service account key.
@@ -235,11 +259,12 @@ The Infisical CLI supports multiple authentication methods. Below are the availa
     <Step title="Obtain an access token">
       Run the `login` command with the following flags to obtain an access token:

       ```bash
       infisical login --method=gcp-iam --machine-identity-id=<machine-identity-id> --service-account-key-file-path=<service-account-key-file-path>
       ```
     </Step>
   </Steps>
+
 </Accordion>
 <Accordion title="Native AWS IAM">
   The AWS IAM method is used to authenticate with Infisical with an AWS IAM role while running in an AWS environment like EC2, Lambda, etc.
@@ -264,10 +289,40 @@ The Infisical CLI supports multiple authentication methods. Below are the availa
       ```
     </Step>
   </Steps>
+
+</Accordion>
+<Accordion title="OIDC Auth">
+  The OIDC Auth method is used to authenticate with Infisical via identity tokens with OIDC.
+
+  <ParamField query="Flags">
+    <Expandable title="properties">
+      <ParamField query="machine-identity-id" type="string" required>
+        Your machine identity ID.
+      </ParamField>
+      <ParamField query="oidc-jwt" type="string" required>
+        The OIDC JWT from the identity provider.
+      </ParamField>
+    </Expandable>
+  </ParamField>
+
+  <Steps>
+    <Step title="Create an OIDC machine identity">
+      To create an OIDC machine identity, follow the step by step guide outlined [here](/documentation/platform/identities/oidc-auth/general).
+    </Step>
+    <Step title="Obtain an access token">
+      Run the `login` command with the following flags to obtain an access token:
+
+      ```bash
+      infisical login --method=oidc-auth --machine-identity-id=<machine-identity-id> --oidc-jwt=<oidc-jwt>
+      ```
+    </Step>
+  </Steps>
+
 </Accordion>
 </AccordionGroup>

 ### Machine Identity Authentication Quick Start

 In this example we'll be using the `universal-auth` method to login to obtain an Infisical access token, which we will then use to fetch secrets with.

 <Steps>
@@ -277,8 +332,8 @@ In this example we'll be using the `universal-auth` method to login to obtain an
       ```

       Now that we've set the `INFISICAL_TOKEN` environment variable, we can use the CLI to interact with Infisical. The CLI will automatically check for the presence of the `INFISICAL_TOKEN` environment variable and use it for authentication.


       Alternatively, if you would rather use the `--token` flag to pass the token directly, you can do so by running the following command:

       ```bash
|
|||||||
The `--recursive`, and `--env` flag is optional and will fetch all secrets in subfolders. The default environment is `dev` if no `--env` flag is provided.
|
The `--recursive`, and `--env` flag is optional and will fetch all secrets in subfolders. The default environment is `dev` if no `--env` flag is provided.
|
||||||
</Info>
|
</Info>
|
||||||
</Step>
|
</Step>
|
||||||
|
|
||||||
</Steps>
|
</Steps>
|
||||||
|
|
||||||
And that's it! Now you're ready to start using the Infisical CLI to interact with your secrets, with the use of Machine Identities.
|
And that's it! Now you're ready to start using the Infisical CLI to interact with your secrets, with the use of Machine Identities.
|
||||||
|
BIN  docs/images/self-hosting/deployment-options/native/ha-stack.png  (new file)
Binary file not shown. After: 67 KiB
Binary file not shown. After: 330 KiB
@@ -217,7 +217,14 @@
            "pages": [
              "self-hosting/overview",
              {
                "group": "Installation methods",
                "group": "Native installation methods",
                "pages": [
                  "self-hosting/deployment-options/native/standalone-binary",
                  "self-hosting/deployment-options/native/high-availability"
                ]
              },
              {
                "group": "Containerized installation methods",
                "pages": [
                  "self-hosting/deployment-options/standalone-infisical",
                  "self-hosting/deployment-options/docker-swarm",
@@ -0,0 +1,520 @@
---
title: "Automatically deploy Infisical with High Availability"
sidebarTitle: "High Availability"
---


# Self-Hosting Infisical with a native High Availability (HA) deployment

This page describes the Infisical architecture designed to provide high availability (HA) and how to deploy Infisical with high availability. The high availability deployment is designed to ensure that Infisical services are always available and can handle service failures gracefully, without causing service disruptions.

<Info>
  This deployment option is currently only available for Debian-based nodes (e.g., Ubuntu, Debian).
  We plan on adding support for other operating systems in the future.
</Info>

## High availability architecture

| Service                     | Nodes | Configuration         | GCP           | AWS        |
|-----------------------------|-------|-----------------------|---------------|------------|
| External load balancer$^1$  | 1     | 4 vCPU, 3.6 GB memory | n1-highcpu-4  | c5n.xlarge |
| Internal load balancer$^2$  | 1     | 4 vCPU, 3.6 GB memory | n1-highcpu-4  | c5n.xlarge |
| Etcd cluster$^3$            | 3     | 4 vCPU, 3.6 GB memory | n1-highcpu-4  | c5n.xlarge |
| PostgreSQL$^4$              | 3     | 2 vCPU, 7.5 GB memory | n1-standard-2 | m5.large   |
| Sentinel$^4$                | 3     | 2 vCPU, 7.5 GB memory | n1-standard-2 | m5.large   |
| Redis$^4$                   | 3     | 2 vCPU, 7.5 GB memory | n1-standard-2 | m5.large   |
| Infisical Core              | 3     | 8 vCPU, 7.2 GB memory | n1-highcpu-8  | c5.2xlarge |

**Footnotes:**
1. External load balancer: If you wish to have multiple instances of the internal load balancer, you will need to use an external load balancer to distribute incoming traffic across multiple internal load balancers.
   Using multiple internal load balancers is recommended for high-traffic environments. In the following guide we will use a single internal load balancer, as external load balancing falls outside the scope of this guide.
2. Internal load balancer: The internal load balancer (a HAProxy instance) is used to distribute incoming traffic across multiple Infisical Core instances, Postgres nodes, and Redis nodes. The internal load balancer exposes a set of ports _(80 for Infisical, 5000 for read/write Postgres, 5001 for read-only Postgres, and 6379 for Redis)_. Where these ports route to is determined by the internal load balancer based on the availability and health of the service nodes.
   The internal load balancer is only accessible from within the same network, and is not exposed to the public internet.
3. Etcd cluster: Etcd is a distributed key-value store used to store and distribute data between the PostgreSQL nodes. Etcd is dependent on high disk I/O performance, therefore it is highly recommended to use highly performant SSD disks for the Etcd nodes, with _at least_ 80GB of disk space.
4. The Redis and PostgreSQL nodes will automatically be configured for high availability and used in your Infisical Core instances. However, you can optionally choose to bring your own database (BYOD), and skip these nodes. See more on how to [provide your own databases](#provide-your-own-databases).

<Info>
  For all services that require multiple nodes, it is recommended to deploy them across multiple availability zones (AZs) to ensure high availability and fault tolerance. This will help prevent service disruptions in the event of an AZ failure.
</Info>


The image above shows how a high availability deployment of Infisical is structured. In this example, an external load balancer is used to distribute incoming traffic across multiple internal load balancers. The external load balancer isn't required, and it will require additional configuration to set up.

### Fault Tolerance
This setup provides N+1 redundancy, meaning it can tolerate the failure of any single node without service interruption.

## Ansible
### What is Ansible
Ansible is an open-source automation tool that simplifies application deployment, configuration management, and task automation.
At Infisical, we use Ansible to automate the deployment of Infisical services. The Ansible roles are designed to make it easy to deploy Infisical services in a high availability environment.

### Installing Ansible
<Steps>
  <Step title="Install using the pipx Python package manager">
    ```bash
    pipx install --include-deps ansible
    ```
  </Step>
  <Step title="Verify the installation">
    ```bash
    ansible --version
    ```
  </Step>
</Steps>


### Understanding Ansible Concepts

* Inventory _(inventory.ini)_: A file that lists your target hosts.
* Playbook _(playbook.yml)_: YAML file containing a set of tasks to be executed on hosts.
* Roles: Reusable units of organization for playbooks. Roles are used to group tasks together in a structured and reusable manner.


### Basic Ansible Commands
Running a playbook with an inventory file:
```bash
ansible-playbook -i inventory.ini playbook.yml
```

This is how you would run the playbook containing the roles for setting up Infisical in a high availability environment.

### Installing the Infisical High Availability Deployment Ansible Role
The Infisical Ansible role is available on Ansible Galaxy. You can install the role by running the following command:
```bash
ansible-galaxy collection install infisical.infisical_core_ha_deployment
```


## Set up components
1. External load balancer (optional, and not covered in this guide)
2. [Configure Etcd cluster](#configure-etcd-cluster)
3. [Configure PostgreSQL database](#configure-postgresql-database)
4. [Configure Redis/Sentinel](#configure-redis-and-sentinel)
5. [Configure Infisical Core](#configure-infisical-core)


The servers start on the same 52.1.0.0/24 private network range, and can connect to each other freely on these addresses.

The following list includes descriptions of each server and its assigned IP (a quick connectivity check example follows the list):

52.1.0.1: External Load Balancer
52.1.0.2: Internal Load Balancer
52.1.0.3: Etcd 1
52.1.0.4: Etcd 2
52.1.0.5: Etcd 3
52.1.0.6: PostgreSQL 1
52.1.0.7: PostgreSQL 2
52.1.0.8: PostgreSQL 3
52.1.0.9: Redis 1
52.1.0.10: Redis 2
52.1.0.11: Redis 3
52.1.0.12: Sentinel 1
52.1.0.13: Sentinel 2
52.1.0.14: Sentinel 3
52.1.0.15: Infisical Core 1
52.1.0.16: Infisical Core 2
52.1.0.17: Infisical Core 3
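
Before running any of the roles, it can be worth confirming that Ansible can actually reach every host in the inventory. The commands below are a minimal sketch that assumes the `inventory.ini` layout used throughout this guide (group names such as `etcd` are taken from it).

```bash
# Confirm SSH connectivity to every host defined in inventory.ini
ansible -i inventory.ini all -m ping

# Optionally target a single group first, e.g. only the etcd nodes
ansible -i inventory.ini etcd -m ping
```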


### Configure Etcd cluster

Configuring the ETCD cluster is the first step in setting up a high availability deployment of Infisical.
The ETCD cluster is used to store and distribute data between the PostgreSQL nodes. The ETCD cluster is a distributed key-value store that is highly available and fault-tolerant.

```yaml example.playbook.yml
- hosts: all
  gather_facts: true

- name: Set up etcd cluster
  hosts: etcd
  become: true
  collections:
    - infisical.infisical_core_ha_deployment
  roles:
    - role: etcd
```

```ini example.inventory.ini
[etcd]
etcd1 ansible_host=52.1.0.3
etcd2 ansible_host=52.1.0.4
etcd3 ansible_host=52.1.0.5

[etcd:vars]
ansible_user=ubuntu
ansible_ssh_private_key_file=./ssh-key.pem
ansible_ssh_common_args='-o StrictHostKeyChecking=no'
```

### Configure PostgreSQL database

The Postgres role takes a set of parameters that are used to configure your PostgreSQL database.

Make sure to set the following variables in your playbook.yml file:
- `postgres_super_user_password`: The password for the 'postgres' database user.
- `postgres_db_name`: The name of the database that will be created on the leader node and replicated to the secondary nodes.
- `postgres_user`: The name of the user that will be created on the leader node and replicated to the secondary nodes.
- `postgres_user_password`: The password for the user that will be created on the leader node and replicated to the secondary nodes.
- `etcd_hosts`: The list of etcd hosts that the PostgreSQL nodes will use to communicate with etcd. By default you want to keep this value set to `"{{ groups['etcd'] }}"`

```yaml example.playbook.yml
- hosts: all
  gather_facts: true

- name: Set up PostgreSQL with Patroni
  hosts: postgres
  become: true
  collections:
    - infisical.infisical_core_ha_deployment
  roles:
    - role: postgres
      vars:
        postgres_super_user_password: "your-super-user-password"
        postgres_user: infisical-user
        postgres_user_password: "your-password"
        postgres_db_name: infisical-db

        etcd_hosts: "{{ groups['etcd'] }}"
```

```ini example.inventory.ini
[postgres]
postgres1 ansible_host=52.1.0.6
postgres2 ansible_host=52.1.0.7
postgres3 ansible_host=52.1.0.8
```

### Configure Redis and Sentinel

The Redis role takes a single variable as input, which is the redis password.
The Sentinel and Redis hosts will run the same role, therefore we are running the task for both the sentinel and redis hosts, `hosts: redis:sentinel`.

- `redis_password`: The password that will be set for the Redis instance.

```yaml example.playbook.yml
- hosts: all
  gather_facts: true

- name: Setup Redis and Sentinel
  hosts: redis:sentinel
  become: true
  collections:
    - infisical.infisical_core_ha_deployment
  roles:
    - role: redis
      vars:
        redis_password: "REDIS_PASSWORD"
```

```ini example.inventory.ini
[redis]
redis1 ansible_host=52.1.0.9
redis2 ansible_host=52.1.0.10
redis3 ansible_host=52.1.0.11

[sentinel]
sentinel1 ansible_host=52.1.0.12
sentinel2 ansible_host=52.1.0.13
sentinel3 ansible_host=52.1.0.14
```

### Configure Internal Load Balancer

The internal load balancer used is HAProxy. HAProxy will expose a set of ports as listed below. Each port will route to a different service based on the availability and health of the service nodes.

- Port 80: Infisical Core
- Port 5000: Read/Write PostgreSQL
- Port 5001: Read-only PostgreSQL
- Port 6379: Redis
- Port 7000: HAProxy monitoring
These ports will need to be exposed on your network to become accessible from the outside world.

The HAProxy configuration file is generated by the Infisical Core role, and is located at `/etc/haproxy/haproxy.cfg` on your internal load balancer node.

The HAProxy setup comes with a monitoring panel. You have to set the username/password combination for the monitoring panel by setting the `stats_user` and `stats_password` variables in the HAProxy role.


Once the HAProxy role has fully executed, you can monitor your HA setup by navigating to `http://52.1.0.2:7000/haproxy?stats` in your browser.
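
If you prefer the terminal over the browser, the same stats endpoint can be queried directly. This is only a sketch; the address, port, and credentials are the placeholder values used for the HAProxy role in this guide.

```bash
# Query the HAProxy stats page with the credentials set via stats_user/stats_password
# (appending ;csv returns CSV output, which is easier to grep than the HTML page)
curl -u "stats-username:stats-password!" "http://52.1.0.2:7000/haproxy?stats;csv" | head
```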

```ini example.inventory.ini
[haproxy]
internal_lb ansible_host=52.1.0.2
```

```yaml example.playbook.yml
- name: Set up HAProxy
  hosts: haproxy
  become: true
  collections:
    - infisical.infisical_core_ha_deployment
  roles:
    - role: haproxy
      vars:
        stats_user: "stats-username"
        stats_password: "stats-password!"

        postgres_servers: "{{ groups['postgres'] }}"
        infisical_servers: "{{ groups['infisical'] }}"
        redis_servers: "{{ groups['redis'] }}"
```



### Configure Infisical Core

The Infisical Core role will set up your actual Infisical instances.

The `env_vars` variable is used to set the environment variables that Infisical will use. The minimum required environment variables are `ENCRYPTION_KEY` and `AUTH_SECRET`. You can find a list of all available environment variables [here](/docs/self-hosting/configuration/envars#general-platform).
The `DB_CONNECTION_URI` and `REDIS_URL` variables will automatically be set if you're running the full playbook. However, you can choose to set them yourself, and skip the Postgres, etcd, redis/sentinel roles entirely.

<Info>
  If you later need to add new environment variables to your Infisical deployments, it's important you add the variables to **all** your Infisical nodes.<br/>
  You can find the environment file for Infisical at `/etc/infisical/environment`.<br/>
  After editing the environment file, you need to reload the Infisical service by doing `systemctl restart infisical`.
</Info>
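
As a sketch of what that looks like in practice, you would append the variable on every Infisical node and then restart the service; the variable name used below is purely illustrative.

```bash
# Run on EVERY Infisical Core node (52.1.0.15 - 52.1.0.17 in this guide).
# SMTP_HOST is only an illustrative variable name; use whichever variable you actually need.
echo 'SMTP_HOST=smtp.example.com' | sudo tee -a /etc/infisical/environment

# Reload the service so the new environment is picked up
sudo systemctl restart infisical
```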

```yaml example.playbook.yml
- hosts: all
  gather_facts: true

- name: Setup Infisical
  hosts: infisical
  become: true
  collections:
    - infisical.infisical_core_ha_deployment
  roles:
    - role: infisical
      env_vars:
        ENCRYPTION_KEY: "YOUR_ENCRYPTION_KEY" # openssl rand -hex 16
        AUTH_SECRET: "YOUR_AUTH_SECRET" # openssl rand -base64 32
```

```ini example.inventory.ini
[infisical]
infisical1 ansible_host=52.1.0.15
infisical2 ansible_host=52.1.0.16
infisical3 ansible_host=52.1.0.17
```

## Provide your own databases
Bringing your own database is an option using the Infisical Core deployment role.
By bringing your own database, you're able to skip the Etcd, Postgres, and Redis/Sentinel roles entirely.

To bring your own database, you need to set the `DB_CONNECTION_URI` and `REDIS_URL` environment variables in the Infisical Core role.

```yaml example.playbook.yml
- hosts: all
  gather_facts: true

- name: Setup Infisical
  hosts: infisical
  become: true
  collections:
    - infisical.infisical_core_ha_deployment
  roles:
    - role: infisical
      env_vars:
        ENCRYPTION_KEY: "YOUR_ENCRYPTION_KEY" # openssl rand -hex 16
        AUTH_SECRET: "YOUR_AUTH_SECRET" # openssl rand -base64 32
        DB_CONNECTION_URI: "postgres://user:password@localhost:5432/infisical"
        REDIS_URL: "redis://localhost:6379"
```

```ini example.inventory.ini
[infisical]
infisical1 ansible_host=52.1.0.15
infisical2 ansible_host=52.1.0.16
infisical3 ansible_host=52.1.0.17
```

## Full deployment example
To make it easier to get started, we've provided a full deployment example that you can use to deploy Infisical in a high availability environment.

- This deployment does not use an external load balancer.
- You **must** change the environment variables defined in the `playbook.yml` example.
- You have to update the IP addresses in the `inventory.ini` file to match your own network configuration.
- You need to set the SSH key and SSH user in the `inventory.ini` file.

<Steps>
  <Step title="Install Ansible">
    Install Ansible using the pipx Python package manager.
    ```bash
    pipx install --include-deps ansible
    ```

  </Step>
  <Step title="Install the Infisical deployment Ansible Role">
    Install the Infisical deployment role from Ansible Galaxy.
    ```bash
    ansible-galaxy collection install infisical.infisical_core_ha_deployment
    ```
  </Step>
  <Step title="Setup your hosts">

    Create an `inventory.ini` file, and define your hosts and their IP addresses. You can use the example below as a template, and update the IP addresses to match your own network configuration.
    Make sure to set the SSH key and SSH user in the `inventory.ini` file. Please see the example below.

    ```ini example.inventory.ini
    [etcd]
    etcd1 ansible_host=52.1.0.3
    etcd2 ansible_host=52.1.0.4
    etcd3 ansible_host=52.1.0.5

    [postgres]
    postgres1 ansible_host=52.1.0.6
    postgres2 ansible_host=52.1.0.7
    postgres3 ansible_host=52.1.0.8

    [infisical]
    infisical1 ansible_host=52.1.0.15
    infisical2 ansible_host=52.1.0.16
    infisical3 ansible_host=52.1.0.17

    [redis]
    redis1 ansible_host=52.1.0.9
    redis2 ansible_host=52.1.0.10
    redis3 ansible_host=52.1.0.11

    [sentinel]
    sentinel1 ansible_host=52.1.0.12
    sentinel2 ansible_host=52.1.0.13
    sentinel3 ansible_host=52.1.0.14

    [haproxy]
    internal_lb ansible_host=52.1.0.2

    ; This can be defined individually for each host, or globally for all hosts.
    ; In this case the credentials are the same for all hosts, so we define them globally as seen below ([all:vars]).
    [all:vars]
    ansible_user=ubuntu
    ansible_ssh_private_key_file=./your-ssh-key.pem
    ansible_ssh_common_args='-o StrictHostKeyChecking=no'
    ```
  </Step>
  <Step title="Setup your Ansible playbook">
    The Ansible playbook is where you define which roles/tasks to execute on which hosts.

    ```yaml example.playbook.yml
    ---
    # Important: we must gather facts from all hosts prior to running the roles to ensure we have all the information we need.
    - hosts: all
      gather_facts: true

    - name: Set up etcd cluster
      hosts: etcd
      become: true
      collections:
        - infisical.infisical_core_ha_deployment
      roles:
        - role: etcd

    - name: Set up PostgreSQL with Patroni
      hosts: postgres
      become: true
      collections:
        - infisical.infisical_core_ha_deployment
      roles:
        - role: postgres
          vars:
            postgres_super_user_password: "<ENTER_SUPERUSER_PASSWORD>" # Password for the 'postgres' database user

            # A database with these credentials will be created on the leader node, and replicated to the secondary nodes.
            postgres_db_name: <ENTER_DB_NAME>
            postgres_user: <ENTER_DB_USER>
            postgres_user_password: <ENTER_DB_USER_PASSWORD>

            etcd_hosts: "{{ groups['etcd'] }}"

    - name: Setup Redis and Sentinel
      hosts: redis:sentinel
      become: true
      collections:
        - infisical.infisical_core_ha_deployment
      roles:
        - role: redis
          vars:
            redis_password: "<ENTER_REDIS_PASSWORD>"

    - name: Set up HAProxy
      hosts: haproxy
      become: true
      collections:
        - infisical.infisical_core_ha_deployment
      roles:
        - role: haproxy
          vars:
            stats_user: "<ENTER_HAPROXY_STATS_USERNAME>"
            stats_password: "<ENTER_HAPROXY_STATS_PASSWORD>"

            postgres_servers: "{{ groups['postgres'] }}"
            infisical_servers: "{{ groups['infisical'] }}"
            redis_servers: "{{ groups['redis'] }}"

    - name: Setup Infisical
      hosts: infisical
      become: true
      collections:
        - infisical.infisical_core_ha_deployment
      roles:
        - role: infisical
          env_vars:
            ENCRYPTION_KEY: "YOUR_ENCRYPTION_KEY" # openssl rand -hex 16
            AUTH_SECRET: "YOUR_AUTH_SECRET" # openssl rand -base64 32
    ```
  </Step>
  <Step title="Run the Ansible playbook">
    After creating the `playbook.yml` and `inventory.ini` files, you can run the playbook using the following command:
    ```bash
    ansible-playbook -i inventory.ini playbook.yml
    ```

    This step may take upwards of 10 minutes to complete, depending on the number of nodes and the network speed.
    Once the playbook has completed, you should have a fully deployed high availability Infisical environment.

    To access Infisical, you can try navigating to `http://52.1.0.2`, in order to view your newly deployed Infisical instance.
  </Step>
</Steps>


## Post-deployment steps
After deploying Infisical in a high availability environment, you should perform the following post-deployment steps:
- Check your deployment to ensure that all services are running as expected. You can use the HAProxy monitoring panel to check the status of your services (http://52.1.0.2:7000/haproxy?stats)
- Attempt to access the Infisical Core instances to ensure that they are accessible from the internal load balancer (http://52.1.0.2); a quick command-line check is sketched below.
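
A minimal command-line version of these checks, run from a machine inside the 52.1.0.0/24 network, could look like the sketch below (the addresses are the example ones used throughout this guide).

```bash
# Check that Infisical Core responds through the internal load balancer
curl -s -o /dev/null -w "Infisical via HAProxy: HTTP %{http_code}\n" http://52.1.0.2

# Check that the database and Redis frontends answer on the load balancer
# (uses bash's built-in /dev/tcp; 5000 = read/write Postgres, 5001 = read-only Postgres, 6379 = Redis)
for port in 5000 5001 6379; do
  (echo > /dev/tcp/52.1.0.2/$port) 2>/dev/null && echo "port $port open" || echo "port $port closed"
done
```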

A HAProxy stats page indicating success will look like the image below.



## Security Considerations
### Network Security
Secure the network that your instances run on. While this falls outside the scope of Infisical deployment, it's crucial for overall security.
AWS-specific recommendations:

- Use Virtual Private Cloud (VPC) to isolate your infrastructure.
- Configure security groups to restrict inbound and outbound traffic.
- Use Network Access Control Lists (NACLs) for additional network-level security.

<Note>
  Please take note that the Infisical team cannot provide infrastructure support for **free self-hosted** deployments.<br/>If you need help with infrastructure, we recommend upgrading to a [paid plan](https://infisical.com/pricing) which includes infrastructure support.

  You can also join our community [Slack](https://infisical.com/slack) for help and support from the community.
</Note>


### Troubleshooting
<Accordion title="Ansible: Failed to set permissions on the temporary files Ansible needs to create when becoming an unprivileged user">
  If you encounter this issue, please update your ansible config (`ansible.cfg`) file with the following configuration:
  ```ini
  [defaults]
  allow_world_readable_tmpfiles = true
  ```

  You can read more about the solution [here](https://docs.ansible.com/ansible/latest/collections/ansible/builtin/sh_shell.html#parameter-world_readable_temp)
</Accordion>

<Accordion title="I'm unable to access the Infisical instance on the web">
  This issue can be caused by a number of reasons, mostly related to the network configuration. Here are a few things you can check:
  1. Ensure that the firewall is not blocking the connection. You can check this by running `ufw status`. Ensure that port 80 is open.
  2. If you're using a cloud provider like AWS or GCP, ensure that the security group allows traffic on port 80.
  3. Ensure that the HAProxy service is running. You can check this by running `systemctl status haproxy`.
  4. Ensure that the Infisical service is running. You can check this by running `systemctl status infisical`.
</Accordion>
@@ -0,0 +1,203 @@
---
title: "Standalone"
description: "Learn how to deploy Infisical in a standalone environment."
---

# Self-Hosting Infisical with Standalone Infisical

Deploying Infisical in a standalone environment is a great way to get started with Infisical without having to use containers. This guide will walk you through the process of deploying Infisical in a standalone environment.
This is one of the easiest ways to deploy Infisical. It is a single executable, currently only supported on Debian-based systems.

The standalone deployment implements the "bring your own database" (BYOD) approach. This means that you will need to provide your own databases (specifically Postgres and Redis) for the Infisical services to use. The standalone deployment does not include any databases.

If you wish to streamline the deployment process, we recommend using the Ansible role for Infisical. The Ansible role automates the deployment process and includes the databases:
- [Automated Deployment](https://google.com)
- [Automated Deployment with high availability (HA)](https://google.com)


## Prerequisites
- A server running a Debian-based operating system (e.g., Ubuntu, Debian)
- A Postgres database
- A Redis database

## Installing Infisical
Installing Infisical is as simple as running a single command. You can install Infisical by running the following command:

```bash
$ curl -1sLf 'https://dl.cloudsmith.io/public/infisical/infisical-core/cfg/setup/bash.deb.sh' | sudo bash && sudo apt-get install -y infisical-core
```

## Running Infisical
Running Infisical and serving it to the web has a few steps. Below are the steps to get you started with running Infisical in a standalone environment.
* Setup environment variables
* Running Postgres migrations
* Create system daemon
* Exposing Infisical to the internet


<Steps>
  <Step title="Setup environment variables">
    To use Infisical you'll need to configure the environment variables beforehand. You can achieve this by creating an environment file to be used by Infisical.


    #### Create environment file
    ```bash
    $ mkdir -p /etc/infisical && touch /etc/infisical/environment
    ```

    After creating the environment file, you'll need to fill it out with your environment variables.

    #### Edit environment file
    ```bash
    $ nano /etc/infisical/environment
    ```

    ```bash
    DB_CONNECTION_URI=postgres://user:password@localhost:5432/infisical # Replace with your Postgres database connection URI
    REDIS_URL=redis://localhost:6379 # Replace with your Redis connection URI
    ENCRYPTION_KEY=your_encryption_key # Replace with your encryption key (can be generated with: openssl rand -hex 16)
    AUTH_SECRET=your_auth_secret # Replace with your auth secret (can be generated with: openssl rand -base64 32)
    ```

    <Info>
      The minimum required environment variables are `DB_CONNECTION_URI`, `REDIS_URL`, `ENCRYPTION_KEY`, and `AUTH_SECRET`. We recommend you take a look at our [list of all available environment variables](/docs/self-hosting/configuration/envars#general-platform), and configure the ones you need.
    </Info>
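
    If you would rather generate the two secrets and write them straight into the environment file in one go, a sketch using the `openssl` commands mentioned above looks like this:

    ```bash
    # Generate and append the two required secrets to the environment file
    echo "ENCRYPTION_KEY=$(openssl rand -hex 16)" | sudo tee -a /etc/infisical/environment
    echo "AUTH_SECRET=$(openssl rand -base64 32)" | sudo tee -a /etc/infisical/environment
    ```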
  </Step>
  <Step title="Running Postgres migrations">

    Assuming you're starting with a fresh Postgres database, you'll need to run the Postgres migrations to synchronize the database schema.
    The migration command will use the environment variables you configured in the previous step.

    ```bash
    $ eval $(cat /etc/infisical/environment) infisical-core migration:latest
    ```

    <Info>
      This step will need to be repeated if you update Infisical in the future.
    </Info>
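
    As a rough sketch, a later update could look like the following; the package name matches the install command from earlier, and the migration invocation is the same one shown in this step.

    ```bash
    # Update the infisical-core package from the apt repository configured earlier
    sudo apt-get update && sudo apt-get install -y --only-upgrade infisical-core

    # Re-run the migrations against the database defined in /etc/infisical/environment
    eval $(cat /etc/infisical/environment) infisical-core migration:latest

    # Restart the service (created in the next step) so the new binary is used
    sudo systemctl restart infisical
    ```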
  </Step>

  <Step title="Create Infisical service">

    Create a systemd service file for Infisical. Creating a systemd service file will allow Infisical to start automatically when the system boots or in case of a crash.

    ```bash
    $ nano /etc/systemd/system/infisical.service
    ```

    ```ini
    [Unit]
    Description=Infisical Service
    After=network.target

    [Service]
    # The path to the environment file we created in the previous step
    EnvironmentFile=/etc/infisical/environment
    Type=simple
    # Change the user to the user you want to run Infisical as
    User=root
    ExecStart=/usr/local/bin/infisical-core
    Restart=always
    RestartSec=30

    [Install]
    WantedBy=multi-user.target
    ```

    Now we need to reload the systemd daemon and start the Infisical service.

    ```bash
    $ systemctl daemon-reload
    $ systemctl start infisical
    $ systemctl enable infisical
    ```

    <Info>
      You can check the status of the Infisical service by running `systemctl status infisical`.
      It is also a good idea to check the logs for any errors by running `journalctl --no-pager -u infisical`.
    </Info>
  </Step>
  <Step title="Exposing Infisical to the internet">
    Exposing Infisical to the internet requires setting up a reverse proxy. You can use any reverse proxy of your choice, but we recommend using HAProxy or Nginx. Below is an example of how to set up a reverse proxy using HAProxy.

    #### Install HAProxy
    ```bash
    $ apt-get install -y haproxy
    ```

    #### Edit HAProxy configuration
    ```bash
    $ nano /etc/haproxy/haproxy.cfg
    ```

    ```ini
    global
        log /dev/log local0
        log /dev/log local1 notice
        chroot /var/lib/haproxy
        stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners
        stats timeout 30s
        user haproxy
        group haproxy
        daemon

    defaults
        log global
        mode http
        option httplog
        option dontlognull
        timeout connect 5000
        timeout client 50000
        timeout server 50000

    frontend http-in
        bind *:80
        default_backend infisical

    backend infisical
        server infisicalapp 127.0.0.1:8080 check
    ```

    <Warning>
      If you decide to use Nginx, then please be aware that the configuration will be different. **Infisical listens on port 8080**.
    </Warning>
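
    For reference, a minimal Nginx setup might look like the sketch below. This is not an officially supported configuration; the site file path and `server_name` are placeholders you would adapt.

    ```bash
    # Write a minimal Nginx site that proxies to Infisical on port 8080 (sketch only)
    sudo tee /etc/nginx/sites-available/infisical > /dev/null <<'EOF'
    server {
        listen 80;
        server_name your-domain.example.com;

        location / {
            proxy_pass http://127.0.0.1:8080;
            proxy_set_header Host $host;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        }
    }
    EOF

    sudo ln -s /etc/nginx/sites-available/infisical /etc/nginx/sites-enabled/infisical
    sudo nginx -t && sudo systemctl reload nginx
    ```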

    #### Restart HAProxy
    ```bash
    $ systemctl restart haproxy
    ```

  </Step>
</Steps>

And that's it! You have successfully deployed Infisical in a standalone environment. You can now access Infisical by visiting `http://your-server-ip`.

<Note>
  Please take note that the Infisical team cannot provide infrastructure support for **free self-hosted** deployments.<br/>If you need help with infrastructure, we recommend upgrading to a [paid plan](https://infisical.com/pricing) which includes infrastructure support.

  You can also join our community [Slack](https://infisical.com/slack) for help and support from the community.
</Note>

## Troubleshooting

<Accordion title="I'm getting an error related to HAProxy (Missing LF on last line, file might have been truncated at position X)">
  This is a common issue related to the HAProxy configuration file. The error is caused by a missing newline character at the end of the file. You can fix this by adding a newline character at the end of the file.

  ```bash
  $ echo "" >> /etc/haproxy/haproxy.cfg
  ```
</Accordion>
<Accordion title="I'm unable to access the Infisical instance on the web">
  This issue can be caused by a number of reasons, mostly related to the network configuration. Here are a few things you can check:
  1. Ensure that the firewall is not blocking the connection. You can check this by running `ufw status`. Ensure that port 80 is open.
  2. If you're using a cloud provider like AWS or GCP, ensure that the security group allows traffic on port 80.
  3. Ensure that the HAProxy service is running. You can check this by running `systemctl status haproxy`.
  4. Ensure that the Infisical service is running. You can check this by running `systemctl status infisical`.
</Accordion>
@@ -15,15 +15,30 @@ This guide walks through how you can use these paid features on a self hosted in
  </Step>
  <Step title="Activate the license">
    Depending on whether or not the environment where Infisical is deployed has internet access, you may be issued a regular license or an offline license.

    - If using a regular license, you should set the value of the environment variable `LICENSE_KEY` in Infisical to the issued license key.
    - If using an offline license, you should set the value of the environment variable `LICENSE_KEY_OFFLINE` in Infisical to the issued license key.

    <Note>
      How you set the environment variable will depend on the deployment method you used. Please refer to the documentation of your deployment method for specific instructions.
    </Note>

    <Tabs>
      <Tab title="Regular License">
        - Assign the issued license key to the `LICENSE_KEY` environment variable in your Infisical instance.

        - Your Infisical instance will need to communicate with the Infisical license server to validate the license key.
          If you want to limit outgoing connections only to the Infisical license server, you can use the following IP addresses: `13.248.249.247` and `35.71.190.59`

          <Note>
            Ensure that your firewall or network settings allow outbound connections to these IP addresses to avoid any issues with license validation.
          </Note>
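
          If the node runs a host firewall such as ufw with a restrictive outbound policy, the allowance could be expressed roughly as follows; this is only a sketch and assumes the license server is reached over HTTPS (port 443).

          ```bash
          # Allow outbound HTTPS traffic to the Infisical license server IPs (sketch, assuming ufw)
          sudo ufw allow out to 13.248.249.247 port 443 proto tcp
          sudo ufw allow out to 35.71.190.59 port 443 proto tcp
          ```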
      </Tab>
      <Tab title="Offline License">
        - Assign the issued license key to the `LICENSE_KEY_OFFLINE` environment variable in your Infisical instance.

          <Note>
            How you set the environment variable will depend on the deployment method you used. Please refer to the documentation of your deployment method for specific instructions.
          </Note>
      </Tab>
    </Tabs>
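
    As an illustration, on the native standalone deployment described earlier in this document the key is just another line in the environment file; treat this as a sketch and adapt it to whichever deployment method you actually use.

    ```bash
    # Sketch for the standalone (native) deployment: add the license key and restart.
    # Use LICENSE_KEY_OFFLINE instead if you were issued an offline license.
    echo 'LICENSE_KEY=<your-issued-license-key>' | sudo tee -a /etc/infisical/environment
    sudo systemctl restart infisical
    ```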

    Once your instance starts up, the license key will be validated and you’ll be able to use the paid features.
    However, when the license expires, Infisical will continue to run, but EE features will be disabled until the license is renewed or a new one is purchased.
  </Step>
</Steps>

@@ -33,3 +33,21 @@ Choose from a number of deployment options listed below to get started.
    Use our Helm chart to install Infisical on your Kubernetes cluster.
  </Card>
</CardGroup>

<CardGroup cols={2}>
  <Card
    title="Native Deployment"
    color="#000000"
    icon="box"
    href="deployment-options/native/standalone-binary"
  >
    Install Infisical on your Debian-based system without containers using our standalone binary.
  </Card>
  <Card
    title="Native Deployment, High Availability"
    color="#000000"
    icon="boxes-stacked"
    href="deployment-options/native/high-availability"
  >
    Install Infisical on your Debian-based instances without containers using our standalone binary with high availability out of the box.
  </Card>
</CardGroup>
frontend/package-lock.json (generated) — 2 lines changed

@@ -136,7 +136,7 @@
        "eslint-plugin-react-hooks": "^4.6.0",
        "eslint-plugin-simple-import-sort": "^8.0.0",
        "eslint-plugin-storybook": "^0.6.12",
        "postcss": "^8.4.14",
        "postcss": "^8.4.39",
        "prettier": "^2.8.3",
        "prettier-plugin-tailwindcss": "^0.2.2",
        "storybook": "^7.6.20",

@@ -144,7 +144,7 @@
        "eslint-plugin-react-hooks": "^4.6.0",
        "eslint-plugin-simple-import-sort": "^8.0.0",
        "eslint-plugin-storybook": "^0.6.12",
        "postcss": "^8.4.14",
        "postcss": "^8.4.39",
        "prettier": "^2.8.3",
        "prettier-plugin-tailwindcss": "^0.2.2",
        "storybook": "^7.6.20",

@@ -60,6 +60,7 @@ export type OrgUser = {
  status: "invited" | "accepted" | "verified" | "completed";
  deniedPermissions: any[];
  roleId: string;
  isActive: boolean;
};

export type TProjectMembership = {

@@ -5,7 +5,7 @@ import { z } from "zod";

import { createNotification } from "@app/components/notifications";
import { Button, FormControl, Modal, ModalContent, Select, SelectItem } from "@app/components/v2";
import { useWorkspace } from "@app/context";
import { useOrganization, useWorkspace } from "@app/context";
import {
  useAddIdentityToWorkspace,
  useGetIdentityProjectMemberships,

@@ -33,6 +33,7 @@ type Props = {
};

export const IdentityAddToProjectModal = ({ identityId, popUp, handlePopUpToggle }: Props) => {
  const { currentOrg } = useOrganization();
  const { workspaces } = useWorkspace();
  const { mutateAsync: addIdentityToWorkspace } = useAddIdentityToWorkspace();

@@ -58,7 +59,9 @@ export const IdentityAddToProjectModal = ({ identityId, popUp, handlePopUpToggle
      wsWorkspaceIds.set(projectMembership.project.id, true);
    });

    return (workspaces || []).filter(({ id }) => !wsWorkspaceIds.has(id));
    return (workspaces || []).filter(
      ({ id, orgId }) => !wsWorkspaceIds.has(id) && orgId === currentOrg?.id
    );
  }, [workspaces, projectMemberships]);

  const onFormSubmit = async ({ projectId: workspaceId, role }: FormData) => {

@@ -171,14 +171,14 @@ export const OrgMembersTable = ({ handlePopUpOpen, setCompleteInviteLink }: Prop
            {isLoading && <TableSkeleton columns={5} innerKey="org-members" />}
            {!isLoading &&
              filterdUser?.map(
                ({ user: u, inviteEmail, role, roleId, id: orgMembershipId, status }) => {
                ({ user: u, inviteEmail, role, roleId, id: orgMembershipId, status, isActive }) => {
                  const name = u && u.firstName ? `${u.firstName} ${u.lastName}` : "-";
                  const email = u?.email || inviteEmail;
                  const username = u?.username ?? inviteEmail ?? "-";
                  return (
                    <Tr key={`org-membership-${orgMembershipId}`} className="w-full">
                      <Td>{name}</Td>
                      <Td className={isActive ? "" : "text-mineshaft-400"}>{name}</Td>
                      <Td>{username}</Td>
                      <Td className={isActive ? "" : "text-mineshaft-400"}>{username}</Td>
                      <Td>
                        <OrgPermissionCan
                          I={OrgPermissionActions.Edit}

@@ -186,7 +186,18 @@ export const OrgMembersTable = ({ handlePopUpOpen, setCompleteInviteLink }: Prop
                        >
                          {(isAllowed) => (
                            <>
                              {status === "accepted" && (
                              {!isActive && (
                                <Button
                                  isDisabled
                                  className="w-40"
                                  colorSchema="primary"
                                  variant="outline_bg"
                                  onClick={() => {}}
                                >
                                  Suspended
                                </Button>
                              )}
                              {isActive && status === "accepted" && (
                                <Select
                                  value={role === "custom" ? findRoleFromId(roleId)?.slug : role}
                                  isDisabled={userId === u?.id || !isAllowed}

@@ -207,7 +218,8 @@ export const OrgMembersTable = ({ handlePopUpOpen, setCompleteInviteLink }: Prop
                                  ))}
                                </Select>
                              )}
                              {(status === "invited" || status === "verified") &&
                              {isActive &&
                                (status === "invited" || status === "verified") &&
                                email &&
                                serverDetails?.emailConfigured && (
                                  <Button