Mirror of https://github.com/Infisical/infisical.git (synced 2025-07-11 12:11:38 +00:00)

Compare commits: 28 commits (daniel/gat ... daniel/gat)

Commit SHAs:

4c8bf9bd92
a6554deb80
4bd1eb6f70
022ecf75e1
ce170a6a47
cb8e36ae15
16ce1f441e
8043b61c9f
d374ff2093
9a935c9177
9d24eb15dc
7acd7fd522
2148b636f5
e40b4a0a4b
311bf8b515
a467b13069
9cc17452fa
93ba6f7b58
0fcb66e9ab
135f425fcf
9c149cb4bf
ce45c1a43d
1a14c71564
e7fe2ea51e
30d7e63a67
1101707d8b
54435d0ad9
698260cba6
@@ -117,6 +117,7 @@ export const OCIVaultSyncFns = {
   syncSecrets: async (secretSync: TOCIVaultSyncWithCredentials, secretMap: TSecretMap) => {
     const {
       connection,
+      environment,
       destinationConfig: { compartmentOcid, vaultOcid, keyOcid }
     } = secretSync;

@@ -213,7 +214,7 @@ export const OCIVaultSyncFns = {
     // Update and delete secrets
     for await (const [key, variable] of Object.entries(variables)) {
       // eslint-disable-next-line no-continue
-      if (!matchesSchema(key, secretSync.syncOptions.keySchema)) continue;
+      if (!matchesSchema(key, environment?.slug || "", secretSync.syncOptions.keySchema)) continue;

       // Only update / delete active secrets
       if (variable.lifecycleState === vault.models.SecretSummary.LifecycleState.Active) {
@@ -10,7 +10,8 @@ export const PgSqlLock = {
   KmsRootKeyInit: 2025,
   OrgGatewayRootCaInit: (orgId: string) => pgAdvisoryLockHashText(`org-gateway-root-ca:${orgId}`),
   OrgGatewayCertExchange: (orgId: string) => pgAdvisoryLockHashText(`org-gateway-cert-exchange:${orgId}`),
-  SecretRotationV2Creation: (folderId: string) => pgAdvisoryLockHashText(`secret-rotation-v2-creation:${folderId}`)
+  SecretRotationV2Creation: (folderId: string) => pgAdvisoryLockHashText(`secret-rotation-v2-creation:${folderId}`),
+  CreateProject: (orgId: string) => pgAdvisoryLockHashText(`create-project:${orgId}`)
 } as const;

 export type TKeyStoreFactory = ReturnType<typeof keyStoreFactory>;
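The new CreateProject lock is taken inside the project-creation transaction shown further down in this compare view. A minimal usage sketch, assuming a Knex-style transaction object like the one projectDAL.transaction provides:

```ts
// Sketch: serialize project creation per organization with a transaction-scoped
// advisory lock; Postgres releases the lock automatically when the transaction ends.
const createProjectSerialized = async (orgId: string) =>
  projectDAL.transaction(async (tx) => {
    await tx.raw("SELECT pg_advisory_xact_lock(?)", [PgSqlLock.CreateProject(orgId)]);

    // The plan-limit check and project creation run here, free of the
    // "two concurrent requests both pass the workspace limit" race.
  });
```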
@@ -11,7 +11,7 @@ export const globalRateLimiterCfg = (): RateLimitPluginOptions => {
   return {
     errorResponseBuilder: (_, context) => {
       throw new RateLimitError({
-        message: `Rate limit exceeded. Please try again in ${context.after}`
+        message: `Rate limit exceeded. Please try again in ${Math.ceil(context.ttl / 1000)} seconds`
       });
     },
     timeWindow: 60 * 1000,

@@ -113,3 +113,12 @@ export const requestAccessLimit: RateLimitOptions = {
   max: 10,
   keyGenerator: (req) => req.realIp
 };
+
+export const smtpRateLimit = ({
+  keyGenerator = (req) => req.realIp
+}: Pick<RateLimitOptions, "keyGenerator"> = {}): RateLimitOptions => ({
+  timeWindow: 40 * 1000,
+  hook: "preValidation",
+  max: 2,
+  keyGenerator
+});
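The router hunks that follow swap route-level limiters for smtpRateLimit, usually overriding keyGenerator so retries for the same address or membership share one bucket. A hypothetical route config as a sketch (the /example/send-email URL and body shape are illustrative only):

```ts
// Sketch: rate-limit a hypothetical email-sending route by the submitted address,
// falling back to the caller's IP when no address is present.
server.route({
  method: "POST",
  url: "/example/send-email",
  config: {
    rateLimit: smtpRateLimit({
      keyGenerator: (req) => (req.body as { email?: string })?.email?.trim().substring(0, 100) ?? req.realIp
    })
  },
  schema: { body: z.object({ email: z.string().email() }) },
  handler: async () => ({ ok: true })
});
```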
@ -1,7 +1,7 @@
|
||||
import { z } from "zod";
|
||||
|
||||
import { OrgMembershipRole, ProjectMembershipRole, UsersSchema } from "@app/db/schemas";
|
||||
import { inviteUserRateLimit } from "@app/server/config/rateLimiter";
|
||||
import { inviteUserRateLimit, smtpRateLimit } from "@app/server/config/rateLimiter";
|
||||
import { getTelemetryDistinctId } from "@app/server/lib/telemetry";
|
||||
import { verifyAuth } from "@app/server/plugins/auth/verify-auth";
|
||||
import { ActorType, AuthMode } from "@app/services/auth/auth-type";
|
||||
@ -11,7 +11,7 @@ export const registerInviteOrgRouter = async (server: FastifyZodProvider) => {
|
||||
server.route({
|
||||
url: "/signup",
|
||||
config: {
|
||||
rateLimit: inviteUserRateLimit
|
||||
rateLimit: smtpRateLimit()
|
||||
},
|
||||
method: "POST",
|
||||
schema: {
|
||||
@ -81,7 +81,10 @@ export const registerInviteOrgRouter = async (server: FastifyZodProvider) => {
|
||||
server.route({
|
||||
url: "/signup-resend",
|
||||
config: {
|
||||
rateLimit: inviteUserRateLimit
|
||||
rateLimit: smtpRateLimit({
|
||||
keyGenerator: (req) =>
|
||||
(req.body as { membershipId?: string })?.membershipId?.trim().substring(0, 100) ?? req.realIp
|
||||
})
|
||||
},
|
||||
method: "POST",
|
||||
schema: {
|
||||
|
@ -2,9 +2,9 @@ import { z } from "zod";
|
||||
|
||||
import { ProjectMembershipsSchema } from "@app/db/schemas";
|
||||
import { EventType } from "@app/ee/services/audit-log/audit-log-types";
|
||||
import { readLimit, writeLimit } from "@app/server/config/rateLimiter";
|
||||
import { readLimit, smtpRateLimit } from "@app/server/config/rateLimiter";
|
||||
import { verifyAuth } from "@app/server/plugins/auth/verify-auth";
|
||||
import { AuthMode } from "@app/services/auth/auth-type";
|
||||
import { ActorType, AuthMode } from "@app/services/auth/auth-type";
|
||||
|
||||
import { SanitizedProjectSchema } from "../sanitizedSchemas";
|
||||
|
||||
@ -47,7 +47,9 @@ export const registerOrgAdminRouter = async (server: FastifyZodProvider) => {
|
||||
method: "POST",
|
||||
url: "/projects/:projectId/grant-admin-access",
|
||||
config: {
|
||||
rateLimit: writeLimit
|
||||
rateLimit: smtpRateLimit({
|
||||
keyGenerator: (req) => (req.auth.actor === ActorType.USER ? req.auth.userId : req.realIp)
|
||||
})
|
||||
},
|
||||
schema: {
|
||||
params: z.object({
|
||||
|
@ -2,10 +2,10 @@ import { z } from "zod";
|
||||
|
||||
import { BackupPrivateKeySchema, UsersSchema } from "@app/db/schemas";
|
||||
import { getConfig } from "@app/lib/config/env";
|
||||
import { authRateLimit } from "@app/server/config/rateLimiter";
|
||||
import { authRateLimit, smtpRateLimit } from "@app/server/config/rateLimiter";
|
||||
import { verifyAuth } from "@app/server/plugins/auth/verify-auth";
|
||||
import { validateSignUpAuthorization } from "@app/services/auth/auth-fns";
|
||||
import { AuthMode } from "@app/services/auth/auth-type";
|
||||
import { ActorType, AuthMode } from "@app/services/auth/auth-type";
|
||||
import { UserEncryption } from "@app/services/user/user-types";
|
||||
|
||||
export const registerPasswordRouter = async (server: FastifyZodProvider) => {
|
||||
@ -80,7 +80,9 @@ export const registerPasswordRouter = async (server: FastifyZodProvider) => {
|
||||
method: "POST",
|
||||
url: "/email/password-reset",
|
||||
config: {
|
||||
rateLimit: authRateLimit
|
||||
rateLimit: smtpRateLimit({
|
||||
keyGenerator: (req) => (req.body as { email?: string })?.email?.trim().substring(0, 100) ?? req.realIp
|
||||
})
|
||||
},
|
||||
schema: {
|
||||
body: z.object({
|
||||
@ -224,7 +226,9 @@ export const registerPasswordRouter = async (server: FastifyZodProvider) => {
|
||||
method: "POST",
|
||||
url: "/email/password-setup",
|
||||
config: {
|
||||
rateLimit: authRateLimit
|
||||
rateLimit: smtpRateLimit({
|
||||
keyGenerator: (req) => (req.auth.actor === ActorType.USER ? req.auth.userId : req.realIp)
|
||||
})
|
||||
},
|
||||
schema: {
|
||||
response: {
|
||||
@ -233,6 +237,7 @@ export const registerPasswordRouter = async (server: FastifyZodProvider) => {
|
||||
})
|
||||
}
|
||||
},
|
||||
onRequest: verifyAuth([AuthMode.JWT]),
|
||||
handler: async (req) => {
|
||||
await server.services.password.sendPasswordSetupEmail(req.permission);
|
||||
|
||||
@ -267,6 +272,7 @@ export const registerPasswordRouter = async (server: FastifyZodProvider) => {
|
||||
})
|
||||
}
|
||||
},
|
||||
onRequest: verifyAuth([AuthMode.JWT]),
|
||||
handler: async (req, res) => {
|
||||
await server.services.password.setupPassword(req.body, req.permission);
|
||||
|
||||
|
@ -2,7 +2,7 @@ import { z } from "zod";
|
||||
|
||||
import { AuthTokenSessionsSchema, UserEncryptionKeysSchema, UsersSchema } from "@app/db/schemas";
|
||||
import { ApiKeysSchema } from "@app/db/schemas/api-keys";
|
||||
import { authRateLimit, readLimit, writeLimit } from "@app/server/config/rateLimiter";
|
||||
import { authRateLimit, readLimit, smtpRateLimit, writeLimit } from "@app/server/config/rateLimiter";
|
||||
import { verifyAuth } from "@app/server/plugins/auth/verify-auth";
|
||||
import { AuthMethod, AuthMode, MfaMethod } from "@app/services/auth/auth-type";
|
||||
import { sanitizedOrganizationSchema } from "@app/services/org/org-schema";
|
||||
@ -12,7 +12,9 @@ export const registerUserRouter = async (server: FastifyZodProvider) => {
|
||||
method: "POST",
|
||||
url: "/me/emails/code",
|
||||
config: {
|
||||
rateLimit: authRateLimit
|
||||
rateLimit: smtpRateLimit({
|
||||
keyGenerator: (req) => (req.body as { username?: string })?.username?.trim().substring(0, 100) ?? req.realIp
|
||||
})
|
||||
},
|
||||
schema: {
|
||||
body: z.object({
|
||||
|
@ -3,7 +3,7 @@ import { z } from "zod";
|
||||
import { UsersSchema } from "@app/db/schemas";
|
||||
import { getConfig } from "@app/lib/config/env";
|
||||
import { ForbiddenRequestError } from "@app/lib/errors";
|
||||
import { authRateLimit } from "@app/server/config/rateLimiter";
|
||||
import { authRateLimit, smtpRateLimit } from "@app/server/config/rateLimiter";
|
||||
import { GenericResourceNameSchema } from "@app/server/lib/schemas";
|
||||
import { getServerCfg } from "@app/services/super-admin/super-admin-service";
|
||||
import { PostHogEventTypes } from "@app/services/telemetry/telemetry-types";
|
||||
@ -13,7 +13,9 @@ export const registerSignupRouter = async (server: FastifyZodProvider) => {
|
||||
url: "/email/signup",
|
||||
method: "POST",
|
||||
config: {
|
||||
rateLimit: authRateLimit
|
||||
rateLimit: smtpRateLimit({
|
||||
keyGenerator: (req) => (req.body as { email?: string })?.email?.trim().substring(0, 100) ?? req.realIp
|
||||
})
|
||||
},
|
||||
schema: {
|
||||
body: z.object({
|
||||
|
@@ -274,9 +274,27 @@ export const identityKubernetesAuthServiceFactory = ({

     if (identityKubernetesAuth.tokenReviewMode === IdentityKubernetesAuthTokenReviewMode.Gateway) {
       const { kubernetesHost } = identityKubernetesAuth;
-      const lastColonIndex = kubernetesHost.lastIndexOf(":");
-      const k8sHost = kubernetesHost.substring(0, lastColonIndex);
-      const k8sPort = kubernetesHost.substring(lastColonIndex + 1);
+
+      let urlString = kubernetesHost;
+      if (!kubernetesHost.startsWith("http://") && !kubernetesHost.startsWith("https://")) {
+        urlString = `https://${kubernetesHost}`;
+      }
+
+      const url = new URL(urlString);
+      let { port: k8sPort } = url;
+      const { protocol, hostname: k8sHost } = url;
+
+      const cleanedProtocol = new RE2(/[^a-zA-Z0-9]/g).replace(protocol, "").toLowerCase();
+
+      if (!["https", "http"].includes(cleanedProtocol)) {
+        throw new BadRequestError({
+          message: "Invalid Kubernetes host URL, must start with http:// or https://"
+        });
+      }
+
+      if (!k8sPort) {
+        k8sPort = cleanedProtocol === "https" ? "443" : "80";
+      }

       if (!identityKubernetesAuth.gatewayId) {
         throw new BadRequestError({
@@ -287,7 +305,7 @@ export const identityKubernetesAuthServiceFactory = ({
       data = await $gatewayProxyWrapper(
         {
           gatewayId: identityKubernetesAuth.gatewayId,
-          targetHost: k8sHost, // note(daniel): must include the protocol (https|http)
+          targetHost: `${cleanedProtocol}://${k8sHost}`, // note(daniel): must include the protocol (https|http)
           targetPort: k8sPort ? Number(k8sPort) : 443,
           caCert,
           reviewTokenThroughGateway: true
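A standalone sketch of the host normalization introduced above, assuming only Node's built-in URL parsing (the service additionally strips the protocol with RE2 before validating it):

```ts
// Sketch: normalize a configured Kubernetes host into protocol, hostname and port,
// defaulting to https and the protocol's standard port when they are omitted.
const normalizeKubernetesHost = (kubernetesHost: string) => {
  const urlString =
    kubernetesHost.startsWith("http://") || kubernetesHost.startsWith("https://")
      ? kubernetesHost
      : `https://${kubernetesHost}`;

  const url = new URL(urlString);
  const protocol = url.protocol.replace(":", "").toLowerCase(); // "https" | "http"
  const port = url.port || (protocol === "https" ? "443" : "80");

  return { protocol, host: url.hostname, port: Number(port) };
};

// normalizeKubernetesHost("10.0.0.1:6443")        -> { protocol: "https", host: "10.0.0.1", port: 6443 }
// normalizeKubernetesHost("http://kube.internal") -> { protocol: "http", host: "kube.internal", port: 80 }
```

Compared with the previous lastIndexOf(":") approach, this handles hosts without an explicit port and rejects non-HTTP schemes before the request ever reaches the gateway.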
@@ -30,7 +30,7 @@ import { TSshCertificateDALFactory } from "@app/ee/services/ssh-certificate/ssh-
 import { TSshCertificateTemplateDALFactory } from "@app/ee/services/ssh-certificate-template/ssh-certificate-template-dal";
 import { TSshHostDALFactory } from "@app/ee/services/ssh-host/ssh-host-dal";
 import { TSshHostGroupDALFactory } from "@app/ee/services/ssh-host-group/ssh-host-group-dal";
-import { TKeyStoreFactory } from "@app/keystore/keystore";
+import { PgSqlLock, TKeyStoreFactory } from "@app/keystore/keystore";
 import { getConfig } from "@app/lib/config/env";
 import { infisicalSymmetricEncypt } from "@app/lib/crypto/encryption";
 import { BadRequestError, ForbiddenRequestError, NotFoundError } from "@app/lib/errors";

@@ -259,16 +259,17 @@ export const projectServiceFactory = ({
     );
     ForbiddenError.from(permission).throwUnlessCan(OrgPermissionActions.Create, OrgPermissionSubjects.Workspace);

-    const plan = await licenseService.getPlan(organization.id);
-    if (plan.workspaceLimit !== null && plan.workspacesUsed >= plan.workspaceLimit) {
-      // case: limit imposed on number of workspaces allowed
-      // case: number of workspaces used exceeds the number of workspaces allowed
-      throw new BadRequestError({
-        message: "Failed to create workspace due to plan limit reached. Upgrade plan to add more workspaces."
-      });
-    }
-
     const results = await (trx || projectDAL).transaction(async (tx) => {
+      await tx.raw("SELECT pg_advisory_xact_lock(?)", [PgSqlLock.CreateProject(organization.id)]);
+
+      const plan = await licenseService.getPlan(organization.id);
+      if (plan.workspaceLimit !== null && plan.workspacesUsed >= plan.workspaceLimit) {
+        // case: limit imposed on number of workspaces allowed
+        // case: number of workspaces used exceeds the number of workspaces allowed
+        throw new BadRequestError({
+          message: "Failed to create workspace due to plan limit reached. Upgrade plan to add more workspaces."
+        });
+      }
       const ghostUser = await orgService.addGhostUser(organization.id, tx);

       if (kmsKeyId) {
@ -127,6 +127,7 @@ export const OnePassSyncFns = {
|
||||
syncSecrets: async (secretSync: TOnePassSyncWithCredentials, secretMap: TSecretMap) => {
|
||||
const {
|
||||
connection,
|
||||
environment,
|
||||
destinationConfig: { vaultId }
|
||||
} = secretSync;
|
||||
|
||||
@ -164,7 +165,7 @@ export const OnePassSyncFns = {
|
||||
|
||||
for await (const [key, variable] of Object.entries(items)) {
|
||||
// eslint-disable-next-line no-continue
|
||||
if (!matchesSchema(key, secretSync.syncOptions.keySchema)) continue;
|
||||
if (!matchesSchema(key, environment?.slug || "", secretSync.syncOptions.keySchema)) continue;
|
||||
|
||||
if (!(key in secretMap)) {
|
||||
try {
|
||||
|
@ -294,7 +294,7 @@ const deleteParametersBatch = async (
|
||||
|
||||
export const AwsParameterStoreSyncFns = {
|
||||
syncSecrets: async (secretSync: TAwsParameterStoreSyncWithCredentials, secretMap: TSecretMap) => {
|
||||
const { destinationConfig, syncOptions } = secretSync;
|
||||
const { destinationConfig, syncOptions, environment } = secretSync;
|
||||
|
||||
const ssm = await getSSM(secretSync);
|
||||
|
||||
@ -391,7 +391,7 @@ export const AwsParameterStoreSyncFns = {
|
||||
const [key, parameter] = entry;
|
||||
|
||||
// eslint-disable-next-line no-continue
|
||||
if (!matchesSchema(key, syncOptions.keySchema)) continue;
|
||||
if (!matchesSchema(key, environment?.slug || "", syncOptions.keySchema)) continue;
|
||||
|
||||
if (!(key in secretMap) || !secretMap[key].value) {
|
||||
parametersToDelete.push(parameter);
|
||||
|
@ -57,7 +57,11 @@ const sleep = async () =>
|
||||
setTimeout(resolve, 1000);
|
||||
});
|
||||
|
||||
const getSecretsRecord = async (client: SecretsManagerClient, keySchema?: string): Promise<TAwsSecretsRecord> => {
|
||||
const getSecretsRecord = async (
|
||||
client: SecretsManagerClient,
|
||||
environment: string,
|
||||
keySchema?: string
|
||||
): Promise<TAwsSecretsRecord> => {
|
||||
const awsSecretsRecord: TAwsSecretsRecord = {};
|
||||
let hasNext = true;
|
||||
let nextToken: string | undefined;
|
||||
@ -72,7 +76,7 @@ const getSecretsRecord = async (client: SecretsManagerClient, keySchema?: string
|
||||
|
||||
if (output.SecretList) {
|
||||
output.SecretList.forEach((secretEntry) => {
|
||||
if (secretEntry.Name && matchesSchema(secretEntry.Name, keySchema)) {
|
||||
if (secretEntry.Name && matchesSchema(secretEntry.Name, environment, keySchema)) {
|
||||
awsSecretsRecord[secretEntry.Name] = secretEntry;
|
||||
}
|
||||
});
|
||||
@ -307,11 +311,11 @@ const processTags = ({
|
||||
|
||||
export const AwsSecretsManagerSyncFns = {
|
||||
syncSecrets: async (secretSync: TAwsSecretsManagerSyncWithCredentials, secretMap: TSecretMap) => {
|
||||
const { destinationConfig, syncOptions } = secretSync;
|
||||
const { destinationConfig, syncOptions, environment } = secretSync;
|
||||
|
||||
const client = await getSecretsManagerClient(secretSync);
|
||||
|
||||
const awsSecretsRecord = await getSecretsRecord(client, syncOptions.keySchema);
|
||||
const awsSecretsRecord = await getSecretsRecord(client, environment?.slug || "", syncOptions.keySchema);
|
||||
|
||||
const awsValuesRecord = await getSecretValuesRecord(client, awsSecretsRecord);
|
||||
|
||||
@ -401,7 +405,7 @@ export const AwsSecretsManagerSyncFns = {
|
||||
|
||||
for await (const secretKey of Object.keys(awsSecretsRecord)) {
|
||||
// eslint-disable-next-line no-continue
|
||||
if (!matchesSchema(secretKey, syncOptions.keySchema)) continue;
|
||||
if (!matchesSchema(secretKey, environment?.slug || "", syncOptions.keySchema)) continue;
|
||||
|
||||
if (!(secretKey in secretMap) || !secretMap[secretKey].value) {
|
||||
try {
|
||||
@ -468,7 +472,11 @@ export const AwsSecretsManagerSyncFns = {
|
||||
getSecrets: async (secretSync: TAwsSecretsManagerSyncWithCredentials): Promise<TSecretMap> => {
|
||||
const client = await getSecretsManagerClient(secretSync);
|
||||
|
||||
const awsSecretsRecord = await getSecretsRecord(client, secretSync.syncOptions.keySchema);
|
||||
const awsSecretsRecord = await getSecretsRecord(
|
||||
client,
|
||||
secretSync.environment?.slug || "",
|
||||
secretSync.syncOptions.keySchema
|
||||
);
|
||||
const awsValuesRecord = await getSecretValuesRecord(client, awsSecretsRecord);
|
||||
|
||||
const { destinationConfig } = secretSync;
|
||||
@ -503,11 +511,11 @@ export const AwsSecretsManagerSyncFns = {
|
||||
}
|
||||
},
|
||||
removeSecrets: async (secretSync: TAwsSecretsManagerSyncWithCredentials, secretMap: TSecretMap) => {
|
||||
const { destinationConfig, syncOptions } = secretSync;
|
||||
const { destinationConfig, syncOptions, environment } = secretSync;
|
||||
|
||||
const client = await getSecretsManagerClient(secretSync);
|
||||
|
||||
const awsSecretsRecord = await getSecretsRecord(client, syncOptions.keySchema);
|
||||
const awsSecretsRecord = await getSecretsRecord(client, environment?.slug || "", syncOptions.keySchema);
|
||||
|
||||
if (destinationConfig.mappingBehavior === AwsSecretsManagerSyncMappingBehavior.OneToOne) {
|
||||
for await (const secretKey of Object.keys(awsSecretsRecord)) {
|
||||
|
@ -141,7 +141,7 @@ export const azureAppConfigurationSyncFactory = ({
|
||||
|
||||
for await (const key of Object.keys(azureAppConfigSecrets)) {
|
||||
// eslint-disable-next-line no-continue
|
||||
if (!matchesSchema(key, secretSync.syncOptions.keySchema)) continue;
|
||||
if (!matchesSchema(key, secretSync.environment?.slug || "", secretSync.syncOptions.keySchema)) continue;
|
||||
|
||||
const azureSecret = azureAppConfigSecrets[key];
|
||||
if (
|
||||
|
@ -194,7 +194,7 @@ export const azureKeyVaultSyncFactory = ({ kmsService, appConnectionDAL }: TAzur
|
||||
|
||||
for await (const deleteSecretKey of deleteSecrets.filter(
|
||||
(secret) =>
|
||||
matchesSchema(secret, secretSync.syncOptions.keySchema) &&
|
||||
matchesSchema(secret, secretSync.environment?.slug || "", secretSync.syncOptions.keySchema) &&
|
||||
!setSecrets.find((setSecret) => setSecret.key === secret)
|
||||
)) {
|
||||
await request.delete(`${secretSync.destinationConfig.vaultBaseUrl}/secrets/${deleteSecretKey}?api-version=7.3`, {
|
||||
|
@ -118,7 +118,7 @@ export const camundaSyncFactory = ({ kmsService, appConnectionDAL }: TCamundaSec
|
||||
|
||||
for await (const secret of Object.keys(camundaSecrets)) {
|
||||
// eslint-disable-next-line no-continue
|
||||
if (!matchesSchema(secret, secretSync.syncOptions.keySchema)) continue;
|
||||
if (!matchesSchema(secret, secretSync.environment?.slug || "", secretSync.syncOptions.keySchema)) continue;
|
||||
|
||||
if (!(secret in secretMap) || !secretMap[secret].value) {
|
||||
try {
|
||||
|
@ -117,7 +117,7 @@ export const databricksSyncFactory = ({ kmsService, appConnectionDAL }: TDatabri
|
||||
|
||||
for await (const secret of databricksSecretKeys) {
|
||||
// eslint-disable-next-line no-continue
|
||||
if (!matchesSchema(secret.key, secretSync.syncOptions.keySchema)) continue;
|
||||
if (!matchesSchema(secret.key, secretSync.environment?.slug || "", secretSync.syncOptions.keySchema)) continue;
|
||||
|
||||
if (!(secret.key in secretMap)) {
|
||||
await deleteDatabricksSecrets({
|
||||
|
@ -155,7 +155,7 @@ export const GcpSyncFns = {
|
||||
|
||||
for await (const key of Object.keys(gcpSecrets)) {
|
||||
// eslint-disable-next-line no-continue
|
||||
if (!matchesSchema(key, secretSync.syncOptions.keySchema)) continue;
|
||||
if (!matchesSchema(key, secretSync.environment?.slug || "", secretSync.syncOptions.keySchema)) continue;
|
||||
|
||||
try {
|
||||
if (!(key in secretMap) || !secretMap[key].value) {
|
||||
|
@ -223,8 +223,9 @@ export const GithubSyncFns = {
|
||||
if (secretSync.syncOptions.disableSecretDeletion) return;
|
||||
|
||||
for await (const encryptedSecret of encryptedSecrets) {
|
||||
// eslint-disable-next-line no-continue
|
||||
if (!matchesSchema(encryptedSecret.name, secretSync.syncOptions.keySchema)) continue;
|
||||
if (!matchesSchema(encryptedSecret.name, secretSync.environment?.slug || "", secretSync.syncOptions.keySchema))
|
||||
// eslint-disable-next-line no-continue
|
||||
continue;
|
||||
|
||||
if (!(encryptedSecret.name in secretMap)) {
|
||||
await deleteSecret(client, secretSync, encryptedSecret);
|
||||
|
@ -68,6 +68,7 @@ export const HCVaultSyncFns = {
|
||||
syncSecrets: async (secretSync: THCVaultSyncWithCredentials, secretMap: TSecretMap) => {
|
||||
const {
|
||||
connection,
|
||||
environment,
|
||||
destinationConfig: { mount, path },
|
||||
syncOptions: { disableSecretDeletion, keySchema }
|
||||
} = secretSync;
|
||||
@ -97,7 +98,7 @@ export const HCVaultSyncFns = {
|
||||
|
||||
for await (const [key] of Object.entries(variables)) {
|
||||
// eslint-disable-next-line no-continue
|
||||
if (!matchesSchema(key, keySchema)) continue;
|
||||
if (!matchesSchema(key, environment?.slug || "", keySchema)) continue;
|
||||
|
||||
if (!(key in secretMap)) {
|
||||
delete variables[key];
|
||||
|
@ -200,8 +200,9 @@ export const HumanitecSyncFns = {
|
||||
if (secretSync.syncOptions.disableSecretDeletion) return;
|
||||
|
||||
for await (const humanitecSecret of humanitecSecrets) {
|
||||
// eslint-disable-next-line no-continue
|
||||
if (!matchesSchema(humanitecSecret.key, secretSync.syncOptions.keySchema)) continue;
|
||||
if (!matchesSchema(humanitecSecret.key, secretSync.environment?.slug || "", secretSync.syncOptions.keySchema))
|
||||
// eslint-disable-next-line no-continue
|
||||
continue;
|
||||
|
||||
if (!secretMap[humanitecSecret.key]) {
|
||||
await deleteSecret(secretSync, humanitecSecret);
|
||||
|
@@ -1,5 +1,5 @@
 import { AxiosError } from "axios";
-import RE2 from "re2";
+import handlebars from "handlebars";

 import { TLicenseServiceFactory } from "@app/ee/services/license/license-service";
 import { OCI_VAULT_SYNC_LIST_OPTION, OCIVaultSyncFns } from "@app/ee/services/secret-sync/oci-vault";

@@ -68,13 +68,17 @@ type TSyncSecretDeps = {
 };

 // Add schema to secret keys
-const addSchema = (unprocessedSecretMap: TSecretMap, schema?: string): TSecretMap => {
+const addSchema = (unprocessedSecretMap: TSecretMap, environment: string, schema?: string): TSecretMap => {
   if (!schema) return unprocessedSecretMap;

   const processedSecretMap: TSecretMap = {};

   for (const [key, value] of Object.entries(unprocessedSecretMap)) {
-    const newKey = new RE2("{{secretKey}}").replace(schema, key);
+    const newKey = handlebars.compile(schema)({
+      secretKey: key,
+      environment
+    });

     processedSecretMap[newKey] = value;
   }

@@ -82,10 +86,17 @@ const addSchema = (unprocessedSecretMap: TSecretMap, schema?: string): TSecretMa
 };

 // Strip schema from secret keys
-const stripSchema = (unprocessedSecretMap: TSecretMap, schema?: string): TSecretMap => {
+const stripSchema = (unprocessedSecretMap: TSecretMap, environment: string, schema?: string): TSecretMap => {
   if (!schema) return unprocessedSecretMap;

-  const [prefix, suffix] = schema.split("{{secretKey}}");
+  const compiledSchemaPattern = handlebars.compile(schema)({
+    secretKey: "{{secretKey}}", // Keep secretKey
+    environment
+  });
+
+  const parts = compiledSchemaPattern.split("{{secretKey}}");
+  const prefix = parts[0];
+  const suffix = parts[parts.length - 1];

   const strippedMap: TSecretMap = {};

@@ -103,21 +114,40 @@ const stripSchema = (unprocessedSecretMap: TSecretMap, schema?: string): TSecret
 };

 // Checks if a key matches a schema
-export const matchesSchema = (key: string, schema?: string): boolean => {
+export const matchesSchema = (key: string, environment: string, schema?: string): boolean => {
   if (!schema) return true;

-  const [prefix, suffix] = schema.split("{{secretKey}}");
-  if (prefix === undefined || suffix === undefined) return true;
+  const compiledSchemaPattern = handlebars.compile(schema)({
+    secretKey: "{{secretKey}}", // Keep secretKey
+    environment
+  });

-  return key.startsWith(prefix) && key.endsWith(suffix);
+  // This edge-case shouldn't be possible
+  if (!compiledSchemaPattern.includes("{{secretKey}}")) {
+    return key === compiledSchemaPattern;
+  }
+
+  const parts = compiledSchemaPattern.split("{{secretKey}}");
+  const prefix = parts[0];
+  const suffix = parts[parts.length - 1];
+
+  if (prefix === "" && suffix === "") return true;
+
+  // If prefix is empty, key must end with suffix
+  if (prefix === "") return key.endsWith(suffix);
+
+  // If suffix is empty, key must start with prefix
+  if (suffix === "") return key.startsWith(prefix);
+
+  return key.startsWith(prefix) && key.endsWith(suffix) && key.length >= prefix.length + suffix.length;
 };

 // Filter only for secrets with keys that match the schema
-const filterForSchema = (secretMap: TSecretMap, schema?: string): TSecretMap => {
+const filterForSchema = (secretMap: TSecretMap, environment: string, schema?: string): TSecretMap => {
   const filteredMap: TSecretMap = {};

   for (const [key, value] of Object.entries(secretMap)) {
-    if (matchesSchema(key, schema)) {
+    if (matchesSchema(key, environment, schema)) {
       filteredMap[key] = value;
     }
   }

@@ -131,7 +161,7 @@ export const SecretSyncFns = {
     secretMap: TSecretMap,
     { kmsService, appConnectionDAL }: TSyncSecretDeps
   ): Promise<void> => {
-    const schemaSecretMap = addSchema(secretMap, secretSync.syncOptions.keySchema);
+    const schemaSecretMap = addSchema(secretMap, secretSync.environment?.slug || "", secretSync.syncOptions.keySchema);

     switch (secretSync.destination) {
       case SecretSync.AWSParameterStore:
@@ -255,14 +285,16 @@ export const SecretSyncFns = {
       );
     }

-    return stripSchema(filterForSchema(secretMap), secretSync.syncOptions.keySchema);
+    const filtered = filterForSchema(secretMap, secretSync.environment?.slug || "", secretSync.syncOptions.keySchema);
+    const stripped = stripSchema(filtered, secretSync.environment?.slug || "", secretSync.syncOptions.keySchema);
+    return stripped;
   },
   removeSecrets: (
     secretSync: TSecretSyncWithCredentials,
     secretMap: TSecretMap,
     { kmsService, appConnectionDAL }: TSyncSecretDeps
   ): Promise<void> => {
-    const schemaSecretMap = addSchema(secretMap, secretSync.syncOptions.keySchema);
+    const schemaSecretMap = addSchema(secretMap, secretSync.environment?.slug || "", secretSync.syncOptions.keySchema);

     switch (secretSync.destination) {
       case SecretSync.AWSParameterStore:
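The net effect of the environment-aware helpers is easiest to see with a concrete key schema. A sketch, assuming the helpers are imported from this module (the schema string and import path are illustrative, not taken from the codebase):

```ts
import { matchesSchema } from "./secret-sync-fns"; // illustrative path

// Hypothetical key schema that namespaces synced keys by environment.
const keySchema = "INFISICAL_{{environment}}_{{secretKey}}";

// Keys written by this sync for the "dev" environment are recognized as managed...
matchesSchema("INFISICAL_dev_DB_URL", "dev", keySchema); // true
// ...while pre-existing destination keys outside the pattern are left untouched.
matchesSchema("UNRELATED_KEY", "dev", keySchema); // false
// Without a schema, every key is considered managed.
matchesSchema("ANY_KEY", "dev", undefined); // true
```

addSchema applies the same compiled pattern before pushing to the destination (DB_URL becomes INFISICAL_dev_DB_URL), and stripSchema reverses it when importing secrets back.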
@@ -28,10 +28,30 @@ const BaseSyncOptionsSchema = <T extends AnyZodObject | undefined = undefined>({
   keySchema: z
     .string()
     .optional()
-    .refine((val) => !val || new RE2(/^(?:[a-zA-Z0-9_\-/]*)(?:\{\{secretKey\}\})(?:[a-zA-Z0-9_\-/]*)$/).test(val), {
-      message:
-        "Key schema must include one {{secretKey}} and only contain letters, numbers, dashes, underscores, slashes, and the {{secretKey}} placeholder."
-    })
+    .refine(
+      (val) => {
+        if (!val) return true;
+
+        const allowedOptionalPlaceholders = ["{{environment}}"];
+
+        const allowedPlaceholdersRegexPart = ["{{secretKey}}", ...allowedOptionalPlaceholders]
+          .map((p) => p.replace(/[-/\\^$*+?.()|[\]{}]/g, "\\$&")) // Escape regex special characters
+          .join("|");
+
+        const allowedContentRegex = new RE2(`^([a-zA-Z0-9_\\-/]|${allowedPlaceholdersRegexPart})*$`);
+        const contentIsValid = allowedContentRegex.test(val);
+
+        // Check if {{secretKey}} is present
+        const secretKeyRegex = new RE2(/\{\{secretKey\}\}/);
+        const secretKeyIsPresent = secretKeyRegex.test(val);
+
+        return contentIsValid && secretKeyIsPresent;
+      },
+      {
+        message:
+          "Key schema must include exactly one {{secretKey}} placeholder. It can also include {{environment}} placeholders. Only alphanumeric characters (a-z, A-Z, 0-9), dashes (-), underscores (_), and slashes (/) are allowed besides the placeholders."
+      }
+    )
     .describe(SecretSyncs.SYNC_OPTIONS(destination).keySchema),
   disableSecretDeletion: z.boolean().optional().describe(SecretSyncs.SYNC_OPTIONS(destination).disableSecretDeletion)
 });
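A few illustrative keySchema values and how they fare against the new refinement (a sketch; the strings are examples, not values from the codebase):

```ts
// Expected validation results for the refinement above.
const keySchemaExamples: Record<string, boolean> = {
  "INFISICAL_{{secretKey}}": true, // prefix plus the required placeholder
  "{{environment}}/{{secretKey}}": true, // the optional {{environment}} placeholder is allowed
  "app-{{environment}}-{{secretKey}}_v1": true, // dashes, underscores and slashes are permitted
  "INFISICAL_{{environment}}": false, // missing {{secretKey}}
  "{{secretKey}} with spaces": false, // spaces are outside the allowed character set
  "{{unknownPlaceholder}}{{secretKey}}": false // only {{secretKey}} and {{environment}} are recognized
};
```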
@ -127,7 +127,7 @@ export const TeamCitySyncFns = {
|
||||
|
||||
for await (const [key, variable] of Object.entries(variables)) {
|
||||
// eslint-disable-next-line no-continue
|
||||
if (!matchesSchema(key, secretSync.syncOptions.keySchema)) continue;
|
||||
if (!matchesSchema(key, secretSync.environment?.slug || "", secretSync.syncOptions.keySchema)) continue;
|
||||
|
||||
if (!(key in secretMap)) {
|
||||
try {
|
||||
|
@ -232,8 +232,11 @@ export const TerraformCloudSyncFns = {
|
||||
if (secretSync.syncOptions.disableSecretDeletion) return;
|
||||
|
||||
for (const terraformCloudVariable of terraformCloudVariables) {
|
||||
// eslint-disable-next-line no-continue
|
||||
if (!matchesSchema(terraformCloudVariable.key, secretSync.syncOptions.keySchema)) continue;
|
||||
if (
|
||||
!matchesSchema(terraformCloudVariable.key, secretSync.environment?.slug || "", secretSync.syncOptions.keySchema)
|
||||
)
|
||||
// eslint-disable-next-line no-continue
|
||||
continue;
|
||||
|
||||
if (!Object.prototype.hasOwnProperty.call(secretMap, terraformCloudVariable.key)) {
|
||||
await deleteVariable(secretSync, terraformCloudVariable);
|
||||
|
@ -291,8 +291,9 @@ export const VercelSyncFns = {
|
||||
if (secretSync.syncOptions.disableSecretDeletion) return;
|
||||
|
||||
for await (const vercelSecret of vercelSecrets) {
|
||||
// eslint-disable-next-line no-continue
|
||||
if (!matchesSchema(vercelSecret.key, secretSync.syncOptions.keySchema)) continue;
|
||||
if (!matchesSchema(vercelSecret.key, secretSync.environment?.slug || "", secretSync.syncOptions.keySchema))
|
||||
// eslint-disable-next-line no-continue
|
||||
continue;
|
||||
|
||||
if (!secretMap[vercelSecret.key]) {
|
||||
await deleteSecret(secretSync, vercelSecret);
|
||||
|
@ -128,6 +128,7 @@ export const WindmillSyncFns = {
|
||||
syncSecrets: async (secretSync: TWindmillSyncWithCredentials, secretMap: TSecretMap) => {
|
||||
const {
|
||||
connection,
|
||||
environment,
|
||||
destinationConfig: { path },
|
||||
syncOptions: { disableSecretDeletion, keySchema }
|
||||
} = secretSync;
|
||||
@ -171,7 +172,7 @@ export const WindmillSyncFns = {
|
||||
|
||||
for await (const [key, variable] of Object.entries(variables)) {
|
||||
// eslint-disable-next-line no-continue
|
||||
if (!matchesSchema(key, keySchema)) continue;
|
||||
if (!matchesSchema(key, environment?.slug || "", keySchema)) continue;
|
||||
|
||||
if (!(key in secretMap)) {
|
||||
try {
|
||||
|
@ -12,6 +12,7 @@ import (
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
@ -25,9 +26,13 @@ func handleConnection(ctx context.Context, quicConn quic.Connection) {
|
||||
log.Info().Msgf("New connection from: %s", quicConn.RemoteAddr().String())
|
||||
// Use WaitGroup to track all streams
|
||||
var wg sync.WaitGroup
|
||||
|
||||
contextWithTimeout, cancel := context.WithTimeout(ctx, 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
for {
|
||||
// Accept the first stream, which we'll use for commands
|
||||
stream, err := quicConn.AcceptStream(ctx)
|
||||
stream, err := quicConn.AcceptStream(contextWithTimeout)
|
||||
if err != nil {
|
||||
log.Printf("Failed to accept QUIC stream: %v", err)
|
||||
break
|
||||
@ -51,7 +56,12 @@ func handleStream(stream quic.Stream, quicConn quic.Connection) {
|
||||
|
||||
// Use buffered reader for better handling of fragmented data
|
||||
reader := bufio.NewReader(stream)
|
||||
defer stream.Close()
|
||||
defer func() {
|
||||
log.Info().Msgf("Closing stream %d", streamID)
|
||||
if stream != nil {
|
||||
stream.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
msg, err := reader.ReadBytes('\n')
|
||||
@ -106,6 +116,11 @@ func handleStream(stream quic.Stream, quicConn quic.Connection) {
|
||||
|
||||
targetURL := string(argParts[0])
|
||||
|
||||
if !isValidURL(targetURL) {
|
||||
log.Error().Msgf("Invalid target URL: %s", targetURL)
|
||||
return
|
||||
}
|
||||
|
||||
// Parse optional parameters
|
||||
var caCertB64, verifyParam string
|
||||
for _, part := range argParts[1:] {
|
||||
@ -160,7 +175,6 @@ func handleHTTPProxy(stream quic.Stream, reader *bufio.Reader, targetURL string,
|
||||
}
|
||||
}
|
||||
|
||||
// set certificate verification based on what the gateway client sent
|
||||
if verifyParam != "" {
|
||||
tlsConfig.InsecureSkipVerify = verifyParam == "false"
|
||||
log.Info().Msgf("TLS verification set to: %s", verifyParam)
|
||||
@ -169,82 +183,94 @@ func handleHTTPProxy(stream quic.Stream, reader *bufio.Reader, targetURL string,
|
||||
transport.TLSClientConfig = tlsConfig
|
||||
}
|
||||
|
||||
// read and parse the http request from the stream
|
||||
req, err := http.ReadRequest(reader)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read HTTP request: %v", err)
|
||||
}
|
||||
|
||||
actionHeader := req.Header.Get("x-infisical-action")
|
||||
if actionHeader != "" {
|
||||
|
||||
if actionHeader == "inject-k8s-sa-auth-token" {
|
||||
token, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token")
|
||||
|
||||
if err != nil {
|
||||
stream.Write([]byte(buildHttpInternalServerError("failed to read k8s sa auth token")))
|
||||
return fmt.Errorf("failed to read k8s sa auth token: %v", err)
|
||||
}
|
||||
|
||||
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", string(token)))
|
||||
log.Info().Msgf("Injected gateway k8s SA auth token in request to %s", targetURL)
|
||||
}
|
||||
|
||||
req.Header.Del("x-infisical-action")
|
||||
}
|
||||
|
||||
var targetFullURL string
|
||||
if strings.HasPrefix(targetURL, "http://") || strings.HasPrefix(targetURL, "https://") {
|
||||
baseURL := strings.TrimSuffix(targetURL, "/")
|
||||
targetFullURL = baseURL + req.URL.Path
|
||||
if req.URL.RawQuery != "" {
|
||||
targetFullURL += "?" + req.URL.RawQuery
|
||||
}
|
||||
} else {
|
||||
baseURL := strings.TrimSuffix("http://"+targetURL, "/")
|
||||
targetFullURL = baseURL + req.URL.Path
|
||||
if req.URL.RawQuery != "" {
|
||||
targetFullURL += "?" + req.URL.RawQuery
|
||||
}
|
||||
}
|
||||
|
||||
// create the request to the target
|
||||
proxyReq, err := http.NewRequest(req.Method, targetFullURL, req.Body)
|
||||
proxyReq.Header = req.Header.Clone()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create proxy request: %v", err)
|
||||
}
|
||||
|
||||
log.Info().Msgf("Proxying %s %s to %s", req.Method, req.URL.Path, targetFullURL)
|
||||
|
||||
client := &http.Client{
|
||||
Transport: transport,
|
||||
Timeout: 30 * time.Second,
|
||||
}
|
||||
|
||||
// make the request to the target
|
||||
resp, err := client.Do(proxyReq)
|
||||
if err != nil {
|
||||
stream.Write([]byte(buildHttpInternalServerError(fmt.Sprintf("failed to reach target due to networking error: %s", err.Error()))))
|
||||
return fmt.Errorf("failed to reach target due to networking error: %v", err)
|
||||
// Loop to handle multiple HTTP requests on the same stream
|
||||
for {
|
||||
req, err := http.ReadRequest(reader)
|
||||
|
||||
if err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
log.Info().Msg("Client closed HTTP connection")
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("failed to read HTTP request: %v", err)
|
||||
}
|
||||
log.Info().Msgf("Received HTTP request: %s", req.URL.Path)
|
||||
|
||||
actionHeader := req.Header.Get("x-infisical-action")
|
||||
if actionHeader != "" {
|
||||
if actionHeader == "inject-k8s-sa-auth-token" {
|
||||
token, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token")
|
||||
if err != nil {
|
||||
stream.Write([]byte(buildHttpInternalServerError("failed to read k8s sa auth token")))
|
||||
continue // Continue to next request instead of returning
|
||||
}
|
||||
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", string(token)))
|
||||
log.Info().Msgf("Injected gateway k8s SA auth token in request to %s", targetURL)
|
||||
}
|
||||
req.Header.Del("x-infisical-action")
|
||||
}
|
||||
|
||||
// Build full target URL
|
||||
var targetFullURL string
|
||||
if strings.HasPrefix(targetURL, "http://") || strings.HasPrefix(targetURL, "https://") {
|
||||
baseURL := strings.TrimSuffix(targetURL, "/")
|
||||
targetFullURL = baseURL + req.URL.Path
|
||||
if req.URL.RawQuery != "" {
|
||||
targetFullURL += "?" + req.URL.RawQuery
|
||||
}
|
||||
} else {
|
||||
baseURL := strings.TrimSuffix("http://"+targetURL, "/")
|
||||
targetFullURL = baseURL + req.URL.Path
|
||||
if req.URL.RawQuery != "" {
|
||||
targetFullURL += "?" + req.URL.RawQuery
|
||||
}
|
||||
}
|
||||
|
||||
// create the request to the target
|
||||
proxyReq, err := http.NewRequest(req.Method, targetFullURL, req.Body)
|
||||
if err != nil {
|
||||
log.Error().Msgf("Failed to create proxy request: %v", err)
|
||||
stream.Write([]byte(buildHttpInternalServerError("failed to create proxy request")))
|
||||
continue // Continue to next request
|
||||
}
|
||||
proxyReq.Header = req.Header.Clone()
|
||||
|
||||
log.Info().Msgf("Proxying %s %s to %s", req.Method, req.URL.Path, targetFullURL)
|
||||
|
||||
resp, err := client.Do(proxyReq)
|
||||
if err != nil {
|
||||
log.Error().Msgf("Failed to reach target: %v", err)
|
||||
stream.Write([]byte(buildHttpInternalServerError(fmt.Sprintf("failed to reach target due to networking error: %s", err.Error()))))
|
||||
continue // Continue to next request
|
||||
}
|
||||
|
||||
// Write the entire response (status line, headers, body) to the stream
|
||||
// http.Response.Write handles this for "Connection: close" correctly.
|
||||
// For other connection tokens, manual removal might be needed if they cause issues with QUIC.
|
||||
// For a simple proxy, this is generally sufficient.
|
||||
resp.Header.Del("Connection") // Good practice for proxies
|
||||
|
||||
log.Info().Msgf("Writing response to stream: %s", resp.Status)
|
||||
|
||||
if err := resp.Write(stream); err != nil {
|
||||
log.Error().Err(err).Msg("Failed to write response to stream")
|
||||
resp.Body.Close()
|
||||
return fmt.Errorf("failed to write response to stream: %w", err)
|
||||
}
|
||||
|
||||
resp.Body.Close()
|
||||
|
||||
// Check if client wants to close connection
|
||||
if req.Header.Get("Connection") == "close" {
|
||||
log.Info().Msg("Client requested connection close")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
// Write the entire response (status line, headers, body) to the stream
|
||||
// http.Response.Write handles this for "Connection: close" correctly.
|
||||
// For other connection tokens, manual removal might be needed if they cause issues with QUIC.
|
||||
// For a simple proxy, this is generally sufficient.
|
||||
resp.Header.Del("Connection") // Good practice for proxies
|
||||
|
||||
log.Info().Msgf("Writing response to stream: %s", resp.Status)
|
||||
if err := resp.Write(stream); err != nil {
|
||||
// If writing the response fails, the connection to the client might be broken.
|
||||
// Logging the error is important. The original error will be returned.
|
||||
log.Error().Err(err).Msg("Failed to write response to stream")
|
||||
return fmt.Errorf("failed to write response to stream: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func buildHttpInternalServerError(message string) string {
|
||||
@ -255,6 +281,11 @@ type CloseWrite interface {
|
||||
CloseWrite() error
|
||||
}
|
||||
|
||||
func isValidURL(str string) bool {
|
||||
u, err := url.Parse(str)
|
||||
return err == nil && u.Scheme != "" && u.Host != ""
|
||||
}
|
||||
|
||||
func CopyDataFromQuicToTcp(quicStream quic.Stream, tcpConn net.Conn) {
|
||||
// Create a WaitGroup to wait for both copy operations
|
||||
var wg sync.WaitGroup
|
||||
|
@@ -89,22 +89,3 @@ The relay system provides secure tunneling:
 - Gateways only accept connections to approved resources
 - Each connection requires explicit project authorization
 - Resources remain private to their assigned organization
-
-## Security Measures
-
-### Certificate Lifecycle
-- Certificates have limited validity periods
-- Automatic certificate rotation
-- Immediate certificate revocation capabilities
-
-### Monitoring and Verification
-1. **Continuous Verification**:
-   - Regular heartbeat checks
-   - Certificate chain validation
-   - Connection state monitoring
-
-2. **Security Controls**:
-   - Automatic connection termination on verification failure
-   - Audit logging of all access attempts
-   - Machine identity based authentication

docs/documentation/platform/gateways/networking.mdx (new file, 168 lines)
@@ -0,0 +1,168 @@
---
title: "Networking"
description: "Network configuration and firewall requirements for Infisical Gateway"
---

The Infisical Gateway requires outbound network connectivity to establish secure communication with Infisical's relay infrastructure.
This page outlines the required ports, protocols, and firewall configurations needed for optimal gateway usage.

## Network Architecture

The gateway uses a relay-based architecture to establish secure connections:

1. **Gateway** connects outbound to **Relay Servers** using UDP/QUIC protocol
2. **Relay Servers** facilitate secure communication between Gateway and Infisical Cloud
3. All traffic is end-to-end encrypted using mutual TLS over QUIC

## Required Network Connectivity

### Outbound Connections (Required)

The gateway requires the following outbound connectivity:

| Protocol | Destination | Ports | Purpose |
|----------|-------------|-------|---------|
| UDP | Relay Servers | 49152-65535 | Allocated relay communication (TLS) |
| TCP | app.infisical.com / eu.infisical.com | 443 | API communication and relay allocation |

### Relay Server IP Addresses

Your firewall must allow outbound connectivity to the following Infisical relay servers on dynamically allocated ports.

<Tabs>
  <Tab title="Infisical cloud (US)">
    ```
    54.235.197.91:49152-65535
    18.215.196.229:49152-65535
    3.222.120.233:49152-65535
    34.196.115.157:49152-65535
    ```
  </Tab>
  <Tab title="Infisical cloud (EU)">
    ```
    3.125.237.40:49152-65535
    52.28.157.98:49152-65535
    3.125.176.90:49152-65535
    ```
  </Tab>
  <Tab title="Infisical dedicated">
    Please contact your Infisical account manager for dedicated relay server IP addresses.
  </Tab>
</Tabs>

<Warning>
  These IP addresses are static and managed by Infisical. Any changes will be communicated with 60-day advance notice.
</Warning>

## Protocol Details

### QUIC over UDP

The gateway uses QUIC (Quick UDP Internet Connections) for primary communication:

- **Port 5349**: STUN/TURN over TLS (secure relay communication)
- **Built-in features**: Connection migration, multiplexing, reduced latency
- **Encryption**: TLS 1.3 with certificate pinning

## Understanding Firewall Behavior with UDP

Unlike TCP connections, UDP is a stateless protocol, and depending on your organization's firewall configuration, you may need to adjust network rules accordingly.
When the gateway sends UDP packets to a relay server, the return responses need to be allowed back through the firewall.
Modern firewalls handle this through "connection tracking" (also called "stateful inspection"), but the behavior can vary depending on your firewall configuration.

### Connection Tracking

Modern firewalls automatically track UDP connections and allow return responses. This is the preferred configuration as it:
- Automatically handles return responses
- Reduces firewall rule complexity
- Avoids the need for manual IP whitelisting

In the event that your firewall does not support connection tracking, you will need to whitelist the relay IPs to explicitly define return traffic manually.

## Common Network Scenarios

### Corporate Firewalls

For corporate environments with strict egress filtering:

1. **Whitelist relay IP addresses** (listed above)
2. **Allow UDP port 5349** outbound
3. **Configure connection tracking** for UDP return traffic
4. **Allow ephemeral port range** 49152-65535 for return traffic if connection tracking is disabled

### Cloud Environments (AWS/GCP/Azure)

Configure security groups to allow:
- **Outbound UDP** to relay IPs on port 5349
- **Outbound HTTPS** to app.infisical.com/eu.infisical.com on port 443
- **Inbound UDP** on ephemeral ports (if not using stateful rules)

## Frequently Asked Questions

<Accordion title="What happens if there is a network interruption?">
  The gateway is designed to handle network interruptions gracefully:

  - **Automatic reconnection**: The gateway will automatically attempt to reconnect to relay servers every 5 seconds if the connection is lost
  - **Connection retry logic**: Built-in retry mechanisms handle temporary network outages without manual intervention
  - **Multiple relay servers**: If one relay server is unavailable, the gateway can connect to alternative relay servers
  - **Persistent sessions**: Existing connections are maintained where possible during brief network interruptions
  - **Graceful degradation**: The gateway logs connection issues and continues attempting to restore connectivity

  No manual intervention is typically required during network interruptions.
</Accordion>

<Accordion title="Why does the gateway use QUIC instead of TCP?">
  QUIC (Quick UDP Internet Connections) provides several advantages over traditional TCP for gateway communication:

  - **Faster connection establishment**: QUIC combines transport and security handshakes, reducing connection setup time
  - **Built-in encryption**: TLS 1.3 is integrated into the protocol, ensuring all traffic is encrypted by default
  - **Connection migration**: QUIC connections can survive IP address changes (useful for NAT rebinding)
  - **Reduced head-of-line blocking**: Multiple data streams can be multiplexed without blocking each other
  - **Better performance over unreliable networks**: Advanced congestion control and packet loss recovery
  - **Lower latency**: Optimized for real-time communication between gateway and cloud services

  While TCP is stateful and easier for firewalls to track, QUIC's performance benefits outweigh the additional firewall configuration requirements.
</Accordion>

<Accordion title="Do I need to open any inbound ports on my firewall?">
  No inbound ports need to be opened. The gateway only makes outbound connections:

  - **Outbound UDP** to relay servers on ports 49152-65535
  - **Outbound HTTPS** to Infisical API endpoints
  - **Return responses** are handled by connection tracking or explicit IP whitelisting

  This design maintains security by avoiding the need for inbound firewall rules that could expose your network to external threats.
</Accordion>

<Accordion title="What if my firewall blocks the required UDP ports?">
  If your firewall has strict UDP restrictions:

  1. **Work with your network team** to allow outbound UDP to the specific relay IP addresses
  2. **Use explicit IP whitelisting** if connection tracking is disabled
  3. **Consider network policy exceptions** for the gateway host
  4. **Monitor firewall logs** to identify which specific rules are blocking traffic

  The gateway requires UDP connectivity to function - TCP-only configurations are not supported.
</Accordion>

<Accordion title="How many relay servers does the gateway connect to?">
  The gateway connects to **one relay server at a time**:

  - **Single active connection**: Only one relay connection is established per gateway instance
  - **Automatic failover**: If the current relay becomes unavailable, the gateway will connect to an alternative relay
  - **Load distribution**: Different gateway instances may connect to different relay servers for load balancing
  - **No manual selection**: The Infisical API automatically assigns the optimal relay server based on availability and proximity

  You should whitelist all relay IP addresses to ensure proper failover functionality.
</Accordion>

<Accordion title="Can the relay servers decrypt traffic going through them?">
  No, relay servers cannot decrypt any traffic passing through them:

  - **End-to-end encryption**: All traffic between the gateway and Infisical Cloud is encrypted using mutual TLS with certificate pinning
  - **Relay acts as a tunnel**: The relay server only forwards encrypted packets - it has no access to encryption keys
  - **No data storage**: Relay servers do not store any traffic or network-identifiable information
  - **Certificate isolation**: Each organization has its own private PKI system, ensuring complete tenant isolation

  The relay infrastructure is designed as a secure forwarding mechanism, similar to a VPN tunnel, where the relay provider cannot see the contents of the traffic flowing through it.
</Accordion>
@@ -32,7 +32,7 @@ For detailed installation instructions, refer to the Infisical [CLI Installation
 To function, the Gateway must authenticate with Infisical. This requires a machine identity configured with the appropriate permissions to create and manage a Gateway.
 Once authenticated, the Gateway establishes a secure connection with Infisical to allow your private resources to be reachable.

-### Deployment process
+### Get started

 <Steps>
   <Step title="Create a Gateway Identity">
@@ -4,33 +4,36 @@ sidebarTitle: "Networking"
 description: "Network configuration details for Infisical Cloud"
 ---

 ## Overview

 When integrating your infrastructure with Infisical Cloud, you may need to configure network access controls. This page provides the IP addresses that Infisical uses to communicate with your services.

-## Egress IP Addresses
+## Infisical IP Addresses

-Infisical Cloud operates from two regions: US and EU. If your infrastructure has strict network policies, you may need to allow traffic from Infisical by adding the following IP addresses to your ingress rules. These are the egress IPs Infisical uses when making outbound requests to your services.
+Infisical Cloud operates from multiple regions. If your infrastructure has strict network policies, you may need to allow traffic from Infisical by adding the following IP addresses to your ingress rules. These are the IP addresses that Infisical uses when making outbound requests to your services.

-### US Region
+<Tabs>
+  <Tab title="US Region">
+    ```
+    3.213.63.16
+    54.164.68.7
+    ```
+  </Tab>
+
+  <Tab title="EU Region">
+    ```
+    3.77.89.19
+    3.125.209.189
+    ```
+  </Tab>
+
+  <Tab title="Dedicated Cloud">
+    For dedicated Infisical deployments, please contact your account manager for the specific IP addresses used in your dedicated environment.
+  </Tab>
+</Tabs>

-To allow connections from Infisical US, add these IP addresses to your ingress rules:
+<Warning>
+  These IP addresses are static and managed by Infisical. Any changes will be communicated with 60-day advance notice.
+</Warning>

-- `3.213.63.16`
-- `54.164.68.7`
+## What These IP Addresses Are Used For

-### EU Region
-
-To allow connections from Infisical EU, add these IP addresses to your ingress rules:
-
-- `3.77.89.19`
-- `3.125.209.189`
-
-## Common Use Cases
-
 You may need to allow Infisical’s egress IPs if your services require inbound connections for:

 - Secret rotation - When Infisical needs to send requests to your systems to automatically rotate credentials
 - Dynamic secrets - When Infisical generates and manages temporary credentials for your cloud services
 - Secret integrations - When syncing secrets with third-party services like Azure Key Vault
 - Native authentication with machine identities - When using methods like Kubernetes authentication
+These IP addresses represent the source IPs you'll see when Infisical Cloud makes connections to your infrastructure. All outbound traffic from Infisical Cloud originates from these IP addresses, ensuring predictable source IP addresses for your firewall rules.
@ -46,7 +46,7 @@ description: "Learn how to configure a 1Password Sync for Infisical."
- **Overwrite Destination Secrets**: Removes any secrets at the destination endpoint not present in Infisical.
- **Import Secrets (Prioritize Infisical)**: Imports secrets from the destination endpoint before syncing, prioritizing values from Infisical over 1Password when keys conflict.
- **Import Secrets (Prioritize 1Password)**: Imports secrets from the destination endpoint before syncing, prioritizing values from 1Password over Infisical when keys conflict.
- **Key Schema**: Template that determines how secret names are transformed when syncing, using `{{secretKey}}` as a placeholder for the original secret name.
- **Key Schema**: Template that determines how secret names are transformed when syncing, using `{{secretKey}}` as a placeholder for the original secret name and `{{environment}}` for the environment.
<Note>
  We highly recommend using a Key Schema to ensure that Infisical only manages the specific keys you intend, keeping everything else untouched.
</Note>

@ -40,7 +40,7 @@ description: "Learn how to configure an AWS Parameter Store Sync for Infisical."
- **Overwrite Destination Secrets**: Removes any secrets at the destination endpoint not present in Infisical.
- **Import Secrets (Prioritize Infisical)**: Imports secrets from the destination endpoint before syncing, prioritizing values from Infisical over Parameter Store when keys conflict.
- **Import Secrets (Prioritize AWS Parameter Store)**: Imports secrets from the destination endpoint before syncing, prioritizing values from Parameter Store over Infisical when keys conflict.
- **Key Schema**: Template that determines how secret names are transformed when syncing, using `{{secretKey}}` as a placeholder for the original secret name.
- **Key Schema**: Template that determines how secret names are transformed when syncing, using `{{secretKey}}` as a placeholder for the original secret name and `{{environment}}` for the environment.
<Note>
  We highly recommend using a Key Schema to ensure that Infisical only manages the specific keys you intend, keeping everything else untouched.
</Note>

@ -43,7 +43,7 @@ description: "Learn how to configure an AWS Secrets Manager Sync for Infisical."
- **Overwrite Destination Secrets**: Removes any secrets at the destination endpoint not present in Infisical.
- **Import Secrets (Prioritize Infisical)**: Imports secrets from the destination endpoint before syncing, prioritizing values from Infisical over Secrets Manager when keys conflict.
- **Import Secrets (Prioritize AWS Secrets Manager)**: Imports secrets from the destination endpoint before syncing, prioritizing values from Secrets Manager over Infisical when keys conflict.
- **Key Schema**: Template that determines how secret names are transformed when syncing, using `{{secretKey}}` as a placeholder for the original secret name.
- **Key Schema**: Template that determines how secret names are transformed when syncing, using `{{secretKey}}` as a placeholder for the original secret name and `{{environment}}` for the environment.
<Note>
  We highly recommend using a Key Schema to ensure that Infisical only manages the specific keys you intend, keeping everything else untouched.
</Note>
@ -48,7 +48,7 @@ description: "Learn how to configure an Azure App Configuration Sync for Infisic
- **Overwrite Destination Secrets**: Removes any secrets at the destination endpoint not present in Infisical.
- **Import Secrets (Prioritize Infisical)**: Imports secrets from the destination endpoint before syncing, prioritizing values from Infisical over Azure App Configuration when keys conflict.
- **Import Secrets (Prioritize Azure App Configuration)**: Imports secrets from the destination endpoint before syncing, prioritizing values from Azure App Configuration over Infisical when keys conflict.
- **Key Schema**: Template that determines how secret names are transformed when syncing, using `{{secretKey}}` as a placeholder for the original secret name.
- **Key Schema**: Template that determines how secret names are transformed when syncing, using `{{secretKey}}` as a placeholder for the original secret name and `{{environment}}` for the environment.
<Note>
  We highly recommend using a Key Schema to ensure that Infisical only manages the specific keys you intend, keeping everything else untouched.
</Note>

@ -51,7 +51,7 @@ description: "Learn how to configure a Azure Key Vault Sync for Infisical."
- **Overwrite Destination Secrets**: Removes any secrets at the destination endpoint not present in Infisical.
- **Import Secrets (Prioritize Infisical)**: Imports secrets from the destination endpoint before syncing, prioritizing values from Infisical over Azure Key Vault when keys conflict.
- **Import Secrets (Prioritize Azure Key Vault)**: Imports secrets from the destination endpoint before syncing, prioritizing values from Azure Key Vault over Infisical when keys conflict.
- **Key Schema**: Template that determines how secret names are transformed when syncing, using `{{secretKey}}` as a placeholder for the original secret name.
- **Key Schema**: Template that determines how secret names are transformed when syncing, using `{{secretKey}}` as a placeholder for the original secret name and `{{environment}}` for the environment.
<Note>
  We highly recommend using a Key Schema to ensure that Infisical only manages the specific keys you intend, keeping everything else untouched.
</Note>
@ -39,7 +39,7 @@ description: "Learn how to configure a Camunda Sync for Infisical."
- **Overwrite Destination Secrets**: Removes any secrets at the destination endpoint not present in Infisical.
- **Import Secrets (Prioritize Infisical)**: Imports secrets from the destination endpoint before syncing, prioritizing values from Infisical over Camunda when keys conflict.
- **Import Secrets (Prioritize Camunda)**: Imports secrets from the destination endpoint before syncing, prioritizing values from Camunda over Infisical when keys conflict.
- **Key Schema**: Template that determines how secret names are transformed when syncing, using `{{secretKey}}` as a placeholder for the original secret name.
- **Key Schema**: Template that determines how secret names are transformed when syncing, using `{{secretKey}}` as a placeholder for the original secret name and `{{environment}}` for the environment.
<Note>
  We highly recommend using a Key Schema to ensure that Infisical only manages the specific keys you intend, keeping everything else untouched.
</Note>

@ -46,7 +46,7 @@ description: "Learn how to configure a Databricks Sync for Infisical."
<Note>
  Databricks does not support importing secrets.
</Note>
- **Key Schema**: Template that determines how secret names are transformed when syncing, using `{{secretKey}}` as a placeholder for the original secret name.
- **Key Schema**: Template that determines how secret names are transformed when syncing, using `{{secretKey}}` as a placeholder for the original secret name and `{{environment}}` for the environment.
<Note>
  We highly recommend using a Key Schema to ensure that Infisical only manages the specific keys you intend, keeping everything else untouched.
</Note>

@ -42,7 +42,7 @@ description: "Learn how to configure a GCP Secret Manager Sync for Infisical."
- **Overwrite Destination Secrets**: Removes any secrets at the destination endpoint not present in Infisical.
- **Import Secrets (Prioritize Infisical)**: Imports secrets from the destination endpoint before syncing, prioritizing values from Infisical over GCP Secret Manager when keys conflict.
- **Import Secrets (Prioritize GCP Secret Manager)**: Imports secrets from the destination endpoint before syncing, prioritizing values from GCP Secret Manager over Infisical when keys conflict.
- **Key Schema**: Template that determines how secret names are transformed when syncing, using `{{secretKey}}` as a placeholder for the original secret name.
- **Key Schema**: Template that determines how secret names are transformed when syncing, using `{{secretKey}}` as a placeholder for the original secret name and `{{environment}}` for the environment.
<Note>
  We highly recommend using a Key Schema to ensure that Infisical only manages the specific keys you intend, keeping everything else untouched.
</Note>

@ -62,7 +62,7 @@ description: "Learn how to configure a GitHub Sync for Infisical."
<Note>
  GitHub does not support importing secrets.
</Note>
- **Key Schema**: Template that determines how secret names are transformed when syncing, using `{{secretKey}}` as a placeholder for the original secret name.
- **Key Schema**: Template that determines how secret names are transformed when syncing, using `{{secretKey}}` as a placeholder for the original secret name and `{{environment}}` for the environment.
<Note>
  We highly recommend using a Key Schema to ensure that Infisical only manages the specific keys you intend, keeping everything else untouched.
</Note>

@ -54,7 +54,7 @@ description: "Learn how to configure a Hashicorp Vault Sync for Infisical."
- **Overwrite Destination Secrets**: Removes any secrets at the destination endpoint not present in Infisical.
- **Import Secrets (Prioritize Infisical)**: Imports secrets from the destination endpoint before syncing, prioritizing values from Infisical over Hashicorp Vault when keys conflict.
- **Import Secrets (Prioritize Hashicorp Vault)**: Imports secrets from the destination endpoint before syncing, prioritizing values from Hashicorp Vault over Infisical when keys conflict.
- **Key Schema**: Template that determines how secret names are transformed when syncing, using `{{secretKey}}` as a placeholder for the original secret name.
- **Key Schema**: Template that determines how secret names are transformed when syncing, using `{{secretKey}}` as a placeholder for the original secret name and `{{environment}}` for the environment.
<Note>
  We highly recommend using a Key Schema to ensure that Infisical only manages the specific keys you intend, keeping everything else untouched.
</Note>

@ -55,7 +55,7 @@ description: "Learn how to configure a Humanitec Sync for Infisical."
<Note>
  Humanitec does not support importing secrets.
</Note>
- **Key Schema**: Template that determines how secret names are transformed when syncing, using `{{secretKey}}` as a placeholder for the original secret name.
- **Key Schema**: Template that determines how secret names are transformed when syncing, using `{{secretKey}}` as a placeholder for the original secret name and `{{environment}}` for the environment.
<Note>
  We highly recommend using a Key Schema to ensure that Infisical only manages the specific keys you intend, keeping everything else untouched.
</Note>

@ -57,7 +57,7 @@ description: "Learn how to configure an Oracle Cloud Infrastructure Vault Sync f
- **Overwrite Destination Secrets**: Removes any secrets at the destination endpoint not present in Infisical.
- **Import Secrets (Prioritize Infisical)**: Imports secrets from the destination endpoint before syncing, prioritizing values from Infisical over OCI Vault when keys conflict.
- **Import Secrets (Prioritize OCI Vault)**: Imports secrets from the destination endpoint before syncing, prioritizing values from OCI Vault over Infisical when keys conflict.
- **Key Schema**: Template that determines how secret names are transformed when syncing, using `{{secretKey}}` as a placeholder for the original secret name.
- **Key Schema**: Template that determines how secret names are transformed when syncing, using `{{secretKey}}` as a placeholder for the original secret name and `{{environment}}` for the environment.
<Note>
  We highly recommend using a Key Schema to ensure that Infisical only manages the specific keys you intend, keeping everything else untouched.
</Note>
@ -101,6 +101,10 @@ Key Schemas transform your secret keys by applying a prefix, suffix, or format p

Any destination secrets which do not match the schema will not get deleted or updated by Infisical.

Key Schemas use handlebars syntax to define dynamic values. Here's a full list of available variables:
- `{{secretKey}}` - The key of the secret
- `{{environment}}` - The environment which the secret is in (e.g. dev, staging, prod)

**Example:**
- Infisical key: `SECRET_1`
- Schema: `INFISICAL_{{secretKey}}`
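As a concrete illustration of the substitution described above, here is a minimal TypeScript sketch. The `applyKeySchema` helper and its use of `replaceAll` are illustrative assumptions, not Infisical's actual implementation.

```typescript
// Hypothetical helper showing how a key schema expands for a given secret.
const applyKeySchema = (schema: string, secretKey: string, environment: string): string =>
  schema.replaceAll("{{secretKey}}", secretKey).replaceAll("{{environment}}", environment);

applyKeySchema("INFISICAL_{{secretKey}}", "SECRET_1", "dev");
// => "INFISICAL_SECRET_1"

applyKeySchema("{{environment}}/{{secretKey}}", "DB_PASSWORD", "staging");
// => "staging/DB_PASSWORD"
```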
@ -48,7 +48,7 @@ description: "Learn how to configure a TeamCity Sync for Infisical."
<Note>
  Infisical only syncs secrets from within the target scope; inherited secrets will not be imported.
</Note>
- **Key Schema**: Template that determines how secret names are transformed when syncing, using `{{secretKey}}` as a placeholder for the original secret name.
- **Key Schema**: Template that determines how secret names are transformed when syncing, using `{{secretKey}}` as a placeholder for the original secret name and `{{environment}}` for the environment.
<Note>
  We highly recommend using a Key Schema to ensure that Infisical only manages the specific keys you intend, keeping everything else untouched.
</Note>

@ -56,7 +56,7 @@ description: "Learn how to configure a Terraform Cloud Sync for Infisical."
<Note>
  Terraform Cloud does not support importing secrets.
</Note>
- **Key Schema**: Template that determines how secret names are transformed when syncing, using `{{secretKey}}` as a placeholder for the original secret name.
- **Key Schema**: Template that determines how secret names are transformed when syncing, using `{{secretKey}}` as a placeholder for the original secret name and `{{environment}}` for the environment.
<Note>
  We highly recommend using a Key Schema to ensure that Infisical only manages the specific keys you intend, keeping everything else untouched.
</Note>

@ -43,7 +43,7 @@ description: "Learn how to configure a Vercel Sync for Infisical."
- **Overwrite Destination Secrets**: Removes any secrets at the destination endpoint not present in Infisical.
- **Import Secrets (Prioritize Infisical)**: Imports secrets from the destination endpoint before syncing, prioritizing values from Infisical over Vercel when keys conflict.
- **Import Secrets (Prioritize Vercel)**: Imports secrets from the destination endpoint before syncing, prioritizing values from Vercel over Infisical when keys conflict.
- **Key Schema**: Template that determines how secret names are transformed when syncing, using `{{secretKey}}` as a placeholder for the original secret name.
- **Key Schema**: Template that determines how secret names are transformed when syncing, using `{{secretKey}}` as a placeholder for the original secret name and `{{environment}}` for the environment.
<Note>
  We highly recommend using a Key Schema to ensure that Infisical only manages the specific keys you intend, keeping everything else untouched.
</Note>

@ -44,7 +44,7 @@ description: "Learn how to configure a Windmill Sync for Infisical."
- **Overwrite Destination Secrets**: Removes any secrets at the destination endpoint not present in Infisical.
- **Import Secrets (Prioritize Infisical)**: Imports secrets from the destination endpoint before syncing, prioritizing values from Infisical over Windmill when keys conflict.
- **Import Secrets (Prioritize Windmill)**: Imports secrets from the destination endpoint before syncing, prioritizing values from Windmill over Infisical when keys conflict.
- **Key Schema**: Template that determines how secret names are transformed when syncing, using `{{secretKey}}` as a placeholder for the original secret name.
- **Key Schema**: Template that determines how secret names are transformed when syncing, using `{{secretKey}}` as a placeholder for the original secret name and `{{environment}}` for the environment.
<Note>
  We highly recommend using a Key Schema to ensure that Infisical only manages the specific keys you intend, keeping everything else untouched.
</Note>
@ -233,7 +233,8 @@
        "group": "Gateway",
        "pages": [
          "documentation/platform/gateways/overview",
          "documentation/platform/gateways/gateway-security"
          "documentation/platform/gateways/gateway-security",
          "documentation/platform/gateways/networking"
        ]
      },
      "documentation/platform/project-templates",
@ -78,11 +78,14 @@ export default function CodeInputStep({
  const resendVerificationEmail = async () => {
    setIsResendingVerificationEmail(true);
    setIsLoading(true);
    await mutateAsync({ email });
    setTimeout(() => {
      setIsLoading(false);
      setIsResendingVerificationEmail(false);
    }, 2000);
    try {
      await mutateAsync({ email });
    } finally {
      setTimeout(() => {
        setIsLoading(false);
        setIsResendingVerificationEmail(false);
      }, 1000);
    }
  };

  return (
@ -131,7 +131,27 @@ export const SecretSyncOptionsFields = ({ hideInitialSync }: Props) => {
        render={({ field: { value, onChange }, fieldState: { error } }) => (
          <FormControl
            tooltipClassName="max-w-md"
            tooltipText="When a secret is synced, its key will be injected into the key schema before it reaches the destination. This is useful for organization."
            tooltipText={
              <div className="flex flex-col gap-3">
                <span>
                  When a secret is synced, values will be injected into the key schema before it
                  reaches the destination. This is useful for organization.
                </span>

                <div className="flex flex-col">
                  <span>Available keys:</span>
                  <ul className="list-disc pl-4 text-sm">
                    <li>
                      <code>{"{{secretKey}}"}</code> - The key of the secret
                    </li>
                    <li>
                      <code>{"{{environment}}"}</code> - The environment which the secret is in
                      (e.g. dev, staging, prod)
                    </li>
                  </ul>
                </div>
              </div>
            }
            isError={Boolean(error)}
            isOptional
            errorText={error?.message}
@ -13,11 +13,27 @@ export const BaseSecretSyncSchema = <T extends AnyZodObject | undefined = undefi
    .string()
    .optional()
    .refine(
      (val) =>
        !val || /^(?:[a-zA-Z0-9_\-/]*)(?:\{\{secretKey\}\})(?:[a-zA-Z0-9_\-/]*)$/.test(val),
      (val) => {
        if (!val) return true;

        const allowedOptionalPlaceholders = ["{{environment}}"];

        const allowedPlaceholdersRegexPart = ["{{secretKey}}", ...allowedOptionalPlaceholders]
          .map((p) => p.replace(/[-/\\^$*+?.()|[\]{}]/g, "\\$&")) // Escape regex special characters
          .join("|");

        const allowedContentRegex = new RegExp(
          `^([a-zA-Z0-9_\\-/]|${allowedPlaceholdersRegexPart})*$`
        );
        const contentIsValid = allowedContentRegex.test(val);

        const secretKeyCount = (val.match(/\{\{secretKey\}\}/g) || []).length;

        return contentIsValid && secretKeyCount === 1;
      },
      {
        message:
          "Key schema must include one {{secretKey}} and only contain letters, numbers, dashes, underscores, slashes, and the {{secretKey}} placeholder."
          "Key schema must include exactly one {{secretKey}} placeholder. It can also include {{environment}} placeholders. Only alphanumeric characters (a-z, A-Z, 0-9), dashes (-), underscores (_), and slashes (/) are allowed besides the placeholders."
      }
    )
});
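To make the accepted key schema shapes concrete, here is a standalone sketch of the same rule outside of zod. The `isValidKeySchema` name and the example schemas are illustrative only; the regex construction mirrors the refine above.

```typescript
// Standalone sketch (not the production schema) of the key schema acceptance rule.
const isValidKeySchema = (val: string): boolean => {
  const placeholders = ["{{secretKey}}", "{{environment}}"]
    .map((p) => p.replace(/[-/\\^$*+?.()|[\]{}]/g, "\\$&")) // escape regex special characters
    .join("|");
  const allowedContent = new RegExp(`^([a-zA-Z0-9_\\-/]|${placeholders})*$`);
  const secretKeyCount = (val.match(/\{\{secretKey\}\}/g) || []).length;
  return allowedContent.test(val) && secretKeyCount === 1;
};

console.log(isValidKeySchema("INFISICAL_{{secretKey}}"));       // true
console.log(isValidKeySchema("{{environment}}/{{secretKey}}")); // true
console.log(isValidKeySchema("{{secretKey}}-{{secretKey}}"));   // false: two {{secretKey}} placeholders
console.log(isValidKeySchema("{{secretKey}}.prod"));            // false: "." is not an allowed character
```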
@ -96,7 +96,7 @@ export const useUpdateOrgRole = () => {
        data: { role }
      } = await apiRequest.patch(`/api/v1/organization/${orgId}/roles/${id}`, {
        ...dto,
        permissions: permissions?.length ? packRules(permissions) : undefined
        permissions: permissions ? packRules(permissions) : undefined
      });

      return role;
@ -22,8 +22,12 @@ export const VerifyEmailPage = () => {
   */
  const sendVerificationEmail = async () => {
    if (email) {
      await mutateAsync({ email });
      setStep(2);
      try {
        await mutateAsync({ email });
        setStep(2);
      } catch {
        setLoading(false);
      }
    }
  };
@ -5,6 +5,7 @@ import { faInfoCircle } from "@fortawesome/free-solid-svg-icons";
import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
import { useNavigate, useSearch } from "@tanstack/react-router";

import { OrgPermissionGuardBanner } from "@app/components/permissions/OrgPermissionCan";
import { Button, PageHeader, Tab, TabList, TabPanel, Tabs } from "@app/components/v2";
import { ROUTE_PATHS } from "@app/const/routes";
import {

@ -72,6 +73,8 @@ export const AccessManagementPage = () => {
    }
  ];

  const hasNoAccess = tabSections.every((tab) => tab.isHidden);

  return (
    <div className="container mx-auto flex flex-col justify-between bg-bunker-800 text-white">
      <Helmet>

@ -126,6 +129,7 @@ export const AccessManagementPage = () => {
          ))}
        </Tabs>
      </div>
      {hasNoAccess && <OrgPermissionGuardBanner />}
    </div>
  );
};
@ -159,6 +159,7 @@ export const AddOrgMemberModal = ({
        text: "Failed to invite user to org",
        type: "error"
      });
      return;
    }

    if (serverDetails?.emailConfigured) {

@ -163,6 +163,7 @@ export const AddMemberModal = ({ popUp, handlePopUpToggle }: Props) => {
        text: "Failed to add user to project",
        type: "error"
      });
      return;
    }
    handlePopUpToggle("addMember", false);
    reset();
@ -226,6 +226,23 @@ const ConditionSchema = z
          : el.rhs.trim().startsWith("/")
      ),
    { message: "Invalid Secret Path. Must start with '/'" }
  )
  .refine(
    (val) =>
      val
        .filter((el) => el.operator === PermissionConditionOperators.$EQ)
        .every((el) => !el.rhs.includes(",")),
    { message: '"Equal" checks cannot contain comma separated values. Use "IN" operator instead.' }
  )
  .refine(
    (val) =>
      val
        .filter((el) => el.operator === PermissionConditionOperators.$NEQ)
        .every((el) => !el.rhs.includes(",")),
    {
      message:
        '"Not Equal" checks cannot contain comma separated values. Use "IN" operator with "Forbid" instead.'
    }
  );

export const projectRoleFormSchema = z.object({
@ -1,3 +1,12 @@
## 0.0.3 (June 6, 2025)

* Minor fix for handling malformed URLs for HTTP forwarding

## 0.0.2 (June 6, 2025)

* Bumped default CLI image version from 0.41.1 -> 0.41.8.
* This new image version supports using the gateway as a token reviewer for the Identity Kubernetes Auth method.

## 0.0.1 (May 1, 2025)

* Initial helm release

@ -15,10 +15,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.0.1
version: 0.0.3

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.0.1"
appVersion: "0.0.3"

@ -1,6 +1,6 @@
image:
  pullPolicy: IfNotPresent
  tag: "0.41.1"
  tag: "0.41.82"

secret:
  # The secret that contains the environment variables to be used by the gateway, such as INFISICAL_API_URL and TOKEN