Compare commits


56 Commits

Author SHA1 Message Date
Maidul Islam
53044f3d39 reduce ttl 2024-03-01 15:06:36 -05:00
Maidul Islam
93268f5767 increase license server ttl 2024-03-01 13:06:00 -05:00
Maidul Islam
318dedb987 Merge pull request #1513 from akhilmhdh/fix/delay-audit-log
feat(server): moved back audit log to queue now with keystore license
2024-03-01 12:36:22 -05:00
Akhil Mohan
291edf71aa feat(server): moved back audit log to queue now with keystore license 2024-03-01 23:01:18 +05:30
Maidul Islam
342665783e Merge pull request #1512 from akhilmhdh/fix/delay-audit-log
feat(server): changed license service to use redis cache keystore
2024-03-01 11:53:58 -05:00
Akhil Mohan
6a7241d7d1 feat(server): uninstalled node-cache 2024-03-01 22:20:25 +05:30
Akhil Mohan
51fb680f9c feat(server): changed license service to use redis cache keystore 2024-03-01 22:16:08 +05:30
Daniel Hougaard
0710c9a84a Merge pull request #1509 from rhythmbhiwani/fix-etag-hash-mistype
Fixed mistype from Hash to Etag to fix the cli issue
2024-03-01 17:31:09 +01:00
Maidul Islam
e46bce1520 Update requirements.mdx 2024-03-01 10:55:19 -05:00
Maidul Islam
3919393d33 Merge pull request #1510 from akhilmhdh/fix/audit-log-queue
fix(server): auditlog won't push if retention period is zero
2024-03-01 10:27:49 -05:00
Akhil Mohan
c8b7c37aee fix(server): identity login audit log fixed 2024-03-01 20:10:27 +05:30
Maidul Islam
2641fccce5 add etag field 2024-03-01 09:05:44 -05:00
Akhil Mohan
213f2ed29b fix(server): auditlog won't push if retention period is zero 2024-03-01 19:24:29 +05:30
Rhythm Bhiwani
4dcd000dd1 Fixed mistype from Hash to Etag to fix the cli issue 2024-03-01 17:43:47 +05:30
Maidul Islam
f64cb10282 Merge pull request #1505 from Infisical/daniel/agent-improvements
Feat: Agent exec and custom polling interval
2024-03-01 02:13:13 -05:00
Maidul Islam
a0ea2627ed change hash to etag 2024-03-01 02:11:50 -05:00
Maidul Islam
5c40b538af remove ExecuteCommandWithTimeout 2024-03-01 02:11:27 -05:00
Maidul Islam
8dd94a4e10 move ExecuteCommandWithTimeout to agent file 2024-03-01 02:11:03 -05:00
Maidul Islam
041c4a20a0 example config 2024-03-01 02:10:26 -05:00
Daniel Hougaard
4a2a5f42a8 Renamed to exec to execute, and cleanup 🧼 2024-03-01 07:26:31 +01:00
Daniel Hougaard
9fcdf17a04 Update agent.go 2024-03-01 07:17:27 +01:00
Daniel Hougaard
97ac8cb45a Update agent.go 2024-03-01 07:02:26 +01:00
Daniel Hougaard
e952659415 Update agent.go 2024-03-01 07:02:04 +01:00
Daniel Hougaard
1f3f061a06 Fix: Agent output 2024-03-01 06:46:09 +01:00
Daniel Hougaard
5096ce3bdc Feat: Agent improvements 2024-03-01 06:41:17 +01:00
BlackMagiq
621683f787 Merge pull request #1504 from Infisical/changelog
Update changelog to include updates for Feb
2024-02-29 19:19:37 -08:00
Tuan Dang
f63850e9e9 Add February updates to changelog 2024-02-29 19:17:58 -08:00
Maidul Islam
4ee0a2ec6c update mongo to postgres pin 2024-02-29 18:03:04 -05:00
Maidul Islam
9569d3971a update helm secrets def in docs 2024-02-29 18:01:57 -05:00
Maidul Islam
443b8f747b Update kubernetes-helm.mdx 2024-02-29 17:54:53 -05:00
Maidul Islam
803393c385 Update 20240226094411_instance-id.ts 2024-02-29 17:47:24 -05:00
Maidul Islam
8e95189fd2 Merge pull request #1500 from Infisical/snyk-upgrade-f77609d160bda3cea5e59890389a6fda
[Snyk] Upgrade posthog-node from 3.6.0 to 3.6.2
2024-02-29 17:40:32 -05:00
Maidul Islam
c5f38b6ade Merge pull request #1503 from Infisical/patch-super-user-migration
update admin config to  default uuid if it doesn't exist
2024-02-29 17:11:15 -05:00
Maidul Islam
30a1c5ac86 only add admin config if it doesn't exist 2024-02-29 17:01:03 -05:00
snyk-bot
bbad2ba047 fix: upgrade posthog-node from 3.6.0 to 3.6.2
Snyk has created this PR to upgrade posthog-node from 3.6.0 to 3.6.2.

See this package in npm:
https://www.npmjs.com/package/posthog-node

See this project in Snyk:
https://app.snyk.io/org/maidul98/project/35057e82-ed7d-4e19-ba4d-719a42135cd6?utm_source=github&utm_medium=referral&page=upgrade-pr
2024-02-29 21:47:31 +00:00
Maidul Islam
1445df7015 Merge pull request #1498 from Infisical/patch-super-user-migration
patch super user migration
2024-02-29 16:35:56 -05:00
Maidul Islam
ae4a2089d5 ignore whole file 2024-02-29 16:30:58 -05:00
Maidul Islam
0b924b6e45 add ignore type script 2024-02-29 16:28:36 -05:00
Maidul Islam
1fcac4cadf ignore multi line eslint 2024-02-29 16:15:52 -05:00
Maidul Islam
155e315347 skip verify 2024-02-29 16:00:28 -05:00
Maidul Islam
3dce03180f patch super user migration 2024-02-29 15:35:54 -05:00
Maidul Islam
4748b546c2 Merge pull request #1497 from Infisical/add-cert-to-knex-command
Add postgres cert to migration knex command
2024-02-29 15:05:55 -05:00
Maidul Islam
96887cdbfa add cert support to knex migration 2024-02-29 14:37:01 -05:00
Maidul Islam
553b56e57e fix make down command from Makefile 2024-02-29 14:18:33 -05:00
Maidul Islam
a33f542647 Merge pull request #1493 from akhilmhdh/fix/dup-sec-del
fix(server): duplicate secret deletion made possible
2024-02-29 14:16:44 -05:00
Maidul Islam
06b03fc450 update fnSecretBlindIndexCheck function comment 2024-02-29 14:01:32 -05:00
Maidul Islam
031a834ab1 Merge pull request #1495 from Salman2301/feat-cloud-worker-path
feat: add support for secret path for cloud worker
2024-02-29 12:24:44 -05:00
Akhil Mohan
89e942fea3 Merge pull request #1496 from Infisical/tag-migration-guide
update mongo to postgres doc
2024-02-29 22:28:43 +05:30
Maidul Islam
3c0908a788 update mongo to postgres doc 2024-02-29 11:56:48 -05:00
Salman
14e42b7ff2 feat: add support for secret path for cloud worker 2024-02-29 21:40:19 +05:30
Akhil Mohan
21d5c44ea1 fix(server): duplicate secret deletion made possible 2024-02-29 14:58:45 +05:30
Daniel Hougaard
fb8c4bd415 Feat: Agent improvements 2024-02-29 07:12:30 +01:00
Daniel Hougaard
48bf41ac8c Update cli.go 2024-02-29 07:12:18 +01:00
Daniel Hougaard
1ad916a784 Feat: Agent improvements, Secrets state manager 2024-02-29 07:12:10 +01:00
Daniel Hougaard
c91456838e Update model.go 2024-02-29 07:12:01 +01:00
Daniel Hougaard
79efe64504 Feat: Agent improvements, get ETag from secrets request 2024-02-29 07:11:56 +01:00
23 changed files with 378 additions and 135 deletions

View File

@@ -11,4 +11,4 @@ up-prod:
docker-compose -f docker-compose.prod.yml up --build
down:
docker-compose down
docker compose -f docker-compose.dev.yml down

View File

@@ -47,7 +47,6 @@
"lodash.isequal": "^4.5.0",
"mysql2": "^3.9.1",
"nanoid": "^5.0.4",
"node-cache": "^5.1.2",
"nodemailer": "^6.9.9",
"ora": "^7.0.1",
"passport-github": "^1.1.0",
@@ -56,7 +55,7 @@
"pg": "^8.11.3",
"picomatch": "^3.0.1",
"pino": "^8.16.2",
"posthog-node": "^3.6.0",
"posthog-node": "^3.6.2",
"probot": "^13.0.0",
"smee-client": "^2.0.0",
"tweetnacl": "^1.0.3",
@@ -5706,14 +5705,6 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/clone": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/clone/-/clone-2.1.2.tgz",
"integrity": "sha512-3Pe/CF1Nn94hyhIYpjtiLhdCoEoz0DqQ+988E9gmeEdQZlojxnOb74wctFyuwWQHzqyf9X7C7MG8juUpqBJT8w==",
"engines": {
"node": ">=0.8"
}
},
"node_modules/cluster-key-slot": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/cluster-key-slot/-/cluster-key-slot-1.1.2.tgz",
@@ -9258,17 +9249,6 @@
"resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-5.1.0.tgz",
"integrity": "sha512-eh0GgfEkpnoWDq+VY8OyvYhFEzBk6jIYbRKdIlyTiAXIVJ8PyBaKb0rp7oDtoddbdoHWhq8wwr+XZ81F1rpNdA=="
},
"node_modules/node-cache": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/node-cache/-/node-cache-5.1.2.tgz",
"integrity": "sha512-t1QzWwnk4sjLWaQAS8CHgOJ+RAfmHpxFWmc36IWTiWHQfs0w5JDMBS1b1ZxQteo0vVVuWJvIUKHDkkeK7vIGCg==",
"dependencies": {
"clone": "2.x"
},
"engines": {
"node": ">= 8.0.0"
}
},
"node_modules/node-fetch": {
"version": "2.7.0",
"resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz",
@@ -10318,9 +10298,9 @@
"dev": true
},
"node_modules/posthog-node": {
"version": "3.6.0",
"resolved": "https://registry.npmjs.org/posthog-node/-/posthog-node-3.6.0.tgz",
"integrity": "sha512-N/4//SIQR4fhwbHnDdJ2rQCYdu9wo0EVPK4lVgZswp5R/E42RKlpuO6ZfPsBl+Bcg06OYiOd/WR/jLV90FCoSw==",
"version": "3.6.2",
"resolved": "https://registry.npmjs.org/posthog-node/-/posthog-node-3.6.2.tgz",
"integrity": "sha512-tVIaShR3SxBx17AlAUS86jQTweKuJIFRedBB504fCz7YPnXJTYSrVcUHn5IINE2wu4jUQimQK6ihQr90Djrdrg==",
"dependencies": {
"axios": "^1.6.2",
"rusha": "^0.8.14"

View File

@@ -108,7 +108,6 @@
"lodash.isequal": "^4.5.0",
"mysql2": "^3.9.1",
"nanoid": "^5.0.4",
"node-cache": "^5.1.2",
"nodemailer": "^6.9.9",
"ora": "^7.0.1",
"passport-github": "^1.1.0",
@@ -117,7 +116,7 @@
"pg": "^8.11.3",
"picomatch": "^3.0.1",
"pino": "^8.16.2",
"posthog-node": "^3.6.0",
"posthog-node": "^3.6.2",
"probot": "^13.0.0",
"smee-client": "^2.0.0",
"tweetnacl": "^1.0.3",

View File

@@ -17,7 +17,15 @@ dotenv.config({
export default {
development: {
client: "postgres",
connection: process.env.DB_CONNECTION_URI,
connection: {
connectionString: process.env.DB_CONNECTION_URI,
ssl: process.env.DB_ROOT_CERT
? {
rejectUnauthorized: true,
ca: Buffer.from(process.env.DB_ROOT_CERT, "base64").toString("ascii")
}
: false
},
pool: {
min: 2,
max: 10
@@ -31,7 +39,15 @@ export default {
},
production: {
client: "postgres",
connection: process.env.DB_CONNECTION_URI,
connection: {
connectionString: process.env.DB_CONNECTION_URI,
ssl: process.env.DB_ROOT_CERT
? {
rejectUnauthorized: true,
ca: Buffer.from(process.env.DB_ROOT_CERT, "base64").toString("ascii")
}
: false
},
pool: {
min: 2,
max: 10
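
The knexfile change above makes SSL conditional on a `DB_ROOT_CERT` environment variable holding a base64-encoded CA certificate, for both the development and production targets. A minimal sketch of the same connection factory, assuming knex's PostgreSQL connection types:

```typescript
// Minimal sketch of the SSL-aware connection block, assuming knex's
// PostgreSQL connection types. DB_CONNECTION_URI and DB_ROOT_CERT (a
// base64-encoded CA certificate) are the variables read in the diff.
import type { Knex } from "knex";

const buildConnection = (): Knex.PgConnectionConfig => ({
  connectionString: process.env.DB_CONNECTION_URI,
  // With DB_ROOT_CERT set, decode the CA bundle and require certificate
  // verification; otherwise fall back to a non-SSL connection.
  ssl: process.env.DB_ROOT_CERT
    ? {
        rejectUnauthorized: true,
        ca: Buffer.from(process.env.DB_ROOT_CERT, "base64").toString("ascii")
      }
    : false
});
```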

View File

@@ -1,3 +1,5 @@
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
// @ts-nocheck
import { Knex } from "knex";
import { TableName } from "../schemas";
@@ -8,10 +10,12 @@ export async function up(knex: Knex): Promise<void> {
await knex.schema.alterTable(TableName.SuperAdmin, (t) => {
t.uuid("instanceId").notNullable().defaultTo(knex.fn.uuid());
});
// this is updated to avoid race condition on replication
// eslint-disable-next-line
// @ts-ignore
await knex(TableName.SuperAdmin).update({ id: ADMIN_CONFIG_UUID }).whereNotNull("id").limit(1);
const superUserConfigExists = await knex(TableName.SuperAdmin).where("id", ADMIN_CONFIG_UUID).first();
if (!superUserConfigExists) {
// eslint-disable-next-line
await knex(TableName.SuperAdmin).update({ id: ADMIN_CONFIG_UUID }).whereNotNull("id").limit(1);
}
}
export async function down(knex: Knex): Promise<void> {
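
The rewritten migration above avoids a race under replication by making the UUID rewrite idempotent: the fixed admin-config UUID is only assigned if no row already carries it. The complete guard in one piece, assuming the imports shown or implied in the diff:

```typescript
// Sketch of the idempotency guard: rewrite the admin config row to the fixed
// ADMIN_CONFIG_UUID only when no row already carries it, so a replica
// re-running the migration is a no-op.
import { Knex } from "knex";
import { TableName } from "../schemas"; // as in the diff
// ADMIN_CONFIG_UUID is imported alongside TableName in the real migration.
declare const ADMIN_CONFIG_UUID: string;

export async function up(knex: Knex): Promise<void> {
  const superUserConfigExists = await knex(TableName.SuperAdmin)
    .where("id", ADMIN_CONFIG_UUID)
    .first();
  if (!superUserConfigExists) {
    await knex(TableName.SuperAdmin)
      .update({ id: ADMIN_CONFIG_UUID })
      .whereNotNull("id")
      .limit(1);
  }
}
```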

View File

@@ -24,7 +24,7 @@ export const auditLogQueueServiceFactory = ({
const pushToLog = async (data: TCreateAuditLogDTO) => {
await queueService.queue(QueueName.AuditLog, QueueJobs.AuditLog, data, {
removeOnFail: {
count: 5
count: 3
},
removeOnComplete: true
});
@@ -46,6 +46,7 @@ export const auditLogQueueServiceFactory = ({
const ttl = plan.auditLogsRetentionDays * MS_IN_DAY;
// skip inserting if audit log retention is 0 meaning its not supported
if (ttl === 0) return;
await auditLogDAL.create({
actor: actor.type,
actorMetadata: actor.metadata,
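
Two things change in the audit-log queue above: failed jobs are pruned sooner (`removeOnFail.count` drops from 5 to 3), and a plan whose audit-log retention is zero days now skips the insert entirely. A sketch of that retention guard, assuming `MS_IN_DAY` is one day in milliseconds as in the diff:

```typescript
// Sketch of the retention guard: zero retention days means audit logs are
// not supported on the plan, so the worker returns before any DB write.
const MS_IN_DAY = 24 * 60 * 60 * 1000;

const shouldPersistAuditLog = (auditLogsRetentionDays: number): boolean => {
  const ttl = auditLogsRetentionDays * MS_IN_DAY;
  return ttl !== 0; // skip inserting when retention is zero
};
```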

View File

@@ -5,8 +5,8 @@
// TODO(akhilmhdh): With tony find out the api structure and fill it here
import { ForbiddenError } from "@casl/ability";
import NodeCache from "node-cache";
import { TKeyStoreFactory } from "@app/keystore/keystore";
import { getConfig } from "@app/lib/config/env";
import { BadRequestError } from "@app/lib/errors";
import { logger } from "@app/lib/logger";
@@ -39,6 +39,7 @@ type TLicenseServiceFactoryDep = {
orgDAL: Pick<TOrgDALFactory, "findOrgById">;
permissionService: Pick<TPermissionServiceFactory, "getOrgPermission">;
licenseDAL: TLicenseDALFactory;
keyStore: Pick<TKeyStoreFactory, "setItemWithExpiry" | "getItem" | "deleteItem">;
};
export type TLicenseServiceFactory = ReturnType<typeof licenseServiceFactory>;
@@ -46,12 +47,18 @@ export type TLicenseServiceFactory = ReturnType<typeof licenseServiceFactory>;
const LICENSE_SERVER_CLOUD_LOGIN = "/api/auth/v1/license-server-login";
const LICENSE_SERVER_ON_PREM_LOGIN = "/api/auth/v1/license-login";
const FEATURE_CACHE_KEY = (orgId: string, projectId?: string) => `${orgId}-${projectId || ""}`;
export const licenseServiceFactory = ({ orgDAL, permissionService, licenseDAL }: TLicenseServiceFactoryDep) => {
const LICENSE_SERVER_CLOUD_PLAN_TTL = 30; // 30 second
const FEATURE_CACHE_KEY = (orgId: string) => `infisical-cloud-plan-${orgId}`;
export const licenseServiceFactory = ({
orgDAL,
permissionService,
licenseDAL,
keyStore
}: TLicenseServiceFactoryDep) => {
let isValidLicense = false;
let instanceType = InstanceType.OnPrem;
let onPremFeatures: TFeatureSet = getDefaultOnPremFeatures();
const featureStore = new NodeCache({ stdTTL: 60 });
const appCfg = getConfig();
const licenseServerCloudApi = setupLicenceRequestWithStore(
@@ -75,6 +82,7 @@ export const licenseServiceFactory = ({ orgDAL, permissionService, licenseDAL }:
isValidLicense = true;
return;
}
if (appCfg.LICENSE_KEY) {
const token = await licenseServerOnPremApi.refreshLicence();
if (token) {
@@ -100,22 +108,21 @@ export const licenseServiceFactory = ({ orgDAL, permissionService, licenseDAL }:
logger.info(`getPlan: attempting to fetch plan for [orgId=${orgId}] [projectId=${projectId}]`);
try {
if (instanceType === InstanceType.Cloud) {
const cachedPlan = featureStore.get<TFeatureSet>(FEATURE_CACHE_KEY(orgId, projectId));
if (cachedPlan) return cachedPlan;
const cachedPlan = await keyStore.getItem(FEATURE_CACHE_KEY(orgId));
if (cachedPlan) return JSON.parse(cachedPlan) as TFeatureSet;
const org = await orgDAL.findOrgById(orgId);
if (!org) throw new BadRequestError({ message: "Org not found" });
const {
data: { currentPlan }
} = await licenseServerCloudApi.request.get<{ currentPlan: TFeatureSet }>(
`/api/license-server/v1/customers/${org.customerId}/cloud-plan`,
{
params: {
workspaceId: projectId
}
}
`/api/license-server/v1/customers/${org.customerId}/cloud-plan`
);
await keyStore.setItemWithExpiry(
FEATURE_CACHE_KEY(org.id),
LICENSE_SERVER_CLOUD_PLAN_TTL,
JSON.stringify(currentPlan)
);
featureStore.set(FEATURE_CACHE_KEY(org.id, projectId), currentPlan);
return currentPlan;
}
} catch (error) {
@@ -123,15 +130,20 @@ export const licenseServiceFactory = ({ orgDAL, permissionService, licenseDAL }:
`getPlan: encountered an error when fetching pan [orgId=${orgId}] [projectId=${projectId}] [error]`,
error
);
await keyStore.setItemWithExpiry(
FEATURE_CACHE_KEY(orgId),
LICENSE_SERVER_CLOUD_PLAN_TTL,
JSON.stringify(onPremFeatures)
);
return onPremFeatures;
}
return onPremFeatures;
};
const refreshPlan = async (orgId: string, projectId?: string) => {
const refreshPlan = async (orgId: string) => {
if (instanceType === InstanceType.Cloud) {
featureStore.del(FEATURE_CACHE_KEY(orgId, projectId));
await getPlan(orgId, projectId);
await keyStore.deleteItem(FEATURE_CACHE_KEY(orgId));
await getPlan(orgId);
}
};
@@ -166,7 +178,7 @@ export const licenseServiceFactory = ({ orgDAL, permissionService, licenseDAL }:
quantity: count
});
}
featureStore.del(orgId);
await keyStore.deleteItem(FEATURE_CACHE_KEY(orgId));
} else if (instanceType === InstanceType.EnterpriseOnPrem) {
const usedSeats = await licenseDAL.countOfOrgMembers(null);
await licenseServerOnPremApi.request.patch(`/api/license/v1/license`, { usedSeats });
@@ -215,7 +227,7 @@ export const licenseServiceFactory = ({ orgDAL, permissionService, licenseDAL }:
`/api/license-server/v1/customers/${organization.customerId}/session/trial`,
{ success_url }
);
featureStore.del(FEATURE_CACHE_KEY(orgId));
await keyStore.deleteItem(FEATURE_CACHE_KEY(orgId));
return { url };
};
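
The license service above swaps the in-process `NodeCache` for the shared Redis-backed keystore, so every API replica sees the same cached plan and the 30-second TTL is enforced centrally. A cache-aside sketch of the pattern, where `fetchPlanFromServer` is a hypothetical stand-in for the license-server HTTP call:

```typescript
// Cache-aside sketch of the keystore-backed plan lookup. The keystore method
// names and the 30-second TTL come from the diff; fetchPlanFromServer is a
// hypothetical stand-in for the license-server HTTP call.
const FEATURE_CACHE_KEY = (orgId: string) => `infisical-cloud-plan-${orgId}`;
const LICENSE_SERVER_CLOUD_PLAN_TTL = 30; // seconds

type KeyStore = {
  getItem: (key: string) => Promise<string | null>;
  setItemWithExpiry: (key: string, ttlInSeconds: number, value: string) => Promise<unknown>;
};

const getPlanCached = async <T>(
  keyStore: KeyStore,
  orgId: string,
  fetchPlanFromServer: () => Promise<T>
): Promise<T> => {
  // Shared Redis cache: any replica that fetched the plan recently wins.
  const cached = await keyStore.getItem(FEATURE_CACHE_KEY(orgId));
  if (cached) return JSON.parse(cached) as T;

  const plan = await fetchPlanFromServer();
  await keyStore.setItemWithExpiry(
    FEATURE_CACHE_KEY(orgId),
    LICENSE_SERVER_CLOUD_PLAN_TTL,
    JSON.stringify(plan)
  );
  return plan;
};
```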

View File

@@ -194,7 +194,7 @@ export const registerRoutes = async (
projectRoleDAL,
serviceTokenDAL
});
const licenseService = licenseServiceFactory({ permissionService, orgDAL, licenseDAL });
const licenseService = licenseServiceFactory({ permissionService, orgDAL, licenseDAL, keyStore });
const trustedIpService = trustedIpServiceFactory({
licenseService,
projectDAL,

View File

@@ -39,11 +39,12 @@ export const registerIdentityUaRouter = async (server: FastifyZodProvider) => {
}
},
handler: async (req) => {
const { identityUa, accessToken, identityAccessToken, validClientSecretInfo } =
const { identityUa, accessToken, identityAccessToken, validClientSecretInfo, identityMembershipOrg } =
await server.services.identityUa.login(req.body.clientId, req.body.clientSecret, req.realIp);
await server.services.auditLog.createAuditLog({
...req.auditLogInfo,
orgId: identityMembershipOrg?.orgId,
event: {
type: EventType.LOGIN_IDENTITY_UNIVERSAL_AUTH,
metadata: {

View File

@@ -54,6 +54,8 @@ export const identityUaServiceFactory = ({
const identityUa = await identityUaDAL.findOne({ clientId });
if (!identityUa) throw new UnauthorizedError();
const identityMembershipOrg = await identityOrgMembershipDAL.findOne({ identityId: identityUa.identityId });
checkIPAgainstBlocklist({
ipAddress: ip,
trustedIps: identityUa.clientSecretTrustedIps as TIp[]
@@ -131,7 +133,7 @@ export const identityUaServiceFactory = ({
}
);
return { accessToken, identityUa, validClientSecretInfo, identityAccessToken };
return { accessToken, identityUa, validClientSecretInfo, identityAccessToken, identityMembershipOrg };
};
const attachUa = async ({

View File

@@ -7,7 +7,7 @@ import { TSecretSnapshotServiceFactory } from "@app/ee/services/secret-snapshot/
import { getConfig } from "@app/lib/config/env";
import { buildSecretBlindIndexFromName, encryptSymmetric128BitHexKeyUTF8 } from "@app/lib/crypto";
import { BadRequestError } from "@app/lib/errors";
import { groupBy, pick } from "@app/lib/fn";
import { groupBy, pick, unique } from "@app/lib/fn";
import { logger } from "@app/lib/logger";
import { ActorType } from "../auth/auth-type";
@@ -202,12 +202,13 @@ export const secretServiceFactory = ({
return deletedSecrets;
};
// this is a utility function for secret modification
// this will check given secret name blind index exist or not
// if its a created secret set isNew to true
// thus if these blindindex exist it will throw an error
// vice versa when u need to check for updated secret
// this will also return the blind index grouped by secretName
/**
* Checks and handles secrets using a blind index method.
* The function generates mappings between secret names and their blind indexes, validates user IDs for personal secrets, and retrieves secrets from the database based on their blind indexes.
* For new secrets (isNew = true), it ensures they don't already exist in the database.
* For existing secrets, it verifies their presence in the database.
* If discrepancies are found, errors are thrown. The function returns mappings and the fetched secrets.
*/
const fnSecretBlindIndexCheck = async ({
inputSecrets,
folderId,
@@ -242,10 +243,18 @@ export const secretServiceFactory = ({
if (isNew) {
if (secrets.length) throw new BadRequestError({ message: "Secret already exist" });
} else if (secrets.length !== inputSecrets.length)
throw new BadRequestError({
message: `Secret not found: blind index ${JSON.stringify(keyName2BlindIndex)}`
});
} else {
const secretKeysInDB = unique(secrets, (el) => el.secretBlindIndex as string).map(
(el) => blindIndex2KeyName[el.secretBlindIndex as string]
);
const hasUnknownSecretsProvided = secretKeysInDB.length !== inputSecrets.length;
if (hasUnknownSecretsProvided) {
const keysMissingInDB = Object.keys(keyName2BlindIndex).filter((key) => !secretKeysInDB.includes(key));
throw new BadRequestError({
message: `Secret not found: blind index ${keysMissingInDB.join(",")}`
});
}
}
return { blindIndex2KeyName, keyName2BlindIndex, secrets };
};
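
The new error path above reports only the secret names that are actually missing, instead of dumping the whole blind-index map. A sketch of that lookup, assuming the `keyName2BlindIndex`/`blindIndex2KeyName` mappings from the diff:

```typescript
// Sketch of the missing-key computation: dedupe the blind indexes found in
// the DB, map them back to secret names, and report only the requested
// names that weren't found.
const findMissingSecretKeys = (
  keyName2BlindIndex: Record<string, string>,
  blindIndex2KeyName: Record<string, string>,
  secretsInDB: { secretBlindIndex: string }[]
): string[] => {
  const secretKeysInDB = [...new Set(secretsInDB.map((s) => s.secretBlindIndex))].map(
    (idx) => blindIndex2KeyName[idx]
  );
  return Object.keys(keyName2BlindIndex).filter((key) => !secretKeysInDB.includes(key));
};
```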

View File

@@ -1,5 +1,5 @@
infisical:
address: "http://localhost:8080"
address: "https://app.infisical.com/"
auth:
type: "universal-auth"
config:
@@ -13,3 +13,12 @@ sinks:
templates:
- source-path: my-dot-ev-secret-template
destination-path: my-dot-env.env
config:
polling-interval: 60s
execute:
command: docker-compose -f docker-compose.prod.yml down && docker-compose -f docker-compose.prod.yml up -d
- source-path: my-dot-ev-secret-template1
destination-path: my-dot-env-1.env
config:
exec:
command: mkdir hello-world1
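
The example config above introduces a per-template `config` block with a polling interval and a post-render command (the Go `Template` struct later in this diff names the hook `execute`; the `exec` key in the second template appears to predate that rename). A shape sketch of the block, with field meanings inferred from the agent code:

```typescript
// Shape sketch of the per-template config block; field meanings are
// inferred from the agent's Template struct later in this diff.
interface TemplateExecuteConfig {
  command?: string; // command to run after the template re-renders
  timeout?: number; // seconds before the command is killed (0 = no timeout)
}

interface TemplateConfig {
  "polling-interval"?: string; // e.g. "60s"; how often to poll for changes
  execute?: TemplateExecuteConfig;
}

interface AgentTemplate {
  "source-path": string;
  "destination-path": string;
  config?: TemplateConfig;
}
```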

View File

@@ -490,5 +490,7 @@ func CallGetRawSecretsV3(httpClient *resty.Client, request GetRawSecretsV3Reques
return GetRawSecretsV3Response{}, fmt.Errorf("CallGetRawSecretsV3: Unsuccessful response [%v %v] [status-code=%v] [response=%v]", response.Request.Method, response.Request.URL, response.StatusCode(), response.String())
}
getRawSecretsV3Response.ETag = response.Header().Get(("etag"))
return getRawSecretsV3Response, nil
}

View File

@@ -505,4 +505,5 @@ type GetRawSecretsV3Response struct {
SecretComment string `json:"secretComment"`
} `json:"secrets"`
Imports []any `json:"imports"`
ETag string
}
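
With the `ETag` field added above, the raw-secrets response now carries the server's etag header so callers can detect whether secrets changed between polls. A sketch of the capture step, assuming a fetch-based client rather than the CLI's resty client:

```typescript
// Sketch of the ETag capture: populate the new ETag field from the response
// header so the agent can compare it across polls. A fetch-based client is
// assumed here; the CLI itself uses resty.
interface RawSecretsResponse {
  secrets: unknown[];
  imports: unknown[];
  etag: string;
}

const getRawSecrets = async (url: string, accessToken: string): Promise<RawSecretsResponse> => {
  const res = await fetch(url, { headers: { Authorization: `Bearer ${accessToken}` } });
  if (!res.ok) {
    throw new Error(`CallGetRawSecretsV3: unsuccessful response [status-code=${res.status}]`);
  }
  const body = (await res.json()) as Omit<RawSecretsResponse, "etag">;
  return { ...body, etag: res.headers.get("etag") ?? "" };
};
```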

View File

@@ -5,12 +5,15 @@ package cmd
import (
"bytes"
"context"
"encoding/base64"
"fmt"
"io/ioutil"
"os"
"os/exec"
"os/signal"
"path"
"runtime"
"strings"
"sync"
"syscall"
@@ -71,12 +74,56 @@ type Template struct {
SourcePath string `yaml:"source-path"`
Base64TemplateContent string `yaml:"base64-template-content"`
DestinationPath string `yaml:"destination-path"`
Config struct { // Configurations for the template
PollingInterval string `yaml:"polling-interval"` // How often to poll for changes in the secret
Execute struct {
Command string `yaml:"command"` // Command to execute once the template has been rendered
Timeout int64 `yaml:"timeout"` // Timeout for the command
} `yaml:"execute"` // Command to execute once the template has been rendered
} `yaml:"config"`
}
func ReadFile(filePath string) ([]byte, error) {
return ioutil.ReadFile(filePath)
}
func ExecuteCommandWithTimeout(command string, timeout int64) error {
shell := [2]string{"sh", "-c"}
if runtime.GOOS == "windows" {
shell = [2]string{"cmd", "/C"}
} else {
currentShell := os.Getenv("SHELL")
if currentShell != "" {
shell[0] = currentShell
}
}
ctx := context.Background()
if timeout > 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(context.Background(), time.Duration(timeout)*time.Second)
defer cancel()
}
cmd := exec.CommandContext(ctx, shell[0], shell[1], command)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
if exitError, ok := err.(*exec.ExitError); ok { // type assertion
if exitError.ProcessState.ExitCode() == -1 {
return fmt.Errorf("command timed out")
}
}
return err
} else {
return nil
}
}
func FileExists(filepath string) bool {
info, err := os.Stat(filepath)
if os.IsNotExist(err) {
@@ -170,20 +217,24 @@ func ParseAgentConfig(configFile []byte) (*Config, error) {
return config, nil
}
func secretTemplateFunction(accessToken string) func(string, string, string) ([]models.SingleEnvironmentVariable, error) {
func secretTemplateFunction(accessToken string, existingEtag string, currentEtag *string) func(string, string, string) ([]models.SingleEnvironmentVariable, error) {
return func(projectID, envSlug, secretPath string) ([]models.SingleEnvironmentVariable, error) {
secrets, err := util.GetPlainTextSecretsViaMachineIdentity(accessToken, projectID, envSlug, secretPath, false)
res, err := util.GetPlainTextSecretsViaMachineIdentity(accessToken, projectID, envSlug, secretPath, false)
if err != nil {
return nil, err
}
return secrets, nil
if existingEtag != res.Etag {
*currentEtag = res.Etag
}
return res.Secrets, nil
}
}
func ProcessTemplate(templatePath string, data interface{}, accessToken string) (*bytes.Buffer, error) {
func ProcessTemplate(templatePath string, data interface{}, accessToken string, existingEtag string, currentEtag *string) (*bytes.Buffer, error) {
// custom template function to fetch secrets from Infisical
secretFunction := secretTemplateFunction(accessToken)
secretFunction := secretTemplateFunction(accessToken, existingEtag, currentEtag)
funcs := template.FuncMap{
"secret": secretFunction,
}
@@ -203,7 +254,7 @@ func ProcessTemplate(templatePath string, data interface{}, accessToken string)
return &buf, nil
}
func ProcessBase64Template(encodedTemplate string, data interface{}, accessToken string) (*bytes.Buffer, error) {
func ProcessBase64Template(encodedTemplate string, data interface{}, accessToken string, existingEtag string, currentEtag *string) (*bytes.Buffer, error) {
// custom template function to fetch secrets from Infisical
decoded, err := base64.StdEncoding.DecodeString(encodedTemplate)
if err != nil {
@@ -212,7 +263,7 @@ func ProcessBase64Template(encodedTemplate string, data interface{}, accessToken
templateString := string(decoded)
secretFunction := secretTemplateFunction(accessToken)
secretFunction := secretTemplateFunction(accessToken, existingEtag, currentEtag) // TODO: Fix this
funcs := template.FuncMap{
"secret": secretFunction,
}
@@ -250,7 +301,16 @@ type TokenManager struct {
}
func NewTokenManager(fileDeposits []Sink, templates []Template, clientIdPath string, clientSecretPath string, newAccessTokenNotificationChan chan bool, removeClientSecretOnRead bool, exitAfterAuth bool) *TokenManager {
return &TokenManager{filePaths: fileDeposits, templates: templates, clientIdPath: clientIdPath, clientSecretPath: clientSecretPath, newAccessTokenNotificationChan: newAccessTokenNotificationChan, removeClientSecretOnRead: removeClientSecretOnRead, exitAfterAuth: exitAfterAuth}
return &TokenManager{
filePaths: fileDeposits,
templates: templates,
clientIdPath: clientIdPath,
clientSecretPath: clientSecretPath,
newAccessTokenNotificationChan: newAccessTokenNotificationChan,
removeClientSecretOnRead: removeClientSecretOnRead,
exitAfterAuth: exitAfterAuth,
}
}
func (tm *TokenManager) SetToken(token string, accessTokenTTL time.Duration, accessTokenMaxTTL time.Duration) {
@@ -428,38 +488,80 @@ func (tm *TokenManager) WriteTokenToFiles() {
}
}
func (tm *TokenManager) FetchSecrets() {
log.Info().Msgf("template engine started...")
func (tm *TokenManager) WriteTemplateToFile(bytes *bytes.Buffer, template *Template) {
if err := WriteBytesToFile(bytes, template.DestinationPath); err != nil {
log.Error().Msgf("template engine: unable to write secrets to path because %s. Will try again on next cycle", err)
return
}
log.Info().Msgf("template engine: secret template at path %s has been rendered and saved to path %s", template.SourcePath, template.DestinationPath)
}
func (tm *TokenManager) MonitorSecretChanges(secretTemplate Template, sigChan chan os.Signal) {
pollingInterval := time.Duration(5 * time.Minute)
if secretTemplate.Config.PollingInterval != "" {
interval, err := util.ConvertPollingIntervalToTime(secretTemplate.Config.PollingInterval)
if err != nil {
log.Error().Msgf("unable to convert polling interval to time because %v", err)
sigChan <- syscall.SIGINT
return
} else {
pollingInterval = interval
}
}
var existingEtag string
var currentEtag string
var firstRun = true
execTimeout := secretTemplate.Config.Execute.Timeout
execCommand := secretTemplate.Config.Execute.Command
for {
token := tm.GetToken()
if token != "" {
for _, secretTemplate := range tm.templates {
var processedTemplate *bytes.Buffer
var err error
if secretTemplate.SourcePath != "" {
processedTemplate, err = ProcessTemplate(secretTemplate.SourcePath, nil, token)
} else {
processedTemplate, err = ProcessBase64Template(secretTemplate.Base64TemplateContent, nil, token)
}
if err != nil {
log.Error().Msgf("template engine: unable to render secrets because %s. Will try again on next cycle", err)
var processedTemplate *bytes.Buffer
var err error
continue
}
if err := WriteBytesToFile(processedTemplate, secretTemplate.DestinationPath); err != nil {
log.Error().Msgf("template engine: unable to write secrets to path because %s. Will try again on next cycle", err)
continue
}
log.Info().Msgf("template engine: secret template at path %s has been rendered and saved to path %s", secretTemplate.SourcePath, secretTemplate.DestinationPath)
if secretTemplate.SourcePath != "" {
processedTemplate, err = ProcessTemplate(secretTemplate.SourcePath, nil, token, existingEtag, &currentEtag)
} else {
processedTemplate, err = ProcessBase64Template(secretTemplate.Base64TemplateContent, nil, token, existingEtag, &currentEtag)
}
// fetch new secrets every 5 minutes (TODO: add PubSub in the future )
time.Sleep(5 * time.Minute)
if err != nil {
log.Error().Msgf("unable to process template because %v", err)
} else {
if (existingEtag != currentEtag) || firstRun {
tm.WriteTemplateToFile(processedTemplate, &secretTemplate)
existingEtag = currentEtag
if !firstRun && execCommand != "" {
log.Info().Msgf("executing command: %s", execCommand)
err := ExecuteCommandWithTimeout(execCommand, execTimeout)
if err != nil {
log.Error().Msgf("unable to execute command because %v", err)
}
}
if firstRun {
firstRun = false
}
}
}
time.Sleep(pollingInterval)
} else {
// It fails to get the access token. So we will re-try in 3 seconds. We do this because if we don't, the user will have to wait for the next polling interval to get the first secret render.
time.Sleep(3 * time.Second)
}
}
}
@@ -544,7 +646,11 @@ var agentCmd = &cobra.Command{
tm := NewTokenManager(filePaths, agentConfig.Templates, configUniversalAuthType.ClientIDPath, configUniversalAuthType.ClientSecretPath, tokenRefreshNotifier, configUniversalAuthType.RemoveClientSecretOnRead, agentConfig.Infisical.ExitAfterAuth)
go tm.ManageTokenLifecycle()
go tm.FetchSecrets()
for i, template := range agentConfig.Templates {
log.Info().Msgf("template engine started for template %v...", i+1)
go tm.MonitorSecretChanges(template, sigChan)
}
for {
select {
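
`MonitorSecretChanges` above replaces the old fixed five-minute `FetchSecrets` loop: each template gets its own goroutine that re-renders on its configured interval but only rewrites the destination file, and runs the configured command, when the ETag actually changed (the command is skipped on the first render). A TypeScript sketch of that control flow, with `fetchRendered` standing in for `ProcessTemplate`:

```typescript
// Control-flow sketch of the per-template monitor loop: re-render every
// cycle, but only rewrite the file (and run the exec hook) when the ETag
// changed. fetchRendered stands in for ProcessTemplate.
const monitorSecretChanges = async (
  fetchRendered: () => Promise<{ output: string; etag: string }>,
  writeFile: (output: string) => Promise<void>,
  execCommand: (() => Promise<void>) | null,
  pollingIntervalMs: number
): Promise<never> => {
  let existingEtag = "";
  let firstRun = true;
  for (;;) {
    const { output, etag } = await fetchRendered();
    if (etag !== existingEtag || firstRun) {
      await writeFile(output);
      existingEtag = etag;
      // The exec hook is skipped on the very first render, as in the agent.
      if (!firstRun && execCommand) await execCommand();
      firstRun = false;
    }
    await new Promise((resolve) => setTimeout(resolve, pollingIntervalMs));
  }
};
```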

View File

@@ -34,6 +34,11 @@ type SingleEnvironmentVariable struct {
Comment string `json:"comment"`
}
type PlaintextSecretResult struct {
Secrets []SingleEnvironmentVariable
Etag string
}
type SingleFolder struct {
ID string `json:"_id"`
Name string `json:"name"`

View File

@@ -0,0 +1,41 @@
package util
import (
"fmt"
"strconv"
"time"
)
// ConvertPollingIntervalToTime converts a string representation of a polling interval to a time.Duration
func ConvertPollingIntervalToTime(pollingInterval string) (time.Duration, error) {
length := len(pollingInterval)
if length < 2 {
return 0, fmt.Errorf("invalid format")
}
unit := pollingInterval[length-1:]
numberPart := pollingInterval[:length-1]
number, err := strconv.Atoi(numberPart)
if err != nil {
return 0, err
}
switch unit {
case "s":
if number < 60 {
return 0, fmt.Errorf("polling interval should be at least 60 seconds")
}
return time.Duration(number) * time.Second, nil
case "m":
return time.Duration(number) * time.Minute, nil
case "h":
return time.Duration(number) * time.Hour, nil
case "d":
return time.Duration(number) * 24 * time.Hour, nil
case "w":
return time.Duration(number) * 7 * 24 * time.Hour, nil
default:
return 0, fmt.Errorf("invalid time unit")
}
}
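
For reference, the same parsing rules as `ConvertPollingIntervalToTime` above, ported to a TypeScript sketch that returns milliseconds: a numeric prefix plus one of `s`/`m`/`h`/`d`/`w`, with sub-minute second values rejected.

```typescript
// TypeScript port of the Go interval parser above, returning milliseconds.
const convertPollingIntervalToMs = (pollingInterval: string): number => {
  if (pollingInterval.length < 2) throw new Error("invalid format");
  const unit = pollingInterval.slice(-1);
  const amount = Number.parseInt(pollingInterval.slice(0, -1), 10);
  if (Number.isNaN(amount)) throw new Error("invalid number");
  const second = 1000;
  switch (unit) {
    case "s":
      if (amount < 60) throw new Error("polling interval should be at least 60 seconds");
      return amount * second;
    case "m":
      return amount * 60 * second;
    case "h":
      return amount * 60 * 60 * second;
    case "d":
      return amount * 24 * 60 * 60 * second;
    case "w":
      return amount * 7 * 24 * 60 * 60 * second;
    default:
      throw new Error("invalid time unit");
  }
};
```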

View File

@@ -152,7 +152,7 @@ func GetPlainTextSecretsViaJTW(JTWToken string, receiversPrivateKey string, work
return plainTextSecrets, nil
}
func GetPlainTextSecretsViaMachineIdentity(accessToken string, workspaceId string, environmentName string, secretsPath string, includeImports bool) ([]models.SingleEnvironmentVariable, error) {
func GetPlainTextSecretsViaMachineIdentity(accessToken string, workspaceId string, environmentName string, secretsPath string, includeImports bool) (models.PlaintextSecretResult, error) {
httpClient := resty.New()
httpClient.SetAuthToken(accessToken).
SetHeader("Accept", "application/json")
@@ -170,12 +170,12 @@ func GetPlainTextSecretsViaMachineIdentity(accessToken string, workspaceId strin
rawSecrets, err := api.CallGetRawSecretsV3(httpClient, api.GetRawSecretsV3Request{WorkspaceId: workspaceId, SecretPath: secretsPath, Environment: environmentName})
if err != nil {
return nil, err
return models.PlaintextSecretResult{}, err
}
plainTextSecrets := []models.SingleEnvironmentVariable{}
if err != nil {
return nil, fmt.Errorf("unable to decrypt your secrets [err=%v]", err)
return models.PlaintextSecretResult{}, fmt.Errorf("unable to decrypt your secrets [err=%v]", err)
}
for _, secret := range rawSecrets.Secrets {
@@ -189,7 +189,10 @@ func GetPlainTextSecretsViaMachineIdentity(accessToken string, workspaceId strin
// }
// }
return plainTextSecrets, nil
return models.PlaintextSecretResult{
Secrets: plainTextSecrets,
Etag: rawSecrets.ETag,
}, nil
}
func InjectImportedSecret(plainTextWorkspaceKey []byte, secrets []models.SingleEnvironmentVariable, importedSecrets []api.ImportedSecretV3) ([]models.SingleEnvironmentVariable, error) {

View File

@@ -4,7 +4,22 @@ title: "Changelog"
The changelog below reflects new product developments and updates on a monthly basis.
## January 2024
## Feb 2024
- Added org-scoped authentication enforcement for SAML
- Added support for [SCIM](https://infisical.com/docs/documentation/platform/scim/overview) along with instructions for setting it up with [Okta](https://infisical.com/docs/documentation/platform/scim/okta), [Azure](https://infisical.com/docs/documentation/platform/scim/azure), and [JumpCloud](https://infisical.com/docs/documentation/platform/scim/jumpcloud).
- Pushed out project update for non-E2EE w/ new endpoints like for project creation and member invitation.
- Added API Integration testing for new backend.
- Added capability to create projects in Terraform.
- Added slug-based capabilities to both organizations and projects to gradually make the API more developer-friendly moving forward.
- Fixed + improved various analytics/telemetry-related items.
- Fixed various issues associated with the Python SDK: build during installation on Mac OS, Rust dependency.
- Updated self-hosting documentation to reflect [new backend](https://infisical.com/docs/self-hosting/overview).
- Released [Postgres-based Infisical helm chart](https://cloudsmith.io/~infisical/repos/helm-charts/packages/detail/helm/infisical-standalone/).
- Added checks to ensure that breaking API changes don't get released.
- Automated API reference documentation to be inline with latest releases of Infisical.
## Jan 2024
- Completed Postgres migration initiative with restructed Fastify-based backend.
- Reduced size of Infisical Node.js SDK by ≈90%.
- Added secret fallback support to all SDK's.
- Added Machine Identity support to [Terraform Provider](https://github.com/Infisical/terraform-provider-infisical).
@@ -12,21 +27,21 @@ The changelog below reflects new product developments and updates on a monthly b
- Added symmetric encryption support to all SDK's.
- Fixed secret reminders bug, where reminders were not being updated correctly.
## December 2023
## Dec 2023
- Released [(machine) identities](https://infisical.com/docs/documentation/platform/identities/overview) and [universal auth](https://infisical.com/docs/documentation/platform/identities/universal-auth) features.
- Created new cross-language SDKs for [Python](https://infisical.com/docs/sdks/languages/python), [Node](https://infisical.com/docs/sdks/languages/node), and [Java](https://infisical.com/docs/sdks/languages/java).
- Released first version of the [Infisical Agent](https://infisical.com/docs/infisical-agent/overview)
- Added ability to [manage folders via CLI](https://infisical.com/docs/cli/commands/secrets).
## November 2023
## Nov 2023
- Replaced internal [Winston](https://github.com/winstonjs/winston) with [Pino](https://github.com/pinojs/pino) logging library with external logging to AWS CloudWatch
- Added admin panel to self-hosting experience.
- Released [secret rotation](https://infisical.com/docs/documentation/platform/secret-rotation/overview) feature with preliminary support for rotating [SendGrid](https://infisical.com/docs/documentation/platform/secret-rotation/sendgrid), [PostgreSQL/CockroachDB](https://infisical.com/docs/documentation/platform/secret-rotation/postgres), and [MySQL/MariaDB](https://infisical.com/docs/documentation/platform/secret-rotation/mysql) credentials.
- Released secret reminders feature.
## October 2023
## Oct 2023
- Added support for [GitLab SSO](https://infisical.com/docs/documentation/platform/sso/gitlab).
- Became SOC 2 (Type II) certified.
@@ -35,7 +50,7 @@ The changelog below reflects new product developments and updates on a monthly b
- Added native [Hasura Cloud integration](https://infisical.com/docs/integrations/cloud/hasura-cloud).
- Updated resource deletion logic for user, organization, and project deletion.
## September 2023
## Sep 2023
- Released [secret approvals](https://infisical.com/docs/documentation/platform/pr-workflows) feature.
- Released an update to access controls; every user role now clearly defines and enforces a certain set of conditions across Infisical.
@@ -43,7 +58,7 @@ The changelog below reflects new product developments and updates on a monthly b
- Added a native integration with [Qovery](https://infisical.com/docs/integrations/cloud/qovery).
- Added service token generation capability for the CLI.
## August 2023
## Aug 2023
- Release Audit Logs V2.
- Add support for [GitHub SSO](https://infisical.com/docs/documentation/platform/sso/github).
@@ -171,7 +186,7 @@ The changelog below reflects new product developments and updates on a monthly b
- Added sorting capability to sort keys by name alphabetically in dashboard.
- Added downloading secrets back as `.env` file capability.
## August 2022
## Aug 2022
- Released first version of the Infisical platform with push/pull capability and end-to-end encryption.
- Improved security handling of authentication tokens by storing refresh tokens in HttpOnly cookies.

View File

@@ -58,7 +58,7 @@ Redis requirements:
- Use Redis versions 6.x or 7.x. We advise upgrading to at least Redis 6.2.
- Redis Cluster mode is currently not supported; use Redis Standalone, with or without High Availability (HA).
- Redis storage needs are minimal: a setup with 1 vCPU, 1 GB RAM, and 1GB SSD will be sufficient for most deployments.
- Redis storage needs are minimal: a setup with 1 vCPU, 1 GB RAM, and 1GB SSD will be sufficient for small deployments.
## Supported Web Browsers
@@ -68,4 +68,4 @@ Infisical supports a range of web browsers. However, features such as browser-ba
- [Google Chrome](https://www.google.com/chrome/)
- [Chromium](https://www.chromium.org/getting-involved/dev-channel/)
- [Apple Safari](https://www.apple.com/safari/)
- [Microsoft Edge](https://www.microsoft.com/en-us/edge?form=MA13FJ)
- [Microsoft Edge](https://www.microsoft.com/en-us/edge?form=MA13FJ)

View File

@@ -46,27 +46,34 @@ description: "Use Helm chart to install Infisical on your Kubernetes cluster"
<Tabs>
<Tab title="Proof of concept deployment">
For test or proof-of-concept purposes, you may omit `DB_CONNECTION_URI` and `REDIS_URL` from `infisical-secrets`. This is because the Helm chart will automatically provision and connect to the in-cluster instances of Postgres and Redis by default.
For test or proof-of-concept purposes, you may omit `DB_CONNECTION_URI` and `REDIS_URL` from `infisical-secrets`. This is because the Helm chart will automatically provision and connect to the in-cluster instances of Postgres and Redis by default.
```yaml simple-values-example.yaml
apiVersion: v1
kind: Secret
metadata:
name: infisical-secrets
type: Opaque
stringData:
AUTH_SECRET: <>
ENCRYPTION_KEY: <>
```
</Tab>
<Tab title="Production deployment">
For production environments, we recommend using Cloud-based Platform as a Service (PaaS) solutions for PostgreSQL and Redis to ensure high availability. In on-premise setups, it's recommended to configure Redis and Postgres for high availability, either by using Bitnami charts or a custom configuration.
```yaml simple-values-example.yaml
apiVersion: v1
kind: Secret
metadata:
name: infisical-secrets
type: Opaque
stringData:
AUTH_SECRET: <>
ENCRYPTION_KEY: <>
REDIS_URL: <>
DB_CONNECTION_URI: <>
```
</Tab>
</Tabs>
```yaml simple-values-example.yaml
apiVersion: v1
kind: Secret
metadata:
name: infisical-secrets
type: Opaque
stringData:
AUTH_SECRET: <>
ENCRYPTION_KEY: <>
REDIS_URL: <>
DB_CONNECTION_URI: <>
```
</Step>
<Step title="Database schema migration ">

View File

@@ -14,6 +14,7 @@ The newly released Postgres version of Infisical is the only version of Infisica
Before starting the migration, ensure you have the following command line tools installed:
- [git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git)
- [pg_dump](https://www.postgresql.org/docs/current/app-pgrestore.html)
- [pg_restore](https://www.postgresql.org/docs/current/app-pgdump.html)
- [mongodump](https://www.mongodb.com/docs/database-tools/mongodump/)
@@ -103,7 +104,7 @@ Once started, the migration script will transform MongoDB data into an equivalen
<Step title="Clone Infisical Repository">
Clone the Infisical MongoDB repository.
```
git clone https://github.com/Infisical/infisical.git
git clone -b infisical/v0.46.11-postgres https://github.com/Infisical/infisical.git
```
</Step>
<Step title="Install dependencies for backend">
@@ -186,9 +187,14 @@ Rather than transferring the data row-by-row from your local machine to the prod
## Post-Migration Steps
After successfully migrating the data to PostgreSQL, you can proceed to deploy Infisical using your preferred deployment method.
Refer to [Infisical's self-hosting documentation](https://infisical.com/docs/self-hosting/overview) for deployment options.
Remember to use your production PostgreSQL connection string for the new deployment and transfer all [environment variables](/self-hosting/configuration/envars) from the MongoDB version of Infisical to the new version (they are all compatible).
Once the data migration to PostgreSQL is complete, you're ready to deploy Infisical using the deployment method of your choice.
For guidance on deployment options, please visit the [self-hosting documentation](/self-hosting/overview).
Remember to transfer the necessary [environment variables](/self-hosting/configuration/envars) from the MongoDB version of Infisical to the new Postgres based Infisical; rest assured, they are fully compatible.
<Warning>
The first deployment of Postgres based Infisical must be deployed with Docker image tag `v0.46.11-postgres`.
After deploying this version, you can proceed to update to any subsequent versions.
</Warning>
## Additional discussion
- When you visit Infisical's [docker hub](https://hub.docker.com/r/infisical/infisical) page, you will notice that image tags end with `-postgres`.

View File

@@ -1,10 +1,12 @@
import { useEffect, useState } from "react";
import { useRouter } from "next/router";
import axios from "axios";
import queryString from "query-string";
import { useNotificationContext } from "@app/components/context/Notifications/NotificationProvider";
import { useCreateIntegration, useGetWorkspaceById } from "@app/hooks/api";
import { Button, Card, CardTitle, FormControl, Select, SelectItem } from "../../../components/v2";
import { Button, Card, CardTitle, FormControl, Input, Select, SelectItem } from "../../../components/v2";
import {
useGetIntegrationAuthApps,
useGetIntegrationAuthById
@@ -13,6 +15,7 @@ import {
export default function CloudflareWorkersIntegrationPage() {
const router = useRouter();
const { mutateAsync } = useCreateIntegration();
const { createNotification } = useNotificationContext();
const { integrationAuthId } = queryString.parse(router.asPath.split("?")[1]);
const { data: workspace } = useGetWorkspaceById(localStorage.getItem("projectData.id") ?? "");
@@ -22,6 +25,8 @@ export default function CloudflareWorkersIntegrationPage() {
});
const [selectedSourceEnvironment, setSelectedSourceEnvironment] = useState("");
const [secretPath, setSecretPath] = useState("/");
const [targetApp, setTargetApp] = useState("");
const [targetAppId, setTargetAppId] = useState("");
@@ -56,7 +61,7 @@ export default function CloudflareWorkersIntegrationPage() {
app: targetApp,
appId: targetAppId,
sourceEnvironment: selectedSourceEnvironment,
secretPath: "/"
secretPath
});
setIsLoading(false);
@@ -64,6 +69,18 @@ export default function CloudflareWorkersIntegrationPage() {
router.push(`/integrations/${localStorage.getItem("projectData.id")}`);
} catch (err) {
console.error(err);
let errorMessage: string = "Something went wrong!";
if (axios.isAxiosError(err)) {
const { message } = err?.response?.data as { message: string };
errorMessage = message;
}
createNotification({
text: errorMessage,
type: "error"
});
setIsLoading(false);
}
};
@@ -96,6 +113,13 @@ export default function CloudflareWorkersIntegrationPage() {
))}
</Select>
</FormControl>
<FormControl label="Infisical Secret Path" className="mt-2 px-6">
<Input
value={secretPath}
onChange={(evt) => setSecretPath(evt.target.value)}
placeholder="Provide a path, default is /"
/>
</FormControl>
<FormControl label="Cloudflare Workers Project" className="mt-4 px-6">
<Select
value={targetApp}
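
The integration page above now surfaces the server's error message in a notification instead of only logging to the console. A sketch of the extraction step, assuming axios's `isAxiosError` type guard:

```typescript
// Sketch of the error-message extraction: prefer the server-provided message
// when the failure is an AxiosError, otherwise fall back to a generic text.
import axios from "axios";

const extractErrorMessage = (err: unknown): string => {
  if (axios.isAxiosError(err)) {
    const data = err.response?.data as { message?: string } | undefined;
    if (data?.message) return data.message;
  }
  return "Something went wrong!";
};
```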