Mirror of https://github.com/Infisical/infisical.git (synced 2025-08-19 21:17:10 +00:00)

Compare commits: branch daniel/age, 15 commits
a0ea2627ed
5c40b538af
8dd94a4e10
041c4a20a0
4a2a5f42a8
9fcdf17a04
97ac8cb45a
e952659415
1f3f061a06
5096ce3bdc
fb8c4bd415
48bf41ac8c
1ad916a784
c91456838e
79efe64504
Makefile (2 changed lines)

@@ -11,4 +11,4 @@ up-prod:
	docker-compose -f docker-compose.prod.yml up --build

down:
	docker compose -f docker-compose.dev.yml down
	docker-compose down
@@ -17,15 +17,7 @@ dotenv.config({
export default {
  development: {
    client: "postgres",
    connection: {
      connectionString: process.env.DB_CONNECTION_URI,
      ssl: process.env.DB_ROOT_CERT
        ? {
            rejectUnauthorized: true,
            ca: Buffer.from(process.env.DB_ROOT_CERT, "base64").toString("ascii")
          }
        : false
    },
    connection: process.env.DB_CONNECTION_URI,
    pool: {
      min: 2,
      max: 10
@@ -39,15 +31,7 @@ export default {
  },
  production: {
    client: "postgres",
    connection: {
      connectionString: process.env.DB_CONNECTION_URI,
      ssl: process.env.DB_ROOT_CERT
        ? {
            rejectUnauthorized: true,
            ca: Buffer.from(process.env.DB_ROOT_CERT, "base64").toString("ascii")
          }
        : false
    },
    connection: process.env.DB_CONNECTION_URI,
    pool: {
      min: 2,
      max: 10
@@ -1,5 +1,3 @@
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
// @ts-nocheck
import { Knex } from "knex";

import { TableName } from "../schemas";
@@ -10,18 +8,10 @@ export async function up(knex: Knex): Promise<void> {
  await knex.schema.alterTable(TableName.SuperAdmin, (t) => {
    t.uuid("instanceId").notNullable().defaultTo(knex.fn.uuid());
  });
  // this is updated to avoid race condition on replication
  // eslint-disable-next-line
  await knex(TableName.SuperAdmin)
    .update({ id: ADMIN_CONFIG_UUID })
    .whereNotNull("id")
    .andWhere("id", "<>", ADMIN_CONFIG_UUID)
    .limit(1);

  const superUserConfigExists = await knex(TableName.SuperAdmin).where("id", ADMIN_CONFIG_UUID).first();

  if (!superUserConfigExists) {
    await knex(TableName.SuperAdmin).update({ id: ADMIN_CONFIG_UUID }).whereNotNull("id").limit(1);
  }
  // @ts-ignore
  await knex(TableName.SuperAdmin).update({ id: ADMIN_CONFIG_UUID }).whereNotNull("id").limit(1);
}

export async function down(knex: Knex): Promise<void> {
@@ -19,7 +19,7 @@ export const secretTagServiceFactory = ({ secretTagDAL, permissionService }: TSe
    const { permission } = await permissionService.getProjectPermission(actor, actorId, projectId, actorOrgId);
    ForbiddenError.from(permission).throwUnlessCan(ProjectPermissionActions.Create, ProjectPermissionSub.Tags);

    const existingTag = await secretTagDAL.findOne({ slug, projectId });
    const existingTag = await secretTagDAL.findOne({ slug });
    if (existingTag) throw new BadRequestError({ message: "Tag already exist" });

    const newTag = await secretTagDAL.create({
@@ -7,7 +7,7 @@ import { TSecretSnapshotServiceFactory } from "@app/ee/services/secret-snapshot/
import { getConfig } from "@app/lib/config/env";
import { buildSecretBlindIndexFromName, encryptSymmetric128BitHexKeyUTF8 } from "@app/lib/crypto";
import { BadRequestError } from "@app/lib/errors";
import { groupBy, pick, unique } from "@app/lib/fn";
import { groupBy, pick } from "@app/lib/fn";
import { logger } from "@app/lib/logger";

import { ActorType } from "../auth/auth-type";
@@ -202,13 +202,12 @@ export const secretServiceFactory = ({
    return deletedSecrets;
  };

  /**
   * Checks and handles secrets using a blind index method.
   * The function generates mappings between secret names and their blind indexes, validates user IDs for personal secrets, and retrieves secrets from the database based on their blind indexes.
   * For new secrets (isNew = true), it ensures they don't already exist in the database.
   * For existing secrets, it verifies their presence in the database.
   * If discrepancies are found, errors are thrown. The function returns mappings and the fetched secrets.
   */
  // this is a utility function for secret modification
  // this will check given secret name blind index exist or not
  // if its a created secret set isNew to true
  // thus if these blindindex exist it will throw an error
  // vice versa when u need to check for updated secret
  // this will also return the blind index grouped by secretName
  const fnSecretBlindIndexCheck = async ({
    inputSecrets,
    folderId,
@@ -243,18 +242,10 @@ export const secretServiceFactory = ({

    if (isNew) {
      if (secrets.length) throw new BadRequestError({ message: "Secret already exist" });
    } else {
      const secretKeysInDB = unique(secrets, (el) => el.secretBlindIndex as string).map(
        (el) => blindIndex2KeyName[el.secretBlindIndex as string]
      );
      const hasUnknownSecretsProvided = secretKeysInDB.length !== inputSecrets.length;
      if (hasUnknownSecretsProvided) {
        const keysMissingInDB = Object.keys(keyName2BlindIndex).filter((key) => !secretKeysInDB.includes(key));
        throw new BadRequestError({
          message: `Secret not found: blind index ${keysMissingInDB.join(",")}`
        });
      }
    }
    } else if (secrets.length !== inputSecrets.length)
      throw new BadRequestError({
        message: `Secret not found: blind index ${JSON.stringify(keyName2BlindIndex)}`
      });

    return { blindIndex2KeyName, keyName2BlindIndex, secrets };
  };
@@ -1,5 +1,5 @@
infisical:
  address: "http://localhost:8080"
  address: "https://app.infisical.com/"
  auth:
    type: "universal-auth"
    config:
@@ -13,3 +13,12 @@ sinks:
templates:
  - source-path: my-dot-ev-secret-template
    destination-path: my-dot-env.env
    config:
      polling-interval: 60s
      execute:
        command: docker-compose -f docker-compose.prod.yml down && docker-compose -f docker-compose.prod.yml up -d
  - source-path: my-dot-ev-secret-template1
    destination-path: my-dot-env-1.env
    config:
      exec:
        command: mkdir hello-world1
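Each entry under `templates` above maps onto the agent's `Template` struct shown in the cmd/agent.go hunk further down (source-path, destination-path, config.polling-interval, config.execute.command/timeout). A minimal sketch of how such a block could be unmarshalled; the local struct, the gopkg.in/yaml.v3 dependency, and the sample values are illustrative stand-ins, not part of this PR:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// templateConfig mirrors the agent's Template struct for this sketch only.
type templateConfig struct {
	SourcePath      string `yaml:"source-path"`
	DestinationPath string `yaml:"destination-path"`
	Config          struct {
		PollingInterval string `yaml:"polling-interval"`
		Execute         struct {
			Command string `yaml:"command"`
			Timeout int64  `yaml:"timeout"`
		} `yaml:"execute"`
	} `yaml:"config"`
}

func main() {
	raw := []byte(`
- source-path: my-dot-ev-secret-template
  destination-path: my-dot-env.env
  config:
    polling-interval: 60s
    execute:
      command: docker-compose -f docker-compose.prod.yml up -d
`)

	var templates []templateConfig
	if err := yaml.Unmarshal(raw, &templates); err != nil {
		panic(err)
	}
	// Prints the parsed polling interval ("60s") and the post-render command.
	fmt.Println(templates[0].Config.PollingInterval, templates[0].Config.Execute.Command)
}
```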
@@ -490,5 +490,7 @@ func CallGetRawSecretsV3(httpClient *resty.Client, request GetRawSecretsV3Reques
		return GetRawSecretsV3Response{}, fmt.Errorf("CallGetRawSecretsV3: Unsuccessful response [%v %v] [status-code=%v] [response=%v]", response.Request.Method, response.Request.URL, response.StatusCode(), response.String())
	}

	getRawSecretsV3Response.ETag = response.Header().Get(("etag"))

	return getRawSecretsV3Response, nil
}
@@ -505,4 +505,5 @@ type GetRawSecretsV3Response struct {
		SecretComment string `json:"secretComment"`
	} `json:"secrets"`
	Imports []any `json:"imports"`
	ETag    string
}
@@ -5,12 +5,15 @@ package cmd

import (
	"bytes"
	"context"
	"encoding/base64"
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
	"os/signal"
	"path"
	"runtime"
	"strings"
	"sync"
	"syscall"
@@ -71,12 +74,56 @@ type Template struct {
	SourcePath            string `yaml:"source-path"`
	Base64TemplateContent string `yaml:"base64-template-content"`
	DestinationPath       string `yaml:"destination-path"`

	Config struct { // Configurations for the template
		PollingInterval string `yaml:"polling-interval"` // How often to poll for changes in the secret
		Execute         struct {
			Command string `yaml:"command"` // Command to execute once the template has been rendered
			Timeout int64  `yaml:"timeout"` // Timeout for the command
		} `yaml:"execute"` // Command to execute once the template has been rendered
	} `yaml:"config"`
}

func ReadFile(filePath string) ([]byte, error) {
	return ioutil.ReadFile(filePath)
}

func ExecuteCommandWithTimeout(command string, timeout int64) error {

	shell := [2]string{"sh", "-c"}
	if runtime.GOOS == "windows" {
		shell = [2]string{"cmd", "/C"}
	} else {
		currentShell := os.Getenv("SHELL")
		if currentShell != "" {
			shell[0] = currentShell
		}
	}

	ctx := context.Background()
	if timeout > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(context.Background(), time.Duration(timeout)*time.Second)
		defer cancel()
	}

	cmd := exec.CommandContext(ctx, shell[0], shell[1], command)
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	if err := cmd.Run(); err != nil {
		if exitError, ok := err.(*exec.ExitError); ok { // type assertion
			if exitError.ProcessState.ExitCode() == -1 {
				return fmt.Errorf("command timed out")
			}
		}
		return err
	} else {
		return nil
	}
}

func FileExists(filepath string) bool {
	info, err := os.Stat(filepath)
	if os.IsNotExist(err) {
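The new ExecuteCommandWithTimeout helper above runs the configured execute command through the user's shell and cancels it via context.WithTimeout when a timeout is set, so a hung post-render hook cannot stall the polling loop. A standalone sketch of the same pattern (the command string and the 30-second timeout are illustrative, not taken from the PR; a Unix shell is assumed for brevity):

```go
package main

import (
	"context"
	"fmt"
	"os"
	"os/exec"
	"time"
)

// runWithTimeout mirrors the agent's ExecuteCommandWithTimeout: run a shell
// command and cancel it if it exceeds the given timeout in seconds.
func runWithTimeout(command string, timeoutSeconds int64) error {
	ctx := context.Background()
	if timeoutSeconds > 0 {
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
		defer cancel()
	}

	cmd := exec.CommandContext(ctx, "sh", "-c", command)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}

func main() {
	// Illustrative only: run a post-render hook, but give up after 30 seconds.
	if err := runWithTimeout("echo restarting services", 30); err != nil {
		fmt.Println("command failed:", err)
	}
}
```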
@@ -170,20 +217,24 @@ func ParseAgentConfig(configFile []byte) (*Config, error) {
	return config, nil
}

func secretTemplateFunction(accessToken string) func(string, string, string) ([]models.SingleEnvironmentVariable, error) {
func secretTemplateFunction(accessToken string, existingEtag string, currentEtag *string) func(string, string, string) ([]models.SingleEnvironmentVariable, error) {
	return func(projectID, envSlug, secretPath string) ([]models.SingleEnvironmentVariable, error) {
		secrets, err := util.GetPlainTextSecretsViaMachineIdentity(accessToken, projectID, envSlug, secretPath, false)
		res, err := util.GetPlainTextSecretsViaMachineIdentity(accessToken, projectID, envSlug, secretPath, false)
		if err != nil {
			return nil, err
		}

		return secrets, nil
		if existingEtag != res.Etag {
			*currentEtag = res.Etag
		}

		return res.Secrets, nil
	}
}

func ProcessTemplate(templatePath string, data interface{}, accessToken string) (*bytes.Buffer, error) {
func ProcessTemplate(templatePath string, data interface{}, accessToken string, existingEtag string, currentEtag *string) (*bytes.Buffer, error) {
	// custom template function to fetch secrets from Infisical
	secretFunction := secretTemplateFunction(accessToken)
	secretFunction := secretTemplateFunction(accessToken, existingEtag, currentEtag)
	funcs := template.FuncMap{
		"secret": secretFunction,
	}
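secretTemplateFunction is registered in the FuncMap under the name "secret", so a template can call it with a project ID, environment slug, and secret path and range over the returned variables; the added etag parameters only record whether the fetched secrets changed since the last render. A self-contained sketch of that FuncMap mechanism with a stubbed secret function (the template text, project ID, and stub values are illustrative only):

```go
package main

import (
	"os"
	"text/template"
)

// envVar stands in for models.SingleEnvironmentVariable in this sketch.
type envVar struct {
	Key   string
	Value string
}

func main() {
	// Stub of the "secret" template function; the real agent fetches these
	// from Infisical with the machine-identity access token.
	secretFunc := func(projectID, envSlug, secretPath string) ([]envVar, error) {
		return []envVar{{Key: "DB_PASSWORD", Value: "example"}}, nil
	}

	funcs := template.FuncMap{"secret": secretFunc}

	// Illustrative template body; real templates live at the configured source-path.
	const tpl = `{{- with secret "my-project-id" "dev" "/" }}{{- range . }}{{ .Key }}={{ .Value }}
{{ end }}{{ end }}`

	t := template.Must(template.New("example").Funcs(funcs).Parse(tpl))
	if err := t.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```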
@@ -203,7 +254,7 @@ func ProcessTemplate(templatePath string, data interface{}, accessToken string)
	return &buf, nil
}

func ProcessBase64Template(encodedTemplate string, data interface{}, accessToken string) (*bytes.Buffer, error) {
func ProcessBase64Template(encodedTemplate string, data interface{}, accessToken string, existingEtag string, currentEtag *string) (*bytes.Buffer, error) {
	// custom template function to fetch secrets from Infisical
	decoded, err := base64.StdEncoding.DecodeString(encodedTemplate)
	if err != nil {
@@ -212,7 +263,7 @@ func ProcessBase64Template(encodedTemplate string, data interface{}, accessToken

	templateString := string(decoded)

	secretFunction := secretTemplateFunction(accessToken)
	secretFunction := secretTemplateFunction(accessToken, existingEtag, currentEtag) // TODO: Fix this
	funcs := template.FuncMap{
		"secret": secretFunction,
	}
@@ -250,7 +301,16 @@ type TokenManager struct {
}

func NewTokenManager(fileDeposits []Sink, templates []Template, clientIdPath string, clientSecretPath string, newAccessTokenNotificationChan chan bool, removeClientSecretOnRead bool, exitAfterAuth bool) *TokenManager {
	return &TokenManager{filePaths: fileDeposits, templates: templates, clientIdPath: clientIdPath, clientSecretPath: clientSecretPath, newAccessTokenNotificationChan: newAccessTokenNotificationChan, removeClientSecretOnRead: removeClientSecretOnRead, exitAfterAuth: exitAfterAuth}
	return &TokenManager{
		filePaths:                      fileDeposits,
		templates:                      templates,
		clientIdPath:                   clientIdPath,
		clientSecretPath:               clientSecretPath,
		newAccessTokenNotificationChan: newAccessTokenNotificationChan,
		removeClientSecretOnRead:       removeClientSecretOnRead,
		exitAfterAuth:                  exitAfterAuth,
	}

}

func (tm *TokenManager) SetToken(token string, accessTokenTTL time.Duration, accessTokenMaxTTL time.Duration) {
@@ -428,38 +488,80 @@ func (tm *TokenManager) WriteTokenToFiles() {
	}
}

func (tm *TokenManager) FetchSecrets() {
	log.Info().Msgf("template engine started...")
func (tm *TokenManager) WriteTemplateToFile(bytes *bytes.Buffer, template *Template) {
	if err := WriteBytesToFile(bytes, template.DestinationPath); err != nil {
		log.Error().Msgf("template engine: unable to write secrets to path because %s. Will try again on next cycle", err)
		return
	}
	log.Info().Msgf("template engine: secret template at path %s has been rendered and saved to path %s", template.SourcePath, template.DestinationPath)
}

func (tm *TokenManager) MonitorSecretChanges(secretTemplate Template, sigChan chan os.Signal) {

	pollingInterval := time.Duration(5 * time.Minute)

	if secretTemplate.Config.PollingInterval != "" {
		interval, err := util.ConvertPollingIntervalToTime(secretTemplate.Config.PollingInterval)

		if err != nil {
			log.Error().Msgf("unable to convert polling interval to time because %v", err)
			sigChan <- syscall.SIGINT
			return

		} else {
			pollingInterval = interval
		}
	}

	var existingEtag string
	var currentEtag string
	var firstRun = true

	execTimeout := secretTemplate.Config.Execute.Timeout
	execCommand := secretTemplate.Config.Execute.Command

	for {
		token := tm.GetToken()

		if token != "" {
			for _, secretTemplate := range tm.templates {
				var processedTemplate *bytes.Buffer
				var err error
				if secretTemplate.SourcePath != "" {
					processedTemplate, err = ProcessTemplate(secretTemplate.SourcePath, nil, token)
				} else {
					processedTemplate, err = ProcessBase64Template(secretTemplate.Base64TemplateContent, nil, token)
				}

				if err != nil {
					log.Error().Msgf("template engine: unable to render secrets because %s. Will try again on next cycle", err)
			var processedTemplate *bytes.Buffer
			var err error

					continue
				}

				if err := WriteBytesToFile(processedTemplate, secretTemplate.DestinationPath); err != nil {
					log.Error().Msgf("template engine: unable to write secrets to path because %s. Will try again on next cycle", err)

					continue
				}

				log.Info().Msgf("template engine: secret template at path %s has been rendered and saved to path %s", secretTemplate.SourcePath, secretTemplate.DestinationPath)
			if secretTemplate.SourcePath != "" {
				processedTemplate, err = ProcessTemplate(secretTemplate.SourcePath, nil, token, existingEtag, &currentEtag)
			} else {
				processedTemplate, err = ProcessBase64Template(secretTemplate.Base64TemplateContent, nil, token, existingEtag, &currentEtag)
			}

			// fetch new secrets every 5 minutes (TODO: add PubSub in the future )
			time.Sleep(5 * time.Minute)
			if err != nil {
				log.Error().Msgf("unable to process template because %v", err)
			} else {
				if (existingEtag != currentEtag) || firstRun {

					tm.WriteTemplateToFile(processedTemplate, &secretTemplate)
					existingEtag = currentEtag

					if !firstRun && execCommand != "" {
						log.Info().Msgf("executing command: %s", execCommand)
						err := ExecuteCommandWithTimeout(execCommand, execTimeout)

						if err != nil {
							log.Error().Msgf("unable to execute command because %v", err)
						}

					}
					if firstRun {
						firstRun = false
					}
				}
			}
			time.Sleep(pollingInterval)
		} else {
			// It fails to get the access token. So we will re-try in 3 seconds. We do this because if we don't, the user will have to wait for the next polling interval to get the first secret render.
			time.Sleep(3 * time.Second)
		}

	}
}

@@ -544,7 +646,11 @@ var agentCmd = &cobra.Command{
	tm := NewTokenManager(filePaths, agentConfig.Templates, configUniversalAuthType.ClientIDPath, configUniversalAuthType.ClientSecretPath, tokenRefreshNotifier, configUniversalAuthType.RemoveClientSecretOnRead, agentConfig.Infisical.ExitAfterAuth)

	go tm.ManageTokenLifecycle()
	go tm.FetchSecrets()

	for i, template := range agentConfig.Templates {
		log.Info().Msgf("template engine started for template %v...", i+1)
		go tm.MonitorSecretChanges(template, sigChan)
	}

	for {
		select {
@@ -34,6 +34,11 @@ type SingleEnvironmentVariable struct {
	Comment string `json:"comment"`
}

type PlaintextSecretResult struct {
	Secrets []SingleEnvironmentVariable
	Etag    string
}

type SingleFolder struct {
	ID   string `json:"_id"`
	Name string `json:"name"`
cli/packages/util/agent.go (new file, 41 lines)

@@ -0,0 +1,41 @@
package util

import (
	"fmt"
	"strconv"
	"time"
)

// ConvertPollingIntervalToTime converts a string representation of a polling interval to a time.Duration
func ConvertPollingIntervalToTime(pollingInterval string) (time.Duration, error) {
	length := len(pollingInterval)
	if length < 2 {
		return 0, fmt.Errorf("invalid format")
	}

	unit := pollingInterval[length-1:]
	numberPart := pollingInterval[:length-1]

	number, err := strconv.Atoi(numberPart)
	if err != nil {
		return 0, err
	}

	switch unit {
	case "s":
		if number < 60 {
			return 0, fmt.Errorf("polling interval should be at least 60 seconds")
		}
		return time.Duration(number) * time.Second, nil
	case "m":
		return time.Duration(number) * time.Minute, nil
	case "h":
		return time.Duration(number) * time.Hour, nil
	case "d":
		return time.Duration(number) * 24 * time.Hour, nil
	case "w":
		return time.Duration(number) * 7 * 24 * time.Hour, nil
	default:
		return 0, fmt.Errorf("invalid time unit")
	}
}
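A short usage sketch for the helper above, which parses the polling-interval strings from the agent config (for example the 60s used in the sample config earlier). The import path is an assumption based on the CLI's package layout, and the sample intervals are illustrative:

```go
package main

import (
	"fmt"

	// Assumed import path for the CLI's util package; adjust to the actual module path.
	"github.com/Infisical/infisical-merge/packages/util"
)

func main() {
	for _, interval := range []string{"60s", "5m", "1h", "30s"} {
		d, err := util.ConvertPollingIntervalToTime(interval)
		if err != nil {
			// "30s" is rejected: second-based intervals must be at least 60.
			fmt.Printf("%s -> error: %v\n", interval, err)
			continue
		}
		fmt.Printf("%s -> %v\n", interval, d)
	}
}
```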
@@ -152,7 +152,7 @@ func GetPlainTextSecretsViaJTW(JTWToken string, receiversPrivateKey string, work
	return plainTextSecrets, nil
}

func GetPlainTextSecretsViaMachineIdentity(accessToken string, workspaceId string, environmentName string, secretsPath string, includeImports bool) ([]models.SingleEnvironmentVariable, error) {
func GetPlainTextSecretsViaMachineIdentity(accessToken string, workspaceId string, environmentName string, secretsPath string, includeImports bool) (models.PlaintextSecretResult, error) {
	httpClient := resty.New()
	httpClient.SetAuthToken(accessToken).
		SetHeader("Accept", "application/json")
@@ -170,12 +170,12 @@ func GetPlainTextSecretsViaMachineIdentity(accessToken string, workspaceId strin

	rawSecrets, err := api.CallGetRawSecretsV3(httpClient, api.GetRawSecretsV3Request{WorkspaceId: workspaceId, SecretPath: secretsPath, Environment: environmentName})
	if err != nil {
		return nil, err
		return models.PlaintextSecretResult{}, err
	}

	plainTextSecrets := []models.SingleEnvironmentVariable{}
	if err != nil {
		return nil, fmt.Errorf("unable to decrypt your secrets [err=%v]", err)
		return models.PlaintextSecretResult{}, fmt.Errorf("unable to decrypt your secrets [err=%v]", err)
	}

	for _, secret := range rawSecrets.Secrets {
@@ -189,7 +189,10 @@ func GetPlainTextSecretsViaMachineIdentity(accessToken string, workspaceId strin
	// }
	// }

	return plainTextSecrets, nil
	return models.PlaintextSecretResult{
		Secrets: plainTextSecrets,
		Hash:    rawSecrets.ETag,
	}, nil
}

func InjectImportedSecret(plainTextWorkspaceKey []byte, secrets []models.SingleEnvironmentVariable, importedSecrets []api.ImportedSecretV3) ([]models.SingleEnvironmentVariable, error) {
@@ -14,7 +14,6 @@ The newly released Postgres version of Infisical is the only version of Infisica

Before starting the migration, ensure you have the following command line tools installed:

- [git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git)
- [pg_dump](https://www.postgresql.org/docs/current/app-pgrestore.html)
- [pg_restore](https://www.postgresql.org/docs/current/app-pgdump.html)
- [mongodump](https://www.mongodb.com/docs/database-tools/mongodump/)
@@ -104,7 +103,7 @@ Once started, the migration script will transform MongoDB data into an equivalen
<Step title="Clone Infisical Repository">
  Clone the Infisical MongoDB repository.
  ```
  git clone -b infisical/v0.46.7-postgres https://github.com/Infisical/infisical.git
  git clone https://github.com/Infisical/infisical.git
  ```
</Step>
<Step title="Install dependencies for backend">
@@ -187,14 +186,9 @@ Rather than transferring the data row-by-row from your local machine to the prod

## Post-Migration Steps

Once the data migration to PostgreSQL is complete, you're ready to deploy Infisical using the deployment method of your choice.
For guidance on deployment options, please visit the [self-hosting documentation](/self-hosting/overview).
Remember to transfer the necessary [environment variables](/self-hosting/configuration/envars) from the MongoDB version of Infisical to the new Postgres based Infisical; rest assured, they are fully compatible.

<Warning>
The first deployment of Postgres based Infisical must be deployed with Docker image tag `v0.46.7-postgres`.
After deploying this version, you can proceed to update to any subsequent versions.
</Warning>
After successfully migrating the data to PostgreSQL, you can proceed to deploy Infisical using your preferred deployment method.
Refer to [Infisical's self-hosting documentation](https://infisical.com/docs/self-hosting/overview) for deployment options.
Remember to use your production PostgreSQL connection string for the new deployment and transfer all [environment variables](/self-hosting/configuration/envars) from the MongoDB version of Infisical to the new version (they are all compatible).

## Additional discussion
- When you visit Infisical's [docker hub](https://hub.docker.com/r/infisical/infisical) page, you will notice that image tags end with `-postgres`.
@@ -1,12 +1,10 @@
import { useEffect, useState } from "react";
import { useRouter } from "next/router";
import axios from "axios";
import queryString from "query-string";

import { useNotificationContext } from "@app/components/context/Notifications/NotificationProvider";
import { useCreateIntegration, useGetWorkspaceById } from "@app/hooks/api";

import { Button, Card, CardTitle, FormControl, Input, Select, SelectItem } from "../../../components/v2";
import { Button, Card, CardTitle, FormControl, Select, SelectItem } from "../../../components/v2";
import {
  useGetIntegrationAuthApps,
  useGetIntegrationAuthById
@@ -15,7 +13,6 @@ import {
export default function CloudflareWorkersIntegrationPage() {
  const router = useRouter();
  const { mutateAsync } = useCreateIntegration();
  const { createNotification } = useNotificationContext();

  const { integrationAuthId } = queryString.parse(router.asPath.split("?")[1]);
  const { data: workspace } = useGetWorkspaceById(localStorage.getItem("projectData.id") ?? "");
@@ -25,8 +22,6 @@ export default function CloudflareWorkersIntegrationPage() {
  });

  const [selectedSourceEnvironment, setSelectedSourceEnvironment] = useState("");
  const [secretPath, setSecretPath] = useState("/");

  const [targetApp, setTargetApp] = useState("");
  const [targetAppId, setTargetAppId] = useState("");

@@ -61,7 +56,7 @@ export default function CloudflareWorkersIntegrationPage() {
        app: targetApp,
        appId: targetAppId,
        sourceEnvironment: selectedSourceEnvironment,
        secretPath
        secretPath: "/"
      });

      setIsLoading(false);
@@ -69,18 +64,6 @@ export default function CloudflareWorkersIntegrationPage() {
      router.push(`/integrations/${localStorage.getItem("projectData.id")}`);
    } catch (err) {
      console.error(err);

      let errorMessage: string = "Something went wrong!";
      if (axios.isAxiosError(err)) {
        const { message } = err?.response?.data as { message: string };
        errorMessage = message;
      }

      createNotification({
        text: errorMessage,
        type: "error"
      });
      setIsLoading(false);
    }
  };

@@ -113,13 +96,6 @@ export default function CloudflareWorkersIntegrationPage() {
          ))}
        </Select>
      </FormControl>
      <FormControl label="Infisical Secret Path" className="mt-2 px-6">
        <Input
          value={secretPath}
          onChange={(evt) => setSecretPath(evt.target.value)}
          placeholder="Provide a path, default is /"
        />
      </FormControl>
      <FormControl label="Cloudflare Workers Project" className="mt-4 px-6">
        <Select
          value={targetApp}
@@ -7,7 +7,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 1.0.6
version: 1.0.5

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
@@ -13,9 +13,8 @@ metadata:
  name: default
subjects:
  - kind: ServiceAccount
    name: default
    namespace: {{ .Release.Namespace }}
    name: {{ .Release.Namespace }}
roleRef:
  kind: Role
  name: k8s-wait-for-infisical-schema-migration
  apiGroup: rbac.authorization.k8s.io
  apiGroup: rbac.authorization.k8s.io
@@ -532,8 +532,9 @@ const main = async () => {
    postgresTableName: TableName.SuperAdmin,
    returnKeys: ["id"],
    preProcessing: async (doc) => {
      const id = uuidV4();
      return {
        id: "00000000-0000-0000-0000-000000000000",
        id,
        allowSignUp: doc.allowSignUp,
        initialized: doc.initialized,
        createdAt: new Date((doc as any).createdAt),