Compare commits


18 Commits

Author SHA1 Message Date
Maidul Islam
e35135e4e3 test 2025-04-08 17:08:56 -04:00
Maidul Islam
c95dd69167 test upload 2025-04-08 16:58:35 -04:00
Maidul Islam
a50b8120fd Merge pull request #3378 from akhilmhdh/fix/doc-p-access-image
feat: updated ruby action
2025-04-08 16:21:06 -04:00
f1ee53d417 feat: updated ruby action 2025-04-09 01:49:35 +05:30
Maidul Islam
229ad79f49 Merge pull request #3377 from akhilmhdh/fix/doc-p-access-image
feat: added passphrase
2025-04-08 15:56:34 -04:00
d7dbd01ecf feat: banner respect silent 2025-04-09 01:24:38 +05:30
026fd21fd4 feat: added passphrase 2025-04-09 01:05:31 +05:30
Maidul Islam
9b9c1a52b3 Merge pull request #3376 from akhilmhdh/fix/doc-p-access-image
feat: added s3 deb pipeline
2025-04-08 15:05:32 -04:00
Maidul Islam
98aa424e2e Update .github/workflows/release_build_infisical_cli.yml
Co-authored-by: greptile-apps[bot] <165735046+greptile-apps[bot]@users.noreply.github.com>
2025-04-08 15:02:47 -04:00
2cd5df1ab3 feat: updated message 2025-04-09 00:30:48 +05:30
Daniel Hougaard
e0d863e06e Merge pull request #3375 from Infisical/helm-update-v0.9.1
Update Helm chart to version v0.9.1
2025-04-08 22:52:42 +04:00
d991af557b feat: added s3 deb pipeline 2025-04-09 00:22:00 +05:30
DanielHougaard
ae54d04357 Update Helm chart to version v0.9.1 2025-04-08 18:51:31 +00:00
Daniel Hougaard
fa590ba697 Merge pull request #3348 from Infisical/daniel/k8s-auto-reviewer-token
feat(k8s): k8s auth automatic service account token creation
2025-04-08 22:45:57 +04:00
Maidul Islam
1da2896bb0 Create codeql.yml 2025-04-07 21:00:43 -04:00
Daniel Hougaard
3f190426fe fix: added docs for operator managed service account tokens & made audience optional 2025-04-04 03:15:11 +04:00
Daniel Hougaard
3d072c2f48 feat(k8s): automatic service account token creation for k8s auth 2025-04-01 23:39:22 +04:00
Daniel Hougaard
82b828c10e feat(k8s): automatic service account token creation for k8s auth 2025-04-01 23:16:38 +04:00
29 changed files with 1595 additions and 384 deletions

.github/workflows/codeql.yml (new file, 102 lines added)

@@ -0,0 +1,102 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL Advanced"
on:
push:
branches: [ "main", "development" ]
pull_request:
branches: [ "main", "development" ]
schedule:
- cron: '33 7 * * 3'
jobs:
analyze:
name: Analyze (${{ matrix.language }})
# Runner size impacts CodeQL analysis time. To learn more, please see:
# - https://gh.io/recommended-hardware-resources-for-running-codeql
# - https://gh.io/supported-runners-and-hardware-resources
# - https://gh.io/using-larger-runners (GitHub.com only)
# Consider using larger runners or machines with greater resources for possible analysis time improvements.
runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
permissions:
# required for all workflows
security-events: write
# required to fetch internal or private CodeQL packs
packages: read
# only required for workflows in private repositories
actions: read
contents: read
strategy:
fail-fast: false
matrix:
include:
- language: actions
build-mode: none
- language: go
build-mode: autobuild
- language: javascript-typescript
build-mode: none
# CodeQL supports the following values keywords for 'language': 'actions', 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift'
# Use `c-cpp` to analyze code written in C, C++ or both
# Use 'java-kotlin' to analyze code written in Java, Kotlin or both
# Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both
# To learn more about changing the languages that are analyzed or customizing the build mode for your analysis,
# see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning.
# If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how
# your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
steps:
- name: Checkout repository
uses: actions/checkout@v4
# Add any setup steps before running the `github/codeql-action/init` action.
# This includes steps like installing compilers or runtimes (`actions/setup-node`
# or others). This is typically only required for manual builds.
# - name: Setup runtime (example)
# uses: actions/setup-example@v1
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
build-mode: ${{ matrix.build-mode }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
# queries: security-extended,security-and-quality
# If the analyze step fails for one of the languages you are analyzing with
# "We were unable to automatically build your code", modify the matrix above
# to set the build mode to "manual" for that language. Then modify this step
# to build your code.
# Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
- if: matrix.build-mode == 'manual'
shell: bash
run: |
echo 'If you are using a "manual" build mode for one or more of the' \
'languages you are analyzing, replace this with the commands to build' \
'your code, for example:'
echo ' make bootstrap'
echo ' make release'
exit 1
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3
with:
category: "/language:${{matrix.language}}"

View File

@@ -12,75 +12,75 @@ permissions:
contents: write contents: write
jobs: jobs:
cli-integration-tests: # cli-integration-tests:
name: Run tests before deployment # name: Run tests before deployment
uses: ./.github/workflows/run-cli-tests.yml # uses: ./.github/workflows/run-cli-tests.yml
secrets: # secrets:
CLI_TESTS_UA_CLIENT_ID: ${{ secrets.CLI_TESTS_UA_CLIENT_ID }} # CLI_TESTS_UA_CLIENT_ID: ${{ secrets.CLI_TESTS_UA_CLIENT_ID }}
CLI_TESTS_UA_CLIENT_SECRET: ${{ secrets.CLI_TESTS_UA_CLIENT_SECRET }} # CLI_TESTS_UA_CLIENT_SECRET: ${{ secrets.CLI_TESTS_UA_CLIENT_SECRET }}
CLI_TESTS_SERVICE_TOKEN: ${{ secrets.CLI_TESTS_SERVICE_TOKEN }} # CLI_TESTS_SERVICE_TOKEN: ${{ secrets.CLI_TESTS_SERVICE_TOKEN }}
CLI_TESTS_PROJECT_ID: ${{ secrets.CLI_TESTS_PROJECT_ID }} # CLI_TESTS_PROJECT_ID: ${{ secrets.CLI_TESTS_PROJECT_ID }}
CLI_TESTS_ENV_SLUG: ${{ secrets.CLI_TESTS_ENV_SLUG }} # CLI_TESTS_ENV_SLUG: ${{ secrets.CLI_TESTS_ENV_SLUG }}
CLI_TESTS_USER_EMAIL: ${{ secrets.CLI_TESTS_USER_EMAIL }} # CLI_TESTS_USER_EMAIL: ${{ secrets.CLI_TESTS_USER_EMAIL }}
CLI_TESTS_USER_PASSWORD: ${{ secrets.CLI_TESTS_USER_PASSWORD }} # CLI_TESTS_USER_PASSWORD: ${{ secrets.CLI_TESTS_USER_PASSWORD }}
CLI_TESTS_INFISICAL_VAULT_FILE_PASSPHRASE: ${{ secrets.CLI_TESTS_INFISICAL_VAULT_FILE_PASSPHRASE }} # CLI_TESTS_INFISICAL_VAULT_FILE_PASSPHRASE: ${{ secrets.CLI_TESTS_INFISICAL_VAULT_FILE_PASSPHRASE }}
npm-release: # npm-release:
runs-on: ubuntu-latest # runs-on: ubuntu-latest
env: # env:
working-directory: ./npm # working-directory: ./npm
needs: # needs:
- cli-integration-tests # - cli-integration-tests
- goreleaser # - goreleaser
steps: # steps:
- uses: actions/checkout@v3 # - uses: actions/checkout@v3
with: # with:
fetch-depth: 0 # fetch-depth: 0
- name: Extract version # - name: Extract version
run: | # run: |
VERSION=$(echo ${{ github.ref_name }} | sed 's/infisical-cli\/v//') # VERSION=$(echo ${{ github.ref_name }} | sed 's/infisical-cli\/v//')
echo "Version extracted: $VERSION" # echo "Version extracted: $VERSION"
echo "CLI_VERSION=$VERSION" >> $GITHUB_ENV # echo "CLI_VERSION=$VERSION" >> $GITHUB_ENV
- name: Print version # - name: Print version
run: echo ${{ env.CLI_VERSION }} # run: echo ${{ env.CLI_VERSION }}
- name: Setup Node # - name: Setup Node
uses: actions/setup-node@8f152de45cc393bb48ce5d89d36b731f54556e65 # v4.0.0 # uses: actions/setup-node@8f152de45cc393bb48ce5d89d36b731f54556e65 # v4.0.0
with: # with:
node-version: 20 # node-version: 20
cache: "npm" # cache: "npm"
cache-dependency-path: ./npm/package-lock.json # cache-dependency-path: ./npm/package-lock.json
- name: Install dependencies # - name: Install dependencies
working-directory: ${{ env.working-directory }} # working-directory: ${{ env.working-directory }}
run: npm install --ignore-scripts # run: npm install --ignore-scripts
- name: Set NPM version # - name: Set NPM version
working-directory: ${{ env.working-directory }} # working-directory: ${{ env.working-directory }}
run: npm version ${{ env.CLI_VERSION }} --allow-same-version --no-git-tag-version # run: npm version ${{ env.CLI_VERSION }} --allow-same-version --no-git-tag-version
- name: Setup NPM # - name: Setup NPM
working-directory: ${{ env.working-directory }} # working-directory: ${{ env.working-directory }}
run: | # run: |
echo 'registry="https://registry.npmjs.org/"' > ./.npmrc # echo 'registry="https://registry.npmjs.org/"' > ./.npmrc
echo "//registry.npmjs.org/:_authToken=$NPM_TOKEN" >> ./.npmrc # echo "//registry.npmjs.org/:_authToken=$NPM_TOKEN" >> ./.npmrc
echo 'registry="https://registry.npmjs.org/"' > ~/.npmrc # echo 'registry="https://registry.npmjs.org/"' > ~/.npmrc
echo "//registry.npmjs.org/:_authToken=$NPM_TOKEN" >> ~/.npmrc # echo "//registry.npmjs.org/:_authToken=$NPM_TOKEN" >> ~/.npmrc
env: # env:
NPM_TOKEN: ${{ secrets.NPM_TOKEN }} # NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
- name: Pack NPM # - name: Pack NPM
working-directory: ${{ env.working-directory }} # working-directory: ${{ env.working-directory }}
run: npm pack # run: npm pack
- name: Publish NPM # - name: Publish NPM
working-directory: ${{ env.working-directory }} # working-directory: ${{ env.working-directory }}
run: npm publish --tarball=./infisical-sdk-${{github.ref_name}} --access public --registry=https://registry.npmjs.org/ # run: npm publish --tarball=./infisical-sdk-${{github.ref_name}} --access public --registry=https://registry.npmjs.org/
env: # env:
NPM_TOKEN: ${{ secrets.NPM_TOKEN }} # NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} # NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
goreleaser: goreleaser:
runs-on: ubuntu-latest runs-on: ubuntu-latest
@@ -126,7 +126,22 @@ jobs:
GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }} GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}
- uses: actions/setup-python@v4 - uses: actions/setup-python@v4
- run: pip install --upgrade cloudsmith-cli - run: pip install --upgrade cloudsmith-cli
- uses: ruby/setup-ruby@354a1ad156761f5ee2b7b13fa8e09943a5e8d252
with:
ruby-version: "3.3" # Not needed with a .ruby-version, .tool-versions or mise.toml
bundler-cache: true # runs 'bundle install' and caches installed gems automatically
- name: Install deb-s3
run: gem install deb-s3
- name: Configure GPG Key
run: echo -n "$GPG_SIGNING_KEY" | base64 --decode | gpg --batch --passphrase "$GPG_SIGNING_KEY_PASSPHRASE" --import
env:
GPG_SIGNING_KEY: ${{ secrets.GPG_SIGNING_KEY }}
GPG_SIGNING_KEY_PASSPHRASE: ${{ secrets.GPG_SIGNING_KEY_PASSPHRASE }}
- name: Publish to CloudSmith - name: Publish to CloudSmith
run: sh cli/upload_to_cloudsmith.sh run: sh cli/upload_to_cloudsmith.sh
env: env:
CLOUDSMITH_API_KEY: ${{ secrets.CLOUDSMITH_API_KEY }} CLOUDSMITH_API_KEY: ${{ secrets.CLOUDSMITH_API_KEY }}
INFISICAL_CLI_S3_BUCKET: ${{ secrets.INFISICAL_CLI_S3_BUCKET }}
INFISICAL_CLI_REPO_SIGNING_KEY_ID: ${{ secrets.INFISICAL_CLI_REPO_SIGNING_KEY_ID }}
AWS_ACCESS_KEY_ID: ${{ secrets.INFISICAL_CLI_REPO_AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.INFISICAL_CLI_REPO_AWS_SECRET_ACCESS_KEY }}
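
For reference, the S3 repository published here is what the new setup script further down in this compare points apt clients at; based on that script's defaults (`PKG_URL`, `PKG_PATH`, `PACKAGE_NAME`), the resulting apt source entry looks roughly like this (the architecture is just an example):

```
deb [arch=amd64 signed-by=/usr/share/keyrings/infisical-archive-keyring.gpg] https://artifacts-cli.infisical.com/deb stable main
```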

View File

@@ -16,23 +16,23 @@ monorepo:
dir: cli dir: cli
builds: builds:
- id: darwin-build # - id: darwin-build
binary: infisical # binary: infisical
ldflags: # ldflags:
- -X github.com/Infisical/infisical-merge/packages/util.CLI_VERSION={{ .Version }} # - -X github.com/Infisical/infisical-merge/packages/util.CLI_VERSION={{ .Version }}
- -X github.com/Infisical/infisical-merge/packages/telemetry.POSTHOG_API_KEY_FOR_CLI={{ .Env.POSTHOG_API_KEY_FOR_CLI }} # - -X github.com/Infisical/infisical-merge/packages/telemetry.POSTHOG_API_KEY_FOR_CLI={{ .Env.POSTHOG_API_KEY_FOR_CLI }}
flags: # flags:
- -trimpath # - -trimpath
env: # env:
- CGO_ENABLED=1 # - CGO_ENABLED=1
- CC=/home/runner/work/osxcross/target/bin/o64-clang # - CC=/home/runner/work/osxcross/target/bin/o64-clang
- CXX=/home/runner/work/osxcross/target/bin/o64-clang++ # - CXX=/home/runner/work/osxcross/target/bin/o64-clang++
goos: # goos:
- darwin # - darwin
ignore: # ignore:
- goos: darwin # - goos: darwin
goarch: "386" # goarch: "386"
dir: ./cli # dir: ./cli
- id: all-other-builds - id: all-other-builds
env: env:
@@ -44,11 +44,11 @@ builds:
flags: flags:
- -trimpath - -trimpath
goos: goos:
- freebsd # - freebsd
- linux - linux
- netbsd # - netbsd
- openbsd # - openbsd
- windows # - windows
goarch: goarch:
- "386" - "386"
- amd64 - amd64
@@ -75,8 +75,10 @@ archives:
- ../completions/* - ../completions/*
release: release:
replace_existing_draft: true # replace_existing_draft: true
mode: "replace" # mode: "replace"
disable: true
skip_upload: true
checksum: checksum:
name_template: "checksums.txt" name_template: "checksums.txt"
@@ -91,39 +93,39 @@ snapshot:
# dir: "{{ dir .ArtifactPath }}" # dir: "{{ dir .ArtifactPath }}"
# cmd: curl -F package=@{{ .ArtifactName }} https://{{ .Env.FURY_TOKEN }}@push.fury.io/infisical/ # cmd: curl -F package=@{{ .ArtifactName }} https://{{ .Env.FURY_TOKEN }}@push.fury.io/infisical/
brews: # brews:
- name: infisical # - name: infisical
tap: # tap:
owner: Infisical # owner: Infisical
name: homebrew-get-cli # name: homebrew-get-cli
commit_author: # commit_author:
name: "Infisical" # name: "Infisical"
email: ai@infisical.com # email: ai@infisical.com
folder: Formula # folder: Formula
homepage: "https://infisical.com" # homepage: "https://infisical.com"
description: "The official Infisical CLI" # description: "The official Infisical CLI"
install: |- # install: |-
bin.install "infisical" # bin.install "infisical"
bash_completion.install "completions/infisical.bash" => "infisical" # bash_completion.install "completions/infisical.bash" => "infisical"
zsh_completion.install "completions/infisical.zsh" => "_infisical" # zsh_completion.install "completions/infisical.zsh" => "_infisical"
fish_completion.install "completions/infisical.fish" # fish_completion.install "completions/infisical.fish"
man1.install "manpages/infisical.1.gz" # man1.install "manpages/infisical.1.gz"
- name: "infisical@{{.Version}}" # - name: "infisical@{{.Version}}"
tap: # tap:
owner: Infisical # owner: Infisical
name: homebrew-get-cli # name: homebrew-get-cli
commit_author: # commit_author:
name: "Infisical" # name: "Infisical"
email: ai@infisical.com # email: ai@infisical.com
folder: Formula # folder: Formula
homepage: "https://infisical.com" # homepage: "https://infisical.com"
description: "The official Infisical CLI" # description: "The official Infisical CLI"
install: |- # install: |-
bin.install "infisical" # bin.install "infisical"
bash_completion.install "completions/infisical.bash" => "infisical" # bash_completion.install "completions/infisical.bash" => "infisical"
zsh_completion.install "completions/infisical.zsh" => "_infisical" # zsh_completion.install "completions/infisical.zsh" => "_infisical"
fish_completion.install "completions/infisical.fish" # fish_completion.install "completions/infisical.fish"
man1.install "manpages/infisical.1.gz" # man1.install "manpages/infisical.1.gz"
nfpms: nfpms:
- id: infisical - id: infisical
@@ -136,10 +138,10 @@ nfpms:
description: The offical Infisical CLI description: The offical Infisical CLI
license: MIT license: MIT
formats: formats:
- rpm # - rpm
- deb - deb
- apk # - apk
- archlinux # - archlinux
bindir: /usr/bin bindir: /usr/bin
contents: contents:
- src: ./completions/infisical.bash - src: ./completions/infisical.bash
@@ -151,73 +153,73 @@ nfpms:
- src: ./manpages/infisical.1.gz - src: ./manpages/infisical.1.gz
dst: /usr/share/man/man1/infisical.1.gz dst: /usr/share/man/man1/infisical.1.gz
scoop: # scoop:
bucket: # bucket:
owner: Infisical # owner: Infisical
name: scoop-infisical # name: scoop-infisical
commit_author: # commit_author:
name: "Infisical" # name: "Infisical"
email: ai@infisical.com # email: ai@infisical.com
homepage: "https://infisical.com" # homepage: "https://infisical.com"
description: "The official Infisical CLI" # description: "The official Infisical CLI"
license: MIT # license: MIT
aurs: # aurs:
- name: infisical-bin # - name: infisical-bin
homepage: "https://infisical.com" # homepage: "https://infisical.com"
description: "The official Infisical CLI" # description: "The official Infisical CLI"
maintainers: # maintainers:
- Infisical, Inc <support@infisical.com> # - Infisical, Inc <support@infisical.com>
license: MIT # license: MIT
private_key: "{{ .Env.AUR_KEY }}" # private_key: "{{ .Env.AUR_KEY }}"
git_url: "ssh://aur@aur.archlinux.org/infisical-bin.git" # git_url: "ssh://aur@aur.archlinux.org/infisical-bin.git"
package: |- # package: |-
# bin # # bin
install -Dm755 "./infisical" "${pkgdir}/usr/bin/infisical" # install -Dm755 "./infisical" "${pkgdir}/usr/bin/infisical"
# license # # license
install -Dm644 "./LICENSE" "${pkgdir}/usr/share/licenses/infisical/LICENSE" # install -Dm644 "./LICENSE" "${pkgdir}/usr/share/licenses/infisical/LICENSE"
# completions # # completions
mkdir -p "${pkgdir}/usr/share/bash-completion/completions/" # mkdir -p "${pkgdir}/usr/share/bash-completion/completions/"
mkdir -p "${pkgdir}/usr/share/zsh/site-functions/" # mkdir -p "${pkgdir}/usr/share/zsh/site-functions/"
mkdir -p "${pkgdir}/usr/share/fish/vendor_completions.d/" # mkdir -p "${pkgdir}/usr/share/fish/vendor_completions.d/"
install -Dm644 "./completions/infisical.bash" "${pkgdir}/usr/share/bash-completion/completions/infisical" # install -Dm644 "./completions/infisical.bash" "${pkgdir}/usr/share/bash-completion/completions/infisical"
install -Dm644 "./completions/infisical.zsh" "${pkgdir}/usr/share/zsh/site-functions/_infisical" # install -Dm644 "./completions/infisical.zsh" "${pkgdir}/usr/share/zsh/site-functions/_infisical"
install -Dm644 "./completions/infisical.fish" "${pkgdir}/usr/share/fish/vendor_completions.d/infisical.fish" # install -Dm644 "./completions/infisical.fish" "${pkgdir}/usr/share/fish/vendor_completions.d/infisical.fish"
# man pages # # man pages
install -Dm644 "./manpages/infisical.1.gz" "${pkgdir}/usr/share/man/man1/infisical.1.gz" # install -Dm644 "./manpages/infisical.1.gz" "${pkgdir}/usr/share/man/man1/infisical.1.gz"
dockers: # dockers:
- dockerfile: docker/alpine # - dockerfile: docker/alpine
goos: linux # goos: linux
goarch: amd64 # goarch: amd64
use: buildx # use: buildx
ids: # ids:
- all-other-builds # - all-other-builds
image_templates: # image_templates:
- "infisical/cli:{{ .Major }}.{{ .Minor }}.{{ .Patch }}-amd64" # - "infisical/cli:{{ .Major }}.{{ .Minor }}.{{ .Patch }}-amd64"
- "infisical/cli:latest-amd64" # - "infisical/cli:latest-amd64"
build_flag_templates: # build_flag_templates:
- "--pull" # - "--pull"
- "--platform=linux/amd64" # - "--platform=linux/amd64"
- dockerfile: docker/alpine # - dockerfile: docker/alpine
goos: linux # goos: linux
goarch: amd64 # goarch: amd64
use: buildx # use: buildx
ids: # ids:
- all-other-builds # - all-other-builds
image_templates: # image_templates:
- "infisical/cli:{{ .Major }}.{{ .Minor }}.{{ .Patch }}-arm64" # - "infisical/cli:{{ .Major }}.{{ .Minor }}.{{ .Patch }}-arm64"
- "infisical/cli:latest-arm64" # - "infisical/cli:latest-arm64"
build_flag_templates: # build_flag_templates:
- "--pull" # - "--pull"
- "--platform=linux/arm64" # - "--platform=linux/arm64"
docker_manifests: # docker_manifests:
- name_template: "infisical/cli:{{ .Major }}.{{ .Minor }}.{{ .Patch }}" # - name_template: "infisical/cli:{{ .Major }}.{{ .Minor }}.{{ .Patch }}"
image_templates: # image_templates:
- "infisical/cli:{{ .Major }}.{{ .Minor }}.{{ .Patch }}-amd64" # - "infisical/cli:{{ .Major }}.{{ .Minor }}.{{ .Patch }}-amd64"
- "infisical/cli:{{ .Major }}.{{ .Minor }}.{{ .Patch }}-arm64" # - "infisical/cli:{{ .Major }}.{{ .Minor }}.{{ .Patch }}-arm64"
- name_template: "infisical/cli:latest" # - name_template: "infisical/cli:latest"
image_templates: # image_templates:
- "infisical/cli:latest-amd64" # - "infisical/cli:latest-amd64"
- "infisical/cli:latest-arm64" # - "infisical/cli:latest-arm64"

View File

@@ -50,6 +50,7 @@ func init() {
config.INFISICAL_URL = util.AppendAPIEndpoint(config.INFISICAL_URL) config.INFISICAL_URL = util.AppendAPIEndpoint(config.INFISICAL_URL)
util.DisplayAptInstallationChangeBanner(silent)
if !util.IsRunningInDocker() && !silent { if !util.IsRunningInDocker() && !silent {
util.CheckForUpdate() util.CheckForUpdate()
} }

View File

@@ -53,6 +53,25 @@ func CheckForUpdate() {
} }
} }
func DisplayAptInstallationChangeBanner(isSilent bool) {
if isSilent {
return
}
if runtime.GOOS == "linux" {
_, err := exec.LookPath("apt-get")
isApt := err == nil
if isApt {
yellow := color.New(color.FgYellow).SprintFunc()
msg := fmt.Sprintf("%s",
yellow("Update Required: Your current package installation script is outdated and will no longer receive updates.\nPlease update to the new installation script which can be found here https://infisical.com/docs/cli/overview#installation debian section\n"),
)
fmt.Fprintln(os.Stderr, msg)
}
}
}
func getLatestTag(repoOwner string, repoName string) (string, string, error) { func getLatestTag(repoOwner string, repoName string) (string, string, error) {
url := fmt.Sprintf("https://api.github.com/repos/%s/%s/releases/latest", repoOwner, repoName) url := fmt.Sprintf("https://api.github.com/repos/%s/%s/releases/latest", repoOwner, repoName)
resp, err := http.Get(url) resp, err := http.Get(url)

View File

@@ -0,0 +1,551 @@
#!/usr/bin/env bash
#
# The core commands execute start from the "MAIN" section below.
#
test -z "$BASH_SOURCE" && {
self="sudo -E bash"
prefix="<curl command> |"
} || {
self=$(readlink -f ${BASH_SOURCE:-$0})
prefix=""
}
tmp_log=$(mktemp .s3_setup_XXXXXXXXX)
# Environment variables that can be set
PKG_URL=${PKG_URL:-"https://artifacts-cli.infisical.com"}
PKG_PATH=${PKG_PATH:-"deb"}
PACKAGE_NAME=${PACKAGE_NAME:-"infisical"}
GPG_KEY_URL=${GPG_KEY_URL:-"${PKG_URL}/infisical.gpg"}
colours=$(tput colors 2>/dev/null || echo "256")
no_colour="\e[39;49m"
green_colour="\e[32m"
red_colour="\e[41;97m"
bold="\e[1m"
reset="\e[0m"
use_colours=$(test -n "$colours" && test $colours -ge 8 && echo "yes")
test "$use_colours" == "yes" || {
no_colour=""
green_colour=""
red_colour=""
bold=""
reset=""
}
example_name="Ubuntu/Focal (20.04)"
example_distro="ubuntu"
example_codename="focal"
example_version="20.04"
function echo_helptext {
local help_text="$*"
echo " ^^^^: ... $help_text"
}
function die {
local text="$@"
test ! -z "$text" && {
echo_helptext "$text" 1>&2
}
local prefix="${red_colour} !!!!${no_colour}"
echo -e "$prefix: Oh no, your setup failed! :-( ... But we might be able to help. :-)"
echo -e "$prefix: "
echo -e "$prefix: ${bold}Please check your S3 bucket configuration and try again.${reset}"
echo -e "$prefix: "
test -f "$tmp_log" && {
local n=20
echo -e "$prefix: Last $n log lines from $tmp_log (might not be errors, nor even relevant):"
echo -e "$prefix:"
check_tool_silent "xargs" && {
check_tool_silent "fmt" && {
tail -n $n $tmp_log | fmt -t | xargs -Ilog echo -e "$prefix: > log"
} || {
tail -n $n $tmp_log | xargs -Ilog echo -e "$prefix: > log"
}
} || {
echo
tail -n $n $tmp_log
}
}
exit 1
}
function echo_colour {
local colour="${1:-"no"}_colour"; shift
echo -e "${!colour}$@${no_colour}"
}
function echo_green_or_red {
local rc="$1"
local good="${2:-YES}"
local bad="${3:-NO}"
test "$rc" -eq 0 && {
echo_colour "green" "$good"
} || {
echo_colour "red" "$bad"
}
return $rc
}
function echo_clearline {
local rc="$?"
echo -e -n "\033[1K\r"
return $rc
}
function echo_status {
local rc="$1"
local good="$2"
local bad="$3"
local text="$4"
local help_text="$5"
local newline=$(test "$6" != "no" && echo "\n" || echo "")
local status_text=$(echo_green_or_red "$rc" "$good" "$bad")
echo_clearline
local width=$(test "$use_colours" == "yes" && echo "16" || echo "5")
printf "%${width}s %s${newline}" "${status_text}:" "$text"
test $rc -ne 0 && test ! -z "$help_text" && {
echo_helptext "$help_text"
echo
}
return $rc
}
function echo_running {
local rc=$?
local text="$1"
echo_status 0 " RUN" " RUN" "$text" "" "no"
return $rc
}
function echo_okfail_rc {
local rc=$1
local text="$2"
local help_text="$3"
echo_clearline
echo_status $rc " OK" " NOPE" "$text" "$help_text"
return $rc
}
function echo_okfail {
echo_okfail_rc $? "$@"
return $?
}
function check_tool_silent {
local tool=${1}
command -v $tool &>/dev/null || which $tool &>/dev/null
return $?
}
function check_tool {
local tool=${1}
local optional=${2:-false}
local required_text="optional"
if ! $optional; then required_text="required"; fi
local text="Checking for $required_text executable '$tool' ..."
echo_running "$text"
check_tool_silent "$tool"
echo_okfail "$text" || {
if ! $optional; then
die "$tool is not installed, but is required by this script."
fi
return 1
}
return 0
}
function cleanup {
echo
rm -rf $tmp_log
}
function shutdown {
echo_colour "red" " !!!!: Operation cancelled by user!"
exit 2
}
function check_os {
test ! -z "$distro" && test ! -z "${version}${codename}"
return $?
}
function detect_os_system {
check_os && return 0
local text="Detecting your OS distribution and release using system methods ..."
echo_running "$text"
local tool_rc=1
test -f '/etc/os-release' && {
. /etc/os-release
distro=${distro:-$ID}
codename=${codename:-$VERSION_CODENAME}
codename=${codename:-$(echo $VERSION | cut -d '(' -f 2 | cut -d ')' -f 1)}
version=${version:-$VERSION_ID}
test -z "${version}${codename}" && test -f '/etc/debian_version' && {
# Workaround for Debian unstable releases; get the codename from debian_version
codename=$(cat /etc/debian_version | cut -d '/' -f1)
}
tool_rc=0
}
check_os
local rc=$?
echo_okfail_rc $rc "$text"
test $tool_rc -eq 0 && {
report_os_expanded
}
return $rc
}
function report_os_attribute {
local name=$1
local value=$2
local coloured=""
echo -n "$name="
test -z "$value" && {
echo -e -n "${red_colour}<empty>${no_colour} "
} || {
echo -e -n "${green_colour}${value}${no_colour} "
}
}
function report_os_expanded {
echo_helptext "Detected/provided for your OS/distribution, version and architecture:"
echo " >>>>:"
report_os_values
}
function report_os_values {
echo -n " >>>>: ... "
report_os_attribute "distro" $distro
report_os_attribute "codename" "stable (fixed)"
report_os_attribute "arch" $arch
echo
echo " >>>>:"
}
function detect_os_legacy_python {
check_os && return 0
local text="Detecting your OS distribution and release using legacy python ..."
echo_running "$text"
IFS='' read -r -d '' script <<-'EOF'
from __future__ import unicode_literals, print_function
import platform;
info = platform.linux_distribution() or ('', '', '');
for key, value in zip(('distro', 'version', 'codename'), info):
print("local guess_%s=\"%s\"\n" % (key, value.lower().replace(' ', '')));
EOF
local tool_rc=1
check_tool_silent "python" && {
eval $(python -c "$script")
distro=${distro:-$guess_distro}
codename=${codename:-$guess_codename}
version=${version:-$guess_version}
tool_rc=$?
}
check_os
local rc=$?
echo_okfail_rc $rc "$text"
check_tool_silent "python" || {
echo_helptext "Python isn't available, so skipping detection method (hint: install python)"
}
test $tool_rc -eq 0 && {
report_os
}
return $rc
}
function detect_os_modern_python {
check_os && return 0
check_tool_silent "python" && {
local text="Ensuring python-pip is installed ..."
echo_running "$text"
check_tool_silent "pip"
echo_okfail "$text" || {
local text="Checking if pip can be bootstrapped without get-pip ..."
echo_running "$text"
python -m ensurepip --default-pip &>$tmp_log
echo_okfail "$text" || {
local text="Installing pip via get-pip bootstrap ..."
echo_running "$text"
curl -1sLf https://bootstrap.pypa.io/get-pip.py 2>$tmp_log | python &>$tmp_log
echo_okfail "$text" || die "Failed to install pip!"
}
}
local text="Installing 'distro' python library ..."
echo_running "$text"
python -c 'import distro' &>$tmp_log || python -m pip install distro &>$tmp_log
echo_okfail "$text" || die "Failed to install required 'distro' python library!"
}
IFS='' read -r -d '' script <<-'EOF'
from __future__ import unicode_literals, print_function
import distro;
info = distro.linux_distribution(full_distribution_name=False) or ('', '', '');
for key, value in zip(('distro', 'version', 'codename'), info):
print("local guess_%s=\"%s\"\n" % (key, value.lower().replace(' ', '')));
EOF
local text="Detecting your OS distribution and release using modern python ..."
echo_running "$text"
local tool_rc=1
check_tool_silent "python" && {
eval $(python -c "$script")
distro=${distro:-$guess_distro}
codename=${codename:-$guess_codename}
version=${version:-$guess_version}
tool_rc=$?
}
check_os
local rc=$?
echo_okfail_rc $rc "$text"
check_tool_silent "python" || {
echo_helptext "Python isn't available, so skipping detection method (hint: install python)"
}
test $tool_rc -eq 0 && {
report_os_expanded
}
return $rc
}
function detect_os {
# Backwards compat for old distribution parameter names
distro=${distro:-$os}
# Always use "stable" as the codename
codename="stable"
arch=${arch:-$(arch || uname -m)}
# Only detect OS if not manually specified
if [ -z "$distro" ]; then
detect_os_system ||
detect_os_legacy_python ||
detect_os_modern_python
fi
# Always ensure we have a distro
(test -z "$distro") && {
echo_okfail_rc "1" "Unable to detect your OS distribution!"
cat <<EOF
>>>>:
>>>>: The 'distro' value is required. Without it, the install script
>>>>: cannot retrieve the correct configuration for this system.
>>>>:
>>>>: You can force this script to use a particular value by specifying distro
>>>>: via environment variable. E.g., to specify a distro
>>>>: such as $example_name, use the following:
>>>>:
>>>>: $prefix distro=$example_distro $self
>>>>:
EOF
die
}
}
function create_repo_config {
if [ -z "$PKG_PATH" ]; then
repo_url="${PKG_URL}"
else
repo_url="${PKG_URL}/${PKG_PATH}"
fi
# Create configuration with GPG key verification
local gpg_keyring_path="/usr/share/keyrings/${PACKAGE_NAME}-archive-keyring.gpg"
local apt_conf=$(cat <<EOF
deb [arch=$(dpkg --print-architecture) signed-by=${gpg_keyring_path}] ${repo_url} stable main
EOF
)
echo "$apt_conf"
return 0
}
function check_gpg_key {
local text="Checking if GPG key is accessible at ${GPG_KEY_URL} ..."
echo_running "$text"
local code="$(curl -1IsL -w "%{http_code}\\n" "$GPG_KEY_URL" -o /dev/null --connect-timeout 15 --max-time 60)"
test "$code" == "200" && {
echo_okfail_rc 0 "$text"
return 0
} || {
echo_okfail_rc 1 "$text"
echo_helptext "Failed to access the GPG key. Please check that it exists in your S3 bucket."
cat <<EOF
>>>>:
>>>>: It looks like we can't access the GPG key at ${GPG_KEY_URL}
>>>>:
EOF
die
}
}
function check_dpkg_tool {
local tool=${1}
local required=${2:-true}
local install=${3:-true}
local text="Checking for apt dependency '$tool' ..."
echo_running "$text"
dpkg -l | grep "$tool\>" &>$tmp_log
echo_okfail "$text" || {
if $install; then
test "$apt_updated" == "yes" || update_apt
local text="Attempting to install '$tool' ..."
echo_running "$text"
apt-get install -y "$tool" &>$tmp_log
echo_okfail "$text" || {
if $required; then
die "Could not install '$tool', check your permissions, etc."
fi
}
else {
if $required; then
die "$tool is not installed, but is required by this script."
fi
}
fi
}
return 0
}
function update_apt {
local text="Updating apt repository metadata cache ..."
local tmp_log=$(mktemp .s3_deb_output_XXXXXXXXX.log)
echo_running "$text"
apt-get update &>$tmp_log
echo_okfail "$text" || {
echo_colour "red" "Failed to update via apt-get update"
cat $tmp_log
rm -rf $tmp_log
die "Failed to update via apt-get update - Context above (maybe no packages?)."
}
rm -rf $tmp_log
apt_updated="yes"
}
function install_apt_prereqs {
# Debian-archive-keyring has to be installed for apt-transport-https.
test "${distro}" == "debian" && {
check_dpkg_tool "debian-keyring"
check_dpkg_tool "debian-archive-keyring"
}
check_dpkg_tool "apt-transport-https"
check_dpkg_tool "ca-certificates" false
check_dpkg_tool "gnupg"
}
function import_gpg_key {
local text="Importing '$PACKAGE_NAME' repository GPG key from S3 ..."
echo_running "$text"
local gpg_keyring_path="/usr/share/keyrings/${PACKAGE_NAME}-archive-keyring.gpg"
# Check if GPG key is accessible
check_gpg_key
# Download and import GPG key
curl -1sLf "${GPG_KEY_URL}" | gpg --dearmor > $gpg_keyring_path
chmod 644 $gpg_keyring_path
# Check for older apt versions that don't support signed-by
local signed_by_version="1.1"
local detected_version=$(dpkg -s apt | grep Version | cut -d' ' -f2)
if [ "$(printf "%s\n" $detected_version $signed_by_version | sort -V | head -n 1)" != "$signed_by_version" ]; then
echo_helptext "Detected older apt version without signed-by support. Copying key to trusted.gpg.d."
cp ${gpg_keyring_path} /etc/apt/trusted.gpg.d/${PACKAGE_NAME}.gpg
chmod 644 /etc/apt/trusted.gpg.d/${PACKAGE_NAME}.gpg
fi
echo_okfail "$text" || die "Could not import the GPG key for this repository"
}
function setup_repository {
local repo_path="/etc/apt/sources.list.d/${PACKAGE_NAME}.list"
local text="Installing '$PACKAGE_NAME' repository via apt ..."
echo_running "$text"
create_repo_config > "$repo_path"
chmod 644 $repo_path
echo_okfail "$text" || die "Could not install the repository, do you have permissions?"
}
function usage () {
cat <<EOF
Usage: $self [opts]
-h Displays this usage text.
-i Ignore repository setup errors during setup and
continue with install. This will leave the repository config
in place rather than removing it upon errors.
-p Package name to use for repository setup (default: ${PACKAGE_NAME})
-k GPG key URL (default: ${GPG_KEY_URL})
EOF
exit 0
}
trap cleanup EXIT
trap shutdown INT
ignore_errors=1
apt_updated="no"
while getopts ":ihp:b:s:k:" OPT; do
case $OPT in
i) ignore_errors=0 ;;
h) usage ;;
p) PACKAGE_NAME=$OPTARG ;;
b) PKG_URL=$OPTARG ;;
s) PKG_PATH=$OPTARG ;;
k) GPG_KEY_URL=$OPTARG ;;
\?) usage ;;
esac
done
shift $(($OPTIND - 1))
#
# MAIN
#
echo "Executing the setup script for the '$PACKAGE_NAME' S3 repository ..."
echo
check_tool "curl"
check_tool "apt-get"
detect_os
install_apt_prereqs
import_gpg_key
setup_repository
update_apt
echo_okfail_rc "0" "The repository has been installed successfully - You're ready to rock!"
echo
echo "You can now install the package with: apt install $PACKAGE_NAME"

View File

@@ -1,15 +1,21 @@
cd dist cd dist
for i in *.apk; do # for i in *.apk; do
[ -f "$i" ] || break # [ -f "$i" ] || break
cloudsmith push alpine --republish infisical/infisical-cli/alpine/any-version $i # cloudsmith push alpine --republish infisical/infisical-cli/alpine/any-version $i
done # done
# for i in *.deb; do
# [ -f "$i" ] || break
# cloudsmith push deb --republish infisical/infisical-cli/any-distro/any-version $i
# done
for i in *.deb; do for i in *.deb; do
[ -f "$i" ] || break [ -f "$i" ] || break
cloudsmith push deb --republish infisical/infisical-cli/any-distro/any-version $i deb-s3 upload --bucket=$INFISICAL_CLI_S3_BUCKET --prefix=deb --visibility=private --sign=$INFISICAL_CLI_REPO_SIGNING_KEY_ID --preserve-versions $i
done done
for i in *.rpm; do
[ -f "$i" ] || break # for i in *.rpm; do
cloudsmith push rpm --republish infisical/infisical-cli/any-distro/any-version $i # [ -f "$i" ] || break
done # cloudsmith push rpm --republish infisical/infisical-cli/any-distro/any-version $i
# done

View File

@@ -264,6 +264,7 @@ The available authentication methods are `universalAuth`, `kubernetesAuth`, `aws
- `credentialsRef.secretName`: The name of the Kubernetes secret. - `credentialsRef.secretName`: The name of the Kubernetes secret.
- `credentialsRef.secretNamespace`: The namespace of the Kubernetes secret. - `credentialsRef.secretNamespace`: The namespace of the Kubernetes secret.
Example: Example:
```yaml ```yaml
@@ -296,6 +297,9 @@ The available authentication methods are `universalAuth`, `kubernetesAuth`, `aws
- `serviceAccountRef`: The name and namespace of the service account that will be used to authenticate with Infisical. - `serviceAccountRef`: The name and namespace of the service account that will be used to authenticate with Infisical.
- `serviceAccountRef.name`: The name of the service account. - `serviceAccountRef.name`: The name of the service account.
- `serviceAccountRef.namespace`: The namespace of the service account. - `serviceAccountRef.namespace`: The namespace of the service account.
- `autoCreateServiceAccountToken`: If set to `true`, the operator will automatically create a short-lived service account token on-demand for the service account. Defaults to `false`.
- `serviceAccountTokenAudiences`: Optionally specify audience for the service account token. This field is only relevant if you have set `autoCreateServiceAccountToken` to `true`. No audience is specified by default.
Example: Example:
@@ -303,6 +307,9 @@ The available authentication methods are `universalAuth`, `kubernetesAuth`, `aws
spec: spec:
kubernetesAuth: kubernetesAuth:
identityId: <machine-identity-id> identityId: <machine-identity-id>
autoCreateServiceAccountToken: true # Automatically creates short-lived service account tokens for the service account.
serviceAccountTokenAudiences:
- <audience> # Optionally specify audience for the service account token. No audience is specified by default.
serviceAccountRef: serviceAccountRef:
name: <secret-name> name: <secret-name>
namespace: <secret-namespace> namespace: <secret-namespace>

View File

@@ -291,6 +291,8 @@ After applying the InfisicalPushSecret CRD, you should notice that the secrets y
- `serviceAccountRef`: The name and namespace of the service account that will be used to authenticate with Infisical. - `serviceAccountRef`: The name and namespace of the service account that will be used to authenticate with Infisical.
- `serviceAccountRef.name`: The name of the service account. - `serviceAccountRef.name`: The name of the service account.
- `serviceAccountRef.namespace`: The namespace of the service account. - `serviceAccountRef.namespace`: The namespace of the service account.
- `autoCreateServiceAccountToken`: If set to `true`, the operator will automatically create a short-lived service account token on-demand for the service account. Defaults to `false`.
- `serviceAccountTokenAudiences`: Optionally specify audience for the service account token. This field is only relevant if you have set `autoCreateServiceAccountToken` to `true`. No audience is specified by default.
Example: Example:
@@ -298,6 +300,9 @@ After applying the InfisicalPushSecret CRD, you should notice that the secrets y
spec: spec:
kubernetesAuth: kubernetesAuth:
identityId: <machine-identity-id> identityId: <machine-identity-id>
autoCreateServiceAccountToken: true # Automatically creates short-lived service account tokens for the service account.
serviceAccountTokenAudiences:
- <audience> # Optionally specify audience for the service account token. No audience is specified by default.
serviceAccountRef: serviceAccountRef:
name: <secret-name> name: <secret-name>
namespace: <secret-namespace> namespace: <secret-namespace>

View File

@@ -156,30 +156,34 @@ spec:
<Accordion title="authentication.kubernetesAuth"> <Accordion title="authentication.kubernetesAuth">
The Kubernetes machine identity authentication method is used to authenticate with Infisical. The identity ID is stored in a field in the InfisicalSecret resource. This authentication method can only be used within a Kubernetes environment. The Kubernetes machine identity authentication method is used to authenticate with Infisical. The identity ID is stored in a field in the InfisicalSecret resource. This authentication method can only be used within a Kubernetes environment.
<Tabs>
<Tab title="Short-lived service account tokens (Recommended)">
Short-lived service account tokens are automatically created by the operator and are valid only for a short period of time. This is the recommended approach for using Kubernetes auth in the Infisical Secrets Operator.
<Steps> <Steps>
<Step title="Obtaining the token reviewer JWT for Infisical"> <Step title="Obtaining the token reviewer JWT for Infisical">
1.1. Start by creating a service account in your Kubernetes cluster that will be used by Infisical to authenticate with the Kubernetes API Server. **1.1.** Start by creating a reviewer service account in your Kubernetes cluster that will be used by Infisical to authenticate with the Kubernetes API Server.
```yaml infisical-service-account.yaml ```yaml infisical-reviewer-service-account.yaml
apiVersion: v1 apiVersion: v1
kind: ServiceAccount kind: ServiceAccount
metadata: metadata:
name: infisical-auth name: infisical-token-reviewer
namespace: default namespace: default
``` ```
``` ```bash
kubectl apply -f infisical-service-account.yaml kubectl apply -f infisical-reviewer-service-account.yaml
``` ```
1.2. Bind the service account to the `system:auth-delegator` cluster role. As described [here](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#other-component-roles), this role allows delegated authentication and authorization checks, specifically for Infisical to access the [TokenReview API](https://kubernetes.io/docs/reference/kubernetes-api/authentication-resources/token-review-v1/). You can apply the following configuration file: **1.2.** Bind the reviewer service account to the `system:auth-delegator` cluster role. As described [here](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#other-component-roles), this role allows delegated authentication and authorization checks, specifically for Infisical to access the [TokenReview API](https://kubernetes.io/docs/reference/kubernetes-api/authentication-resources/token-review-v1/). You can apply the following configuration file:
```yaml cluster-role-binding.yaml ```yaml infisical-reviewer-cluster-role-binding.yaml
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding kind: ClusterRoleBinding
metadata: metadata:
name: role-tokenreview-binding name: infisical-token-reviewer-role-binding
namespace: default namespace: default
roleRef: roleRef:
apiGroup: rbac.authorization.k8s.io apiGroup: rbac.authorization.k8s.io
@@ -187,45 +191,44 @@ spec:
name: system:auth-delegator name: system:auth-delegator
subjects: subjects:
- kind: ServiceAccount - kind: ServiceAccount
name: infisical-auth name: infisical-token-reviewer
namespace: default namespace: default
``` ```
``` ```bash
kubectl apply -f cluster-role-binding.yaml kubectl apply -f infisical-reviewer-cluster-role-binding.yaml
``` ```
1.3. Next, create a long-lived service account JWT token (i.e. the token reviewer JWT token) for the service account using this configuration file for a new `Secret` resource: **1.3.** Next, create a long-lived service account JWT token (i.e. the token reviewer JWT token) for the service account using this configuration file for a new `Secret` resource:
```yaml service-account-token.yaml ```yaml service-account-reviewer-token.yaml
apiVersion: v1 apiVersion: v1
kind: Secret kind: Secret
type: kubernetes.io/service-account-token type: kubernetes.io/service-account-token
metadata: metadata:
name: infisical-auth-token name: infisical-token-reviewer-token
annotations: annotations:
kubernetes.io/service-account.name: "infisical-auth" kubernetes.io/service-account.name: "infisical-token-reviewer"
``` ```
```
kubectl apply -f service-account-token.yaml
```
1.4. Link the secret in step 1.3 to the service account in step 1.1:
```bash ```bash
kubectl patch serviceaccount infisical-auth -p '{"secrets": [{"name": "infisical-auth-token"}]}' -n default kubectl apply -f service-account-reviewer-token.yaml
``` ```
1.5. Finally, retrieve the token reviewer JWT token from the secret. **1.4.** Link the secret in step 1.3 to the service account in step 1.1:
```bash ```bash
kubectl get secret infisical-auth-token -n default -o=jsonpath='{.data.token}' | base64 --decode kubectl patch serviceaccount infisical-token-reviewer -p '{"secrets": [{"name": "infisical-token-reviewer-token"}]}' -n default
```
**1.5.** Finally, retrieve the token reviewer JWT token from the secret.
```bash
kubectl get secret infisical-token-reviewer-token -n default -o=jsonpath='{.data.token}' | base64 --decode
``` ```
Keep this JWT token handy as you will need it for the **Token Reviewer JWT** field when configuring the Kubernetes Auth authentication method for the identity in step 2. Keep this JWT token handy as you will need it for the **Token Reviewer JWT** field when configuring the Kubernetes Auth authentication method for the identity in step 2.
</Step> </Step>
<Step title="Creating an identity"> <Step title="Creating an identity">
@@ -264,6 +267,221 @@ spec:
![identities project create](/images/platform/identities/identities-project-create.png) ![identities project create](/images/platform/identities/identities-project-create.png)
</Step> </Step>
<Step title="Create a new Kubernetes service account to authenticate with Infisical">
You have already created the reviewer service account in step **1.1**. Now, create a new Kubernetes service account that will be used to authenticate with Infisical.
This service account will create short-lived tokens that will be used to authenticate with Infisical. The operator itself will handle the creation of these tokens automatically.
```yaml infisical-service-account.yaml
kind: ServiceAccount
apiVersion: v1
metadata:
name: infisical-service-account
```
```bash
kubectl apply -f infisical-service-account.yaml -n default
```
</Step>
<Step title="Add your identity ID & service account to your InfisicalSecret resource">
Once you have created your machine identity and added it to your project(s), you will need to add the identity ID to your InfisicalSecret resource.
In the `authentication.kubernetesAuth.identityId` field, add the identity ID of the machine identity you created.
See the example below for more details.
</Step>
<Step title="Add your Kubernetes service account token to the InfisicalSecret resource">
Add the service account details from the previous steps under `authentication.kubernetesAuth.serviceAccountRef`.
Here you will need to enter the name and namespace of the service account.
The example below shows a complete InfisicalSecret resource with all required fields defined.
Make sure you set `authentication.kubernetesAuth.autoCreateServiceAccountToken` to `true` to automatically create short-lived service account tokens for the service account.
</Step>
</Steps>
<Info>
Make sure to also populate the `secretsScope` field with the project slug
_`projectSlug`_, environment slug _`envSlug`_, and secrets path
_`secretsPath`_ that you want to fetch secrets from. Please see the example
below.
</Info>
## Example
```yaml example-kubernetes-auth.yaml
apiVersion: secrets.infisical.com/v1alpha1
kind: InfisicalSecret
metadata:
name: infisicalsecret-sample-crd
spec:
authentication:
kubernetesAuth:
identityId: <machine-identity-id>
autoCreateServiceAccountToken: true # Automatically creates short-lived service account tokens for the service account.
serviceAccountTokenAudiences:
- <audience> # Optionally specify audience for the service account token. No audience is specified by default.
serviceAccountRef:
name: infisical-service-account # The service account we just created in the previous step.
namespace: <service-account-namespace>
# secretsScope is identical to the secrets scope in the universalAuth field in this sample.
secretsScope:
projectSlug: your-project-slug
envSlug: prod
secretsPath: "/path"
recursive: true
...
```
</Tab>
<Tab title="Manual long-lived service account tokens">
Manual long-lived service account tokens are manually created by the user and are valid indefinitely unless deleted or rotated. In most cases, you should be using the automatic short-lived service account tokens as they are more secure and easier to use.
<Steps>
<Step title="Obtaining the token reviewer JWT for Infisical">
**1.1.** Start by creating a reviewer service account in your Kubernetes cluster that will be used by Infisical to authenticate with the Kubernetes API Server.
```yaml infisical-reviewer-service-account.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: infisical-token-reviewer
namespace: default
```
```bash
kubectl apply -f infisical-reviewer-service-account.yaml
```
**1.2.** Bind the reviewer service account to the `system:auth-delegator` cluster role. As described [here](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#other-component-roles), this role allows delegated authentication and authorization checks, specifically for Infisical to access the [TokenReview API](https://kubernetes.io/docs/reference/kubernetes-api/authentication-resources/token-review-v1/). You can apply the following configuration file:
```yaml infisical-reviewer-cluster-role-binding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: infisical-token-reviewer-role-binding
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: infisical-token-reviewer
namespace: default
```
```bash
kubectl apply -f infisical-reviewer-cluster-role-binding.yaml
```
**1.3.** Next, create a long-lived service account JWT token (i.e. the token reviewer JWT token) for the service account using this configuration file for a new `Secret` resource:
```yaml service-account-reviewer-token.yaml
apiVersion: v1
kind: Secret
type: kubernetes.io/service-account-token
metadata:
name: infisical-token-reviewer-token
annotations:
kubernetes.io/service-account.name: "infisical-token-reviewer"
```
```bash
kubectl apply -f service-account-reviewer-token.yaml
```
**1.4.** Link the secret in step 1.3 to the service account in step 1.1:
```bash
kubectl patch serviceaccount infisical-token-reviewer -p '{"secrets": [{"name": "infisical-token-reviewer-token"}]}' -n default
```
**1.5.** Finally, retrieve the token reviewer JWT token from the secret.
```bash
kubectl get secret infisical-token-reviewer-token -n default -o=jsonpath='{.data.token}' | base64 --decode
```
Keep this JWT token handy as you will need it for the **Token Reviewer JWT** field when configuring the Kubernetes Auth authentication method for the identity in step 2.
</Step>
<Step title="Creating an identity">
To create an identity, head to your Organization Settings > Access Control > Machine Identities and press **Create identity**.
![identities organization](/images/platform/identities/identities-org.png)
When creating an identity, you specify an organization level [role](/documentation/platform/role-based-access-controls) for it to assume; you can configure roles in Organization Settings > Access Control > Organization Roles.
![identities organization create](/images/platform/identities/identities-org-create.png)
Now input a few details for your new identity. Here's some guidance for each field:
- Name (required): A friendly name for the identity.
- Role (required): A role from the **Organization Roles** tab for the identity to assume. The organization role assigned will determine what organization level resources this identity can have access to.
Once you've created an identity, you'll be prompted to configure the authentication method for it. Here, select **Kubernetes Auth**.
<Info>
To learn more about each field of the Kubernetes native authentication method, see step 2 of [guide](/documentation/platform/identities/kubernetes-auth#guide).
</Info>
![identities organization create auth method](/images/platform/identities/identities-org-create-kubernetes-auth-method.png)
</Step>
<Step title="Adding an identity to a project">
To allow the operator to use the given identity to access secrets, you will need to add the identity to project(s) that you would like to grant it access to.
To do this, head over to the project you want to add the identity to and go to Project Settings > Access Control > Machine Identities and press **Add identity**.
Next, select the identity you want to add to the project and the project level role you want to allow it to assume. The project role assigned will determine what project level resources this identity can have access to.
![identities project](/images/platform/identities/identities-project.png)
![identities project create](/images/platform/identities/identities-project-create.png)
</Step>
<Step title="Create a new Kubernetes service account to authenticate with Infisical">
You have already created the reviewer service account in step **1.1**. Now, create a new Kubernetes service account that will be used to authenticate with Infisical.
```yaml infisical-service-account.yaml
kind: ServiceAccount
apiVersion: v1
metadata:
name: infisical-service-account
```
```bash
kubectl apply -f infisical-service-account.yaml -n default
```
</Step>
<Step title="Create a service account token for the Kubernetes service account">
Create a service account token for the newly created Kubernetes service account from the previous step.
```yaml infisical-service-account-token.yaml
apiVersion: v1
kind: Secret
type: kubernetes.io/service-account-token
metadata:
name: infisical-service-account-token
annotations:
kubernetes.io/service-account.name: "infisical-service-account"
```
```bash
kubectl apply -f infisical-service-account-token.yaml -n default
```
Patch the service account with the newly created service account token.
```bash
kubectl patch serviceaccount infisical-service-account -p '{"secrets": [{"name": "infisical-service-account-token"}]}' -n default
```
</Step>
<Step title="Add your identity ID & service account to your InfisicalSecret resource"> <Step title="Add your identity ID & service account to your InfisicalSecret resource">
Once you have created your machine identity and added it to your project(s), you will need to add the identity ID to your InfisicalSecret resource. Once you have created your machine identity and added it to your project(s), you will need to add the identity ID to your InfisicalSecret resource.
In the `authentication.kubernetesAuth.identityId` field, add the identity ID of the machine identity you created. In the `authentication.kubernetesAuth.identityId` field, add the identity ID of the machine identity you created.
@@ -274,7 +492,6 @@ spec:
Here you will need to enter the name and namespace of the service account. Here you will need to enter the name and namespace of the service account.
The example below shows a complete InfisicalSecret resource with all required fields defined. The example below shows a complete InfisicalSecret resource with all required fields defined.
</Step> </Step>
</Steps> </Steps>
<Info> <Info>
@@ -296,7 +513,7 @@ spec:
kubernetesAuth: kubernetesAuth:
identityId: <machine-identity-id> identityId: <machine-identity-id>
serviceAccountRef: serviceAccountRef:
name: <service-account-name> name: infisical-service-account # The service account we just created in the previous step. (*not* the reviewer service account)
namespace: <service-account-namespace> namespace: <service-account-namespace>
# secretsScope is identical to the secrets scope in the universalAuth field in this sample. # secretsScope is identical to the secrets scope in the universalAuth field in this sample.
@@ -308,6 +525,52 @@ spec:
... ...
``` ```
</Tab>
</Tabs>
</Accordion> </Accordion>
<Accordion title="authentication.awsIamAuth"> <Accordion title="authentication.awsIamAuth">

View File

@@ -13,9 +13,9 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: v0.9.0
+version: v0.9.1
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
-appVersion: "v0.9.0"
+appVersion: "v0.9.1"
View File
@@ -74,6 +74,13 @@ spec:
type: object
kubernetesAuth:
properties:
autoCreateServiceAccountToken:
description: Optionally automatically create a service account
token for the configured service account. If this is set to
`true`, the operator will automatically create a service account
token for the configured service account. This field is recommended
in most cases.
type: boolean
identityId:
type: string
serviceAccountRef:
@@ -86,6 +93,13 @@ spec:
- name
- namespace
type: object
serviceAccountTokenAudiences:
description: The audiences to use for the service account token.
This is only relevant if `autoCreateServiceAccountToken` is
true.
items:
type: string
type: array
required:
- identityId
- serviceAccountRef
View File
@@ -74,6 +74,13 @@ spec:
type: object
kubernetesAuth:
properties:
autoCreateServiceAccountToken:
description: Optionally automatically create a service account
token for the configured service account. If this is set to
`true`, the operator will automatically create a service account
token for the configured service account. This field is recommended
in most cases.
type: boolean
identityId:
type: string
serviceAccountRef:
@@ -86,6 +93,13 @@ spec:
- name
- namespace
type: object
serviceAccountTokenAudiences:
description: The audiences to use for the service account token.
This is only relevant if `autoCreateServiceAccountToken` is
true.
items:
type: string
type: array
required:
- identityId
- serviceAccountRef
View File
@@ -137,6 +137,12 @@ spec:
type: object
kubernetesAuth:
properties:
autoCreateServiceAccountToken:
description: Optionally automatically create a service account
token for the configured service account. If this is set to
`true`, the operator will automatically create a service account
token for the configured service account.
type: boolean
identityId:
type: string
secretsScope:
@@ -164,6 +170,13 @@ spec:
- name
- namespace
type: object
serviceAccountTokenAudiences:
description: The audiences to use for the service account token.
This is only relevant if `autoCreateServiceAccountToken` is
true.
items:
type: string
type: array
required:
- identityId
- secretsScope
View File
@@ -23,6 +23,13 @@ rules:
- list
- update
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- apiGroups:
- ""
resources:
@@ -42,6 +49,12 @@ rules:
- get
- list
- watch
- apiGroups:
- ""
resources:
- serviceaccounts/token
verbs:
- create
- apiGroups:
- apps
resources:
@@ -62,6 +75,12 @@ rules:
- list
- update
- watch
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- secrets.infisical.com
resources:
View File
@@ -32,7 +32,7 @@ controllerManager:
- ALL
image:
repository: infisical/kubernetes-operator
-tag: v0.9.0
+tag: v0.9.1
resources:
limits:
cpu: 500m
View File
@@ -24,3 +24,6 @@ Dockerfile.cross
*.swp
*.swo
*~
# Testing directories
auto-token
View File
@@ -49,6 +49,14 @@ type GenericKubernetesAuth struct {
IdentityID string `json:"identityId"`
// +kubebuilder:validation:Required
ServiceAccountRef KubernetesServiceAccountRef `json:"serviceAccountRef"`
// Optionally automatically create a service account token for the configured service account.
// If this is set to `true`, the operator will automatically create a service account token for the configured service account. This field is recommended in most cases.
// +kubebuilder:validation:Optional
AutoCreateServiceAccountToken bool `json:"autoCreateServiceAccountToken"`
// The audiences to use for the service account token. This is only relevant if `autoCreateServiceAccountToken` is true.
// +kubebuilder:validation:Optional
ServiceAccountTokenAudiences []string `json:"serviceAccountTokenAudiences"`
}
type TLSConfig struct {
View File
@@ -38,6 +38,14 @@ type KubernetesAuthDetails struct {
// +kubebuilder:validation:Required
SecretsScope MachineIdentityScopeInWorkspace `json:"secretsScope"`
// Optionally automatically create a service account token for the configured service account.
// If this is set to `true`, the operator will automatically create a service account token for the configured service account.
// +kubebuilder:validation:Optional
AutoCreateServiceAccountToken bool `json:"autoCreateServiceAccountToken"`
// The audiences to use for the service account token. This is only relevant if `autoCreateServiceAccountToken` is true.
// +kubebuilder:validation:Optional
ServiceAccountTokenAudiences []string `json:"serviceAccountTokenAudiences"`
}
type KubernetesServiceAccountRef struct {
View File
@@ -48,7 +48,7 @@ func (in *Authentication) DeepCopyInto(out *Authentication) {
out.ServiceAccount = in.ServiceAccount
out.ServiceToken = in.ServiceToken
out.UniversalAuth = in.UniversalAuth
-out.KubernetesAuth = in.KubernetesAuth
+in.KubernetesAuth.DeepCopyInto(&out.KubernetesAuth)
out.AwsIamAuth = in.AwsIamAuth
out.AzureAuth = in.AzureAuth
out.GcpIdTokenAuth = in.GcpIdTokenAuth
@@ -207,7 +207,7 @@ func (in *GenericGcpIdTokenAuth) DeepCopy() *GenericGcpIdTokenAuth {
func (in *GenericInfisicalAuthentication) DeepCopyInto(out *GenericInfisicalAuthentication) {
*out = *in
out.UniversalAuth = in.UniversalAuth
-out.KubernetesAuth = in.KubernetesAuth
+in.KubernetesAuth.DeepCopyInto(&out.KubernetesAuth)
out.AwsIamAuth = in.AwsIamAuth
out.AzureAuth = in.AzureAuth
out.GcpIdTokenAuth = in.GcpIdTokenAuth
@@ -228,6 +228,11 @@ func (in *GenericInfisicalAuthentication) DeepCopy() *GenericInfisicalAuthentica
func (in *GenericKubernetesAuth) DeepCopyInto(out *GenericKubernetesAuth) {
*out = *in
out.ServiceAccountRef = in.ServiceAccountRef
if in.ServiceAccountTokenAudiences != nil {
in, out := &in.ServiceAccountTokenAudiences, &out.ServiceAccountTokenAudiences
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericKubernetesAuth.
@@ -336,7 +341,7 @@ func (in *InfisicalDynamicSecretList) DeepCopyObject() runtime.Object {
func (in *InfisicalDynamicSecretSpec) DeepCopyInto(out *InfisicalDynamicSecretSpec) {
*out = *in
in.ManagedSecretReference.DeepCopyInto(&out.ManagedSecretReference)
-out.Authentication = in.Authentication
+in.Authentication.DeepCopyInto(&out.Authentication)
out.DynamicSecret = in.DynamicSecret
out.TLS = in.TLS
}
@@ -476,7 +481,7 @@ func (in *InfisicalPushSecretSecretSource) DeepCopy() *InfisicalPushSecretSecret
func (in *InfisicalPushSecretSpec) DeepCopyInto(out *InfisicalPushSecretSpec) {
*out = *in
out.Destination = in.Destination
-out.Authentication = in.Authentication
+in.Authentication.DeepCopyInto(&out.Authentication)
in.Push.DeepCopyInto(&out.Push)
out.TLS = in.TLS
}
@@ -583,7 +588,7 @@ func (in *InfisicalSecretList) DeepCopyObject() runtime.Object {
func (in *InfisicalSecretSpec) DeepCopyInto(out *InfisicalSecretSpec) {
*out = *in
out.TokenSecretReference = in.TokenSecretReference
-out.Authentication = in.Authentication
+in.Authentication.DeepCopyInto(&out.Authentication)
in.ManagedSecretReference.DeepCopyInto(&out.ManagedSecretReference)
if in.ManagedKubeSecretReferences != nil {
in, out := &in.ManagedKubeSecretReferences, &out.ManagedKubeSecretReferences
@@ -654,6 +659,11 @@ func (in *KubernetesAuthDetails) DeepCopyInto(out *KubernetesAuthDetails) {
*out = *in
out.ServiceAccountRef = in.ServiceAccountRef
out.SecretsScope = in.SecretsScope
if in.ServiceAccountTokenAudiences != nil {
in, out := &in.ServiceAccountTokenAudiences, &out.ServiceAccountTokenAudiences
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesAuthDetails.
View File
@@ -73,6 +73,13 @@ spec:
type: object
kubernetesAuth:
properties:
autoCreateServiceAccountToken:
description: Optionally automatically create a service account
token for the configured service account. If this is set
to `true`, the operator will automatically create a service
account token for the configured service account. This field
is recommended in most cases.
type: boolean
identityId:
type: string
serviceAccountRef:
@@ -85,6 +92,13 @@ spec:
- name
- namespace
type: object
serviceAccountTokenAudiences:
description: The audiences to use for the service account
token. This is only relevant if `autoCreateServiceAccountToken`
is true.
items:
type: string
type: array
required:
- identityId
- serviceAccountRef
View File
@@ -73,6 +73,13 @@ spec:
type: object
kubernetesAuth:
properties:
autoCreateServiceAccountToken:
description: Optionally automatically create a service account
token for the configured service account. If this is set
to `true`, the operator will automatically create a service
account token for the configured service account. This field
is recommended in most cases.
type: boolean
identityId:
type: string
serviceAccountRef:
@@ -85,6 +92,13 @@ spec:
- name
- namespace
type: object
serviceAccountTokenAudiences:
description: The audiences to use for the service account
token. This is only relevant if `autoCreateServiceAccountToken`
is true.
items:
type: string
type: array
required:
- identityId
- serviceAccountRef
View File
@@ -136,6 +136,12 @@ spec:
type: object
kubernetesAuth:
properties:
autoCreateServiceAccountToken:
description: Optionally automatically create a service account
token for the configured service account. If this is set
to `true`, the operator will automatically create a service
account token for the configured service account.
type: boolean
identityId:
type: string
secretsScope:
@@ -163,6 +169,13 @@ spec:
- name
- namespace
type: object
serviceAccountTokenAudiences:
description: The audiences to use for the service account
token. This is only relevant if `autoCreateServiceAccountToken`
is true.
items:
type: string
type: array
required:
- identityId
- secretsScope
View File
@@ -16,6 +16,13 @@ rules:
- list
- update
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- apiGroups:
- ""
resources:
@@ -35,6 +42,12 @@ rules:
- get
- list
- watch
- apiGroups:
- ""
resources:
- serviceaccounts/token
verbs:
- create
- apiGroups:
- apps
resources:
@@ -55,6 +68,12 @@ rules:
- list
- update
- watch
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- secrets.infisical.com
resources:
View File
@@ -45,6 +45,9 @@ func (r *InfisicalDynamicSecretReconciler) GetLogger(req ctrl.Request) logr.Logg
// +kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;create;update;delete // +kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;create;update;delete
// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=list;watch;get;update // +kubebuilder:rbac:groups=apps,resources=deployments,verbs=list;watch;get;update
// +kubebuilder:rbac:groups="",resources=serviceaccounts,verbs=get;list;watch // +kubebuilder:rbac:groups="",resources=serviceaccounts,verbs=get;list;watch
//+kubebuilder:rbac:groups="",resources=pods,verbs=get;list
//+kubebuilder:rbac:groups="authentication.k8s.io",resources=tokenreviews,verbs=create
//+kubebuilder:rbac:groups="",resources=serviceaccounts/token,verbs=create
func (r *InfisicalDynamicSecretReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
View File
@@ -48,6 +48,9 @@ func (r *InfisicalPushSecretReconciler) GetLogger(req ctrl.Request) logr.Logger
//+kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;create;update;delete //+kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;create;update;delete
//+kubebuilder:rbac:groups=apps,resources=deployments,verbs=list;watch;get;update //+kubebuilder:rbac:groups=apps,resources=deployments,verbs=list;watch;get;update
//+kubebuilder:rbac:groups="",resources=serviceaccounts,verbs=get;list;watch //+kubebuilder:rbac:groups="",resources=serviceaccounts,verbs=get;list;watch
//+kubebuilder:rbac:groups="",resources=pods,verbs=get;list
//+kubebuilder:rbac:groups="authentication.k8s.io",resources=tokenreviews,verbs=create
//+kubebuilder:rbac:groups="",resources=serviceaccounts/token,verbs=create
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
View File
@@ -44,6 +44,9 @@ func (r *InfisicalSecretReconciler) GetLogger(req ctrl.Request) logr.Logger {
//+kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;create;update;delete //+kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;create;update;delete
//+kubebuilder:rbac:groups=apps,resources=deployments;daemonsets;statefulsets,verbs=list;watch;get;update //+kubebuilder:rbac:groups=apps,resources=deployments;daemonsets;statefulsets,verbs=list;watch;get;update
//+kubebuilder:rbac:groups="",resources=serviceaccounts,verbs=get;list;watch //+kubebuilder:rbac:groups="",resources=serviceaccounts,verbs=get;list;watch
//+kubebuilder:rbac:groups="",resources=pods,verbs=get;list
//+kubebuilder:rbac:groups="authentication.k8s.io",resources=tokenreviews,verbs=create
//+kubebuilder:rbac:groups="",resources=serviceaccounts/token,verbs=create
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
View File
@@ -8,12 +8,51 @@ import (
corev1 "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1"
authenticationv1 "k8s.io/api/authentication/v1"
"github.com/Infisical/infisical/k8-operator/api/v1alpha1" "github.com/Infisical/infisical/k8-operator/api/v1alpha1"
"github.com/aws/smithy-go/ptr"
infisicalSdk "github.com/infisical/go-sdk" infisicalSdk "github.com/infisical/go-sdk"
"sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client"
) )
-func GetServiceAccountToken(k8sClient client.Client, namespace string, serviceAccountName string) (string, error) {
+func GetServiceAccountToken(k8sClient client.Client, namespace string, serviceAccountName string, autoCreateServiceAccountToken bool, serviceAccountTokenAudiences []string) (string, error) {
if autoCreateServiceAccountToken {
restClient, err := GetRestClientFromClient()
if err != nil {
return "", fmt.Errorf("failed to get REST client: %w", err)
}
tokenRequest := &authenticationv1.TokenRequest{
Spec: authenticationv1.TokenRequestSpec{
ExpirationSeconds: ptr.Int64(600), // 10 minutes. the token only needs to be valid for when we do the initial k8s login.
},
}
if len(serviceAccountTokenAudiences) > 0 {
// Conditionally add the audiences if they are specified.
// Failing to do this causes a default audience to be used, which is not what we want if the user doesn't specify any.
tokenRequest.Spec.Audiences = serviceAccountTokenAudiences
}
result := &authenticationv1.TokenRequest{}
err = restClient.
Post().
Namespace(namespace).
Resource("serviceaccounts").
Name(serviceAccountName).
SubResource("token").
Body(tokenRequest).
Do(context.Background()).
Into(result)
if err != nil {
return "", fmt.Errorf("failed to create token: %w", err)
}
return result.Status.Token, nil
}
serviceAccount := &corev1.ServiceAccount{}
err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: serviceAccountName, Namespace: namespace}, serviceAccount)
@@ -173,6 +212,8 @@ func HandleKubernetesAuth(ctx context.Context, reconcilerClient client.Client, s
Name: infisicalPushSecret.Spec.Authentication.KubernetesAuth.ServiceAccountRef.Name,
},
SecretsScope: v1alpha1.MachineIdentityScopeInWorkspace{},
AutoCreateServiceAccountToken: infisicalPushSecret.Spec.Authentication.KubernetesAuth.AutoCreateServiceAccountToken,
ServiceAccountTokenAudiences: infisicalPushSecret.Spec.Authentication.KubernetesAuth.ServiceAccountTokenAudiences,
}
case SecretCrd.INFISICAL_DYNAMIC_SECRET:
@@ -189,6 +230,8 @@ func HandleKubernetesAuth(ctx context.Context, reconcilerClient client.Client, s
Name: infisicalDynamicSecret.Spec.Authentication.KubernetesAuth.ServiceAccountRef.Name,
},
SecretsScope: v1alpha1.MachineIdentityScopeInWorkspace{},
AutoCreateServiceAccountToken: infisicalDynamicSecret.Spec.Authentication.KubernetesAuth.AutoCreateServiceAccountToken,
ServiceAccountTokenAudiences: infisicalDynamicSecret.Spec.Authentication.KubernetesAuth.ServiceAccountTokenAudiences,
}
}
@@ -196,7 +239,14 @@ func HandleKubernetesAuth(ctx context.Context, reconcilerClient client.Client, s
return AuthenticationDetails{}, ErrAuthNotApplicable
}
-serviceAccountToken, err := GetServiceAccountToken(reconcilerClient, kubernetesAuthSpec.ServiceAccountRef.Namespace, kubernetesAuthSpec.ServiceAccountRef.Name)
+serviceAccountToken, err := GetServiceAccountToken(
reconcilerClient,
kubernetesAuthSpec.ServiceAccountRef.Namespace,
kubernetesAuthSpec.ServiceAccountRef.Name,
kubernetesAuthSpec.AutoCreateServiceAccountToken,
kubernetesAuthSpec.ServiceAccountTokenAudiences,
)
if err != nil {
return AuthenticationDetails{}, fmt.Errorf("unable to get service account token [err=%s]", err)
}
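As an aside, the TokenRequest call added above hits the same `serviceaccounts/token` subresource that `kubectl create token` uses, so a rough manual equivalent for debugging might be (kubectl v1.24+; the audience value is a placeholder and only needed if you configure one):
```bash
# Roughly mirrors the operator's request: 10-minute expiry, optional audience
kubectl create token infisical-service-account -n default --duration=10m --audience=<audience>
```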
View File
@@ -9,6 +9,9 @@ import (
corev1 "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1"
k8Errors "k8s.io/apimachinery/pkg/api/errors" k8Errors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client"
) )
@@ -58,3 +61,32 @@ func GetInfisicalUniversalAuthFromKubeSecret(ctx context.Context, reconcilerClie
return model.MachineIdentityDetails{ClientId: string(clientIdFromSecret), ClientSecret: string(clientSecretFromSecret)}, nil
}
func getKubeClusterConfig() (*rest.Config, error) {
config, err := rest.InClusterConfig()
if err != nil {
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
configOverrides := &clientcmd.ConfigOverrides{}
kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
return kubeConfig.ClientConfig()
}
return config, nil
}
func GetRestClientFromClient() (rest.Interface, error) {
config, err := getKubeClusterConfig()
if err != nil {
return nil, err
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
return clientset.CoreV1().RESTClient(), nil
}