Compare commits

...

19 Commits

SHA1 Message Date
e35135e4e3 test 2025-04-08 17:08:56 -04:00
c95dd69167 test upload 2025-04-08 16:58:35 -04:00
a50b8120fd Merge pull request #3378 from akhilmhdh/fix/doc-p-access-image
feat: updated ruby action
2025-04-08 16:21:06 -04:00
f1ee53d417 feat: updated ruby action 2025-04-09 01:49:35 +05:30
229ad79f49 Merge pull request #3377 from akhilmhdh/fix/doc-p-access-image
feat: added passphrase
2025-04-08 15:56:34 -04:00
d7dbd01ecf feat: banner respect silent 2025-04-09 01:24:38 +05:30
026fd21fd4 feat: added passphrase 2025-04-09 01:05:31 +05:30
9b9c1a52b3 Merge pull request #3376 from akhilmhdh/fix/doc-p-access-image
feat: added s3 deb pipeline
2025-04-08 15:05:32 -04:00
98aa424e2e Update .github/workflows/release_build_infisical_cli.yml
Co-authored-by: greptile-apps[bot] <165735046+greptile-apps[bot]@users.noreply.github.com>
2025-04-08 15:02:47 -04:00
2cd5df1ab3 feat: updated message 2025-04-09 00:30:48 +05:30
e0d863e06e Merge pull request #3375 from Infisical/helm-update-v0.9.1
Update Helm chart to version v0.9.1
2025-04-08 22:52:42 +04:00
d991af557b feat: added s3 deb pipeline 2025-04-09 00:22:00 +05:30
ae54d04357 Update Helm chart to version v0.9.1 2025-04-08 18:51:31 +00:00
fa590ba697 Merge pull request #3348 from Infisical/daniel/k8s-auto-reviewer-token
feat(k8s): k8s auth automatic service account token creation
2025-04-08 22:45:57 +04:00
1da2896bb0 Create codeql.yml 2025-04-07 21:00:43 -04:00
423a2f38ea Merge pull request #3371 from Infisical/misc/add-missing-version-filter
misc: add missing version filter in get secret by name
2025-04-08 02:55:21 +08:00
3f190426fe fix: added docs for operator managed service account tokens & made audience optional 2025-04-04 03:15:11 +04:00
3d072c2f48 feat(k8s): automatic service account token creation for k8s auth 2025-04-01 23:39:22 +04:00
82b828c10e feat(k8s): automatic service account token creation for k8s auth 2025-04-01 23:16:38 +04:00
29 changed files with 1595 additions and 384 deletions

.github/workflows/codeql.yml (new file, 102 lines added)

@ -0,0 +1,102 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL Advanced"
on:
push:
branches: [ "main", "development" ]
pull_request:
branches: [ "main", "development" ]
schedule:
- cron: '33 7 * * 3'
jobs:
analyze:
name: Analyze (${{ matrix.language }})
# Runner size impacts CodeQL analysis time. To learn more, please see:
# - https://gh.io/recommended-hardware-resources-for-running-codeql
# - https://gh.io/supported-runners-and-hardware-resources
# - https://gh.io/using-larger-runners (GitHub.com only)
# Consider using larger runners or machines with greater resources for possible analysis time improvements.
runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
permissions:
# required for all workflows
security-events: write
# required to fetch internal or private CodeQL packs
packages: read
# only required for workflows in private repositories
actions: read
contents: read
strategy:
fail-fast: false
matrix:
include:
- language: actions
build-mode: none
- language: go
build-mode: autobuild
- language: javascript-typescript
build-mode: none
# CodeQL supports the following values for 'language': 'actions', 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift'
# Use `c-cpp` to analyze code written in C, C++ or both
# Use 'java-kotlin' to analyze code written in Java, Kotlin or both
# Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both
# To learn more about changing the languages that are analyzed or customizing the build mode for your analysis,
# see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning.
# If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how
# your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
steps:
- name: Checkout repository
uses: actions/checkout@v4
# Add any setup steps before running the `github/codeql-action/init` action.
# This includes steps like installing compilers or runtimes (`actions/setup-node`
# or others). This is typically only required for manual builds.
# - name: Setup runtime (example)
# uses: actions/setup-example@v1
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
build-mode: ${{ matrix.build-mode }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
# queries: security-extended,security-and-quality
# If the analyze step fails for one of the languages you are analyzing with
# "We were unable to automatically build your code", modify the matrix above
# to set the build mode to "manual" for that language. Then modify this step
# to build your code.
# Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
- if: matrix.build-mode == 'manual'
shell: bash
run: |
echo 'If you are using a "manual" build mode for one or more of the' \
'languages you are analyzing, replace this with the commands to build' \
'your code, for example:'
echo ' make bootstrap'
echo ' make release'
exit 1
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3
with:
category: "/language:${{matrix.language}}"


@ -1,132 +1,147 @@
name: Build and release CLI
on:
workflow_dispatch:
workflow_dispatch:
push:
# run only against tags
tags:
- "infisical-cli/v*.*.*"
push:
# run only against tags
tags:
- "infisical-cli/v*.*.*"
permissions:
contents: write
contents: write
jobs:
cli-integration-tests:
name: Run tests before deployment
uses: ./.github/workflows/run-cli-tests.yml
secrets:
CLI_TESTS_UA_CLIENT_ID: ${{ secrets.CLI_TESTS_UA_CLIENT_ID }}
CLI_TESTS_UA_CLIENT_SECRET: ${{ secrets.CLI_TESTS_UA_CLIENT_SECRET }}
CLI_TESTS_SERVICE_TOKEN: ${{ secrets.CLI_TESTS_SERVICE_TOKEN }}
CLI_TESTS_PROJECT_ID: ${{ secrets.CLI_TESTS_PROJECT_ID }}
CLI_TESTS_ENV_SLUG: ${{ secrets.CLI_TESTS_ENV_SLUG }}
CLI_TESTS_USER_EMAIL: ${{ secrets.CLI_TESTS_USER_EMAIL }}
CLI_TESTS_USER_PASSWORD: ${{ secrets.CLI_TESTS_USER_PASSWORD }}
CLI_TESTS_INFISICAL_VAULT_FILE_PASSPHRASE: ${{ secrets.CLI_TESTS_INFISICAL_VAULT_FILE_PASSPHRASE }}
# cli-integration-tests:
# name: Run tests before deployment
# uses: ./.github/workflows/run-cli-tests.yml
# secrets:
# CLI_TESTS_UA_CLIENT_ID: ${{ secrets.CLI_TESTS_UA_CLIENT_ID }}
# CLI_TESTS_UA_CLIENT_SECRET: ${{ secrets.CLI_TESTS_UA_CLIENT_SECRET }}
# CLI_TESTS_SERVICE_TOKEN: ${{ secrets.CLI_TESTS_SERVICE_TOKEN }}
# CLI_TESTS_PROJECT_ID: ${{ secrets.CLI_TESTS_PROJECT_ID }}
# CLI_TESTS_ENV_SLUG: ${{ secrets.CLI_TESTS_ENV_SLUG }}
# CLI_TESTS_USER_EMAIL: ${{ secrets.CLI_TESTS_USER_EMAIL }}
# CLI_TESTS_USER_PASSWORD: ${{ secrets.CLI_TESTS_USER_PASSWORD }}
# CLI_TESTS_INFISICAL_VAULT_FILE_PASSPHRASE: ${{ secrets.CLI_TESTS_INFISICAL_VAULT_FILE_PASSPHRASE }}
npm-release:
runs-on: ubuntu-latest
# npm-release:
# runs-on: ubuntu-latest
# env:
# working-directory: ./npm
# needs:
# - cli-integration-tests
# - goreleaser
# steps:
# - uses: actions/checkout@v3
# with:
# fetch-depth: 0
# - name: Extract version
# run: |
# VERSION=$(echo ${{ github.ref_name }} | sed 's/infisical-cli\/v//')
# echo "Version extracted: $VERSION"
# echo "CLI_VERSION=$VERSION" >> $GITHUB_ENV
# - name: Print version
# run: echo ${{ env.CLI_VERSION }}
# - name: Setup Node
# uses: actions/setup-node@8f152de45cc393bb48ce5d89d36b731f54556e65 # v4.0.0
# with:
# node-version: 20
# cache: "npm"
# cache-dependency-path: ./npm/package-lock.json
# - name: Install dependencies
# working-directory: ${{ env.working-directory }}
# run: npm install --ignore-scripts
# - name: Set NPM version
# working-directory: ${{ env.working-directory }}
# run: npm version ${{ env.CLI_VERSION }} --allow-same-version --no-git-tag-version
# - name: Setup NPM
# working-directory: ${{ env.working-directory }}
# run: |
# echo 'registry="https://registry.npmjs.org/"' > ./.npmrc
# echo "//registry.npmjs.org/:_authToken=$NPM_TOKEN" >> ./.npmrc
# echo 'registry="https://registry.npmjs.org/"' > ~/.npmrc
# echo "//registry.npmjs.org/:_authToken=$NPM_TOKEN" >> ~/.npmrc
# env:
# NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
# - name: Pack NPM
# working-directory: ${{ env.working-directory }}
# run: npm pack
# - name: Publish NPM
# working-directory: ${{ env.working-directory }}
# run: npm publish --tarball=./infisical-sdk-${{github.ref_name}} --access public --registry=https://registry.npmjs.org/
# env:
# NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
# NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
goreleaser:
runs-on: ubuntu-latest
needs: [cli-integration-tests]
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- name: 🐋 Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: 🔧 Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- run: git fetch --force --tags
- run: echo "Ref name ${{github.ref_name}}"
- uses: actions/setup-go@v3
with:
go-version: ">=1.19.3"
cache: true
cache-dependency-path: cli/go.sum
- name: Setup for libssl1.0-dev
run: |
echo 'deb http://security.ubuntu.com/ubuntu bionic-security main' | sudo tee -a /etc/apt/sources.list
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 3B4FE6ACC0B21F32
sudo apt update
sudo apt-get install -y libssl1.0-dev
- name: OSXCross for CGO Support
run: |
mkdir ../../osxcross
git clone https://github.com/plentico/osxcross-target.git ../../osxcross/target
- uses: goreleaser/goreleaser-action@v4
with:
distribution: goreleaser-pro
version: v1.26.2-pro
args: release --clean
env:
working-directory: ./npm
needs:
- cli-integration-tests
- goreleaser
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Extract version
run: |
VERSION=$(echo ${{ github.ref_name }} | sed 's/infisical-cli\/v//')
echo "Version extracted: $VERSION"
echo "CLI_VERSION=$VERSION" >> $GITHUB_ENV
- name: Print version
run: echo ${{ env.CLI_VERSION }}
- name: Setup Node
uses: actions/setup-node@8f152de45cc393bb48ce5d89d36b731f54556e65 # v4.0.0
with:
node-version: 20
cache: "npm"
cache-dependency-path: ./npm/package-lock.json
- name: Install dependencies
working-directory: ${{ env.working-directory }}
run: npm install --ignore-scripts
- name: Set NPM version
working-directory: ${{ env.working-directory }}
run: npm version ${{ env.CLI_VERSION }} --allow-same-version --no-git-tag-version
- name: Setup NPM
working-directory: ${{ env.working-directory }}
run: |
echo 'registry="https://registry.npmjs.org/"' > ./.npmrc
echo "//registry.npmjs.org/:_authToken=$NPM_TOKEN" >> ./.npmrc
echo 'registry="https://registry.npmjs.org/"' > ~/.npmrc
echo "//registry.npmjs.org/:_authToken=$NPM_TOKEN" >> ~/.npmrc
env:
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
- name: Pack NPM
working-directory: ${{ env.working-directory }}
run: npm pack
- name: Publish NPM
working-directory: ${{ env.working-directory }}
run: npm publish --tarball=./infisical-sdk-${{github.ref_name}} --access public --registry=https://registry.npmjs.org/
env:
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
goreleaser:
runs-on: ubuntu-latest
needs: [cli-integration-tests]
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- name: 🐋 Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: 🔧 Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- run: git fetch --force --tags
- run: echo "Ref name ${{github.ref_name}}"
- uses: actions/setup-go@v3
with:
go-version: ">=1.19.3"
cache: true
cache-dependency-path: cli/go.sum
- name: Setup for libssl1.0-dev
run: |
echo 'deb http://security.ubuntu.com/ubuntu bionic-security main' | sudo tee -a /etc/apt/sources.list
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 3B4FE6ACC0B21F32
sudo apt update
sudo apt-get install -y libssl1.0-dev
- name: OSXCross for CGO Support
run: |
mkdir ../../osxcross
git clone https://github.com/plentico/osxcross-target.git ../../osxcross/target
- uses: goreleaser/goreleaser-action@v4
with:
distribution: goreleaser-pro
version: v1.26.2-pro
args: release --clean
env:
GITHUB_TOKEN: ${{ secrets.GO_RELEASER_GITHUB_TOKEN }}
POSTHOG_API_KEY_FOR_CLI: ${{ secrets.POSTHOG_API_KEY_FOR_CLI }}
FURY_TOKEN: ${{ secrets.FURYPUSHTOKEN }}
AUR_KEY: ${{ secrets.AUR_KEY }}
GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}
- uses: actions/setup-python@v4
- run: pip install --upgrade cloudsmith-cli
- name: Publish to CloudSmith
run: sh cli/upload_to_cloudsmith.sh
env:
CLOUDSMITH_API_KEY: ${{ secrets.CLOUDSMITH_API_KEY }}
GITHUB_TOKEN: ${{ secrets.GO_RELEASER_GITHUB_TOKEN }}
POSTHOG_API_KEY_FOR_CLI: ${{ secrets.POSTHOG_API_KEY_FOR_CLI }}
FURY_TOKEN: ${{ secrets.FURYPUSHTOKEN }}
AUR_KEY: ${{ secrets.AUR_KEY }}
GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}
- uses: actions/setup-python@v4
- run: pip install --upgrade cloudsmith-cli
- uses: ruby/setup-ruby@354a1ad156761f5ee2b7b13fa8e09943a5e8d252
with:
ruby-version: "3.3" # Not needed with a .ruby-version, .tool-versions or mise.toml
bundler-cache: true # runs 'bundle install' and caches installed gems automatically
- name: Install deb-s3
run: gem install deb-s3
- name: Configure GPG Key
run: echo -n "$GPG_SIGNING_KEY" | base64 --decode | gpg --batch --passphrase "$GPG_SIGNING_KEY_PASSPHRASE" --import
env:
GPG_SIGNING_KEY: ${{ secrets.GPG_SIGNING_KEY }}
GPG_SIGNING_KEY_PASSPHRASE: ${{ secrets.GPG_SIGNING_KEY_PASSPHRASE }}
- name: Publish to CloudSmith
run: sh cli/upload_to_cloudsmith.sh
env:
CLOUDSMITH_API_KEY: ${{ secrets.CLOUDSMITH_API_KEY }}
INFISICAL_CLI_S3_BUCKET: ${{ secrets.INFISICAL_CLI_S3_BUCKET }}
INFISICAL_CLI_REPO_SIGNING_KEY_ID: ${{ secrets.INFISICAL_CLI_REPO_SIGNING_KEY_ID }}
AWS_ACCESS_KEY_ID: ${{ secrets.INFISICAL_CLI_REPO_AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.INFISICAL_CLI_REPO_AWS_SECRET_ACCESS_KEY }}
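For reference, a minimal sketch of how the base64-encoded value consumed by the "Configure GPG Key" step above could be produced locally. The key ID is a placeholder and GNU base64 is assumed; this is not part of the change itself:

```bash
# Export the private signing key (placeholder key ID) as ASCII armor and
# base64-encode it without line wrapping so it can be stored as the
# GPG_SIGNING_KEY repository secret.
gpg --armor --export-secret-keys "REPLACE_WITH_KEY_ID" | base64 -w0
```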


@ -16,23 +16,23 @@ monorepo:
dir: cli
builds:
- id: darwin-build
binary: infisical
ldflags:
- -X github.com/Infisical/infisical-merge/packages/util.CLI_VERSION={{ .Version }}
- -X github.com/Infisical/infisical-merge/packages/telemetry.POSTHOG_API_KEY_FOR_CLI={{ .Env.POSTHOG_API_KEY_FOR_CLI }}
flags:
- -trimpath
env:
- CGO_ENABLED=1
- CC=/home/runner/work/osxcross/target/bin/o64-clang
- CXX=/home/runner/work/osxcross/target/bin/o64-clang++
goos:
- darwin
ignore:
- goos: darwin
goarch: "386"
dir: ./cli
# - id: darwin-build
# binary: infisical
# ldflags:
# - -X github.com/Infisical/infisical-merge/packages/util.CLI_VERSION={{ .Version }}
# - -X github.com/Infisical/infisical-merge/packages/telemetry.POSTHOG_API_KEY_FOR_CLI={{ .Env.POSTHOG_API_KEY_FOR_CLI }}
# flags:
# - -trimpath
# env:
# - CGO_ENABLED=1
# - CC=/home/runner/work/osxcross/target/bin/o64-clang
# - CXX=/home/runner/work/osxcross/target/bin/o64-clang++
# goos:
# - darwin
# ignore:
# - goos: darwin
# goarch: "386"
# dir: ./cli
- id: all-other-builds
env:
@ -44,11 +44,11 @@ builds:
flags:
- -trimpath
goos:
- freebsd
# - freebsd
- linux
- netbsd
- openbsd
- windows
# - netbsd
# - openbsd
# - windows
goarch:
- "386"
- amd64
@ -75,8 +75,10 @@ archives:
- ../completions/*
release:
replace_existing_draft: true
mode: "replace"
# replace_existing_draft: true
# mode: "replace"
disable: true
skip_upload: true
checksum:
name_template: "checksums.txt"
@ -91,39 +93,39 @@ snapshot:
# dir: "{{ dir .ArtifactPath }}"
# cmd: curl -F package=@{{ .ArtifactName }} https://{{ .Env.FURY_TOKEN }}@push.fury.io/infisical/
brews:
- name: infisical
tap:
owner: Infisical
name: homebrew-get-cli
commit_author:
name: "Infisical"
email: ai@infisical.com
folder: Formula
homepage: "https://infisical.com"
description: "The official Infisical CLI"
install: |-
bin.install "infisical"
bash_completion.install "completions/infisical.bash" => "infisical"
zsh_completion.install "completions/infisical.zsh" => "_infisical"
fish_completion.install "completions/infisical.fish"
man1.install "manpages/infisical.1.gz"
- name: "infisical@{{.Version}}"
tap:
owner: Infisical
name: homebrew-get-cli
commit_author:
name: "Infisical"
email: ai@infisical.com
folder: Formula
homepage: "https://infisical.com"
description: "The official Infisical CLI"
install: |-
bin.install "infisical"
bash_completion.install "completions/infisical.bash" => "infisical"
zsh_completion.install "completions/infisical.zsh" => "_infisical"
fish_completion.install "completions/infisical.fish"
man1.install "manpages/infisical.1.gz"
# brews:
# - name: infisical
# tap:
# owner: Infisical
# name: homebrew-get-cli
# commit_author:
# name: "Infisical"
# email: ai@infisical.com
# folder: Formula
# homepage: "https://infisical.com"
# description: "The official Infisical CLI"
# install: |-
# bin.install "infisical"
# bash_completion.install "completions/infisical.bash" => "infisical"
# zsh_completion.install "completions/infisical.zsh" => "_infisical"
# fish_completion.install "completions/infisical.fish"
# man1.install "manpages/infisical.1.gz"
# - name: "infisical@{{.Version}}"
# tap:
# owner: Infisical
# name: homebrew-get-cli
# commit_author:
# name: "Infisical"
# email: ai@infisical.com
# folder: Formula
# homepage: "https://infisical.com"
# description: "The official Infisical CLI"
# install: |-
# bin.install "infisical"
# bash_completion.install "completions/infisical.bash" => "infisical"
# zsh_completion.install "completions/infisical.zsh" => "_infisical"
# fish_completion.install "completions/infisical.fish"
# man1.install "manpages/infisical.1.gz"
nfpms:
- id: infisical
@ -136,10 +138,10 @@ nfpms:
description: The official Infisical CLI
license: MIT
formats:
- rpm
# - rpm
- deb
- apk
- archlinux
# - apk
# - archlinux
bindir: /usr/bin
contents:
- src: ./completions/infisical.bash
@ -151,73 +153,73 @@ nfpms:
- src: ./manpages/infisical.1.gz
dst: /usr/share/man/man1/infisical.1.gz
scoop:
bucket:
owner: Infisical
name: scoop-infisical
commit_author:
name: "Infisical"
email: ai@infisical.com
homepage: "https://infisical.com"
description: "The official Infisical CLI"
license: MIT
# scoop:
# bucket:
# owner: Infisical
# name: scoop-infisical
# commit_author:
# name: "Infisical"
# email: ai@infisical.com
# homepage: "https://infisical.com"
# description: "The official Infisical CLI"
# license: MIT
aurs:
- name: infisical-bin
homepage: "https://infisical.com"
description: "The official Infisical CLI"
maintainers:
- Infisical, Inc <support@infisical.com>
license: MIT
private_key: "{{ .Env.AUR_KEY }}"
git_url: "ssh://aur@aur.archlinux.org/infisical-bin.git"
package: |-
# bin
install -Dm755 "./infisical" "${pkgdir}/usr/bin/infisical"
# license
install -Dm644 "./LICENSE" "${pkgdir}/usr/share/licenses/infisical/LICENSE"
# completions
mkdir -p "${pkgdir}/usr/share/bash-completion/completions/"
mkdir -p "${pkgdir}/usr/share/zsh/site-functions/"
mkdir -p "${pkgdir}/usr/share/fish/vendor_completions.d/"
install -Dm644 "./completions/infisical.bash" "${pkgdir}/usr/share/bash-completion/completions/infisical"
install -Dm644 "./completions/infisical.zsh" "${pkgdir}/usr/share/zsh/site-functions/_infisical"
install -Dm644 "./completions/infisical.fish" "${pkgdir}/usr/share/fish/vendor_completions.d/infisical.fish"
# man pages
install -Dm644 "./manpages/infisical.1.gz" "${pkgdir}/usr/share/man/man1/infisical.1.gz"
# aurs:
# - name: infisical-bin
# homepage: "https://infisical.com"
# description: "The official Infisical CLI"
# maintainers:
# - Infisical, Inc <support@infisical.com>
# license: MIT
# private_key: "{{ .Env.AUR_KEY }}"
# git_url: "ssh://aur@aur.archlinux.org/infisical-bin.git"
# package: |-
# # bin
# install -Dm755 "./infisical" "${pkgdir}/usr/bin/infisical"
# # license
# install -Dm644 "./LICENSE" "${pkgdir}/usr/share/licenses/infisical/LICENSE"
# # completions
# mkdir -p "${pkgdir}/usr/share/bash-completion/completions/"
# mkdir -p "${pkgdir}/usr/share/zsh/site-functions/"
# mkdir -p "${pkgdir}/usr/share/fish/vendor_completions.d/"
# install -Dm644 "./completions/infisical.bash" "${pkgdir}/usr/share/bash-completion/completions/infisical"
# install -Dm644 "./completions/infisical.zsh" "${pkgdir}/usr/share/zsh/site-functions/_infisical"
# install -Dm644 "./completions/infisical.fish" "${pkgdir}/usr/share/fish/vendor_completions.d/infisical.fish"
# # man pages
# install -Dm644 "./manpages/infisical.1.gz" "${pkgdir}/usr/share/man/man1/infisical.1.gz"
dockers:
- dockerfile: docker/alpine
goos: linux
goarch: amd64
use: buildx
ids:
- all-other-builds
image_templates:
- "infisical/cli:{{ .Major }}.{{ .Minor }}.{{ .Patch }}-amd64"
- "infisical/cli:latest-amd64"
build_flag_templates:
- "--pull"
- "--platform=linux/amd64"
- dockerfile: docker/alpine
goos: linux
goarch: amd64
use: buildx
ids:
- all-other-builds
image_templates:
- "infisical/cli:{{ .Major }}.{{ .Minor }}.{{ .Patch }}-arm64"
- "infisical/cli:latest-arm64"
build_flag_templates:
- "--pull"
- "--platform=linux/arm64"
# dockers:
# - dockerfile: docker/alpine
# goos: linux
# goarch: amd64
# use: buildx
# ids:
# - all-other-builds
# image_templates:
# - "infisical/cli:{{ .Major }}.{{ .Minor }}.{{ .Patch }}-amd64"
# - "infisical/cli:latest-amd64"
# build_flag_templates:
# - "--pull"
# - "--platform=linux/amd64"
# - dockerfile: docker/alpine
# goos: linux
# goarch: amd64
# use: buildx
# ids:
# - all-other-builds
# image_templates:
# - "infisical/cli:{{ .Major }}.{{ .Minor }}.{{ .Patch }}-arm64"
# - "infisical/cli:latest-arm64"
# build_flag_templates:
# - "--pull"
# - "--platform=linux/arm64"
docker_manifests:
- name_template: "infisical/cli:{{ .Major }}.{{ .Minor }}.{{ .Patch }}"
image_templates:
- "infisical/cli:{{ .Major }}.{{ .Minor }}.{{ .Patch }}-amd64"
- "infisical/cli:{{ .Major }}.{{ .Minor }}.{{ .Patch }}-arm64"
- name_template: "infisical/cli:latest"
image_templates:
- "infisical/cli:latest-amd64"
- "infisical/cli:latest-arm64"
# docker_manifests:
# - name_template: "infisical/cli:{{ .Major }}.{{ .Minor }}.{{ .Patch }}"
# image_templates:
# - "infisical/cli:{{ .Major }}.{{ .Minor }}.{{ .Patch }}-amd64"
# - "infisical/cli:{{ .Major }}.{{ .Minor }}.{{ .Patch }}-arm64"
# - name_template: "infisical/cli:latest"
# image_templates:
# - "infisical/cli:latest-amd64"
# - "infisical/cli:latest-arm64"


@ -50,6 +50,7 @@ func init() {
config.INFISICAL_URL = util.AppendAPIEndpoint(config.INFISICAL_URL)
util.DisplayAptInstallationChangeBanner(silent)
if !util.IsRunningInDocker() && !silent {
util.CheckForUpdate()
}


@ -53,6 +53,25 @@ func CheckForUpdate() {
}
}
func DisplayAptInstallationChangeBanner(isSilent bool) {
if isSilent {
return
}
if runtime.GOOS == "linux" {
_, err := exec.LookPath("apt-get")
isApt := err == nil
if isApt {
yellow := color.New(color.FgYellow).SprintFunc()
msg := fmt.Sprintf("%s",
yellow("Update Required: Your current package installation script is outdated and will no longer receive updates.\nPlease update to the new installation script which can be found here https://infisical.com/docs/cli/overview#installation debian section\n"),
)
fmt.Fprintln(os.Stderr, msg)
}
}
}
func getLatestTag(repoOwner string, repoName string) (string, string, error) {
url := fmt.Sprintf("https://api.github.com/repos/%s/%s/releases/latest", repoOwner, repoName)
resp, err := http.Get(url)


@ -0,0 +1,551 @@
#!/usr/bin/env bash
#
# The core commands execute start from the "MAIN" section below.
#
test -z "$BASH_SOURCE" && {
self="sudo -E bash"
prefix="<curl command> |"
} || {
self=$(readlink -f ${BASH_SOURCE:-$0})
prefix=""
}
tmp_log=$(mktemp .s3_setup_XXXXXXXXX)
# Environment variables that can be set
PKG_URL=${PKG_URL:-"https://artifacts-cli.infisical.com"}
PKG_PATH=${PKG_PATH:-"deb"}
PACKAGE_NAME=${PACKAGE_NAME:-"infisical"}
GPG_KEY_URL=${GPG_KEY_URL:-"${PKG_URL}/infisical.gpg"}
colours=$(tput colors 2>/dev/null || echo "256")
no_colour="\e[39;49m"
green_colour="\e[32m"
red_colour="\e[41;97m"
bold="\e[1m"
reset="\e[0m"
use_colours=$(test -n "$colours" && test $colours -ge 8 && echo "yes")
test "$use_colours" == "yes" || {
no_colour=""
green_colour=""
red_colour=""
bold=""
reset=""
}
example_name="Ubuntu/Focal (20.04)"
example_distro="ubuntu"
example_codename="focal"
example_version="20.04"
function echo_helptext {
local help_text="$*"
echo " ^^^^: ... $help_text"
}
function die {
local text="$@"
test ! -z "$text" && {
echo_helptext "$text" 1>&2
}
local prefix="${red_colour} !!!!${no_colour}"
echo -e "$prefix: Oh no, your setup failed! :-( ... But we might be able to help. :-)"
echo -e "$prefix: "
echo -e "$prefix: ${bold}Please check your S3 bucket configuration and try again.${reset}"
echo -e "$prefix: "
test -f "$tmp_log" && {
local n=20
echo -e "$prefix: Last $n log lines from $tmp_log (might not be errors, nor even relevant):"
echo -e "$prefix:"
check_tool_silent "xargs" && {
check_tool_silent "fmt" && {
tail -n $n $tmp_log | fmt -t | xargs -Ilog echo -e "$prefix: > log"
} || {
tail -n $n $tmp_log | xargs -Ilog echo -e "$prefix: > log"
}
} || {
echo
tail -n $n $tmp_log
}
}
exit 1
}
function echo_colour {
local colour="${1:-"no"}_colour"; shift
echo -e "${!colour}$@${no_colour}"
}
function echo_green_or_red {
local rc="$1"
local good="${2:-YES}"
local bad="${3:-NO}"
test "$rc" -eq 0 && {
echo_colour "green" "$good"
} || {
echo_colour "red" "$bad"
}
return $rc
}
function echo_clearline {
local rc="$?"
echo -e -n "\033[1K\r"
return $rc
}
function echo_status {
local rc="$1"
local good="$2"
local bad="$3"
local text="$4"
local help_text="$5"
local newline=$(test "$6" != "no" && echo "\n" || echo "")
local status_text=$(echo_green_or_red "$rc" "$good" "$bad")
echo_clearline
local width=$(test "$use_colours" == "yes" && echo "16" || echo "5")
printf "%${width}s %s${newline}" "${status_text}:" "$text"
test $rc -ne 0 && test ! -z "$help_text" && {
echo_helptext "$help_text"
echo
}
return $rc
}
function echo_running {
local rc=$?
local text="$1"
echo_status 0 " RUN" " RUN" "$text" "" "no"
return $rc
}
function echo_okfail_rc {
local rc=$1
local text="$2"
local help_text="$3"
echo_clearline
echo_status $rc " OK" " NOPE" "$text" "$help_text"
return $rc
}
function echo_okfail {
echo_okfail_rc $? "$@"
return $?
}
function check_tool_silent {
local tool=${1}
command -v $tool &>/dev/null || which $tool &>/dev/null
return $?
}
function check_tool {
local tool=${1}
local optional=${2:-false}
local required_text="optional"
if ! $optional; then required_text="required"; fi
local text="Checking for $required_text executable '$tool' ..."
echo_running "$text"
check_tool_silent "$tool"
echo_okfail "$text" || {
if ! $optional; then
die "$tool is not installed, but is required by this script."
fi
return 1
}
return 0
}
function cleanup {
echo
rm -rf $tmp_log
}
function shutdown {
echo_colour "red" " !!!!: Operation cancelled by user!"
exit 2
}
function check_os {
test ! -z "$distro" && test ! -z "${version}${codename}"
return $?
}
function detect_os_system {
check_os && return 0
local text="Detecting your OS distribution and release using system methods ..."
echo_running "$text"
local tool_rc=1
test -f '/etc/os-release' && {
. /etc/os-release
distro=${distro:-$ID}
codename=${codename:-$VERSION_CODENAME}
codename=${codename:-$(echo $VERSION | cut -d '(' -f 2 | cut -d ')' -f 1)}
version=${version:-$VERSION_ID}
test -z "${version}${codename}" && test -f '/etc/debian_version' && {
# Workaround for Debian unstable releases; get the codename from debian_version
codename=$(cat /etc/debian_version | cut -d '/' -f1)
}
tool_rc=0
}
check_os
local rc=$?
echo_okfail_rc $rc "$text"
test $tool_rc -eq 0 && {
report_os_expanded
}
return $rc
}
function report_os_attribute {
local name=$1
local value=$2
local coloured=""
echo -n "$name="
test -z "$value" && {
echo -e -n "${red_colour}<empty>${no_colour} "
} || {
echo -e -n "${green_colour}${value}${no_colour} "
}
}
function report_os_expanded {
echo_helptext "Detected/provided for your OS/distribution, version and architecture:"
echo " >>>>:"
report_os_values
}
function report_os_values {
echo -n " >>>>: ... "
report_os_attribute "distro" $distro
report_os_attribute "codename" "stable (fixed)"
report_os_attribute "arch" $arch
echo
echo " >>>>:"
}
function detect_os_legacy_python {
check_os && return 0
local text="Detecting your OS distribution and release using legacy python ..."
echo_running "$text"
IFS='' read -r -d '' script <<-'EOF'
from __future__ import unicode_literals, print_function
import platform;
info = platform.linux_distribution() or ('', '', '');
for key, value in zip(('distro', 'version', 'codename'), info):
print("local guess_%s=\"%s\"\n" % (key, value.lower().replace(' ', '')));
EOF
local tool_rc=1
check_tool_silent "python" && {
eval $(python -c "$script")
distro=${distro:-$guess_distro}
codename=${codename:-$guess_codename}
version=${version:-$guess_version}
tool_rc=$?
}
check_os
local rc=$?
echo_okfail_rc $rc "$text"
check_tool_silent "python" || {
echo_helptext "Python isn't available, so skipping detection method (hint: install python)"
}
test $tool_rc -eq 0 && {
report_os
}
return $rc
}
function detect_os_modern_python {
check_os && return 0
check_tool_silent "python" && {
local text="Ensuring python-pip is installed ..."
echo_running "$text"
check_tool_silent "pip"
echo_okfail "$text" || {
local text="Checking if pip can be bootstrapped without get-pip ..."
echo_running "$text"
python -m ensurepip --default-pip &>$tmp_log
echo_okfail "$text" || {
local text="Installing pip via get-pip bootstrap ..."
echo_running "$text"
curl -1sLf https://bootstrap.pypa.io/get-pip.py 2>$tmp_log | python &>$tmp_log
echo_okfail "$text" || die "Failed to install pip!"
}
}
local text="Installing 'distro' python library ..."
echo_running "$text"
python -c 'import distro' &>$tmp_log || python -m pip install distro &>$tmp_log
echo_okfail "$text" || die "Failed to install required 'distro' python library!"
}
IFS='' read -r -d '' script <<-'EOF'
from __future__ import unicode_literals, print_function
import distro;
info = distro.linux_distribution(full_distribution_name=False) or ('', '', '');
for key, value in zip(('distro', 'version', 'codename'), info):
print("local guess_%s=\"%s\"\n" % (key, value.lower().replace(' ', '')));
EOF
local text="Detecting your OS distribution and release using modern python ..."
echo_running "$text"
local tool_rc=1
check_tool_silent "python" && {
eval $(python -c "$script")
distro=${distro:-$guess_distro}
codename=${codename:-$guess_codename}
version=${version:-$guess_version}
tool_rc=$?
}
check_os
local rc=$?
echo_okfail_rc $rc "$text"
check_tool_silent "python" || {
echo_helptext "Python isn't available, so skipping detection method (hint: install python)"
}
test $tool_rc -eq 0 && {
report_os_expanded
}
return $rc
}
function detect_os {
# Backwards compat for old distribution parameter names
distro=${distro:-$os}
# Always use "stable" as the codename
codename="stable"
arch=${arch:-$(arch || uname -m)}
# Only detect OS if not manually specified
if [ -z "$distro" ]; then
detect_os_system ||
detect_os_legacy_python ||
detect_os_modern_python
fi
# Always ensure we have a distro
(test -z "$distro") && {
echo_okfail_rc "1" "Unable to detect your OS distribution!"
cat <<EOF
>>>>:
>>>>: The 'distro' value is required. Without it, the install script
>>>>: cannot retrieve the correct configuration for this system.
>>>>:
>>>>: You can force this script to use a particular value by specifying distro
>>>>: via environment variable. E.g., to specify a distro
>>>>: such as $example_name, use the following:
>>>>:
>>>>: $prefix distro=$example_distro $self
>>>>:
EOF
die
}
}
function create_repo_config {
if [ -z "$PKG_PATH" ]; then
repo_url="${PKG_URL}"
else
repo_url="${PKG_URL}/${PKG_PATH}"
fi
# Create configuration with GPG key verification
local gpg_keyring_path="/usr/share/keyrings/${PACKAGE_NAME}-archive-keyring.gpg"
local apt_conf=$(cat <<EOF
deb [arch=$(dpkg --print-architecture) signed-by=${gpg_keyring_path}] ${repo_url} stable main
EOF
)
echo "$apt_conf"
return 0
}
function check_gpg_key {
local text="Checking if GPG key is accessible at ${GPG_KEY_URL} ..."
echo_running "$text"
local code="$(curl -1IsL -w "%{http_code}\\n" "$GPG_KEY_URL" -o /dev/null --connect-timeout 15 --max-time 60)"
test "$code" == "200" && {
echo_okfail_rc 0 "$text"
return 0
} || {
echo_okfail_rc 1 "$text"
echo_helptext "Failed to access the GPG key. Please check that it exists in your S3 bucket."
cat <<EOF
>>>>:
>>>>: It looks like we can't access the GPG key at ${GPG_KEY_URL}
>>>>:
EOF
die
}
}
function check_dpkg_tool {
local tool=${1}
local required=${2:-true}
local install=${3:-true}
local text="Checking for apt dependency '$tool' ..."
echo_running "$text"
dpkg -l | grep "$tool\>" &>$tmp_log
echo_okfail "$text" || {
if $install; then
test "$apt_updated" == "yes" || update_apt
local text="Attempting to install '$tool' ..."
echo_running "$text"
apt-get install -y "$tool" &>$tmp_log
echo_okfail "$text" || {
if $required; then
die "Could not install '$tool', check your permissions, etc."
fi
}
else {
if $required; then
die "$tool is not installed, but is required by this script."
fi
}
fi
}
return 0
}
function update_apt {
local text="Updating apt repository metadata cache ..."
local tmp_log=$(mktemp .s3_deb_output_XXXXXXXXX.log)
echo_running "$text"
apt-get update &>$tmp_log
echo_okfail "$text" || {
echo_colour "red" "Failed to update via apt-get update"
cat $tmp_log
rm -rf $tmp_log
die "Failed to update via apt-get update - Context above (maybe no packages?)."
}
rm -rf $tmp_log
apt_updated="yes"
}
function install_apt_prereqs {
# Debian-archive-keyring has to be installed for apt-transport-https.
test "${distro}" == "debian" && {
check_dpkg_tool "debian-keyring"
check_dpkg_tool "debian-archive-keyring"
}
check_dpkg_tool "apt-transport-https"
check_dpkg_tool "ca-certificates" false
check_dpkg_tool "gnupg"
}
function import_gpg_key {
local text="Importing '$PACKAGE_NAME' repository GPG key from S3 ..."
echo_running "$text"
local gpg_keyring_path="/usr/share/keyrings/${PACKAGE_NAME}-archive-keyring.gpg"
# Check if GPG key is accessible
check_gpg_key
# Download and import GPG key
curl -1sLf "${GPG_KEY_URL}" | gpg --dearmor > $gpg_keyring_path
chmod 644 $gpg_keyring_path
# Check for older apt versions that don't support signed-by
local signed_by_version="1.1"
local detected_version=$(dpkg -s apt | grep Version | cut -d' ' -f2)
if [ "$(printf "%s\n" $detected_version $signed_by_version | sort -V | head -n 1)" != "$signed_by_version" ]; then
echo_helptext "Detected older apt version without signed-by support. Copying key to trusted.gpg.d."
cp ${gpg_keyring_path} /etc/apt/trusted.gpg.d/${PACKAGE_NAME}.gpg
chmod 644 /etc/apt/trusted.gpg.d/${PACKAGE_NAME}.gpg
fi
echo_okfail "$text" || die "Could not import the GPG key for this repository"
}
function setup_repository {
local repo_path="/etc/apt/sources.list.d/${PACKAGE_NAME}.list"
local text="Installing '$PACKAGE_NAME' repository via apt ..."
echo_running "$text"
create_repo_config > "$repo_path"
chmod 644 $repo_path
echo_okfail "$text" || die "Could not install the repository, do you have permissions?"
}
function usage () {
cat <<EOF
Usage: $self [opts]
-h Displays this usage text.
-i Ignore repository setup errors during setup and
continue with install. This will leave the repository config
in place rather than removing it upon errors.
-p Package name to use for repository setup (default: ${PACKAGE_NAME})
-b Package base URL (default: ${PKG_URL})
-s Path prefix under the package URL (default: ${PKG_PATH})
-k GPG key URL (default: ${GPG_KEY_URL})
EOF
exit 0
}
trap cleanup EXIT
trap shutdown INT
ignore_errors=1
apt_updated="no"
while getopts ":ihp:b:s:k:" OPT; do
case $OPT in
i) ignore_errors=0 ;;
h) usage ;;
p) PACKAGE_NAME=$OPTARG ;;
b) PKG_URL=$OPTARG ;;
s) PKG_PATH=$OPTARG ;;
k) GPG_KEY_URL=$OPTARG ;;
\?) usage ;;
esac
done
shift $(($OPTIND - 1))
#
# MAIN
#
echo "Executing the setup script for the '$PACKAGE_NAME' S3 repository ..."
echo
check_tool "curl"
check_tool "apt-get"
detect_os
install_apt_prereqs
import_gpg_key
setup_repository
update_apt
echo_okfail_rc "0" "The repository has been installed successfully - You're ready to rock!"
echo
echo "You can now install the package with: apt install $PACKAGE_NAME"
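A hedged usage sketch for the setup script above. The hosted path of the script is an assumption (only the artifacts base URL appears in this change); the script itself defaults PKG_URL, PKG_PATH, and PACKAGE_NAME as shown near its top:

```bash
# Hypothetical hosted location of the setup script; adjust to wherever it is published.
curl -1sLf 'https://artifacts-cli.infisical.com/setup.deb.sh' | sudo -E bash

# The script writes /etc/apt/sources.list.d/infisical.list, imports the repository
# GPG key, and refreshes apt metadata, after which the CLI can be installed.
sudo apt-get install -y infisical
```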


@ -1,15 +1,21 @@
cd dist
for i in *.apk; do
[ -f "$i" ] || break
cloudsmith push alpine --republish infisical/infisical-cli/alpine/any-version $i
done
# for i in *.apk; do
# [ -f "$i" ] || break
# cloudsmith push alpine --republish infisical/infisical-cli/alpine/any-version $i
# done
# for i in *.deb; do
# [ -f "$i" ] || break
# cloudsmith push deb --republish infisical/infisical-cli/any-distro/any-version $i
# done
for i in *.deb; do
[ -f "$i" ] || break
cloudsmith push deb --republish infisical/infisical-cli/any-distro/any-version $i
deb-s3 upload --bucket=$INFISICAL_CLI_S3_BUCKET --prefix=deb --visibility=private --sign=$INFISICAL_CLI_REPO_SIGNING_KEY_ID --preserve-versions $i
done
for i in *.rpm; do
[ -f "$i" ] || break
cloudsmith push rpm --republish infisical/infisical-cli/any-distro/any-version $i
done
# for i in *.rpm; do
# [ -f "$i" ] || break
# cloudsmith push rpm --republish infisical/infisical-cli/any-distro/any-version $i
# done
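For context, a standalone sketch of the deb-s3 invocation added above, with placeholder values standing in for the CI secrets (the bucket name and signing key ID are illustrative only; AWS credentials are read from the environment as in CI):

```bash
# Placeholders for the INFISICAL_CLI_S3_BUCKET and
# INFISICAL_CLI_REPO_SIGNING_KEY_ID secrets used in the workflow.
export INFISICAL_CLI_S3_BUCKET="example-artifacts-bucket"
export INFISICAL_CLI_REPO_SIGNING_KEY_ID="REPLACE_WITH_KEY_ID"

deb-s3 upload \
  --bucket="$INFISICAL_CLI_S3_BUCKET" \
  --prefix=deb \
  --visibility=private \
  --sign="$INFISICAL_CLI_REPO_SIGNING_KEY_ID" \
  --preserve-versions \
  dist/*.deb
```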


@ -264,6 +264,7 @@ The available authentication methods are `universalAuth`, `kubernetesAuth`, `aws
- `credentialsRef.secretName`: The name of the Kubernetes secret.
- `credentialsRef.secretNamespace`: The namespace of the Kubernetes secret.
Example:
```yaml
@ -296,6 +297,9 @@ The available authentication methods are `universalAuth`, `kubernetesAuth`, `aws
- `serviceAccountRef`: The name and namespace of the service account that will be used to authenticate with Infisical.
- `serviceAccountRef.name`: The name of the service account.
- `serviceAccountRef.namespace`: The namespace of the service account.
- `autoCreateServiceAccountToken`: If set to `true`, the operator will automatically create a short-lived service account token on-demand for the service account. Defaults to `false`.
- `serviceAccountTokenAudiences`: Optionally specify audience for the service account token. This field is only relevant if you have set `autoCreateServiceAccountToken` to `true`. No audience is specified by default.
Example:
@ -303,6 +307,9 @@ The available authentication methods are `universalAuth`, `kubernetesAuth`, `aws
spec:
kubernetesAuth:
identityId: <machine-identity-id>
autoCreateServiceAccountToken: true # Automatically creates short-lived service account tokens for the service account.
serviceAccountTokenAudiences:
- <audience> # Optionally specify audience for the service account token. No audience is specified by default.
serviceAccountRef:
name: <secret-name>
namespace: <secret-namespace>


@ -291,6 +291,8 @@ After applying the InfisicalPushSecret CRD, you should notice that the secrets y
- `serviceAccountRef`: The name and namespace of the service account that will be used to authenticate with Infisical.
- `serviceAccountRef.name`: The name of the service account.
- `serviceAccountRef.namespace`: The namespace of the service account.
- `autoCreateServiceAccountToken`: If set to `true`, the operator will automatically create a short-lived service account token on-demand for the service account. Defaults to `false`.
- `serviceAccountTokenAudiences`: Optionally specify audience for the service account token. This field is only relevant if you have set `autoCreateServiceAccountToken` to `true`. No audience is specified by default.
Example:
@ -298,6 +300,9 @@ After applying the InfisicalPushSecret CRD, you should notice that the secrets y
spec:
kubernetesAuth:
identityId: <machine-identity-id>
autoCreateServiceAccountToken: true # Automatically creates short-lived service account tokens for the service account.
serviceAccountTokenAudiences:
- <audience> # Optionally specify audience for the service account token. No audience is specified by default.
serviceAccountRef:
name: <secret-name>
namespace: <secret-namespace>


@ -156,157 +156,420 @@ spec:
<Accordion title="authentication.kubernetesAuth">
The Kubernetes machine identity authentication method is used to authenticate with Infisical. The identity ID is stored in a field in the InfisicalSecret resource. This authentication method can only be used within a Kubernetes environment.
<Steps>
<Step title="Obtaining the token reviewer JWT for Infisical">
1.1. Start by creating a service account in your Kubernetes cluster that will be used by Infisical to authenticate with the Kubernetes API Server.
<Tabs>
<Tab title="Short-lived service account tokens (Recommended)">
Short-lived service account tokens are automatically created by the operator and are valid only for a short period of time. This is the recommended approach for using Kubernetes auth in the Infisical Secrets Operator.
```yaml infisical-service-account.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: infisical-auth
namespace: default
<Steps>
<Step title="Obtaining the token reviewer JWT for Infisical">
**1.1.** Start by creating a reviewer service account in your Kubernetes cluster that will be used by Infisical to authenticate with the Kubernetes API Server.
```
```yaml infisical-reviewer-service-account.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: infisical-token-reviewer
namespace: default
```
kubectl apply -f infisical-service-account.yaml
```
```
1.2. Bind the service account to the `system:auth-delegator` cluster role. As described [here](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#other-component-roles), this role allows delegated authentication and authorization checks, specifically for Infisical to access the [TokenReview API](https://kubernetes.io/docs/reference/kubernetes-api/authentication-resources/token-review-v1/). You can apply the following configuration file:
```bash
kubectl apply -f infisical-reviewer-service-account.yaml
```
```yaml cluster-role-binding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: role-tokenreview-binding
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: infisical-auth
namespace: default
```
**1.2.** Bind the reviewer service account to the `system:auth-delegator` cluster role. As described [here](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#other-component-roles), this role allows delegated authentication and authorization checks, specifically for Infisical to access the [TokenReview API](https://kubernetes.io/docs/reference/kubernetes-api/authentication-resources/token-review-v1/). You can apply the following configuration file:
```
kubectl apply -f cluster-role-binding.yaml
```
```yaml infisical-reviewer-cluster-role-binding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: infisical-token-reviewer-role-binding
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: infisical-token-reviewer
namespace: default
```
1.3. Next, create a long-lived service account JWT token (i.e. the token reviewer JWT token) for the service account using this configuration file for a new `Secret` resource:
```bash
kubectl apply -f infisical-reviewer-cluster-role-binding.yaml
```
```yaml service-account-token.yaml
apiVersion: v1
kind: Secret
type: kubernetes.io/service-account-token
metadata:
name: infisical-auth-token
annotations:
kubernetes.io/service-account.name: "infisical-auth"
```
**1.3.** Next, create a long-lived service account JWT token (i.e. the token reviewer JWT token) for the service account using this configuration file for a new `Secret` resource:
```yaml service-account-reviewer-token.yaml
apiVersion: v1
kind: Secret
type: kubernetes.io/service-account-token
metadata:
name: infisical-token-reviewer-token
annotations:
kubernetes.io/service-account.name: "infisical-token-reviewer"
```
```
kubectl apply -f service-account-token.yaml
```
```bash
kubectl apply -f service-account-reviewer-token.yaml
```
1.4. Link the secret in step 1.3 to the service account in step 1.1:
**1.4.** Link the secret in step 1.3 to the service account in step 1.1:
```bash
kubectl patch serviceaccount infisical-auth -p '{"secrets": [{"name": "infisical-auth-token"}]}' -n default
```
```bash
kubectl patch serviceaccount infisical-token-reviewer -p '{"secrets": [{"name": "infisical-token-reviewer-token"}]}' -n default
```
1.5. Finally, retrieve the token reviewer JWT token from the secret.
**1.5.** Finally, retrieve the token reviewer JWT token from the secret.
```bash
kubectl get secret infisical-auth-token -n default -o=jsonpath='{.data.token}' | base64 --decode
```
```bash
kubectl get secret infisical-token-reviewer-token -n default -o=jsonpath='{.data.token}' | base64 --decode
```
Keep this JWT token handy as you will need it for the **Token Reviewer JWT** field when configuring the Kubernetes Auth authentication method for the identity in step 2.
Keep this JWT token handy as you will need it for the **Token Reviewer JWT** field when configuring the Kubernetes Auth authentication method for the identity in step 2.
</Step>
</Step>
<Step title="Creating an identity">
To create an identity, head to your Organization Settings > Access Control > Machine Identities and press **Create identity**.
<Step title="Creating an identity">
To create an identity, head to your Organization Settings > Access Control > Machine Identities and press **Create identity**.
![identities organization](/images/platform/identities/identities-org.png)
![identities organization](/images/platform/identities/identities-org.png)
When creating an identity, you specify an organization level [role](/documentation/platform/role-based-access-controls) for it to assume; you can configure roles in Organization Settings > Access Control > Organization Roles.
When creating an identity, you specify an organization level [role](/documentation/platform/role-based-access-controls) for it to assume; you can configure roles in Organization Settings > Access Control > Organization Roles.
![identities organization create](/images/platform/identities/identities-org-create.png)
![identities organization create](/images/platform/identities/identities-org-create.png)
Now input a few details for your new identity. Here's some guidance for each field:
Now input a few details for your new identity. Here's some guidance for each field:
- Name (required): A friendly name for the identity.
- Role (required): A role from the **Organization Roles** tab for the identity to assume. The organization role assigned will determine what organization level resources this identity can have access to.
- Name (required): A friendly name for the identity.
- Role (required): A role from the **Organization Roles** tab for the identity to assume. The organization role assigned will determine what organization level resources this identity can have access to.
Once you've created an identity, you'll be prompted to configure the authentication method for it. Here, select **Kubernetes Auth**.
Once you've created an identity, you'll be prompted to configure the authentication method for it. Here, select **Kubernetes Auth**.
<Info>
To learn more about each field of the Kubernetes native authentication method, see step 2 of [guide](/documentation/platform/identities/kubernetes-auth#guide).
</Info>
<Info>
To learn more about each field of the Kubernetes native authentication method, see step 2 of [guide](/documentation/platform/identities/kubernetes-auth#guide).
</Info>
![identities organization create auth method](/images/platform/identities/identities-org-create-kubernetes-auth-method.png)
![identities organization create auth method](/images/platform/identities/identities-org-create-kubernetes-auth-method.png)
</Step>
<Step title="Adding an identity to a project">
To allow the operator to use the given identity to access secrets, you will need to add the identity to project(s) that you would like to grant it access to.
</Step>
<Step title="Adding an identity to a project">
To allow the operator to use the given identity to access secrets, you will need to add the identity to project(s) that you would like to grant it access to.
To do this, head over to the project you want to add the identity to and go to Project Settings > Access Control > Machine Identities and press **Add identity**.
To do this, head over to the project you want to add the identity to and go to Project Settings > Access Control > Machine Identities and press **Add identity**.
Next, select the identity you want to add to the project and the project level role you want to allow it to assume. The project role assigned will determine what project level resources this identity can have access to.
Next, select the identity you want to add to the project and the project level role you want to allow it to assume. The project role assigned will determine what project level resources this identity can have access to.
![identities project](/images/platform/identities/identities-project.png)
![identities project](/images/platform/identities/identities-project.png)
![identities project create](/images/platform/identities/identities-project-create.png)
![identities project create](/images/platform/identities/identities-project-create.png)
</Step>
<Step title="Add your identity ID & service account to your InfisicalSecret resource">
Once you have created your machine identity and added it to your project(s), you will need to add the identity ID to your InfisicalSecret resource.
In the `authentication.kubernetesAuth.identityId` field, add the identity ID of the machine identity you created.
See the example below for more details.
</Step>
<Step title="Add your Kubernetes service account token to the InfisicalSecret resource">
Add the service account details from the previous steps under `authentication.kubernetesAuth.serviceAccountRef`.
Here you will need to enter the name and namespace of the service account.
The example below shows a complete InfisicalSecret resource with all required fields defined.
</Step>
</Step>
</Steps>
<Step title="Create a new Kubernetes service account to authenticate with Infisical">
You have already created the reviewer service account in step **1.1**. Now, create a new Kubernetes service account that will be used to authenticate with Infisical.
Short-lived tokens for this service account will be used to authenticate with Infisical; the operator handles creating these tokens automatically on demand.
<Info>
Make sure to also populate the `secretsScope` field with the project slug
_`projectSlug`_, environment slug _`envSlug`_, and secrets path
_`secretsPath`_ that you want to fetch secrets from. Please see the example
below.
</Info>
```yaml infisical-service-account.yaml
kind: ServiceAccount
apiVersion: v1
metadata:
name: infisical-service-account
```
## Example
```bash
kubectl apply -f infisical-service-account.yaml -n default
```
```yaml example-kubernetes-auth.yaml
apiVersion: secrets.infisical.com/v1alpha1
kind: InfisicalSecret
metadata:
name: infisicalsecret-sample-crd
spec:
authentication:
kubernetesAuth:
identityId: <machine-identity-id>
serviceAccountRef:
name: <service-account-name>
namespace: <service-account-namespace>
</Step>
# secretsScope is identical to the secrets scope in the universalAuth field in this sample.
secretsScope:
projectSlug: your-project-slug
envSlug: prod
secretsPath: "/path"
recursive: true
...
```
<Step title="Add your identity ID & service account to your InfisicalSecret resource">
Once you have created your machine identity and added it to your project(s), you will need to add the identity ID to your InfisicalSecret resource.
In the `authentication.kubernetesAuth.identityId` field, add the identity ID of the machine identity you created.
See the example below for more details.
</Step>
<Step title="Add your Kubernetes service account token to the InfisicalSecret resource">
Add the service account details from the previous steps under `authentication.kubernetesAuth.serviceAccountRef`.
Here you will need to enter the name and namespace of the service account.
The example below shows a complete InfisicalSecret resource with all required fields defined.
Make sure you set `authentication.kubernetesAuth.autoCreateServiceAccountToken` to `true` to automatically create short-lived service account tokens for the service account.
</Step>
</Steps>
<Info>
Make sure to also populate the `secretsScope` field with the project slug
_`projectSlug`_, environment slug _`envSlug`_, and secrets path
_`secretsPath`_ that you want to fetch secrets from. Please see the example
below.
</Info>
## Example
```yaml example-kubernetes-auth.yaml
apiVersion: secrets.infisical.com/v1alpha1
kind: InfisicalSecret
metadata:
name: infisicalsecret-sample-crd
spec:
authentication:
kubernetesAuth:
identityId: <machine-identity-id>
autoCreateServiceAccountToken: true # Automatically creates short-lived service account tokens for the service account.
serviceAccountTokenAudiences:
- <audience> # Optionally specify audience for the service account token. No audience is specified by default.
serviceAccountRef:
name: infisical-service-account # The service account we just created in the previous step.
namespace: <service-account-namespace>
# secretsScope is identical to the secrets scope in the universalAuth field in this sample.
secretsScope:
projectSlug: your-project-slug
envSlug: prod
secretsPath: "/path"
recursive: true
...
```
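As a brief follow-up, a sketch of applying the example above, assuming it is saved as example-kubernetes-auth.yaml and targets the default namespace used throughout this guide:

```bash
kubectl apply -f example-kubernetes-auth.yaml -n default
```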
</Tab>
<Tab title="Manual long-lived service account tokens">
Manual long-lived service account tokens are manually created by the user and are valid indefinitely unless deleted or rotated. In most cases, you should be using the automatic short-lived service account tokens as they are more secure and easier to use.
<Steps>
<Step title="Obtaining the token reviewer JWT for Infisical">
**1.1.** Start by creating a reviewer service account in your Kubernetes cluster that will be used by Infisical to authenticate with the Kubernetes API Server.
```yaml infisical-reviewer-service-account.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: infisical-token-reviewer
namespace: default
```
```bash
kubectl apply -f infisical-reviewer-service-account.yaml
```
**1.2.** Bind the reviewer service account to the `system:auth-delegator` cluster role. As described [here](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#other-component-roles), this role allows delegated authentication and authorization checks, specifically for Infisical to access the [TokenReview API](https://kubernetes.io/docs/reference/kubernetes-api/authentication-resources/token-review-v1/). You can apply the following configuration file:
```yaml infisical-reviewer-cluster-role-binding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: infisical-token-reviewer-role-binding
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: infisical-token-reviewer
namespace: default
```
```bash
kubectl apply -f infisical-reviewer-cluster-role-binding.yaml
```
**1.3.** Next, create a long-lived service account JWT token (i.e. the token reviewer JWT token) for the service account using this configuration file for a new `Secret` resource:
```yaml service-account-reviewer-token.yaml
apiVersion: v1
kind: Secret
type: kubernetes.io/service-account-token
metadata:
name: infisical-token-reviewer-token
annotations:
kubernetes.io/service-account.name: "infisical-token-reviewer"
```
```bash
kubectl apply -f service-account-reviewer-token.yaml
```
**1.4.** Link the secret in step 1.3 to the service account in step 1.1:
```bash
kubectl patch serviceaccount infisical-token-reviewer -p '{"secrets": [{"name": "infisical-token-reviewer-token"}]}' -n default
```
**1.5.** Finally, retrieve the token reviewer JWT token from the secret.
```bash
kubectl get secret infisical-token-reviewer-token -n default -o=jsonpath='{.data.token}' | base64 --decode
```
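If you prefer not to copy the output by hand, you can capture it in a shell variable for the next step; the variable name below is just for illustration:
```bash
# Capture the token reviewer JWT for later use
TOKEN_REVIEWER_JWT=$(kubectl get secret infisical-token-reviewer-token -n default -o=jsonpath='{.data.token}' | base64 --decode)

# Print only a short prefix to confirm it is non-empty without exposing the full token
echo "${TOKEN_REVIEWER_JWT:0:20}..."
```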
Keep this JWT token handy as you will need it for the **Token Reviewer JWT** field when configuring the Kubernetes Auth authentication method for the identity in step 2.
</Step>
<Step title="Creating an identity">
To create an identity, head to your Organization Settings > Access Control > Machine Identities and press **Create identity**.
![identities organization](/images/platform/identities/identities-org.png)
When creating an identity, you specify an organization-level [role](/documentation/platform/role-based-access-controls) for it to assume; you can configure roles in Organization Settings > Access Control > Organization Roles.
![identities organization create](/images/platform/identities/identities-org-create.png)
Now input a few details for your new identity. Here's some guidance for each field:
- Name (required): A friendly name for the identity.
- Role (required): A role from the **Organization Roles** tab for the identity to assume. The organization role assigned determines what organization-level resources this identity can access.
Once you've created an identity, you'll be prompted to configure the authentication method for it. Here, select **Kubernetes Auth**.
<Info>
To learn more about each field of the Kubernetes native authentication method, see step 2 of the [guide](/documentation/platform/identities/kubernetes-auth#guide).
</Info>
![identities organization create auth method](/images/platform/identities/identities-org-create-kubernetes-auth-method.png)
</Step>
<Step title="Adding an identity to a project">
To allow the operator to use the given identity to access secrets, you will need to add the identity to each project you want to grant it access to.
To do this, head over to the project, go to Project Settings > Access Control > Machine Identities, and press **Add identity**.
Next, select the identity you want to add to the project and the project-level role you want it to assume. The project role assigned determines what project-level resources this identity can access.
![identities project](/images/platform/identities/identities-project.png)
![identities project create](/images/platform/identities/identities-project-create.png)
</Step>
<Step title="Create a new Kubernetes service account to authenticate with Infisical">
You have already created the reviewer service account in step **1.1**. Now, create a new Kubernetes service account that will be used to authenticate with Infisical.
```yaml infisical-service-account.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: infisical-service-account
```
```bash
kubectl apply -f infisical-service-account.yaml -n default
```
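As an optional sanity check, confirm the service account exists in the namespace you applied it to:
```bash
# Optional: verify the service account was created in the default namespace
kubectl get serviceaccount infisical-service-account -n default
```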
</Step>
<Step title="Create a service account token for the Kubernetes service account">
Create a service account token for the Kubernetes service account you created in the previous step.
```yaml infisical-service-account-token.yaml
apiVersion: v1
kind: Secret
type: kubernetes.io/service-account-token
metadata:
  name: infisical-service-account-token
  annotations:
    kubernetes.io/service-account.name: "infisical-service-account"
```
```bash
kubectl apply -f infisical-service-account-token.yaml -n default
```
Patch the service account with the newly created service account token.
```bash
kubectl patch serviceaccount infisical-service-account -p '{"secrets": [{"name": "infisical-service-account-token"}]}' -n default
```
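Before referencing this service account in the InfisicalSecret resource, you can optionally verify that Kubernetes has populated the token secret; `head -c 20` is used only to avoid printing the full token:
```bash
# Optional: confirm the token controller has populated the secret with a JWT
kubectl get secret infisical-service-account-token -n default -o=jsonpath='{.data.token}' | base64 --decode | head -c 20; echo
```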
</Step>
<Step title="Add your identity ID & service account to your InfisicalSecret resource">
Once you have created your machine identity and added it to your project(s), you will need to add the identity ID to your InfisicalSecret resource.
In the `authentication.kubernetesAuth.identityId` field, add the identity ID of the machine identity you created.
See the example below for more details.
</Step>
<Step title="Add your Kubernetes service account token to the InfisicalSecret resource">
Add the service account details from the previous steps under `authentication.kubernetesAuth.serviceAccountRef`.
Here you will need to enter the name and namespace of the service account.
The example below shows a complete InfisicalSecret resource with all required fields defined.
</Step>
</Steps>
<Info>
Make sure to also populate the `secretsScope` field with the project slug
_`projectSlug`_, environment slug _`envSlug`_, and secrets path
_`secretsPath`_ that you want to fetch secrets from. Please see the example
below.
</Info>
## Example
```yaml example-kubernetes-auth.yaml
apiVersion: secrets.infisical.com/v1alpha1
kind: InfisicalSecret
metadata:
  name: infisicalsecret-sample-crd
spec:
  authentication:
    kubernetesAuth:
      identityId: <machine-identity-id>
      serviceAccountRef:
        name: infisical-service-account # The service account we just created in the previous step. (*not* the reviewer service account)
        namespace: <service-account-namespace>
      # secretsScope is identical to the secrets scope in the universalAuth field in this sample.
      secretsScope:
        projectSlug: your-project-slug
        envSlug: prod
        secretsPath: "/path"
        recursive: true
  ...
```
</Tab>
</Tabs>
</Accordion>

View File

@ -13,9 +13,9 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: v0.9.0
version: v0.9.1
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "v0.9.0"
appVersion: "v0.9.1"

View File

@ -74,6 +74,13 @@ spec:
type: object
kubernetesAuth:
properties:
autoCreateServiceAccountToken:
description: Optionally automatically create a service account
token for the configured service account. If this is set to
`true`, the operator will automatically create a service account
token for the configured service account. This field is recommended
in most cases.
type: boolean
identityId:
type: string
serviceAccountRef:
@ -86,6 +93,13 @@ spec:
- name
- namespace
type: object
serviceAccountTokenAudiences:
description: The audiences to use for the service account token.
This is only relevant if `autoCreateServiceAccountToken` is
true.
items:
type: string
type: array
required:
- identityId
- serviceAccountRef

View File

@ -74,6 +74,13 @@ spec:
type: object
kubernetesAuth:
properties:
autoCreateServiceAccountToken:
description: Optionally automatically create a service account
token for the configured service account. If this is set to
`true`, the operator will automatically create a service account
token for the configured service account. This field is recommended
in most cases.
type: boolean
identityId:
type: string
serviceAccountRef:
@ -86,6 +93,13 @@ spec:
- name
- namespace
type: object
serviceAccountTokenAudiences:
description: The audiences to use for the service account token.
This is only relevant if `autoCreateServiceAccountToken` is
true.
items:
type: string
type: array
required:
- identityId
- serviceAccountRef

View File

@ -137,6 +137,12 @@ spec:
type: object
kubernetesAuth:
properties:
autoCreateServiceAccountToken:
description: Optionally automatically create a service account
token for the configured service account. If this is set to
`true`, the operator will automatically create a service account
token for the configured service account.
type: boolean
identityId:
type: string
secretsScope:
@ -164,6 +170,13 @@ spec:
- name
- namespace
type: object
serviceAccountTokenAudiences:
description: The audiences to use for the service account token.
This is only relevant if `autoCreateServiceAccountToken` is
true.
items:
type: string
type: array
required:
- identityId
- secretsScope

View File

@ -23,6 +23,13 @@ rules:
- list
- update
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- apiGroups:
- ""
resources:
@ -42,6 +49,12 @@ rules:
- get
- list
- watch
- apiGroups:
- ""
resources:
- serviceaccounts/token
verbs:
- create
- apiGroups:
- apps
resources:
@ -62,6 +75,12 @@ rules:
- list
- update
- watch
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- secrets.infisical.com
resources:

View File

@ -32,7 +32,7 @@ controllerManager:
- ALL
image:
repository: infisical/kubernetes-operator
tag: v0.9.0
tag: v0.9.1
resources:
limits:
cpu: 500m

View File

@ -24,3 +24,6 @@ Dockerfile.cross
*.swp
*.swo
*~
# Testing directories
auto-token

View File

@ -49,6 +49,14 @@ type GenericKubernetesAuth struct {
IdentityID string `json:"identityId"`
// +kubebuilder:validation:Required
ServiceAccountRef KubernetesServiceAccountRef `json:"serviceAccountRef"`
// Optionally automatically create a service account token for the configured service account.
// If this is set to `true`, the operator will automatically create a service account token for the configured service account. This field is recommended in most cases.
// +kubebuilder:validation:Optional
AutoCreateServiceAccountToken bool `json:"autoCreateServiceAccountToken"`
// The audiences to use for the service account token. This is only relevant if `autoCreateServiceAccountToken` is true.
// +kubebuilder:validation:Optional
ServiceAccountTokenAudiences []string `json:"serviceAccountTokenAudiences"`
}
type TLSConfig struct {

View File

@ -38,6 +38,14 @@ type KubernetesAuthDetails struct {
// +kubebuilder:validation:Required
SecretsScope MachineIdentityScopeInWorkspace `json:"secretsScope"`
// Optionally automatically create a service account token for the configured service account.
// If this is set to `true`, the operator will automatically create a service account token for the configured service account.
// +kubebuilder:validation:Optional
AutoCreateServiceAccountToken bool `json:"autoCreateServiceAccountToken"`
// The audiences to use for the service account token. This is only relevant if `autoCreateServiceAccountToken` is true.
// +kubebuilder:validation:Optional
ServiceAccountTokenAudiences []string `json:"serviceAccountTokenAudiences"`
}
type KubernetesServiceAccountRef struct {

View File

@ -48,7 +48,7 @@ func (in *Authentication) DeepCopyInto(out *Authentication) {
out.ServiceAccount = in.ServiceAccount
out.ServiceToken = in.ServiceToken
out.UniversalAuth = in.UniversalAuth
out.KubernetesAuth = in.KubernetesAuth
in.KubernetesAuth.DeepCopyInto(&out.KubernetesAuth)
out.AwsIamAuth = in.AwsIamAuth
out.AzureAuth = in.AzureAuth
out.GcpIdTokenAuth = in.GcpIdTokenAuth
@ -207,7 +207,7 @@ func (in *GenericGcpIdTokenAuth) DeepCopy() *GenericGcpIdTokenAuth {
func (in *GenericInfisicalAuthentication) DeepCopyInto(out *GenericInfisicalAuthentication) {
*out = *in
out.UniversalAuth = in.UniversalAuth
out.KubernetesAuth = in.KubernetesAuth
in.KubernetesAuth.DeepCopyInto(&out.KubernetesAuth)
out.AwsIamAuth = in.AwsIamAuth
out.AzureAuth = in.AzureAuth
out.GcpIdTokenAuth = in.GcpIdTokenAuth
@ -228,6 +228,11 @@ func (in *GenericInfisicalAuthentication) DeepCopy() *GenericInfisicalAuthentica
func (in *GenericKubernetesAuth) DeepCopyInto(out *GenericKubernetesAuth) {
*out = *in
out.ServiceAccountRef = in.ServiceAccountRef
if in.ServiceAccountTokenAudiences != nil {
in, out := &in.ServiceAccountTokenAudiences, &out.ServiceAccountTokenAudiences
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericKubernetesAuth.
@ -336,7 +341,7 @@ func (in *InfisicalDynamicSecretList) DeepCopyObject() runtime.Object {
func (in *InfisicalDynamicSecretSpec) DeepCopyInto(out *InfisicalDynamicSecretSpec) {
*out = *in
in.ManagedSecretReference.DeepCopyInto(&out.ManagedSecretReference)
out.Authentication = in.Authentication
in.Authentication.DeepCopyInto(&out.Authentication)
out.DynamicSecret = in.DynamicSecret
out.TLS = in.TLS
}
@ -476,7 +481,7 @@ func (in *InfisicalPushSecretSecretSource) DeepCopy() *InfisicalPushSecretSecret
func (in *InfisicalPushSecretSpec) DeepCopyInto(out *InfisicalPushSecretSpec) {
*out = *in
out.Destination = in.Destination
out.Authentication = in.Authentication
in.Authentication.DeepCopyInto(&out.Authentication)
in.Push.DeepCopyInto(&out.Push)
out.TLS = in.TLS
}
@ -583,7 +588,7 @@ func (in *InfisicalSecretList) DeepCopyObject() runtime.Object {
func (in *InfisicalSecretSpec) DeepCopyInto(out *InfisicalSecretSpec) {
*out = *in
out.TokenSecretReference = in.TokenSecretReference
out.Authentication = in.Authentication
in.Authentication.DeepCopyInto(&out.Authentication)
in.ManagedSecretReference.DeepCopyInto(&out.ManagedSecretReference)
if in.ManagedKubeSecretReferences != nil {
in, out := &in.ManagedKubeSecretReferences, &out.ManagedKubeSecretReferences
@ -654,6 +659,11 @@ func (in *KubernetesAuthDetails) DeepCopyInto(out *KubernetesAuthDetails) {
*out = *in
out.ServiceAccountRef = in.ServiceAccountRef
out.SecretsScope = in.SecretsScope
if in.ServiceAccountTokenAudiences != nil {
in, out := &in.ServiceAccountTokenAudiences, &out.ServiceAccountTokenAudiences
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesAuthDetails.

View File

@ -73,6 +73,13 @@ spec:
type: object
kubernetesAuth:
properties:
autoCreateServiceAccountToken:
description: Optionally automatically create a service account
token for the configured service account. If this is set
to `true`, the operator will automatically create a service
account token for the configured service account. This field
is recommended in most cases.
type: boolean
identityId:
type: string
serviceAccountRef:
@ -85,6 +92,13 @@ spec:
- name
- namespace
type: object
serviceAccountTokenAudiences:
description: The audiences to use for the service account
token. This is only relevant if `autoCreateServiceAccountToken`
is true.
items:
type: string
type: array
required:
- identityId
- serviceAccountRef

View File

@ -73,6 +73,13 @@ spec:
type: object
kubernetesAuth:
properties:
autoCreateServiceAccountToken:
description: Optionally automatically create a service account
token for the configured service account. If this is set
to `true`, the operator will automatically create a service
account token for the configured service account. This field
is recommended in most cases.
type: boolean
identityId:
type: string
serviceAccountRef:
@ -85,6 +92,13 @@ spec:
- name
- namespace
type: object
serviceAccountTokenAudiences:
description: The audiences to use for the service account
token. This is only relevant if `autoCreateServiceAccountToken`
is true.
items:
type: string
type: array
required:
- identityId
- serviceAccountRef

View File

@ -136,6 +136,12 @@ spec:
type: object
kubernetesAuth:
properties:
autoCreateServiceAccountToken:
description: Optionally automatically create a service account
token for the configured service account. If this is set
to `true`, the operator will automatically create a service
account token for the configured service account.
type: boolean
identityId:
type: string
secretsScope:
@ -163,6 +169,13 @@ spec:
- name
- namespace
type: object
serviceAccountTokenAudiences:
description: The audiences to use for the service account
token. This is only relevant if `autoCreateServiceAccountToken`
is true.
items:
type: string
type: array
required:
- identityId
- secretsScope

View File

@ -16,6 +16,13 @@ rules:
- list
- update
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- apiGroups:
- ""
resources:
@ -35,6 +42,12 @@ rules:
- get
- list
- watch
- apiGroups:
- ""
resources:
- serviceaccounts/token
verbs:
- create
- apiGroups:
- apps
resources:
@ -55,6 +68,12 @@ rules:
- list
- update
- watch
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- secrets.infisical.com
resources:

View File

@ -45,6 +45,9 @@ func (r *InfisicalDynamicSecretReconciler) GetLogger(req ctrl.Request) logr.Logg
// +kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;create;update;delete
// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=list;watch;get;update
// +kubebuilder:rbac:groups="",resources=serviceaccounts,verbs=get;list;watch
//+kubebuilder:rbac:groups="",resources=pods,verbs=get;list
//+kubebuilder:rbac:groups="authentication.k8s.io",resources=tokenreviews,verbs=create
//+kubebuilder:rbac:groups="",resources=serviceaccounts/token,verbs=create
func (r *InfisicalDynamicSecretReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {

View File

@ -48,6 +48,9 @@ func (r *InfisicalPushSecretReconciler) GetLogger(req ctrl.Request) logr.Logger
//+kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;create;update;delete
//+kubebuilder:rbac:groups=apps,resources=deployments,verbs=list;watch;get;update
//+kubebuilder:rbac:groups="",resources=serviceaccounts,verbs=get;list;watch
//+kubebuilder:rbac:groups="",resources=pods,verbs=get;list
//+kubebuilder:rbac:groups="authentication.k8s.io",resources=tokenreviews,verbs=create
//+kubebuilder:rbac:groups="",resources=serviceaccounts/token,verbs=create
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.

View File

@ -44,6 +44,9 @@ func (r *InfisicalSecretReconciler) GetLogger(req ctrl.Request) logr.Logger {
//+kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;create;update;delete
//+kubebuilder:rbac:groups=apps,resources=deployments;daemonsets;statefulsets,verbs=list;watch;get;update
//+kubebuilder:rbac:groups="",resources=serviceaccounts,verbs=get;list;watch
//+kubebuilder:rbac:groups="",resources=pods,verbs=get;list
//+kubebuilder:rbac:groups="authentication.k8s.io",resources=tokenreviews,verbs=create
//+kubebuilder:rbac:groups="",resources=serviceaccounts/token,verbs=create
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.

View File

@ -8,12 +8,51 @@ import (
corev1 "k8s.io/api/core/v1"
authenticationv1 "k8s.io/api/authentication/v1"
"github.com/Infisical/infisical/k8-operator/api/v1alpha1"
"github.com/aws/smithy-go/ptr"
infisicalSdk "github.com/infisical/go-sdk"
"sigs.k8s.io/controller-runtime/pkg/client"
)
func GetServiceAccountToken(k8sClient client.Client, namespace string, serviceAccountName string) (string, error) {
func GetServiceAccountToken(k8sClient client.Client, namespace string, serviceAccountName string, autoCreateServiceAccountToken bool, serviceAccountTokenAudiences []string) (string, error) {
if autoCreateServiceAccountToken {
restClient, err := GetRestClientFromClient()
if err != nil {
return "", fmt.Errorf("failed to get REST client: %w", err)
}
tokenRequest := &authenticationv1.TokenRequest{
Spec: authenticationv1.TokenRequestSpec{
ExpirationSeconds: ptr.Int64(600), // 10 minutes. the token only needs to be valid for when we do the initial k8s login.
},
}
if len(serviceAccountTokenAudiences) > 0 {
// Conditionally add the audiences if they are specified.
// Failing to do this causes a default audience to be used, which is not what we want if the user doesn't specify any.
tokenRequest.Spec.Audiences = serviceAccountTokenAudiences
}
result := &authenticationv1.TokenRequest{}
err = restClient.
Post().
Namespace(namespace).
Resource("serviceaccounts").
Name(serviceAccountName).
SubResource("token").
Body(tokenRequest).
Do(context.Background()).
Into(result)
if err != nil {
return "", fmt.Errorf("failed to create token: %w", err)
}
return result.Status.Token, nil
}
serviceAccount := &corev1.ServiceAccount{}
err := k8sClient.Get(context.TODO(), client.ObjectKey{Name: serviceAccountName, Namespace: namespace}, serviceAccount)
@ -172,7 +211,9 @@ func HandleKubernetesAuth(ctx context.Context, reconcilerClient client.Client, s
Namespace: infisicalPushSecret.Spec.Authentication.KubernetesAuth.ServiceAccountRef.Namespace,
Name: infisicalPushSecret.Spec.Authentication.KubernetesAuth.ServiceAccountRef.Name,
},
SecretsScope: v1alpha1.MachineIdentityScopeInWorkspace{},
SecretsScope: v1alpha1.MachineIdentityScopeInWorkspace{},
AutoCreateServiceAccountToken: infisicalPushSecret.Spec.Authentication.KubernetesAuth.AutoCreateServiceAccountToken,
ServiceAccountTokenAudiences: infisicalPushSecret.Spec.Authentication.KubernetesAuth.ServiceAccountTokenAudiences,
}
case SecretCrd.INFISICAL_DYNAMIC_SECRET:
@ -188,7 +229,9 @@ func HandleKubernetesAuth(ctx context.Context, reconcilerClient client.Client, s
Namespace: infisicalDynamicSecret.Spec.Authentication.KubernetesAuth.ServiceAccountRef.Namespace,
Name: infisicalDynamicSecret.Spec.Authentication.KubernetesAuth.ServiceAccountRef.Name,
},
SecretsScope: v1alpha1.MachineIdentityScopeInWorkspace{},
SecretsScope: v1alpha1.MachineIdentityScopeInWorkspace{},
AutoCreateServiceAccountToken: infisicalDynamicSecret.Spec.Authentication.KubernetesAuth.AutoCreateServiceAccountToken,
ServiceAccountTokenAudiences: infisicalDynamicSecret.Spec.Authentication.KubernetesAuth.ServiceAccountTokenAudiences,
}
}
@ -196,7 +239,14 @@ func HandleKubernetesAuth(ctx context.Context, reconcilerClient client.Client, s
return AuthenticationDetails{}, ErrAuthNotApplicable
}
serviceAccountToken, err := GetServiceAccountToken(reconcilerClient, kubernetesAuthSpec.ServiceAccountRef.Namespace, kubernetesAuthSpec.ServiceAccountRef.Name)
serviceAccountToken, err := GetServiceAccountToken(
reconcilerClient,
kubernetesAuthSpec.ServiceAccountRef.Namespace,
kubernetesAuthSpec.ServiceAccountRef.Name,
kubernetesAuthSpec.AutoCreateServiceAccountToken,
kubernetesAuthSpec.ServiceAccountTokenAudiences,
)
if err != nil {
return AuthenticationDetails{}, fmt.Errorf("unable to get service account token [err=%s]", err)
}

View File

@ -9,6 +9,9 @@ import (
corev1 "k8s.io/api/core/v1"
k8Errors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@ -58,3 +61,32 @@ func GetInfisicalUniversalAuthFromKubeSecret(ctx context.Context, reconcilerClie
return model.MachineIdentityDetails{ClientId: string(clientIdFromSecret), ClientSecret: string(clientSecretFromSecret)}, nil
}
func getKubeClusterConfig() (*rest.Config, error) {
config, err := rest.InClusterConfig()
if err != nil {
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
configOverrides := &clientcmd.ConfigOverrides{}
kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
return kubeConfig.ClientConfig()
}
return config, nil
}
func GetRestClientFromClient() (rest.Interface, error) {
config, err := getKubeClusterConfig()
if err != nil {
return nil, err
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
return clientset.CoreV1().RESTClient(), nil
}