chore: add cherry-picks for Coder v2.17.1 (#15454)

Co-authored-by: Cian Johnston <cian@coder.com>
This commit is contained in:
Ben Potter
2024-11-08 15:24:55 -06:00
committed by GitHub
parent 5a6d23a4a3
commit 0598aecf90
14 changed files with 230 additions and 76 deletions

View File

@ -523,8 +523,8 @@ func TestAcquirer_MatchTags(t *testing.T) {
// Generate a table that can be copy-pasted into docs/admin/provisioners.md
lines := []string{
"\n",
"| Provisioner Tags | Job Tags | Can Run Job? |",
"|------------------|----------|--------------|",
"| Provisioner Tags | Job Tags | Same Org | Can Run Job? |",
"|------------------|----------|----------|--------------|",
}
// turn the JSON map into k=v for readability
kvs := func(m map[string]string) string {
@ -539,10 +539,14 @@ func TestAcquirer_MatchTags(t *testing.T) {
}
for _, tt := range testCases {
acquire := "✅"
sameOrg := "✅"
if !tt.expectAcquire {
acquire = "❌"
}
s := fmt.Sprintf("| %s | %s | %s |", kvs(tt.acquireJobTags), kvs(tt.provisionerJobTags), acquire)
if tt.unmatchedOrg {
sameOrg = "❌"
}
s := fmt.Sprintf("| %s | %s | %s | %s |", kvs(tt.acquireJobTags), kvs(tt.provisionerJobTags), sameOrg, acquire)
lines = append(lines, s)
}
t.Logf("You can paste this into docs/admin/provisioners.md")

View File

@ -178,7 +178,8 @@ A provisioner can run a given build job if one of the below is true:
1. If a job has any explicit tags, it can only run on a provisioner with those
explicit tags (the provisioner could have additional tags).
The external provisioner in the above example can run build jobs with tags:
The external provisioner in the above example can run build jobs in the same
organization with tags:
- `environment=on_prem`
- `datacenter=chicago`
@ -186,7 +187,8 @@ The external provisioner in the above example can run build jobs with tags:
However, it will not pick up any build jobs that do not have either of the
`environment` or `datacenter` tags set. It will also not pick up any build jobs
from templates with the tag `scope=user` set.
from templates with the tag `scope=user` set, or build jobs from templates in
different organizations.
> [!NOTE] If you only run tagged provisioners, you will need to specify a set of
> tags that matches at least one provisioner for _all_ template import jobs and
@ -198,34 +200,35 @@ from templates with the tag `scope=user` set.
This is illustrated in the below table:
| Provisioner Tags | Job Tags | Can Run Job? |
| ----------------------------------------------------------------- | ---------------------------------------------------------------- | ------------ |
| scope=organization owner= | scope=organization owner= | ✅ |
| scope=organization owner= environment=on-prem | scope=organization owner= environment=on-prem | ✅ |
| scope=organization owner= environment=on-prem datacenter=chicago | scope=organization owner= environment=on-prem | ✅ |
| scope=organization owner= environment=on-prem datacenter=chicago | scope=organization owner= environment=on-prem datacenter=chicago | ✅ |
| scope=user owner=aaa | scope=user owner=aaa | ✅ |
| scope=user owner=aaa environment=on-prem | scope=user owner=aaa | ✅ |
| scope=user owner=aaa environment=on-prem | scope=user owner=aaa environment=on-prem | ✅ |
| scope=user owner=aaa environment=on-prem datacenter=chicago | scope=user owner=aaa environment=on-prem | ✅ |
| scope=user owner=aaa environment=on-prem datacenter=chicago | scope=user owner=aaa environment=on-prem datacenter=chicago | ✅ |
| scope=organization owner= | scope=organization owner= environment=on-prem | ❌ |
| scope=organization owner= environment=on-prem | scope=organization owner= | ❌ |
| scope=organization owner= environment=on-prem | scope=organization owner= environment=on-prem datacenter=chicago | ❌ |
| scope=organization owner= environment=on-prem datacenter=new_york | scope=organization owner= environment=on-prem datacenter=chicago | ❌ |
| scope=user owner=aaa | scope=organization owner= | ❌ |
| scope=user owner=aaa | scope=user owner=bbb | ❌ |
| scope=organization owner= | scope=user owner=aaa | ❌ |
| scope=organization owner= | scope=user owner=aaa environment=on-prem | ❌ |
| scope=user owner=aaa | scope=user owner=aaa environment=on-prem | ❌ |
| scope=user owner=aaa environment=on-prem | scope=user owner=aaa environment=on-prem datacenter=chicago | ❌ |
| scope=user owner=aaa environment=on-prem datacenter=chicago | scope=user owner=aaa environment=on-prem datacenter=new_york | ❌ |
| Provisioner Tags | Job Tags | Same Org | Can Run Job? |
| ----------------------------------------------------------------- | ---------------------------------------------------------------- | -------- | ------------ |
| scope=organization owner= | scope=organization owner= | ✅ | ✅ |
| scope=organization owner= environment=on-prem | scope=organization owner= environment=on-prem | ✅ | ✅ |
| scope=organization owner= environment=on-prem datacenter=chicago | scope=organization owner= environment=on-prem | ✅ | ✅ |
| scope=organization owner= environment=on-prem datacenter=chicago | scope=organization owner= environment=on-prem datacenter=chicago | ✅ | ✅ |
| scope=user owner=aaa | scope=user owner=aaa | ✅ | ✅ |
| scope=user owner=aaa environment=on-prem | scope=user owner=aaa | ✅ | ✅ |
| scope=user owner=aaa environment=on-prem | scope=user owner=aaa environment=on-prem | ✅ | ✅ |
| scope=user owner=aaa environment=on-prem datacenter=chicago | scope=user owner=aaa environment=on-prem | ✅ | ✅ |
| scope=user owner=aaa environment=on-prem datacenter=chicago | scope=user owner=aaa environment=on-prem datacenter=chicago | ✅ | ✅ |
| scope=organization owner= | scope=organization owner= environment=on-prem | ✅ | ❌ |
| scope=organization owner= environment=on-prem | scope=organization owner= | ✅ | ❌ |
| scope=organization owner= environment=on-prem | scope=organization owner= environment=on-prem datacenter=chicago | ✅ | ❌ |
| scope=organization owner= environment=on-prem datacenter=new_york | scope=organization owner= environment=on-prem datacenter=chicago | ✅ | ❌ |
| scope=user owner=aaa | scope=organization owner= | ✅ | ❌ |
| scope=user owner=aaa | scope=user owner=bbb | ✅ | ❌ |
| scope=organization owner= | scope=user owner=aaa | ✅ | ❌ |
| scope=organization owner= | scope=user owner=aaa environment=on-prem | ✅ | ❌ |
| scope=user owner=aaa | scope=user owner=aaa environment=on-prem | ✅ | ❌ |
| scope=user owner=aaa environment=on-prem | scope=user owner=aaa environment=on-prem datacenter=chicago | ✅ | ❌ |
| scope=user owner=aaa environment=on-prem datacenter=chicago | scope=user owner=aaa environment=on-prem datacenter=new_york | ✅ | ❌ |
| scope=organization owner= environment=on-prem | scope=organization owner= environment=on-prem | ❌ | ❌ |
> **Note to maintainers:** to generate this table, run the following command and
> copy the output:
>
> ```
> go test -v -count=1 ./coderd/provisionerserver/ -test.run='^TestAcquirer_MatchTags/GenTable$'
> go test -v -count=1 ./coderd/provisionerdserver/ -test.run='^TestAcquirer_MatchTags/GenTable$'
> ```
## Types of provisioners
@ -288,8 +291,7 @@ will use in concert with the Helm chart for deploying the Coder server.
```sh
coder provisioner keys create my-cool-key --org default
# Optionally, you can specify tags for the provisioner key:
# coder provisioner keys create my-cool-key --org default --tags location=auh kind=k8s
```
# coder provisioner keys create my-cool-key --org default --tag location=auh --tag kind=k8s
Successfully created provisioner key kubernetes-key! Save this authentication
token, it will not be shown again.
@ -300,25 +302,7 @@ will use in concert with the Helm chart for deploying the Coder server.
1. Store the key in a kubernetes secret:
```sh
kubectl create secret generic coder-provisioner-psk --from-literal=key1=`<key omitted>`
```
1. Modify your Coder `values.yaml` to include
```yaml
provisionerDaemon:
keySecretName: "coder-provisioner-keys"
keySecretKey: "key1"
```
1. Redeploy Coder with the new `values.yaml` to roll out the PSK. You can omit
`--version <your version>` to also upgrade Coder to the latest version.
```sh
helm upgrade coder coder-v2/coder \
--namespace coder \
--version <your version> \
--values values.yaml
kubectl create secret generic coder-provisioner-psk --from-literal=my-cool-key=`<key omitted>`
```
1. Create a `provisioner-values.yaml` file for the provisioner daemons Helm
@ -331,13 +315,17 @@ will use in concert with the Helm chart for deploying the Coder server.
value: "https://coder.example.com"
replicaCount: 10
provisionerDaemon:
# NOTE: in older versions of the Helm chart (2.17.0 and below), it is required to set this to an empty string.
pskSecretName: ""
keySecretName: "coder-provisioner-keys"
keySecretKey: "key1"
keySecretKey: "my-cool-key"
```
This example creates a deployment of 10 provisioner daemons (for 10
concurrent builds) with the listed tags. For generic provisioners, remove the
tags.
concurrent builds) authenticating using the above key. The daemons will
authenticate using the provisioner key created in the previous step and
acquire jobs matching the tags specified when the provisioner key was
created. The set of tags is inferred automatically from the provisioner key.
> Refer to the
> [values.yaml](https://github.com/coder/coder/blob/main/helm/provisioner/values.yaml)

View File

@ -34,22 +34,23 @@ env:
value: "0.0.0.0:2112"
{{- if and (empty .Values.provisionerDaemon.pskSecretName) (empty .Values.provisionerDaemon.keySecretName) }}
{{ fail "Either provisionerDaemon.pskSecretName or provisionerDaemon.keySecretName must be specified." }}
{{- else if and (.Values.provisionerDaemon.pskSecretName) (.Values.provisionerDaemon.keySecretName) }}
{{ fail "Either provisionerDaemon.pskSecretName or provisionerDaemon.keySecretName must be specified, but not both." }}
{{- end }}
{{- if .Values.provisionerDaemon.pskSecretName }}
- name: CODER_PROVISIONER_DAEMON_PSK
valueFrom:
secretKeyRef:
name: {{ .Values.provisionerDaemon.pskSecretName | quote }}
key: psk
{{- end }}
{{- if and .Values.provisionerDaemon.keySecretName .Values.provisionerDaemon.keySecretKey }}
{{- else if and .Values.provisionerDaemon.keySecretName .Values.provisionerDaemon.keySecretKey }}
{{- if and (not (empty .Values.provisionerDaemon.pskSecretName)) (ne .Values.provisionerDaemon.pskSecretName "coder-provisioner-psk") }}
{{ fail "Either provisionerDaemon.pskSecretName or provisionerDaemon.keySecretName must be specified, but not both." }}
{{- else if .Values.provisionerDaemon.tags }}
{{ fail "provisionerDaemon.tags may not be specified with provisionerDaemon.keySecretName." }}
{{- end }}
- name: CODER_PROVISIONER_DAEMON_KEY
valueFrom:
secretKeyRef:
name: {{ .Values.provisionerDaemon.keySecretName | quote }}
key: {{ .Values.provisionerDaemon.keySecretKey | quote }}
{{- else }}
- name: CODER_PROVISIONER_DAEMON_PSK
valueFrom:
secretKeyRef:
name: {{ .Values.provisionerDaemon.pskSecretName | quote }}
key: psk
{{- end }}
{{- if include "provisioner.tags" . }}
- name: CODER_PROVISIONERD_TAGS

View File

@ -56,6 +56,12 @@ var testCases = []testCase{
name: "provisionerd_key",
expectedError: "",
},
// Test explicitly for the workaround where setting provisionerDaemon.pskSecretName=""
// was required to use provisioner keys.
{
name: "provisionerd_key_psk_empty_workaround",
expectedError: "",
},
{
name: "provisionerd_psk_and_key",
expectedError: `Either provisionerDaemon.pskSecretName or provisionerDaemon.keySecretName must be specified, but not both.`,
@ -64,6 +70,10 @@ var testCases = []testCase{
name: "provisionerd_no_psk_or_key",
expectedError: `Either provisionerDaemon.pskSecretName or provisionerDaemon.keySecretName must be specified.`,
},
{
name: "provisionerd_key_tags",
expectedError: `provisionerDaemon.tags may not be specified with provisionerDaemon.keySecretName.`,
},
{
name: "extra_templates",
expectedError: "",

View File

@ -112,8 +112,6 @@ spec:
secretKeyRef:
key: provisionerd-key
name: coder-provisionerd-key
- name: CODER_PROVISIONERD_TAGS
value: clusterType=k8s,location=auh
- name: CODER_URL
value: http://coder.default.svc.cluster.local
image: ghcr.io/coder/coder:latest

View File

@ -2,9 +2,5 @@ coder:
image:
tag: latest
provisionerDaemon:
pskSecretName: ""
keySecretName: "coder-provisionerd-key"
keySecretKey: "provisionerd-key"
tags:
location: auh
clusterType: k8s

View File

@ -0,0 +1,135 @@
---
# Source: coder-provisioner/templates/coder.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
annotations: {}
labels:
app.kubernetes.io/instance: release-name
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: coder-provisioner
app.kubernetes.io/part-of: coder-provisioner
app.kubernetes.io/version: 0.1.0
helm.sh/chart: coder-provisioner-0.1.0
name: coder-provisioner
---
# Source: coder-provisioner/templates/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: coder-provisioner-workspace-perms
rules:
- apiGroups: [""]
resources: ["pods"]
verbs:
- create
- delete
- deletecollection
- get
- list
- patch
- update
- watch
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs:
- create
- delete
- deletecollection
- get
- list
- patch
- update
- watch
- apiGroups:
- apps
resources:
- deployments
verbs:
- create
- delete
- deletecollection
- get
- list
- patch
- update
- watch
---
# Source: coder-provisioner/templates/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: "coder-provisioner"
subjects:
- kind: ServiceAccount
name: "coder-provisioner"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: coder-provisioner-workspace-perms
---
# Source: coder-provisioner/templates/coder.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
annotations: {}
labels:
app.kubernetes.io/instance: release-name
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: coder-provisioner
app.kubernetes.io/part-of: coder-provisioner
app.kubernetes.io/version: 0.1.0
helm.sh/chart: coder-provisioner-0.1.0
name: coder-provisioner
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/instance: release-name
app.kubernetes.io/name: coder-provisioner
template:
metadata:
annotations: {}
labels:
app.kubernetes.io/instance: release-name
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: coder-provisioner
app.kubernetes.io/part-of: coder-provisioner
app.kubernetes.io/version: 0.1.0
helm.sh/chart: coder-provisioner-0.1.0
spec:
containers:
- args:
- provisionerd
- start
command:
- /opt/coder
env:
- name: CODER_PROMETHEUS_ADDRESS
value: 0.0.0.0:2112
- name: CODER_PROVISIONER_DAEMON_KEY
valueFrom:
secretKeyRef:
key: provisionerd-key
name: coder-provisionerd-key
- name: CODER_URL
value: http://coder.default.svc.cluster.local
image: ghcr.io/coder/coder:latest
imagePullPolicy: IfNotPresent
lifecycle: {}
name: coder
ports: null
resources: {}
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: null
runAsGroup: 1000
runAsNonRoot: true
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
volumeMounts: []
restartPolicy: Always
serviceAccountName: coder-provisioner
terminationGracePeriodSeconds: 600
volumes: []

View File

@ -0,0 +1,7 @@
coder:
image:
tag: latest
provisionerDaemon:
pskSecretName: ""
keySecretName: "coder-provisionerd-key"
keySecretKey: "provisionerd-key"

View File

@ -0,0 +1,9 @@
coder:
image:
tag: latest
provisionerDaemon:
keySecretName: "coder-provisionerd-key"
keySecretKey: "provisionerd-key"
tags:
location: auh
clusterType: k8s

View File

@ -4,6 +4,3 @@ coder:
provisionerDaemon:
pskSecretName: ""
keySecretName: ""
tags:
location: auh
clusterType: k8s

View File

@ -111,7 +111,7 @@ spec:
valueFrom:
secretKeyRef:
key: psk
name: coder-provisionerd-psk
name: not-the-default-coder-provisioner-psk
- name: CODER_PROVISIONERD_TAGS
value: clusterType=k8s,location=auh
- name: CODER_URL

View File

@ -2,7 +2,7 @@ coder:
image:
tag: latest
provisionerDaemon:
pskSecretName: "coder-provisionerd-psk"
pskSecretName: "not-the-default-coder-provisioner-psk"
tags:
location: auh
clusterType: k8s

View File

@ -2,7 +2,7 @@ coder:
image:
tag: latest
provisionerDaemon:
pskSecretName: "coder-provisionerd-psk"
pskSecretName: "not-the-default-coder-provisioner-psk"
keySecretName: "coder-provisionerd-key"
keySecretKey: "provisionerd-key"
tags:

View File

@ -204,14 +204,23 @@ provisionerDaemon:
# provisionerDaemon.keySecretName -- The name of the Kubernetes
# secret that contains a provisioner key to use to authenticate with Coder.
# See: https://coder.com/docs/admin/provisioners#authentication
# NOTE: it is not permitted to specify both provisionerDaemon.keySecretName
# and provisionerDaemon.pskSecretName. An exception is made for the purposes
# of backwards-compatibility: if provisionerDaemon.pskSecretName is unchanged
# from the default value and provisionerDaemon.keySecretName is set, then
# provisionerDaemon.keySecretName and provisionerDaemon.keySecretKey will take
# precedence over provisionerDaemon.pskSecretName.
keySecretName: ""
# provisionerDaemon.keySecretKey -- The key of the Kubernetes
# secret specified in provisionerDaemon.keySecretName that contains
# the provisioner key. Defaults to "key".
keySecretKey: "key"
# provisionerDaemon.tags -- Tags to filter provisioner jobs by.
# provisionerDaemon.tags -- If using a PSK, specify the set of provisioner
# job tags for which this provisioner daemon is responsible.
# See: https://coder.com/docs/admin/provisioners#provisioner-tags
# NOTE: it is not permitted to specify both provisionerDaemon.tags and
# provisionerDaemon.keySecretName.
tags:
{}
# location: usa