feat(scaletest): annotate scaletest pod when scaletest is in progress (#10235)
This PR modifies the scaletest-runner template to add a pod annotation to the scaletest runner pod. The annotation key is com.coder.scaletest.status and the value is the lowercased current status, e.g. preparing, running, or complete. This makes it possible to check whether a scaletest is in progress and to prevent operations that would interrupt a running scaletest.

Co-authored-by: Mathias Fredriksson <mafredri@gmail.com>
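For example, a maintenance or deploy script could read the annotation back and refuse to run while a scaletest is active. A minimal sketch, assuming a runner pod name and namespace (both hypothetical here); only the annotation key comes from this change:

#!/usr/bin/env bash
set -euo pipefail

pod=scaletest-runner # hypothetical pod name
ns=coder-scaletest   # hypothetical namespace

# Dots inside annotation keys must be backslash-escaped in kubectl's jsonpath.
status=$(kubectl --namespace "${ns}" get pod "${pod}" \
	--output jsonpath='{.metadata.annotations.com\.coder\.scaletest\.status}')

if [[ -n ${status} && ${status} != complete ]]; then
	echo "scaletest is ${status}; refusing to proceed" >&2
	exit 1
fi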
@@ -40,7 +40,7 @@ show_json() {
 set_status() {
 	dry_run=
 	if [[ ${DRY_RUN} == 1 ]]; then
-		dry_run=" (dry-ryn)"
+		dry_run=" (dry-run)"
 	fi
 	prev_status=$(get_status)
 	if [[ ${prev_status} != *"Not started"* ]]; then
@@ -49,6 +49,9 @@ set_status() {
 	echo "$(date -Ins) ${*}${dry_run}" >>"${SCALETEST_STATE_DIR}/status"
 
 	annotate_grafana "status" "Status: ${*}"
+
+	status_lower=$(tr '[:upper:]' '[:lower:]' <<<"${*}")
+	set_pod_status_annotation "${status_lower}"
 }
 lock_status() {
 	chmod 0440 "${SCALETEST_STATE_DIR}/status"
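set_status now derives the annotation value by lowercasing its arguments with tr over a here-string, so a status such as Running becomes the annotation value running. A standalone illustration of the idiom:

status="Running"
status_lower=$(tr '[:upper:]' '[:lower:]' <<<"${status}")
echo "${status_lower}" # prints: running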
@@ -247,6 +250,16 @@ set_appearance() {
 		"${CODER_URL}/api/v2/appearance"
 }
 
+namespace() {
+	cat /var/run/secrets/kubernetes.io/serviceaccount/namespace
+}
+coder_pods() {
+	kubectl get pods \
+		--namespace "$(namespace)" \
+		--selector "app.kubernetes.io/name=coder,app.kubernetes.io/part-of=coder" \
+		--output jsonpath='{.items[*].metadata.name}'
+}
+
 # fetch_coder_full fetches the full (non-slim) coder binary from one of the coder pods
 # running in the same namespace as the current pod.
 fetch_coder_full() {
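With the {.items[*].metadata.name} jsonpath, coder_pods prints every matching pod name space-separated on a single line, which is what lets the caller split the result on spaces later. A way to sanity-check the query by hand (namespace and output pod names hypothetical):

kubectl get pods \
	--namespace coder \
	--selector "app.kubernetes.io/name=coder,app.kubernetes.io/part-of=coder" \
	--output jsonpath='{.items[*].metadata.name}'
# e.g. prints: coder-6b7c9d8f4-abcde coder-6b7c9d8f4-fghij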
@@ -254,26 +267,26 @@ fetch_coder_full() {
 		log "Full Coder binary already exists at ${SCALETEST_CODER_BINARY}"
 		return
 	fi
-	local pod
-	local namespace
-	namespace=$(</var/run/secrets/kubernetes.io/serviceaccount/namespace)
-	if [[ -z "${namespace}" ]]; then
+	ns=$(namespace)
+	if [[ -z "${ns}" ]]; then
 		log "Could not determine namespace!"
-		exit 1
+		return 1
 	fi
-	log "Namespace from serviceaccount token is ${namespace}"
-	pod=$(kubectl get pods \
-		--namespace "${namespace}" \
-		--selector "app.kubernetes.io/name=coder,app.kubernetes.io/part-of=coder" \
-		--output jsonpath='{.items[0].metadata.name}')
+	log "Namespace from serviceaccount token is ${ns}"
+	pods=$(coder_pods)
+	if [[ -z ${pods} ]]; then
+		log "Could not find coder pods!"
+		return
+	fi
+	pod=$(cut -d ' ' -f 1 <<<"${pods}")
 	if [[ -z ${pod} ]]; then
 		log "Could not find coder pod!"
-		exit 1
+		return
 	fi
 	log "Fetching full Coder binary from ${pod}"
 	# We need --retries due to https://github.com/kubernetes/kubernetes/issues/60140 :(
 	maybedryrun "${DRY_RUN}" kubectl \
-		--namespace "${namespace}" \
+		--namespace "${ns}" \
 		cp \
 		--container coder \
 		--retries 10 \
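Because coder_pods returns one space-separated line, cut -d ' ' -f 1 selects the first pod name. For instance:

pods="coder-0 coder-1 coder-2" # hypothetical output of coder_pods
pod=$(cut -d ' ' -f 1 <<<"${pods}")
echo "${pod}" # prints: coder-0

Note also that the error paths now use return/return 1 instead of exit 1, so a failure in this library function no longer terminates the calling shell.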
@@ -281,3 +294,14 @@ fetch_coder_full() {
 	maybedryrun "${DRY_RUN}" chmod +x "${SCALETEST_CODER_BINARY}"
 	log "Full Coder binary downloaded to ${SCALETEST_CODER_BINARY}"
 }
+
+# set_pod_status_annotation annotates the currently running pod with the key
+# com.coder.scaletest.status. It will overwrite the previous status.
+set_pod_status_annotation() {
+	if [[ $# -ne 1 ]]; then
+		log "must specify an annotation value"
+		return
+	else
+		maybedryrun "${DRY_RUN}" kubectl --namespace "$(namespace)" annotate pod "$(hostname)" "com.coder.scaletest.status=$1" --overwrite
+	fi
+}
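Inside the runner pod, where hostname resolves to the pod name, the new helper can be exercised and verified like this; a sketch reusing the namespace helper from above, with DRY_RUN assumed to be 0:

set_pod_status_annotation "running"

# Read the annotation back; dots in the key are escaped for jsonpath.
kubectl --namespace "$(namespace)" get pod "$(hostname)" \
	--output jsonpath='{.metadata.annotations.com\.coder\.scaletest\.status}'
# prints: running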
@@ -17,6 +17,7 @@ coder exp scaletest create-workspaces \
 	--count "${SCALETEST_PARAM_NUM_WORKSPACES}" \
 	--template "${SCALETEST_PARAM_TEMPLATE}" \
 	--concurrency "${SCALETEST_PARAM_CREATE_CONCURRENCY}" \
+	--timeout 5h \
 	--job-timeout 5h \
 	--no-cleanup \
 	--output json:"${SCALETEST_RESULTS_DIR}/create-workspaces.json"