chore: scaletest: collect database metrics using prometheus-postgres-exporter (#7945)

Co-authored-by: Mathias Fredriksson <mafredri@gmail.com>
Author: Cian Johnston
Date: 2023-06-09 14:21:08 -07:00
Committed by: GitHub
Parent: 175561bf36
Commit: 2bbe650eb0
4 changed files with 66 additions and 14 deletions

View File

@@ -84,11 +84,10 @@ fi
 if [[ -z "${SCALETEST_PROMETHEUS_REMOTE_WRITE_USER}" ]] || [[ -z "${SCALETEST_PROMETHEUS_REMOTE_WRITE_PASSWORD}" ]]; then
 	echo "SCALETEST_PROMETHEUS_REMOTE_WRITE_USER or SCALETEST_PROMETHEUS_REMOTE_WRITE_PASSWORD not specified."
 	echo "No prometheus metrics will be collected!"
-	read -pr "Continue (y/N)? " choice
-	case "$choice" in
-	y | Y | yes | YES) ;;
-	*) exit 1 ;;
-	esac
+	read -p "Continue (y/N)? " -n1 -r
+	if [[ "${REPLY}" != [yY] ]]; then
+		exit 1
+	fi
 fi
 
 SCALETEST_SCENARIO_VARS="${PROJECT_ROOT}/scaletest/terraform/scenario-${SCALETEST_SCENARIO}.tfvars"
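For reference, the new prompt reads a single character into bash's default REPLY variable: -p prints the prompt, -n1 returns after one character, and -r disables backslash escape processing. A minimal standalone sketch of the same confirm pattern (the script below is illustrative only, not part of this change):

#!/usr/bin/env bash
set -euo pipefail

# Prompt for one character; with no variable name given, read stores it in $REPLY.
read -p "Continue (y/N)? " -n1 -r
echo # move to a fresh line after the single-character answer
if [[ "${REPLY}" != [yY] ]]; then
	echo "Aborted." >&2
	exit 1
fi
echo "Continuing..."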

View File

@@ -34,11 +34,11 @@ resource "kubernetes_namespace" "coder_namespace" {
   ]
 }
 
-resource "random_password" "postgres-admin-password" {
+resource "random_password" "coder-postgres-password" {
   length = 12
 }
 
-resource "random_password" "coder-postgres-password" {
+resource "random_password" "prometheus-postgres-password" {
   length = 12
 }

View File

@@ -51,3 +51,13 @@ resource "google_sql_user" "coder" {
   # required for postgres, otherwise user fails to delete
   deletion_policy = "ABANDON"
 }
+
+resource "google_sql_user" "prometheus" {
+  project  = var.project_id
+  instance = google_sql_database_instance.db.id
+  name     = "${var.name}-prometheus"
+  type     = "BUILT_IN"
+  password = random_password.prometheus-postgres-password.result
+  # required for postgres, otherwise user fails to delete
+  deletion_policy = "ABANDON"
+}
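If the new user needs a quick sanity check after terraform apply, something along these lines should work from a host that can reach the instance's private IP (every value below is a placeholder, not a Terraform output):

# Hypothetical smoke test: connect as the new prometheus user and run a trivial query.
# Replace host, user, database, and password with the real values for your deployment.
PGPASSWORD='<prometheus-password>' psql \
	--host '<db-private-ip>' \
	--username '<name>-prometheus' \
	--dbname '<coder-database>' \
	--command 'SELECT 1;'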

View File

@@ -1,10 +1,12 @@
 locals {
-  prometheus_helm_repo            = "https://charts.bitnami.com/bitnami"
-  prometheus_helm_chart           = "kube-prometheus"
-  prometheus_helm_version         = null // just use latest
-  prometheus_release_name         = "prometheus"
-  prometheus_namespace            = "prometheus"
-  prometheus_remote_write_enabled = var.prometheus_remote_write_password != ""
+  prometheus_helm_repo             = "https://charts.bitnami.com/bitnami"
+  prometheus_helm_chart            = "kube-prometheus"
+  prometheus_exporter_helm_repo    = "https://prometheus-community.github.io/helm-charts"
+  prometheus_exporter_helm_chart   = "prometheus-postgres-exporter"
+  prometheus_release_name          = "prometheus"
+  prometheus_exporter_release_name = "prometheus-postgres-exporter"
+  prometheus_namespace             = "prometheus"
+  prometheus_remote_write_enabled  = var.prometheus_remote_write_password != ""
 }
 
 # Create a namespace to hold our Prometheus deployment.
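The exporter chart referenced by the new locals lives in the prometheus-community repository. A quick way to confirm the chart reference resolves before applying (assumes the helm CLI is installed; the repo alias is arbitrary):

# Add the repo under a local alias and look up the chart named in the locals above.
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update
helm search repo prometheus-community/prometheus-postgres-exporter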
@@ -37,7 +39,6 @@ resource "helm_release" "prometheus-chart" {
   repository = local.prometheus_helm_repo
   chart      = local.prometheus_helm_chart
   name       = local.prometheus_release_name
-  version    = local.prometheus_helm_version
   namespace  = kubernetes_namespace.prometheus_namespace.metadata.0.name
   values = [<<EOF
 alertmanager:
@@ -97,6 +98,48 @@ prometheus:
   ]
 }
 
+resource "kubernetes_secret" "prometheus-postgres-password" {
+  type = "kubernetes.io/basic-auth"
+  metadata {
+    name      = "prometheus-postgres"
+    namespace = kubernetes_namespace.prometheus_namespace.metadata.0.name
+  }
+  data = {
+    username = google_sql_user.prometheus.name
+    password = google_sql_user.prometheus.password
+  }
+}
+
+# Install Prometheus Postgres exporter helm chart
+resource "helm_release" "prometheus-exporter-chart" {
+  repository = local.prometheus_exporter_helm_repo
+  chart      = local.prometheus_exporter_helm_chart
+  name       = local.prometheus_exporter_release_name
+  namespace  = local.prometheus_namespace
+  values = [<<EOF
+affinity:
+  nodeAffinity:
+    requiredDuringSchedulingIgnoredDuringExecution:
+      nodeSelectorTerms:
+      - matchExpressions:
+        - key: "cloud.google.com/gke-nodepool"
+          operator: "In"
+          values: ["${google_container_node_pool.misc.name}"]
+config:
+  datasource:
+    host: "${google_sql_database_instance.db.private_ip_address}"
+    user: "${google_sql_user.prometheus.name}"
+    database: "${google_sql_database.coder.name}"
+    passwordSecret:
+      name: "${kubernetes_secret.prometheus-postgres-password.metadata.0.name}"
+      key: password
+  autoDiscoverDatabases: true
+serviceMonitor:
+  enabled: true
+EOF
+  ]
+}
 
 # NOTE: this is created as a local file before being applied
 # as the kubernetes_manifest resource needs to be run separately
 # after creating a cluster, and we want this to be brought up
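Once applied, a rough way to confirm the exporter is actually scraping the database (the deployment name is an assumption derived from the release name above; 9187 is the exporter's default metrics port):

# Port-forward the exporter and check pg_up, which reports 1 when the
# configured datasource is reachable.
kubectl -n prometheus port-forward deploy/prometheus-postgres-exporter 9187:9187 &
PF_PID=$!
sleep 2
curl -s http://localhost:9187/metrics | grep '^pg_up'
kill "${PF_PID}"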