From 7ec0384b1f6a704fab2b490c127a6f1b6320bbf7 Mon Sep 17 00:00:00 2001
From: Jaye Doepke
Date: Mon, 15 Jul 2024 17:30:46 -0500
Subject: [PATCH] Use Grafana datasources from core-cluster-jsonnet for local dev

This commit removes the need for the build-harness-extensions-private
repo. Instead, the grafana-operator manifests are fetched from
core-cluster-jsonnet and patched with `yq` for local dashboard
development. Additionally, if you are developing dashboards in
core-cluster-jsonnet itself, the Makefile will not check out a second
copy of core-cluster-jsonnet just to get the grafana-operator
manifests. This also opens up options for testing changes to our
datasources locally.
---
 modules/grafana/Makefile                      | 52 ++++++++++++++-----
 modules/grafana/env.list                      |  9 ++++
 .../grafana/scripts/datasource_credentials.sh | 15 ++++++
 modules/grafana/scripts/patch_datasources.sh  | 39 ++++++++++++++
 4 files changed, 103 insertions(+), 12 deletions(-)
 create mode 100644 modules/grafana/env.list
 create mode 100644 modules/grafana/scripts/datasource_credentials.sh
 create mode 100755 modules/grafana/scripts/patch_datasources.sh

diff --git a/modules/grafana/Makefile b/modules/grafana/Makefile
index 90773f3ea5..175bcc1253 100644
--- a/modules/grafana/Makefile
+++ b/modules/grafana/Makefile
@@ -1,7 +1,7 @@
 # You might want to set these when you run make
 GRAFANA_ADMIN_PASSWORD ?= admin
 LOCAL_DASHBOARD_DIRECTORY ?= $(shell ${BUILD_HARNESS_EXTENSIONS_PATH}/modules/grafana/scripts/local_dashboard_directory_prompt.sh)
-BUILD_HARNESS_EXTENSIONS_PRIVATE_BRANCH ?= main
+CORE_CLUSTER_JSONNET_BRANCH ?= main
 CREATE_GRAFANA_INSTANCE ?= true
 GRAFANA_IMAGE ?= grafana/grafana:9.3.6
 GRAFANA_PLUGINS ?= natel-discrete-panel,grafana-piechart-panel,grafana-athena-datasource,marcusolsson-json-datasource
@@ -12,9 +12,14 @@ GRAFANA_SYNC_DOCKER_NAME = grafana_sync
 
 # Don't change these, unless you understand how they're used in the scripts below and accept the repurcussions
 CONTAINER_DASHBOARD_DIRECTORY = /app/dashboards
-TMP_GITLAB_REPO_DIRECTORY = /tmp/build-harness-extensions-private
+ifeq ($(shell git remote get-url origin),git@gitlab.com:mintel/satoshi/kubernetes/jsonnet/sre/core-cluster-jsonnet.git)
+TMP_CORE_CLUSTER_JSONNET = .
+else
+TMP_CORE_CLUSTER_JSONNET = /tmp/core-cluster-jsonnet
+endif
+TMP_GRAFANA_DATASOURCES = /tmp/grafana-datasources.yaml
 
-.PHONY: grafana/develop grafana/develop-oss grafana/cleanup grafana/aws-profile-check grafana/private grafana/setup-local-grafana-mintel grafana/setup-local-grafana-oss grafana/setup-grafana-syncer
+.PHONY: grafana/develop grafana/develop-oss grafana/cleanup grafana/aws-profile-check grafana/core-cluster-jsonnet grafana/setup-local-grafana-mintel grafana/setup-local-grafana-oss grafana/setup-grafana-syncer
 
 ifeq (${CREATE_GRAFANA_INSTANCE}, true)
 ## Develop grafana dashboards using live datasources. Mintel internal use only.
@@ -33,22 +38,45 @@ grafana/cleanup:
 	@docker kill ${GRAFANA_LOCAL_DOCKER_NAME} || true
 	@echo "Killing ${GRAFANA_SYNC_DOCKER_NAME}..."
 	@docker kill ${GRAFANA_SYNC_DOCKER_NAME} || true
-	@echo "Removing ${TMP_GITLAB_REPO_DIRECTORY}..."
-	@rm -rf ${TMP_GITLAB_REPO_DIRECTORY}
+	@echo "Removing ${TMP_GRAFANA_DATASOURCES}..."
+	@rm -rf ${TMP_GRAFANA_DATASOURCES}
 	@echo "Cleanup successful."
 
 grafana/aws-profile-check:
 	@[ "${AWS_PROFILE}" ] || ( echo ">> ERROR: AWS_PROFILE is not set. Please login with \"aws sso login --profile \" and set this variable, or try \"make grafana/develop-oss\" to edit dashboards with no datasources defined."; exit 1 )
 	@echo "AWS_PROFILE=${AWS_PROFILE}"
 
-grafana/private:
-	@git clone git@gitlab.com:mintel/satoshi/tools/build-harness-extensions-private.git -b ${BUILD_HARNESS_EXTENSIONS_PRIVATE_BRANCH} ${TMP_GITLAB_REPO_DIRECTORY}
+grafana/core-cluster-jsonnet:
+	@if [ ! -d "$(TMP_CORE_CLUSTER_JSONNET)" ]; then \
+		git clone git@gitlab.com:mintel/satoshi/kubernetes/jsonnet/sre/core-cluster-jsonnet.git --depth 1 -b ${CORE_CLUSTER_JSONNET_BRANCH} ${TMP_CORE_CLUSTER_JSONNET}; \
+	fi
+ifeq ($(TMP_CORE_CLUSTER_JSONNET),.)
+	# You are developing in core-cluster-jsonnet; using local grafana-operator manifests to set up Grafana datasources.
+else
+	@git -C "$(TMP_CORE_CLUSTER_JSONNET)" pull
+endif
 
-grafana/setup-local-grafana-mintel: grafana/aws-profile-check grafana/private
+grafana/setup-local-grafana-mintel: grafana/aws-profile-check grafana/core-cluster-jsonnet
 	@docker pull $(GRAFANA_IMAGE)
-	@. ${TMP_GITLAB_REPO_DIRECTORY}/modules/grafana/datasource_credentials.sh && \
-	MINTEL_BASE_URL='$(MINTEL_BASE_URL)' \
-	docker run --rm -d -p 3000:3000 --user $(id):$(id) -v ${TMP_GITLAB_REPO_DIRECTORY}/modules/grafana/provisioning:/etc/grafana/provisioning -v ${HOME}/.aws:/usr/share/grafana/.aws --env-file ${TMP_GITLAB_REPO_DIRECTORY}/modules/grafana/env.list -e GF_AUTH_ANONYMOUS_ORG_ROLE=Admin -e GF_AUTH_ANONYMOUS_ENABLED=true -e GF_INSTALL_PLUGINS=${GRAFANA_PLUGINS} -e GF_FEATURE_TOGGLES_ENABLE=traceqlEditor -e AWS_PROFILE=${AWS_PROFILE} -e AWS_SDK_LOAD_CONFIG=true -e AWS_EC2_METADATA_DISABLED=1 --name ${GRAFANA_LOCAL_DOCKER_NAME} $(GRAFANA_IMAGE)
+	@mkdir -p ${TMP_CORE_CLUSTER_JSONNET}/provisioning/datasources
+	@${BUILD_HARNESS_EXTENSIONS_PATH}/modules/grafana/scripts/patch_datasources.sh "${TMP_CORE_CLUSTER_JSONNET}" > ${TMP_GRAFANA_DATASOURCES}
+	@. ${BUILD_HARNESS_EXTENSIONS_PATH}/modules/grafana/scripts/datasource_credentials.sh && \
+	MINTEL_BASE_URL='$(MINTEL_BASE_URL)' \
+	docker run \
+		--rm -d -p 3000:3000 --user $(id):$(id) \
+		-v ${TMP_GRAFANA_DATASOURCES}:/etc/grafana/provisioning/datasources/automatic.yml \
+		-v ${HOME}/.aws:/usr/share/grafana/.aws \
+		--env-file ${BUILD_HARNESS_EXTENSIONS_PATH}/modules/grafana/env.list \
+		-e GF_AUTH_ANONYMOUS_ORG_ROLE=Admin \
+		-e GF_AUTH_ANONYMOUS_ENABLED=true \
+		-e GF_INSTALL_PLUGINS=${GRAFANA_PLUGINS} \
+		-e GF_FEATURE_TOGGLES_ENABLE=traceqlEditor \
+		-e AWS_PROFILE \
+		-e AWS_SDK_LOAD_CONFIG=true \
+		-e AWS_EC2_METADATA_DISABLED=1 \
+		--name ${GRAFANA_LOCAL_DOCKER_NAME} \
+		$(GRAFANA_IMAGE)
+
 grafana/setup-local-grafana-oss:
 	@docker pull $(GRAFANA_IMAGE)
 	@docker run --rm -d -p 3000:3000 --name ${GRAFANA_LOCAL_DOCKER_NAME} $(GRAFANA_IMAGE)
@@ -58,7 +86,7 @@ ifeq (${CREATE_GRAFANA_INSTANCE}, true)
 	# Give the grafana instance time to start up before changing the admin password in order to avoid errors
 	@echo "Starting grafana on localhost:3000 ..."
 	# Need to sleep for a bit and wait for plugins to be installed...
-	@sleep 6
+	@while ! curl -s localhost:3000/api/health > /dev/null; do sleep 0.25; done
 	@docker exec -it ${GRAFANA_LOCAL_DOCKER_NAME} grafana-cli --homepath "/usr/share/grafana" admin reset-admin-password ${GRAFANA_ADMIN_PASSWORD}
 endif
 	@docker pull mintel/grafana-local-sync:latest
diff --git a/modules/grafana/env.list b/modules/grafana/env.list
new file mode 100644
index 0000000000..99d14ba0b2
--- /dev/null
+++ b/modules/grafana/env.list
@@ -0,0 +1,9 @@
+# set of environment variables passed into docker for initializing grafana datasources
+#
+MINTEL_BASE_URL
+MIMIR_GATEWAY_AUTH_USER
+MIMIR_GATEWAY_AUTH_PASS
+LOKI_GATEWAY_AUTH_USER
+LOKI_GATEWAY_AUTH_PASS
+TEMPO_GATEWAY_AUTH_USER
+TEMPO_GATEWAY_AUTH_PASS
diff --git a/modules/grafana/scripts/datasource_credentials.sh b/modules/grafana/scripts/datasource_credentials.sh
new file mode 100644
index 0000000000..643467acc0
--- /dev/null
+++ b/modules/grafana/scripts/datasource_credentials.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+#
+# Set the credentials for monitoring related datasources as environment variables to be used in provisioning/datasources/automatic.yml
+for s in $(aws secretsmanager get-secret-value --secret-id sre/monitoring/loki-read-ingress-auth --query SecretString --output text --region us-east-2 | jq -r "to_entries|map(\"\(.key)=\(.value|tostring)\")|.[]"); do
+  export "${s?}"
+done
+echo "Successfully loaded loki credentials."
+for s in $(aws secretsmanager get-secret-value --secret-id sre/monitoring/tempo-read-ingress-auth --query SecretString --output text --region us-east-2 | jq -r "to_entries|map(\"\(.key)=\(.value|tostring)\")|.[]"); do
+  export "${s?}"
+done
+echo "Successfully loaded tempo credentials."
+for s in $(aws secretsmanager get-secret-value --secret-id sre/monitoring/mimir-read-ingress-auth --query SecretString --output text --region us-east-2 | jq -r "to_entries|map(\"\(.key)=\(.value|tostring)\")|.[]"); do
+  export "${s?}"
+done
+echo "Successfully loaded mimir credentials."
diff --git a/modules/grafana/scripts/patch_datasources.sh b/modules/grafana/scripts/patch_datasources.sh
new file mode 100755
index 0000000000..fffc489c90
--- /dev/null
+++ b/modules/grafana/scripts/patch_datasources.sh
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+
+# Given a path to a git checkout of core-cluster-jsonnet, this script will read the manifests for the GrafanaDataSource
+# resources of the us-monitoring1 cluster, merge them, and patch them for use in a locally-running Grafana container.
+# The result is output on stdout.
+
+TMP_MONITORING_CLUSTER_JSONNET="$1"
+
+# shellcheck disable=SC2016
+yq eval-all '. as $manifest ireduce ([]; . + $manifest.spec.datasources) | {"apiVersion": 1, "datasources": .}' "$TMP_MONITORING_CLUSTER_JSONNET"/rendered/environments/grafana-operator/aws.logs/manifests/integreatly.org-v1alpha1.GrafanaDataSource-*.yaml |
+yq eval '.datasources[] |= (
+  .editable = true |
+  .jsonData = .customJsonData | del(.customJsonData) |
+  with(select(.type == "loki");
+    .url = "https://gateway.loki.${MINTEL_BASE_URL}" |
+    .basicAuth = true |
+    .basicAuthUser = "${LOKI_GATEWAY_AUTH_USER}" |
+    .secureJsonData.basicAuthPassword = "${LOKI_GATEWAY_AUTH_PASS}"
+  ) |
+  with(select(.type == "tempo");
+    .url = "https://gateway.tempo.${MINTEL_BASE_URL}" |
+    .basicAuth = true |
+    .basicAuthUser = "${TEMPO_GATEWAY_AUTH_USER}" |
+    .secureJsonData.basicAuthPassword = "${TEMPO_GATEWAY_AUTH_PASS}"
+  ) |
+  with(select(.type == "prometheus");
+    .url = "https://gateway.mimir.${MINTEL_BASE_URL}/prometheus" |
+    .basicAuth = true |
+    .basicAuthUser = "${MIMIR_GATEWAY_AUTH_USER}" |
+    .secureJsonData.basicAuthPassword = "${MIMIR_GATEWAY_AUTH_PASS}"
+  ) |
+  with(select(.type == "grafana-athena-datasource");
+    .
+  ) |
+  with(select(.type == "cloudwatch");
+    .jsonData.defaultRegion = "us-east-2"
+  ) |
+  del(..|select(. == null))
+)'
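
A sketch of the resulting local workflow, run from a project wired up to
build-harness-extensions. The SSO profile name, branch, and standalone checkout
path below are illustrative assumptions, not values defined by this patch; the
yq calls in patch_datasources.sh assume mikefarah yq v4 is on the PATH.

    # Datasource credentials are read from AWS Secrets Manager, so an SSO session is needed first.
    aws sso login --profile my-sso-profile   # hypothetical profile name
    export AWS_PROFILE=my-sso-profile

    # Start local Grafana with datasources generated from core-cluster-jsonnet.
    make grafana/develop CORE_CLUSTER_JSONNET_BRANCH=main

    # The patched provisioning file can also be generated by hand from a
    # build-harness-extensions checkout, assuming a core-cluster-jsonnet
    # checkout already exists at /tmp/core-cluster-jsonnet:
    modules/grafana/scripts/patch_datasources.sh /tmp/core-cluster-jsonnet > /tmp/grafana-datasources.yaml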