diff --git a/.github/workflows/.dbdeployer.yml b/.github/workflows/.dbdeployer.yml new file mode 100644 index 000000000..863861149 --- /dev/null +++ b/.github/workflows/.dbdeployer.yml @@ -0,0 +1,166 @@ +name: .Crunchy Deploy + +on: + workflow_call: + inputs: ### Required + directory: + description: Crunchy Chart directory + default: 'charts/crunchy' + required: false + type: string + oc_server: + default: https://api.silver.devops.gov.bc.ca:6443 + description: 'OpenShift server' + required: false + type: string + environment: + description: Environment name; omit for PRs + required: false + type: string + s3_enabled: + description: Enable S3 backups + required: false + default: true + type: boolean + values: + description: 'Values file' + default: 'values.yaml' + required: false + type: string + app_values: + description: 'App specific values file which is present inside charts/app' + default: 'values.yaml' + required: false + type: string + enabled: + description: 'Enable the deployment of the crunchy database, easy switch to turn it on/off' + default: true + required: false + type: boolean + timeout-minutes: + description: 'Timeout minutes' + default: 20 + required: false + type: number + triggers: + description: Paths used to trigger a database deployment + required: false + type: string + secrets: + oc_namespace: + description: OpenShift namespace + required: true + oc_token: + description: OpenShift token + required: true + s3_access_key: + description: S3 access key + required: false + s3_secret_key: + description: S3 secret key + required: false +jobs: + deploy_db: + timeout-minutes: ${{ inputs.timeout-minutes }} + runs-on: ubuntu-24.04 + if: ${{ inputs.enabled }} + name: Deploy Or Upgrade Crunchy DB + environment: ${{ inputs.environment }} + steps: + - uses: actions/checkout@v4 + - name: Install CLI tools from OpenShift Mirror + uses: redhat-actions/openshift-tools-installer@v1 + with: + oc: "4.14.37" + - uses: bcgov-nr/action-diff-triggers@v0.2.0 + id: 
triggers + with: + triggers: ${{ inputs.triggers }} + - name: Validate Inputs + if: steps.triggers.outputs.triggered == 'true' + shell: bash + run: | + if [ ${{ inputs.s3_enabled }} == true ]; then + echo "S3 is enabled for backups, checking for mandatory secrets" + if [ ! "${{ secrets.s3_access_key }}" ]; then + echo "S3 access key not found" + exit 1 + fi + if [ ! "${{ secrets.s3_secret_key }}" ]; then + echo "S3 secret key not found" + exit 1 + fi + fi + + - name: OC Login + shell: bash + run: | + # OC Login + OC_TEMP_TOKEN=$(curl -k -X POST ${{ inputs.oc_server }}/api/v1/namespaces/${{ secrets.oc_namespace }}/serviceaccounts/pipeline/token --header "Authorization: Bearer ${{ secrets.oc_token }}" -d '{"spec": {"expirationSeconds": 600}}' -H 'Content-Type: application/json; charset=utf-8' | jq -r '.status.token' ) + + oc login --token=$OC_TEMP_TOKEN --server=${{ inputs.oc_server }} + oc project ${{ secrets.oc_namespace }} # Safeguard! + + - name: Deploy Database + if: steps.triggers.outputs.triggered == 'true' + working-directory: ${{ inputs.directory }} + shell: bash + run: | + echo 'Deploying crunchy helm chart' + if [ ${{ inputs.s3_enabled }} == true ]; then + helm upgrade --install --wait --set crunchy.pgBackRest.s3.enabled=true \ + --set-string crunchy.pgBackRest.s3.accessKey=${{ secrets.s3_access_key }} \ + --set-string crunchy.pgBackRest.s3.secretKey=${{ secrets.s3_secret_key }} \ + --values ${{ inputs.values }} postgres . + else + helm upgrade --install --wait --values ${{ inputs.values }} postgres . 
+ fi + + - name: Add PR specific user to Crunchy DB # only for PRs + shell: bash + if: github.event_name == 'pull_request' + run: | + echo 'Adding PR specific user to Crunchy DB' + NEW_USER='{"databases":["app-${{ github.event.number }}"],"name":"app-${{ github.event.number }}"}' + CURRENT_USERS=$(oc get PostgresCluster/postgres-crunchy -o json | jq '.spec.users') + echo "${CURRENT_USERS}" + + # check if current_users already contains the new_user + if echo "${CURRENT_USERS}" | jq -e ".[] | select(.name == \"app-${{ github.event.number }}\")" > /dev/null; then + echo "User already exists" + exit 0 + fi + + UPDATED_USERS=$(echo "$CURRENT_USERS" | jq --argjson NEW_USER "$NEW_USER" '. + [$NEW_USER]') + echo "$UPDATED_USERS" + PATCH_JSON=$(jq -n --argjson users "$UPDATED_USERS" '{"spec": {"users": $users}}') + echo "$PATCH_JSON" + oc patch PostgresCluster/postgres-crunchy --type=merge -p "${PATCH_JSON}" + + # wait for sometime as it takes time to create the user, query the secret and check if it is created, otherwise wait in a loop for 10 rounds + for i in {1..10}; do + if oc get secret postgres-crunchy-pguser-app-${{ github.event.number }} -o jsonpath='{.metadata.name}' > /dev/null; then + echo "Secret created" + break + else + echo "Secret not created, waiting for 60 seconds" + sleep 60 + fi + done + + # Add public schema and grant to PR user + # get primary crunchy pod and remove the role and db + CRUNCHY_PG_PRIMARY_POD_NAME=$(oc get pods -l postgres-operator.crunchydata.com/role=master -o json | jq -r '.items[0].metadata.name') + echo "${CRUNCHY_PG_PRIMARY_POD_NAME}" + oc exec "${CRUNCHY_PG_PRIMARY_POD_NAME}" -- psql -d "app-${{ github.event.number }}" -c "CREATE SCHEMA IF NOT EXISTS public;" + oc exec "${CRUNCHY_PG_PRIMARY_POD_NAME}" -- psql -d "app-${{ github.event.number }}" -c "GRANT ALL PRIVILEGES ON SCHEMA public TO \"app-${{ github.event.number }}\";" + oc exec "${CRUNCHY_PG_PRIMARY_POD_NAME}" -- psql -d "app-${{ github.event.number }}" -c "GRANT ALL 
PRIVILEGES ON ALL TABLES IN SCHEMA public TO \"app-${{ github.event.number }}\";" + oc exec "${CRUNCHY_PG_PRIMARY_POD_NAME}" -- psql -d "app-${{ github.event.number }}" -c "GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO \"app-${{ github.event.number }}\";" + oc exec "${CRUNCHY_PG_PRIMARY_POD_NAME}" -- psql -d "app-${{ github.event.number }}" -c "GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA public TO \"app-${{ github.event.number }}\";" + oc exec "${CRUNCHY_PG_PRIMARY_POD_NAME}" -- psql -d "app-${{ github.event.number }}" -c "ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL PRIVILEGES ON TABLES TO \"app-${{ github.event.number }}\";" + oc exec "${CRUNCHY_PG_PRIMARY_POD_NAME}" -- psql -d "app-${{ github.event.number }}" -c "ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL PRIVILEGES ON SEQUENCES TO \"app-${{ github.event.number }}\";" + oc exec "${CRUNCHY_PG_PRIMARY_POD_NAME}" -- psql -d "app-${{ github.event.number }}" -c "ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL PRIVILEGES ON FUNCTIONS TO \"app-${{ github.event.number }}\";" + # TODO: remove these + + + diff --git a/.github/workflows/pr-close.yml b/.github/workflows/pr-close.yml index a0b95c61c..f7ff62e4e 100644 --- a/.github/workflows/pr-close.yml +++ b/.github/workflows/pr-close.yml @@ -42,3 +42,50 @@ jobs: OC_SERVER: ${{ vars.OC_SERVER }} OC_TOKEN: ${{ secrets.OC_TOKEN }} PR_NUMBER: ${{ github.event.number }} + + cleanup-crunchy: + name: Cleanup Ephemeral Crunchy Data + runs-on: ubuntu-24.04 + timeout-minutes: 10 + steps: + # OC setup + - uses: redhat-actions/openshift-tools-installer@v1 + with: + oc: "4" + + # OC Login + - run: | + # OC Login + oc login --token=${{ secrets.OC_TOKEN }} --server=${{ vars.OC_SERVER }} + oc project ${{ secrets.OC_NAMESPACE }} # Safeguard! 
+ - run: | + # check if postgres-crunchy exists or else exit + oc get PostgresCluster/postgres-crunchy || exit 0 + # Remove the user from the crunchy cluster yaml and apply the changes + USER_TO_REMOVE='{"databases":["app-${{ github.event.number }}"],"name":"app-${{ github.event.number }}"}' + + echo 'getting current users from crunchy' + CURRENT_USERS=$(oc get PostgresCluster/postgres-crunchy -o json | jq '.spec.users') + echo "${CURRENT_USERS}" + + # Remove the user from the list + UPDATED_USERS=$(echo "$CURRENT_USERS" | jq --argjson user "$USER_TO_REMOVE" 'map(select(. != $user))') + + PATCH_JSON=$(jq -n --argjson users "$UPDATED_USERS" '{"spec": {"users": $users}}') + oc patch PostgresCluster/postgres-crunchy --type=merge -p "$PATCH_JSON" + + # get primary crunchy pod and remove the role and db + CRUNCHY_PG_PRIMARY_POD_NAME=$(oc get pods -l postgres-operator.crunchydata.com/role=master -o json | jq -r '.items[0].metadata.name') + + echo "${CRUNCHY_PG_PRIMARY_POD_NAME}" + # Terminate all connections to the database before trying to drop it + oc exec "${CRUNCHY_PG_PRIMARY_POD_NAME}" -- psql -c "SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity WHERE pg_stat_activity.datname = 'app-${{ github.event.number }}' AND pid <> pg_backend_pid();" + + # Drop the database and role (note: DROP DATABASE/DROP ROLE take no CASCADE clause) + oc exec "${CRUNCHY_PG_PRIMARY_POD_NAME}" -- psql -c "DROP DATABASE \"app-${{ github.event.number }}\"" + + oc exec "${CRUNCHY_PG_PRIMARY_POD_NAME}" -- psql -c "DROP ROLE \"app-${{ github.event.number }}\"" + + echo "Database and Role for PR is cleaned." 
+ + exit 0 \ No newline at end of file diff --git a/.github/workflows/pr-open.yml b/.github/workflows/pr-open.yml index b773278b6..ac5fa54db 100644 --- a/.github/workflows/pr-open.yml +++ b/.github/workflows/pr-open.yml @@ -32,16 +32,28 @@ jobs: tag_fallback: latest triggers: ('${{ matrix.package }}/') + # https://github.com/bcgov/quickstart-openshift + crunchy: + name: Deploy Crunchy + needs: [builds] + uses: ./.github/workflows/.dbdeployer.yml + with: + s3_enabled: false # no backups in dev + secrets: + oc_namespace: ${{ secrets.OC_NAMESPACE }} + oc_token: ${{ secrets.OC_TOKEN }} + # https://github.com/bcgov/quickstart-openshift-helpers deploys: name: Deploys - needs: [builds] + needs: [builds, crunchy] uses: bcgov/quickstart-openshift-helpers/.github/workflows/.deployer.yml@v0.5.0 secrets: oc_namespace: ${{ secrets.OC_NAMESPACE }} oc_token: ${{ secrets.OC_TOKEN }} with: - triggers: ('backend/' 'frontend/' 'webeoc/' 'migrations/') + params: --set global.useCrunchy=true + healthcheck: name: Healthcheck Deployment diff --git a/backend/Dockerfile b/backend/Dockerfile index a7d9cda9f..1dc5edeb6 100644 --- a/backend/Dockerfile +++ b/backend/Dockerfile @@ -26,5 +26,5 @@ HEALTHCHECK --interval=30s --timeout=3s CMD curl -f http://localhost/:3000 # Non-privileged user USER app -# max old space the heap size, 120MB with 200MB limit in deployment. -CMD ["--max-old-space-size=120", "/app/dist/main"] +# max old space the heap size, 500MB +CMD ["--max-old-space-size=500", "/app/dist/main"] diff --git a/charts/app/templates/backend/templates/deployment.yaml b/charts/app/templates/backend/templates/deployment.yaml index 0ba5618f8..990af921b 100644 --- a/charts/app/templates/backend/templates/deployment.yaml +++ b/charts/app/templates/backend/templates/deployment.yaml @@ -31,9 +31,11 @@ spec: - name: {{ include "backend.fullname" . 
}}-init image: "{{ .Values.global.registry }}/{{ .Values.global.repository }}/migrations:{{ .Values.global.tag | default .Chart.AppVersion }}" imagePullPolicy: {{ default "Always" .Values.backend.imagePullPolicy }} + {{- if not .Values.global.useCrunchy }} envFrom: - secretRef: name: {{ .Release.Name }}-flyway + {{- end }} env: - name: NODE_TLS_REJECT_UNAUTHORIZED value: "0" @@ -45,10 +47,17 @@ spec: value: "10" - name: FLYWAY_LOCATIONS value: "{{- if eq .Release.Namespace "c1c7ed-dev" -}}{{ .Values.global.secrets.flywayLocations.dev }}{{- else if eq .Release.Namespace "c1c7ed-test" -}}{{ .Values.global.secrets.flywayLocations.test }}{{- else if eq .Release.Namespace "c1c7ed-prod" -}}{{ .Values.global.secrets.flywayLocations.prod }}{{- else -}}filesystem:./flyway/sql{{- end }}" + {{- if .Values.global.useCrunchy }} + - name: FLYWAY_URL + valueFrom: + secretKeyRef: + name: postgres-crunchy-pguser-app-{{ .Values.global.tag | trimAll "\"" }} + key: pgbouncer-jdbc-uri + {{- end }} resources: requests: - cpu: 50m - memory: 100Mi + cpu: 300m + memory: 750Mi containers: - name: {{ include "backend.fullname" . 
}} {{- if .Values.backend.securityContext }} @@ -65,6 +74,28 @@ spec: value: info - name: NODE_TLS_REJECT_UNAUTHORIZED value: "0" + {{- if .Values.global.useCrunchy }} + - name: POSTGRESQL_DATABASE + valueFrom: + secretKeyRef: + name: postgres-crunchy-pguser-app-{{ .Values.global.tag | trimAll "\"" }} + key: dbname + - name: POSTGRESQL_HOST + valueFrom: + secretKeyRef: + name: postgres-crunchy-pguser-app-{{ .Values.global.tag | trimAll "\"" }} + key: pgbouncer-host + - name: POSTGRESQL_USER + valueFrom: + secretKeyRef: + name: postgres-crunchy-pguser-app-{{ .Values.global.tag | trimAll "\"" }} + key: user + - name: POSTGRESQL_PASSWORD + valueFrom: + secretKeyRef: + name: postgres-crunchy-pguser-app-{{ .Values.global.tag | trimAll "\"" }} + key: password + {{- end }} ports: - name: http containerPort: {{ .Values.backend.service.targetPort }} diff --git a/charts/app/values.yaml b/charts/app/values.yaml index 07f82aadd..81d95c720 100644 --- a/charts/app/values.yaml +++ b/charts/app/values.yaml @@ -68,6 +68,8 @@ global: domain: "apps.silver.devops.gov.bc.ca" # it is apps.gold.devops.gov.bc.ca for gold cluster #-- the database Alias gives a nice way to switch to different databases, crunchy, patroni ... etc. databaseAlias: bitnami-pg + #-- use crunchy for the database, it is optional + useCrunchy: false #-- the components of the application, backend. backend: diff --git a/charts/crunchy/Chart.yaml b/charts/crunchy/Chart.yaml new file mode 100644 index 000000000..223f5dfef --- /dev/null +++ b/charts/crunchy/Chart.yaml @@ -0,0 +1,26 @@ +apiVersion: v2 +name: crunchy +description: A Helm chart for Kubernetes deployment. +icon: https://www.nicepng.com/png/detail/521-5211827_bc-icon-british-columbia-government-logo.png + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. 
+# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 5.5.1 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "5.5.1" + diff --git a/charts/crunchy/templates/PostgresCluster.yaml b/charts/crunchy/templates/PostgresCluster.yaml new file mode 100644 index 000000000..2fc074f27 --- /dev/null +++ b/charts/crunchy/templates/PostgresCluster.yaml @@ -0,0 +1,258 @@ +{{- if .Values.crunchy.enabled}} +apiVersion: postgres-operator.crunchydata.com/v1beta1 +kind: PostgresCluster +metadata: + name: {{ template "crunchy-postgres.fullname" . }} + labels: {{ include "crunchy-postgres.labels" . | nindent 4 }} +spec: + metadata: + labels: {{ include "crunchy-postgres.labels" . 
| nindent 6 }} + {{ if .Values.crunchy.crunchyImage }} + image: {{ .Values.crunchy.crunchyImage }} + {{ end }} + imagePullPolicy: {{.Values.crunchy.imagePullPolicy}} + postgresVersion: {{ .Values.crunchy.postgresVersion }} + {{ if .Values.crunchy.postGISVersion }} + postGISVersion: {{ .Values.crunchy.postGISVersion | quote }} + {{ end }} + postgresVersion: {{ .Values.crunchy.postgresVersion }} + {{- if and .Values.crunchy.clone .Values.crunchy.clone.enabled }} # enabled in disaster recovery scenario + dataSource: + {{- if .Values.crunchy.clone.s3.enabled}} + pgbackrest: + stanza: {{ .Values.crunchy.instances.name }} + configuration: + - secret: + name: {{ .Release.Name }}-s3-secret + global: + repo2-s3-uri-style: path # This is mandatory since the backups are path based. + repo2-path: {{ .Values.crunchy.clone.path }} # path to the backup where cluster will bootstrap from + repo: + name: repo2 # hardcoded since repo 2, it is always backed up to object storage. + s3: + bucket: {{ .Values.crunchy.pgBackRest.s3.bucket }} + endpoint: {{ .Values.crunchy.pgBackRest.s3.endpoint }} + region: "ca-central-1" + {{- end}} + {{- if .Values.crunchy.clone.pvc.enabled}} + postgresCluster: + clusterName: {{ template "crunchy-postgres.fullname" . 
}} + repoName: repo1 + {{- end}} + {{- end}} + {{- if .Values.crunchy.pgmonitor.enabled }} + monitoring: + pgmonitor: + # this stuff is for the "exporter" container in the "postgres-cluster-ha" set of pods + exporter: + {{ if .Values.crunchy.pgmonitor.exporter.image}} + image: {{ .Values.crunchy.pgmonitor.exporter.image}} + {{ end }} + resources: + requests: + cpu: {{ .Values.crunchy.pgmonitor.exporter.requests.cpu }} + memory: {{ .Values.crunchy.pgmonitor.exporter.requests.memory }} + limits: + cpu: {{ .Values.crunchy.pgmonitor.exporter.limits.cpu }} + memory: {{ .Values.crunchy.pgmonitor.exporter.limits.memory }} + + {{ end }} + + instances: + - name: {{ .Values.crunchy.instances.name }} + {{- if .Values.crunchy.instances.metadata }} + metadata: + {{- toYaml .Values.crunchy.instances.metadata | nindent 8 }} + {{- end }} + replicas: {{ .Values.crunchy.instances.replicas }} + resources: + requests: + cpu: {{ .Values.crunchy.instances.requests.cpu }} + memory: {{ .Values.crunchy.instances.requests.memory }} + limits: + cpu: {{ .Values.crunchy.instances.limits.cpu }} + memory: {{ .Values.crunchy.instances.limits.memory }} + + sidecars: + replicaCertCopy: + resources: + requests: + cpu: {{ .Values.crunchy.instances.replicaCertCopy.requests.cpu }} + memory: {{ .Values.crunchy.instances.replicaCertCopy.requests.memory }} + limits: + cpu: {{ .Values.crunchy.instances.replicaCertCopy.limits.cpu }} + memory: {{ .Values.crunchy.instances.replicaCertCopy.limits.memory }} + dataVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: {{ .Values.crunchy.instances.dataVolumeClaimSpec.storage }} + storageClassName: {{ .Values.crunchy.instances.dataVolumeClaimSpec.storageClassName }} + walVolumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: {{ .Values.crunchy.instances.dataVolumeClaimSpec.walStorage }} + storageClassName: {{ .Values.crunchy.instances.dataVolumeClaimSpec.storageClassName }} + affinity: + 
podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + postgres-operator.crunchydata.com/cluster: + {{ template "crunchy-postgres.fullname" . }} + postgres-operator.crunchydata.com/instance-set: {{ .Values.crunchy.instances.name }} + + users: + - name: {{ .Values.global.config.dbName }} + databases: + - {{ .Values.global.config.dbName }} + options: "SUPERUSER CREATEDB CREATEROLE" + - name: postgres + databases: + - postgres + - {{ .Values.global.config.dbName }} + - name: {{ .Values.global.config.dbName }}proxy # this user lets dev connect to postgres via pgbouncer from local system + databases: + - {{ .Values.global.config.dbName }} + - postgres + {{- if .Values.crunchy.pgBackRest.enabled }} + backups: + pgbackrest: + {{ if .Values.crunchy.pgBackRest.image }} + image: {{ .Values.crunchy.pgBackRest.image }} + {{ end }} + {{- if .Values.crunchy.pgBackRest.s3.enabled}} + configuration: + - secret: + name: {{ .Release.Name }}-s3-secret + {{- end }} + global: + repo1-retention-full: {{ .Values.crunchy.pgBackRest.pvc.retention | quote }} + repo1-retention-full-type: {{ .Values.crunchy.pgBackRest.pvc.retentionFullType }} + {{- if .Values.crunchy.pgBackRest.s3.enabled}} + repo2-retention-full: {{ .Values.crunchy.pgBackRest.s3.retention | quote }} + repo2-retention-full-type: {{ .Values.crunchy.pgBackRest.retentionFullType }} + repo2-path: '{{ .Values.crunchy.pgBackRest.backupPath }}/{{ .Values.crunchy.pgBackRest.clusterCounter}}' + repo2-s3-uri-style: path + {{- end }} + repos: + + - name: repo1 + schedules: + full: {{ .Values.crunchy.pgBackRest.pvc.fullBackupSchedule }} + incremental: {{ .Values.crunchy.pgBackRest.pvc.incrementalBackupSchedule }} + volume: + volumeClaimSpec: + accessModes: + - {{ .Values.crunchy.pgBackRest.pvc.volume.accessModes }} + resources: + requests: + storage: {{ .Values.crunchy.pgBackRest.pvc.volume.storage }} + storageClassName: 
{{ .Values.crunchy.pgBackRest.pvc.volume.storageClassName }} + {{- if .Values.crunchy.pgBackRest.s3.enabled}} + - name: repo2 + schedules: + full: {{ .Values.crunchy.pgBackRest.s3.fullBackupSchedule }} + incremental: {{ .Values.crunchy.pgBackRest.s3.incrementalBackupSchedule }} + s3: + bucket: {{ .Values.crunchy.pgBackRest.s3.bucket | quote }} + endpoint: {{ .Values.crunchy.pgBackRest.s3.endpoint | quote }} + region: "ca-central-1" + {{- end }} + {{- if and .Values.crunchy.restore .Values.crunchy.restore.enabled }} + restore: + enabled: {{ .Values.crunchy.restore.enabled }} + repoName: {{ .Values.crunchy.restore.repoName }} + options: + - --type=time + - --target="{{ .Values.crunchy.restore.target }}" + {{- end }} + # this stuff is for the "pgbackrest" container (the only non-init container) in the "postgres-crunchy-repo-host" pod + repoHost: + resources: + requests: + cpu: {{ .Values.crunchy.pgBackRest.repoHost.requests.cpu }} + memory: {{ .Values.crunchy.pgBackRest.repoHost.requests.memory }} + limits: + cpu: {{ .Values.crunchy.pgBackRest.repoHost.limits.cpu }} + memory: {{ .Values.crunchy.pgBackRest.repoHost.limits.memory }} + sidecars: + # this stuff is for the "pgbackrest" container in the "postgres-crunchy-ha" set of pods + pgbackrest: + resources: + requests: + cpu: {{ .Values.crunchy.pgBackRest.sidecars.requests.cpu }} + memory: {{ .Values.crunchy.pgBackRest.sidecars.requests.memory }} + limits: + cpu: {{ .Values.crunchy.pgBackRest.sidecars.limits.cpu }} + memory: {{ .Values.crunchy.pgBackRest.sidecars.limits.memory }} + pgbackrestConfig: + resources: + requests: + cpu: {{ .Values.crunchy.pgBackRest.sidecars.requests.cpu }} + memory: {{ .Values.crunchy.pgBackRest.sidecars.requests.memory }} + limits: + cpu: {{ .Values.crunchy.pgBackRest.sidecars.limits.cpu }} + memory: {{ .Values.crunchy.pgBackRest.sidecars.limits.memory }} + jobs: + resources: + requests: + cpu: {{ .Values.crunchy.pgBackRest.jobs.requests.cpu }} + memory: {{ 
.Values.crunchy.pgBackRest.jobs.requests.memory }} + limits: + cpu: {{ .Values.crunchy.pgBackRest.jobs.limits.cpu }} + memory: {{ .Values.crunchy.pgBackRest.jobs.limits.memory }} + {{- end }} + patroni: + dynamicConfiguration: + postgresql: + pg_hba: {{ toYaml .Values.crunchy.patroni.postgresql.pg_hba | nindent 10 }} + parameters: + log_min_duration_statement: {{ .Values.crunchy.patroni.postgresql.parameters.log_min_duration_statement }} + shared_buffers: {{ .Values.crunchy.patroni.postgresql.parameters.shared_buffers }} + wal_buffers: {{ .Values.crunchy.patroni.postgresql.parameters.wal_buffers }} + work_mem: {{ .Values.crunchy.patroni.postgresql.parameters.work_mem }} + min_wal_size: {{ .Values.crunchy.patroni.postgresql.parameters.min_wal_size }} + max_wal_size: {{ .Values.crunchy.patroni.postgresql.parameters.max_wal_size }} + max_slot_wal_keep_size: {{ .Values.crunchy.patroni.postgresql.parameters.max_slot_wal_keep_size }} + effective_io_concurrency: {{ .Values.crunchy.patroni.postgresql.parameters.effective_io_concurrency }} + {{- if and .Values.crunchy.proxy .Values.crunchy.proxy.enabled }} + proxy: + pgBouncer: + config: + global: + client_tls_sslmode: disable + pool_mode: session + max_db_connections: {{ .Values.crunchy.proxy.pgBouncer.maxConnections | quote }} + {{ if .Values.crunchy.proxy.pgBouncer.image }} + image: {{ .Values.crunchy.proxy.pgBouncer.image }} + {{ end }} + replicas: {{ .Values.crunchy.proxy.pgBouncer.replicas }} + # these resources are for the "pgbouncer" container in the "postgres-crunchy-ha-pgbouncer" set of pods + # there is a sidecar in these pods which are not mentioned here, but the requests/limits are teeny weeny by default so no worries there. 
+ resources: + requests: + cpu: {{ .Values.crunchy.proxy.pgBouncer.requests.cpu }} + memory: {{ .Values.crunchy.proxy.pgBouncer.requests.memory }} + limits: + cpu: {{ .Values.crunchy.proxy.pgBouncer.limits.cpu }} + memory: {{ .Values.crunchy.proxy.pgBouncer.limits.memory }} + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 1 + podAffinityTerm: + topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + postgres-operator.crunchydata.com/cluster: + {{ template "crunchy-postgres.fullname" . }} + postgres-operator.crunchydata.com/role: pgbouncer + {{- end }} + {{- end }} diff --git a/charts/crunchy/templates/_helpers.tpl b/charts/crunchy/templates/_helpers.tpl new file mode 100644 index 000000000..d6e89c6cc --- /dev/null +++ b/charts/crunchy/templates/_helpers.tpl @@ -0,0 +1,70 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "crunchy-postgres.name" -}} +{{- default "crunchy" .Values.crunchy.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "crunchy-postgres.fullname" -}} +{{- if .Values.crunchy.fullnameOverride }} +{{- .Values.crunchy.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default "crunchy" .Values.crunchy.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "crunchy-postgres.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "crunchy-postgres.labels" -}} +helm.sh/chart: {{ include "crunchy-postgres.chart" . }} +{{ include "crunchy-postgres.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "crunchy-postgres.selectorLabels" -}} +app.kubernetes.io/name: {{ include "crunchy-postgres.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "crunchy-postgres.serviceAccountName" -}} +{{- if .Values.crunchy.serviceAccount.create }} +{{- default (include "crunchy-postgres.fullname" .) .Values.crunchy.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.crunchy.serviceAccount.name }} +{{- end }} +{{- end }} + +{{- define "crunchy.s3" }} +{{- if .Values.crunchy.pgBackRest.s3.enabled}} +[global] +repo2-s3-key={{ .Values.crunchy.pgBackRest.s3.accessKey }} +repo2-s3-key-secret={{ .Values.crunchy.pgBackRest.s3.secretKey }} +{{ end }} +{{ end }} diff --git a/charts/crunchy/templates/cm.yaml b/charts/crunchy/templates/cm.yaml new file mode 100644 index 000000000..f765947ce --- /dev/null +++ b/charts/crunchy/templates/cm.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "crunchy-postgres.fullname" . }} + labels: {{ include "crunchy-postgres.labels" . 
| nindent 4 }} +data: + bootstrap.sql: | + CREATE EXTENSION IF NOT EXISTS "uuid-ossp" WITH SCHEMA public; \ No newline at end of file diff --git a/charts/crunchy/templates/knp.yaml b/charts/crunchy/templates/knp.yaml new file mode 100644 index 000000000..9624fdf74 --- /dev/null +++ b/charts/crunchy/templates/knp.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ .Release.Name }} + labels: {{- include "crunchy-postgres.selectorLabels" . | nindent 4 }} +spec: + podSelector: + matchLabels: + postgres-operator.crunchydata.com/cluster: {{ template "crunchy-postgres.fullname" . }} + ingress: + - from: + - podSelector: + matchLabels: + postgres-operator.crunchydata.com/cluster: {{ template "crunchy-postgres.fullname" . }} + policyTypes: + - Ingress diff --git a/charts/crunchy/templates/secret.yaml b/charts/crunchy/templates/secret.yaml new file mode 100644 index 000000000..3d05f9eb0 --- /dev/null +++ b/charts/crunchy/templates/secret.yaml @@ -0,0 +1,11 @@ +{{- if and .Values.crunchy.enabled .Values.crunchy.pgBackRest.s3.enabled}} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{.Release.Name}}-s3-secret +type: Opaque +stringData: + s3.conf: |- + {{ include "crunchy.s3" . | nindent 8}} +{{- end }} diff --git a/charts/crunchy/values.yaml b/charts/crunchy/values.yaml new file mode 100644 index 000000000..70d9adede --- /dev/null +++ b/charts/crunchy/values.yaml @@ -0,0 +1,147 @@ +global: + config: + dbName: app #test +crunchy: # enable it for TEST and PROD, for PR based pipelines simply use single postgres + enabled: true + crunchyImage: artifacts.developer.gov.bc.ca/bcgov-docker-local/crunchy-postgres-gis:ubi8-16.2-3.4-0 + postgresVersion: 16 + postGISVersion: '3.4' + imagePullPolicy: IfNotPresent + # enable below to start a new crunchy cluster after disaster from a backed-up location, crunchy will choose the best place to recover from. 
+ # follow https://access.crunchydata.com/documentation/postgres-operator/5.2.0/tutorial/disaster-recovery/ + # Clone From Backups Stored in S3 / GCS / Azure Blob Storage + clone: + enabled: false + s3: + enabled: false + pvc: + enabled: false + path: ~ # provide the proper path to source the cluster. ex: /backups/cluster/version/1, if current new cluster being created, this should be current cluster version -1, ideally + # enable this to go back to a specific timestamp in history in the current cluster. + # follow https://access.crunchydata.com/documentation/postgres-operator/5.2.0/tutorial/disaster-recovery/ + # Perform an In-Place Point-in-time-Recovery (PITR) + restore: + repoName: ~ # provide repo name + enabled: false + target: ~ # 2024-03-24 17:16:00-07 this is the target timestamp to go back to in current cluster + instances: + name: db # high availability + replicas: 2 # 2 or 3 for high availability in TEST and PROD. + metadata: + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '9187' + dataVolumeClaimSpec: + storage: 2Gi + storageClassName: netapp-block-standard + walStorage: 1Gi + + requests: + cpu: 200m + memory: 256Mi + limits: + cpu: 400m + memory: 512Mi + replicaCertCopy: + requests: + cpu: 1m + memory: 32Mi + limits: + cpu: 50m + memory: 64Mi + + pgBackRest: + enabled: true + backupPath: /backups/test/cluster/version # change it for PROD, create values-prod.yaml + clusterCounter: 1 # this is the number to identify what is the current counter for the cluster, each time it is cloned it should be incremented. 
+ image: artifacts.developer.gov.bc.ca/bcgov-docker-local/crunchy-pgbackrest:ubi8-2.49-0 + # If retention-full-type set to 'count' then the oldest backups will expire when the number of backups reach the number defined in retention + # If retention-full-type set to 'time' then the number defined in retention will take that many days worth of full backups before expiration + retentionFullType: count + s3: + enabled: false # if enabled, below must be provided + retention: 7 # one weeks backup in object store. + bucket: ~ + endpoint: ~ + accessKey: ~ + secretKey: ~ + fullBackupSchedule: ~ # make sure to provide values here, if s3 is enabled. + incrementalBackupSchedule: ~ # make sure to provide values here, if s3 is enabled. + pvc: + retention: 1 # one day hot active backup in pvc + retentionFullType: count + fullBackupSchedule: 0 8 * * * + incrementalBackupSchedule: 0 0,12 * * * # every 12 hour incremental + volume: + accessModes: "ReadWriteOnce" + storage: 3Gi + storageClassName: netapp-file-backup + + config: + requests: + cpu: 15m + memory: 64Mi + limits: + cpu: 60m + memory: 128Mi + repoHost: + requests: + cpu: 60m + memory: 256Mi + limits: + cpu: 150m + memory: 512Mi + sidecars: + requests: + cpu: 20m + memory: 32Mi + limits: + cpu: 40m + memory: 128Mi + jobs: + requests: + cpu: 100m + memory: 256Mi + limits: + cpu: 300m + memory: 512Mi + + patroni: + postgresql: + pg_hba: + - "host all all 0.0.0.0/0 md5" + - "host all all ::1/128 md5" + parameters: + shared_buffers: 128MB # default is 128MB; a good tuned default for shared_buffers is 25% of the memory allocated to the pod + wal_buffers: -1 # this can be set to -1 to automatically set as 1/32 of shared_buffers or 64kB, whichever is larger + min_wal_size: 32MB + max_wal_size: 200MB # default is 1GB + max_slot_wal_keep_size: 128MB # default is -1, allowing unlimited wal growth when replicas fall behind + work_mem: 2MB # a work_mem value of 2 MB + log_min_duration_statement: 1000ms # log queries taking more than 1 
second to respond. + effective_io_concurrency: 20 #If the underlying disk can handle multiple simultaneous requests, then you should increase the effective_io_concurrency value and test what value provides the best application performance. All BCGov clusters have SSD. + + proxy: + enabled: true + pgBouncer: + image: # it's not necessary to specify an image as the images specified in the Crunchy Postgres Operator will be pulled by default + replicas: 1 + requests: + cpu: 20m + memory: 64Mi + limits: + cpu: 40m + memory: 128Mi + maxConnections: 10 # make sure less than postgres max connections + + # Postgres Cluster resource values: + pgmonitor: + enabled: false + exporter: + image: # it's not necessary to specify an image as the images specified in the Crunchy Postgres Operator will be pulled by default + requests: + cpu: 1m + memory: 16Mi + limits: + cpu: 35m + memory: 32Mi diff --git a/frontend/src/app/components/containers/complaints/complaint-map-with-server-side-clustering.tsx b/frontend/src/app/components/containers/complaints/complaint-map-with-server-side-clustering.tsx index 194a2a213..25731b468 100644 --- a/frontend/src/app/components/containers/complaints/complaint-map-with-server-side-clustering.tsx +++ b/frontend/src/app/components/containers/complaints/complaint-map-with-server-side-clustering.tsx @@ -197,4 +197,4 @@ export const ComplaintMapWithServerSideClustering: FC = ({ type, searchQu unmappedCount={unmappedCount} /> ); -}; +}; \ No newline at end of file