From 02148549adef3e9980039c32c836f2f6f538ddad Mon Sep 17 00:00:00 2001
From: Nick Phura
Date: Fri, 27 Sep 2024 09:40:54 -0700
Subject: [PATCH 01/13] - Upgrade Postgres from 12.5 to 17-bullseye

- Wrap most instances of "system_user" in double quotes, as it is now a
  reserved word in Postgres (reserved as of Postgres 16).
- Add migration to drop the biohub_dapi_v1 schema.
- Disable the static deploy db setup step.
---
 .github/workflows/deployStatic.yml | 114 +++++++++---------
 .../administrative-activity-repository.ts | 4 +-
 .../project-participation-repository.ts | 6 +-
 .../survey-participation-repository.ts | 4 +-
 api/src/repositories/user-repository.ts | 14 +--
 compose.yml | 3 +-
 database/.docker/db/Dockerfile | 2 +-
 database/.pipeline/lib/db.deploy.js | 4 +-
 database/.pipeline/templates/db.bc.yaml | 4 +-
 database/.pipeline/templates/db.dc.yaml | 106 ++++++----------
 .../prereqs/postgres-postgis.is.yaml | 80 ++++++++++++
 database/Dockerfile | 37 ++++++
 database/README.md | 2 +-
 .../20210225205948_biohub_release.ts | 59 +++++----
 .../20210715170002_security_procedures.ts | 6 +-
 .../20211207105001_remove_user_roles.ts | 2 +-
 .../20211207170011_user_group_table.ts | 2 +-
 .../20221209131602_security_procedures.ts | 6 +-
 ...03000000_user_guid_bceid_basic_business.ts | 16 +--
 ...0230127000000_user_guid_null_constraint.ts | 12 +-
 .../20230727000000_project_groups.ts | 2 +-
 ...801000001_system_user_permission_tables.ts | 28 ++---
 .../20230801000002_api_patch_user_function.ts | 12 +-
 .../20230821000000_new_survey_user_table.ts | 2 +-
 ...0230905000000_update_user_source_system.ts | 2 +-
 .../20231207000001_update_api_set_context.ts | 6 +-
 .../20240722000002_remove_duplicate_users.ts | 42 +++----
 ...241115000000_drop_biohub_dapi_v1_schema.ts | 20 +++
 .../release.0.34/api_get_context_user_id.sql | 4 +-
 .../release.0.34/api_set_context.sql | 6 +-
 .../api_user_is_administrator.sql | 4 +-
 .../src/migrations/release.0.34/biohub.sql | 48 ++++----
 .../migrations/release.0.34/db_setup_up.sql | 2 +-
 .../populate_user_identity_source.sql | 6 +-
 .../release.0.34/project_audit_triggers.sql | 2 +-
 .../release.0.34/project_dapi_views.sql | 2 +-
 .../release.0.34/project_journal_triggers.sql | 2 +-
 .../release.0.34/tr_audit_trigger.sql | 4 +-
 .../release.0.34/tr_journal_trigger.sql | 2 +-
 .../smoke_tests/smoketest_release.1.0.0.sql | 18 +--
 .../smoke_tests/smoketest_release.1.1.0.sql | 18 +--
 .../smoke_tests/smoketest_release.1.2.0.sql | 18 +--
 .../smoke_tests/smoketest_release.1.3.0.sql | 18 +--
 .../smoke_tests/smoketest_release.1.4.0.sql | 18 +--
 .../smoke_tests/smoketest_release.1.5.0.sql | 18 +--
 .../smoke_tests/smoketest_release.1.6.0.sql | 18 +--
 .../src/procedures/api_patch_system_user.ts | 10 +-
 database/src/procedures/api_set_context.ts | 6 +-
 database/src/seeds/01_db_system_users.ts | 8 +-
 .../seeds/03_basic_project_survey_setup.ts | 12 +-
 env_config/env.docker | 3 +-
 scripts/bctw-deployments/main.js | 2 +-
 52 files changed, 484 insertions(+), 362 deletions(-)
 create mode 100644 database/.pipeline/templates/prereqs/postgres-postgis.is.yaml
 create mode 100644 database/Dockerfile
 create mode 100644 database/src/migrations/20241115000000_drop_biohub_dapi_v1_schema.ts

diff --git a/.github/workflows/deployStatic.yml b/.github/workflows/deployStatic.yml
index 6bb5f6a95b..77155df922 100644
--- a/.github/workflows/deployStatic.yml
+++ b/.github/workflows/deployStatic.yml
@@ -431,62 +431,62 @@ jobs:
           DEBUG=* npm run db:deploy -- --pr=$PR_NUMBER --env=$BRANCH --branch=$BRANCH --type=static
 
   # Deploy Database setup image
-  deployDatabaseSetup:
-    name: Deploy
Database Setup Image - runs-on: ubuntu-latest - timeout-minutes: 30 - if: ${{ github.event.pull_request.merged == true }} - env: - PR_NUMBER: ${{ github.event.number }} - BRANCH: ${{ github.base_ref }} - needs: - - scaleDownPods - - buildDatabaseSetup - - deployDatabase - steps: - # Install Node - for `node` and `npm` commands - - name: Setup Node.js - uses: actions/setup-node@v4 - with: - node-version: 20 - - # Load repo from cache - - name: Cache repo - uses: actions/cache@v4 - id: cache-repo - env: - cache-name: cache-repo - with: - path: ${{ github.workspace }}/* - key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ github.sha }} - - # Checkout the branch if not restored via cache - - name: Checkout Target Branch - if: steps.cache-repo.outputs.cache-hit != 'true' - uses: actions/checkout@v4 - - # Install oc, which was removed from the ubuntu-latest image in v24.04 - - name: Install OpenShift CLI tools - uses: redhat-actions/openshift-tools-installer@v1 - with: - oc: "4.14" - - # Log in to OpenShift. - # Note: The secrets needed to log in are NOT available if the PR comes from a FORK. - # PR's must originate from a branch off the original repo or else all openshift `oc` commands will fail. - - name: Log in to OpenShift - run: oc login --token=${{ secrets.TOOLS_SA_TOKEN }} --server=https://api.silver.devops.gov.bc.ca:6443 - - # Install database pipeline node modules - - name: Install database pipeline node modules - working-directory: "database/.pipeline/" - run: npm ci - - # Deploy the database setup image - - name: Deploy Database Setup Image - working-directory: "database/.pipeline/" - run: | - DEBUG=* npm run db-setup:deploy -- --pr=$PR_NUMBER --env=$BRANCH --branch=$BRANCH --type=static + # deployDatabaseSetup: + # name: Deploy Database Setup Image + # runs-on: ubuntu-latest + # timeout-minutes: 30 + # if: ${{ github.event.pull_request.merged == true }} + # env: + # PR_NUMBER: ${{ github.event.number }} + # BRANCH: ${{ github.base_ref }} + # needs: + # - scaleDownPods + # - buildDatabaseSetup + # - deployDatabase + # steps: + # # Install Node - for `node` and `npm` commands + # - name: Setup Node.js + # uses: actions/setup-node@v4 + # with: + # node-version: 20 + + # # Load repo from cache + # - name: Cache repo + # uses: actions/cache@v4 + # id: cache-repo + # env: + # cache-name: cache-repo + # with: + # path: ${{ github.workspace }}/* + # key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ github.sha }} + + # # Checkout the branch if not restored via cache + # - name: Checkout Target Branch + # if: steps.cache-repo.outputs.cache-hit != 'true' + # uses: actions/checkout@v4 + + # # Install oc, which was removed from the ubuntu-latest image in v24.04 + # - name: Install OpenShift CLI tools + # uses: redhat-actions/openshift-tools-installer@v1 + # with: + # oc: "4.14" + + # # Log in to OpenShift. + # # Note: The secrets needed to log in are NOT available if the PR comes from a FORK. + # # PR's must originate from a branch off the original repo or else all openshift `oc` commands will fail. 
+ # - name: Log in to OpenShift + # run: oc login --token=${{ secrets.TOOLS_SA_TOKEN }} --server=https://api.silver.devops.gov.bc.ca:6443 + + # # Install database pipeline node modules + # - name: Install database pipeline node modules + # working-directory: "database/.pipeline/" + # run: npm ci + + # # Deploy the database setup image + # - name: Deploy Database Setup Image + # working-directory: "database/.pipeline/" + # run: | + # DEBUG=* npm run db-setup:deploy -- --pr=$PR_NUMBER --env=$BRANCH --branch=$BRANCH --type=static # Deploy API image deployAPI: @@ -554,7 +554,7 @@ jobs: if: ${{ github.event.pull_request.merged == true }} needs: - deployDatabase - - deployDatabaseSetup + # - deployDatabaseSetup - deployAPI - deployAPP env: diff --git a/api/src/repositories/administrative-activity-repository.ts b/api/src/repositories/administrative-activity-repository.ts index 69a09b41b0..516c362032 100644 --- a/api/src/repositories/administrative-activity-repository.ts +++ b/api/src/repositories/administrative-activity-repository.ts @@ -82,7 +82,7 @@ export class AdministrativeActivityRepository extends BaseRepository { ON aa.administrative_activity_type_id = aat.administrative_activity_type_id LEFT OUTER JOIN - system_user su + "system_user" su ON su.system_user_id = aa.update_user WHERE @@ -223,7 +223,7 @@ export class AdministrativeActivityRepository extends BaseRepository { FROM project_participation pp LEFT JOIN - system_user su + "system_user" su ON pp.system_user_id = su.system_user_id WHERE diff --git a/api/src/repositories/project-participation-repository.ts b/api/src/repositories/project-participation-repository.ts index cc726eaf7f..8aa8b78eb8 100644 --- a/api/src/repositories/project-participation-repository.ts +++ b/api/src/repositories/project-participation-repository.ts @@ -151,7 +151,7 @@ export class ProjectParticipationRepository extends BaseRepository { ON pp.project_role_id = prp.project_role_id LEFT JOIN project_permission pp2 ON pp2.project_permission_id = prp.project_permission_id - LEFT JOIN system_user su + LEFT JOIN "system_user" su ON pp.system_user_id = su.system_user_id LEFT JOIN system_user_role sur @@ -365,7 +365,7 @@ export class ProjectParticipationRepository extends BaseRepository { ON pp.project_role_id = prp.project_role_id LEFT JOIN project_permission pp2 ON pp2.project_permission_id = prp.project_permission_id - LEFT JOIN system_user su + LEFT JOIN "system_user" su ON pp.system_user_id = su.system_user_id LEFT JOIN system_user_role sur @@ -503,7 +503,7 @@ export class ProjectParticipationRepository extends BaseRepository { ON pp.project_role_id = prp.project_role_id LEFT JOIN project_permission pp2 ON pp2.project_permission_id = prp.project_permission_id - LEFT JOIN system_user su + LEFT JOIN "system_user" su ON pp.system_user_id = su.system_user_id LEFT JOIN system_user_role sur diff --git a/api/src/repositories/survey-participation-repository.ts b/api/src/repositories/survey-participation-repository.ts index 5e49fffcc8..1a19c18875 100644 --- a/api/src/repositories/survey-participation-repository.ts +++ b/api/src/repositories/survey-participation-repository.ts @@ -121,7 +121,7 @@ export class SurveyParticipationRepository extends BaseRepository { LEFT JOIN survey_job sj ON sj.survey_job_id = sp.survey_job_id - LEFT JOIN system_user su + LEFT JOIN "system_user" su ON sp.system_user_id = su.system_user_id LEFT JOIN system_user_role sur @@ -192,7 +192,7 @@ export class SurveyParticipationRepository extends BaseRepository { LEFT JOIN survey_job sj ON 
sj.survey_job_id = sp.survey_job_id - LEFT JOIN system_user su + LEFT JOIN "system_user" su ON sp.system_user_id = su.system_user_id LEFT JOIN system_user_role sur diff --git a/api/src/repositories/user-repository.ts b/api/src/repositories/user-repository.ts index b5475c2270..c14ced7d6f 100644 --- a/api/src/repositories/user-repository.ts +++ b/api/src/repositories/user-repository.ts @@ -83,7 +83,7 @@ export class UserRepository extends BaseRepository { su.family_name, su.agency FROM - system_user su + "system_user" su LEFT JOIN system_user_role sur ON @@ -147,7 +147,7 @@ export class UserRepository extends BaseRepository { su.family_name, su.agency FROM - system_user su + "system_user" su LEFT JOIN system_user_role sur ON @@ -205,7 +205,7 @@ export class UserRepository extends BaseRepository { su.family_name, su.agency FROM - system_user su + "system_user" su LEFT JOIN system_user_role sur ON @@ -264,7 +264,7 @@ export class UserRepository extends BaseRepository { ): Promise<{ system_user_id: number }> { const sqlStatement = SQL` INSERT INTO - system_user + "system_user" ( user_guid, user_identity_source_id, @@ -328,7 +328,7 @@ export class UserRepository extends BaseRepository { su.family_name, su.agency FROM - system_user su + "system_user" su LEFT JOIN system_user_role sur ON @@ -369,7 +369,7 @@ export class UserRepository extends BaseRepository { async activateSystemUser(systemUserId: number) { const sqlStatement = SQL` UPDATE - system_user + "system_user" SET record_end_date = NULL WHERE @@ -401,7 +401,7 @@ export class UserRepository extends BaseRepository { async deactivateSystemUser(systemUserId: number) { const sqlStatement = SQL` UPDATE - system_user + "system_user" SET record_end_date = now() WHERE diff --git a/compose.yml b/compose.yml index df5d29b2a9..9575665c87 100644 --- a/compose.yml +++ b/compose.yml @@ -23,10 +23,11 @@ services: - POSTGRES_PASSWORD=${DB_ADMIN_PASS} - POSTGRES_DB=${DB_DATABASE} - PORT=5432 + - PGDATA=${PG_DATA}/${PG_VERSION} networks: - sims-network volumes: - - postgres:/var/lib/postgresql/data + - postgres:${PG_DATA} ## Build the api docker image api: diff --git a/database/.docker/db/Dockerfile b/database/.docker/db/Dockerfile index c76168886e..6a0ff6fb2e 100644 --- a/database/.docker/db/Dockerfile +++ b/database/.docker/db/Dockerfile @@ -2,7 +2,7 @@ # This DockerFile is used for local development (via compose.yml) only. 
# ######################################################################################################## -ARG POSTGRES_VERSION=14.2 +ARG POSTGRES_VERSION=17-bullseye FROM postgres:$POSTGRES_VERSION diff --git a/database/.pipeline/lib/db.deploy.js b/database/.pipeline/lib/db.deploy.js index cbc1485827..ed07608b81 100644 --- a/database/.pipeline/lib/db.deploy.js +++ b/database/.pipeline/lib/db.deploy.js @@ -31,7 +31,7 @@ const dbDeploy = async (settings) => { DATABASE_SERVICE_NAME: `${name}-postgresql${phases[phase].suffix}`, IMAGE_STREAM_NAME: name, IMAGE_STREAM_VERSION: phases.build.tag, - POSTGRESQL_DATABASE: 'biohubbc', + POSTGRES_DB: 'biohubbc', TZ: phases[phase].tz, IMAGE_STREAM_NAMESPACE: phases.build.namespace, VOLUME_CAPACITY: phases[phase].volumeCapacity, @@ -46,7 +46,7 @@ const dbDeploy = async (settings) => { oc.applyRecommendedLabels(objects, name, phase, changeId, instance); oc.importImageStreams(objects, phases[phase].tag, phases.build.namespace, phases.build.tag); - + await oc.applyAndDeploy(objects, instance); }; diff --git a/database/.pipeline/templates/db.bc.yaml b/database/.pipeline/templates/db.bc.yaml index 15bf4cbf7d..7955b588e0 100644 --- a/database/.pipeline/templates/db.bc.yaml +++ b/database/.pipeline/templates/db.bc.yaml @@ -1,7 +1,7 @@ kind: Template apiVersion: template.openshift.io/v1 metadata: - name: postgresql null + name: postgresql parameters: - name: NAME displayName: Name @@ -29,7 +29,7 @@ objects: annotations: from: kind: ImageStreamTag - name: postgis-postgres:12-31 + name: postgres-postgis:17-bullseye namespace: af2668-tools generation: 1 importPolicy: {} diff --git a/database/.pipeline/templates/db.dc.yaml b/database/.pipeline/templates/db.dc.yaml index e4b567fed5..5baadb071c 100644 --- a/database/.pipeline/templates/db.dc.yaml +++ b/database/.pipeline/templates/db.dc.yaml @@ -12,14 +12,18 @@ parameters: name: NAME required: true value: 'postgresql' - - description: The OpenShift ImageStream name. - displayName: IMAGE_STREAM_NAME - name: IMAGE_STREAM_NAME - value: 'postgis-postgres' - - description: The OpenShift Namespace where the ImageStream resides. - displayName: Namespace - name: IMAGE_STREAM_NAMESPACE + - name: IMAGE_STREAM_NAMESPACE + description: The OpenShift Namespace where the ImageStream resides. + required: true value: 'af2668-tools' + - name: IMAGE_STREAM_NAME + description: The OpenShift ImageStream name. + required: true + value: 'postgres-postgis' + - name: IMAGE_STREAM_VERSION + description: Version of PostgreSQL image to be used (9.2, 9.4, 9.5 or latest). + required: true + value: '17-bullseye' - description: The name of the OpenShift Service exposed for the database. displayName: Database Service Name name: DATABASE_SERVICE_NAME @@ -27,31 +31,24 @@ parameters: value: 'postgresql' - description: Username for PostgreSQL user that will be used for accessing the database. displayName: PostgreSQL Connection Username - name: POSTGRESQL_USER - required: true - value: 'postgres' - - description: Admin Username for PostgreSQL user that will be used for accessing the database. - displayName: PostgreSQL Admin Connection Username - name: POSTGRESQL_ADMIN_USER + name: POSTGRES_USER required: true value: 'postgres' - description: Password for the PostgreSQL connection user. displayName: PostgreSQL Connection Password from: '[a-zA-Z0-9]{16}' generate: expression - name: POSTGRESQL_PASSWORD - required: true - - description: Password for the PostgreSQL admin user. 
- displayName: PostgreSQL Admin Connection Password - from: '[a-zA-Z0-9]{16}' - generate: expression - name: POSTGRESQL_ADMIN_PASSWORD + name: POSTGRES_PASSWORD required: true - description: Name of the PostgreSQL database accessed. displayName: PostgreSQL Database Name - name: POSTGRESQL_DATABASE + name: POSTGRES_DB required: true value: 'biohubbc' + - name: PGDATA + description: Path to PostgreSQL data directory + value: '/var/lib/postgresql/data/17' + required: false - name: TZ description: Database timezone required: false @@ -61,19 +58,6 @@ parameters: name: VOLUME_CAPACITY required: true value: '500Mi' - - description: Version of PostgreSQL image to be used (9.2, 9.4, 9.5 or latest). - displayName: Version of PostgreSQL Image - name: IMAGE_STREAM_VERSION - required: true - value: '12-31' - - description: Indicator to enable pgcrypto extension (provided out-of-the-box with PostgreSQL). - displayName: Flag to enable pgcrypto PostgreSQL extension. - name: PGCRYPTO_EXTENSION - value: 'Y' - - description: Indicator to enable postgis extension (from official PostgeSQL YUM repo). - displayName: Flag to enable postgis PostgreSQL extension. - name: POSTGIS_EXTENSION - value: 'Y' - name: CPU_REQUEST value: '50m' - name: CPU_LIMIT @@ -90,16 +74,14 @@ objects: metadata: annotations: template.openshift.io/expose-database_name: "{.data['database-name']}" - template.openshift.io/expose-password: "{.data['database-user-password']}" - template.openshift.io/expose-admin-password: "{.data['database-admin-password']}" - template.openshift.io/expose-admin-username: "{.data['database-user']}" + template.openshift.io/expose-database-user: "{.data['database-admin']}" + template.openshift.io/expose-database-user-password: "{.data['database-admin-password']}" as-copy-of: biohubbc-creds name: '${DATABASE_SERVICE_NAME}' stringData: - database-name: '${POSTGRESQL_DATABASE}' - database-user-password: '${POSTGRESQL_PASSWORD}' - database-admin-password: '${POSTGRESQL_ADMIN_PASSWORD}' - database-user: '${POSTGRESQL_USER}' + database-name: '${POSTGRES_DB}' + database-user: '${POSTGRES_USER}' + database-user-password: '${POSTGRES_PASSWORD}' - kind: Service apiVersion: v1 @@ -165,30 +147,23 @@ objects: containers: - name: postgresql env: - - name: POSTGRESQL_USER + - name: POSTGRES_DB valueFrom: secretKeyRef: - key: database-user + key: database-name name: '${DATABASE_SERVICE_NAME}' - - name: POSTGRESQL_PASSWORD + - name: POSTGRES_USER valueFrom: secretKeyRef: - key: database-user-password + key: database-admin name: '${DATABASE_SERVICE_NAME}' - - name: POSTGRESQL_ADMIN_PASSWORD + - name: POSTGRES_PASSWORD valueFrom: secretKeyRef: key: database-admin-password name: '${DATABASE_SERVICE_NAME}' - - name: POSTGRESQL_DATABASE - valueFrom: - secretKeyRef: - key: database-name - name: '${DATABASE_SERVICE_NAME}' - - name: PGCRYPTO_EXTENSION - value: 'N' - - name: POSTGIS_EXTENSION - value: 'N' + - name: PGDATA + value: '${PGDATA}' - name: PGOPTIONS value: '-c maintenance_work_mem=128MB' - name: PGTZ @@ -209,7 +184,7 @@ objects: - '/bin/sh' - '-i' - '-c' - - psql -h 127.0.0.1 -U $POSTGRESQL_USER -q -d $POSTGRESQL_DATABASE -c 'SELECT 1' + - psql -h 127.0.0.1 -U $POSTGRES_USER -q -d $POSTGRES_DB -c 'SELECT 1' initialDelaySeconds: 5 timeoutSeconds: 1 resources: @@ -219,23 +194,23 @@ objects: limits: cpu: ${CPU_LIMIT} memory: ${MEMORY_LIMIT} - securityContext: - capabilities: {} - privileged: false + securityContext: {} terminationMessagePath: '/dev/termination-log' volumeMounts: - - mountPath: '/var/lib/pgsql/data' - name: 
'${DATABASE_SERVICE_NAME}-data' - - mountPath: '/var/run/postgresql' - name: '${DATABASE_SERVICE_NAME}-run' + - name: '${DATABASE_SERVICE_NAME}-data' + mountPath: '/var/lib/postgresql/data' + - name: '${DATABASE_SERVICE_NAME}-data' + mountPath: '/var/lib/pgsql/data' + # - name: '${DATABASE_SERVICE_NAME}-run' + # mountPath: '/var/run/postgresql' dnsPolicy: ClusterFirst restartPolicy: Always volumes: - name: '${DATABASE_SERVICE_NAME}-data' persistentVolumeClaim: claimName: '${DATABASE_SERVICE_NAME}' - - name: '${DATABASE_SERVICE_NAME}-run' - emptyDir: {} + # - name: '${DATABASE_SERVICE_NAME}-run' + # emptyDir: {} triggers: - imageChangeParams: automatic: true @@ -245,6 +220,5 @@ objects: kind: ImageStreamTag name: '${IMAGE_STREAM_NAME}:${IMAGE_STREAM_VERSION}' namespace: '${IMAGE_STREAM_NAMESPACE}' - lastTriggeredImage: '' type: ImageChange - type: ConfigChange diff --git a/database/.pipeline/templates/prereqs/postgres-postgis.is.yaml b/database/.pipeline/templates/prereqs/postgres-postgis.is.yaml new file mode 100644 index 0000000000..0de77dd345 --- /dev/null +++ b/database/.pipeline/templates/prereqs/postgres-postgis.is.yaml @@ -0,0 +1,80 @@ +# ######################################################################################################## +# Creates a build config that builds the database dockerfile +# Creates an imagestream that references the built database image +# ######################################################################################################## + +kind: Template +apiVersion: template.openshift.io/v1 +metadata: + name: postgresql +parameters: + # Imagestream + - name: IMAGE_STREAM_NAMESPACE + value: af2668-tools + - name: IMAGE_STREAM_NAME + value: postgres-postgis + - name: IMAGE_STREAM_VERSION + value: 17-bullseye + # Dockerfile + - name: SOURCE_CONTEXT_DIR + value: 'database' + - name: SOURCE_REPOSITORY_REF + value: dev + - name: SOURCE_REPOSITORY_URL + value: https://github.com/bcgov/biohubbc.git + - name: DOCKER_FILE_PATH + value: Dockerfile + # Resources + - name: CPU_REQUEST + value: '50m' + - name: CPU_LIMIT + value: '200m' + - name: MEMORY_REQUEST + value: '100Mi' + - name: MEMORY_LIMIT + value: '2Gi' +objects: + - kind: ImageStream + apiVersion: image.openshift.io/v1 + metadata: + name: '${IMAGE_STREAM_NAME}' + labels: + shared: 'true' + spec: + lookupPolicy: + local: false + + - kind: BuildConfig + apiVersion: v1 + metadata: + name: '${IMAGE_STREAM_NAME}' + spec: + failedBuildsHistoryLimit: 5 + nodeSelector: + output: + to: + kind: ImageStreamTag + name: '${IMAGE_STREAM_NAME}:${IMAGE_STREAM_VERSION}' + postCommit: {} + resources: + requests: + cpu: ${CPU_REQUEST} + memory: ${MEMORY_REQUEST} + limits: + cpu: ${CPU_LIMIT} + memory: ${MEMORY_LIMIT} + runPolicy: SerialLatestOnly + source: + type: Git + git: + uri: '${SOURCE_REPOSITORY_URL}' + ref: '${SOURCE_REPOSITORY_REF}' + contextDir: '${SOURCE_CONTEXT_DIR}' + strategy: + type: Docker + dockerStrategy: + dockerfilePath: ${DOCKER_FILE_PATH} + successfulBuildsHistoryLimit: 5 + triggers: + - type: ConfigChange + - type: ImageChange diff --git a/database/Dockerfile b/database/Dockerfile new file mode 100644 index 0000000000..ef069e8398 --- /dev/null +++ b/database/Dockerfile @@ -0,0 +1,37 @@ +# ######################################################################################################## +# This DockerFile is used for Openshift deployments only. 
+# ######################################################################################################## + +ARG POSTGRES_VERSION=17-bullseye + +FROM postgres:$POSTGRES_VERSION + +# read env variables +ARG TZ=America/Vancouver +ARG POSTGIS_VERSION=3 + +ENV PORT=5432 + +# install postgis packages +RUN mkdir -p /opt/apps +RUN apt-get -qq update +RUN apt-get -qq install -y --no-install-recommends postgresql-$PG_MAJOR-postgis-$POSTGIS_VERSION +RUN apt-get -qq install -y --no-install-recommends postgresql-$PG_MAJOR-postgis-$POSTGIS_VERSION-scripts +RUN apt-get -qq install -y --no-install-recommends postgresql-$PG_MAJOR-pgrouting +RUN apt-get -qq install -y --no-install-recommends postgresql-$PG_MAJOR-pgrouting-scripts +RUN apt-get -qq install -y --no-install-recommends postgresql-server-dev-$PG_MAJOR +RUN apt-get -qq install -y --no-install-recommends pgbadger pg-activity wget unzip nano +RUN apt-get -qq purge -y --auto-remove postgresql-server-dev-$PG_MAJOR +RUN apt-get -qq autoremove -y +RUN apt-get -qq clean + +# set time zone +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone + +# copy postgis init script to docker init directory +RUN mkdir -p /docker-entrypoint-initdb.d +COPY ./.docker/db/create_postgis.sql /docker-entrypoint-initdb.d/postgis.sql + +EXPOSE $PORT + +CMD ["postgres"] diff --git a/database/README.md b/database/README.md index cb1c2984c9..bcfe357955 100644 --- a/database/README.md +++ b/database/README.md @@ -6,7 +6,7 @@ | ---------- | ------- | ------------------------------------ | -------------------- | | node | 18.x.x | https://nodejs.org/en/ | JavaScript Runtime | | npm | 10.x.x | https://www.npmjs.com/ | Node Package Manager | -| PostgreSQL | 12.5 | https://www.postgresql.org/download/ | PSQL database | +| PostgreSQL | 17 | https://www.postgresql.org/download/ | PSQL database | | PostGIS | 3 | https://postgis.net/ | GIS (spatial) tools |
diff --git a/database/src/migrations/20210225205948_biohub_release.ts b/database/src/migrations/20210225205948_biohub_release.ts index 03f208a023..57697f634a 100644 --- a/database/src/migrations/20210225205948_biohub_release.ts +++ b/database/src/migrations/20210225205948_biohub_release.ts @@ -3,7 +3,6 @@ import { Knex } from 'knex'; import path from 'path'; const DB_USER_API_PASS = process.env.DB_USER_API_PASS; -const DB_USER_API = process.env.DB_USER_API; const DB_RELEASE = 'release.0.34'; @@ -117,29 +116,39 @@ export async function up(knex: Knex): Promise { -- set up spatial extensions ${create_spatial_extensions} - -- set up biohub schema - create schema if not exists biohub; - GRANT ALL ON SCHEMA biohub TO postgres; - set search_path = biohub, public; - - -- setup biohub api schema - create schema if not exists biohub_dapi_v1; - - -- setup api user - create user ${DB_USER_API} password '${DB_USER_API_PASS}'; - alter schema biohub_dapi_v1 owner to ${DB_USER_API}; + -- Set up biohub schema + CREATE SCHEMA IF NOT EXISTS biohub; - -- Grant rights on biohub_dapi_v1 to biohub_api user - grant all on schema biohub_dapi_v1 to ${DB_USER_API}; - grant all on schema biohub_dapi_v1 to postgres; - alter DEFAULT PRIVILEGES in SCHEMA biohub_dapi_v1 grant ALL on tables to ${DB_USER_API}; - alter DEFAULT PRIVILEGES in SCHEMA biohub_dapi_v1 grant ALL on tables to postgres; - - -- Biohub grants - GRANT USAGE ON SCHEMA biohub TO ${DB_USER_API}; - ALTER DEFAULT PRIVILEGES IN SCHEMA biohub GRANT ALL ON TABLES TO ${DB_USER_API}; - - alter role ${DB_USER_API} set search_path to biohub_dapi_v1, biohub, public, topology; + -- Grant postgres user full access to biohub schema + GRANT ALL ON SCHEMA biohub TO postgres; + -- Set search path for postgres user + SET search_path = "$user", biohub, public; + + -- Set up biohub API schema + CREATE SCHEMA IF NOT EXISTS biohub_dapi_v1; + + -- Setup biohub_api user + CREATE USER biohub_api PASSWORD '${DB_USER_API_PASS}'; + ALTER SCHEMA biohub_dapi_v1 OWNER TO biohub_api; + GRANT USAGE ON SCHEMA biohub TO biohub_api; + -- Set search path for biohub_api user + ALTER ROLE biohub_api SET search_path TO "$user", biohub, public, biohub_dapi_v1; + + -- Grant postgres user full access to biohub_dapi_v1 schema + GRANT ALL ON SCHEMA biohub_dapi_v1 TO biohub_api; + -- Alter default privileges for postgres user to grant access to future biohub_dapi_v1 schema objects + ALTER DEFAULT PRIVILEGES IN SCHEMA biohub_dapi_v1 GRANT ALL ON TABLES TO biohub_api; + + -- Grant biohub_api user full access to biohub_dapi_v1 schema + GRANT ALL ON SCHEMA biohub_dapi_v1 TO postgres; + -- Alter default privileges for biohub_api user to grant access to future biohub_dapi_v1 schema objects + ALTER DEFAULT PRIVILEGES IN SCHEMA biohub_dapi_v1 GRANT ALL ON TABLES TO postgres; + + -- Alter default privileges for biohub_api user to grant access to future biohub schema objects + ALTER DEFAULT PRIVILEGES IN SCHEMA biohub, public GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO biohub_api; + ALTER DEFAULT PRIVILEGES IN SCHEMA biohub, public GRANT EXECUTE ON FUNCTIONS TO biohub_api; + ALTER DEFAULT PRIVILEGES IN SCHEMA biohub, public GRANT USAGE ON TYPES TO biohub_api; + ALTER DEFAULT PRIVILEGES IN SCHEMA biohub, public GRANT USAGE, SELECT ON SEQUENCES TO biohub_api; ${biohub_ddl} ${populate_user_identity_source} @@ -205,7 +214,7 @@ export async function up(knex: Knex): Promise { set role postgres; set search_path = biohub; - grant execute on function biohub.api_set_context(_system_user_identifier 
system_user.user_identifier%type, _user_identity_source_name user_identity_source.name%type) to ${DB_USER_API}; + grant execute on function biohub.api_set_context(_system_user_identifier "system_user".user_identifier%type, _user_identity_source_name user_identity_source.name%type) to biohub_api; `); } @@ -213,6 +222,6 @@ export async function down(knex: Knex): Promise { await knex.raw(` DROP SCHEMA IF EXISTS biohub CASCADE; DROP SCHEMA IF EXISTS biohub_dapi_v1 CASCADE; - DROP USER IF EXISTS ${DB_USER_API}; + DROP USER IF EXISTS biohub_api; `); } diff --git a/database/src/migrations/20210715170002_security_procedures.ts b/database/src/migrations/20210715170002_security_procedures.ts index 0612abb5d6..5db6378d22 100644 --- a/database/src/migrations/20210715170002_security_procedures.ts +++ b/database/src/migrations/20210715170002_security_procedures.ts @@ -10,7 +10,7 @@ export async function up(knex: Knex): Promise { drop function if exists api_set_context; -create or replace function api_set_context(p_system_user_identifier system_user.user_identifier%type, p_user_identity_source_name user_identity_source.name%type) returns system_user.system_user_id%type +create or replace function api_set_context(p_system_user_identifier "system_user".user_identifier%type, p_user_identity_source_name user_identity_source.name%type) returns "system_user".system_user_id%type language plpgsql security invoker set client_min_messages = warning @@ -30,7 +30,7 @@ $$ -- 2012-07-15 Added security related updates and additions -- ******************************************************************* declare - _system_user_id system_user.system_user_id%type; + _system_user_id "system_user".system_user_id%type; _user_identity_source_id user_identity_source.user_identity_source_id%type; _system_role_id system_role.system_role_id%type; begin @@ -39,7 +39,7 @@ begin where name = p_user_identity_source_name and record_end_date is null; - select b.system_user_id into _system_user_id from system_user b + select b.system_user_id into _system_user_id from "system_user" b where b.user_identity_source_id = _user_identity_source_id and b.user_identifier = p_system_user_identifier; diff --git a/database/src/migrations/20211207105001_remove_user_roles.ts b/database/src/migrations/20211207105001_remove_user_roles.ts index aad8af5ba7..373af72377 100644 --- a/database/src/migrations/20211207105001_remove_user_roles.ts +++ b/database/src/migrations/20211207105001_remove_user_roles.ts @@ -43,7 +43,7 @@ export async function up(knex: Knex): Promise { system_role_id = (select system_role_id from system_role where name = '${OLD_SYSTEM_ROLE.PUBLIC_USER}'); - --Delete from system_user + --Delete from "system_user" DELETE FROM system_role WHERE name = '${OLD_SYSTEM_ROLE.GOVERNMENT_USER}'; DELETE FROM system_role WHERE name = '${OLD_SYSTEM_ROLE.EXTERNAL_USER}'; diff --git a/database/src/migrations/20211207170011_user_group_table.ts b/database/src/migrations/20211207170011_user_group_table.ts index 2b01a5f920..caf6011baf 100644 --- a/database/src/migrations/20211207170011_user_group_table.ts +++ b/database/src/migrations/20211207170011_user_group_table.ts @@ -780,7 +780,7 @@ export async function up(knex: Knex): Promise { ALTER TABLE user_user_group ADD CONSTRAINT "Refsystem_uug_1" FOREIGN KEY (system_user_id) - REFERENCES system_user(system_user_id) + REFERENCES "system_user"(system_user_id) ; ALTER TABLE user_user_group ADD CONSTRAINT "Refuug_2" diff --git a/database/src/migrations/20221209131602_security_procedures.ts 
b/database/src/migrations/20221209131602_security_procedures.ts index d74df9da8d..3df7a7841e 100644 --- a/database/src/migrations/20221209131602_security_procedures.ts +++ b/database/src/migrations/20221209131602_security_procedures.ts @@ -10,7 +10,7 @@ export async function up(knex: Knex): Promise { drop function if exists api_set_context; -create or replace function api_set_context(p_system_user_identifier system_user.user_identifier%type, p_user_identity_source_name user_identity_source.name%type) returns system_user.system_user_id%type +create or replace function api_set_context(p_system_user_identifier "system_user".user_identifier%type, p_user_identity_source_name user_identity_source.name%type) returns "system_user".system_user_id%type language plpgsql security invoker set client_min_messages = warning @@ -32,7 +32,7 @@ $$ -- 2022-12-20 removed security concepts -- ******************************************************************* declare - _system_user_id system_user.system_user_id%type; + _system_user_id "system_user".system_user_id%type; _user_identity_source_id user_identity_source.user_identity_source_id%type; begin @@ -40,7 +40,7 @@ begin where name = p_user_identity_source_name and record_end_date is null; - select system_user_id into strict _system_user_id from system_user + select system_user_id into strict _system_user_id from "system_user" where user_identity_source_id = _user_identity_source_id and user_identifier = p_system_user_identifier; diff --git a/database/src/migrations/20230103000000_user_guid_bceid_basic_business.ts b/database/src/migrations/20230103000000_user_guid_bceid_basic_business.ts index c33c279a3b..4bb8e3c3c8 100644 --- a/database/src/migrations/20230103000000_user_guid_bceid_basic_business.ts +++ b/database/src/migrations/20230103000000_user_guid_bceid_basic_business.ts @@ -15,7 +15,7 @@ export async function up(knex: Knex): Promise { SET SCHEMA '${DB_SCHEMA}'; SET SEARCH_PATH = ${DB_SCHEMA_DAPI_V1}; - DROP VIEW IF EXISTS system_user; + DROP VIEW IF EXISTS "system_user"; DROP VIEW IF EXISTS user_identity_source; `); @@ -61,8 +61,8 @@ export async function up(knex: Knex): Promise { // Update default GUIDs await knex.raw(` - UPDATE system_user SET user_guid = 'postgres' WHERE user_identifier LIKE 'postgres'; - UPDATE system_user SET user_guid = 'biohub_api' WHERE user_identifier LIKE 'biohub_api'; + UPDATE "system_user" SET user_guid = 'postgres' WHERE user_identifier LIKE 'postgres'; + UPDATE "system_user" SET user_guid = 'biohub_api' WHERE user_identifier LIKE 'biohub_api'; `); await knex.raw(` @@ -75,7 +75,7 @@ export async function up(knex: Knex): Promise { // Drop the default value for GUIDs await knex.raw(` ALTER TABLE - system_user + "system_user" ALTER COLUMN user_guid DROP DEFAULT; @@ -85,7 +85,7 @@ export async function up(knex: Knex): Promise { await knex.raw(` drop function if exists api_set_context; - create or replace function api_set_context(p_system_user_guid system_user.user_guid%type, p_user_identity_source_name user_identity_source.name%type) returns system_user.system_user_id%type + create or replace function api_set_context(p_system_user_guid "system_user".user_guid%type, p_user_identity_source_name user_identity_source.name%type) returns "system_user".system_user_id%type language plpgsql security invoker set client_min_messages = warning @@ -110,7 +110,7 @@ export async function up(knex: Knex): Promise { -- user identifier -- ******************************************************************* declare - _system_user_id 
system_user.system_user_id%type; + _system_user_id "system_user".system_user_id%type; _user_identity_source_id user_identity_source.user_identity_source_id%type; begin @@ -118,7 +118,7 @@ export async function up(knex: Knex): Promise { where name = p_user_identity_source_name and record_end_date is null; - select system_user_id into strict _system_user_id from system_user + select system_user_id into strict _system_user_id from "system_user" where user_identity_source_id = _user_identity_source_id and user_guid = p_system_user_guid; @@ -138,7 +138,7 @@ export async function up(knex: Knex): Promise { await knex.raw(` SET SEARCH_PATH = ${DB_SCHEMA_DAPI_V1}; - CREATE OR REPLACE VIEW system_user AS SELECT * FROM biohub.system_user; + CREATE OR REPLACE VIEW "system_user" AS SELECT * FROM biohub."system_user"; CREATE OR REPLACE VIEW user_identity_source AS SELECT * FROM biohub.user_identity_source; `); } diff --git a/database/src/migrations/20230127000000_user_guid_null_constraint.ts b/database/src/migrations/20230127000000_user_guid_null_constraint.ts index c459bb0ccb..95e98ebac6 100644 --- a/database/src/migrations/20230127000000_user_guid_null_constraint.ts +++ b/database/src/migrations/20230127000000_user_guid_null_constraint.ts @@ -4,7 +4,7 @@ const DB_SCHEMA = process.env.DB_SCHEMA; const DB_SCHEMA_DAPI_V1 = process.env.DB_SCHEMA_DAPI_V1; /** - * Removes the NULL constraint on user_guid from the system_user table. + * Removes the NULL constraint on user_guid from the "system_user" table. * * @export * @param {Knex} knex @@ -14,19 +14,19 @@ export async function up(knex: Knex): Promise { await knex.raw(` SET search_path = ${DB_SCHEMA_DAPI_V1}; - DROP VIEW system_user; + DROP VIEW "system_user"; SET search_path = ${DB_SCHEMA}; - ALTER TABLE system_user ALTER COLUMN user_guid DROP NOT NULL; + ALTER TABLE "system_user" ALTER COLUMN user_guid DROP NOT NULL; - UPDATE system_user set user_guid = null where user_guid = 'default_guid'; + UPDATE "system_user" set user_guid = null where user_guid = 'default_guid'; - CREATE UNIQUE INDEX system_user_uk1 ON system_user (user_guid); + CREATE UNIQUE INDEX system_user_uk1 ON "system_user" (user_guid); SET search_path = ${DB_SCHEMA_DAPI_V1}; - CREATE OR REPLACE VIEW system_user AS SELECT * FROM ${DB_SCHEMA}.system_user; + CREATE OR REPLACE VIEW "system_user" AS SELECT * FROM ${DB_SCHEMA}."system_user"; `); } diff --git a/database/src/migrations/20230727000000_project_groups.ts b/database/src/migrations/20230727000000_project_groups.ts index 0043073522..01908d2117 100644 --- a/database/src/migrations/20230727000000_project_groups.ts +++ b/database/src/migrations/20230727000000_project_groups.ts @@ -219,7 +219,7 @@ export async function up(knex: Knex): Promise { ALTER TABLE grouping_participation ADD CONSTRAINT grouping_participation_fk2 FOREIGN KEY (system_user_id) - REFERENCES system_user(system_user_id); + REFERENCES "system_user"(system_user_id); ALTER TABLE grouping_participation ADD CONSTRAINT grouping_participation_fk3 FOREIGN KEY (grouping_participation_role_id) diff --git a/database/src/migrations/20230801000001_system_user_permission_tables.ts b/database/src/migrations/20230801000001_system_user_permission_tables.ts index f3fbdc6f24..a923f4dac0 100644 --- a/database/src/migrations/20230801000001_system_user_permission_tables.ts +++ b/database/src/migrations/20230801000001_system_user_permission_tables.ts @@ -70,8 +70,8 @@ export async function up(knex: Knex): Promise { -- Alter tables 
---------------------------------------------------------------------------------------- - -- Note: Include default value temporarily to satisfy existing system_user rows. Default will be removed afterwards. - alter table system_user + -- Note: Include default value temporarily to satisfy existing "system_user" rows. Default will be removed afterwards. + alter table "system_user" ADD COLUMN display_name varchar(100) NOT NULL DEFAULT 'default', ADD COLUMN given_name varchar(100), ADD COLUMN family_name varchar(100), @@ -80,15 +80,15 @@ export async function up(knex: Knex): Promise { ADD COLUMN notes varchar(250), ALTER COLUMN user_identifier DROP NOT NULL; - COMMENT ON COLUMN system_user.display_name IS 'The display name of the user (their IDIR/BCeID display name OR their first and last names combined).'; - COMMENT ON COLUMN system_user.given_name IS 'The given name of the user (often their first name).'; - COMMENT ON COLUMN system_user.family_name IS 'The family name of the user (often their last name).'; - COMMENT ON COLUMN system_user.email IS 'The email address of the user.'; - COMMENT ON COLUMN system_user.agency IS 'The agency the user is associated with.'; - COMMENT ON COLUMN system_user.notes IS 'Notes associated with the record.'; - COMMENT ON COLUMN system_user.user_identifier IS 'The identifier of the user (their IDIR/BCeID username)'; - COMMENT ON COLUMN system_user.user_identity_source_id IS 'Foreign key referencing the user identity source table.'; - COMMENT ON COLUMN system_user.user_guid IS 'The Keycloak GUID of the user.'; + COMMENT ON COLUMN "system_user".display_name IS 'The display name of the user (their IDIR/BCeID display name OR their first and last names combined).'; + COMMENT ON COLUMN "system_user".given_name IS 'The given name of the user (often their first name).'; + COMMENT ON COLUMN "system_user".family_name IS 'The family name of the user (often their last name).'; + COMMENT ON COLUMN "system_user".email IS 'The email address of the user.'; + COMMENT ON COLUMN "system_user".agency IS 'The agency the user is associated with.'; + COMMENT ON COLUMN "system_user".notes IS 'Notes associated with the record.'; + COMMENT ON COLUMN "system_user".user_identifier IS 'The identifier of the user (their IDIR/BCeID username)'; + COMMENT ON COLUMN "system_user".user_identity_source_id IS 'Foreign key referencing the user identity source table.'; + COMMENT ON COLUMN "system_user".user_guid IS 'The Keycloak GUID of the user.'; ---------------------------------------------------------------------------------------- -- Create Indexes and Constraints for table: system_permission @@ -138,7 +138,7 @@ export async function up(knex: Knex): Promise { create or replace view system_role_permission as select * from biohub.system_role_permission; - create or replace view system_user as select * from biohub.system_user; + create or replace view "system_user" as select * from biohub."system_user"; ---------------------------------------------------------------------------------------- -- Populate Tables @@ -187,8 +187,8 @@ export async function up(knex: Knex): Promise { -- Cleanup Temporary Defaults ---------------------------------------------------------------------------------------- - -- Note: Removing default value temporarily added to satisfy existing system_user rows. - alter table system_user + -- Note: Removing default value temporarily added to satisfy existing "system_user" rows. 
+ alter table "system_user" ALTER COLUMN display_name DROP DEFAULT, ALTER COLUMN email DROP DEFAULT; `); diff --git a/database/src/migrations/20230801000002_api_patch_user_function.ts b/database/src/migrations/20230801000002_api_patch_user_function.ts index 825f948668..6a54565eaa 100644 --- a/database/src/migrations/20230801000002_api_patch_user_function.ts +++ b/database/src/migrations/20230801000002_api_patch_user_function.ts @@ -28,7 +28,7 @@ export async function up(knex: Knex): Promise { AS $$ -- ******************************************************************* -- Procedure: api_patch_system_user - -- Purpose: Updates a system_user record if any of the incoming values are not the same as the existing values. + -- Purpose: Updates a "system_user" record if any of the incoming values are not the same as the existing values. -- -- MODIFICATION HISTORY -- Person Date Comments @@ -37,11 +37,11 @@ export async function up(knex: Knex): Promise { -- 2023-08-01 initial release -- ******************************************************************* DECLARE - _system_user system_user%rowtype; + _system_user "system_user"%rowtype; _user_identity_source_id user_identity_source.user_identity_source_id%type; BEGIN -- Attempt to find user based on guid - SELECT * INTO _system_user FROM system_user + SELECT * INTO _system_user FROM "system_user" WHERE user_guid = p_system_user_guid AND record_end_date IS NULL LIMIT 1; @@ -52,7 +52,7 @@ export async function up(knex: Knex): Promise { WHERE name = p_user_identity_source_name AND record_end_date IS NULL; - SELECT * INTO _system_user FROM system_user + SELECT * INTO _system_user FROM "system_user" WHERE user_identity_source_id = _user_identity_source_id AND user_identifier = p_user_identifier LIMIT 1; @@ -64,7 +64,7 @@ export async function up(knex: Knex): Promise { END IF; -- Otherwise, patch the system user record with the latest information passed to this function - UPDATE system_user SET + UPDATE "system_user" SET user_guid = p_system_user_guid, user_identifier = p_user_identifier, email = p_email, @@ -92,7 +92,7 @@ export async function up(knex: Knex): Promise { END; $$; - COMMENT ON FUNCTION api_patch_system_user(varchar, varchar, varchar, varchar, varchar, varchar, varchar, varchar) IS 'Updates a system_user record if any of the incoming values are not the same as the existing values.'; + COMMENT ON FUNCTION api_patch_system_user(varchar, varchar, varchar, varchar, varchar, varchar, varchar, varchar) IS 'Updates a "system_user" record if any of the incoming values are not the same as the existing values.'; `); } diff --git a/database/src/migrations/20230821000000_new_survey_user_table.ts b/database/src/migrations/20230821000000_new_survey_user_table.ts index d383f1a712..9da6795695 100644 --- a/database/src/migrations/20230821000000_new_survey_user_table.ts +++ b/database/src/migrations/20230821000000_new_survey_user_table.ts @@ -89,7 +89,7 @@ export async function up(knex: Knex): Promise { ALTER TABLE survey_participation ADD CONSTRAINT survey_participation_fk3 FOREIGN KEY (system_user_id) - REFERENCES system_user(system_user_id); + REFERENCES "system_user"(system_user_id); -- Add unique end-date key constraint (don't allow 2 entities with the same name and a NULL record_end_date) CREATE UNIQUE INDEX survey_job_nuk1 ON survey_job(name, (record_end_date is NULL)) where record_end_date is null; diff --git a/database/src/migrations/20230905000000_update_user_source_system.ts b/database/src/migrations/20230905000000_update_user_source_system.ts index 
b74b82907a..dd20d9de3a 100644 --- a/database/src/migrations/20230905000000_update_user_source_system.ts +++ b/database/src/migrations/20230905000000_update_user_source_system.ts @@ -14,7 +14,7 @@ export async function up(knex: Knex): Promise { INSERT INTO user_identity_source (name, description, notes, record_effective_date) VALUES ('SYSTEM', 'SYSTEM user source system.', 'A system user.', now()); -- Populate new SIM service account user - insert into system_user (user_identity_source_id, user_identifier, user_guid, display_name, email, record_effective_date, create_date, create_user) + insert into "system_user" (user_identity_source_id, user_identifier, user_guid, display_name, email, record_effective_date, create_date, create_user) values ((select user_identity_source_id from user_identity_source where name = 'SYSTEM' and record_end_date is null), 'service-account-SIMS-SVC-4464', 'SIMS-SVC-4464', 'service-account-SIMS-SVC-4464', 'sims@email.com', now(), now(), 1); `); diff --git a/database/src/migrations/20231207000001_update_api_set_context.ts b/database/src/migrations/20231207000001_update_api_set_context.ts index 26baeb8037..6f7150d104 100644 --- a/database/src/migrations/20231207000001_update_api_set_context.ts +++ b/database/src/migrations/20231207000001_update_api_set_context.ts @@ -13,21 +13,21 @@ export async function up(knex: Knex): Promise { DROP FUNCTION IF EXISTS api_set_context; - CREATE OR REPLACE FUNCTION api_set_context(p_system_user_guid system_user.user_guid%type, p_user_identity_source_name user_identity_source.name%type) RETURNS system_user.system_user_id%type + CREATE OR REPLACE FUNCTION api_set_context(p_system_user_guid "system_user".user_guid%type, p_user_identity_source_name user_identity_source.name%type) RETURNS "system_user".system_user_id%type language plpgsql security invoker SET client_min_messages = warning AS $$ DECLARE - _system_user_id system_user.system_user_id%type; + _system_user_id "system_user".system_user_id%type; _user_identity_source_id user_identity_source.user_identity_source_id%type; BEGIN SELECT user_identity_source_id INTO strict _user_identity_source_id FROM user_identity_source WHERE LOWER(name) = LOWER(p_user_identity_source_name) AND record_end_date IS NULL; - SELECT system_user_id INTO strict _system_user_id FROM system_user + SELECT system_user_id INTO strict _system_user_id FROM "system_user" WHERE user_identity_source_id = _user_identity_source_id AND LOWER(user_guid) = LOWER(p_system_user_guid); diff --git a/database/src/migrations/20240722000002_remove_duplicate_users.ts b/database/src/migrations/20240722000002_remove_duplicate_users.ts index 3b453c24e6..3feee495dc 100644 --- a/database/src/migrations/20240722000002_remove_duplicate_users.ts +++ b/database/src/migrations/20240722000002_remove_duplicate_users.ts @@ -4,7 +4,7 @@ import { Knex } from 'knex'; * Fixes duplicate system_user_ids AND references to duplicate system_user_ids * * Updates the following tables: - * - system_user: Update/end-dates duplicate system_user records. + * - system_user: Update/end-dates duplicate "system_user" records. * - system_user_role: Delete duplicate system_user_role records. * - project_participation: Update system_user_id to the canonical system_user_id, and delete duplicate records. * - survey_participation: Update system_user_id to the canonical system_user_id, and delete duplicate records. 
@@ -45,7 +45,7 @@ export async function up(knex: Knex): Promise { ---------------------------------------------------------------------------------------- WITH - -- Get all system_user records with a unique user_identifier (case-insensitive) and user_identity_source_id, + -- Get all "system_user" records with a unique user_identifier (case-insensitive) and user_identity_source_id, -- preferring the lowest system_user_id WHERE record_end_date is null w_system_user_1 AS ( SELECT @@ -57,7 +57,7 @@ export async function up(knex: Knex): Promise { user_identity_source_id, system_user_id FROM - system_user + "system_user" ORDER BY LOWER(user_identifier), user_identity_source_id, @@ -65,25 +65,25 @@ export async function up(knex: Knex): Promise { system_user_id ), w_system_user_2 AS ( - -- Get all system_user records with a unique user_identifier (case-insensitive) and user_identity_source_id, + -- Get all "system_user" records with a unique user_identifier (case-insensitive) and user_identity_source_id, -- aggregating all additional duplicate system_user_ids into an array SELECT - LOWER(system_user.user_identifier) AS user_identifier, + LOWER("system_user".user_identifier) AS user_identifier, user_identity_source_id, - array_remove(array_agg(system_user.system_user_id), null) duplicate_system_user_ids, + array_remove(array_agg("system_user".system_user_id), null) duplicate_system_user_ids, -- Get the first non-null value for each of the remaining user detail columns - (array_remove(array_agg(system_user.user_guid), null))[1] user_guid, - (array_remove(array_agg(system_user.display_name), null))[1] display_name, - (array_remove(array_agg(system_user.given_name), null))[1] given_name, - (array_remove(array_agg(system_user.family_name), null))[1] family_name, - (array_remove(array_agg(system_user.email), null))[1] email, - (array_remove(array_agg(system_user.agency), null))[1] agency, - (array_remove(array_agg(system_user.notes), null))[1] notes + (array_remove(array_agg("system_user".user_guid), null))[1] user_guid, + (array_remove(array_agg("system_user".display_name), null))[1] display_name, + (array_remove(array_agg("system_user".given_name), null))[1] given_name, + (array_remove(array_agg("system_user".family_name), null))[1] family_name, + (array_remove(array_agg("system_user".email), null))[1] email, + (array_remove(array_agg("system_user".agency), null))[1] agency, + (array_remove(array_agg("system_user".notes), null))[1] notes FROM - system_user + "system_user" GROUP BY - LOWER(system_user.user_identifier), - system_user.user_identity_source_id + LOWER("system_user".user_identifier), + "system_user".user_identity_source_id ), w_system_user_3 AS ( -- Combine the two previous CTEs to get the canonical system_user_id to use when there are duplicate users, and @@ -186,15 +186,15 @@ export async function up(knex: Knex): Promise { USING w_system_user_3 wsu3 WHERE system_user_role.system_user_id = ANY(wsu3.duplicate_system_user_ids) ), - -- Delete duplicate system_user records for duplicate system_user_ids + -- Delete duplicate "system_user" records for duplicate system_user_ids w_delete_duplicate_system_user AS ( - DELETE FROM system_user su + DELETE FROM "system_user" su USING w_system_user_3 wsu3 WHERE su.system_user_id = ANY(wsu3.duplicate_system_user_ids) ), -- Update the user details for the canonical system user record w_update_system_user AS ( - UPDATE system_user su + UPDATE "system_user" su SET user_guid = wsu3.user_guid, display_name = wsu3.display_name, @@ -243,7 +243,7 @@ export async 
function up(knex: Knex): Promise { ---------------------------------------------------------------------------------------- -- Don't allow more than 1 record with the same user_identifier (case-insensitive) AND user_identity_source_id. - CREATE UNIQUE INDEX system_user_uk2 ON system_user(LOWER(user_identifier), user_identity_source_id); + CREATE UNIQUE INDEX system_user_uk2 ON "system_user"(LOWER(user_identifier), user_identity_source_id); -- Don't allow the same system user to have more than one project role within a project. ALTER TABLE biohub.project_participation ADD CONSTRAINT project_participation_uk1 UNIQUE (system_user_id, project_id); @@ -252,7 +252,7 @@ export async function up(knex: Knex): Promise { ALTER TABLE biohub.survey_participation ADD CONSTRAINT survey_participation_uk1 UNIQUE (system_user_id, survey_id); -- Don't allow duplicate user_guid values - CREATE UNIQUE INDEX system_user_uk1 ON system_user (user_guid); + CREATE UNIQUE INDEX system_user_uk1 ON "system_user" (user_guid); `); } diff --git a/database/src/migrations/20241115000000_drop_biohub_dapi_v1_schema.ts b/database/src/migrations/20241115000000_drop_biohub_dapi_v1_schema.ts new file mode 100644 index 0000000000..38d96cd051 --- /dev/null +++ b/database/src/migrations/20241115000000_drop_biohub_dapi_v1_schema.ts @@ -0,0 +1,20 @@ +import { Knex } from 'knex'; + +/** + * Drops the biohub_dapi_v1 schema and all its objects. + * + * This schema only contained views and is no longer needed. + * + * @export + * @param {Knex} knex + * @return {*} {Promise} + */ +export async function up(knex: Knex): Promise { + await knex.raw(` + DROP SCHEMA biohub_dapi_v1 CASCADE; + `); +} + +export async function down(knex: Knex): Promise { + await knex.raw(``); +} diff --git a/database/src/migrations/release.0.34/api_get_context_user_id.sql b/database/src/migrations/release.0.34/api_get_context_user_id.sql index 0b33f7bf4e..36a2d0561a 100644 --- a/database/src/migrations/release.0.34/api_get_context_user_id.sql +++ b/database/src/migrations/release.0.34/api_get_context_user_id.sql @@ -1,5 +1,5 @@ -- api_get_context_user_id.sql -create or replace function api_get_context_user_id() returns system_user.system_user_id%type +create or replace function api_get_context_user_id() returns "system_user".system_user_id%type language plpgsql security invoker stable @@ -16,7 +16,7 @@ $$ -- 2021-01-03 initial release -- ******************************************************************* declare - _system_user_id system_user.system_user_id%type; + _system_user_id "system_user".system_user_id%type; begin select value::integer into _system_user_id from biohub_context_temp where tag = 'user_id'; diff --git a/database/src/migrations/release.0.34/api_set_context.sql b/database/src/migrations/release.0.34/api_set_context.sql index 398205181b..0d97ed748f 100644 --- a/database/src/migrations/release.0.34/api_set_context.sql +++ b/database/src/migrations/release.0.34/api_set_context.sql @@ -1,7 +1,7 @@ -- api_set_context.sql drop function if exists api_set_context; -create or replace function api_set_context(p_system_user_identifier system_user.user_identifier%type, p_user_identity_source_name user_identity_source.name%type) returns system_user.system_user_id%type +create or replace function api_set_context(p_system_user_identifier "system_user".user_identifier%type, p_user_identity_source_name user_identity_source.name%type) returns "system_user".system_user_id%type language plpgsql security invoker set client_min_messages = warning @@ -19,7 +19,7 @@ $$ -- 
2021-04-16 adjusted to accepted defined user identity source -- ******************************************************************* declare - _system_user_id system_user.system_user_id%type; + _system_user_id "system_user".system_user_id%type; _user_identity_source_id user_identity_source.user_identity_source_id%type; begin @@ -27,7 +27,7 @@ begin where name = p_user_identity_source_name and record_end_date is null; - select system_user_id into strict _system_user_id from system_user + select system_user_id into strict _system_user_id from "system_user" where user_identity_source_id = _user_identity_source_id and user_identifier = p_system_user_identifier; diff --git a/database/src/migrations/release.0.34/api_user_is_administrator.sql b/database/src/migrations/release.0.34/api_user_is_administrator.sql index d6713af8a5..87d8dfa20f 100644 --- a/database/src/migrations/release.0.34/api_user_is_administrator.sql +++ b/database/src/migrations/release.0.34/api_user_is_administrator.sql @@ -1,7 +1,7 @@ -- api_user_is_administrator.sql drop function if exists api_user_is_administrator; -create or replace function api_user_is_administrator(p_system_user_id system_user.system_user_id%type default null) returns boolean +create or replace function api_user_is_administrator(p_system_user_id "system_user".system_user_id%type default null) returns boolean language plpgsql security definer stable @@ -18,7 +18,7 @@ $$ -- 2021-06-21 initial release -- ******************************************************************* declare - _system_user_id system_user.system_user_id%type; + _system_user_id "system_user".system_user_id%type; begin if (p_system_user_id is null) then select api_get_context_user_id() into _system_user_id; diff --git a/database/src/migrations/release.0.34/biohub.sql b/database/src/migrations/release.0.34/biohub.sql index e0e70a9cfc..c4e4a3a216 100644 --- a/database/src/migrations/release.0.34/biohub.sql +++ b/database/src/migrations/release.0.34/biohub.sql @@ -2841,10 +2841,10 @@ COMMENT ON TABLE system_role IS 'Agency or Ministry funding the project.' ; -- --- TABLE: system_user +-- TABLE: "system_user" -- -CREATE TABLE system_user( +CREATE TABLE "system_user"( system_user_id integer GENERATED ALWAYS AS IDENTITY (START WITH 1 INCREMENT BY 1), user_identity_source_id integer NOT NULL, user_identifier varchar(200) NOT NULL, @@ -2861,27 +2861,27 @@ CREATE TABLE system_user( -COMMENT ON COLUMN system_user.system_user_id IS 'System generated surrogate primary key identifier.' +COMMENT ON COLUMN "system_user".system_user_id IS 'System generated surrogate primary key identifier.' ; -COMMENT ON COLUMN system_user.user_identity_source_id IS 'System generated surrogate primary key identifier.' +COMMENT ON COLUMN "system_user".user_identity_source_id IS 'System generated surrogate primary key identifier.' ; -COMMENT ON COLUMN system_user.user_identifier IS 'The identifier of the user.' +COMMENT ON COLUMN "system_user".user_identifier IS 'The identifier of the user.' ; -COMMENT ON COLUMN system_user.record_effective_date IS 'Record level effective date.' +COMMENT ON COLUMN "system_user".record_effective_date IS 'Record level effective date.' ; -COMMENT ON COLUMN system_user.record_end_date IS 'Record level end date.' +COMMENT ON COLUMN "system_user".record_end_date IS 'Record level end date.' ; -COMMENT ON COLUMN system_user.create_date IS 'The datetime the record was created.' +COMMENT ON COLUMN "system_user".create_date IS 'The datetime the record was created.' 
; -COMMENT ON COLUMN system_user.create_user IS 'The id of the user who created the record as identified in the system user table.' +COMMENT ON COLUMN "system_user".create_user IS 'The id of the user who created the record as identified in the system user table.' ; -COMMENT ON COLUMN system_user.update_date IS 'The datetime the record was updated.' +COMMENT ON COLUMN "system_user".update_date IS 'The datetime the record was updated.' ; -COMMENT ON COLUMN system_user.update_user IS 'The id of the user who updated the record as identified in the system user table.' +COMMENT ON COLUMN "system_user".update_user IS 'The id of the user who updated the record as identified in the system user table.' ; -COMMENT ON COLUMN system_user.revision_count IS 'Revision count used for concurrency control.' +COMMENT ON COLUMN "system_user".revision_count IS 'Revision count used for concurrency control.' ; -COMMENT ON TABLE system_user IS 'Agency or Ministry funding the project.' +COMMENT ON TABLE "system_user" IS 'Agency or Ministry funding the project.' ; -- @@ -3822,13 +3822,13 @@ CREATE UNIQUE INDEX system_role_nuk1 ON system_role(name, (record_end_date is NU -- INDEX: system_user_nuk1 -- -CREATE UNIQUE INDEX system_user_nuk1 ON system_user(user_identifier, record_end_date, user_identity_source_id) +CREATE UNIQUE INDEX system_user_nuk1 ON "system_user"(user_identifier, record_end_date, user_identity_source_id) ; -- -- INDEX: "Ref120120" -- -CREATE INDEX "Ref120120" ON system_user(user_identity_source_id) +CREATE INDEX "Ref120120" ON "system_user"(user_identity_source_id) ; -- -- INDEX: system_user_role_uk1 @@ -3896,12 +3896,12 @@ CREATE INDEX "Ref78141" ON webform_draft(system_user_id) ALTER TABLE administrative_activity ADD CONSTRAINT "Refsystem_user143" FOREIGN KEY (assigned_system_user_id) - REFERENCES system_user(system_user_id) + REFERENCES "system_user"(system_user_id) ; ALTER TABLE administrative_activity ADD CONSTRAINT "Refsystem_user144" FOREIGN KEY (reported_system_user_id) - REFERENCES system_user(system_user_id) + REFERENCES "system_user"(system_user_id) ; ALTER TABLE administrative_activity ADD CONSTRAINT "Refadministrative_activity_type145" @@ -4016,7 +4016,7 @@ ALTER TABLE permit ADD CONSTRAINT "Refsurvey157" ALTER TABLE permit ADD CONSTRAINT "Refsystem_user171" FOREIGN KEY (system_user_id) - REFERENCES system_user(system_user_id) + REFERENCES "system_user"(system_user_id) ; @@ -4141,7 +4141,7 @@ ALTER TABLE project_participation ADD CONSTRAINT "Refproject148" ALTER TABLE project_participation ADD CONSTRAINT "Refsystem_user149" FOREIGN KEY (system_user_id) - REFERENCES system_user(system_user_id) + REFERENCES "system_user"(system_user_id) ; ALTER TABLE project_participation ADD CONSTRAINT "Refproject_role150" @@ -4181,7 +4181,7 @@ ALTER TABLE security ADD CONSTRAINT "Refsecurity_rule208" ALTER TABLE security ADD CONSTRAINT "Refsystem_user209" FOREIGN KEY (system_user_id) - REFERENCES system_user(system_user_id) + REFERENCES "system_user"(system_user_id) ; ALTER TABLE security ADD CONSTRAINT "Refproject210" @@ -4391,10 +4391,10 @@ ALTER TABLE survey_summary_submission_message ADD CONSTRAINT "Refsummary_submiss -- --- TABLE: system_user +-- TABLE: "system_user" -- -ALTER TABLE system_user ADD CONSTRAINT "Refuser_identity_source120" +ALTER TABLE "system_user" ADD CONSTRAINT "Refuser_identity_source120" FOREIGN KEY (user_identity_source_id) REFERENCES user_identity_source(user_identity_source_id) ; @@ -4406,7 +4406,7 @@ ALTER TABLE system_user ADD CONSTRAINT "Refuser_identity_source120" ALTER 
TABLE system_user_role ADD CONSTRAINT "Refsystem_user139" FOREIGN KEY (system_user_id) - REFERENCES system_user(system_user_id) + REFERENCES "system_user"(system_user_id) ; ALTER TABLE system_user_role ADD CONSTRAINT "Refsystem_role140" @@ -4441,7 +4441,7 @@ ALTER TABLE template_methodology_species ADD CONSTRAINT "Reftemplate187" ALTER TABLE webform_draft ADD CONSTRAINT "Refsystem_user141" FOREIGN KEY (system_user_id) - REFERENCES system_user(system_user_id) + REFERENCES "system_user"(system_user_id) ; diff --git a/database/src/migrations/release.0.34/db_setup_up.sql b/database/src/migrations/release.0.34/db_setup_up.sql index ef0e7edfd6..e90f88829a 100644 --- a/database/src/migrations/release.0.34/db_setup_up.sql +++ b/database/src/migrations/release.0.34/db_setup_up.sql @@ -104,4 +104,4 @@ set role biohub_api; set role postgres; set search_path = biohub; -grant execute on function api_set_context(_system_user_identifier system_user.user_identifier%type, _user_identity_source_name user_identity_source.name%type) to biohub_api; +grant execute on function api_set_context(_system_user_identifier "system_user".user_identifier%type, _user_identity_source_name user_identity_source.name%type) to biohub_api; diff --git a/database/src/migrations/release.0.34/populate_user_identity_source.sql b/database/src/migrations/release.0.34/populate_user_identity_source.sql index 5fd7c40bb3..61bf0c37c9 100644 --- a/database/src/migrations/release.0.34/populate_user_identity_source.sql +++ b/database/src/migrations/release.0.34/populate_user_identity_source.sql @@ -1,13 +1,13 @@ -- populate_user_identity_source.sql -delete from system_user; +delete from "system_user"; delete from user_identity_source; insert into user_identity_source(name, record_effective_date, description, create_date, create_user) values ('DATABASE', now(), 'DATABASE user source system.', now(), 1); insert into user_identity_source(name, record_effective_date, description, create_date, create_user) values ('IDIR', now(), 'IDIR user source system.', now(), 1); insert into user_identity_source(name, record_effective_date, description, create_date, create_user) values ('BCEID', now(), 'BCEID user source system.', now(), 1); -insert into system_user (user_identity_source_id, user_identifier, record_effective_date, create_date, create_user) +insert into "system_user" (user_identity_source_id, user_identifier, record_effective_date, create_date, create_user) values ((select user_identity_source_id from user_identity_source where name = 'DATABASE' and record_end_date is null), 'postgres', now(), now(), 1); -insert into system_user (user_identity_source_id, user_identifier, record_effective_date, create_date, create_user) +insert into "system_user" (user_identity_source_id, user_identifier, record_effective_date, create_date, create_user) values ((select user_identity_source_id from user_identity_source where name = 'DATABASE' and record_end_date is null), 'biohub_api', now(), now(), 1); diff --git a/database/src/migrations/release.0.34/project_audit_triggers.sql b/database/src/migrations/release.0.34/project_audit_triggers.sql index 78fbdb25d9..61099104f8 100644 --- a/database/src/migrations/release.0.34/project_audit_triggers.sql +++ b/database/src/migrations/release.0.34/project_audit_triggers.sql @@ -48,7 +48,7 @@ create trigger audit_template before insert or update or delete on biohub.template for each row execute procedure tr_audit_trigger(); create trigger audit_survey_summary_submission_message before insert or update or delete on 
biohub.survey_summary_submission_message for each row execute procedure tr_audit_trigger(); create trigger audit_system_user_role before insert or update or delete on biohub.system_user_role for each row execute procedure tr_audit_trigger(); - create trigger audit_system_user before insert or update or delete on biohub.system_user for each row execute procedure tr_audit_trigger(); + create trigger audit_system_user before insert or update or delete on biohub."system_user" for each row execute procedure tr_audit_trigger(); create trigger audit_template_methodology_species before insert or update or delete on biohub.template_methodology_species for each row execute procedure tr_audit_trigger(); create trigger audit_user_identity_source before insert or update or delete on biohub.user_identity_source for each row execute procedure tr_audit_trigger(); create trigger audit_system_role before insert or update or delete on biohub.system_role for each row execute procedure tr_audit_trigger(); diff --git a/database/src/migrations/release.0.34/project_dapi_views.sql b/database/src/migrations/release.0.34/project_dapi_views.sql index b51a004988..9f3506e740 100644 --- a/database/src/migrations/release.0.34/project_dapi_views.sql +++ b/database/src/migrations/release.0.34/project_dapi_views.sql @@ -56,7 +56,7 @@ create or replace view system_constant as select * from biohub.system_constant; create or replace view system_metadata_constant as select * from biohub.system_metadata_constant; create or replace view system_role as select * from biohub.system_role; - create or replace view system_user as select * from biohub.system_user; + create or replace view "system_user" as select * from biohub."system_user"; create or replace view system_user_role as select * from biohub.system_user_role; create or replace view template as select * from biohub.template; create or replace view template_methodology_species as select * from biohub.template_methodology_species; diff --git a/database/src/migrations/release.0.34/project_journal_triggers.sql b/database/src/migrations/release.0.34/project_journal_triggers.sql index 33aa6393c3..fd01ebbc83 100644 --- a/database/src/migrations/release.0.34/project_journal_triggers.sql +++ b/database/src/migrations/release.0.34/project_journal_triggers.sql @@ -48,7 +48,7 @@ create trigger journal_template after insert or update or delete on biohub.template for each row execute procedure tr_journal_trigger(); create trigger journal_survey_summary_submission_message after insert or update or delete on biohub.survey_summary_submission_message for each row execute procedure tr_journal_trigger(); create trigger journal_system_user_role after insert or update or delete on biohub.system_user_role for each row execute procedure tr_journal_trigger(); - create trigger journal_system_user after insert or update or delete on biohub.system_user for each row execute procedure tr_journal_trigger(); + create trigger journal_system_user after insert or update or delete on biohub."system_user" for each row execute procedure tr_journal_trigger(); create trigger journal_template_methodology_species after insert or update or delete on biohub.template_methodology_species for each row execute procedure tr_journal_trigger(); create trigger journal_user_identity_source after insert or update or delete on biohub.user_identity_source for each row execute procedure tr_journal_trigger(); create trigger journal_system_role after insert or update or delete on biohub.system_role for each row execute procedure 
tr_journal_trigger(); diff --git a/database/src/migrations/release.0.34/tr_audit_trigger.sql b/database/src/migrations/release.0.34/tr_audit_trigger.sql index ccee0288f2..d031ac7575 100644 --- a/database/src/migrations/release.0.34/tr_audit_trigger.sql +++ b/database/src/migrations/release.0.34/tr_audit_trigger.sql @@ -16,7 +16,7 @@ $$ -- 2021-01-03 initial release -- ******************************************************************* declare - _system_user_id system_user.system_user_id%type; + _system_user_id "system_user".system_user_id%type; _user_identity_source_id user_identity_source.user_identity_source_id%type; begin -- api users will hopefully have created the temp table using an api helper function @@ -26,7 +26,7 @@ begin if (_system_user_id is null) THEN -- look up the database user - select a.system_user_id into strict _system_user_id from system_user a, user_identity_source b + select a.system_user_id into strict _system_user_id from "system_user" a, user_identity_source b where a.user_identity_source_id = b.user_identity_source_id and b.name = 'DATABASE' and user_identifier = user; diff --git a/database/src/migrations/release.0.34/tr_journal_trigger.sql b/database/src/migrations/release.0.34/tr_journal_trigger.sql index ec22bfae37..616c50f0af 100644 --- a/database/src/migrations/release.0.34/tr_journal_trigger.sql +++ b/database/src/migrations/release.0.34/tr_journal_trigger.sql @@ -15,7 +15,7 @@ $$ -- 2021-01-03 initial release -- ******************************************************************* declare - _system_user_id system_user.system_user_id%type; + _system_user_id "system_user".system_user_id%type; old_row json := null; new_row json := null; begin diff --git a/database/src/migrations/smoke_tests/smoketest_release.1.0.0.sql b/database/src/migrations/smoke_tests/smoketest_release.1.0.0.sql index 193c2ba196..76caa91fc5 100644 --- a/database/src/migrations/smoke_tests/smoketest_release.1.0.0.sql +++ b/database/src/migrations/smoke_tests/smoketest_release.1.0.0.sql @@ -6,23 +6,23 @@ set search_path=biohub; do $$ declare _count integer = 0; - _system_user system_user%rowtype; - _system_user_id system_user.system_user_id%type; + _system_user "system_user"%rowtype; + _system_user_id "system_user".system_user_id%type; begin - select * into _system_user from system_user where user_identifier = 'myIDIR'; + select * into _system_user from "system_user" where user_identifier = 'myIDIR'; if _system_user.system_user_id is not null then delete from permit where system_user_id = _system_user.system_user_id; delete from administrative_activity where reported_system_user_id = _system_user.system_user_id; delete from administrative_activity where assigned_system_user_id = _system_user.system_user_id; delete from system_user_role where system_user_id = _system_user.system_user_id; - delete from system_user where system_user_id = _system_user.system_user_id; + delete from "system_user" where system_user_id = _system_user.system_user_id; end if; - insert into system_user (user_identity_source_id, user_identifier, record_effective_date) values ((select user_identity_source_id from user_identity_source where name = 'IDIR' and record_end_date is null), 'myIDIR', now()) returning system_user_id into _system_user_id; + insert into "system_user" (user_identity_source_id, user_identifier, record_effective_date) values ((select user_identity_source_id from user_identity_source where name = 'IDIR' and record_end_date is null), 'myIDIR', now()) returning system_user_id into _system_user_id; insert 
into system_user_role (system_user_id, system_role_id) values (_system_user_id, (select system_role_id from system_role where name = 'System Administrator')); - select count(1) into _count from system_user; - assert _count > 1, 'FAIL system_user'; + select count(1) into _count from "system_user"; + assert _count > 1, 'FAIL "system_user"'; select count(1) into _count from audit_log; assert _count > 1, 'FAIL audit_log'; @@ -40,7 +40,7 @@ declare _project_id project.project_id%type; _survey_id survey.survey_id%type; _count integer = 0; - _system_user_id system_user.system_user_id%type; + _system_user_id "system_user".system_user_id%type; _study_species_id study_species.study_species_id%type; _occurrence_submission_id occurrence_submission.occurrence_submission_id%type; _submission_status_id submission_status.submission_status_id%type; @@ -183,7 +183,7 @@ begin -- test ancillary data delete from webform_draft; - insert into webform_draft (system_user_id, name, data) values ((select system_user_id from system_user limit 1), 'my draft name', '{ "customer": "John Doe", "items": {"product": "Beer","qty": 6}}'); + insert into webform_draft (system_user_id, name, data) values ((select system_user_id from "system_user" limit 1), 'my draft name', '{ "customer": "John Doe", "items": {"product": "Beer","qty": 6}}'); select count(1) into _count from webform_draft; assert _count = 1, 'FAIL webform_draft'; diff --git a/database/src/migrations/smoke_tests/smoketest_release.1.1.0.sql b/database/src/migrations/smoke_tests/smoketest_release.1.1.0.sql index f862ee4681..907d7e31d2 100644 --- a/database/src/migrations/smoke_tests/smoketest_release.1.1.0.sql +++ b/database/src/migrations/smoke_tests/smoketest_release.1.1.0.sql @@ -6,23 +6,23 @@ set search_path=biohub; do $$ declare _count integer = 0; - _system_user system_user%rowtype; - _system_user_id system_user.system_user_id%type; + _system_user "system_user"%rowtype; + _system_user_id "system_user".system_user_id%type; begin - select * into _system_user from system_user where user_identifier = 'myIDIR'; + select * into _system_user from "system_user" where user_identifier = 'myIDIR'; if _system_user.system_user_id is not null then delete from permit where system_user_id = _system_user.system_user_id; delete from administrative_activity where reported_system_user_id = _system_user.system_user_id; delete from administrative_activity where assigned_system_user_id = _system_user.system_user_id; delete from system_user_role where system_user_id = _system_user.system_user_id; - delete from system_user where system_user_id = _system_user.system_user_id; + delete from "system_user" where system_user_id = _system_user.system_user_id; end if; - insert into system_user (user_identity_source_id, user_identifier, record_effective_date) values ((select user_identity_source_id from user_identity_source where name = 'IDIR' and record_end_date is null), 'myIDIR', now()) returning system_user_id into _system_user_id; + insert into "system_user" (user_identity_source_id, user_identifier, record_effective_date) values ((select user_identity_source_id from user_identity_source where name = 'IDIR' and record_end_date is null), 'myIDIR', now()) returning system_user_id into _system_user_id; insert into system_user_role (system_user_id, system_role_id) values (_system_user_id, (select system_role_id from system_role where name = 'System Administrator')); - select count(1) into _count from system_user; - assert _count > 1, 'FAIL system_user'; + select count(1) into _count from 
"system_user"; + assert _count > 1, 'FAIL "system_user"'; select count(1) into _count from audit_log; assert _count > 1, 'FAIL audit_log'; @@ -40,7 +40,7 @@ declare _project_id project.project_id%type; _survey_id survey.survey_id%type; _count integer = 0; - _system_user_id system_user.system_user_id%type; + _system_user_id "system_user".system_user_id%type; _study_species_id study_species.study_species_id%type; _occurrence_submission_id occurrence_submission.occurrence_submission_id%type; _submission_status_id submission_status.submission_status_id%type; @@ -213,7 +213,7 @@ begin -- test ancillary data delete from webform_draft; - insert into webform_draft (system_user_id, name, data) values ((select system_user_id from system_user limit 1), 'my draft name', '{ "customer": "John Doe", "items": {"product": "Beer","qty": 6}}'); + insert into webform_draft (system_user_id, name, data) values ((select system_user_id from "system_user" limit 1), 'my draft name', '{ "customer": "John Doe", "items": {"product": "Beer","qty": 6}}'); select count(1) into _count from webform_draft; assert _count = 1, 'FAIL webform_draft'; diff --git a/database/src/migrations/smoke_tests/smoketest_release.1.2.0.sql b/database/src/migrations/smoke_tests/smoketest_release.1.2.0.sql index 54221caa6e..88ab2e47a8 100644 --- a/database/src/migrations/smoke_tests/smoketest_release.1.2.0.sql +++ b/database/src/migrations/smoke_tests/smoketest_release.1.2.0.sql @@ -6,23 +6,23 @@ set search_path=biohub; do $$ declare _count integer = 0; - _system_user system_user%rowtype; - _system_user_id system_user.system_user_id%type; + _system_user "system_user"%rowtype; + _system_user_id "system_user".system_user_id%type; begin - select * into _system_user from system_user where user_identifier = 'myIDIR'; + select * into _system_user from "system_user" where user_identifier = 'myIDIR'; if _system_user.system_user_id is not null then delete from permit where system_user_id = _system_user.system_user_id; delete from administrative_activity where reported_system_user_id = _system_user.system_user_id; delete from administrative_activity where assigned_system_user_id = _system_user.system_user_id; delete from system_user_role where system_user_id = _system_user.system_user_id; - delete from system_user where system_user_id = _system_user.system_user_id; + delete from "system_user" where system_user_id = _system_user.system_user_id; end if; - insert into system_user (user_identity_source_id, user_identifier, record_effective_date) values ((select user_identity_source_id from user_identity_source where name = 'IDIR' and record_end_date is null), 'myIDIR', now()) returning system_user_id into _system_user_id; + insert into "system_user" (user_identity_source_id, user_identifier, record_effective_date) values ((select user_identity_source_id from user_identity_source where name = 'IDIR' and record_end_date is null), 'myIDIR', now()) returning system_user_id into _system_user_id; insert into system_user_role (system_user_id, system_role_id) values (_system_user_id, (select system_role_id from system_role where name = 'System Administrator')); - select count(1) into _count from system_user; - assert _count > 1, 'FAIL system_user'; + select count(1) into _count from "system_user"; + assert _count > 1, 'FAIL "system_user"'; select count(1) into _count from audit_log; assert _count > 1, 'FAIL audit_log'; @@ -40,7 +40,7 @@ declare _project_id project.project_id%type; _survey_id survey.survey_id%type; _count integer = 0; - _system_user_id 
system_user.system_user_id%type; + _system_user_id "system_user".system_user_id%type; _study_species_id study_species.study_species_id%type; _occurrence_submission_id occurrence_submission.occurrence_submission_id%type; _submission_status_id submission_status.submission_status_id%type; @@ -214,7 +214,7 @@ begin -- test ancillary data delete from webform_draft; - insert into webform_draft (system_user_id, name, data) values ((select system_user_id from system_user limit 1), 'my draft name', '{ "customer": "John Doe", "items": {"product": "Beer","qty": 6}}'); + insert into webform_draft (system_user_id, name, data) values ((select system_user_id from "system_user" limit 1), 'my draft name', '{ "customer": "John Doe", "items": {"product": "Beer","qty": 6}}'); select count(1) into _count from webform_draft; assert _count = 1, 'FAIL webform_draft'; diff --git a/database/src/migrations/smoke_tests/smoketest_release.1.3.0.sql b/database/src/migrations/smoke_tests/smoketest_release.1.3.0.sql index 8dcb656eff..b171efab6c 100644 --- a/database/src/migrations/smoke_tests/smoketest_release.1.3.0.sql +++ b/database/src/migrations/smoke_tests/smoketest_release.1.3.0.sql @@ -6,23 +6,23 @@ set search_path=biohub; do $$ declare _count integer = 0; - _system_user system_user%rowtype; - _system_user_id system_user.system_user_id%type; + _system_user "system_user"%rowtype; + _system_user_id "system_user".system_user_id%type; begin - select * into _system_user from system_user where user_identifier = 'myIDIR'; + select * into _system_user from "system_user" where user_identifier = 'myIDIR'; if _system_user.system_user_id is not null then delete from permit where system_user_id = _system_user.system_user_id; delete from administrative_activity where reported_system_user_id = _system_user.system_user_id; delete from administrative_activity where assigned_system_user_id = _system_user.system_user_id; delete from system_user_role where system_user_id = _system_user.system_user_id; - delete from system_user where system_user_id = _system_user.system_user_id; + delete from "system_user" where system_user_id = _system_user.system_user_id; end if; - insert into system_user (user_identity_source_id, user_identifier, record_effective_date) values ((select user_identity_source_id from user_identity_source where name = 'IDIR' and record_end_date is null), 'myIDIR', now()) returning system_user_id into _system_user_id; + insert into "system_user" (user_identity_source_id, user_identifier, record_effective_date) values ((select user_identity_source_id from user_identity_source where name = 'IDIR' and record_end_date is null), 'myIDIR', now()) returning system_user_id into _system_user_id; insert into system_user_role (system_user_id, system_role_id) values (_system_user_id, (select system_role_id from system_role where name = 'System Administrator')); - select count(1) into _count from system_user; - assert _count > 1, 'FAIL system_user'; + select count(1) into _count from "system_user"; + assert _count > 1, 'FAIL "system_user"'; select count(1) into _count from audit_log; assert _count > 1, 'FAIL audit_log'; @@ -40,7 +40,7 @@ declare _project_id project.project_id%type; _survey_id survey.survey_id%type; _count integer = 0; - _system_user_id system_user.system_user_id%type; + _system_user_id "system_user".system_user_id%type; _study_species_id study_species.study_species_id%type; _occurrence_submission_id occurrence_submission.occurrence_submission_id%type; _submission_status_id submission_status.submission_status_id%type; 
@@ -216,7 +216,7 @@ begin -- test ancillary data delete from webform_draft; - insert into webform_draft (system_user_id, name, data) values ((select system_user_id from system_user limit 1), 'my draft name', '{ "customer": "John Doe", "items": {"product": "Beer","qty": 6}}'); + insert into webform_draft (system_user_id, name, data) values ((select system_user_id from "system_user" limit 1), 'my draft name', '{ "customer": "John Doe", "items": {"product": "Beer","qty": 6}}'); select count(1) into _count from webform_draft; assert _count = 1, 'FAIL webform_draft'; diff --git a/database/src/migrations/smoke_tests/smoketest_release.1.4.0.sql b/database/src/migrations/smoke_tests/smoketest_release.1.4.0.sql index 8dcb656eff..b171efab6c 100644 --- a/database/src/migrations/smoke_tests/smoketest_release.1.4.0.sql +++ b/database/src/migrations/smoke_tests/smoketest_release.1.4.0.sql @@ -6,23 +6,23 @@ set search_path=biohub; do $$ declare _count integer = 0; - _system_user system_user%rowtype; - _system_user_id system_user.system_user_id%type; + _system_user "system_user"%rowtype; + _system_user_id "system_user".system_user_id%type; begin - select * into _system_user from system_user where user_identifier = 'myIDIR'; + select * into _system_user from "system_user" where user_identifier = 'myIDIR'; if _system_user.system_user_id is not null then delete from permit where system_user_id = _system_user.system_user_id; delete from administrative_activity where reported_system_user_id = _system_user.system_user_id; delete from administrative_activity where assigned_system_user_id = _system_user.system_user_id; delete from system_user_role where system_user_id = _system_user.system_user_id; - delete from system_user where system_user_id = _system_user.system_user_id; + delete from "system_user" where system_user_id = _system_user.system_user_id; end if; - insert into system_user (user_identity_source_id, user_identifier, record_effective_date) values ((select user_identity_source_id from user_identity_source where name = 'IDIR' and record_end_date is null), 'myIDIR', now()) returning system_user_id into _system_user_id; + insert into "system_user" (user_identity_source_id, user_identifier, record_effective_date) values ((select user_identity_source_id from user_identity_source where name = 'IDIR' and record_end_date is null), 'myIDIR', now()) returning system_user_id into _system_user_id; insert into system_user_role (system_user_id, system_role_id) values (_system_user_id, (select system_role_id from system_role where name = 'System Administrator')); - select count(1) into _count from system_user; - assert _count > 1, 'FAIL system_user'; + select count(1) into _count from "system_user"; + assert _count > 1, 'FAIL "system_user"'; select count(1) into _count from audit_log; assert _count > 1, 'FAIL audit_log'; @@ -40,7 +40,7 @@ declare _project_id project.project_id%type; _survey_id survey.survey_id%type; _count integer = 0; - _system_user_id system_user.system_user_id%type; + _system_user_id "system_user".system_user_id%type; _study_species_id study_species.study_species_id%type; _occurrence_submission_id occurrence_submission.occurrence_submission_id%type; _submission_status_id submission_status.submission_status_id%type; @@ -216,7 +216,7 @@ begin -- test ancillary data delete from webform_draft; - insert into webform_draft (system_user_id, name, data) values ((select system_user_id from system_user limit 1), 'my draft name', '{ "customer": "John Doe", "items": {"product": "Beer","qty": 6}}'); + insert 
into webform_draft (system_user_id, name, data) values ((select system_user_id from "system_user" limit 1), 'my draft name', '{ "customer": "John Doe", "items": {"product": "Beer","qty": 6}}'); select count(1) into _count from webform_draft; assert _count = 1, 'FAIL webform_draft'; diff --git a/database/src/migrations/smoke_tests/smoketest_release.1.5.0.sql b/database/src/migrations/smoke_tests/smoketest_release.1.5.0.sql index 8f7f3c0af1..8376092d5e 100644 --- a/database/src/migrations/smoke_tests/smoketest_release.1.5.0.sql +++ b/database/src/migrations/smoke_tests/smoketest_release.1.5.0.sql @@ -6,23 +6,23 @@ set search_path=biohub; do $$ declare _count integer = 0; - _system_user system_user%rowtype; - _system_user_id system_user.system_user_id%type; + _system_user "system_user"%rowtype; + _system_user_id "system_user".system_user_id%type; begin - select * into _system_user from system_user where user_identifier = 'myIDIR'; + select * into _system_user from "system_user" where user_identifier = 'myIDIR'; if _system_user.system_user_id is not null then delete from permit where system_user_id = _system_user.system_user_id; delete from administrative_activity where reported_system_user_id = _system_user.system_user_id; delete from administrative_activity where assigned_system_user_id = _system_user.system_user_id; delete from system_user_role where system_user_id = _system_user.system_user_id; - delete from system_user where system_user_id = _system_user.system_user_id; + delete from "system_user" where system_user_id = _system_user.system_user_id; end if; - insert into system_user (user_identity_source_id, user_identifier, record_effective_date) values ((select user_identity_source_id from user_identity_source where name = 'IDIR' and record_end_date is null), 'myIDIR', now()) returning system_user_id into _system_user_id; + insert into "system_user" (user_identity_source_id, user_identifier, record_effective_date) values ((select user_identity_source_id from user_identity_source where name = 'IDIR' and record_end_date is null), 'myIDIR', now()) returning system_user_id into _system_user_id; insert into system_user_role (system_user_id, system_role_id) values (_system_user_id, (select system_role_id from system_role where name = 'System Administrator')); - select count(1) into _count from system_user; - assert _count > 1, 'FAIL system_user'; + select count(1) into _count from "system_user"; + assert _count > 1, 'FAIL "system_user"'; select count(1) into _count from audit_log; assert _count > 1, 'FAIL audit_log'; @@ -40,7 +40,7 @@ declare _project_id project.project_id%type; _survey_id survey.survey_id%type; _count integer = 0; - _system_user_id system_user.system_user_id%type; + _system_user_id "system_user".system_user_id%type; _study_species_id study_species.study_species_id%type; _occurrence_submission_id occurrence_submission.occurrence_submission_id%type; _submission_status_id submission_status.submission_status_id%type; @@ -226,7 +226,7 @@ begin -- test ancillary data delete from webform_draft; - insert into webform_draft (system_user_id, name, data) values ((select system_user_id from system_user limit 1), 'my draft name', '{ "customer": "John Doe", "items": {"product": "Beer","qty": 6}}'); + insert into webform_draft (system_user_id, name, data) values ((select system_user_id from "system_user" limit 1), 'my draft name', '{ "customer": "John Doe", "items": {"product": "Beer","qty": 6}}'); select count(1) into _count from webform_draft; assert _count = 1, 'FAIL webform_draft'; diff 
--git a/database/src/migrations/smoke_tests/smoketest_release.1.6.0.sql b/database/src/migrations/smoke_tests/smoketest_release.1.6.0.sql index 549ed7c849..5aa1db1456 100644 --- a/database/src/migrations/smoke_tests/smoketest_release.1.6.0.sql +++ b/database/src/migrations/smoke_tests/smoketest_release.1.6.0.sql @@ -6,23 +6,23 @@ set search_path=biohub; do $$ declare _count integer = 0; - _system_user system_user%rowtype; - _system_user_id system_user.system_user_id%type; + _system_user "system_user"%rowtype; + _system_user_id "system_user".system_user_id%type; begin - select * into _system_user from system_user where user_identifier = 'myIDIR'; + select * into _system_user from "system_user" where user_identifier = 'myIDIR'; if _system_user.system_user_id is not null then delete from permit where system_user_id = _system_user.system_user_id; delete from administrative_activity where reported_system_user_id = _system_user.system_user_id; delete from administrative_activity where assigned_system_user_id = _system_user.system_user_id; delete from system_user_role where system_user_id = _system_user.system_user_id; - delete from system_user where system_user_id = _system_user.system_user_id; + delete from "system_user" where system_user_id = _system_user.system_user_id; end if; - insert into system_user (user_identity_source_id, user_identifier, record_effective_date) values ((select user_identity_source_id from user_identity_source where name = 'IDIR' and record_end_date is null), 'myIDIR', now()) returning system_user_id into _system_user_id; + insert into "system_user" (user_identity_source_id, user_identifier, record_effective_date) values ((select user_identity_source_id from user_identity_source where name = 'IDIR' and record_end_date is null), 'myIDIR', now()) returning system_user_id into _system_user_id; insert into system_user_role (system_user_id, system_role_id) values (_system_user_id, (select system_role_id from system_role where name = 'System Administrator')); - select count(1) into _count from system_user; - assert _count > 1, 'FAIL system_user'; + select count(1) into _count from "system_user"; + assert _count > 1, 'FAIL "system_user"'; select count(1) into _count from audit_log; assert _count > 1, 'FAIL audit_log'; @@ -40,7 +40,7 @@ declare _project_id project.project_id%type; _survey_id survey.survey_id%type; _count integer = 0; - _system_user_id system_user.system_user_id%type; + _system_user_id "system_user".system_user_id%type; _study_species_id study_species.study_species_id%type; _occurrence_submission_id occurrence_submission.occurrence_submission_id%type; _submission_status_id submission_status.submission_status_id%type; @@ -216,7 +216,7 @@ begin -- test ancillary data delete from webform_draft; - insert into webform_draft (system_user_id, name, data) values ((select system_user_id from system_user limit 1), 'my draft name', '{ "customer": "John Doe", "items": {"product": "Beer","qty": 6}}'); + insert into webform_draft (system_user_id, name, data) values ((select system_user_id from "system_user" limit 1), 'my draft name', '{ "customer": "John Doe", "items": {"product": "Beer","qty": 6}}'); select count(1) into _count from webform_draft; assert _count = 1, 'FAIL webform_draft'; diff --git a/database/src/procedures/api_patch_system_user.ts b/database/src/procedures/api_patch_system_user.ts index 8803be960e..af04c5f44d 100644 --- a/database/src/procedures/api_patch_system_user.ts +++ b/database/src/procedures/api_patch_system_user.ts @@ -36,17 +36,17 @@ export async 
function seed(knex: Knex): Promise { SET client_min_messages TO 'warning' AS $$ DECLARE - _system_user system_user%rowtype; + _system_user "system_user"%rowtype; BEGIN -- Attempt to find user based on guid - SELECT * INTO _system_user FROM system_user + SELECT * INTO _system_user FROM "system_user" WHERE LOWER(user_guid) = LOWER(p_system_user_guid) AND record_end_date IS NULL LIMIT 1; -- Otherwise, attempt to find user based on identifier and identity source IF NOT found THEN - SELECT * INTO _system_user FROM system_user + SELECT * INTO _system_user FROM "system_user" WHERE user_identity_source_id = ( SELECT user_identity_source_id FROM user_identity_source WHERE LOWER(name) = LOWER(p_user_identity_source_name) @@ -63,7 +63,7 @@ export async function seed(knex: Knex): Promise { END IF; -- Otherwise, patch the system user record with the latest information passed to this function - UPDATE system_user SET + UPDATE "system_user" SET user_guid = p_system_user_guid, user_identifier = p_user_identifier, email = p_email, @@ -91,6 +91,6 @@ export async function seed(knex: Knex): Promise { END; $$; - COMMENT ON FUNCTION api_patch_system_user(varchar, varchar, varchar, varchar, varchar, varchar, varchar, varchar) IS 'Updates a system_user record if any of the incoming values are not the same as the existing values.'; + COMMENT ON FUNCTION api_patch_system_user(varchar, varchar, varchar, varchar, varchar, varchar, varchar, varchar) IS 'Updates a "system_user" record if any of the incoming values are not the same as the existing values.'; `); } diff --git a/database/src/procedures/api_set_context.ts b/database/src/procedures/api_set_context.ts index f37a5969d5..67dfeaf5ce 100644 --- a/database/src/procedures/api_set_context.ts +++ b/database/src/procedures/api_set_context.ts @@ -14,21 +14,21 @@ export async function seed(knex: Knex): Promise { DROP FUNCTION IF EXISTS api_set_context; - CREATE OR REPLACE FUNCTION api_set_context(p_system_user_guid system_user.user_guid%type, p_user_identity_source_name user_identity_source.name%type) RETURNS system_user.system_user_id%type + CREATE OR REPLACE FUNCTION api_set_context(p_system_user_guid "system_user".user_guid%type, p_user_identity_source_name user_identity_source.name%type) RETURNS "system_user".system_user_id%type language plpgsql security invoker SET client_min_messages = warning AS $$ DECLARE - _system_user_id system_user.system_user_id%type; + _system_user_id "system_user".system_user_id%type; _user_identity_source_id user_identity_source.user_identity_source_id%type; BEGIN SELECT user_identity_source_id INTO strict _user_identity_source_id FROM user_identity_source WHERE LOWER(name) = LOWER(p_user_identity_source_name) AND record_end_date IS NULL; - SELECT system_user_id INTO strict _system_user_id FROM system_user + SELECT system_user_id INTO strict _system_user_id FROM "system_user" WHERE user_identity_source_id = _user_identity_source_id AND LOWER(user_guid) = LOWER(p_system_user_guid); diff --git a/database/src/seeds/01_db_system_users.ts b/database/src/seeds/01_db_system_users.ts index be1729f084..13ba534775 100644 --- a/database/src/seeds/01_db_system_users.ts +++ b/database/src/seeds/01_db_system_users.ts @@ -132,7 +132,7 @@ const getSystemUserSQL = (systemUser: SystemUserSeed) => ` SELECT user_identifier FROM - system_user + "system_user" WHERE LOWER(user_identifier) = LOWER('${systemUser.identifier}'); `; @@ -143,7 +143,7 @@ const getSystemUserSQL = (systemUser: SystemUserSeed) => ` * @param {SystemUserSeed} systemUser */ const 
insertSystemUserSQL = (systemUser: SystemUserSeed) => ` - INSERT INTO system_user ( + INSERT INTO "system_user" ( user_identity_source_id, user_identifier, user_guid, @@ -161,7 +161,7 @@ const insertSystemUserSQL = (systemUser: SystemUserSeed) => ` LOWER('${systemUser.user_guid}'), now(), now(), - (SELECT system_user_id from system_user where LOWER(user_identifier) = LOWER('${DB_ADMIN}')), + (SELECT system_user_id from "system_user" where LOWER(user_identifier) = LOWER('${DB_ADMIN}')), '${systemUser.display_name}', '${systemUser.given_name}', '${systemUser.family_name}', @@ -184,7 +184,7 @@ const insertSystemUserRoleSQL = (systemUser: SystemUserSeed) => ` system_user_id, system_role_id ) VALUES ( - (SELECT system_user_id from system_user where LOWER(user_identifier) = LOWER('${systemUser.identifier}')), + (SELECT system_user_id from "system_user" where LOWER(user_identifier) = LOWER('${systemUser.identifier}')), (SELECT system_role_id from system_role where LOWER(name) = LOWER('${systemUser.role_name}')) ); `; diff --git a/database/src/seeds/03_basic_project_survey_setup.ts b/database/src/seeds/03_basic_project_survey_setup.ts index 89eef14992..e551b4c74d 100644 --- a/database/src/seeds/03_basic_project_survey_setup.ts +++ b/database/src/seeds/03_basic_project_survey_setup.ts @@ -172,7 +172,7 @@ const insertSurveyParticipationData = (surveyId: number) => ` SELECT system_user_id FROM - system_user su + "system_user" su WHERE su.user_identifier = '${PROJECT_SEEDER_USER_IDENTIFIER}' ), 1) @@ -391,7 +391,7 @@ const insertProjectParticipationData = (projectId: number) => ` SELECT system_user_id FROM - system_user su + "system_user" su WHERE su.user_identifier = '${PROJECT_SEEDER_USER_IDENTIFIER}' ), 1) @@ -760,8 +760,8 @@ const insertAccessRequest = () => ` VALUES ( (SELECT administrative_activity_status_type_id FROM administrative_activity_status_type ORDER BY random() LIMIT 1), (SELECT administrative_activity_type_id FROM administrative_activity_type WHERE name = 'System Access'), - (SELECT system_user_id FROM system_user ORDER BY random() LIMIT 1), - (SELECT system_user_id FROM system_user ORDER BY random() LIMIT 1), + (SELECT system_user_id FROM "system_user" ORDER BY random() LIMIT 1), + (SELECT system_user_id FROM "system_user" ORDER BY random() LIMIT 1), $$${faker.lorem.sentences(2)}$$, jsonb_build_object( 'reason', '${faker.lorem.sentences(1)}', @@ -799,7 +799,7 @@ const insertSystemAlert = () => ` NULL, '${faker.helpers.arrayElement(['info', 'success', 'warning', 'error'])}', (CASE WHEN random() < 0.5 THEN NULL ELSE (CURRENT_DATE - INTERVAL '30 days') END), - (SELECT system_user_id FROM system_user ORDER BY random() LIMIT 1), - (SELECT system_user_id FROM system_user ORDER BY random() LIMIT 1) + (SELECT system_user_id FROM "system_user" ORDER BY random() LIMIT 1), + (SELECT system_user_id FROM "system_user" ORDER BY random() LIMIT 1) ); `; diff --git a/env_config/env.docker b/env_config/env.docker index 5c4bce03cb..8f2be0a881 100644 --- a/env_config/env.docker +++ b/env_config/env.docker @@ -126,7 +126,7 @@ CB_API_HOST=https://moe-critterbase-api-dev.apps.silver.devops.gov.bc.ca/api # # See `biohubbc-creds` secret in openshift # ------------------------------------------------------------------------------ -POSTGRES_VERSION=14.2 +POSTGRES_VERSION=17-bullseye POSTGIS_VERSION=3 DB_HOST=db DB_ADMIN=postgres @@ -138,6 +138,7 @@ DB_DATABASE=biohubbc DB_SCHEMA=biohub DB_SCHEMA_DAPI_V1=biohub_dapi_v1 DB_TZ=America/Vancouver +PG_DATA=/var/lib/postgresql/data # 
------------------------------------------------------------------------------ # KeyCloak Configuration for Keycloak Common Hosted Single Sign-on (CSS) diff --git a/scripts/bctw-deployments/main.js b/scripts/bctw-deployments/main.js index 37ccad0508..769ff04b9b 100755 --- a/scripts/bctw-deployments/main.js +++ b/scripts/bctw-deployments/main.js @@ -161,7 +161,7 @@ async function main() { const project = data[pIndex]; sql += `WITH p AS (INSERT INTO project (name, objectives, coordinator_first_name, coordinator_last_name, coordinator_email_address) VALUES ($$Caribou - ${project.herd} - BCTW Telemetry$$, $$BCTW telemetry deployments for ${project.herd} Caribou$$, $$${CONFIG.first_name}$$, $$${CONFIG.last_name}$$, $$${CONFIG.email}$$) RETURNING project_id - ), ppp AS (INSERT INTO project_participation (project_id, system_user_id, project_role_id) SELECT project_id, (select system_user_id from system_user where user_identifier = $$mauberti$$), (select project_role_id from project_role where name = $$${CONFIG.project_role}$$) FROM p + ), ppp AS (INSERT INTO project_participation (project_id, system_user_id, project_role_id) SELECT project_id, (select system_user_id from "system_user" where user_identifier = $$mauberti$$), (select project_role_id from project_role where name = $$${CONFIG.project_role}$$) FROM p ) `; for (let sIndex = 0; sIndex < project.surveys.length; sIndex++) { From e053059254d9215ba85694c4d992f3c42f8efe9d Mon Sep 17 00:00:00 2001 From: Nick Phura Date: Fri, 15 Nov 2024 13:39:15 -0800 Subject: [PATCH 02/13] Update env-config to check for new required database env vars --- api/src/utils/env-config.ts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/api/src/utils/env-config.ts b/api/src/utils/env-config.ts index 201d89f4dd..d335e321b0 100644 --- a/api/src/utils/env-config.ts +++ b/api/src/utils/env-config.ts @@ -18,11 +18,13 @@ export const EnvSchema = z.object({ API_PORT: ZodEnvNumber, // Database + POSTGRES_VERSION: ZodEnvString, DB_HOST: ZodEnvString, DB_PORT: ZodEnvNumber, DB_USER_API: ZodEnvString, DB_USER_API_PASS: ZodEnvString, DB_DATABASE: ZodEnvString, + PG_DATA: ZodEnvString, // Keycloak KEYCLOAK_HOST: ZodEnvString, From 694c4fca6fd0614e40676cce1feddbd9c98d5432 Mon Sep 17 00:00:00 2001 From: Nick Phura Date: Fri, 15 Nov 2024 13:46:48 -0800 Subject: [PATCH 03/13] Increase database setup memory --- database/.pipeline/lib/db.setup.deploy.js | 2 +- database/.pipeline/templates/db.setup.dc.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/database/.pipeline/lib/db.setup.deploy.js b/database/.pipeline/lib/db.setup.deploy.js index 08c9aeb14f..1a7d6fcfc4 100644 --- a/database/.pipeline/lib/db.setup.deploy.js +++ b/database/.pipeline/lib/db.setup.deploy.js @@ -74,7 +74,7 @@ const dbSetupDeploy = async (settings) => { CPU_REQUEST: '50m', CPU_LIMIT: '1000m', MEMORY_REQUEST: '100Mi', - MEMORY_LIMIT: '1.5Gi' + MEMORY_LIMIT: '1.75Gi' } }) ); diff --git a/database/.pipeline/templates/db.setup.dc.yaml b/database/.pipeline/templates/db.setup.dc.yaml index a384ae55a5..7e8e392dec 100644 --- a/database/.pipeline/templates/db.setup.dc.yaml +++ b/database/.pipeline/templates/db.setup.dc.yaml @@ -39,7 +39,7 @@ parameters: - name: MEMORY_REQUEST value: '100Mi' - name: MEMORY_LIMIT - value: '1.5Gi' + value: '1.75Gi' objects: - kind: Pod apiVersion: v1 From 1255b40f014edb2a83000ccea05b72baf881eec9 Mon Sep 17 00:00:00 2001 From: Nick Phura Date: Fri, 15 Nov 2024 13:49:42 -0800 Subject: [PATCH 04/13] Fix wrong env var name in compose --- compose.yml | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/compose.yml b/compose.yml index 9575665c87..cf1f73a2ef 100644 --- a/compose.yml +++ b/compose.yml @@ -23,7 +23,7 @@ services: - POSTGRES_PASSWORD=${DB_ADMIN_PASS} - POSTGRES_DB=${DB_DATABASE} - PORT=5432 - - PGDATA=${PG_DATA}/${PG_VERSION} + - PGDATA=${PG_DATA}/${POSTGRES_VERSION} networks: - sims-network volumes: From 10a2af24b7ce0f7cb18b70207bc07c83eee87217 Mon Sep 17 00:00:00 2001 From: Nick Phura Date: Fri, 15 Nov 2024 13:50:37 -0800 Subject: [PATCH 05/13] Delete deprecated custom postgres image --- .../postgres12-postgis31/.gitattributes | 8 - containers/postgres12-postgis31/.gitignore | 7 - containers/postgres12-postgis31/LICENSE | 201 --------- containers/postgres12-postgis31/README.md | 51 --- .../postgresql12-postgis31-oracle-fdw.bc.yaml | 159 ------- .../rhel7.rh-postgresql12/Dockerfile | 142 ------- .../root/usr/bin/cgroup-limits | 92 ----- .../root/usr/bin/container-entrypoint | 3 - .../root/usr/bin/run-postgresql | 34 -- .../root/usr/bin/run-postgresql-master | 5 - .../root/usr/bin/run-postgresql-slave | 36 -- .../root/usr/libexec/fix-permissions | 7 - .../container-scripts/postgresql/README.md | 133 ------ .../container-scripts/postgresql/common.sh | 387 ------------------ ...ustom-postgresql-replication.conf.template | 7 - .../openshift-custom-postgresql.conf.template | 29 -- .../openshift-custom-recovery.conf.template | 9 - .../container-scripts/postgresql/scl_enable | 3 - 18 files changed, 1313 deletions(-) delete mode 100644 containers/postgres12-postgis31/.gitattributes delete mode 100644 containers/postgres12-postgis31/.gitignore delete mode 100644 containers/postgres12-postgis31/LICENSE delete mode 100644 containers/postgres12-postgis31/README.md delete mode 100644 containers/postgres12-postgis31/openshift/postgresql12-postgis31-oracle-fdw.bc.yaml delete mode 100644 containers/postgres12-postgis31/rhel7.rh-postgresql12/Dockerfile delete mode 100644 containers/postgres12-postgis31/root/usr/bin/cgroup-limits delete mode 100644 containers/postgres12-postgis31/root/usr/bin/container-entrypoint delete mode 100644 containers/postgres12-postgis31/root/usr/bin/run-postgresql delete mode 100644 containers/postgres12-postgis31/root/usr/bin/run-postgresql-master delete mode 100644 containers/postgres12-postgis31/root/usr/bin/run-postgresql-slave delete mode 100644 containers/postgres12-postgis31/root/usr/libexec/fix-permissions delete mode 100644 containers/postgres12-postgis31/root/usr/share/container-scripts/postgresql/README.md delete mode 100644 containers/postgres12-postgis31/root/usr/share/container-scripts/postgresql/common.sh delete mode 100644 containers/postgres12-postgis31/root/usr/share/container-scripts/postgresql/openshift-custom-postgresql-replication.conf.template delete mode 100644 containers/postgres12-postgis31/root/usr/share/container-scripts/postgresql/openshift-custom-postgresql.conf.template delete mode 100644 containers/postgres12-postgis31/root/usr/share/container-scripts/postgresql/openshift-custom-recovery.conf.template delete mode 100644 containers/postgres12-postgis31/root/usr/share/container-scripts/postgresql/scl_enable diff --git a/containers/postgres12-postgis31/.gitattributes b/containers/postgres12-postgis31/.gitattributes deleted file mode 100644 index f864d31640..0000000000 --- a/containers/postgres12-postgis31/.gitattributes +++ /dev/null @@ -1,8 +0,0 @@ -# Set the default behavior, in case people don't have core.autocrlf set. 
-* text=auto - -# Declare files that will always have LF line endings on checkout. -*.sh text eol=lf -*.md text eol=lf -*.json text eol=lf -**/root/**/* text eol=lf \ No newline at end of file diff --git a/containers/postgres12-postgis31/.gitignore b/containers/postgres12-postgis31/.gitignore deleted file mode 100644 index 5aae642021..0000000000 --- a/containers/postgres12-postgis31/.gitignore +++ /dev/null @@ -1,7 +0,0 @@ -.gitconfig -oracle-instantclient12.2-basic-12.2.0.1.0-1.x86_64.rpm -oracle-instantclient12.2-devel-12.2.0.1.0-1.x86_64.rpm -openshift/postgis-secrets.yaml -etc-pki-entitlement -rhsm-conf -rhsm-ca \ No newline at end of file diff --git a/containers/postgres12-postgis31/LICENSE b/containers/postgres12-postgis31/LICENSE deleted file mode 100644 index 9c8f3ea087..0000000000 --- a/containers/postgres12-postgis31/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file diff --git a/containers/postgres12-postgis31/README.md b/containers/postgres12-postgis31/README.md deleted file mode 100644 index c299752ddb..0000000000 --- a/containers/postgres12-postgis31/README.md +++ /dev/null @@ -1,51 +0,0 @@ -# OpenShift Postgresql + PostGIS + PGRouting - -This repository can be used to build a container featuring Postgresql and PostGIS and PGRouting extensions. - -## Versions - -PostgreSQL versions currently supported are: - -- postgresql-12.5 - -PostGIS versions currently supported are: - -| Extension | Version | Description | -| -------------------------------------------------------------------------- | :-----: | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| [postgis](https://postgis.net/) | 3.1.1 | PostGIS geometry, geography, and raster spatial types | -| [postgis_raster](https://trac.osgeo.org/postgis/wiki/WKTRaster) | 3.1.1 | Part of postgis: implements the RASTER type as much as possible like the GEOMETRY type is implemented in PostGIS and to offer a single set of overlay SQL functions (like ST_Intersects) operating seamlessly on vector and raster coverages. | -| [postgis_topology](https://postgis.net/docs/manual-dev/Topology.html) | 3.1.1 | Types and functions that are used to manage topological objects such as faces, edges and nodes | -| [postgis_sfcgal](https://postgis.net/docs/reference.html#reference_sfcgal) | 3.1.1 | Provides standard compliant geometry types and operations | -| [pgrouting](https://pgrouting.org/) | 3.1.2 | Provides geospatial routing functionality | - -Other extensions: -| Extension | Version | Description | -| ------------- |:-------------:| :-----| -fuzzystrmatch | 1.1 | Provides several functions to determine similarities and distance between strings | -pgcrypto | 1.3 | provides cryptographic functions for PostgreSQL | - -RHEL versions currently supported are: - -- RHEL7 - -### Use RHEL7 based image: - -RHEL7 based image - -To build the RHEL7 image you need to setup entitlement and subscription manager configurations. In the BC OCP3 cluster this was transparent. In the BC OCP4 cluster this (currently) requires a little extra work. 
Platform services will have to provision the required resources into your build environment. Once in place a build configuration based on [postgresql12-postgis31-oracle-fdw.bc.yaml](./openshift/postgresql12-postgis31-oracle-fdw.bc.yaml) will mount the resources so they are in place for the Dockerfile. - -Additional information can be found here; [Build Entitlements](https://github.com/BCDevOps/OpenShift4-Migration/issues/15) - -## Source - -The following open source project was used as a starting point: - -https://github.com/sclorg/postgresql-container/tree/master - -Refer to the above URL for a reference to the environment variables necessary to configure PostgreSQL. - -_NOTE_: This is meant for BC Gov Openshift (OCP4) Builds, as access to Redhat images requires certificates and tokens that are set up on OpenShift. - -## License - -Code released under the [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0). diff --git a/containers/postgres12-postgis31/openshift/postgresql12-postgis31-oracle-fdw.bc.yaml b/containers/postgres12-postgis31/openshift/postgresql12-postgis31-oracle-fdw.bc.yaml deleted file mode 100644 index bbfdc825cc..0000000000 --- a/containers/postgres12-postgis31/openshift/postgresql12-postgis31-oracle-fdw.bc.yaml +++ /dev/null @@ -1,159 +0,0 @@ ---- -kind: Template -apiVersion: template.openshift.io/v1 -metadata: - name: ${NAME}-build-template - annotations: - description: | - This template is used to create a build configuration that generates a PostgreSQL 12 with PostGIS 3.1 image. -parameters: - - name: NAME - displayName: Name - description: The name assigned to all of the objects defined in this template. - required: true - value: postgis-postgres - - name: SUFFIX - displayName: Suffix - description: A name suffix used for all objects - required: false - value: - - name: APP_NAME - displayName: App Name - description: Used to group components together in the OpenShift console. - required: true - value: biohubbc - - name: APP_GROUP - displayName: App Group - description: The name assigned to all of the deployments in this project. - required: true - value: biohubbc - - name: GIT_REPO_URL - displayName: Git Repo URL - description: The URL to your GIT repo. - required: true - value: https://github.com/bcgov/biohubbc.git - - name: GIT_REF - displayName: Git Reference - description: The git reference or branch. - required: true - value: dev - - name: SOURCE_CONTEXT_DIR - displayName: Source Context Directory - description: The source context directory. - required: false - value: containers/postgres12-postgis31 - - name: OUTPUT_IMAGE_TAG - displayName: Output Image Tag - description: The tag given to the built image. - required: true - value: 12-31 - - name: DOCKER_FILE_PATH - displayName: Docker File Path - description: The path to the docker file. - required: true - value: rhel7.rh-postgresql12/Dockerfile - - name: SOURCE_IMAGE_KIND - displayName: Source Image Kind - description: - The 'kind' (type) of the source image; typically ImageStreamTag, or - DockerImage. - required: false - value: DockerImage - - name: SOURCE_IMAGE_NAME - displayName: Source Image Name - description: - The name of the source image. If specified, this overrides the 'FROM' - spec in the Docker file. - required: false - value: registry.redhat.io/rhscl/postgresql-12-rhel7 - - name: SOURCE_IMAGE_TAG - displayName: Source Image Tag - description: - The tag of the source image. If specified, this overrides the 'FROM' - spec in the Docker file. 
- required: false - value: latest - - name: CPU_LIMIT - displayName: Resources CPU Limit - description: The resources CPU limit (in cores) for this build; 0 = use default. - required: true - value: "0" - - name: MEMORY_LIMIT - displayName: Resources Memory Limit - description: - The resources Memory limit (in Mi, Gi, etc) for this build; 0Mi = use - default. - required: true - value: 0Mi - - name: CPU_REQUEST - displayName: Resources CPU Request - description: The resources CPU request (in cores) for this build; 0 = use default. - required: true - value: "0" - - name: MEMORY_REQUEST - displayName: Resources Memory Request - description: - The resources Memory request (in Mi, Gi, etc) for this build; 0Mi = - use default. - required: true - value: 0Mi -objects: - - kind: ImageStream - apiVersion: image.openshift.io/v1 - metadata: - name: ${NAME} - labels: - name: ${NAME} - app: ${APP_NAME}${SUFFIX} - app-group: ${APP_GROUP} - - - kind: BuildConfig - apiVersion: build.openshift.io/v1 - metadata: - name: ${NAME} - labels: - name: ${NAME} - app: ${APP_NAME}${SUFFIX} - app-group: ${APP_GROUP} - spec: - runPolicy: Serial - source: - type: Git - git: - ref: ${GIT_REF} - uri: ${GIT_REPO_URL} - contextDir: ${SOURCE_CONTEXT_DIR} - secrets: - - secret: - name: platform-services-controlled-etc-pki-entitlement - destinationDir: etc-pki-entitlement - configMaps: - - configMap: - name: platform-services-controlled-rhsm-conf - destinationDir: rhsm-conf - - configMap: - name: platform-services-controlled-rhsm-ca - destinationDir: rhsm-ca - strategy: - type: Docker - dockerStrategy: - noCache: true - imageOptimizationPolicy: SkipLayers - dockerfilePath: ${DOCKER_FILE_PATH} - from: - kind: ${SOURCE_IMAGE_KIND} - name: ${SOURCE_IMAGE_NAME}:${SOURCE_IMAGE_TAG} - output: - to: - kind: ImageStreamTag - name: ${NAME}:${OUTPUT_IMAGE_TAG} - resources: - requests: - cpu: ${CPU_REQUEST} - memory: ${MEMORY_REQUEST} - limits: - cpu: ${CPU_LIMIT} - memory: ${MEMORY_LIMIT} - triggers: - - type: ImageChange - - type: ConfigChange diff --git a/containers/postgres12-postgis31/rhel7.rh-postgresql12/Dockerfile b/containers/postgres12-postgis31/rhel7.rh-postgresql12/Dockerfile deleted file mode 100644 index 52bd7775e1..0000000000 --- a/containers/postgres12-postgis31/rhel7.rh-postgresql12/Dockerfile +++ /dev/null @@ -1,142 +0,0 @@ -FROM registry.redhat.io/rhscl/postgresql-12-rhel7 -# FROM registry.redhat.io/rhscl/postgresql-13-rhel7 - -# PostgreSQL image for OpenShift with PostGIS extension. -# Volumes: -# * /var/lib/psql/data - Database cluster for PostgreSQL -# Environment: -# * $POSTGRESQL_USER - Database user name -# * $POSTGRESQL_PASSWORD - User's password -# * $POSTGRESQL_DATABASE - Name of the database to create -# * $POSTGRESQL_ADMIN_PASSWORD (Optional) - Password for the 'postgres' -# PostgreSQL administrative account - -ENV POSTGIS_EXTENSION=N \ - PGCRYPTO_EXTENSION=N \ - POSTGRESQL_VERSION=12 \ - POSTGRESQL_PREV_VERSION=10 \ - HOME=/var/lib/pgsql \ - PGUSER=postgres \ - LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8 \ - APP_DATA=/opt/app-root - -ENV POSTGISV 3 -ENV TZ America/Vancouver -ENV PORT 5432 - -ENV SUMMARY="PostgreSQL is an advanced Object-Relational database management system" \ - DESCRIPTION="PostgreSQL is an advanced Object-Relational database management system (DBMS). \ -The image contains the client and server programs that you'll need to \ -create, run, maintain and access a PostgreSQL DBMS server." 
- -LABEL summary=$SUMMARY \ - description="$DESCRIPTION" \ - io.k8s.description="$DESCRIPTION" \ - io.k8s.display-name="PostgreSQL $POSTGRESQL_VERSION" \ - io.openshift.expose-services="5432:postgresql" \ - io.openshift.tags="database,postgresql,postgresql$POSTGRESQL_VERSION,rh-postgresql$POSTGRESQL_VERSION,postgis,postgis$POSTGISV" \ - name="rhscl/postgresql-$POSTGRESQL_VERSION-rhel7" \ - com.redhat.component="rh-postgresql$POSTGRESQL_VERSION-container" \ - version="1" \ - release="1" \ - com.redhat.license_terms="https://www.redhat.com/en/about/red-hat-end-user-license-agreements#rhel" \ - usage="podman run -d --name postgresql_database -e POSTGRESQL_USER=user -e POSTGRESQL_PASSWORD=pass -e POSTGRESQL_DATABASE=db -p 5432:5432 rhscl/postgresql-$POSTGRESQL_VERSION-rhel7" \ - maintainer="SoftwareCollections.org " - -USER 0 - -# COPY root/usr/libexec/fix-permissions /usr/libexec/fix-permissions - -# Copy entitlements and subscription manager configurations -# https://github.com/BCDevOps/OpenShift4-Migration/issues/15 -COPY ./etc-pki-entitlement /etc/pki/entitlement -COPY ./rhsm-conf /etc/rhsm -COPY ./rhsm-ca /etc/rhsm/ca - -# This image must forever use UID 26 for postgres user so our volumes are -# safe in the future. This should *never* change, the last test is there -# to make sure of that. -# rhel-7-server-ose-3.2-rpms is enabled for nss_wrapper until this pkg is -# in base RHEL -# -# We need to call 2 (!) yum commands before being able to enable repositories properly -# This is a workaround for https://bugzilla.redhat.com/show_bug.cgi?id=1479388 -# Initialize /etc/yum.repos.d/redhat.repo -# See https://access.redhat.com/solutions/1443553 -RUN rm /etc/rhsm-host && \ - yum repolist > /dev/null && \ - yum install -y yum-utils gettext && \ - yum-config-manager --disable \* &> /dev/null && \ - yum-config-manager --enable rhel-server-rhscl-7-rpms && \ - yum-config-manager --enable rhel-7-server-rpms && \ - yum-config-manager --enable rhel-7-server-optional-rpms && \ - INSTALL_PKGS="rsync tar gettext bind-utils nss_wrapper" && \ - INSTALL_PKGS="$INSTALL_PKGS rh-postgresql$POSTGRESQL_VERSION-pgaudit" && \ - yum -y --setopt=tsflags=nodocs install $INSTALL_PKGS && \ - rpm -V $INSTALL_PKGS - -RUN yum -y clean all --enablerepo='*' -RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone -RUN localedef -f UTF-8 -i en_US en_US.UTF-8 - -RUN usermod -G "" postgres -RUN test "$(id postgres)" = "uid=26(postgres) gid=26(postgres) groups=26(postgres)" -RUN mkdir -p /var/lib/pgsql/data -RUN /usr/libexec/fix-permissions /var/lib/pgsql /var/run/postgresql - -RUN touch /etc/yum/pluginconf.d/rhnplugin.conf && \ - echo exclude=postgresql* >> /etc/yum/pluginconf.d/rhnplugin.conf - -RUN bash /usr/libexec/fix-permissions /var/run/postgresql - -# Get prefix path and path to scripts rather than hard-code them in scripts -ENV CONTAINER_SCRIPTS_PATH=/usr/share/container-scripts/postgresql \ - ENABLED_COLLECTIONS=rh-postgresql12 - -# When bash is started non-interactively, to run a shell script, for example it -# looks for this variable and source the content of this file. This will enable -# the SCL for all scripts without need to do 'scl enable'. -ENV BASH_ENV=${CONTAINER_SCRIPTS_PATH}/scl_enable \ - ENV=${CONTAINER_SCRIPTS_PATH}/scl_enable \ - PROMPT_COMMAND=". 
${CONTAINER_SCRIPTS_PATH}/scl_enable" - -VOLUME ["/var/lib/pgsql/data", "/var/run/postgresql"] - -# COPY root / -ENV PGCONFIG /opt/rh/rh-postgresql12/root/usr/bin -ENV PATH /opt/rh/rh-postgresql12/root/usr/bin/:/usr/bin/:${PATH} - -# Aquire and build PostGIS 3.1, for PostgreSQL 12.x -RUN cd /tmp && \ - rpm -ivh https://download.postgresql.org/pub/repos/yum/reporpms/EL-7-x86_64/pgdg-redhat-repo-latest.noarch.rpm && \ - yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm - -RUN yum -y remove rh-postgresql12-postgresql-server-syspaths rh-postgresql12-postgresql-contrib-syspaths rh-postgresql12-postgresql-syspaths -RUN yum install -y postgis31_12 -RUN yum install -y postgis31_12-client -RUN yum install -y pgrouting_12 - -RUN /usr/bin/install -c -m 755 /usr/pgsql-12/lib/postgis* '/opt/rh/rh-postgresql12/root/usr/lib64/pgsql/' && \ - /usr/bin/install -c -m 755 /usr/pgsql-12/lib/libpgrouting* '/opt/rh/rh-postgresql12/root/usr/lib64/pgsql/' && \ - /usr/bin/install -c -m 755 /usr/pgsql-12/lib/pgcrypto* '/opt/rh/rh-postgresql12/root/usr/lib64/pgsql/' && \ - /usr/bin/install -c -m 755 /usr/pgsql-12/lib/fuzzystrmatch* '/opt/rh/rh-postgresql12/root/usr/lib64/pgsql/' && \ - /usr/bin/install -c -m 644 /usr/pgsql-12/share/extension/postgis* '/opt/rh/rh-postgresql12/root/usr/share/pgsql/extension/' && \ - /usr/bin/install -c -m 644 /usr/pgsql-12/share/extension/pgrouting* '/opt/rh/rh-postgresql12/root/usr/share/pgsql/extension/' && \ - /usr/bin/install -c -m 644 /usr/pgsql-12/share/extension/pgcrypto* '/opt/rh/rh-postgresql12/root/usr/share/pgsql/extension/' && \ - /usr/bin/install -c -m 644 /usr/pgsql-12/share/extension/fuzzystrmatch* '/opt/rh/rh-postgresql12/root/usr/share/pgsql/extension/' && \ - mv /usr/pgsql-12/share/contrib/postgis-3.1/ /opt/rh/rh-postgresql12/root/usr/share/pgsql/contrib/ - -RUN rm -rf /tmp/pgdg-redhat-repo-latest.noarch.rpm /var/cache/yum - -# Remove entitlements and Subscription Manager configs -RUN rm -rf /etc/pki/entitlement && \ - rm -rf /etc/rhsm/rhsm-conf && rm -fr /etc/rhsm/ca - -RUN bash /usr/libexec/fix-permissions /var/lib/pgsql - -USER 26 - -EXPOSE ${PORT} - -ENTRYPOINT ["/usr/bin/container-entrypoint"] -CMD ["/usr/bin/run-postgresql"] \ No newline at end of file diff --git a/containers/postgres12-postgis31/root/usr/bin/cgroup-limits b/containers/postgres12-postgis31/root/usr/bin/cgroup-limits deleted file mode 100644 index b9d4edc28d..0000000000 --- a/containers/postgres12-postgis31/root/usr/bin/cgroup-limits +++ /dev/null @@ -1,92 +0,0 @@ -#!/usr/bin/python - -""" -Script for parsing cgroup information - -This script will read some limits from the cgroup system and parse -them, printing out "VARIABLE=VALUE" on each line for every limit that is -successfully read. Output of this script can be directly fed into -bash's export command. Recommended usage from a bash script: - - set -o errexit - export_vars=$(cgroup-limits) ; export $export_vars - -Variables currently supported: - MAX_MEMORY_LIMIT_IN_BYTES - Maximum possible limit MEMORY_LIMIT_IN_BYTES can have. This is - currently constant value of 9223372036854775807. - MEMORY_LIMIT_IN_BYTES - Maximum amount of user memory in bytes. If this value is set - to the same value as MAX_MEMORY_LIMIT_IN_BYTES, it means that - there is no limit set. The value is taken from - /sys/fs/cgroup/memory/memory.limit_in_bytes - NUMBER_OF_CORES - Number of detected CPU cores that can be used. 
This value is - calculated from /sys/fs/cgroup/cpuset/cpuset.cpus - NO_MEMORY_LIMIT - Set to "true" if MEMORY_LIMIT_IN_BYTES is so high that the caller - can act as if no memory limit was set. Undefined otherwise. -""" - -from __future__ import print_function -import sys - - -def _read_file(path): - try: - with open(path, 'r') as f: - return f.read().strip() - except IOError: - return None - - -def get_memory_limit(): - """ - Read memory limit, in bytes. - """ - - limit = _read_file('/sys/fs/cgroup/memory/memory.limit_in_bytes') - if limit is None or not limit.isdigit(): - print("Warning: Can't detect memory limit from cgroups", - file=sys.stderr) - return None - return int(limit) - - -def get_number_of_cores(): - """ - Read number of CPU cores. - """ - - core_count = 0 - - line = _read_file('/sys/fs/cgroup/cpuset/cpuset.cpus') - if line is None: - print("Warning: Can't detect number of CPU cores from cgroups", - file=sys.stderr) - return None - - for group in line.split(','): - core_ids = list(map(int, group.split('-'))) - if len(core_ids) == 2: - core_count += core_ids[1] - core_ids[0] + 1 - else: - core_count += 1 - - return core_count - - -if __name__ == "__main__": - env_vars = { - "MAX_MEMORY_LIMIT_IN_BYTES": 9223372036854775807, - "MEMORY_LIMIT_IN_BYTES": get_memory_limit(), - "NUMBER_OF_CORES": get_number_of_cores() - } - - env_vars = {k: v for k, v in env_vars.items() if v is not None} - - if env_vars.get("MEMORY_LIMIT_IN_BYTES", 0) >= 92233720368547: - env_vars["NO_MEMORY_LIMIT"] = "true" - - for key, value in env_vars.items(): - print("{0}={1}".format(key, value)) diff --git a/containers/postgres12-postgis31/root/usr/bin/container-entrypoint b/containers/postgres12-postgis31/root/usr/bin/container-entrypoint deleted file mode 100644 index 5fc44481d2..0000000000 --- a/containers/postgres12-postgis31/root/usr/bin/container-entrypoint +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -exec "$@" diff --git a/containers/postgres12-postgis31/root/usr/bin/run-postgresql b/containers/postgres12-postgis31/root/usr/bin/run-postgresql deleted file mode 100644 index 8c53fbe6b6..0000000000 --- a/containers/postgres12-postgis31/root/usr/bin/run-postgresql +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash - -export ENABLE_REPLICATION=${ENABLE_REPLICATION:-false} - -set -eu -export_vars=$(cgroup-limits) ; export $export_vars - -source "${CONTAINER_SCRIPTS_PATH}/common.sh" - -set_pgdata -check_env_vars -generate_passwd_file -generate_postgresql_config - -# Is this brand new data volume? -PG_INITIALIZED=false - -if [ ! 
-f "$PGDATA/postgresql.conf" ]; then - initialize_database - PG_INITIALIZED=: -else - try_pgupgrade -fi - -pg_ctl -w start -o "-h ''" -if $PG_INITIALIZED ; then - create_users -fi - -set_passwords -pg_ctl stop - -unset_env_vars -exec postgres "$@" diff --git a/containers/postgres12-postgis31/root/usr/bin/run-postgresql-master b/containers/postgres12-postgis31/root/usr/bin/run-postgresql-master deleted file mode 100644 index 79e7cc2400..0000000000 --- a/containers/postgres12-postgis31/root/usr/bin/run-postgresql-master +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -export ENABLE_REPLICATION=true - -exec run-postgresql "$@" diff --git a/containers/postgres12-postgis31/root/usr/bin/run-postgresql-slave b/containers/postgres12-postgis31/root/usr/bin/run-postgresql-slave deleted file mode 100644 index 5d42d0d456..0000000000 --- a/containers/postgres12-postgis31/root/usr/bin/run-postgresql-slave +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -export ENABLE_REPLICATION=true - -set -eu -export_vars=$(cgroup-limits) ; export $export_vars - -source "$CONTAINER_SCRIPTS_PATH"/common.sh - -set_pgdata - -function initialize_replica() { - echo "Initializing PostgreSQL slave ..." - # TODO: Validate and reuse existing data? - rm -rf $PGDATA - PGPASSWORD="${POSTGRESQL_MASTER_PASSWORD}" pg_basebackup -x --no-password --pgdata ${PGDATA} --host=${MASTER_FQDN} --port=5432 -U "${POSTGRESQL_MASTER_USER}" - - # PostgreSQL recovery configuration. - generate_postgresql_recovery_config - cat >> "$PGDATA/recovery.conf" <&2 "error: $1" - fi - - cat >&2 </dev/null) - # FIXME: This is for debugging (docker run) - if [ -v POSTGRESQL_MASTER_IP ]; then - endpoints=${POSTGRESQL_MASTER_IP:-} - fi - if [ -z "$endpoints" ]; then - >&2 echo "Failed to resolve PostgreSQL master IP address" - exit 3 - fi - echo -n "$(echo $endpoints | cut -d ' ' -f 1)" -} - -# New config is generated every time a container is created. It only contains -# additional custom settings and is included from $PGDATA/postgresql.conf. -function generate_postgresql_config() { - envsubst \ - < "${CONTAINER_SCRIPTS_PATH}/openshift-custom-postgresql.conf.template" \ - > "${POSTGRESQL_CONFIG_FILE}" - - if [ "${ENABLE_REPLICATION}" == "true" ]; then - envsubst \ - < "${CONTAINER_SCRIPTS_PATH}/openshift-custom-postgresql-replication.conf.template" \ - >> "${POSTGRESQL_CONFIG_FILE}" - fi -} - -function generate_postgresql_recovery_config() { - envsubst \ - < "${CONTAINER_SCRIPTS_PATH}/openshift-custom-recovery.conf.template" \ - > "${POSTGRESQL_RECOVERY_FILE}" -} - -# Generate passwd file based on current uid -function generate_passwd_file() { - export USER_ID=$(id -u) - export GROUP_ID=$(id -g) - grep -v ^postgres /etc/passwd > "$HOME/passwd" - echo "postgres:x:${USER_ID}:${GROUP_ID}:PostgreSQL Server:${HOME}:/bin/bash" >> "$HOME/passwd" - export LD_PRELOAD=libnss_wrapper.so - export NSS_WRAPPER_PASSWD=${HOME}/passwd - export NSS_WRAPPER_GROUP=/etc/group -} - -initdb_wrapper () -{ - # Initialize the database cluster with utf8 support enabled by default. - # This might affect performance, see: - # https://www.postgresql.org/docs/9.6/static/locale.html - LANG=${LANG:-en_US.utf8} "$@" -} - -function initialize_database() { - initdb_wrapper initdb --username=${PGUSER} - - # PostgreSQL configuration. 
- cat >> "$PGDATA/postgresql.conf" <> "$PGDATA/pg_hba.conf" <&2 "\n========== \$PGDATA upgrade: %s -> %s ==========\n\n" \ - "$POSTGRESQL_PREV_VERSION" \ - "$POSTGRESQL_VERSION" - - info_msg () { printf >&2 "\n===> $*\n\n" ;} - - # pg_upgrade writes logs to cwd, so go to the persistent storage first - cd "$HOME"/data - - # disable this because of scl_source, 'set +u' just makes the code ugly - # anyways - set +u - - # we need to have the old SCL enabled, otherwise the $old_pgengine is not - # working. The scl_source script doesn't pay attention to non-zero exit - # statuses, so use 'set +e'. - set +e - source scl_source enable $old_collection - set -e - - case $POSTGRESQL_UPGRADE in - copy) # we accept this - ;; - hardlink) - optimized=: - ;; - *) - echo >&2 "Unsupported value: \$POSTGRESQL_UPGRADE=$POSTGRESQL_UPGRADE" - false - ;; - esac - - # Ensure $PGDATA_new doesn't exist yet, so we can immediately remove it if - # there's some problem. - test ! -e "$PGDATA_new" - - # initialize the database - info_msg "Initialize new data directory; we will migrate to that." - initdb_cmd=( initdb_wrapper "$new_pgengine"/initdb "$PGDATA_new" ) - eval "\${initdb_cmd[@]} ${POSTGRESQL_UPGRADE_INITDB_OPTIONS-}" || \ - { rm -rf "$PGDATA_new" ; false ; } - - upgrade_cmd=( - "$new_pgengine"/pg_upgrade - "--old-bindir=$old_pgengine" - "--new-bindir=$new_pgengine" - "--old-datadir=$PGDATA" - "--new-datadir=$PGDATA_new" - ) - - # Dangerous --link option, we loose $DATADIR if something goes wrong. - ! $optimized || upgrade_cmd+=(--link) - - # User-specififed options for pg_upgrade. - eval "upgrade_cmd+=(${POSTGRESQL_UPGRADE_PGUPGRADE_OPTIONS-})" - - # the upgrade - info_msg "Starting the pg_upgrade process." - - # Once we stop support for PostgreSQL 9.4, we don't need - # REDHAT_PGUPGRADE_FROM_RHEL hack as we don't upgrade from 9.2 -- that means - # that we don't need to fiddle with unix_socket_director{y,ies} option. - REDHAT_PGUPGRADE_FROM_RHEL=1 \ - "${upgrade_cmd[@]}" || { rm -rf "$PGDATA_new" && false ; } - - # Move the important configuration and remove old data. This is highly - # careless, but we can't do more for this over-automatized process. - info_msg "Swap the old and new PGDATA and cleanup." - mv "$PGDATA"/*.conf "$PGDATA_new" - rm -rf "$PGDATA" - mv "$PGDATA_new" "$PGDATA" - - info_msg "Upgrade DONE." -) - - -# Run right after container startup, when the data volume is already initialized -# (not initialized by this container run) and thus there exists a chance that -# the data was generated by incompatible PostgreSQL major version. -try_pgupgrade () -{ - local versionfile="$PGDATA"/PG_VERSION version upgrade_available - - # This file always exists. - test -f "$versionfile" - version=$(cat "$versionfile") - - # If we don't support pg_upgrade, skip. - test -z "${POSTGRESQL_PREV_VERSION-}" && return 0 - - if test "$POSTGRESQL_VERSION" = "$version"; then - # No need to call pg_upgrade. - - # Mistakenly requests upgrade? If not, just start the DB. - test -z "${POSTGRESQL_UPGRADE-}" && return 0 - - # Make _sure_ we have this safety-belt here, otherwise our users would - # just specify '-e POSTGRESQL_UPGRADE=hardlink' permanently, even for - # re-deployment cases when upgrade is not needed. Setting such - # unfortunate default could mean that pg_upgrade might (after some user - # mistake) migrate (or even destruct, especially with --link) the old data - # directory with limited rollback options, if any. - echo >&2 - echo >&2 "== WARNING!! 
==" - echo >&2 "PostgreSQL server version matches the datadir PG_VERSION." - echo >&2 "The \$POSTGRESQL_UPGRADE makes no sense and you probably" - echo >&2 "made some mistake, keeping the variable set you might" - echo >&2 "risk a data loss in future!" - echo >&2 "===============" - echo >&2 - - # Exit here, but allow _really explicit_ foot-shot. - ${POSTGRESQL_UPGRADE_FORCE-false} - return 0 - fi - - # At this point in code we know that PG_VERSION doesn't match the PostgreSQL - # server major version; this might mean that user either (a) mistakenly - # deploys from a bad image, or (b) user wants to perform upgrade. For the - # upgrade we require explicit request -- just to avoid disasters in (a)-cases. - - if test -z "${POSTGRESQL_UPGRADE-}"; then - echo >&2 "Incompatible data directory. This container image provides" - echo >&2 "PostgreSQL '$POSTGRESQL_VERSION', but data directory is of" - echo >&2 "version '$version'." - echo >&2 - echo >&2 "This image supports automatic data directory upgrade from" - echo >&2 "'$POSTGRESQL_PREV_VERSION', please _carefully_ consult image documentation" - echo >&2 "about how to use the '\$POSTGRESQL_UPGRADE' startup option." - # We could wait for postgresql startup failure (there's no risk of data dir - # corruption), but fail rather early. - false - fi - - # We support pg_upgrade process only from previous version of this container - # (upgrade to N to N+1 is possible, so e.g. 9.5 to 9.6). - if test "$POSTGRESQL_PREV_VERSION" != "$version"; then - echo >&2 "With this container image you can only upgrade from data directory" - echo >&2 "of version '$POSTGRESQL_PREV_VERSION', not '$version'." - false - fi - - run_pgupgrade -} diff --git a/containers/postgres12-postgis31/root/usr/share/container-scripts/postgresql/openshift-custom-postgresql-replication.conf.template b/containers/postgres12-postgis31/root/usr/share/container-scripts/postgresql/openshift-custom-postgresql-replication.conf.template deleted file mode 100644 index ef04eaae87..0000000000 --- a/containers/postgres12-postgis31/root/usr/share/container-scripts/postgresql/openshift-custom-postgresql-replication.conf.template +++ /dev/null @@ -1,7 +0,0 @@ -# required on master for replication -wal_level = hot_standby # minimal, archive, hot_standby, or logical -max_wal_senders = 6 # max number of walsender processes -wal_keep_segments = 400 # in logfile segments, 16MB each; 0 disables - -# required on replicas for replication -hot_standby = on diff --git a/containers/postgres12-postgis31/root/usr/share/container-scripts/postgresql/openshift-custom-postgresql.conf.template b/containers/postgres12-postgis31/root/usr/share/container-scripts/postgresql/openshift-custom-postgresql.conf.template deleted file mode 100644 index 56fb3f33aa..0000000000 --- a/containers/postgres12-postgis31/root/usr/share/container-scripts/postgresql/openshift-custom-postgresql.conf.template +++ /dev/null @@ -1,29 +0,0 @@ -# -# Custom OpenShift configuration. -# -# NOTE: This file is rewritten every time the container is started! -# Changes to this file will be overwritten. -# - -# Listen on all interfaces. -listen_addresses = '*' - -# Determines the maximum number of concurrent connections to the database server. Default: 100 -max_connections = ${POSTGRESQL_MAX_CONNECTIONS} - -# Allow each connection to use a prepared transaction -max_prepared_transactions = ${POSTGRESQL_MAX_PREPARED_TRANSACTIONS} - -# Sets the amount of memory the database server uses for shared memory buffers. 
Default: 32MB -shared_buffers = ${POSTGRESQL_SHARED_BUFFERS} - -# Sets the planner's assumption about the effective size of the disk cache that is available to a single query -effective_cache_size = ${POSTGRESQL_EFFECTIVE_CACHE_SIZE} - -max_wal_size = 2GB - -# Enable DB logging -log_statement = 'all' -log_connections = on -log_disconnections = on -log_duration = on \ No newline at end of file diff --git a/containers/postgres12-postgis31/root/usr/share/container-scripts/postgresql/openshift-custom-recovery.conf.template b/containers/postgres12-postgis31/root/usr/share/container-scripts/postgresql/openshift-custom-recovery.conf.template deleted file mode 100644 index 26d56693b7..0000000000 --- a/containers/postgres12-postgis31/root/usr/share/container-scripts/postgresql/openshift-custom-recovery.conf.template +++ /dev/null @@ -1,9 +0,0 @@ -# -# Custom OpenShift configuration. -# -# NOTE: This file is rewritten every time the container is started! -# Changes to this file will be overwritten. -# - -standby_mode = on -primary_conninfo = 'host=${MASTER_FQDN} port=5432 user=${POSTGRESQL_MASTER_USER} password=${POSTGRESQL_MASTER_PASSWORD}' diff --git a/containers/postgres12-postgis31/root/usr/share/container-scripts/postgresql/scl_enable b/containers/postgres12-postgis31/root/usr/share/container-scripts/postgresql/scl_enable deleted file mode 100644 index 1d967f9b6d..0000000000 --- a/containers/postgres12-postgis31/root/usr/share/container-scripts/postgresql/scl_enable +++ /dev/null @@ -1,3 +0,0 @@ -# This will make scl collection binaries work out of box. -unset BASH_ENV PROMPT_COMMAND ENV -source scl_source enable $ENABLED_COLLECTIONS From 774a7ed1e9820f391e5817092fb527d79bffda4d Mon Sep 17 00:00:00 2001 From: Nick Phura Date: Fri, 15 Nov 2024 13:59:09 -0800 Subject: [PATCH 06/13] Cleanup db.dc.yaml --- database/.pipeline/templates/db.dc.yaml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/database/.pipeline/templates/db.dc.yaml b/database/.pipeline/templates/db.dc.yaml index 5baadb071c..1ebfa98439 100644 --- a/database/.pipeline/templates/db.dc.yaml +++ b/database/.pipeline/templates/db.dc.yaml @@ -199,18 +199,12 @@ objects: volumeMounts: - name: '${DATABASE_SERVICE_NAME}-data' mountPath: '/var/lib/postgresql/data' - - name: '${DATABASE_SERVICE_NAME}-data' - mountPath: '/var/lib/pgsql/data' - # - name: '${DATABASE_SERVICE_NAME}-run' - # mountPath: '/var/run/postgresql' dnsPolicy: ClusterFirst restartPolicy: Always volumes: - name: '${DATABASE_SERVICE_NAME}-data' persistentVolumeClaim: claimName: '${DATABASE_SERVICE_NAME}' - # - name: '${DATABASE_SERVICE_NAME}-run' - # emptyDir: {} triggers: - imageChangeParams: automatic: true From e0b15784302af81d0fab9bef32b16e43ce16040e Mon Sep 17 00:00:00 2001 From: Nick Phura Date: Fri, 15 Nov 2024 15:20:50 -0800 Subject: [PATCH 07/13] Fix env vars --- api/src/utils/env-config.ts | 2 -- compose.yml | 4 ++-- env_config/env.docker | 2 +- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/api/src/utils/env-config.ts b/api/src/utils/env-config.ts index f5bedcc738..913afeedec 100644 --- a/api/src/utils/env-config.ts +++ b/api/src/utils/env-config.ts @@ -18,13 +18,11 @@ export const EnvSchema = z.object({ API_PORT: ZodEnvNumber, // Database - POSTGRES_VERSION: ZodEnvString, DB_HOST: ZodEnvString, DB_PORT: ZodEnvNumber, DB_USER_API: ZodEnvString, DB_USER_API_PASS: ZodEnvString, DB_DATABASE: ZodEnvString, - PG_DATA: ZodEnvString, // Keycloak KEYCLOAK_HOST: ZodEnvString, diff --git a/compose.yml b/compose.yml index cf1f73a2ef..28cae3c98a 100644 --- 
a/compose.yml +++ b/compose.yml @@ -23,11 +23,11 @@ services: - POSTGRES_PASSWORD=${DB_ADMIN_PASS} - POSTGRES_DB=${DB_DATABASE} - PORT=5432 - - PGDATA=${PG_DATA}/${POSTGRES_VERSION} + - PGDATA=${PGDATA}/${POSTGRES_VERSION} networks: - sims-network volumes: - - postgres:${PG_DATA} + - postgres:${PGDATA} ## Build the api docker image api: diff --git a/env_config/env.docker b/env_config/env.docker index 8f2be0a881..d1a7a5ddca 100644 --- a/env_config/env.docker +++ b/env_config/env.docker @@ -138,7 +138,7 @@ DB_DATABASE=biohubbc DB_SCHEMA=biohub DB_SCHEMA_DAPI_V1=biohub_dapi_v1 DB_TZ=America/Vancouver -PG_DATA=/var/lib/postgresql/data +PGDATA=/var/lib/postgresql/data # ------------------------------------------------------------------------------ # KeyCloak Configuration for Keycloak Common Hosted Single Sign-on (CSS) From bf39ec14f4e6c360664b4a384aabb2f7f5e9b316 Mon Sep 17 00:00:00 2001 From: Nick Phura Date: Fri, 15 Nov 2024 16:00:59 -0800 Subject: [PATCH 08/13] ignore-skip From a38a763e31c423bb59c8e328b6207822c03b42f5 Mon Sep 17 00:00:00 2001 From: Nick Phura Date: Tue, 19 Nov 2024 14:20:52 -0800 Subject: [PATCH 09/13] Remove user name search path --- database/src/migrations/20210225205948_biohub_release.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/database/src/migrations/20210225205948_biohub_release.ts b/database/src/migrations/20210225205948_biohub_release.ts index 57697f634a..59774091c3 100644 --- a/database/src/migrations/20210225205948_biohub_release.ts +++ b/database/src/migrations/20210225205948_biohub_release.ts @@ -122,7 +122,7 @@ export async function up(knex: Knex): Promise { -- Grant postgres user full access to biohub schema GRANT ALL ON SCHEMA biohub TO postgres; -- Set search path for postgres user - SET search_path = "$user", biohub, public; + SET search_path = biohub, public; -- Set up biohub API schema CREATE SCHEMA IF NOT EXISTS biohub_dapi_v1; @@ -132,7 +132,7 @@ export async function up(knex: Knex): Promise { ALTER SCHEMA biohub_dapi_v1 OWNER TO biohub_api; GRANT USAGE ON SCHEMA biohub TO biohub_api; -- Set search path for biohub_api user - ALTER ROLE biohub_api SET search_path TO "$user", biohub, public, biohub_dapi_v1; + ALTER ROLE biohub_api SET search_path TO biohub, public, biohub_dapi_v1; -- Grant postgres user full access to biohub_dapi_v1 schema GRANT ALL ON SCHEMA biohub_dapi_v1 TO biohub_api; From bd3a29b9e4ae87067b9de9458100789c1e59bf21 Mon Sep 17 00:00:00 2001 From: Nick Phura Date: Thu, 28 Nov 2024 13:41:16 -0800 Subject: [PATCH 10/13] Move migration to the latest --- .../20240722000002_remove_duplicate_users.ts | 10 +++++----- .../src/migrations/20241115120400_help_markdown.ts | 2 +- ...ts => 20241128000000_drop_biohub_dapi_v1_schema.ts} | 0 3 files changed, 6 insertions(+), 6 deletions(-) rename database/src/migrations/{20241115000000_drop_biohub_dapi_v1_schema.ts => 20241128000000_drop_biohub_dapi_v1_schema.ts} (100%) diff --git a/database/src/migrations/20240722000002_remove_duplicate_users.ts b/database/src/migrations/20240722000002_remove_duplicate_users.ts index 3feee495dc..5a5f25abae 100644 --- a/database/src/migrations/20240722000002_remove_duplicate_users.ts +++ b/database/src/migrations/20240722000002_remove_duplicate_users.ts @@ -4,7 +4,7 @@ import { Knex } from 'knex'; * Fixes duplicate system_user_ids AND references to duplicate system_user_ids * * Updates the following tables: - * - system_user: Update/end-dates duplicate "system_user" records. + * - system_user: Update/end-dates duplicate system_user records. 
* - system_user_role: Delete duplicate system_user_role records. * - project_participation: Update system_user_id to the canonical system_user_id, and delete duplicate records. * - survey_participation: Update system_user_id to the canonical system_user_id, and delete duplicate records. @@ -45,7 +45,7 @@ export async function up(knex: Knex): Promise { ---------------------------------------------------------------------------------------- WITH - -- Get all "system_user" records with a unique user_identifier (case-insensitive) and user_identity_source_id, + -- Get all system_user records with a unique user_identifier (case-insensitive) and user_identity_source_id, -- preferring the lowest system_user_id WHERE record_end_date is null w_system_user_1 AS ( SELECT @@ -65,7 +65,7 @@ export async function up(knex: Knex): Promise { system_user_id ), w_system_user_2 AS ( - -- Get all "system_user" records with a unique user_identifier (case-insensitive) and user_identity_source_id, + -- Get all system_user records with a unique user_identifier (case-insensitive) and user_identity_source_id, -- aggregating all additional duplicate system_user_ids into an array SELECT LOWER("system_user".user_identifier) AS user_identifier, @@ -186,7 +186,7 @@ export async function up(knex: Knex): Promise { USING w_system_user_3 wsu3 WHERE system_user_role.system_user_id = ANY(wsu3.duplicate_system_user_ids) ), - -- Delete duplicate "system_user" records for duplicate system_user_ids + -- Delete duplicate system_user records for duplicate system_user_ids w_delete_duplicate_system_user AS ( DELETE FROM "system_user" su USING w_system_user_3 wsu3 @@ -252,7 +252,7 @@ export async function up(knex: Knex): Promise { ALTER TABLE biohub.survey_participation ADD CONSTRAINT survey_participation_uk1 UNIQUE (system_user_id, survey_id); -- Don't allow duplicate user_guid values - CREATE UNIQUE INDEX system_user_uk1 ON "system_user" (user_guid); + CREATE UNIQUE INDEX system_user_uk1 ON "system_user"(user_guid); `); } diff --git a/database/src/migrations/20241115120400_help_markdown.ts b/database/src/migrations/20241115120400_help_markdown.ts index 340b141644..92a650dbc9 100644 --- a/database/src/migrations/20241115120400_help_markdown.ts +++ b/database/src/migrations/20241115120400_help_markdown.ts @@ -108,7 +108,7 @@ export async function up(knex: Knex): Promise { COMMENT ON COLUMN markdown_user.revision_count IS 'Revision count used for concurrency control.'; ALTER TABLE markdown_user ADD CONSTRAINT markdown_user_fk1 FOREIGN KEY (markdown_id) REFERENCES markdown(markdown_id); - ALTER TABLE markdown_user ADD CONSTRAINT markdown_user_fk2 FOREIGN KEY (system_user_id) REFERENCES system_user(system_user_id); + ALTER TABLE markdown_user ADD CONSTRAINT markdown_user_fk2 FOREIGN KEY (system_user_id) REFERENCES "system_user"(system_user_id); CREATE INDEX markdown_user_idx1 ON markdown_user(system_user_id); CREATE INDEX markdown_user_idx2 ON markdown_user(markdown_id); diff --git a/database/src/migrations/20241115000000_drop_biohub_dapi_v1_schema.ts b/database/src/migrations/20241128000000_drop_biohub_dapi_v1_schema.ts similarity index 100% rename from database/src/migrations/20241115000000_drop_biohub_dapi_v1_schema.ts rename to database/src/migrations/20241128000000_drop_biohub_dapi_v1_schema.ts From 697515db8f0d3a8753e58b1225b34c52d6256fc4 Mon Sep 17 00:00:00 2001 From: Nick Phura Date: Fri, 29 Nov 2024 09:54:03 -0800 Subject: [PATCH 11/13] ignore-skip From 42f42516b6a99e82c463f491a88ae2cc15ee358b Mon Sep 17 00:00:00 2001 From: Nick 
Phura Date: Tue, 14 Jan 2025 15:09:26 -0800 Subject: [PATCH 12/13] Rename migration that drops the views (make it the latest migration) --- ..._v1_schema.ts => 20250114000000_drop_biohub_dapi_v1_schema.ts} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename database/src/migrations/{20241128000000_drop_biohub_dapi_v1_schema.ts => 20250114000000_drop_biohub_dapi_v1_schema.ts} (100%) diff --git a/database/src/migrations/20241128000000_drop_biohub_dapi_v1_schema.ts b/database/src/migrations/20250114000000_drop_biohub_dapi_v1_schema.ts similarity index 100% rename from database/src/migrations/20241128000000_drop_biohub_dapi_v1_schema.ts rename to database/src/migrations/20250114000000_drop_biohub_dapi_v1_schema.ts From cfa517646be9f8f19fc02351a11e4947a5ef06e5 Mon Sep 17 00:00:00 2001 From: Nick Phura Date: Tue, 14 Jan 2025 15:49:02 -0800 Subject: [PATCH 13/13] ignore-skip
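Note on the double-quoted "system_user" identifiers in the migration hunks above: SYSTEM_USER became a reserved SQL key word in PostgreSQL 16, so an unquoted reference to a table named system_user no longer parses as an ordinary identifier, while the quoted form does. The psql sketch below is illustrative only: the table and column names mirror the migrations above, but the queries themselves and the sample session value are assumptions, not part of this patch series.

  -- Unquoted, SYSTEM_USER is taken as the reserved key word (the session
  -- information function added in PostgreSQL 16), so using it as a table
  -- name raises a syntax error:
  SELECT * FROM system_user;

  -- Double quotes force it to be read as an ordinary identifier, which is
  -- why the migrations write "system_user":
  SELECT system_user_id, user_guid
  FROM "system_user"
  WHERE record_end_date IS NULL;

  -- The bare key word now reports the authentication method and identity
  -- of the current session, for example 'md5:biohub_api':
  SELECT SYSTEM_USER;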