diff --git a/.github/actions/archive-artifacts/action.yaml b/.github/actions/archive-artifacts/action.yaml index 47aad4a4fc..6d02f751da 100644 --- a/.github/actions/archive-artifacts/action.yaml +++ b/.github/actions/archive-artifacts/action.yaml @@ -1,27 +1,79 @@ --- -name: "Archive artifact logs and data" -description: "Archive logs, pods and events" +name: Archive artifact logs and data +description: Archive logs, pods and events + +inputs: + user: + description: Artifacts user + required: true + password: + description: Artifacts password + required: true + junit-paths: + description: Path to junit reports + default: /artifacts/data/reports/*.xml + required: true + stage: + description: Stage name + default: ${{ github.job }}.${{ github.run_attempt }} + required: true + trunk_token: + description: Trunk token + required: false + zenko-name: + description: Zenko name + default: end2end + required: false + zenko-namespace: + description: Namespace + default: default + required: false runs: using: composite steps: + - name: Publish test report + uses: mikepenz/action-junit-report@v4 + with: + annotate_only: true + check_name: ${{ inputs.stage}} + report_paths: ${{ inputs.junit-paths }} + job_summary: ${{ job.status != 'success' }} # Only show the summary if the job failed + detailed_summary: true + continue-on-error: true + + - name: Upload results + if: inputs.trunk_token && job.status != 'cancelled' + uses: trunk-io/analytics-uploader@v1.4.0 + with: + junit-paths: ${{ inputs.junit-paths }} + org-slug: ${{ github.repository_owner }} + token: ${{ inputs.trunk_token }} + continue-on-error: true + - name: Archive artifact logs and data shell: bash run: |- - set -exu; - mkdir -p /tmp/artifacts/data/${STAGE}/kind-logs; - kubectl get pods -A -o yaml > /tmp/artifacts/data/${STAGE}/kind-logs/all-pods.log; - kubectl get events -A -o yaml > /tmp/artifacts/data/${STAGE}/kind-logs/all-events.log; - kind export logs /tmp/artifacts/data/${STAGE}/kind-logs/kind-export; - tar zcvf /tmp/artifacts/${{ github.sha }}-${STAGE}-logs-volumes.tgz /tmp/artifacts/data/${STAGE}/kind-logs; + set -exu + + mkdir -p /tmp/artifacts/data/${STAGE}/kind-logs + kubectl get pods -A -o yaml > /tmp/artifacts/data/${STAGE}/kind-logs/all-pods.log + kubectl get events -A -o yaml > /tmp/artifacts/data/${STAGE}/kind-logs/all-events.log + kubectl get zenko -A -o yaml > /tmp/artifacts/data/${STAGE}/kind-logs/all-zenkos.log + kubectl get zenkodrsource -A -o yaml > /tmp/artifacts/data/${STAGE}/kind-logs/all-zenkodrsources.log + kubectl get zenkodrsink -A -o yaml > /tmp/artifacts/data/${STAGE}/kind-logs/all-zenkodrsinks.log + kind export logs /tmp/artifacts/data/${STAGE}/kind-logs/kind-export + tar zcvf /tmp/artifacts/${{ github.sha }}-${STAGE}-logs-volumes.tgz /tmp/artifacts/data/${STAGE}/kind-logs + env: + STAGE: ${{ inputs.stage }} + continue-on-error: true + - name: Dump kafka shell: bash - continue-on-error: true run: |- set -exu - NAMESPACE=${NAMESPACE:-default} - KAFKA=$(kubectl get pods -n ${NAMESPACE} -lkafka_cr=${ZENKO_NAME:-end2end}-base-queue -o jsonpath='{.items[0].metadata.name}') + KAFKA=$(kubectl get pods -n ${NAMESPACE} -lkafka_cr=${ZENKO_NAME}-base-queue -o jsonpath='{.items[0].metadata.name}') kubectl exec -in ${NAMESPACE} ${KAFKA} -c kafka -- \ env KAFKA_OPTS= kafka-topics.sh --bootstrap-server :9092 --list \ @@ -35,16 +87,21 @@ runs: env KAFKA_OPTS= kafka-consumer-groups.sh --bootstrap-server :9092 --describe --all-groups \ > /tmp/artifacts/data/${STAGE}/kafka-offsets.log - KAFKA_SERVICE=$(kubectl get services -n 
${NAMESPACE} -lkafka_cr=${ZENKO_NAME:-end2end}-base-queue -o jsonpath='{.items[0].metadata.name}') + KAFKA_SERVICE=$(kubectl get services -n ${NAMESPACE} -lkafka_cr=${ZENKO_NAME}-base-queue -o jsonpath='{.items[0].metadata.name}') kubectl run -n ${NAMESPACE} kcat --image=edenhill/kcat:1.7.1 --restart=Never --command -- sleep 300 kubectl wait -n ${NAMESPACE} pod kcat --for=condition=ready cat /tmp/artifacts/data/${STAGE}/kafka-topics.log | grep -v '^__' | xargs -P 15 -I {} \ sh -c "kubectl exec -i -n ${NAMESPACE} kcat -- \ kcat -L -b ${KAFKA_SERVICE} -t {} -C -o beginning -e -q -J \ > /tmp/artifacts/data/${STAGE}/kafka-messages-{}.log" + env: + STAGE: ${{ inputs.stage }} + NAMESPACE: ${{ inputs.zenko-namespace }} + ZENKO_NAME: ${{ inputs.zenko-name }} + continue-on-error: true + - name: Dump MongoDB shell: bash - continue-on-error: true run: |- set -exu @@ -54,9 +111,29 @@ runs: NAMESPACE="${NAMESPACE:-default}" DUMP_DIR="/tmp/mongodb.dump" - kubectl exec -n ${NAMESPACE} data-db-mongodb-sharded-mongos-0 -- mongodump --db ${ZENKO_MONGODB_DATABASE} -u ${MONGODB_ROOT_USERNAME} -p ${MONGODB_ROOT_PASSWORD} --authenticationDatabase admin --out ${DUMP_DIR} + kubectl exec -n ${NAMESPACE} data-db-mongodb-sharded-mongos-0 -- mongodump \ + --db ${ZENKO_MONGODB_DATABASE} -u ${MONGODB_ROOT_USERNAME} -p ${MONGODB_ROOT_PASSWORD} \ + --authenticationDatabase admin --out ${DUMP_DIR} - kubectl exec -n ${NAMESPACE} data-db-mongodb-sharded-mongos-0 -- bash -c "for bson_file in ${DUMP_DIR}/${ZENKO_MONGODB_DATABASE}/*.bson; do json_file=\"${DUMP_DIR}/\$(basename \${bson_file} .bson).json\"; bsondump --outFile \${json_file} \${bson_file}; done" + kubectl exec -n ${NAMESPACE} data-db-mongodb-sharded-mongos-0 -- bash -c \ + "for bson_file in ${DUMP_DIR}/${ZENKO_MONGODB_DATABASE}/*.bson; do \ + json_file=\"${DUMP_DIR}/\$(basename \${bson_file} .bson).json\"; \ + bsondump --outFile \${json_file} \${bson_file}; \ + done" mkdir -p /tmp/artifacts/data/${STAGE}/mongodb-dump kubectl cp ${NAMESPACE}/data-db-mongodb-sharded-mongos-0:${DUMP_DIR} /tmp/artifacts/data/${STAGE}/mongodb-dump + env: + STAGE: ${{ inputs.stage }}.${{ github.run_attempt }} + NAMESPACE: ${{ inputs.zenko-namespace }} + ZENKO_NAME: ${{ inputs.zenko-name }} + continue-on-error: true + + - name: Upload artifacts # move into `archive-artifacts` action + uses: scality/action-artifacts@v4 + with: + method: upload + url: https://artifacts.scality.net + user: ${{ inputs.user }} + password: ${{ inputs.password }} + source: /tmp/artifacts diff --git a/.github/scripts/end2end/configs/zenko.yaml b/.github/scripts/end2end/configs/zenko.yaml index 29a044d1ff..ad1b1fd7cd 100644 --- a/.github/scripts/end2end/configs/zenko.yaml +++ b/.github/scripts/end2end/configs/zenko.yaml @@ -103,6 +103,9 @@ spec: enable: true configurationOverrides: e2e-cold: + # for mock DMF, we need to override the endpoint to use in-cluster service: otherwise it + # relies on external hostname (ok thanks to coredns patch), but TLS cert is not valid + s3-endpoint: http://${ZENKO_NAME}-internal-s3api.default.svc.cluster.local debug: "true" command-timeout: "60s" pending-job-poll-after-age: "10s" diff --git a/.github/scripts/end2end/install-kind-dependencies.sh b/.github/scripts/end2end/install-kind-dependencies.sh index ee568df9d5..6b3d1ac15b 100755 --- a/.github/scripts/end2end/install-kind-dependencies.sh +++ b/.github/scripts/end2end/install-kind-dependencies.sh @@ -128,6 +128,24 @@ build_solution_base_manifests() { sed -i "s/MONGODB_SHARDSERVER_RAM_LIMIT/${MONGODB_SHARDSERVER_RAM_LIMIT}/g" 
$DIR/_build/root/deploy/* sed -i "s/MONGODB_SHARDSERVER_RAM_REQUEST/${MONGODB_SHARDSERVER_RAM_REQUEST}/g" $DIR/_build/root/deploy/* sed -i "s/MONGODB_MONGOS_RAM_REQUEST/${MONGODB_MONGOS_RAM_REQUEST}/g" $DIR/_build/root/deploy/* + + # Limits and requests for MongoDB are computed based on the current system + # Detect total system RAM in GiB + TOTAL_RAM_GB=$(awk '/MemTotal/ {printf "%.0f", $2/1024/1024}' /proc/meminfo) + + # Compute MongoDB settings based on the total RAM + MONGODB_WIRETIGER_CACHE_SIZE_GB=$((TOTAL_RAM_GB * 335 / 1000)) + MONGODB_MONGOS_RAM_LIMIT=$((TOTAL_RAM_GB * 165 / 1000))Gi + MONGODB_SHARDSERVER_RAM_LIMIT=$((2 * MONGODB_WIRETIGER_CACHE_SIZE_GB))Gi + MONGODB_SHARDSERVER_RAM_REQUEST=${MONGODB_WIRETIGER_CACHE_SIZE_GB}Gi + MONGODB_MONGOS_RAM_REQUEST=$((TOTAL_RAM_GB * 33 / 1000))Gi + + # Replace values before deploying + sed -i "s/MONGODB_SHARDSERVER_EXTRA_FLAGS/--wiredTigerCacheSizeGB=${MONGODB_WIRETIGER_CACHE_SIZE_GB}/g" $DIR/_build/root/deploy/* + sed -i "s/MONGODB_MONGOS_RAM_LIMIT/${MONGODB_MONGOS_RAM_LIMIT}/g" $DIR/_build/root/deploy/* + sed -i "s/MONGODB_SHARDSERVER_RAM_LIMIT/${MONGODB_SHARDSERVER_RAM_LIMIT}/g" $DIR/_build/root/deploy/* + sed -i "s/MONGODB_SHARDSERVER_RAM_REQUEST/${MONGODB_SHARDSERVER_RAM_REQUEST}/g" $DIR/_build/root/deploy/* + sed -i "s/MONGODB_MONGOS_RAM_REQUEST/${MONGODB_MONGOS_RAM_REQUEST}/g" $DIR/_build/root/deploy/* } get_image_from_deps() { diff --git a/.github/scripts/end2end/requirements.sh b/.github/scripts/end2end/requirements.sh index ecc686e689..ad66f95e39 100755 --- a/.github/scripts/end2end/requirements.sh +++ b/.github/scripts/end2end/requirements.sh @@ -1,7 +1,7 @@ #!/bin/bash KUBECTL_VERSION=1.21.1 -KIND_VERSION=v0.20.0 +KIND_VERSION=v0.24.0 HELM_VERSION=v3.5.3 KUSTOMIZE_VERSION=v4.4.1 YQ_VERSION=v4.27.5 diff --git a/.github/workflows/end2end.yaml b/.github/workflows/end2end.yaml index c937f18dd1..80c767cee6 100644 --- a/.github/workflows/end2end.yaml +++ b/.github/workflows/end2end.yaml @@ -16,7 +16,7 @@ env: WORKER_COUNT: '2' OPERATOR_REPO: git@github.com:scality/zenko-operator.git OPERATOR_IMAGE: "" - KIND_NODE_IMAGE: "kindest/node:v1.23.4@sha256:0e34f0d0fd448aa2f2819cfd74e99fe5793a6e4938b328f657c8e3f81ee0dfb9" + KIND_NODE_IMAGE: "kindest/node:v1.25.16@sha256:6110314339b3b44d10da7d27881849a87e092124afab5956f2e10ecdb463b025" VOLUME_ROOT: /artifacts OIDC_REALM: "zenko" OIDC_CLIENT_ID: "zenko-ui" @@ -97,7 +97,7 @@ env: SKOPEO_VERSION: "v1.16.1" KUBECTL_VERSION: "1.31.0" TILT_VERSION: "0.33.19" - KIND_VERSION: "v0.12.0" + KIND_VERSION: "v0.24.0" ZENKO_ENABLE_SOSAPI: false EXPIRE_ONE_DAY_EARLIER: true TRANSITION_ONE_DAY_EARLIER: true @@ -426,32 +426,15 @@ jobs: uses: ./.github/actions/debug-wait timeout-minutes: 60 if: failure() && runner.debug == '1' - - name: Upload results - if: "!cancelled() && env.TRUNK_TOKEN" - uses: trunk-io/analytics-uploader@main - with: - junit-paths: /artifacts/data/reports/*.xml - org-slug: ${{ github.repository_owner }} - token: ${{ env.TRUNK_TOKEN }} - env: - TRUNK_TOKEN: ${{ secrets.TRUNK_TOKEN }} - continue-on-error: true - - name: Archive artifact logs and data + - name: Archive and publish artifacts uses: ./.github/actions/archive-artifacts - env: - STAGE: end2end-http - if: always() - - name: Clean Up - run: kind delete cluster - - name: Upload artifacts - uses: scality/action-artifacts@v4 with: - method: upload - url: https://artifacts.scality.net user: ${{ secrets.ARTIFACTS_USER }} password: ${{ secrets.ARTIFACTS_PASSWORD }} - source: /tmp/artifacts + trunk_token: ${{ secrets.TRUNK_TOKEN }} if: 
always() + - name: Clean Up + run: kind delete cluster end2end-pra: needs: [build-kafka, check-dashboard-versions, lint-and-build-ctst] @@ -503,32 +486,15 @@ jobs: uses: ./.github/actions/debug-wait timeout-minutes: 60 if: failure() && runner.debug == '1' - - name: Upload results - if: "!cancelled() && env.TRUNK_TOKEN" - uses: trunk-io/analytics-uploader@main - with: - junit-paths: /artifacts/data/reports/*.xml - org-slug: ${{ github.repository_owner }} - token: ${{ env.TRUNK_TOKEN }} - env: - TRUNK_TOKEN: ${{ secrets.TRUNK_TOKEN }} - continue-on-error: true - - name: Archive artifact logs and data + - name: Archive and publish artifacts uses: ./.github/actions/archive-artifacts - env: - STAGE: end2end-pra - if: always() - - name: Clean Up - run: kind delete cluster - - name: Upload artifacts - uses: scality/action-artifacts@v4 with: - method: upload - url: https://artifacts.scality.net user: ${{ secrets.ARTIFACTS_USER }} password: ${{ secrets.ARTIFACTS_PASSWORD }} - source: /tmp/artifacts + trunk_token: ${{ secrets.TRUNK_TOKEN }} if: always() + - name: Clean Up + run: kind delete cluster end2end-https: needs: [build-kafka, build-test-image, check-dashboard-versions] @@ -563,16 +529,6 @@ jobs: - name: Run smoke tests run: bash run-e2e-test.sh "end2end" ${E2E_IMAGE_NAME}:${E2E_IMAGE_TAG} "smoke" "default" working-directory: ./.github/scripts/end2end - - name: Upload results - if: "!cancelled() && env.TRUNK_TOKEN" - uses: trunk-io/analytics-uploader@main - with: - junit-paths: /artifacts/data/reports/*.xml - org-slug: ${{ github.repository_owner }} - token: ${{ env.TRUNK_TOKEN }} - env: - TRUNK_TOKEN: ${{ secrets.TRUNK_TOKEN }} - continue-on-error: true # Temporarily disabled as CTST will test the same APIs more # extensively. # - name: Run vault e2e tests @@ -582,22 +538,15 @@ jobs: uses: ./.github/actions/debug-wait timeout-minutes: 60 if: failure() && runner.debug == '1' - - name: Archive artifact logs and data + - name: Archive and publish artifacts uses: ./.github/actions/archive-artifacts - env: - STAGE: end2end-https - if: always() - - name: Clean Up - run: kind delete cluster - - name: Upload artifacts - uses: scality/action-artifacts@v4 with: - method: upload - url: https://artifacts.scality.net user: ${{ secrets.ARTIFACTS_USER }} password: ${{ secrets.ARTIFACTS_PASSWORD }} - source: /tmp/artifacts + trunk_token: ${{ secrets.TRUNK_TOKEN }} if: always() + - name: Clean Up + run: kind delete cluster end2end-sharded: needs: [build-kafka, build-test-image, check-dashboard-versions] @@ -625,36 +574,19 @@ jobs: - name: Run backbeat end to end tests run: bash run-e2e-test.sh "end2end" ${E2E_IMAGE_NAME}:${E2E_IMAGE_TAG} "backbeat" "default" working-directory: ./.github/scripts/end2end - - name: Upload results - if: "!cancelled() && env.TRUNK_TOKEN" - uses: trunk-io/analytics-uploader@main - with: - junit-paths: /artifacts/data/reports/*.xml - org-slug: ${{ github.repository_owner }} - token: ${{ env.TRUNK_TOKEN }} - env: - TRUNK_TOKEN: ${{ secrets.TRUNK_TOKEN }} - continue-on-error: true - name: Debug wait uses: ./.github/actions/debug-wait timeout-minutes: 60 if: failure() && runner.debug == '1' - - name: Archive artifact logs and data + - name: Archive and publish artifacts uses: ./.github/actions/archive-artifacts - env: - STAGE: end2end-sharded - if: always() - - name: Clean Up - run: kind delete cluster - - name: Upload artifacts - uses: scality/action-artifacts@v4 with: - method: upload - url: https://artifacts.scality.net user: ${{ secrets.ARTIFACTS_USER }} password: ${{ 
secrets.ARTIFACTS_PASSWORD }} - source: /tmp/artifacts + trunk_token: ${{ secrets.TRUNK_TOKEN }} if: always() + - name: Clean Up + run: kind delete cluster ctst-end2end-sharded: needs: [build-kafka, lint-and-build-ctst, check-dashboard-versions] @@ -689,36 +621,19 @@ jobs: - name: Run CTST end to end tests run: bash run-e2e-ctst.sh "" "" "" "" --tags 'not @PRA' working-directory: ./.github/scripts/end2end - - name: Upload results - if: "!cancelled() && env.TRUNK_TOKEN" - uses: trunk-io/analytics-uploader@main - with: - junit-paths: /artifacts/data/reports/*.xml - org-slug: ${{ github.repository_owner }} - token: ${{ env.TRUNK_TOKEN }} - env: - TRUNK_TOKEN: ${{ secrets.TRUNK_TOKEN }} - continue-on-error: true - name: Debug wait uses: ./.github/actions/debug-wait timeout-minutes: 60 if: failure() && runner.debug == '1' - - name: Archive artifact logs and data + - name: Archive and publish artifacts uses: ./.github/actions/archive-artifacts - env: - STAGE: ctst-end2end-sharded - if: always() - - name: Clean Up - run: kind delete cluster - - name: Upload artifacts - uses: scality/action-artifacts@v4 with: - method: upload - url: https://artifacts.scality.net user: ${{ secrets.ARTIFACTS_USER }} password: ${{ secrets.ARTIFACTS_PASSWORD }} - source: /tmp/artifacts + trunk_token: ${{ secrets.TRUNK_TOKEN }} if: always() + - name: Clean Up + run: kind delete cluster write-final-status: runs-on: ubuntu-latest @@ -736,9 +651,9 @@ jobs: - ctst-end2end-sharded steps: - name: Upload final status - if: always() - uses: scality/actions/upload_final_status@1.8.0 + uses: scality/actions/upload_final_status@1.9.0 with: ARTIFACTS_USER: ${{ secrets.ARTIFACTS_USER }} ARTIFACTS_PASSWORD: ${{ secrets.ARTIFACTS_PASSWORD }} JOBS_RESULTS: ${{ join(needs.*.result) }} + if: always() diff --git a/solution/deps.yaml b/solution/deps.yaml index 8ffeabeae5..3b99a3f29f 100644 --- a/solution/deps.yaml +++ b/solution/deps.yaml @@ -6,7 +6,7 @@ backbeat: dashboard: backbeat/backbeat-dashboards image: backbeat policy: backbeat/backbeat-policies - tag: 8.6.48 + tag: 8.6.49 envsubst: BACKBEAT_TAG busybox: image: busybox @@ -16,12 +16,12 @@ cloudserver: sourceRegistry: ghcr.io/scality dashboard: cloudserver/cloudserver-dashboards image: cloudserver - tag: 8.8.32 + tag: 8.8.35 envsubst: CLOUDSERVER_TAG drctl: sourceRegistry: ghcr.io/scality image: zenko-drctl - tag: v1.0.4 + tag: v1.0.6 envsubst: DRCTL_TAG toolName: zenko-drctl fubectl: @@ -72,7 +72,7 @@ kafka-cruise-control: kafka-lag-exporter: sourceRegistry: seglo image: kafka-lag-exporter - tag: 0.7.1 + tag: 0.7.3 envsubst: KAFKA_LAGEXPORTER_TAG mongodb-connector: tag: 1.13.0 @@ -80,7 +80,7 @@ mongodb-connector: pensieve-api: sourceRegistry: ghcr.io/scality image: pensieve-api - tag: 1.6.0 + tag: 1.6.2 envsubst: PENSIEVE_API_TAG rclone: sourceRegistry: rclone @@ -100,13 +100,13 @@ s3utils: sourceRegistry: ghcr.io/scality dashboard: s3utils/s3utils-dashboards image: s3utils - tag: 1.14.13 + tag: 1.14.14 envsubst: S3UTILS_TAG scuba: sourceRegistry: ghcr.io/scality dashboard: scuba/scuba-dashboards image: scuba - tag: 1.0.2 + tag: 1.0.7 envsubst: SCUBA_TAG sorbet: sourceRegistry: ghcr.io/scality @@ -136,7 +136,7 @@ vault: zenko-operator: sourceRegistry: ghcr.io/scality image: zenko-operator - tag: 1.6.1 + tag: v1.6.3 envsubst: ZENKO_OPERATOR_TAG zenko-ui: sourceRegistry: ghcr.io/scality diff --git a/tests/ctst/steps/dr/drctl.ts b/tests/ctst/steps/dr/drctl.ts index 3bef123930..c2f09a70cf 100644 --- a/tests/ctst/steps/dr/drctl.ts +++ b/tests/ctst/steps/dr/drctl.ts @@ -108,8 +108,8 @@ 
type FailoverConfig = { timeout?: string; sinkKubeconfigPath?: string; sinkKubeconfigData?: string; - sinkZenkoInstance?: string; - sinkZenkoNamespace?: string; + sinkZenkoDrInstance?: string; + sinkZenkoDrNamespace?: string; }; type FailbackConfig = { @@ -117,23 +117,21 @@ type FailbackConfig = { timeout?: string; sinkKubeconfigPath?: string; sinkKubeconfigData?: string; - sinkZenkoInstance?: string; - sinkZenkoNamespace?: string; + sinkZenkoDrInstance?: string; + sinkZenkoDrNamespace?: string; }; type UninstallConfig = { - sinkZenkoDrInstance?: string; - sourceZenkoDrInstance?: string; wait?: boolean; timeout?: string; sourceKubeconfigPath?: string; sourceKubeconfigData?: string; sinkKubeconfigPath?: string; sinkKubeconfigData?: string; - sinkZenkoInstance?: string; - sinkZenkoNamespace?: string; - sourceZenkoInstance?: string; - sourceZenkoNamespace?: string; + sinkZenkoDrInstance?: string; + sinkZenkoDrNamespace?: string; + sourceZenkoDrInstance?: string; + sourceZenkoDrNamespace?: string; }; type StatusConfig = { @@ -148,6 +146,7 @@ type StatusConfig = { sourceZenkoDrInstance?: string; sinkZenkoDrInstance?: string; output?: string; + outputFormat?: string; }; type ReplicationPauseConfig = { @@ -155,10 +154,8 @@ type ReplicationPauseConfig = { sourceKubeconfigData?: string; sinkKubeconfigPath?: string; sinkKubeconfigData?: string; - sourceZenkoInstance?: string; - sourceZenkoNamespace?: string; - sinkZenkoInstance?: string; - sinkZenkoNamespace?: string; + sourceZenkoDrNamespace?: string; + sinkZenkoDrNamespace?: string; sourceZenkoDrInstance?: string; sinkZenkoDrInstance?: string; wait?: boolean; @@ -170,10 +167,8 @@ type ReplicationResumeConfig = { sourceKubeconfigData?: string; sinkKubeconfigPath?: string; sinkKubeconfigData?: string; - sourceZenkoInstance?: string; - sourceZenkoNamespace?: string; - sinkZenkoInstance?: string; - sinkZenkoNamespace?: string; + sourceZenkoDrNamespace?: string; + sinkZenkoDrNamespace?: string; sourceZenkoDrInstance?: string; sinkZenkoDrInstance?: string; wait?: boolean; diff --git a/tests/ctst/steps/pra.ts b/tests/ctst/steps/pra.ts index 88f13b2a0c..f8cc5a1725 100644 --- a/tests/ctst/steps/pra.ts +++ b/tests/ctst/steps/pra.ts @@ -13,7 +13,7 @@ import { restoreObject, verifyObjectLocation, } from 'steps/utils/utils'; -import { Constants, Identity, IdentityEnum, SuperAdmin, Utils } from 'cli-testing'; +import { CacheHelper, Constants, Identity, IdentityEnum, SuperAdmin, Utils } from 'cli-testing'; import { safeJsonParse } from 'common/utils'; import assert from 'assert'; import { EntityType } from 'world/Zenko'; @@ -73,7 +73,7 @@ async function installPRA(world: Zenko, sinkS3Endpoint = 'http://s3.zenko.local' // prometheusHostname: 'prom.dr.zenko.local', // could be any name, cert will be auto-generated prometheusExternalIpsDiscovery: true, prometheusDisableTls: true, - forceRotateServiceCredentials: world.praInstallCount > 0, + forceRotateServiceCredentials: (CacheHelper.savedAcrossTests[Zenko.PRA_INSTALL_COUNT_KEY] as number) > 0, ...kafkaExternalIpOption, timeout, }); @@ -111,15 +111,14 @@ async function waitForPhase( sourceZenkoNamespace: 'default', sinkZenkoDrInstance: 'end2end-pra-sink', sourceZenkoDrInstance: 'end2end-source', - output: 'json', + outputFormat: 'json', }); if (!currentStatus) { - world.logger.debug('Failed to get DR status, retrying', { + world.logger.debug('Failed to get DR status', { currentStatus, }); - await Utils.sleep(1000); - continue; + throw new Error('Failed to get DR status'); } const lines = currentStatus.split('\n'); 
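Aside (not part of the patch): the renamed `sinkZenkoDrInstance`/`sourceZenkoDrInstance` options and the new `outputFormat` field are consumed by the `waitForPhase` polling loop shown above, which now throws instead of silently retrying when no status comes back. A minimal TypeScript sketch of that access pattern follows; the `drCtl.status()` wrapper shape, `DrStatusLine`, `pollSinkPhase`, the retry count, and the one-JSON-document-per-line assumption are illustrative, while the option names, instance names, and the split-on-newline handling come from the diff itself.

// Minimal sketch, assuming drctl status output is one JSON document per line.
interface DrStatusLine {
    phase?: string;
}

async function pollSinkPhase(
    drCtl: { status: (cfg: Record<string, unknown>) => Promise<string | null> },
    targetPhase: string,
    retries = 30,
): Promise<boolean> {
    for (let i = 0; i < retries; i += 1) {
        const raw = await drCtl.status({
            sourceZenkoDrInstance: 'end2end-source',
            sinkZenkoDrInstance: 'end2end-pra-sink',
            outputFormat: 'json', // renamed from `output` in this patch
        });
        if (!raw) {
            // mirrors the new behavior: fail fast instead of retrying on a missing status
            throw new Error('Failed to get DR status');
        }
        const statuses = raw
            .split('\n')
            .map(line => {
                try {
                    return JSON.parse(line) as DrStatusLine;
                } catch {
                    return null;
                }
            })
            .filter((s): s is DrStatusLine => s !== null);
        if (statuses.some(s => s.phase === targetPhase)) {
            return true;
        }
        await new Promise(resolve => setTimeout(resolve, 1000));
    }
    return false;
}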
@@ -174,8 +173,12 @@ Given('a DR installed', { timeout: installTimeout + 2000 }, async function (this accessKey: Buffer.from(credentials.accessKeyId).toString('base64'), secretAccessKey: Buffer.from(credentials.secretAccessKey).toString('base64'), }); - await installPRA(this, undefined, `${installTimeout.toString()}ms`); - this.praInstallCount += 1; + + // Timeout is set to 1 second less than the cucumber + // timeout to see the command timeout instead of the step timeout + + await installPRA(this, undefined, `${(installTimeout - 1000).toString()}ms`); + (CacheHelper.savedAcrossTests[Zenko.PRA_INSTALL_COUNT_KEY] as number) += 1; return; }); @@ -218,7 +221,8 @@ Then('the DR sink should be in phase {string}', { timeout: 360000 }, async funct throw new Error(`Unknown state ${state}`); } - await waitForPhase(this, 'sink', targetPhase); + const res = await waitForPhase(this, 'sink', targetPhase); + assert(res); }); Then('the DR source should be in phase {string}', { timeout: 360000 }, async function (this: Zenko, state: string) { @@ -246,7 +250,8 @@ Then('the DR source should be in phase {string}', { timeout: 360000 }, async fun throw new Error(`Unknown state ${state}`); } - await waitForPhase(this, 'source', targetPhase); + const res = await waitForPhase(this, 'source', targetPhase); + assert(res); }); Then('object {string} should {string} be {string} and have the storage class {string} on {string} site', @@ -297,6 +302,9 @@ When('the DATA_ACCESSOR user tries to perform PutObject on {string} site', { tim } catch (err) { this.logger.error('Failed to setup entity', { err }); } + if (!conditionOk) { + await Utils.sleep(1000); + } } await putObject(this); @@ -323,7 +331,8 @@ Then('the kafka DR volume exists', { timeout: volumeTimeout + 2000 }, async func const failoverTimeout = 360000; When ('I request the failover state for the DR', { timeout: failoverTimeout + 2000 }, async function (this: Zenko) { await this.zenkoDrCtl?.failover({ - sinkZenkoNamespace: 'default', + sinkZenkoDrNamespace: 'default', + sinkZenkoDrInstance: 'end2end-pra-sink', wait: true, timeout: `${failoverTimeout.toString()}ms`, }); @@ -332,7 +341,8 @@ When ('I request the failover state for the DR', { timeout: failoverTimeout + 20 const failbackTimeout = 360000; When ('I resume operations for the DR', { timeout: failbackTimeout + 2000 }, async function (this: Zenko) { await this.zenkoDrCtl?.failback({ - sinkZenkoNamespace: 'default', + sinkZenkoDrNamespace: 'default', + sinkZenkoDrInstance: 'end2end-pra-sink', wait: true, timeout: `${failbackTimeout.toString()}ms`, }); @@ -343,8 +353,8 @@ When('I pause the DR', { timeout: pauseTimeout + 2000 }, async function (this: Z await this.zenkoDrCtl?.replicationPause({ sourceZenkoDrInstance: 'end2end-source', sinkZenkoDrInstance: 'end2end-pra-sink', - sinkZenkoNamespace: 'default', - sourceZenkoNamespace: 'default', + sinkZenkoDrNamespace: 'default', + sourceZenkoDrNamespace: 'default', wait: true, timeout: `${pauseTimeout.toString()}ms`, }); @@ -355,10 +365,10 @@ When('I resume the DR', { timeout: resumeTimeout + 2000 }, async function (this: await this.zenkoDrCtl?.replicationResume({ sourceZenkoDrInstance: 'end2end-source', sinkZenkoDrInstance: 'end2end-pra-sink', - sinkZenkoNamespace: 'default', - sourceZenkoNamespace: 'default', + sinkZenkoDrNamespace: 'default', + sourceZenkoDrNamespace: 'default', wait: true, - timeout: `${resumeTimeout.toString()}ms`, + timeout: `${pauseTimeout.toString()}ms`, }); }); @@ -367,8 +377,8 @@ When('I uninstall DR', { timeout: uninstallTimeout + 2000 }, 
async function (thi await this.zenkoDrCtl?.uninstall({ sourceZenkoDrInstance: 'end2end-source', sinkZenkoDrInstance: 'end2end-pra-sink', - sinkZenkoNamespace: 'default', - sourceZenkoNamespace: 'default', + sinkZenkoDrNamespace: 'default', + sourceZenkoDrNamespace: 'default', wait: true, timeout: `${uninstallTimeout.toString()}ms`, }); diff --git a/tests/ctst/steps/utils/utils.ts b/tests/ctst/steps/utils/utils.ts index 5420c212dc..7ee3efbbe1 100644 --- a/tests/ctst/steps/utils/utils.ts +++ b/tests/ctst/steps/utils/utils.ts @@ -308,9 +308,15 @@ async function verifyObjectLocation(this: Zenko, objectName: string, this.addCommandParameter({ versionId }); } let conditionOk = false; + + const startTime = Date.now(); + while (!conditionOk) { const res = await S3.headObject(this.getCommandParameters()); if (res.err?.includes('NotFound')) { + if (Date.now() - startTime > 300000) { + throw new Error('Object not found after 300 seconds'); + } await Utils.sleep(1000); continue; } else if (res.err) { diff --git a/tests/ctst/world/Zenko.ts b/tests/ctst/world/Zenko.ts index af2ecce340..95886d0378 100644 --- a/tests/ctst/world/Zenko.ts +++ b/tests/ctst/world/Zenko.ts @@ -116,8 +116,7 @@ export default class Zenko extends World { static readonly PRIMARY_SITE_NAME = 'admin'; static readonly SECONDARY_SITE_NAME = 'dradmin'; - - public praInstallCount = 0; + static readonly PRA_INSTALL_COUNT_KEY = 'praInstallCount'; /** * @constructor @@ -145,6 +144,9 @@ export default class Zenko extends World { ...this.parameters, }); + CacheHelper.savedAcrossTests[Zenko.PRA_INSTALL_COUNT_KEY] = 0; + + if (this.parameters.AccountName && !Identity.hasIdentity(IdentityEnum.ACCOUNT, this.parameters.AccountName)) { Identity.addIdentity(IdentityEnum.ACCOUNT, this.parameters.AccountName, { accessKeyId: this.parameters.AccountAccessKey,
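Aside (not part of the patch): the PRA install counter now lives in `CacheHelper.savedAcrossTests` under `Zenko.PRA_INSTALL_COUNT_KEY` rather than in an instance field, so it can be shared across Cucumber worlds. A minimal TypeScript sketch of the intended read/increment pattern, assuming `savedAcrossTests` is a plain record as used above; the helper function names are illustrative and do not exist in the repository.

// Illustrative helpers only; the key and the savedAcrossTests store come from the diff.
const PRA_INSTALL_COUNT_KEY = 'praInstallCount';

type SharedCache = { savedAcrossTests: Record<string, unknown> };

function getPraInstallCount(cache: SharedCache): number {
    return (cache.savedAcrossTests[PRA_INSTALL_COUNT_KEY] as number | undefined) ?? 0;
}

function bumpPraInstallCount(cache: SharedCache): void {
    cache.savedAcrossTests[PRA_INSTALL_COUNT_KEY] = getPraInstallCount(cache) + 1;
}

// Usage mirroring installPRA: only force credential rotation on re-installs.
// const forceRotateServiceCredentials = getPraInstallCount(CacheHelper) > 0;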