diff --git a/.github/workflows/reusable-build-test-release.yml b/.github/workflows/reusable-build-test-release.yml index e49691f97..2e823958a 100644 --- a/.github/workflows/reusable-build-test-release.yml +++ b/.github/workflows/reusable-build-test-release.yml @@ -8,6 +8,12 @@ on: type: string default: >- [""] + ucc-modinput-marker: + required: false + description: 'Parallel run ucc mod_input marker' + type: string + default: >- + [""] ui_marker: required: false description: 'Parallel run ui marker' @@ -33,7 +39,7 @@ on: required: false description: "branch for k8s manifests to run the tests on" type: string - default: "v3.0.9" + default: "v3.2.0" scripted-inputs-os-list: required: false description: "list of OS used for scripted input tests" @@ -111,6 +117,7 @@ jobs: execute-knowledge-labeled: ${{ steps.configure-tests-on-labels.outputs.execute_knowledge_labeled }} execute-ui-labeled: ${{ steps.configure-tests-on-labels.outputs.execute_ui_labeled }} execute-modinput-labeled: ${{ steps.configure-tests-on-labels.outputs.execute_modinput_functional_labeled }} + execute-ucc-modinput-labeled: ${{ steps.configure-tests-on-labels.outputs.execute_ucc_modinput_functional_labeled }} execute-scripted_inputs-labeled: ${{ steps.configure-tests-on-labels.outputs.execute_scripted_inputs_labeled }} execute-requirement-labeled: ${{ steps.configure-tests-on-labels.outputs.execute_requirement_test_labeled }} s3_bucket_k8s: ${{ steps.k8s-environment.outputs.s3_bucket }} @@ -141,7 +148,7 @@ jobs: run: | set +e declare -A EXECUTE_LABELED - TESTSET=("execute_knowledge" "execute_ui" "execute_modinput_functional" "execute_scripted_inputs" "execute_requirement_test") + TESTSET=("execute_knowledge" "execute_ui" "execute_modinput_functional" "execute_ucc_modinput_functional" "execute_scripted_inputs" "execute_requirement_test") for test_type in "${TESTSET[@]}"; do EXECUTE_LABELED["$test_type"]="false" done @@ -315,7 +322,7 @@ jobs: fetch-depth: "0" ref: ${{ github.head_ref }} - name: Secret Scanning Trufflehog - uses: trufflesecurity/trufflehog@v3.81.10 + uses: trufflesecurity/trufflehog@v3.85.0 with: extra_args: -x .github/workflows/exclude-patterns.txt --json --only-verified version: 3.77.0 @@ -324,6 +331,8 @@ jobs: uses: splunk/sast-scanning/.github/workflows/sast-scan.yml@main secrets: SEMGREP_KEY: ${{ secrets.SEMGREP_PUBLISH_TOKEN }} + with: + block_mode: "policy" test-inventory: runs-on: ubuntu-latest @@ -335,22 +344,13 @@ jobs: modinput_functional: ${{ steps.testset.outputs.modinput_functional }} requirement_test: ${{ steps.testset.outputs.requirement_test }} scripted_inputs: ${{ steps.testset.outputs.scripted_inputs }} - ucc_modinput_functional: ${{ steps.modinput-version.outputs.ucc_modinput_tests }} + ucc_modinput_functional: ${{ steps.testset.outputs.ucc_modinput_functional }} steps: - uses: actions/checkout@v4 - id: testset name: Check available test types run: | find tests -type d -maxdepth 1 -mindepth 1 | sed 's|^tests/||g' | while read -r TESTSET; do echo "$TESTSET=true" >> "$GITHUB_OUTPUT"; echo "$TESTSET::true"; done - - id: modinput-version - name: Check modinput tests version - run: | - CENTAURS_MODINPUT_TESTS_CHECK_DIR="tests/modinput_functional/centaurs" - ucc_modinput_tests="true" - if [ -d "$CENTAURS_MODINPUT_TESTS_CHECK_DIR" ]; then - ucc_modinput_tests="false" - fi - echo "ucc_modinput_tests=$ucc_modinput_tests" >> "$GITHUB_OUTPUT" run-unit-tests: name: test-unit-python3-${{ matrix.python-version }} @@ -404,15 +404,15 @@ jobs: junit_xml_file=$(find "test-results" -name "*.xml" -type f 
2>/dev/null | head -n 1) if [ -n "$junit_xml_file" ]; then - total_tests=$(xmllint --xpath 'sum(//testsuite/@tests)' "$junit_xml_file") - failures=$(xmllint --xpath 'sum(//testsuite/@failures)' "$junit_xml_file") - errors=$(xmllint --xpath 'sum(//testsuite/@errors)' "$junit_xml_file") - skipped=$(xmllint --xpath 'sum(//testsuite/@skipped)' "$junit_xml_file") - passed=$((total_tests - failures - errors - skipped)) - echo -e "| Total Tests | Passed Tests | Failed Tests | Errored Tests | Skipped Tests |\n| ----------- | ------------ | ------------ | ------------- | ------------- |\n| $total_tests | $passed | $failures | $errors | $skipped |" >> "$GITHUB_STEP_SUMMARY" + total_tests=$(xmllint --xpath "count(//testcase)" "$junit_xml_file") + failures=$(xmllint --xpath "count(//testcase[failure])" "$junit_xml_file") + errors=$(xmllint --xpath "count(//testcase[error])" "$junit_xml_file") + skipped=$(xmllint --xpath "count(//testcase[skipped])" "$junit_xml_file") + passed=$((total_tests - failures - errors - skipped)) + echo -e "| Total Tests | Passed Tests | Failed Tests | Errored Tests | Skipped Tests |\n| ----------- | ------------ | ------------ | ------------- | ------------- |\n| $total_tests | $passed | $failures | $errors | $skipped |" >> "$GITHUB_STEP_SUMMARY" else - echo "no XML File found, exiting" - exit 1 + echo "no XML File found, exiting" + exit 1 fi - uses: actions/upload-artifact@v4 if: success() || failure() @@ -472,15 +472,15 @@ jobs: junit_xml_file=$(find "test-results" -name "*.xml" -type f 2>/dev/null | head -n 1) if [ -n "$junit_xml_file" ]; then - total_tests=$(xmllint --xpath 'sum(//testsuite/@tests)' "$junit_xml_file") - failures=$(xmllint --xpath 'sum(//testsuite/@failures)' "$junit_xml_file") - errors=$(xmllint --xpath 'sum(//testsuite/@errors)' "$junit_xml_file") - skipped=$(xmllint --xpath 'sum(//testsuite/@skipped)' "$junit_xml_file") - passed=$((total_tests - failures - errors - skipped)) - echo -e "| Total Tests | Passed Tests | Failed Tests | Errored Tests | Skipped Tests |\n| ----------- | ------------ | ------------ | ------------- | ------------- |\n| $total_tests | $passed | $failures | $errors | $skipped |" >> "$GITHUB_STEP_SUMMARY" + total_tests=$(xmllint --xpath "count(//testcase)" "$junit_xml_file") + failures=$(xmllint --xpath "count(//testcase[failure])" "$junit_xml_file") + errors=$(xmllint --xpath "count(//testcase[error])" "$junit_xml_file") + skipped=$(xmllint --xpath "count(//testcase[skipped])" "$junit_xml_file") + passed=$((total_tests - failures - errors - skipped)) + echo -e "| Total Tests | Passed Tests | Failed Tests | Errored Tests | Skipped Tests |\n| ----------- | ------------ | ------------ | ------------- | ------------- |\n| $total_tests | $passed | $failures | $errors | $skipped |" >> "$GITHUB_STEP_SUMMARY" else - echo "no XML File found, exiting" - exit 1 + echo "no XML File found, exiting" + exit 1 fi - uses: actions/upload-artifact@v4 if: success() || failure() @@ -652,7 +652,7 @@ jobs: with: name: artifact-openapi path: ${{ github.workspace }}/${{ steps.uccgen.outputs.OUTPUT }}/appserver/static/openapi.json - if: ${{ !cancelled() && needs.test-inventory.outputs.ucc_modinput_functional == 'true' && needs.test-inventory.outputs.modinput_functional == 'true' }} + if: ${{ !cancelled() && needs.test-inventory.outputs.ucc_modinput_functional == 'true' }} - name: artifact-splunk-base uses: actions/upload-artifact@v4 with: @@ -956,7 +956,7 @@ jobs: echo "k8s-manifests-branch=${{ inputs.k8s-manifests-branch }}" } >> "$GITHUB_OUTPUT" - uses: 
actions/download-artifact@v4 - if: ${{ needs.test-inventory.outputs.ucc_modinput_functional == 'true' && needs.test-inventory.outputs.modinput_functional == 'true'}} + if: ${{ needs.test-inventory.outputs.ucc_modinput_functional == 'true' }} id: download-openapi with: name: artifact-openapi @@ -977,7 +977,11 @@ jobs: export POETRY_HTTP_BASIC_SPLUNK_ADD_ON_UCC_MODINPUT_TEST_USERNAME=${{ secrets.SA_GH_USER_NAME }} export POETRY_HTTP_BASIC_SPLUNK_ADD_ON_UCC_MODINPUT_TEST_PASSWORD=${{ secrets.GH_TOKEN_ADMIN }} poetry install --only modinput - poetry run ucc-test-modinput -o ${{ steps.download-openapi.outputs.download-path }}/openapi.json -t ${{ steps.download-openapi.outputs.download-path }}/tmp/ + if [ -f "tests/ucc_modinput_functional/tmp/openapi.json" ]; then + poetry run ucc-test-modinput -o tests/ucc_modinput_functional/tmp/openapi.json -t ${{ steps.download-openapi.outputs.download-path }}/tmp/ + else + poetry run ucc-test-modinput -o ${{ steps.download-openapi.outputs.download-path }}/openapi.json -t ${{ steps.download-openapi.outputs.download-path }}/tmp/ + fi - name: upload-swagger-artifacts-to-s3 if: steps.download-openapi.conclusion != 'skipped' id: swaggerupload @@ -989,6 +993,203 @@ jobs: swagger_name=swagger_$(basename "$BUILD_NAME" .spl) aws s3 sync "${{ steps.download-openapi.outputs.download-path }}/tmp/restapi_client/" "s3://${{ needs.setup-workflow.outputs.s3_bucket_k8s }}/ta-apps/$swagger_name/" --exclude "*" --include "README.md" --include "*swagger_client*" --only-show-errors + run-btool-check: + if: ${{ !cancelled() && needs.build.result == 'success' && needs.test-inventory.outputs.knowledge == 'true' && needs.setup-workflow.outputs.execute-knowledge-labeled == 'true' }} + needs: + - build + - test-inventory + - setup + - meta + - setup-workflow + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + splunk: ${{ fromJson(needs.meta.outputs.matrix_latestSplunk) }} + sc4s: ${{ fromJson(needs.meta.outputs.matrix_supportedSC4S) }} + container: + image: ghcr.io/splunk/workflow-engine-base:4.1.0 + env: + ARGO_SERVER: ${{ needs.setup.outputs.argo-server }} + ARGO_HTTP1: ${{ needs.setup.outputs.argo-http1 }} + ARGO_SECURE: ${{ needs.setup.outputs.argo-secure }} + ARGO_BASE_HREF: ${{ needs.setup.outputs.argo-href }} + ARGO_NAMESPACE: ${{ needs.setup.outputs.argo-namespace }} + SPLUNK_VERSION_BASE: ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} + TEST_TYPE: "btool" + TEST_ARGS: "" + permissions: + actions: read + deployments: read + contents: read + packages: read + statuses: read + checks: write + steps: + - uses: actions/checkout@v4 + with: + submodules: recursive + - name: capture start time + id: capture-start-time + run: | + echo "start_time=$(date +%s)" >> "$GITHUB_OUTPUT" + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: ${{ secrets.AWS_DEFAULT_REGION }} + - name: Read secrets from AWS Secrets Manager into environment variables + id: get-argo-token + run: | + ARGO_TOKEN=$(aws secretsmanager get-secret-value --secret-id "${{ needs.setup-workflow.outputs.argo_token_secret_id_k8s }}" | jq -r '.SecretString') + echo "argo-token=$ARGO_TOKEN" >> "$GITHUB_OUTPUT" + - name: create job name + id: create-job-name + shell: bash + run: | + RANDOM_STRING=$(head -3 /dev/urandom | tr -cd '[:lower:]' | cut -c -4) + JOB_NAME=${{ needs.setup.outputs.job-name }}-${RANDOM_STRING} + 
JOB_NAME=${JOB_NAME//TEST-TYPE/${{ env.TEST_TYPE }}} + JOB_NAME=${JOB_NAME//[_.]/-} + JOB_NAME=$(echo "$JOB_NAME" | tr '[:upper:]' '[:lower:]') + echo "job-name=$JOB_NAME" >> "$GITHUB_OUTPUT" + - name: run-btool-check + id: run-btool-check + timeout-minutes: 10 + env: + ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} + uses: splunk/wfe-test-runner-action@v5.0 + with: + splunk: ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} + test-type: ${{ env.TEST_TYPE }} + test-args: "" + job-name: ${{ steps.create-job-name.outputs.job-name }} + labels: ${{ needs.setup.outputs.labels }} + workflow-tmpl-name: ${{ needs.setup.outputs.argo-workflow-tmpl-name }} + workflow-template-ns: ${{ needs.setup.outputs.argo-namespace }} + addon-url: ${{ needs.setup.outputs.addon-upload-path }} + addon-name: ${{ needs.setup.outputs.addon-name }} + sc4s-version: ${{ matrix.sc4s.version }} + sc4s-docker-registry: ${{ matrix.sc4s.docker_registry }} + k8s-manifests-branch: ${{ needs.setup.outputs.k8s-manifests-branch }} + - name: Read secrets from AWS Secrets Manager again into environment variables in case credential rotation + id: update-argo-token + if: ${{ !cancelled() }} + run: | + ARGO_TOKEN=$(aws secretsmanager get-secret-value --secret-id "${{ needs.setup-workflow.outputs.argo_token_secret_id_k8s }}" | jq -r '.SecretString') + echo "argo-token=$ARGO_TOKEN" >> "$GITHUB_OUTPUT" + - name: calculate timeout + id: calculate-timeout + run: | + start_time=${{ steps.capture-start-time.outputs.start_time }} + current_time=$(date +%s) + remaining_time_minutes=$(( 10-((current_time-start_time)/60) )) + echo "remaining_time_minutes=$remaining_time_minutes" >> "$GITHUB_OUTPUT" + - name: Check if pod was deleted + id: is-pod-deleted + timeout-minutes: ${{ fromJson(steps.calculate-timeout.outputs.remaining_time_minutes) }} + if: ${{ !cancelled() }} + shell: bash + env: + ARGO_TOKEN: ${{ steps.update-argo-token.outputs.argo-token }} + run: | + set -o xtrace + if argo watch ${{ steps.run-btool-check.outputs.workflow-name }} -n workflows | grep "pod deleted"; then + echo "retry-workflow=true" >> "$GITHUB_OUTPUT" + fi + - name: Cancel workflow + env: + ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} + if: ${{ cancelled() }} + run: | + cancel_response=$(argo submit -v -o json --from wftmpl/${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} -l workflows.argoproj.io/workflow-template=${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} --argo-base-href '' -p workflow-to-cancel=${{ steps.run-btool-check.outputs.workflow-name }}) + cancel_workflow_name=$( echo "$cancel_response" |jq -r '.metadata.name' ) + cancel_logs=$(argo logs --follow "$cancel_workflow_name" -n workflows) + if echo "$cancel_logs" | grep -q "workflow ${{ steps.run-btool-check.outputs.workflow-name }} stopped"; then + echo "Workflow ${{ steps.run-btool-check.outputs.workflow-name }} stopped" + else + echo "Workflow ${{ steps.run-btool-check.outputs.workflow-name }} didn't stop" + exit 1 + fi + - name: Retrying workflow + id: retry-wf + shell: bash + env: + ARGO_TOKEN: ${{ steps.update-argo-token.outputs.argo-token }} + if: ${{ !cancelled() }} + run: | + set -o xtrace + set +e + if [[ "${{ steps.is-pod-deleted.outputs.retry-workflow }}" == "true" ]] + then + WORKFLOW_NAME=$(argo resubmit -v -o json -n workflows "${{ steps.run-btool-check.outputs.workflow-name }}" | jq -r .metadata.name) + echo "workflow-name=$WORKFLOW_NAME" >> "$GITHUB_OUTPUT" + argo logs --follow "${WORKFLOW_NAME}" -n workflows || echo "... 
there was an error fetching logs, the workflow is still in progress. please wait for the workflow to complete ..." + else + echo "No retry required" + argo wait "${{ steps.run-btool-check.outputs.workflow-name }}" -n workflows + argo watch "${{ steps.run-btool-check.outputs.workflow-name }}" -n workflows | grep "test-btool" + fi + - name: check workflow status + id: check-workflow-status + env: + ARGO_TOKEN: ${{ steps.update-argo-token.outputs.argo-token }} + shell: bash + if: ${{ !cancelled() }} + run: | + set +e + # shellcheck disable=SC2157 + if [ -z "${{ steps.retry-wf.outputs.workflow-name }}" ]; then + WORKFLOW_NAME=${{ steps.run-btool-check.outputs.workflow-name }} + else + WORKFLOW_NAME="${{ steps.retry-wf.outputs.workflow-name }}" + fi + ARGO_STATUS=$(argo get "${WORKFLOW_NAME}" -n workflows -o json | jq -r '.status.phase') + while [ "$ARGO_STATUS" == "Running" ] || [ "$ARGO_STATUS" == "Pending" ] + do + echo "... argo Workflow ${WORKFLOW_NAME} is running, waiting for it to complete." + argo wait "${WORKFLOW_NAME}" -n workflows || true + ARGO_STATUS=$(argo get "${WORKFLOW_NAME}" -n workflows -o json | jq -r '.status.phase') + done + echo "Status of workflow:" "$ARGO_STATUS" + echo "workflow-status=$ARGO_STATUS" >> "$GITHUB_OUTPUT" + if [ "$ARGO_STATUS" == "Succeeded" ]; then + exit 0 + else + exit 1 + fi + - name: pull artifacts from s3 bucket + if: ${{ !cancelled() && steps.check-workflow-status.outputs.workflow-status != 'Succeeded' }} + run: | + echo "pulling artifacts" + aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/artifacts-${{ steps.create-job-name.outputs.job-name }}/${{ steps.create-job-name.outputs.job-name }}.tgz ${{ needs.setup.outputs.directory-path }}/ + tar -xf ${{ needs.setup.outputs.directory-path }}/${{ steps.create-job-name.outputs.job-name }}.tgz -C ${{ needs.setup.outputs.directory-path }} + - name: pull logs from s3 bucket + if: ${{ !cancelled() && steps.check-workflow-status.outputs.workflow-status != 'Succeeded' }} + run: | + # shellcheck disable=SC2157 + if [ -z "${{ steps.retry-wf.outputs.workflow-name }}" ]; then + WORKFLOW_NAME=${{ steps.run-btool-check.outputs.workflow-name }} + else + WORKFLOW_NAME="${{ steps.retry-wf.outputs.workflow-name }}" + fi + echo "pulling logs" + mkdir -p ${{ needs.setup.outputs.directory-path }}/argo-logs + aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/workflows/${WORKFLOW_NAME}/ ${{ needs.setup.outputs.directory-path }}/argo-logs/ --recursive + - uses: actions/upload-artifact@v4 + if: ${{ !cancelled() && steps.check-workflow-status.outputs.workflow-status != 'Succeeded' }} + with: + name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} tests artifacts + path: | + ${{ needs.setup.outputs.directory-path }}/test-results + - uses: actions/upload-artifact@v4 + if: ${{ !cancelled() && steps.check-workflow-status.outputs.workflow-status != 'Succeeded' }} + with: + name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} tests logs + path: | + ${{ needs.setup.outputs.directory-path }}/argo-logs + run-knowledge-tests: if: ${{ !cancelled() && needs.build.result == 'success' && needs.test-inventory.outputs.knowledge == 'true' && needs.setup-workflow.outputs.execute-knowledge-labeled == 'true' }} needs: @@ -1208,15 +1409,15 @@ jobs: junit_xml_path="${{ needs.setup.outputs.directory-path }}/test-results" junit_xml_file=$(find "$junit_xml_path" -name "*.xml" -type f 2>/dev/null | head -n 1) if [ -n "$junit_xml_file" 
]; then - total_tests=$(xmllint --xpath 'sum(//testsuite/@tests)' "$junit_xml_file") - failures=$(xmllint --xpath 'sum(//testsuite/@failures)' "$junit_xml_file") - errors=$(xmllint --xpath 'sum(//testsuite/@errors)' "$junit_xml_file") - skipped=$(xmllint --xpath 'sum(//testsuite/@skipped)' "$junit_xml_file") - passed=$((total_tests - failures - errors - skipped)) - echo "splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} |$total_tests |$passed |$failures |$errors | $skipped |${{steps.test_report.outputs.url_html}}" > job_summary.txt + total_tests=$(xmllint --xpath "count(//testcase)" "$junit_xml_file") + failures=$(xmllint --xpath "count(//testcase[failure])" "$junit_xml_file") + errors=$(xmllint --xpath "count(//testcase[error])" "$junit_xml_file") + skipped=$(xmllint --xpath "count(//testcase[skipped])" "$junit_xml_file") + passed=$((total_tests - failures - errors - skipped)) + echo "splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} |$total_tests |$passed |$failures |$errors | $skipped |${{steps.test_report.outputs.url_html}}" > job_summary.txt else - echo "no XML File found, exiting" - exit 1 + echo "no XML File found, exiting" + exit 1 fi - name: Upload-artifact-for-github-summary uses: actions/upload-artifact@v4 @@ -1463,15 +1664,15 @@ jobs: junit_xml_path="${{ needs.setup.outputs.directory-path }}/test-results" junit_xml_file=$(find "$junit_xml_path" -name "*.xml" -type f 2>/dev/null | head -n 1) if [ -n "$junit_xml_file" ]; then - total_tests=$(xmllint --xpath 'sum(//testsuite/@tests)' "$junit_xml_file") - failures=$(xmllint --xpath 'sum(//testsuite/@failures)' "$junit_xml_file") - errors=$(xmllint --xpath 'sum(//testsuite/@errors)' "$junit_xml_file") - skipped=$(xmllint --xpath 'sum(//testsuite/@skipped)' "$junit_xml_file") - passed=$((total_tests - failures - errors - skipped)) - echo "splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} |$total_tests |$passed |$failures |$errors |$skipped |${{steps.test_report.outputs.url_html}}" > job_summary.txt + total_tests=$(xmllint --xpath "count(//testcase)" "$junit_xml_file") + failures=$(xmllint --xpath "count(//testcase[failure])" "$junit_xml_file") + errors=$(xmllint --xpath "count(//testcase[error])" "$junit_xml_file") + skipped=$(xmllint --xpath "count(//testcase[skipped])" "$junit_xml_file") + passed=$((total_tests - failures - errors - skipped)) + echo "splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} |$total_tests |$passed |$failures |$errors |$skipped |${{steps.test_report.outputs.url_html}}" > job_summary.txt else - echo "no XML File found, exiting" - exit 1 + echo "no XML File found, exiting" + exit 1 fi - name: Upload-artifact-for-github-summary uses: actions/upload-artifact@v4 @@ -1740,15 +1941,15 @@ jobs: junit_xml_path="${{ needs.setup.outputs.directory-path }}/test-results" junit_xml_file=$(find "$junit_xml_path" -name "*.xml" -type f 2>/dev/null | head -n 1) if [ -n "$junit_xml_file" ]; then - total_tests=$(xmllint --xpath 'sum(//testsuite/@tests)' "$junit_xml_file") - failures=$(xmllint --xpath 'sum(//testsuite/@failures)' "$junit_xml_file") - errors=$(xmllint --xpath 'sum(//testsuite/@errors)' "$junit_xml_file") - skipped=$(xmllint --xpath 'sum(//testsuite/@skipped)' "$junit_xml_file") - passed=$((total_tests - failures - errors - skipped)) - echo "splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ matrix.browser }} ${{ matrix.vendor-version.image }} ${{ matrix.marker }} |$total_tests 
|$passed |$failures |$errors |$skipped |${{steps.test_report.outputs.url_html}}" > job_summary.txt + total_tests=$(xmllint --xpath "count(//testcase)" "$junit_xml_file") + failures=$(xmllint --xpath "count(//testcase[failure])" "$junit_xml_file") + errors=$(xmllint --xpath "count(//testcase[error])" "$junit_xml_file") + skipped=$(xmllint --xpath "count(//testcase[skipped])" "$junit_xml_file") + passed=$((total_tests - failures - errors - skipped)) + echo "splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ matrix.browser }} ${{ matrix.vendor-version.image }} ${{ matrix.marker }} |$total_tests |$passed |$failures |$errors |$skipped |${{steps.test_report.outputs.url_html}}" > job_summary.txt else - echo "no XML File found, exiting" - exit 1 + echo "no XML File found, exiting" + exit 1 fi - name: Upload-artifact-for-github-summary uses: actions/upload-artifact@v4 @@ -2015,15 +2216,15 @@ jobs: junit_xml_path="${{ needs.setup.outputs.directory-path }}/test-results" junit_xml_file=$(find "$junit_xml_path" -name "*.xml" -type f 2>/dev/null | head -n 1) if [ -n "$junit_xml_file" ]; then - total_tests=$(xmllint --xpath 'sum(//testsuite/@tests)' "$junit_xml_file") - failures=$(xmllint --xpath 'sum(//testsuite/@failures)' "$junit_xml_file") - errors=$(xmllint --xpath 'sum(//testsuite/@errors)' "$junit_xml_file") - skipped=$(xmllint --xpath 'sum(//testsuite/@skipped)' "$junit_xml_file") - passed=$((total_tests - failures - errors - skipped)) - echo "splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ matrix.marker }} ${{ matrix.vendor-version.image }} |$total_tests |$passed |$failures |$errors | $skipped |${{steps.test_report.outputs.url_html}}" > job_summary.txt + total_tests=$(xmllint --xpath "count(//testcase)" "$junit_xml_file") + failures=$(xmllint --xpath "count(//testcase[failure])" "$junit_xml_file") + errors=$(xmllint --xpath "count(//testcase[error])" "$junit_xml_file") + skipped=$(xmllint --xpath "count(//testcase[skipped])" "$junit_xml_file") + passed=$((total_tests - failures - errors - skipped)) + echo "splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ matrix.marker }} ${{ matrix.vendor-version.image }} |$total_tests |$passed |$failures |$errors | $skipped |${{steps.test_report.outputs.url_html}}" > job_summary.txt else - echo "no XML File found, exiting" - exit 1 + echo "no XML File found, exiting" + exit 1 fi - name: Upload-artifact-for-github-summary uses: actions/upload-artifact@v4 @@ -2064,6 +2265,280 @@ jobs: name: | summary-modinput* + run-ucc-modinput-tests: + if: ${{ !cancelled() && needs.build.result == 'success' && needs.test-inventory.outputs.ucc_modinput_functional == 'true' && needs.setup-workflow.outputs.execute-ucc-modinput-labeled == 'true' }} + needs: + - build + - test-inventory + - setup + - meta + - setup-workflow + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + splunk: ${{ fromJson(needs.meta.outputs.matrix_supportedSplunk) }} + vendor-version: ${{ fromJson(needs.meta.outputs.matrix_supportedModinputFunctionalVendors) }} + marker: ${{ fromJson(inputs.ucc-modinput-marker) }} + container: + image: ghcr.io/splunk/workflow-engine-base:4.1.0 + env: + ARGO_SERVER: ${{ needs.setup.outputs.argo-server }} + ARGO_HTTP1: ${{ needs.setup.outputs.argo-http1 }} + ARGO_SECURE: ${{ needs.setup.outputs.argo-secure }} + ARGO_BASE_HREF: ${{ needs.setup.outputs.argo-href }} + ARGO_NAMESPACE: ${{ needs.setup.outputs.argo-namespace }} + SPLUNK_VERSION_BASE: ${{ matrix.splunk.version 
}}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} + TEST_TYPE: "ucc_modinput_functional" + TEST_ARGS: "" + permissions: + actions: read + deployments: read + contents: read + packages: read + statuses: read + checks: write + steps: + - uses: actions/checkout@v4 + with: + submodules: recursive + - name: configure git # This step configures git to omit "dubious git ownership error" in later test-reporter stage + id: configure-git + run: | + git --version + git_path="$(pwd)" + echo "$git_path" + git config --global --add safe.directory "$git_path" + - name: capture start time + id: capture-start-time + run: | + echo "start_time=$(date +%s)" >> "$GITHUB_OUTPUT" + - name: Configure AWS credentials + uses: aws-actions/configure-aws-credentials@v4 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws-region: ${{ secrets.AWS_DEFAULT_REGION }} + - name: Read secrets from AWS Secrets Manager into environment variables + id: get-argo-token + run: | + ARGO_TOKEN=$(aws secretsmanager get-secret-value --secret-id "${{ needs.setup-workflow.outputs.argo_token_secret_id_k8s }}" | jq -r '.SecretString') + echo "argo-token=$ARGO_TOKEN" >> "$GITHUB_OUTPUT" + - name: create job name + id: create-job-name + shell: bash + run: | + RANDOM_STRING=$(head -3 /dev/urandom | tr -cd '[:lower:]' | cut -c -4) + JOB_NAME=${{ needs.setup.outputs.job-name }}-${RANDOM_STRING} + JOB_NAME=${JOB_NAME//TEST-TYPE/ucc_modinput} + JOB_NAME=${JOB_NAME//[_.]/-} + JOB_NAME=$(echo "$JOB_NAME" | tr '[:upper:]' '[:lower:]') + echo "job-name=$JOB_NAME" >> "$GITHUB_OUTPUT" + - name: create test argument + id: create-test-arg + shell: bash + run: | + TEST_ARG_M="" + EMPTY_MARKER="[]" + + if [[ "${{ inputs.ucc-modinput-marker }}" != "$EMPTY_MARKER" ]]; then + TEST_ARG_M="-m" + fi + + echo "test-arg=$TEST_ARG_M" >> "$GITHUB_OUTPUT" + - name: run-tests + id: run-tests + timeout-minutes: 340 + continue-on-error: true + env: + ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} + uses: splunk/wfe-test-runner-action@v5.0 + with: + splunk: ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} + test-type: ${{ env.TEST_TYPE }} + test-args: ${{ env.TEST_ARGS }} ${{ steps.create-test-arg.outputs.test-arg }} ${{ matrix.marker }} + job-name: ${{ steps.create-job-name.outputs.job-name }} + labels: ${{ needs.setup.outputs.labels }} + workflow-tmpl-name: ${{ needs.setup.outputs.argo-workflow-tmpl-name }} + workflow-template-ns: ${{ needs.setup.outputs.argo-namespace }} + addon-url: ${{ needs.setup.outputs.addon-upload-path }} + addon-name: ${{ needs.setup.outputs.addon-name }} + vendor-version: ${{ matrix.vendor-version.image }} + sc4s-version: "No" + k8s-manifests-branch: ${{ needs.setup.outputs.k8s-manifests-branch }} + - name: Read secrets from AWS Secrets Manager again into environment variables in case credential rotation + id: update-argo-token + if: ${{ !cancelled() }} + run: | + ARGO_TOKEN=$(aws secretsmanager get-secret-value --secret-id "${{ needs.setup-workflow.outputs.argo_token_secret_id_k8s }}" | jq -r '.SecretString') + echo "argo-token=$ARGO_TOKEN" >> "$GITHUB_OUTPUT" + - name: calculate timeout + id: calculate-timeout + run: | + start_time=${{ steps.capture-start-time.outputs.start_time }} + current_time=$(date +%s) + remaining_time_minutes=$(( 350-((current_time-start_time)/60) )) + echo "remaining_time_minutes=$remaining_time_minutes" >> "$GITHUB_OUTPUT" + - name: Check if pod was deleted + id: is-pod-deleted + timeout-minutes: ${{ 
fromJson(steps.calculate-timeout.outputs.remaining_time_minutes) }} + if: ${{ !cancelled() }} + shell: bash + env: + ARGO_TOKEN: ${{ steps.update-argo-token.outputs.argo-token }} + run: | + set -o xtrace + if argo watch ${{ steps.run-tests.outputs.workflow-name }} -n workflows | grep "pod deleted"; then + echo "retry-workflow=true" >> "$GITHUB_OUTPUT" + fi + - name: Cancel workflow + env: + ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }} + if: ${{ cancelled() || steps.is-pod-deleted.outcome != 'success' }} + run: | + cancel_response=$(argo submit -v -o json --from wftmpl/${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} -l workflows.argoproj.io/workflow-template=${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} --argo-base-href '' -p workflow-to-cancel=${{ steps.run-tests.outputs.workflow-name }}) + cancel_workflow_name=$( echo "$cancel_response" |jq -r '.metadata.name' ) + cancel_logs=$(argo logs --follow "$cancel_workflow_name" -n workflows) + if echo "$cancel_logs" | grep -q "workflow ${{ steps.run-tests.outputs.workflow-name }} stopped"; then + echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} stopped" + else + echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} didn't stop" + exit 1 + fi + - name: Retrying workflow + id: retry-wf + shell: bash + env: + ARGO_TOKEN: ${{ steps.update-argo-token.outputs.argo-token }} + if: ${{ !cancelled() }} + run: | + set -o xtrace + set +e + if [[ "${{ steps.is-pod-deleted.outputs.retry-workflow }}" == "true" ]] + then + WORKFLOW_NAME=$(argo resubmit -v -o json -n workflows "${{ steps.run-tests.outputs.workflow-name }}" | jq -r .metadata.name) + echo "workflow-name=$WORKFLOW_NAME" >> "$GITHUB_OUTPUT" + argo logs --follow "${WORKFLOW_NAME}" -n workflows || echo "... there was an error fetching logs, the workflow is still in progress. please wait for the workflow to complete ..." + else + echo "No retry required" + argo wait "${{ steps.run-tests.outputs.workflow-name }}" -n workflows + argo watch "${{ steps.run-tests.outputs.workflow-name }}" -n workflows | grep "test-addon" + fi + - name: check if workflow completed + env: + ARGO_TOKEN: ${{ steps.update-argo-token.outputs.argo-token }} + if: ${{ !cancelled() }} + shell: bash + run: | + set +e + # shellcheck disable=SC2157 + if [ -z "${{ steps.retry-wf.outputs.workflow-name }}" ]; then + WORKFLOW_NAME=${{ steps.run-tests.outputs.workflow-name }} + else + WORKFLOW_NAME="${{ steps.retry-wf.outputs.workflow-name }}" + fi + ARGO_STATUS=$(argo get "${WORKFLOW_NAME}" -n workflows -o json | jq -r '.status.phase') + echo "Status of workflow:" "$ARGO_STATUS" + while [ "$ARGO_STATUS" == "Running" ] || [ "$ARGO_STATUS" == "Pending" ] + do + echo "... argo Workflow ${WORKFLOW_NAME} is running, waiting for it to complete." 
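+            # Note (assumed argo CLI behavior): `argo wait` blocks until the workflow reaches a terminal phase;
+            # '|| true' keeps this polling loop alive even if the wait call itself fails or times out.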
+ argo wait "${WORKFLOW_NAME}" -n workflows || true + ARGO_STATUS=$(argo get "${WORKFLOW_NAME}" -n workflows -o json | jq -r '.status.phase') + done + - name: pull artifacts from s3 bucket + if: ${{ !cancelled() }} + run: | + echo "pulling artifacts" + aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/artifacts-${{ steps.create-job-name.outputs.job-name }}/${{ steps.create-job-name.outputs.job-name }}.tgz ${{ needs.setup.outputs.directory-path }}/ + tar -xf ${{ needs.setup.outputs.directory-path }}/${{ steps.create-job-name.outputs.job-name }}.tgz -C ${{ needs.setup.outputs.directory-path }} + - name: pull logs from s3 bucket + if: ${{ !cancelled() }} + run: | + # shellcheck disable=SC2157 + if [ -z "${{ steps.retry-wf.outputs.workflow-name }}" ]; then + WORKFLOW_NAME=${{ steps.run-tests.outputs.workflow-name }} + else + WORKFLOW_NAME="${{ steps.retry-wf.outputs.workflow-name }}" + fi + echo "pulling logs" + mkdir -p ${{ needs.setup.outputs.directory-path }}/argo-logs + aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/workflows/${WORKFLOW_NAME}/ ${{ needs.setup.outputs.directory-path }}/argo-logs/ --recursive + - uses: actions/upload-artifact@v4 + if: ${{ !cancelled() }} + with: + name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ matrix.vendor-version.image }} ${{ matrix.marker }} tests artifacts + path: | + ${{ needs.setup.outputs.directory-path }}/test-results + - uses: actions/upload-artifact@v4 + if: ${{ !cancelled() }} + with: + name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ matrix.vendor-version.image }} ${{ matrix.marker }} tests logs + path: | + ${{ needs.setup.outputs.directory-path }}/argo-logs + - name: Test Report + id: test_report + uses: dorny/test-reporter@v1.9.1 + if: ${{ !cancelled() }} + with: + name: splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ matrix.vendor-version.image }} ${{ matrix.marker }} test report + path: "${{ needs.setup.outputs.directory-path }}/test-results/*.xml" + reporter: java-junit + - name: Parse JUnit XML + if: ${{ !cancelled() }} + run: | + apt-get install -y libxml2-utils + junit_xml_path="${{ needs.setup.outputs.directory-path }}/test-results" + junit_xml_file=$(find "$junit_xml_path" -name "*.xml" -type f 2>/dev/null | head -n 1) + if [ -n "$junit_xml_file" ]; then + total_tests=$(xmllint --xpath "count(//testcase)" "$junit_xml_file") + failures=$(xmllint --xpath "count(//testcase[failure])" "$junit_xml_file") + errors=$(xmllint --xpath "count(//testcase[error])" "$junit_xml_file") + skipped=$(xmllint --xpath "count(//testcase[skipped])" "$junit_xml_file") + passed=$((total_tests - failures - errors - skipped)) + echo "splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ matrix.marker }} ${{ matrix.vendor-version.image }} |$total_tests |$passed |$failures |$errors | $skipped |${{steps.test_report.outputs.url_html}}" > job_summary.txt + else + echo "no XML File found, exiting" + exit 1 + fi + - name: Upload-artifact-for-github-summary + uses: actions/upload-artifact@v4 + if: ${{ !cancelled() }} + with: + name: summary-${{ env.TEST_TYPE }}-${{ matrix.splunk.version }}-${{ secrets.OTHER_TA_REQUIRED_CONFIGS }}-${{ matrix.vendor-version.image }}-${{ matrix.marker }}-artifact + path: job_summary.txt + - name: pull diag from s3 bucket + if: ${{ failure() && steps.test_report.outputs.conclusion == 'failure' }} + run: | + echo "pulling 
diag" + aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/diag-${{ steps.create-job-name.outputs.job-name }}/diag-${{ steps.create-job-name.outputs.job-name }}.tgz ${{ needs.setup.outputs.directory-path }}/ + - uses: actions/upload-artifact@v4 + if: ${{ failure() && steps.test_report.outputs.conclusion == 'failure' }} + with: + name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} ${{ matrix.vendor-version.image }} ${{ matrix.marker }} tests diag + path: | + ${{ needs.setup.outputs.directory-path }}/diag* + + ucc-modinput-tests-report: + needs: run-ucc-modinput-tests + runs-on: ubuntu-latest + if: ${{ !cancelled() && needs.run-ucc-modinput-tests.result != 'skipped' }} + steps: + - name: Download all summaries + uses: actions/download-artifact@v4 + with: + pattern: summary-ucc_modinput* + - name: Combine summaries into a table + run: | + echo "| Job | Total Tests | Passed Tests | Failed Tests | Errored Tests | Skipped Tests | Report Link" >> "$GITHUB_STEP_SUMMARY" + echo "| ---------- | ----------- | ------ | ------ | ------ | ------- | ------ |" >> "$GITHUB_STEP_SUMMARY" + for file in summary-ucc_modinput*/job_summary.txt; do + cat "$file" >> "$GITHUB_STEP_SUMMARY" + done + - uses: geekyeggo/delete-artifact@v5 + with: + name: | + summary-ucc_modinput* + run-scripted-input-tests-full-matrix: if: ${{ !cancelled() && needs.build.result == 'success' && needs.test-inventory.outputs.scripted_inputs == 'true' && needs.setup-workflow.outputs.execute-scripted_inputs-labeled == 'true' }} needs: @@ -2352,6 +2827,7 @@ jobs: - setup - run-knowledge-tests - run-modinput-tests + - run-ucc-modinput-tests - run-ui-tests - validate-pr-title runs-on: ubuntu-latest diff --git a/README.md b/README.md index b7e8bde0e..fd09ecf22 100644 --- a/README.md +++ b/README.md @@ -30,6 +30,7 @@ Workflow defines jobs which perform security code scanning, execute different ty ## Inputs * marker - list of markers used to paralelize modinput tests +* ucc-modinput-marker - list of markers used to paralelize ucc modinput tests * ui_marker - list of markers used to paralelize ui tests * custom-version - version used for release on manual workflow trigger * execute-tests-on-push-to-release - enable tests on release branch - default false @@ -262,6 +263,7 @@ ui_local::true knowledge::true unit::true modinput_functional::true +ucc_modinput_functional::true ``` build @@ -455,6 +457,31 @@ test_cim_output.txt test_check_unicode_output.txt ``` +# run-btool-check + +**Description:** + +- This stage validates the configuration of the TA using btool + +**Action used:** +- No action used + +**Pass/fail behaviour:** + +- The stage is expected to fail only if there are errors reported by btool check. + +**Troubleshooting steps for failures if any:** + +- btool-output.txt can be used for identifying the errors reported by the btool check + +- User can validate the configuration by executing btool check in local env using `$SPLUNK_HOME/bin/splunk btool check | grep "Splunk_TA_example"`. 
+
+**Artifacts:**
+
+```
+btool-output.txt
+```
+
 # run-knowledge-tests
 
 **Description:**
@@ -549,7 +576,7 @@ Junit XML file
 
 **Pass/fail behaviour**
 
-- The stage is expected to fail only if there are any Modular input test failures defined under tests/modular_input
+- The stage is expected to fail only if there are any Modular input test failures defined under tests/modinput_functional
 
 **Troubleshooting steps for failures if any:**
 
@@ -575,6 +602,45 @@ helmut.log
 Junit XML file
 ```
 
+# run-ucc-modinput-tests
+
+**Description:**
+
+- This stage does the setup for executing Modinput tests using the [ucc modinput tests framework](https://github.com/splunk/addonfactory-ucc-test) and reports the results
+- It is possible to parallelize Modinput tests execution by using pytest markers.
+  To do so, one must specify the `ucc-modinput-marker` parameter in build-test-release.yml as in this [example](https://github.com/splunk/splunk-add-on-for-google-cloud-platform/blob/34abcf2780d8f223f292c9c2fcc5835b71a8de99/.github/workflows/build-test-release.yml#L34); see also the sketch at the end of this section.
+  Markers must be created beforehand and each test case must be marked (check the following references: [ref1](https://github.com/splunk/splunk-add-on-for-google-cloud-platform/blob/34abcf2780d8f223f292c9c2fcc5835b71a8de99/tests/ucc_modinput_functional/markers.py),
+[ref2](https://github.com/splunk/splunk-add-on-for-google-cloud-platform/blob/34abcf2780d8f223f292c9c2fcc5835b71a8de99/tests/ucc_modinput_functional/test_google_cloud_rh_settings.py#L19))
+
+**Action used:**
+- No action
+
+**Pass/fail behaviour**
+
+- The stage is expected to fail only if there are any Modular input test failures defined under tests/ucc_modinput_functional
+
+**Troubleshooting steps for failures if any:**
+
+- We can validate the test execution in a local environment and compare results.
+
+- The `splunk-add-on-ucc-modinput-test-functional.log` file and `test-result.xml` can be used to identify errors.
+
+- The `splunk-add-on-ucc-modinput-test-functional.log` file has detailed logs for each action of a test case; we can observe the logs and troubleshoot the root cause of a failure.
+
+- Make sure the setup and teardown methods work as expected in the test case.
+
+**Exception file:**
+
+- `.pytest.expect` - users can add failures here; they will be ignored during test execution and marked as XFail
+
+**NOTE:** There should be valid reasons and approvals from addon and automation PMs to add failures in this file.
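+
+As referenced in the Description above, a minimal sketch of passing the markers from an add-on's build-test-release.yml could look like this (the version tag and marker names are illustrative; markers must match those defined under tests/ucc_modinput_functional):
+
+```
+jobs:
+  call-reusable-build-test-release:
+    uses: splunk/addonfactory-workflow-addon-release/.github/workflows/reusable-build-test-release.yml@v4.18
+    with:
+      ucc-modinput-marker: >-
+        ["settings", "inputs"]
+    secrets: inherit
+```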
+
+**Artifacts:**
+```
+splunk-add-on-ucc-modinput-test-functional.log
+Junit XML file
+```
+
 pre-publish
 ===========
 
diff --git a/runbooks/addonfactory-workflow-addon-release-docker-images.md b/runbooks/addonfactory-workflow-addon-release-docker-images.md
index afc36ef4e..645ce0235 100644
--- a/runbooks/addonfactory-workflow-addon-release-docker-images.md
+++ b/runbooks/addonfactory-workflow-addon-release-docker-images.md
@@ -8,7 +8,7 @@ Once there is new Splunk release, and [matrix](https://github.com/splunk/addonfa
 - if any is missing in [ta-automation-docker-images](https://cd.splunkdev.com/taautomation/ta-automation-docker-images/-/tree/main/dockerfiles) then add new Dockerfile
 
 #### Create images and publish them to ECR
-- figure out what version of Splunk is needed (sha) using go/fetcher
+- figure out what version of Splunk is needed (sha) using the `BUILD` field from [splunk_matrix](https://github.com/splunk/addonfactory-test-matrix-action/blob/main/config/splunk_matrix.conf#L7) (alternatively use go/fetcher)
 - trigger [pipeline](https://cd.splunkdev.com/taautomation/ta-automation-docker-images/-/pipelines/new) for every OS flavor separately
 
 ## Runbook to publish unreleased Splunk image for testing
diff --git a/runbooks/backporting-changes-to-older-version.md b/runbooks/backporting-changes-to-older-version.md
new file mode 100644
index 000000000..4e94084ad
--- /dev/null
+++ b/runbooks/backporting-changes-to-older-version.md
@@ -0,0 +1,52 @@
+# Runbook to backport changes to previous versions of `addonfactory-workflow-addon-release`
+`addonfactory-workflow-addon-release` is utilized by all supported TAs. While it is strongly recommended to use the latest minor version of the reusable action, not all TAs consistently follow this guideline. As a result, there are cases when crucial updates introduced in the latest reusable workflow version need to be backported to its older versions that are still in use.
+
+This runbook shows a real example of backporting changes related to `ta-automation-k8s-manifests`. In the example:
+- the current version of `addonfactory-workflow-addon-release` is `v4.17.0`
+- there was a bug in `ta-automation-k8s-manifests` affecting all TAs
+  - there is a need to make a fix and release `v4.17.1` containing the fixed `ta-automation-k8s-manifests` version ([PR](https://github.com/splunk/addonfactory-workflow-addon-release/pull/329))
+  - there is a need to backport it to `v4.16` (the old version which is still in use by some TAs)
+  - the latest patch release of `v4.16` is `v4.16.14`
+### Steps
+- make release `v4.17.1` with the necessary changes
+- fetch all existing tags from `addonfactory-workflow-addon-release`:
+  ```
+  git checkout main
+  git pull
+  git fetch --tags
+  ```
+- check out the latest tag of the minor release you want to backport the changes to:
+  ```
+  git checkout v4.16.14
+  ```
+- create a new branch based on the tag you are currently checked out to:
+  ```
+  git checkout -b fix/bump-k8s-manifest-version
+  ```
+- changes made in `ta-automation-k8s-manifests` were correlated with changes in `addonfactory-workflow-addon-release`, so there is a need to backport **only the necessary** changes to `v4.16`. There are two ways to do that: by cherry-picking specific commits (and resolving the conflicts if they exist) or by committing the necessary changes manually.
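+  - for example, to take a fix over as-is, cherry-pick its commits (the sha below is a placeholder):
+    ```
+    git cherry-pick <commit-sha-of-the-fix>
+    ```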
+  - alternatively, make the necessary changes manually and commit them:
+    ```
+    git add .github/workflows/reusable-build-test-release.yml
+    git commit -m "fix: bump k8s-manifest version"
+    ```
+- push the newly created branch to the remote repository
+  - ```git push -u origin fix/bump-k8s-manifest-version```
+- in GitHub UI:
+  - navigate to releases
+  - draft a new release
+  - `Choose a tag`: type the tag that will be created, e.g. `v4.16.15`
+  - `Target` - the newly created branch `fix/bump-k8s-manifest-version`
+  - click on `Generate release notes`
+  - `Title of release` should be `v4.16.15 backport`
+  - write a description of the changes
+  - uncheck the `Set as the latest release` box
+  - click `Publish release`
+  - check if the release is available and points at the proper version - https://github.com/splunk/addonfactory-workflow-addon-release/tags - tag `v4.16` should point to the same commit as tag `v4.16.15`
+  ![tags](images/backporting/compare-tags.png)
+  Backporting the changes will cause the tag `v4` to point at the same commit as `v4.16`. To make it proper, one has to either re-trigger the workflow which produced the latest tag (`v4.17.1`) or resolve that manually:
+  ```
+  git fetch --tags -f
+  git tag -f v4 v4.17.1
+  git push -f --tags
+  ```
+- run the workflow for some TA using v4.16 to verify that the pipeline works as expected.
\ No newline at end of file
diff --git a/runbooks/images/backporting/compare-tags.png b/runbooks/images/backporting/compare-tags.png
new file mode 100644
index 000000000..df128fddd
Binary files a/runbooks/images/backporting/compare-tags.png and b/runbooks/images/backporting/compare-tags.png differ