chore: ADDON-76668 adding tests for spl2
1 parent eb908d6 · commit dc20fce

Showing 1 changed file with 250 additions and 1 deletion.
@@ -115,6 +115,7 @@ jobs:
     runs-on: ubuntu-latest
     outputs:
       execute-knowledge-labeled: ${{ steps.configure-tests-on-labels.outputs.execute_knowledge_labeled }}
+      execute-spl2-labeled: ${{ steps.configure-tests-on-labels.outputs.execute_spl2_labeled }}
       execute-ui-labeled: ${{ steps.configure-tests-on-labels.outputs.execute_ui_labeled }}
       execute-modinput-labeled: ${{ steps.configure-tests-on-labels.outputs.execute_modinput_functional_labeled }}
       execute-ucc-modinput-labeled: ${{ steps.configure-tests-on-labels.outputs.execute_ucc_modinput_functional_labeled }}
@@ -148,7 +149,7 @@ jobs: | |
run: | | ||
set +e | ||
declare -A EXECUTE_LABELED | ||
TESTSET=("execute_knowledge" "execute_ui" "execute_modinput_functional" "execute_ucc_modinput_functional" "execute_scripted_inputs" "execute_requirement_test") | ||
TESTSET=("execute_knowledge" "execute_spl2" "execute_ui" "execute_modinput_functional" "execute_ucc_modinput_functional" "execute_scripted_inputs" "execute_requirement_test") | ||
for test_type in "${TESTSET[@]}"; do | ||
EXECUTE_LABELED["$test_type"]="false" | ||
done | ||
|
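The hunk above shows only the initialization half of the label gate: every test set starts out "false". A minimal sketch of how the rest of the step presumably flips a flag when a matching pull-request label such as execute_spl2 is present; the PR_LABELS variable and the exact matching loop are assumptions, they are not part of this diff:

    #!/usr/bin/env bash
    # Illustrative only: PR_LABELS stands in for the PR's label names,
    # one per line (not shown in this hunk).
    PR_LABELS=$'execute_spl2\nsome_unrelated_label'

    declare -A EXECUTE_LABELED
    TESTSET=("execute_knowledge" "execute_spl2" "execute_ui")
    for test_type in "${TESTSET[@]}"; do
      EXECUTE_LABELED["$test_type"]="false"
    done

    # Flip the flag for every label that names a known test set.
    while IFS= read -r label; do
      if [[ -n "${EXECUTE_LABELED[$label]+x}" ]]; then
        EXECUTE_LABELED["$label"]="true"
      fi
    done <<< "$PR_LABELS"

    # Publish one step output per test set, e.g. execute_spl2_labeled=true.
    for test_type in "${TESTSET[@]}"; do
      echo "${test_type}_labeled=${EXECUTE_LABELED[$test_type]}" >> "${GITHUB_OUTPUT:-/dev/stdout}"
    done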
@@ -338,6 +339,7 @@ jobs:
     runs-on: ubuntu-latest
     # Map a step output to a job output
    outputs:
+      spl2: ${{ steps.testset.outputs.spl2 }}
       unit: ${{ steps.testset.outputs.unit }}
       knowledge: ${{ steps.testset.outputs.knowledge }}
       ui: ${{ steps.testset.outputs.ui }}
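This new spl2 output is what gates the run-spl2-tests job below (needs.test-inventory.outputs.spl2). The testset step that produces it is outside this hunk; a plausible sketch, assuming each flag simply reflects whether a matching test directory exists in the add-on repository (the tests/spl2 layout is an assumption for illustration):

    #!/usr/bin/env bash
    # Hypothetical testset step: emit "<type>=true|false" for each test
    # type based on which test directories are present.
    for test_type in spl2 unit knowledge ui; do
      if [ -d "tests/$test_type" ]; then
        echo "$test_type=true" >> "${GITHUB_OUTPUT:-/dev/stdout}"
      else
        echo "$test_type=false" >> "${GITHUB_OUTPUT:-/dev/stdout}"
      fi
    done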
@@ -1190,6 +1192,253 @@ jobs:
         path: |
           ${{ needs.setup.outputs.directory-path }}/argo-logs
+  run-spl2-tests:
+    if: ${{ !cancelled() && needs.build.result == 'success' && needs.test-inventory.outputs.spl2 == 'true' && needs.setup-workflow.outputs.execute-spl2-labeled == 'true' }}
+    needs:
+      - build
+      - test-inventory
+      - setup
+      - meta
+      - setup-workflow
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        splunk: ${{ fromJson(needs.meta.outputs.matrix_supportedSplunk) }}
+        sc4s: ${{ fromJson(needs.meta.outputs.matrix_supportedSC4S) }}
+    container:
+      image: ghcr.io/splunk/workflow-engine-base:4.1.0
+    env:
+      ARGO_SERVER: ${{ needs.setup.outputs.argo-server }}
+      ARGO_HTTP1: ${{ needs.setup.outputs.argo-http1 }}
+      ARGO_SECURE: ${{ needs.setup.outputs.argo-secure }}
+      ARGO_BASE_HREF: ${{ needs.setup.outputs.argo-href }}
+      ARGO_NAMESPACE: ${{ needs.setup.outputs.argo-namespace }}
+      SPLUNK_VERSION_BASE: ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }}
+      TEST_TYPE: "spl2_test"
+      TEST_ARGS: ""
+    permissions:
+      actions: read
+      deployments: read
+      contents: read
+      packages: read
+      statuses: read
+      checks: write
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          submodules: recursive
+      - name: configure git # avoids git's "dubious ownership" error in the later test-reporter step
+        id: configure-git
+        run: |
+          git --version
+          git_path="$(pwd)"
+          echo "$git_path"
+          git config --global --add safe.directory "$git_path"
+      - name: capture start time
+        id: capture-start-time
+        run: |
+          echo "start_time=$(date +%s)" >> "$GITHUB_OUTPUT"
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v4
+        with:
+          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          aws-region: ${{ secrets.AWS_DEFAULT_REGION }}
+      - name: Read secrets from AWS Secrets Manager into environment variables
+        id: get-argo-token
+        run: |
+          ARGO_TOKEN=$(aws secretsmanager get-secret-value --secret-id "${{ needs.setup-workflow.outputs.argo_token_secret_id_k8s }}" | jq -r '.SecretString')
+          echo "argo-token=$ARGO_TOKEN" >> "$GITHUB_OUTPUT"
+      - name: create job name
+        id: create-job-name
+        shell: bash
+        run: |
+          RANDOM_STRING=$(head -3 /dev/urandom | tr -cd '[:lower:]' | cut -c -4)
+          JOB_NAME=${{ needs.setup.outputs.job-name }}-${RANDOM_STRING}
+          JOB_NAME=${JOB_NAME//TEST-TYPE/${{ env.TEST_TYPE }}}
+          JOB_NAME=${JOB_NAME//[_.]/-}
+          JOB_NAME=$(echo "$JOB_NAME" | tr '[:upper:]' '[:lower:]')
+          echo "job-name=$JOB_NAME" >> "$GITHUB_OUTPUT"
+      - name: run-tests
+        id: run-tests
+        timeout-minutes: 340
+        continue-on-error: true
+        env:
+          ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }}
+        uses: splunk/[email protected]
+        with:
+          splunk: ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }}
+          test-type: ${{ env.TEST_TYPE }}
+          test-args: ""
+          job-name: ${{ steps.create-job-name.outputs.job-name }}
+          labels: ${{ needs.setup.outputs.labels }}
+          workflow-tmpl-name: ${{ needs.setup.outputs.argo-workflow-tmpl-name }}
+          workflow-template-ns: ${{ needs.setup.outputs.argo-namespace }}
+          addon-url: ${{ needs.setup.outputs.addon-upload-path }}
+          addon-name: ${{ needs.setup.outputs.addon-name }}
+          sc4s-version: ${{ matrix.sc4s.version }}
+          sc4s-docker-registry: ${{ matrix.sc4s.docker_registry }}
+          k8s-manifests-branch: ${{ needs.setup.outputs.k8s-manifests-branch }}
+      - name: Read secrets from AWS Secrets Manager again in case credentials were rotated
+        id: update-argo-token
+        if: ${{ !cancelled() }}
+        run: |
+          ARGO_TOKEN=$(aws secretsmanager get-secret-value --secret-id "${{ needs.setup-workflow.outputs.argo_token_secret_id_k8s }}" | jq -r '.SecretString')
+          echo "argo-token=$ARGO_TOKEN" >> "$GITHUB_OUTPUT"
+      - name: calculate timeout
+        id: calculate-timeout
+        run: |
+          start_time=${{ steps.capture-start-time.outputs.start_time }}
+          current_time=$(date +%s)
+          remaining_time_minutes=$(( 350-((current_time-start_time)/60) ))
+          echo "remaining_time_minutes=$remaining_time_minutes" >> "$GITHUB_OUTPUT"
+      - name: Check if pod was deleted
+        id: is-pod-deleted
+        timeout-minutes: ${{ fromJson(steps.calculate-timeout.outputs.remaining_time_minutes) }}
+        if: ${{ !cancelled() }}
+        shell: bash
+        env:
+          ARGO_TOKEN: ${{ steps.update-argo-token.outputs.argo-token }}
+        run: |
+          set -o xtrace
+          if argo watch ${{ steps.run-tests.outputs.workflow-name }} -n workflows | grep "pod deleted"; then
+            echo "retry-workflow=true" >> "$GITHUB_OUTPUT"
+          fi
+      - name: Cancel workflow
+        env:
+          ARGO_TOKEN: ${{ steps.get-argo-token.outputs.argo-token }}
+        if: ${{ cancelled() || steps.is-pod-deleted.outcome != 'success' }}
+        run: |
+          cancel_response=$(argo submit -v -o json --from wftmpl/${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} -l workflows.argoproj.io/workflow-template=${{ needs.setup.outputs.argo-cancel-workflow-tmpl-name }} --argo-base-href '' -p workflow-to-cancel=${{ steps.run-tests.outputs.workflow-name }})
+          cancel_workflow_name=$(echo "$cancel_response" | jq -r '.metadata.name')
+          cancel_logs=$(argo logs --follow "$cancel_workflow_name" -n workflows)
+          if echo "$cancel_logs" | grep -q "workflow ${{ steps.run-tests.outputs.workflow-name }} stopped"; then
+            echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} stopped"
+          else
+            echo "Workflow ${{ steps.run-tests.outputs.workflow-name }} didn't stop"
+            exit 1
+          fi
+      - name: Retrying workflow
+        id: retry-wf
+        shell: bash
+        env:
+          ARGO_TOKEN: ${{ steps.update-argo-token.outputs.argo-token }}
+        if: ${{ !cancelled() }}
+        run: |
+          set -o xtrace
+          set +e
+          if [[ "${{ steps.is-pod-deleted.outputs.retry-workflow }}" == "true" ]]
+          then
+            WORKFLOW_NAME=$(argo resubmit -v -o json -n workflows "${{ steps.run-tests.outputs.workflow-name }}" | jq -r .metadata.name)
+            echo "workflow-name=$WORKFLOW_NAME" >> "$GITHUB_OUTPUT"
+            argo logs --follow "${WORKFLOW_NAME}" -n workflows || echo "... there was an error fetching logs, the workflow is still in progress. please wait for the workflow to complete ..."
+          else
+            echo "No retry required"
+            argo wait "${{ steps.run-tests.outputs.workflow-name }}" -n workflows
+            argo watch "${{ steps.run-tests.outputs.workflow-name }}" -n workflows | grep "test-addon"
+          fi
+      - name: check if workflow completed
+        env:
+          ARGO_TOKEN: ${{ steps.update-argo-token.outputs.argo-token }}
+        shell: bash
+        if: ${{ !cancelled() }}
+        run: |
+          set +e
+          # shellcheck disable=SC2157
+          if [ -z "${{ steps.retry-wf.outputs.workflow-name }}" ]; then
+            WORKFLOW_NAME=${{ steps.run-tests.outputs.workflow-name }}
+          else
+            WORKFLOW_NAME="${{ steps.retry-wf.outputs.workflow-name }}"
+          fi
+          ARGO_STATUS=$(argo get "${WORKFLOW_NAME}" -n workflows -o json | jq -r '.status.phase')
+          echo "Status of workflow:" "$ARGO_STATUS"
+          while [ "$ARGO_STATUS" == "Running" ] || [ "$ARGO_STATUS" == "Pending" ]
+          do
+            echo "... argo Workflow ${WORKFLOW_NAME} is running, waiting for it to complete."
+            argo wait "${WORKFLOW_NAME}" -n workflows || true
+            ARGO_STATUS=$(argo get "${WORKFLOW_NAME}" -n workflows -o json | jq -r '.status.phase')
+          done
+      - name: pull artifacts from s3 bucket
+        if: ${{ !cancelled() }}
+        run: |
+          echo "pulling artifacts"
+          aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/artifacts-${{ steps.create-job-name.outputs.job-name }}/${{ steps.create-job-name.outputs.job-name }}.tgz ${{ needs.setup.outputs.directory-path }}/
+          tar -xf ${{ needs.setup.outputs.directory-path }}/${{ steps.create-job-name.outputs.job-name }}.tgz -C ${{ needs.setup.outputs.directory-path }}
+      - name: pull logs from s3 bucket
+        if: ${{ !cancelled() }}
+        run: |
+          # shellcheck disable=SC2157
+          if [ -z "${{ steps.retry-wf.outputs.workflow-name }}" ]; then
+            WORKFLOW_NAME=${{ steps.run-tests.outputs.workflow-name }}
+          else
+            WORKFLOW_NAME="${{ steps.retry-wf.outputs.workflow-name }}"
+          fi
+          echo "pulling logs"
+          mkdir -p ${{ needs.setup.outputs.directory-path }}/argo-logs
+          aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/workflows/${WORKFLOW_NAME}/ ${{ needs.setup.outputs.directory-path }}/argo-logs/ --recursive
+      - uses: actions/upload-artifact@v4
+        if: ${{ !cancelled() }}
+        with:
+          name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} tests artifacts
+          path: |
+            ${{ needs.setup.outputs.directory-path }}/test-results
+      - uses: actions/upload-artifact@v4
+        if: ${{ !cancelled() }}
+        with:
+          name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} tests logs
+          path: |
+            ${{ needs.setup.outputs.directory-path }}/argo-logs
+      - name: Upload cim-compliance-report for ${{ matrix.splunk.version }}
+        uses: actions/upload-artifact@v4
+        if: ${{ matrix.splunk.islatest == true }}
+        with:
+          name: cim-compliance-report
+          path: |
+            ${{ needs.setup.outputs.directory-path }}/test-results/cim-compliance-report.md
+      - name: Test Report
+        id: test_report
+        uses: dorny/[email protected]
+        if: ${{ !cancelled() }}
+        with:
+          name: splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} test report
+          path: "${{ needs.setup.outputs.directory-path }}/test-results/*.xml"
+          reporter: java-junit
+      - name: Parse JUnit XML
+        if: ${{ !cancelled() }}
+        run: |
+          apt-get install -y libxml2-utils
+          junit_xml_path="${{ needs.setup.outputs.directory-path }}/test-results"
+          junit_xml_file=$(find "$junit_xml_path" -name "*.xml" -type f 2>/dev/null | head -n 1)
+          if [ -n "$junit_xml_file" ]; then
+            total_tests=$(xmllint --xpath "count(//testcase)" "$junit_xml_file")
+            failures=$(xmllint --xpath "count(//testcase[failure])" "$junit_xml_file")
+            errors=$(xmllint --xpath "count(//testcase[error])" "$junit_xml_file")
+            skipped=$(xmllint --xpath "count(//testcase[skipped])" "$junit_xml_file")
+            passed=$((total_tests - failures - errors - skipped))
+            echo "splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} |$total_tests |$passed |$failures |$errors | $skipped |${{ steps.test_report.outputs.url_html }}" > job_summary.txt
+          else
+            echo "no XML file found, exiting"
+            exit 1
+          fi
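The XPath counts above classify any testcase containing a failure, error, or skipped child element accordingly; everything else counts as passed. A self-contained check against a tiny, invented JUnit report (the file name and contents are hypothetical, purely for illustration):

    #!/usr/bin/env bash
    # Requires xmllint (libxml2-utils). Write a minimal made-up report.
    cat > /tmp/sample-junit.xml <<'EOF'
    <testsuite tests="3">
      <testcase name="t1"/>
      <testcase name="t2"><failure message="boom"/></testcase>
      <testcase name="t3"><skipped/></testcase>
    </testsuite>
    EOF

    total=$(xmllint --xpath "count(//testcase)" /tmp/sample-junit.xml)            # 3
    failures=$(xmllint --xpath "count(//testcase[failure])" /tmp/sample-junit.xml) # 1
    skipped=$(xmllint --xpath "count(//testcase[skipped])" /tmp/sample-junit.xml)  # 1
    echo "total=$total failures=$failures skipped=$skipped"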
+      - name: Upload-artifact-for-github-summary
+        uses: actions/upload-artifact@v4
+        if: ${{ !cancelled() }}
+        with:
+          name: summary-ko-${{ matrix.splunk.version }}-${{ secrets.OTHER_TA_REQUIRED_CONFIGS }}
+          path: job_summary.txt
+      - name: pull diag from s3 bucket
+        if: ${{ failure() && steps.test_report.outputs.conclusion == 'failure' }}
+        run: |
+          echo "pulling diag"
+          aws s3 cp s3://${{ needs.setup.outputs.s3-bucket }}/diag-${{ steps.create-job-name.outputs.job-name }}/diag-${{ steps.create-job-name.outputs.job-name }}.tgz ${{ needs.setup.outputs.directory-path }}/
+      - uses: actions/upload-artifact@v4
+        if: ${{ failure() && steps.test_report.outputs.conclusion == 'failure' }}
+        with:
+          name: archive splunk ${{ matrix.splunk.version }}${{ secrets.OTHER_TA_REQUIRED_CONFIGS }} ${{ env.TEST_TYPE }} tests diag
+          path: |
+            ${{ needs.setup.outputs.directory-path }}/diag*
   run-knowledge-tests:
     if: ${{ !cancelled() && needs.build.result == 'success' && needs.test-inventory.outputs.knowledge == 'true' && needs.setup-workflow.outputs.execute-knowledge-labeled == 'true' }}
     needs: