From de2f1bb0bac1d293f2eff02d609225235419b85b Mon Sep 17 00:00:00 2001 From: Michael Sauter Date: Fri, 8 Sep 2023 15:45:56 +0200 Subject: [PATCH] Rip out everything that will be moved to other repos --- .github/workflows/main.yaml | 149 ++----- Makefile | 11 +- build/{package => images}/Dockerfile.finish | 0 .../Dockerfile.pipeline-manager | 0 build/{package => images}/Dockerfile.start | 0 build/package/Dockerfile.aqua-scan | 27 -- build/package/Dockerfile.go-toolset | 34 -- build/package/Dockerfile.gradle-toolset | 48 --- build/package/Dockerfile.helm | 73 ---- build/package/Dockerfile.node16-npm-toolset | 41 -- build/package/Dockerfile.node18-npm-toolset | 41 -- build/package/Dockerfile.package-image | 49 --- build/package/Dockerfile.python-toolset | 29 -- build/package/Dockerfile.sonar | 63 --- build/package/scripts/build-go.sh | 134 ------ build/package/scripts/build-gradle.sh | 105 ----- build/package/scripts/build-npm.sh | 107 ----- build/package/scripts/build-python.sh | 87 ---- .../package/scripts/download-aqua-scanner.sh | 45 -- ...supply-sonar-project-properties-default.sh | 18 - .../sonar-project.properties.d/go.properties | 7 - .../gradle.properties | 6 - .../sonar-project.properties.d/npm.properties | 5 - .../python.properties | 8 - cmd/aqua-scan/aqua.go | 107 ----- cmd/aqua-scan/aqua_test.go | 76 ---- cmd/aqua-scan/bitbucket.go | 56 --- cmd/aqua-scan/main.go | 77 ---- cmd/aqua-scan/skip.go | 11 - cmd/aqua-scan/steps.go | 97 ----- cmd/deploy-helm/age.go | 25 -- cmd/deploy-helm/helm.go | 185 --------- cmd/deploy-helm/helm_test.go | 299 -------------- cmd/deploy-helm/main.go | 136 ------ cmd/deploy-helm/skip.go | 11 - cmd/deploy-helm/skopeo.go | 67 --- cmd/deploy-helm/steps.go | 388 ------------------ cmd/deploy-helm/steps_test.go | 101 ----- cmd/docs/main.go | 20 - cmd/package-image/buildah.go | 157 ------- cmd/package-image/buildah_test.go | 171 -------- cmd/package-image/main.go | 167 -------- cmd/package-image/skip.go | 11 - 
cmd/package-image/skopeo_tag.go | 45 -- cmd/package-image/steps.go | 187 --------- cmd/package-image/trivy.go | 39 -- cmd/pipeline-manager/main.go | 10 - cmd/sonar/main.go | 219 ---------- cmd/sonar/main_test.go | 161 -------- cmd/tasks/main.go | 19 - deploy/{ods-pipeline => chart}/.gitignore | 0 .../charts/tasks => chart}/Chart.yaml | 5 +- .../setup => chart}/templates/_helpers.tpl | 12 - .../templates/configmap-bitbucket.yaml | 0 .../templates/configmap-cluster.yaml | 0 .../templates/configmap-nexus.yaml | 0 .../templates/configmap-notifications.yaml | 0 .../templates/configmap-pipeline.yaml | 0 .../setup => chart}/templates/deployment.yaml | 6 +- .../setup => chart}/templates/service.yaml | 0 .../templates/task-finish.yaml} | 10 +- .../templates/task-start.yaml} | 10 +- deploy/chart/values.kind.yaml | 14 + deploy/chart/values.yaml | 113 +++++ deploy/install.sh | 41 +- deploy/ods-pipeline/Chart.yaml | 32 -- deploy/ods-pipeline/charts/setup/Chart.yaml | 23 -- .../setup/templates/configmap-aqua.yaml | 9 - .../setup/templates/configmap-sonar.yaml | 9 - deploy/ods-pipeline/charts/setup/values.yaml | 2 - .../charts/tasks/templates/_helpers.tpl | 74 ---- .../charts/tasks/templates/_sonar-step.tpl | 52 --- .../tasks/templates/task-ods-build-go.yaml | 148 ------- .../templates/task-ods-build-gradle.yaml | 177 -------- .../tasks/templates/task-ods-build-npm.yaml | 148 ------- .../templates/task-ods-build-python.yaml | 144 ------- .../tasks/templates/task-ods-deploy-helm.yaml | 115 ------ .../templates/task-ods-package-image.yaml | 192 --------- .../charts/tasks/values.docs.yaml | 11 - deploy/ods-pipeline/charts/tasks/values.yaml | 5 - deploy/ods-pipeline/values.kind.yaml | 22 - deploy/ods-pipeline/values.yaml | 204 --------- deploy/values.yaml.tmpl | 58 +-- docs/installation.adoc | 2 +- go.mod | 4 +- go.sum | 5 +- internal/installation/bitbucket.go | 2 +- internal/installation/nexus.go | 2 +- internal/kubernetes/secrets.go | 6 +- internal/kubernetes/services.go | 82 ---- 
internal/kubernetes/volumes.go | 4 +- internal/manager/pipeline.go | 10 +- internal/manager/pipeline_test.go | 27 +- internal/manager/schedule.go | 7 +- pkg/exchange/image.go | 8 - pkg/exchange/test.go | 42 -- pkg/odstasktest/workspace.go | 1 + pkg/sonar/client.go | 123 ------ pkg/sonar/client_test.go | 49 --- pkg/sonar/compute_engine.go | 61 --- pkg/sonar/compute_engine_test.go | 57 --- pkg/sonar/quality_gate.go | 68 --- pkg/sonar/quality_gate_test.go | 75 ---- pkg/sonar/report.go | 75 ---- pkg/sonar/scan.go | 111 ----- pkg/sonar/scan_test.go | 24 -- pkg/tasktesting/bitbucket.go | 2 +- pkg/tasktesting/git.go | 4 +- pkg/tasktesting/nexus.go | 2 +- pkg/tektontaskrun/cluster.go | 1 + pkg/tektontaskrun/namespace_opt.go | 9 +- pkg/tektontaskrun/taskrun.go | 22 +- pkg/tektontaskrun/taskrun_opt.go | 17 +- scripts/install-inside-kind.sh | 23 +- scripts/run-nexus.sh | 2 +- scripts/waitfor-bitbucket.sh | 2 +- scripts/waitfor-nexus.sh | 2 +- scripts/waitfor-sonarqube.sh | 2 +- tasks/ods-build-go.yaml | 193 --------- tasks/ods-build-gradle.yaml | 222 ---------- tasks/ods-build-npm.yaml | 193 --------- tasks/ods-build-python.yaml | 189 --------- tasks/ods-deploy-helm.yaml | 115 ------ tasks/ods-finish.yaml | 87 ---- tasks/ods-package-image.yaml | 192 --------- tasks/ods-start.yaml | 160 -------- test/e2e/common_test.go | 7 + test/e2e/main_test.go | 118 ++++++ .../e2e/{e2e_test.go => pipeline_run_test.go} | 141 +++++-- test/e2e/task_finish_test.go | 198 +++++++++ test/e2e/task_start_test.go | 335 +++++++++++++++ test/tasks/common_test.go | 257 ------------ test/tasks/ods-aqua-scan_test.go | 24 -- test/tasks/ods-build-go_test.go | 285 ------------- test/tasks/ods-build-gradle_test.go | 143 ------- test/tasks/ods-build-npm_test.go | 196 --------- test/tasks/ods-build-python_test.go | 188 --------- test/tasks/ods-deploy-helm_external_test.go | 170 -------- test/tasks/ods-deploy-helm_test.go | 340 --------------- test/tasks/ods-finish_test.go | 190 --------- 
test/tasks/ods-package-image_test.go | 307 -------------- test/tasks/ods-start_test.go | 246 ----------- 142 files changed, 1015 insertions(+), 9979 deletions(-) rename build/{package => images}/Dockerfile.finish (100%) rename build/{package => images}/Dockerfile.pipeline-manager (100%) rename build/{package => images}/Dockerfile.start (100%) delete mode 100644 build/package/Dockerfile.aqua-scan delete mode 100644 build/package/Dockerfile.go-toolset delete mode 100644 build/package/Dockerfile.gradle-toolset delete mode 100644 build/package/Dockerfile.helm delete mode 100644 build/package/Dockerfile.node16-npm-toolset delete mode 100644 build/package/Dockerfile.node18-npm-toolset delete mode 100644 build/package/Dockerfile.package-image delete mode 100644 build/package/Dockerfile.python-toolset delete mode 100644 build/package/Dockerfile.sonar delete mode 100755 build/package/scripts/build-go.sh delete mode 100755 build/package/scripts/build-gradle.sh delete mode 100755 build/package/scripts/build-npm.sh delete mode 100755 build/package/scripts/build-python.sh delete mode 100755 build/package/scripts/download-aqua-scanner.sh delete mode 100755 build/package/scripts/supply-sonar-project-properties-default.sh delete mode 100644 build/package/sonar-project.properties.d/go.properties delete mode 100644 build/package/sonar-project.properties.d/gradle.properties delete mode 100644 build/package/sonar-project.properties.d/npm.properties delete mode 100644 build/package/sonar-project.properties.d/python.properties delete mode 100644 cmd/aqua-scan/aqua.go delete mode 100644 cmd/aqua-scan/aqua_test.go delete mode 100644 cmd/aqua-scan/bitbucket.go delete mode 100644 cmd/aqua-scan/main.go delete mode 100644 cmd/aqua-scan/skip.go delete mode 100644 cmd/aqua-scan/steps.go delete mode 100644 cmd/deploy-helm/age.go delete mode 100644 cmd/deploy-helm/helm.go delete mode 100644 cmd/deploy-helm/helm_test.go delete mode 100644 cmd/deploy-helm/main.go delete mode 100644 
cmd/deploy-helm/skip.go delete mode 100644 cmd/deploy-helm/skopeo.go delete mode 100644 cmd/deploy-helm/steps.go delete mode 100644 cmd/deploy-helm/steps_test.go delete mode 100644 cmd/docs/main.go delete mode 100644 cmd/package-image/buildah.go delete mode 100644 cmd/package-image/buildah_test.go delete mode 100644 cmd/package-image/main.go delete mode 100644 cmd/package-image/skip.go delete mode 100644 cmd/package-image/skopeo_tag.go delete mode 100644 cmd/package-image/steps.go delete mode 100644 cmd/package-image/trivy.go delete mode 100644 cmd/sonar/main.go delete mode 100644 cmd/sonar/main_test.go delete mode 100644 cmd/tasks/main.go rename deploy/{ods-pipeline => chart}/.gitignore (100%) rename deploy/{ods-pipeline/charts/tasks => chart}/Chart.yaml (92%) rename deploy/{ods-pipeline/charts/setup => chart}/templates/_helpers.tpl (84%) rename deploy/{ods-pipeline/charts/setup => chart}/templates/configmap-bitbucket.yaml (100%) rename deploy/{ods-pipeline/charts/setup => chart}/templates/configmap-cluster.yaml (100%) rename deploy/{ods-pipeline/charts/setup => chart}/templates/configmap-nexus.yaml (100%) rename deploy/{ods-pipeline/charts/setup => chart}/templates/configmap-notifications.yaml (100%) rename deploy/{ods-pipeline/charts/setup => chart}/templates/configmap-pipeline.yaml (100%) rename deploy/{ods-pipeline/charts/setup => chart}/templates/deployment.yaml (87%) rename deploy/{ods-pipeline/charts/setup => chart}/templates/service.yaml (100%) rename deploy/{ods-pipeline/charts/tasks/templates/task-ods-finish.yaml => chart/templates/task-finish.yaml} (88%) rename deploy/{ods-pipeline/charts/tasks/templates/task-ods-start.yaml => chart/templates/task-start.yaml} (94%) create mode 100644 deploy/chart/values.kind.yaml create mode 100644 deploy/chart/values.yaml delete mode 100644 deploy/ods-pipeline/Chart.yaml delete mode 100644 deploy/ods-pipeline/charts/setup/Chart.yaml delete mode 100644 deploy/ods-pipeline/charts/setup/templates/configmap-aqua.yaml 
delete mode 100644 deploy/ods-pipeline/charts/setup/templates/configmap-sonar.yaml delete mode 100644 deploy/ods-pipeline/charts/setup/values.yaml delete mode 100644 deploy/ods-pipeline/charts/tasks/templates/_helpers.tpl delete mode 100644 deploy/ods-pipeline/charts/tasks/templates/_sonar-step.tpl delete mode 100644 deploy/ods-pipeline/charts/tasks/templates/task-ods-build-go.yaml delete mode 100644 deploy/ods-pipeline/charts/tasks/templates/task-ods-build-gradle.yaml delete mode 100644 deploy/ods-pipeline/charts/tasks/templates/task-ods-build-npm.yaml delete mode 100644 deploy/ods-pipeline/charts/tasks/templates/task-ods-build-python.yaml delete mode 100644 deploy/ods-pipeline/charts/tasks/templates/task-ods-deploy-helm.yaml delete mode 100644 deploy/ods-pipeline/charts/tasks/templates/task-ods-package-image.yaml delete mode 100644 deploy/ods-pipeline/charts/tasks/values.docs.yaml delete mode 100644 deploy/ods-pipeline/charts/tasks/values.yaml delete mode 100644 deploy/ods-pipeline/values.kind.yaml delete mode 100644 deploy/ods-pipeline/values.yaml delete mode 100644 internal/kubernetes/services.go delete mode 100644 pkg/exchange/image.go delete mode 100644 pkg/exchange/test.go delete mode 100644 pkg/sonar/client.go delete mode 100644 pkg/sonar/client_test.go delete mode 100644 pkg/sonar/compute_engine.go delete mode 100644 pkg/sonar/compute_engine_test.go delete mode 100644 pkg/sonar/quality_gate.go delete mode 100644 pkg/sonar/quality_gate_test.go delete mode 100644 pkg/sonar/report.go delete mode 100644 pkg/sonar/scan.go delete mode 100644 pkg/sonar/scan_test.go delete mode 100644 tasks/ods-build-go.yaml delete mode 100644 tasks/ods-build-gradle.yaml delete mode 100644 tasks/ods-build-npm.yaml delete mode 100644 tasks/ods-build-python.yaml delete mode 100644 tasks/ods-deploy-helm.yaml delete mode 100644 tasks/ods-finish.yaml delete mode 100644 tasks/ods-package-image.yaml delete mode 100644 tasks/ods-start.yaml create mode 100644 test/e2e/common_test.go create 
mode 100644 test/e2e/main_test.go rename test/e2e/{e2e_test.go => pipeline_run_test.go} (63%) create mode 100644 test/e2e/task_finish_test.go create mode 100644 test/e2e/task_start_test.go delete mode 100644 test/tasks/common_test.go delete mode 100644 test/tasks/ods-aqua-scan_test.go delete mode 100644 test/tasks/ods-build-go_test.go delete mode 100644 test/tasks/ods-build-gradle_test.go delete mode 100644 test/tasks/ods-build-npm_test.go delete mode 100644 test/tasks/ods-build-python_test.go delete mode 100644 test/tasks/ods-deploy-helm_external_test.go delete mode 100644 test/tasks/ods-deploy-helm_test.go delete mode 100644 test/tasks/ods-finish_test.go delete mode 100644 test/tasks/ods-package-image_test.go delete mode 100644 test/tasks/ods-start_test.go diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 9072872d..23be5dc9 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -11,139 +11,44 @@ env: IMAGE_BASE: ${{ github.repository }} jobs: - build-images: - name: Build ODS images - runs-on: ubuntu-latest - strategy: - fail-fast: true - matrix: - image: ["aqua-scan", "finish", "go-toolset", "gradle-toolset", "helm", "node16-npm-toolset", "node18-npm-toolset", "package-image", "pipeline-manager", "python-toolset", "sonar", "start"] - steps: - - - name: Checkout - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - with: - driver-opts: | - image=moby/buildkit:master - network=host - - - name: Build image - uses: docker/build-push-action@v3 - with: - context: . 
- push: false - file: build/package/Dockerfile.${{ matrix.image }} - tags: localhost:5000/ods/ods-${{ matrix.image }}:latest - outputs: type=docker,dest=/tmp/image-ods-${{ matrix.image }}.tar - - - name: Upload artifacts - uses: actions/upload-artifact@v3 - with: - name: buildx-image-ods-${{ matrix.image }} - path: /tmp/image-ods-${{ matrix.image }}.tar - retention-days: 1 - pipeline-tests: name: Tests runs-on: ubuntu-latest - needs: build-images - env: - IMAGES: aqua-scan finish go-toolset gradle-toolset helm node16-npm-toolset node18-npm-toolset package-image pipeline-manager python-toolset sonar start steps: - - - name: Download image artifacts - uses: actions/download-artifact@v3 - with: - path: /tmp - name: Checkout uses: actions/checkout@v3 with: fetch-depth: 0 - - name: Setup KinD cluster with internal registry - working-directory: scripts - run: ./kind-with-registry.sh - - - name: Push images to local registry - run: | - images=(${{ env.IMAGES }}) - for image in ${images[*]} - do - echo "::group::Push ods-$image to local registry" - docker load --input /tmp/buildx-image-ods-$image/image-ods-$image.tar - docker push localhost:5000/ods/ods-$image:latest - if [[ "${{ github.event_name }}" == 'pull_request' ]] - then - docker rmi localhost:5000/ods/ods-$image:latest - fi - echo "::endgroup::" - done - - - name: Delete image tarballs - run: | - rm -rf /tmp/buildx-image-* - - - name: Setup kubectl - uses: azure/setup-kubectl@v3 - id: install - - - name: Install Tekton Core Components - run: make install-tekton-pipelines - - - name: Show disk space - run: df -h - - - name: Spin up Bitbucket container - run: make run-bitbucket - - - name: Spin up Nexus container - run: make run-nexus - - - name: Spin up SonarQube container - run: make run-sonarqube - - - name: Show disk space - run: df -h - - - name: Setup Go 1.19 - uses: actions/setup-go@v3 + name: Setup Go + uses: actions/setup-go@v4 with: - go-version: '1.19' - - - name: Check if docs are up-to-date - run: 
./.github/workflows/check-docs.sh + go-version: '1.21' + # - + # name: Check if docs are up-to-date + # run: ./.github/workflows/check-docs.sh - name: Run tests run: | - set -o pipefail - go test -v ./cmd/... | sed ''/PASS/s//$(printf "\033[32mPASS\033[0m")/'' | sed ''/FAIL/s//$(printf "\033[31mFAIL\033[0m")/'' - go test -v ./internal/... | sed ''/PASS/s//$(printf "\033[32mPASS\033[0m")/'' | sed ''/FAIL/s//$(printf "\033[31mFAIL\033[0m")/'' - go test -v ./pkg/... | sed ''/PASS/s//$(printf "\033[32mPASS\033[0m")/'' | sed ''/FAIL/s//$(printf "\033[31mFAIL\033[0m")/'' - go test -timeout 45m -v ./test/tasks/... -always-keep-tmp-workspaces | sed ''/PASS/s//$(printf "\033[32mPASS\033[0m")/'' | sed ''/FAIL/s//$(printf "\033[31mFAIL\033[0m")/'' - go test -timeout 10m -v ./test/e2e/... | sed ''/PASS/s//$(printf "\033[32mPASS\033[0m")/'' | sed ''/FAIL/s//$(printf "\033[31mFAIL\033[0m")/'' - - - name: Log into ghcr.io - if: ${{ github.event_name != 'pull_request' }} - uses: docker/login-action@v1 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Push images to ghcr.io - if: ${{ github.event_name != 'pull_request' }} - run: | - images=(${{ env.IMAGES }}) - for image in ${images[*]} - do - echo "::group::Push ods-$image to ghcr.io" - docker tag localhost:5000/ods/ods-$image:latest ghcr.io/${{ env.IMAGE_BASE }}/ods-$image:latest - docker push ghcr.io/${{ env.IMAGE_BASE }}/ods-$image:latest - echo "::endgroup::" - done + make test + # - + # name: Log into ghcr.io + # if: ${{ github.event_name != 'pull_request' }} + # uses: docker/login-action@v1 + # with: + # registry: ghcr.io + # username: ${{ github.actor }} + # password: ${{ secrets.GITHUB_TOKEN }} + # - + # name: Push images to ghcr.io + # if: ${{ github.event_name != 'pull_request' }} + # run: | + # images=(${{ env.IMAGES }}) + # for image in ${images[*]} + # do + # echo "::group::Push ods-$image to ghcr.io" + # docker tag localhost:5000/ods/ods-$image:latest 
ghcr.io/${{ env.IMAGE_BASE }}/ods-$image:latest + # docker push ghcr.io/${{ env.IMAGE_BASE }}/ods-$image:latest + # echo "::endgroup::" + # done diff --git a/Makefile b/Makefile index 4056a020..69145004 100644 --- a/Makefile +++ b/Makefile @@ -67,7 +67,7 @@ build-artifact-download-windows: ## Build artifact-download Windows binary. ##@ Testing -test: test-cmd test-internal test-pkg test-tasks test-e2e ## Run complete testsuite. +test: test-cmd test-internal test-pkg test-e2e ## Run complete testsuite. .PHONY: test test-cmd: ## Run testsuite of cmd packages. @@ -82,12 +82,9 @@ test-pkg: ## Run testsuite of public packages. go test -cover ./pkg/... .PHONY: test-pkg -test-tasks: ## Run testsuite of Tekton tasks. - go test -v -count=1 -timeout $${ODS_TESTTIMEOUT:-30m} ./test/tasks/... -.PHONY: test-tasks - -test-e2e: ## Run testsuite of end-to-end pipeline run. - go test -v -count=1 -timeout $${ODS_TESTTIMEOUT:-10m} ./test/e2e/... +test-e2e: ## Run testsuite of tasks and full pipeline run. + go test -v -count=1 -run ^TestPipelineRun ./test/e2e/... + go test -v -count=1 -skip ^TestPipelineRun ./test/e2e/... .PHONY: test-e2e clear-tmp-workspaces: ## Clear temporary workspaces created in testruns. 
diff --git a/build/package/Dockerfile.finish b/build/images/Dockerfile.finish similarity index 100% rename from build/package/Dockerfile.finish rename to build/images/Dockerfile.finish diff --git a/build/package/Dockerfile.pipeline-manager b/build/images/Dockerfile.pipeline-manager similarity index 100% rename from build/package/Dockerfile.pipeline-manager rename to build/images/Dockerfile.pipeline-manager diff --git a/build/package/Dockerfile.start b/build/images/Dockerfile.start similarity index 100% rename from build/package/Dockerfile.start rename to build/images/Dockerfile.start diff --git a/build/package/Dockerfile.aqua-scan b/build/package/Dockerfile.aqua-scan deleted file mode 100644 index 7dd630fc..00000000 --- a/build/package/Dockerfile.aqua-scan +++ /dev/null @@ -1,27 +0,0 @@ -FROM golang:1.19 as builder - -SHELL ["/bin/bash", "-o", "pipefail", "-c"] -USER root -WORKDIR /usr/src/app - -# Build Go binary. -COPY go.mod . -COPY go.sum . -RUN go mod download -COPY cmd cmd -COPY internal internal -COPY pkg pkg -RUN cd cmd/aqua-scan && CGO_ENABLED=0 go build -o /usr/local/bin/ods-aqua-scan - -# Final image -# ubi-micro cannot be used as it misses the ca-certificates package. 
-FROM registry.access.redhat.com/ubi8/ubi-minimal:8.4 - -COPY --from=builder /usr/local/bin/ods-aqua-scan /usr/local/bin/ods-aqua-scan - -# Add scripts -COPY build/package/scripts/download-aqua-scanner.sh /usr/local/bin/download-aqua-scanner - -VOLUME /workspace/source - -USER 1001 diff --git a/build/package/Dockerfile.go-toolset b/build/package/Dockerfile.go-toolset deleted file mode 100644 index e284cc6d..00000000 --- a/build/package/Dockerfile.go-toolset +++ /dev/null @@ -1,34 +0,0 @@ -FROM registry.access.redhat.com/ubi8/go-toolset:1.18 - -SHELL ["/bin/bash", "-o", "pipefail", "-c"] -USER root - -ENV GOLANGCI_LINT_VERSION=v1.45.2 \ - GO_JUNIT_REPORT_VERSION=v2.0.0 \ - GOBIN=/usr/local/bin - -RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/$GOLANGCI_LINT_VERSION/install.sh | sh -s -- -b /usr/local/bin $GOLANGCI_LINT_VERSION - -RUN go install github.com/jstemmer/go-junit-report/v2@$GO_JUNIT_REPORT_VERSION - -# Add scripts -COPY build/package/scripts/cache-build.sh /usr/local/bin/cache-build -COPY build/package/scripts/copy-build-if-cached.sh /usr/local/bin/copy-build-if-cached -COPY build/package/scripts/copy-artifacts.sh /usr/local/bin/copy-artifacts -COPY build/package/scripts/build-go.sh /usr/local/bin/build-go -COPY build/package/scripts/supply-sonar-project-properties-default.sh /usr/local/bin/supply-sonar-project-properties-default -RUN chmod +x /usr/local/bin/build-go && \ - chmod +x /usr/local/bin/cache-build && \ - chmod +x /usr/local/bin/copy-build-if-cached && \ - chmod +x /usr/local/bin/copy-artifacts && \ - chmod +x /usr/local/bin/supply-sonar-project-properties-default - -# Add sonar-project.properties -COPY build/package/sonar-project.properties.d/go.properties /usr/local/default-sonar-project.properties - -VOLUME /workspace/source -# Ensure that file permissions do not prevent Git checkout into workspace. -# See https://git-scm.com/docs/git-config/#Documentation/git-config.txt-safedirectory. 
-RUN git config --system --add safe.directory '/workspace/source' - -USER 1001 diff --git a/build/package/Dockerfile.gradle-toolset b/build/package/Dockerfile.gradle-toolset deleted file mode 100644 index 6bb9a334..00000000 --- a/build/package/Dockerfile.gradle-toolset +++ /dev/null @@ -1,48 +0,0 @@ -FROM registry.access.redhat.com/ubi8/openjdk-17:1.13 - -SHELL ["/bin/bash", "-o", "pipefail", "-c"] - -ENV GIT_VERSION=2.39 \ - GRADLE_VERSION=7.4.2 \ - GRADLE_USER_HOME=/workspace/source/.ods-cache/deps/gradle - -ARG GRADLE_DOWNLOAD_SHA256=29e49b10984e585d8118b7d0bc452f944e386458df27371b49b4ac1dec4b7fda -ARG GRADLE_WRAPPER_DOWNLOAD_SHA256=29e49b10984e585d8118b7d0bc452f944e386458df27371b49b4ac1dec4b7fda - -USER root - -RUN microdnf install --nodocs git-${GIT_VERSION}* && microdnf clean all - -# Install Gradle -RUN cd /opt && \ - curl -LO https://services.gradle.org/distributions/gradle-${GRADLE_VERSION}-bin.zip && \ - echo "Checking hash of downloaded gradle distribution" && \ - echo "${GRADLE_DOWNLOAD_SHA256} gradle-${GRADLE_VERSION}-bin.zip" | sha256sum -c - && \ - unzip -d /opt/gradle gradle-${GRADLE_VERSION}-bin.zip && \ - ln -s /opt/gradle/gradle-${GRADLE_VERSION}/bin/gradle /usr/local/bin/gradle && \ - rm gradle-${GRADLE_VERSION}-bin.zip && \ - gradle -v && \ - echo "Loading gradle cache with gradlew ${GRADLE_VERSION} distribution" && \ - mkdir -p /tmp/temp-gradle-app && cd /tmp/temp-gradle-app && touch settings.gradle && \ - gradle wrapper --gradle-distribution-sha256-sum ${GRADLE_WRAPPER_DOWNLOAD_SHA256} && ./gradlew -version && \ - chown -R 1001:0 /workspace/source $HOME && \ - chmod -R g=u /workspace/source $HOME - -VOLUME /workspace/source -# Ensure that file permissions do not prevent Git checkout into workspace. -# See https://git-scm.com/docs/git-config/#Documentation/git-config.txt-safedirectory. 
-RUN git config --system --add safe.directory '/workspace/source' - -# Add scripts -COPY build/package/scripts/cache-build.sh /usr/local/bin/cache-build -COPY build/package/scripts/copy-build-if-cached.sh /usr/local/bin/copy-build-if-cached -COPY build/package/scripts/copy-artifacts.sh /usr/local/bin/copy-artifacts -COPY build/package/scripts/build-gradle.sh /usr/local/bin/build-gradle -COPY build/package/scripts/supply-sonar-project-properties-default.sh /usr/local/bin/supply-sonar-project-properties-default -COPY build/package/scripts/configure-gradle.sh /usr/local/bin/configure-gradle -COPY build/package/scripts/configure-truststore.sh /usr/local/bin/configure-truststore - -# Add sonar-project.properties -COPY build/package/sonar-project.properties.d/gradle.properties /usr/local/default-sonar-project.properties - -USER 1001 diff --git a/build/package/Dockerfile.helm b/build/package/Dockerfile.helm deleted file mode 100644 index 8817f32c..00000000 --- a/build/package/Dockerfile.helm +++ /dev/null @@ -1,73 +0,0 @@ -FROM golang:1.19 as builder - -ARG TARGETARCH - -SHELL ["/bin/bash", "-o", "pipefail", "-c"] -USER root -WORKDIR /usr/src/app - -ENV HELM_VERSION=3.5.2 \ - SOPS_VERSION=3.7.1 \ - AGE_VERSION=1.0.0 \ - GOBIN=/usr/local/bin - -# Install Helm. -RUN mkdir -p /tmp/helm \ - && cd /tmp \ - && curl -LO https://get.helm.sh/helm-v${HELM_VERSION}-linux-${TARGETARCH}.tar.gz \ - && tar -zxvf helm-v${HELM_VERSION}-linux-${TARGETARCH}.tar.gz -C /tmp/helm \ - && mv /tmp/helm/linux-${TARGETARCH}/helm /usr/local/bin/helm \ - && chmod a+x /usr/local/bin/helm \ - && helm version \ - && helm env - -# Install sops. -RUN go install go.mozilla.org/sops/v3/cmd/sops@v${SOPS_VERSION} \ - && sops --version - -# Install age. -RUN go install filippo.io/age/cmd/...@v${AGE_VERSION} \ - && age --version - -# Build Go binary. -COPY go.mod . -COPY go.sum . 
-RUN go mod download -COPY cmd cmd -COPY internal internal -COPY pkg pkg -RUN cd cmd/deploy-helm && CGO_ENABLED=0 go build -o /usr/local/bin/deploy-helm - -# Final image -FROM registry.access.redhat.com/ubi8/ubi-minimal:8.4 - -ENV HELM_PLUGIN_DIFF_VERSION=3.3.2 \ - HELM_PLUGIN_SECRETS_VERSION=3.10.0 \ - HELM_PLUGINS=/usr/local/helm/plugins \ - SKOPEO_VERSION=1.11 \ - TAR_VERSION=1.30 \ - GIT_VERSION=2.39 \ - FINDUTILS_VERSION=4.6 - -# helm-secrets depends on xargs (from GNU findutils) in it's signal handlers, -# c.f. https://github.com/jkroepke/helm-secrets/blob/main/scripts/commands/helm.sh#L34-L36 -RUN microdnf install --nodocs skopeo-${SKOPEO_VERSION}* git-${GIT_VERSION}* tar-${TAR_VERSION}* findutils-${FINDUTILS_VERSION}* && microdnf clean all - -COPY --from=builder /usr/local/bin/deploy-helm /usr/local/bin/deploy-helm -COPY --from=builder /usr/local/bin/helm /usr/local/bin/helm -COPY --from=builder /usr/local/bin/sops /usr/local/bin/sops -COPY --from=builder /usr/local/bin/age /usr/local/bin/age - -RUN mkdir -p $HELM_PLUGINS \ - && HELM_DATA_HOME=${HELM_PLUGINS%/*} helm plugin install https://github.com/databus23/helm-diff --version v${HELM_PLUGIN_DIFF_VERSION} \ - && HELM_DATA_HOME=${HELM_PLUGINS%/*} helm plugin install https://github.com/jkroepke/helm-secrets --version v${HELM_PLUGIN_SECRETS_VERSION} \ - && ls -lah $HELM_PLUGINS \ - && sops --version \ - && age --version - -VOLUME /workspace/source -# Ensure that file permissions do not prevent Git checkout into workspace. -# See https://git-scm.com/docs/git-config/#Documentation/git-config.txt-safedirectory. 
-RUN git config --system --add safe.directory '/workspace/source' - -USER 1001 diff --git a/build/package/Dockerfile.node16-npm-toolset b/build/package/Dockerfile.node16-npm-toolset deleted file mode 100644 index a38daac9..00000000 --- a/build/package/Dockerfile.node16-npm-toolset +++ /dev/null @@ -1,41 +0,0 @@ -FROM registry.access.redhat.com/ubi8/nodejs-16:1 - -SHELL ["/bin/bash", "-o", "pipefail", "-c"] - -ENV NPM_CONFIG_PREFIX=$HOME/.npm-global \ - LANG=en_US.UTF-8 \ - LC_ALL=en_US.UTF-8 - -RUN echo node version: $(node --version) && \ - echo npm version: $(npm --version) && \ - echo npx version: $(npx --version) - -WORKDIR /app - -USER root - -RUN mkdir -p /.npm $HOME/.npm-global/lib && \ - chown -R 1001:0 /app /.npm $HOME && \ - chmod -R g=u /app /.npm $HOME - -# Add scripts -COPY build/package/scripts/build-npm.sh /usr/local/bin/build-npm -COPY build/package/scripts/cache-build.sh /usr/local/bin/cache-build -COPY build/package/scripts/copy-build-if-cached.sh /usr/local/bin/copy-build-if-cached -COPY build/package/scripts/copy-artifacts.sh /usr/local/bin/copy-artifacts -COPY build/package/scripts/supply-sonar-project-properties-default.sh /usr/local/bin/supply-sonar-project-properties-default -RUN chmod +x /usr/local/bin/build-npm && \ - chmod +x /usr/local/bin/cache-build && \ - chmod +x /usr/local/bin/copy-build-if-cached && \ - chmod +x /usr/local/bin/copy-artifacts && \ - chmod +x /usr/local/bin/supply-sonar-project-properties-default - -# Add sonar-project.properties -COPY build/package/sonar-project.properties.d/npm.properties /usr/local/default-sonar-project.properties - -VOLUME /workspace/source -# Ensure that file permissions do not prevent Git checkout into workspace. -# See https://git-scm.com/docs/git-config/#Documentation/git-config.txt-safedirectory. 
-RUN git config --system --add safe.directory '/workspace/source' - -USER 1001 diff --git a/build/package/Dockerfile.node18-npm-toolset b/build/package/Dockerfile.node18-npm-toolset deleted file mode 100644 index 767eb4db..00000000 --- a/build/package/Dockerfile.node18-npm-toolset +++ /dev/null @@ -1,41 +0,0 @@ -FROM registry.access.redhat.com/ubi8/nodejs-18:1 - -SHELL ["/bin/bash", "-o", "pipefail", "-c"] - -ENV NPM_CONFIG_PREFIX=$HOME/.npm-global \ - LANG=en_US.UTF-8 \ - LC_ALL=en_US.UTF-8 - -RUN echo node version: $(node --version) && \ - echo npm version: $(npm --version) && \ - echo npx version: $(npx --version) - -WORKDIR /app - -USER root - -RUN mkdir -p /.npm $HOME/.npm-global/lib && \ - chown -R 1001:0 /app /.npm $HOME && \ - chmod -R g=u /app /.npm $HOME - -# Add scripts -COPY build/package/scripts/build-npm.sh /usr/local/bin/build-npm -COPY build/package/scripts/cache-build.sh /usr/local/bin/cache-build -COPY build/package/scripts/copy-build-if-cached.sh /usr/local/bin/copy-build-if-cached -COPY build/package/scripts/copy-artifacts.sh /usr/local/bin/copy-artifacts -COPY build/package/scripts/supply-sonar-project-properties-default.sh /usr/local/bin/supply-sonar-project-properties-default -RUN chmod +x /usr/local/bin/build-npm && \ - chmod +x /usr/local/bin/cache-build && \ - chmod +x /usr/local/bin/copy-build-if-cached && \ - chmod +x /usr/local/bin/copy-artifacts && \ - chmod +x /usr/local/bin/supply-sonar-project-properties-default - -# Add sonar-project.properties -COPY build/package/sonar-project.properties.d/npm.properties /usr/local/default-sonar-project.properties - -VOLUME /workspace/source -# Ensure that file permissions do not prevent Git checkout into workspace. -# See https://git-scm.com/docs/git-config/#Documentation/git-config.txt-safedirectory. 
-RUN git config --system --add safe.directory '/workspace/source' - -USER 1001 diff --git a/build/package/Dockerfile.package-image b/build/package/Dockerfile.package-image deleted file mode 100644 index b0806148..00000000 --- a/build/package/Dockerfile.package-image +++ /dev/null @@ -1,49 +0,0 @@ -FROM golang:1.19 as builder - -SHELL ["/bin/bash", "-o", "pipefail", "-c"] -USER root -WORKDIR /usr/src/app - -# Build Go binary. -COPY go.mod . -COPY go.sum . -RUN go mod download -COPY cmd cmd -COPY internal internal -COPY pkg pkg -RUN cd cmd/package-image && CGO_ENABLED=0 go build -o /usr/local/bin/ods-package-image - -# Final image -# Based on https://catalog.redhat.com/software/containers/detail/5dca3d76dd19c71643b226d5?container-tabs=dockerfile. -FROM registry.access.redhat.com/ubi8:8.7 - -ENV BUILDAH_VERSION=1.29 \ - SKOPEO_VERSION=1.11 \ - TRIVY_VERSION=0.36.0 - -COPY --from=builder /usr/local/bin/ods-package-image /usr/local/bin/ods-package-image - -# Don't include container-selinux and remove -# directories used by yum that are just taking -# up space. -RUN useradd build; \ - dnf -y module enable container-tools:rhel8; \ - dnf -y update; dnf -y reinstall shadow-utils; \ - dnf -y install skopeo-${SKOPEO_VERSION}* buildah-${BUILDAH_VERSION}* fuse-overlayfs /etc/containers/storage.conf; \ - rm -rf /var/cache /var/log/dnf* /var/log/yum.* - -# Adjust storage.conf to enable Fuse storage. -RUN sed -i -e 's|^#mount_program|mount_program|g' -e '/additionalimage.*/a "/var/lib/shared",' /etc/containers/storage.conf -RUN mkdir -p /var/lib/shared/overlay-images /var/lib/shared/overlay-layers; touch /var/lib/shared/overlay-images/images.lock; touch /var/lib/shared/overlay-layers/layers.lock - -# Set up environment variables to note that this is -# not starting with usernamespace and default to -# isolate the filesystem with chroot. 
-ENV _BUILDAH_STARTED_IN_USERNS="" BUILDAH_ISOLATION=chroot - -VOLUME /var/lib/containers -VOLUME /home/build/.local/share/containers -VOLUME /workspace/source - -# Install Trivy -RUN curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh -s -- -b /usr/local/bin "v${TRIVY_VERSION}" diff --git a/build/package/Dockerfile.python-toolset b/build/package/Dockerfile.python-toolset deleted file mode 100644 index fcc074d6..00000000 --- a/build/package/Dockerfile.python-toolset +++ /dev/null @@ -1,29 +0,0 @@ -FROM registry.access.redhat.com/ubi8/python-39:1 - -SHELL ["/bin/bash", "-o", "pipefail", "-c"] - -RUN pip3 config set global.cert /etc/ssl/certs/ca-bundle.crt - -USER root - -# Add scripts -COPY build/package/scripts/build-python.sh /usr/local/bin/build-python -COPY build/package/scripts/cache-build.sh /usr/local/bin/cache-build -COPY build/package/scripts/copy-build-if-cached.sh /usr/local/bin/copy-build-if-cached -COPY build/package/scripts/copy-artifacts.sh /usr/local/bin/copy-artifacts -COPY build/package/scripts/supply-sonar-project-properties-default.sh /usr/local/bin/supply-sonar-project-properties-default -RUN chmod +x /usr/local/bin/build-python && \ - chmod +x /usr/local/bin/cache-build && \ - chmod +x /usr/local/bin/copy-build-if-cached && \ - chmod +x /usr/local/bin/copy-artifacts && \ - chmod +x /usr/local/bin/supply-sonar-project-properties-default - -# Add sonar-project.properties -COPY build/package/sonar-project.properties.d/python.properties /usr/local/default-sonar-project.properties - -VOLUME /workspace/source -# Ensure that file permissions do not prevent Git checkout into workspace. -# See https://git-scm.com/docs/git-config/#Documentation/git-config.txt-safedirectory. 
-RUN git config --system --add safe.directory '/workspace/source' - -USER 1001 diff --git a/build/package/Dockerfile.sonar b/build/package/Dockerfile.sonar deleted file mode 100644 index 6c71ce9a..00000000 --- a/build/package/Dockerfile.sonar +++ /dev/null @@ -1,63 +0,0 @@ -FROM golang:1.19 as builder - -SHELL ["/bin/bash", "-o", "pipefail", "-c"] -USER root -WORKDIR /usr/src/app - -ENV SONAR_SCANNER_VERSION=4.8.0.2856 \ - CNES_REPORT_VERSION=4.2.0 - -# Build Go binary. -COPY go.mod . -COPY go.sum . -RUN go mod download -COPY cmd cmd -COPY internal internal -COPY pkg pkg -RUN cd cmd/sonar && CGO_ENABLED=0 go build -o /usr/local/bin/sonar - -# Install Sonar Scanner. -RUN apt-get update && apt-get install -y unzip \ - && cd /tmp \ - && curl -LO https://repo1.maven.org/maven2/org/sonarsource/scanner/cli/sonar-scanner-cli/${SONAR_SCANNER_VERSION}/sonar-scanner-cli-${SONAR_SCANNER_VERSION}.zip \ - && unzip sonar-scanner-cli-${SONAR_SCANNER_VERSION}.zip \ - && mv sonar-scanner-${SONAR_SCANNER_VERSION} /usr/local/sonar-scanner-cli - -# Install CNES report. 
-RUN cd /tmp \ - && curl -L https://github.com/cnescatlab/sonar-cnes-report/releases/download/${CNES_REPORT_VERSION}/sonar-cnes-report-${CNES_REPORT_VERSION}.jar -o cnesreport.jar \ - && mkdir /usr/local/cnes \ - && mv cnesreport.jar /usr/local/cnes/cnesreport.jar \ - && chmod +x /usr/local/cnes/cnesreport.jar - -# Final image -FROM registry.access.redhat.com/ubi8/nodejs-18:1 - -ENV NPM_CONFIG_PREFIX=$HOME/.npm-global \ - LANG=en_US.UTF-8 \ - LC_ALL=en_US.UTF-8 - -RUN echo id: $(id) && \ - echo node version: $(node --version) && \ - echo npm version: $(npm --version) && \ - echo npx version: $(npx --version) - -ENV SONAR_EDITION="community" \ - JAVA_HOME=/usr/lib/jvm/jre-11 - -USER root -RUN INSTALL_PKGS="java-11-openjdk-headless which" && \ - yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS && \ - rpm -V $INSTALL_PKGS && \ - yum -y clean all --enablerepo='*' - -COPY --from=builder /usr/local/bin/sonar /usr/local/bin/sonar -COPY --from=builder /usr/local/sonar-scanner-cli /usr/local/sonar-scanner-cli -COPY --from=builder /usr/local/cnes/cnesreport.jar /usr/local/cnes/cnesreport.jar -COPY build/package/scripts/configure-truststore.sh /usr/local/bin/configure-truststore - -ENV PATH=/usr/local/sonar-scanner-cli/bin:$PATH - -VOLUME /workspace/source - -USER 1001 diff --git a/build/package/scripts/build-go.sh b/build/package/scripts/build-go.sh deleted file mode 100755 index 4deddf89..00000000 --- a/build/package/scripts/build-go.sh +++ /dev/null @@ -1,134 +0,0 @@ -#!/bin/bash -set -eu - -copyLintReport() { - cat golangci-lint-report.txt - mkdir -p "${tmp_artifacts_dir}/lint-reports" - cp golangci-lint-report.txt "${tmp_artifacts_dir}/lint-reports/${ARTIFACT_PREFIX}report.txt" -} - -ENABLE_CGO="false" -GO_OS="" -GO_ARCH="" -OUTPUT_DIR="docker" -WORKING_DIR="." 
-ARTIFACT_PREFIX="" -PRE_TEST_SCRIPT="" -DEBUG="${DEBUG:-false}" - -while [ "$#" -gt 0 ]; do - case $1 in - - --working-dir) WORKING_DIR="$2"; shift;; - --working-dir=*) WORKING_DIR="${1#*=}";; - - --enable-cgo) ENABLE_CGO="$2"; shift;; - --enable-cgo=*) ENABLE_CGO="${1#*=}";; - - --go-os) GO_OS="$2"; shift;; - --go-os=*) GO_OS="${1#*=}";; - - --go-arch) GO_ARCH="$2"; shift;; - --go-arch=*) GO_ARCH="${1#*=}";; - - --output-dir) OUTPUT_DIR="$2"; shift;; - --output-dir=*) OUTPUT_DIR="${1#*=}";; - - --pre-test-script) PRE_TEST_SCRIPT="$2"; shift;; - --pre-test-script=*) PRE_TEST_SCRIPT="${1#*=}";; - - --debug) DEBUG="$2"; shift;; - --debug=*) DEBUG="${1#*=}";; - - *) echo "Unknown parameter passed: $1"; exit 1;; -esac; shift; done - -if [ "${DEBUG}" == "true" ]; then - set -x -fi - -ROOT_DIR=$(pwd) -tmp_artifacts_dir="${ROOT_DIR}/.ods/tmp-artifacts" -# tmp_artifacts_dir enables keeping artifacts created by this build -# separate from other builds in the same repo to facilitate caching. -rm -rf "${tmp_artifacts_dir}" -if [ "${WORKING_DIR}" != "." ]; then - cd "${WORKING_DIR}" - ARTIFACT_PREFIX="${WORKING_DIR/\//-}-" -fi - -echo "Working on Go module in $(pwd) ..." - -go version -if [ "${ENABLE_CGO}" = "false" ]; then - export CGO_ENABLED=0 -fi -if [ -n "${GO_OS}" ]; then - export GOOS="${GO_OS}" -fi -if [ -n "${GO_ARCH}" ]; then - export GOARCH="${GO_ARCH}" -fi -export GOMODCACHE="$ROOT_DIR/.ods-cache/deps/gomod" -echo INFO: Using gomodule cache on repo pvc -echo GOMODCACHE="$GOMODCACHE" -df -h "$ROOT_DIR" - -echo "Checking format ..." -# shellcheck disable=SC2046 -unformatted=$(go fmt $(go list ./...)) -if [ -n "${unformatted}" ]; then - echo "Unformatted files:" - echo "${unformatted}" - echo "All files need to be gofmt'd. Please run: gofmt -w ." - exit 1 -fi - -echo "Linting ..." -golangci-lint version -set +e -rm golangci-lint-report.txt &>/dev/null -golangci-lint run > golangci-lint-report.txt -exitcode=$? 
-set -e -if [ $exitcode == 0 ]; then - echo "OK" > golangci-lint-report.txt - copyLintReport -else - copyLintReport - exit $exitcode -fi - -echo "Testing ..." -if [ -n "${PRE_TEST_SCRIPT}" ]; then - echo "Executing pre-test script ..." - ./"${PRE_TEST_SCRIPT}" -fi -GOPKGS=$(go list ./... | grep -v /vendor) -set +e -rm coverage.out test-results.txt report.xml &>/dev/null -go test -v -coverprofile=coverage.out "$GOPKGS" > test-results.txt 2>&1 -exitcode=$? -set -e -df -h "$ROOT_DIR" -if [ -f test-results.txt ]; then - cat test-results.txt - go-junit-report < test-results.txt > report.xml - mkdir -p "${tmp_artifacts_dir}/xunit-reports" - cp report.xml "${tmp_artifacts_dir}/xunit-reports/${ARTIFACT_PREFIX}report.xml" -else - echo "No test results found" - exit 1 -fi -if [ -f coverage.out ]; then - mkdir -p "${tmp_artifacts_dir}/code-coverage" - cp coverage.out "${tmp_artifacts_dir}/code-coverage/${ARTIFACT_PREFIX}coverage.out" -else - echo "No code coverage found" - exit 1 -fi -if [ $exitcode != 0 ]; then - exit $exitcode -fi -echo "Building ..." -go build -gcflags "all=-trimpath=$(pwd)" -o "${OUTPUT_DIR}/app" diff --git a/build/package/scripts/build-gradle.sh b/build/package/scripts/build-gradle.sh deleted file mode 100755 index 65bdb83d..00000000 --- a/build/package/scripts/build-gradle.sh +++ /dev/null @@ -1,105 +0,0 @@ -#!/bin/bash -set -eu - -# the copy commands are based on GNU cp tools -# On a mac `brew install coreutils` gives `g` prefixed cmd line tools such as gcp -# to use these define env variable GNU_CP=gcp before invoking this script. -CP="${GNU_CP:-cp}" - -output_dir="docker" -working_dir="." 
-artifact_prefix="" -debug="${DEBUG:-false}" -gradle_build_dir="build" -gradle_additional_tasks= -gradle_options= - -while [ "$#" -gt 0 ]; do - case $1 in - - --working-dir) working_dir="$2"; shift;; - --working-dir=*) working_dir="${1#*=}";; - - --output-dir) output_dir="$2"; shift;; - --output-dir=*) output_dir="${1#*=}";; - - --gradle-build-dir) gradle_build_dir="$2"; shift;; - --gradle-build-dir=*) gradle_build_dir="${1#*=}";; - - --gradle-additional-tasks) gradle_additional_tasks="$2"; shift;; - --gradle-additional-tasks=*) gradle_additional_tasks="${1#*=}";; - - # Gradle project properties ref: https://docs.gradle.org/7.4.2/userguide/build_environment.html#sec:gradle_configuration_properties - # Gradle options ref: https://docs.gradle.org/7.4.2/userguide/command_line_interface.html - --gradle-options) gradle_options="$2"; shift;; - --gradle-options=*) gradle_options="${1#*=}";; - - *) echo "Unknown parameter passed: $1"; exit 1;; -esac; shift; done - -root_dir=$(pwd) -tmp_artifacts_dir="${root_dir}/.ods/tmp-artifacts" -# tmp_artifacts_dir enables keeping artifacts created by this build -# separate from other builds in the same repo to facilitate caching. -rm -rf "${tmp_artifacts_dir}" -if [ "${working_dir}" != "." ]; then - cd "${working_dir}" - artifact_prefix="${working_dir/\//-}-" -fi - -if [ "${debug}" == "true" ]; then - set -x -fi - -echo "Using NEXUS_URL=$NEXUS_URL" -echo "Using GRADLE_OPTS=$GRADLE_OPTS" -echo "Using GRADLE_USER_HOME=$GRADLE_USER_HOME" -mkdir -p "${GRADLE_USER_HOME}" - -configure-gradle - -echo -echo "Working on Gradle project in '${working_dir}'..." -echo -echo "Gradlew version: " -./gradlew -version -echo -echo "Note on build environment variables available:" -echo -echo " ODS_OUTPUT_DIR: this environment variable points to the folder " -echo " that this build expects generated application artifacts to be copied to." -echo " The project gradle script should read this env var to copy all the " -echo " generated application artifacts." 
-echo -export ODS_OUTPUT_DIR=${output_dir} -echo "Exported env var 'ODS_OUTPUT_DIR' with value '${output_dir}'" -echo -echo "Building (Compile and Test) ..." -# shellcheck disable=SC2086 -./gradlew clean build ${gradle_additional_tasks} ${gradle_options} -echo - -echo "Verifying unit test report was generated ..." -unit_test_result_dir="${gradle_build_dir}/test-results/test" -if [ -d "${unit_test_result_dir}" ]; then - unit_test_artifacts_dir="${tmp_artifacts_dir}/xunit-reports" - mkdir -p "${unit_test_artifacts_dir}" - # Each test class produces its own report file, but they contain a fully qualified class - # name in their file name. Due to that, we do not need to add an artifact prefix to - # distinguish them with reports from other artifacts of the same repo/pipeline build. - "$CP" "${unit_test_result_dir}/"*.xml "${unit_test_artifacts_dir}" -else - echo "Build failed: no unit test results found in ${unit_test_result_dir}" - exit 1 -fi - -echo "Verifying unit test coverage report was generated ..." 
-coverage_result_dir="${gradle_build_dir}/reports/jacoco/test" -if [ -d "${coverage_result_dir}" ]; then - code_coverage_artifacts_dir="${tmp_artifacts_dir}/code-coverage" - mkdir -p "${code_coverage_artifacts_dir}" - "$CP" "${coverage_result_dir}/jacocoTestReport.xml" "${code_coverage_artifacts_dir}/${artifact_prefix}coverage.xml" -else - echo "Build failed: no unit test coverage report was found in ${coverage_result_dir}" - exit 1 -fi diff --git a/build/package/scripts/build-npm.sh b/build/package/scripts/build-npm.sh deleted file mode 100755 index eb3db36d..00000000 --- a/build/package/scripts/build-npm.sh +++ /dev/null @@ -1,107 +0,0 @@ -#!/bin/bash -set -eu - -urlencode() { - local LC_COLLATE=C - local length="${#1}" - for (( i = 0; i < length; i++ )); do - local c="${1:$i:1}" - case $c in - [a-zA-Z0-9.~_-]) printf '%s' "$c" ;; - *) printf '%%%02X' "'$c" ;; - esac - done -} - -copyLintReport() { - cat eslint-report.txt - mkdir -p "${tmp_artifacts_dir}/lint-reports" - cp eslint-report.txt "${tmp_artifacts_dir}/lint-reports/${ARTIFACT_PREFIX}report.txt" -} - -WORKING_DIR="." -ARTIFACT_PREFIX="" -DEBUG="${DEBUG:-false}" - -while [ "$#" -gt 0 ]; do - case $1 in - - --working-dir) WORKING_DIR="$2"; shift;; - --working-dir=*) WORKING_DIR="${1#*=}";; - - --debug) DEBUG="$2"; shift;; - --debug=*) DEBUG="${1#*=}";; - - *) echo "Unknown parameter passed: $1"; exit 1;; -esac; shift; done - -if [ "${DEBUG}" == "true" ]; then - set -x -fi - -ROOT_DIR=$(pwd) -tmp_artifacts_dir="${ROOT_DIR}/.ods/tmp-artifacts" -# tmp_artifacts_dir enables keeping artifacts created by this build -# separate from other builds in the same repo to facilitate caching. -rm -rf "${tmp_artifacts_dir}" -if [ "${WORKING_DIR}" != "." ]; then - cd "${WORKING_DIR}" - ARTIFACT_PREFIX="${WORKING_DIR/\//-}-" -fi - -echo "Configuring npm to use Nexus (${NEXUS_URL}) ..." 
-# Remove the protocol segment from NEXUS_URL -NEXUS_HOST=$(echo "${NEXUS_URL}" | sed -E 's/^\s*.*:\/\///g') -if [ -n "${NEXUS_URL}" ] && [ -n "${NEXUS_USERNAME}" ] && [ -n "${NEXUS_PASSWORD}" ]; then - NEXUS_AUTH="$(urlencode "${NEXUS_USERNAME}"):$(urlencode "${NEXUS_PASSWORD}")" - npm config set registry="$NEXUS_URL"/repository/npmjs/ - npm config set "//${NEXUS_HOST}/repository/npmjs/:_auth"="$(echo -n "$NEXUS_AUTH" | base64)" - npm config set email=no-reply@opendevstack.org - if [ -f /etc/ssl/certs/private-cert.pem ]; then - echo "Configuring private cert ..." - npm config set cafile=/etc/ssl/certs/private-cert.pem - fi -fi; - -echo "package-*.json checks ..." -if [ ! -f package.json ]; then - echo "File package.json not found" - exit 1 -fi -if [ ! -f package-lock.json ]; then - echo "File package-lock.json not found" - exit 1 -fi - -echo "Installing dependencies ..." -npm ci --ignore-scripts - -echo "Linting ..." -set +e -npm run lint > eslint-report.txt -exitcode=$? -set -e - -if [ $exitcode == 0 ]; then - echo "OK" > eslint-report.txt - copyLintReport -else - copyLintReport - exit $exitcode -fi - -echo "Building ..." -npm run build - -echo "Testing ..." 
-npm run test - -mkdir -p "${tmp_artifacts_dir}/xunit-reports" -cp build/test-results/test/report.xml "${tmp_artifacts_dir}/xunit-reports/${ARTIFACT_PREFIX}report.xml" - -mkdir -p "${tmp_artifacts_dir}/code-coverage" -cp build/coverage/clover.xml "${tmp_artifacts_dir}/code-coverage/${ARTIFACT_PREFIX}clover.xml" - -cp build/coverage/coverage-final.json "${tmp_artifacts_dir}/code-coverage/${ARTIFACT_PREFIX}coverage-final.json" - -cp build/coverage/lcov.info "${tmp_artifacts_dir}/code-coverage/${ARTIFACT_PREFIX}lcov.info" diff --git a/build/package/scripts/build-python.sh b/build/package/scripts/build-python.sh deleted file mode 100755 index ef2c12da..00000000 --- a/build/package/scripts/build-python.sh +++ /dev/null @@ -1,87 +0,0 @@ -#!/bin/bash -set -eu - -urlencode() { - local LC_COLLATE=C - local length="${#1}" - for (( i = 0; i < length; i++ )); do - local c="${1:$i:1}" - case $c in - [a-zA-Z0-9.~_-]) printf '%s' "$c" ;; - *) printf '%%%02X' "'$c" ;; - esac - done -} - -MAX_LINE_LENGTH="120" -WORKING_DIR="." -ARTIFACT_PREFIX="" -PRE_TEST_SCRIPT="" -DEBUG="${DEBUG:-false}" - -while [ "$#" -gt 0 ]; do - case $1 in - - --working-dir) WORKING_DIR="$2"; shift;; - --working-dir=*) WORKING_DIR="${1#*=}";; - - --max-line-length) MAX_LINE_LENGTH="$2"; shift;; - --max-line-length=*) MAX_LINE_LENGTH="${1#*=}";; - - --pre-test-script) PRE_TEST_SCRIPT="$2"; shift;; - --pre-test-script=*) PRE_TEST_SCRIPT="${1#*=}";; - - --debug) DEBUG="$2"; shift;; - --debug=*) DEBUG="${1#*=}";; - - *) echo "Unknown parameter passed: $1"; exit 1;; -esac; shift; done - -if [ "${DEBUG}" == "true" ]; then - set -x -fi - -ROOT_DIR=$(pwd) -tmp_artifacts_dir="${ROOT_DIR}/.ods/tmp-artifacts" -# tmp_artifacts_dir enables keeping artifacts created by this build -# separate from other builds in the same repo to facilitate caching. -rm -rf "${tmp_artifacts_dir}" -if [ "${WORKING_DIR}" != "." 
]; then - cd "${WORKING_DIR}" - ARTIFACT_PREFIX="${WORKING_DIR/\//-}-" -fi - -echo "Configuring pip to use Nexus (${NEXUS_URL}) ..." -# Remove the protocol segment from NEXUS_URL -NEXUS_HOST=$(echo "${NEXUS_URL}" | sed -E 's/^\s*.*:\/\///g') -if [ -n "${NEXUS_HOST}" ] && [ -n "${NEXUS_USERNAME}" ] && [ -n "${NEXUS_PASSWORD}" ]; then - NEXUS_AUTH="$(urlencode "${NEXUS_USERNAME}"):$(urlencode "${NEXUS_PASSWORD}")" - NEXUS_URL_WITH_AUTH="$(echo "${NEXUS_URL}" | sed -E 's/:\/\//:\/\/'"${NEXUS_AUTH}"@'/g')" - pip3 config set global.index-url "${NEXUS_URL_WITH_AUTH}"/repository/pypi-all/simple - pip3 config set global.trusted-host "${NEXUS_HOST}" - pip3 config set global.extra-index-url https://pypi.org/simple -fi; - -echo "Installing test requirements ..." -# shellcheck source=/dev/null -pip install --upgrade pip -pip install -r tests_requirements.txt -pip check - -echo "Linting ..." -mypy src -flake8 --max-line-length="${MAX_LINE_LENGTH}" src - -if [ -n "${PRE_TEST_SCRIPT}" ]; then - echo "Executing pre-test script ..." - ./"${PRE_TEST_SCRIPT}" -fi - -echo "Testing ..." 
-rm report.xml coverage.xml &>/dev/null || true -PYTHONPATH=src python -m pytest --junitxml=report.xml -o junit_family=xunit2 --cov-report term-missing --cov-report xml:coverage.xml --cov=src -o testpaths=tests - -mkdir -p "${tmp_artifacts_dir}/xunit-reports" -cp report.xml "${tmp_artifacts_dir}/xunit-reports/${ARTIFACT_PREFIX}report.xml" -mkdir -p "${tmp_artifacts_dir}/code-coverage" -cp coverage.xml "${tmp_artifacts_dir}/code-coverage/${ARTIFACT_PREFIX}coverage.xml" diff --git a/build/package/scripts/download-aqua-scanner.sh b/build/package/scripts/download-aqua-scanner.sh deleted file mode 100755 index 29e81300..00000000 --- a/build/package/scripts/download-aqua-scanner.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash -set -eu - -md5_bin="${MD5_BIN:-"md5sum"}" -aqua_scanner_url="" -bin_dir=".ods-cache/bin" - -while [ "$#" -gt 0 ]; do - case $1 in - - --bin-dir) bin_dir="$2"; shift;; - --bin-dir=*) bin_dir="${1#*=}";; - - --aqua-scanner-url) aqua_scanner_url="$2"; shift;; - --aqua-scanner-url=*) aqua_scanner_url="${1#*=}";; - - --debug) set -x;; - - *) echo "Unknown parameter passed: $1"; exit 1;; -esac; shift; done - -aqua_scanner_path="${bin_dir}/aquasec" -md5_aqua_scanner_url_path="${bin_dir}/.md5-aquasec" -mkdir -p "${bin_dir}" - -# Optionally install Aqua scanner. -# If the binary already exists and was downloaded from the -# URL given by aqua_scanner_url, skip download. -if [ -n "${aqua_scanner_url}" ] && [ "${aqua_scanner_url}" != "none" ]; then - md5_aqua_scanner_url=$(printf "%s" "${aqua_scanner_url}" | ${md5_bin} | cut -d- -f1) - if [ ! -f "${md5_aqua_scanner_url_path}" ] || [ "${md5_aqua_scanner_url}" != "$(cat "${md5_aqua_scanner_url_path}")" ]; then - echo 'Installing Aqua scanner...' 
- curl -sSf -L "${aqua_scanner_url}" -o aquasec - mv aquasec "${aqua_scanner_path}" - chmod +x "${aqua_scanner_path}" - echo "${md5_aqua_scanner_url}" > "${md5_aqua_scanner_url_path}" - echo 'Installed Aqua scanner version:' - version_output=$("${aqua_scanner_path}" version) - if [ "${version_output}" = "" ]; then - echo "Downloaded binary is broken. Re-run the task." - rm -rf "${bin_dir}" - exit 1 - fi - fi -fi diff --git a/build/package/scripts/supply-sonar-project-properties-default.sh b/build/package/scripts/supply-sonar-project-properties-default.sh deleted file mode 100755 index 8f35ef4d..00000000 --- a/build/package/scripts/supply-sonar-project-properties-default.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -set -eu - -working_dir="." - -while [ "$#" -gt 0 ]; do - case $1 in - --working-dir) working_dir="$2"; shift;; - --working-dir=*) working_dir="${1#*=}";; - *) echo "Unknown parameter passed: $1"; exit 1;; -esac; shift; done - -echo "Checking for sonar-project.properties ..." -if [ ! -f "${working_dir}/sonar-project.properties" ]; then - echo "No sonar-project.properties present, using default:" - cat /usr/local/default-sonar-project.properties - cp /usr/local/default-sonar-project.properties "${working_dir}/sonar-project.properties" -fi diff --git a/build/package/sonar-project.properties.d/go.properties b/build/package/sonar-project.properties.d/go.properties deleted file mode 100644 index 88e2422f..00000000 --- a/build/package/sonar-project.properties.d/go.properties +++ /dev/null @@ -1,7 +0,0 @@ -sonar.sources=. -sonar.sourceEncoding=UTF-8 -sonar.exclusions=**/*_test.go,**/vendor/**,**/.ods-cache/** -sonar.tests=. 
-sonar.test.inclusions=**/*_test.go -sonar.test.exclusions=**/vendor/** -sonar.go.coverage.reportPaths=coverage.out diff --git a/build/package/sonar-project.properties.d/gradle.properties b/build/package/sonar-project.properties.d/gradle.properties deleted file mode 100644 index 024e42a7..00000000 --- a/build/package/sonar-project.properties.d/gradle.properties +++ /dev/null @@ -1,6 +0,0 @@ -sonar.sources=src -sonar.sourceEncoding=UTF-8 -sonar.coverage.jacoco.xmlReportPaths=build/reports/jacoco/test/jacocoTestReport.xml -sonar.java.binaries=build/classes -sonar.java.libraries=docker -sonar.junit.reportPaths=build/test-results/test diff --git a/build/package/sonar-project.properties.d/npm.properties b/build/package/sonar-project.properties.d/npm.properties deleted file mode 100644 index 41292803..00000000 --- a/build/package/sonar-project.properties.d/npm.properties +++ /dev/null @@ -1,5 +0,0 @@ -sonar.sources=src -sonar.sourceEncoding=UTF-8 -sonar.exclusions=**/*.html,**/*.scss,**/*.json,**/*.ico,**/*.svg,**/.ods-cache/** -sonar.coverage.exclusions=**/*.spec.ts,**/*.module.ts,src/environments/** -sonar.javascript.lcov.reportPaths=build/coverage/lcov.info diff --git a/build/package/sonar-project.properties.d/python.properties b/build/package/sonar-project.properties.d/python.properties deleted file mode 100644 index 1178e2b1..00000000 --- a/build/package/sonar-project.properties.d/python.properties +++ /dev/null @@ -1,8 +0,0 @@ -sonar.sources=src -sonar.sourceEncoding=UTF-8 -sonar.exclusions=**/.ods-cache/** -sonar.tests=tests -sonar.test.inclusions=**/*_test.py -sonar.coverage.exclusions=tests/** -sonar.python.xunit.reportPath=report.xml -sonar.python.coverage.reportPaths=coverage.xml diff --git a/cmd/aqua-scan/aqua.go b/cmd/aqua-scan/aqua.go deleted file mode 100644 index 534313b8..00000000 --- a/cmd/aqua-scan/aqua.go +++ /dev/null @@ -1,107 +0,0 @@ -package main - -import ( - "fmt" - "io" - "net/url" - "os" - "path/filepath" - - 
"github.com/opendevstack/ods-pipeline/internal/command" - "github.com/opendevstack/ods-pipeline/internal/image" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" -) - -const ( - aquasecBin = "./.ods-cache/bin/aquasec" - scanComplianceFailureExitCode = 4 - scanLicenseValidationFailureExitCode = 5 -) - -// aquaScanURL returns an URL to the given aquaImage. -func aquaScanURL(opts options, aquaImage string) (string, error) { - aquaURL, err := url.Parse(opts.aquaURL) - if err != nil { - return "", fmt.Errorf("parse base URL: %w", err) - } - aquaPath := fmt.Sprintf( - "/#/images/%s/%s/vulns", - url.QueryEscape(opts.aquaRegistry), url.QueryEscape(aquaImage), - ) - fullURL, err := aquaURL.Parse(aquaPath) - if err != nil { - return "", fmt.Errorf("parse URL path: %w", err) - } - return fullURL.String(), nil -} - -// aquaScan runs the scan and returns whether there was a policy incompliance or not. -// An error is returned when the scan cannot be started or encounters failures -// unrelated to policy compliance. -func runScan(exe string, args []string, outWriter, errWriter io.Writer) (bool, error) { - // STDERR contains the scan log output, hence we read it before STDOUT. - // STDOUT contains the scan summary (incl. ASCII table). - return command.RunWithSpecialFailureCode( - exe, args, []string{}, outWriter, errWriter, scanComplianceFailureExitCode, - ) -} - -// aquaAssembleScanArgs creates args/flags to pass to the Aqua scanner based on given arguments. 
-func aquaAssembleScanArgs(opts options, image, htmlReportFile, jsonReportFile string) []string { - return []string{ - "scan", - "--dockerless", "--register", "--text", - fmt.Sprintf("--htmlfile=%s", htmlReportFile), - fmt.Sprintf("--jsonfile=%s", jsonReportFile), - "-w", "/tmp", - fmt.Sprintf("--user=%s", opts.aquaUsername), - fmt.Sprintf("--password=%s", opts.aquaPassword), - fmt.Sprintf("--host=%s", opts.aquaURL), - image, - fmt.Sprintf("--registry=%s", opts.aquaRegistry), - } -} - -// htmlReportFilename returns the HTML report filename for given image. -func htmlReportFilename(iid image.Identity) string { - return fmt.Sprintf("%s.html", iid.ImageStream) -} - -// htmlReportFilename returns the JSON report filename for given image. -func jsonReportFilename(iid image.Identity) string { - return fmt.Sprintf("%s.json", iid.ImageStream) -} - -// reportFilenames returns the list of scan report filenames. -func reportFilenames(iid image.Identity) []string { - return []string{htmlReportFilename(iid), jsonReportFilename(iid)} -} - -// aquaReportsExist checks whether the reports associated with the image name -// exist in the given artifacts path. -func aquaReportsExist(artifactsPath string, iid image.Identity) bool { - d := filepath.Join(artifactsPath, pipelinectxt.AquaScansDir) - for _, f := range reportFilenames(iid) { - if _, err := os.Stat(filepath.Join(d, f)); err != nil { - return false - } - } - return true -} - -// copyAquaReportsToArtifacts copies the Aqua scan reports to the artifacts directory. 
-func copyAquaReportsToArtifacts(htmlReportFile, jsonReportFile string) error { - if _, err := os.Stat(htmlReportFile); err == nil { - err := pipelinectxt.CopyArtifact(htmlReportFile, pipelinectxt.AquaScansPath) - if err != nil { - return fmt.Errorf("copying HTML report to artifacts failed: %w", err) - } - } - if _, err := os.Stat(jsonReportFile); err == nil { - err := pipelinectxt.CopyArtifact(jsonReportFile, pipelinectxt.AquaScansPath) - if err != nil { - return fmt.Errorf("copying JSON report to artifacts failed: %w", err) - } - } - return nil -} diff --git a/cmd/aqua-scan/aqua_test.go b/cmd/aqua-scan/aqua_test.go deleted file mode 100644 index 09e564b5..00000000 --- a/cmd/aqua-scan/aqua_test.go +++ /dev/null @@ -1,76 +0,0 @@ -package main - -import ( - "bytes" - "strconv" - "testing" -) - -func TestAquaScan(t *testing.T) { - tests := map[string]struct { - cmdExitCode int - wantSuccess bool - wantErr bool - }{ - "scan exits with license validation failure exit code": { - cmdExitCode: scanLicenseValidationFailureExitCode, - wantSuccess: false, - wantErr: true, - }, - "scan exits with compliance failure exit code": { - cmdExitCode: scanComplianceFailureExitCode, - wantSuccess: false, - wantErr: false, - }, - "scan passes": { - cmdExitCode: 0, - wantSuccess: true, - wantErr: false, - }, - } - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - var stdout, stderr bytes.Buffer - success, err := runScan( - "../../test/scripts/exit-with-code.sh", - []string{"", "", strconv.Itoa(tc.cmdExitCode)}, - &stdout, &stderr, - ) - if tc.wantErr && err == nil { - t.Fatal("want err, got none") - } - if !tc.wantErr && err != nil { - t.Fatalf("want no err, got %s", err) - } - if tc.wantSuccess != success { - t.Fatalf("want success=%v, got success=%v", tc.wantSuccess, success) - } - }) - } -} - -func TestAquaScanURL(t *testing.T) { - tests := map[string]struct { - aquaURL string - }{ - "base URL without trailing slash": { - aquaURL: "https://console.example.com", - }, 
- "base URL with trailing slash": { - aquaURL: "https://console.example.com/", - }, - } - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - opts := options{aquaURL: tc.aquaURL, aquaRegistry: "ods"} - u, err := aquaScanURL(opts, "foo") - if err != nil { - t.Fatal(err) - } - want := "https://console.example.com/#/images/ods/foo/vulns" - if u != want { - t.Fatalf("want: %s, got: %s", want, u) - } - }) - } -} diff --git a/cmd/aqua-scan/bitbucket.go b/cmd/aqua-scan/bitbucket.go deleted file mode 100644 index f918a52d..00000000 --- a/cmd/aqua-scan/bitbucket.go +++ /dev/null @@ -1,56 +0,0 @@ -package main - -import ( - "fmt" - "time" - - "github.com/opendevstack/ods-pipeline/pkg/bitbucket" - "github.com/opendevstack/ods-pipeline/pkg/logging" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" -) - -// createBitbucketInsightReport attaches a code insight report to the Git commit -// being built in Bitbucket. The code insight report points to the Aqua security scan. -func createBitbucketInsightReport(opts options, aquaScanUrl string, success bool, ctxt *pipelinectxt.ODSContext) error { - var logger logging.LeveledLoggerInterface - if opts.debug { - logger = &logging.LeveledLogger{Level: logging.LevelDebug} - } - bitbucketClient, err := bitbucket.NewClient(&bitbucket.ClientConfig{ - APIToken: opts.bitbucketAccessToken, - BaseURL: opts.bitbucketURL, - Logger: logger, - }) - if err != nil { - return fmt.Errorf("bitbucket client: %w", err) - } - reportKey := "org.opendevstack.aquasec" - scanResult := bitbucket.InsightReportFail - if success { - scanResult = bitbucket.InsightReportPass - } - _, err = bitbucketClient.InsightReportCreate( - ctxt.Project, - ctxt.Repository, - ctxt.GitCommitSHA, - reportKey, - bitbucket.InsightReportCreatePayload{ - Title: "Aqua Security", - Reporter: "OpenDevStack", - CreatedDate: time.Now().Unix(), - Details: "Please visit the following link to review the Aqua Security scan report:", - Result: scanResult, - Data: 
[]bitbucket.InsightReportData{ - { - Title: "Report", - Type: "LINK", - Value: map[string]string{ - "linktext": "Result in Aqua", - "href": aquaScanUrl, - }, - }, - }, - }, - ) - return err -} diff --git a/cmd/aqua-scan/main.go b/cmd/aqua-scan/main.go deleted file mode 100644 index 4997094d..00000000 --- a/cmd/aqua-scan/main.go +++ /dev/null @@ -1,77 +0,0 @@ -package main - -import ( - "flag" - "os" - - "github.com/opendevstack/ods-pipeline/internal/image" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" - "golang.org/x/exp/slog" -) - -type options struct { - checkoutDir string - imageStream string - imageNamespace string - bitbucketAccessToken string - bitbucketURL string - aquaUsername string - aquaPassword string - aquaURL string - aquaRegistry string - aquasecGate bool - debug bool -} - -type aquaScan struct { - opts options - ctxt *pipelinectxt.ODSContext - imageId image.Identity -} - -var defaultOptions = options{ - checkoutDir: ".", - imageStream: "", - imageNamespace: "", - bitbucketAccessToken: os.Getenv("BITBUCKET_ACCESS_TOKEN"), - bitbucketURL: os.Getenv("BITBUCKET_URL"), - aquaUsername: os.Getenv("AQUA_USERNAME"), - aquaPassword: os.Getenv("AQUA_PASSWORD"), - aquaURL: os.Getenv("AQUA_URL"), - aquaRegistry: os.Getenv("AQUA_REGISTRY"), - aquasecGate: false, - debug: (os.Getenv("DEBUG") == "true"), -} - -func main() { - opts := options{} - flag.StringVar(&opts.checkoutDir, "checkout-dir", defaultOptions.checkoutDir, "Checkout dir") - flag.StringVar(&opts.imageStream, "image-stream", defaultOptions.imageStream, "Image stream") - flag.StringVar(&opts.imageNamespace, "image-namespace", defaultOptions.imageNamespace, "image namespace") - flag.StringVar(&opts.bitbucketAccessToken, "bitbucket-access-token", defaultOptions.bitbucketAccessToken, "bitbucket-access-token") - flag.StringVar(&opts.bitbucketURL, "bitbucket-url", defaultOptions.bitbucketURL, "bitbucket-url") - flag.StringVar(&opts.aquaUsername, "aqua-username", defaultOptions.aquaUsername, 
"aqua-username") - flag.StringVar(&opts.aquaPassword, "aqua-password", defaultOptions.aquaPassword, "aqua-password") - flag.StringVar(&opts.aquaURL, "aqua-url", defaultOptions.aquaURL, "aqua-url") - flag.StringVar(&opts.aquaRegistry, "aqua-registry", defaultOptions.aquaRegistry, "aqua-registry") - flag.BoolVar(&opts.aquasecGate, "aqua-gate", defaultOptions.aquasecGate, "whether the Aqua security scan needs to pass for the task to succeed") - flag.BoolVar(&opts.debug, "debug", defaultOptions.debug, "debug mode") - flag.Parse() - - logLevel := slog.LevelInfo - if opts.debug { - logLevel = slog.LevelDebug - } - slog.SetDefault(slog.New(slog.HandlerOptions{Level: logLevel}.NewTextHandler(os.Stderr))) - - err := (&aquaScan{opts: opts}).runSteps( - setupContext(), - setImageId(), - skipIfScanArtifactsExist(), - scanImagesWithAqua(), - ) - if err != nil { - slog.Error("step failed", err) - os.Exit(1) - } -} diff --git a/cmd/aqua-scan/skip.go b/cmd/aqua-scan/skip.go deleted file mode 100644 index d404f22e..00000000 --- a/cmd/aqua-scan/skip.go +++ /dev/null @@ -1,11 +0,0 @@ -package main - -// skipRemainingSteps is a pseudo error used to indicate that remaining -// steps should be skipped. 
-type skipRemainingSteps struct { - msg string -} - -func (e *skipRemainingSteps) Error() string { - return e.msg -} diff --git a/cmd/aqua-scan/steps.go b/cmd/aqua-scan/steps.go deleted file mode 100644 index e72d71dd..00000000 --- a/cmd/aqua-scan/steps.go +++ /dev/null @@ -1,97 +0,0 @@ -package main - -import ( - "errors" - "fmt" - "os" - "path/filepath" - - "github.com/opendevstack/ods-pipeline/internal/image" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" - "golang.org/x/exp/slog" -) - -type AquaScanStep func(d *aquaScan) (*aquaScan, error) - -func (s *aquaScan) runSteps(steps ...AquaScanStep) error { - var skip *skipRemainingSteps - var err error - for _, step := range steps { - s, err = step(s) - if err != nil { - if errors.As(err, &skip) { - slog.Info(err.Error()) - return nil - } - return err - } - } - return nil -} - -// setupContext creates and ODS context. -func setupContext() AquaScanStep { - return func(s *aquaScan) (*aquaScan, error) { - ctxt := &pipelinectxt.ODSContext{} - err := ctxt.ReadCache(s.opts.checkoutDir) - if err != nil { - return s, fmt.Errorf("read cache: %w", err) - } - s.ctxt = ctxt - - return s, nil - } -} - -func setImageId() AquaScanStep { - return func(p *aquaScan) (*aquaScan, error) { - p.imageId = image.CreateImageIdentity(p.ctxt, p.opts.imageNamespace, p.opts.imageStream) - return p, nil - } -} - -func skipIfScanArtifactsExist() AquaScanStep { - return func(s *aquaScan) (*aquaScan, error) { - if ok := aquaReportsExist(pipelinectxt.AquaScansPath, s.imageId); ok { - return s, &skipRemainingSteps{fmt.Sprintf("aqua scan artifact exists already for %s", s.imageId.ImageStream)} - } - return s, nil - } -} - -// scanImagesWithAqua runs the Aqua scanner over each image artifact. 
-func scanImagesWithAqua() AquaScanStep { - return func(s *aquaScan) (*aquaScan, error) { - slog.Info("Scanning image with Aqua scanner ...") - aquaImage := s.imageId.NamespaceStreamSha() - htmlReportFile := filepath.Join(s.opts.checkoutDir, htmlReportFilename(s.imageId)) - jsonReportFile := filepath.Join(s.opts.checkoutDir, jsonReportFilename(s.imageId)) - scanArgs := aquaAssembleScanArgs(s.opts, aquaImage, htmlReportFile, jsonReportFile) - scanSuccessful, err := runScan(aquasecBin, scanArgs, os.Stdout, os.Stderr) - if err != nil { - return s, fmt.Errorf("aqua scan: %w", err) - } - - if !scanSuccessful && s.opts.aquasecGate { - return s, errors.New("stopping build as successful Aqua scan is required") - } - - asu, err := aquaScanURL(s.opts, aquaImage) - if err != nil { - return s, fmt.Errorf("aqua scan URL: %w", err) - } - slog.Info("Aqua vulnerability report is at " + asu) - - err = copyAquaReportsToArtifacts(htmlReportFile, jsonReportFile) - if err != nil { - return s, err - } - - slog.Info("Creating Bitbucket code insight report ...") - err = createBitbucketInsightReport(s.opts, asu, scanSuccessful, s.ctxt) - if err != nil { - return s, err - } - return s, nil - } -} diff --git a/cmd/deploy-helm/age.go b/cmd/deploy-helm/age.go deleted file mode 100644 index 67290ba7..00000000 --- a/cmd/deploy-helm/age.go +++ /dev/null @@ -1,25 +0,0 @@ -package main - -import ( - "fmt" - "os" -) - -const ( - // ageKeyFilePath is the path where to store the age-key-secret openshift secret content, - // required by the helm secrets plugin. 
- ageKeyFilePath = "./key.txt" -) - -func storeAgeKey(ageKeyContent []byte) error { - file, err := os.Create(ageKeyFilePath) - if err != nil { - return fmt.Errorf("create age key file path: %w", err) - } - defer file.Close() - _, err = file.Write(ageKeyContent) - if err != nil { - return fmt.Errorf("write age key: %w", err) - } - return err -} diff --git a/cmd/deploy-helm/helm.go b/cmd/deploy-helm/helm.go deleted file mode 100644 index adc94a6e..00000000 --- a/cmd/deploy-helm/helm.go +++ /dev/null @@ -1,185 +0,0 @@ -package main - -import ( - "fmt" - "io" - "os" - "path/filepath" - "regexp" - "strings" - - "github.com/google/shlex" - "github.com/opendevstack/ods-pipeline/internal/command" - "sigs.k8s.io/yaml" -) - -const ( - // helmDiffDetectedMarker is the message Helm prints when helm-diff is - // configured to exit with a non-zero exit code when drift is detected. - helmDiffDetectedMarker = `Error: identified at least one change, exiting with non-zero exit code (detailed-exitcode parameter enabled)` - - // desiredDiffMessage is the message that should be presented to the user. - desiredDiffMessage = `plugin "diff" identified at least one change` - - // exit code returned from helm-diff when diff is detected. - diffDriftExitCode = 2 - - // exit code returned from helm-diff when there is an error (e.g. invalid resource manifests). - diffGenericExitCode = 1 -) - -type helmChart struct { - Name string `json:"name"` - Version string `json:"version"` -} - -// helmDiff runs the diff and returns whether the Helm release is in sync. -// An error is returned when the diff cannot be started or encounters failures -// unrelated to drift (such as invalid resource manifests). 
-func (d *deployHelm) helmDiff(args []string, outWriter, errWriter io.Writer) (bool, error) { - return command.RunWithSpecialFailureCode( - d.helmBin, args, []string{ - fmt.Sprintf("SOPS_AGE_KEY_FILE=%s", ageKeyFilePath), - "HELM_DIFF_IGNORE_UNKNOWN_FLAGS=true", // https://github.com/databus23/helm-diff/issues/278 - }, outWriter, errWriter, diffDriftExitCode, - ) -} - -// helmUpgrade runs given Helm command. -func (d *deployHelm) helmUpgrade(args []string, stdout, stderr io.Writer) error { - return command.Run( - d.helmBin, args, []string{fmt.Sprintf("SOPS_AGE_KEY_FILE=%s", ageKeyFilePath)}, stdout, stderr, - ) -} - -// assembleHelmDiffArgs creates a slice of arguments for "helm diff upgrade". -func (d *deployHelm) assembleHelmDiffArgs() ([]string, error) { - helmDiffArgs := []string{ - "--namespace=" + d.releaseNamespace, - "secrets", - "diff", - "upgrade", - "--detailed-exitcode", - "--no-color", - "--normalize-manifests", - } - helmDiffFlags, err := shlex.Split(d.opts.diffFlags) - if err != nil { - return []string{}, fmt.Errorf("parse diff flags (%s): %s", d.opts.diffFlags, err) - } - helmDiffArgs = append(helmDiffArgs, helmDiffFlags...) - commonArgs, err := d.commonHelmUpgradeArgs() - if err != nil { - return []string{}, fmt.Errorf("upgrade args: %w", err) - } - return append(helmDiffArgs, commonArgs...), nil -} - -// assembleHelmDiffArgs creates a slice of arguments for "helm upgrade". -func (d *deployHelm) assembleHelmUpgradeArgs() ([]string, error) { - helmUpgradeArgs := []string{ - "--namespace=" + d.releaseNamespace, - "secrets", - "upgrade", - } - commonArgs, err := d.commonHelmUpgradeArgs() - if err != nil { - return []string{}, fmt.Errorf("upgrade args: %w", err) - } - return append(helmUpgradeArgs, commonArgs...), nil -} - -// commonHelmUpgradeArgs returns arguments common to "helm upgrade" and "helm diff upgrade". 
-func (d *deployHelm) commonHelmUpgradeArgs() ([]string, error) { - args, err := shlex.Split(d.opts.upgradeFlags) - if err != nil { - return []string{}, fmt.Errorf("parse upgrade flags (%s): %s", d.opts.upgradeFlags, err) - } - if d.opts.debug { - args = append([]string{"--debug"}, args...) - } - if d.targetConfig.APIServer != "" { - args = append( - []string{ - fmt.Sprintf("--kube-apiserver=%s", d.targetConfig.APIServer), - fmt.Sprintf("--kube-token=%s", d.targetConfig.APIToken), - }, - args..., - ) - } - for _, vf := range d.valuesFiles { - args = append(args, fmt.Sprintf("--values=%s", vf)) - } - args = append(args, d.cliValues...) - args = append(args, d.releaseName, d.helmArchive) - return args, nil -} - -// getHelmChart reads given filename into a helmChart struct. -func getHelmChart(filename string) (*helmChart, error) { - y, err := os.ReadFile(filename) - if err != nil { - return nil, fmt.Errorf("read chart file: %w", err) - } - - var hc *helmChart - err = yaml.Unmarshal(y, &hc) - if err != nil { - return nil, fmt.Errorf("unmarshal chart: %w", err) - } - return hc, nil -} - -// cleanHelmDiffOutput removes error messages from the given Helm output. -// Those error messages are confusing, because they do not come from an actual -// error, but from detecting drift between desired and current Helm state. -func cleanHelmDiffOutput(out string) string { - if !strings.Contains(out, helmDiffDetectedMarker) { - return out - } - cleanedOut := strings.Replace( - out, helmDiffDetectedMarker, desiredDiffMessage, -1, - ) - r := regexp.MustCompile(`Error: plugin "(diff|secrets)" exited with error[\n]?`) - cleanedOut = r.ReplaceAllString(cleanedOut, "") - r = regexp.MustCompile(`helm.go:81: \[debug\] plugin "(diff|secrets)" exited with error[\n]?`) - cleanedOut = r.ReplaceAllString(cleanedOut, "") - return cleanedOut -} - -// printlnSafeHelmCmd prints all args that do not contain sensitive information. 
-func printlnSafeHelmCmd(args []string, outWriter io.Writer) { - safeArgs := []string{} - for _, a := range args { - if strings.HasPrefix(a, "--kube-token=") { - safeArgs = append(safeArgs, "--kube-token=***") - } else { - safeArgs = append(safeArgs, a) - } - } - fmt.Fprintln(outWriter, helmBin, strings.Join(safeArgs, " ")) -} - -// packageHelmChart creates a Helm package for given chart. -func packageHelmChart(chartDir, gitCommitSHA string, debug bool) (string, error) { - hc, err := getHelmChart(filepath.Join(chartDir, "Chart.yaml")) - if err != nil { - return "", fmt.Errorf("read chart: %w", err) - } - packageVersion := fmt.Sprintf("%s+%s", hc.Version, gitCommitSHA) - helmPackageArgs := []string{ - "package", - fmt.Sprintf("--app-version=%s", gitCommitSHA), - fmt.Sprintf("--version=%s", packageVersion), - } - if debug { - helmPackageArgs = append(helmPackageArgs, "--debug") - } - err = command.Run(helmBin, append(helmPackageArgs, chartDir), []string{}, os.Stdout, os.Stderr) - if err != nil { - return "", fmt.Errorf("package chart %s: %w", chartDir, err) - } - - helmArchive := fmt.Sprintf("%s-%s.tgz", hc.Name, packageVersion) - return helmArchive, nil -} diff --git a/cmd/deploy-helm/helm_test.go b/cmd/deploy-helm/helm_test.go deleted file mode 100644 index 2a7966ce..00000000 --- a/cmd/deploy-helm/helm_test.go +++ /dev/null @@ -1,299 +0,0 @@ -package main - -import ( - "bytes" - "strconv" - "strings" - "testing" - - "github.com/google/go-cmp/cmp" -) - -func TestHelmDiff(t *testing.T) { - tests := map[string]struct { - cmdExitCode int - wantInSync bool - wantErr bool - }{ - "diff exits with generic exit code": { - cmdExitCode: diffGenericExitCode, - wantInSync: false, - wantErr: true, - }, - "diff exits with drift exit code": { - cmdExitCode: diffDriftExitCode, - wantInSync: false, - wantErr: false, - }, - "diff passes (no drift)": { - cmdExitCode: 0, - wantInSync: true, - wantErr: false, - }, - } - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - 
var stdout, stderr bytes.Buffer - d := &deployHelm{helmBin: "../../test/scripts/exit-with-code.sh"} - driftDetected, err := d.helmDiff( - []string{"", "", strconv.Itoa(tc.cmdExitCode)}, - &stdout, &stderr, - ) - if tc.wantErr && err == nil { - t.Fatal("want err, got none") - } - if !tc.wantErr && err != nil { - t.Fatalf("want no err, got %s", err) - } - if tc.wantInSync != driftDetected { - t.Fatalf("want success=%v, got success=%v", tc.wantInSync, driftDetected) - } - }) - } -} - -func TestCleanHelmDiffOutput(t *testing.T) { - tests := map[string]struct { - example string - want string - }{ - "diff detected drift": { - example: `Error: identified at least one change, exiting with non-zero exit code (detailed-exitcode parameter enabled) -Error: plugin "diff" exited with error - -[helm-secrets] Removed: ./chart/secrets.dev.yaml.dec -Error: plugin "secrets" exited with error`, - want: `plugin "diff" identified at least one change - -[helm-secrets] Removed: ./chart/secrets.dev.yaml.dec -`, - }, - "diff detected drift with debug turned on": { - example: `Error: identified at least one change, exiting with non-zero exit code (detailed-exitcode parameter enabled) -Error: plugin "diff" exited with error -helm.go:81: [debug] plugin "diff" exited with error - -[helm-secrets] Removed: ./chart/secrets.dev.yaml.dec -Error: plugin "secrets" exited with error -helm.go:81: [debug] plugin "secrets" exited with error`, - want: `plugin "diff" identified at least one change - -[helm-secrets] Removed: ./chart/secrets.dev.yaml.dec -`, - }, - "diff encounters another error": { - example: `Error: This command needs 2 arguments: release name, chart path - -Use "diff [command] --help" for more information about a command. - -Error: plugin "diff" exited with error`, - want: `Error: This command needs 2 arguments: release name, chart path - -Use "diff [command] --help" for more information about a command. 
- -Error: plugin "diff" exited with error`, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - got := cleanHelmDiffOutput(tc.example) - if diff := cmp.Diff(tc.want, got); diff != "" { - t.Fatalf("output mismatch (-want +got):\n%s", diff) - } - }) - } -} - -func TestAssembleHelmDiffArgs(t *testing.T) { - tests := map[string]struct { - releaseNamespace string - releaseName string - helmArchive string - opts options - valuesFiles []string - cliValues []string - want []string - }{ - "default": { - releaseNamespace: "a", - releaseName: "b", - helmArchive: "c", - opts: options{diffFlags: "--three-way-merge", upgradeFlags: "--install", debug: true}, - want: []string{"--namespace=a", "secrets", "diff", "upgrade", - "--detailed-exitcode", "--no-color", "--normalize-manifests", "--three-way-merge", "--debug", "--install", - "b", "c"}, - }, - "with no diff flags": { - releaseNamespace: "a", - releaseName: "b", - helmArchive: "c", - opts: options{diffFlags: "", upgradeFlags: "--install"}, - want: []string{"--namespace=a", "secrets", "diff", "upgrade", - "--detailed-exitcode", "--no-color", "--normalize-manifests", "--install", - "b", "c"}, - }, - "with values file": { - releaseNamespace: "a", - releaseName: "b", - helmArchive: "c", - opts: options{diffFlags: "--three-way-merge", upgradeFlags: "--install"}, - valuesFiles: []string{"values.dev.yaml"}, - want: []string{"--namespace=a", "secrets", "diff", "upgrade", - "--detailed-exitcode", "--no-color", "--normalize-manifests", "--three-way-merge", "--install", "--values=values.dev.yaml", - "b", "c"}, - }, - "with CLI values": { - releaseNamespace: "a", - releaseName: "b", - helmArchive: "c", - opts: options{diffFlags: "--three-way-merge", upgradeFlags: "--install"}, - cliValues: []string{"--set=image.tag=abcdef"}, - want: []string{"--namespace=a", "secrets", "diff", "upgrade", - "--detailed-exitcode", "--no-color", "--normalize-manifests", "--three-way-merge", "--install", "--set=image.tag=abcdef", - 
"b", "c"}, - }, - "with multiple args": { - releaseNamespace: "a", - releaseName: "b", - helmArchive: "c", - opts: options{ - diffFlags: "--three-way-merge --no-hooks --include-tests", - upgradeFlags: "--install --wait", - }, - valuesFiles: []string{"secrets.yaml", "values.dev.yaml", "secrets.dev.yaml"}, - cliValues: []string{"--set=image.tag=abcdef", "--set=x=y"}, - want: []string{"--namespace=a", "secrets", "diff", "upgrade", - "--detailed-exitcode", "--no-color", "--normalize-manifests", - "--three-way-merge", "--no-hooks", "--include-tests", - "--install", "--wait", - "--values=secrets.yaml", "--values=values.dev.yaml", "--values=secrets.dev.yaml", - "--set=image.tag=abcdef", "--set=x=y", - "b", "c"}, - }, - } - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - d := &deployHelm{ - releaseNamespace: tc.releaseNamespace, - releaseName: tc.releaseName, - helmArchive: tc.helmArchive, - opts: tc.opts, - valuesFiles: tc.valuesFiles, - cliValues: tc.cliValues, - targetConfig: &targetEnvironment{}, - } - got, err := d.assembleHelmDiffArgs() - if err != nil { - t.Fatal(err) - } - if diff := cmp.Diff(tc.want, got); diff != "" { - t.Fatalf("args mismatch (-want +got):\n%s", diff) - } - }) - } -} - -func TestAssembleHelmUpgradeArgs(t *testing.T) { - tests := map[string]struct { - releaseNamespace string - releaseName string - helmArchive string - opts options - valuesFiles []string - cliValues []string - want []string - }{ - "default": { - releaseNamespace: "a", - releaseName: "b", - helmArchive: "c", - opts: options{diffFlags: "--three-way-merge", upgradeFlags: "--install --wait", debug: true}, - want: []string{"--namespace=a", "secrets", "upgrade", - "--kube-apiserver=https://example.com", "--kube-token=s3cr3t", - "--debug", - "--install", "--wait", - "b", "c"}, - }, - "with no upgrade flags": { - releaseNamespace: "a", - releaseName: "b", - helmArchive: "c", - opts: options{diffFlags: "--three-way-merge", upgradeFlags: ""}, - want: 
[]string{"--namespace=a", "secrets", "upgrade", - "--kube-apiserver=https://example.com", "--kube-token=s3cr3t", - "b", "c"}, - }, - "with values file": { - releaseNamespace: "a", - releaseName: "b", - helmArchive: "c", - opts: options{diffFlags: "--three-way-merge", upgradeFlags: "--install --wait"}, - valuesFiles: []string{"values.dev.yaml"}, - want: []string{"--namespace=a", "secrets", "upgrade", - "--kube-apiserver=https://example.com", "--kube-token=s3cr3t", - "--install", "--wait", - "--values=values.dev.yaml", - "b", "c"}, - }, - "with CLI values": { - releaseNamespace: "a", - releaseName: "b", - helmArchive: "c", - opts: options{diffFlags: "--three-way-merge", upgradeFlags: "--install --wait"}, - cliValues: []string{"--set=image.tag=abcdef"}, - want: []string{"--namespace=a", "secrets", "upgrade", - "--kube-apiserver=https://example.com", "--kube-token=s3cr3t", - "--install", "--wait", - "--set=image.tag=abcdef", - "b", "c"}, - }, - "with multiple args": { - releaseNamespace: "a", - releaseName: "b", - helmArchive: "c", - opts: options{diffFlags: "--three-way-merge", upgradeFlags: "--install --atomic"}, - valuesFiles: []string{"secrets.yaml", "values.dev.yaml", "secrets.dev.yaml"}, - cliValues: []string{"--set=image.tag=abcdef", "--set=x=y"}, - want: []string{"--namespace=a", "secrets", "upgrade", - "--kube-apiserver=https://example.com", "--kube-token=s3cr3t", - "--install", "--atomic", - "--values=secrets.yaml", "--values=values.dev.yaml", "--values=secrets.dev.yaml", - "--set=image.tag=abcdef", "--set=x=y", - "b", "c"}, - }, - } - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - d := &deployHelm{ - releaseNamespace: tc.releaseNamespace, - releaseName: tc.releaseName, - helmArchive: tc.helmArchive, - opts: tc.opts, - valuesFiles: tc.valuesFiles, - cliValues: tc.cliValues, - targetConfig: &targetEnvironment{ - APIServer: "https://example.com", - APIToken: "s3cr3t", - }, - } - got, err := d.assembleHelmUpgradeArgs() - if err != nil { - 
t.Fatal(err) - } - if diff := cmp.Diff(tc.want, got); diff != "" { - t.Fatalf("args mismatch (-want +got):\n%s", diff) - } - }) - } -} - -func TestPrintlnSafeHelmCmd(t *testing.T) { - var stdout bytes.Buffer - printlnSafeHelmCmd([]string{"diff", "upgrade", "--kube-apiserver=https://example.com", "--kube-token=s3cr3t", "--debug"}, &stdout) - want := "helm diff upgrade --kube-apiserver=https://example.com --kube-token=*** --debug" - got := strings.TrimSpace(stdout.String()) - if got != want { - t.Fatalf("want: '%s', got: '%s'", want, got) - } -} diff --git a/cmd/deploy-helm/main.go b/cmd/deploy-helm/main.go deleted file mode 100644 index 48b2c5f4..00000000 --- a/cmd/deploy-helm/main.go +++ /dev/null @@ -1,136 +0,0 @@ -package main - -import ( - "flag" - "io/fs" - "os" - - "github.com/opendevstack/ods-pipeline/pkg/logging" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" - "k8s.io/client-go/kubernetes" -) - -const ( - helmBin = "helm" - kubernetesServiceaccountDir = "/var/run/secrets/kubernetes.io/serviceaccount" -) - -type options struct { - // Name of the Secret resource holding the API user credentials. - apiCredentialsSecret string - // API server of the target cluster, including scheme. - apiServer string - // Target K8s namespace (or OpenShift project) to deploy into. - namespace string - // Hostname of the target registry to push images to. - registryHost string - // Location of checkout directory. - checkoutDir string - // Location of Helm chart directory. - chartDir string - // Name of Helm release. - releaseName string - // Flags to pass to `helm diff upgrade` (in addition to default ones and upgrade flags). - diffFlags string - // Flags to pass to `helm upgrade`. - upgradeFlags string - // Name of K8s secret holding the age key. - ageKeySecret string - // Field name within the K8s secret holding the age key. - ageKeySecretField string - // Location of the certificate directory. - certDir string - // Whether to TLS verify the source image registry. 
- srcRegistryTLSVerify bool - // Whether to perform just a diff without any upgrade. - diffOnly bool - // Whether to enable debug mode. - debug bool -} - -type deployHelm struct { - logger logging.LeveledLoggerInterface - // Name of helm binary. - helmBin string - opts options - releaseName string - releaseNamespace string - targetConfig *targetEnvironment - imageDigests []string - cliValues []string - helmArchive string - valuesFiles []string - clientset *kubernetes.Clientset - subrepos []fs.DirEntry - ctxt *pipelinectxt.ODSContext -} - -var defaultOptions = options{ - checkoutDir: ".", - chartDir: "./chart", - ageKeySecretField: "key.txt", - certDir: defaultCertDir(), - srcRegistryTLSVerify: true, - debug: (os.Getenv("DEBUG") == "true"), -} - -type targetEnvironment struct { - APIServer string - APIToken string - RegistryHost string - RegistryTLSVerify *bool - Namespace string -} - -func main() { - opts := options{} - flag.StringVar(&opts.checkoutDir, "checkout-dir", defaultOptions.checkoutDir, "Checkout dir") - flag.StringVar(&opts.chartDir, "chart-dir", defaultOptions.chartDir, "Chart dir") - flag.StringVar(&opts.releaseName, "release-name", defaultOptions.releaseName, "Name of Helm release") - flag.StringVar(&opts.diffFlags, "diff-flags", defaultOptions.diffFlags, "Flags to pass to `helm diff upgrade` (in addition to default ones and upgrade flags)") - flag.StringVar(&opts.upgradeFlags, "upgrade-flags", defaultOptions.upgradeFlags, "Flags to pass to `helm upgrade`") - flag.StringVar(&opts.ageKeySecret, "age-key-secret", defaultOptions.ageKeySecret, "Name of the secret containing the age key to use for helm-secrets") - flag.StringVar(&opts.ageKeySecretField, "age-key-secret-field", defaultOptions.ageKeySecretField, "Name of the field in the secret holding the age private key") - flag.StringVar(&opts.apiServer, "api-server", defaultOptions.apiServer, "API server of the target cluster, including scheme") - flag.StringVar(&opts.apiCredentialsSecret, 
"api-credentials-secret", defaultOptions.apiCredentialsSecret, "Name of the Secret resource holding the API user credentials") - flag.StringVar(&opts.registryHost, "registry-host", defaultOptions.registryHost, "Hostname of the target registry to push images to") - flag.StringVar(&opts.namespace, "namespace", defaultOptions.namespace, "Target K8s namespace (or OpenShift project) to deploy into") - flag.StringVar(&opts.certDir, "cert-dir", defaultOptions.certDir, "Use certificates at the specified path to access the registry") - flag.BoolVar(&opts.srcRegistryTLSVerify, "src-registry-tls-verify", defaultOptions.srcRegistryTLSVerify, "TLS verify source registry") - flag.BoolVar(&opts.diffOnly, "diff-only", defaultOptions.diffOnly, "Whether to perform only a diff") - flag.BoolVar(&opts.debug, "debug", defaultOptions.debug, "debug mode") - flag.Parse() - - var logger logging.LeveledLoggerInterface - if opts.debug { - logger = &logging.LeveledLogger{Level: logging.LevelDebug} - } else { - logger = &logging.LeveledLogger{Level: logging.LevelInfo} - } - - err := (&deployHelm{helmBin: helmBin, logger: logger, opts: opts}).runSteps( - setupContext(), - skipOnEmptyNamespace(), - setReleaseTarget(), - detectSubrepos(), - listHelmPlugins(), - packageHelmChartWithSubcharts(), - collectValuesFiles(), - importAgeKey(), - diffHelmRelease(), - detectImageDigests(), - copyImagesIntoReleaseNamespace(), - upgradeHelmRelease(), - ) - if err != nil { - logger.Errorf(err.Error()) - os.Exit(1) - } -} - -func defaultCertDir() string { - if _, err := os.Stat(kubernetesServiceaccountDir); err == nil { - return kubernetesServiceaccountDir - } - return "/etc/containers/certs.d" -} diff --git a/cmd/deploy-helm/skip.go b/cmd/deploy-helm/skip.go deleted file mode 100644 index d404f22e..00000000 --- a/cmd/deploy-helm/skip.go +++ /dev/null @@ -1,11 +0,0 @@ -package main - -// skipRemainingSteps is a pseudo error used to indicate that remaining -// steps should be skipped. 
-type skipRemainingSteps struct { - msg string -} - -func (e *skipRemainingSteps) Error() string { - return e.msg -} diff --git a/cmd/deploy-helm/skopeo.go b/cmd/deploy-helm/skopeo.go deleted file mode 100644 index 67becd69..00000000 --- a/cmd/deploy-helm/skopeo.go +++ /dev/null @@ -1,67 +0,0 @@ -package main - -import ( - "fmt" - "io" - "strings" - - "github.com/opendevstack/ods-pipeline/internal/command" - "github.com/opendevstack/ods-pipeline/pkg/artifact" -) - -func (d *deployHelm) copyImage(imageArtifact artifact.Image, destRegistryToken string, outWriter, errWriter io.Writer) error { - imageStream := imageArtifact.Name - d.logger.Infof("Copying image %s ...", imageStream) - srcImageURL := imageArtifact.Ref - // If the source registry should be TLS verified, the destination - // should be verified by default as well. - destRegistryTLSVerify := d.opts.srcRegistryTLSVerify - srcRegistryTLSVerify := d.opts.srcRegistryTLSVerify - // TLS verification of the KinD registry is not possible at the moment as - // requests error out with "server gave HTTP response to HTTPS client". - if strings.HasPrefix(imageArtifact.Registry, "kind-registry.kind") { - srcRegistryTLSVerify = false - destRegistryTLSVerify = false - } - if d.targetConfig.RegistryHost != "" && d.targetConfig.RegistryTLSVerify != nil { - destRegistryTLSVerify = *d.targetConfig.RegistryTLSVerify - } - destImageURL := getImageDestURL(d.targetConfig.RegistryHost, d.releaseNamespace, imageArtifact) - d.logger.Infof("Source image: %s", srcImageURL) - d.logger.Infof("Destination image: %s", destImageURL) - // TODO: for QA/PROD deployments we may want to ensure that the SHA - // recorded in Nexus matches the SHA referenced by the Git commit tag. 
- args := []string{ - "copy", - fmt.Sprintf("--src-tls-verify=%v", srcRegistryTLSVerify), - fmt.Sprintf("--dest-tls-verify=%v", destRegistryTLSVerify), - } - if srcRegistryTLSVerify { - args = append(args, fmt.Sprintf("--src-cert-dir=%v", d.opts.certDir)) - } - if destRegistryTLSVerify { - args = append(args, fmt.Sprintf("--dest-cert-dir=%v", d.opts.certDir)) - } - if destRegistryToken != "" { - args = append(args, "--dest-registry-token", destRegistryToken) - } - if d.opts.debug { - args = append(args, "--debug") - } - args = append( - args, fmt.Sprintf("docker://%s", srcImageURL), fmt.Sprintf("docker://%s", destImageURL), - ) - err := command.Run("skopeo", args, []string{}, outWriter, errWriter) - if err != nil { - return fmt.Errorf("skopeo copy %s: %w", srcImageURL, err) - } - return nil -} - -func getImageDestURL(registryHost, releaseNamespace string, imageArtifact artifact.Image) string { - if registryHost != "" { - return fmt.Sprintf("%s/%s/%s:%s", registryHost, releaseNamespace, imageArtifact.Name, imageArtifact.Tag) - } else { - return strings.Replace(imageArtifact.Ref, "/"+imageArtifact.Repository+"/", "/"+releaseNamespace+"/", -1) - } -} diff --git a/cmd/deploy-helm/steps.go b/cmd/deploy-helm/steps.go deleted file mode 100644 index da4aa674..00000000 --- a/cmd/deploy-helm/steps.go +++ /dev/null @@ -1,388 +0,0 @@ -package main - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "log" - "os" - "path/filepath" - "regexp" - "strings" - - "github.com/opendevstack/ods-pipeline/internal/command" - "github.com/opendevstack/ods-pipeline/internal/directory" - "github.com/opendevstack/ods-pipeline/internal/file" - k "github.com/opendevstack/ods-pipeline/internal/kubernetes" - "github.com/opendevstack/ods-pipeline/pkg/artifact" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" -) - -const ( - tokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token" - 
subchartsDir = "charts" -) - -type DeployStep func(d *deployHelm) (*deployHelm, error) - -func (d *deployHelm) runSteps(steps ...DeployStep) error { - var skip *skipRemainingSteps - var err error - for _, step := range steps { - d, err = step(d) - if err != nil { - if errors.As(err, &skip) { - d.logger.Infof(err.Error()) - return nil - } - return err - } - } - return nil -} - -func setupContext() DeployStep { - return func(d *deployHelm) (*deployHelm, error) { - ctxt := &pipelinectxt.ODSContext{} - err := ctxt.ReadCache(d.opts.checkoutDir) - if err != nil { - return d, fmt.Errorf("read cache: %w", err) - } - d.ctxt = ctxt - - clientset, err := k.NewInClusterClientset() - if err != nil { - return d, fmt.Errorf("create Kubernetes clientset: %w", err) - } - d.clientset = clientset - - if d.opts.debug { - if err := directory.ListFiles(d.opts.certDir, os.Stdout); err != nil { - log.Fatal(err) - } - } - return d, nil - } -} - -func skipOnEmptyNamespace() DeployStep { - return func(d *deployHelm) (*deployHelm, error) { - if d.opts.namespace == "" { - return d, &skipRemainingSteps{"No namespace given. 
Skipping deployment ..."} - } - return d, nil - } -} - -func setReleaseTarget() DeployStep { - return func(d *deployHelm) (*deployHelm, error) { - // Release name - if d.opts.releaseName != "" { - d.releaseName = d.opts.releaseName - } else { - d.releaseName = d.ctxt.Component - } - d.logger.Infof("Release name: %s", d.releaseName) - - // Target environment configuration - targetConfig := &targetEnvironment{ - APIServer: d.opts.apiServer, - Namespace: d.opts.namespace, - RegistryHost: d.opts.registryHost, - } - if targetConfig.APIServer != "" { - token, err := tokenFromSecret(d.clientset, d.ctxt.Namespace, d.opts.apiCredentialsSecret) - if err != nil { - return d, fmt.Errorf("get API token from secret %s: %w", d.opts.apiCredentialsSecret, err) - } - targetConfig.APIToken = token - } - d.targetConfig = targetConfig - - // Release namespace - d.releaseNamespace = targetConfig.Namespace - pattern := "^[a-z][a-z0-9-]{0,61}[a-z]$" - matched, err := regexp.MatchString(pattern, d.releaseNamespace) - if err != nil || !matched { - return d, fmt.Errorf("release namespace: %s must match %s", d.releaseNamespace, pattern) - } - d.logger.Infof("Release namespace: %s", d.releaseNamespace) - - return d, nil - } -} - -func detectSubrepos() DeployStep { - return func(d *deployHelm) (*deployHelm, error) { - subrepos, err := pipelinectxt.DetectSubrepos() - if err != nil { - return d, fmt.Errorf("detect subrepos: %w", err) - } - d.subrepos = subrepos - return d, nil - } -} - -func detectImageDigests() DeployStep { - return func(d *deployHelm) (*deployHelm, error) { - digests, err := pipelinectxt.ReadArtifactFilesIncludingSubrepos(pipelinectxt.ImageDigestsPath, d.subrepos) - if err != nil { - return d, fmt.Errorf("collect image digests: %w", err) - } - d.imageDigests = digests - return d, nil - } -} - -func copyImagesIntoReleaseNamespace() DeployStep { - return func(d *deployHelm) (*deployHelm, error) { - if len(d.imageDigests) == 0 { - return d, nil - } - // Get destination registry 
token from secret or file in pod. - var destRegistryToken string - if d.targetConfig.APIToken != "" { - destRegistryToken = d.targetConfig.APIToken - } else { - token, err := getTrimmedFileContent(tokenFile) - if err != nil { - return d, fmt.Errorf("get token from file %s: %w", tokenFile, err) - } - destRegistryToken = token - } - - d.logger.Infof("Copying images into release namespace ...") - for _, artifactFile := range d.imageDigests { - imageArtifact, err := artifact.ReadFromFile(artifactFile) - if err != nil { - return d, fmt.Errorf("read image artifact %s: %w", artifactFile, err) - } - err = d.copyImage(*imageArtifact, destRegistryToken, os.Stdout, os.Stderr) - if err != nil { - return d, fmt.Errorf("copy image %s: %w", imageArtifact.Name, err) - } - } - - return d, nil - } -} - -func listHelmPlugins() DeployStep { - return func(d *deployHelm) (*deployHelm, error) { - d.logger.Infof("List Helm plugins...") - helmPluginArgs := []string{"plugin", "list"} - if d.opts.debug { - helmPluginArgs = append(helmPluginArgs, "--debug") - } - err := command.Run(d.helmBin, helmPluginArgs, []string{}, os.Stdout, os.Stderr) - if err != nil { - return d, fmt.Errorf("list Helm plugins: %w", err) - } - return d, nil - } -} - -func packageHelmChartWithSubcharts() DeployStep { - return func(d *deployHelm) (*deployHelm, error) { - // Collect values to be set via the CLI. 
- d.cliValues = []string{ - fmt.Sprintf("--set=image.tag=%s", d.ctxt.GitCommitSHA), - } - - d.logger.Infof("Adding dependencies from subrepos into the %s/ directory ...", subchartsDir) - // Find subcharts - chartsDir := filepath.Join(d.opts.chartDir, subchartsDir) - if _, err := os.Stat(chartsDir); os.IsNotExist(err) { - err = os.Mkdir(chartsDir, 0755) - if err != nil { - return d, fmt.Errorf("create %s: %s", chartsDir, err) - } - } - for _, r := range d.subrepos { - subrepo := filepath.Join(pipelinectxt.SubreposPath, r.Name()) - subchart := filepath.Join(subrepo, d.opts.chartDir) - if _, err := os.Stat(subchart); os.IsNotExist(err) { - d.logger.Infof("no chart in %s", r.Name()) - continue - } - gitCommitSHA, err := getTrimmedFileContent(filepath.Join(subrepo, ".ods", "git-commit-sha")) - if err != nil { - return d, fmt.Errorf("get commit SHA of %s: %w", subrepo, err) - } - hc, err := getHelmChart(filepath.Join(subchart, "Chart.yaml")) - if err != nil { - return d, fmt.Errorf("get Helm chart of %s: %w", subrepo, err) - } - d.cliValues = append(d.cliValues, fmt.Sprintf("--set=%s.image.tag=%s", hc.Name, gitCommitSHA)) - if d.releaseName == d.ctxt.Component { - d.cliValues = append(d.cliValues, fmt.Sprintf("--set=%s.fullnameOverride=%s", hc.Name, hc.Name)) - } - helmArchive, err := packageHelmChart(subchart, gitCommitSHA, d.opts.debug) - if err != nil { - return d, fmt.Errorf("package Helm chart of %s: %w", subrepo, err) - } - helmArchiveName := filepath.Base(helmArchive) - d.logger.Infof("copying %s into %s", helmArchiveName, chartsDir) - err = file.Copy(helmArchive, filepath.Join(chartsDir, helmArchiveName)) - if err != nil { - return d, fmt.Errorf("copy Helm archive of %s: %w", subrepo, err) - } - } - - subcharts, err := os.ReadDir(chartsDir) - if err != nil { - return d, fmt.Errorf("read %s: %w", chartsDir, err) - } - if len(subcharts) > 0 { - d.logger.Infof("Subcharts in %s:", chartsDir) - for _, sc := range subcharts { - d.logger.Infof(sc.Name()) - } - } - - 
d.logger.Infof("Packaging Helm chart ...") - helmArchive, err := packageHelmChart(d.opts.chartDir, d.ctxt.GitCommitSHA, d.opts.debug) - if err != nil { - return d, fmt.Errorf("package Helm chart: %w", err) - } - d.helmArchive = helmArchive - return d, nil - } -} - -func collectValuesFiles() DeployStep { - return func(d *deployHelm) (*deployHelm, error) { - d.logger.Infof("Collecting Helm values files ...") - d.valuesFiles = []string{} - valuesFilesCandidates := []string{ - fmt.Sprintf("%s/secrets.yaml", d.opts.chartDir), // equivalent values.yaml is added automatically by Helm - fmt.Sprintf("%s/values.%s.yaml", d.opts.chartDir, d.targetConfig.Namespace), - fmt.Sprintf("%s/secrets.%s.yaml", d.opts.chartDir, d.targetConfig.Namespace), - } - for _, vfc := range valuesFilesCandidates { - if _, err := os.Stat(vfc); os.IsNotExist(err) { - d.logger.Infof("%s is not present, skipping.", vfc) - } else { - d.logger.Infof("%s is present, adding.", vfc) - d.valuesFiles = append(d.valuesFiles, vfc) - } - } - return d, nil - } -} - -func importAgeKey() DeployStep { - return func(d *deployHelm) (*deployHelm, error) { - if len(d.opts.ageKeySecret) == 0 { - d.logger.Infof("Skipping import of age key for helm-secrets as parameter is not set ...") - return d, nil - } - d.logger.Infof("Storing age key for helm-secrets ...") - secret, err := d.clientset.CoreV1().Secrets(d.ctxt.Namespace).Get( - context.TODO(), d.opts.ageKeySecret, metav1.GetOptions{}, - ) - if err != nil { - d.logger.Infof("No secret %s found, skipping.", d.opts.ageKeySecret) - return d, nil - } - err = storeAgeKey(secret.Data[d.opts.ageKeySecretField]) - if err != nil { - return d, fmt.Errorf("store age key: %w", err) - } - d.logger.Infof("Age key secret %s stored.", d.opts.ageKeySecret) - return d, nil - } -} - -func diffHelmRelease() DeployStep { - return func(d *deployHelm) (*deployHelm, error) { - d.logger.Infof("Diffing Helm release against %s...", d.helmArchive) - helmDiffArgs, err := d.assembleHelmDiffArgs() - 
if err != nil { - return d, fmt.Errorf("assemble helm diff args: %w", err) - } - printlnSafeHelmCmd(helmDiffArgs, os.Stdout) - // helm-dff stderr contains confusing text about "errors" when drift is - // detected, therefore we want to collect and polish it before we print it. - // helm-diff stdout needs to be written into a buffer so that we can both - // print it and store it later as a deployment artifact. - var diffStdoutBuf, diffStderrBuf bytes.Buffer - diffStdoutWriter := io.MultiWriter(os.Stdout, &diffStdoutBuf) - inSync, err := d.helmDiff(helmDiffArgs, diffStdoutWriter, &diffStderrBuf) - fmt.Print(cleanHelmDiffOutput(diffStderrBuf.String())) - if err != nil { - return d, fmt.Errorf("helm diff: %w", err) - } - if d.opts.diffOnly { - return d, &skipRemainingSteps{"Only diff was requested, skipping helm upgrade."} - } - if inSync { - return d, &skipRemainingSteps{"No diff detected, skipping helm upgrade."} - } - - err = writeDeploymentArtifact(diffStdoutBuf.Bytes(), "diff", d.opts.chartDir, d.targetConfig.Namespace) - if err != nil { - return d, fmt.Errorf("write diff artifact: %w", err) - } - return d, nil - } -} - -func upgradeHelmRelease() DeployStep { - return func(d *deployHelm) (*deployHelm, error) { - d.logger.Infof("Upgrading Helm release to %s...", d.helmArchive) - helmUpgradeArgs, err := d.assembleHelmUpgradeArgs() - if err != nil { - return d, fmt.Errorf("assemble helm upgrade args: %w", err) - } - printlnSafeHelmCmd(helmUpgradeArgs, os.Stdout) - var upgradeStdoutBuf bytes.Buffer - upgradeStdoutWriter := io.MultiWriter(os.Stdout, &upgradeStdoutBuf) - err = d.helmUpgrade(helmUpgradeArgs, upgradeStdoutWriter, os.Stderr) - if err != nil { - return d, fmt.Errorf("helm upgrade: %w", err) - } - err = writeDeploymentArtifact(upgradeStdoutBuf.Bytes(), "release", d.opts.chartDir, d.targetConfig.Namespace) - if err != nil { - return d, fmt.Errorf("write release artifact: %w", err) - } - return d, nil - } -} - -func getTrimmedFileContent(filename string) 
(string, error) { - content, err := os.ReadFile(filename) - if err != nil { - return "", err - } - return strings.TrimSpace(string(content)), nil -} - -func tokenFromSecret(clientset *kubernetes.Clientset, namespace, name string) (string, error) { - secret, err := clientset.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - return "", err - } - return string(secret.Data["token"]), nil -} - -func writeDeploymentArtifact(content []byte, filename, chartDir, targetEnv string) error { - err := os.MkdirAll(pipelinectxt.DeploymentsPath, 0755) - if err != nil { - return err - } - f := artifactFilename(filename, chartDir, targetEnv) + ".txt" - return os.WriteFile(filepath.Join(pipelinectxt.DeploymentsPath, f), content, 0644) -} - -func artifactFilename(filename, chartDir, targetEnv string) string { - trimmedChartDir := strings.TrimPrefix(chartDir, "./") - if trimmedChartDir != "chart" { - filename = fmt.Sprintf("%s-%s", strings.Replace(trimmedChartDir, "/", "-", -1), filename) - } - return fmt.Sprintf("%s-%s", filename, targetEnv) -} diff --git a/cmd/deploy-helm/steps_test.go b/cmd/deploy-helm/steps_test.go deleted file mode 100644 index 430fbfe5..00000000 --- a/cmd/deploy-helm/steps_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package main - -import ( - "fmt" - "testing" - - "github.com/opendevstack/ods-pipeline/pkg/artifact" -) - -func TestArtifactFilename(t *testing.T) { - tests := map[string]struct { - filename string - chartDir string - targetEnv string - want string - }{ - "default chart dir": { - filename: "diff", - chartDir: "./chart", - targetEnv: "foo-dev", - want: "diff-foo-dev", - }, - "default chart dir without prefix": { - filename: "diff", - chartDir: "chart", - targetEnv: "dev", - want: "diff-dev", - }, - "other chart dir": { - filename: "diff", - chartDir: "./foo-chart", - targetEnv: "qa", - want: "foo-chart-diff-qa", - }, - "other chart dir without prefix": { - filename: "diff", - chartDir: "bar-chart", - targetEnv: 
"foo-qa", - want: "bar-chart-diff-foo-qa", - }, - "nested chart dir": { - filename: "diff", - chartDir: "./some/path/chart", - targetEnv: "prod", - want: "some-path-chart-diff-prod", - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - got := artifactFilename(tc.filename, tc.chartDir, tc.targetEnv) - if got != tc.want { - t.Fatalf("want: %s, got: %s", tc.want, got) - } - }) - } -} - -func TestGetImageURLs(t *testing.T) { - srcHost := "image-registry.openshift-image-registry.svc:5000" - destHost := "default-route-openshift-image-registry.apps.example.com" - imgArtifact := artifact.Image{ - Ref: fmt.Sprintf("%s/foo/bar:baz", srcHost), - Repository: "foo", Name: "bar", Tag: "baz", - } - tests := map[string]struct { - registryHost string - releaseNamespace string - want string - }{ - "same cluster, same namespace": { - registryHost: "", - releaseNamespace: "foo", - want: fmt.Sprintf("%s/foo/bar:baz", srcHost), - }, - "same cluster, different namespace": { - registryHost: "", - releaseNamespace: "other", - want: fmt.Sprintf("%s/other/bar:baz", srcHost), - }, - "different cluster, same namespace": { - registryHost: destHost, - releaseNamespace: "foo", - want: fmt.Sprintf("%s/foo/bar:baz", destHost), - }, - "different cluster, different namespace": { - registryHost: destHost, - releaseNamespace: "other", - want: fmt.Sprintf("%s/other/bar:baz", destHost), - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - got := getImageDestURL(tc.registryHost, tc.releaseNamespace, imgArtifact) - if got != tc.want { - t.Fatalf("want: %s, got: %s", tc.want, got) - } - }) - } -} diff --git a/cmd/docs/main.go b/cmd/docs/main.go deleted file mode 100644 index 20d17847..00000000 --- a/cmd/docs/main.go +++ /dev/null @@ -1,20 +0,0 @@ -package main - -import ( - "log" - "path/filepath" - - "github.com/opendevstack/ods-pipeline/internal/docs" - "github.com/opendevstack/ods-pipeline/internal/projectpath" -) - -func main() { - err := 
docs.RenderTasks( - filepath.Join(projectpath.Root, "deploy/ods-pipeline/charts/tasks"), - filepath.Join(projectpath.Root, "docs/tasks/descriptions"), - filepath.Join(projectpath.Root, "docs/tasks"), - ) - if err != nil { - log.Fatal(err) - } -} diff --git a/cmd/package-image/buildah.go b/cmd/package-image/buildah.go deleted file mode 100644 index a74cb1bd..00000000 --- a/cmd/package-image/buildah.go +++ /dev/null @@ -1,157 +0,0 @@ -package main - -import ( - "errors" - "fmt" - "io" - "log" - "net/url" - "path/filepath" - "strings" - - "github.com/google/shlex" - "github.com/opendevstack/ods-pipeline/internal/command" -) - -const ( - buildahBin = "buildah" - buildahWorkdir = "/tmp" -) - -// buildahBuild builds a local image using the Dockerfile and context directory -// given in opts, tagging the resulting image with given tag. -func (p *packageImage) buildahBuild(outWriter, errWriter io.Writer) error { - args, err := p.buildahBuildArgs(p.imageRef()) - if err != nil { - return fmt.Errorf("assemble build args: %w", err) - } - return command.RunInDir(buildahBin, args, []string{}, buildahWorkdir, outWriter, errWriter) -} - -// buildahPush pushes a local image to a OCI formatted directory for trivy image scans. -func (p *packageImage) buildahPushTar(outWriter, errWriter io.Writer) error { - args := []string{ - fmt.Sprintf("--storage-driver=%s", p.opts.storageDriver), - "push", - fmt.Sprintf("--digestfile=%s", tektonResultsImageDigestFile), - } - if p.opts.debug { - args = append(args, "--log-level=debug") - } - args = append(args, p.imageRef(), fmt.Sprintf("oci:%s", filepath.Join(p.opts.checkoutDir, p.imageName()))) - return command.RunInDir(buildahBin, args, []string{}, buildahWorkdir, outWriter, errWriter) -} - -// buildahPush pushes a local image to the given imageRef. 
-func (p *packageImage) buildahPush(outWriter, errWriter io.Writer) error { - opts := p.opts - extraArgs, err := shlex.Split(opts.buildahPushExtraArgs) - if err != nil { - log.Printf("could not parse extra args (%s): %s", opts.buildahPushExtraArgs, err) - } - args := []string{ - fmt.Sprintf("--storage-driver=%s", opts.storageDriver), - "push", - fmt.Sprintf("--tls-verify=%v", opts.tlsVerify), - fmt.Sprintf("--cert-dir=%s", opts.certDir), - } - args = append(args, extraArgs...) - if opts.debug { - args = append(args, "--log-level=debug") - } - - source := p.imageId.ImageRefWithSha(opts.registry) - destination := fmt.Sprintf("docker://%s", source) - log.Printf("buildah push %s %s", source, destination) - args = append(args, source, destination) - return command.RunInDir(buildahBin, args, []string{}, buildahWorkdir, outWriter, errWriter) -} - -// buildahBuildArgs assembles the args to be passed to buildah based on -// given options and tag. -func (p *packageImage) buildahBuildArgs(tag string) ([]string, error) { - if tag == "" { - return nil, errors.New("tag must not be empty") - } - opts := p.opts - extraArgs, err := shlex.Split(opts.buildahBuildExtraArgs) - if err != nil { - return nil, fmt.Errorf("parse extra args (%s): %w", opts.buildahBuildExtraArgs, err) - } - - absDir, err := filepath.Abs(opts.checkoutDir) - if err != nil { - return nil, fmt.Errorf("abs dir: %w", err) - } - - args := []string{ - fmt.Sprintf("--storage-driver=%s", opts.storageDriver), - "bud", - fmt.Sprintf("--format=%s", opts.format), - fmt.Sprintf("--tls-verify=%v", opts.tlsVerify), - fmt.Sprintf("--cert-dir=%s", opts.certDir), - "--no-cache", - fmt.Sprintf("--file=%s", opts.dockerfile), - fmt.Sprintf("--tag=%s", tag), - } - args = append(args, extraArgs...) - nexusArgs, err := p.nexusBuildArgs() - if err != nil { - return nil, fmt.Errorf("add nexus build args: %w", err) - } - args = append(args, nexusArgs...) 
- - if opts.debug { - args = append(args, "--log-level=debug") - } - return append(args, filepath.Join(absDir, opts.contextDir)), nil -} - -// nexusBuildArgs computes --build-arg parameters so that the Dockerfile -// can access nexus as determined by the options nexus related -// parameters. -func (p *packageImage) nexusBuildArgs() ([]string, error) { - args := []string{} - opts := p.opts - if strings.TrimSpace(opts.nexusURL) != "" { - nexusUrl, err := url.Parse(opts.nexusURL) - if err != nil { - return nil, fmt.Errorf("could not parse nexus url (%s): %w", opts.nexusURL, err) - } - if nexusUrl.Host == "" { - return nil, fmt.Errorf("could not get host in nexus url (%s)", opts.nexusURL) - } - if opts.nexusUsername != "" { - if opts.nexusPassword == "" { - nexusUrl.User = url.User(opts.nexusUsername) - } else { - nexusUrl.User = url.UserPassword(opts.nexusUsername, opts.nexusPassword) - } - } - nexusAuth := nexusUrl.User.String() // this is encoded as needed. - a := strings.SplitN(nexusAuth, ":", 2) - unEscaped := "" - pwEscaped := "" - if len(a) > 0 { - unEscaped = a[0] - } - if len(a) > 1 { - pwEscaped = a[1] - } - args = []string{ - fmt.Sprintf("--build-arg=nexusUrl=%s", opts.nexusURL), - fmt.Sprintf("--build-arg=nexusUsername=%s", unEscaped), - fmt.Sprintf("--build-arg=nexusPassword=%s", pwEscaped), - fmt.Sprintf("--build-arg=nexusHost=%s", nexusUrl.Host), - } - args = append(args, fmt.Sprintf("--build-arg=nexusAuth=%s", nexusAuth)) - if nexusAuth != "" { - args = append(args, - fmt.Sprintf("--build-arg=nexusUrlWithAuth=%s://%s@%s", nexusUrl.Scheme, nexusAuth, nexusUrl.Host)) - } else { - args = append(args, - fmt.Sprintf("--build-arg=nexusUrlWithAuth=%s", opts.nexusURL)) - } - } - return args, nil -} diff --git a/cmd/package-image/buildah_test.go b/cmd/package-image/buildah_test.go deleted file mode 100644 index 2a626e7f..00000000 --- a/cmd/package-image/buildah_test.go +++ /dev/null @@ -1,171 +0,0 @@ -package main - -import ( - "fmt" - "os" - "path/filepath" - 
"testing" - - "github.com/google/go-cmp/cmp" -) - -func TestBuildahBuildArgs(t *testing.T) { - basePath, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - dockerDir := filepath.Join(basePath, "docker") - tests := map[string]struct { - opts options - tag string - wantArgs []string - wantErr string - }{ - "with default options": { - opts: defaultOptions, - tag: "foo", - wantArgs: []string{ - "--storage-driver=vfs", "bud", "--format=oci", - "--tls-verify=true", "--cert-dir=/etc/containers/certs.d", - "--no-cache", - "--file=./Dockerfile", "--tag=foo", dockerDir, - }, - }, - "with blank tag": { - opts: defaultOptions, - tag: "", - wantErr: "tag must not be empty", - }, - "with incorrect buildah extra args": { - opts: func(o options) options { o.buildahBuildExtraArgs = "\\"; return o }(defaultOptions), - tag: "foo", - wantErr: "parse extra args (\\): EOF found after escape character", - }, - "with Nexus args": { - opts: func(o options) options { - o.nexusURL = "http://nexus.example.com" - o.nexusUsername = "developer" - o.nexusPassword = "s3cr3t" - return o - }(defaultOptions), - tag: "foo", - wantArgs: []string{ - "--storage-driver=vfs", "bud", "--format=oci", - "--tls-verify=true", "--cert-dir=/etc/containers/certs.d", - "--no-cache", - "--file=./Dockerfile", "--tag=foo", - "--build-arg=nexusUrl=http://nexus.example.com", - "--build-arg=nexusUsername=developer", - "--build-arg=nexusPassword=s3cr3t", - "--build-arg=nexusHost=nexus.example.com", - "--build-arg=nexusAuth=developer:s3cr3t", - "--build-arg=nexusUrlWithAuth=http://developer:s3cr3t@nexus.example.com", - dockerDir, - }, - }, - "with debug on": { - opts: func(o options) options { o.debug = true; return o }(defaultOptions), - tag: "foo", - wantArgs: []string{ - "--storage-driver=vfs", "bud", "--format=oci", - "--tls-verify=true", "--cert-dir=/etc/containers/certs.d", - "--no-cache", - "--file=./Dockerfile", "--tag=foo", "--log-level=debug", dockerDir, - }, - }, - } - for name, tc := range tests { - 
t.Run(name, func(t *testing.T) { - p := packageImage{opts: tc.opts} - got, err := p.buildahBuildArgs(tc.tag) - if err != nil { - if tc.wantErr != err.Error() { - t.Fatalf("want err: '%s', got err: %s", tc.wantErr, err) - } - } - if diff := cmp.Diff(tc.wantArgs, got); diff != "" { - t.Fatalf("args mismatch (-want +got):\n%s", diff) - } - }) - } -} - -func TestNexusBuildArgs(t *testing.T) { - tests := map[string]struct { - nexusUrl string - nexusUsername string - nexusPassword string - baNexusUsername string - baNexusPassword string - baNexusHost string - baNexusAuth string - baNexusUrlWithAuth string - }{ - "simple-password": { - nexusUrl: "https://nexus-ods.example.openshiftapps.com", - nexusUsername: "un", - nexusPassword: "pw", - baNexusUsername: "un", - baNexusPassword: "pw", - baNexusHost: "nexus-ods.example.openshiftapps.com", - baNexusAuth: "un:pw", - baNexusUrlWithAuth: "https://un:pw@nexus-ods.example.openshiftapps.com", - }, - "simple-username-only": { - nexusUrl: "https://nexus-ods.example.openshiftapps.com", - nexusUsername: "un", - nexusPassword: "", - baNexusUsername: "un", - baNexusPassword: "", - baNexusHost: "nexus-ods.example.openshiftapps.com", - baNexusAuth: "un", - baNexusUrlWithAuth: "https://un@nexus-ods.example.openshiftapps.com", - }, - "simple-no-auth": { - nexusUrl: "https://nexus-ods.example.openshiftapps.com", - nexusUsername: "", - nexusPassword: "", - baNexusUsername: "", - baNexusPassword: "", - baNexusHost: "nexus-ods.example.openshiftapps.com", - baNexusAuth: "", - baNexusUrlWithAuth: "https://nexus-ods.example.openshiftapps.com", - }, - "complex-password": { - nexusUrl: "https://nexus-ods.example.openshiftapps.com", - nexusUsername: "user: mypw-to-follow", - nexusPassword: "a secret", - baNexusUsername: "user%3A%20mypw-to-follow", - baNexusPassword: "a%20secret", - baNexusHost: "nexus-ods.example.openshiftapps.com", - baNexusAuth: "user%3A%20mypw-to-follow:a%20secret", - baNexusUrlWithAuth: 
"https://user%3A%20mypw-to-follow:a%20secret@nexus-ods.example.openshiftapps.com", - }, - } - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - opts := options{ - nexusURL: tc.nexusUrl, - nexusUsername: tc.nexusUsername, - nexusPassword: tc.nexusPassword, - } - p := packageImage{opts: opts} - args, err := p.nexusBuildArgs() - if err != nil { - t.Fatal(err) - } - - expected := []string{ - fmt.Sprintf("--build-arg=nexusUrl=%s", tc.nexusUrl), - fmt.Sprintf("--build-arg=nexusUsername=%s", tc.baNexusUsername), - fmt.Sprintf("--build-arg=nexusPassword=%s", tc.baNexusPassword), - fmt.Sprintf("--build-arg=nexusHost=%s", tc.baNexusHost), - fmt.Sprintf("--build-arg=nexusAuth=%s", tc.baNexusAuth), - fmt.Sprintf("--build-arg=nexusUrlWithAuth=%s", tc.baNexusUrlWithAuth), - } - if diff := cmp.Diff(expected, args); diff != "" { - t.Fatalf("expected (-want +got):\n%s", diff) - } - }) - } -} diff --git a/cmd/package-image/main.go b/cmd/package-image/main.go deleted file mode 100644 index caf3e49a..00000000 --- a/cmd/package-image/main.go +++ /dev/null @@ -1,167 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/opendevstack/ods-pipeline/internal/image" - "github.com/opendevstack/ods-pipeline/pkg/artifact" - "github.com/opendevstack/ods-pipeline/pkg/logging" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" -) - -const ( - kubernetesServiceaccountDir = "/var/run/secrets/kubernetes.io/serviceaccount" - tektonResultsImageDigestFile = "/tekton/results/image-digest" -) - -type options struct { - checkoutDir string - imageStream string - extraTags string - registry string - certDir string - imageNamespace string - tlsVerify bool - storageDriver string - format string - dockerfile string - contextDir string - nexusURL string - nexusUsername string - nexusPassword string - buildahBuildExtraArgs string - buildahPushExtraArgs string - trivySBOMExtraArgs string - debug bool -} - -type packageImage struct { - logger 
logging.LeveledLoggerInterface - opts options - parsedExtraTags []string - ctxt *pipelinectxt.ODSContext - imageId image.Identity - imageDigest string - sbomFile string -} - -func (p *packageImage) imageName() string { - return p.imageId.StreamSha() -} - -func (p *packageImage) imageNameNoSha() string { - return p.imageId.ImageStream -} - -func (p *packageImage) imageRef() string { - return p.imageId.ImageRefWithSha(p.opts.registry) -} - -func (p *packageImage) artifactImage() artifact.Image { - return p.imageId.ArtifactImage(p.opts.registry, p.imageDigest) -} - -func (p *packageImage) artifactImageForTag(tag string) artifact.Image { - imageExtraTag := p.imageId.Tag(tag) - return imageExtraTag.ArtifactImage(p.opts.registry, p.imageDigest) -} - -var defaultOptions = options{ - checkoutDir: ".", - imageStream: "", - extraTags: "", - registry: "image-registry.openshift-image-registry.svc:5000", - certDir: defaultCertDir(), - imageNamespace: "", - tlsVerify: true, - storageDriver: "vfs", - format: "oci", - dockerfile: "./Dockerfile", - contextDir: "docker", - nexusURL: os.Getenv("NEXUS_URL"), - nexusUsername: os.Getenv("NEXUS_USERNAME"), - nexusPassword: os.Getenv("NEXUS_PASSWORD"), - buildahBuildExtraArgs: "", - buildahPushExtraArgs: "", - trivySBOMExtraArgs: "", - debug: (os.Getenv("DEBUG") == "true"), -} - -func main() { - opts := options{} - flag.StringVar(&opts.checkoutDir, "checkout-dir", defaultOptions.checkoutDir, "Checkout dir") - flag.StringVar(&opts.imageStream, "image-stream", defaultOptions.imageStream, "Image stream") - flag.StringVar(&opts.extraTags, "extra-tags", defaultOptions.extraTags, "Extra tags") - flag.StringVar(&opts.registry, "registry", defaultOptions.registry, "Registry") - flag.StringVar(&opts.certDir, "cert-dir", defaultOptions.certDir, "Use certificates at the specified path to access the registry") - flag.StringVar(&opts.imageNamespace, "image-namespace", defaultOptions.imageNamespace, "image namespace") - flag.BoolVar(&opts.tlsVerify, 
"tls-verify", defaultOptions.tlsVerify, "TLS verify") - flag.StringVar(&opts.storageDriver, "storage-driver", defaultOptions.storageDriver, "storage driver") - flag.StringVar(&opts.format, "format", defaultOptions.format, "format of the built container, oci or docker") - flag.StringVar(&opts.dockerfile, "dockerfile", defaultOptions.dockerfile, "dockerfile") - flag.StringVar(&opts.contextDir, "context-dir", defaultOptions.contextDir, "contextDir") - flag.StringVar(&opts.nexusURL, "nexus-url", defaultOptions.nexusURL, "Nexus URL") - flag.StringVar(&opts.nexusUsername, "nexus-username", defaultOptions.nexusUsername, "Nexus username") - flag.StringVar(&opts.nexusPassword, "nexus-password", defaultOptions.nexusPassword, "Nexus password") - flag.StringVar(&opts.buildahBuildExtraArgs, "buildah-build-extra-args", defaultOptions.buildahBuildExtraArgs, "extra parameters passed for the build command when building images") - flag.StringVar(&opts.buildahPushExtraArgs, "buildah-push-extra-args", defaultOptions.buildahPushExtraArgs, "extra parameters passed for the push command when pushing images") - flag.StringVar(&opts.trivySBOMExtraArgs, "trivy-sbom-extra-args", defaultOptions.trivySBOMExtraArgs, "extra parameters passed for the trivy command to generate an SBOM") - flag.BoolVar(&opts.debug, "debug", defaultOptions.debug, "debug mode") - flag.Parse() - var logger logging.LeveledLoggerInterface - if opts.debug { - logger = &logging.LeveledLogger{Level: logging.LevelDebug} - } else { - logger = &logging.LeveledLogger{Level: logging.LevelInfo} - } - p := packageImage{logger: logger, opts: opts} - err := (&p).runSteps( - setExtraTags(), - setupContext(), - setImageId(), - skipIfImageArtifactExists(), - buildImageAndGenerateTar(), - generateSBOM(), - pushImage(), - storeArtifact(), - ) - if err != nil { - logger.Errorf(err.Error()) - os.Exit(1) - } - // If skipIfImageArtifactExists skips the remaining runSteps, extra-tags - // still should be processed if their related artifact 
has not been set. - err = (&p).runSteps(processExtraTags()) - if err != nil { - logger.Errorf(err.Error()) - os.Exit(1) - } -} - -func defaultCertDir() string { - if _, err := os.Stat(kubernetesServiceaccountDir); err == nil { - return kubernetesServiceaccountDir - } - return "/etc/containers/certs.d" -} - -// getImageDigestFromFile reads the image digest from the file written to by buildah. -func getImageDigestFromFile(workingDir string) (string, error) { - content, err := os.ReadFile(tektonResultsImageDigestFile) - if err != nil { - return "", err - } - return strings.TrimSpace(string(content)), nil -} - -// imageArtifactExists checks if image artifact JSON file exists in its artifacts path -func imageArtifactExists(p *packageImage) error { - imageArtifactsDir := filepath.Join(p.opts.checkoutDir, pipelinectxt.ImageDigestsPath) - imageArtifactFilename := fmt.Sprintf("%s.json", p.ctxt.Component) - _, err := os.Stat(filepath.Join(imageArtifactsDir, imageArtifactFilename)) - return err -} diff --git a/cmd/package-image/skip.go b/cmd/package-image/skip.go deleted file mode 100644 index d404f22e..00000000 --- a/cmd/package-image/skip.go +++ /dev/null @@ -1,11 +0,0 @@ -package main - -// skipRemainingSteps is a pseudo error used to indicate that remaining -// steps should be skipped. 
-type skipRemainingSteps struct { - msg string -} - -func (e *skipRemainingSteps) Error() string { - return e.msg -} diff --git a/cmd/package-image/skopeo_tag.go b/cmd/package-image/skopeo_tag.go deleted file mode 100644 index 2e44fea3..00000000 --- a/cmd/package-image/skopeo_tag.go +++ /dev/null @@ -1,45 +0,0 @@ -package main - -import ( - "fmt" - "io" - "strings" - - "github.com/opendevstack/ods-pipeline/internal/command" - "github.com/opendevstack/ods-pipeline/internal/image" -) - -func (p *packageImage) skopeoTag(idt *image.IdentityWithTag, outWriter, errWriter io.Writer) error { - imageRef := idt.ImageRefWithSha(p.opts.registry) - p.logger.Infof("Tagging image %s with %s", imageRef, idt.Tag) - tlsVerify := p.opts.tlsVerify - // TLS verification of the KinD registry is not possible at the moment as - // requests error out with "server gave HTTP response to HTTPS client". - if strings.HasPrefix(p.opts.registry, "kind-registry.kind") { - tlsVerify = false - } - args := []string{ - "copy", - fmt.Sprintf("--src-tls-verify=%v", tlsVerify), - fmt.Sprintf("--dest-tls-verify=%v", tlsVerify), - } - if tlsVerify { - args = append(args, - fmt.Sprintf("--src-cert-dir=%v", p.opts.certDir), - fmt.Sprintf("--dest-cert-dir=%v", p.opts.certDir)) - } - if p.opts.debug { - args = append(args, "--debug") - } - source := fmt.Sprintf("docker://%s", imageRef) - destination := fmt.Sprintf("docker://%s", idt.ImageRef(p.opts.registry)) - - args = append(args, source, destination) - - p.logger.Infof("skopeo copy %s %s", source, destination) - err := command.Run("skopeo", args, []string{}, outWriter, errWriter) - if err != nil { - return fmt.Errorf("skopeo copy %s to %s: %w", source, destination, err) - } - return nil -} diff --git a/cmd/package-image/steps.go b/cmd/package-image/steps.go deleted file mode 100644 index e85309c9..00000000 --- a/cmd/package-image/steps.go +++ /dev/null @@ -1,187 +0,0 @@ -package main - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "strings" - - 
"github.com/google/shlex" - "github.com/opendevstack/ods-pipeline/internal/directory" - "github.com/opendevstack/ods-pipeline/internal/image" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" -) - -type PackageStep func(d *packageImage) (*packageImage, error) - -func (d *packageImage) runSteps(steps ...PackageStep) error { - var skip *skipRemainingSteps - var err error - for _, step := range steps { - d, err = step(d) - if err != nil { - if errors.As(err, &skip) { - d.logger.Infof(err.Error()) - return nil - } - return err - } - } - return nil -} - -func setupContext() PackageStep { - return func(p *packageImage) (*packageImage, error) { - ctxt := &pipelinectxt.ODSContext{} - err := ctxt.ReadCache(p.opts.checkoutDir) - if err != nil { - return p, fmt.Errorf("read cache: %w", err) - } - p.ctxt = ctxt - - if p.opts.debug { - if err := directory.ListFiles(p.opts.certDir, os.Stdout); err != nil { - p.logger.Errorf(err.Error()) - } - } - - // TLS verification of the KinD registry is not possible at the moment as - // requests error out with "server gave HTTP response to HTTPS client". - if strings.HasPrefix(p.opts.registry, "kind-registry.kind") { - p.opts.tlsVerify = false - } - - return p, nil - } -} - -func setExtraTags() PackageStep { - return func(p *packageImage) (*packageImage, error) { - extraTagsSpecified, err := shlex.Split(p.opts.extraTags) - if err != nil { - return p, fmt.Errorf("parse extra tags (%s): %w", p.opts.extraTags, err) - } - p.parsedExtraTags = extraTagsSpecified - return p, nil - } -} - -func setImageId() PackageStep { - return func(p *packageImage) (*packageImage, error) { - p.imageId = image.CreateImageIdentity(p.ctxt, p.opts.imageNamespace, p.opts.imageStream) - return p, nil - } -} - -// skipIfImageArtifactExists informs to skip next steps if ODS image artifact is already in place. -// In future we might want to check all the expected artifacts, that must exist to do skip properly. 
-func skipIfImageArtifactExists() PackageStep { - return func(p *packageImage) (*packageImage, error) { - fmt.Printf("Checking if image artifact for %s exists already ...\n", p.imageName()) - err := imageArtifactExists(p) - if err == nil { - return p, &skipRemainingSteps{"image artifact exists already"} - } - return p, nil - } -} - -func buildImageAndGenerateTar() PackageStep { - return func(p *packageImage) (*packageImage, error) { - fmt.Printf("Building image %s ...\n", p.imageName()) - err := p.buildahBuild(os.Stdout, os.Stderr) - if err != nil { - return p, fmt.Errorf("buildah bud: %w", err) - } - fmt.Printf("Creating local tar folder for image %s ...\n", p.imageName()) - err = p.buildahPushTar(os.Stdout, os.Stderr) - if err != nil { - return p, fmt.Errorf("buildah push tar: %w", err) - } - d, err := getImageDigestFromFile(p.opts.checkoutDir) - if err != nil { - return p, err - } - p.imageDigest = d - return p, nil - } -} - -func generateSBOM() PackageStep { - return func(p *packageImage) (*packageImage, error) { - fmt.Println("Generating image SBOM with trivy scanner ...") - err := p.generateImageSBOM() - if err != nil { - return p, fmt.Errorf("generate SBOM: %w", err) - } - return p, nil - } -} - -func pushImage() PackageStep { - return func(p *packageImage) (*packageImage, error) { - fmt.Printf("Pushing image %s ...\n", p.imageName()) - err := p.buildahPush(os.Stdout, os.Stderr) - if err != nil { - return p, fmt.Errorf("buildah push: %w", err) - } - return p, nil - } -} - -func storeArtifact() PackageStep { - return func(p *packageImage) (*packageImage, error) { - fmt.Println("Writing image artifact ...") - imageArtifactFilename := fmt.Sprintf("%s.json", p.imageNameNoSha()) - err := pipelinectxt.WriteJsonArtifact(p.artifactImage(), pipelinectxt.ImageDigestsPath, imageArtifactFilename) - if err != nil { - return p, err - } - - fmt.Println("Writing SBOM artifact ...") - err = pipelinectxt.CopyArtifact(p.sbomFile, pipelinectxt.SBOMsPath) - if err != nil { - 
return p, fmt.Errorf("copy SBOM report to artifacts: %w", err) - } - - return p, nil - } -} - -func processExtraTags() PackageStep { - return func(p *packageImage) (*packageImage, error) { - if len(p.parsedExtraTags) > 0 { - p.logger.Infof("Processing extra tags: %+q", p.parsedExtraTags) - for _, extraTag := range p.parsedExtraTags { - err := imageTagArtifactExists(p, extraTag) - if err == nil { - p.logger.Infof("Artifact exists for tag: %s", extraTag) - continue - } - p.logger.Infof("pushing extra tag: %s", extraTag) - imageExtraTag := p.imageId.Tag(extraTag) - err = p.skopeoTag(&imageExtraTag, os.Stdout, os.Stderr) - if err != nil { - return p, fmt.Errorf("skopeo push failed: %w", err) - } - - p.logger.Infof("Writing image artifact for tag: %s", extraTag) - image := p.artifactImageForTag(extraTag) - filename := fmt.Sprintf("%s-%s.json", p.imageId.ImageStream, extraTag) - err = pipelinectxt.WriteJsonArtifact(image, pipelinectxt.ImageDigestsPath, filename) - if err != nil { - return p, err - } - } - } - return p, nil - } -} - -func imageTagArtifactExists(p *packageImage, tag string) error { - imageArtifactsDir := filepath.Join(p.opts.checkoutDir, pipelinectxt.ImageDigestsPath) - filename := fmt.Sprintf("%s-%s.json", p.imageId.ImageStream, tag) - _, err := os.Stat(filepath.Join(imageArtifactsDir, filename)) - return err -} diff --git a/cmd/package-image/trivy.go b/cmd/package-image/trivy.go deleted file mode 100644 index 5a0a24dd..00000000 --- a/cmd/package-image/trivy.go +++ /dev/null @@ -1,39 +0,0 @@ -package main - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/google/shlex" - "github.com/opendevstack/ods-pipeline/internal/command" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" -) - -const ( - trivyBin = "trivy" - trivyWorkdir = "/tmp" -) - -func (p *packageImage) generateImageSBOM() error { - // settle for one format and name until we have use cases for multiple formats (we use spdx format). 
- // trivy support --formats: table, json, sarif, template, cyclonedx, spdx, spdx-json, github, cosign-vuln (default "table") - // more args for experimentation via extra args - extraArgs, err := shlex.Split(p.opts.trivySBOMExtraArgs) - if err != nil { - p.logger.Errorf("could not parse extra args (%s): %s", p.opts.trivySBOMExtraArgs, err) - } - sbomFilename := fmt.Sprintf("%s.%s", p.imageNameNoSha(), pipelinectxt.SBOMsFormat) - p.sbomFile = filepath.Join(trivyWorkdir, sbomFilename) - args := []string{ - "image", - fmt.Sprintf("--format=%s", pipelinectxt.SBOMsFormat), - fmt.Sprintf("--input=%s", filepath.Join(buildahWorkdir, p.imageNameNoSha())), - fmt.Sprintf("--output=%s", p.sbomFile), - } - if p.opts.debug { - args = append(args, "--debug=true") - } - args = append(args, extraArgs...) - return command.RunInDir(trivyBin, args, []string{}, trivyWorkdir, os.Stdout, os.Stderr) -} diff --git a/cmd/pipeline-manager/main.go b/cmd/pipeline-manager/main.go index 92949082..8699d60a 100644 --- a/cmd/pipeline-manager/main.go +++ b/cmd/pipeline-manager/main.go @@ -16,7 +16,6 @@ import ( tektonClient "github.com/opendevstack/ods-pipeline/internal/tekton" "github.com/opendevstack/ods-pipeline/pkg/bitbucket" "github.com/opendevstack/ods-pipeline/pkg/logging" - tekton "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" ) const ( @@ -25,9 +24,6 @@ const ( repoBaseEnvVar = "REPO_BASE" tokenEnvVar = "ACCESS_TOKEN" webhookSecretEnvVar = "WEBHOOK_SECRET" - taskKindEnvVar = "ODS_TASK_KIND" - taskKindDefault = "Task" - taskSuffixEnvVar = "ODS_TASK_SUFFIX" storageProvisionerEnvVar = "ODS_STORAGE_PROVISIONER" storageClassNameEnvVar = "ODS_STORAGE_CLASS_NAME" storageClassNameDefault = "standard" @@ -77,10 +73,6 @@ func serve() error { return fmt.Errorf("%s must be set", webhookSecretEnvVar) } - taskKind := readStringFromEnvVar(taskKindEnvVar, taskKindDefault) - - taskSuffix := readStringFromEnvVar(taskSuffixEnvVar, "") - storageProvisioner := 
readStringFromEnvVar(storageProvisionerEnvVar, "") storageClassName := readStringFromEnvVar(storageClassNameEnvVar, storageClassNameDefault) @@ -152,8 +144,6 @@ func serve() error { TektonClient: tClient, KubernetesClient: kClient, Logger: logger.WithTag("scheduler"), - TaskKind: tekton.TaskKind(taskKind), - TaskSuffix: taskSuffix, StorageConfig: manager.StorageConfig{ Provisioner: storageProvisioner, ClassName: storageClassName, diff --git a/cmd/sonar/main.go b/cmd/sonar/main.go deleted file mode 100644 index 11e2271c..00000000 --- a/cmd/sonar/main.go +++ /dev/null @@ -1,219 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "log" - "os" - "path/filepath" - "strings" - "time" - - "github.com/opendevstack/ods-pipeline/pkg/logging" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" - "github.com/opendevstack/ods-pipeline/pkg/sonar" -) - -type options struct { - sonarAuthToken string - sonarURL string - sonarEdition string - workingDir string - rootPath string - qualityGate bool - trustStore string - trustStorePassword string - debug bool -} - -var defaultOptions = options{ - sonarAuthToken: os.Getenv("SONAR_AUTH_TOKEN"), - sonarURL: os.Getenv("SONAR_URL"), - sonarEdition: os.Getenv("SONAR_EDITION"), - workingDir: ".", - qualityGate: false, - trustStore: "${JAVA_HOME}/lib/security/cacerts", - trustStorePassword: "changeit", - debug: (os.Getenv("DEBUG") == "true"), -} - -func main() { - rootPath, err := filepath.Abs(".") - if err != nil { - log.Fatal(err) - } - - opts := options{rootPath: rootPath} - flag.StringVar(&opts.sonarAuthToken, "sonar-auth-token", defaultOptions.sonarAuthToken, "sonar-auth-token") - flag.StringVar(&opts.sonarURL, "sonar-url", defaultOptions.sonarURL, "sonar-url") - flag.StringVar(&opts.sonarEdition, "sonar-edition", defaultOptions.sonarEdition, "sonar-edition") - flag.StringVar(&opts.workingDir, "working-dir", defaultOptions.workingDir, "working directory") - flag.BoolVar(&opts.qualityGate, "quality-gate", defaultOptions.qualityGate, 
"require quality gate pass") - flag.StringVar(&opts.trustStore, "truststore", defaultOptions.trustStore, "JKS truststore") - flag.StringVar(&opts.trustStorePassword, "truststore-pass", defaultOptions.trustStorePassword, "JKS truststore password") - flag.BoolVar(&opts.debug, "debug", defaultOptions.debug, "debug mode") - flag.Parse() - - var logger logging.LeveledLoggerInterface - if opts.debug { - logger = &logging.LeveledLogger{Level: logging.LevelDebug} - } else { - logger = &logging.LeveledLogger{Level: logging.LevelInfo} - } - - ctxt := &pipelinectxt.ODSContext{} - err = ctxt.ReadCache(".") - if err != nil { - log.Fatal(err) - } - - err = os.Chdir(opts.workingDir) - if err != nil { - log.Fatal(err) - } - - sonarClient, err := sonar.NewClient(&sonar.ClientConfig{ - APIToken: opts.sonarAuthToken, - BaseURL: opts.sonarURL, - ServerEdition: opts.sonarEdition, - TrustStore: opts.trustStore, - TrustStorePassword: opts.trustStorePassword, - Debug: opts.debug, - Logger: logger, - }) - if err != nil { - log.Fatal("sonar client:", err) - } - - err = sonarScan(logger, opts, ctxt, sonarClient) - if err != nil { - log.Fatal(err) - } -} - -func sonarScan( - logger logging.LeveledLoggerInterface, - opts options, - ctxt *pipelinectxt.ODSContext, - sonarClient sonar.ClientInterface) error { - artifactPrefix := "" - if opts.workingDir != "." 
{ - artifactPrefix = strings.Replace(opts.workingDir, "/", "-", -1) + "-" - } - - sonarProject := sonar.ProjectKey(ctxt, artifactPrefix) - - logger.Infof("Scanning with sonar-scanner ...") - var prInfo *sonar.PullRequest - if len(ctxt.PullRequestKey) > 0 && ctxt.PullRequestKey != "0" && len(ctxt.PullRequestBase) > 0 { - logger.Infof("Pull request (ID %s) detected.", ctxt.PullRequestKey) - prInfo = &sonar.PullRequest{ - Key: ctxt.PullRequestKey, - Branch: ctxt.GitRef, - Base: ctxt.PullRequestBase, - } - } - err := sonarClient.Scan( - sonarProject, - ctxt.GitRef, - ctxt.GitCommitSHA, - prInfo, - os.Stdout, - os.Stderr, - ) - if err != nil { - return fmt.Errorf("scan failed: %w", err) - } - - logger.Infof("Wait until compute engine task finishes ...") - err = waitUntilComputeEngineTaskIsSuccessful(logger, sonarClient) - if err != nil { - return fmt.Errorf("background task did not finish successfully: %w", err) - } - - if prInfo == nil { - logger.Infof("Generating reports ...") - err := sonarClient.GenerateReports( - sonarProject, - "OpenDevStack", - ctxt.GitRef, - opts.rootPath, - artifactPrefix, - ) - if err != nil { - logger.Errorf(err.Error()) - os.Exit(1) - } - } else { - logger.Infof("No reports are generated for pull request scans.") - } - - if opts.qualityGate { - logger.Infof("Checking quality gate ...") - qualityGateResult, err := sonarClient.QualityGateGet(sonar.QualityGateGetParams{ - ProjectKey: sonarProject, - Branch: ctxt.GitRef, - PullRequest: ctxt.PullRequestKey, - }) - if err != nil { - return fmt.Errorf("quality gate could not be retrieved: %w", err) - } - err = pipelinectxt.WriteJsonArtifact( - qualityGateResult, - filepath.Join(opts.rootPath, pipelinectxt.SonarAnalysisPath), - fmt.Sprintf("%squality-gate.json", artifactPrefix), - ) - if err != nil { - return fmt.Errorf("quality gate status could not be stored as an artifact: %w", err) - } - actualStatus := qualityGateResult.ProjectStatus.Status - if actualStatus != sonar.QualityGateStatusOk { - 
return fmt.Errorf( - "quality gate status is '%s', not '%s'", - actualStatus, sonar.QualityGateStatusOk, - ) - } else { - logger.Infof("Quality gate passed.") - } - } - - return nil -} - -// waitUntilComputeEngineTaskIsSuccessful reads the scanner report file and -// extracts the task ID. It then waits until the corresponding background task -// in SonarQube succeeds. If the tasks fails or the timeout is reached, an -// error is returned. -func waitUntilComputeEngineTaskIsSuccessful(logger logging.LeveledLoggerInterface, sonarClient sonar.ClientInterface) error { - reportTaskID, err := sonarClient.ExtractComputeEngineTaskID(sonar.ReportTaskFile) - if err != nil { - return fmt.Errorf("cannot read task ID: %w", err) - } - params := sonar.ComputeEngineTaskGetParams{ID: reportTaskID} - attempts := 8 // allows for over 4min task runtime - sleep := time.Second - for i := 0; i < attempts; i++ { - logger.Infof("Waiting %s before checking task status ...", sleep) - time.Sleep(sleep) - sleep *= 2 - task, err := sonarClient.ComputeEngineTaskGet(params) - if err != nil { - logger.Infof("cannot get status of task: %s", err) - continue - } - switch task.Status { - case sonar.TaskStatusInProgress: - logger.Infof("Background task %s has not finished yet", reportTaskID) - case sonar.TaskStatusPending: - logger.Infof("Background task %s has not started yet", reportTaskID) - case sonar.TaskStatusFailed: - return fmt.Errorf("background task %s has failed", reportTaskID) - case sonar.TaskStatusSuccess: - logger.Infof("Background task %s has finished successfully", reportTaskID) - return nil - default: - logger.Infof("Background task %s has unknown status %s", reportTaskID, task.Status) - } - } - return fmt.Errorf("background task %s did not succeed within timeout", reportTaskID) -} diff --git a/cmd/sonar/main_test.go b/cmd/sonar/main_test.go deleted file mode 100644 index e9ad9d56..00000000 --- a/cmd/sonar/main_test.go +++ /dev/null @@ -1,161 +0,0 @@ -package main - -import ( - "io" - 
"os" - "strings" - "testing" - - "github.com/opendevstack/ods-pipeline/pkg/logging" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" - "github.com/opendevstack/ods-pipeline/pkg/sonar" -) - -type fakeClient struct { - scanPerformed bool - passQualityGate bool - qualityGateRetrieved bool - reportGenerated bool -} - -func (c *fakeClient) Scan(sonarProject, branch, commit string, pr *sonar.PullRequest, outWriter, errWriter io.Writer) error { - c.scanPerformed = true - return nil -} - -func (c *fakeClient) QualityGateGet(p sonar.QualityGateGetParams) (*sonar.QualityGate, error) { - c.qualityGateRetrieved = true - status := sonar.QualityGateStatusError - if c.passQualityGate { - status = sonar.QualityGateStatusOk - } - return &sonar.QualityGate{ProjectStatus: sonar.QualityGateProjectStatus{Status: status}}, nil -} - -func (c *fakeClient) GenerateReports(sonarProject, author, branch, rootPath, artifactPrefix string) error { - c.reportGenerated = true - return nil -} - -func (c *fakeClient) ExtractComputeEngineTaskID(filename string) (string, error) { - return "abc123", nil -} - -func (c *fakeClient) ComputeEngineTaskGet(params sonar.ComputeEngineTaskGetParams) (*sonar.ComputeEngineTask, error) { - return &sonar.ComputeEngineTask{Status: sonar.TaskStatusSuccess}, nil -} - -func TestSonarScan(t *testing.T) { - logger := &logging.LeveledLogger{Level: logging.LevelDebug} - - tests := map[string]struct { - // which SQ edition is in use - optSonarEdition string - // whether quality gate is required to pass - optQualityGate bool - - // PR key - ctxtPrKey string - // PR base - ctxtPrBase string - - // whether the quality gate in SQ passes (faked) - passQualityGate bool - - // whether scan should have been performed - wantScanPerformed bool - // whether report should have been generated - wantReportGenerated bool - // whether quality gate should have been retrieved - wantQualityGateRetrieved bool - // whether scanning should fail - if not empty, the actual error message - 
// will be checked to contain wantErr. - wantErr string - }{ - "developer edition generates report when no PR is present": { - optSonarEdition: "developer", - optQualityGate: true, - ctxtPrKey: "", - ctxtPrBase: "", - passQualityGate: true, - wantScanPerformed: true, - wantReportGenerated: true, - wantQualityGateRetrieved: true, - }, - "developer edition does not generate report when PR is present": { - optSonarEdition: "developer", - optQualityGate: true, - ctxtPrKey: "3", - ctxtPrBase: "master", - passQualityGate: true, - wantScanPerformed: true, - wantReportGenerated: false, - wantQualityGateRetrieved: true, - }, - "community edition generates report": { - optSonarEdition: "community", - optQualityGate: true, - ctxtPrKey: "", - ctxtPrBase: "", - passQualityGate: true, - wantScanPerformed: true, - wantReportGenerated: true, - wantQualityGateRetrieved: true, - }, - "does not check quality gate if disabled": { - optSonarEdition: "community", - optQualityGate: false, - ctxtPrKey: "", - ctxtPrBase: "", - passQualityGate: true, - wantScanPerformed: true, - wantReportGenerated: true, - wantQualityGateRetrieved: false, - }, - "fails if quality gate does not pass": { - optSonarEdition: "community", - optQualityGate: true, - ctxtPrKey: "", - ctxtPrBase: "", - passQualityGate: false, - wantScanPerformed: true, - wantReportGenerated: true, - wantQualityGateRetrieved: true, - wantErr: "quality gate status is 'ERROR', not 'OK'", - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - tempDir, err := os.MkdirTemp(".", "test-cmd-sonar-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempDir) - opts := options{ - sonarEdition: tc.optSonarEdition, - qualityGate: tc.optQualityGate, - workingDir: ".", - rootPath: tempDir, - } - ctxt := &pipelinectxt.ODSContext{PullRequestKey: tc.ctxtPrKey, PullRequestBase: tc.ctxtPrBase} - sonarClient := &fakeClient{passQualityGate: tc.passQualityGate} - err = sonarScan(logger, opts, ctxt, sonarClient) - if err 
!= nil { - if tc.wantErr == "" || !strings.Contains(err.Error(), tc.wantErr) { - t.Fatalf("want err to contain: %s, got err: %s", tc.wantErr, err) - } - } - if sonarClient.scanPerformed != tc.wantScanPerformed { - t.Fatalf("want scan performed to be %v, got %v", tc.wantScanPerformed, sonarClient.scanPerformed) - } - if sonarClient.reportGenerated != tc.wantReportGenerated { - t.Fatalf("want report generated to be %v, got %v", tc.wantReportGenerated, sonarClient.reportGenerated) - } - if sonarClient.qualityGateRetrieved != tc.wantQualityGateRetrieved { - t.Fatalf("want quality gate retrieved to be %v, got %v", tc.wantQualityGateRetrieved, sonarClient.qualityGateRetrieved) - } - }) - } - -} diff --git a/cmd/tasks/main.go b/cmd/tasks/main.go deleted file mode 100644 index f974f692..00000000 --- a/cmd/tasks/main.go +++ /dev/null @@ -1,19 +0,0 @@ -package main - -import ( - "log" - "path/filepath" - - "github.com/opendevstack/ods-pipeline/internal/projectpath" - "github.com/opendevstack/ods-pipeline/internal/tasks" -) - -func main() { - err := tasks.Render( - filepath.Join(projectpath.Root, "deploy/ods-pipeline/charts/tasks"), - filepath.Join(projectpath.Root, "tasks"), - ) - if err != nil { - log.Fatal(err) - } -} diff --git a/deploy/ods-pipeline/.gitignore b/deploy/chart/.gitignore similarity index 100% rename from deploy/ods-pipeline/.gitignore rename to deploy/chart/.gitignore diff --git a/deploy/ods-pipeline/charts/tasks/Chart.yaml b/deploy/chart/Chart.yaml similarity index 92% rename from deploy/ods-pipeline/charts/tasks/Chart.yaml rename to deploy/chart/Chart.yaml index 5fbe09cb..e8197bba 100644 --- a/deploy/ods-pipeline/charts/tasks/Chart.yaml +++ b/deploy/chart/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 -name: tasks -description: A Helm chart to setup ODS pipeline tasks +name: ods-pipeline +description: ODS Pipeline # A chart can be either an 'application' or a 'library' chart. 
# @@ -20,4 +20,5 @@ version: 0.13.2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. appVersion: "0.13.2" diff --git a/deploy/ods-pipeline/charts/setup/templates/_helpers.tpl b/deploy/chart/templates/_helpers.tpl similarity index 84% rename from deploy/ods-pipeline/charts/setup/templates/_helpers.tpl rename to deploy/chart/templates/_helpers.tpl index b1c875bc..7ba5edc2 100644 --- a/deploy/ods-pipeline/charts/setup/templates/_helpers.tpl +++ b/deploy/chart/templates/_helpers.tpl @@ -60,15 +60,3 @@ Create the name of the service account to use {{- default "default" .Values.serviceAccount.name }} {{- end }} {{- end }} - -{{/* -Create the task suffix. -See https://github.com/Masterminds/sprig/issues/53#issuecomment-483414063. -*/}} -{{- define "taskSuffix" -}} -{{- if kindIs "invalid" .Values.global.taskSuffix }} -{{- printf "-v%s" (.Chart.AppVersion | replace "." 
"-") }} -{{- else }} -{{- .Values.global.taskSuffix }} -{{- end }} -{{- end }} diff --git a/deploy/ods-pipeline/charts/setup/templates/configmap-bitbucket.yaml b/deploy/chart/templates/configmap-bitbucket.yaml similarity index 100% rename from deploy/ods-pipeline/charts/setup/templates/configmap-bitbucket.yaml rename to deploy/chart/templates/configmap-bitbucket.yaml diff --git a/deploy/ods-pipeline/charts/setup/templates/configmap-cluster.yaml b/deploy/chart/templates/configmap-cluster.yaml similarity index 100% rename from deploy/ods-pipeline/charts/setup/templates/configmap-cluster.yaml rename to deploy/chart/templates/configmap-cluster.yaml diff --git a/deploy/ods-pipeline/charts/setup/templates/configmap-nexus.yaml b/deploy/chart/templates/configmap-nexus.yaml similarity index 100% rename from deploy/ods-pipeline/charts/setup/templates/configmap-nexus.yaml rename to deploy/chart/templates/configmap-nexus.yaml diff --git a/deploy/ods-pipeline/charts/setup/templates/configmap-notifications.yaml b/deploy/chart/templates/configmap-notifications.yaml similarity index 100% rename from deploy/ods-pipeline/charts/setup/templates/configmap-notifications.yaml rename to deploy/chart/templates/configmap-notifications.yaml diff --git a/deploy/ods-pipeline/charts/setup/templates/configmap-pipeline.yaml b/deploy/chart/templates/configmap-pipeline.yaml similarity index 100% rename from deploy/ods-pipeline/charts/setup/templates/configmap-pipeline.yaml rename to deploy/chart/templates/configmap-pipeline.yaml diff --git a/deploy/ods-pipeline/charts/setup/templates/deployment.yaml b/deploy/chart/templates/deployment.yaml similarity index 87% rename from deploy/ods-pipeline/charts/setup/templates/deployment.yaml rename to deploy/chart/templates/deployment.yaml index b82f39e7..482126c0 100644 --- a/deploy/ods-pipeline/charts/setup/templates/deployment.yaml +++ b/deploy/chart/templates/deployment.yaml @@ -17,7 +17,7 @@ spec: containers: - name: pipeline-manager securityContext: {} 
- image: "{{.Values.pipelineManager.imageRepository}}/ods-pipeline-manager:{{.Values.pipelineManager.imageTag | default .Chart.AppVersion}}" + image: "{{.Values.imageRepository}}/pipeline-manager:{{.Values.imageTag | default .Chart.AppVersion}}" ports: - name: http containerPort: 8080 @@ -53,10 +53,6 @@ spec: value: '{{int .Values.pipelineRunMinKeepHours}}' - name: ODS_PRUNE_MAX_KEEP_RUNS value: '{{int .Values.pipelineRunMaxKeepRuns}}' - - name: ODS_TASK_KIND - value: '{{default "Task" .Values.global.taskKind}}' - - name: ODS_TASK_SUFFIX - value: '{{- include "taskSuffix" .}}' readinessProbe: httpGet: path: /health diff --git a/deploy/ods-pipeline/charts/setup/templates/service.yaml b/deploy/chart/templates/service.yaml similarity index 100% rename from deploy/ods-pipeline/charts/setup/templates/service.yaml rename to deploy/chart/templates/service.yaml diff --git a/deploy/ods-pipeline/charts/tasks/templates/task-ods-finish.yaml b/deploy/chart/templates/task-finish.yaml similarity index 88% rename from deploy/ods-pipeline/charts/tasks/templates/task-ods-finish.yaml rename to deploy/chart/templates/task-finish.yaml index 251e9500..d8e06aea 100644 --- a/deploy/ods-pipeline/charts/tasks/templates/task-ods-finish.yaml +++ b/deploy/chart/templates/task-finish.yaml @@ -1,9 +1,7 @@ apiVersion: tekton.dev/v1beta1 -kind: '{{default "Task" .Values.global.taskKind}}' +kind: Task metadata: - name: '{{default "ods" .Values.taskPrefix}}-finish{{- include "taskSuffix" .}}' - annotations: - "helm.sh/resource-policy": keep + name: ods-pipeline-finish spec: description: | Finishes the pipeline run. @@ -20,9 +18,9 @@ spec: description: Artifact target respository default: '' steps: - - name: ods-finish + - name: finish # Image is built from build/package/Dockerfile.finish. 
- image: '{{.Values.imageRepository}}/ods-finish:{{.Values.global.imageTag | default .Chart.AppVersion}}' + image: '{{.Values.imageRepository}}/finish:{{.Values.imageTag | default .Chart.AppVersion}}' env: - name: HOME value: '/tekton/home' diff --git a/deploy/ods-pipeline/charts/tasks/templates/task-ods-start.yaml b/deploy/chart/templates/task-start.yaml similarity index 94% rename from deploy/ods-pipeline/charts/tasks/templates/task-ods-start.yaml rename to deploy/chart/templates/task-start.yaml index 1faf9d42..4fbe25ea 100644 --- a/deploy/ods-pipeline/charts/tasks/templates/task-ods-start.yaml +++ b/deploy/chart/templates/task-start.yaml @@ -1,9 +1,7 @@ apiVersion: tekton.dev/v1beta1 -kind: '{{default "Task" .Values.global.taskKind}}' +kind: Task metadata: - name: '{{default "ods" .Values.taskPrefix}}-start{{- include "taskSuffix" .}}' - annotations: - "helm.sh/resource-policy": keep + name: ods-pipeline-start spec: description: | Starts the pipeline run. @@ -76,9 +74,9 @@ spec: - description: The URL that was fetched by this task. name: url steps: - - name: ods-start + - name: start # Image is built from build/package/Dockerfile.start. - image: '{{.Values.imageRepository}}/ods-start:{{.Values.global.imageTag | default .Chart.AppVersion}}' + image: '{{.Values.imageRepository}}/start:{{.Values.imageTag | default .Chart.AppVersion}}' env: - name: HOME value: '/tekton/home' diff --git a/deploy/chart/values.kind.yaml b/deploy/chart/values.kind.yaml new file mode 100644 index 00000000..260bc492 --- /dev/null +++ b/deploy/chart/values.kind.yaml @@ -0,0 +1,14 @@ +imageTag: latest + +# Cluster +consoleUrl: 'http://example.com' +# Pipeline Manager +pipelineManager: + storageProvisioner: '' + storageClassName: 'standard' + storageSize: '2Gi' + +# Image repository to pull task images from. +# To test with the latest public ods-pipeline images, set +# global.imageTag to 'latest' and use: 'ghcr.io/opendevstack/ods-pipeline'. 
+imageRepository: localhost:5000/ods-pipeline diff --git a/deploy/chart/values.yaml b/deploy/chart/values.yaml new file mode 100644 index 00000000..33abac1c --- /dev/null +++ b/deploy/chart/values.yaml @@ -0,0 +1,113 @@ +# ----------------------- Installation hint ----------------------- +# !!! Important !!! +# This is the default values file - if you're editing this as +# part of the ODS pipeline installation you're in the wrong file! +# +# Please open ../values.yaml (the file you have created by making +# a copy of ../values.yaml.tmpl) and do your changes there. +# ----------------------- Installation hint ----------------------- + +# General +# Serviceaccount name to use for pipeline resources. +serviceAccountName: 'pipeline' +# Whether to enable debug mode +debug: 'false' + +# Bitbucket +# Bitbucket URL (including scheme, without trailing slash). +# Example: https://bitbucket.example.com. +bitbucketUrl: '' +# Bitbucket username. Example: cd_user. +bitbucketUsername: '' + +# Nexus +# Nexus URL (including scheme, without trailing slash). +# Example: https://nexus.example.com. +nexusUrl: '' +# Nexus username. Example: developer. +nexusUsername: '' + +# Cluster +# URL (including scheme, without trailing slash) of the OpenShift Web Console. +consoleUrl: 'http://example.com' + +# Notification Webhook +notification: + # Whether notifications should be sent to the URL specified below or not. + enabled: false + # URL of the configured webhook + url: 'http://example.com' + # The HTTP method to be used + method: 'POST' + # The HTTP content type header + contentType: 'application/json' + # Specify the outcomes you want to be notified of (allowed values: c.f. 
+ # https://tekton.dev/docs/pipelines/pipelines/#using-aggregate-execution-status-of-all-tasks) + notifyOnStatus: + - 'Failed' + # Template to be processed and accepted by the configured webhook in use + # Below example might work for Microsoft Teams + requestTemplate: |- + { + "@type": "MessageCard", + "@context": "http://schema.org/extensions", + "themeColor": {{if eq .OverallStatus "Succeeded"}}"237b4b"{{else}}"c4314b"{{ end }}, + "summary": "{{.ODSContext.Project}} - ODS Pipeline Run {{.PipelineRunName}} finished with status {{.OverallStatus}}", + "sections": [ + { + "activityTitle": "ODS Pipeline Run {{.PipelineRunName}} finished with status {{.OverallStatus}}", + "activitySubtitle": "On Project {{.ODSContext.Project}}", + "activityImage": "https://avatars.githubusercontent.com/u/38974438?s=200&v=4", + "facts": [ + { + "name": "GitRef", + "value": "{{.ODSContext.GitRef}}" + } + ], + "markdown": true + } + ], + "potentialAction": [ + { + "@type": "OpenUri", + "name": "Go to PipelineRun", + "targets": [ + { + "os": "default", + "uri": "{{.PipelineRunURL}}" + } + ] + } + ] + } + +# Pipeline(Run) Pruning +# Minimum hours to keep a pipeline run. Has precendence over pipelineRunMaxKeepRuns. +# Must be at least 1. +pipelineRunMinKeepHours: '48' +# Maximum number of pipeline runs to keep. +# Must be at least 1. +pipelineRunMaxKeepRuns: '20' + +# Pipeline Manager +pipelineManager: + # PVC (used for the pipeline workspace) + # Storage provisioner. On AWS backed clusters, use 'kubernetes.io/aws-ebs'. + storageProvisioner: 'kubernetes.io/aws-ebs' + # Storage class. On AWS backed clusters, use 'gp2'. + storageClassName: 'gp2' + # Storage size. Defaults to 2Gi unless set explicitly here. + storageSize: '5Gi' + # Number of replicas to run for the pipeline manager. + replicaCount: 1 + # Deployment pod resources. Typically these settings should not need to change. 
+ resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi + +# Image repository to pull start/finish task images from. +imageRepository: ghcr.io/opendevstack/ods-pipeline diff --git a/deploy/install.sh b/deploy/install.sh index aa10e473..7137c993 100755 --- a/deploy/install.sh +++ b/deploy/install.sh @@ -10,15 +10,12 @@ NAMESPACE="" RELEASE_NAME="ods-pipeline" SERVICEACCOUNT="pipeline" VALUES_FILE="values.yaml" -CHART_DIR="./ods-pipeline" +CHART_DIR="./chart" # Secrets AUTH_SEPARATOR=":" -AQUA_AUTH="" -AQUA_SCANNER_URL="" BITBUCKET_AUTH="" BITBUCKET_WEBHOOK_SECRET="" NEXUS_AUTH="" -SONAR_AUTH="" PRIVATE_CERT="" # Check prerequisites. @@ -47,20 +44,15 @@ function usage { printf "\t--no-diff\t\t\tDo not run Helm diff before running Helm upgrade.\n" printf "\t--dry-run\t\t\tDo not apply any changes, instead just print what the script would do.\n" printf "\t--auth-separator\t\tCharacter to use as a separator for basic auth flags (defaults to '%s')\n" "$AUTH_SEPARATOR" - printf "\t--aqua-auth\t\t\tUsername and password (separated by '%s') of an Aqua user (if not given, script will prompt for this).\n" "$AUTH_SEPARATOR" - printf "\t--aqua-scanner-url\t\t\tURL from which to download Aqua scanner (if not given, script will prompt for this).\n" printf "\t--bitbucket-auth\t\tAccess token of a Bitbucket user (if not given, script will prompt for this).\n" printf "\t--bitbucket-webhook-secret\tSecret to protect webhook endpoint with (if not given, script will generate this).\n" printf "\t--nexus-auth\t\t\tUsername and password (separated by '%s') of a Nexus user (if not given, script will prompt for this).\n" "$AUTH_SEPARATOR" - printf "\t--sonar-auth\t\t\tAuth token of a SonarQube user (if not given, script will prompt for this).\n" printf "\t--private-cert\t\t\tHost from which to download private certificate (if not given, script will skip this).\n" printf "\nExample:\n\n" printf "\t%s \ \ \n\t\t--namespace foo \ \ - \n\t\t--aqua-auth 
'user:password' \ \ \n\t\t--bitbucket-auth 'personal-access-token' \ \ - \n\t\t--nexus-auth 'user:password' \ \ - \n\t\t--sonar-auth 'auth-token' \n\n" "$0" + \n\t\t--nexus-auth 'user:password' \n\n" "$0" } while [ "$#" -gt 0 ]; do @@ -87,12 +79,6 @@ while [ "$#" -gt 0 ]; do --auth-separator) AUTH_SEPARATOR="$2"; shift;; --auth-separator=*) AUTH_SEPARATOR="${1#*=}";; - --aqua-auth) AQUA_AUTH="$2"; shift;; - --aqua-auth=*) AQUA_AUTH="${1#*=}";; - - --aqua-scanner-url) AQUA_SCANNER_URL="$2"; shift;; - --aqua-scanner-url=*) AQUA_SCANNER_URL="${1#*=}";; - --bitbucket-auth) BITBUCKET_AUTH="$2"; shift;; --bitbucket-auth=*) BITBUCKET_AUTH="${1#*=}";; @@ -102,9 +88,6 @@ while [ "$#" -gt 0 ]; do --nexus-auth) NEXUS_AUTH="$2"; shift;; --nexus-auth=*) NEXUS_AUTH="${1#*=}";; - --sonar-auth) SONAR_AUTH="$2"; shift;; - --sonar-auth=*) SONAR_AUTH="${1#*=}";; - --private-cert) PRIVATE_CERT="$2"; shift;; --private-cert=*) PRIVATE_CERT="${1#*=}";; @@ -231,19 +214,6 @@ echo "Installing secrets ..." if [ "${DRY_RUN}" == "true" ]; then echo "(skipping in dry-run)" else - installSecret "ods-aqua-auth" \ - "basic-auth-secret.yaml.tmpl" \ - "${AQUA_AUTH}" \ - "Please enter the username of an Aqua user with scan permissions. If you do not want to use Aqua, leave this empty:" \ - "Please enter the password of this Aqua user (input will be hidden). If you do not want to use Aqua, leave this empty:" - - # Aqua scanner URL is a single value. - installSecret "ods-aqua-scanner-url" \ - "opaque-secret.yaml.tmpl" \ - "${AQUA_SCANNER_URL}" \ - "" \ - "Please enter the URL from which to download the Aqua scanner binary. The URL may need to contain basic authentication - if so, ensure username/password are URL-encoded. Further, ensure that the version matches your Aqua server version. If you do not want to use Aqua, leave this empty:" - # Bitbucket username is not required as PAT alone is enough. 
installSecret "ods-bitbucket-auth" \ "basic-auth-secret.yaml.tmpl" \ @@ -264,13 +234,6 @@ else "Please enter the username of a Nexus user with write permission:" \ "Please enter the password of this Nexus user (input will be hidden):" - # SonarQube username is not required as auth token alone is enough. - installSecret "ods-sonar-auth" \ - "basic-auth-secret.yaml.tmpl" \ - "${SONAR_AUTH}" \ - "" \ - "Please enter an auth token of a SonarQube user with scan permissions (input will be hidden):" - installTLSSecret "ods-private-cert" "${PRIVATE_CERT}" fi diff --git a/deploy/ods-pipeline/Chart.yaml b/deploy/ods-pipeline/Chart.yaml deleted file mode 100644 index dae63cea..00000000 --- a/deploy/ods-pipeline/Chart.yaml +++ /dev/null @@ -1,32 +0,0 @@ -apiVersion: v2 -name: ods-pipeline -description: Umbrella chart for ods-pipeline - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.13.2 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -# It is recommended to use it with quotes. 
-appVersion: "0.13.2" - -dependencies: - - name: setup - version: 0.13.2 - condition: setup.enabled - - name: tasks - version: 0.13.2 - condition: tasks.enabled diff --git a/deploy/ods-pipeline/charts/setup/Chart.yaml b/deploy/ods-pipeline/charts/setup/Chart.yaml deleted file mode 100644 index af91c0cf..00000000 --- a/deploy/ods-pipeline/charts/setup/Chart.yaml +++ /dev/null @@ -1,23 +0,0 @@ -apiVersion: v2 -name: setup -description: A Helm chart to setup ODS pipelines - -# A chart can be either an 'application' or a 'library' chart. -# -# Application charts are a collection of templates that can be packaged into versioned archives -# to be deployed. -# -# Library charts provide useful utilities or functions for the chart developer. They're included as -# a dependency of application charts to inject those utilities and functions into the rendering -# pipeline. Library charts do not define any templates and therefore cannot be deployed. -type: application - -# This is the chart version. This version number should be incremented each time you make changes -# to the chart and its templates, including the app version. -# Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.13.2 - -# This is the version number of the application being deployed. This version number should be -# incremented each time you make changes to the application. Versions are not expected to -# follow Semantic Versioning. They should reflect the version the application is using. -appVersion: "0.13.2" diff --git a/deploy/ods-pipeline/charts/setup/templates/configmap-aqua.yaml b/deploy/ods-pipeline/charts/setup/templates/configmap-aqua.yaml deleted file mode 100644 index eea1b6b0..00000000 --- a/deploy/ods-pipeline/charts/setup/templates/configmap-aqua.yaml +++ /dev/null @@ -1,9 +0,0 @@ -kind: ConfigMap -apiVersion: v1 -metadata: - name: ods-aqua - labels: - {{- include "chart.labels" . 
| nindent 4}} -data: - url: '{{.Values.aquaUrl | trimSuffix "/"}}' - registry: '{{.Values.aquaRegistry}}' diff --git a/deploy/ods-pipeline/charts/setup/templates/configmap-sonar.yaml b/deploy/ods-pipeline/charts/setup/templates/configmap-sonar.yaml deleted file mode 100644 index 5e4b35d6..00000000 --- a/deploy/ods-pipeline/charts/setup/templates/configmap-sonar.yaml +++ /dev/null @@ -1,9 +0,0 @@ -kind: ConfigMap -apiVersion: v1 -metadata: - name: ods-sonar - labels: - {{- include "chart.labels" . | nindent 4}} -data: - url: '{{.Values.sonarUrl | trimSuffix "/"}}' - edition: '{{.Values.sonarEdition | default "community" }}' diff --git a/deploy/ods-pipeline/charts/setup/values.yaml b/deploy/ods-pipeline/charts/setup/values.yaml deleted file mode 100644 index e8d0e03d..00000000 --- a/deploy/ods-pipeline/charts/setup/values.yaml +++ /dev/null @@ -1,2 +0,0 @@ -# override name to be consistent with previous, separate chart naming convention(s) -nameOverride: ods-pipeline diff --git a/deploy/ods-pipeline/charts/tasks/templates/_helpers.tpl b/deploy/ods-pipeline/charts/tasks/templates/_helpers.tpl deleted file mode 100644 index b1c875bc..00000000 --- a/deploy/ods-pipeline/charts/tasks/templates/_helpers.tpl +++ /dev/null @@ -1,74 +0,0 @@ -{{/* -Expand the name of the chart. -*/}} -{{- define "chart.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. 
-*/}} -{{- define "chart.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "chart.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "chart.labels" -}} -helm.sh/chart: {{ include "chart.chart" . }} -{{ include "chart.selectorLabels" . }} -{{- if .Chart.AppVersion }} -app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "chart.selectorLabels" -}} -app.kubernetes.io/name: {{ include "chart.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Create the name of the service account to use -*/}} -{{- define "chart.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "chart.fullname" .) .Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} - -{{/* -Create the task suffix. -See https://github.com/Masterminds/sprig/issues/53#issuecomment-483414063. -*/}} -{{- define "taskSuffix" -}} -{{- if kindIs "invalid" .Values.global.taskSuffix }} -{{- printf "-v%s" (.Chart.AppVersion | replace "." 
"-") }} -{{- else }} -{{- .Values.global.taskSuffix }} -{{- end }} -{{- end }} diff --git a/deploy/ods-pipeline/charts/tasks/templates/_sonar-step.tpl b/deploy/ods-pipeline/charts/tasks/templates/_sonar-step.tpl deleted file mode 100644 index 93c5c925..00000000 --- a/deploy/ods-pipeline/charts/tasks/templates/_sonar-step.tpl +++ /dev/null @@ -1,52 +0,0 @@ -{{- define "sonar-step"}} -- name: scan-with-sonar - # Image is built from build/package/Dockerfile.sonar. - image: '{{.Values.imageRepository}}/ods-sonar:{{.Values.global.imageTag | default .Chart.AppVersion}}' - env: - - name: HOME - value: '/tekton/home' - - name: SONAR_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-sonar - - name: SONAR_EDITION - valueFrom: - configMapKeyRef: - key: edition - name: ods-sonar - - name: SONAR_AUTH_TOKEN - valueFrom: - secretKeyRef: - key: password - name: ods-sonar-auth - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: {} - script: | - if [ "$(params.sonar-skip)" = "true" ]; then - echo "Skipping SonarQube analysis" - else - mkdir -p .ods/artifacts/sonarqube-analysis - - truststore="${JAVA_HOME}/lib/security/cacerts" - if [ -f /etc/ssl/certs/private-cert.pem ]; then - truststore="$(pwd)/.ods-cache/truststore/cacerts" - configure-truststore --dest-store "${truststore}" - fi - # sonar is built from cmd/sonar/main.go. 
- sonar \ - -working-dir=$(params.working-dir) \ - -quality-gate=$(params.sonar-quality-gate) \ - -truststore "${truststore}" - fi - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) -{{- end}} diff --git a/deploy/ods-pipeline/charts/tasks/templates/task-ods-build-go.yaml b/deploy/ods-pipeline/charts/tasks/templates/task-ods-build-go.yaml deleted file mode 100644 index 5216344c..00000000 --- a/deploy/ods-pipeline/charts/tasks/templates/task-ods-build-go.yaml +++ /dev/null @@ -1,148 +0,0 @@ -{{if .Values.global.enabledTasks.buildGo }} -apiVersion: tekton.dev/v1beta1 -kind: '{{default "Task" .Values.global.taskKind}}' -metadata: - name: '{{default "ods" .Values.taskPrefix}}-build-go{{- include "taskSuffix" .}}' - annotations: - "helm.sh/resource-policy": keep -spec: - description: | - Builds Go (module) applications. - - See https://github.com/opendevstack/ods-pipeline/blob/v{{.Chart.AppVersion}}/docs/tasks/ods-build-go.adoc - params: - - name: working-dir - description: | - Working directory. The path must be relative to the root of the repository, - without leading `./` and trailing `/`. - type: string - default: "." - - name: enable-cgo - description: Whether to enable CGO. When not enabled the build will set `CGO_ENABLED=0`. - type: string - default: "false" - - name: go-os - description: "`GOOS` variable (the execution operating system such as `linux`, `windows`)." - type: string - default: "linux" - - name: go-arch - description: "`GOARCH` variable (the execution architecture such as `arm`, `amd64`)." - type: string - default: "amd64" - - name: output-dir - description: >- - Path to the directory into which the resulting Go binary should be copied, relative to `working-dir`. - This directory may then later be used as Docker context for example. 
- type: string - default: docker - - name: cache-build - description: >- - If enabled tasks uses or populates cache with the output dir contents (and artifacts) so that - a build can be skipped if the `working-dir` contents did not change. - You must set this to `"false"` if the build can be affected by files outside `working-dir`. See ADR caching-build-tasks for more details and workarounds. - type: string - default: "true" - - name: build-extra-inputs - description: >- - List of build source directories (as colon separated string) which in addition working-dir influence the build. - These directories are relative to the repository root. - If the contents in these directories change the cache is invalidated so that the build task will rebuild from scratch. - type: string - default: "" - - name: build-script - description: >- - Build script to execute. The - link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-go.sh[default script] - is located in the container image. If you specify a relative path - instead, it will be resolved from the workspace. See the task definition - for details how the build script is invoked. - type: string - default: "/usr/local/bin/build-go" - - name: pre-test-script - description: Script to execute before running tests, relative to the working directory. - type: string - default: "" - - name: sonar-quality-gate - description: Whether the SonarQube quality gate needs to pass for the task to succeed. - type: string - default: "false" - - name: sonar-skip - description: Whether to skip SonarQube analysis or not. - type: string - default: "false" - results: - - description: The cache location that the build task used. If caching is not enabled this will be an empty string. - name: build-reused-from-location - {{- with ((.Values.go).sidecars) }} - sidecars: - {{- toYaml . | nindent 4 }} - {{- end }} - steps: - - name: build-go-binary - # Image is built from build/package/Dockerfile.go-toolset. 
- image: '{{.Values.imageRepository}}/ods-go-toolset:{{.Values.global.imageTag | default .Chart.AppVersion}}' - env: - - name: HOME - value: '/tekton/home' - - name: CI - value: "true" - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: - {{- (.Values.go).resources | default dict | toYaml | nindent 8 }} - script: | - supply-sonar-project-properties-default --working-dir=$(params.working-dir) - echo -n "" > $(results.build-reused-from-location.path) - cache_build_key=go-$(params.go-os)-$(params.go-arch) - if copy-build-if-cached \ - --cache-build=$(params.cache-build) \ - --cache-build-key="$cache_build_key" \ - --build-extra-inputs=$(params.build-extra-inputs) \ - --cached-outputs=$(params.output-dir) \ - --cache-location-used-path=$(results.build-reused-from-location.path) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} ; then - exit 0 - fi - # Default build script is build/package/scripts/build-go.sh. - set +e - $(params.build-script) \ - --working-dir=$(params.working-dir) \ - --enable-cgo=$(params.enable-cgo) \ - --go-os=$(params.go-os) \ - --go-arch=$(params.go-arch) \ - --pre-test-script=$(params.pre-test-script) \ - --output-dir=$(params.output-dir) \ - --debug=${DEBUG} - build_exit=$? - set -e - copy-artifacts --debug=${DEBUG} - if [ $build_exit -ne 0 ]; then - exit $build_exit - fi - cache-build \ - --cache-build=$(params.cache-build) \ - --cache-build-key="$cache_build_key" \ - --build-extra-inputs=$(params.build-extra-inputs) \ - --cached-outputs=$(params.output-dir) \ - --cache-location-used-path=$(results.build-reused-from-location.path) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - {{- include "sonar-step" . 
| indent 4}} - volumes: - - name: private-cert - secret: - secretName: ods-private-cert - optional: true - workspaces: - - name: source -{{end}} diff --git a/deploy/ods-pipeline/charts/tasks/templates/task-ods-build-gradle.yaml b/deploy/ods-pipeline/charts/tasks/templates/task-ods-build-gradle.yaml deleted file mode 100644 index 25e53afb..00000000 --- a/deploy/ods-pipeline/charts/tasks/templates/task-ods-build-gradle.yaml +++ /dev/null @@ -1,177 +0,0 @@ -{{if .Values.global.enabledTasks.buildGradle }} -apiVersion: tekton.dev/v1beta1 -kind: '{{default "Task" .Values.global.taskKind}}' -metadata: - name: '{{default "ods" .Values.taskPrefix}}-build-gradle{{- include "taskSuffix" .}}' - annotations: - "helm.sh/resource-policy": keep -spec: - description: | - Builds Gradle applications. - - See https://github.com/opendevstack/ods-pipeline/blob/v{{.Chart.AppVersion}}/docs/tasks/ods-build-gradle.adoc - params: - - name: working-dir - description: | - Working directory. The path must be relative to the root of the repository, - without leading `./` and trailing `/`. - type: string - default: "." - - name: gradle-additional-tasks - description: >- - Additional gradle tasks to be passed to the gradle build. (default tasks called are `clean` and `build`). - type: string - default: "" - - name: gradle-options - description: >- - Options to be passed to the gradle build. - (See ref: https://docs.gradle.org/7.4.2/userguide/command_line_interface.html#sec:command_line_debugging) - type: string - default: "--no-daemon --stacktrace" - - name: gradle-opts-env - description: >- - Will be exposed to the build via `GRADLE_OPTS` environment variable. - Specifies JVM arguments to use when starting the Gradle client VM. The client VM only handles command line input/output, so it is rare that one would need to change its VM options. - You can still use this to change the settings for the Gradle daemon which runs the actual build by setting the according Gradle properties by `-D`. 
- If you want to set the JVM arguments for the actual build you would do this via `-Dorg.gradle.jvmargs=-Xmx1024M` - (See ref: https://docs.gradle.org/7.4.2/userguide/build_environment.html#sec:gradle_configuration_properties). - type: string - default: "-Dorg.gradle.jvmargs=-Xmx512M" - - name: output-dir - description: >- - Path to the directory into which the resulting Java application jar should be copied, relative to `working-dir`. - This directory may then later be used as Docker context for example. - type: string - default: docker - - name: cache-build - description: >- - If enabled tasks uses or populates cache with the output dir contents (and artifacts) so that - a build can be skipped if the `working-dir` contents did not change. - You must set this to `"false"` if the build can be affected by files outside `working-dir`. See ADR caching-build-tasks for more details and workarounds. - type: string - default: "true" - - name: build-extra-inputs - description: >- - List of build source directories (as colon separated string) which in addition working-dir influence the build. - These directories are relative to the repository root. - If the contents in these directories change the cache is invalidated so that the build task will rebuild from scratch. - type: string - default: "" - - name: cached-outputs - description: >- - List of build output directories (as colon separated string) to be cached. - These directories are relative to `working-dir`. - type: string - default: "docker" - - name: build-script - description: >- - Build script to execute. The - link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-gradle.sh[default script] - is located in the container image. If you specify a relative path - instead, it will be resolved from the workspace. See the task definition - for details how the build script is invoked. 
- type: string - default: "/usr/local/bin/build-gradle" - - name: gradle-build-dir - description: >- - Path to the directory into which Gradle publishes its build. - type: string - default: build - - name: sonar-quality-gate - description: Whether the SonarQube quality gate needs to pass for the task to succeed. - type: string - default: "false" - - name: sonar-skip - description: Whether to skip SonarQube analysis or not. - type: string - default: "false" - results: - - description: The cache location that the build task used. If caching is not enabled this will be an empty string. - name: build-reused-from-location - {{- with ((.Values.gradle).sidecars) }} - sidecars: - {{- toYaml . | nindent 4 }} - {{- end }} - steps: - - name: build-gradle-binary - # Image is built from build/package/Dockerfile.gradle-toolset. - image: '{{.Values.imageRepository}}/ods-gradle-toolset:{{.Values.global.imageTag | default .Chart.AppVersion}}' - env: - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - - name: HOME - value: '/tekton/home' - - name: CI - value: "true" - - name: GRADLE_OPTS - value: "$(params.gradle-opts-env)" - - name: NEXUS_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-nexus - - name: NEXUS_USERNAME - valueFrom: - secretKeyRef: - key: username - name: ods-nexus-auth - - name: NEXUS_PASSWORD - valueFrom: - secretKeyRef: - key: password - name: ods-nexus-auth - resources: - {{- (.Values.gradle).resources | default dict | toYaml | nindent 8 }} - script: | - supply-sonar-project-properties-default --working-dir=$(params.working-dir) - echo -n "" > $(results.build-reused-from-location.path) - cache_build_key=gradle - if copy-build-if-cached \ - --cache-build-key="$cache_build_key" \ - --build-extra-inputs=$(params.build-extra-inputs) \ - --cached-outputs=$(params.cached-outputs) \ - --cache-location-used-path=$(results.build-reused-from-location.path) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} ; then - exit 0 
- fi - # Default build script is build/package/scripts/build-gradle.sh. - set +e - $(params.build-script) \ - --working-dir=$(params.working-dir) \ - --output-dir=$(params.output-dir) \ - --gradle-build-dir=$(params.gradle-build-dir) \ - --gradle-additional-tasks="$(params.gradle-additional-tasks)" \ - --gradle-options="$(params.gradle-options)" - build_exit=$? - set -e - copy-artifacts --debug=${DEBUG} - if [ $build_exit -ne 0 ]; then - exit $build_exit - fi - cache-build \ - --cache-build=$(params.cache-build) \ - --cache-build-key="$cache_build_key" \ - --build-extra-inputs=$(params.build-extra-inputs) \ - --cached-outputs=$(params.cached-outputs) \ - --cache-location-used-path=$(results.build-reused-from-location.path) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - {{- include "sonar-step" . | indent 4}} - volumes: - - name: private-cert - secret: - secretName: ods-private-cert - optional: true - workspaces: - - name: source -{{end}} diff --git a/deploy/ods-pipeline/charts/tasks/templates/task-ods-build-npm.yaml b/deploy/ods-pipeline/charts/tasks/templates/task-ods-build-npm.yaml deleted file mode 100644 index 279be5d0..00000000 --- a/deploy/ods-pipeline/charts/tasks/templates/task-ods-build-npm.yaml +++ /dev/null @@ -1,148 +0,0 @@ -{{if .Values.global.enabledTasks.buildNPM }} -apiVersion: tekton.dev/v1beta1 -kind: '{{default "Task" .Values.global.taskKind}}' -metadata: - name: '{{default "ods" .Values.taskPrefix}}-build-npm{{- include "taskSuffix" .}}' - annotations: - "helm.sh/resource-policy": keep -spec: - description: | - Builds Node.js applications using npm. - - See https://github.com/opendevstack/ods-pipeline/blob/v{{.Chart.AppVersion}}/docs/tasks/ods-build-npm.adoc - params: - - name: working-dir - description: | - Working directory. 
The path must be relative to the root of the repository, - without leading `./` and trailing `/`. - type: string - default: "." - - name: cache-build - description: >- - If enabled tasks uses or populates cache with the output dir contents (and artifacts) so that - a build can be skipped if the `working-dir` contents did not change. - You must set this to `"false"` if the build can be affected by files outside `working-dir`. See ADR caching-build-tasks for more details and workarounds. - type: string - default: "true" - - name: build-extra-inputs - description: >- - List of build source directories (as colon separated string) which in addition working-dir influence the build. - These directories are relative to the repository root. - If the contents in these directories change the cache is invalidated so that the build task will rebuild from scratch. - type: string - default: "" - - name: cached-outputs - description: >- - List of build output directories (as colon separated string) to be cached. - These directories are relative to the `working-dir` parameter` - Common build directories are `dist` (default), `build` and `public`. - If empty this could mean that the original sources are being used as build output and no caching of built files are needed. Nonetheless build skipping can still be remain enabled. - type: string - default: "dist" - - name: build-script - description: >- - Build script to execute. The - link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-npm.sh[default script] - is located in the container image. If you specify a relative path - instead, it will be resolved from the workspace. See the task definition - for details how the build script is invoked. - type: string - default: "/usr/local/bin/build-npm" - - name: sonar-quality-gate - description: Whether quality gate needs to pass. - type: string - default: "false" - - name: sonar-skip - description: Whether to skip the SonarQube analysis or not. 
- type: string - default: "false" - - name: node-version - description: "Node.js version to use - supported versions: 16, 18" - type: string - default: "18" - results: - - description: The cache location that the build task used. If caching is not enabled this will be an empty string. - name: build-reused-from-location - {{- with ((.Values.npm).sidecars) }} - sidecars: - {{- toYaml . | nindent 4 }} - {{- end }} - steps: - - name: build-npm - # Image is built from build/package/Dockerfile.node-npm-toolset. - image: '{{.Values.imageRepository}}/ods-node$(params.node-version)-npm-toolset:{{.Values.global.imageTag | default .Chart.AppVersion}}' - env: - - name: HOME - value: '/tekton/home' - - name: CI - value: "true" - - name: NEXUS_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-nexus - - name: NEXUS_USERNAME - valueFrom: - secretKeyRef: - key: username - name: ods-nexus-auth - - name: NEXUS_PASSWORD - valueFrom: - secretKeyRef: - key: password - name: ods-nexus-auth - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: - {{- (.Values.npm).resources | default dict | toYaml | nindent 8 }} - script: | - supply-sonar-project-properties-default --working-dir=$(params.working-dir) - echo -n "" > $(results.build-reused-from-location.path) - cache_build_key=npm - if copy-build-if-cached \ - --cache-build=$(params.cache-build) \ - --cache-build-key="$cache_build_key" \ - --build-extra-inputs=$(params.build-extra-inputs) \ - --cached-outputs=$(params.cached-outputs) \ - --cache-location-used-path=$(results.build-reused-from-location.path) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} ; then - exit 0 - fi - # Default build script is build/package/scripts/build-npm.sh. - set +e - $(params.build-script) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} - build_exit=$? 
- set -e - copy-artifacts --debug=${DEBUG} - if [ $build_exit -ne 0 ]; then - exit $build_exit - fi - cache-build \ - --cache-build=$(params.cache-build) \ - --cache-build-key="$cache_build_key" \ - --build-extra-inputs=$(params.build-extra-inputs) \ - --cached-outputs=$(params.cached-outputs) \ - --cache-location-used-path=$(results.build-reused-from-location.path) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - {{- include "sonar-step" . | indent 4}} - volumes: - - name: private-cert - secret: - secretName: ods-private-cert - optional: true - workspaces: - - name: source -{{end}} diff --git a/deploy/ods-pipeline/charts/tasks/templates/task-ods-build-python.yaml b/deploy/ods-pipeline/charts/tasks/templates/task-ods-build-python.yaml deleted file mode 100644 index cac14b0a..00000000 --- a/deploy/ods-pipeline/charts/tasks/templates/task-ods-build-python.yaml +++ /dev/null @@ -1,144 +0,0 @@ -{{if .Values.global.enabledTasks.buildPython }} -apiVersion: tekton.dev/v1beta1 -kind: '{{default "Task" .Values.global.taskKind}}' -metadata: - name: '{{default "ods" .Values.taskPrefix}}-build-python{{- include "taskSuffix" .}}' - annotations: - "helm.sh/resource-policy": keep -spec: - description: | - Builds Python applications. - - See https://github.com/opendevstack/ods-pipeline/blob/v{{.Chart.AppVersion}}/docs/tasks/ods-build-python.adoc - params: - - name: working-dir - description: | - Working directory. The path must be relative to the root of the repository, - without leading `./` and trailing `/`. - type: string - default: "." - - name: cache-build - description: >- - If enabled tasks uses or populates cache with the output dir contents (and artifacts) so that - a build can be skipped if the `working-dir` contents did not change. 
- You must set this to `"false"` if the build can be affected by files outside `working-dir`. See ADR caching-build-tasks for more details and workarounds. - type: string - default: "true" - - name: build-extra-inputs - description: >- - List of build source directories (as colon separated string) which in addition working-dir influence the build. - These directories are relative to the repository root. - If the contents in these directories change the cache is invalidated so that the build task will rebuild from scratch. - type: string - default: "" - - name: max-line-length - description: Maximum line length. - type: string - default: "120" - - name: pre-test-script - description: Script to execute before running tests, relative to the working directory. - type: string - default: "" - - name: build-script - description: >- - Build script to execute. The - link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-python.sh[default script] - is located in the container image. If you specify a relative path - instead, it will be resolved from the workspace. See the task definition - for details how the build script is invoked. - type: string - default: "/usr/local/bin/build-python" - - name: sonar-quality-gate - description: Whether quality gate needs to pass. - type: string - default: "false" - - name: sonar-skip - description: Whether to skip the SonarQube analysis or not. - type: string - default: "false" - results: - - description: The cache location that the build task used. If caching is not enabled this will be an empty string. - name: build-reused-from-location - {{- with ((.Values.python).sidecars) }} - sidecars: - {{- toYaml . | nindent 4 }} - {{- end }} - steps: - - name: build-python - # Image is built from build/package/Dockerfile.python-toolset. 
- image: '{{.Values.imageRepository}}/ods-python-toolset:{{.Values.global.imageTag | default .Chart.AppVersion}}' - env: - - name: HOME - value: '/tekton/home' - - name: CI - value: "true" - - name: NEXUS_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-nexus - - name: NEXUS_USERNAME - valueFrom: - secretKeyRef: - key: username - name: ods-nexus-auth - - name: NEXUS_PASSWORD - valueFrom: - secretKeyRef: - key: password - name: ods-nexus-auth - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: - {{- (.Values.python).resources | default dict | toYaml | nindent 8 }} - script: | - supply-sonar-project-properties-default --working-dir=$(params.working-dir) - echo -n "" > $(results.build-reused-from-location.path) - cache_build_key=python - if copy-build-if-cached \ - --cache-build=$(params.cache-build) \ - --cache-build-key="$cache_build_key" \ - --build-extra-inputs=$(params.build-extra-inputs) \ - --cache-location-used-path=$(results.build-reused-from-location.path) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} ; then - exit 0 - fi - # Default build script is build/package/scripts/build-python.sh. - set +e - $(params.build-script) \ - --working-dir=$(params.working-dir) \ - --max-line-length=$(params.max-line-length) \ - --pre-test-script=$(params.pre-test-script) \ - --debug=${DEBUG} - build_exit=$? - set -e - copy-artifacts --debug=${DEBUG} - if [ $build_exit -ne 0 ]; then - exit $build_exit - fi - cache-build \ - --cache-build=$(params.cache-build) \ - --cache-build-key="$cache_build_key" \ - --build-extra-inputs=$(params.build-extra-inputs) \ - --cache-location-used-path=$(results.build-reused-from-location.path) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - {{- include "sonar-step" . 
| indent 4}} - volumes: - - name: private-cert - secret: - secretName: ods-private-cert - optional: true - workspaces: - - name: source -{{end}} diff --git a/deploy/ods-pipeline/charts/tasks/templates/task-ods-deploy-helm.yaml b/deploy/ods-pipeline/charts/tasks/templates/task-ods-deploy-helm.yaml deleted file mode 100644 index c199bccf..00000000 --- a/deploy/ods-pipeline/charts/tasks/templates/task-ods-deploy-helm.yaml +++ /dev/null @@ -1,115 +0,0 @@ -{{if .Values.global.enabledTasks.deployHelm }} -apiVersion: tekton.dev/v1beta1 -kind: '{{default "Task" .Values.global.taskKind}}' -metadata: - name: '{{default "ods" .Values.taskPrefix}}-deploy-helm{{- include "taskSuffix" .}}' - annotations: - "helm.sh/resource-policy": keep -spec: - description: | - Deploy Helm charts. - - See https://github.com/opendevstack/ods-pipeline/blob/v{{.Chart.AppVersion}}/docs/tasks/ods-deploy-helm.adoc - params: - - name: chart-dir - description: Helm chart directory that will be deployed - type: string - default: ./chart - - name: release-name - description: | - The Helm release name. If empty, the release name is simply the name of the chart. - - When this task is used in a repository which defines subcharts, and the parameter is not set, - then the task sets `.fullnameOverride` equal to the respective - subcomponent to avoid resources being prefixed with the umbrella repository - component name (assuming your resources are named using the `chart.fullname` - helper). However, if the parameter is specified, `.fullnameOverride` is not set. - As a result the `chart.fullname` helper prefixes resources with the specfied - `release-name` unless the chart's name contains the `release-name`. - type: string - default: '' - - name: diff-flags - description: Flags to pass to `helm diff upgrade` in addition to the ones specified via the `upgrade-flags` parameter. Note that the flags `--detailed-exitcode` and `--no-color` are automatically set and cannot be removed. 
If flags unknown to `helm diff` are passed, they are ignored. - type: string - default: '--three-way-merge' - - name: upgrade-flags - description: Flags to pass to `helm upgrade`. - type: string - default: '--install --wait' - - name: age-key-secret - description: | - Name of the secret containing the age key to use for helm-secrets. - If the secret exists, it is expected to have a field named `key.txt` with the age secret key in its content. - type: string - default: 'helm-secrets-age-key' - - name: api-server - description: | - API server of the target cluster, including scheme. - Only required if the target namespace is outside the cluster in which - the pipeline runs. - type: string - default: '' - - name: api-credentials-secret - description: | - Name of the Secret resource holding the token of a serviceaccount (in field `token`). - Only required when `api-server` is set. - type: string - default: '' - - name: namespace - description: | - Target K8s namespace (or OpenShift project) to deploy into. - If empty, the task will be a no-op. - type: string - default: '' - - name: registry-host - description: | - Hostname of the target registry to push images to. - If not given, the registy host of the source image is used. - type: string - default: '' - - name: diff-only - description: | - If set to true, the task will only perform a diff, and then stop. - No images will be promoted or upgrades attempted. - type: string - default: 'false' - steps: - - name: helm-upgrade-from-repo - # Image is built from build/package/Dockerfile.helm. - image: '{{.Values.imageRepository}}/ods-helm:{{.Values.global.imageTag | default .Chart.AppVersion}}' - env: - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - - name: HOME - value: '/tekton/home' - resources: {} - script: | - # deploy-helm is built from /cmd/deploy-helm/main.go. 
- deploy-helm \ - -chart-dir=$(params.chart-dir) \ - -namespace=$(params.namespace) \ - -release-name=$(params.release-name) \ - -diff-flags="$(params.diff-flags)" \ - -upgrade-flags="$(params.upgrade-flags)" \ - -age-key-secret=$(params.age-key-secret) \ - -api-server=$(params.api-server) \ - -api-credentials-secret=$(params.api-credentials-secret) \ - -registry-host=$(params.registry-host) \ - -diff-only=$(params.diff-only) - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - volumes: - - name: private-cert - secret: - secretName: ods-private-cert - optional: true - workspaces: - - name: source -{{end}} diff --git a/deploy/ods-pipeline/charts/tasks/templates/task-ods-package-image.yaml b/deploy/ods-pipeline/charts/tasks/templates/task-ods-package-image.yaml deleted file mode 100644 index 6bbab6a6..00000000 --- a/deploy/ods-pipeline/charts/tasks/templates/task-ods-package-image.yaml +++ /dev/null @@ -1,192 +0,0 @@ -{{if .Values.global.enabledTasks.packageImage }} -apiVersion: tekton.dev/v1beta1 -kind: '{{default "Task" .Values.global.taskKind}}' -metadata: - name: '{{default "ods" .Values.taskPrefix}}-package-image{{- include "taskSuffix" .}}' - annotations: - "helm.sh/resource-policy": keep -spec: - description: | - Packages applications into container images using buildah. - - See https://github.com/opendevstack/ods-pipeline/blob/v{{.Chart.AppVersion}}/docs/tasks/ods-package-image.adoc - params: - - name: registry - description: Image registry to push image to. - type: string - default: '{{default .Values.pushRegistry}}' - - name: image-stream - description: Reference of the image stream buildah will produce. If not set, the value of `.ods/component` is used. - type: string - default: '' - - name: extra-tags - description: Additional image tags (e.g. 'latest dev') for pushed images. The primary tag is based on the commit sha. 
Only tags currently missing from the image will be added. - type: string # Wanted to use and array but ran into [Cannot refer array params in script #4912](https://github.com/tektoncd/pipeline/issues/4912) - default: '' - - name: storage-driver - description: Set buildah storage driver. - type: string - default: vfs - - name: dockerfile - description: Path to the Dockerfile to build (relative to `docker-dir`). - type: string - default: ./Dockerfile - - name: docker-dir - description: Path to the directory to use as Docker context. - type: string - default: '.' - - name: format - description: 'The format of the built container, `oci` or `docker`.' - type: string - default: oci - - name: buildah-build-extra-args - description: Extra parameters passed for the build command when building images (e.g. '--build-arg=firstArg=one --build-arg=secondArg=two'). - type: string - default: '' - - name: buildah-push-extra-args - description: Extra parameters passed for the push command when pushing images. - type: string - default: '' - - name: trivy-sbom-extra-args - description: Extra parameters passed for the trivy command to generate an SBOM. - type: string - default: '' - - name: aqua-gate - description: Whether the Aqua security scan needs to pass for the task to succeed. - type: string - default: "false" - results: - - description: Digest of the image just built. - name: image-digest - steps: - - name: package-image - # Image is built from build/package/Dockerfile.package-image. 
- image: '{{.Values.imageRepository}}/ods-package-image:{{.Values.global.imageTag | default .Chart.AppVersion}}' - env: - - name: NEXUS_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-nexus - - name: NEXUS_USERNAME - valueFrom: - secretKeyRef: - key: username - name: ods-nexus-auth - - name: NEXUS_PASSWORD - valueFrom: - secretKeyRef: - key: password - name: ods-nexus-auth - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: {} - script: | - - # ods-package-image is built from cmd/package-image/main.go. - ods-package-image \ - -image-stream=$(params.image-stream) \ - -extra-tags=$(params.extra-tags) \ - -registry=$(params.registry) \ - -storage-driver=$(params.storage-driver) \ - -format=$(params.format) \ - -dockerfile=$(params.dockerfile) \ - -context-dir=$(params.docker-dir) \ - -buildah-build-extra-args=$(params.buildah-build-extra-args) \ - -buildah-push-extra-args=$(params.buildah-push-extra-args) \ - -trivy-sbom-extra-args=$(params.trivy-sbom-extra-args) - - # As this task does not run unter uid 1001, chown created artifacts - # to make them deletable by ods-start's cleanup procedure. - chown -R 1001:0 .ods/artifacts/image-digests .ods/artifacts/sboms - securityContext: - capabilities: - add: - - SETFCAP - volumeMounts: - - mountPath: /var/lib/containers - name: varlibcontainers - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - - name: aqua-scan - # Image is built from build/package/Dockerfile.aqua-scan. 
- image: '{{.Values.imageRepository}}/ods-aqua-scan:{{.Values.global.imageTag | default .Chart.AppVersion}}' - env: - - name: HOME - value: '/tekton/home' - - name: BITBUCKET_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-bitbucket - - name: BITBUCKET_ACCESS_TOKEN - valueFrom: - secretKeyRef: - key: password - name: ods-bitbucket-auth - - name: AQUA_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-aqua - - name: AQUA_REGISTRY - valueFrom: - configMapKeyRef: - key: registry - name: ods-aqua - - name: AQUA_USERNAME - valueFrom: - secretKeyRef: - key: username - name: ods-aqua-auth - - name: AQUA_PASSWORD - valueFrom: - secretKeyRef: - key: password - name: ods-aqua-auth - - name: AQUA_SCANNER_URL - valueFrom: - secretKeyRef: - key: secret - name: ods-aqua-scanner-url - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: {} - script: | - if [ "${AQUA_SCANNER_URL:0:4}" != "http" ]; then - echo "Skipping Aqua scan" - else - download-aqua-scanner \ - --aqua-scanner-url=${AQUA_SCANNER_URL} \ - $(case ${DEBUG} in (true) printf -- '--debug'; esac) - - # ods-aqua-scan is built from cmd/aqua-scan/main.go. - ods-aqua-scan \ - -image-stream=$(params.image-stream) \ - -aqua-gate=$(params.aqua-gate) - fi - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - volumes: - - emptyDir: {} - name: varlibcontainers - - name: private-cert - secret: - secretName: ods-private-cert - optional: true - workspaces: - - name: source -{{end}} diff --git a/deploy/ods-pipeline/charts/tasks/values.docs.yaml b/deploy/ods-pipeline/charts/tasks/values.docs.yaml deleted file mode 100644 index 1640798d..00000000 --- a/deploy/ods-pipeline/charts/tasks/values.docs.yaml +++ /dev/null @@ -1,11 +0,0 @@ -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. 
-global: - taskSuffix: '' - enabledTasks: - buildGo: true - buildGradle: true - buildPython: true - buildNPM: true - packageImage: true - deployHelm: true diff --git a/deploy/ods-pipeline/charts/tasks/values.yaml b/deploy/ods-pipeline/charts/tasks/values.yaml deleted file mode 100644 index 47b0c55b..00000000 --- a/deploy/ods-pipeline/charts/tasks/values.yaml +++ /dev/null @@ -1,5 +0,0 @@ -# override name to be consistent with previous, separate chart naming convention(s) -nameOverride: ods-pipeline - -imageRepository: ghcr.io/opendevstack/ods-pipeline -pushRegistry: image-registry.openshift-image-registry.svc:5000 diff --git a/deploy/ods-pipeline/values.kind.yaml b/deploy/ods-pipeline/values.kind.yaml deleted file mode 100644 index 80011275..00000000 --- a/deploy/ods-pipeline/values.kind.yaml +++ /dev/null @@ -1,22 +0,0 @@ -global: - imageTag: latest - taskSuffix: '' - -setup: - # Cluster - consoleUrl: 'http://example.com' - # Pipeline Manager - pipelineManager: - storageProvisioner: '' - storageClassName: 'standard' - storageSize: '2Gi' - imageRepository: localhost:5000/ods - imageTag: 'latest' - -tasks: - # Image repository to pull task images from. - # To test with the latest public ods-pipeline images, set - # global.imageTag to 'latest' and use: 'ghcr.io/opendevstack/ods-pipeline'. - imageRepository: localhost:5000/ods - - pushRegistry: kind-registry.kind:5000 diff --git a/deploy/ods-pipeline/values.yaml b/deploy/ods-pipeline/values.yaml deleted file mode 100644 index a865efa8..00000000 --- a/deploy/ods-pipeline/values.yaml +++ /dev/null @@ -1,204 +0,0 @@ -# ----------------------- Installation hint ----------------------- -# !!! Important !!! -# This is the default values file - if you're editing this as -# part of the ODS pipeline installation you're in the wrong file! -# -# Please open ../values.yaml (the file you have created by making -# a copy of ../values.yaml.tmpl) and do your changes there. 
-# ----------------------- Installation hint ----------------------- - -# ####################################### # -# UMBRELLA # -# ####################################### # -global: - # Image tag to use for images referenced by tasks (defaults to the chart appVersion). - # imageTag: '' - # Suffix to append to the task name. If not set, the sufix will be computed - # from the chart appVersion in the form "-vMAJOR-MINOR-PATCH". - # taskSuffix: -latest - # Custom task kind (defaults to "Task") - # taskKind: "ClusterTask" - # enabledTasks controls which tasks will be installed. Set the tasks you do - # not want to install to false. - enabledTasks: - buildGo: true - buildGradle: true - buildPython: true - buildNPM: true - packageImage: true - deployHelm: true - - - -# ####################################### # -# SETUP CHART CONFIG # -# ####################################### # -setup: - # enable configuration and management chart - enabled: true - - # General - # Serviceaccount name to use for pipeline resources. - serviceAccountName: 'pipeline' - # Whether to enable debug mode - debug: 'false' - - # Bitbucket - # Bitbucket URL (including scheme, without trailing slash). - # Example: https://bitbucket.example.com. - bitbucketUrl: '' - # Bitbucket username. Example: cd_user. - bitbucketUsername: '' - - # Nexus - # Nexus URL (including scheme, without trailing slash). - # Example: https://nexus.example.com. - nexusUrl: '' - # Nexus username. Example: developer. - nexusUsername: '' - - # Sonar - # SonarQube URL (including scheme, without trailing slash). - # Example: https://sonarqube.example.com. - sonarUrl: '' - # SonarQube edition. Valid options: 'community', 'developer', 'enterprise' or 'datacenter' - sonarEdition: 'community' - - # Aqua - # Aqua URL (including scheme, without trailing slash). - # Example: https://aqua.example.com. - # Leave empty when not using Aqua. - aquaUrl: '' - # Aqua registry name. - # Leave empty when not using Aqua. 
- aquaRegistry: '' - # Aqua username. Example: developer. - # Leave empty when not using Aqua. - aquaUsername: '' - - # Cluster - # URL (including scheme, without trailing slash) of the OpenShift Web Console. - consoleUrl: 'http://example.com' - - # Notification Webhook - notification: - # Whether notifications should be sent to the URL specified below or not. - enabled: false - # URL of the configured webhook - url: 'http://example.com' - # The HTTP method to be used - method: 'POST' - # The HTTP content type header - contentType: 'application/json' - # Specify the outcomes you want to be notified of (allowed values: c.f. - # https://tekton.dev/docs/pipelines/pipelines/#using-aggregate-execution-status-of-all-tasks) - notifyOnStatus: - - 'Failed' - # Template to be processed and accepted by the configured webhook in use - # Below example might work for Microsoft Teams - requestTemplate: |- - { - "@type": "MessageCard", - "@context": "http://schema.org/extensions", - "themeColor": {{if eq .OverallStatus "Succeeded"}}"237b4b"{{else}}"c4314b"{{ end }}, - "summary": "{{.ODSContext.Project}} - ODS Pipeline Run {{.PipelineRunName}} finished with status {{.OverallStatus}}", - "sections": [ - { - "activityTitle": "ODS Pipeline Run {{.PipelineRunName}} finished with status {{.OverallStatus}}", - "activitySubtitle": "On Project {{.ODSContext.Project}}", - "activityImage": "https://avatars.githubusercontent.com/u/38974438?s=200&v=4", - "facts": [ - { - "name": "GitRef", - "value": "{{.ODSContext.GitRef}}" - } - ], - "markdown": true - } - ], - "potentialAction": [ - { - "@type": "OpenUri", - "name": "Go to PipelineRun", - "targets": [ - { - "os": "default", - "uri": "{{.PipelineRunURL}}" - } - ] - } - ] - } - - # Pipeline(Run) Pruning - # Minimum hours to keep a pipeline run. Has precendence over pipelineRunMaxKeepRuns. - # Must be at least 1. - pipelineRunMinKeepHours: '48' - # Maximum number of pipeline runs to keep. - # Must be at least 1. 
- pipelineRunMaxKeepRuns: '20' - - # Pipeline Manager - pipelineManager: - # PVC (used for the pipeline workspace) - # Storage provisioner. On AWS backed clusters, use 'kubernetes.io/aws-ebs'. - storageProvisioner: 'kubernetes.io/aws-ebs' - # Storage class. On AWS backed clusters, use 'gp2'. - storageClassName: 'gp2' - # Storage size. Defaults to 2Gi unless set explicitly here. - storageSize: '5Gi' - # Number of replicas to run for the pipeline manager. - replicaCount: 1 - # Repository from which to pull the pipeline manager container image. - imageRepository: ghcr.io/opendevstack/ods-pipeline - # Deployment pod resources. Typically these settings should not need to change. - resources: - limits: - cpu: 100m - memory: 128Mi - requests: - cpu: 100m - memory: 128Mi - - - -# ####################################### # -# TASK CHART CONFIG # -# ####################################### # -tasks: - # enable task definition chart - enabled: true - - # Image repository to pull task images from. - imageRepository: ghcr.io/opendevstack/ods-pipeline - - # Custom task prefix (defaults to "ods") - # taskPrefix: "foo" - - # Registry to push images to from ods-package-image task. - pushRegistry: image-registry.openshift-image-registry.svc:5000 - - # To define build task specific sidecars and quotas, add resources/sidecar section(s) per task, - # e.g. 
- # - # go: - # # define custom resource quotas for the go build task - # resources: - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - # sidecars: - # # sidecars added to go build task - # - workspaces: null - # image: postgres - # name: postgres-sidecar - # resources: - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi diff --git a/deploy/values.yaml.tmpl b/deploy/values.yaml.tmpl index bbc1e4b6..a85ee168 100644 --- a/deploy/values.yaml.tmpl +++ b/deploy/values.yaml.tmpl @@ -6,48 +6,22 @@ # All of these configuration options are set to a default value in ods-pipeline/values.yaml. # If you want to override something there, copy the field here and configure as needed. -global: - # Configure which tasks should be installed. Tasks set to "false" will - # not be part of the installation and cannot be referenced from ods.yaml. - enabledTasks: - buildGo: false - buildGradle: false - buildPython: false - buildNPM: false - packageImage: true - deployHelm: true +# Bitbucket URL (including scheme, without trailing slash). +# Example: https://bitbucket.example.com. +bitbucketUrl: '' -setup: - # Bitbucket URL (including scheme, without trailing slash). - # Example: https://bitbucket.example.com. - bitbucketUrl: '' +# Nexus URL (including scheme, without trailing slash). +# Example: https://nexus.example.com. +nexusUrl: '' - # Nexus URL (including scheme, without trailing slash). - # Example: https://nexus.example.com. - nexusUrl: '' +# OpenShift Web ConsoleURL (including scheme, without trailing slash). +# Example: https://console-openshift-console.apps.foo.tftp.p1.openshiftapps.com. +consoleUrl: '' - # SonarQube URL (including scheme, without trailing slash). - # Example: https://sonarqube.example.com. - sonarUrl: '' - # SonarQube edition. Valid options: 'community', 'developer', 'enterprise' or 'datacenter' - sonarEdition: 'community' - - # Aqua URL (including scheme, without trailing slash). 
- # Example: https://aqua.example.com. - # Leave empty when not using Aqua. - aquaUrl: '' - # Aqua registry name. - # Leave empty when not using Aqua. - aquaRegistry: '' - - # OpenShift Web ConsoleURL (including scheme, without trailing slash). - # Example: https://console-openshift-console.apps.foo.tftp.p1.openshiftapps.com. - consoleUrl: '' - - # Pipeline Manager - pipelineManager: - # PVC (used for the pipeline workspace) - # Storage provisioner. On AWS backed clusters, use 'kubernetes.io/aws-ebs'. - storageProvisioner: 'kubernetes.io/aws-ebs' - # Storage class. On AWS backed clusters, use 'gp2'. - storageClassName: 'gp2' +# Pipeline Manager +pipelineManager: + # PVC (used for the pipeline workspace) + # Storage provisioner. On AWS backed clusters, use 'kubernetes.io/aws-ebs'. + storageProvisioner: 'kubernetes.io/aws-ebs' + # Storage class. On AWS backed clusters, use 'gp2'. + storageClassName: 'gp2' diff --git a/docs/installation.adoc b/docs/installation.adoc index 7a778e75..4d9ab43a 100644 --- a/docs/installation.adoc +++ b/docs/installation.adoc @@ -145,4 +145,4 @@ TIP: The credentials stored in the K8s secrets will not be updated. If you need ==== Finishing the update -Once the resources in your namespace are updated, you likely have to update the `ods.yaml` files in your repository to point to the new tasks, e.g. changing `ods-build-go-v0-12-0` to `ods-build-go-v0-13-2`. Whether or not you have to update the `ods.yaml` file depends whether the task suffix (controlled by the value `taskSuffix`) has changed due to the update. +Once the resources in your namespace are updated, you likely have to update the `ods.yaml` files in your repository to point to the new tasks, e.g. changing `ods-build-go-v0-12-0` to `ods-build-go-v0-13-2`. 
diff --git a/go.mod b/go.mod index db782c80..b7097230 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,6 @@ go 1.19 require ( github.com/google/go-cmp v0.5.9 github.com/google/go-github/v42 v42.0.0 - github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/sonatype-nexus-community/gonexus v0.59.0 github.com/tektoncd/pipeline v0.41.1 golang.org/x/net v0.7.0 @@ -16,6 +15,8 @@ require ( sigs.k8s.io/yaml v1.3.0 ) +require golang.org/x/tools v0.2.0 // indirect + require ( contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d // indirect contrib.go.opencensus.io/exporter/prometheus v0.4.0 // indirect @@ -63,7 +64,6 @@ require ( go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.23.0 // indirect golang.org/x/crypto v0.1.0 // indirect - golang.org/x/exp v0.0.0-20230111222715-75897c7a292a golang.org/x/oauth2 v0.1.0 // indirect golang.org/x/sync v0.1.0 // indirect golang.org/x/sys v0.5.0 // indirect diff --git a/go.sum b/go.sum index ce077f5d..d9fe6c23 100644 --- a/go.sum +++ b/go.sum @@ -172,8 +172,6 @@ github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -336,8 +334,6 @@ golang.org/x/exp 
v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230111222715-75897c7a292a h1:/YWeLOBWYV5WAQORVPkZF3Pq9IppkcT72GKnWjNf5W8= -golang.org/x/exp v0.0.0-20230111222715-75897c7a292a/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -518,6 +514,7 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.2.0 h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/internal/installation/bitbucket.go b/internal/installation/bitbucket.go index 07ffe203..a921384c 100644 --- a/internal/installation/bitbucket.go +++ b/internal/installation/bitbucket.go @@ -23,7 +23,7 @@ const ( // NewBitbucketClientConfig returns a 
*bitbucket.ClientConfig which is derived // from the information about Bitbucket located in the given Kubernetes namespace. -func NewBitbucketClientConfig(c *kclient.Clientset, namespace string, logger logging.LeveledLoggerInterface, privateCert string) (*bitbucket.ClientConfig, error) { +func NewBitbucketClientConfig(c kclient.Interface, namespace string, logger logging.LeveledLoggerInterface, privateCert string) (*bitbucket.ClientConfig, error) { bitbucketSecret, err := c.CoreV1().Secrets(namespace). Get(context.TODO(), BitbucketSecretName, metav1.GetOptions{}) if err != nil { diff --git a/internal/installation/nexus.go b/internal/installation/nexus.go index 3da24178..e6841e87 100644 --- a/internal/installation/nexus.go +++ b/internal/installation/nexus.go @@ -20,7 +20,7 @@ const ( // NewNexusClientConfig returns a *nexus.ClientConfig which is derived // from the information about Nexus located in the given Kubernetes namespace. -func NewNexusClientConfig(c *kclient.Clientset, namespace string, logger logging.LeveledLoggerInterface) (*nexus.ClientConfig, error) { +func NewNexusClientConfig(c kclient.Interface, namespace string, logger logging.LeveledLoggerInterface) (*nexus.ClientConfig, error) { nexusSecret, err := c.CoreV1().Secrets(namespace). 
Get(context.TODO(), NexusSecretName, metav1.GetOptions{}) if err != nil { diff --git a/internal/kubernetes/secrets.go b/internal/kubernetes/secrets.go index 81b0ea10..4173372f 100644 --- a/internal/kubernetes/secrets.go +++ b/internal/kubernetes/secrets.go @@ -10,7 +10,7 @@ import ( "k8s.io/client-go/kubernetes" ) -func CreateSecret(clientset *kubernetes.Clientset, namespace string, secret *corev1.Secret) (*corev1.Secret, error) { +func CreateSecret(clientset kubernetes.Interface, namespace string, secret *corev1.Secret) (*corev1.Secret, error) { log.Printf("Create secret %s", secret.Name) @@ -21,7 +21,7 @@ func CreateSecret(clientset *kubernetes.Clientset, namespace string, secret *cor return secret, err } -func GetSecret(clientset *kubernetes.Clientset, namespace string, secretName string) (*corev1.Secret, error) { +func GetSecret(clientset kubernetes.Interface, namespace string, secretName string) (*corev1.Secret, error) { log.Printf("Get secret %s", secretName) @@ -32,7 +32,7 @@ func GetSecret(clientset *kubernetes.Clientset, namespace string, secretName str return secret, err } -func GetSecretKey(clientset *kubernetes.Clientset, namespace, secretName, key string) (string, error) { +func GetSecretKey(clientset kubernetes.Interface, namespace, secretName, key string) (string, error) { log.Printf("Get secret %s", secretName) diff --git a/internal/kubernetes/services.go b/internal/kubernetes/services.go deleted file mode 100644 index 449138ef..00000000 --- a/internal/kubernetes/services.go +++ /dev/null @@ -1,82 +0,0 @@ -package kubernetes - -import ( - "context" - "fmt" - "log" - "strings" - - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/client-go/kubernetes" -) - -func CreateNodePortService(clientset *kubernetes.Clientset, name string, selectors map[string]string, port, targetPort int32, namespace string) (*v1.Service, error) { - - log.Printf("Create node port service %s", name) - svc, 
err := clientset.CoreV1().Services(namespace).Create(context.TODO(), - &v1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: map[string]string{"app.kubernetes.io/managed-by": "ods-pipeline"}, - }, - Spec: v1.ServiceSpec{ - ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeCluster, - Ports: []v1.ServicePort{ - { - Name: fmt.Sprintf("%d-%d", port, targetPort), - NodePort: port, - Port: port, - Protocol: v1.ProtocolTCP, - TargetPort: intstr.FromInt(int(targetPort)), - }, - }, - Selector: selectors, - SessionAffinity: v1.ServiceAffinityNone, - Type: v1.ServiceTypeNodePort, - }, - }, metav1.CreateOptions{}) - - return svc, err -} - -// ServiceHasReadyPods returns false if no pod is assigned to given service -// or if one or more pods are not "Running" -// or one or more of any pods containers are not "ready". -func ServiceHasReadyPods(clientset *kubernetes.Clientset, svc *v1.Service) (bool, string, error) { - podList, err := servicePods(clientset, svc) - if err != nil { - return false, "error", err - } - for _, pod := range podList.Items { - phase := pod.Status.Phase - if phase != "Running" { - return false, fmt.Sprintf("pod %s is in phase %+v", pod.Name, phase), nil - } - for _, containerStatus := range pod.Status.ContainerStatuses { - if !containerStatus.Ready { - return false, fmt.Sprintf("container %s in pod %s is not ready", containerStatus.Name, pod.Name), nil - } - } - } - return true, "ok", nil -} - -func servicePods(clientset *kubernetes.Clientset, svc *v1.Service) (*v1.PodList, error) { - podClient := clientset.CoreV1().Pods(svc.Namespace) - selector := []string{} - for key, value := range svc.Spec.Selector { - selector = append(selector, fmt.Sprintf("%s=%s", key, value)) - } - pods, err := podClient.List( - context.TODO(), - metav1.ListOptions{ - LabelSelector: strings.Join(selector, ","), - }, - ) - if err != nil { - return nil, err - } - return pods.DeepCopy(), nil -} diff --git a/internal/kubernetes/volumes.go 
b/internal/kubernetes/volumes.go index 0392d128..754f72bd 100644 --- a/internal/kubernetes/volumes.go +++ b/internal/kubernetes/volumes.go @@ -10,7 +10,7 @@ import ( "k8s.io/client-go/kubernetes" ) -func CreatePersistentVolume(clientset *kubernetes.Clientset, pvName string, capacity string, hostPath string, storageClassName string) (*v1.PersistentVolume, error) { +func CreatePersistentVolume(clientset kubernetes.Interface, pvName string, capacity string, hostPath string, storageClassName string) (*v1.PersistentVolume, error) { log.Printf("Create persistent volume %s", pvName) @@ -34,7 +34,7 @@ func CreatePersistentVolume(clientset *kubernetes.Clientset, pvName string, capa return pv, err } -func CreatePersistentVolumeClaim(clientset *kubernetes.Clientset, capacity string, storageClassName string, namespace string) (*v1.PersistentVolumeClaim, error) { +func CreatePersistentVolumeClaim(clientset kubernetes.Interface, capacity string, storageClassName string, namespace string) (*v1.PersistentVolumeClaim, error) { pvcName := "task-pv-claim" log.Printf("Create persistent volume claim %s", pvcName) diff --git a/internal/manager/pipeline.go b/internal/manager/pipeline.go index 37f2f595..482c9560 100644 --- a/internal/manager/pipeline.go +++ b/internal/manager/pipeline.go @@ -44,8 +44,6 @@ func createPipelineRun( tektonClient tektonClient.ClientPipelineRunInterface, ctxt context.Context, cfg PipelineConfig, - taskKind tekton.TaskKind, - taskSuffix string, needQueueing bool) (*tekton.PipelineRun, error) { pr := &tekton.PipelineRun{ ObjectMeta: metav1.ObjectMeta{ @@ -57,7 +55,7 @@ func createPipelineRun( Kind: "PipelineRun", }, Spec: tekton.PipelineRunSpec{ - PipelineSpec: assemblePipelineSpec(cfg, taskKind, taskSuffix), + PipelineSpec: assemblePipelineSpec(cfg), Params: extractPipelineParams(cfg.Params), ServiceAccountName: "pipeline", // TODO PodTemplate: cfg.PipelineSpec.PodTemplate, @@ -127,11 +125,11 @@ func pipelineLabels(data PipelineConfig) map[string]string { } // 
assemblePipelineSpec returns a Tekton pipeline based on given PipelineConfig. -func assemblePipelineSpec(cfg PipelineConfig, taskKind tekton.TaskKind, taskSuffix string) *tekton.PipelineSpec { +func assemblePipelineSpec(cfg PipelineConfig) *tekton.PipelineSpec { var tasks []tekton.PipelineTask tasks = append(tasks, tekton.PipelineTask{ Name: "start", - TaskRef: &tekton.TaskRef{Kind: taskKind, Name: "ods-start" + taskSuffix}, + TaskRef: &tekton.TaskRef{Kind: tekton.NamespacedTaskKind, Name: "ods-pipeline-start"}, Params: startTaskParams(), Workspaces: tektonDefaultWorkspaceBindings(), }) @@ -151,7 +149,7 @@ func assemblePipelineSpec(cfg PipelineConfig, taskKind tekton.TaskKind, taskSuff finallyTasks := append([]tekton.PipelineTask{}, cfg.PipelineSpec.Finally...) finallyTasks = append(finallyTasks, tekton.PipelineTask{ Name: "finish", - TaskRef: &tekton.TaskRef{Kind: taskKind, Name: "ods-finish" + taskSuffix}, + TaskRef: &tekton.TaskRef{Kind: tekton.NamespacedTaskKind, Name: "ods-pipeline-finish"}, Workspaces: tektonDefaultWorkspaceBindings(), Params: finishTaskParams(), }) diff --git a/internal/manager/pipeline_test.go b/internal/manager/pipeline_test.go index b34e515c..3345114e 100644 --- a/internal/manager/pipeline_test.go +++ b/internal/manager/pipeline_test.go @@ -67,7 +67,7 @@ func TestCreatePipelineRun(t *testing.T) { PVC: "pvc", } t.Run("non-queued PR", func(t *testing.T) { - pr, err := createPipelineRun(tc, ctxt, pData, tekton.NamespacedTaskKind, "", false) + pr, err := createPipelineRun(tc, ctxt, pData, false) if err != nil { t.Fatal(err) } @@ -96,7 +96,7 @@ func TestCreatePipelineRun(t *testing.T) { }) t.Run("pending PR", func(t *testing.T) { - pr, err := createPipelineRun(tc, ctxt, pData, tekton.NamespacedTaskKind, "", true) + pr, err := createPipelineRun(tc, ctxt, pData, true) if err != nil { t.Fatal(err) } @@ -124,7 +124,7 @@ func TestCreatePipelineRun(t *testing.T) { }, }, } - pr, err := createPipelineRun(tc, ctxt, pData, tekton.NamespacedTaskKind, "", 
false) + pr, err := createPipelineRun(tc, ctxt, pData, false) if err != nil { t.Fatal(err) } @@ -137,7 +137,7 @@ func TestCreatePipelineRun(t *testing.T) { wantTasks := []tekton.PipelineTask{ { Name: "start", - TaskRef: &tekton.TaskRef{Kind: "Task", Name: "ods-start"}, + TaskRef: &tekton.TaskRef{Kind: "Task", Name: "ods-pipeline-start"}, Params: append(startTaskParams(), tektonStringParam("clone-depth", "5")), Workspaces: tektonDefaultWorkspaceBindings(), }, @@ -157,7 +157,7 @@ func TestCreatePipelineRun(t *testing.T) { wantFinallyTasks := []tekton.PipelineTask{ { Name: "finish", - TaskRef: &tekton.TaskRef{Kind: "Task", Name: "ods-finish"}, + TaskRef: &tekton.TaskRef{Kind: "Task", Name: "ods-pipeline-finish"}, Params: []tekton.Param{ tektonStringParam("pipeline-run-name", "$(context.pipelineRun.name)"), tektonStringParam("aggregate-tasks-status", "overriden"), @@ -173,7 +173,6 @@ func TestCreatePipelineRun(t *testing.T) { func TestAssemblePipeline(t *testing.T) { taskKind := tekton.NamespacedTaskKind - taskSuffix := "-latest" cfg := PipelineConfig{ PipelineInfo: PipelineInfo{ Project: "project", @@ -195,7 +194,7 @@ func TestAssemblePipeline(t *testing.T) { Tasks: []tekton.PipelineTask{ { Name: "build", - TaskRef: &tekton.TaskRef{Kind: taskKind, Name: "ods-build-go" + taskSuffix}, + TaskRef: &tekton.TaskRef{Kind: taskKind, Name: "ods-pipeline-go-build"}, Workspaces: []tekton.WorkspacePipelineTaskBinding{ {Name: "source", Workspace: sharedWorkspaceName}, }, @@ -204,12 +203,12 @@ func TestAssemblePipeline(t *testing.T) { Finally: []tekton.PipelineTask{ { Name: "final", - TaskRef: &tekton.TaskRef{Kind: taskKind, Name: "final" + taskSuffix}, + TaskRef: &tekton.TaskRef{Kind: taskKind, Name: "final"}, }, }, }, } - got := assemblePipelineSpec(cfg, taskKind, taskSuffix) + got := assemblePipelineSpec(cfg) want := &tekton.PipelineSpec{ Description: "", Params: []tekton.ParamSpec{ @@ -224,7 +223,7 @@ func TestAssemblePipeline(t *testing.T) { Tasks: []tekton.PipelineTask{ { 
Name: "start", - TaskRef: &tekton.TaskRef{Kind: taskKind, Name: "ods-start-latest"}, + TaskRef: &tekton.TaskRef{Kind: taskKind, Name: "ods-pipeline-start"}, Params: []tekton.Param{ tektonStringParam("url", "$(params.git-repo-url)"), tektonStringParam("git-full-ref", "$(params.git-full-ref)"), @@ -239,7 +238,7 @@ func TestAssemblePipeline(t *testing.T) { { Name: "build", RunAfter: []string{"start"}, - TaskRef: &tekton.TaskRef{Kind: taskKind, Name: "ods-build-go-latest"}, + TaskRef: &tekton.TaskRef{Kind: taskKind, Name: "ods-pipeline-go-build"}, Params: nil, Workspaces: tektonDefaultWorkspaceBindings(), }, @@ -247,12 +246,12 @@ func TestAssemblePipeline(t *testing.T) { Finally: []tekton.PipelineTask{ { Name: "final", - TaskRef: &tekton.TaskRef{Kind: taskKind, Name: "final-latest"}, + TaskRef: &tekton.TaskRef{Kind: taskKind, Name: "final"}, Params: nil, }, { Name: "finish", - TaskRef: &tekton.TaskRef{Kind: taskKind, Name: "ods-finish-latest"}, + TaskRef: &tekton.TaskRef{Kind: taskKind, Name: "ods-pipeline-finish"}, Params: []tekton.Param{ tektonStringParam("pipeline-run-name", "$(context.pipelineRun.name)"), tektonStringParam("aggregate-tasks-status", "$(tasks.status)"), @@ -320,7 +319,7 @@ func TestTasksRunAfterInjection(t *testing.T) { for name, tc := range tests { t.Run(name, func(t *testing.T) { cfg := PipelineConfig{PipelineSpec: config.Pipeline{Tasks: tc.cfgTasks}} - got := assemblePipelineSpec(cfg, tekton.NamespacedTaskKind, "") + got := assemblePipelineSpec(cfg) wantRunAfter := [][]string{} for _, task := range tc.want { wantRunAfter = append(wantRunAfter, task.RunAfter) diff --git a/internal/manager/schedule.go b/internal/manager/schedule.go index e8c8fc6f..611434a0 100644 --- a/internal/manager/schedule.go +++ b/internal/manager/schedule.go @@ -30,11 +30,6 @@ type Scheduler struct { TektonClient tektonClient.ClientInterface KubernetesClient kubernetesClient.ClientInterface Logger logging.LeveledLoggerInterface - // TaskKind is the Tekton resource kind for 
tasks. - // Either "ClusterTask" or "Task". - TaskKind tekton.TaskKind - // TaskSuffic is the suffix applied to tasks (version information). - TaskSuffix string StorageConfig StorageConfig } @@ -74,7 +69,7 @@ func (s *Scheduler) schedule(ctx context.Context, pData PipelineConfig) bool { s.Logger.Debugf("Found %d pipeline runs related to repository %s.", len(pipelineRuns.Items), pData.Repository) needQueueing := needsQueueing(pipelineRuns) s.Logger.Debugf("Creating run for pipeline %s (queued=%v) ...", pData.Component, needQueueing) - _, err = createPipelineRun(s.TektonClient, ctxt, pData, s.TaskKind, s.TaskSuffix, needQueueing) + _, err = createPipelineRun(s.TektonClient, ctxt, pData, needQueueing) if err != nil { s.Logger.Errorf(err.Error()) return false diff --git a/pkg/exchange/image.go b/pkg/exchange/image.go deleted file mode 100644 index 86f77024..00000000 --- a/pkg/exchange/image.go +++ /dev/null @@ -1,8 +0,0 @@ -package exchange - -// ImageDigest represants an image -type ImageDigest struct { - Name string - Tag string - Sha string -} diff --git a/pkg/exchange/test.go b/pkg/exchange/test.go deleted file mode 100644 index 0a302bdb..00000000 --- a/pkg/exchange/test.go +++ /dev/null @@ -1,42 +0,0 @@ -package exchange - -import "time" - -type PipelineInfo struct { - Time time.Time - URL string -} - -type ItemInfo struct { - Name string - Description string -} - -type TestReport struct { - Name string - Description string - Executor string - Time time.Time - Suites []TestSuite - Properties map[string]string // extension mechanims - // extra prop for linking to e.q. requirement(s)? -} - -type TestSuite struct { - Name string - Description string - Cases []TestCase - Duration time.Duration - Properties map[string]string // extension mechanims - // extra prop for linking to e.q. requirement(s)? 
-} - -type TestCase struct { - Name string - Description string - Result string // should be enum: Passed / Failed / Skipped - Message string - Duration time.Duration - Properties map[string]string // extension mechanims - // extra prop for linking to e.q. requirement(s)? -} diff --git a/pkg/odstasktest/workspace.go b/pkg/odstasktest/workspace.go index 4e83f378..486c2db9 100644 --- a/pkg/odstasktest/workspace.go +++ b/pkg/odstasktest/workspace.go @@ -8,6 +8,7 @@ import ( ttr "github.com/opendevstack/ods-pipeline/pkg/tektontaskrun" ) +// GetSourceWorkspaceContext reads the ODS context from the source workspace. func GetSourceWorkspaceContext(t *testing.T, config *ttr.TaskRunConfig) (dir string, ctxt *pipelinectxt.ODSContext) { dir = config.WorkspaceConfigs["source"].Dir ctxt, err := pipelinectxt.NewFromCache(dir) diff --git a/pkg/sonar/client.go b/pkg/sonar/client.go deleted file mode 100644 index 0eb8421b..00000000 --- a/pkg/sonar/client.go +++ /dev/null @@ -1,123 +0,0 @@ -package sonar - -import ( - b64 "encoding/base64" - "fmt" - "io" - "net/http" - "net/url" - "strings" - "time" - - "github.com/opendevstack/ods-pipeline/pkg/logging" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" -) - -type ClientInterface interface { - Scan(sonarProject, branch, commit string, pr *PullRequest, outWriter, errWriter io.Writer) error - QualityGateGet(p QualityGateGetParams) (*QualityGate, error) - GenerateReports(sonarProject, author, branch, rootPath, artifactPrefix string) error - ExtractComputeEngineTaskID(filename string) (string, error) - ComputeEngineTaskGet(p ComputeEngineTaskGetParams) (*ComputeEngineTask, error) -} - -// Loosely based on https://github.com/brandur/wanikaniapi. 
-type Client struct { - httpClient *http.Client - clientConfig *ClientConfig - baseURL *url.URL -} - -type ClientConfig struct { - Timeout time.Duration - APIToken string - HTTPClient *http.Client - MaxRetries int - BaseURL string - ServerEdition string - TrustStore string - TrustStorePassword string - Debug bool - // Logger is the logger to send logging messages to. - Logger logging.LeveledLoggerInterface -} - -func NewClient(clientConfig *ClientConfig) (*Client, error) { - httpClient := clientConfig.HTTPClient - if httpClient == nil { - httpClient = &http.Client{} - } - if clientConfig.Timeout > 0 { - httpClient.Timeout = clientConfig.Timeout - } else { - httpClient.Timeout = 20 * time.Second - } - if clientConfig.Logger == nil { - clientConfig.Logger = &logging.LeveledLogger{Level: logging.LevelError} - } - if clientConfig.ServerEdition == "" { - clientConfig.ServerEdition = "community" - } - baseURL, err := url.Parse(clientConfig.BaseURL) - if err != nil { - return nil, fmt.Errorf("parse base URL: %w", err) - } - return &Client{ - httpClient: httpClient, - clientConfig: clientConfig, - baseURL: baseURL, - }, nil -} - -// ProjectKey returns the SonarQube project key for given context and artifact prefix. -// Monorepo support: separate projects in SonarQube. -// See https://community.sonarsource.com/t/monorepo-and-sonarqube/37990/3. 
-func ProjectKey(ctxt *pipelinectxt.ODSContext, artifactPrefix string) string { - sonarProject := fmt.Sprintf("%s-%s", ctxt.Project, ctxt.Component) - if len(artifactPrefix) > 0 { - sonarProject = fmt.Sprintf("%s-%s", sonarProject, strings.TrimSuffix(artifactPrefix, "-")) - } - return sonarProject -} - -func (c *Client) logger() logging.LeveledLoggerInterface { - return c.clientConfig.Logger -} - -func (c *Client) javaSystemProperties() []string { - return []string{ - fmt.Sprintf("-Djavax.net.ssl.trustStore=%s", c.clientConfig.TrustStore), - fmt.Sprintf("-Djavax.net.ssl.trustStorePassword=%s", c.clientConfig.TrustStorePassword), - } -} - -func (c *Client) get(urlPath string) (int, []byte, error) { - u, err := c.baseURL.Parse(urlPath) - if err != nil { - return 0, nil, fmt.Errorf("parse URL path: %w", err) - } - c.logger().Debugf("GET %s", u) - req, err := http.NewRequest("GET", u.String(), nil) - if err != nil { - return 0, nil, fmt.Errorf("could not create request: %s", err) - } - - res, err := c.do(req) - if err != nil { - return 500, nil, fmt.Errorf("got error %s", err) - } - defer res.Body.Close() - - body, err := io.ReadAll(res.Body) - return res.StatusCode, body, err -} - -func (c *Client) do(req *http.Request) (*http.Response, error) { - // The user token is sent via the login field of HTTP basic authentication, - // without any password. See https://docs.sonarqube.org/latest/extend/web-api/. 
- credentials := fmt.Sprintf("%s:", c.clientConfig.APIToken) - basicAuth := b64.StdEncoding.EncodeToString([]byte(credentials)) - req.Header.Set("Content-Type", "application/json") - req.Header.Set("Authorization", fmt.Sprintf("Basic %s", basicAuth)) - return c.httpClient.Do(req) -} diff --git a/pkg/sonar/client_test.go b/pkg/sonar/client_test.go deleted file mode 100644 index 936ed968..00000000 --- a/pkg/sonar/client_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package sonar - -import ( - "fmt" - "net/http" - "net/http/httptest" - "testing" -) - -func testClient(t *testing.T, baseURL string) *Client { - c, err := NewClient(&ClientConfig{BaseURL: baseURL}) - if err != nil { - t.Fatal(err) - } - return c -} - -func TestGetRequest(t *testing.T) { - srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, r.URL.Path) - })) - defer srv.Close() - tests := map[string]struct { - baseURL string - }{ - "base URL without trailing slash": { - baseURL: srv.URL, - }, - "base URL with trailing slash": { - baseURL: srv.URL + "/", - }, - } - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - bitbucketClient := testClient(t, tc.baseURL) - requestPath := "/foo" - code, out, err := bitbucketClient.get(requestPath) - if err != nil { - t.Fatal(err) - } - if code != 200 { - t.Fatal("expected 200") - } - if string(out) != requestPath { - t.Fatalf("expected %s, got: %s", requestPath, string(out)) - } - }) - } -} diff --git a/pkg/sonar/compute_engine.go b/pkg/sonar/compute_engine.go deleted file mode 100644 index bfc8d7a8..00000000 --- a/pkg/sonar/compute_engine.go +++ /dev/null @@ -1,61 +0,0 @@ -package sonar - -import ( - "encoding/json" - "fmt" -) - -const ( - TaskStatusInProgress = "IN_PROGRESS" - TaskStatusPending = "PENDING" - TaskStatusSuccess = "SUCCESS" - TaskStatusFailed = "FAILED" -) - -type ComputeEngineTask struct { - Organization string `json:"organization"` - ID string `json:"id"` - Type string `json:"type"` - 
ComponentID string `json:"componentId"` - ComponentKey string `json:"componentKey"` - ComponentName string `json:"componentName"` - ComponentQualifier string `json:"componentQualifier"` - AnalysisID string `json:"analysisId"` - Status string `json:"status"` - SubmittedAt string `json:"submittedAt"` - StartedAt string `json:"startedAt"` - ExecutedAt string `json:"executedAt"` - ExecutionTimeMs int `json:"executionTimeMs"` - ErrorMessage string `json:"errorMessage"` - Logs bool `json:"logs"` - HasErrorStacktrace bool `json:"hasErrorStacktrace"` - ErrorStacktrace string `json:"errorStacktrace"` - ScannerContext string `json:"scannerContext"` - HasScannerContext bool `json:"hasScannerContext"` -} - -type computeEngineTaskResponse struct { - Task *ComputeEngineTask `json:"task"` -} - -type ComputeEngineTaskGetParams struct { - AdditionalFields string `json:"additionalFields"` - ID string `json:"id"` -} - -func (c *Client) ComputeEngineTaskGet(p ComputeEngineTaskGetParams) (*ComputeEngineTask, error) { - urlPath := fmt.Sprintf("/api/ce/task?id=%s", p.ID) - statusCode, response, err := c.get(urlPath) - if err != nil { - return nil, fmt.Errorf("request returned err: %w", err) - } - if statusCode != 200 { - return nil, fmt.Errorf("request returned unexpected response code: %d, body: %s", statusCode, string(response)) - } - var cetr *computeEngineTaskResponse - err = json.Unmarshal(response, &cetr) - if err != nil { - return nil, fmt.Errorf("could not unmarshal response: %w", err) - } - return cetr.Task, nil -} diff --git a/pkg/sonar/compute_engine_test.go b/pkg/sonar/compute_engine_test.go deleted file mode 100644 index 838828df..00000000 --- a/pkg/sonar/compute_engine_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package sonar - -import ( - "testing" - - "github.com/opendevstack/ods-pipeline/test/testserver" -) - -func TestComputeEngineTaskGet(t *testing.T) { - - srv, cleanup := testserver.NewTestServer(t) - defer cleanup() - c := testClient(t, srv.Server.URL) - - tests := 
map[string]struct { - Fixture string - WantStatus string - }{ - "FAILED status": { - Fixture: "sonar/task_failed.json", - WantStatus: TaskStatusFailed, - }, - "SUCCESS status": { - Fixture: "sonar/task_success.json", - WantStatus: TaskStatusSuccess, - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - srv.EnqueueResponse( - t, "/api/ce/task", - 200, tc.Fixture, - ) - taskID := "AVAn5RKqYwETbXvgas-I" - got, err := c.ComputeEngineTaskGet(ComputeEngineTaskGetParams{ID: taskID}) - if err != nil { - t.Fatalf("Unexpected error on request: %s", err) - } - - // check extracted status matches - if got.Status != tc.WantStatus { - t.Fatalf("want %s, got %s", tc.WantStatus, got.Status) - } - - // check sent task ID matches - lr, err := srv.LastRequest() - if err != nil { - t.Fatal(err) - } - q := lr.URL.Query() - if q.Get("id") != taskID { - t.Fatalf("want %s, got %s", taskID, q.Get("id")) - } - }) - } -} diff --git a/pkg/sonar/quality_gate.go b/pkg/sonar/quality_gate.go deleted file mode 100644 index 575a7309..00000000 --- a/pkg/sonar/quality_gate.go +++ /dev/null @@ -1,68 +0,0 @@ -package sonar - -import ( - "encoding/json" - "fmt" -) - -const ( - QualityGateStatusOk = "OK" - QualityGateStatusWarn = "WARN" - QualityGateStatusError = "ERROR" - QualityGateStatusNone = "NONE" -) - -type QualityGate struct { - ProjectStatus QualityGateProjectStatus `json:"projectStatus"` -} - -type QualityGateProjectStatus struct { - Status string `json:"status"` - IgnoredConditions bool `json:"ignoredConditions"` - Conditions []QualityGateCondition `json:"conditions"` - Periods []QualityGatePeriod `json:"periods"` -} - -type QualityGateCondition struct { - Status string `json:"status"` - MetricKey string `json:"metricKey"` - Comparator string `json:"comparator"` - PeriodIndex int `json:"periodIndex"` - ErrorThreshold string `json:"errorThreshold,omitempty"` - ActualValue string `json:"actualValue"` -} - -type QualityGatePeriod struct { - Index int `json:"index"` - 
Mode string `json:"mode"` - Date string `json:"date"` - Parameter string `json:"parameter"` -} - -type QualityGateGetParams struct { - ProjectKey string - Branch string - PullRequest string -} - -func (c *Client) QualityGateGet(p QualityGateGetParams) (*QualityGate, error) { - urlPath := "/api/qualitygates/project_status?projectKey=" + p.ProjectKey - if p.PullRequest != "" && p.PullRequest != "0" { - urlPath = urlPath + "&pullRequest=" + p.PullRequest - } else if p.Branch != "" { - urlPath = urlPath + "&branch=" + p.Branch - } - statusCode, response, err := c.get(urlPath) - if err != nil { - return &QualityGate{ProjectStatus: QualityGateProjectStatus{Status: QualityGateStatusNone}}, nil - } - if statusCode != 200 { - return nil, fmt.Errorf("request returned unexpected response code: %d, body: %s", statusCode, string(response)) - } - var qg *QualityGate - err = json.Unmarshal(response, &qg) - if err != nil { - return qg, err - } - return qg, nil -} diff --git a/pkg/sonar/quality_gate_test.go b/pkg/sonar/quality_gate_test.go deleted file mode 100644 index 0677321d..00000000 --- a/pkg/sonar/quality_gate_test.go +++ /dev/null @@ -1,75 +0,0 @@ -package sonar - -import ( - "testing" - - "github.com/opendevstack/ods-pipeline/test/testserver" -) - -func TestQualityGateGet(t *testing.T) { - - srv, cleanup := testserver.NewTestServer(t) - defer cleanup() - c := testClient(t, srv.Server.URL) - - tests := map[string]struct { - responseFixture string - params QualityGateGetParams - wantRequestURI string - wantStatus string - }{ - "ERROR status": { - params: QualityGateGetParams{ProjectKey: "foo"}, - responseFixture: "sonar/project_status_error.json", - wantRequestURI: "/api/qualitygates/project_status?projectKey=foo", - wantStatus: "ERROR", - }, - "OK status": { - params: QualityGateGetParams{ProjectKey: "foo"}, - responseFixture: "sonar/project_status_ok.json", - wantRequestURI: "/api/qualitygates/project_status?projectKey=foo", - wantStatus: "OK", - }, - "OK status for 
branch": { - params: QualityGateGetParams{ProjectKey: "foo", Branch: "bar"}, - responseFixture: "sonar/project_status_ok.json", - wantRequestURI: "/api/qualitygates/project_status?projectKey=foo&branch=bar", - wantStatus: "OK", - }, - "OK status for branch (PR=0)": { - params: QualityGateGetParams{ProjectKey: "foo", Branch: "bar", PullRequest: "0"}, - responseFixture: "sonar/project_status_ok.json", - wantRequestURI: "/api/qualitygates/project_status?projectKey=foo&branch=bar", - wantStatus: "OK", - }, - "OK status for PR": { - params: QualityGateGetParams{ProjectKey: "foo", PullRequest: "123"}, - responseFixture: "sonar/project_status_ok.json", - wantRequestURI: "/api/qualitygates/project_status?projectKey=foo&pullRequest=123", - wantStatus: "OK", - }, - } - - for name, tc := range tests { - t.Run(name, func(t *testing.T) { - srv.EnqueueResponse( - t, "/api/qualitygates/project_status", - 200, tc.responseFixture, - ) - got, err := c.QualityGateGet(tc.params) - if err != nil { - t.Fatalf("Unexpected error on request: %s", err) - } - if got.ProjectStatus.Status != tc.wantStatus { - t.Fatalf("want %s, got %s", tc.wantStatus, got.ProjectStatus.Status) - } - req, err := srv.LastRequest() - if err != nil { - t.Fatal(err) - } - if req.URL.RequestURI() != tc.wantRequestURI { - t.Fatalf("want request URI %s, got %s", tc.wantRequestURI, req.URL.RequestURI()) - } - }) - } -} diff --git a/pkg/sonar/report.go b/pkg/sonar/report.go deleted file mode 100644 index 5328b74a..00000000 --- a/pkg/sonar/report.go +++ /dev/null @@ -1,75 +0,0 @@ -package sonar - -import ( - "fmt" - "path/filepath" - "time" - - "github.com/opendevstack/ods-pipeline/internal/command" - "github.com/opendevstack/ods-pipeline/internal/file" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" -) - -// GenerateReports generates SonarQube reports using cnesreport. -// See https://github.com/cnescatlab/sonar-cnes-report. 
-func (c *Client) GenerateReports(sonarProject, author, branch, rootPath, artifactPrefix string) error { - reportParams := append( - c.javaSystemProperties(), - "-jar", "/usr/local/cnes/cnesreport.jar", - "-s", c.clientConfig.BaseURL, - "-t", c.clientConfig.APIToken, - "-p", sonarProject, - "-a", author, - branch, - ) - stdout, stderr, err := command.RunBuffered("java", reportParams) - if err != nil { - return fmt.Errorf( - "report generation failed: %w, stderr: %s, stdout: %s", - err, string(stderr), string(stdout), - ) - } - - artifactsPath := filepath.Join(rootPath, pipelinectxt.SonarAnalysisPath) - err = copyReportFiles(sonarProject, artifactsPath, artifactPrefix) - if err != nil { - return fmt.Errorf("copying report to artifacts failed: %w", err) - } - - return nil -} - -func copyReportFiles(project, destinationDir, artifactPrefix string) error { - analysisReportFile := fmt.Sprintf( - "%s-%s-analysis-report.md", - currentDate(), - project, - ) - err := file.Copy( - analysisReportFile, - filepath.Join(destinationDir, artifactPrefix+"analysis-report.md"), - ) - if err != nil { - return fmt.Errorf("copying %s failed: %w", analysisReportFile, err) - } - - issuesReportFile := fmt.Sprintf( - "%s-%s-issues-report.csv", - currentDate(), - project, - ) - err = file.Copy( - issuesReportFile, - filepath.Join(destinationDir, artifactPrefix+"issues-report.csv"), - ) - if err != nil { - return fmt.Errorf("copying %s failed: %w", issuesReportFile, err) - } - return nil -} - -// currentDate returns the current date as YYYY-MM-DD -func currentDate() string { - currentTime := time.Now() - return currentTime.Format("2006-01-02") -} diff --git a/pkg/sonar/scan.go b/pkg/sonar/scan.go deleted file mode 100644 index 988de1ee..00000000 --- a/pkg/sonar/scan.go +++ /dev/null @@ -1,111 +0,0 @@ -package sonar - -import ( - "bufio" - "fmt" - "io" - "os" - "strings" - - "github.com/opendevstack/ods-pipeline/internal/command" -) - -type PullRequest struct { - Key string - Branch string - 
Base string -} - -// Scan report -type ReportTask struct { - ProjectKey string - ServerUrl string - ServerVersion string - Branch string - DashboardUrl string - CeTaskId string - CeTaskUrl string -} - -const ( - ScannerworkDir = ".scannerwork" - ReportTaskFilename = "report-task.txt" - ReportTaskFile = ScannerworkDir + "/" + ReportTaskFilename -) - -// Scan scans the source code and uploads the analysis to given SonarQube project. -// If pr is non-nil, information for pull request decoration is sent. -func (c *Client) Scan(sonarProject, branch, commit string, pr *PullRequest, outWriter, errWriter io.Writer) error { - scannerParams := []string{ - fmt.Sprintf("-Dsonar.host.url=%s", c.clientConfig.BaseURL), - "-Dsonar.scm.provider=git", - fmt.Sprintf("-Dsonar.projectKey=%s", sonarProject), - fmt.Sprintf("-Dsonar.projectName=%s", sonarProject), - fmt.Sprintf("-Dsonar.projectVersion=%s", commit), - } - if c.clientConfig.Debug { - scannerParams = append(scannerParams, "-X") - } - // Both Branch Analysis and Pull Request Analysis are only available - // starting in Developer Edition, see - // https://docs.sonarqube.org/latest/branches/overview/ and - // https://docs.sonarqube.org/latest/analysis/pull-request/. - if c.clientConfig.ServerEdition != "community" { - if pr != nil { - scannerParams = append( - scannerParams, - fmt.Sprintf("-Dsonar.pullrequest.key=%s", pr.Key), - fmt.Sprintf("-Dsonar.pullrequest.branch=%s", pr.Branch), - fmt.Sprintf("-Dsonar.pullrequest.base=%s", pr.Base), - ) - } else { - scannerParams = append(scannerParams, fmt.Sprintf("-Dsonar.branch.name=%s", branch)) - } - } - - c.logger().Debugf("Scan params: %v", scannerParams) - // The authentication token of a SonarQube user with "Execute Analysis" - // permission on the project is passed as "sonar.login" for authentication, - // see https://docs.sonarqube.org/latest/analysis/analysis-parameters/. 
- scannerParams = append(scannerParams, fmt.Sprintf("-Dsonar.login=%s", c.clientConfig.APIToken)) - - return command.Run( - "sonar-scanner", scannerParams, - []string{fmt.Sprintf("SONAR_SCANNER_OPTS=%s", strings.Join(c.javaSystemProperties(), " "))}, - outWriter, errWriter, - ) -} - -/* -Example of the file located in .scannerwork/report-task.txt: - - projectKey=XXXX-python - serverUrl=https://sonarqube-ods.XXXX.com - serverVersion=8.2.0.32929 - branch=dummy - dashboardUrl=https://sonarqube-ods.XXXX.com/dashboard?id=XXXX-python&branch=dummy - ceTaskId=AXxaAoUSsjAMlIY9kNmn - ceTaskUrl=https://sonarqube-ods.XXXX.com/api/ce/task?id=AXxaAoUSsjAMlIY9kNmn -*/ -func (c *Client) ExtractComputeEngineTaskID(filename string) (string, error) { - file, err := os.Open(filename) - if err != nil { - return "", err - } - defer file.Close() - - taskIDPrefix := "ceTaskId=" - scanner := bufio.NewScanner(file) - for scanner.Scan() { - line := strings.TrimSpace(scanner.Text()) - if strings.HasPrefix(line, taskIDPrefix) { - return strings.TrimPrefix(line, taskIDPrefix), nil - } - } - - if err := scanner.Err(); err != nil { - return "", err - } - - return "", fmt.Errorf("properties file %s does not contain %s", filename, taskIDPrefix) -} diff --git a/pkg/sonar/scan_test.go b/pkg/sonar/scan_test.go deleted file mode 100644 index 629f543f..00000000 --- a/pkg/sonar/scan_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package sonar - -import ( - "path/filepath" - "testing" - - "github.com/opendevstack/ods-pipeline/internal/projectpath" -) - -func TestExtractComputeEngineTaskID(t *testing.T) { - - c := testClient(t, "") - want := "AVAn5RKqYwETbXvgas-I" - fixture := filepath.Join(projectpath.Root, "test/testdata/fixtures/sonar", ReportTaskFilename) - got, err := c.ExtractComputeEngineTaskID(fixture) - if err != nil { - t.Fatal(err) - } - - // check extracted status matches - if got != want { - t.Fatalf("want %s, got %s", want, got) - } -} diff --git a/pkg/tasktesting/bitbucket.go 
b/pkg/tasktesting/bitbucket.go index d1f23839..5bc900e0 100644 --- a/pkg/tasktesting/bitbucket.go +++ b/pkg/tasktesting/bitbucket.go @@ -19,7 +19,7 @@ const ( ) // BitbucketClientOrFatal returns a Bitbucket client, configured based on ConfigMap/Secret in the given namespace. -func BitbucketClientOrFatal(t *testing.T, c *kclient.Clientset, namespace string, privateCert bool) *bitbucket.Client { +func BitbucketClientOrFatal(t *testing.T, c kclient.Interface, namespace string, privateCert bool) *bitbucket.Client { var privateCertPath string if privateCert { privateCertPath = filepath.Join(projectpath.Root, PrivateCertFile) diff --git a/pkg/tasktesting/git.go b/pkg/tasktesting/git.go index 25ec1179..0ced4cf5 100644 --- a/pkg/tasktesting/git.go +++ b/pkg/tasktesting/git.go @@ -74,7 +74,7 @@ func RemoveAll(t *testing.T, path ...string) { } // SetupBitbucketRepo initializes a Git repo, commits, pushes to Bitbucket and writes the result to the .ods cache. -func SetupBitbucketRepo(t *testing.T, c *kclient.Clientset, ns, wsDir, projectKey string, privateCert bool) *pipelinectxt.ODSContext { +func SetupBitbucketRepo(t *testing.T, c kclient.Interface, ns, wsDir, projectKey string, privateCert bool) *pipelinectxt.ODSContext { initAndCommitOrFatal(t, wsDir) originURL := pushToBitbucketOrFatal(t, c, ns, wsDir, projectKey, privateCert) @@ -150,7 +150,7 @@ func PushFileToBitbucketOrFatal(t *testing.T, c *kclient.Clientset, ns, wsDir, b } } -func pushToBitbucketOrFatal(t *testing.T, c *kclient.Clientset, ns, wsDir, projectKey string, privateCert bool) string { +func pushToBitbucketOrFatal(t *testing.T, c kclient.Interface, ns, wsDir, projectKey string, privateCert bool) string { repoName := filepath.Base(wsDir) bbURL := "http://localhost:7990" bbToken, err := kubernetes.GetSecretKey(c, ns, "ods-bitbucket-auth", "password") diff --git a/pkg/tasktesting/nexus.go b/pkg/tasktesting/nexus.go index 502c9d83..99a65556 100644 --- a/pkg/tasktesting/nexus.go +++ b/pkg/tasktesting/nexus.go @@ 
-18,7 +18,7 @@ const ( ) // NexusClientOrFatal returns a Nexus client, configured based on ConfigMap/Secret in the given namespace. -func NexusClientOrFatal(t *testing.T, c *kclient.Clientset, namespace string, privateCert bool) *nexus.Client { +func NexusClientOrFatal(t *testing.T, c kclient.Interface, namespace string, privateCert bool) *nexus.Client { ncc, err := installation.NewNexusClientConfig( c, namespace, &logging.LeveledLogger{Level: logging.LevelDebug}, ) diff --git a/pkg/tektontaskrun/cluster.go b/pkg/tektontaskrun/cluster.go index 20be2a5d..91066ab2 100644 --- a/pkg/tektontaskrun/cluster.go +++ b/pkg/tektontaskrun/cluster.go @@ -20,6 +20,7 @@ const ( KinDMountHostPath = "/tmp/ods-pipeline/kind-mount" KinDMountContainerPath = "/files" KinDRegistry = "localhost:5000" + KinDName = "ods-pipeline" ) var recreateClusterFlag = flag.Bool("ods-recreate-cluster", false, "Whether to remove and recreate the KinD cluster named 'ods-pipeline'") diff --git a/pkg/tektontaskrun/namespace_opt.go b/pkg/tektontaskrun/namespace_opt.go index 3f626474..23e21e70 100644 --- a/pkg/tektontaskrun/namespace_opt.go +++ b/pkg/tektontaskrun/namespace_opt.go @@ -73,7 +73,7 @@ func initNamespaceAndPVC(cc *ClusterConfig, nc *NamespaceConfig) (cleanup func() }, nil } -func createTempNamespace(clientset *kubernetes.Clientset, name string) (namespace *corev1.Namespace, cleanup func(), err error) { +func createTempNamespace(clientset kubernetes.Interface, name string) (namespace *corev1.Namespace, cleanup func(), err error) { namespace, err = clientset.CoreV1().Namespaces().Create( context.TODO(), &corev1.Namespace{ @@ -84,6 +84,7 @@ func createTempNamespace(clientset *kubernetes.Clientset, name string) (namespac metav1.CreateOptions{}, ) return namespace, func() { + log.Printf("Removing temporary namespace %q ...", name) err := removeNamespace(clientset, name) if err != nil { log.Println(err) @@ -91,7 +92,7 @@ func createTempNamespace(clientset *kubernetes.Clientset, name string) (namespac 
}, err } -func createTempPVC(clientset *kubernetes.Clientset, cc *ClusterConfig, name string) (pvc *corev1.PersistentVolumeClaim, cleanup func(), err error) { +func createTempPVC(clientset kubernetes.Interface, cc *ClusterConfig, name string) (pvc *corev1.PersistentVolumeClaim, cleanup func(), err error) { _, err = k.CreatePersistentVolume( clientset, name, @@ -120,10 +121,10 @@ func createTempPVC(clientset *kubernetes.Clientset, cc *ClusterConfig, name stri }, err } -func removeNamespace(clientset *kubernetes.Clientset, name string) error { +func removeNamespace(clientset kubernetes.Interface, name string) error { return clientset.CoreV1().Namespaces().Delete(context.Background(), name, metav1.DeleteOptions{}) } -func removePVC(clientset *kubernetes.Clientset, name string) error { +func removePVC(clientset kubernetes.Interface, name string) error { return clientset.CoreV1().PersistentVolumes().Delete(context.Background(), name, metav1.DeleteOptions{}) } diff --git a/pkg/tektontaskrun/taskrun.go b/pkg/tektontaskrun/taskrun.go index 6726c89f..a681ac8e 100644 --- a/pkg/tektontaskrun/taskrun.go +++ b/pkg/tektontaskrun/taskrun.go @@ -20,17 +20,29 @@ import ( "knative.dev/pkg/apis" ) -func runTask(tc *TaskRunConfig) (*tekton.TaskRun, error) { +func TektonParamsFromStringParams(stringParams map[string]string) []tekton.Param { + var params []tekton.Param + for k, v := range stringParams { + tp := tekton.Param{Name: k, Value: tekton.ParamValue{ + Type: tekton.ParamTypeString, + StringVal: v, + }} + params = append(params, tp) + } + return params +} + +func runTask(tc *TaskRunConfig) (*tekton.TaskRun, bytes.Buffer, error) { clients := k.NewClients() tr, err := createTaskRunWithParams(clients.TektonClientSet, tc) if err != nil { - return nil, err + return nil, bytes.Buffer{}, err } // TODO: if last output is short, it may be omitted from the logs. 
- taskRun, _, err := watchTaskRunUntilDone(clients, tc, tr) + taskRun, logsBuffer, err := watchTaskRunUntilDone(clients, tc, tr) if err != nil { - return nil, err + return nil, logsBuffer, err } log.Printf( @@ -39,7 +51,7 @@ func runTask(tc *TaskRunConfig) (*tekton.TaskRun, error) { taskRun.Status.GetCondition(apis.ConditionSucceeded).GetMessage(), ) - return taskRun, nil + return taskRun, logsBuffer, nil } func createTaskRunWithParams(tknClient *pipelineclientset.Clientset, tc *TaskRunConfig) (*tekton.TaskRun, error) { diff --git a/pkg/tektontaskrun/taskrun_opt.go b/pkg/tektontaskrun/taskrun_opt.go index ed5ff5f8..c82ca1e9 100644 --- a/pkg/tektontaskrun/taskrun_opt.go +++ b/pkg/tektontaskrun/taskrun_opt.go @@ -1,6 +1,7 @@ package tektontaskrun import ( + "bytes" "errors" "log" "os" @@ -25,7 +26,7 @@ type TaskRunConfig struct { Namespace string ServiceAccountName string Timeout time.Duration - AfterRunFunc func(config *TaskRunConfig, taskRun *tekton.TaskRun) + AfterRunFunc func(config *TaskRunConfig, taskRun *tekton.TaskRun, logs bytes.Buffer) CleanupFuncs []func() NamespaceConfig *NamespaceConfig WorkspaceConfigs map[string]*WorkspaceConfig @@ -57,7 +58,7 @@ func RunTask(opts ...TaskRunOpt) error { cleanupOnInterrupt(trc.Cleanup) defer trc.Cleanup() - taskRun, err := runTask(trc) + taskRun, logsBuffer, err := runTask(trc) if err != nil { return err } @@ -67,7 +68,7 @@ func RunTask(opts ...TaskRunOpt) error { } if trc.AfterRunFunc != nil { - trc.AfterRunFunc(trc, taskRun) + trc.AfterRunFunc(trc, taskRun, logsBuffer) } return err @@ -163,13 +164,7 @@ func WithParams(params ...tekton.Param) TaskRunOpt { // simple parameters compares to WithParams. 
func WithStringParams(params map[string]string) TaskRunOpt { return func(c *TaskRunConfig) error { - for k, v := range params { - tp := tekton.Param{Name: k, Value: tekton.ParamValue{ - Type: tekton.ParamTypeString, - StringVal: v, - }} - c.Params = append(c.Params, tp) - } + c.Params = append(c.Params, TektonParamsFromStringParams(params)...) return nil } } @@ -187,7 +182,7 @@ func ExpectFailure() TaskRunOpt { // AfterRun registers a function which is run after the task run completes. // The function will receive the task run configuration, as well as an instance // of the TaskRun. -func AfterRun(f func(c *TaskRunConfig, r *tekton.TaskRun)) TaskRunOpt { +func AfterRun(f func(c *TaskRunConfig, r *tekton.TaskRun, l bytes.Buffer)) TaskRunOpt { return func(c *TaskRunConfig) error { c.AfterRunFunc = f return nil diff --git a/scripts/install-inside-kind.sh b/scripts/install-inside-kind.sh index 3000585a..308b46ea 100755 --- a/scripts/install-inside-kind.sh +++ b/scripts/install-inside-kind.sh @@ -5,12 +5,11 @@ script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" ods_pipeline_dir=${script_dir%/*} kind_deploy_path="/tmp/ods-pipeline/kind-deploy" kind_values_dir="/tmp/ods-pipeline/kind-values" -helm_generated_values_file="${kind_deploy_path}/ods-pipeline/values.generated.yaml" +helm_generated_values_file="${kind_deploy_path}/chart/values.generated.yaml" url_suffix="http" bitbucket_auth="unavailable" nexus_auth="unavailable:unavailable" -sonar_auth="unavailable" if [ "$#" -gt 0 ]; then case $1 in @@ -29,36 +28,24 @@ fi if [ -f "${kind_values_dir}/nexus-auth" ]; then nexus_auth=$(cat "${kind_values_dir}/nexus-auth") fi -if [ -f "${kind_values_dir}/sonar-auth" ]; then - sonar_auth=$(cat "${kind_values_dir}/sonar-auth") -fi -if [ ! 
-e "${helm_generated_values_file}" ]; then - echo "setup:" > "${helm_generated_values_file}" -fi +touch "${helm_generated_values_file}" if [ -f "${kind_values_dir}/bitbucket-${url_suffix}" ]; then bitbucket_url=$(cat "${kind_values_dir}/bitbucket-${url_suffix}") - echo " bitbucketUrl: '${bitbucket_url}'" >> "${helm_generated_values_file}" + echo "bitbucketUrl: '${bitbucket_url}'" >> "${helm_generated_values_file}" fi if [ -f "${kind_values_dir}/nexus-${url_suffix}" ]; then nexus_url=$(cat "${kind_values_dir}/nexus-${url_suffix}") - echo " nexusUrl: '${nexus_url}'" >> "${helm_generated_values_file}" -fi -if [ -f "${kind_values_dir}/sonar-${url_suffix}" ]; then - sonar_url=$(cat "${kind_values_dir}/sonar-${url_suffix}") - echo " sonarUrl: '${sonar_url}'" >> "${helm_generated_values_file}" + echo "nexusUrl: '${nexus_url}'" >> "${helm_generated_values_file}" fi -values_arg="${kind_deploy_path}/ods-pipeline/values.kind.yaml" +values_arg="${kind_deploy_path}/chart/values.kind.yaml" if [ "$(cat "${helm_generated_values_file}")" != "setup:" ]; then values_arg="${values_arg},${helm_generated_values_file}" fi cd "${kind_deploy_path}" bash ./install.sh \ - --aqua-auth "unavailable:unavailable" \ - --aqua-scanner-url "none" \ --bitbucket-auth "${bitbucket_auth}" \ --nexus-auth "${nexus_auth}" \ - --sonar-auth "${sonar_auth}" \ -f "${values_arg}" "$@" diff --git a/scripts/run-nexus.sh b/scripts/run-nexus.sh index 5306eca3..1232e00a 100755 --- a/scripts/run-nexus.sh +++ b/scripts/run-nexus.sh @@ -48,7 +48,7 @@ docker build -t ${IMAGE_NAME} -f "Dockerfile.$(uname -m)" "${DOCKER_CONTEXT_DIR} cd - &> /dev/null docker run -d -p "${HOST_HTTP_PORT}:8081" --net kind --name ${CONTAINER_NAME} ${IMAGE_NAME} -if ! bash "${SCRIPT_DIR}/waitfor-nexus.sh" ; then +if ! 
bash "${SCRIPT_DIR}"/waitfor-nexus.sh ; then docker logs ${CONTAINER_NAME} exit 1 fi diff --git a/scripts/waitfor-bitbucket.sh b/scripts/waitfor-bitbucket.sh index c193d2f6..0debb250 100755 --- a/scripts/waitfor-bitbucket.sh +++ b/scripts/waitfor-bitbucket.sh @@ -29,7 +29,7 @@ until [ $n -ge 30 ]; do break else echo -n "." - sleep 10 + sleep 5 n=$((n+1)) fi done diff --git a/scripts/waitfor-nexus.sh b/scripts/waitfor-nexus.sh index 462de568..cdb3cf80 100755 --- a/scripts/waitfor-nexus.sh +++ b/scripts/waitfor-nexus.sh @@ -28,7 +28,7 @@ function waitForReady { break else echo -n "." - sleep 10 + sleep 5 n=$((n+1)) fi done diff --git a/scripts/waitfor-sonarqube.sh b/scripts/waitfor-sonarqube.sh index 8b0c431a..ea95e293 100755 --- a/scripts/waitfor-sonarqube.sh +++ b/scripts/waitfor-sonarqube.sh @@ -30,7 +30,7 @@ until [ $n -ge 30 ]; do break else echo -n "." - sleep 10 + sleep 5 n=$((n+1)) fi done diff --git a/tasks/ods-build-go.yaml b/tasks/ods-build-go.yaml deleted file mode 100644 index 2d71beb1..00000000 --- a/tasks/ods-build-go.yaml +++ /dev/null @@ -1,193 +0,0 @@ - -# Source: tasks/templates/task-ods-build-go.yaml -apiVersion: tekton.dev/v1beta1 -kind: 'Task' -metadata: - name: 'ods-build-go' - annotations: - "helm.sh/resource-policy": keep -spec: - description: | - Builds Go (module) applications. - - See https://github.com/opendevstack/ods-pipeline/blob/v0.13.2/docs/tasks/ods-build-go.adoc - params: - - name: working-dir - description: | - Working directory. The path must be relative to the root of the repository, - without leading `./` and trailing `/`. - type: string - default: "." - - name: enable-cgo - description: Whether to enable CGO. When not enabled the build will set `CGO_ENABLED=0`. - type: string - default: "false" - - name: go-os - description: "`GOOS` variable (the execution operating system such as `linux`, `windows`)." 
- type: string - default: "linux" - - name: go-arch - description: "`GOARCH` variable (the execution architecture such as `arm`, `amd64`)." - type: string - default: "amd64" - - name: output-dir - description: >- - Path to the directory into which the resulting Go binary should be copied, relative to `working-dir`. - This directory may then later be used as Docker context for example. - type: string - default: docker - - name: cache-build - description: >- - If enabled tasks uses or populates cache with the output dir contents (and artifacts) so that - a build can be skipped if the `working-dir` contents did not change. - You must set this to `"false"` if the build can be affected by files outside `working-dir`. See ADR caching-build-tasks for more details and workarounds. - type: string - default: "true" - - name: build-extra-inputs - description: >- - List of build source directories (as colon separated string) which in addition working-dir influence the build. - These directories are relative to the repository root. - If the contents in these directories change the cache is invalidated so that the build task will rebuild from scratch. - type: string - default: "" - - name: build-script - description: >- - Build script to execute. The - link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-go.sh[default script] - is located in the container image. If you specify a relative path - instead, it will be resolved from the workspace. See the task definition - for details how the build script is invoked. - type: string - default: "/usr/local/bin/build-go" - - name: pre-test-script - description: Script to execute before running tests, relative to the working directory. - type: string - default: "" - - name: sonar-quality-gate - description: Whether the SonarQube quality gate needs to pass for the task to succeed. - type: string - default: "false" - - name: sonar-skip - description: Whether to skip SonarQube analysis or not. 
- type: string - default: "false" - results: - - description: The cache location that the build task used. If caching is not enabled this will be an empty string. - name: build-reused-from-location - steps: - - name: build-go-binary - # Image is built from build/package/Dockerfile.go-toolset. - image: 'ghcr.io/opendevstack/ods-pipeline/ods-go-toolset:0.13.2' - env: - - name: HOME - value: '/tekton/home' - - name: CI - value: "true" - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: - {} - script: | - supply-sonar-project-properties-default --working-dir=$(params.working-dir) - echo -n "" > $(results.build-reused-from-location.path) - cache_build_key=go-$(params.go-os)-$(params.go-arch) - if copy-build-if-cached \ - --cache-build=$(params.cache-build) \ - --cache-build-key="$cache_build_key" \ - --build-extra-inputs=$(params.build-extra-inputs) \ - --cached-outputs=$(params.output-dir) \ - --cache-location-used-path=$(results.build-reused-from-location.path) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} ; then - exit 0 - fi - # Default build script is build/package/scripts/build-go.sh. - set +e - $(params.build-script) \ - --working-dir=$(params.working-dir) \ - --enable-cgo=$(params.enable-cgo) \ - --go-os=$(params.go-os) \ - --go-arch=$(params.go-arch) \ - --pre-test-script=$(params.pre-test-script) \ - --output-dir=$(params.output-dir) \ - --debug=${DEBUG} - build_exit=$? 
- set -e - copy-artifacts --debug=${DEBUG} - if [ $build_exit -ne 0 ]; then - exit $build_exit - fi - cache-build \ - --cache-build=$(params.cache-build) \ - --cache-build-key="$cache_build_key" \ - --build-extra-inputs=$(params.build-extra-inputs) \ - --cached-outputs=$(params.output-dir) \ - --cache-location-used-path=$(results.build-reused-from-location.path) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - - name: scan-with-sonar - # Image is built from build/package/Dockerfile.sonar. - image: 'ghcr.io/opendevstack/ods-pipeline/ods-sonar:0.13.2' - env: - - name: HOME - value: '/tekton/home' - - name: SONAR_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-sonar - - name: SONAR_EDITION - valueFrom: - configMapKeyRef: - key: edition - name: ods-sonar - - name: SONAR_AUTH_TOKEN - valueFrom: - secretKeyRef: - key: password - name: ods-sonar-auth - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: {} - script: | - if [ "$(params.sonar-skip)" = "true" ]; then - echo "Skipping SonarQube analysis" - else - mkdir -p .ods/artifacts/sonarqube-analysis - - truststore="${JAVA_HOME}/lib/security/cacerts" - if [ -f /etc/ssl/certs/private-cert.pem ]; then - truststore="$(pwd)/.ods-cache/truststore/cacerts" - configure-truststore --dest-store "${truststore}" - fi - # sonar is built from cmd/sonar/main.go. 
- sonar \ - -working-dir=$(params.working-dir) \ - -quality-gate=$(params.sonar-quality-gate) \ - -truststore "${truststore}" - fi - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - volumes: - - name: private-cert - secret: - secretName: ods-private-cert - optional: true - workspaces: - - name: source diff --git a/tasks/ods-build-gradle.yaml b/tasks/ods-build-gradle.yaml deleted file mode 100644 index bac13d9d..00000000 --- a/tasks/ods-build-gradle.yaml +++ /dev/null @@ -1,222 +0,0 @@ - -# Source: tasks/templates/task-ods-build-gradle.yaml -apiVersion: tekton.dev/v1beta1 -kind: 'Task' -metadata: - name: 'ods-build-gradle' - annotations: - "helm.sh/resource-policy": keep -spec: - description: | - Builds Gradle applications. - - See https://github.com/opendevstack/ods-pipeline/blob/v0.13.2/docs/tasks/ods-build-gradle.adoc - params: - - name: working-dir - description: | - Working directory. The path must be relative to the root of the repository, - without leading `./` and trailing `/`. - type: string - default: "." - - name: gradle-additional-tasks - description: >- - Additional gradle tasks to be passed to the gradle build. (default tasks called are `clean` and `build`). - type: string - default: "" - - name: gradle-options - description: >- - Options to be passed to the gradle build. - (See ref: https://docs.gradle.org/7.4.2/userguide/command_line_interface.html#sec:command_line_debugging) - type: string - default: "--no-daemon --stacktrace" - - name: gradle-opts-env - description: >- - Will be exposed to the build via `GRADLE_OPTS` environment variable. - Specifies JVM arguments to use when starting the Gradle client VM. The client VM only handles command line input/output, so it is rare that one would need to change its VM options. 
- You can still use this to change the settings for the Gradle daemon which runs the actual build by setting the according Gradle properties by `-D`. - If you want to set the JVM arguments for the actual build you would do this via `-Dorg.gradle.jvmargs=-Xmx1024M` - (See ref: https://docs.gradle.org/7.4.2/userguide/build_environment.html#sec:gradle_configuration_properties). - type: string - default: "-Dorg.gradle.jvmargs=-Xmx512M" - - name: output-dir - description: >- - Path to the directory into which the resulting Java application jar should be copied, relative to `working-dir`. - This directory may then later be used as Docker context for example. - type: string - default: docker - - name: cache-build - description: >- - If enabled tasks uses or populates cache with the output dir contents (and artifacts) so that - a build can be skipped if the `working-dir` contents did not change. - You must set this to `"false"` if the build can be affected by files outside `working-dir`. See ADR caching-build-tasks for more details and workarounds. - type: string - default: "true" - - name: build-extra-inputs - description: >- - List of build source directories (as colon separated string) which in addition working-dir influence the build. - These directories are relative to the repository root. - If the contents in these directories change the cache is invalidated so that the build task will rebuild from scratch. - type: string - default: "" - - name: cached-outputs - description: >- - List of build output directories (as colon separated string) to be cached. - These directories are relative to `working-dir`. - type: string - default: "docker" - - name: build-script - description: >- - Build script to execute. The - link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-gradle.sh[default script] - is located in the container image. If you specify a relative path - instead, it will be resolved from the workspace. 
See the task definition - for details how the build script is invoked. - type: string - default: "/usr/local/bin/build-gradle" - - name: gradle-build-dir - description: >- - Path to the directory into which Gradle publishes its build. - type: string - default: build - - name: sonar-quality-gate - description: Whether the SonarQube quality gate needs to pass for the task to succeed. - type: string - default: "false" - - name: sonar-skip - description: Whether to skip SonarQube analysis or not. - type: string - default: "false" - results: - - description: The cache location that the build task used. If caching is not enabled this will be an empty string. - name: build-reused-from-location - steps: - - name: build-gradle-binary - # Image is built from build/package/Dockerfile.gradle-toolset. - image: 'ghcr.io/opendevstack/ods-pipeline/ods-gradle-toolset:0.13.2' - env: - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - - name: HOME - value: '/tekton/home' - - name: CI - value: "true" - - name: GRADLE_OPTS - value: "$(params.gradle-opts-env)" - - name: NEXUS_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-nexus - - name: NEXUS_USERNAME - valueFrom: - secretKeyRef: - key: username - name: ods-nexus-auth - - name: NEXUS_PASSWORD - valueFrom: - secretKeyRef: - key: password - name: ods-nexus-auth - resources: - {} - script: | - supply-sonar-project-properties-default --working-dir=$(params.working-dir) - echo -n "" > $(results.build-reused-from-location.path) - cache_build_key=gradle - if copy-build-if-cached \ - --cache-build-key="$cache_build_key" \ - --build-extra-inputs=$(params.build-extra-inputs) \ - --cached-outputs=$(params.cached-outputs) \ - --cache-location-used-path=$(results.build-reused-from-location.path) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} ; then - exit 0 - fi - # Default build script is build/package/scripts/build-gradle.sh. 
- set +e - $(params.build-script) \ - --working-dir=$(params.working-dir) \ - --output-dir=$(params.output-dir) \ - --gradle-build-dir=$(params.gradle-build-dir) \ - --gradle-additional-tasks="$(params.gradle-additional-tasks)" \ - --gradle-options="$(params.gradle-options)" - build_exit=$? - set -e - copy-artifacts --debug=${DEBUG} - if [ $build_exit -ne 0 ]; then - exit $build_exit - fi - cache-build \ - --cache-build=$(params.cache-build) \ - --cache-build-key="$cache_build_key" \ - --build-extra-inputs=$(params.build-extra-inputs) \ - --cached-outputs=$(params.cached-outputs) \ - --cache-location-used-path=$(results.build-reused-from-location.path) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - - name: scan-with-sonar - # Image is built from build/package/Dockerfile.sonar. - image: 'ghcr.io/opendevstack/ods-pipeline/ods-sonar:0.13.2' - env: - - name: HOME - value: '/tekton/home' - - name: SONAR_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-sonar - - name: SONAR_EDITION - valueFrom: - configMapKeyRef: - key: edition - name: ods-sonar - - name: SONAR_AUTH_TOKEN - valueFrom: - secretKeyRef: - key: password - name: ods-sonar-auth - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: {} - script: | - if [ "$(params.sonar-skip)" = "true" ]; then - echo "Skipping SonarQube analysis" - else - mkdir -p .ods/artifacts/sonarqube-analysis - - truststore="${JAVA_HOME}/lib/security/cacerts" - if [ -f /etc/ssl/certs/private-cert.pem ]; then - truststore="$(pwd)/.ods-cache/truststore/cacerts" - configure-truststore --dest-store "${truststore}" - fi - # sonar is built from cmd/sonar/main.go. 
- sonar \ - -working-dir=$(params.working-dir) \ - -quality-gate=$(params.sonar-quality-gate) \ - -truststore "${truststore}" - fi - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - volumes: - - name: private-cert - secret: - secretName: ods-private-cert - optional: true - workspaces: - - name: source diff --git a/tasks/ods-build-npm.yaml b/tasks/ods-build-npm.yaml deleted file mode 100644 index 23851a65..00000000 --- a/tasks/ods-build-npm.yaml +++ /dev/null @@ -1,193 +0,0 @@ - -# Source: tasks/templates/task-ods-build-npm.yaml -apiVersion: tekton.dev/v1beta1 -kind: 'Task' -metadata: - name: 'ods-build-npm' - annotations: - "helm.sh/resource-policy": keep -spec: - description: | - Builds Node.js applications using npm. - - See https://github.com/opendevstack/ods-pipeline/blob/v0.13.2/docs/tasks/ods-build-npm.adoc - params: - - name: working-dir - description: | - Working directory. The path must be relative to the root of the repository, - without leading `./` and trailing `/`. - type: string - default: "." - - name: cache-build - description: >- - If enabled tasks uses or populates cache with the output dir contents (and artifacts) so that - a build can be skipped if the `working-dir` contents did not change. - You must set this to `"false"` if the build can be affected by files outside `working-dir`. See ADR caching-build-tasks for more details and workarounds. - type: string - default: "true" - - name: build-extra-inputs - description: >- - List of build source directories (as colon separated string) which in addition working-dir influence the build. - These directories are relative to the repository root. - If the contents in these directories change the cache is invalidated so that the build task will rebuild from scratch. 
- type: string - default: "" - - name: cached-outputs - description: >- - List of build output directories (as colon separated string) to be cached. - These directories are relative to the `working-dir` parameter` - Common build directories are `dist` (default), `build` and `public`. - If empty this could mean that the original sources are being used as build output and no caching of built files are needed. Nonetheless build skipping can still be remain enabled. - type: string - default: "dist" - - name: build-script - description: >- - Build script to execute. The - link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-npm.sh[default script] - is located in the container image. If you specify a relative path - instead, it will be resolved from the workspace. See the task definition - for details how the build script is invoked. - type: string - default: "/usr/local/bin/build-npm" - - name: sonar-quality-gate - description: Whether quality gate needs to pass. - type: string - default: "false" - - name: sonar-skip - description: Whether to skip the SonarQube analysis or not. - type: string - default: "false" - - name: node-version - description: "Node.js version to use - supported versions: 16, 18" - type: string - default: "18" - results: - - description: The cache location that the build task used. If caching is not enabled this will be an empty string. - name: build-reused-from-location - steps: - - name: build-npm - # Image is built from build/package/Dockerfile.node-npm-toolset. 
- image: 'ghcr.io/opendevstack/ods-pipeline/ods-node$(params.node-version)-npm-toolset:0.13.2' - env: - - name: HOME - value: '/tekton/home' - - name: CI - value: "true" - - name: NEXUS_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-nexus - - name: NEXUS_USERNAME - valueFrom: - secretKeyRef: - key: username - name: ods-nexus-auth - - name: NEXUS_PASSWORD - valueFrom: - secretKeyRef: - key: password - name: ods-nexus-auth - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: - {} - script: | - supply-sonar-project-properties-default --working-dir=$(params.working-dir) - echo -n "" > $(results.build-reused-from-location.path) - cache_build_key=npm - if copy-build-if-cached \ - --cache-build=$(params.cache-build) \ - --cache-build-key="$cache_build_key" \ - --build-extra-inputs=$(params.build-extra-inputs) \ - --cached-outputs=$(params.cached-outputs) \ - --cache-location-used-path=$(results.build-reused-from-location.path) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} ; then - exit 0 - fi - # Default build script is build/package/scripts/build-npm.sh. - set +e - $(params.build-script) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} - build_exit=$? - set -e - copy-artifacts --debug=${DEBUG} - if [ $build_exit -ne 0 ]; then - exit $build_exit - fi - cache-build \ - --cache-build=$(params.cache-build) \ - --cache-build-key="$cache_build_key" \ - --build-extra-inputs=$(params.build-extra-inputs) \ - --cached-outputs=$(params.cached-outputs) \ - --cache-location-used-path=$(results.build-reused-from-location.path) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - - name: scan-with-sonar - # Image is built from build/package/Dockerfile.sonar. 
- image: 'ghcr.io/opendevstack/ods-pipeline/ods-sonar:0.13.2' - env: - - name: HOME - value: '/tekton/home' - - name: SONAR_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-sonar - - name: SONAR_EDITION - valueFrom: - configMapKeyRef: - key: edition - name: ods-sonar - - name: SONAR_AUTH_TOKEN - valueFrom: - secretKeyRef: - key: password - name: ods-sonar-auth - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: {} - script: | - if [ "$(params.sonar-skip)" = "true" ]; then - echo "Skipping SonarQube analysis" - else - mkdir -p .ods/artifacts/sonarqube-analysis - - truststore="${JAVA_HOME}/lib/security/cacerts" - if [ -f /etc/ssl/certs/private-cert.pem ]; then - truststore="$(pwd)/.ods-cache/truststore/cacerts" - configure-truststore --dest-store "${truststore}" - fi - # sonar is built from cmd/sonar/main.go. - sonar \ - -working-dir=$(params.working-dir) \ - -quality-gate=$(params.sonar-quality-gate) \ - -truststore "${truststore}" - fi - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - volumes: - - name: private-cert - secret: - secretName: ods-private-cert - optional: true - workspaces: - - name: source diff --git a/tasks/ods-build-python.yaml b/tasks/ods-build-python.yaml deleted file mode 100644 index fd0e69c3..00000000 --- a/tasks/ods-build-python.yaml +++ /dev/null @@ -1,189 +0,0 @@ - -# Source: tasks/templates/task-ods-build-python.yaml -apiVersion: tekton.dev/v1beta1 -kind: 'Task' -metadata: - name: 'ods-build-python' - annotations: - "helm.sh/resource-policy": keep -spec: - description: | - Builds Python applications. - - See https://github.com/opendevstack/ods-pipeline/blob/v0.13.2/docs/tasks/ods-build-python.adoc - params: - - name: working-dir - description: | - Working directory. The path must be relative to the root of the repository, - without leading `./` and trailing `/`. 
- type: string - default: "." - - name: cache-build - description: >- - If enabled tasks uses or populates cache with the output dir contents (and artifacts) so that - a build can be skipped if the `working-dir` contents did not change. - You must set this to `"false"` if the build can be affected by files outside `working-dir`. See ADR caching-build-tasks for more details and workarounds. - type: string - default: "true" - - name: build-extra-inputs - description: >- - List of build source directories (as colon separated string) which in addition working-dir influence the build. - These directories are relative to the repository root. - If the contents in these directories change the cache is invalidated so that the build task will rebuild from scratch. - type: string - default: "" - - name: max-line-length - description: Maximum line length. - type: string - default: "120" - - name: pre-test-script - description: Script to execute before running tests, relative to the working directory. - type: string - default: "" - - name: build-script - description: >- - Build script to execute. The - link:https://github.com/opendevstack/ods-pipeline/blob/master/build/package/scripts/build-python.sh[default script] - is located in the container image. If you specify a relative path - instead, it will be resolved from the workspace. See the task definition - for details how the build script is invoked. - type: string - default: "/usr/local/bin/build-python" - - name: sonar-quality-gate - description: Whether quality gate needs to pass. - type: string - default: "false" - - name: sonar-skip - description: Whether to skip the SonarQube analysis or not. - type: string - default: "false" - results: - - description: The cache location that the build task used. If caching is not enabled this will be an empty string. - name: build-reused-from-location - steps: - - name: build-python - # Image is built from build/package/Dockerfile.python-toolset. 
- image: 'ghcr.io/opendevstack/ods-pipeline/ods-python-toolset:0.13.2' - env: - - name: HOME - value: '/tekton/home' - - name: CI - value: "true" - - name: NEXUS_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-nexus - - name: NEXUS_USERNAME - valueFrom: - secretKeyRef: - key: username - name: ods-nexus-auth - - name: NEXUS_PASSWORD - valueFrom: - secretKeyRef: - key: password - name: ods-nexus-auth - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: - {} - script: | - supply-sonar-project-properties-default --working-dir=$(params.working-dir) - echo -n "" > $(results.build-reused-from-location.path) - cache_build_key=python - if copy-build-if-cached \ - --cache-build=$(params.cache-build) \ - --cache-build-key="$cache_build_key" \ - --build-extra-inputs=$(params.build-extra-inputs) \ - --cache-location-used-path=$(results.build-reused-from-location.path) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} ; then - exit 0 - fi - # Default build script is build/package/scripts/build-python.sh. - set +e - $(params.build-script) \ - --working-dir=$(params.working-dir) \ - --max-line-length=$(params.max-line-length) \ - --pre-test-script=$(params.pre-test-script) \ - --debug=${DEBUG} - build_exit=$? - set -e - copy-artifacts --debug=${DEBUG} - if [ $build_exit -ne 0 ]; then - exit $build_exit - fi - cache-build \ - --cache-build=$(params.cache-build) \ - --cache-build-key="$cache_build_key" \ - --build-extra-inputs=$(params.build-extra-inputs) \ - --cache-location-used-path=$(results.build-reused-from-location.path) \ - --working-dir=$(params.working-dir) \ - --debug=${DEBUG} - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - - name: scan-with-sonar - # Image is built from build/package/Dockerfile.sonar. 
- image: 'ghcr.io/opendevstack/ods-pipeline/ods-sonar:0.13.2' - env: - - name: HOME - value: '/tekton/home' - - name: SONAR_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-sonar - - name: SONAR_EDITION - valueFrom: - configMapKeyRef: - key: edition - name: ods-sonar - - name: SONAR_AUTH_TOKEN - valueFrom: - secretKeyRef: - key: password - name: ods-sonar-auth - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: {} - script: | - if [ "$(params.sonar-skip)" = "true" ]; then - echo "Skipping SonarQube analysis" - else - mkdir -p .ods/artifacts/sonarqube-analysis - - truststore="${JAVA_HOME}/lib/security/cacerts" - if [ -f /etc/ssl/certs/private-cert.pem ]; then - truststore="$(pwd)/.ods-cache/truststore/cacerts" - configure-truststore --dest-store "${truststore}" - fi - # sonar is built from cmd/sonar/main.go. - sonar \ - -working-dir=$(params.working-dir) \ - -quality-gate=$(params.sonar-quality-gate) \ - -truststore "${truststore}" - fi - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - volumes: - - name: private-cert - secret: - secretName: ods-private-cert - optional: true - workspaces: - - name: source diff --git a/tasks/ods-deploy-helm.yaml b/tasks/ods-deploy-helm.yaml deleted file mode 100644 index 8fe8c4ce..00000000 --- a/tasks/ods-deploy-helm.yaml +++ /dev/null @@ -1,115 +0,0 @@ - -# Source: tasks/templates/task-ods-deploy-helm.yaml -apiVersion: tekton.dev/v1beta1 -kind: 'Task' -metadata: - name: 'ods-deploy-helm' - annotations: - "helm.sh/resource-policy": keep -spec: - description: | - Deploy Helm charts. - - See https://github.com/opendevstack/ods-pipeline/blob/v0.13.2/docs/tasks/ods-deploy-helm.adoc - params: - - name: chart-dir - description: Helm chart directory that will be deployed - type: string - default: ./chart - - name: release-name - description: | - The Helm release name. 
If empty, the release name is simply the name of the chart. - - When this task is used in a repository which defines subcharts, and the parameter is not set, - then the task sets `.fullnameOverride` equal to the respective - subcomponent to avoid resources being prefixed with the umbrella repository - component name (assuming your resources are named using the `chart.fullname` - helper). However, if the parameter is specified, `.fullnameOverride` is not set. - As a result the `chart.fullname` helper prefixes resources with the specfied - `release-name` unless the chart's name contains the `release-name`. - type: string - default: '' - - name: diff-flags - description: Flags to pass to `helm diff upgrade` in addition to the ones specified via the `upgrade-flags` parameter. Note that the flags `--detailed-exitcode` and `--no-color` are automatically set and cannot be removed. If flags unknown to `helm diff` are passed, they are ignored. - type: string - default: '--three-way-merge' - - name: upgrade-flags - description: Flags to pass to `helm upgrade`. - type: string - default: '--install --wait' - - name: age-key-secret - description: | - Name of the secret containing the age key to use for helm-secrets. - If the secret exists, it is expected to have a field named `key.txt` with the age secret key in its content. - type: string - default: 'helm-secrets-age-key' - - name: api-server - description: | - API server of the target cluster, including scheme. - Only required if the target namespace is outside the cluster in which - the pipeline runs. - type: string - default: '' - - name: api-credentials-secret - description: | - Name of the Secret resource holding the token of a serviceaccount (in field `token`). - Only required when `api-server` is set. - type: string - default: '' - - name: namespace - description: | - Target K8s namespace (or OpenShift project) to deploy into. - If empty, the task will be a no-op. 
- type: string - default: '' - - name: registry-host - description: | - Hostname of the target registry to push images to. - If not given, the registy host of the source image is used. - type: string - default: '' - - name: diff-only - description: | - If set to true, the task will only perform a diff, and then stop. - No images will be promoted or upgrades attempted. - type: string - default: 'false' - steps: - - name: helm-upgrade-from-repo - # Image is built from build/package/Dockerfile.helm. - image: 'ghcr.io/opendevstack/ods-pipeline/ods-helm:0.13.2' - env: - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - - name: HOME - value: '/tekton/home' - resources: {} - script: | - # deploy-helm is built from /cmd/deploy-helm/main.go. - deploy-helm \ - -chart-dir=$(params.chart-dir) \ - -namespace=$(params.namespace) \ - -release-name=$(params.release-name) \ - -diff-flags="$(params.diff-flags)" \ - -upgrade-flags="$(params.upgrade-flags)" \ - -age-key-secret=$(params.age-key-secret) \ - -api-server=$(params.api-server) \ - -api-credentials-secret=$(params.api-credentials-secret) \ - -registry-host=$(params.registry-host) \ - -diff-only=$(params.diff-only) - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - volumes: - - name: private-cert - secret: - secretName: ods-private-cert - optional: true - workspaces: - - name: source diff --git a/tasks/ods-finish.yaml b/tasks/ods-finish.yaml deleted file mode 100644 index 2059e8e4..00000000 --- a/tasks/ods-finish.yaml +++ /dev/null @@ -1,87 +0,0 @@ - -# Source: tasks/templates/task-ods-finish.yaml -apiVersion: tekton.dev/v1beta1 -kind: 'Task' -metadata: - name: 'ods-finish' - annotations: - "helm.sh/resource-policy": keep -spec: - description: | - Finishes the pipeline run. 
- - See https://github.com/opendevstack/ods-pipeline/blob/v0.13.2/docs/tasks/ods-finish.adoc - params: - - name: pipeline-run-name - description: Name of pipeline run. - type: string - - name: aggregate-tasks-status - description: Aggregate status of all tasks. - default: 'None' - - name: artifact-target - description: Artifact target respository - default: '' - steps: - - name: ods-finish - # Image is built from build/package/Dockerfile.finish. - image: 'ghcr.io/opendevstack/ods-pipeline/ods-finish:0.13.2' - env: - - name: HOME - value: '/tekton/home' - - name: NEXUS_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-nexus - - name: NEXUS_USERNAME - valueFrom: - secretKeyRef: - key: username - name: ods-nexus-auth - - name: NEXUS_PASSWORD - valueFrom: - secretKeyRef: - key: password - name: ods-nexus-auth - - name: BITBUCKET_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-bitbucket - - name: BITBUCKET_ACCESS_TOKEN - valueFrom: - secretKeyRef: - key: password - name: ods-bitbucket-auth - - name: CONSOLE_URL - valueFrom: - configMapKeyRef: - key: consoleUrl - name: ods-cluster - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: {} - script: | - - # ods-finish is built from cmd/finish/main.go. 
- ods-finish \ - -pipeline-run-name=$(params.pipeline-run-name) \ - -aggregate-tasks-status=$(params.aggregate-tasks-status) \ - -artifact-target=$(params.artifact-target) - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - volumes: - - name: private-cert - secret: - secretName: ods-private-cert - optional: true - workspaces: - - description: The git repo will be present onto the volume backing this workspace - name: source diff --git a/tasks/ods-package-image.yaml b/tasks/ods-package-image.yaml deleted file mode 100644 index ec569f22..00000000 --- a/tasks/ods-package-image.yaml +++ /dev/null @@ -1,192 +0,0 @@ - -# Source: tasks/templates/task-ods-package-image.yaml -apiVersion: tekton.dev/v1beta1 -kind: 'Task' -metadata: - name: 'ods-package-image' - annotations: - "helm.sh/resource-policy": keep -spec: - description: | - Packages applications into container images using buildah. - - See https://github.com/opendevstack/ods-pipeline/blob/v0.13.2/docs/tasks/ods-package-image.adoc - params: - - name: registry - description: Image registry to push image to. - type: string - default: 'image-registry.openshift-image-registry.svc:5000' - - name: image-stream - description: Reference of the image stream buildah will produce. If not set, the value of `.ods/component` is used. - type: string - default: '' - - name: extra-tags - description: Additional image tags (e.g. 'latest dev') for pushed images. The primary tag is based on the commit sha. Only tags currently missing from the image will be added. - type: string # Wanted to use and array but ran into [Cannot refer array params in script #4912](https://github.com/tektoncd/pipeline/issues/4912) - default: '' - - name: storage-driver - description: Set buildah storage driver. - type: string - default: vfs - - name: dockerfile - description: Path to the Dockerfile to build (relative to `docker-dir`). 
- type: string - default: ./Dockerfile - - name: docker-dir - description: Path to the directory to use as Docker context. - type: string - default: '.' - - name: format - description: 'The format of the built container, `oci` or `docker`.' - type: string - default: oci - - name: buildah-build-extra-args - description: Extra parameters passed for the build command when building images (e.g. '--build-arg=firstArg=one --build-arg=secondArg=two'). - type: string - default: '' - - name: buildah-push-extra-args - description: Extra parameters passed for the push command when pushing images. - type: string - default: '' - - name: trivy-sbom-extra-args - description: Extra parameters passed for the trivy command to generate an SBOM. - type: string - default: '' - - name: aqua-gate - description: Whether the Aqua security scan needs to pass for the task to succeed. - type: string - default: "false" - results: - - description: Digest of the image just built. - name: image-digest - steps: - - name: package-image - # Image is built from build/package/Dockerfile.package-image. - image: 'ghcr.io/opendevstack/ods-pipeline/ods-package-image:0.13.2' - env: - - name: NEXUS_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-nexus - - name: NEXUS_USERNAME - valueFrom: - secretKeyRef: - key: username - name: ods-nexus-auth - - name: NEXUS_PASSWORD - valueFrom: - secretKeyRef: - key: password - name: ods-nexus-auth - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: {} - script: | - - # ods-package-image is built from cmd/package-image/main.go. 
- ods-package-image \ - -image-stream=$(params.image-stream) \ - -extra-tags=$(params.extra-tags) \ - -registry=$(params.registry) \ - -storage-driver=$(params.storage-driver) \ - -format=$(params.format) \ - -dockerfile=$(params.dockerfile) \ - -context-dir=$(params.docker-dir) \ - -buildah-build-extra-args=$(params.buildah-build-extra-args) \ - -buildah-push-extra-args=$(params.buildah-push-extra-args) \ - -trivy-sbom-extra-args=$(params.trivy-sbom-extra-args) - - # As this task does not run unter uid 1001, chown created artifacts - # to make them deletable by ods-start's cleanup procedure. - chown -R 1001:0 .ods/artifacts/image-digests .ods/artifacts/sboms - securityContext: - capabilities: - add: - - SETFCAP - volumeMounts: - - mountPath: /var/lib/containers - name: varlibcontainers - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - - name: aqua-scan - # Image is built from build/package/Dockerfile.aqua-scan. 
- image: 'ghcr.io/opendevstack/ods-pipeline/ods-aqua-scan:0.13.2' - env: - - name: HOME - value: '/tekton/home' - - name: BITBUCKET_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-bitbucket - - name: BITBUCKET_ACCESS_TOKEN - valueFrom: - secretKeyRef: - key: password - name: ods-bitbucket-auth - - name: AQUA_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-aqua - - name: AQUA_REGISTRY - valueFrom: - configMapKeyRef: - key: registry - name: ods-aqua - - name: AQUA_USERNAME - valueFrom: - secretKeyRef: - key: username - name: ods-aqua-auth - - name: AQUA_PASSWORD - valueFrom: - secretKeyRef: - key: password - name: ods-aqua-auth - - name: AQUA_SCANNER_URL - valueFrom: - secretKeyRef: - key: secret - name: ods-aqua-scanner-url - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: {} - script: | - if [ "${AQUA_SCANNER_URL:0:4}" != "http" ]; then - echo "Skipping Aqua scan" - else - download-aqua-scanner \ - --aqua-scanner-url=${AQUA_SCANNER_URL} \ - $(case ${DEBUG} in (true) printf -- '--debug'; esac) - - # ods-aqua-scan is built from cmd/aqua-scan/main.go. - ods-aqua-scan \ - -image-stream=$(params.image-stream) \ - -aqua-gate=$(params.aqua-gate) - fi - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - volumes: - - emptyDir: {} - name: varlibcontainers - - name: private-cert - secret: - secretName: ods-private-cert - optional: true - workspaces: - - name: source diff --git a/tasks/ods-start.yaml b/tasks/ods-start.yaml deleted file mode 100644 index 80b78ca7..00000000 --- a/tasks/ods-start.yaml +++ /dev/null @@ -1,160 +0,0 @@ - -# Source: tasks/templates/task-ods-start.yaml -apiVersion: tekton.dev/v1beta1 -kind: 'Task' -metadata: - name: 'ods-start' - annotations: - "helm.sh/resource-policy": keep -spec: - description: | - Starts the pipeline run. 
- - See https://github.com/opendevstack/ods-pipeline/blob/v0.13.2/docs/tasks/ods-start.adoc - params: - - name: url - description: Git URL to clone - type: string - - name: git-full-ref - description: 'Git revision to checkout (branch, tag, sha, ref, ...)' - type: string - default: '' - - name: submodules - description: Defines if the resource should initialize and fetch the submodules. - type: string - default: 'true' - - name: clone-depth - description: >- - Perform a shallow clone where only the most recent commit(s) will be - fetched. By default, a full clone is performed. Note that the parameter is of string type, - therefore the depth value must be quoted, e.g. `value: '1'`. - type: string - default: '' - - name: http-proxy - description: Git HTTP proxy server for non-SSL requests. - type: string - default: '' - - name: https-proxy - description: Git HTTPS proxy server for SSL requests. - type: string - default: '' - - name: no-proxy - description: Git no proxy - opt out of proxying HTTP/HTTPS requests. - type: string - default: '' - - name: project - description: >- - Name of the project to build. - The project is equal to the Bitbucket project of the repository to clone. - type: string - - name: pr-key - description: >- - Bitbucket pull request key. - Empty if there is no open PR for the specified Git branch. - type: string - default: '' - - name: pr-base - description: >- - Bitbucket pull request base branch. - Empty if there is no open PR for the specified Git branch. - type: string - default: '' - - name: pipeline-run-name - description: Name of pipeline run. - type: string - - name: cache-build-tasks-for-days - description: >- - Number of days build tasks are cached to enable build skipping. - A subsequent build reusing the cache resets the time for that cache location. 
- type: string - default: '7' - - name: artifact-source - description: Artifact source respository - type: string - default: '' - results: - - description: The commit SHA that was fetched by this task. - name: commit - - description: The URL that was fetched by this task. - name: url - steps: - - name: ods-start - # Image is built from build/package/Dockerfile.start. - image: 'ghcr.io/opendevstack/ods-pipeline/ods-start:0.13.2' - env: - - name: HOME - value: '/tekton/home' - - name: NEXUS_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-nexus - - name: NEXUS_USERNAME - valueFrom: - secretKeyRef: - key: username - name: ods-nexus-auth - - name: NEXUS_PASSWORD - valueFrom: - secretKeyRef: - key: password - name: ods-nexus-auth - - name: BITBUCKET_URL - valueFrom: - configMapKeyRef: - key: url - name: ods-bitbucket - - name: BITBUCKET_ACCESS_TOKEN - valueFrom: - secretKeyRef: - key: password - name: ods-bitbucket-auth - - name: CONSOLE_URL - valueFrom: - configMapKeyRef: - key: consoleUrl - name: ods-cluster - - name: DEBUG - valueFrom: - configMapKeyRef: - key: debug - name: ods-pipeline - resources: {} - script: | - if [ -f /etc/ssl/certs/private-cert.pem ]; then - cat /etc/pki/tls/certs/ca-bundle.crt /etc/ssl/certs/private-cert.pem > /tekton/home/git-cert.pem - git config --global http.sslCAInfo /tekton/home/git-cert.pem - fi - - # ods-start is built from cmd/start/main.go. 
- ods-start \ - -project=$(params.project) \ - -git-full-ref=$(params.git-full-ref) \ - -url=$(params.url) \ - -pr-key=$(params.pr-key) \ - -pr-base=$(params.pr-base) \ - -http-proxy=$(params.http-proxy) \ - -https-proxy=$(params.https-proxy) \ - -no-proxy=$(params.no-proxy) \ - -submodules=$(params.submodules) \ - -clone-depth=$(params.clone-depth) \ - -pipeline-run-name=$(params.pipeline-run-name) \ - -artifact-source=$(params.artifact-source) - - cp .ods/git-commit-sha $(results.commit.path) - - echo -n "$(params.url)" > $(results.url.path) - volumeMounts: - - mountPath: /etc/ssl/certs/private-cert.pem - name: private-cert - readOnly: true - subPath: tls.crt - workingDir: $(workspaces.source.path) - volumes: - - name: private-cert - secret: - secretName: ods-private-cert - optional: true - workspaces: - - description: The git repo will be cloned onto the volume backing this workspace - name: source diff --git a/test/e2e/common_test.go b/test/e2e/common_test.go new file mode 100644 index 00000000..d4926dfa --- /dev/null +++ b/test/e2e/common_test.go @@ -0,0 +1,7 @@ +package e2e + +import ( + "flag" +) + +var privateCertFlag = flag.Bool("private-cert", false, "Whether to run tests using a private cert") diff --git a/test/e2e/main_test.go b/test/e2e/main_test.go new file mode 100644 index 00000000..6960d2c1 --- /dev/null +++ b/test/e2e/main_test.go @@ -0,0 +1,118 @@ +package e2e + +import ( + "log" + "os" + "path/filepath" + "testing" + + "github.com/opendevstack/ods-pipeline/pkg/bitbucket" + ott "github.com/opendevstack/ods-pipeline/pkg/odstasktest" + "github.com/opendevstack/ods-pipeline/pkg/tasktesting" + ttr "github.com/opendevstack/ods-pipeline/pkg/tektontaskrun" + tekton "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/util/homedir" +) + +var ( + namespaceConfig *ttr.NamespaceConfig + rootPath = "../.." 
+) + +func TestMain(m *testing.M) { + os.Exit(testMain(m)) +} + +func testMain(m *testing.M) int { + cc, err := ttr.StartKinDCluster( + ttr.LoadImage(ttr.ImageBuildConfig{ + Dockerfile: "build/images/Dockerfile.start", + ContextDir: rootPath, + }), + ttr.LoadImage(ttr.ImageBuildConfig{ + Dockerfile: "build/images/Dockerfile.finish", + ContextDir: rootPath, + }), + ttr.LoadImage(ttr.ImageBuildConfig{ + Dockerfile: "build/images/Dockerfile.pipeline-manager", + ContextDir: rootPath, + }), + ) + if err != nil { + log.Fatal("Could not start KinD cluster: ", err) + } + nc, cleanup, err := ttr.SetupTempNamespace( + cc, + ott.StartBitbucket(), + ott.StartNexus(), + ott.InstallODSPipeline(), + ) + if err != nil { + log.Fatal("Could not setup temporary namespace: ", err) + } + defer cleanup() + namespaceConfig = nc + return m.Run() +} + +func newK8sClient(t *testing.T) *kubernetes.Clientset { + home := homedir.HomeDir() + kubeconfig := filepath.Join(home, ".kube", "config") + config, err := clientcmd.BuildConfigFromFlags("", kubeconfig) + if err != nil { + t.Fatal(err) + } + kubernetesClientset, err := kubernetes.NewForConfig(config) + if err != nil { + t.Fatal(err) + } + return kubernetesClientset +} + +func newTektonClient(t *testing.T) *tekton.Clientset { + home := homedir.HomeDir() + kubeconfig := filepath.Join(home, ".kube", "config") + config, err := clientcmd.BuildConfigFromFlags("", kubeconfig) + if err != nil { + t.Fatal(err) + } + tektonClientSet, err := tekton.NewForConfig(config) + if err != nil { + t.Fatal(err) + } + return tektonClientSet +} + +// initBitbucketRepo initialises a Git repository inside the given workspace, +// then commits and pushes to Bitbucket. +// The workspace will also be setup with an ODS context directory in .ods +// with the given namespace. 
+func initBitbucketRepo(t *testing.T, k8sClient kubernetes.Interface, namespace string) ttr.WorkspaceOpt { + return func(c *ttr.WorkspaceConfig) error { + _ = tasktesting.SetupBitbucketRepo(t, k8sClient, namespace, c.Dir, tasktesting.BitbucketProjectKey, false) + return nil + } +} + +// withBitbucketSourceWorkspace configures the task run with a workspace named +// "source", mapped to the directory sourced from sourceDir. The directory is +// initialised as a Git repository with an ODS context with the given namespace. +func withBitbucketSourceWorkspace(t *testing.T, sourceDir string, k8sClient kubernetes.Interface, namespace string, opts ...ttr.WorkspaceOpt) ttr.TaskRunOpt { + return ott.WithSourceWorkspace( + t, sourceDir, + append([]ttr.WorkspaceOpt{initBitbucketRepo(t, k8sClient, namespace)}, opts...)..., + ) +} + +func checkBuildStatus(t *testing.T, c *bitbucket.Client, gitCommit, wantBuildStatus string) { + buildStatusPage, err := c.BuildStatusList(gitCommit) + buildStatus := buildStatusPage.Values[0] + if err != nil { + t.Fatal(err) + } + if buildStatus.State != wantBuildStatus { + t.Fatalf("Got: %s, want: %s", buildStatus.State, wantBuildStatus) + } +} diff --git a/test/e2e/e2e_test.go b/test/e2e/pipeline_run_test.go similarity index 63% rename from test/e2e/e2e_test.go rename to test/e2e/pipeline_run_test.go index 34434e0e..c6bbf36f 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/pipeline_run_test.go @@ -3,8 +3,8 @@ package e2e import ( "context" "errors" - "flag" "fmt" + "log" "os" "os/exec" "path/filepath" @@ -22,36 +22,17 @@ import ( tekton "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" k8s "k8s.io/client-go/kubernetes" "knative.dev/pkg/apis" ) -var outsideKindFlag = flag.Bool("outside-kind", false, "Whether to continue if not in KinD cluster") -var privateCertFlag = flag.Bool("private-cert", false, "Whether to run tests 
using a private cert") - -func TestE2E(t *testing.T) { - tasktesting.CheckCluster(t, *outsideKindFlag) - tasktesting.CheckServices(t, []tasktesting.Service{ - tasktesting.Bitbucket, tasktesting.Nexus, - }) - - // Setup namespace to run tests in. - c, ns := tasktesting.Setup(t, - tasktesting.SetupOpts{ - SourceDir: tasktesting.StorageSourceDir, - StorageCapacity: tasktesting.StorageCapacity, - StorageClassName: tasktesting.StorageClassName, - }, - ) - - // Cleanup namespace at the end. - tasktesting.CleanupOnInterrupt(func() { tasktesting.TearDown(t, c, ns) }, t.Logf) - defer tasktesting.TearDown(t, c, ns) - +func TestPipelineRun(t *testing.T) { + k8sClient := newK8sClient(t) // Create NodePort service which Bitbucket can post its webhook to. var nodePort int32 = 30950 - _, err := kubernetes.CreateNodePortService( - c.KubernetesClientSet, + _, err := createNodePortService( + k8sClient, "ods-pm-nodeport", // NodePort for ODS Pipeline Manager map[string]string{ "app.kubernetes.io/name": "ods-pipeline", @@ -59,7 +40,7 @@ func TestE2E(t *testing.T) { }, nodePort, 8080, - ns, + namespaceConfig.Name, ) if err != nil { t.Fatal(err) @@ -76,7 +57,7 @@ func TestE2E(t *testing.T) { } t.Logf("Workspace is in %s", wsDir) odsContext := tasktesting.SetupBitbucketRepo( - t, c.KubernetesClientSet, ns, wsDir, tasktesting.BitbucketProjectKey, *privateCertFlag, + t, k8sClient, namespaceConfig.Name, wsDir, tasktesting.BitbucketProjectKey, *privateCertFlag, ) // The webhook URL needs to be the address of the KinD control plane on the node port. @@ -89,12 +70,12 @@ func TestE2E(t *testing.T) { // Create webhook in Bitbucket. 
webhookSecret, err := kubernetes.GetSecretKey( - c.KubernetesClientSet, ns, "ods-bitbucket-webhook", "secret", + k8sClient, namespaceConfig.Name, "ods-bitbucket-webhook", "secret", ) if err != nil { t.Fatalf("could not get Bitbucket webhook secret: %s", err) } - bitbucketClient := tasktesting.BitbucketClientOrFatal(t, c.KubernetesClientSet, ns, *privateCertFlag) + bitbucketClient := tasktesting.BitbucketClientOrFatal(t, k8sClient, namespaceConfig.Name, *privateCertFlag) _, err = bitbucketClient.WebhookCreate( odsContext.Project, odsContext.Repository, @@ -113,14 +94,20 @@ func TestE2E(t *testing.T) { filename := "ods.yaml" fileContent := ` pipelines: - - tasks: - - name: package-image - taskRef: - kind: Task - name: ods-package-image - workspaces: - - name: source - workspace: shared-workspace` +- tasks: + - name: hello-world + taskSpec: + steps: + - name: message + image: busybox + script: | + echo "hello world" + workingDir: $(workspaces.source.path) + workspaces: + - name: source + workspaces: + - name: source + workspace: shared-workspace` err = os.WriteFile(filepath.Join(wsDir, filename), []byte(fileContent), 0644) if err != nil { @@ -129,28 +116,29 @@ pipelines: requiredService := "ods-pipeline" serviceTimeout := time.Minute t.Logf("Waiting %s for service %s to have ready pods ...\n", serviceTimeout, requiredService) - err = waitForServiceToBeReady(t, c.KubernetesClientSet, ns, requiredService, serviceTimeout) + err = waitForServiceToBeReady(t, k8sClient, namespaceConfig.Name, requiredService, serviceTimeout) if err != nil { t.Fatal(err) } t.Log("Pushing file to Bitbucket ...") - tasktesting.PushFileToBitbucketOrFatal(t, c.KubernetesClientSet, ns, wsDir, "master:feature/test-branch", "ods.yaml") + tasktesting.PushFileToBitbucketOrFatal(t, k8sClient, namespaceConfig.Name, wsDir, "master:feature/test-branch", "ods.yaml") triggerTimeout := time.Minute + tektonClient := newTektonClient(t) t.Logf("Waiting %s for pipeline run to be triggered ...", triggerTimeout) - 
pr, err := waitForPipelineRunToBeTriggered(c.TektonClientSet, ns, triggerTimeout) + pr, err := waitForPipelineRunToBeTriggered(tektonClient, namespaceConfig.Name, triggerTimeout) if err != nil { t.Fatal(err) } t.Logf("Triggered pipeline run %s\n", pr.Name) runTimeout := 3 * time.Minute t.Logf("Waiting %s for pipeline run to succeed ...", runTimeout) - gotReason, err := waitForPipelineRunToBeDone(c.TektonClientSet, ns, pr.Name, runTimeout) + gotReason, err := waitForPipelineRunToBeDone(tektonClient, namespaceConfig.Name, pr.Name, runTimeout) if err != nil { t.Fatal(err) } if gotReason != "Succeeded" { t.Logf("Want pipeline run reason to be 'Succeeded' but got '%s'", gotReason) - logs, err := pipelineRunLogs(ns, pr.Name) + logs, err := pipelineRunLogs(namespaceConfig.Name, pr.Name) if err != nil { t.Fatal(err) } @@ -173,7 +161,7 @@ func waitForServiceToBeReady(t *testing.T, clientset *k8s.Clientset, ns, name st svc = s } time.Sleep(2 * time.Second) - ready, reason, err := kubernetes.ServiceHasReadyPods(clientset, svc) + ready, reason, err := serviceHasReadyPods(clientset, svc) if err != nil { return err } @@ -238,7 +226,7 @@ func waitForPipelineRunToBeDone(clientset *tekton.Clientset, ns, pr string, time func kindControlPlaneIP() (string, error) { stdout, stderr, err := command.RunBuffered( "docker", - []string{"inspect", "-f", "{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}", "kind-control-plane"}, + []string{"inspect", "-f", "{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}", tektontaskrun.KinDName + "-control-plane"}, ) if err != nil { return "", fmt.Errorf("could not get IP address of KinD control plane: %s, err: %s", string(stderr), err) @@ -264,3 +252,68 @@ func tknInstalled() bool { _, err := exec.LookPath("tkn") return err == nil } + +func createNodePortService(clientset k8s.Interface, name string, selectors map[string]string, port, targetPort int32, namespace string) (*corev1.Service, error) { + log.Printf("Create node port service %s", name) 
+ svc, err := clientset.CoreV1().Services(namespace).Create(context.TODO(), + &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{Name: name}, + Spec: corev1.ServiceSpec{ + ExternalTrafficPolicy: corev1.ServiceExternalTrafficPolicyTypeCluster, + Ports: []corev1.ServicePort{ + { + Name: fmt.Sprintf("%d-%d", port, targetPort), + NodePort: port, + Port: port, + Protocol: corev1.ProtocolTCP, + TargetPort: intstr.FromInt(int(targetPort)), + }, + }, + Selector: selectors, + SessionAffinity: corev1.ServiceAffinityNone, + Type: corev1.ServiceTypeNodePort, + }, + }, metav1.CreateOptions{}) + + return svc, err +} + +// serviceHasReadyPods returns false if no pod is assigned to given service +// or if one or more pods are not "Running" +// or one or more of any pods containers are not "ready". +func serviceHasReadyPods(clientset *k8s.Clientset, svc *corev1.Service) (bool, string, error) { + podList, err := servicePods(clientset, svc) + if err != nil { + return false, "error", err + } + for _, pod := range podList.Items { + phase := pod.Status.Phase + if phase != "Running" { + return false, fmt.Sprintf("pod %s is in phase %+v", pod.Name, phase), nil + } + for _, containerStatus := range pod.Status.ContainerStatuses { + if !containerStatus.Ready { + return false, fmt.Sprintf("container %s in pod %s is not ready", containerStatus.Name, pod.Name), nil + } + } + } + return true, "ok", nil +} + +func servicePods(clientset *k8s.Clientset, svc *corev1.Service) (*corev1.PodList, error) { + podClient := clientset.CoreV1().Pods(svc.Namespace) + selector := []string{} + for key, value := range svc.Spec.Selector { + selector = append(selector, fmt.Sprintf("%s=%s", key, value)) + } + pods, err := podClient.List( + context.TODO(), + metav1.ListOptions{ + LabelSelector: strings.Join(selector, ","), + }, + ) + if err != nil { + return nil, err + } + return pods.DeepCopy(), nil +} diff --git a/test/e2e/task_finish_test.go b/test/e2e/task_finish_test.go new file mode 100644 index 
00000000..e65c2c2b --- /dev/null +++ b/test/e2e/task_finish_test.go @@ -0,0 +1,198 @@ +package e2e + +import ( + "bytes" + "fmt" + "log" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/opendevstack/ods-pipeline/pkg/bitbucket" + "github.com/opendevstack/ods-pipeline/pkg/nexus" + "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" + "github.com/opendevstack/ods-pipeline/pkg/tasktesting" + tekton "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + "k8s.io/client-go/kubernetes" + + ott "github.com/opendevstack/ods-pipeline/pkg/odstasktest" + ttr "github.com/opendevstack/ods-pipeline/pkg/tektontaskrun" +) + +func runFinishTask(opts ...ttr.TaskRunOpt) error { + return ttr.RunTask(append([]ttr.TaskRunOpt{ + ttr.InNamespace(namespaceConfig.Name), + ttr.UsingTask("ods-pipeline-finish"), + }, opts...)...) +} + +func TestFinishTaskSetsBitbucketStatusToFailed(t *testing.T) { + k8sClient := newK8sClient(t) + if err := runFinishTask( + withBitbucketSourceWorkspace(t, "../testdata/workspaces/hello-world-app-with-artifacts", k8sClient, namespaceConfig.Name), + ttr.WithStringParams(map[string]string{ + "pipeline-run-name": "foo", + "aggregate-tasks-status": "None", + }), + ttr.AfterRun(func(config *ttr.TaskRunConfig, run *tekton.TaskRun, logs bytes.Buffer) { + _, odsContext := ott.GetSourceWorkspaceContext(t, config) + bitbucketClient := tasktesting.BitbucketClientOrFatal(t, k8sClient, namespaceConfig.Name, *privateCertFlag) + checkBuildStatus(t, bitbucketClient, odsContext.GitCommitSHA, bitbucket.BuildStatusFailed) + }), + ); err != nil { + t.Fatal(err) + } +} + +func TestFinishTaskSetsBitbucketStatusToSuccessfulAndUploadsArtifactsToNexus(t *testing.T) { + k8sClient := newK8sClient(t) + if err := runFinishTask( + ott.WithSourceWorkspace( + t, + "../testdata/workspaces/hello-world-app-with-artifacts", + func(c *ttr.WorkspaceConfig) error { + odsContext := tasktesting.SetupBitbucketRepo( + t, k8sClient, namespaceConfig.Name, c.Dir, 
tasktesting.BitbucketProjectKey, *privateCertFlag,
+ )
+ // Pretend there is already a coverage report in Nexus.
+ // This assures the safeguard is working to avoid duplicate upload.
+ t.Log("Uploading coverage artifact to Nexus and writing manifest")
+ nexusClient := tasktesting.NexusClientOrFatal(t, k8sClient, namespaceConfig.Name, *privateCertFlag)
+ if _, err := nexusClient.Upload(
+ nexus.TestTemporaryRepository,
+ pipelinectxt.ArtifactGroup(odsContext, pipelinectxt.CodeCoveragesDir),
+ filepath.Join(c.Dir, pipelinectxt.CodeCoveragesPath, "coverage.out"),
+ ); err != nil {
+ t.Fatal(err)
+ }
+ am := pipelinectxt.NewArtifactsManifest(
+ nexus.TestTemporaryRepository,
+ pipelinectxt.ArtifactInfo{
+ Directory: pipelinectxt.CodeCoveragesDir,
+ Name: "coverage.out",
+ },
+ )
+ if err := pipelinectxt.WriteJsonArtifact(
+ am,
+ filepath.Join(c.Dir, pipelinectxt.ArtifactsPath),
+ pipelinectxt.ArtifactsManifestFilename,
+ ); err != nil {
+ t.Fatal(err)
+ }
+ return nil
+ },
+ ),
+ ttr.WithStringParams(map[string]string{
+ "pipeline-run-name": "foo",
+ "aggregate-tasks-status": "Succeeded",
+ "artifact-target": nexus.TestTemporaryRepository,
+ }),
+ ttr.AfterRun(func(config *ttr.TaskRunConfig, run *tekton.TaskRun, logs bytes.Buffer) {
+ _, odsContext := ott.GetSourceWorkspaceContext(t, config)
+ bitbucketClient := tasktesting.BitbucketClientOrFatal(t, k8sClient, namespaceConfig.Name, *privateCertFlag)
+ checkBuildStatus(t, bitbucketClient, odsContext.GitCommitSHA, bitbucket.BuildStatusSuccessful)
+ checkArtifactsAreInNexus(t, k8sClient, odsContext, nexus.TestTemporaryRepository)
+
+ wantLogMsg := "Artifact \"coverage.out\" is already present in Nexus repository"
+ if !strings.Contains(logs.String(), wantLogMsg) {
+ t.Fatalf("Want:\n%s\n\nGot:\n%s", wantLogMsg, logs.String())
+ }
+ }),
+ ); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestFinishTaskStopsGracefullyWhenContextCannotBeRead(t *testing.T) {
+ if err := runFinishTask(
+ ott.WithSourceWorkspace(t, 
"../testdata/workspaces/empty"), + ttr.WithStringParams(map[string]string{ + "pipeline-run-name": "foo", + "aggregate-tasks-status": "None", + }), + ttr.ExpectFailure(), + ttr.AfterRun(func(config *ttr.TaskRunConfig, run *tekton.TaskRun, logs bytes.Buffer) { + want := "Unable to continue as pipeline context cannot be read" + if !strings.Contains(logs.String(), want) { + t.Fatalf("Want:\n%s\n\nGot:\n%s", want, logs.String()) + } + }), + ); err != nil { + t.Fatal(err) + } +} + +func checkArtifactsAreInNexus(t *testing.T, k8sClient kubernetes.Interface, odsContext *pipelinectxt.ODSContext, targetRepository string) { + + nexusClient := tasktesting.NexusClientOrFatal(t, k8sClient, namespaceConfig.Name, *privateCertFlag) + + // List of expected artifacts to have been uploaded to Nexus + artifactsMap := map[string][]string{ + pipelinectxt.XUnitReportsDir: {"report.xml"}, + // exclude coverage as we pretend it has been uploaded earlier already + // pipelinectxt.CodeCoveragesDir: {"coverage.out"}, + pipelinectxt.SonarAnalysisDir: {"analysis-report.md", "issues-report.csv"}, + } + + for artifactsSubDir, files := range artifactsMap { + + filesCountInSubDir := len(artifactsMap[artifactsSubDir]) + + // e.g: "/ODSPIPELINETEST/workspace-190880007/935e5229b084dd60d44a5eddd2d023720ec153c1/xunit-reports" + group := pipelinectxt.ArtifactGroup(odsContext, artifactsSubDir) + + // The test is so fast that, when we reach this line, the artifacts could still being uploaded to Nexus + artifactURLs := waitForArtifacts(t, nexusClient, targetRepository, group, filesCountInSubDir, 5*time.Second) + if len(artifactURLs) != filesCountInSubDir { + t.Fatalf("Got: %d artifacts in subdir %s, want: %d.", len(artifactURLs), artifactsMap[artifactsSubDir], filesCountInSubDir) + } + + for _, file := range files { + + // e.g. 
"http://localhost:8081/repository/ods-pipelines/ODSPIPELINETEST/workspace-866704509/b1415e831b4f5b24612abf24499663ddbff6babb/xunit-reports/report.xml" + // note that the "group" value already has a leading slash! + url := fmt.Sprintf("%s/repository/%s%s/%s", nexusClient.URL(), targetRepository, group, file) + + if !contains(artifactURLs, url) { + t.Fatalf("Artifact %s with URL %+v not found in Nexus under any of the following URLs: %v", file, url, artifactURLs) + } + } + + } +} + +func waitForArtifacts(t *testing.T, nexusClient *nexus.Client, targetRepository, group string, expectedArtifactsCount int, timeout time.Duration) []string { + start := time.Now().UTC() + elapsed := time.Since(start) + artifactURLs := []string{} + + for elapsed < timeout { + artifactURLs, err := nexusClient.Search(targetRepository, group) + if err != nil { + t.Fatal(err) + } + + if len(artifactURLs) == expectedArtifactsCount { + return artifactURLs + } + + log.Printf("Artifacts are not yet available in Nexus...\n") + time.Sleep(1 * time.Second) + + elapsed = time.Since(start) + } + + log.Printf("Time out reached.\n") + return artifactURLs +} + +// contains checks if a string is present in a slice +func contains(s []string, str string) bool { + for _, v := range s { + if v == str { + return true + } + } + return false +} diff --git a/test/e2e/task_start_test.go b/test/e2e/task_start_test.go new file mode 100644 index 00000000..af4ee55f --- /dev/null +++ b/test/e2e/task_start_test.go @@ -0,0 +1,335 @@ +package e2e + +import ( + "bytes" + "crypto/sha256" + "fmt" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/opendevstack/ods-pipeline/internal/directory" + "github.com/opendevstack/ods-pipeline/internal/projectpath" + "github.com/opendevstack/ods-pipeline/pkg/bitbucket" + "github.com/opendevstack/ods-pipeline/pkg/config" + "github.com/opendevstack/ods-pipeline/pkg/nexus" + "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" + 
"github.com/opendevstack/ods-pipeline/pkg/tasktesting" + tekton "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/yaml" + + ott "github.com/opendevstack/ods-pipeline/pkg/odstasktest" + ttr "github.com/opendevstack/ods-pipeline/pkg/tektontaskrun" +) + +func runStartTask(opts ...ttr.TaskRunOpt) error { + return ttr.RunTask(append([]ttr.TaskRunOpt{ + ttr.InNamespace(namespaceConfig.Name), + ttr.UsingTask("ods-pipeline-start"), + }, opts...)...) +} + +func TestStartTaskClonesRepoAtBranch(t *testing.T) { + k8sClient := newK8sClient(t) + if err := runStartTask( + withBitbucketSourceWorkspace(t, "../testdata/workspaces/hello-world-app", k8sClient, namespaceConfig.Name), + func(c *ttr.TaskRunConfig) error { + c.Params = append(c.Params, ttr.TektonParamsFromStringParams(map[string]string{ + "url": bitbucketURLForWorkspace(c.WorkspaceConfigs["source"]), + "git-full-ref": "refs/heads/master", + "project": tasktesting.BitbucketProjectKey, + "pipeline-run-name": "foo", + })...) 
+ return nil + }, + ttr.AfterRun(func(config *ttr.TaskRunConfig, run *tekton.TaskRun, logs bytes.Buffer) { + wsDir, odsContext := ott.GetSourceWorkspaceContext(t, config) + + checkODSContext(t, wsDir, odsContext) + checkFilesExist(t, wsDir, filepath.Join(pipelinectxt.ArtifactsPath, pipelinectxt.ArtifactsManifestFilename)) + + bitbucketClient := tasktesting.BitbucketClientOrFatal(t, k8sClient, namespaceConfig.Name, *privateCertFlag) + checkBuildStatus(t, bitbucketClient, odsContext.GitCommitSHA, bitbucket.BuildStatusInProgress) + }), + ); err != nil { + t.Fatal(err) + } +} + +func TestStartTaskClonesRepoAtTag(t *testing.T) { + k8sClient := newK8sClient(t) + if err := runStartTask( + withBitbucketSourceWorkspace(t, "../testdata/workspaces/hello-world-app", k8sClient, namespaceConfig.Name), + func(c *ttr.TaskRunConfig) error { + wsDir, odsContext := ott.GetSourceWorkspaceContext(t, c) + tasktesting.UpdateBitbucketRepoWithTagOrFatal(t, odsContext, wsDir, "v1.0.0") + c.Params = append(c.Params, ttr.TektonParamsFromStringParams(map[string]string{ + "url": bitbucketURLForWorkspace(c.WorkspaceConfigs["source"]), + "git-full-ref": "refs/tags/v1.0.0", + "project": tasktesting.BitbucketProjectKey, + "pipeline-run-name": "foo", + })...) 
+ return nil + }, + ttr.AfterRun(func(config *ttr.TaskRunConfig, run *tekton.TaskRun, logs bytes.Buffer) { + wsDir, odsContext := ott.GetSourceWorkspaceContext(t, config) + checkODSContext(t, wsDir, odsContext) + }), + ); err != nil { + t.Fatal(err) + } +} + +func TestStartTaskClonesRepoAndSubrepos(t *testing.T) { + var subrepoContext *pipelinectxt.ODSContext + k8sClient := newK8sClient(t) + if err := runStartTask( + ott.WithSourceWorkspace( + t, + "../testdata/workspaces/hello-world-app", + func(c *ttr.WorkspaceConfig) error { + // Setup sub-component + subrepoContext = setupBitbucketRepoWithSubdirOrFatal(t, c, k8sClient) + // Nexus artifacts + nexusClient := tasktesting.NexusClientOrFatal(t, k8sClient, namespaceConfig.Name, *privateCertFlag) + artifactsBaseDir := filepath.Join(projectpath.Root, "test", tasktesting.TestdataWorkspacesPath, "hello-world-app-with-artifacts", pipelinectxt.ArtifactsPath) + _, err := nexusClient.Upload( + nexus.TestTemporaryRepository, + pipelinectxt.ArtifactGroup(subrepoContext, pipelinectxt.XUnitReportsDir), + filepath.Join(artifactsBaseDir, pipelinectxt.XUnitReportsDir, "report.xml"), + ) + if err != nil { + return err + } + _, err = nexusClient.Upload( + nexus.TestTemporaryRepository, + pipelinectxt.ArtifactGroup(subrepoContext, pipelinectxt.PipelineRunsDir), + filepath.Join(artifactsBaseDir, pipelinectxt.PipelineRunsDir, "foo-zh9gt0.json"), + ) + if err != nil { + return err + } + return nil + }, + ), + func(c *ttr.TaskRunConfig) error { + c.Params = append(c.Params, ttr.TektonParamsFromStringParams(map[string]string{ + "url": bitbucketURLForWorkspace(c.WorkspaceConfigs["source"]), + "git-full-ref": "refs/heads/master", + "project": tasktesting.BitbucketProjectKey, + "pipeline-run-name": "foo", + "artifact-source": nexus.TestTemporaryRepository, + })...) 
+ return nil + }, + ttr.AfterRun(func(config *ttr.TaskRunConfig, run *tekton.TaskRun, logs bytes.Buffer) { + wsDir, odsContext := ott.GetSourceWorkspaceContext(t, config) + + // Check .ods directory contents of main repo + checkODSContext(t, wsDir, odsContext) + checkFilesExist(t, wsDir, filepath.Join(pipelinectxt.ArtifactsPath, pipelinectxt.ArtifactsManifestFilename)) + + // Check .ods directory contents of subrepo + subrepoDir := filepath.Join(wsDir, pipelinectxt.SubreposPath, subrepoContext.Repository) + checkODSContext(t, subrepoDir, subrepoContext) + + // Check artifacts are downloaded properly in subrepo + sourceArtifactsBaseDir := filepath.Join(projectpath.Root, "test", tasktesting.TestdataWorkspacesPath, "hello-world-app-with-artifacts", pipelinectxt.ArtifactsPath) + xUnitFileSource := "xunit-reports/report.xml" + xUnitContent := trimmedFileContentOrFatal(t, filepath.Join(sourceArtifactsBaseDir, xUnitFileSource)) + destinationArtifactsBaseDir := filepath.Join(subrepoDir, pipelinectxt.ArtifactsPath) + checkFileContent(t, destinationArtifactsBaseDir, xUnitFileSource, xUnitContent) + checkFilesExist(t, destinationArtifactsBaseDir, pipelinectxt.ArtifactsManifestFilename) + + bitbucketClient := tasktesting.BitbucketClientOrFatal(t, k8sClient, namespaceConfig.Name, *privateCertFlag) + checkBuildStatus(t, bitbucketClient, odsContext.GitCommitSHA, bitbucket.BuildStatusInProgress) + }), + ); err != nil { + t.Fatal(err) + } +} + +func TestStartTaskFailsWithoutSuccessfulPipelineRunOfSubrepo(t *testing.T) { + k8sClient := newK8sClient(t) + if err := runStartTask( + ott.WithSourceWorkspace( + t, + "../testdata/workspaces/hello-world-app", + func(c *ttr.WorkspaceConfig) error { + _ = setupBitbucketRepoWithSubdirOrFatal(t, c, k8sClient) + return nil + }, + ), + func(c *ttr.TaskRunConfig) error { + c.Params = append(c.Params, ttr.TektonParamsFromStringParams(map[string]string{ + "url": bitbucketURLForWorkspace(c.WorkspaceConfigs["source"]), + "git-full-ref": 
"refs/heads/master", + "project": tasktesting.BitbucketProjectKey, + "pipeline-run-name": "foo", + "artifact-source": "empty-repo", + })...) + return nil + }, + ttr.ExpectFailure(), + ttr.AfterRun(func(config *ttr.TaskRunConfig, run *tekton.TaskRun, logs bytes.Buffer) { + want := "Pipeline runs with subrepos require a successful pipeline run artifact " + + "for all checked out subrepo commits, however no such artifact was found" + + if !strings.Contains(logs.String(), want) { + t.Fatalf("Want:\n%s\n\nGot:\n%s", want, logs.String()) + } + }), + ); err != nil { + t.Fatal(err) + } +} + +func TestStartTaskClonesUsingLFS(t *testing.T) { + var lfsFilename string + var lfsFileHash [32]byte + k8sClient := newK8sClient(t) + if err := runStartTask( + ott.WithSourceWorkspace( + t, + "../testdata/workspaces/hello-world-app", + func(c *ttr.WorkspaceConfig) error { + odsContext := tasktesting.SetupBitbucketRepo( + t, k8sClient, namespaceConfig.Name, c.Dir, tasktesting.BitbucketProjectKey, *privateCertFlag, + ) + tasktesting.EnableLfsOnBitbucketRepoOrFatal(t, filepath.Base(c.Dir), tasktesting.BitbucketProjectKey) + lfsFilename = "lfspicture.jpg" + lfsFileHash = tasktesting.UpdateBitbucketRepoWithLfsOrFatal(t, odsContext, c.Dir, tasktesting.BitbucketProjectKey, lfsFilename) + return nil + }, + ), + func(c *ttr.TaskRunConfig) error { + c.Params = append(c.Params, ttr.TektonParamsFromStringParams(map[string]string{ + "url": bitbucketURLForWorkspace(c.WorkspaceConfigs["source"]), + "git-full-ref": "refs/heads/master", + "project": tasktesting.BitbucketProjectKey, + "pipeline-run-name": "foo", + })...) 
+ return nil + }, + ttr.AfterRun(func(config *ttr.TaskRunConfig, run *tekton.TaskRun, logs bytes.Buffer) { + wsDir, odsContext := ott.GetSourceWorkspaceContext(t, config) + checkODSContext(t, wsDir, odsContext) + checkFileHash(t, wsDir, lfsFilename, lfsFileHash) + }), + ); err != nil { + t.Fatal(err) + } +} + +func setupBitbucketRepoWithSubdirOrFatal(t *testing.T, c *ttr.WorkspaceConfig, k8sClient kubernetes.Interface) *pipelinectxt.ODSContext { + // Setup sub-component + tempDir, err := directory.CopyToTempDir( + filepath.Join(projectpath.Root, "test", tasktesting.TestdataWorkspacesPath, "hello-world-app"), + c.Dir, + "subcomponent-", + ) + if err != nil { + t.Fatal(err) + } + subCtxt := tasktesting.SetupBitbucketRepo( + t, k8sClient, namespaceConfig.Name, tempDir, tasktesting.BitbucketProjectKey, *privateCertFlag, + ) + err = os.RemoveAll(tempDir) + if err != nil { + t.Fatal(err) + } + err = createStartODSYMLWithSubrepo(c.Dir, filepath.Base(tempDir)) + if err != nil { + t.Fatal(err) + } + _ = tasktesting.SetupBitbucketRepo( + t, k8sClient, namespaceConfig.Name, c.Dir, tasktesting.BitbucketProjectKey, *privateCertFlag, + ) + return subCtxt +} + +func bitbucketURLForWorkspace(c *ttr.WorkspaceConfig) string { + bbURL := "http://ods-test-bitbucket-server.kind:7990" + repoName := filepath.Base(c.Dir) + return fmt.Sprintf("%s/scm/%s/%s.git", bbURL, tasktesting.BitbucketProjectKey, repoName) +} + +func createStartODSYMLWithSubrepo(wsDir, repo string) error { + o := &config.ODS{Repositories: []config.Repository{{Name: repo}}} + return createODSYML(wsDir, o) +} + +func createODSYML(wsDir string, o *config.ODS) error { + y, err := yaml.Marshal(o) + if err != nil { + return err + } + filename := filepath.Join(wsDir, "ods.yaml") + return os.WriteFile(filename, y, 0644) +} + +func checkFileHash(t *testing.T, wsDir string, filename string, hash [32]byte) { + filepath := filepath.Join(wsDir, filename) + filecontent, err := os.ReadFile(filepath) + if err != nil { + 
t.Fatalf("Want %s, but got nothing", filename) + } + filehash := sha256.Sum256(filecontent) + if filehash != hash { + t.Fatalf("Want %x, but got %x", hash, filehash) + } +} + +func checkODSContext(t *testing.T, repoDir string, want *pipelinectxt.ODSContext) { + checkODSFileContent(t, repoDir, "component", want.Component) + checkODSFileContent(t, repoDir, "git-commit-sha", want.GitCommitSHA) + checkODSFileContent(t, repoDir, "git-full-ref", want.GitFullRef) + checkODSFileContent(t, repoDir, "git-ref", want.GitRef) + checkODSFileContent(t, repoDir, "git-url", want.GitURL) + checkODSFileContent(t, repoDir, "namespace", want.Namespace) + checkODSFileContent(t, repoDir, "pr-base", want.PullRequestBase) + checkODSFileContent(t, repoDir, "pr-key", want.PullRequestKey) + checkODSFileContent(t, repoDir, "project", want.Project) + checkODSFileContent(t, repoDir, "repository", want.Repository) +} + +func checkODSFileContent(t *testing.T, wsDir, filename, want string) { + checkFileContent(t, filepath.Join(wsDir, pipelinectxt.BaseDir), filename, want) +} + +func checkFileContent(t *testing.T, wsDir, filename, want string) { + got, err := getTrimmedFileContent(filepath.Join(wsDir, filename)) + if err != nil { + t.Fatalf("could not read %s: %s", filename, err) + } + if got != want { + t.Fatalf("got '%s', want '%s' in file %s", got, want, filename) + } +} + +func checkFilesExist(t *testing.T, wsDir string, wantFiles ...string) { + for _, wf := range wantFiles { + filename := filepath.Join(wsDir, wf) + if _, err := os.Stat(filename); os.IsNotExist(err) { + t.Fatalf("Want %s, but got nothing", filename) + } + } +} + +func getTrimmedFileContent(filename string) (string, error) { + content, err := os.ReadFile(filename) + if err != nil { + return "", err + } + return strings.TrimSpace(string(content)), nil +} + +func trimmedFileContentOrFatal(t *testing.T, filename string) string { + c, err := getTrimmedFileContent(filename) + if err != nil { + t.Fatal(err) + } + return c +} diff --git 
a/test/tasks/common_test.go b/test/tasks/common_test.go deleted file mode 100644 index 2e9ebb27..00000000 --- a/test/tasks/common_test.go +++ /dev/null @@ -1,257 +0,0 @@ -package tasks - -import ( - "crypto/sha256" - "flag" - "os" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/opendevstack/ods-pipeline/internal/directory" - "github.com/opendevstack/ods-pipeline/internal/kubernetes" - "github.com/opendevstack/ods-pipeline/internal/projectpath" - "github.com/opendevstack/ods-pipeline/pkg/bitbucket" - "github.com/opendevstack/ods-pipeline/pkg/config" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" - "github.com/opendevstack/ods-pipeline/pkg/sonar" - "github.com/opendevstack/ods-pipeline/pkg/tasktesting" - "golang.org/x/exp/slices" - kclient "k8s.io/client-go/kubernetes" - "sigs.k8s.io/yaml" -) - -var alwaysKeepTmpWorkspacesFlag = flag.Bool("always-keep-tmp-workspaces", false, "Whether to keep temporary workspaces from taskruns even when test is successful") -var outsideKindFlag = flag.Bool("outside-kind", false, "Whether to continue if not in KinD cluster") -var skipSonarQubeFlag = flag.Bool("skip-sonar", false, "Whether to skip SonarQube steps") -var privateCertFlag = flag.Bool("private-cert", false, "Whether to run tests using a private cert") - -const ( - taskKindRef = "Task" - nexusPermanentRepository = "ods-permanent-artifacts" - nexusTemporaryRepository = "ods-temporary-artifacts" -) - -// buildTaskParams forces all SonarQube params to be "falsy" -// if the skipSonarQubeFlag is set. -func buildTaskParams(p map[string]string) map[string]string { - if *skipSonarQubeFlag { - p["sonar-skip"] = "true" - p["sonar-quality-gate"] = "false" - } - return p -} - -// requiredServices takes a variable amount of services and removes -// SonarQube from the resulting slice if the skipSonarQubeFlag is set. -func requiredServices(s ...tasktesting.Service) []tasktesting.Service { - requiredServices := append([]tasktesting.Service{}, s...) 
- sqIndex := slices.Index(requiredServices, tasktesting.SonarQube) - if sqIndex != -1 && *skipSonarQubeFlag { - requiredServices = slices.Delete(requiredServices, sqIndex, sqIndex+1) - } - return requiredServices -} - -func checkODSContext(t *testing.T, repoDir string, want *pipelinectxt.ODSContext) { - checkODSFileContent(t, repoDir, "component", want.Component) - checkODSFileContent(t, repoDir, "git-commit-sha", want.GitCommitSHA) - checkODSFileContent(t, repoDir, "git-full-ref", want.GitFullRef) - checkODSFileContent(t, repoDir, "git-ref", want.GitRef) - checkODSFileContent(t, repoDir, "git-url", want.GitURL) - checkODSFileContent(t, repoDir, "namespace", want.Namespace) - checkODSFileContent(t, repoDir, "pr-base", want.PullRequestBase) - checkODSFileContent(t, repoDir, "pr-key", want.PullRequestKey) - checkODSFileContent(t, repoDir, "project", want.Project) - checkODSFileContent(t, repoDir, "repository", want.Repository) -} - -func checkODSFileContent(t *testing.T, wsDir, filename, want string) { - checkFileContent(t, filepath.Join(wsDir, pipelinectxt.BaseDir), filename, want) -} - -func checkFileContent(t *testing.T, wsDir, filename, want string) { - got, err := getTrimmedFileContent(filepath.Join(wsDir, filename)) - if err != nil { - t.Fatalf("could not read %s: %s", filename, err) - } - if got != want { - t.Fatalf("got '%s', want '%s' in file %s", got, want, filename) - } -} - -func checkFilesExist(t *testing.T, wsDir string, wantFiles ...string) { - for _, wf := range wantFiles { - filename := filepath.Join(wsDir, wf) - if _, err := os.Stat(filename); os.IsNotExist(err) { - t.Fatalf("Want %s, but got nothing", filename) - } - } -} - -func checkFileHash(t *testing.T, wsDir string, filename string, hash [32]byte) { - filepath := filepath.Join(wsDir, filename) - filecontent, err := os.ReadFile(filepath) - if err != nil { - t.Fatalf("Want %s, but got nothing", filename) - } - filehash := sha256.Sum256(filecontent) - if filehash != hash { - t.Fatalf("Want %x, 
but got %x", hash, filehash) - } -} - -func getTrimmedFileContent(filename string) (string, error) { - content, err := os.ReadFile(filename) - if err != nil { - return "", err - } - return strings.TrimSpace(string(content)), nil -} - -func trimmedFileContentOrFatal(t *testing.T, filename string) string { - c, err := getTrimmedFileContent(filename) - if err != nil { - t.Fatal(err) - } - return c -} - -func checkFileContentContains(t *testing.T, wsDir, filename string, wantContains ...string) { - content, err := os.ReadFile(filepath.Join(wsDir, filename)) - got := string(content) - if err != nil { - t.Fatalf("could not read %s: %s", filename, err) - } - for _, w := range wantContains { - if !strings.Contains(got, w) { - t.Fatalf("got '%s', want '%s' contained in file %s", got, w, filename) - } - } -} - -func checkFileContentLeanContains(t *testing.T, wsDir, filename string, wantContains string) { - got, err := getFileContentLean(filepath.Join(wsDir, filename)) - if err != nil { - t.Fatalf("could not read %s: %s", filename, err) - } - if !strings.Contains(got, wantContains) { - t.Fatalf("got '%s', want '%s' contained in file %s", got, wantContains, filename) - } -} - -func getFileContentLean(filename string) (string, error) { - content, err := os.ReadFile(filename) - if err != nil { - return "", err - } - - contentStr := strings.ReplaceAll(string(content), "\t", "") - contentStr = strings.ReplaceAll(contentStr, "\n", "") - contentStr = strings.ReplaceAll(contentStr, " ", "") - - return contentStr, nil -} - -func runTaskTestCases(t *testing.T, taskName string, requiredServices []tasktesting.Service, testCases map[string]tasktesting.TestCase) { - tasktesting.CheckCluster(t, *outsideKindFlag) - if len(requiredServices) != 0 { - tasktesting.CheckServices(t, requiredServices) - } - - c, ns := tasktesting.Setup(t, - tasktesting.SetupOpts{ - SourceDir: tasktesting.StorageSourceDir, - StorageCapacity: tasktesting.StorageCapacity, - StorageClassName: 
tasktesting.StorageClassName, - PrivateCert: *privateCertFlag, - }, - ) - - tasktesting.CleanupOnInterrupt(func() { tasktesting.TearDown(t, c, ns) }, t.Logf) - defer tasktesting.TearDown(t, c, ns) - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - tn := taskName - if tc.Timeout == 0 { - tc.Timeout = 5 * time.Minute - } - tasktesting.Run(t, tc, tasktesting.TestOpts{ - TaskKindRef: taskKindRef, - TaskName: tn, - Clients: c, - Namespace: ns, - Timeout: tc.Timeout, - AlwaysKeepTmpWorkspaces: *alwaysKeepTmpWorkspacesFlag, - }) - }) - } -} - -func checkSonarQualityGate(t *testing.T, c *kclient.Clientset, namespace, sonarProject string, qualityGateFlag bool, wantQualityGateStatus string) { - - sonarToken, err := kubernetes.GetSecretKey(c, namespace, "ods-sonar-auth", "password") - if err != nil { - t.Fatalf("could not get SonarQube token: %s", err) - } - - sonarClient, err := sonar.NewClient(&sonar.ClientConfig{ - APIToken: sonarToken, - BaseURL: "http://localhost:9000", // use localhost instead of ods-test-sonarqube.kind! 
- ServerEdition: "community", - }) - if err != nil { - t.Fatalf("sonar client: %s", err) - } - - if qualityGateFlag { - qualityGateResult, err := sonarClient.QualityGateGet( - sonar.QualityGateGetParams{ProjectKey: sonarProject}, - ) - if err != nil { - t.Fatal(err) - } - actualStatus := qualityGateResult.ProjectStatus.Status - if actualStatus != wantQualityGateStatus { - t.Fatalf("Got: %s, want: %s", actualStatus, wantQualityGateStatus) - } - - } - -} - -func createODSYML(wsDir string, o *config.ODS) error { - y, err := yaml.Marshal(o) - if err != nil { - return err - } - filename := filepath.Join(wsDir, "ods.yaml") - return os.WriteFile(filename, y, 0644) -} - -func checkBuildStatus(t *testing.T, c *bitbucket.Client, gitCommit, wantBuildStatus string) { - buildStatusPage, err := c.BuildStatusList(gitCommit) - buildStatus := buildStatusPage.Values[0] - if err != nil { - t.Fatal(err) - } - if buildStatus.State != wantBuildStatus { - t.Fatalf("Got: %s, want: %s", buildStatus.State, wantBuildStatus) - } -} - -func createAppInSubDirectory(t *testing.T, wsDir string, subdir string, sampleApp string) { - err := os.MkdirAll(filepath.Join(wsDir, subdir), 0755) - if err != nil { - t.Fatal(err) - } - err = directory.Copy( - filepath.Join(projectpath.Root, "test", tasktesting.TestdataWorkspacesPath, sampleApp), - filepath.Join(wsDir, subdir), - ) - if err != nil { - t.Fatal(err) - } -} diff --git a/test/tasks/ods-aqua-scan_test.go b/test/tasks/ods-aqua-scan_test.go deleted file mode 100644 index efc74f34..00000000 --- a/test/tasks/ods-aqua-scan_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package tasks - -import ( - "testing" - - "github.com/opendevstack/ods-pipeline/pkg/tasktesting" -) - -func TestTaskODSAquaScan(t *testing.T) { - runTaskTestCases(t, - "ods-aqua-scan", - []tasktesting.Service{}, - map[string]tasktesting.TestCase{ - "task fails without Aqua download URL": { - WorkspaceDirMapping: map[string]string{"source": "empty"}, - PreRunFunc: func(t *testing.T, ctxt 
*tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - }, - WantRunSuccess: false, - }, - }, - ) -} diff --git a/test/tasks/ods-build-go_test.go b/test/tasks/ods-build-go_test.go deleted file mode 100644 index 5d55f7f8..00000000 --- a/test/tasks/ods-build-go_test.go +++ /dev/null @@ -1,285 +0,0 @@ -package tasks - -import ( - "bytes" - "fmt" - "io" - "path/filepath" - "runtime" - "strings" - "testing" - - "github.com/opendevstack/ods-pipeline/internal/command" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" - "github.com/opendevstack/ods-pipeline/pkg/sonar" - "github.com/opendevstack/ods-pipeline/pkg/tasktesting" -) - -func TestTaskODSBuildGo(t *testing.T) { - goProverb := "Don't communicate by sharing memory, share memory by communicating." - runTaskTestCases(t, - "ods-build-go", - requiredServices(tasktesting.SonarQube), - map[string]tasktesting.TestCase{ - "build go app": { - WorkspaceDirMapping: map[string]string{"source": "go-sample-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = buildTaskParams(map[string]string{ - "go-os": runtime.GOOS, - "go-arch": runtime.GOARCH, - "sonar-quality-gate": "true", - "cache-build": "false", - }) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - - checkFilesExist(t, wsDir, - "docker/Dockerfile", - "docker/app", - filepath.Join(pipelinectxt.LintReportsPath, "report.txt"), - filepath.Join(pipelinectxt.XUnitReportsPath, "report.xml"), - filepath.Join(pipelinectxt.CodeCoveragesPath, "coverage.out"), - ) - - if !*skipSonarQubeFlag { - checkFilesExist(t, wsDir, - filepath.Join(pipelinectxt.SonarAnalysisPath, "analysis-report.md"), - filepath.Join(pipelinectxt.SonarAnalysisPath, "issues-report.csv"), - 
filepath.Join(pipelinectxt.SonarAnalysisPath, "quality-gate.json"), - ) - sonarProject := sonar.ProjectKey(ctxt.ODS, "") - checkSonarQualityGate(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, sonarProject, true, "OK") - } - - wantLogMsg := "No sonar-project.properties present, using default:" - if !strings.Contains(string(ctxt.CollectedLogs), wantLogMsg) { - t.Fatalf("Want:\n%s\n\nGot:\n%s", wantLogMsg, string(ctxt.CollectedLogs)) - } - - b, _, err := command.RunBuffered(wsDir+"/docker/app", []string{}) - if err != nil { - t.Fatal(err) - } - if string(b) != goProverb { - t.Fatalf("Got: %+v, want: %+v.", string(b), goProverb) - } - }, - CleanupFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - cleanModcache(t, wsDir) - }, - }, - "build go app with build caching": { - WorkspaceDirMapping: map[string]string{"source": "go-sample-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = buildTaskParams(map[string]string{ - "go-os": runtime.GOOS, - "go-arch": runtime.GOARCH, - "sonar-quality-gate": "true", - }) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - - checkFilesExist(t, wsDir, - "docker/Dockerfile", - "docker/app", - filepath.Join(pipelinectxt.LintReportsPath, "report.txt"), - filepath.Join(pipelinectxt.XUnitReportsPath, "report.xml"), - filepath.Join(pipelinectxt.CodeCoveragesPath, "coverage.out"), - ) - - if !*skipSonarQubeFlag { - checkFilesExist(t, wsDir, - filepath.Join(pipelinectxt.SonarAnalysisPath, "analysis-report.md"), - filepath.Join(pipelinectxt.SonarAnalysisPath, "issues-report.csv"), - filepath.Join(pipelinectxt.SonarAnalysisPath, "quality-gate.json"), - ) - - sonarProject := sonar.ProjectKey(ctxt.ODS, "") - checkSonarQualityGate(t, ctxt.Clients.KubernetesClientSet, 
ctxt.Namespace, sonarProject, true, "OK") - } - - // This is not available when build skipping as the default is - // supplied on the second repeat. - // Not sure whether the check is significant in the first place. - // wantLogMsg := "No sonar-project.properties present, using default:" - // if !strings.Contains(string(ctxt.CollectedLogs), wantLogMsg) { - // t.Fatalf("Want:\n%s\n\nGot:\n%s", wantLogMsg, string(ctxt.CollectedLogs)) - // } - - b, _, err := command.RunBuffered(wsDir+"/docker/app", []string{}) - if err != nil { - t.Fatal(err) - } - if string(b) != goProverb { - t.Fatalf("Got: %+v, want: %+v.", string(b), goProverb) - } - }, - AdditionalRuns: []tasktesting.TaskRunCase{{ - // inherits funcs from primary task only set explicitly - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - // ctxt still in place from prior run - }, - WantRunSuccess: true, - }}, - }, - "build go app in subdirectory": { - WorkspaceDirMapping: map[string]string{"source": "hello-world-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - // Setup subdir in "monorepo" - subdir := "go-src" - createAppInSubDirectory(t, wsDir, subdir, "go-sample-app") - - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = buildTaskParams(map[string]string{ - "go-os": runtime.GOOS, - "go-arch": runtime.GOARCH, - "sonar-quality-gate": "true", - "working-dir": subdir, - }) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - - subdir := "go-src" - binary := fmt.Sprintf("%s/docker/app", subdir) - - checkFilesExist(t, wsDir, - fmt.Sprintf("%s/docker/Dockerfile", subdir), - binary, - filepath.Join(pipelinectxt.LintReportsPath, fmt.Sprintf("%s-report.txt", subdir)), - filepath.Join(pipelinectxt.XUnitReportsPath, fmt.Sprintf("%s-report.xml", subdir)), - filepath.Join(pipelinectxt.CodeCoveragesPath, 
fmt.Sprintf("%s-coverage.out", subdir)), - ) - - if !*skipSonarQubeFlag { - checkFilesExist(t, wsDir, - filepath.Join(pipelinectxt.SonarAnalysisPath, fmt.Sprintf("%s-analysis-report.md", subdir)), - filepath.Join(pipelinectxt.SonarAnalysisPath, fmt.Sprintf("%s-issues-report.csv", subdir)), - filepath.Join(pipelinectxt.SonarAnalysisPath, fmt.Sprintf("%s-quality-gate.json", subdir)), - ) - sonarProject := sonar.ProjectKey(ctxt.ODS, subdir+"-") - checkSonarQualityGate(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, sonarProject, true, "OK") - } - - b, _, err := command.RunBuffered(filepath.Join(wsDir, binary), []string{}) - if err != nil { - t.Fatal(err) - } - if string(b) != goProverb { - t.Fatalf("Got: %+v, want: %+v.", string(b), goProverb) - } - }, - CleanupFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - cleanModcache(t, wsDir) - }, - AdditionalRuns: []tasktesting.TaskRunCase{{ - // inherits funcs from primary task only set explicitly - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - // ctxt still in place from prior run - }, - WantRunSuccess: true, - }}, - }, - "fail linting go app and generate lint report": { - WorkspaceDirMapping: map[string]string{"source": "go-sample-app-lint-error"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = buildTaskParams(map[string]string{ - "go-os": runtime.GOOS, - "go-arch": runtime.GOARCH, - }) - }, - WantRunSuccess: false, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - - wantFile := filepath.Join(pipelinectxt.LintReportsPath, "report.txt") - checkFilesExist(t, wsDir, wantFile) - - wantLintReportContent := "main.go:6:2: printf: fmt.Printf format %s reads arg #1, but call has 0 args (govet)\n\tfmt.Printf(\"Hello World %s\") // lint error on purpose to 
generate lint report\n\t^" - - checkFileContent(t, wsDir, ".ods/artifacts/lint-reports/report.txt", wantLintReportContent) - }, - CleanupFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - cleanModcache(t, wsDir) - }, - }, - "build go app with pre-test script": { - WorkspaceDirMapping: map[string]string{"source": "go-sample-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = buildTaskParams(map[string]string{ - "sonar-skip": "true", - "pre-test-script": "pre-test-script.sh", - }) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - - wantFile := "docker/test.txt" - checkFilesExist(t, wsDir, wantFile) - }, - CleanupFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - cleanModcache(t, wsDir) - }, - }, - "build go app in PR": { - WorkspaceDirMapping: map[string]string{"source": "go-sample-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - writeContextFile(t, wsDir, "pr-key", "3") - writeContextFile(t, wsDir, "pr-base", "master") - ctxt.Params = buildTaskParams(map[string]string{ - "go-os": runtime.GOOS, - "go-arch": runtime.GOARCH, - // "sonar-quality-gate": "true", - }) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - // No idea yet how to fake PR scanning in SQ ... 
- // if !*skipSonarQubeFlag { - // sonarProject := sonar.ProjectKey(ctxt.ODS, "") - // checkSonarQualityGate(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, sonarProject, true, "OK") - // } - }, - CleanupFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - cleanModcache(t, wsDir) - }, - }, - }) -} - -func cleanModcache(t *testing.T, workspace string) { - var stderr bytes.Buffer - err := command.Run( - "go", []string{"clean", "-modcache"}, - []string{ - fmt.Sprintf("GOMODCACHE=%s/%s", workspace, ".ods-cache/deps/gomod"), - }, - io.Discard, - &stderr, - ) - if err != nil { - t.Fatalf("could not clean up modcache: %s, stderr: %s", err, stderr.String()) - } -} diff --git a/test/tasks/ods-build-gradle_test.go b/test/tasks/ods-build-gradle_test.go deleted file mode 100644 index 91cde428..00000000 --- a/test/tasks/ods-build-gradle_test.go +++ /dev/null @@ -1,143 +0,0 @@ -package tasks - -import ( - "path/filepath" - "strings" - "testing" - - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" - - "github.com/opendevstack/ods-pipeline/pkg/tasktesting" -) - -func TestTaskODSBuildGradle(t *testing.T) { - runTaskTestCases(t, - "ods-build-gradle", - requiredServices(tasktesting.Nexus, tasktesting.SonarQube), - map[string]tasktesting.TestCase{ - "task should build gradle app": { - WorkspaceDirMapping: map[string]string{"source": "gradle-sample-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = buildTaskParams(map[string]string{ - "sonar-quality-gate": "true", - "cache-build": "false", - }) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - - checkFilesExist(t, wsDir, - "docker/Dockerfile", - "docker/app.jar", - filepath.Join(pipelinectxt.XUnitReportsPath, 
"TEST-ods.java.gradle.sample.app.AppTest.xml"), - filepath.Join(pipelinectxt.XUnitReportsPath, "TEST-ods.java.gradle.sample.app.AppTest2.xml"), - filepath.Join(pipelinectxt.CodeCoveragesPath, "coverage.xml"), - ) - - if !*skipSonarQubeFlag { - checkFilesExist(t, wsDir, - filepath.Join(pipelinectxt.SonarAnalysisPath, "analysis-report.md"), - filepath.Join(pipelinectxt.SonarAnalysisPath, "issues-report.csv"), - filepath.Join(pipelinectxt.SonarAnalysisPath, "quality-gate.json"), - ) - } - - logContains(t, ctxt.CollectedLogs, - "No sonar-project.properties present, using default:", - "ods-test-nexus", - "Gradle 7.4.2", - "Using GRADLE_OPTS=-Dorg.gradle.jvmargs=-Xmx512M", - "Using GRADLE_USER_HOME=/workspace/source/.ods-cache/deps/gradle", - "To honour the JVM settings for this build a single-use Daemon process will be forked.", - ) - }, - }, - "build gradle app with build caching": { - WorkspaceDirMapping: map[string]string{"source": "gradle-sample-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = buildTaskParams(map[string]string{ - "sonar-quality-gate": "true", - }) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - - checkFilesExist(t, wsDir, - "docker/Dockerfile", - "docker/app.jar", - filepath.Join(pipelinectxt.XUnitReportsPath, "TEST-ods.java.gradle.sample.app.AppTest.xml"), - filepath.Join(pipelinectxt.XUnitReportsPath, "TEST-ods.java.gradle.sample.app.AppTest2.xml"), - filepath.Join(pipelinectxt.CodeCoveragesPath, "coverage.xml"), - ) - - if !*skipSonarQubeFlag { - checkFilesExist(t, wsDir, - filepath.Join(pipelinectxt.SonarAnalysisPath, "analysis-report.md"), - filepath.Join(pipelinectxt.SonarAnalysisPath, "issues-report.csv"), - filepath.Join(pipelinectxt.SonarAnalysisPath, "quality-gate.json"), - ) - } - - logContains(t, 
ctxt.CollectedLogs, - "No sonar-project.properties present, using default:", - "ods-test-nexus", - "Gradle 7.4.2", - "Using GRADLE_OPTS=-Dorg.gradle.jvmargs=-Xmx512M", - "Using GRADLE_USER_HOME=/workspace/source/.ods-cache/deps/gradle", - "To honour the JVM settings for this build a single-use Daemon process will be forked.", - ) - }, - AdditionalRuns: []tasktesting.TaskRunCase{{ - // inherits funcs from primary task only set explicitly - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - // ctxt still in place from prior run - wsDir := ctxt.Workspaces["source"] - tasktesting.RemoveAll(t, wsDir, "docker/app.jar") - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - - checkFilesExist(t, wsDir, - "docker/Dockerfile", - "docker/app.jar", - filepath.Join(pipelinectxt.XUnitReportsPath, "TEST-ods.java.gradle.sample.app.AppTest.xml"), - filepath.Join(pipelinectxt.XUnitReportsPath, "TEST-ods.java.gradle.sample.app.AppTest2.xml"), - filepath.Join(pipelinectxt.CodeCoveragesPath, "coverage.xml"), - ) - - if !*skipSonarQubeFlag { - checkFilesExist(t, wsDir, - filepath.Join(pipelinectxt.SonarAnalysisPath, "analysis-report.md"), - filepath.Join(pipelinectxt.SonarAnalysisPath, "issues-report.csv"), - filepath.Join(pipelinectxt.SonarAnalysisPath, "quality-gate.json"), - ) - } - - logContains(t, ctxt.CollectedLogs, - "Copying prior ods build artifacts from cache: /workspace/source/.ods-cache/build-task/gradle", - "Copying prior build output from cache: /workspace/source/.ods-cache/build-task/gradle", - ) - }, - }}, - }, - }) -} - -func logContains(t *testing.T, collectedLogs []byte, wantLogMsgs ...string) { - t.Helper() - logString := string(collectedLogs) - - for _, msg := range wantLogMsgs { - if !strings.Contains(logString, msg) { - t.Fatalf("Want:\n%s\n\nGot:\n%s", msg, logString) - } - } - -} diff --git a/test/tasks/ods-build-npm_test.go b/test/tasks/ods-build-npm_test.go 
deleted file mode 100644 index 71cd2335..00000000 --- a/test/tasks/ods-build-npm_test.go +++ /dev/null @@ -1,196 +0,0 @@ -package tasks - -import ( - "fmt" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" - "github.com/opendevstack/ods-pipeline/pkg/sonar" - "github.com/opendevstack/ods-pipeline/pkg/tasktesting" -) - -func TestTaskODSBuildNPM(t *testing.T) { - runTaskTestCases(t, - "ods-build-npm", - requiredServices(tasktesting.Nexus, tasktesting.SonarQube), - map[string]tasktesting.TestCase{ - "build typescript app with SQ scan": { - WorkspaceDirMapping: map[string]string{"source": "typescript-sample-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = buildTaskParams(map[string]string{ - "sonar-quality-gate": "true", - "cache-build": "false", - }) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - - checkFilesExist(t, wsDir, - filepath.Join(pipelinectxt.XUnitReportsPath, "report.xml"), - filepath.Join(pipelinectxt.CodeCoveragesPath, "clover.xml"), - filepath.Join(pipelinectxt.CodeCoveragesPath, "coverage-final.json"), - filepath.Join(pipelinectxt.CodeCoveragesPath, "lcov.info"), - filepath.Join(pipelinectxt.LintReportsPath, "report.txt"), - "dist/src/index.js", - "node_modules", - "package.json", - "package-lock.json", - ) - if !*skipSonarQubeFlag { - checkFilesExist(t, wsDir, - filepath.Join(pipelinectxt.SonarAnalysisPath, "analysis-report.md"), - filepath.Join(pipelinectxt.SonarAnalysisPath, "issues-report.csv"), - filepath.Join(pipelinectxt.SonarAnalysisPath, "quality-gate.json"), - ) - } - - wantLogMsg := "No sonar-project.properties present, using default:" - if !strings.Contains(string(ctxt.CollectedLogs), wantLogMsg) { - t.Fatalf("Want:\n%s\n\nGot:\n%s", wantLogMsg, 
string(ctxt.CollectedLogs)) - } - - if !*skipSonarQubeFlag { - sonarProject := sonar.ProjectKey(ctxt.ODS, "") - checkSonarQualityGate(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, sonarProject, true, "OK") - } - }, - }, - "build javascript app in subdirectory with build caching": { - WorkspaceDirMapping: map[string]string{"source": "hello-world-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - // Setup subdir in "monorepo" - subdir := "js-src" - createAppInSubDirectory(t, wsDir, subdir, "javascript-sample-app") - - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = buildTaskParams(map[string]string{ - "working-dir": subdir, - }) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - subdir := "js-src" - checkFilesExist(t, wsDir, - filepath.Join(pipelinectxt.XUnitReportsPath, fmt.Sprintf("%s-report.xml", subdir)), - filepath.Join(pipelinectxt.CodeCoveragesPath, fmt.Sprintf("%s-clover.xml", subdir)), - filepath.Join(pipelinectxt.CodeCoveragesPath, fmt.Sprintf("%s-coverage-final.json", subdir)), - filepath.Join(pipelinectxt.CodeCoveragesPath, fmt.Sprintf("%s-lcov.info", subdir)), - filepath.Join(pipelinectxt.LintReportsPath, fmt.Sprintf("%s-report.txt", subdir)), - fmt.Sprintf("%s/dist/src/index.js", subdir), - fmt.Sprintf("%s/package.json", subdir), - fmt.Sprintf("%s/package-lock.json", subdir), - ) - }, - AdditionalRuns: []tasktesting.TaskRunCase{{ - // inherits funcs from primary task only set explicitly - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - // ctxt still in place from prior run - wsDir := ctxt.Workspaces["source"] - tasktesting.RemoveAll(t, wsDir, "js-src/dist") - tasktesting.RemoveAll(t, wsDir, "js-src/node_modules") - }, - WantRunSuccess: true, - }}, - }, - "fail linting typescript app and generate lint report": { - WorkspaceDirMapping: 
map[string]string{"source": "typescript-sample-app-lint-error"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - }, - WantRunSuccess: false, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - - wantFile := filepath.Join(pipelinectxt.LintReportsPath, "report.txt") - checkFilesExist(t, wsDir, wantFile) - - wantLintReportContent := "/workspace/source/src/index.ts: line 3, col 31, Warning - Unexpected any. Specify a different type. (@typescript-eslint/no-explicit-any)\n\n1 problem" - checkFileContentContains(t, wsDir, filepath.Join(pipelinectxt.LintReportsPath, "report.txt"), wantLintReportContent) - }, - }, - "fail pulling image if unsupported node version is specified": { - WorkspaceDirMapping: map[string]string{"source": "javascript-sample-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = map[string]string{ - "node-version": "10", - } - }, - WantSetupFail: true, - }, - "build backend javascript app": { - Timeout: 10 * time.Minute, - WorkspaceDirMapping: map[string]string{"source": "javascript-sample-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = buildTaskParams(map[string]string{ - "cached-outputs": "node_modules/", - }) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - checkFilesExist(t, wsDir, - "node_modules/", - "package.json", - "package-lock.json", - ) - }, - }, - "build javascript app with custom build directory": { - WorkspaceDirMapping: map[string]string{"source": "javascript-sample-app-build-dir"}, - 
PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = buildTaskParams(map[string]string{ - "cached-outputs": "build", - }) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - checkFilesExist(t, wsDir, - "build/src/index.js", - "package.json", - "package-lock.json", - ) - }, - }, - "build javascript app using node16": { - WorkspaceDirMapping: map[string]string{"source": "javascript-sample-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = map[string]string{ - "sonar-skip": "true", - "node-version": "16", - } - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - checkFilesExist(t, wsDir, - filepath.Join(pipelinectxt.XUnitReportsPath, "report.xml"), - filepath.Join(pipelinectxt.CodeCoveragesPath, "clover.xml"), - filepath.Join(pipelinectxt.CodeCoveragesPath, "coverage-final.json"), - filepath.Join(pipelinectxt.CodeCoveragesPath, "lcov.info"), - filepath.Join(pipelinectxt.LintReportsPath, "report.txt"), - "dist/src/index.js", - "package.json", - "package-lock.json", - ) - }, - }, - }) -} diff --git a/test/tasks/ods-build-python_test.go b/test/tasks/ods-build-python_test.go deleted file mode 100644 index 35918094..00000000 --- a/test/tasks/ods-build-python_test.go +++ /dev/null @@ -1,188 +0,0 @@ -package tasks - -import ( - "fmt" - "os" - "path/filepath" - "strings" - "testing" - - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" - "github.com/opendevstack/ods-pipeline/pkg/sonar" - "github.com/opendevstack/ods-pipeline/pkg/tasktesting" -) - -func TestTaskODSBuildPython(t *testing.T) { - runTaskTestCases(t, - "ods-build-python", - 
requiredServices(tasktesting.Nexus, tasktesting.SonarQube), - map[string]tasktesting.TestCase{ - "build python fastapi app": { - WorkspaceDirMapping: map[string]string{"source": "python-fastapi-sample-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = buildTaskParams(map[string]string{ - "sonar-quality-gate": "true", - "cache-build": "false", - }) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - - checkFilesExist(t, wsDir, - "src/main.py", - "requirements.txt", - filepath.Join(pipelinectxt.XUnitReportsPath, "report.xml"), - filepath.Join(pipelinectxt.CodeCoveragesPath, "coverage.xml"), - ) - if !*skipSonarQubeFlag { - checkFilesExist(t, wsDir, - filepath.Join(pipelinectxt.SonarAnalysisPath, "analysis-report.md"), - filepath.Join(pipelinectxt.SonarAnalysisPath, "issues-report.csv"), - filepath.Join(pipelinectxt.SonarAnalysisPath, "quality-gate.json"), - ) - } - - wantContainsBytes, err := os.ReadFile("../../test/testdata/golden/ods-build-python/excerpt-from-coverage.xml") - if err != nil { - t.Fatal(err) - } - - wantContains := string(wantContainsBytes) - - wantContains = strings.ReplaceAll(wantContains, "\t", "") - wantContains = strings.ReplaceAll(wantContains, "\n", "") - wantContains = strings.ReplaceAll(wantContains, " ", "") - - checkFileContentLeanContains(t, wsDir, filepath.Join(pipelinectxt.CodeCoveragesPath, "coverage.xml"), wantContains) - - if !*skipSonarQubeFlag { - sonarProject := sonar.ProjectKey(ctxt.ODS, "") - checkSonarQualityGate(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, sonarProject, true, "OK") - } - - wantLogMsg := "No sonar-project.properties present, using default:" - if !strings.Contains(string(ctxt.CollectedLogs), wantLogMsg) { - t.Fatalf("Want:\n%s\n\nGot:\n%s", wantLogMsg, string(ctxt.CollectedLogs)) 
- } - }, - }, - "build python fastapi app with build caching": { - WorkspaceDirMapping: map[string]string{"source": "python-fastapi-sample-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = buildTaskParams(map[string]string{ - "sonar-quality-gate": "true", - }) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - - checkFilesExist(t, wsDir, - "src/main.py", - "requirements.txt", - filepath.Join(pipelinectxt.XUnitReportsPath, "report.xml"), - filepath.Join(pipelinectxt.CodeCoveragesPath, "coverage.xml"), - ) - if !*skipSonarQubeFlag { - checkFilesExist(t, wsDir, - filepath.Join(pipelinectxt.SonarAnalysisPath, "analysis-report.md"), - filepath.Join(pipelinectxt.SonarAnalysisPath, "issues-report.csv"), - filepath.Join(pipelinectxt.SonarAnalysisPath, "quality-gate.json"), - ) - } - - wantContainsBytes, err := os.ReadFile("../../test/testdata/golden/ods-build-python/excerpt-from-coverage.xml") - if err != nil { - t.Fatal(err) - } - - wantContains := string(wantContainsBytes) - - wantContains = strings.ReplaceAll(wantContains, "\t", "") - wantContains = strings.ReplaceAll(wantContains, "\n", "") - wantContains = strings.ReplaceAll(wantContains, " ", "") - - checkFileContentLeanContains(t, wsDir, filepath.Join(pipelinectxt.CodeCoveragesPath, "coverage.xml"), wantContains) - - if !*skipSonarQubeFlag { - sonarProject := sonar.ProjectKey(ctxt.ODS, "") - checkSonarQualityGate(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, sonarProject, true, "OK") - } - - // This is not available when build skipping as the default is - // supplied on the second repeat. - // Not sure whether the check is significant in the first place. 
- // wantLogMsg := "No sonar-project.properties present, using default:" - // if !strings.Contains(string(ctxt.CollectedLogs), wantLogMsg) { - // t.Fatalf("Want:\n%s\n\nGot:\n%s", wantLogMsg, string(ctxt.CollectedLogs)) - // } - }, - AdditionalRuns: []tasktesting.TaskRunCase{{ - // inherits funcs from primary task only set explicitly - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - // ctxt still in place from prior run - }, - WantRunSuccess: true, - }}, - }, - "build python fastapi app in subdirectory": { - WorkspaceDirMapping: map[string]string{"source": "hello-world-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - // Setup subdir in "monorepo" - subdir := "fastapi-src" - createAppInSubDirectory(t, wsDir, subdir, "python-fastapi-sample-app") - - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = buildTaskParams(map[string]string{ - "sonar-quality-gate": "true", - "working-dir": subdir, - "cache-build": "true", - }) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - subdir := "fastapi-src" - - checkFilesExist(t, wsDir, - fmt.Sprintf("%s/src/main.py", subdir), - fmt.Sprintf("%s/requirements.txt", subdir), - filepath.Join(pipelinectxt.XUnitReportsPath, fmt.Sprintf("%s-report.xml", subdir)), - filepath.Join(pipelinectxt.CodeCoveragesPath, fmt.Sprintf("%s-coverage.xml", subdir)), - ) - if !*skipSonarQubeFlag { - checkFilesExist(t, wsDir, - filepath.Join(pipelinectxt.SonarAnalysisPath, fmt.Sprintf("%s-analysis-report.md", subdir)), - filepath.Join(pipelinectxt.SonarAnalysisPath, fmt.Sprintf("%s-issues-report.csv", subdir)), - filepath.Join(pipelinectxt.SonarAnalysisPath, fmt.Sprintf("%s-quality-gate.json", subdir)), - ) - sonarProject := sonar.ProjectKey(ctxt.ODS, subdir+"-") - checkSonarQualityGate(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, sonarProject, true, 
"OK") - } - }, - }, - "build python fastapi app with pre-test script": { - WorkspaceDirMapping: map[string]string{"source": "python-fastapi-sample-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = buildTaskParams(map[string]string{ - "pre-test-script": "pre-test-script.sh", - }) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - - wantFile := "docker/test.txt" - checkFilesExist(t, wsDir, wantFile) - }, - }, - }) -} diff --git a/test/tasks/ods-deploy-helm_external_test.go b/test/tasks/ods-deploy-helm_external_test.go deleted file mode 100644 index 81830958..00000000 --- a/test/tasks/ods-deploy-helm_external_test.go +++ /dev/null @@ -1,170 +0,0 @@ -//go:build external -// +build external - -package tasks - -import ( - "flag" - "fmt" - "path/filepath" - "testing" - "time" - - "github.com/opendevstack/ods-pipeline/internal/command" - "github.com/opendevstack/ods-pipeline/internal/kubernetes" - "github.com/opendevstack/ods-pipeline/internal/projectpath" - "github.com/opendevstack/ods-pipeline/internal/random" - "github.com/opendevstack/ods-pipeline/pkg/artifact" - "github.com/opendevstack/ods-pipeline/pkg/config" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" - "github.com/opendevstack/ods-pipeline/pkg/tasktesting" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// To test deployment to external cluster, you must provide the token for a -// serviceaccount in an externa cluster, and a matching configuration like this: -// -// TODO: make this part of triggers, and supply -// tasks: -// - name: deploy -// taskRef: -// kind: Task -// name: ods-deploy-helm -// params: -// - name: namespace -// value: foobar -// apiServer: https://api.example.openshift.com:443 -// registryHost: 
default-route-openshift-image-registry.apps.example.openshiftapps.com -// -// You do not need to specify "apiCredentialsSecret", it is set automatically to -// the secret created from the token given via -external-cluster-token. -// -// The test will not create or delete any namespaces. It will install a Helm -// release into the specified namespace, and delete the release again after the -// test. The Helm release and related resources are prefixed with the temporary -// workspace directory (e.g. "workspace-476709422") so any clashes even in none- -// empty namespace are very unlikely. Nonetheless, it is always recommended to -// use an empty namespace setup solely for the purpose of testing. -var ( - externalClusterTokenFlag = flag.String("external-cluster-token", "", "Token of serviceaccount in external cluster") - externalClusterConfigFlag = flag.String("external-cluster-config", "", "ods.yaml describing external cluster") -) - -func TestTaskODSDeployHelmExternal(t *testing.T) { - var externalEnv *config.Environment - var imageStream string - runTaskTestCases(t, - "ods-deploy-helm", - []tasktesting.Service{}, - map[string]tasktesting.TestCase{ - "external deployment": { - Timeout: 10 * time.Minute, - WorkspaceDirMapping: map[string]string{"source": "helm-sample-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - - if *externalClusterConfigFlag == "" || *externalClusterTokenFlag == "" { - t.Fatal( - "-external-cluster-token and -external-cluster-config are required to run this test. 
" + - "Use -short to skip this test.", - ) - } - - t.Log("Create token secret for external cluster") - secret, err := kubernetes.CreateSecret(ctxt.Clients.KubernetesClientSet, ctxt.Namespace, &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{Name: "ext"}, - Data: map[string][]byte{ - "token": []byte(*externalClusterTokenFlag), - }, - }) - if err != nil { - t.Fatal(err) - } - - t.Log("Create private key secret for sample app") - createSampleAppPrivateKeySecret(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace) - - t.Log("Read ods.yaml from flag and write into working dir") - externalClusterConfig := *externalClusterConfigFlag - if !filepath.IsAbs(externalClusterConfig) { - externalClusterConfig = filepath.Join(projectpath.Root, externalClusterConfig) - } - o, err := config.ReadFromFile(externalClusterConfig) - if err != nil { - t.Fatal(err) - } - externalEnv := o.Environments[0] - externalEnv.APICredentialsSecret = secret.Name - externalEnv.APIToken = *externalClusterTokenFlag - o.Environments[0] = externalEnv - err = createODSYML(wsDir, o) - if err != nil { - t.Fatal(err) - } - - imageStream = random.PseudoString() - tag := "latest" - fullTag := fmt.Sprintf("localhost:5000/%s/%s:%s", ctxt.Namespace, imageStream, tag) - buildAndPushImageWithLabel(t, ctxt, fullTag, wsDir) - ia := artifact.Image{ - Ref: fmt.Sprintf("kind-registry.kind:5000/%s/%s:%s", ctxt.Namespace, imageStream, tag), - Registry: "kind-registry.kind:5000", - Repository: ctxt.Namespace, - Name: imageStream, - Tag: tag, - Digest: "abc", - } - imageArtifactFilename := fmt.Sprintf("%s.json", imageStream) - err = pipelinectxt.WriteJsonArtifact(ia, filepath.Join(wsDir, pipelinectxt.ImageDigestsPath), imageArtifactFilename) - if err != nil { - t.Fatal(err) - } - - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - t.Log("Check image") - _, _, err := command.Run("skopeo", []string{ - "inspect", - 
fmt.Sprintf("--registry-token=%s", externalEnv.APIToken), - fmt.Sprintf("docker://%s/%s/%s:%s", externalEnv.RegistryHost, ctxt.Namespace, imageStream, "latest"), - }) - if err != nil { - t.Fatal(err) - } - t.Log("Remove Helm release again") - command.Run("helm", []string{ - fmt.Sprintf("--kube-apiserver=%s", externalEnv.APIServer), - fmt.Sprintf("--kube-token=%s", externalEnv.APIToken), - fmt.Sprintf("--namespace=%s", externalEnv.Namespace), - "uninstall", - ctxt.ODS.Component, - }) - }, - }, - }, - ) -} - -// buildAndPushImageWithLabel builds an image and pushes it to the registry. -// The used image tag equals the Git SHA that is being built, so the task -// will pick up the existing image. -// The image is labelled with "tasktestrun=true" so that it is possible to -// verify that the image has not been rebuild in the task. -func buildAndPushImageWithLabel(t *testing.T, ctxt *tasktesting.TaskRunContext, tag string, wsDir string) { - t.Logf("Build image %s ahead of taskrun", tag) - _, stderr, err := command.RunBuffered("docker", []string{ - "build", "--label", "tasktestrun=true", "-t", tag, filepath.Join(wsDir, "docker"), - }) - if err != nil { - t.Fatalf("could not build image: %s, stderr: %s", err, string(stderr)) - } - _, stderr, err = command.RunBuffered("docker", []string{ - "push", tag, - }) - if err != nil { - t.Fatalf("could not push image: %s, stderr: %s", err, string(stderr)) - } -} diff --git a/test/tasks/ods-deploy-helm_test.go b/test/tasks/ods-deploy-helm_test.go deleted file mode 100644 index 70507cbd..00000000 --- a/test/tasks/ods-deploy-helm_test.go +++ /dev/null @@ -1,340 +0,0 @@ -package tasks - -import ( - "context" - "fmt" - "os" - "path/filepath" - "strings" - "testing" - - "github.com/opendevstack/ods-pipeline/internal/command" - "github.com/opendevstack/ods-pipeline/internal/kubernetes" - "github.com/opendevstack/ods-pipeline/internal/projectpath" - "github.com/opendevstack/ods-pipeline/internal/random" - 
"github.com/opendevstack/ods-pipeline/pkg/artifact" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" - "github.com/opendevstack/ods-pipeline/pkg/tasktesting" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - k8s "k8s.io/client-go/kubernetes" - "sigs.k8s.io/yaml" -) - -const ( - localRegistry = "localhost:5000" - kindRegistry = "kind-registry.kind:5000" -) - -type imageImportParams struct { - externalRef string - namespace string - workdir string -} - -func TestTaskODSDeployHelm(t *testing.T) { - var separateReleaseNamespace string - runTaskTestCases(t, - "ods-deploy-helm", - []tasktesting.Service{}, - map[string]tasktesting.TestCase{ - "skips when no namespace is given": { - WorkspaceDirMapping: map[string]string{"source": "helm-sample-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - // no "namespace" param set - }, - WantRunSuccess: true, - }, - "upgrades Helm chart in separate namespace": { - WorkspaceDirMapping: map[string]string{"source": "helm-sample-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - - externalNamespace, cleanupFunc := createReleaseNamespaceOrFatal( - t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, - ) - separateReleaseNamespace = externalNamespace - ctxt.Cleanup = cleanupFunc - ctxt.Params = map[string]string{ - "namespace": externalNamespace, - } - importImage(t, imageImportParams{ - externalRef: "index.docker.io/crccheck/hello-world", - namespace: ctxt.Namespace, - workdir: wsDir, - }) - createSampleAppPrivateKeySecret(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := 
ctxt.Workspaces["source"] - checkFileContentContains( - t, wsDir, - filepath.Join(pipelinectxt.DeploymentsPath, fmt.Sprintf("diff-%s.txt", separateReleaseNamespace)), - "Release was not present in Helm. Diff will show entire contents as new.", - "Deployment (apps) has been added", - "Secret (v1) has been added", - "Service (v1) has been added", - ) - checkFileContentContains( - t, wsDir, - filepath.Join(pipelinectxt.DeploymentsPath, fmt.Sprintf("release-%s.txt", separateReleaseNamespace)), - "Installing it now.", - fmt.Sprintf("NAMESPACE: %s", separateReleaseNamespace), - "STATUS: deployed", - "REVISION: 1", - ) - resourceName := fmt.Sprintf("%s-%s", ctxt.ODS.Component, "helm-sample-app") - _, err := checkService(ctxt.Clients.KubernetesClientSet, separateReleaseNamespace, resourceName) - if err != nil { - t.Fatal(err) - } - _, err = checkDeployment(ctxt.Clients.KubernetesClientSet, separateReleaseNamespace, resourceName) - if err != nil { - t.Fatal(err) - } - - // Verify log output massaging - doNotWantLogMsg := "plugin \"diff\" exited with error" - if strings.Contains(string(ctxt.CollectedLogs), doNotWantLogMsg) { - t.Fatalf("Do not want:\n%s\n\nGot:\n%s", doNotWantLogMsg, string(ctxt.CollectedLogs)) - } - wantLogMsg := "identified at least one change" - if !strings.Contains(string(ctxt.CollectedLogs), wantLogMsg) { - t.Fatalf("Want:\n%s\n\nGot:\n%s", wantLogMsg, string(ctxt.CollectedLogs)) - } - }, - }, - "upgrades Helm chart with dependencies": { - WorkspaceDirMapping: map[string]string{"source": "helm-app-with-dependencies"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = map[string]string{ - "namespace": ctxt.Namespace, - } - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - parentChartResourceName := fmt.Sprintf("%s-%s", ctxt.ODS.Component, "helm-app-with-dependencies") - // 
Parent chart - _, err := checkService(ctxt.Clients.KubernetesClientSet, ctxt.Namespace, parentChartResourceName) - if err != nil { - t.Fatal(err) - } - _, err = checkDeployment(ctxt.Clients.KubernetesClientSet, ctxt.Namespace, parentChartResourceName) - if err != nil { - t.Fatal(err) - } - // Subchart - subChartResourceName := "helm-sample-database" // fixed name due to fullnameOverride - _, err = checkService(ctxt.Clients.KubernetesClientSet, ctxt.Namespace, subChartResourceName) - if err != nil { - t.Fatal(err) - } - d, err := checkDeployment(ctxt.Clients.KubernetesClientSet, ctxt.Namespace, subChartResourceName) - if err != nil { - t.Fatal(err) - } - // Check that Helm value overriding in subchart works - gotEnvValue := d.Spec.Template.Spec.Containers[0].Env[0].Value - wantEnvValue := "tom" // defined in parent (child has value "john") - if gotEnvValue != wantEnvValue { - t.Fatalf("Want ENV username = %s, got: %s", wantEnvValue, gotEnvValue) - } - }, - AdditionalRuns: []tasktesting.TaskRunCase{{ - // inherits funcs from primary task only set explicitly - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - // ctxt still in place from prior run - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wantLogMsg := "No diff detected, skipping helm upgrade" - if !strings.Contains(string(ctxt.CollectedLogs), wantLogMsg) { - t.Fatalf("Want:\n%s\n\nGot:\n%s", wantLogMsg, string(ctxt.CollectedLogs)) - } - }, - }}, - }, - "skips upgrade when diff-only is requested": { - WorkspaceDirMapping: map[string]string{"source": "helm-app-with-dependencies"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - externalNamespace, cleanupFunc := createReleaseNamespaceOrFatal( - t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, - ) - separateReleaseNamespace = externalNamespace - ctxt.Cleanup = 
cleanupFunc - ctxt.Params = map[string]string{ - "namespace": externalNamespace, - "diff-only": "true", - } - importImage(t, imageImportParams{ - externalRef: "index.docker.io/crccheck/hello-world", - namespace: ctxt.Namespace, - workdir: wsDir, - }) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - t.Log("Verify image was not promoted ...") - img := fmt.Sprintf("%s/%s/hello-world", localRegistry, separateReleaseNamespace) - promoted := checkIfImageExists(t, img) - if promoted { - t.Fatalf("Image %s should not have been promoted to %s", img, separateReleaseNamespace) - } - t.Log("Verify service was not deployed ...") - resourceName := fmt.Sprintf("%s-%s", ctxt.ODS.Component, "helm-app-with-dependencies") - _, err := checkService(ctxt.Clients.KubernetesClientSet, separateReleaseNamespace, resourceName) - if err == nil { - t.Fatalf("Service %s should not have been deployed to %s", resourceName, separateReleaseNamespace) - } - t.Log("Verify task skipped upgrade ...") - wantLogMsg := "Only diff was requested, skipping helm upgrade" - if !strings.Contains(string(ctxt.CollectedLogs), wantLogMsg) { - t.Fatalf("Want:\n%s\n\nGot:\n%s", wantLogMsg, string(ctxt.CollectedLogs)) - } - }, - }, - }, - ) -} - -func createSampleAppPrivateKeySecret(t *testing.T, clientset *k8s.Clientset, ctxtNamespace string) { - secret, err := readPrivateKeySecret() - if err != nil { - t.Fatal(err) - } - _, err = kubernetes.CreateSecret(clientset, ctxtNamespace, secret) - if err != nil { - t.Fatal(err) - } -} - -func createReleaseNamespaceOrFatal(t *testing.T, clientset *k8s.Clientset, ctxtNamespace string) (string, func()) { - externalNamespace, err := createReleaseNamespace(clientset, ctxtNamespace) - if err != nil { - t.Fatal(err) - } - return externalNamespace, func() { - if err := clientset.CoreV1().Namespaces().Delete(context.TODO(), externalNamespace, metav1.DeleteOptions{}); err != nil { - t.Errorf("Failed to delete namespace %s: %s", 
externalNamespace, err) - } - } -} - -func createReleaseNamespace(clientset *k8s.Clientset, ctxtNamespace string) (string, error) { - releaseNamespace := random.PseudoString() - kubernetes.CreateNamespace(clientset, releaseNamespace) - _, err := clientset.RbacV1().RoleBindings(releaseNamespace).Create( - context.Background(), - &rbacv1.RoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pipeline-deployer", - Namespace: releaseNamespace, - }, - Subjects: []rbacv1.Subject{ - { - Kind: "ServiceAccount", - Name: "pipeline", - Namespace: ctxtNamespace, - }, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "ClusterRole", - Name: "edit", - }, - }, - metav1.CreateOptions{}) - - return releaseNamespace, err -} - -func writeContextFile(t *testing.T, wsDir, file, content string) { - err := os.WriteFile( - filepath.Join(wsDir, pipelinectxt.BaseDir, file), []byte(content), 0644, - ) - if err != nil { - t.Fatal(err) - } -} - -func checkDeployment(clientset *k8s.Clientset, namespace, name string) (*appsv1.Deployment, error) { - return clientset.AppsV1(). - Deployments(namespace). - Get(context.TODO(), name, metav1.GetOptions{}) -} - -func checkService(clientset *k8s.Clientset, namespace, name string) (*corev1.Service, error) { - return clientset.CoreV1(). - Services(namespace). 
- Get(context.TODO(), name, metav1.GetOptions{}) -} - -func readPrivateKeySecret() (*corev1.Secret, error) { - bytes, err := os.ReadFile(filepath.Join(projectpath.Root, "test/testdata/fixtures/tasks/secret.yaml")) - if err != nil { - return nil, err - } - - var secretSpec corev1.Secret - err = yaml.Unmarshal(bytes, &secretSpec) - if err != nil { - return nil, err - } - return &secretSpec, nil -} - -func importImage(t *testing.T, iip imageImportParams) { - var err error - cmds := [][]string{ - {"pull", iip.externalRef}, - {"tag", iip.externalRef, iip.internalRef(localRegistry)}, - {"push", iip.internalRef(localRegistry)}, - } - for _, args := range cmds { - if err == nil { - _, _, err = command.RunBuffered("docker", args) - } - } - if err != nil { - t.Fatalf("docker cmd failed: %s", err) - } - - err = pipelinectxt.WriteJsonArtifact(artifact.Image{ - Ref: iip.internalRef(kindRegistry), - Registry: kindRegistry, - Repository: iip.namespace, - Name: iip.name(), - Tag: "latest", - Digest: "not needed", - }, filepath.Join(iip.workdir, pipelinectxt.ImageDigestsPath), fmt.Sprintf("%s.json", iip.name())) - if err != nil { - t.Fatalf("failed to write artifact: %s", err) - } - t.Log("Imported image", iip.internalRef(localRegistry)) -} - -func checkIfImageExists(t *testing.T, name string) bool { - t.Helper() - _, _, err := command.RunBuffered("docker", []string{"inspect", name}) - return err == nil -} - -func (iip imageImportParams) name() string { - parts := strings.Split(iip.externalRef, "/") - return parts[2] -} - -func (iip imageImportParams) internalRef(registry string) string { - parts := strings.Split(iip.externalRef, "/") - return fmt.Sprintf("%s/%s/%s", registry, iip.namespace, parts[2]) -} diff --git a/test/tasks/ods-finish_test.go b/test/tasks/ods-finish_test.go deleted file mode 100644 index e2058e71..00000000 --- a/test/tasks/ods-finish_test.go +++ /dev/null @@ -1,190 +0,0 @@ -package tasks - -import ( - "fmt" - "log" - "path/filepath" - "strings" - "testing" - 
"time" - - "github.com/opendevstack/ods-pipeline/pkg/bitbucket" - "github.com/opendevstack/ods-pipeline/pkg/nexus" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" - "github.com/opendevstack/ods-pipeline/pkg/tasktesting" -) - -func TestTaskODSFinish(t *testing.T) { - runTaskTestCases(t, - "ods-finish", - []tasktesting.Service{ - tasktesting.Bitbucket, - tasktesting.Nexus, - }, - map[string]tasktesting.TestCase{ - "set bitbucket build status to failed": { - WorkspaceDirMapping: map[string]string{"source": "hello-world-app-with-artifacts"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupBitbucketRepo( - t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, wsDir, tasktesting.BitbucketProjectKey, *privateCertFlag, - ) - ctxt.Params = map[string]string{ - "pipeline-run-name": "foo", - "aggregate-tasks-status": "None", - } - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - bitbucketClient := tasktesting.BitbucketClientOrFatal(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, *privateCertFlag) - checkBuildStatus(t, bitbucketClient, ctxt.ODS.GitCommitSHA, bitbucket.BuildStatusFailed) - }, - }, - "set bitbucket build status to successful and upload artifacts to Nexus repository": { - WorkspaceDirMapping: map[string]string{"source": "hello-world-app-with-artifacts"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupBitbucketRepo( - t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, wsDir, tasktesting.BitbucketProjectKey, *privateCertFlag, - ) - // Pretend there is alredy a coverage report in Nexus. - // This assures the safeguard is working to avoid duplicate upload. - // TODO: assure the safeguard is actually invoked by checking the logs. 
- t.Log("Uploading coverage artifact to Nexus and writing manifest") - nexusClient := tasktesting.NexusClientOrFatal(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, *privateCertFlag) - if _, err := nexusClient.Upload( - nexus.TestTemporaryRepository, - pipelinectxt.ArtifactGroup(ctxt.ODS, pipelinectxt.CodeCoveragesDir), - filepath.Join(wsDir, pipelinectxt.CodeCoveragesPath, "coverage.out"), - ); err != nil { - t.Fatal(err) - } - am := pipelinectxt.NewArtifactsManifest( - nexus.TestTemporaryRepository, - pipelinectxt.ArtifactInfo{ - Directory: pipelinectxt.CodeCoveragesDir, - Name: "coverage.out", - }, - ) - if err := pipelinectxt.WriteJsonArtifact( - am, - filepath.Join(wsDir, pipelinectxt.ArtifactsPath), - pipelinectxt.ArtifactsManifestFilename, - ); err != nil { - t.Fatal(err) - } - - ctxt.Params = map[string]string{ - "pipeline-run-name": "foo", - "aggregate-tasks-status": "Succeeded", - "artifact-target": nexus.TestTemporaryRepository, - } - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - bitbucketClient := tasktesting.BitbucketClientOrFatal(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, *privateCertFlag) - checkBuildStatus(t, bitbucketClient, ctxt.ODS.GitCommitSHA, bitbucket.BuildStatusSuccessful) - checkArtifactsAreInNexus(t, ctxt, nexus.TestTemporaryRepository) - - wantLogMsg := "Artifact \"coverage.out\" is already present in Nexus repository" - if !strings.Contains(string(ctxt.CollectedLogs), wantLogMsg) { - t.Fatalf("Want:\n%s\n\nGot:\n%s", wantLogMsg, string(ctxt.CollectedLogs)) - } - }, - }, - "stops gracefully when context cannot be read": { - WorkspaceDirMapping: map[string]string{"source": "empty"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - ctxt.Params = map[string]string{ - "pipeline-run-name": "foo", - "aggregate-tasks-status": "Failed", - } - }, - WantRunSuccess: false, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - want := "Unable 
to continue as pipeline context cannot be read" - - if !strings.Contains(string(ctxt.CollectedLogs), want) { - t.Fatalf("Want:\n%s\n\nGot:\n%s", want, string(ctxt.CollectedLogs)) - } - }, - }, - }, - ) -} - -func checkArtifactsAreInNexus(t *testing.T, ctxt *tasktesting.TaskRunContext, targetRepository string) { - - nexusClient := tasktesting.NexusClientOrFatal(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, *privateCertFlag) - - // List of expected artifacts to have been uploaded to Nexus - artifactsMap := map[string][]string{ - pipelinectxt.XUnitReportsDir: {"report.xml"}, - // exclude coverage as we pretend it has been uploaded earlier already - // pipelinectxt.CodeCoveragesDir: {"coverage.out"}, - pipelinectxt.SonarAnalysisDir: {"analysis-report.md", "issues-report.csv"}, - } - - for artifactsSubDir, files := range artifactsMap { - - filesCountInSubDir := len(artifactsMap[artifactsSubDir]) - - // e.g: "/ODSPIPELINETEST/workspace-190880007/935e5229b084dd60d44a5eddd2d023720ec153c1/xunit-reports" - group := pipelinectxt.ArtifactGroup(ctxt.ODS, artifactsSubDir) - - // The test is so fast that, when we reach this line, the artifacts could still being uploaded to Nexus - artifactURLs := waitForArtifacts(t, nexusClient, targetRepository, group, filesCountInSubDir, 5*time.Second) - if len(artifactURLs) != filesCountInSubDir { - t.Fatalf("Got: %d artifacts in subdir %s, want: %d.", len(artifactURLs), artifactsMap[artifactsSubDir], filesCountInSubDir) - } - - for _, file := range files { - - // e.g. "http://localhost:8081/repository/ods-pipelines/ODSPIPELINETEST/workspace-866704509/b1415e831b4f5b24612abf24499663ddbff6babb/xunit-reports/report.xml" - // note that the "group" value already has a leading slash! 
- url := fmt.Sprintf("%s/repository/%s%s/%s", nexusClient.URL(), targetRepository, group, file) - - if !contains(artifactURLs, url) { - t.Fatalf("Artifact %s with URL %+v not found in Nexus under any of the following URLs: %v", file, url, artifactURLs) - } - } - - } -} - -func waitForArtifacts(t *testing.T, nexusClient *nexus.Client, targetRepository, group string, expectedArtifactsCount int, timeout time.Duration) []string { - - start := time.Now().UTC() - elapsed := time.Since(start) - artifactURLs := []string{} - - for elapsed < timeout { - artifactURLs, err := nexusClient.Search(targetRepository, group) - if err != nil { - t.Fatal(err) - } - - if len(artifactURLs) == expectedArtifactsCount { - return artifactURLs - } - - log.Printf("Artifacts are not yet available in Nexus...\n") - time.Sleep(1 * time.Second) - - elapsed = time.Since(start) - } - - log.Printf("Time out reached.\n") - return artifactURLs -} - -// contains checks if a string is present in a slice -func contains(s []string, str string) bool { - for _, v := range s { - if v == str { - return true - } - } - - return false -} diff --git a/test/tasks/ods-package-image_test.go b/test/tasks/ods-package-image_test.go deleted file mode 100644 index 7a3f1707..00000000 --- a/test/tasks/ods-package-image_test.go +++ /dev/null @@ -1,307 +0,0 @@ -package tasks - -import ( - "fmt" - "net/url" - "os" - "path/filepath" - "strings" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/opendevstack/ods-pipeline/internal/command" - "github.com/opendevstack/ods-pipeline/internal/installation" - "github.com/opendevstack/ods-pipeline/pkg/artifact" - "github.com/opendevstack/ods-pipeline/pkg/logging" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" - "github.com/opendevstack/ods-pipeline/pkg/tasktesting" - "golang.org/x/exp/slices" -) - -func TestTaskODSPackageImage(t *testing.T) { - runTaskTestCases(t, - "ods-package-image", - []tasktesting.Service{ - tasktesting.Nexus, - }, - 
map[string]tasktesting.TestCase{ - "task should build image and use nexus args": { - WorkspaceDirMapping: map[string]string{"source": "hello-nexus-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - checkResultingFiles(t, ctxt, wsDir) - checkResultingImageHelloNexus(t, ctxt, wsDir) - }, - }, - "task should build image": { - WorkspaceDirMapping: map[string]string{"source": "hello-world-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - checkResultingFiles(t, ctxt, wsDir) - checkResultingImageHelloWorld(t, ctxt, wsDir) - }, - }, - "task should build image with additional tags": { - WorkspaceDirMapping: map[string]string{"source": "hello-world-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - ctxt.Params = map[string]string{ - "extra-tags": "'latest cool'", - } - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - checkResultingFiles(t, ctxt, wsDir) - checkTagFiles(t, ctxt, wsDir, []string{"latest", "cool"}) - checkTags(t, ctxt, wsDir, []string{ctxt.ODS.GitCommitSHA, "latest", "cool"}) - checkResultingImageHelloWorld(t, ctxt, wsDir) - checkTaggedImageHelloWorld(t, ctxt, wsDir, "latest") - checkTaggedImageHelloWorld(t, ctxt, wsDir, "cool") - }, - }, - "task should reuse existing image": { - WorkspaceDirMapping: map[string]string{"source": 
"hello-world-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - tag := getDockerImageTag(t, ctxt, wsDir) - generateArtifacts(t, ctxt, tag, wsDir) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - checkResultingFiles(t, ctxt, wsDir) - }, - }, - "task should build image with build extra args param": { - WorkspaceDirMapping: map[string]string{"source": "hello-build-extra-args-app"}, - TaskParamsMapping: map[string]string{ - "buildah-build-extra-args": "'--build-arg=firstArg=one --build-arg=secondArg=two'", - "docker-dir": "docker", - }, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupGitRepo(t, ctxt.Namespace, wsDir) - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - checkResultingFiles(t, ctxt, wsDir) - checkResultingImageHelloBuildExtraArgs(t, ctxt, wsDir) - }, - }, - }, - ) -} - -func checkResultingFiles(t *testing.T, ctxt *tasktesting.TaskRunContext, wsDir string) { - wantFiles := []string{ - fmt.Sprintf(".ods/artifacts/image-digests/%s.json", ctxt.ODS.Component), - fmt.Sprintf(".ods/artifacts/sboms/%s.spdx", ctxt.ODS.Component), - } - for _, wf := range wantFiles { - if _, err := os.Stat(filepath.Join(wsDir, wf)); os.IsNotExist(err) { - t.Fatalf("Want %s, but got nothing", wf) - } - } -} - -func checkTagFiles(t *testing.T, ctxt *tasktesting.TaskRunContext, wsDir string, tags []string) { - wantFiles := []string{} - for _, tag := range tags { - wantFiles = append(wantFiles, fmt.Sprintf(".ods/artifacts/image-digests/%s-%s.json", ctxt.ODS.Component, tag)) - } - for _, wf := range wantFiles { - if _, err := os.Stat(filepath.Join(wsDir, wf)); os.IsNotExist(err) { - t.Fatalf("Want %s, 
but got nothing", wf) - } - } -} - -func checkTags(t *testing.T, ctxt *tasktesting.TaskRunContext, wsDir string, expectedTags []string) { - // registry := "kind-registry.kind:5000" - registry := "localhost:5000" - tlsVerify := false - args := []string{ - "inspect", - `--format={{.RepoTags}}`, - fmt.Sprintf("--tls-verify=%v", tlsVerify), - } - imageNsStreamSha := fmt.Sprintf("%s/%s:%s", ctxt.Namespace, ctxt.ODS.Component, ctxt.ODS.GitCommitSHA) - imageRef := fmt.Sprintf("docker://%s/%s", registry, imageNsStreamSha) - args = append(args, imageRef) - - stdout, _, err := command.RunBuffered("skopeo", args) - if err != nil { - t.Fatalf("skopeo inspect %s: %s", fmt.Sprint(args), err) - } - tags, err := parseSkopeoInspectDigestTags(string(stdout)) - if err != nil { - t.Fatalf("parse tags failed: %s", err) - } - for _, expectedTag := range expectedTags { - if !slices.Contains(tags, expectedTag) { - t.Fatalf("Expected tags=%s to be in actual tags=%s", fmt.Sprint(expectedTags), fmt.Sprint(tags)) - } - } -} - -func parseSkopeoInspectDigestTags(out string) ([]string, error) { - t := strings.TrimSpace(out) - if !(strings.HasPrefix(t, "[") && strings.HasSuffix(t, "]")) { - return nil, fmt.Errorf("skopeo inspect: unexpected tag response expecting tags to be in brackets %s", t) - } - t = t[1 : len(t)-1] - // expecting t to have space separated tags. 
- tags := strings.Split(t, " ") - return tags, nil -} - -func runSpecifiedImage(t *testing.T, ctxt *tasktesting.TaskRunContext, wsDir string, image string) string { - stdout, stderr, err := command.RunBuffered("docker", []string{ - "run", "--rm", - image, - }) - if err != nil { - t.Fatalf("could not run built image: %s, stderr: %s", err, string(stderr)) - } - got := strings.TrimSpace(string(stdout)) - return got -} - -func runResultingImage(t *testing.T, ctxt *tasktesting.TaskRunContext, wsDir string) string { - got := runSpecifiedImage(t, ctxt, wsDir, getDockerImageTag(t, ctxt, wsDir)) - return got -} - -func checkResultingImageHelloWorld(t *testing.T, ctxt *tasktesting.TaskRunContext, wsDir string) { - got := runResultingImage(t, ctxt, wsDir) - want := "Hello World" - if got != want { - t.Fatalf("Want %s, but got %s", want, got) - } -} - -func checkTaggedImageHelloWorld(t *testing.T, ctxt *tasktesting.TaskRunContext, wsDir string, tag string) { - image := fmt.Sprintf("localhost:5000/%s/%s:%s", ctxt.Namespace, ctxt.ODS.Component, tag) - got := runSpecifiedImage(t, ctxt, wsDir, image) - want := "Hello World" - if got != want { - t.Fatalf("Want %s, but got %s", want, got) - } -} - -func checkResultingImageHelloNexus(t *testing.T, ctxt *tasktesting.TaskRunContext, wsDir string) { - got := runResultingImage(t, ctxt, wsDir) - gotLines := strings.Split(got, "\n") - - ncc, err := installation.NewNexusClientConfig( - ctxt.Clients.KubernetesClientSet, ctxt.Namespace, &logging.LeveledLogger{Level: logging.LevelDebug}, - ) - if err != nil { - t.Fatalf("could not create Nexus client config: %s", err) - } - - // nexusClient := tasktesting.NexusClientOrFatal(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace) - nexusUrlString := string(ncc.BaseURL) - nexusUrl, err := url.Parse(nexusUrlString) - if err != nil { - t.Fatalf("could not determine nexusUrl from nexusClient: %s", err) - } - - wantUsername := "developer" - if ncc.Username != wantUsername { - t.Fatalf("Want %s, but got 
%s", wantUsername, ncc.Username) - } - - wantSecret := "s3cr3t" - if ncc.Password != wantSecret { - t.Fatalf("Want %s, but got %s", wantSecret, ncc.Password) - } - - want := []string{ - fmt.Sprintf("nexusUrl=%s", nexusUrlString), - fmt.Sprintf("nexusUsername=%s", ncc.Username), - fmt.Sprintf("nexusPassword=%s", ncc.Password), - fmt.Sprintf("nexusAuth=%s:%s", ncc.Username, ncc.Password), - fmt.Sprintf("nexusUrlWithAuth=http://%s:%s@%s", ncc.Username, ncc.Password, nexusUrl.Host), - fmt.Sprintf("nexusHost=%s", nexusUrl.Host), - } - if diff := cmp.Diff(want, gotLines); diff != "" { - t.Fatalf("context mismatch (-want +got):\n%s", diff) - } -} - -func checkResultingImageHelloBuildExtraArgs(t *testing.T, ctxt *tasktesting.TaskRunContext, wsDir string) { - got := runResultingImage(t, ctxt, wsDir) - gotLines := strings.Split(got, "\n") - - want := []string{ - fmt.Sprintf("firstArg=%s", "one"), - fmt.Sprintf("secondArg=%s", "two"), - } - if diff := cmp.Diff(want, gotLines); diff != "" { - t.Fatalf("context mismatch (-want +got):\n%s", diff) - } -} - -func getDockerImageTag(t *testing.T, ctxt *tasktesting.TaskRunContext, wsDir string) string { - sha, err := getTrimmedFileContent(filepath.Join(wsDir, ".ods/git-commit-sha")) - if err != nil { - t.Fatalf("could not read git-commit-sha: %s", err) - } - return fmt.Sprintf("localhost:5000/%s/%s:%s", ctxt.Namespace, ctxt.ODS.Component, sha) -} - -func generateArtifacts(t *testing.T, ctxt *tasktesting.TaskRunContext, tag string, wsDir string) { - t.Logf("Generating artifacts for image %s", tag) - generateImageArtifact(t, ctxt, tag, wsDir) - generateImageSBOMArtifact(t, ctxt, wsDir) -} - -func generateImageArtifact(t *testing.T, ctxt *tasktesting.TaskRunContext, tag string, wsDir string) { - t.Logf("Generating image artifact") - sha, err := getTrimmedFileContent(filepath.Join(wsDir, ".ods/git-commit-sha")) - if err != nil { - t.Fatalf("could not read git-commit-sha: %s", err) - } - ia := artifact.Image{ - Ref: tag, - Registry: 
"kind-registry.kind:5000", - Repository: ctxt.Namespace, - Name: ctxt.ODS.Component, - Tag: sha, - Digest: "abc", - } - imageArtifactFilename := fmt.Sprintf("%s.json", ctxt.ODS.Component) - err = pipelinectxt.WriteJsonArtifact(ia, filepath.Join(wsDir, pipelinectxt.ImageDigestsPath), imageArtifactFilename) - if err != nil { - t.Fatalf("could not create image artifact: %s", err) - } -} - -func generateImageSBOMArtifact(t *testing.T, ctxt *tasktesting.TaskRunContext, wsDir string) { - t.Logf("Generating image SBOM artifact") - artifactsDir := filepath.Join(wsDir, pipelinectxt.SBOMsPath) - sbomArtifactFilename := fmt.Sprintf("%s.%s", ctxt.ODS.Component, pipelinectxt.SBOMsFormat) - err := os.MkdirAll(artifactsDir, 0755) - if err != nil { - t.Fatalf("could not create %s: %s", artifactsDir, err) - } - _, err = os.Create(filepath.Join(artifactsDir, sbomArtifactFilename)) - if err != nil { - t.Fatalf("could not create image SBOM artifact: %s", err) - } -} diff --git a/test/tasks/ods-start_test.go b/test/tasks/ods-start_test.go deleted file mode 100644 index c4d55fd7..00000000 --- a/test/tasks/ods-start_test.go +++ /dev/null @@ -1,246 +0,0 @@ -package tasks - -import ( - "os" - "path/filepath" - "strings" - "testing" - - "github.com/opendevstack/ods-pipeline/internal/directory" - "github.com/opendevstack/ods-pipeline/internal/projectpath" - "github.com/opendevstack/ods-pipeline/pkg/bitbucket" - "github.com/opendevstack/ods-pipeline/pkg/config" - "github.com/opendevstack/ods-pipeline/pkg/nexus" - "github.com/opendevstack/ods-pipeline/pkg/pipelinectxt" - "github.com/opendevstack/ods-pipeline/pkg/tasktesting" -) - -func TestTaskODSStart(t *testing.T) { - var subrepoContext *pipelinectxt.ODSContext - var lfsFilename string - var lfsFileHash [32]byte - runTaskTestCases(t, - "ods-start", - []tasktesting.Service{ - tasktesting.Bitbucket, - tasktesting.Nexus, - }, - map[string]tasktesting.TestCase{ - "clones repo @ branch": { - WorkspaceDirMapping: map[string]string{"source": 
"hello-world-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupBitbucketRepo( - t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, wsDir, tasktesting.BitbucketProjectKey, *privateCertFlag, - ) - ctxt.Params = map[string]string{ - "url": ctxt.ODS.GitURL, - "git-full-ref": "refs/heads/master", - "project": ctxt.ODS.Project, - "pipeline-run-name": "foo", - } - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - - checkODSContext(t, wsDir, ctxt.ODS) - checkFilesExist(t, wsDir, filepath.Join(pipelinectxt.ArtifactsPath, pipelinectxt.ArtifactsManifestFilename)) - - bitbucketClient := tasktesting.BitbucketClientOrFatal(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, *privateCertFlag) - checkBuildStatus(t, bitbucketClient, ctxt.ODS.GitCommitSHA, bitbucket.BuildStatusInProgress) - }, - }, - "clones repo @ tag": { - WorkspaceDirMapping: map[string]string{"source": "hello-world-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupBitbucketRepo( - t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, wsDir, tasktesting.BitbucketProjectKey, *privateCertFlag, - ) - tasktesting.UpdateBitbucketRepoWithTagOrFatal(t, ctxt.ODS, wsDir, "v1.0.0") - ctxt.ODS.GitRef = "v1.0.0" - ctxt.ODS.GitFullRef = "refs/tags/v1.0.0" - ctxt.Params = map[string]string{ - "url": ctxt.ODS.GitURL, - "git-full-ref": ctxt.ODS.GitFullRef, - "project": ctxt.ODS.Project, - "pipeline-run-name": "foo", - } - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - checkODSContext(t, wsDir, ctxt.ODS) - }, - }, - "clones repo and configured subrepos": { - WorkspaceDirMapping: map[string]string{"source": "hello-world-app"}, - PreRunFunc: func(t *testing.T, ctxt 
*tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - // Setup sub-component - tempDir, err := directory.CopyToTempDir( - filepath.Join(projectpath.Root, "test", tasktesting.TestdataWorkspacesPath, "hello-world-app"), - wsDir, - "subcomponent-", - ) - if err != nil { - t.Fatal(err) - } - subCtxt := tasktesting.SetupBitbucketRepo( - t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, tempDir, tasktesting.BitbucketProjectKey, *privateCertFlag, - ) - subrepoContext = subCtxt - err = os.RemoveAll(tempDir) - if err != nil { - t.Fatal(err) - } - err = createStartODSYMLWithSubrepo(wsDir, filepath.Base(tempDir)) - if err != nil { - t.Fatal(err) - } - ctxt.ODS = tasktesting.SetupBitbucketRepo( - t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, wsDir, tasktesting.BitbucketProjectKey, *privateCertFlag, - ) - - nexusClient := tasktesting.NexusClientOrFatal(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, *privateCertFlag) - artifactsBaseDir := filepath.Join(projectpath.Root, "test", tasktesting.TestdataWorkspacesPath, "hello-world-app-with-artifacts", pipelinectxt.ArtifactsPath) - _, err = nexusClient.Upload( - nexus.TestTemporaryRepository, - pipelinectxt.ArtifactGroup(subCtxt, pipelinectxt.XUnitReportsDir), - filepath.Join(artifactsBaseDir, pipelinectxt.XUnitReportsDir, "report.xml"), - ) - if err != nil { - t.Fatal(err) - } - _, err = nexusClient.Upload( - nexus.TestTemporaryRepository, - pipelinectxt.ArtifactGroup(subCtxt, pipelinectxt.PipelineRunsDir), - filepath.Join(artifactsBaseDir, pipelinectxt.PipelineRunsDir, "foo-zh9gt0.json"), - ) - if err != nil { - t.Fatal(err) - } - - ctxt.Params = map[string]string{ - "url": ctxt.ODS.GitURL, - "git-full-ref": "refs/heads/master", - "project": ctxt.ODS.Project, - "pipeline-run-name": "foo", - "artifact-source": nexus.TestTemporaryRepository, - } - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - - // Check .ods 
directory contents of main repo - checkODSContext(t, wsDir, ctxt.ODS) - checkFilesExist(t, wsDir, filepath.Join(pipelinectxt.ArtifactsPath, pipelinectxt.ArtifactsManifestFilename)) - - // Check .ods directory contents of subrepo - subrepoDir := filepath.Join(wsDir, pipelinectxt.SubreposPath, subrepoContext.Repository) - checkODSContext(t, subrepoDir, subrepoContext) - - // Check artifacts are downloaded properly in subrepo - sourceArtifactsBaseDir := filepath.Join(projectpath.Root, "test", tasktesting.TestdataWorkspacesPath, "hello-world-app-with-artifacts", pipelinectxt.ArtifactsPath) - xUnitFileSource := "xunit-reports/report.xml" - xUnitContent := trimmedFileContentOrFatal(t, filepath.Join(sourceArtifactsBaseDir, xUnitFileSource)) - destinationArtifactsBaseDir := filepath.Join(subrepoDir, pipelinectxt.ArtifactsPath) - checkFileContent(t, destinationArtifactsBaseDir, xUnitFileSource, xUnitContent) - checkFilesExist(t, destinationArtifactsBaseDir, pipelinectxt.ArtifactsManifestFilename) - - bitbucketClient := tasktesting.BitbucketClientOrFatal(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, *privateCertFlag) - checkBuildStatus(t, bitbucketClient, ctxt.ODS.GitCommitSHA, bitbucket.BuildStatusInProgress) - - }, - }, - "fails when subrepo has no successful pipeline run": { - WorkspaceDirMapping: map[string]string{"source": "hello-world-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - // Setup sub-component - tempDir, err := directory.CopyToTempDir( - filepath.Join(projectpath.Root, "test", tasktesting.TestdataWorkspacesPath, "hello-world-app"), - wsDir, - "subcomponent-", - ) - if err != nil { - t.Fatal(err) - } - tasktesting.SetupBitbucketRepo( - t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, tempDir, tasktesting.BitbucketProjectKey, *privateCertFlag, - ) - err = os.RemoveAll(tempDir) - if err != nil { - t.Fatal(err) - } - err = createStartODSYMLWithSubrepo(wsDir, filepath.Base(tempDir)) - 
if err != nil { - t.Fatal(err) - } - ctxt.ODS = tasktesting.SetupBitbucketRepo( - t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, wsDir, tasktesting.BitbucketProjectKey, *privateCertFlag, - ) - ctxt.Params = map[string]string{ - "url": ctxt.ODS.GitURL, - "git-full-ref": "refs/heads/master", - "project": ctxt.ODS.Project, - "pipeline-run-name": "foo", - "artifact-source": "empty-repo", - } - }, - WantRunSuccess: false, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - want := "Pipeline runs with subrepos require a successful pipeline run artifact " + - "for all checked out subrepo commits, however no such artifact was found" - - if !strings.Contains(string(ctxt.CollectedLogs), want) { - t.Fatalf("Want:\n%s\n\nGot:\n%s", want, string(ctxt.CollectedLogs)) - } - }, - }, - "handles git LFS extension": { - WorkspaceDirMapping: map[string]string{"source": "hello-world-app"}, - PreRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - ctxt.ODS = tasktesting.SetupBitbucketRepo( - t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, wsDir, tasktesting.BitbucketProjectKey, *privateCertFlag, - ) - tasktesting.EnableLfsOnBitbucketRepoOrFatal(t, filepath.Base(wsDir), tasktesting.BitbucketProjectKey) - lfsFilename = "lfspicture.jpg" - lfsFileHash = tasktesting.UpdateBitbucketRepoWithLfsOrFatal(t, ctxt.ODS, wsDir, tasktesting.BitbucketProjectKey, lfsFilename) - - ctxt.Params = map[string]string{ - "url": ctxt.ODS.GitURL, - "git-full-ref": "refs/heads/master", - "project": ctxt.ODS.Project, - "pipeline-run-name": "foo", - } - }, - WantRunSuccess: true, - PostRunFunc: func(t *testing.T, ctxt *tasktesting.TaskRunContext) { - wsDir := ctxt.Workspaces["source"] - - checkODSContext(t, wsDir, ctxt.ODS) - - bitbucketClient := tasktesting.BitbucketClientOrFatal(t, ctxt.Clients.KubernetesClientSet, ctxt.Namespace, *privateCertFlag) - checkBuildStatus(t, bitbucketClient, ctxt.ODS.GitCommitSHA, 
bitbucket.BuildStatusInProgress) - - checkFileHash(t, wsDir, lfsFilename, lfsFileHash) - }, - }, - }, - ) -} - -func createStartODSYMLWithSubrepo(wsDir, repo string) error { - o := &config.ODS{ - Repositories: []config.Repository{ - { - Name: repo, - }, - }, - } - return createODSYML(wsDir, o) -}