diff --git a/.github/workflows/e2e-tests.yaml b/.github/workflows/e2e-tests.yaml
index f91fef27..82d48bf1 100644
--- a/.github/workflows/e2e-tests.yaml
+++ b/.github/workflows/e2e-tests.yaml
@@ -18,7 +18,6 @@ on:
 concurrency: e2e-cluster
 env:
   AWS_REGION : "us-east-1"
-  KUBECONFIG: "/tmp/kubeconfig"
   COMMIT_ID: ${{ github.event_name == 'push' && github.sha || github.event.pull_request.head.sha }}
   TMP_IMAGE_NAME: "s3-csi-driver-tmp"
   PROMOTED_IMAGE_NAME: "s3-csi-driver"
@@ -26,6 +25,9 @@ jobs:
   build:
     # this is to prevent the job to run at forked projects
     if: github.repository == 'awslabs/mountpoint-s3-csi-driver'
+    strategy:
+      matrix:
+        cluster-type: ["kops", "eksctl"]
     runs-on: ubuntu-latest
     permissions:
       id-token: write
@@ -60,18 +62,46 @@ jobs:
           export TAG=${{ env.COMMIT_ID }}
           make build_image
           make push_image
+      - name: Install tools
+        run: |
+          export ACTION=install_tools
+          tests/e2e-kubernetes/run.sh
+      - name: Create cluster
+        run: |
+          export ACTION=create_cluster
+          export AWS_REGION=${{ env.AWS_REGION }}
+          export CLUSTER_TYPE=${{ matrix.cluster-type }}
+          tests/e2e-kubernetes/run.sh
       - name: Install the driver
-        env:
-          REGISTRY: ${{ steps.login-ecr.outputs.registry }}
-          IMAGE_NAME: ${{ env.TMP_IMAGE_NAME }}
         run: |
-          export EKS_REGION=${{ env.AWS_REGION }}
-          export EKS_CLUSTER_NAME=s3-csi-cluster
-          export KUBECONFIG=${{ env.KUBECONFIG }}
+          export ACTION=install_driver
+          export AWS_REGION=${{ env.AWS_REGION }}
+          export CLUSTER_TYPE=${{ matrix.cluster-type }}
+          export IMAGE_NAME=${{ env.TMP_IMAGE_NAME }}
           export TAG=${{ env.COMMIT_ID }}
-          tests/e2e-kubernetes/install.sh
+          tests/e2e-kubernetes/run.sh
       - name: Run E2E Tests
-        run: make e2e E2E_KUBECONFIG=${{ env.KUBECONFIG }} E2E_COMMIT_ID=${{ env.COMMIT_ID }}
+        run: |
+          cd tests/e2e-kubernetes
+          export ACTION=run_tests
+          export AWS_REGION=${{ env.AWS_REGION }}
+          export CLUSTER_TYPE=${{ matrix.cluster-type }}
+          export TAG=${{ env.COMMIT_ID }}
+          ./run.sh
+      - name: Uninstall the driver
+        if: always()
+        run: |
+          export ACTION=uninstall_driver
+          export AWS_REGION=${{ env.AWS_REGION }}
+          export CLUSTER_TYPE=${{ matrix.cluster-type }}
+          tests/e2e-kubernetes/run.sh
+      - name: Delete cluster
+        if: always()
+        run: |
+          export ACTION=delete_cluster
+          export AWS_REGION=${{ env.AWS_REGION }}
+          export CLUSTER_TYPE=${{ matrix.cluster-type }}
+          tests/e2e-kubernetes/run.sh
       - name: Promote image for release branch
         if: ${{ startsWith(github.ref_name, 'release') }}
         env:
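Every matrix leg above drives the same `tests/e2e-kubernetes/run.sh` entry point, selected via the `ACTION` variable. A rough local replay of one leg might look like the following (the tag, image name and region are illustrative; the updated README further down documents the same flow):

```bash
# Hypothetical local replay of the kops matrix leg; TAG/IMAGE_NAME/region are examples only.
export AWS_REGION=us-east-1 CLUSTER_TYPE=kops TAG=local-dev IMAGE_NAME=s3-csi-driver-tmp
ACTION=install_tools    tests/e2e-kubernetes/run.sh
ACTION=create_cluster   tests/e2e-kubernetes/run.sh
ACTION=install_driver   tests/e2e-kubernetes/run.sh
(cd tests/e2e-kubernetes && ACTION=run_tests ./run.sh)
ACTION=uninstall_driver tests/e2e-kubernetes/run.sh   # mirrors the always()-guarded cleanup steps
ACTION=delete_cluster   tests/e2e-kubernetes/run.sh
```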
diff --git a/Makefile b/Makefile
index 90fbab25..a7a3f167 100644
--- a/Makefile
+++ b/Makefile
@@ -64,6 +64,10 @@ test:
 	# skipping controller test cases because we don't implement controller for static provisioning, this is a known limitation of sanity testing package: https://github.com/kubernetes-csi/csi-test/issues/214
 	go test -v ./tests/sanity/... -ginkgo.skip="ControllerGetCapabilities" -ginkgo.skip="ValidateVolumeCapabilities"
 
+.PHONY: fmt
+fmt:
+	go fmt ./...
+
 .PHONY: e2e
 e2e:
 	pushd tests/e2e-kubernetes; \
@@ -72,10 +76,6 @@ e2e:
 	popd; \
 	exit $$EXIT_CODE
 
-.PHONY: fmt
-fmt:
-	go fmt ./...
-
 .PHONY: check_style
 check_style:
 	test -z "$$(gofmt -d . | tee /dev/stderr)"
diff --git a/examples/kubernetes/static_provisioning/static_provisioning.yaml b/examples/kubernetes/static_provisioning/static_provisioning.yaml
index d8788ce5..8a43ce94 100644
--- a/examples/kubernetes/static_provisioning/static_provisioning.yaml
+++ b/examples/kubernetes/static_provisioning/static_provisioning.yaml
@@ -10,8 +10,6 @@ spec:
   mountOptions:
     - allow-delete
     - region eu-west-1
-    - uid 1001
-    - allow-other
   csi:
     driver: s3.csi.aws.com # required
     volumeHandle: s3-csi-driver # bucket name, required
diff --git a/tests/e2e-kubernetes/.gitignore b/tests/e2e-kubernetes/.gitignore
new file mode 100644
index 00000000..72f2d097
--- /dev/null
+++ b/tests/e2e-kubernetes/.gitignore
@@ -0,0 +1 @@
+csi-test-artifacts/*
diff --git a/tests/e2e-kubernetes/README.md b/tests/e2e-kubernetes/README.md
index 6c5799b9..33d35c96 100644
--- a/tests/e2e-kubernetes/README.md
+++ b/tests/e2e-kubernetes/README.md
@@ -1,13 +1,52 @@
-## Usage
+# Usage
+## Prerequisites
+AWS credentials in environment variables, for an identity with the following policies attached:
+```
+AmazonEC2FullAccess
+AmazonRoute53FullAccess
+AmazonS3FullAccess
+IAMFullAccess
+AmazonVPCFullAccess
+AmazonSQSFullAccess
+AmazonEventBridgeFullAccess
+AmazonSSMReadOnlyAccess
+```
+
+## Setting up the environment
+All of the following commands are expected to be executed from the repo root:
+
+```bash
+ACTION=install_tools tests/e2e-kubernetes/run.sh
+
+ACTION=create_cluster AWS_REGION=us-east-1 CLUSTER_TYPE=kops tests/e2e-kubernetes/run.sh # set KOPS_STATE_FILE to your own bucket when running locally
+
+ACTION=install_driver AWS_REGION=us-east-1 CLUSTER_TYPE=kops IMAGE_NAME=s3-csi-driver TAG=v0.1.0 tests/e2e-kubernetes/run.sh
+
+ACTION=uninstall_driver AWS_REGION=us-east-1 CLUSTER_TYPE=kops tests/e2e-kubernetes/run.sh
+
+ACTION=delete_cluster AWS_REGION=us-east-1 CLUSTER_TYPE=kops tests/e2e-kubernetes/run.sh
+```
+
+## Running tests
+### On a cluster created by run.sh
+The `run_tests` action is expected to be executed from the tests/e2e-kubernetes directory:
+```bash
+pushd tests/e2e-kubernetes
+ACTION=run_tests AWS_REGION=us-east-1 CLUSTER_TYPE=kops TAG=v0.1.0 ./run.sh
+popd
+```
+
+### On an existing cluster
 From repository root:
 ```
 make e2e E2E_KUBECONFIG=~/.kube/config E2E_REGION=eu-west-1
 ```
+> E2E_REGION specifies where the test bucket is created (it should match the region the cluster runs in)
 
-## Prerequisites
+#### Prerequisites
 - existing k8s cluster (e.g. EKS)
 - `kubectl` in $PATH
 - `kubeconfig` setting up access to k8s cluster
 - driver deployed in the cluster
-- aws credentials with access to s3 (create/delete buckets, read/write/list objects)
+- aws credentials with access to s3 (create/delete buckets, read/write/list objects)
\ No newline at end of file
diff --git a/tests/e2e-kubernetes/eksctl.sh b/tests/e2e-kubernetes/eksctl.sh
new file mode 100644
index 00000000..5c9fff17
--- /dev/null
+++ b/tests/e2e-kubernetes/eksctl.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+set -euox pipefail
+
+# does not actually create a cluster yet; only fetches kubeconfig for the pre-created one
+function eksctl_create_cluster() {
+  EKS_CLUSTER_NAME=${1}
+  EKS_REGION=${2}
+  KUBECONFIG=${3}
+  aws eks update-kubeconfig --region ${EKS_REGION} --name ${EKS_CLUSTER_NAME} --kubeconfig=${KUBECONFIG}
+}
+
+function eksctl_delete_cluster() {
+  echo "eksctl is still using pre-created cluster"
+}
diff --git a/tests/e2e-kubernetes/helm.sh b/tests/e2e-kubernetes/helm.sh
new file mode 100644
index 00000000..739fab1e
--- /dev/null
+++ b/tests/e2e-kubernetes/helm.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+set -euox pipefail
+
+function helm_install() {
+  INSTALL_PATH=${1}
+  if [[ ! -e ${INSTALL_PATH}/helm ]]; then
+    curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3
+    chmod 700 get_helm.sh
+    export USE_SUDO=false
+    export HELM_INSTALL_DIR=${INSTALL_PATH}
+    ./get_helm.sh
+    rm get_helm.sh
+  fi
+}
+
+function helm_uninstall_driver() {
+  HELM_BIN=${1}
+  KUBECTL_BIN=${2}
+  RELEASE_NAME=${3}
+  KUBECONFIG=${4}
+  if [[ $($HELM_BIN list -A --kubeconfig $KUBECONFIG | grep $RELEASE_NAME) == *deployed* ]]; then
+    $HELM_BIN uninstall $RELEASE_NAME --namespace kube-system --kubeconfig $KUBECONFIG
+    $KUBECTL_BIN wait --for=delete pod --selector="app=s3-csi-node" -n kube-system --timeout=60s --kubeconfig $KUBECONFIG
+  else
+    echo "driver does not seem to be installed"
+  fi
+  $KUBECTL_BIN get pods -A --kubeconfig $KUBECONFIG
+  $KUBECTL_BIN get CSIDriver --kubeconfig $KUBECONFIG
+}
+
+function helm_install_driver() {
+  HELM_BIN=${1}
+  KUBECTL_BIN=${2}
+  RELEASE_NAME=${3}
+  REPOSITORY=${4}
+  TAG=${5}
+  KUBECONFIG=${6}
+  helm_uninstall_driver \
+    "$HELM_BIN" \
+    "$KUBECTL_BIN" \
+    "$RELEASE_NAME" \
+    "$KUBECONFIG"
+  # temporary workaround to make eksctl work with the pre-created cluster
+  SA_CREATE=true
+  if [[ "${KUBECONFIG}" == *"s3-csi-cluster.kubeconfig"* ]]; then
+    SA_CREATE=false
+  fi
+  $HELM_BIN upgrade --install $RELEASE_NAME --namespace kube-system ./charts/aws-s3-csi-driver --values \
+    ./charts/aws-s3-csi-driver/values.yaml \
+    --set image.repository=${REPOSITORY} \
+    --set image.tag=${TAG} \
+    --set image.pullPolicy=Always \
+    --set node.serviceAccount.create=${SA_CREATE} \
+    --kubeconfig ${KUBECONFIG}
+  $KUBECTL_BIN rollout status daemonset s3-csi-node -n kube-system --timeout=60s --kubeconfig $KUBECONFIG
+  $KUBECTL_BIN get pods -A --kubeconfig $KUBECONFIG
+  echo "s3-csi-node-image: $($KUBECTL_BIN get daemonset s3-csi-node -n kube-system -o jsonpath="{$.spec.template.spec.containers[:1].image}" --kubeconfig $KUBECONFIG)"
+}
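`helm_install_driver` takes its inputs positionally, so argument order matters. A minimal sketch of a standalone call, with placeholder binary paths, registry and tag (run.sh below supplies the real values):

```bash
# Illustrative invocation; the registry, tag and kubeconfig path are placeholders.
source tests/e2e-kubernetes/helm.sh
helm_install_driver \
  "$HOME/bin/helm" \
  /usr/local/bin/kubectl \
  mountpoint-s3-csi-driver \
  111122223333.dkr.ecr.us-east-1.amazonaws.com/s3-csi-driver-tmp \
  v0.0.0-test \
  "$HOME/.kube/e2e.kubeconfig"
```

Note that the chart path (`./charts/aws-s3-csi-driver`) is relative, so the function assumes the repository root as the working directory.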
diff --git a/tests/e2e-kubernetes/install.sh b/tests/e2e-kubernetes/install.sh
deleted file mode 100755
index 5b930649..00000000
--- a/tests/e2e-kubernetes/install.sh
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/bin/bash
-
-set -euo pipefail
-
-export INSTALL_PATH=/usr/local/bin
-
-function kubectl_install() {
-  curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
-  curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256"
-  echo "$(cat kubectl.sha256) kubectl" | sha256sum --check
-  sudo install -o root -g root -m 0755 kubectl ${INSTALL_PATH}/kubectl
-}
-
-function helm_install() {
-  if [[ ! -e ${INSTALL_PATH}/helm ]]; then
-    curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3
-    chmod 700 get_helm.sh
-    export USE_SUDO=false
-    export HELM_INSTALL_DIR=${INSTALL_PATH}
-    ./get_helm.sh
-    rm get_helm.sh
-  fi
-}
-
-function setup_kubeconfig() {
-  aws eks update-kubeconfig --region ${EKS_REGION} --name ${EKS_CLUSTER_NAME} --kubeconfig=${KUBECONFIG}
-}
-
-function ensure_driver_not_installed() {
-  if [[ $(helm list -A | grep aws-s3-csi-driver) == *deployed* ]]; then
-    helm uninstall aws-s3-csi-driver --namespace kube-system
-    sleep 10 # nice to have: a better way to wait for driver removed
-  fi
-  kubectl get pods -A
-  kubectl get CSIDriver
-}
-
-function install_driver() {
-  helm upgrade --install aws-s3-csi-driver --namespace kube-system ./charts/aws-s3-csi-driver --values \
-    ./charts/aws-s3-csi-driver/values.yaml \
-    --set image.repository=${REGISTRY}/${IMAGE_NAME} \
-    --set image.tag=${TAG} \
-    --set image.pullPolicy=Always
-  kubectl rollout status daemonset s3-csi-node -n kube-system --timeout=60s
-  kubectl get pods -A
-  echo "s3-csi-node-image: $(kubectl get daemonset s3-csi-node -n kube-system -o jsonpath="{$.spec.template.spec.containers[:1].image}")"
-}
-
-kubectl_install
-helm_install
-setup_kubeconfig
-ensure_driver_not_installed
-install_driver
diff --git a/tests/e2e-kubernetes/kops-patch-node.yaml b/tests/e2e-kubernetes/kops-patch-node.yaml
new file mode 100644
index 00000000..6ed0a458
--- /dev/null
+++ b/tests/e2e-kubernetes/kops-patch-node.yaml
@@ -0,0 +1,3 @@
+spec:
+  instanceMetadata:
+    httpPutResponseHopLimit: 3
\ No newline at end of file
diff --git a/tests/e2e-kubernetes/kops-patch.yaml b/tests/e2e-kubernetes/kops-patch.yaml
new file mode 100644
index 00000000..41f1d4cb
--- /dev/null
+++ b/tests/e2e-kubernetes/kops-patch.yaml
@@ -0,0 +1,12 @@
+spec:
+  additionalPolicies:
+    node: |
+      [
+        {
+          "Effect": "Allow",
+          "Action": [
+            "s3:*"
+          ],
+          "Resource": "*"
+        }
+      ]
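These two patch files are merged into the manifests emitted by `kops create cluster --dry-run -o yaml`, using `kubectl patch --local` (see `kops_patch_cluster_file` in kops.sh below). A hand-run sketch of the same merge, assuming a hypothetical `cluster.yaml` containing a single Cluster manifest from such a dry run:

```bash
# Illustrative: apply the node-policy patch to a dry-run Cluster manifest locally.
kubectl patch -f cluster.yaml --local --type merge \
  --patch "$(cat tests/e2e-kubernetes/kops-patch.yaml)" -o yaml > cluster-patched.yaml
```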
diff --git a/tests/e2e-kubernetes/kops.sh b/tests/e2e-kubernetes/kops.sh
new file mode 100755
index 00000000..2271b5ca
--- /dev/null
+++ b/tests/e2e-kubernetes/kops.sh
@@ -0,0 +1,140 @@
+#!/bin/bash
+
+set -euox pipefail
+
+OS_ARCH=$(go env GOOS)-amd64
+
+function kops_install() {
+  INSTALL_PATH=${1}
+  KOPS_VERSION=${2}
+  if [[ -e "${INSTALL_PATH}"/kops ]]; then
+    INSTALLED_KOPS_VERSION=$("${INSTALL_PATH}"/kops version)
+    if [[ "$INSTALLED_KOPS_VERSION" == *"$KOPS_VERSION"* ]]; then
+      echo "KOPS $INSTALLED_KOPS_VERSION already installed!"
+      return
+    fi
+  fi
+  KOPS_DOWNLOAD_URL=https://github.com/kubernetes/kops/releases/download/v${KOPS_VERSION}/kops-${OS_ARCH}
+  curl -L -X GET "${KOPS_DOWNLOAD_URL}" -o "${INSTALL_PATH}"/kops
+  chmod +x "${INSTALL_PATH}"/kops
+}
+
+function kops_create_cluster() {
+  CLUSTER_NAME=${1}
+  BIN=${2}
+  ZONES=${3}
+  NODE_COUNT=${4}
+  INSTANCE_TYPE=${5}
+  AMI_ID=${6}
+  K8S_VERSION=${7}
+  CLUSTER_FILE=${8}
+  KUBECONFIG=${9}
+  KOPS_PATCH_FILE=${10}
+  KOPS_PATCH_NODE_FILE=${11}
+  KOPS_STATE_FILE=${12}
+
+  if kops_cluster_exists "${CLUSTER_NAME}" "${BIN}" "${KOPS_STATE_FILE}"; then
+    kops_delete_cluster "$BIN" "$CLUSTER_NAME" "$KOPS_STATE_FILE"
+  fi
+
+  ${BIN} create cluster --state "${KOPS_STATE_FILE}" \
+    --zones "${ZONES}" \
+    --node-count="${NODE_COUNT}" \
+    --node-size="${INSTANCE_TYPE}" \
+    --image="${AMI_ID}" \
+    --kubernetes-version="${K8S_VERSION}" \
+    --dry-run \
+    -o yaml \
+    "${CLUSTER_NAME}" > "${CLUSTER_FILE}"
+
+  kops_patch_cluster_file "$CLUSTER_FILE" "$KOPS_PATCH_FILE" "Cluster" ""
+  kops_patch_cluster_file "$CLUSTER_FILE" "$KOPS_PATCH_NODE_FILE" "InstanceGroup" "Node"
+
+  ${BIN} create --state "${KOPS_STATE_FILE}" -f "${CLUSTER_FILE}"
+  ${BIN} update cluster --state "${KOPS_STATE_FILE}" "${CLUSTER_NAME}" --yes
+  ${BIN} export kubecfg --state "${KOPS_STATE_FILE}" "${CLUSTER_NAME}" --admin --kubeconfig "${KUBECONFIG}"
+  ${BIN} validate cluster --state "${KOPS_STATE_FILE}" --wait 10m --kubeconfig "${KUBECONFIG}"
+}
+
+function kops_cluster_exists() {
+  CLUSTER_NAME=${1}
+  BIN=${2}
+  KOPS_STATE_FILE=${3}
+  set +e
+  if ${BIN} get cluster --state "${KOPS_STATE_FILE}" "${CLUSTER_NAME}"; then
+    set -e
+    return 0
+  else
+    set -e
+    return 1
+  fi
+}
+
+function kops_delete_cluster() {
+  BIN=${1}
+  CLUSTER_NAME=${2}
+  KOPS_STATE_FILE=${3}
+  echo "Deleting cluster ${CLUSTER_NAME}"
+  ${BIN} delete cluster --name "${CLUSTER_NAME}" --state "${KOPS_STATE_FILE}" --yes
+}
+
+# TODO switch this to python, work exclusively with yaml, use kops toolbox
+# template/kops set?, all this hacking with jq stinks!
+function kops_patch_cluster_file() {
+  CLUSTER_FILE=${1}    # input must be yaml
+  KOPS_PATCH_FILE=${2} # input must be yaml
+  KIND=${3}            # must be either Cluster or InstanceGroup
+  ROLE=${4}            # must be Master, Node, or empty (empty skips the role filter)
+
+  echo "Patching cluster $CLUSTER_NAME with $KOPS_PATCH_FILE"
+
+  # Temporary intermediate files for patching, don't mutate CLUSTER_FILE until
+  # the end
+  CLUSTER_FILE_JSON=$CLUSTER_FILE.json
+  CLUSTER_FILE_0=$CLUSTER_FILE.0
+  CLUSTER_FILE_1=$CLUSTER_FILE.1
+
+  # HACK convert the multiple yaml documents to an array of json objects
+  yaml_to_json "$CLUSTER_FILE" "$CLUSTER_FILE_JSON"
+
+  # Find the json objects to patch
+  FILTER=".[] | select(.kind==\"$KIND\")"
+  if [ -n "$ROLE" ]; then
+    FILTER="$FILTER | select(.spec.role==\"$ROLE\")"
+  fi
+  jq "$FILTER" "$CLUSTER_FILE_JSON" > "$CLUSTER_FILE_0"
+
+  # Patch only the json objects
+  kubectl patch -f "$CLUSTER_FILE_0" --local --type merge --patch "$(cat "$KOPS_PATCH_FILE")" -o json > "$CLUSTER_FILE_1"
+  mv "$CLUSTER_FILE_1" "$CLUSTER_FILE_0"
+
+  # Delete the original json objects, add the patched
+  # TODO Cluster must always be first?
+  jq "del($FILTER)" "$CLUSTER_FILE_JSON" | jq ". + \$patched | sort" --slurpfile patched "$CLUSTER_FILE_0" > "$CLUSTER_FILE_1"
+  mv "$CLUSTER_FILE_1" "$CLUSTER_FILE_0"
+
+  # HACK convert the array of json objects to multiple yaml documents
+  json_to_yaml "$CLUSTER_FILE_0" "$CLUSTER_FILE_1"
+  mv "$CLUSTER_FILE_1" "$CLUSTER_FILE_0"
+
+  # Done patching, overwrite original yaml CLUSTER_FILE
+  mv "$CLUSTER_FILE_0" "$CLUSTER_FILE" # output is yaml
+
+  # Clean up
+  rm "$CLUSTER_FILE_JSON"
+}
+
+function yaml_to_json() {
+  IN=${1}
+  OUT=${2}
+  kubectl patch -f "$IN" --local -p "{}" --type merge -o json | jq '.' -s > "$OUT"
+}
+
+function json_to_yaml() {
+  IN=${1}
+  OUT=${2}
+  for ((i = 0; i < $(jq length "$IN"); i++)); do
+    echo "---" >> "$OUT"
+    jq ".[$i]" "$IN" | kubectl patch -f - --local -p "{}" --type merge -o yaml >> "$OUT"
+  done
+}
\ No newline at end of file
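A short sketch of how the existence check and deletion helpers compose when called outside run.sh (the kops binary path and state-store bucket are illustrative; run.sh passes the real values):

```bash
# Illustrative standalone use: tear down a leftover cluster before re-creating it.
source tests/e2e-kubernetes/kops.sh
KOPS=$HOME/bin/kops                    # hypothetical kops binary location
STATE=s3://example-kops-state-store    # hypothetical state-store bucket
if kops_cluster_exists "s3-csi-cluster.kops.k8s.local" "$KOPS" "$STATE"; then
  kops_delete_cluster "$KOPS" "s3-csi-cluster.kops.k8s.local" "$STATE"
fi
```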
diff --git a/tests/e2e-kubernetes/run.sh b/tests/e2e-kubernetes/run.sh
new file mode 100755
index 00000000..90387b6a
--- /dev/null
+++ b/tests/e2e-kubernetes/run.sh
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+set -euox pipefail
+
+ACTION=${ACTION:-}
+REGION=${AWS_REGION}
+
+AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
+REGISTRY=${REGISTRY:-${AWS_ACCOUNT_ID}.dkr.ecr.${REGION}.amazonaws.com}
+IMAGE_NAME=${IMAGE_NAME:-}
+TAG=${TAG:-}
+
+BASE_DIR=$(dirname "$(realpath "${BASH_SOURCE[0]}")")
+source "${BASE_DIR}"/kops.sh
+source "${BASE_DIR}"/eksctl.sh
+source "${BASE_DIR}"/helm.sh
+
+TEST_DIR=${BASE_DIR}/csi-test-artifacts
+BIN_DIR=${TEST_DIR}/bin
+KUBECTL_INSTALL_PATH=/usr/local/bin
+
+HELM_BIN=${BIN_DIR}/helm
+KOPS_BIN=${BIN_DIR}/kops
+EKSCTL_BIN=${BIN_DIR}/eksctl
+KUBECTL_BIN=${KUBECTL_INSTALL_PATH}/kubectl
+
+CLUSTER_TYPE=${CLUSTER_TYPE:-kops}
+CLUSTER_NAME="s3-csi-cluster.${CLUSTER_TYPE}.k8s.local"
+# temporary workaround to make eksctl work with the pre-created cluster
+if [[ "${CLUSTER_TYPE}" == "eksctl" ]]; then
+  CLUSTER_NAME=s3-csi-cluster
+fi
+KUBECONFIG=${KUBECONFIG:-"${TEST_DIR}/${CLUSTER_NAME}.kubeconfig"}
+
+KOPS_VERSION=1.28.0
+ZONES=${AWS_AVAILABILITY_ZONES:-us-east-1a,us-east-1b,us-east-1c,us-east-1d}
+NODE_COUNT=${NODE_COUNT:-3}
+INSTANCE_TYPE=${INSTANCE_TYPE:-c5.large}
+AMI_ID=$(aws ssm get-parameters --names /aws/service/ami-amazon-linux-latest/al2023-ami-kernel-default-x86_64 --region ${REGION} --query 'Parameters[0].Value' --output text)
+CLUSTER_FILE=${TEST_DIR}/${CLUSTER_NAME}.${CLUSTER_TYPE}.yaml
+KOPS_PATCH_FILE=${KOPS_PATCH_FILE:-${BASE_DIR}/kops-patch.yaml}
+KOPS_PATCH_NODE_FILE=${KOPS_PATCH_NODE_FILE:-${BASE_DIR}/kops-patch-node.yaml}
+KOPS_STATE_FILE=${KOPS_STATE_FILE:-s3://mountpoint-s3-csi-driver-kops-state-store}
+
+HELM_RELEASE_NAME=mountpoint-s3-csi-driver
+
+# kops: must include patch version (e.g. 1.19.1)
+# eksctl: mustn't include patch version (e.g. 1.19)
+K8S_VERSION_KOPS=${K8S_VERSION_KOPS:-${K8S_VERSION:-1.28.2}}
+
+mkdir -p ${TEST_DIR}
+mkdir -p ${BIN_DIR}
+export PATH="$PATH:${BIN_DIR}"
+
+function kubectl_install() {
+  curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
+  curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256"
+  echo "$(cat kubectl.sha256) kubectl" | sha256sum --check
+  sudo install -o root -g root -m 0755 kubectl ${KUBECTL_INSTALL_PATH}/kubectl
+}
+
+function install_tools() {
+  kubectl_install
+
+  helm_install "$BIN_DIR"
+
+  kops_install \
+    "${BIN_DIR}" \
+    "${KOPS_VERSION}"
+}
+
+function create_cluster() {
+  if [[ "${CLUSTER_TYPE}" == "kops" ]]; then
+    kops_create_cluster \
+      "$CLUSTER_NAME" \
+      "$KOPS_BIN" \
+      "$ZONES" \
+      "$NODE_COUNT" \
+      "$INSTANCE_TYPE" \
+      "$AMI_ID" \
+      "$K8S_VERSION_KOPS" \
+      "$CLUSTER_FILE" \
+      "$KUBECONFIG" \
+      "$KOPS_PATCH_FILE" \
+      "$KOPS_PATCH_NODE_FILE" \
+      "$KOPS_STATE_FILE"
+  elif [[ "${CLUSTER_TYPE}" == "eksctl" ]]; then
+    eksctl_create_cluster \
+      "$CLUSTER_NAME" \
+      "$REGION" \
+      "$KUBECONFIG"
+  fi
+}
+
+function delete_cluster() {
+  if [[ "${CLUSTER_TYPE}" == "kops" ]]; then
+    kops_delete_cluster \
+      "${KOPS_BIN}" \
+      "${CLUSTER_NAME}" \
+      "${KOPS_STATE_FILE}"
+  elif [[ "${CLUSTER_TYPE}" == "eksctl" ]]; then
+    eksctl_delete_cluster
+  fi
+}
+
+if [[ "${ACTION}" == "install_tools" ]]; then
+  install_tools
+elif [[ "${ACTION}" == "create_cluster" ]]; then
+  create_cluster
+elif [[ "${ACTION}" == "install_driver" ]]; then
+  helm_install_driver \
+    "$HELM_BIN" \
+    "$KUBECTL_BIN" \
+    "$HELM_RELEASE_NAME" \
+    "${REGISTRY}/${IMAGE_NAME}" \
+    "${TAG}" \
+    "${KUBECONFIG}"
+elif [[ "${ACTION}" == "run_tests" ]]; then
+  KUBECONFIG=${KUBECONFIG} go test -ginkgo.vv --bucket-region=${REGION} --commit-id=${TAG}
+elif [[ "${ACTION}" == "uninstall_driver" ]]; then
+  helm_uninstall_driver \
+    "$HELM_BIN" \
+    "$KUBECTL_BIN" \
+    "$HELM_RELEASE_NAME" \
+    "${KUBECONFIG}"
elif [[ "${ACTION}" == "delete_cluster" ]]; then
+  delete_cluster
+else
+  echo "ACTION := install_tools|create_cluster|install_driver|run_tests|uninstall_driver|delete_cluster"
+  exit 1
+fi
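run.sh reads most of its tunables from environment variables with defaults defined near the top of the script (node count, instance type, kops state store, Kubernetes version). A hedged example of overriding them for a smaller throwaway cluster; the state-store bucket name is illustrative:

```bash
# Illustrative overrides; defaults are defined near the top of run.sh.
export KOPS_STATE_FILE=s3://example-kops-state-store   # hypothetical bucket you own
export NODE_COUNT=1
export INSTANCE_TYPE=t3.medium
ACTION=create_cluster AWS_REGION=us-east-1 CLUSTER_TYPE=kops tests/e2e-kubernetes/run.sh
```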