Add backup/restore to CI using MinIO #40
Workflow file for this run
name: run-tests

on:
  pull_request:
    # Not running on "closed" - that is taken care of by "push" (if merged)
    types: [opened, synchronize, reopened]

# This cancels any previous job from the same PR if the PR has been updated.
# cancel-in-progress only works per PR (thus, two different PRs won't cancel each other).
# Concurrency is not an issue because the self-hosted worker will anyway only run one
# job at a time from one repo.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
jobs:
  lint-test:
    name: Lint helm charts
    runs-on: ubuntu-latest
    env:
      TEMPLATE_OUT_DIR: template_out
    steps:
      - name: Checkout main repo
        uses: actions/checkout@v4
      - name: Set up Helm
        uses: azure/setup-helm@v4
        with:
          version: '3.13.3'
      - run: helm lint --strict .
      - run: helm lint --strict --values values/dummy_lint.yaml .
      - name: Helm template
        if: always()
        run: |
          helm template . \
            --include-crds \
            --debug \
            --dry-run \
            --values values/dummy_lint.yaml \
            --output-dir $TEMPLATE_OUT_DIR
      - run: ls -l $TEMPLATE_OUT_DIR/**
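      # kubeconform validates the rendered manifests against the Kubernetes JSON schemas;
      # -ignore-missing-schemas skips resources (e.g. CRDs) for which no schema is available.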
      - uses: docker://ghcr.io/yannh/kubeconform:latest
        with:
          entrypoint: '/kubeconform'
          args: "-summary -strict -ignore-missing-schemas ${{ env.TEMPLATE_OUT_DIR }}"
      - name: Upload templates
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: templates
          path: ${{ env.TEMPLATE_OUT_DIR }}
          retention-days: 5
  test-deploy:
    needs: [lint-test]
    if: github.repository == 'logicalclocks/rondb-helm'
    runs-on: [self-hosted, ARM64]
    env:
      K8S_NAMESPACE: rondb-helm-${{ github.run_id }}-${{ github.run_attempt }}
      RONDB_CLUSTER_NAME: my-rondb
      BUCKET_NAME: rondb-helm
      BUCKET_REGION: eu-north-1
      BUCKET_SECRET_NAME: bucket-credentials
      MINIO_ACCESS_KEY: minio
      MINIO_SECRET_KEY: minio123
      MINIO_TENANT_NAMESPACE: minio-tenant-${{ github.run_id }}-${{ github.run_attempt }}
      BACKUP_JOB_NAME: manual-backup
    steps:
      - name: Checkout main repo
        uses: actions/checkout@v4
      - name: Check kubectl (should be Minikube)
        run: |
          kubectl version --client
          kubectl get nodes
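      # The MinIO tenant provides the S3-compatible object storage that the backup and
      # restore steps below upload to and download from. This assumes the MinIO operator
      # is already installed in the cluster and the "minio" chart repo is configured on
      # the self-hosted runner.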
      - name: Create MinIO tenant
        run: |
          helm install \
            --namespace $MINIO_TENANT_NAMESPACE \
            --create-namespace \
            tenant minio/tenant \
            --set "tenant.pools[0].name=my-pool" \
            --set "tenant.pools[0].servers=1" \
            --set "tenant.pools[0].volumesPerServer=1" \
            --set "tenant.pools[0].size=4Gi" \
            --set "tenant.certificate.requestAutoCert=false" \
            --set "tenant.configSecret.name=myminio-env-configuration" \
            --set "tenant.configSecret.accessKey=${MINIO_ACCESS_KEY}" \
            --set "tenant.configSecret.secretKey=${MINIO_SECRET_KEY}" \
            --set "tenant.buckets[0].name=${BUCKET_NAME}" \
            --set "tenant.buckets[0].region=${BUCKET_REGION}"
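      # Export the in-cluster DNS name of the tenant's "minio" Service so later steps can
      # pass it to the chart as the S3 endpoint.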
      - run: echo "MINIO_ENDPOINT=http://minio.${{ env.MINIO_TENANT_NAMESPACE }}.svc.cluster.local" >> $GITHUB_ENV
      - name: Create namespace
        run: kubectl create namespace $K8S_NAMESPACE
      # MinIO also creates a Secret but with a different format
      - name: Create RonDB's Bucket Secret
        run: |
          kubectl create secret generic $BUCKET_SECRET_NAME \
            --namespace=$K8S_NAMESPACE \
            --from-literal "key_id=${MINIO_ACCESS_KEY}" \
            --from-literal "access_key=${MINIO_SECRET_KEY}"
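      # The backups.s3.* values point the chart's backup upload at the MinIO bucket,
      # reading the S3 key id and secret from the bucket Secret created above.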
      - name: Create original RonDB cluster
        run: |
          helm install $RONDB_CLUSTER_NAME \
            --namespace=$K8S_NAMESPACE \
            --values ./values/minikube/mini.yaml \
            --set benchmarking.enabled=true \
            --set clusterSize.minNumMySQLServers=1 \
            --set clusterSize.maxNumMySQLServers=2 \
            --set backups.enabled=true \
            --set backups.s3.provider=Minio \
            --set backups.s3.endpoint=$MINIO_ENDPOINT \
            --set backups.s3.bucketName=$BUCKET_NAME \
            --set backups.s3.region=$BUCKET_REGION \
            --set backups.s3.serverSideEncryption=null \
            --set backups.s3.keyCredentialsSecret.name=$BUCKET_SECRET_NAME \
            --set backups.s3.keyCredentialsSecret.key=key_id \
            --set backups.s3.secretCredentialsSecret.name=$BUCKET_SECRET_NAME \
            --set backups.s3.secretCredentialsSecret.key=access_key \
            .
          sleep 10
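      # The generate-data helm test presumably seeds the sample data that the later
      # verify-data tests (and the backup/restore round-trip) check against.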
      - run: helm test -n $K8S_NAMESPACE $RONDB_CLUSTER_NAME --logs --filter name=generate-data
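      # wait_job.sh is assumed to take <namespace> <job name> <timeout in seconds>.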
      - name: Waiting for benchmark job to complete
        run: bash .github/wait_job.sh $K8S_NAMESPACE benchs 240
      - name: Collect bench logs
        if: always()
        uses: ./.github/actions/collect_bench_logs
        with:
          namespace: ${{ env.K8S_NAMESPACE }}
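      # Force-deleting a data node pod simulates a node failure; the stability check below
      # then waits for the cluster to recover before the data is verified.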
      - name: Terminate RonDB data node
        run: kubectl -n $K8S_NAMESPACE delete pod node-group-0-0 --force
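      # test_deploy_stability.sh presumably polls the pods every SLEEP_SECONDS and only
      # succeeds once everything has stayed Ready for MIN_STABLE_MINUTES.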
      - name: Test deploy stability
        shell: bash
        timeout-minutes: 6
        env:
          SLEEP_SECONDS: 10
          MIN_STABLE_MINUTES: 1
        run: bash .github/test_deploy_stability.sh
      # Check that data has been created correctly
      - run: helm test -n $K8S_NAMESPACE $RONDB_CLUSTER_NAME --logs --filter name=verify-data
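      # Trigger a manual backup by creating a one-off Job from the chart's
      # create-backup CronJob.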
      - run: kubectl create job -n $K8S_NAMESPACE --from=cronjob/create-backup $BACKUP_JOB_NAME
      - name: Waiting for backup job to complete
        shell: bash
        run: bash .github/wait_job.sh $K8S_NAMESPACE $BACKUP_JOB_NAME 180
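      # The backup ID is parsed from the "BACKUP-<id>" line in the upload container's logs
      # and exported so the restore install below can reference it.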
      - name: Get backup ID
        run: |
          kubectl get pods -n $K8S_NAMESPACE --selector=job-name=$BACKUP_JOB_NAME
          POD_NAME=$(kubectl get pods -n $K8S_NAMESPACE --selector=job-name=$BACKUP_JOB_NAME -o jsonpath='{.items[?(@.status.phase=="Succeeded")].metadata.name}' | head -n 1)
          echo "POD_NAME is ${POD_NAME}"
          kubectl logs $POD_NAME -n $K8S_NAMESPACE --container=upload-native-backups
          BACKUP_ID=$(kubectl logs $POD_NAME -n $K8S_NAMESPACE --container=upload-native-backups | grep -o "BACKUP-[0-9]\+" | head -n 1 | awk -F '-' '{print $2}')
          echo "BACKUP_ID is ${BACKUP_ID}"
          echo "BACKUP_ID=${BACKUP_ID}" >> $GITHUB_ENV
      - name: Collect logs
        if: always()
        uses: ./.github/actions/collect_logs
        with:
          namespace: ${{ env.K8S_NAMESPACE }}
          i: 1
      - name: Remove cluster
        if: always()
        uses: ./.github/actions/remove_cluster
        timeout-minutes: 4
        with:
          namespace: ${{ env.K8S_NAMESPACE }}
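      # Start from a clean slate for the restore test: the namespace and bucket Secret are
      # recreated, assuming the remove_cluster action above deleted them.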
      - name: Create namespace
        run: kubectl create namespace $K8S_NAMESPACE
      # MinIO also creates a Secret but with a different format
      - name: Create RonDB's Bucket Secret
        run: |
          kubectl create secret generic $BUCKET_SECRET_NAME \
            --namespace=$K8S_NAMESPACE \
            --from-literal "key_id=${MINIO_ACCESS_KEY}" \
            --from-literal "access_key=${MINIO_SECRET_KEY}"
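      # The second cluster is installed with restoreFromBackup.* pointing at the same
      # MinIO bucket and the backup ID captured earlier, so it restores the original data.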
      - name: Create second RonDB cluster
        run: |
          helm install $RONDB_CLUSTER_NAME \
            --namespace=$K8S_NAMESPACE \
            --values ./values/minikube/mini.yaml \
            --set restoreFromBackup.backupId=${BACKUP_ID} \
            --set restoreFromBackup.s3.provider=Minio \
            --set restoreFromBackup.s3.endpoint=$MINIO_ENDPOINT \
            --set restoreFromBackup.s3.bucketName=$BUCKET_NAME \
            --set restoreFromBackup.s3.region=$BUCKET_REGION \
            --set restoreFromBackup.s3.serverSideEncryption=null \
            --set restoreFromBackup.s3.keyCredentialsSecret.name=$BUCKET_SECRET_NAME \
            --set restoreFromBackup.s3.keyCredentialsSecret.key=key_id \
            --set restoreFromBackup.s3.secretCredentialsSecret.name=$BUCKET_SECRET_NAME \
            --set restoreFromBackup.s3.secretCredentialsSecret.key=access_key \
            .
          sleep 10
      - name: Test deploy stability again
        shell: bash
        timeout-minutes: 6
        env:
          SLEEP_SECONDS: 10
          MIN_STABLE_MINUTES: 1
        run: bash .github/test_deploy_stability.sh
      # Check that data has been restored correctly
      - run: helm test -n $K8S_NAMESPACE $RONDB_CLUSTER_NAME --logs --filter name=verify-data
      - name: Collect logs
        if: always()
        uses: ./.github/actions/collect_logs
        with:
          namespace: ${{ env.K8S_NAMESPACE }}
          i: 2
      - name: Remove cluster
        if: always()
        uses: ./.github/actions/remove_cluster
        timeout-minutes: 4
        with:
          namespace: ${{ env.K8S_NAMESPACE }}
      - name: Delete MinIO tenant
        if: always()
        run: helm delete --namespace $MINIO_TENANT_NAMESPACE tenant
      - name: Delete MinIO namespace
        if: always()
        run: kubectl delete namespace $MINIO_TENANT_NAMESPACE --timeout=50s || true