# run-tests.yml
name: run-tests

on:
  pull_request:
    # Not running on "closed" - that is taken care of by "push" (if merged)
    types: [opened, synchronize, reopened]

# This cancels any previous job from the same PR if the PR has been updated.
# The cancel-in-progress only works per PR (thus, two different PRs won't be cancelled).
# Concurrency is otherwise not an issue because the self-hosted worker will only run
# one job at a time from one repo anyway.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  lint-test:
    name: Lint helm charts
    runs-on: ubuntu-latest
    env:
      TEMPLATE_OUT_DIR: template_out
    steps:
      - name: Checkout main repo
        uses: actions/checkout@v4
      - name: Set up Helm
        uses: azure/setup-helm@v4
        with:
          version: '3.13.3'
      - run: helm lint --strict .
      - run: helm lint --strict --values values/dummy_lint.yaml .
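      # Render all manifests (including CRDs) to disk using the same lint values;
      # the rendered files are what kubeconform validates and what is uploaded as
      # an artifact below. Runs even if linting failed (if: always()).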
      - name: Helm template
        if: always()
        run: |
          helm template . \
            --include-crds \
            --debug \
            --dry-run \
            --values values/dummy_lint.yaml \
            --output-dir $TEMPLATE_OUT_DIR
      - run: ls -l $TEMPLATE_OUT_DIR/**
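      # Validate the rendered manifests against the Kubernetes API schemas.
      # -strict rejects unknown fields; -ignore-missing-schemas skips resources
      # (e.g. CRD-based ones) for which kubeconform has no schema.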
      - uses: docker://ghcr.io/yannh/kubeconform:latest
        with:
          entrypoint: '/kubeconform'
          args: "-summary -strict -ignore-missing-schemas ${{ env.TEMPLATE_OUT_DIR }}"
      - name: Upload templates
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: templates
          path: ${{ env.TEMPLATE_OUT_DIR }}
          retention-days: 5

  test-deploy:
    needs: [lint-test]
    if: github.repository == 'logicalclocks/rondb-helm'
    runs-on: [self-hosted, ARM64]
    env:
      K8S_NAMESPACE: rondb-helm-${{ github.run_id }}-${{ github.run_attempt }}
      RONDB_CLUSTER_NAME: my-rondb
      BUCKET_NAME: rondb-helm
      BUCKET_REGION: eu-north-1
      BUCKET_SECRET_NAME: bucket-credentials
      MINIO_ACCESS_KEY: minio
      MINIO_SECRET_KEY: minio123
      MINIO_TENANT_NAMESPACE: minio-tenant-${{ github.run_id }}-${{ github.run_attempt }}
    steps:
      - name: Checkout main repo
        uses: actions/checkout@v4
      - name: Check kubectl (should be Minikube)
        run: |
          kubectl version --client
          kubectl get nodes
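      # Stand up a single-server MinIO tenant as an S3-compatible target for the
      # backup/restore tests below. requestAutoCert=false keeps it plain HTTP,
      # matching the http:// endpoint exported in the step after the install.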
      - name: Create MinIO tenant
        run: |
          helm install \
            --namespace $MINIO_TENANT_NAMESPACE \
            --create-namespace \
            tenant minio/tenant \
            --set "tenant.pools[0].name=my-pool" \
            --set "tenant.pools[0].servers=1" \
            --set "tenant.pools[0].volumesPerServer=1" \
            --set "tenant.pools[0].size=4Gi" \
            --set "tenant.certificate.requestAutoCert=false" \
            --set "tenant.configSecret.name=myminio-env-configuration" \
            --set "tenant.configSecret.accessKey=${MINIO_ACCESS_KEY}" \
            --set "tenant.configSecret.secretKey=${MINIO_SECRET_KEY}" \
            --set "tenant.buckets[0].name=${BUCKET_NAME}" \
            --set "tenant.buckets[0].region=${BUCKET_REGION}"
- run: echo "MINIO_ENDPOINT=http://minio.${{ env.MINIO_TENANT_NAMESPACE }}.svc.cluster.local" >> $GITHUB_ENV
      - name: Create namespace
        run: kubectl create namespace $K8S_NAMESPACE
      # MinIO also creates a Secret but with a different format
      - name: Create RonDB's Bucket Secret
        run: |
          kubectl create secret generic $BUCKET_SECRET_NAME \
            --namespace=$K8S_NAMESPACE \
            --from-literal "key_id=${MINIO_ACCESS_KEY}" \
            --from-literal "access_key=${MINIO_SECRET_KEY}"
      - name: Create original RonDB cluster
        run: |
          helm install $RONDB_CLUSTER_NAME \
            --namespace=$K8S_NAMESPACE \
            --values ./values/minikube/mini.yaml \
            --set benchmarking.enabled=true \
            --set backups.enabled=true \
            --set backups.s3.provider=Minio \
            --set backups.s3.endpoint=$MINIO_ENDPOINT \
            --set backups.s3.bucketName=$BUCKET_NAME \
            --set backups.s3.region=$BUCKET_REGION \
            --set backups.s3.serverSideEncryption=null \
            --set backups.s3.keyCredentialsSecret.name=$BUCKET_SECRET_NAME \
            --set backups.s3.keyCredentialsSecret.key=key_id \
            --set backups.s3.secretCredentialsSecret.name=$BUCKET_SECRET_NAME \
            --set backups.s3.secretCredentialsSecret.key=access_key \
            .
          sleep 10
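      # Run only the chart's generate-data test hook, which seeds the cluster with
      # the data that the verify-data hook checks after failover and after restore.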
      - run: helm test -n $K8S_NAMESPACE $RONDB_CLUSTER_NAME --logs --filter name=generate-data
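      # wait_job.sh presumably takes <namespace> <job name> <timeout in seconds>.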
      - name: Waiting for benchmark job to complete
        run: bash .github/wait_job.sh $K8S_NAMESPACE benchs 240
      - name: Collect bench logs
        if: always()
        uses: ./.github/actions/collect_bench_logs
        with:
          namespace: ${{ env.K8S_NAMESPACE }}
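      # The stability script presumably polls the namespace every SLEEP_SECONDS
      # and succeeds once all pods have stayed ready for MIN_STABLE_MINUTES.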
      - name: Test deploy stability
        shell: bash
        timeout-minutes: 6
        env:
          SLEEP_SECONDS: 10
          MIN_STABLE_MINUTES: 1
        run: bash .github/test_deploy_stability.sh
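      # Force-delete one data node pod to simulate a node failure; the stability
      # check below verifies that the cluster recovers on its own.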
      - name: Terminate RonDB data node
        run: kubectl -n $K8S_NAMESPACE delete pod node-group-0-0 --force
      - name: Test deploy stability again
        shell: bash
        timeout-minutes: 6
        env:
          SLEEP_SECONDS: 10
          MIN_STABLE_MINUTES: 1
        run: bash .github/test_deploy_stability.sh
      # Check that data has been created correctly
      - run: helm test -n $K8S_NAMESPACE $RONDB_CLUSTER_NAME --logs --filter name=verify-data
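      # Trigger the chart's create-backup CronJob manually instead of waiting
      # for its schedule.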
      - run: kubectl create job -n $K8S_NAMESPACE --from=cronjob/create-backup manual-backup
      - name: Waiting for backup job to complete
        shell: bash
        run: bash .github/wait_job.sh $K8S_NAMESPACE manual-backup 180
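      # Extract the backup ID for the restore below: find the succeeded backup pod,
      # grep its upload container logs for a "BACKUP-<id>" marker, and export the
      # numeric id to GITHUB_ENV.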
      - run: |
          POD_NAME=$(kubectl get pods -n $K8S_NAMESPACE --selector=job-name=manual-backup -o jsonpath='{.items[?(@.status.phase=="Succeeded")].metadata.name}' | head -n 1)
          echo "POD_NAME is ${POD_NAME}"
          BACKUP_ID=$(kubectl logs $POD_NAME -n $K8S_NAMESPACE --container=upload-native-backups | grep -o "BACKUP-[0-9]\+" | head -n 1 | awk -F '-' '{print $2}')
          echo "BACKUP_ID is ${BACKUP_ID}"
          echo "BACKUP_ID=${BACKUP_ID}" >> $GITHUB_ENV
      - name: Collect logs
        if: always()
        uses: ./.github/actions/collect_logs
        with:
          namespace: ${{ env.K8S_NAMESPACE }}
          i: 1
      - name: Remove cluster
        if: always()
        uses: ./.github/actions/remove_cluster
        timeout-minutes: 4
        with:
          namespace: ${{ env.K8S_NAMESPACE }}
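      # Reinstall the chart, this time restoring from the backup taken above;
      # the restoreFromBackup.s3 settings mirror the backups.s3 settings of the
      # original cluster.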
      - name: Create second RonDB cluster
        run: |
          helm install $RONDB_CLUSTER_NAME \
            --namespace=$K8S_NAMESPACE \
            --values ./values/minikube/mini.yaml \
            --set restoreFromBackup.backupId=${BACKUP_ID} \
            --set restoreFromBackup.s3.provider=Minio \
            --set restoreFromBackup.s3.endpoint=$MINIO_ENDPOINT \
            --set restoreFromBackup.s3.bucketName=$BUCKET_NAME \
            --set restoreFromBackup.s3.region=$BUCKET_REGION \
            --set restoreFromBackup.s3.serverSideEncryption=null \
            --set restoreFromBackup.s3.keyCredentialsSecret.name=$BUCKET_SECRET_NAME \
            --set restoreFromBackup.s3.keyCredentialsSecret.key=key_id \
            --set restoreFromBackup.s3.secretCredentialsSecret.name=$BUCKET_SECRET_NAME \
            --set restoreFromBackup.s3.secretCredentialsSecret.key=access_key \
            .
          sleep 10
      - name: Test deploy stability again
        shell: bash
        timeout-minutes: 6
        env:
          SLEEP_SECONDS: 10
          MIN_STABLE_MINUTES: 1
        run: bash .github/test_deploy_stability.sh
      # Check that data has been restored correctly
      - run: helm test -n $K8S_NAMESPACE $RONDB_CLUSTER_NAME --logs --filter name=verify-data
      - name: Collect logs
        if: always()
        uses: ./.github/actions/collect_logs
        with:
          namespace: ${{ env.K8S_NAMESPACE }}
          i: 2
      - name: Remove cluster
        if: always()
        uses: ./.github/actions/remove_cluster
        timeout-minutes: 4
        with:
          namespace: ${{ env.K8S_NAMESPACE }}
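      # Teardown runs with if: always() so the MinIO tenant and its namespace
      # are cleaned up even when earlier steps fail.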
      - name: Delete MinIO tenant
        if: always()
        run: helm delete --namespace $MINIO_TENANT_NAMESPACE tenant
      - shell: bash
        if: always()
        run: kubectl delete namespace $MINIO_TENANT_NAMESPACE --timeout=50s || true