# run-tests.yml
name: run-tests
on:
  pull_request:
    # Not running on "closed" - that is taken care of by "push" (if merged)
    types: [opened, synchronize, reopened]

# This cancels any previous job from the same PR if the PR has been updated.
# Cancel-in-progress only works per PR, so two different PRs won't cancel each other.
# Concurrency is not an issue anyway, because the self-hosted worker will only run
# one job at a time from one repo.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
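# For pull_request events, github.ref resolves to refs/pull/<PR number>/merge, so the
# group above evaluates to e.g. "run-tests-refs/pull/42/merge" (42 is an illustrative number).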

jobs:
  lint-test:
    name: Lint helm charts
    runs-on: ubuntu-latest
    env:
      TEMPLATE_OUT_DIR: template_out
    steps:
      - name: Checkout main repo
        uses: actions/checkout@v4
      - name: Set up Helm
        uses: azure/setup-helm@v4
        with:
          version: '3.13.3'
      - run: helm lint --strict .
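      # values/dummy_lint.yaml presumably supplies a second, more complete set of values
      # so that strict linting also covers templates that are disabled by default.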
      - run: helm lint --strict --values values/dummy_lint.yaml .
      - name: Helm template
        if: always()
        run: |
          helm template . \
            --include-crds \
            --debug \
            --dry-run \
            --values values/dummy_lint.yaml \
            --output-dir $TEMPLATE_OUT_DIR
      - run: ls -l $TEMPLATE_OUT_DIR/**
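      # kubeconform validates the rendered manifests against the Kubernetes JSON schemas;
      # -ignore-missing-schemas skips kinds without a published schema (e.g. custom CRDs).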
      - uses: docker://ghcr.io/yannh/kubeconform:latest
        with:
          entrypoint: '/kubeconform'
          args: "-summary -strict -ignore-missing-schemas ${{ env.TEMPLATE_OUT_DIR }}"
      - name: Upload templates
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: templates
          path: ${{ env.TEMPLATE_OUT_DIR }}
          retention-days: 5

  test-deploy:
    needs: [lint-test]
    if: github.repository == 'logicalclocks/rondb-helm'
    runs-on: [self-hosted, ARM64]
    env:
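      # run_id/run_attempt make the namespace unique per run (and per retry), so a
      # re-run cannot collide with leftovers from a previous attempt.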
      K8S_NAMESPACE: rondb-helm-${{ github.run_id }}-${{ github.run_attempt }}
      RONDB_CLUSTER_NAME: my-rondb
      BUCKET_SECRET_NAME: bucket-credentials
      MINIO_ACCESS_KEY: minio
      MINIO_SECRET_KEY: minio123
    steps:
      - name: Checkout main repo
        uses: actions/checkout@v4
      - name: Check kubectl (should be Minikube)
        run: |
          kubectl version --client
          kubectl get nodes
      - name: Create namespace
        run: kubectl create namespace $K8S_NAMESPACE
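      # Deploys a minimal single-server MinIO tenant as an S3-compatible backup target.
      # This assumes the "minio" chart repo is already registered on the self-hosted
      # runner and that the MinIO operator is running, since the tenant chart relies on it.
      # The hardcoded credentials are throwaway values for this ephemeral test cluster only.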
      - name: Create MinIO tenant
        run: |
          helm install \
            --namespace $K8S_NAMESPACE \
            tenant minio/tenant \
            --set "tenant.pools[0].name=my-pool" \
            --set "tenant.pools[0].servers=1" \
            --set "tenant.pools[0].volumesPerServer=1" \
            --set "tenant.pools[0].size=4Gi" \
            --set configSecret.name=myminio-env-configuration \
            --set configSecret.accessKey=$MINIO_ACCESS_KEY \
            --set configSecret.secretKey=$MINIO_SECRET_KEY
      - name: Create RonDB's Bucket Secret
        run: |
          kubectl create secret generic $BUCKET_SECRET_NAME \
            --namespace=$K8S_NAMESPACE \
            --from-literal "key_id=${MINIO_ACCESS_KEY}" \
            --from-literal "access_key=${MINIO_SECRET_KEY}"
      # TODO: Run benchmarks as well later
      - name: Create RonDB cluster
        run: |
          helm upgrade -i $RONDB_CLUSTER_NAME \
            --namespace=$K8S_NAMESPACE \
            --values ./values/minikube/mini.yaml \
            --set benchmarking.enabled=true \
            --set terminationGracePeriodSeconds=10 \
            --set backups.enabled=true \
            --set backups.s3.endpoint=minio.default.svc.cluster.local:9000 \
            --set backups.s3.keyCredentialsSecret.name=$BUCKET_SECRET_NAME \
            --set backups.s3.keyCredentialsSecret.key=key_id \
            --set backups.s3.secretCredentialsSecret.name=$BUCKET_SECRET_NAME \
            --set backups.s3.secretCredentialsSecret.key=access_key \
            .
          sleep 10
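      # "--filter name=generate-data" runs only the chart's generate-data test hook, which
      # presumably seeds sample data that the verify-data hook checks later; the sleep above
      # presumably gives the initial Pods a moment to appear before testing.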
      - run: helm test -n $K8S_NAMESPACE $RONDB_CLUSTER_NAME --logs --filter name=generate-data
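      # The next step backgrounds "kubectl wait" and polls it with "kill -0" so that Pod
      # status is logged every 20s; the final "wait" propagates the job's exit code.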
      - name: Waiting for benchmark job to complete
        shell: bash
        run: |
          kubectl wait --for=condition=complete --timeout=400s -n $K8S_NAMESPACE job/benchs &
          JOB_COMPLETION_PID=$!
          set +e
          while kill -0 $JOB_COMPLETION_PID 2> /dev/null; do
            echo "$(date) Benchmark job is still running..."
            sleep 20
            kubectl get pods -o wide -n $K8S_NAMESPACE
          done
          set -e
          wait $JOB_COMPLETION_PID
          exit $?
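      # test_deploy_stability.sh is assumed to poll Pod status every SLEEP_SECONDS and to
      # succeed once all Pods have been ready for MIN_STABLE_MINUTES without restarts.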
      - name: Test deploy stability
        shell: bash
        timeout-minutes: 6
        env:
          SLEEP_SECONDS: 10
          MIN_STABLE_MINUTES: 1
        run: bash .github/test_deploy_stability.sh
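      # Force-deleting a data node Pod simulates an abrupt node failure; the stability
      # check is then repeated to verify that the cluster heals itself.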
      - name: Terminate RonDB data node
        run: kubectl -n $K8S_NAMESPACE delete pod node-group-0-0 --force
      - name: Test deploy stability again
        shell: bash
        timeout-minutes: 6
        env:
          SLEEP_SECONDS: 10
          MIN_STABLE_MINUTES: 1
        run: bash .github/test_deploy_stability.sh
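      # Triggers the chart's create-backup CronJob once, exercising the S3 backup path
      # against the MinIO tenant. Note that the workflow does not wait for this job to finish.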
      - run: kubectl create job -n $K8S_NAMESPACE --from=cronjob/create-backup manual-backup
      # Check that data has been created correctly
      - run: helm test -n $K8S_NAMESPACE $RONDB_CLUSTER_NAME --logs --filter name=verify-data
      - name: Collect logs
        if: always()
        uses: ./.github/actions/collect_logs
        with:
          namespace: ${{ env.K8S_NAMESPACE }}
      - name: Delete RonDB Helmchart
        if: always()
        run: helm delete --namespace=$K8S_NAMESPACE $RONDB_CLUSTER_NAME
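      # Helm test hook Pods are not owned by the release, so "helm delete" leaves them
      # behind; they have to be removed explicitly before the namespace can drain.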
      - name: Delete Helm test Pods
        if: always()
        run: kubectl delete pods --all --namespace $K8S_NAMESPACE
      - name: Wait for resource deletions
        if: always()
        timeout-minutes: 2
        run: |
          echo "Waiting for resources in namespace $K8S_NAMESPACE to be deleted..."
          while true; do
            RESOURCES=$(kubectl get all --namespace $K8S_NAMESPACE --no-headers)
            if [ -z "$RESOURCES" ]; then
              echo "All resources deleted."
              exit 0
            fi
            echo -e "\nRemaining resources in namespace $K8S_NAMESPACE:\n"
            echo "$RESOURCES"
            sleep 3
          done
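      # Namespace deletion is best-effort ("|| true") so that a slow finalizer does not
      # fail an otherwise green run; the 50s timeout bounds the wait.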
      - if: always()
        run: kubectl delete namespace $K8S_NAMESPACE --timeout=50s || true