diff --git a/.github/ct.yaml b/.github/ct.yaml
new file mode 100644
index 00000000..bd8ffddb
--- /dev/null
+++ b/.github/ct.yaml
@@ -0,0 +1,4 @@
+# See https://github.com/helm/chart-testing#configuration
+remote: origin
+target-branch: main
+helm-extra-args: --timeout 600s
diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
index e4e9acfa..03bd9a0a 100644
--- a/.github/workflows/test.yaml
+++ b/.github/workflows/test.yaml
@@ -29,13 +29,13 @@ jobs:
- name: Run chart-testing (list-changed)
id: list-changed
run: |
- changed=$(ct list-changed)
+ changed=$(ct list-changed --config .github/ct.yaml)
if [[ -n "$changed" ]]; then
echo "::set-output name=changed::true"
fi
- name: Run chart-testing (lint)
- run: ct lint
+ run: ct lint --config .github/ct.yaml
- name: Create kind cluster
uses: helm/kind-action@v1.1.0
@@ -43,4 +43,4 @@ jobs:
if: steps.list-changed.outputs.changed == 'true'
- name: Run chart-testing (install)
- run: ct install
+ run: ct install --config .github/ct.yaml
diff --git a/charts/psmdb-db/Chart.yaml b/charts/psmdb-db/Chart.yaml
index a9610b38..830a6bfc 100644
--- a/charts/psmdb-db/Chart.yaml
+++ b/charts/psmdb-db/Chart.yaml
@@ -1,9 +1,9 @@
apiVersion: v1
-appVersion: "1.6.0"
+appVersion: "1.7.0"
description: A Helm chart for installing Percona Server MongoDB Cluster Databases using the PSMDB Operator.
name: psmdb-db
home: https://www.percona.com/doc/kubernetes-operator-for-psmongodb/index.html
-version: 0.1.2
+version: 1.7.0
maintainers:
- name: cap1984
email: ivan.pylypenko@percona.com
diff --git a/charts/psmdb-db/README.md b/charts/psmdb-db/README.md
index 034bc048..e04e0728 100644
--- a/charts/psmdb-db/README.md
+++ b/charts/psmdb-db/README.md
@@ -5,9 +5,9 @@ This chart implements Percona Server MongoDB deployment in Kubernets via Custom
## Pre-requisites
* [PSMDB operator](https://hub.helm.sh/charts/percona/psmdb-operator) running in you K8S cluster
-* Kubernetes 1.11+
+* Kubernetes 1.15+
* PV support on the underlying infrastructure - only if you are provisioning persistent volume(s).
-* At least `v2.4.0` version of helm
+* At least `v2.5.0` version of helm
## Custom Resource Details
*
@@ -24,7 +24,7 @@ To install the chart with the `psmdb` release name using a dedicated namespace (
```sh
helm repo add percona https://percona.github.io/percona-helm-charts/
-helm install my-db percona/psmdb-db --version 0.1.1 --namespace my-namespace
+helm install my-db percona/psmdb-db --version 1.7.0 --namespace my-namespace
```
The chart can be customized using the following configurable parameters:
@@ -38,7 +38,7 @@ The chart can be customized using the following configurable parameters:
| `upgradeOptions.apply` | PSMDB image to apply from version service - recommended, latest, actual version like 4.4.2-4 | `recommended` |
| `upgradeOptions.schedule` | Cron formatted time to execute the update | `"0 2 * * *"` |
| `image.repository` | PSMDB Container image repository | `percona/percona-server-mongodb` |
-| `image.tag` | PSMDB Container image tag | `4.4.2-4` |
+| `image.tag` | PSMDB Container image tag | `4.4.3-5` |
| `imagePullSecrets` | PSMDB Container pull secret | `[]` |
| `runUid` | Set UserID | `""` |
| `secrets` | Users secret structure | `{}` |
@@ -47,35 +47,37 @@ The chart can be customized using the following configurable parameters:
| `pmm.image.tag` | PMM Container image tag | `2.12.0` |
| `pmm.serverHost` | PMM server related K8S service hostname | `monitoring-service` |
||
-| `replset.name` | ReplicaSet name | `rs0` |
-| `replset.size` | ReplicaSet size (pod quantity) | `3` |
-| `replset.antiAffinityTopologyKey` | ReplicaSet Pod affinity | `kubernetes.io/hostname` |
-| `replset.priorityClass` | ReplicaSet Pod priorityClassName | `""` |
-| `replset.annotations` | ReplicaSet Pod annotations | `{}` |
-| `replset.labels` | ReplicaSet Pod labels | `{}` |
-| `replset.nodeSelector` | ReplicaSet Pod nodeSelector labels | `{}` |
-| `replset.livenessProbe` | ReplicaSet Pod livenessProbe structure | `{}` |
-| `replset.podDisruptionBudget.maxUnavailable` | ReplicaSet failed Pods maximum quantity | `1` |
-| `replset.expose.enabled` | Allow access to replicaSet from outside of Kubernetes | `false` |
-| `replset.expose.exposeType` | Network service access point type | `LoadBalancer` |
-| `replset.arbiter.enabled` | Create MongoDB arbiter service | `false` |
-| `replset.arbiter.size` | MongoDB arbiter Pod quantity | `1` |
-| `replset.arbiter.antiAffinityTopologyKey` | MongoDB arbiter Pod affinity | `kubernetes.io/hostname` |
-| `replset.arbiter.priorityClass` | MongoDB arbiter priorityClassName | `""` |
-| `replset.arbiter.annotations` | MongoDB arbiter Pod annotations | `{}` |
-| `replset.arbiter.labels` | MongoDB arbiter Pod labels | `{}` |
-| `replset.arbiter.nodeSelector` | MongoDB arbiter Pod nodeSelector labels | `{}` |
-| `replset.arbiter.livenessProbe` | MongoDB arbiter Pod livenessProbe structure | `{}` |
-| `replset.schedulerName` | ReplicaSet Pod schedulerName | `""` |
-| `replset.resources` | ReplicaSet Pods resource requests and limits | `{}` |
-| `replset.volumeSpec` | ReplicaSet Pods storage resources | `{}` |
-| `replset.volumeSpec.emptyDir` | ReplicaSet Pods emptyDir K8S storage | `{}` |
-| `replset.volumeSpec.hostPath` | ReplicaSet Pods hostPath K8S storage | |
-| `replset.volumeSpec.hostPath.path` | ReplicaSet Pods hostPath K8S storage path | `""` |
-| `replset.volumeSpec.pvc` | ReplicaSet Pods PVC request parameters | |
-| `replset.volumeSpec.pvc.storageClassName` | ReplicaSet Pods PVC target storageClass | `""` |
-| `replset.volumeSpec.pvc.accessModes` | ReplicaSet Pods PVC access policy | `[]` |
-| `replset.volumeSpec.pvc.resources.requests.storage` | ReplicaSet Pods PVC storage size | `3Gi` |
+| `replsets[0].name` | ReplicaSet name | `rs0` |
+| `replsets[0].size` | ReplicaSet size (pod quantity) | `3` |
+| `replsets[0].antiAffinityTopologyKey` | ReplicaSet Pod affinity | `kubernetes.io/hostname` |
+| `replsets[0].priorityClass` | ReplicaSet Pod priorityClassName | `""` |
+| `replsets[0].annotations` | ReplicaSet Pod annotations | `{}` |
+| `replsets[0].labels` | ReplicaSet Pod labels | `{}` |
+| `replsets[0].nodeSelector` | ReplicaSet Pod nodeSelector labels | `{}` |
+| `replsets[0].livenessProbe` | ReplicaSet Pod livenessProbe structure | `{}` |
+| `replsets[0].runtimeClass` | ReplicaSet Pod runtimeClassName | `""` |
+| `replsets[0].sidecars` | ReplicaSet Pod sidecars | `{}` |
+| `replsets[0].podDisruptionBudget.maxUnavailable` | ReplicaSet failed Pods maximum quantity | `1` |
+| `replsets[0].expose.enabled` | Allow access to replicaSet from outside of Kubernetes | `false` |
+| `replsets[0].expose.exposeType` | Network service access point type | `LoadBalancer` |
+| `replsets[0].arbiter.enabled` | Create MongoDB arbiter service | `false` |
+| `replsets[0].arbiter.size` | MongoDB arbiter Pod quantity | `1` |
+| `replsets[0].arbiter.antiAffinityTopologyKey` | MongoDB arbiter Pod affinity | `kubernetes.io/hostname` |
+| `replsets[0].arbiter.priorityClass` | MongoDB arbiter priorityClassName | `""` |
+| `replsets[0].arbiter.annotations` | MongoDB arbiter Pod annotations | `{}` |
+| `replsets[0].arbiter.labels` | MongoDB arbiter Pod labels | `{}` |
+| `replsets[0].arbiter.nodeSelector` | MongoDB arbiter Pod nodeSelector labels | `{}` |
+| `replsets[0].arbiter.livenessProbe` | MongoDB arbiter Pod livenessProbe structure | `{}` |
+| `replsets[0].schedulerName` | ReplicaSet Pod schedulerName | `""` |
+| `replsets[0].resources` | ReplicaSet Pods resource requests and limits | `{}` |
+| `replsets[0].volumeSpec` | ReplicaSet Pods storage resources | `{}` |
+| `replsets[0].volumeSpec.emptyDir` | ReplicaSet Pods emptyDir K8S storage | `{}` |
+| `replsets[0].volumeSpec.hostPath` | ReplicaSet Pods hostPath K8S storage | |
+| `replsets[0].volumeSpec.hostPath.path` | ReplicaSet Pods hostPath K8S storage path | `""` |
+| `replsets[0].volumeSpec.pvc` | ReplicaSet Pods PVC request parameters | |
+| `replsets[0].volumeSpec.pvc.storageClassName` | ReplicaSet Pods PVC target storageClass | `""` |
+| `replsets[0].volumeSpec.pvc.accessModes` | ReplicaSet Pods PVC access policy | `[]` |
+| `replsets[0].volumeSpec.pvc.resources.requests.storage` | ReplicaSet Pods PVC storage size | `3Gi` |
| |
| `sharding.enabled` | Enable sharding setup | `true` |
| `sharding.configrs.size` | Config ReplicaSet size (pod quantity) | `3` |
@@ -84,6 +86,8 @@ The chart can be customized using the following configurable parameters:
| `sharding.configrs.annotations` | Config ReplicaSet Pod annotations | `{}` |
| `sharding.configrs.labels` | Config ReplicaSet Pod labels | `{}` |
| `sharding.configrs.nodeSelector` | Config ReplicaSet Pod nodeSelector labels | `{}` |
+| `sharding.configrs.runtimeClass` | Config ReplicaSet Pod runtimeClassName | `""` |
+| `sharding.configrs.sidecars` | Config ReplicaSet Pod sidecars | `{}` |
| `sharding.configrs.podDisruptionBudget.maxUnavailable` | Config ReplicaSet failed Pods maximum quantity | `1` |
| `sharding.configrs.resources.limits.cpu` | Config ReplicaSet resource limits CPU | `300m` |
| `sharding.configrs.resources.limits.memory` | Config ReplicaSet resource limits memory | `0.5G` |
@@ -102,6 +106,8 @@ The chart can be customized using the following configurable parameters:
| `sharding.mongos.annotations` | Mongos Pods annotations | `{}` |
| `sharding.mongos.labels` | Mongos Pods labels | `{}` |
| `sharding.mongos.nodeSelector` | Mongos Pods nodeSelector labels | `{}` |
+| `sharding.mongos.runtimeClass` | Mongos Pod runtimeClassName | `""` |
+| `sharding.mongos.sidecars` | Mongos Pod sidecars | `{}` |
| `sharding.mongos.podDisruptionBudget.maxUnavailable` | Mongos failed Pods maximum quantity | `1` |
| `sharding.mongos.resources.limits.cpu` | Mongos Pods resource limits CPU | `300m` |
| `sharding.mongos.resources.limits.memory` | Mongos Pods resource limits memory | `0.5G` |
@@ -114,7 +120,7 @@ The chart can be customized using the following configurable parameters:
| `backup.enabled` | Enable backup PBM agent | `true` |
| `backup.restartOnFailure` | Backup Pods restart policy | `true` |
| `backup.image.repository` | PBM Container image repository | `percona/percona-server-mongodb-operator` |
-| `backup.image.tag` | PBM Container image tag | `1.6.0-backup` |
+| `backup.image.tag` | PBM Container image tag | `1.7.0-backup` |
| `backup.serviceAccountName` | Run PBM Container under specified K8S SA | `percona-server-mongodb-operator` |
| `backup.storages` | Local/remote backup storages settings | `{}` |
| `backup.tasks` | Backup working schedule | `{}` |
@@ -122,6 +128,7 @@ The chart can be customized using the following configurable parameters:
Specify parameters using `--set key=value[,key=value]` argument to `helm install`
+Notice that you can use multiple replica sets only with sharding enabled.
## Examples
@@ -131,6 +138,6 @@ This is great for a dev PSMDB/MongoDB cluster as it doesn't bother with backups
```bash
$ helm install dev --namespace psmdb . \
- --set runUid=1001 --set replset.volumeSpec.pvc.resources.requests.storage=20Gi \
+ --set runUid=1001 --set "replsets[0].volumeSpec.pvc.resources.requests.storage=20Gi" \
--set backup.enabled=false --set sharding.enabled=false
```
diff --git a/charts/psmdb-db/crds/crd.yaml b/charts/psmdb-db/crds/crd.yaml
index 80a74677..10c01864 100644
--- a/charts/psmdb-db/crds/crd.yaml
+++ b/charts/psmdb-db/crds/crd.yaml
@@ -32,6 +32,9 @@ spec:
storage: false
served: true
- name: v1-6-0
+ storage: false
+ served: true
+ - name: v1-7-0
storage: true
served: true
- name: v1alpha1
diff --git a/charts/psmdb-db/production-values.yaml b/charts/psmdb-db/production-values.yaml
index eb7839a5..02b7beae 100644
--- a/charts/psmdb-db/production-values.yaml
+++ b/charts/psmdb-db/production-values.yaml
@@ -6,7 +6,11 @@
# platform: kubernetes
# Cluster DNS Suffix
-# DNSsuffix: .svc.cluster.local
+# DNSsuffix: svc.cluster.local
+
+finalizers:
+## Set this if you want to delete database persistent volumes on cluster deletion
+# - delete-psmdb-pvc
nameOverride: ""
fullnameOverride: ""
@@ -21,7 +25,7 @@ upgradeOptions:
image:
repository: percona/percona-server-mongodb
- tag: 4.4.2-4
+ tag: 4.4.3-5
# imagePullSecrets: []
# runUid: 1001
@@ -34,53 +38,59 @@ pmm:
tag: 2.12.0
serverHost: monitoring-service
-replset:
- name: rs0
- size: 3
- antiAffinityTopologyKey: "kubernetes.io/hostname"
- # priorityClass: ""
- # annotations: {}
- # labels: {}
- # nodeSelector: {}
- livenessProbe:
- failureThreshold: 4
- initialDelaySeconds: 60
- periodSeconds: 30
- successThreshold: 1
- timeoutSeconds: 5
- startupDelaySeconds: 7200
- podDisruptionBudget:
- maxUnavailable: 1
- expose:
- enabled: false
- exposeType: LoadBalancer
- arbiter:
- enabled: false
- size: 1
+replsets:
+ - name: rs0
+ size: 3
antiAffinityTopologyKey: "kubernetes.io/hostname"
# priorityClass: ""
# annotations: {}
# labels: {}
# nodeSelector: {}
- # livenessProbe: {}
- # schedulerName: ""
- resources:
- limits:
- cpu: "300m"
- memory: "0.5G"
- requests:
- cpu: "300m"
- memory: "0.5G"
- volumeSpec:
- # emptyDir: {}
- # hostPath:
- # path: /data
- pvc:
- # storageClassName: standard
- # accessModes: [ "ReadWriteOnce" ]
- resources:
- requests:
- storage: 3Gi
+ livenessProbe:
+ failureThreshold: 4
+ initialDelaySeconds: 60
+ periodSeconds: 30
+ successThreshold: 1
+ timeoutSeconds: 5
+ startupDelaySeconds: 7200
+    # runtimeClass: image-rc
+ # sidecars:
+ # - image: busybox
+ # command: ["/bin/sh"]
+ # args: ["-c", "while true; do echo echo $(date -u) 'test' >> /dev/null; sleep 5;done"]
+ # name: rs-sidecar-1
+ podDisruptionBudget:
+ maxUnavailable: 1
+ expose:
+ enabled: false
+ exposeType: LoadBalancer
+ arbiter:
+ enabled: false
+ size: 1
+ antiAffinityTopologyKey: "kubernetes.io/hostname"
+ # priorityClass: ""
+ # annotations: {}
+ # labels: {}
+ # nodeSelector: {}
+ # livenessProbe: {}
+ # schedulerName: ""
+ resources:
+ limits:
+ cpu: "300m"
+ memory: "0.5G"
+ requests:
+ cpu: "300m"
+ memory: "0.5G"
+ volumeSpec:
+ # emptyDir: {}
+ # hostPath:
+ # path: /data
+ pvc:
+ # storageClassName: standard
+ # accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: 3Gi
sharding:
enabled: true
@@ -92,6 +102,12 @@ sharding:
# annotations: {}
# labels: {}
# nodeSelector: {}
+    # runtimeClass: image-rc
+ # sidecars:
+ # - image: busybox
+ # command: ["/bin/sh"]
+ # args: ["-c", "while true; do echo echo $(date -u) 'test' >> /dev/null; sleep 5;done"]
+ # name: rs-sidecar-1
podDisruptionBudget:
maxUnavailable: 1
resources:
@@ -120,6 +136,12 @@ sharding:
# annotations: {}
# labels: {}
# nodeSelector: {}
+    # runtimeClass: image-rc
+ # sidecars:
+ # - image: busybox
+ # command: ["/bin/sh"]
+ # args: ["-c", "while true; do echo echo $(date -u) 'test' >> /dev/null; sleep 5;done"]
+ # name: rs-sidecar-1
podDisruptionBudget:
maxUnavailable: 1
resources:
@@ -145,7 +167,7 @@ backup:
restartOnFailure: true
image:
repository: percona/percona-server-mongodb-operator
- tag: 1.6.0-backup
+ tag: 1.7.0-backup
serviceAccountName: percona-server-mongodb-operator
# resources:
# limits:
@@ -172,11 +194,13 @@ backup:
# - name: daily-s3-us-west
# enabled: true
# schedule: "0 0 * * *"
+ # keep: 3
# storageName: s3-us-west
# compressionType: gzip
# - name: weekly-s3-us-west
# enabled: false
# schedule: "0 0 * * 0"
+ # keep: 5
# storageName: s3-us-west
# compressionType: gzip
diff --git a/charts/psmdb-db/templates/NOTES.txt b/charts/psmdb-db/templates/NOTES.txt
index a912a513..aa309672 100644
--- a/charts/psmdb-db/templates/NOTES.txt
+++ b/charts/psmdb-db/templates/NOTES.txt
@@ -5,7 +5,7 @@ To get a MongoDB prompt inside your new cluster you can run:
And then for replica set:
$ kubectl run -i --rm --tty percona-client --image=percona/percona-server-mongodb:4.4 --restart=Never \
- -- mongo "mongodb+srv://${ADMIN_USER}:${ADMIN_PASSWORD}@{{ include "psmdb-database.fullname" . }}-{{ .Values.replset.name }}.{{ .Release.Namespace }}.svc.cluster.local/admin?replicaSet=rs0&ssl=false"
+ -- mongo "mongodb+srv://${ADMIN_USER}:${ADMIN_PASSWORD}@{{ include "psmdb-database.fullname" . }}-{{ (index .Values.replsets 0).name }}.{{ .Release.Namespace }}.svc.cluster.local/admin?replicaSet=rs0&ssl=false"
Or for sharding setup:
$ kubectl run -i --rm --tty percona-client --image=percona/percona-server-mongodb:4.4 --restart=Never \
diff --git a/charts/psmdb-db/templates/cluster.yaml b/charts/psmdb-db/templates/cluster.yaml
index 34cc15b1..c295e16e 100644
--- a/charts/psmdb-db/templates/cluster.yaml
+++ b/charts/psmdb-db/templates/cluster.yaml
@@ -1,12 +1,14 @@
-apiVersion: psmdb.percona.com/v1-6-0
+apiVersion: psmdb.percona.com/v1-7-0
kind: PerconaServerMongoDB
metadata:
annotations:
kubectl.kubernetes.io/last-applied-configuration: |
- {"apiVersion":"psmdb.percona.com/v1-6-0","kind":"PerconaServerMongoDB"}
+ {"apiVersion":"psmdb.percona.com/v1-7-0","kind":"PerconaServerMongoDB"}
name: {{ include "psmdb-database.fullname" . }}
labels:
{{ include "psmdb-database.labels" . | indent 4 }}
+ finalizers:
+{{ .Values.finalizers | toYaml | indent 4 }}
spec:
pause: {{ .Values.pause }}
{{- if .Values.platform }}
@@ -46,74 +48,83 @@ spec:
image: "{{ .Values.pmm.image.repository }}:{{ .Values.pmm.image.tag }}"
serverHost: {{ .Values.pmm.serverHost }}
replsets:
- - name: {{ .Values.replset.name }}
- size: {{ .Values.replset.size }}
+ {{- range $replset := .Values.replsets }}
+ - name: {{ $replset.name }}
+ size: {{ $replset.size }}
affinity:
- antiAffinityTopologyKey: {{ .Values.replset.antiAffinityTopologyKey }}
- {{- if .Values.replset.priorityClass }}
- priorityClassName: {{ .Values.replset.priorityClass }}
+ antiAffinityTopologyKey: {{ $replset.antiAffinityTopologyKey }}
+ {{- if $replset.priorityClass }}
+ priorityClassName: {{ $replset.priorityClass }}
{{- end }}
- {{- if .Values.replset.annotations }}
+ {{- if $replset.annotations }}
annotations:
-{{ .Values.replset.annotations | toYaml | indent 6 }}
+{{ $replset.annotations | toYaml | indent 6 }}
{{- end }}
- {{- if .Values.replset.labels }}
+ {{- if $replset.labels }}
labels:
-{{ .Values.replset.labels | toYaml | indent 6 }}
+{{ $replset.labels | toYaml | indent 6 }}
{{- end }}
- {{- if .Values.replset.nodeSelector }}
+ {{- if $replset.nodeSelector }}
nodeSelector:
-{{ .Values.replset.nodeSelector | toYaml | indent 6 }}
+{{ $replset.nodeSelector | toYaml | indent 6 }}
{{- end }}
- {{- if .Values.replset.livenessProbe }}
+ {{- if $replset.livenessProbe }}
livenessProbe:
-{{ .Values.replset.livenessProbe | toYaml | indent 6 }}
+{{ $replset.livenessProbe | toYaml | indent 6 }}
+ {{- end }}
+ {{- if $replset.runtimeClass }}
+ runtimeClassName: {{ $replset.runtimeClass }}
+ {{- end }}
+ {{- if $replset.sidecars }}
+ sidecars:
+{{ $replset.sidecars | toYaml | indent 6 }}
{{- end }}
podDisruptionBudget:
- {{- if .Values.replset.podDisruptionBudget.maxUnavailable }}
- maxUnavailable: {{ .Values.replset.podDisruptionBudget.maxUnavailable }}
+ {{- if $replset.podDisruptionBudget.maxUnavailable }}
+ maxUnavailable: {{ $replset.podDisruptionBudget.maxUnavailable }}
{{- else }}
- minAvailable: {{ .Values.replset.podDisruptionBudget.minAvailable }}
+ minAvailable: {{ $replset.podDisruptionBudget.minAvailable }}
{{- end }}
expose:
- enabled: {{ .Values.replset.expose.enabled }}
- exposeType: {{ .Values.replset.expose.loadBalancer }}
+ enabled: {{ $replset.expose.enabled }}
+      exposeType: {{ $replset.expose.exposeType }}
arbiter:
- enabled: {{ .Values.replset.arbiter.enabled }}
- size: {{ .Values.replset.arbiter.size }}
+ enabled: {{ $replset.arbiter.enabled }}
+ size: {{ $replset.arbiter.size }}
affinity:
- antiAffinityTopologyKey: {{ .Values.replset.arbiter.antiAffinityTopologyKey }}
- {{- if .Values.replset.arbiter.priorityClass }}
- priorityClassName: {{ .Values.replset.arbiter.priorityClass }}
+ antiAffinityTopologyKey: {{ $replset.arbiter.antiAffinityTopologyKey }}
+ {{- if $replset.arbiter.priorityClass }}
+ priorityClassName: {{ $replset.arbiter.priorityClass }}
{{- end }}
- {{- if .Values.replset.arbiter.annotations }}
+ {{- if $replset.arbiter.annotations }}
annotations:
-{{ .Values.replset.arbiter.annotations | toYaml | indent 8 }}
+{{ $replset.arbiter.annotations | toYaml | indent 8 }}
{{- end }}
- {{- if .Values.replset.arbiter.labels }}
+ {{- if $replset.arbiter.labels }}
labels:
-{{ .Values.replset.arbiter.labels | toYaml | indent 8 }}
+{{ $replset.arbiter.labels | toYaml | indent 8 }}
{{- end }}
- {{- if .Values.replset.arbiter.nodeSelector }}
+ {{- if $replset.arbiter.nodeSelector }}
nodeSelector:
-{{ .Values.replset.arbiter.nodeSelector | toYaml | indent 8 }}
+{{ $replset.arbiter.nodeSelector | toYaml | indent 8 }}
{{- end }}
- {{- if .Values.replset.schedulerName }}
- schedulerName: {{ .Values.replset.schedulerName }}
+ {{- if $replset.schedulerName }}
+ schedulerName: {{ $replset.schedulerName }}
{{- end }}
resources:
-{{ .Values.replset.resources | toYaml | indent 6 }}
+{{ $replset.resources | toYaml | indent 6 }}
volumeSpec:
- {{- if .Values.replset.volumeSpec.hostPath }}
+ {{- if $replset.volumeSpec.hostPath }}
hostPath:
- path: {{ .Values.replset.volumeSpec.hostPath }}
+ path: {{ $replset.volumeSpec.hostPath }}
type: Directory
- {{- else if .Values.replset.volumeSpec.pvc }}
+ {{- else if $replset.volumeSpec.pvc }}
persistentVolumeClaim:
-{{ .Values.replset.volumeSpec.pvc | toYaml | indent 8 }}
+{{ $replset.volumeSpec.pvc | toYaml | indent 8 }}
{{- else }}
emptyDir: {}
{{- end }}
+ {{- end }}
sharding:
enabled: {{ .Values.sharding.enabled }}
@@ -136,7 +147,14 @@ spec:
{{- if .Values.sharding.configrs.nodeSelector }}
nodeSelector:
{{ .Values.sharding.configrs.nodeSelector | toYaml | indent 8 }}
- {{- end }}
+ {{- end }}
+ {{- if .Values.sharding.configrs.runtimeClass }}
+ runtimeClassName: {{ .Values.sharding.configrs.runtimeClass }}
+ {{- end }}
+ {{- if .Values.sharding.configrs.sidecars }}
+ sidecars:
+{{ .Values.sharding.configrs.sidecars | toYaml | indent 8 }}
+ {{- end }}
podDisruptionBudget:
{{- if .Values.sharding.configrs.podDisruptionBudget.maxUnavailable }}
maxUnavailable: {{ .Values.sharding.configrs.podDisruptionBudget.maxUnavailable }}
@@ -180,6 +198,13 @@ spec:
{{- if .Values.sharding.mongos.nodeSelector }}
nodeSelector:
{{ .Values.sharding.mongos.nodeSelector | toYaml | indent 8 }}
+ {{- end }}
+ {{- if .Values.sharding.mongos.runtimeClass }}
+ runtimeClassName: {{ .Values.sharding.mongos.runtimeClass }}
+ {{- end }}
+ {{- if .Values.sharding.mongos.sidecars }}
+ sidecars:
+{{ .Values.sharding.mongos.sidecars | toYaml | indent 8 }}
{{- end }}
podDisruptionBudget:
{{- if .Values.sharding.mongos.podDisruptionBudget.maxUnavailable }}
diff --git a/charts/psmdb-db/values.yaml b/charts/psmdb-db/values.yaml
index 8ca94355..03834506 100644
--- a/charts/psmdb-db/values.yaml
+++ b/charts/psmdb-db/values.yaml
@@ -8,6 +8,10 @@
# Cluster DNS Suffix
# DNSsuffix: svc.cluster.local
+finalizers:
+## Set this if you want to delete database persistent volumes on cluster deletion
+ - delete-psmdb-pvc
+
pause: false
allowUnsafeConfigurations: false
updateStrategy: SmartUpdate
@@ -18,7 +22,7 @@ upgradeOptions:
image:
repository: percona/percona-server-mongodb
- tag: 4.4.2-4
+ tag: 4.4.3-5
# imagePullSecrets: []
# runUid: 1001
@@ -31,53 +35,59 @@ pmm:
tag: 2.12.0
serverHost: monitoring-service
-replset:
- name: rs0
- size: 3
- antiAffinityTopologyKey: "kubernetes.io/hostname"
- # priorityClass: ""
- # annotations: {}
- # labels: {}
- # nodeSelector: {}
- livenessProbe:
- failureThreshold: 4
- initialDelaySeconds: 60
- periodSeconds: 30
- successThreshold: 1
- timeoutSeconds: 5
- startupDelaySeconds: 7200
- podDisruptionBudget:
- maxUnavailable: 1
- expose:
- enabled: false
- exposeType: LoadBalancer
- arbiter:
- enabled: false
- size: 1
+replsets:
+ - name: rs0
+ size: 3
antiAffinityTopologyKey: "kubernetes.io/hostname"
# priorityClass: ""
# annotations: {}
# labels: {}
# nodeSelector: {}
- # livenessProbe: {}
- # schedulerName: ""
- resources:
- limits:
- cpu: "300m"
- memory: "0.5G"
- requests:
- cpu: "300m"
- memory: "0.5G"
- volumeSpec:
- # emptyDir: {}
- # hostPath:
- # path: /data
- pvc:
- # storageClassName: standard
- # accessModes: [ "ReadWriteOnce" ]
- resources:
- requests:
- storage: 3Gi
+ livenessProbe:
+ failureThreshold: 4
+ initialDelaySeconds: 60
+ periodSeconds: 30
+ successThreshold: 1
+ timeoutSeconds: 5
+ startupDelaySeconds: 7200
+    # runtimeClass: image-rc
+ # sidecars:
+ # - image: busybox
+ # command: ["/bin/sh"]
+ # args: ["-c", "while true; do echo echo $(date -u) 'test' >> /dev/null; sleep 5;done"]
+ # name: rs-sidecar-1
+ podDisruptionBudget:
+ maxUnavailable: 1
+ expose:
+ enabled: false
+ exposeType: LoadBalancer
+ arbiter:
+ enabled: false
+ size: 1
+ antiAffinityTopologyKey: "kubernetes.io/hostname"
+ # priorityClass: ""
+ # annotations: {}
+ # labels: {}
+ # nodeSelector: {}
+ # livenessProbe: {}
+ # schedulerName: ""
+ resources:
+ limits:
+ cpu: "300m"
+ memory: "0.5G"
+ requests:
+ cpu: "300m"
+ memory: "0.5G"
+ volumeSpec:
+ # emptyDir: {}
+ # hostPath:
+ # path: /data
+ pvc:
+ # storageClassName: standard
+ # accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: 3Gi
sharding:
enabled: true
@@ -89,6 +99,12 @@ sharding:
# annotations: {}
# labels: {}
# nodeSelector: {}
+    # runtimeClass: image-rc
+ # sidecars:
+ # - image: busybox
+ # command: ["/bin/sh"]
+ # args: ["-c", "while true; do echo echo $(date -u) 'test' >> /dev/null; sleep 5;done"]
+ # name: rs-sidecar-1
podDisruptionBudget:
maxUnavailable: 1
resources:
@@ -117,6 +133,12 @@ sharding:
# annotations: {}
# labels: {}
# nodeSelector: {}
+    # runtimeClass: image-rc
+ # sidecars:
+ # - image: busybox
+ # command: ["/bin/sh"]
+ # args: ["-c", "while true; do echo echo $(date -u) 'test' >> /dev/null; sleep 5;done"]
+ # name: rs-sidecar-1
podDisruptionBudget:
maxUnavailable: 1
resources:
@@ -142,7 +164,7 @@ backup:
restartOnFailure: true
image:
repository: percona/percona-server-mongodb-operator
- tag: 1.6.0-backup
+ tag: 1.7.0-backup
serviceAccountName: percona-server-mongodb-operator
# resources:
# limits:
@@ -169,11 +191,13 @@ backup:
# - name: daily-s3-us-west
# enabled: true
# schedule: "0 0 * * *"
+ # keep: 3
# storageName: s3-us-west
# compressionType: gzip
# - name: weekly-s3-us-west
# enabled: false
# schedule: "0 0 * * 0"
+ # keep: 5
# storageName: s3-us-west
# compressionType: gzip
diff --git a/charts/psmdb-operator/Chart.yaml b/charts/psmdb-operator/Chart.yaml
index 07b5db64..39a4a151 100644
--- a/charts/psmdb-operator/Chart.yaml
+++ b/charts/psmdb-operator/Chart.yaml
@@ -1,9 +1,9 @@
apiVersion: v1
-appVersion: "1.6.0"
+appVersion: "1.7.0"
description: A Helm chart for Deploying the Percona Kubernetes Operator for Percona Server for MongoDB
name: psmdb-operator
home: https://www.percona.com/doc/kubernetes-operator-for-psmongodb/kubernetes.html
-version: 0.1.1
+version: 1.7.0
maintainers:
- name: cap1984
email: ivan.pylypenko@percona.com
diff --git a/charts/psmdb-operator/README.md b/charts/psmdb-operator/README.md
index f2682fde..39790b60 100644
--- a/charts/psmdb-operator/README.md
+++ b/charts/psmdb-operator/README.md
@@ -4,9 +4,9 @@ This chart implements Percona Server MongoDB operator deployment. The Operator i
*
## Pre-requisites
-* Kubernetes 1.11+
+* Kubernetes 1.15+
* PV support on the underlying infrastructure - only if you are provisioning persistent volume(s).
-* At least `v2.4.0` version of helm
+* At least `v2.5.0` version of helm
## Deployment Details
*
@@ -20,7 +20,7 @@ To install the chart with the `psmdb` release name using a dedicated namespace (
```sh
helm repo add percona https://percona.github.io/percona-helm-charts/
-helm install my-operator percona/psmdb-operator --version 0.1.0 --namespace my-namespace
+helm install my-operator percona/psmdb-operator --version 1.7.0 --namespace my-namespace
```
The chart can be customized using the following configurable parameters:
@@ -28,7 +28,7 @@ The chart can be customized using the following configurable parameters:
| Parameter | Description | Default |
| ------------------------------- | ------------------------------------------------------------------------------| ------------------------------------------|
| `image.repository` | PSMDB Operator Container image name | `percona/percona-server-mongodb-operator` |
-| `image.tag` | PSMDB Operator Container image tag | `1.4.0` |
+| `image.tag` | PSMDB Operator Container image tag | `1.7.0` |
| `image.pullPolicy` | PSMDB Operator Container pull policy | `Always` |
| `image.pullSecrets` | PSMDB Operator Pod pull secret | `[]` |
| `replicaCount` | PSMDB Operator Pod quantity | `1` |
diff --git a/charts/psmdb-operator/crds/crd.yaml b/charts/psmdb-operator/crds/crd.yaml
index 80a74677..10c01864 100644
--- a/charts/psmdb-operator/crds/crd.yaml
+++ b/charts/psmdb-operator/crds/crd.yaml
@@ -32,6 +32,9 @@ spec:
storage: false
served: true
- name: v1-6-0
+ storage: false
+ served: true
+ - name: v1-7-0
storage: true
served: true
- name: v1alpha1
diff --git a/charts/psmdb-operator/templates/role.yaml b/charts/psmdb-operator/templates/role.yaml
index 14e37698..d91631f8 100644
--- a/charts/psmdb-operator/templates/role.yaml
+++ b/charts/psmdb-operator/templates/role.yaml
@@ -21,9 +21,11 @@ rules:
verbs:
- get
- list
- - update
- watch
- create
+ - update
+ - patch
+ - delete
- apiGroups:
- ""
resources:
diff --git a/charts/psmdb-operator/values.yaml b/charts/psmdb-operator/values.yaml
index 346bbd99..009d328a 100644
--- a/charts/psmdb-operator/values.yaml
+++ b/charts/psmdb-operator/values.yaml
@@ -6,7 +6,7 @@ replicaCount: 1
image:
repository: percona/percona-server-mongodb-operator
- tag: 1.6.0
+ tag: 1.7.0
pullPolicy: IfNotPresent
# set if you want to specify a namespace to watch
diff --git a/ct.yaml b/ct.yaml
deleted file mode 100644
index ebe925b9..00000000
--- a/ct.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-# See https://github.com/helm/chart-testing#configuration
-helm-extra-args: --timeout 600