diff --git a/Dockerfile b/Dockerfile
index 185fd394..44c7106a 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -27,7 +27,7 @@ RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} GO111MODULE=on go
 FROM registry.access.redhat.com/ubi9/ubi-minimal:latest
 
 # Version of Operator (build arg)
-ARG VERSION="3.3.1"
+ARG VERSION="3.4.0"
 
 # User to run container as
 ARG USER="root"
diff --git a/Jenkinsfile b/Jenkinsfile
index db2d6d55..b0f79912 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -24,7 +24,7 @@ pipeline {
 
         AEROSPIKE_CUSTOM_INIT_REGISTRY="568976754000.dkr.ecr.ap-south-1.amazonaws.com"
         AEROSPIKE_CUSTOM_INIT_REGISTRY_NAMESPACE="aerospike"
-        AEROSPIKE_CUSTOM_INIT_NAME_TAG="aerospike-kubernetes-init:2.2.1"
+        AEROSPIKE_CUSTOM_INIT_NAME_TAG="aerospike-kubernetes-init:2.2.2"
     }
 
     stages {
@@ -119,7 +119,7 @@ boolean isNightly() {
 }
 
 String getVersion() {
-    def prefix = "3.3.1"
+    def prefix = "3.4.0"
     def candidateName = ""
     if(isNightly()) {
         def timestamp = new Date().format("yyyy-MM-dd")
diff --git a/Makefile b/Makefile
index 78e15994..5790e8b1 100644
--- a/Makefile
+++ b/Makefile
@@ -11,7 +11,7 @@ OPENSHIFT_VERSION="v4.9"
 # - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2)
 # - use environment variables to overwrite this value (e.g export VERSION=0.0.2)
 # TODO: Version must be pulled from git tags
-VERSION ?= 3.3.1
+VERSION ?= 3.4.0
 
 # Platforms supported
 PLATFORMS ?= linux/amd64,linux/arm64
@@ -313,7 +313,7 @@ submodules: ## Pull and update git submodules recursively
 
 # Generate bundle manifests and metadata, then validate generated files.
 # For OpenShift bundles run
-# CHANNELS=stable DEFAULT_CHANNEL=stable OPENSHIFT_VERSION=v4.6 IMG=docker.io/aerospike/aerospike-kubernetes-operator-nightly:3.3.1 make bundle
+# CHANNELS=stable DEFAULT_CHANNEL=stable OPENSHIFT_VERSION=v4.6 IMG=docker.io/aerospike/aerospike-kubernetes-operator-nightly:3.4.0 make bundle
 .PHONY: bundle
 bundle: manifests kustomize operator-sdk
 	rm -rf $(ROOT_DIR)/bundle.Dockerfile $(BUNDLE_DIR)
diff --git a/README.md b/README.md
index 5eeffa18..e239a8ae 100644
--- a/README.md
+++ b/README.md
@@ -26,6 +26,8 @@ The Operator supports the following capabilities:
 * Configure persistent storage and resource allocation
 * Standardize and validate configurations
 * Cluster security management
+* Aerospike cluster monitoring
+* Backup and restore Aerospike clusters
 
 ## Building and quick start
 
@@ -42,7 +44,7 @@ Run the following command with the appropriate name and version for the operator
 ```sh
 IMAGE_TAG_BASE=aerospike/aerospike-kubernetes-operator-nightly
-VERSION=3.3.1
+VERSION=3.4.0
 make docker-buildx IMG=${IMAGE_TAG_BASE}:${VERSION} PLATFORMS=linux/amd64
 ```
 
 **Note**: Change `PLATFORMS` var as per host machine or remove it to build multi-arch image
@@ -82,8 +84,8 @@ operator using OLM.
 
 ### Install operator-sdk
 
-Install operator-sdk version 1.28.0 using the
-installation [guide](https://v1-28-x.sdk.operatorframework.io/docs/installation/)
+Install operator-sdk version 1.36.0 using the
+installation [guide](https://v1-36-x.sdk.operatorframework.io/docs/installation/)
 
 ### Build the bundle
 
@@ -94,7 +96,7 @@ Set up the environment with image names.
 ```shell
 export ACCOUNT=aerospike
 export IMAGE_TAG_BASE=${ACCOUNT}/aerospike-kubernetes-operator
-export VERSION=3.3.1
+export VERSION=3.4.0
 export IMG=docker.io/${IMAGE_TAG_BASE}-nightly:${VERSION}
 export BUNDLE_IMG=docker.io/${IMAGE_TAG_BASE}-bundle-nightly:${VERSION}
 export CATALOG_IMG=docker.io/${IMAGE_TAG_BASE}-catalog-nightly:${VERSION}
diff --git a/api/v1/aerospikecluster_types.go b/api/v1/aerospikecluster_types.go
index 6bceddd9..5df987c1 100644
--- a/api/v1/aerospikecluster_types.go
+++ b/api/v1/aerospikecluster_types.go
@@ -958,7 +958,7 @@ type AerospikePodStatus struct { //nolint:govet // for readability
 
 // AerospikeCluster is the schema for the AerospikeCluster API
 // +operator-sdk:csv:customresourcedefinitions:displayName="Aerospike Cluster",resources={{Service, v1},{Pod,v1},{StatefulSet,v1}}
-// +kubebuilder:metadata:annotations="aerospike-kubernetes-operator/version=3.3.1"
+// +kubebuilder:metadata:annotations="aerospike-kubernetes-operator/version=3.4.0"
 //
 //nolint:lll // for readability
 type AerospikeCluster struct { //nolint:govet // for readability
diff --git a/api/v1/utils.go b/api/v1/utils.go
index f69f92a0..a464748f 100644
--- a/api/v1/utils.go
+++ b/api/v1/utils.go
@@ -75,7 +75,7 @@ const (
 	AerospikeInitContainerNameTagEnvVar = "AEROSPIKE_KUBERNETES_INIT_NAME_TAG"
 	AerospikeInitContainerDefaultRegistry = "docker.io"
 	AerospikeInitContainerDefaultRegistryNamespace = "aerospike"
-	AerospikeInitContainerDefaultNameAndTag = "aerospike-kubernetes-init:2.2.1"
+	AerospikeInitContainerDefaultNameAndTag = "aerospike-kubernetes-init:2.2.2"
 	AerospikeAppLabel = "app"
 	AerospikeAppLabelValue = "aerospike-cluster"
 	AerospikeCustomResourceLabel = "aerospike.com/cr"
@@ -132,11 +132,15 @@ func getInitContainerImage(registry, namespace, repoAndTag string) string {
 }
 
 func GetAerospikeInitContainerImage(aeroCluster *AerospikeCluster) string {
-	registry := getInitContainerImageValue(aeroCluster, AerospikeInitContainerRegistryEnvVar,
-		AerospikeInitContainerDefaultRegistry)
+	registry := getInitContainerImageValue(
+		aeroCluster, AerospikeInitContainerRegistryEnvVar,
+		AerospikeInitContainerDefaultRegistry,
+	)
 	namespace := getInitContainerImageRegistryNamespace(aeroCluster)
-	repoAndTag := getInitContainerImageValue(aeroCluster, AerospikeInitContainerNameTagEnvVar,
-		AerospikeInitContainerDefaultNameAndTag)
+	repoAndTag := getInitContainerImageValue(
+		aeroCluster, AerospikeInitContainerNameTagEnvVar,
+		AerospikeInitContainerDefaultNameAndTag,
+	)
 
 	return getInitContainerImage(registry, namespace, repoAndTag)
 }
diff --git a/api/v1beta1/aerospikebackup_types.go b/api/v1beta1/aerospikebackup_types.go
index 87f06ff4..a1e8bf78 100644
--- a/api/v1beta1/aerospikebackup_types.go
+++ b/api/v1beta1/aerospikebackup_types.go
@@ -83,7 +83,7 @@ type AerospikeBackupStatus struct {
 
 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
-// +kubebuilder:metadata:annotations="aerospike-kubernetes-operator/version=3.3.1"
+// +kubebuilder:metadata:annotations="aerospike-kubernetes-operator/version=3.4.0"
 // +kubebuilder:printcolumn:name="Backup Service Name",type=string,JSONPath=`.spec.backupService.name`
 // +kubebuilder:printcolumn:name="Backup Service Namespace",type=string,JSONPath=`.spec.backupService.namespace`
 // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
diff --git a/api/v1beta1/aerospikebackupservice_types.go b/api/v1beta1/aerospikebackupservice_types.go
index 123af340..18b8dffb 100644
--- a/api/v1beta1/aerospikebackupservice_types.go
+++ b/api/v1beta1/aerospikebackupservice_types.go
@@ -105,7 +105,7 @@ type AerospikeBackupServiceStatus struct {
 
 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
-// +kubebuilder:metadata:annotations="aerospike-kubernetes-operator/version=3.3.1"
+// +kubebuilder:metadata:annotations="aerospike-kubernetes-operator/version=3.4.0"
 // +kubebuilder:printcolumn:name="Image",type=string,JSONPath=`.spec.image`
 // +kubebuilder:printcolumn:name="Service Type",type=string,JSONPath=`.spec.service.type`
 // +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase`
diff --git a/api/v1beta1/aerospikerestore_types.go b/api/v1beta1/aerospikerestore_types.go
index d7562df6..73b17e2c 100644
--- a/api/v1beta1/aerospikerestore_types.go
+++ b/api/v1beta1/aerospikerestore_types.go
@@ -84,7 +84,7 @@ type AerospikeRestoreStatus struct {
 
 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
-// +kubebuilder:metadata:annotations="aerospike-kubernetes-operator/version=3.3.1"
+// +kubebuilder:metadata:annotations="aerospike-kubernetes-operator/version=3.4.0"
 // +kubebuilder:printcolumn:name="Backup Service Name",type=string,JSONPath=`.spec.backupService.name`
 // +kubebuilder:printcolumn:name="Backup Service Namespace",type=string,JSONPath=`.spec.backupService.namespace`
 // +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase`
diff --git a/config/crd/bases/asdb.aerospike.com_aerospikebackups.yaml b/config/crd/bases/asdb.aerospike.com_aerospikebackups.yaml
index 6d7d8a4d..0521940d 100644
--- a/config/crd/bases/asdb.aerospike.com_aerospikebackups.yaml
+++ b/config/crd/bases/asdb.aerospike.com_aerospikebackups.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    aerospike-kubernetes-operator/version: 3.3.1
+    aerospike-kubernetes-operator/version: 3.4.0
     controller-gen.kubebuilder.io/version: v0.14.0
   name: aerospikebackups.asdb.aerospike.com
 spec:
diff --git a/config/crd/bases/asdb.aerospike.com_aerospikebackupservices.yaml b/config/crd/bases/asdb.aerospike.com_aerospikebackupservices.yaml
index 0c8159c6..75f19cb8 100644
--- a/config/crd/bases/asdb.aerospike.com_aerospikebackupservices.yaml
+++ b/config/crd/bases/asdb.aerospike.com_aerospikebackupservices.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    aerospike-kubernetes-operator/version: 3.3.1
+    aerospike-kubernetes-operator/version: 3.4.0
     controller-gen.kubebuilder.io/version: v0.14.0
   name: aerospikebackupservices.asdb.aerospike.com
 spec:
diff --git a/config/crd/bases/asdb.aerospike.com_aerospikeclusters.yaml b/config/crd/bases/asdb.aerospike.com_aerospikeclusters.yaml
index 4ad9433e..c502577e 100644
--- a/config/crd/bases/asdb.aerospike.com_aerospikeclusters.yaml
+++ b/config/crd/bases/asdb.aerospike.com_aerospikeclusters.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    aerospike-kubernetes-operator/version: 3.3.1
+    aerospike-kubernetes-operator/version: 3.4.0
     controller-gen.kubebuilder.io/version: v0.14.0
   name: aerospikeclusters.asdb.aerospike.com
 spec:
diff --git a/config/crd/bases/asdb.aerospike.com_aerospikerestores.yaml b/config/crd/bases/asdb.aerospike.com_aerospikerestores.yaml
index b29b2678..56bc292a 100644
--- a/config/crd/bases/asdb.aerospike.com_aerospikerestores.yaml
+++ b/config/crd/bases/asdb.aerospike.com_aerospikerestores.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    aerospike-kubernetes-operator/version: 3.3.1
+    aerospike-kubernetes-operator/version: 3.4.0
     controller-gen.kubebuilder.io/version: v0.14.0
   name: aerospikerestores.asdb.aerospike.com
 spec:
diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml
index 21e9a7b5..4130e0d4 100644
--- a/config/manager/kustomization.yaml
+++ b/config/manager/kustomization.yaml
@@ -14,4 +14,4 @@ configMapGenerator:
 images:
 - name: controller
   newName: docker.io/aerospike/aerospike-kubernetes-operator-nightly
-  newTag: 3.3.1
+  newTag: 3.4.0
diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml
index 6b28756a..521c4594 100644
--- a/config/manager/manager.yaml
+++ b/config/manager/manager.yaml
@@ -66,7 +66,7 @@ spec:
             value: aerospike
           - name: AEROSPIKE_KUBERNETES_INIT_NAME_TAG
             # this is the name and tag of aerospike-init image
-            value: aerospike-kubernetes-init:2.2.1
+            value: aerospike-kubernetes-init:2.2.2
 
       serviceAccountName: controller-manager
       terminationGracePeriodSeconds: 10
diff --git a/config/manifests/bases/aerospike-kubernetes-operator.clusterserviceversion.yaml b/config/manifests/bases/aerospike-kubernetes-operator.clusterserviceversion.yaml
index fad07a05..28f87370 100644
--- a/config/manifests/bases/aerospike-kubernetes-operator.clusterserviceversion.yaml
+++ b/config/manifests/bases/aerospike-kubernetes-operator.clusterserviceversion.yaml
@@ -257,6 +257,8 @@ spec:
     - Configure persistent storage and resource allocation
     - Standardize and validate configurations
     - Cluster security management
+    - Aerospike cluster monitoring
+    - Backup and restore Aerospike clusters
   displayName: Aerospike Kubernetes Operator
   icon:
   - base64data: PHN2ZyB3aWR0aD0iMjUwMCIgaGVpZ2h0PSIyNTAwIiB2aWV3Qm94PSIwIDAgMjU2IDI1NiIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiBwcmVzZXJ2ZUFzcGVjdFJhdGlvPSJ4TWlkWU1pZCI+PHBhdGggZD0iTTAgMGgyNTZ2MjU2SDBWMHoiIGZpbGw9IiNCMDI1MkEiLz48cGF0aCBkPSJNMTU2LjI5MyA5NS42MDVsLTczLjE2OSAzMi41OTQgNzMuMTcgMzIuODFWOTUuNjA1em0tOTIuMDMyIDM5Ljk3OWwtMTcuMDQ5LTcuMyAxNy4wNDktNy44ODIgMTQ0LjUyNy02NS4zNzZ2MTcuNDAzbC0zNy45MzIgMTYuODI0djc4LjExN2wzNy45MzIgMTd2MTYuNjA0TDY0LjI2IDEzNS41ODR6IiBmaWxsPSIjRkZGIi8+PC9zdmc+
diff --git a/helm-charts/aerospike-backup-service/Chart.yaml b/helm-charts/aerospike-backup-service/Chart.yaml
index 8a774e72..5875bc46 100644
--- a/helm-charts/aerospike-backup-service/Chart.yaml
+++ b/helm-charts/aerospike-backup-service/Chart.yaml
@@ -3,9 +3,9 @@ type: application
 name: aerospike-backup-service
 
 # version tracks chart changes
-version: 3.3.1
+version: 3.4.0
 # appVersion tracks operator version
-appVersion: 3.3.1
+appVersion: 3.4.0
 
 description: A Helm chart for Aerospike Backup Service Custom Resource
 icon: https://avatars0.githubusercontent.com/u/2214313?s=200&v=4
diff --git a/helm-charts/aerospike-backup-service/values.yaml b/helm-charts/aerospike-backup-service/values.yaml
index 4a1d0a15..577dd6df 100644
--- a/helm-charts/aerospike-backup-service/values.yaml
+++ b/helm-charts/aerospike-backup-service/values.yaml
@@ -33,7 +33,6 @@ backupServiceConfig: {}
 #    test-policy:
 #      parallel: 3
 #      remove-files: KeepAll
-#      type: 1
 #  storage:
 #    local:
 #      path: /localStorage
@@ -42,6 +41,7 @@
 #      type: aws-s3
 #      path: "s3://test-bucket"
 #      s3-region: us-east-1
+#      s3-endpoint-override: ""
 #      s3-profile: default
 
 ## SecretMounts is the list of secret to be mounted in the backup service.
diff --git a/helm-charts/aerospike-backup/Chart.yaml b/helm-charts/aerospike-backup/Chart.yaml
index b9f08816..4252fef6 100644
--- a/helm-charts/aerospike-backup/Chart.yaml
+++ b/helm-charts/aerospike-backup/Chart.yaml
@@ -3,9 +3,9 @@ type: application
 name: aerospike-backup
 
 # version tracks chart changes
-version: 3.3.1
+version: 3.4.0
 # appVersion tracks operator version
-appVersion: 3.3.1
+appVersion: 3.4.0
 
 description: A Helm chart for Aerospike Backup Custom Resource
 icon: https://avatars0.githubusercontent.com/u/2214313?s=200&v=4
diff --git a/helm-charts/aerospike-backup/values.yaml b/helm-charts/aerospike-backup/values.yaml
index 064aa819..3d51fb30 100644
--- a/helm-charts/aerospike-backup/values.yaml
+++ b/helm-charts/aerospike-backup/values.yaml
@@ -17,7 +17,8 @@ backupService: {}
 ## This config is used to trigger backups. It includes: aerospike-cluster, backup-routines
 backupConfig: {}
 #  aerospike-cluster:
-#    aerospike-aerospikebackup-test-cluster: # Name format: --
+    # Name format: The name must begin with the prefix -
+#    aerospike-aerospikebackup-test-cluster:
 #      credentials:
 #        password: admin123
 #        user: admin
@@ -25,7 +26,8 @@ backupConfig: {}
 #        - host-name: aerocluster.aerospike.svc.cluster.local
 #          port: 3000
 #  backup-routines:
-#    aerospike-aerospikebackup-test-routine: # Name format: --
+    # Name format: The name must begin with the prefix -
+#    aerospike-aerospikebackup-test-routine:
 #      backup-policy: test-policy
 #      interval-cron: "@daily"
 #      incr-interval-cron: "@hourly"
diff --git a/helm-charts/aerospike-cluster/Chart.yaml b/helm-charts/aerospike-cluster/Chart.yaml
index 8a5aef52..da78f065 100644
--- a/helm-charts/aerospike-cluster/Chart.yaml
+++ b/helm-charts/aerospike-cluster/Chart.yaml
@@ -3,9 +3,9 @@ type: application
 name: aerospike-cluster
 
 # version tracks chart changes
-version: 3.3.1
+version: 3.4.0
 # appVersion tracks operator version
-appVersion: 3.3.1
+appVersion: 3.4.0
 
 description: A Helm chart for Aerospike Cluster Custom Resource
 icon: https://avatars0.githubusercontent.com/u/2214313?s=200&v=4
diff --git a/helm-charts/aerospike-kubernetes-operator/Chart.yaml b/helm-charts/aerospike-kubernetes-operator/Chart.yaml
index 83124a87..0c646c0d 100644
--- a/helm-charts/aerospike-kubernetes-operator/Chart.yaml
+++ b/helm-charts/aerospike-kubernetes-operator/Chart.yaml
@@ -3,9 +3,9 @@ type: application
 name: aerospike-kubernetes-operator
 
 # version tracks chart changes
-version: 3.3.1
+version: 3.4.0
 # appVersion tracks operator version
-appVersion: 3.3.1
+appVersion: 3.4.0
 
 description: A Helm chart for Aerospike Kubernetes Operator
 icon: https://avatars0.githubusercontent.com/u/2214313?s=200&v=4
diff --git a/helm-charts/aerospike-kubernetes-operator/README.md b/helm-charts/aerospike-kubernetes-operator/README.md
index 4f3f89c8..2d865cf5 100644
--- a/helm-charts/aerospike-kubernetes-operator/README.md
+++ b/helm-charts/aerospike-kubernetes-operator/README.md
@@ -37,7 +37,7 @@ helm install aerospike-kubernetes-operator ./aerospike-kubernetes-operator --set
 |-------------------------------------|-------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------|
 | `replicas` | Number of operator replicas | `2` |
 | `operatorImage.repository` | Operator image repository | `aerospike/aerospike-kubernetes-operator` |
-| `operatorImage.tag` | Operator image tag | `3.3.1` |
+| `operatorImage.tag` | Operator image tag | `3.4.0` |
 | `operatorImage.pullPolicy` | Image pull policy | `IfNotPresent` |
 | `imagePullSecrets` | Secrets containing credentials to pull Operator image from a private registry | `{}` (nil) |
 | `rbac.create` | Set this to `true` to let helm chart automatically create RBAC resources necessary for operator | `true` |
diff --git a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackups.asdb.aerospike.com.yaml b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackups.asdb.aerospike.com.yaml
index 6d7d8a4d..0521940d 100644
--- a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackups.asdb.aerospike.com.yaml
+++ b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackups.asdb.aerospike.com.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    aerospike-kubernetes-operator/version: 3.3.1
+    aerospike-kubernetes-operator/version: 3.4.0
     controller-gen.kubebuilder.io/version: v0.14.0
   name: aerospikebackups.asdb.aerospike.com
 spec:
diff --git a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackupservices.asdb.aerospike.com.yaml b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackupservices.asdb.aerospike.com.yaml
index 0c8159c6..75f19cb8 100644
--- a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackupservices.asdb.aerospike.com.yaml
+++ b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikebackupservices.asdb.aerospike.com.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    aerospike-kubernetes-operator/version: 3.3.1
+    aerospike-kubernetes-operator/version: 3.4.0
     controller-gen.kubebuilder.io/version: v0.14.0
   name: aerospikebackupservices.asdb.aerospike.com
 spec:
diff --git a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikeclusters.asdb.aerospike.com.yaml b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikeclusters.asdb.aerospike.com.yaml
index 4ad9433e..c502577e 100644
--- a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikeclusters.asdb.aerospike.com.yaml
+++ b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikeclusters.asdb.aerospike.com.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    aerospike-kubernetes-operator/version: 3.3.1
+    aerospike-kubernetes-operator/version: 3.4.0
     controller-gen.kubebuilder.io/version: v0.14.0
   name: aerospikeclusters.asdb.aerospike.com
 spec:
diff --git a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikerestores.asdb.aerospike.com.yaml b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikerestores.asdb.aerospike.com.yaml
index b29b2678..56bc292a 100644
--- a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikerestores.asdb.aerospike.com.yaml
+++ b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikerestores.asdb.aerospike.com.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    aerospike-kubernetes-operator/version: 3.3.1
+    aerospike-kubernetes-operator/version: 3.4.0
     controller-gen.kubebuilder.io/version: v0.14.0
   name: aerospikerestores.asdb.aerospike.com
 spec:
diff --git a/helm-charts/aerospike-kubernetes-operator/templates/aerospike-operator-aerospikebackup-editor-clusterrole.yaml b/helm-charts/aerospike-kubernetes-operator/templates/aerospike-operator-aerospikebackup-editor-clusterrole.yaml
new file mode 100644
index 00000000..5267f63f
--- /dev/null
+++ b/helm-charts/aerospike-kubernetes-operator/templates/aerospike-operator-aerospikebackup-editor-clusterrole.yaml
@@ -0,0 +1,20 @@
+{{- if .Values.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: aerospike-operator-aerospikebackup-editor-role
+  labels:
+    app: {{ template "aerospike-kubernetes-operator.fullname" . }}
+    chart: {{ .Chart.Name }}
+    release: {{ .Release.Name }}
+rules:
+- apiGroups:
+  - asdb.aerospike.com
+  resources:
+  - aerospikebackups
+  verbs:
+  - create
+  - delete
+  - patch
+  - update
+{{- end }}
diff --git a/helm-charts/aerospike-kubernetes-operator/templates/aerospike-operator-aerospikebackup-viewer-clusterrole.yaml b/helm-charts/aerospike-kubernetes-operator/templates/aerospike-operator-aerospikebackup-viewer-clusterrole.yaml
new file mode 100644
index 00000000..e47abd02
--- /dev/null
+++ b/helm-charts/aerospike-kubernetes-operator/templates/aerospike-operator-aerospikebackup-viewer-clusterrole.yaml
@@ -0,0 +1,25 @@
+{{- if .Values.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: aerospike-operator-aerospikebackup-viewer-role
+  labels:
+    app: {{ template "aerospike-kubernetes-operator.fullname" . }}
+    chart: {{ .Chart.Name }}
+    release: {{ .Release.Name }}
+rules:
+- apiGroups:
+  - asdb.aerospike.com
+  resources:
+  - aerospikebackups
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - asdb.aerospike.com
+  resources:
+  - aerospikebackups/status
+  verbs:
+  - get
+{{- end }}
diff --git a/helm-charts/aerospike-kubernetes-operator/templates/aerospike-operator-aerospikebackupservice-editor-clusterrole.yaml b/helm-charts/aerospike-kubernetes-operator/templates/aerospike-operator-aerospikebackupservice-editor-clusterrole.yaml
new file mode 100644
index 00000000..01aeb1fe
--- /dev/null
+++ b/helm-charts/aerospike-kubernetes-operator/templates/aerospike-operator-aerospikebackupservice-editor-clusterrole.yaml
@@ -0,0 +1,20 @@
+{{- if .Values.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: aerospike-operator-aerospikebackupservice-editor-role
+  labels:
+    app: {{ template "aerospike-kubernetes-operator.fullname" . }}
+    chart: {{ .Chart.Name }}
+    release: {{ .Release.Name }}
+rules:
+- apiGroups:
+  - asdb.aerospike.com
+  resources:
+  - aerospikebackupservices
+  verbs:
+  - create
+  - delete
+  - patch
+  - update
+{{- end }}
diff --git a/helm-charts/aerospike-kubernetes-operator/templates/aerospike-operator-aerospikebackupservice-viewer-clusterrole.yaml b/helm-charts/aerospike-kubernetes-operator/templates/aerospike-operator-aerospikebackupservice-viewer-clusterrole.yaml
new file mode 100644
index 00000000..58e31c95
--- /dev/null
+++ b/helm-charts/aerospike-kubernetes-operator/templates/aerospike-operator-aerospikebackupservice-viewer-clusterrole.yaml
@@ -0,0 +1,25 @@
+{{- if .Values.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: aerospike-operator-aerospikebackupservice-viewer-role
+  labels:
+    app: {{ template "aerospike-kubernetes-operator.fullname" . }}
+    chart: {{ .Chart.Name }}
+    release: {{ .Release.Name }}
+rules:
+- apiGroups:
+  - asdb.aerospike.com
+  resources:
+  - aerospikebackupservices
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - asdb.aerospike.com
+  resources:
+  - aerospikebackupservices/status
+  verbs:
+  - get
+{{- end }}
diff --git a/helm-charts/aerospike-kubernetes-operator/templates/aerospike-operator-aerospikerestore-editor-clusterrole.yaml b/helm-charts/aerospike-kubernetes-operator/templates/aerospike-operator-aerospikerestore-editor-clusterrole.yaml
new file mode 100644
index 00000000..a8189e1b
--- /dev/null
+++ b/helm-charts/aerospike-kubernetes-operator/templates/aerospike-operator-aerospikerestore-editor-clusterrole.yaml
@@ -0,0 +1,20 @@
+{{- if .Values.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: aerospike-operator-aerospikerestore-editor-role
+  labels:
+    app: {{ template "aerospike-kubernetes-operator.fullname" . }}
+    chart: {{ .Chart.Name }}
+    release: {{ .Release.Name }}
+rules:
+- apiGroups:
+  - asdb.aerospike.com
+  resources:
+  - aerospikerestores
+  verbs:
+  - create
+  - delete
+  - patch
+  - update
+{{- end }}
diff --git a/helm-charts/aerospike-kubernetes-operator/templates/aerospike-operator-aerospikerestore-viewer-clusterrole.yaml b/helm-charts/aerospike-kubernetes-operator/templates/aerospike-operator-aerospikerestore-viewer-clusterrole.yaml
new file mode 100644
index 00000000..24685905
--- /dev/null
+++ b/helm-charts/aerospike-kubernetes-operator/templates/aerospike-operator-aerospikerestore-viewer-clusterrole.yaml
@@ -0,0 +1,25 @@
+{{- if .Values.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: aerospike-operator-aerospikerestore-viewer-role
+  labels:
+    app: {{ template "aerospike-kubernetes-operator.fullname" . }}
+    chart: {{ .Chart.Name }}
+    release: {{ .Release.Name }}
+rules:
+- apiGroups:
+  - asdb.aerospike.com
+  resources:
+  - aerospikerestores
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - asdb.aerospike.com
+  resources:
+  - aerospikerestores/status
+  verbs:
+  - get
+{{- end }}
diff --git a/helm-charts/aerospike-kubernetes-operator/templates/aerospike-operator-manager-clusterrole.yaml b/helm-charts/aerospike-kubernetes-operator/templates/aerospike-operator-manager-clusterrole.yaml
index 05053cfb..cf275998 100644
--- a/helm-charts/aerospike-kubernetes-operator/templates/aerospike-operator-manager-clusterrole.yaml
+++ b/helm-charts/aerospike-kubernetes-operator/templates/aerospike-operator-manager-clusterrole.yaml
@@ -9,6 +9,17 @@ metadata:
     chart: {{ .Chart.Name }}
     release: {{ .Release.Name }}
 rules:
+- apiGroups:
+  - apps
+  resources:
+  - deployments
+  verbs:
+  - create
+  - delete
+  - get
+  - patch
+  - update
+  - watch
 - apiGroups:
   - apps
   resources:
@@ -21,6 +32,58 @@ rules:
   - patch
   - update
   - watch
+- apiGroups:
+  - asdb.aerospike.com
+  resources:
+  - aerospikebackups
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - asdb.aerospike.com
+  resources:
+  - aerospikebackups/finalizers
+  verbs:
+  - update
+- apiGroups:
+  - asdb.aerospike.com
+  resources:
+  - aerospikebackups/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - asdb.aerospike.com
+  resources:
+  - aerospikebackupservices
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - asdb.aerospike.com
+  resources:
+  - aerospikebackupservices/finalizers
+  verbs:
+  - update
+- apiGroups:
+  - asdb.aerospike.com
+  resources:
+  - aerospikebackupservices/status
+  verbs:
+  - get
+  - patch
+  - update
 - apiGroups:
   - asdb.aerospike.com
   resources:
@@ -47,6 +110,32 @@ rules:
   - get
   - patch
   - update
+- apiGroups:
+  - asdb.aerospike.com
+  resources:
+  - aerospikerestores
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - asdb.aerospike.com
+  resources:
+  - aerospikerestores/finalizers
+  verbs:
+  - update
+- apiGroups:
+  - asdb.aerospike.com
+  resources:
+  - aerospikerestores/status
+  verbs:
+  - get
+  - patch
+  - update
 - apiGroups:
   - ""
   resources:
diff --git a/helm-charts/aerospike-kubernetes-operator/templates/aerospike-operator-mutating-webhook-configuration.yaml b/helm-charts/aerospike-kubernetes-operator/templates/aerospike-operator-mutating-webhook-configuration.yaml
index 4e891403..c2e83031 100644
--- a/helm-charts/aerospike-kubernetes-operator/templates/aerospike-operator-mutating-webhook-configuration.yaml
+++ b/helm-charts/aerospike-kubernetes-operator/templates/aerospike-operator-mutating-webhook-configuration.yaml
@@ -29,3 +29,23 @@ webhooks:
     resources:
     - aerospikeclusters
   sideEffects: None
+- admissionReviewVersions:
+  - v1
+  clientConfig:
+    service:
+      name: aerospike-operator-webhook-service
+      namespace: {{ .Release.Namespace }}
+      path: /mutate-asdb-aerospike-com-v1beta1-aerospikerestore
+  failurePolicy: Fail
+  name: maerospikerestore.kb.io
+  rules:
+  - apiGroups:
+    - asdb.aerospike.com
+    apiVersions:
+    - v1beta1
+    operations:
+    - CREATE
+    - UPDATE
+    resources:
+    - aerospikerestores
+  sideEffects: None
diff --git a/helm-charts/aerospike-kubernetes-operator/templates/aerospike-operator-validating-webhook-configuration.yaml b/helm-charts/aerospike-kubernetes-operator/templates/aerospike-operator-validating-webhook-configuration.yaml
index 664edbd2..a4e5d3fb 100644
--- a/helm-charts/aerospike-kubernetes-operator/templates/aerospike-operator-validating-webhook-configuration.yaml
+++ b/helm-charts/aerospike-kubernetes-operator/templates/aerospike-operator-validating-webhook-configuration.yaml
@@ -29,3 +29,63 @@ webhooks:
     resources:
     - aerospikeclusters
   sideEffects: None
+- admissionReviewVersions:
+  - v1
+  clientConfig:
+    service:
+      name: aerospike-operator-webhook-service
+      namespace: {{ .Release.Namespace }}
+      path: /validate-asdb-aerospike-com-v1beta1-aerospikebackup
+  failurePolicy: Fail
+  name: vaerospikebackup.kb.io
+  rules:
+  - apiGroups:
+    - asdb.aerospike.com
+    apiVersions:
+    - v1beta1
+    operations:
+    - CREATE
+    - UPDATE
+    resources:
+    - aerospikebackups
+  sideEffects: None
+- admissionReviewVersions:
+  - v1
+  clientConfig:
+    service:
+      name: aerospike-operator-webhook-service
+      namespace: {{ .Release.Namespace }}
+      path: /validate-asdb-aerospike-com-v1beta1-aerospikebackupservice
+  failurePolicy: Fail
+  name: vaerospikebackupservice.kb.io
+  rules:
+  - apiGroups:
+    - asdb.aerospike.com
+    apiVersions:
+    - v1beta1
+    operations:
+    - CREATE
+    - UPDATE
+    resources:
+    - aerospikebackupservices
+  sideEffects: None
+- admissionReviewVersions:
+  - v1
+  clientConfig:
+    service:
+      name: aerospike-operator-webhook-service
+      namespace: {{ .Release.Namespace }}
+      path: /validate-asdb-aerospike-com-v1beta1-aerospikerestore
+  failurePolicy: Fail
+  name: vaerospikerestore.kb.io
+  rules:
+  - apiGroups:
+    - asdb.aerospike.com
+    apiVersions:
+    - v1beta1
+    operations:
+    - CREATE
+    - UPDATE
+    resources:
+    - aerospikerestores
+  sideEffects: None
diff --git a/helm-charts/aerospike-kubernetes-operator/values.yaml b/helm-charts/aerospike-kubernetes-operator/values.yaml
index c88ded59..8e80d757 100644
--- a/helm-charts/aerospike-kubernetes-operator/values.yaml
+++ b/helm-charts/aerospike-kubernetes-operator/values.yaml
@@ -4,7 +4,7 @@ replicas: 2
 ## Operator image
 operatorImage:
   repository: aerospike/aerospike-kubernetes-operator
-  tag: 3.3.1
+  tag: 3.4.0
   pullPolicy: IfNotPresent
 
 ## In case the above image is pulled from a registry that requires
@@ -37,7 +37,7 @@ aerospikeKubernetesInitRegistry: "docker.io"
 aerospikeKubernetesInitRegistryNamespace: "aerospike"
 
 # Name and tag of aerospike-init image
-aerospikeKubernetesInitNameTag: "aerospike-kubernetes-init:2.2.1"
+aerospikeKubernetesInitNameTag: "aerospike-kubernetes-init:2.2.2"
 
 ## Resources - limits / requests
 resources:
diff --git a/helm-charts/aerospike-restore/Chart.yaml b/helm-charts/aerospike-restore/Chart.yaml
index ef766f04..181b07a8 100644
--- a/helm-charts/aerospike-restore/Chart.yaml
+++ b/helm-charts/aerospike-restore/Chart.yaml
@@ -3,9 +3,9 @@ type: application
 name: aerospike-restore
 
 # version tracks chart changes
-version: 3.3.1
+version: 3.4.0
 # appVersion tracks operator version
-appVersion: 3.3.1
+appVersion: 3.4.0
 
 description: A Helm chart for Aerospike Restore Custom Resource
 icon: https://avatars0.githubusercontent.com/u/2214313?s=200&v=4
diff --git a/test/cluster/podspec_test.go b/test/cluster/podspec_test.go
index 2a522654..6c36aa9c 100644
--- a/test/cluster/podspec_test.go
+++ b/test/cluster/podspec_test.go
@@ -82,9 +82,11 @@ var _ = Describe(
 					Racks: racks,
 				}
 
 				aeroCluster.Spec.PodSpec.AerospikeObjectMeta.Annotations = map[string]string{
-					"annotation-test-1": "test-1"}
+					"annotation-test-1": "test-1",
+				}
 				aeroCluster.Spec.PodSpec.AerospikeObjectMeta.Labels = map[string]string{
-					"label-test-1": "test-1"}
+					"label-test-1": "test-1",
+				}
 
 				err = deployCluster(k8sClient, ctx, aeroCluster)
Expect(err).ToNot(HaveOccurred()) }, @@ -101,414 +103,452 @@ var _ = Describe( Expect(err).ToNot(HaveOccurred()) }, ) - It("Should validate annotations and labels addition", func() { - By("Validating Annotations") - actual, err := getPodSpecAnnotations(k8sClient, ctx, clusterNamespacedName) - Expect(err).ToNot(HaveOccurred()) - valid := ValidateAttributes(actual, - map[string]string{"annotation-test-1": "test-1"}) - Expect(valid).To( - BeTrue(), "Unable to find annotations", - ) - By("Validating Labels") - actual, err = getPodSpecLabels(k8sClient, ctx, clusterNamespacedName) - Expect(err).ToNot(HaveOccurred()) - valid = ValidateAttributes(actual, - map[string]string{"label-test-1": "test-1"}) - Expect(valid).To( - BeTrue(), "Unable to find labels", - ) - }) - - It("Should validate added annotations and labels flow", func() { - aeroCluster, err := getCluster( - k8sClient, ctx, clusterNamespacedName, - ) - Expect(err).ToNot(HaveOccurred()) - - zones, err := getZones(ctx, k8sClient) - Expect(err).ToNot(HaveOccurred()) - zone := zones[0] - if len(zones) > 1 { - for i := 0; i < len(zones); i++ { - if zones[i] != aeroCluster.Spec.RackConfig.Racks[0].Zone { - zone = zones[i] - break + It( + "Should validate annotations and labels addition", func() { + By("Validating Annotations") + actual, err := getPodSpecAnnotations(k8sClient, ctx, clusterNamespacedName) + Expect(err).ToNot(HaveOccurred()) + valid := ValidateAttributes( + actual, + map[string]string{"annotation-test-1": "test-1"}, + ) + Expect(valid).To( + BeTrue(), "Unable to find annotations", + ) + By("Validating Labels") + actual, err = getPodSpecLabels(k8sClient, ctx, clusterNamespacedName) + Expect(err).ToNot(HaveOccurred()) + valid = ValidateAttributes( + actual, + map[string]string{"label-test-1": "test-1"}, + ) + Expect(valid).To( + BeTrue(), "Unable to find labels", + ) + }, + ) + + It( + "Should validate added annotations and labels flow", func() { + aeroCluster, err := getCluster( + k8sClient, ctx, clusterNamespacedName, + ) + Expect(err).ToNot(HaveOccurred()) + + zones, err := getZones(ctx, k8sClient) + Expect(err).ToNot(HaveOccurred()) + zone := zones[0] + if len(zones) > 1 { + for i := 0; i < len(zones); i++ { + if zones[i] != aeroCluster.Spec.RackConfig.Racks[0].Zone { + zone = zones[i] + break + } } } - } - aeroCluster.Spec.PodSpec.AerospikeObjectMeta.Annotations["annotation-test-2"] = "test-2" - aeroCluster.Spec.PodSpec.AerospikeObjectMeta.Labels["label-test-2"] = "test-2" - err = addRack( - k8sClient, ctx, clusterNamespacedName, &asdbv1.Rack{ID: 2, Zone: zone}) - Expect(err).ToNot(HaveOccurred()) - By("Validating Added Annotations") - actual, err := getPodSpecAnnotations(k8sClient, ctx, clusterNamespacedName) - Expect(err).ToNot(HaveOccurred()) - valid := ValidateAttributes(actual, - map[string]string{"annotation-test-1": "test-1", "annotation-test-2": "test-2"}) - Expect(valid).To( - BeTrue(), "Unable to find annotations", - ) - By("Validating Added Labels") - actual, err = getPodSpecLabels(k8sClient, ctx, clusterNamespacedName) - Expect(err).ToNot(HaveOccurred()) - valid = ValidateAttributes(actual, - map[string]string{"label-test-1": "test-1", "label-test-2": "test-2"}) - Expect(valid).To( - BeTrue(), "Unable to find labels", - ) - }) - - It("Should validate the sidecar workflow", func() { - - By("Adding the container1") - - aeroCluster, err := getCluster( - k8sClient, ctx, clusterNamespacedName, - ) - Expect(err).ToNot(HaveOccurred()) - - aeroCluster.Spec.PodSpec.Sidecars = append( - aeroCluster.Spec.PodSpec.Sidecars, 
sidecar1, - ) - - err = updateCluster(k8sClient, ctx, aeroCluster) - Expect(err).ToNot(HaveOccurred()) - - By("Adding the container2") - - aeroCluster, err = getCluster( - k8sClient, ctx, clusterNamespacedName, - ) - Expect(err).ToNot(HaveOccurred()) - - aeroCluster.Spec.PodSpec.Sidecars = append( - aeroCluster.Spec.PodSpec.Sidecars, sidecar2, - ) - - err = updateCluster(k8sClient, ctx, aeroCluster) - Expect(err).ToNot(HaveOccurred()) - - By("Updating the container2") - - aeroCluster, err = getCluster( - k8sClient, ctx, clusterNamespacedName, - ) - Expect(err).ToNot(HaveOccurred()) - - aeroCluster.Spec.PodSpec.Sidecars[1].Command = []string{ - "sh", "-c", "sleep 3600", - } - - err = updateCluster(k8sClient, ctx, aeroCluster) - Expect(err).ToNot(HaveOccurred()) - - By("Removing all the containers") - - aeroCluster, err = getCluster( - k8sClient, ctx, clusterNamespacedName, - ) - Expect(err).ToNot(HaveOccurred()) - - aeroCluster.Spec.PodSpec.Sidecars = []corev1.Container{} - - err = updateCluster(k8sClient, ctx, aeroCluster) - Expect(err).ToNot(HaveOccurred()) - }, + aeroCluster.Spec.PodSpec.AerospikeObjectMeta.Annotations["annotation-test-2"] = "test-2" + aeroCluster.Spec.PodSpec.AerospikeObjectMeta.Labels["label-test-2"] = "test-2" + err = addRack( + k8sClient, ctx, clusterNamespacedName, &asdbv1.Rack{ID: 2, Zone: zone}, + ) + Expect(err).ToNot(HaveOccurred()) + By("Validating Added Annotations") + actual, err := getPodSpecAnnotations(k8sClient, ctx, clusterNamespacedName) + Expect(err).ToNot(HaveOccurred()) + valid := ValidateAttributes( + actual, + map[string]string{"annotation-test-1": "test-1", "annotation-test-2": "test-2"}, + ) + Expect(valid).To( + BeTrue(), "Unable to find annotations", + ) + By("Validating Added Labels") + actual, err = getPodSpecLabels(k8sClient, ctx, clusterNamespacedName) + Expect(err).ToNot(HaveOccurred()) + valid = ValidateAttributes( + actual, + map[string]string{"label-test-1": "test-1", "label-test-2": "test-2"}, + ) + Expect(valid).To( + BeTrue(), "Unable to find labels", + ) + }, ) - It("Should validate the initcontainer workflow", func() { + It( + "Should validate the sidecar workflow", func() { - By("Adding the container1") + By("Adding the container1") - aeroCluster, err := getCluster( - k8sClient, ctx, clusterNamespacedName, - ) - Expect(err).ToNot(HaveOccurred()) + aeroCluster, err := getCluster( + k8sClient, ctx, clusterNamespacedName, + ) + Expect(err).ToNot(HaveOccurred()) - aeroCluster.Spec.PodSpec.InitContainers = append( - aeroCluster.Spec.PodSpec.InitContainers, initCont1, - ) + aeroCluster.Spec.PodSpec.Sidecars = append( + aeroCluster.Spec.PodSpec.Sidecars, sidecar1, + ) - aeroCluster.Spec.Storage.Volumes[1].InitContainers = []asdbv1.VolumeAttachment{ - { - ContainerName: "init-myservice", - Path: "/workdir", - }, - } + err = updateCluster(k8sClient, ctx, aeroCluster) + Expect(err).ToNot(HaveOccurred()) - err = updateCluster(k8sClient, ctx, aeroCluster) - Expect(err).ToNot(HaveOccurred()) + By("Adding the container2") - // validate - stsList, err := getSTSList(aeroCluster, k8sClient) - Expect(err).ToNot(HaveOccurred()) - Expect(len(stsList.Items)).ToNot(BeZero()) + aeroCluster, err = getCluster( + k8sClient, ctx, clusterNamespacedName, + ) + Expect(err).ToNot(HaveOccurred()) - for _, sts := range stsList.Items { - stsInitMountPath := sts.Spec.Template.Spec.InitContainers[1].VolumeMounts[0].MountPath - Expect(stsInitMountPath).To(Equal("/workdir")) - } + aeroCluster.Spec.PodSpec.Sidecars = append( + aeroCluster.Spec.PodSpec.Sidecars, 
sidecar2, + ) - // By("Adding the container2") + err = updateCluster(k8sClient, ctx, aeroCluster) + Expect(err).ToNot(HaveOccurred()) - // aeroCluster, err := getCluster(k8sClient, ctx, clusterNamespacedName) - // Expect(err).ToNot(HaveOccurred()) + By("Updating the container2") - // aeroCluster.Spec.PodSpec.InitContainers = append(aeroCluster.Spec.PodSpec.InitContainers, initCont2) + aeroCluster, err = getCluster( + k8sClient, ctx, clusterNamespacedName, + ) + Expect(err).ToNot(HaveOccurred()) - // err = updateCluster(k8sClient, ctx, aeroCluster) - // Expect(err).ToNot(HaveOccurred()) + aeroCluster.Spec.PodSpec.Sidecars[1].Command = []string{ + "sh", "-c", "sleep 3600", + } - By("Updating the container2") + err = updateCluster(k8sClient, ctx, aeroCluster) + Expect(err).ToNot(HaveOccurred()) - aeroCluster, err = getCluster( - k8sClient, ctx, clusterNamespacedName, - ) - Expect(err).ToNot(HaveOccurred()) + By("Removing all the containers") - aeroCluster.Spec.PodSpec.InitContainers[0].Command = []string{ - "sh", "-c", "echo The app is running; sleep 5", - } + aeroCluster, err = getCluster( + k8sClient, ctx, clusterNamespacedName, + ) + Expect(err).ToNot(HaveOccurred()) - err = updateCluster(k8sClient, ctx, aeroCluster) - Expect(err).ToNot(HaveOccurred()) + aeroCluster.Spec.PodSpec.Sidecars = []corev1.Container{} - By("Removing all the containers") + err = updateCluster(k8sClient, ctx, aeroCluster) + Expect(err).ToNot(HaveOccurred()) + }, + ) - aeroCluster, err = getCluster( - k8sClient, ctx, clusterNamespacedName, - ) - Expect(err).ToNot(HaveOccurred()) + It( + "Should validate the initcontainer workflow", func() { - aeroCluster.Spec.PodSpec.InitContainers = []corev1.Container{} - aeroCluster.Spec.Storage.Volumes[1].InitContainers = []asdbv1.VolumeAttachment{} + By("Adding the container1") - err = updateCluster(k8sClient, ctx, aeroCluster) - Expect(err).ToNot(HaveOccurred()) - }, + aeroCluster, err := getCluster( + k8sClient, ctx, clusterNamespacedName, + ) + Expect(err).ToNot(HaveOccurred()) + + aeroCluster.Spec.PodSpec.InitContainers = append( + aeroCluster.Spec.PodSpec.InitContainers, initCont1, + ) + + aeroCluster.Spec.Storage.Volumes[1].InitContainers = []asdbv1.VolumeAttachment{ + { + ContainerName: "init-myservice", + Path: "/workdir", + }, + } + + err = updateCluster(k8sClient, ctx, aeroCluster) + Expect(err).ToNot(HaveOccurred()) + + // validate + stsList, err := getSTSList(aeroCluster, k8sClient) + Expect(err).ToNot(HaveOccurred()) + Expect(len(stsList.Items)).ToNot(BeZero()) + + for _, sts := range stsList.Items { + stsInitMountPath := sts.Spec.Template.Spec.InitContainers[1].VolumeMounts[0].MountPath + Expect(stsInitMountPath).To(Equal("/workdir")) + } + + // By("Adding the container2") + + // aeroCluster, err := getCluster(k8sClient, ctx, clusterNamespacedName) + // Expect(err).ToNot(HaveOccurred()) + + // aeroCluster.Spec.PodSpec.InitContainers = append(aeroCluster.Spec.PodSpec.InitContainers, initCont2) + + // err = updateCluster(k8sClient, ctx, aeroCluster) + // Expect(err).ToNot(HaveOccurred()) + + By("Updating the container2") + + aeroCluster, err = getCluster( + k8sClient, ctx, clusterNamespacedName, + ) + Expect(err).ToNot(HaveOccurred()) + + aeroCluster.Spec.PodSpec.InitContainers[0].Command = []string{ + "sh", "-c", "echo The app is running; sleep 5", + } + + err = updateCluster(k8sClient, ctx, aeroCluster) + Expect(err).ToNot(HaveOccurred()) + + By("Removing all the containers") + + aeroCluster, err = getCluster( + k8sClient, ctx, clusterNamespacedName, + ) + 
Expect(err).ToNot(HaveOccurred()) + + aeroCluster.Spec.PodSpec.InitContainers = []corev1.Container{} + aeroCluster.Spec.Storage.Volumes[1].InitContainers = []asdbv1.VolumeAttachment{} + + err = updateCluster(k8sClient, ctx, aeroCluster) + Expect(err).ToNot(HaveOccurred()) + }, ) // Test affinity // try deploying in specific hosts - It("Should validate affinity", func() { - aeroCluster, err := getCluster( - k8sClient, ctx, clusterNamespacedName, - ) - Expect(err).ToNot(HaveOccurred()) - - pods, err := getPodList(aeroCluster, k8sClient) - Expect(err).ToNot(HaveOccurred()) + It( + "Should validate affinity", func() { + aeroCluster, err := getCluster( + k8sClient, ctx, clusterNamespacedName, + ) + Expect(err).ToNot(HaveOccurred()) - // All pods will be moved to this node - nodeName := pods.Items[0].Spec.NodeName + pods, err := getPodList(aeroCluster, k8sClient) + Expect(err).ToNot(HaveOccurred()) - affinity := &corev1.Affinity{} - ns := &corev1.NodeSelector{ - NodeSelectorTerms: []corev1.NodeSelectorTerm{ - { - MatchExpressions: []corev1.NodeSelectorRequirement{ - { - Key: "kubernetes.io/hostname", - Operator: corev1.NodeSelectorOpIn, - Values: []string{nodeName}, + // All pods will be moved to this node + nodeName := pods.Items[0].Spec.NodeName + + affinity := &corev1.Affinity{} + ns := &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: "kubernetes.io/hostname", + Operator: corev1.NodeSelectorOpIn, + Values: []string{nodeName}, + }, }, }, }, - }, - } - - affinity.NodeAffinity = &corev1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: ns, - } - aeroCluster.Spec.PodSpec.Affinity = affinity - - // All pods should move to node with nodeName - err = updateCluster(k8sClient, ctx, aeroCluster) - Expect(err).ToNot(HaveOccurred()) - - // Verify if all the pods are moved to given node - aeroCluster, err = getCluster( - k8sClient, ctx, clusterNamespacedName, - ) - Expect(err).ToNot(HaveOccurred()) - - pods, err = getPodList(aeroCluster, k8sClient) - Expect(err).ToNot(HaveOccurred()) - - for _, pod := range pods.Items { - Expect(pod.Spec.NodeName).Should(Equal(nodeName)) - } - // Test toleration - // Test nodeSelector - }, + } + + affinity.NodeAffinity = &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: ns, + } + aeroCluster.Spec.PodSpec.Affinity = affinity + + // All pods should move to node with nodeName + err = updateCluster(k8sClient, ctx, aeroCluster) + Expect(err).ToNot(HaveOccurred()) + + // Verify if all the pods are moved to given node + aeroCluster, err = getCluster( + k8sClient, ctx, clusterNamespacedName, + ) + Expect(err).ToNot(HaveOccurred()) + + pods, err = getPodList(aeroCluster, k8sClient) + Expect(err).ToNot(HaveOccurred()) + + for _, pod := range pods.Items { + Expect(pod.Spec.NodeName).Should(Equal(nodeName)) + } + // Test toleration + // Test nodeSelector + }, ) - It("Should be able to update container image and other fields together", func() { - - By("Adding the container") - aeroCluster, err := getCluster( - k8sClient, ctx, clusterNamespacedName, - ) - Expect(err).ToNot(HaveOccurred()) - - aeroCluster.Spec.PodSpec.Sidecars = append( - aeroCluster.Spec.PodSpec.Sidecars, sidecar1, - ) - - err = updateCluster(k8sClient, ctx, aeroCluster) - Expect(err).ToNot(HaveOccurred()) - - By("Updating container image and affinity together") - - aeroCluster, err = getCluster( - k8sClient, ctx, clusterNamespacedName, - ) - Expect(err).ToNot(HaveOccurred()) - - // Update image 
- newImage := "nginx:1.21.4" - aeroCluster.Spec.PodSpec.Sidecars[0].Image = newImage - - // Update affinity - region, err := getRegion(ctx, k8sClient) - Expect(err).ToNot(HaveOccurred()) - - desiredAffinity := corev1.Affinity{ - NodeAffinity: &corev1.NodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ - NodeSelectorTerms: []corev1.NodeSelectorTerm{ - { - MatchExpressions: []corev1.NodeSelectorRequirement{ - { - Key: "topology.kubernetes.io/region", - Operator: corev1.NodeSelectorOpIn, - Values: []string{region}, + It( + "Should be able to update container image and other fields together", func() { + + By("Adding the container") + aeroCluster, err := getCluster( + k8sClient, ctx, clusterNamespacedName, + ) + Expect(err).ToNot(HaveOccurred()) + + aeroCluster.Spec.PodSpec.Sidecars = append( + aeroCluster.Spec.PodSpec.Sidecars, sidecar1, + ) + + err = updateCluster(k8sClient, ctx, aeroCluster) + Expect(err).ToNot(HaveOccurred()) + + By("Updating container image and affinity together") + + aeroCluster, err = getCluster( + k8sClient, ctx, clusterNamespacedName, + ) + Expect(err).ToNot(HaveOccurred()) + + // Update image + newImage := "nginx:1.21.4" + aeroCluster.Spec.PodSpec.Sidecars[0].Image = newImage + + // Update affinity + region, err := getRegion(ctx, k8sClient) + Expect(err).ToNot(HaveOccurred()) + + desiredAffinity := corev1.Affinity{ + NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: "topology.kubernetes.io/region", + Operator: corev1.NodeSelectorOpIn, + Values: []string{region}, + }, }, }, }, }, }, - }, - } - aeroCluster.Spec.PodSpec.Affinity = &desiredAffinity - - err = updateCluster(k8sClient, ctx, aeroCluster) - Expect(err).ToNot(HaveOccurred()) - - // validate - stsList, err := getSTSList(aeroCluster, k8sClient) - Expect(err).ToNot(HaveOccurred()) - Expect(len(stsList.Items)).ToNot(BeZero()) - - var meFound bool - for _, sts := range stsList.Items { - actualNodeAffinity := sts.Spec.Template.Spec.Affinity.NodeAffinity - for _, ns := range actualNodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms { - for _, me := range ns.MatchExpressions { - if me.Key == "topology.kubernetes.io/region" { - isEqual := reflect.DeepEqual( - me, - desiredAffinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution. - NodeSelectorTerms[0].MatchExpressions[0], - ) - msg := fmt.Sprintf("node affinity actual: %v, desired: %v", actualNodeAffinity, desiredAffinity.NodeAffinity) - Expect(isEqual).To(BeTrue(), msg) - - meFound = true + } + aeroCluster.Spec.PodSpec.Affinity = &desiredAffinity + + err = updateCluster(k8sClient, ctx, aeroCluster) + Expect(err).ToNot(HaveOccurred()) + + // validate + stsList, err := getSTSList(aeroCluster, k8sClient) + Expect(err).ToNot(HaveOccurred()) + Expect(len(stsList.Items)).ToNot(BeZero()) + + var meFound bool + for _, sts := range stsList.Items { + actualNodeAffinity := sts.Spec.Template.Spec.Affinity.NodeAffinity + for _, ns := range actualNodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms { + for _, me := range ns.MatchExpressions { + if me.Key == "topology.kubernetes.io/region" { + isEqual := reflect.DeepEqual( + me, + desiredAffinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution. 
+ NodeSelectorTerms[0].MatchExpressions[0], + ) + msg := fmt.Sprintf( + "node affinity actual: %v, desired: %v", actualNodeAffinity, + desiredAffinity.NodeAffinity, + ) + Expect(isEqual).To(BeTrue(), msg) + + meFound = true + } } } + + msg := fmt.Sprintf( + "node affinity actual: %v, desired: %v", actualNodeAffinity, + desiredAffinity.NodeAffinity, + ) + Expect(meFound).To(BeTrue(), msg) + + // 1st is aerospike-server image, 2nd is 1st sidecare + Expect(sts.Spec.Template.Spec.Containers[1].Image).To(Equal(newImage)) } + }, + ) + + It( + "Should be able to set/update aerospike-init custom registry, namespace and name", func() { + operatorEnvVarRegistry := "docker.io" + operatorEnvVarRegistryNamespace := "aerospike" + operatorEnvVarNameAndTag := "aerospike-kubernetes-init:2.2.2" + customRegistry := getEnvVar(customInitRegistryEnvVar) + customRegistryNamespace := getEnvVar(customInitRegistryNamespaceEnvVar) + customInitNameAndTag := getEnvVar(customInitNameAndTagEnvVar) + imagePullSecret := getEnvVar(imagePullSecretNameEnvVar) + + By("Updating imagePullSecret") + aeroCluster, err := getCluster(k8sClient, ctx, clusterNamespacedName) + Expect(err).ToNot(HaveOccurred()) + + aeroCluster.Spec.PodSpec.ImagePullSecrets = []corev1.LocalObjectReference{ + { + Name: imagePullSecret, + }, + } + + err = updateCluster(k8sClient, ctx, aeroCluster) + Expect(err).ToNot(HaveOccurred()) - msg := fmt.Sprintf("node affinity actual: %v, desired: %v", actualNodeAffinity, desiredAffinity.NodeAffinity) - Expect(meFound).To(BeTrue(), msg) - - // 1st is aerospike-server image, 2nd is 1st sidecare - Expect(sts.Spec.Template.Spec.Containers[1].Image).To(Equal(newImage)) - } - }) - - It("Should be able to set/update aerospike-init custom registry, namespace and name", func() { - operatorEnvVarRegistry := "docker.io" - operatorEnvVarRegistryNamespace := "aerospike" - operatorEnvVarNameAndTag := "aerospike-kubernetes-init:2.2.1" - customRegistry := getEnvVar(customInitRegistryEnvVar) - customRegistryNamespace := getEnvVar(customInitRegistryNamespaceEnvVar) - customInitNameAndTag := getEnvVar(customInitNameAndTagEnvVar) - imagePullSecret := getEnvVar(imagePullSecretNameEnvVar) - - By("Updating imagePullSecret") - aeroCluster, err := getCluster(k8sClient, ctx, clusterNamespacedName) - Expect(err).ToNot(HaveOccurred()) - - aeroCluster.Spec.PodSpec.ImagePullSecrets = []corev1.LocalObjectReference{ - { - Name: imagePullSecret, - }, - } - - err = updateCluster(k8sClient, ctx, aeroCluster) - Expect(err).ToNot(HaveOccurred()) - - By("Using registry, namespace and name in CR") - aeroCluster, err = getCluster(k8sClient, ctx, clusterNamespacedName) - Expect(err).ToNot(HaveOccurred()) - - aeroCluster.Spec.PodSpec.AerospikeInitContainerSpec.ImageRegistry = customRegistry - aeroCluster.Spec.PodSpec.AerospikeInitContainerSpec.ImageRegistryNamespace = &customRegistryNamespace - aeroCluster.Spec.PodSpec.AerospikeInitContainerSpec.ImageNameAndTag = customInitNameAndTag - err = updateCluster(k8sClient, ctx, aeroCluster) - Expect(err).ToNot(HaveOccurred()) - - validateInitImage(k8sClient, aeroCluster, customRegistry, - customRegistryNamespace, customInitNameAndTag) - - By("Using envVar registry, namespace and name") - aeroCluster, err = getCluster(k8sClient, ctx, clusterNamespacedName) - Expect(err).ToNot(HaveOccurred()) - - // Empty imageRegistry, should use operator envVar docker.io - aeroCluster.Spec.PodSpec.AerospikeInitContainerSpec.ImageRegistry = "" - aeroCluster.Spec.PodSpec.AerospikeInitContainerSpec.ImageRegistryNamespace = nil - 
aeroCluster.Spec.PodSpec.AerospikeInitContainerSpec.ImageNameAndTag = "" - err = updateCluster(k8sClient, ctx, aeroCluster) - Expect(err).ToNot(HaveOccurred()) - - validateInitImage(k8sClient, aeroCluster, operatorEnvVarRegistry, - operatorEnvVarRegistryNamespace, operatorEnvVarNameAndTag) - }) - - It("Should be able to recover cluster after setting correct aerospike-init custom registry/namespace", func() { - operatorEnvVarRegistry := "docker.io" - operatorEnvVarRegistryNamespace := "aerospike" - operatorEnvVarNameAndTag := "aerospike-kubernetes-init:2.2.1" - incorrectCustomRegistryNamespace := "incorrectnamespace" - - By("Using incorrect registry namespace in CR") - aeroCluster, err := getCluster(k8sClient, ctx, clusterNamespacedName) - Expect(err).ToNot(HaveOccurred()) - - aeroCluster.Spec.PodSpec.AerospikeInitContainerSpec.ImageRegistryNamespace = &incorrectCustomRegistryNamespace - err = updateClusterWithTO(k8sClient, ctx, aeroCluster, time.Minute*1) - Expect(err).Should(HaveOccurred()) - - validateInitImage(k8sClient, aeroCluster, operatorEnvVarRegistry, - incorrectCustomRegistryNamespace, operatorEnvVarNameAndTag) - - By("Using correct registry namespace in CR") - aeroCluster, err = getCluster(k8sClient, ctx, clusterNamespacedName) - Expect(err).ToNot(HaveOccurred()) - - // Nil ImageRegistryNamespace, should use operator envVar aerospike - aeroCluster.Spec.PodSpec.AerospikeInitContainerSpec.ImageRegistryNamespace = nil - err = updateCluster(k8sClient, ctx, aeroCluster) - Expect(err).ToNot(HaveOccurred()) - - validateInitImage(k8sClient, aeroCluster, operatorEnvVarRegistry, - operatorEnvVarRegistryNamespace, operatorEnvVarNameAndTag) - }) - }) + By("Using registry, namespace and name in CR") + aeroCluster, err = getCluster(k8sClient, ctx, clusterNamespacedName) + Expect(err).ToNot(HaveOccurred()) + + aeroCluster.Spec.PodSpec.AerospikeInitContainerSpec.ImageRegistry = customRegistry + aeroCluster.Spec.PodSpec.AerospikeInitContainerSpec.ImageRegistryNamespace = &customRegistryNamespace + aeroCluster.Spec.PodSpec.AerospikeInitContainerSpec.ImageNameAndTag = customInitNameAndTag + err = updateCluster(k8sClient, ctx, aeroCluster) + Expect(err).ToNot(HaveOccurred()) + + validateInitImage( + k8sClient, aeroCluster, customRegistry, + customRegistryNamespace, customInitNameAndTag, + ) + + By("Using envVar registry, namespace and name") + aeroCluster, err = getCluster(k8sClient, ctx, clusterNamespacedName) + Expect(err).ToNot(HaveOccurred()) + + // Empty imageRegistry, should use operator envVar docker.io + aeroCluster.Spec.PodSpec.AerospikeInitContainerSpec.ImageRegistry = "" + aeroCluster.Spec.PodSpec.AerospikeInitContainerSpec.ImageRegistryNamespace = nil + aeroCluster.Spec.PodSpec.AerospikeInitContainerSpec.ImageNameAndTag = "" + err = updateCluster(k8sClient, ctx, aeroCluster) + Expect(err).ToNot(HaveOccurred()) + + validateInitImage( + k8sClient, aeroCluster, operatorEnvVarRegistry, + operatorEnvVarRegistryNamespace, operatorEnvVarNameAndTag, + ) + }, + ) + + It( + "Should be able to recover cluster after setting correct aerospike-init custom registry/namespace", + func() { + operatorEnvVarRegistry := "docker.io" + operatorEnvVarRegistryNamespace := "aerospike" + operatorEnvVarNameAndTag := "aerospike-kubernetes-init:2.2.2" + incorrectCustomRegistryNamespace := "incorrectnamespace" + + By("Using incorrect registry namespace in CR") + aeroCluster, err := getCluster(k8sClient, ctx, clusterNamespacedName) + Expect(err).ToNot(HaveOccurred()) + + 
aeroCluster.Spec.PodSpec.AerospikeInitContainerSpec.ImageRegistryNamespace = &incorrectCustomRegistryNamespace + err = updateClusterWithTO(k8sClient, ctx, aeroCluster, time.Minute*1) + Expect(err).Should(HaveOccurred()) + + validateInitImage( + k8sClient, aeroCluster, operatorEnvVarRegistry, + incorrectCustomRegistryNamespace, operatorEnvVarNameAndTag, + ) + + By("Using correct registry namespace in CR") + aeroCluster, err = getCluster(k8sClient, ctx, clusterNamespacedName) + Expect(err).ToNot(HaveOccurred()) + + // Nil ImageRegistryNamespace, should use operator envVar aerospike + aeroCluster.Spec.PodSpec.AerospikeInitContainerSpec.ImageRegistryNamespace = nil + err = updateCluster(k8sClient, ctx, aeroCluster) + Expect(err).ToNot(HaveOccurred()) + + validateInitImage( + k8sClient, aeroCluster, operatorEnvVarRegistry, + operatorEnvVarRegistryNamespace, operatorEnvVarNameAndTag, + ) + }, + ) + }, + ) Context( "When doing invalid operation", func() { It( @@ -518,11 +558,13 @@ var _ = Describe( clusterNamespacedName, 2, ) aeroCluster.Spec.PodSpec.AerospikeObjectMeta.Labels = map[string]string{ - asdbv1.AerospikeAppLabel: "test"} + asdbv1.AerospikeAppLabel: "test", + } err := k8sClient.Create(ctx, aeroCluster) Expect(err).Should(HaveOccurred()) - }) + }, + ) It( "Should fail for adding sidecar container with same name", @@ -573,8 +615,10 @@ func getEnvVar(envVar string) string { return envVarVal } -func validateInitImage(k8sClient client.Client, aeroCluster *asdbv1.AerospikeCluster, - registry, namespace, nameAndTag string) { +func validateInitImage( + k8sClient client.Client, aeroCluster *asdbv1.AerospikeCluster, + registry, namespace, nameAndTag string, +) { stsList, err := getSTSList(aeroCluster, k8sClient) Expect(err).ToNot(HaveOccurred()) @@ -582,7 +626,11 @@ func validateInitImage(k8sClient client.Client, aeroCluster *asdbv1.AerospikeClu for stsIndex := range stsList.Items { image := stsList.Items[stsIndex].Spec.Template.Spec.InitContainers[0].Image - Expect(image == expectedImage).To(BeTrue(), fmt.Sprintf("expected init image %s, found image %s", - expectedImage, image)) + Expect(image == expectedImage).To( + BeTrue(), fmt.Sprintf( + "expected init image %s, found image %s", + expectedImage, image, + ), + ) } } diff --git a/test/test.sh b/test/test.sh index 0fcfe528..926e50d9 100755 --- a/test/test.sh +++ b/test/test.sh @@ -30,7 +30,7 @@ done CRED_PATH=${CRED_PATH:-$HOME/.docker/config.json} REGISTRY=${REGISTRY:-568976754000.dkr.ecr.ap-south-1.amazonaws.com} REGISTRY_NAMESPACE=${REGISTRY_NAMESPACE:-aerospike} -INIT_IMAGE_NAME_TAG=${INIT_IMAGE_NAME_TAG:-aerospike-kubernetes-init:2.2.1} +INIT_IMAGE_NAME_TAG=${INIT_IMAGE_NAME_TAG:-aerospike-kubernetes-init:2.2.2} DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
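For context on how the bumped version flows through a release build, the README and Makefile hunks above define the image variables and the `make docker-buildx` / `make bundle` entry points. Below is a minimal sketch that strings them together for a 3.4.0 nightly build; the `bundle-build`, `bundle-push`, `catalog-build`, and `catalog-push` targets are assumed from standard operator-sdk Makefile scaffolding and are not shown in this diff.

```sh
# Variables mirror the README hunk in this diff.
export ACCOUNT=aerospike
export IMAGE_TAG_BASE=${ACCOUNT}/aerospike-kubernetes-operator
export VERSION=3.4.0
export IMG=docker.io/${IMAGE_TAG_BASE}-nightly:${VERSION}
export BUNDLE_IMG=docker.io/${IMAGE_TAG_BASE}-bundle-nightly:${VERSION}
export CATALOG_IMG=docker.io/${IMAGE_TAG_BASE}-catalog-nightly:${VERSION}

# Build the operator image (single-arch here; drop PLATFORMS for multi-arch),
# as in the README quick start.
make docker-buildx IMG=${IMG} PLATFORMS=linux/amd64

# Regenerate OLM bundle manifests for the new version (channels as in the Makefile comment).
CHANNELS=stable DEFAULT_CHANNEL=stable IMG=${IMG} make bundle

# Assumed operator-sdk scaffolding targets for publishing bundle and catalog images.
make bundle-build bundle-push BUNDLE_IMG=${BUNDLE_IMG}
make catalog-build catalog-push CATALOG_IMG=${CATALOG_IMG}
```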