diff --git a/cspell.config.yaml b/cspell.config.yaml index 6b06eb756..526c795b1 100644 --- a/cspell.config.yaml +++ b/cspell.config.yaml @@ -1,2 +1,3 @@ words: -- tailscale + - tailscale + - Velero diff --git a/library/common-test/Chart.yaml b/library/common-test/Chart.yaml index 5bafe0a73..2d3d11c7f 100644 --- a/library/common-test/Chart.yaml +++ b/library/common-test/Chart.yaml @@ -3,7 +3,7 @@ appVersion: "" dependencies: - name: common repository: file://../common - version: ~15.2.0 + version: ~15.3.0 deprecated: false description: Helper chart to test different use cases of the common library home: https://github.com/truecharts/apps/tree/master/charts/library/common-test diff --git a/library/common-test/ci/backupstoragelocation-values.yaml b/library/common-test/ci/backupstoragelocation-values.yaml new file mode 100644 index 000000000..31f6e9c27 --- /dev/null +++ b/library/common-test/ci/backupstoragelocation-values.yaml @@ -0,0 +1,76 @@ +service: + main: + enabled: true + primary: true + ports: + main: + enabled: true + primary: true + protocol: http + port: 8080 + +workload: + main: + enabled: true + primary: true + type: Deployment + podSpec: + containers: + main: + enabled: true + primary: true + args: + - --port + - "8080" + probes: + liveness: + enabled: true + type: http + port: "{{ .Values.service.main.ports.main.port }}" + readiness: + enabled: true + type: http + port: "{{ .Values.service.main.ports.main.port }}" + startup: + enabled: true + type: http + port: "{{ .Values.service.main.ports.main.port }}" + +manifestManager: + enabled: false + +# Parameters for the BackupStorageLocation(s). Configure multiple by adding other element(s) to the backupStorageLocation slice. +# See https://velero.io/docs/v1.6/api-types/backupstoragelocation/ +backupStorageLocation: + # name is the name of the backup storage location where backups should be stored. If a name is not provided, + # a backup storage location will be created with the name "default". Optional. +- name: test + enabled: true + # provider is the name for the backup storage location provider. + provider: aws + objectStorage: + # bucket is the name of the bucket to store backups in. Required. + bucket: test + credential: + # AWS/s3 credentials to be put into secret (mandatory if provider == aws/s3) + aws: + id: fdgsdfghsdfgh + key: dfgdfhsdfgh + # Additional provider-specific configuration. See link above + # for details of required/optional fields for your provider. + config: {} + # region: + # s3ForcePathStyle: + # s3Url: + # kmsKeyId: + # resourceGroup: + # The ID of the subscription containing the storage account, if different from the cluster’s subscription. (Azure only) + # subscriptionId: + # storageAccount: + # publicUrl: + # Name of the GCP service account to use for this backup storage location. Specify the + # service account here if you want to use workload identity instead of providing the key file.(GCP only) + # serviceAccount: + # Option to skip certificate validation or not if insecureSkipTLSVerify is set to be true, the client side should set the + # flag. 
For Velero client Command like velero backup describe, velero backup logs needs to add the flag --insecure-skip-tls-verify + # insecureSkipTLSVerify: diff --git a/library/common-test/ci/schedule-values.yaml b/library/common-test/ci/schedule-values.yaml new file mode 100644 index 000000000..bc928b31f --- /dev/null +++ b/library/common-test/ci/schedule-values.yaml @@ -0,0 +1,55 @@ +service: + main: + enabled: true + primary: true + ports: + main: + enabled: true + primary: true + protocol: http + port: 8080 + +workload: + main: + enabled: true + primary: true + type: Deployment + podSpec: + containers: + main: + enabled: true + primary: true + args: + - --port + - "8080" + probes: + liveness: + enabled: true + type: http + port: "{{ .Values.service.main.ports.main.port }}" + readiness: + enabled: true + type: http + port: "{{ .Values.service.main.ports.main.port }}" + startup: + enabled: true + type: http + port: "{{ .Values.service.main.ports.main.port }}" + +manifestManager: + enabled: false + +schedules: + - name: + enabled: true + labels: + myenv: foo + annotations: + myenv: foo + schedule: "0 0 * * *" + useOwnerReferencesInBackup: false + template: + ttl: "240h" + storageLocation: default + includedNamespaces: + - foo diff --git a/library/common-test/ci/volumesnapshotlocation-values.yaml b/library/common-test/ci/volumesnapshotlocation-values.yaml new file mode 100644 index 000000000..0a8e7c4ab --- /dev/null +++ b/library/common-test/ci/volumesnapshotlocation-values.yaml @@ -0,0 +1,58 @@ +service: + main: + enabled: true + primary: true + ports: + main: + enabled: true + primary: true + protocol: http + port: 8080 + +workload: + main: + enabled: true + primary: true + type: Deployment + podSpec: + containers: + main: + enabled: true + primary: true + args: + - --port + - "8080" + probes: + liveness: + enabled: true + type: http + port: "{{ .Values.service.main.ports.main.port }}" + readiness: + enabled: true + type: http + port: "{{ .Values.service.main.ports.main.port }}" + startup: + enabled: true + type: http + port: "{{ .Values.service.main.ports.main.port }}" + +manifestManager: + enabled: false + + +# Parameters for the VolumeSnapshotLocation(s). Configure multiple by adding other element(s) to the volumeSnapshotLocation slice. +# See https://velero.io/docs/v1.6/api-types/volumesnapshotlocation/ +volumeSnapshotLocation: + # name is the name of the volume snapshot location where snapshots are being taken. Required. +- name: test + enabled: true + # provider is the name for the volume snapshot provider. + provider: aws + credential: + # AWS/s3 credentials to be put into secret (mandatory if provider == aws/s3) + aws: + id: fdgsdfghsdfgh + key: dfgdfhsdfgh + # Additional provider-specific configuration. See link above + # for details of required/optional fields for your provider. 
+ config: {} diff --git a/library/common-test/tests/veleroBackupStorageLocation/metadata_test.yaml b/library/common-test/tests/veleroBackupStorageLocation/metadata_test.yaml new file mode 100644 index 000000000..484360bff --- /dev/null +++ b/library/common-test/tests/veleroBackupStorageLocation/metadata_test.yaml @@ -0,0 +1,100 @@ +suite: velero backupStorageLocation metadata test +templates: + - common.yaml +chart: + appVersion: &appVer v9.9.9 +release: + name: test-release-name + namespace: test-release-namespace +tests: + - it: should pass with storageClass created with labels and annotations + set: + label1: label1 + label2: global_label2 + annotation1: annotation1 + annotation2: global_annotation2 + global: + labels: + g_label1: global_label1 + g_label2: "{{ .Values.label2 }}" + annotations: + g_annotation1: global_annotation1 + g_annotation2: "{{ .Values.annotation2 }}" + backupStorageLocation: + - enabled: true + name: my-snap + labels: + label1: "{{ .Values.label1 }}" + label2: label2 + annotations: + annotation1: "{{ .Values.annotation1 }}" + annotation2: annotation2 + objectStorage: + bucket: my-bucket + provider: aws + credential: + aws: + id: my-id + key: my-key + asserts: + - documentIndex: &secretDoc 0 + isKind: + of: Secret + - documentIndex: *secretDoc + isAPIVersion: + of: v1 + - documentIndex: *secretDoc + equal: + path: metadata.annotations + value: + annotation1: annotation1 + annotation2: annotation2 + g_annotation1: global_annotation1 + g_annotation2: global_annotation2 + - documentIndex: *secretDoc + equal: + path: metadata.labels + value: + app: common-test-1.0.0 + release: test-release-name + helm-revision: "0" + helm.sh/chart: common-test-1.0.0 + app.kubernetes.io/name: common-test + app.kubernetes.io/instance: test-release-name + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: *appVer + g_label1: global_label1 + g_label2: global_label2 + label1: label1 + label2: label2 + + - documentIndex: &backStoreLocDoc 1 + isKind: + of: BackupStorageLocation + - documentIndex: *backStoreLocDoc + isAPIVersion: + of: velero.io/v1 + - documentIndex: *backStoreLocDoc + equal: + path: metadata.annotations + value: + annotation1: annotation1 + annotation2: annotation2 + g_annotation1: global_annotation1 + g_annotation2: global_annotation2 + - documentIndex: *backStoreLocDoc + equal: + path: metadata.labels + value: + app: common-test-1.0.0 + release: test-release-name + helm-revision: "0" + helm.sh/chart: common-test-1.0.0 + app.kubernetes.io/name: common-test + app.kubernetes.io/instance: test-release-name + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: *appVer + g_label1: global_label1 + g_label2: global_label2 + label1: label1 + label2: label2 diff --git a/library/common-test/tests/veleroBackupStorageLocation/names_test.yaml b/library/common-test/tests/veleroBackupStorageLocation/names_test.yaml new file mode 100644 index 000000000..d5df5ae35 --- /dev/null +++ b/library/common-test/tests/veleroBackupStorageLocation/names_test.yaml @@ -0,0 +1,69 @@ +suite: velero backupStorageLocation name test +templates: + - common.yaml +release: + name: test-release-name + namespace: test-release-namespace +tests: + - it: should generate correct name + set: + backupStorageLocation: + - enabled: true + name: my-snap1 + objectStorage: + bucket: my-bucket + provider: aws + credential: + aws: + id: my-id + key: my-key + - enabled: true + name: my-snap2 + objectStorage: + bucket: my-bucket + provider: aws + credential: + aws: + id: my-id + key: my-key + asserts: + 
- documentIndex: &secretDoc 0 + isKind: + of: Secret + - documentIndex: *secretDoc + isAPIVersion: + of: v1 + - documentIndex: *secretDoc + equal: + path: metadata.name + value: bsl-test-release-name-common-test-my-snap1 + - documentIndex: &backStoreLocDoc 1 + isKind: + of: BackupStorageLocation + - documentIndex: *backStoreLocDoc + isAPIVersion: + of: velero.io/v1 + - documentIndex: *backStoreLocDoc + equal: + path: metadata.name + value: test-release-name-common-test-my-snap1 + - documentIndex: &otherSecretDoc 2 + isKind: + of: Secret + - documentIndex: *otherSecretDoc + isAPIVersion: + of: v1 + - documentIndex: *otherSecretDoc + equal: + path: metadata.name + value: bsl-test-release-name-common-test-my-snap2 + - documentIndex: &otherbackStoreLocDoc 3 + isKind: + of: BackupStorageLocation + - documentIndex: *otherbackStoreLocDoc + isAPIVersion: + of: velero.io/v1 + - documentIndex: *otherbackStoreLocDoc + equal: + path: metadata.name + value: test-release-name-common-test-my-snap2 diff --git a/library/common-test/tests/veleroBackupStorageLocation/spec_test.yaml b/library/common-test/tests/veleroBackupStorageLocation/spec_test.yaml new file mode 100644 index 000000000..8bb6996bf --- /dev/null +++ b/library/common-test/tests/veleroBackupStorageLocation/spec_test.yaml @@ -0,0 +1,443 @@ +suite: velero backupStorageLocation spec test +templates: + - common.yaml +release: + name: test-release-name + namespace: test-release-namespace +tests: + - it: should generate correct spec with aws provider and credential + set: + backupStorageLocation: + - enabled: true + name: my-snap1 + provider: aws + credential: + aws: + id: my-id + key: my-key + objectStorage: + bucket: my-bucket + asserts: + - documentIndex: &secretDoc 0 + isKind: + of: Secret + - documentIndex: *secretDoc + isAPIVersion: + of: v1 + - documentIndex: *secretDoc + equal: + path: stringData + value: + cloud: |- + [default] + aws_access_key_id=my-id + aws_secret_access_key=my-key + - documentIndex: &backStoreLocDoc 1 + isKind: + of: BackupStorageLocation + - documentIndex: *backStoreLocDoc + isAPIVersion: + of: velero.io/v1 + - documentIndex: *backStoreLocDoc + equal: + path: spec + value: + provider: velero.io/aws + credential: + name: bsl-test-release-name-common-test-my-snap1 + key: cloud + accessMode: ReadWrite + objectStorage: + bucket: my-bucket + + - it: should generate correct spec with s3 provider and credential + set: + backupStorageLocation: + - enabled: true + name: my-snap1 + provider: s3 + credential: + s3: + id: my-id + key: my-key + objectStorage: + bucket: my-bucket + asserts: + - documentIndex: &secretDoc 0 + isKind: + of: Secret + - documentIndex: *secretDoc + isAPIVersion: + of: v1 + - documentIndex: *secretDoc + equal: + path: stringData + value: + cloud: |- + [default] + aws_access_key_id=my-id + aws_secret_access_key=my-key + - documentIndex: &backStoreLocDoc 1 + isKind: + of: BackupStorageLocation + - documentIndex: *backStoreLocDoc + isAPIVersion: + of: velero.io/v1 + - documentIndex: *backStoreLocDoc + equal: + path: spec + value: + provider: velero.io/aws + credential: + name: bsl-test-release-name-common-test-my-snap1 + key: cloud + accessMode: ReadWrite + objectStorage: + bucket: my-bucket + + - it: should generate correct spec with provided provider and credential + set: + backupStorageLocation: + - enabled: true + name: my-snap1 + provider: my-provider + credential: + name: my-credential + key: my-key + objectStorage: + bucket: my-bucket + asserts: + - documentIndex: &backStoreLocDoc 0 + isKind: + of: 
BackupStorageLocation + - documentIndex: *backStoreLocDoc + isAPIVersion: + of: velero.io/v1 + - documentIndex: *backStoreLocDoc + equal: + path: spec + value: + provider: my-provider + credential: + name: my-credential + key: my-key + accessMode: ReadWrite + objectStorage: + bucket: my-bucket + + - it: should generate correct spec with provided provider, credential and custom config + set: + region: us-east-1 + useS3PathStyle: true + backupStorageLocation: + - enabled: true + name: my-snap1 + provider: aws + objectStorage: + bucket: my-bucket + credential: + aws: + id: my-id + key: my-key + config: + region: "{{ .Values.region }}" + s3ForcePathStyle: "{{ .Values.useS3PathStyle }}" + bool: false + asserts: + - documentIndex: &backStoreLocDoc 1 + isKind: + of: BackupStorageLocation + - documentIndex: *backStoreLocDoc + isAPIVersion: + of: velero.io/v1 + - documentIndex: *backStoreLocDoc + equal: + path: spec + value: + provider: velero.io/aws + credential: + name: bsl-test-release-name-common-test-my-snap1 + key: cloud + config: + region: us-east-1 + s3ForcePathStyle: "true" + bool: "false" + accessMode: ReadWrite + objectStorage: + bucket: my-bucket + + - it: should generate correct spec with provided accessMode + set: + region: us-east-1 + useS3PathStyle: true + backupStorageLocation: + - enabled: true + name: my-snap1 + provider: aws + accessMode: ReadOnly + objectStorage: + bucket: my-bucket + credential: + aws: + id: my-id + key: my-key + config: + region: "{{ .Values.region }}" + s3ForcePathStyle: "{{ .Values.useS3PathStyle }}" + bool: false + asserts: + - documentIndex: &backStoreLocDoc 1 + isKind: + of: BackupStorageLocation + - documentIndex: *backStoreLocDoc + isAPIVersion: + of: velero.io/v1 + - documentIndex: *backStoreLocDoc + equal: + path: spec + value: + provider: velero.io/aws + credential: + name: bsl-test-release-name-common-test-my-snap1 + key: cloud + config: + region: us-east-1 + s3ForcePathStyle: "true" + bool: "false" + accessMode: ReadOnly + objectStorage: + bucket: my-bucket + + - it: should generate correct spec with provided objectStorage prefix and caCert + set: + backupStorageLocation: + - enabled: true + name: my-snap1 + provider: aws + accessMode: ReadOnly + objectStorage: + bucket: my-bucket + prefix: my-prefix + caCert: my-ca-cert + credential: + aws: + id: my-id + key: my-key + asserts: + - documentIndex: &backStoreLocDoc 1 + isKind: + of: BackupStorageLocation + - documentIndex: *backStoreLocDoc + isAPIVersion: + of: velero.io/v1 + - documentIndex: *backStoreLocDoc + equal: + path: spec + value: + provider: velero.io/aws + credential: + name: bsl-test-release-name-common-test-my-snap1 + key: cloud + accessMode: ReadOnly + objectStorage: + bucket: my-bucket + prefix: my-prefix + caCert: my-ca-cert + + - it: should generate correct spec with provided backupSyncPeriod + set: + backupStorageLocation: + - enabled: true + name: my-snap1 + provider: aws + backupSyncPeriod: 1h + objectStorage: + bucket: my-bucket + credential: + aws: + id: my-id + key: my-key + asserts: + - documentIndex: &backStoreLocDoc 1 + isKind: + of: BackupStorageLocation + - documentIndex: *backStoreLocDoc + isAPIVersion: + of: velero.io/v1 + - documentIndex: *backStoreLocDoc + equal: + path: spec + value: + provider: velero.io/aws + credential: + name: bsl-test-release-name-common-test-my-snap1 + key: cloud + accessMode: ReadWrite + backupSyncPeriod: 1h + objectStorage: + bucket: my-bucket + + - it: should generate correct spec with provided validationFrequency + set: + 
backupStorageLocation: + - enabled: true + name: my-snap1 + provider: aws + validationFrequency: 1h + objectStorage: + bucket: my-bucket + credential: + aws: + id: my-id + key: my-key + asserts: + - documentIndex: &backStoreLocDoc 1 + isKind: + of: BackupStorageLocation + - documentIndex: *backStoreLocDoc + isAPIVersion: + of: velero.io/v1 + - documentIndex: *backStoreLocDoc + equal: + path: spec + value: + provider: velero.io/aws + credential: + name: bsl-test-release-name-common-test-my-snap1 + key: cloud + accessMode: ReadWrite + validationFrequency: 1h + objectStorage: + bucket: my-bucket + + # Failures + - it: should fail without name + set: + backupStorageLocation: + - enabled: true + provider: aws + credential: + aws: + id: my-id + key: my-key + asserts: + - failedTemplate: + errorMessage: Backup Storage Location - Expected non-empty [name] + + - it: should fail without provider + set: + backupStorageLocation: + - enabled: true + name: my-snap1 + credential: + aws: + id: my-id + key: my-key + asserts: + - failedTemplate: + errorMessage: Backup Storage Location - Expected non-empty [provider] + + - it: should fail without credential + set: + backupStorageLocation: + - enabled: true + name: my-snap1 + provider: my-provider + asserts: + - failedTemplate: + errorMessage: Backup Storage Location - Expected non-empty [credential] + + - it: should fail without credential.name + set: + backupStorageLocation: + - enabled: true + name: my-snap1 + provider: my-provider + credential: + key: my-key + asserts: + - failedTemplate: + errorMessage: Backup Storage Location - Expected non-empty [credential.name] + + - it: should fail without credential.key + set: + backupStorageLocation: + - enabled: true + name: my-snap1 + provider: my-provider + credential: + name: my-name + asserts: + - failedTemplate: + errorMessage: Backup Storage Location - Expected non-empty [credential.key] + + - it: should fail without credential.aws.id with provider aws + set: + backupStorageLocation: + - enabled: true + name: my-snap1 + provider: aws + credential: + aws: + key: my-key + asserts: + - failedTemplate: + errorMessage: Velero Provider Secret - Expected non-empty [credential.aws|s3.id] for [aws|s3] provider + + - it: should fail without credential.aws.key with provider aws + set: + backupStorageLocation: + - enabled: true + name: my-snap1 + provider: aws + credential: + aws: + id: my-id + asserts: + - failedTemplate: + errorMessage: Velero Provider Secret - Expected non-empty [credential.aws|s3.key] for [aws|s3] provider + + - it: should fail invalid accessMode + set: + backupStorageLocation: + - enabled: true + name: my-snap1 + provider: aws + accessMode: invalid + credential: + aws: + id: my-id + key: my-key + asserts: + - failedTemplate: + errorMessage: Backup Storage Location - Expected [accessMode] to be one of [ReadOnly, ReadWrite], but got [invalid] + + - it: should fail without objectStorage + set: + backupStorageLocation: + - enabled: true + name: my-snap1 + provider: aws + credential: + aws: + id: my-id + key: my-key + asserts: + - failedTemplate: + errorMessage: Backup Storage Location - Expected non-empty [objectStorage] + + - it: should fail without objectStorage.bucket + set: + backupStorageLocation: + - enabled: true + name: my-snap1 + provider: aws + objectStorage: + prefix: my-prefix + credential: + aws: + id: my-id + key: my-key + asserts: + - failedTemplate: + errorMessage: Backup Storage Location - Expected non-empty [objectStorage.bucket] diff --git 
a/library/common-test/tests/veleroSchedule/metadata_test.yaml b/library/common-test/tests/veleroSchedule/metadata_test.yaml new file mode 100644 index 000000000..ef5a42882 --- /dev/null +++ b/library/common-test/tests/veleroSchedule/metadata_test.yaml @@ -0,0 +1,63 @@ +suite: velero schedule metadata test +templates: + - common.yaml +chart: + appVersion: &appVer v9.9.9 +release: + name: test-release-name + namespace: test-release-namespace +tests: + - it: should pass with schedule created with labels and annotations + set: + label1: label1 + label2: global_label2 + annotation1: annotation1 + annotation2: global_annotation2 + global: + labels: + g_label1: global_label1 + g_label2: "{{ .Values.label2 }}" + annotations: + g_annotation1: global_annotation1 + g_annotation2: "{{ .Values.annotation2 }}" + schedules: + my-sched: + enabled: true + labels: + label1: "{{ .Values.label1 }}" + label2: label2 + annotations: + annotation1: "{{ .Values.annotation1 }}" + annotation2: annotation2 + schedule: "0 0 * * *" + asserts: + - documentIndex: &scheduleDoc 0 + isKind: + of: Schedule + - documentIndex: *scheduleDoc + isAPIVersion: + of: velero.io/v1 + - documentIndex: *scheduleDoc + equal: + path: metadata.annotations + value: + annotation1: annotation1 + annotation2: annotation2 + g_annotation1: global_annotation1 + g_annotation2: global_annotation2 + - documentIndex: *scheduleDoc + equal: + path: metadata.labels + value: + app: common-test-1.0.0 + release: test-release-name + helm-revision: "0" + helm.sh/chart: common-test-1.0.0 + app.kubernetes.io/name: common-test + app.kubernetes.io/instance: test-release-name + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: *appVer + g_label1: global_label1 + g_label2: global_label2 + label1: label1 + label2: label2 diff --git a/library/common-test/tests/veleroSchedule/names_test.yaml b/library/common-test/tests/veleroSchedule/names_test.yaml new file mode 100644 index 000000000..0671bbd11 --- /dev/null +++ b/library/common-test/tests/veleroSchedule/names_test.yaml @@ -0,0 +1,37 @@ +suite: velero schedule name test +templates: + - common.yaml +release: + name: test-release-name + namespace: test-release-namespace +tests: + - it: should generate correct name + set: + schedules: + my-sched1: + enabled: true + schedule: "0 0 * * *" + my-sched2: + enabled: true + schedule: "0 0 * * *" + asserts: + - documentIndex: &scheduleDoc 0 + isKind: + of: Schedule + - documentIndex: *scheduleDoc + isAPIVersion: + of: velero.io/v1 + - documentIndex: *scheduleDoc + equal: + path: metadata.name + value: test-release-name-common-test-my-sched1 + - documentIndex: &otherScheduleDoc 1 + isKind: + of: Schedule + - documentIndex: *otherScheduleDoc + isAPIVersion: + of: velero.io/v1 + - documentIndex: *otherScheduleDoc + equal: + path: metadata.name + value: test-release-name-common-test-my-sched2 diff --git a/library/common-test/tests/veleroSchedule/spec_test.yaml b/library/common-test/tests/veleroSchedule/spec_test.yaml new file mode 100644 index 000000000..d8cd3c8cc --- /dev/null +++ b/library/common-test/tests/veleroSchedule/spec_test.yaml @@ -0,0 +1,94 @@ +suite: velero schedule spec test +templates: + - common.yaml +release: + name: test-release-name + namespace: test-release-namespace +tests: + - it: should generate correct spec with provided provider and credential + set: + schedules: + my-sched: + enabled: true + schedule: "0 2 * * *" + asserts: + - documentIndex: &scheduleDoc 0 + isKind: + of: Schedule + - documentIndex: *scheduleDoc + isAPIVersion: + of: 
velero.io/v1 + - documentIndex: *scheduleDoc + equal: + path: spec + value: + schedule: "0 2 * * *" + + - it: should generate correct spec with useOwnerReferencesInBackup + set: + schedules: + my-sched: + enabled: true + schedule: "0 2 * * *" + useOwnerReferencesInBackup: true + asserts: + - documentIndex: &scheduleDoc 0 + isKind: + of: Schedule + - documentIndex: *scheduleDoc + isAPIVersion: + of: velero.io/v1 + - documentIndex: *scheduleDoc + equal: + path: spec + value: + schedule: "0 2 * * *" + useOwnerReferencesInBackup: true + + - it: should generate correct spec with template + set: + schedules: + my-sched: + enabled: true + schedule: "0 2 * * *" + template: + ttl: 720h + includeClusterResources: true + snapshotVolumes: true + asserts: + - documentIndex: &scheduleDoc 0 + isKind: + of: Schedule + - documentIndex: *scheduleDoc + isAPIVersion: + of: velero.io/v1 + - documentIndex: *scheduleDoc + equal: + path: spec + value: + schedule: "0 2 * * *" + template: + ttl: 720h + includeClusterResources: true + snapshotVolumes: true + + # Failures + - it: should fail without schedule + set: + schedules: + my-sched: + enabled: true + asserts: + - failedTemplate: + errorMessage: Velero Schedule - Expected non-empty [schedule] + + - it: should fail with useOwnerReferencesInBackup not a bool + set: + schedules: + my-sched: + enabled: true + schedule: "0 2 * * *" + useOwnerReferencesInBackup: "true" + asserts: + - failedTemplate: + errorMessage: Velero Schedule - Expected [useOwnerReferencesInBackup] to be a boolean diff --git a/library/common-test/tests/veleroVolumeSnapshotLocation/metadata_test.yaml b/library/common-test/tests/veleroVolumeSnapshotLocation/metadata_test.yaml new file mode 100644 index 000000000..7d7534a1f --- /dev/null +++ b/library/common-test/tests/veleroVolumeSnapshotLocation/metadata_test.yaml @@ -0,0 +1,98 @@ +suite: velero volumeSnapshotLocation metadata test +templates: + - common.yaml +chart: + appVersion: &appVer v9.9.9 +release: + name: test-release-name + namespace: test-release-namespace +tests: + - it: should pass with volumeSnapshotLocation created with labels and annotations + set: + label1: label1 + label2: global_label2 + annotation1: annotation1 + annotation2: global_annotation2 + global: + labels: + g_label1: global_label1 + g_label2: "{{ .Values.label2 }}" + annotations: + g_annotation1: global_annotation1 + g_annotation2: "{{ .Values.annotation2 }}" + volumeSnapshotLocation: + - enabled: true + name: my-snap + labels: + label1: "{{ .Values.label1 }}" + label2: label2 + annotations: + annotation1: "{{ .Values.annotation1 }}" + annotation2: annotation2 + provider: aws + credential: + aws: + id: my-id + key: my-key + asserts: + - documentIndex: &secretDoc 0 + isKind: + of: Secret + - documentIndex: *secretDoc + isAPIVersion: + of: v1 + - documentIndex: *secretDoc + equal: + path: metadata.annotations + value: + annotation1: annotation1 + annotation2: annotation2 + g_annotation1: global_annotation1 + g_annotation2: global_annotation2 + - documentIndex: *secretDoc + equal: + path: metadata.labels + value: + app: common-test-1.0.0 + release: test-release-name + helm-revision: "0" + helm.sh/chart: common-test-1.0.0 + app.kubernetes.io/name: common-test + app.kubernetes.io/instance: test-release-name + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: *appVer + g_label1: global_label1 + g_label2: global_label2 + label1: label1 + label2: label2 + + - documentIndex: &volSnapLocDoc 1 + isKind: + of: VolumeSnapshotLocation + - documentIndex: *volSnapLocDoc 
+ isAPIVersion: + of: velero.io/v1 + - documentIndex: *volSnapLocDoc + equal: + path: metadata.annotations + value: + annotation1: annotation1 + annotation2: annotation2 + g_annotation1: global_annotation1 + g_annotation2: global_annotation2 + - documentIndex: *volSnapLocDoc + equal: + path: metadata.labels + value: + app: common-test-1.0.0 + release: test-release-name + helm-revision: "0" + helm.sh/chart: common-test-1.0.0 + app.kubernetes.io/name: common-test + app.kubernetes.io/instance: test-release-name + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/version: *appVer + g_label1: global_label1 + g_label2: global_label2 + label1: label1 + label2: label2 diff --git a/library/common-test/tests/veleroVolumeSnapshotLocation/names_test.yaml b/library/common-test/tests/veleroVolumeSnapshotLocation/names_test.yaml new file mode 100644 index 000000000..ed485e730 --- /dev/null +++ b/library/common-test/tests/veleroVolumeSnapshotLocation/names_test.yaml @@ -0,0 +1,65 @@ +suite: velero volumeSnapshotLocation name test +templates: + - common.yaml +release: + name: test-release-name + namespace: test-release-namespace +tests: + - it: should generate correct name + set: + volumeSnapshotLocation: + - enabled: true + name: my-snap1 + provider: aws + credential: + aws: + id: my-id + key: my-key + - enabled: true + name: my-snap2 + provider: aws + credential: + aws: + id: my-id + key: my-key + asserts: + - documentIndex: &secretDoc 0 + isKind: + of: Secret + - documentIndex: *secretDoc + isAPIVersion: + of: v1 + - documentIndex: *secretDoc + equal: + path: metadata.name + value: vsl-test-release-name-common-test-my-snap1 + - documentIndex: &volSnapLocDoc 1 + isKind: + of: VolumeSnapshotLocation + - documentIndex: *volSnapLocDoc + isAPIVersion: + of: velero.io/v1 + - documentIndex: *volSnapLocDoc + equal: + path: metadata.name + value: test-release-name-common-test-my-snap1 + - documentIndex: &otherSecretDoc 2 + isKind: + of: Secret + - documentIndex: *otherSecretDoc + isAPIVersion: + of: v1 + - documentIndex: *otherSecretDoc + equal: + path: metadata.name + value: vsl-test-release-name-common-test-my-snap2 + - documentIndex: &otherVolSnapLocDoc 3 + isKind: + of: VolumeSnapshotLocation + - documentIndex: *otherVolSnapLocDoc + isAPIVersion: + of: velero.io/v1 + - documentIndex: *otherVolSnapLocDoc + equal: + path: metadata.name + value: test-release-name-common-test-my-snap2 diff --git a/library/common-test/tests/veleroVolumeSnapshotLocation/spec_test.yaml b/library/common-test/tests/veleroVolumeSnapshotLocation/spec_test.yaml new file mode 100644 index 000000000..ccad540e5 --- /dev/null +++ b/library/common-test/tests/veleroVolumeSnapshotLocation/spec_test.yaml @@ -0,0 +1,234 @@ +suite: velero volumeSnapshotLocation spec test +templates: + - common.yaml +release: + name: test-release-name + namespace: test-release-namespace +tests: + - it: should generate correct spec with aws provider and credential + set: + volumeSnapshotLocation: + - enabled: true + name: my-snap1 + provider: aws + credential: + aws: + id: my-id + key: my-key + asserts: + - documentIndex: &secretDoc 0 + isKind: + of: Secret + - documentIndex: *secretDoc + isAPIVersion: + of: v1 + - documentIndex: *secretDoc + equal: + path: stringData + value: + cloud: |- + [default] + aws_access_key_id=my-id + aws_secret_access_key=my-key + - documentIndex: &volSnapLocDoc 1 + isKind: + of: VolumeSnapshotLocation + - documentIndex: *volSnapLocDoc + isAPIVersion: + of: velero.io/v1 + - documentIndex: *volSnapLocDoc + equal: + path: spec + 
value: + provider: velero.io/aws + credential: + name: vsl-test-release-name-common-test-my-snap1 + key: cloud + + - it: should generate correct spec with s3 provider and credential + set: + volumeSnapshotLocation: + - enabled: true + name: my-snap1 + provider: s3 + credential: + s3: + id: my-id + key: my-key + asserts: + - documentIndex: &secretDoc 0 + isKind: + of: Secret + - documentIndex: *secretDoc + isAPIVersion: + of: v1 + - documentIndex: *secretDoc + equal: + path: stringData + value: + cloud: |- + [default] + aws_access_key_id=my-id + aws_secret_access_key=my-key + - documentIndex: &volSnapLocDoc 1 + isKind: + of: VolumeSnapshotLocation + - documentIndex: *volSnapLocDoc + isAPIVersion: + of: velero.io/v1 + - documentIndex: *volSnapLocDoc + equal: + path: spec + value: + provider: velero.io/aws + credential: + name: vsl-test-release-name-common-test-my-snap1 + key: cloud + + - it: should generate correct spec with provided provider and credential + set: + volumeSnapshotLocation: + - enabled: true + name: my-snap1 + provider: my-provider + credential: + name: my-credential + key: my-key + asserts: + - documentIndex: &volSnapLocDoc 0 + isKind: + of: VolumeSnapshotLocation + - documentIndex: *volSnapLocDoc + isAPIVersion: + of: velero.io/v1 + - documentIndex: *volSnapLocDoc + equal: + path: spec + value: + provider: my-provider + credential: + name: my-credential + key: my-key + + - it: should generate correct spec with provided provider, credential and custom config + set: + region: us-east-1 + useS3PathStyle: true + volumeSnapshotLocation: + - enabled: true + name: my-snap1 + provider: aws + credential: + aws: + id: my-id + key: my-key + config: + region: "{{ .Values.region }}" + s3ForcePathStyle: "{{ .Values.useS3PathStyle }}" + bool: false + asserts: + - documentIndex: &volSnapLocDoc 1 + isKind: + of: VolumeSnapshotLocation + - documentIndex: *volSnapLocDoc + isAPIVersion: + of: velero.io/v1 + - documentIndex: *volSnapLocDoc + equal: + path: spec + value: + provider: velero.io/aws + credential: + name: vsl-test-release-name-common-test-my-snap1 + key: cloud + config: + region: us-east-1 + s3ForcePathStyle: "true" + bool: "false" + + # Failures + - it: should fail without name + set: + volumeSnapshotLocation: + - enabled: true + provider: aws + credential: + aws: + id: my-id + key: my-key + asserts: + - failedTemplate: + errorMessage: Volume Snapshot Location - Expected non-empty [name] + + - it: should fail without provider + set: + volumeSnapshotLocation: + - enabled: true + name: my-snap1 + credential: + aws: + id: my-id + key: my-key + asserts: + - failedTemplate: + errorMessage: Volume Snapshot Location - Expected non-empty [provider] + + - it: should fail without credential + set: + volumeSnapshotLocation: + - enabled: true + name: my-snap1 + provider: my-provider + asserts: + - failedTemplate: + errorMessage: Volume Snapshot Location - Expected non-empty [credential] + + - it: should fail without credential.name + set: + volumeSnapshotLocation: + - enabled: true + name: my-snap1 + provider: my-provider + credential: + key: my-key + asserts: + - failedTemplate: + errorMessage: Volume Snapshot Location - Expected non-empty [credential.name] + + - it: should fail without credential.key + set: + volumeSnapshotLocation: + - enabled: true + name: my-snap1 + provider: my-provider + credential: + name: my-name + asserts: + - failedTemplate: + errorMessage: Volume Snapshot Location - Expected non-empty [credential.key] + + - it: should fail without credential.aws.id with provider 
aws + set: + volumeSnapshotLocation: + - enabled: true + name: my-snap1 + provider: aws + credential: + aws: + key: my-key + asserts: + - failedTemplate: + errorMessage: Velero Provider Secret - Expected non-empty [credential.aws|s3.id] for [aws|s3] provider + + - it: should fail without credential.aws.key with provider aws + set: + volumeSnapshotLocation: + - enabled: true + name: my-snap1 + provider: aws + credential: + aws: + id: my-id + asserts: + - failedTemplate: + errorMessage: Velero Provider Secret - Expected non-empty [credential.aws|s3.key] for [aws|s3] provider diff --git a/library/common/Chart.yaml b/library/common/Chart.yaml index f1e35a278..10941ee1d 100644 --- a/library/common/Chart.yaml +++ b/library/common/Chart.yaml @@ -15,4 +15,4 @@ maintainers: name: common sources: null type: library -version: 15.2.0 +version: 15.3.0 diff --git a/library/common/templates/class/velero/_backupStorageLocation.tpl b/library/common/templates/class/velero/_backupStorageLocation.tpl new file mode 100644 index 000000000..83fc288b3 --- /dev/null +++ b/library/common/templates/class/velero/_backupStorageLocation.tpl @@ -0,0 +1,61 @@ +{{/* backupstoragelocation Class */}} +{{/* Call this template: +{{ include "tc.v1.common.class.velero.backupstoragelocation" (dict "rootCtx" $ "objectData" $objectData) }} + +rootCtx: The root context of the chart. +objectData: + name: The name of the backupstoragelocation. + labels: The labels of the backupstoragelocation. + annotations: The annotations of the backupstoragelocation. + namespace: The namespace of the backupstoragelocation. (Optional) +*/}} + +{{- define "tc.v1.common.class.velero.backupstoragelocation" -}} + + {{- $rootCtx := .rootCtx -}} + {{- $objectData := .objectData }} +--- +apiVersion: velero.io/v1 +kind: BackupStorageLocation +metadata: + name: {{ $objectData.name }} + namespace: {{ include "tc.v1.common.lib.metadata.namespace" (dict "rootCtx" $rootCtx "objectData" $objectData "caller" "backupstoragelocation") }} + {{- $labels := (mustMerge ($objectData.labels | default dict) (include "tc.v1.common.lib.metadata.allLabels" $rootCtx | fromYaml)) -}} + {{- with (include "tc.v1.common.lib.metadata.render" (dict "rootCtx" $rootCtx "labels" $labels) | trim) }} + labels: + {{- . | nindent 4 }} + {{- end -}} + {{- $annotations := (mustMerge ($objectData.annotations | default dict) (include "tc.v1.common.lib.metadata.allAnnotations" $rootCtx | fromYaml)) -}} + {{- with (include "tc.v1.common.lib.metadata.render" (dict "rootCtx" $rootCtx "annotations" $annotations) | trim) }} + annotations: + {{- . | nindent 4 }} + {{- end }} +spec: + provider: {{ $objectData.provider }} + {{- with $objectData.credential }} + credential: + name: {{ .name }} + key: {{ .key }} + {{- end -}} + {{- with $objectData.config }} + config: + {{- range $k, $v := . }} + {{ $k }}: {{ tpl (toString $v) $rootCtx | quote }} + {{- end -}} + {{- end -}} + {{- with $objectData.backupSyncPeriod }} + backupSyncPeriod: {{ . }} + {{- end -}} + {{- with $objectData.validationFrequency }} + validationFrequency: {{ . }} + {{- end }} + accessMode: {{ $objectData.accessMode | default "ReadWrite" }} + objectStorage: + bucket: {{ $objectData.objectStorage.bucket | quote }} + {{- with $objectData.objectStorage.prefix }} + prefix: {{ . | quote }} + {{- end -}} + {{- with $objectData.objectStorage.caCert }} + caCert: {{ . 
}} + {{- end -}} +{{- end -}} diff --git a/library/common/templates/class/velero/_schedule.tpl b/library/common/templates/class/velero/_schedule.tpl new file mode 100644 index 000000000..ff5419e0a --- /dev/null +++ b/library/common/templates/class/velero/_schedule.tpl @@ -0,0 +1,42 @@ +{{/* schedule Class */}} +{{/* Call this template: +{{ include "tc.v1.common.class.velero.schedule" (dict "rootCtx" $ "objectData" $objectData) }} + +rootCtx: The root context of the chart. +objectData: + name: The name of the schedule. + labels: The labels of the schedule. + annotations: The annotations of the schedule. + namespace: The namespace of the schedule. (Optional) +*/}} + +{{- define "tc.v1.common.class.velero.schedule" -}} + + {{- $rootCtx := .rootCtx -}} + {{- $objectData := .objectData }} +--- +apiVersion: velero.io/v1 +kind: Schedule +metadata: + name: {{ $objectData.name }} + namespace: {{ include "tc.v1.common.lib.metadata.namespace" (dict "rootCtx" $rootCtx "objectData" $objectData "caller" "Schedule") }} + {{- $labels := (mustMerge ($objectData.labels | default dict) (include "tc.v1.common.lib.metadata.allLabels" $rootCtx | fromYaml)) -}} + {{- with (include "tc.v1.common.lib.metadata.render" (dict "rootCtx" $rootCtx "labels" $labels) | trim) }} + labels: + {{- . | nindent 4 }} + {{- end -}} + {{- $annotations := (mustMerge ($objectData.annotations | default dict) (include "tc.v1.common.lib.metadata.allAnnotations" $rootCtx | fromYaml)) -}} + {{- with (include "tc.v1.common.lib.metadata.render" (dict "rootCtx" $rootCtx "annotations" $annotations) | trim) }} + annotations: + {{- . | nindent 4 }} + {{- end }} +spec: + schedule: {{ $objectData.schedule | quote }} + {{- if (kindIs "bool" $objectData.useOwnerReferencesInBackup) }} + useOwnerReferencesInBackup: {{ $objectData.useOwnerReferencesInBackup }} + {{- end -}} + {{- with $objectData.template }} + template: + {{- toYaml . | nindent 4 }} + {{- end -}} +{{- end -}} diff --git a/library/common/templates/class/velero/_volumeSnapshotLocation.tpl b/library/common/templates/class/velero/_volumeSnapshotLocation.tpl new file mode 100644 index 000000000..117284b98 --- /dev/null +++ b/library/common/templates/class/velero/_volumeSnapshotLocation.tpl @@ -0,0 +1,46 @@ +{{/* volumesnapshotlocation Class */}} +{{/* Call this template: +{{ include "tc.v1.common.class.velero.volumesnapshotlocation" (dict "rootCtx" $ "objectData" $objectData) }} + +rootCtx: The root context of the chart. +objectData: + name: The name of the volumesnapshotlocation. + labels: The labels of the volumesnapshotlocation. + annotations: The annotations of the volumesnapshotlocation. + namespace: The namespace of the volumesnapshotlocation. (Optional) +*/}} + +{{- define "tc.v1.common.class.velero.volumesnapshotlocation" -}} + + {{- $rootCtx := .rootCtx -}} + {{- $objectData := .objectData }} +--- +apiVersion: velero.io/v1 +kind: VolumeSnapshotLocation +metadata: + name: {{ $objectData.name }} + namespace: {{ include "tc.v1.common.lib.metadata.namespace" (dict "rootCtx" $rootCtx "objectData" $objectData "caller" "Volume Snapshot Location") }} + {{- $labels := (mustMerge ($objectData.labels | default dict) (include "tc.v1.common.lib.metadata.allLabels" $rootCtx | fromYaml)) -}} + {{- with (include "tc.v1.common.lib.metadata.render" (dict "rootCtx" $rootCtx "labels" $labels) | trim) }} + labels: + {{- . 
| nindent 4 }} + {{- end -}} + {{- $annotations := (mustMerge ($objectData.annotations | default dict) (include "tc.v1.common.lib.metadata.allAnnotations" $rootCtx | fromYaml)) -}} + {{- with (include "tc.v1.common.lib.metadata.render" (dict "rootCtx" $rootCtx "annotations" $annotations) | trim) }} + annotations: + {{- . | nindent 4 }} + {{- end }} +spec: + provider: {{ $objectData.provider }} + {{- with $objectData.credential }} + credential: + name: {{ .name }} + key: {{ .key }} + {{- end -}} + {{- with $objectData.config }} + config: + {{- range $k, $v := . }} + {{ $k }}: {{ tpl (toString $v) $rootCtx | quote }} + {{- end -}} + {{- end -}} +{{- end -}} diff --git a/library/common/templates/lib/velero/_backupStorageLocationValidation.tpl b/library/common/templates/lib/velero/_backupStorageLocationValidation.tpl new file mode 100644 index 000000000..0bcfb7725 --- /dev/null +++ b/library/common/templates/lib/velero/_backupStorageLocationValidation.tpl @@ -0,0 +1,43 @@ +{{/* Velero BackupStorageLocation Validation */}} +{{/* Call this template: +{{ include "tc.v1.common.lib.velero.backupstoragelocation.validation" (dict "objectData" $objectData) -}} +objectData: + rootCtx: The root context of the chart. + objectData: The backupstoragelocation object. +*/}} + +{{- define "tc.v1.common.lib.velero.backupstoragelocation.validation" -}} + {{- $rootCtx := .rootCtx -}} + {{- $objectData := .objectData -}} + + {{- if not $objectData.provider -}} + {{- fail "Backup Storage Location - Expected non-empty [provider]" -}} + {{- end -}} + + {{- if not $objectData.credential -}} + {{- fail "Backup Storage Location - Expected non-empty [credential]" -}} + {{- end -}} + + {{- if not $objectData.credential.name -}} + {{- fail "Backup Storage Location - Expected non-empty [credential.name]" -}} + {{- end -}} + + {{- if not $objectData.credential.key -}} + {{- fail "Backup Storage Location - Expected non-empty [credential.key]" -}} + {{- end -}} + + {{- if $objectData.accessMode -}} + {{- $validModes := (list "ReadOnly" "ReadWrite") -}} + {{- if not (mustHas $objectData.accessMode $validModes) -}} + {{- fail (printf "Backup Storage Location - Expected [accessMode] to be one of [%s], but got [%s]" (join ", " $validModes) $objectData.accessMode) -}} + {{- end -}} + {{- end -}} + + {{- if not $objectData.objectStorage -}} + {{- fail "Backup Storage Location - Expected non-empty [objectStorage]" -}} + {{- end -}} + + {{- if not $objectData.objectStorage.bucket -}} + {{- fail "Backup Storage Location - Expected non-empty [objectStorage.bucket]" -}} + {{- end -}} +{{- end -}} diff --git a/library/common/templates/lib/velero/_providerSecret.tpl b/library/common/templates/lib/velero/_providerSecret.tpl new file mode 100644 index 000000000..6fc3c5aab --- /dev/null +++ b/library/common/templates/lib/velero/_providerSecret.tpl @@ -0,0 +1,37 @@ +{{- define "tc.v1.common.lib.velero.provider.secret" -}} + {{- $rootCtx := .rootCtx }} + {{- $objectData := .objectData -}} + {{- $prefix := .prefix -}} + + {{- $creds := "" -}} + + {{/* Make sure provider is a string */}} + {{- $provider := $objectData.provider | toString -}} + + {{- if and (eq $provider "aws") $objectData.credential.aws -}} + {{- $creds = (include "tc.v1.common.lib.velero.provider.aws.secret" (dict "creds" $objectData.credential.aws) | fromYaml).data -}} + {{- $_ := set $objectData "provider" "velero.io/aws" -}} + {{- else if and (eq $provider "s3") $objectData.credential.s3 -}} + {{- $creds = (include "tc.v1.common.lib.velero.provider.aws.secret" (dict "creds" 
$objectData.credential.s3) | fromYaml).data -}} + {{- $_ := set $objectData "provider" "velero.io/aws" -}} + {{- end -}} + + {{/* If we matched a provider, create the secret */}} + {{- if $creds -}} + {{- $secretData := (dict + "name" (printf "%s-%s" $prefix $objectData.name) + "labels" $objectData.labels + "annotations" $objectData.annotations + "data" (dict "cloud" $creds) + ) -}} + + {{/* Create the secret */}} + {{- include "tc.v1.common.class.secret" (dict "rootCtx" $rootCtx "objectData" $secretData) -}} + + {{/* Update the credential object with the name and key */}} + {{- $_ := set $objectData.credential "name" (printf "%s-%s" $prefix $objectData.name) -}} + {{- $_ := set $objectData.credential "key" "cloud" -}} + + {{- end -}} + +{{- end -}} diff --git a/library/common/templates/lib/velero/_scheduleValidation.tpl b/library/common/templates/lib/velero/_scheduleValidation.tpl new file mode 100644 index 000000000..94d88524f --- /dev/null +++ b/library/common/templates/lib/velero/_scheduleValidation.tpl @@ -0,0 +1,22 @@ +{{/* Velero Schedule Validation */}} +{{/* Call this template: +{{ include "tc.v1.common.lib.velero.schedule.validation" (dict "objectData" $objectData) -}} +objectData: + rootCtx: The root context of the chart. + objectData: The schedule object. +*/}} + +{{- define "tc.v1.common.lib.velero.schedule.validation" -}} + {{- $rootCtx := .rootCtx -}} + {{- $objectData := .objectData -}} + + {{- if not $objectData.schedule -}} + {{- fail "Velero Schedule - Expected non-empty [schedule]" -}} + {{- end -}} + + {{- if (hasKey $objectData "useOwnerReferencesInBackup") -}} + {{- if not (kindIs "bool" $objectData.useOwnerReferencesInBackup) -}} + {{- fail "Velero Schedule - Expected [useOwnerReferencesInBackup] to be a boolean" -}} + {{- end -}} + {{- end -}} +{{- end -}} diff --git a/library/common/templates/lib/velero/_volumeSnapshotLocationValidation.tpl b/library/common/templates/lib/velero/_volumeSnapshotLocationValidation.tpl new file mode 100644 index 000000000..8f5496583 --- /dev/null +++ b/library/common/templates/lib/velero/_volumeSnapshotLocationValidation.tpl @@ -0,0 +1,29 @@ +{{/* Velero VolumeSnapshotLocation Validation */}} +{{/* Call this template: +{{ include "tc.v1.common.lib.velero.volumesnapshotlocation.validation" (dict "objectData" $objectData) -}} +objectData: + rootCtx: The root context of the chart. + objectData: The volumesnapshotlocation object.
+*/}} + +{{- define "tc.v1.common.lib.velero.volumesnapshotlocation.validation" -}} + {{- $rootCtx := .rootCtx -}} + {{- $objectData := .objectData -}} + + {{- if not $objectData.provider -}} + {{- fail "Volume Snapshot Location - Expected non-empty [provider]" -}} + {{- end -}} + + {{- if not $objectData.credential -}} + {{- fail "Volume Snapshot Location - Expected non-empty [credential]" -}} + {{- end -}} + + {{- if not $objectData.credential.name -}} + {{- fail "Volume Snapshot Location - Expected non-empty [credential.name]" -}} + {{- end -}} + + {{- if not $objectData.credential.key -}} + {{- fail "Volume Snapshot Location - Expected non-empty [credential.key]" -}} + {{- end -}} + +{{- end -}} diff --git a/library/common/templates/lib/velero/secretData/_aws.tpl b/library/common/templates/lib/velero/secretData/_aws.tpl new file mode 100644 index 000000000..6933b4867 --- /dev/null +++ b/library/common/templates/lib/velero/secretData/_aws.tpl @@ -0,0 +1,14 @@ +{{- define "tc.v1.common.lib.velero.provider.aws.secret" -}} + {{- $creds := .creds -}} + + {{- $reqKeys := list "id" "key" -}} + {{- range $k := $reqKeys -}} + {{- if not (get $creds $k) -}} + {{- fail (printf "Velero Provider Secret - Expected non-empty [credential.aws|s3.%s] for [aws|s3] provider" $k) -}} + {{- end -}} + {{- end }} +data: | + [default] + aws_access_key_id={{ $creds.id }} + aws_secret_access_key={{ $creds.key }} +{{- end -}} diff --git a/library/common/templates/loader/_apply.tpl b/library/common/templates/loader/_apply.tpl index c85d616c6..2abe27eb4 100644 --- a/library/common/templates/loader/_apply.tpl +++ b/library/common/templates/loader/_apply.tpl @@ -79,4 +79,13 @@ {{/* Render/Set portal configmap, .Values.iXPortals and APPURL */}} {{- include "tc.v1.common.spawner.portal" . | nindent 0 -}} + {{/* Render Velero VolumeSnapshotLocation*/}} + {{- include "tc.v1.common.spawner.velero.volumesnapshotlocation" . | nindent 0 -}} + + {{/* Render Velero BackupStorageLocation*/}} + {{- include "tc.v1.common.spawner.velero.backupstoragelocation" . | nindent 0 -}} + + {{/* Render Velero Schedule*/}} + {{- include "tc.v1.common.spawner.velero.schedule" . 
| nindent 0 -}} + {{- end -}} diff --git a/library/common/templates/spawner/velero/_backupstoragelocation.tpl b/library/common/templates/spawner/velero/_backupstoragelocation.tpl new file mode 100644 index 000000000..8d289de98 --- /dev/null +++ b/library/common/templates/spawner/velero/_backupstoragelocation.tpl @@ -0,0 +1,72 @@ +{{/* backupstoragelocation Spawwner */}} +{{/* Call this template: +{{ include "tc.v1.common.spawner.velero.backupstoragelocation" $ -}} +*/}} + +{{- define "tc.v1.common.spawner.velero.backupstoragelocation" -}} + {{- $fullname := include "tc.v1.common.lib.chart.names.fullname" $ -}} + + {{- range $backupStorageLoc := .Values.backupStorageLocation -}} + + {{- $enabled := false -}} + {{- if hasKey $backupStorageLoc "enabled" -}} + {{- if not (kindIs "invalid" $backupStorageLoc.enabled) -}} + {{- $enabled = $backupStorageLoc.enabled -}} + {{- else -}} + {{- fail (printf "Backup Storage Location - Expected the defined key [enabled] in [backupStorageLocation.%s] to not be empty" $backupStorageLoc.name) -}} + {{- end -}} + {{- end -}} + + {{- if kindIs "string" $enabled -}} + {{- $enabled = tpl $enabled $ -}} + + {{/* After tpl it becomes a string, not a bool */}} + {{- if eq $enabled "true" -}} + {{- $enabled = true -}} + {{- else if eq $enabled "false" -}} + {{- $enabled = false -}} + {{- end -}} + {{- end -}} + + {{- if $enabled -}} + + {{/* Create a copy of the backupstoragelocation */}} + {{- $objectData := (mustDeepCopy $backupStorageLoc) -}} + + {{- if not $backupStorageLoc.name -}} + {{- fail "Backup Storage Location - Expected non-empty [name]" -}} + {{- end -}} + + {{- $objectName := (printf "%s-%s" $fullname $backupStorageLoc.name) -}} + {{- if hasKey $objectData "expandObjectName" -}} + {{- if not $objectData.expandObjectName -}} + {{- $objectName = $backupStorageLoc.name -}} + {{- end -}} + {{- end -}} + + {{/* Set namespace to velero location or itself, just in case its used from within velero */}} + {{- $operator := index $.Values.operator "velero" -}} + {{- $namespace := $operator.namespace | default (include "tc.v1.common.lib.metadata.namespace" (dict "rootCtx" $ "objectData" $objectData "caller" "Backup Storage Location")) -}} + {{- $_ := set $objectData "namespace" $namespace -}} + + {{/* Perform validations */}} {{/* backupstoragelocations have a max name length of 253 */}} + {{- include "tc.v1.common.lib.chart.names.validation" (dict "name" $objectName "length" 253) -}} + {{- include "tc.v1.common.lib.metadata.validation" (dict "objectData" $objectData "caller" "Backup Storage Location") -}} + + {{/* Set the name of the backupstoragelocation */}} + {{- $_ := set $objectData "name" $objectName -}} + {{- $_ := set $objectData "shortName" $backupStorageLoc.name -}} + + {{/* Create secret with creds for provider, if the provider is not matched, it will skip creation */}} + {{- include "tc.v1.common.lib.velero.provider.secret" (dict "rootCtx" $ "objectData" $objectData "prefix" "bsl") -}} + + {{- include "tc.v1.common.lib.velero.backupstoragelocation.validation" (dict "objectData" $objectData) -}} + + {{/* Call class to create the object */}} + {{- include "tc.v1.common.class.velero.backupstoragelocation" (dict "rootCtx" $ "objectData" $objectData) -}} + + {{- end -}} + + {{- end -}} + +{{- end -}} diff --git a/library/common/templates/spawner/velero/_schedule.tpl b/library/common/templates/spawner/velero/_schedule.tpl new file mode 100644 index 000000000..85259fc75 --- /dev/null +++ b/library/common/templates/spawner/velero/_schedule.tpl @@ -0,0 +1,64 
@@ +{{/* schedule Spawwner */}} +{{/* Call this template: +{{ include "tc.v1.common.spawner.velero.schedule" $ -}} +*/}} + +{{- define "tc.v1.common.spawner.velero.schedule" -}} + {{- $fullname := include "tc.v1.common.lib.chart.names.fullname" $ -}} + + {{- range $name, $schedule := .Values.schedules -}} + + {{- $enabled := false -}} + {{- if hasKey $schedule "enabled" -}} + {{- if not (kindIs "invalid" $schedule.enabled) -}} + {{- $enabled = $schedule.enabled -}} + {{- else -}} + {{- fail (printf "Schedule - Expected the defined key [enabled] in [schedules.%s] to not be empty" $name) -}} + {{- end -}} + {{- end -}} + + {{- if kindIs "string" $enabled -}} + {{- $enabled = tpl $enabled $ -}} + + {{/* After tpl it becomes a string, not a bool */}} + {{- if eq $enabled "true" -}} + {{- $enabled = true -}} + {{- else if eq $enabled "false" -}} + {{- $enabled = false -}} + {{- end -}} + {{- end -}} + + {{- if $enabled -}} + + {{/* Create a copy of the schedule */}} + {{- $objectData := (mustDeepCopy $schedule) -}} + + {{- $objectName := (printf "%s-%s" $fullname $name) -}} + {{- if hasKey $objectData "expandObjectName" -}} + {{- if not $objectData.expandObjectName -}} + {{- $objectName = $name -}} + {{- end -}} + {{- end -}} + + {{/* Perform validations */}} {{/* schedules have a max name length of 253 */}} + {{- include "tc.v1.common.lib.chart.names.validation" (dict "name" $objectName "length" 253) -}} + {{- include "tc.v1.common.lib.velero.schedule.validation" (dict "objectData" $objectData) -}} + {{- include "tc.v1.common.lib.metadata.validation" (dict "objectData" $objectData "caller" "Schedule") -}} + + {{/* Set the name of the schedule */}} + {{- $_ := set $objectData "name" $objectName -}} + {{- $_ := set $objectData "shortName" $name -}} + + {{/* Set namespace to velero location or itself, just in case its used from within velero */}} + {{- $operator := index $.Values.operator "velero" -}} + {{- $namespace := $operator.namespace | default (include "tc.v1.common.lib.metadata.namespace" (dict "rootCtx" $ "objectData" $objectData "caller" "Schedule")) -}} + {{- $_ := set $objectData "namespace" $namespace -}} + + {{/* Call class to create the object */}} + {{- include "tc.v1.common.class.velero.schedule" (dict "rootCtx" $ "objectData" $objectData) -}} + + {{- end -}} + + {{- end -}} + +{{- end -}} diff --git a/library/common/templates/spawner/velero/_volumeSnapshotLocation.tpl b/library/common/templates/spawner/velero/_volumeSnapshotLocation.tpl new file mode 100644 index 000000000..867a2df83 --- /dev/null +++ b/library/common/templates/spawner/velero/_volumeSnapshotLocation.tpl @@ -0,0 +1,72 @@ +{{/* volumesnapshotlocation Spawwner */}} +{{/* Call this template: +{{ include "tc.v1.common.spawner.velero.volumesnapshotlocation" $ -}} +*/}} + +{{- define "tc.v1.common.spawner.velero.volumesnapshotlocation" -}} + {{- $fullname := include "tc.v1.common.lib.chart.names.fullname" $ -}} + + {{- range $volSnapLoc := .Values.volumeSnapshotLocation -}} + + {{- $enabled := false -}} + {{- if hasKey $volSnapLoc "enabled" -}} + {{- if not (kindIs "invalid" $volSnapLoc.enabled) -}} + {{- $enabled = $volSnapLoc.enabled -}} + {{- else -}} + {{- fail (printf "Volume Snapshot Location - Expected the defined key [enabled] in [volumeSnapshotLocation.%s] to not be empty" $volSnapLoc.name) -}} + {{- end -}} + {{- end -}} + + {{- if kindIs "string" $enabled -}} + {{- $enabled = tpl $enabled $ -}} + + {{/* After tpl it becomes a string, not a bool */}} + {{- if eq $enabled "true" -}} + {{- $enabled = true -}} + 
{{- else if eq $enabled "false" -}} + {{- $enabled = false -}} + {{- end -}} + {{- end -}} + + {{- if $enabled -}} + + {{/* Create a copy of the volumesnapshotlocation */}} + {{- $objectData := (mustDeepCopy $volSnapLoc) -}} + + {{- if not $volSnapLoc.name -}} + {{- fail "Volume Snapshot Location - Expected non-empty [name]" -}} + {{- end -}} + + {{- $objectName := (printf "%s-%s" $fullname $volSnapLoc.name) -}} + {{- if hasKey $objectData "expandObjectName" -}} + {{- if not $objectData.expandObjectName -}} + {{- $objectName = $volSnapLoc.name -}} + {{- end -}} + {{- end -}} + + {{/* Set namespace to velero location or itself, just in case its used from within velero */}} + {{- $operator := index $.Values.operator "velero" -}} + {{- $namespace := $operator.namespace | default (include "tc.v1.common.lib.metadata.namespace" (dict "rootCtx" $ "objectData" $objectData "caller" "Volume Snapshot Location")) -}} + {{- $_ := set $objectData "namespace" $namespace -}} + + {{/* Perform validations */}} {{/* volumesnapshotlocations have a max name length of 253 */}} + {{- include "tc.v1.common.lib.chart.names.validation" (dict "name" $objectName "length" 253) -}} + {{- include "tc.v1.common.lib.metadata.validation" (dict "objectData" $objectData "caller" "Volume Snapshot Location") -}} + + {{/* Set the name of the volumesnapshotlocation */}} + {{- $_ := set $objectData "name" $objectName -}} + {{- $_ := set $objectData "shortName" $volSnapLoc.name -}} + + {{/* Create secret with creds for provider, if the provider is not matched, it will skip creation */}} + {{- include "tc.v1.common.lib.velero.provider.secret" (dict "rootCtx" $ "objectData" $objectData "prefix" "vsl") -}} + + {{- include "tc.v1.common.lib.velero.volumesnapshotlocation.validation" (dict "objectData" $objectData) -}} + + {{/* Call class to create the object */}} + {{- include "tc.v1.common.class.velero.volumesnapshotlocation" (dict "rootCtx" $ "objectData" $objectData) -}} + + {{- end -}} + + {{- end -}} + +{{- end -}} diff --git a/library/common/values.yaml b/library/common/values.yaml index 61c39cdd4..58788be9b 100644 --- a/library/common/values.yaml +++ b/library/common/values.yaml @@ -708,6 +708,102 @@ webhook: # volumeBindingMode: Immediate # mountOptions: [] +# # Parameters for the BackupStorageLocation(s). Configure multiple by adding other element(s) to the backupStorageLocation slice. +# # See https://velero.io/docs/v1.6/api-types/backupstoragelocation/ +backupStorageLocation: [] +# backupStorageLocation: +# # name is the name of the backup storage location where backups should be stored. If a name is not provided, +# # a backup storage location will be created with the name "default". Optional. +# - name: +# # provider is the name for the backup storage location provider. +# provider: +# objectStorage: +# # bucket is the name of the bucket to store backups in. Required. +# bucket: +# # caCert defines a base64 encoded CA bundle to use when verifying TLS connections to the provider. Optional. +# caCert: +# # prefix is the directory under which all Velero data should be stored within the bucket. Optional. +# prefix: +# # validationFrequency defines how frequently Velero should validate the object storage. Optional. +# validationFrequency: +# # accessMode determines if velero can write to this backup storage location. Optional. +# # default to ReadWrite, ReadOnly is used during migrations and restores. 
+# accessMode: ReadWrite +# credential: +# # AWS/s3 credentials to be put into secret (mandatory if provider == aws/s3) +# aws: +# id: fdgsdfghsdfgh +# key: dfgdfhsdfgh +# # name of the secret used by this backupStorageLocation. +# name: +# # name of key that contains the secret data to be used. +# key: +# # Additional provider-specific configuration. See link above +# # for details of required/optional fields for your provider. +# config: {} +# # region: +# # s3ForcePathStyle: +# # s3Url: +# # kmsKeyId: +# # resourceGroup: +# # The ID of the subscription containing the storage account, if different from the cluster’s subscription. (Azure only) +# # subscriptionId: +# # storageAccount: +# # publicUrl: +# # Name of the GCP service account to use for this backup storage location. Specify the +# # service account here if you want to use workload identity instead of providing the key file.(GCP only) +# # serviceAccount: +# # Option to skip certificate validation or not if insecureSkipTLSVerify is set to be true, the client side should set the +# # flag. For Velero client Command like velero backup describe, velero backup logs needs to add the flag --insecure-skip-tls-verify +# # insecureSkipTLSVerify: +# +# # Parameters for the VolumeSnapshotLocation(s). Configure multiple by adding other element(s) to the volumeSnapshotLocation slice. +# # See https://velero.io/docs/v1.6/api-types/volumesnapshotlocation/ +volumeSnapshotLocation: [] +# volumeSnapshotLocation: +# # name is the name of the volume snapshot location where snapshots are being taken. Required. +# - name: +# enabled: false +# # provider is the name for the volume snapshot provider. +# provider: +# credential: +# # AWS/s3 credentials to be put into secret (mandatory if provider == aws/s3) +# aws: +# id: fdgsdfghsdfgh +# key: dfgdfhsdfgh +# # name of the secret used by this volumeSnapshotLocation. (Optional/Advanced) +# name: +# # name of key that contains the secret data to be used. (Optional/Advanced) +# key: +# # Additional provider-specific configuration. See link above +# # for details of required/optional fields for your provider. +# config: {} +# # region: +# # apiTimeout: +# # resourceGroup: +# # The ID of the subscription where volume snapshots should be stored, if different from the cluster’s subscription. If specified, also requires `configuration.volumeSnapshotLocation.config.resourceGroup`to be set. (Azure only) +# # subscriptionId: +# # incremental: +# # snapshotLocation: +# # project: + +# Backup schedules to create. +schedules: {} +# Eg: +# schedules: +# mybackup: +# disabled: false +# labels: +# myenv: foo +# annotations: +# myenv: foo +# schedule: "0 0 * * *" +# useOwnerReferencesInBackup: false +# template: +# ttl: "240h" +# storageLocation: default +# includedNamespaces: +# - foo # # -- create volumeSnapshotClass on demand # volumeSnapshotClass: # example:
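
For reference, a values entry like the one exercised in tests/veleroBackupStorageLocation/spec_test.yaml should render roughly the following objects. This is a sketch based on the test assertions above, assuming release name test-release-name and chart name common-test; the standard chart labels/annotations are omitted, and the namespace comes from .Values.operator.velero.namespace when set, otherwise from the usual metadata namespace helper.

backupStorageLocation:
  - enabled: true
    name: my-snap1
    provider: aws
    credential:
      aws:
        id: my-id
        key: my-key
    objectStorage:
      bucket: my-bucket

# Rendered output (sketch):
---
apiVersion: v1
kind: Secret
metadata:
  name: bsl-test-release-name-common-test-my-snap1
stringData:
  cloud: |-
    [default]
    aws_access_key_id=my-id
    aws_secret_access_key=my-key
---
apiVersion: velero.io/v1
kind: BackupStorageLocation
metadata:
  name: test-release-name-common-test-my-snap1
spec:
  provider: velero.io/aws
  credential:
    name: bsl-test-release-name-common-test-my-snap1
    key: cloud
  accessMode: ReadWrite
  objectStorage:
    bucket: "my-bucket"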
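
Similarly, a schedules entry such as the one in tests/veleroSchedule/spec_test.yaml should yield approximately the manifest below (same naming assumptions as above, metadata again omitted):

schedules:
  my-sched:
    enabled: true
    schedule: "0 2 * * *"
    template:
      ttl: 720h
      includeClusterResources: true
      snapshotVolumes: true

# Rendered output (sketch):
---
apiVersion: velero.io/v1
kind: Schedule
metadata:
  name: test-release-name-common-test-my-sched
spec:
  schedule: "0 2 * * *"
  template:
    ttl: 720h
    includeClusterResources: true
    snapshotVolumes: true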