[CI] Helm Chart Update rook-ceph
Hritik Batra committed Nov 11, 2024
1 parent d9c0ee5 commit fb262a5
Showing 9 changed files with 164 additions and 35 deletions.
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -15,6 +15,8 @@ All releases and the changes included in them (pulled from git commits added sin
- Updated argo-cd from version 7.6.12 to 7.7.0

### Patch Version Upgrades
+- Updated rook-ceph-cluster from version v1.15.4 to v1.15.5
+- Updated rook-ceph from version v1.15.4 to v1.15.5
- Updated redmine from version 30.0.2 to 30.0.4
- Updated rabbitmq-cluster-operator from version 4.3.25 to 4.3.27
- Updated opencost from version 1.42.2 to 1.42.3
8 changes: 4 additions & 4 deletions argocd-helm-charts/rook-ceph/Chart.lock
@@ -1,9 +1,9 @@
 dependencies:
 - name: rook-ceph
   repository: https://charts.rook.io/release
-  version: v1.15.4
+  version: v1.15.5
 - name: rook-ceph-cluster
   repository: https://charts.rook.io/release
-  version: v1.15.4
-digest: sha256:ae429918c2d45e303db8cc6ee31f17b845680df9d4167a7018533bbcfa0a706b
-generated: "2024-10-22T13:38:38.263303183+05:30"
+  version: v1.15.5
+digest: sha256:4815016ef3b49f530de5edbb9ad1ea532ff7f610ff789a0d3e57fa972faf9584
+generated: "2024-11-11T12:27:44.546133575+05:30"
4 changes: 2 additions & 2 deletions argocd-helm-charts/rook-ceph/Chart.yaml
@@ -3,10 +3,10 @@ name: rook-ceph
version: 1.0.0
dependencies:
- name: rook-ceph
-version: v1.15.4
+version: v1.15.5
repository: https://charts.rook.io/release
#repository: "oci://ghcr.io/Obmondo"
- name: rook-ceph-cluster
-version: v1.15.4
+version: v1.15.5
repository: https://charts.rook.io/release
#repository: "oci://ghcr.io/Obmondo"
4 changes: 2 additions & 2 deletions argocd-helm-charts/rook-ceph/charts/rook-ceph-cluster/Chart.yaml
@@ -1,5 +1,5 @@
apiVersion: v2
-appVersion: v1.15.4
+appVersion: v1.15.5
dependencies:
- name: library
repository: file://../library
@@ -9,4 +9,4 @@ icon: https://rook.io/images/rook-logo.svg
name: rook-ceph-cluster
sources:
- https://github.com/rook/rook
-version: v1.15.4
+version: v1.15.5
22 changes: 11 additions & 11 deletions argocd-helm-charts/rook-ceph/charts/rook-ceph-cluster/values.yaml
@@ -100,7 +100,7 @@ cephClusterSpec:
# Do not set to true in production.
allowUnsupported: false

-# The path on the host where configuration files will be persisted. Must be specified.
+# The path on the host where configuration files will be persisted. Must be specified. If there are multiple clusters, the directory must be unique for each cluster.
# Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
# In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment.
dataDirHostPath: /var/lib/rook
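The sentence added to this comment is about running more than one Ceph cluster under the same operator: each cluster persists its mon state under dataDirHostPath, so clusters sharing a path would clobber each other. A minimal sketch of two per-cluster value overrides (the file names and paths are illustrative, not from this commit):

```yaml
# values-cluster-a.yaml (hypothetical override)
cephClusterSpec:
  dataDirHostPath: /var/lib/rook-cluster-a

# values-cluster-b.yaml (hypothetical override)
cephClusterSpec:
  dataDirHostPath: /var/lib/rook-cluster-b
```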
@@ -157,7 +157,7 @@ cephClusterSpec:
# the corresponding "backend protocol" annotation(s) for your ingress controller of choice)
ssl: true

-# Network configuration, see: https://github.com/rook/rook/blob/v1.15.4/Documentation/CRDs/Cluster/ceph-cluster-crd.md#network-configuration-settings
+# Network configuration, see: https://github.com/rook/rook/blob/v1.15.5/Documentation/CRDs/Cluster/ceph-cluster-crd.md#network-configuration-settings
network:
connections:
# Whether to encrypt the data in transit across the wire to prevent eavesdropping the data on the network.
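As a rough illustration of what this section controls, an override enabling in-transit encryption could look like the sketch below; the field values are assumptions for illustration, and this commit does not change the chart's defaults here:

```yaml
cephClusterSpec:
  network:
    connections:
      encryption:
        enabled: true    # encrypt traffic between Ceph daemons and clients on the wire
      compression:
        enabled: false   # optional on-wire compression, typically left off on fast networks
```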
@@ -208,7 +208,7 @@ cephClusterSpec:
periodicity: daily # one of: hourly, daily, weekly, monthly
maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.

-# automate [data cleanup process](https://github.com/rook/rook/blob/v1.15.4/Documentation/Storage-Configuration/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction.
+# automate [data cleanup process](https://github.com/rook/rook/blob/v1.15.5/Documentation/Storage-Configuration/ceph-teardown.md#delete-the-data-on-hosts) in cluster destruction.
cleanupPolicy:
# Since cluster cleanup is destructive to data, confirmation is required.
# To destroy all Rook data on hosts during uninstall, confirmation must be set to "yes-really-destroy-data".
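Per the comment above, teardown-time cleanup stays inert until the confirmation string is set; a minimal sketch of opting in (only sensible immediately before a deliberate uninstall):

```yaml
cephClusterSpec:
  cleanupPolicy:
    confirmation: "yes-really-destroy-data"  # destructive: wipes Rook data on hosts during teardown
```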
@@ -382,7 +382,7 @@ cephClusterSpec:
# The section for configuring management of daemon disruptions during upgrade or fencing.
disruptionManagement:
# If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically
-# via the strategy outlined in the [design](https://github.com/rook/rook/blob/v1.15.4/design/ceph/ceph-managed-disruptionbudgets.md). The operator will
+# via the strategy outlined in the [design](https://github.com/rook/rook/blob/v1.15.5/design/ceph/ceph-managed-disruptionbudgets.md). The operator will
# block eviction of OSDs by default and unblock them safely when drains are detected.
managePodBudgets: true
# A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the
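A short sketch of the two settings this section describes — managePodBudgets comes from the quoted comment, while the exact name of the noout timeout field (osdMaintenanceTimeout) is an assumption based on the truncated comment above:

```yaml
cephClusterSpec:
  disruptionManagement:
    managePodBudgets: true       # operator creates/manages PDBs for OSD, Mon, RGW, and MDS daemons
    osdMaintenanceTimeout: 30    # minutes a failureDomain is held in noout during drains (assumed field name)
```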
@@ -441,7 +441,7 @@ ingress:
# @default -- See [below](#ceph-block-pools)
cephBlockPools:
- name: ceph-blockpool
-# see https://github.com/rook/rook/blob/v1.15.4/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration
+# see https://github.com/rook/rook/blob/v1.15.5/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration
spec:
failureDomain: host
replicated:
@@ -465,7 +465,7 @@ cephBlockPools:
# - key: rook-ceph-role
# values:
# - storage-node
-# see https://github.com/rook/rook/blob/v1.15.4/Documentation/Storage-Configuration/Block-Storage-RBD/block-storage.md#provision-storage for available configuration
+# see https://github.com/rook/rook/blob/v1.15.5/Documentation/Storage-Configuration/Block-Storage-RBD/block-storage.md#provision-storage for available configuration
parameters:
# (optional) mapOptions is a comma-separated list of map options.
# For krbd options refer
@@ -506,7 +506,7 @@ cephBlockPools:
# @default -- See [below](#ceph-file-systems)
cephFileSystems:
- name: ceph-filesystem
-# see https://github.com/rook/rook/blob/v1.15.4/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#filesystem-settings for available configuration
+# see https://github.com/rook/rook/blob/v1.15.5/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#filesystem-settings for available configuration
spec:
metadataPool:
replicated:
@@ -515,7 +515,7 @@ cephFileSystems:
- failureDomain: host
replicated:
size: 3
-# Optional and highly recommended, 'data0' by default, see https://github.com/rook/rook/blob/v1.15.4/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#pools
+# Optional and highly recommended, 'data0' by default, see https://github.com/rook/rook/blob/v1.15.5/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#pools
name: data0
metadataServer:
activeCount: 1
@@ -539,7 +539,7 @@ cephFileSystems:
annotations: {}
labels: {}
mountOptions: []
-# see https://github.com/rook/rook/blob/v1.15.4/Documentation/Storage-Configuration/Shared-Filesystem-CephFS/filesystem-storage.md#provision-storage for available configuration
+# see https://github.com/rook/rook/blob/v1.15.5/Documentation/Storage-Configuration/Shared-Filesystem-CephFS/filesystem-storage.md#provision-storage for available configuration
parameters:
# The secrets contain Ceph admin credentials.
csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
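For orientation, a claim against the CephFS storage class this block configures might look like the following sketch; the class name ceph-filesystem is the chart's usual default, assumed here rather than shown in this diff:

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: cephfs-example
spec:
  accessModes:
    - ReadWriteMany              # CephFS supports shared RWX volumes
  resources:
    requests:
      storage: 1Gi
  storageClassName: ceph-filesystem
```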
@@ -581,7 +581,7 @@ cephBlockPoolsVolumeSnapshotClass:
# @default -- See [below](#ceph-object-stores)
cephObjectStores:
- name: ceph-objectstore
-# see https://github.com/rook/rook/blob/v1.15.4/Documentation/CRDs/Object-Storage/ceph-object-store-crd.md#object-store-settings for available configuration
+# see https://github.com/rook/rook/blob/v1.15.5/Documentation/CRDs/Object-Storage/ceph-object-store-crd.md#object-store-settings for available configuration
spec:
metadataPool:
failureDomain: host
@@ -612,7 +612,7 @@ cephObjectStores:
volumeBindingMode: "Immediate"
annotations: {}
labels: {}
-# see https://github.com/rook/rook/blob/v1.15.4/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-bucket-claim.md#storageclass for available configuration
+# see https://github.com/rook/rook/blob/v1.15.5/Documentation/Storage-Configuration/Object-Storage-RGW/ceph-object-bucket-claim.md#storageclass for available configuration
parameters:
# note: objectStoreNamespace and objectStoreName are configured by the chart
region: us-east-1
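The ceph-object-bucket-claim documentation linked above pairs this storage class with an ObjectBucketClaim; a hedged sketch (the class name ceph-bucket is the chart's usual default, assumed here):

```yaml
apiVersion: objectbucket.io/v1alpha1
kind: ObjectBucketClaim
metadata:
  name: example-bucket
spec:
  generateBucketName: example-bucket  # prefix; a unique suffix is appended
  storageClassName: ceph-bucket
```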
4 changes: 2 additions & 2 deletions argocd-helm-charts/rook-ceph/charts/rook-ceph/Chart.yaml
@@ -1,5 +1,5 @@
apiVersion: v2
-appVersion: v1.15.4
+appVersion: v1.15.5
dependencies:
- name: library
repository: file://../library
@@ -9,4 +9,4 @@ icon: https://rook.io/images/rook-logo.svg
name: rook-ceph
sources:
- https://github.com/rook/rook
-version: v1.15.4
+version: v1.15.5
@@ -170,9 +170,6 @@ data:
{{- if .Values.csi.nfsProvisionerNodeAffinity }}
CSI_NFS_PROVISIONER_NODE_AFFINITY: {{ .Values.csi.nfsProvisionerNodeAffinity }}
{{- end }}
-{{- if .Values.csi.allowUnsupportedVersion }}
-ROOK_CSI_ALLOW_UNSUPPORTED_VERSION: {{ .Values.csi.allowUnsupportedVersion | quote }}
-{{- end }}
{{- if .Values.csi.pluginTolerations }}
CSI_PLUGIN_TOLERATIONS: {{ toYaml .Values.csi.pluginTolerations | quote }}
{{- end }}
145 changes: 139 additions & 6 deletions argocd-helm-charts/rook-ceph/charts/rook-ceph/templates/resources.yaml
@@ -3613,6 +3613,9 @@ spec:
type: array
onlyApplyOSDPlacement:
type: boolean
+scheduleAlways:
+description: Whether to always schedule OSDs on a node even if the node is not currently scheduleable or ready
+type: boolean
storageClassDeviceSets:
items:
description: StorageClassDeviceSet is a storage class device set
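The scheduleAlways flag added above sits next to onlyApplyOSDPlacement and storageClassDeviceSets, i.e. in the CephCluster storage section; a minimal sketch of setting it, with the field's placement inferred from the surrounding schema rather than stated in this diff:

```yaml
cephClusterSpec:
  storage:
    useAllNodes: true
    useAllDevices: true
    scheduleAlways: true  # keep OSDs scheduled even when their node is unschedulable or not ready
```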
@@ -12330,12 +12333,77 @@ spec:
x-kubernetes-validations:
- message: object store shared metadata pool is immutable
rule: self == oldSelf
+poolPlacements:
+description: |-
+PoolPlacements control which Pools are associated with a particular RGW bucket.
+Once PoolPlacements are defined, RGW client will be able to associate pool
+with ObjectStore bucket by providing "<LocationConstraint>" during s3 bucket creation
+or "X-Storage-Policy" header during swift container creation.
+See: https://docs.ceph.com/en/latest/radosgw/placement/#placement-targets
+PoolPlacement with name: "default" will be used as a default pool if no option
+is provided during bucket creation.
+If default placement is not provided, spec.sharedPools.dataPoolName and spec.sharedPools.MetadataPoolName will be used as default pools.
+If spec.sharedPools are also empty, then RGW pools (spec.dataPool and spec.metadataPool) will be used as defaults.
+items:
+properties:
+dataNonECPoolName:
+description: |-
+The data pool used to store ObjectStore data that cannot use erasure coding (ex: multi-part uploads).
+If dataPoolName is not erasure coded, then there is no need for dataNonECPoolName.
+type: string
+dataPoolName:
+description: The data pool used to store ObjectStore objects data.
+minLength: 1
+type: string
+default:
+description: |-
+Sets given placement as default. Only one placement in the list can be marked as default.
+Default is false.
+type: boolean
+metadataPoolName:
+description: The metadata pool used to store ObjectStore bucket index.
+minLength: 1
+type: string
+name:
+description: Pool placement name. Name can be arbitrary. Placement with name "default" will be used as default.
+minLength: 1
+pattern: ^[a-zA-Z0-9._/-]+$
+type: string
+storageClasses:
+description: |-
+StorageClasses can be selected by user to override dataPoolName during object creation.
+Each placement has default STANDARD StorageClass pointing to dataPoolName.
+This list allows defining additional StorageClasses on top of default STANDARD storage class.
+items:
+properties:
+dataPoolName:
+description: DataPoolName is the data pool used to store ObjectStore objects data.
+minLength: 1
+type: string
+name:
+description: |-
+Name is the StorageClass name. Ceph allows arbitrary name for StorageClasses,
+however most clients/libs insist on AWS names so it is recommended to use
+one of the valid x-amz-storage-class values for better compatibility:
+REDUCED_REDUNDANCY | STANDARD_IA | ONEZONE_IA | INTELLIGENT_TIERING | GLACIER | DEEP_ARCHIVE | OUTPOSTS | GLACIER_IR | SNOW | EXPRESS_ONEZONE
+See AWS docs: https://aws.amazon.com/de/s3/storage-classes/
+minLength: 1
+pattern: ^[a-zA-Z0-9._/-]+$
+type: string
+required:
+- dataPoolName
+- name
+type: object
+type: array
+required:
+- dataPoolName
+- metadataPoolName
+- name
+type: object
+type: array
preserveRadosNamespaceDataOnDelete:
description: Whether the RADOS namespaces should be preserved on deletion of the object store
type: boolean
-required:
-- dataPoolName
-- metadataPoolName
type: object
zone:
description: The multisite info
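To make the new poolPlacements schema above concrete, a sketch of a CephObjectStore using it, with every pool and placement name invented for illustration:

```yaml
apiVersion: ceph.rook.io/v1
kind: CephObjectStore
metadata:
  name: my-store
spec:
  gateway:
    port: 80
    instances: 1
  sharedPools:
    poolPlacements:
      - name: default                       # used when a bucket specifies no placement
        default: true
        metadataPoolName: rgw-meta-pool
        dataPoolName: rgw-data-pool
        dataNonECPoolName: rgw-data-non-ec  # only needed when dataPoolName is erasure coded
        storageClasses:
          - name: STANDARD_IA               # AWS-style name for client compatibility
            dataPoolName: rgw-cold-pool
```

Per the schema description, a client would then select a placement at bucket creation time via the S3 LocationConstraint or the Swift X-Storage-Policy header.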
@@ -13192,12 +13260,77 @@ spec:
x-kubernetes-validations:
- message: object store shared metadata pool is immutable
rule: self == oldSelf
+poolPlacements:
+description: |-
+PoolPlacements control which Pools are associated with a particular RGW bucket.
+Once PoolPlacements are defined, RGW client will be able to associate pool
+with ObjectStore bucket by providing "<LocationConstraint>" during s3 bucket creation
+or "X-Storage-Policy" header during swift container creation.
+See: https://docs.ceph.com/en/latest/radosgw/placement/#placement-targets
+PoolPlacement with name: "default" will be used as a default pool if no option
+is provided during bucket creation.
+If default placement is not provided, spec.sharedPools.dataPoolName and spec.sharedPools.MetadataPoolName will be used as default pools.
+If spec.sharedPools are also empty, then RGW pools (spec.dataPool and spec.metadataPool) will be used as defaults.
+items:
+properties:
+dataNonECPoolName:
+description: |-
+The data pool used to store ObjectStore data that cannot use erasure coding (ex: multi-part uploads).
+If dataPoolName is not erasure coded, then there is no need for dataNonECPoolName.
+type: string
+dataPoolName:
+description: The data pool used to store ObjectStore objects data.
+minLength: 1
+type: string
+default:
+description: |-
+Sets given placement as default. Only one placement in the list can be marked as default.
+Default is false.
+type: boolean
+metadataPoolName:
+description: The metadata pool used to store ObjectStore bucket index.
+minLength: 1
+type: string
+name:
+description: Pool placement name. Name can be arbitrary. Placement with name "default" will be used as default.
+minLength: 1
+pattern: ^[a-zA-Z0-9._/-]+$
+type: string
+storageClasses:
+description: |-
+StorageClasses can be selected by user to override dataPoolName during object creation.
+Each placement has default STANDARD StorageClass pointing to dataPoolName.
+This list allows defining additional StorageClasses on top of default STANDARD storage class.
+items:
+properties:
+dataPoolName:
+description: DataPoolName is the data pool used to store ObjectStore objects data.
+minLength: 1
+type: string
+name:
+description: |-
+Name is the StorageClass name. Ceph allows arbitrary name for StorageClasses,
+however most clients/libs insist on AWS names so it is recommended to use
+one of the valid x-amz-storage-class values for better compatibility:
+REDUCED_REDUNDANCY | STANDARD_IA | ONEZONE_IA | INTELLIGENT_TIERING | GLACIER | DEEP_ARCHIVE | OUTPOSTS | GLACIER_IR | SNOW | EXPRESS_ONEZONE
+See AWS docs: https://aws.amazon.com/de/s3/storage-classes/
+minLength: 1
+pattern: ^[a-zA-Z0-9._/-]+$
+type: string
+required:
+- dataPoolName
+- name
+type: object
+type: array
+required:
+- dataPoolName
+- metadataPoolName
+- name
+type: object
+type: array
preserveRadosNamespaceDataOnDelete:
description: Whether the RADOS namespaces should be preserved on deletion of the object store
type: boolean
-required:
-- dataPoolName
-- metadataPoolName
type: object
zoneGroup:
description: The display name for the ceph users
