From f07a5aa53df9170107cb5e8bdc15f52d91484977 Mon Sep 17 00:00:00 2001 From: Ryan Emerson Date: Mon, 30 Sep 2024 16:15:51 +0100 Subject: [PATCH] Add documentation for defining HorizontalPodAutoscaler #2133 --- .../stories/assembly_auto_scaling.adoc | 27 +++++++++++++++++++ documentation/asciidoc/titles/stories.adoc | 1 + .../topics/proc_configuring_auto_scaling.adoc | 18 +++++++++++++ .../yaml/horizontal_pod_autoscaler.yaml | 18 +++++++++++++ 4 files changed, 64 insertions(+) create mode 100644 documentation/asciidoc/stories/assembly_auto_scaling.adoc create mode 100644 documentation/asciidoc/topics/proc_configuring_auto_scaling.adoc create mode 100644 documentation/asciidoc/topics/yaml/horizontal_pod_autoscaler.yaml diff --git a/documentation/asciidoc/stories/assembly_auto_scaling.adoc b/documentation/asciidoc/stories/assembly_auto_scaling.adoc new file mode 100644 index 000000000..2ccebb52e --- /dev/null +++ b/documentation/asciidoc/stories/assembly_auto_scaling.adoc @@ -0,0 +1,27 @@ +ifdef::context[:parent-context: {context}] +[id='auto-scaling'] +:context: scaling += Auto Scaling + +[role="_abstract"] +Kubernetes includes the `HorizontalPodAutoscaler`, which allows StatefulSets or Deployments to be automatically scaled up or +down based upon specified metrics. The Infinispan CR exposes the `.status.scale` sub-resource, which enables `HorizontalPodAutoscaler` +resources to target the Infinispan CR. + +Before defining a `HorizontalPodAutoscaler` configuration, consider the types of {brandname} caches that you define. Distributed +and Replicated caches have very different scaling requirements, so defining a `HorizontalPodAutoscaler` for servers running +a combination of these cache types may not be advantageous.
For example, defining a `HorizontalPodAutoscaler` that scales +when memory usage reaches a certain percentage will allow overall cache capacity to be increased when defining Distributed +caches as cache entries are spread across pods; however, it will not work with Replicated caches because every pod hosts all cache +entries. Conversely, configuring a `HorizontalPodAutoscaler` based upon CPU usage will be more beneficial for clusters +with Replicated caches, as every pod contains all cache entries and so distributing read requests across additional nodes +will allow a greater number of requests to be processed simultaneously. + +include::{topics}/proc_configuring_auto_scaling.adoc[leveloffset=+1] + +IMPORTANT: The `HorizontalPodAutoscaler` should be removed when upgrading a {brandname} cluster, as the automatic scaling will +cause the upgrade process to enter an unexpected state, because the Operator needs to scale the cluster down to 0 pods. + +// Restore the parent context. +ifdef::parent-context[:context: {parent-context}] +ifndef::parent-context[:!context:] diff --git a/documentation/asciidoc/titles/stories.adoc b/documentation/asciidoc/titles/stories.adoc index 2da306bdd..9c07fb824 100644 --- a/documentation/asciidoc/titles/stories.adoc +++ b/documentation/asciidoc/titles/stories.adoc @@ -13,6 +13,7 @@ include::{stories}/assembly_network_access.adoc[leveloffset=+1] include::{stories}/assembly_cross_site_replication.adoc[leveloffset=+1] include::{stories}/assembly_monitoring.adoc[leveloffset=+1] include::{stories}/assembly_anti_affinity.adoc[leveloffset=+1] +include::{stories}/assembly_auto_scaling.adoc[leveloffset=+1] include::{stories}/assembly_cache_cr.adoc[leveloffset=+1] include::{stories}/assembly_batch_cr.adoc[leveloffset=+1] include::{stories}/assembly_backing_up_restoring.adoc[leveloffset=+1] diff --git a/documentation/asciidoc/topics/proc_configuring_auto_scaling.adoc b/documentation/asciidoc/topics/proc_configuring_auto_scaling.adoc new file mode 100644 index
000000000..badb3442e --- /dev/null +++ b/documentation/asciidoc/topics/proc_configuring_auto_scaling.adoc @@ -0,0 +1,18 @@ +[id='configuring_auto-scaling-{context}'] += Configuring HorizontalPodAutoscaler + +[role="_abstract"] +Create a `HorizontalPodAutoscaler` resource that targets your `Infinispan` CR. + +.Procedure + +. Define a `HorizontalPodAutoscaler` resource in the same namespace as your `Infinispan` CR. ++ +[source,options="nowrap",subs=attributes+] +---- +include::yaml/horizontal_pod_autoscaler.yaml[] +---- ++ +<1> The name of your `Infinispan` CR. + +NOTE: If using a metric resource of type `cpu` or `memory`, you must configure requests/limits for this resource in your `Infinispan` CR. \ No newline at end of file diff --git a/documentation/asciidoc/topics/yaml/horizontal_pod_autoscaler.yaml b/documentation/asciidoc/topics/yaml/horizontal_pod_autoscaler.yaml new file mode 100644 index 000000000..64fca6a0b --- /dev/null +++ b/documentation/asciidoc/topics/yaml/horizontal_pod_autoscaler.yaml @@ -0,0 +1,18 @@ +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: infinispan-auto +spec: + scaleTargetRef: + apiVersion: infinispan.org/v1 + kind: Infinispan + name: example # <1> + minReplicas: 1 + maxReplicas: 10 + metrics: + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: 50