diff --git a/assets/rke2-coredns/rke2-coredns-1.29.007.tgz b/assets/rke2-coredns/rke2-coredns-1.29.007.tgz
new file mode 100755
index 00000000..8a27a5ff
Binary files /dev/null and b/assets/rke2-coredns/rke2-coredns-1.29.007.tgz differ
diff --git a/charts/rke2-coredns/rke2-coredns/1.29.007/.helmignore b/charts/rke2-coredns/rke2-coredns/1.29.007/.helmignore
new file mode 100755
index 00000000..7c04072e
--- /dev/null
+++ b/charts/rke2-coredns/rke2-coredns/1.29.007/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+OWNERS
diff --git a/charts/rke2-coredns/rke2-coredns/1.29.007/Chart.yaml b/charts/rke2-coredns/rke2-coredns/1.29.007/Chart.yaml
new file mode 100755
index 00000000..debc0a4c
--- /dev/null
+++ b/charts/rke2-coredns/rke2-coredns/1.29.007/Chart.yaml
@@ -0,0 +1,26 @@
+annotations:
+  artifacthub.io/changes: |
+    - kind: changed
+      description: Ignore duplicate strings in the fullname helper template
+    - kind: removed
+      description: Removed deprecated "engine: gotpl" from the Chart.yaml
+apiVersion: v2
+appVersion: 1.11.1
+description: CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS
+  Services
+home: https://coredns.io
+icon: https://coredns.io/images/CoreDNS_Colour_Horizontal.png
+keywords:
+- coredns
+- dns
+- kubedns
+maintainers:
+- name: mrueg
+- name: haad
+- name: hagaibarel
+- name: shubham-cmyk
+name: rke2-coredns
+sources:
+- https://github.com/coredns/coredns
+type: application
+version: 1.29.007
diff --git a/charts/rke2-coredns/rke2-coredns/1.29.007/README.md b/charts/rke2-coredns/rke2-coredns/1.29.007/README.md
new file mode 100755
index 00000000..b98c4a24
--- /dev/null
+++ b/charts/rke2-coredns/rke2-coredns/1.29.007/README.md
@@ -0,0 +1,248 @@
+# CoreDNS
+
+[CoreDNS](https://coredns.io/) is a DNS server that chains plugins and provides DNS Services
+
+## TL;DR
+
+```console
+$ helm repo add coredns https://coredns.github.io/helm
+$ helm --namespace=kube-system install coredns coredns/coredns
+```
+
+## Introduction
+
+This chart bootstraps a [CoreDNS](https://github.com/coredns/coredns) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. This chart provides DNS services and can be deployed in multiple configurations to support the scenarios listed below:
+
+- CoreDNS as a cluster DNS service and a drop-in replacement for Kube/SkyDNS. This is the default mode and CoreDNS is deployed as a cluster service in the kube-system namespace. This mode is chosen by setting `isClusterService` to true.
+- CoreDNS as an external DNS service. In this mode CoreDNS is deployed like any Kubernetes app in a user-specified namespace. The CoreDNS service can be exposed outside the cluster by using either the NodePort or LoadBalancer type of service. This mode is chosen by setting `isClusterService` to false.
+- CoreDNS as an external DNS provider for Kubernetes federation. This is a sub-case of the 'external DNS service' mode, which uses the etcd plugin as the CoreDNS backend. This deployment mode has a dependency on the `etcd-operator` chart, which needs to be pre-installed.
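+
+For example, to run CoreDNS as an external DNS service rather than the default cluster service, `isClusterService` and `serviceType` can be overridden at install time (a minimal sketch; the `dns` namespace and the NodePort service type shown here are illustrative assumptions, not defaults):
+
+```console
+$ helm --namespace=dns install coredns coredns/coredns \
+    --set isClusterService=false \
+    --set serviceType=NodePort
+```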
+ +## Prerequisites + +- Kubernetes 1.10 or later + +## Installing the Chart + +The chart can be installed as follows: + +```console +$ helm repo add coredns https://coredns.github.io/helm +$ helm --namespace=kube-system install coredns coredns/coredns +``` + +The command deploys CoreDNS on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists various ways to override default configuration during deployment. + +> **Tip**: List all releases using `helm list --all-namespaces` + +## Uninstalling the Chart + +To uninstall/delete the `coredns` deployment: + +```console +$ helm uninstall coredns +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Configuration + +| Parameter | Description | Default | +| :--------------------------------------------- | :---------------------------------------------------------------------------------------------------------------------------------------- | :----------------------------------------------------------- | +| `image.repository` | The image repository to pull from | coredns/coredns | +| `image.tag` | The image tag to pull from (derived from Chart.yaml) | `` | +| `image.pullPolicy` | Image pull policy | IfNotPresent | +| `image.pullSecrets` | Specify container image pull secrets | `[]` | +| `replicaCount` | Number of replicas | 1 | +| `resources.limits.cpu` | Container maximum CPU | `100m` | +| `resources.limits.memory` | Container maximum memory | `128Mi` | +| `resources.requests.cpu` | Container requested CPU | `100m` | +| `resources.requests.memory` | Container requested memory | `128Mi` | +| `serviceType` | Kubernetes Service type | `ClusterIP` | +| `prometheus.service.enabled` | Set this to `true` to create Service for Prometheus metrics | `false` | +| `prometheus.service.annotations` | Annotations to add to the metrics Service | `{prometheus.io/scrape: "true", prometheus.io/port: "9153"}` | +| `prometheus.monitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` | +| `prometheus.monitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | {} | +| `prometheus.monitor.namespace` | Selector to select which namespaces the Endpoints objects are discovered from. | `""` | +| `prometheus.monitor.interval` | Scrape interval for polling the metrics endpoint. (E.g. "30s") | `""` | +| `service.clusterIP` | IP address to assign to service | `""` | +| `service.clusterIPs` | IP addresses to assign to service | `[]` | +| `service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `""` | +| `service.externalIPs` | External IP addresses | [] | +| `service.externalTrafficPolicy` | Enable client source IP preservation | [] | +| `service.ipFamilyPolicy` | Service dual-stack policy | `""` | +| `service.annotations` | Annotations to add to service | {} | +| `serviceAccount.create` | If true, create & use serviceAccount | false | +| `serviceAccount.name` | If not set & create is true, use template fullname | | +| `rbac.create` | If true, create & use RBAC resources | true | +| `rbac.pspEnable` | Specifies whether a PodSecurityPolicy should be created. | `false` | +| `isClusterService` | Specifies whether chart should be deployed as cluster-service or normal k8s app. 
| true | +| `priorityClassName` | Name of Priority Class to assign pods | `""` | +| `securityContext` | securityContext definition for pods | capabilities.add.NET_BIND_SERVICE | +| `servers` | Configuration for CoreDNS and plugins | See values.yml | +| `livenessProbe.enabled` | Enable/disable the Liveness probe | `true` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `60` | +| `livenessProbe.periodSeconds` | How often to perform the probe | `10` | +| `livenessProbe.timeoutSeconds` | When the probe times out | `5` | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `readinessProbe.enabled` | Enable/disable the Readiness probe | `true` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` | +| `readinessProbe.periodSeconds` | How often to perform the probe | `10` | +| `readinessProbe.timeoutSeconds` | When the probe times out | `5` | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `affinity` | Affinity settings for pod assignment | {} | +| `nodeSelector` | Node labels for pod assignment | {} | +| `tolerations` | Tolerations for pod assignment | [] | +| `zoneFiles` | Configure custom Zone files | [] | +| `extraContainers` | Optional array of sidecar containers | [] | +| `extraVolumes` | Optional array of volumes to create | [] | +| `extraVolumeMounts` | Optional array of volumes to mount inside the CoreDNS container | [] | +| `extraSecrets` | Optional array of secrets to mount inside the CoreDNS container | [] | +| `customLabels` | Optional labels for Deployment(s), Pod, Service, ServiceMonitor objects | {} | +| `customAnnotations` | Optional annotations for Deployment(s), Pod, Service, ServiceMonitor objects | +| `rollingUpdate.maxUnavailable` | Maximum number of unavailable replicas during rolling update | `1` | +| `rollingUpdate.maxSurge` | Maximum number of pods created above desired number of pods | `25%` | +| `podDisruptionBudget` | Optional PodDisruptionBudget | {} | +| `podAnnotations` | Optional Pod only Annotations | {} | +| `terminationGracePeriodSeconds` | Optional duration in seconds the pod needs to terminate gracefully. 
| 30 | +| `hpa.enabled` | Enable Hpa autoscaler instead of proportional one | `false` | +| `hpa.minReplicas` | Hpa minimum number of CoreDNS replicas | `1` | +| `hpa.maxReplicas` | Hpa maximum number of CoreDNS replicas | `2` | +| `hpa.metrics` | Metrics definitions used by Hpa to scale up and down | {} | +| `autoscaler.enabled` | Optionally enabled a cluster-proportional-autoscaler for CoreDNS | `false` | +| `autoscaler.coresPerReplica` | Number of cores in the cluster per CoreDNS replica | `256` | +| `autoscaler.nodesPerReplica` | Number of nodes in the cluster per CoreDNS replica | `16` | +| `autoscaler.min` | Min size of replicaCount | 0 | +| `autoscaler.max` | Max size of replicaCount | 0 (aka no max) | +| `autoscaler.includeUnschedulableNodes` | Should the replicas scale based on the total number or only schedulable nodes | `false` | +| `autoscaler.preventSinglePointFailure` | If true does not allow single points of failure to form | `true` | +| `autoscaler.customFlags` | A list of custom flags to pass into cluster-proportional-autoscaler | (no args) | +| `autoscaler.image.repository` | The image repository to pull autoscaler from | registry.k8s.io/cpa/cluster-proportional-autoscaler | +| `autoscaler.image.tag` | The image tag to pull autoscaler from | `1.8.5` | +| `autoscaler.image.pullPolicy` | Image pull policy for the autoscaler | IfNotPresent | +| `autoscaler.image.pullSecrets` | Specify container image pull secrets | `[]` | +| `autoscaler.priorityClassName` | Optional priority class for the autoscaler pod. `priorityClassName` used if not set. | `""` | +| `autoscaler.affinity` | Affinity settings for pod assignment for autoscaler | {} | +| `autoscaler.nodeSelector` | Node labels for pod assignment for autoscaler | {} | +| `autoscaler.tolerations` | Tolerations for pod assignment for autoscaler | [] | +| `autoscaler.resources.limits.cpu` | Container maximum CPU for cluster-proportional-autoscaler | `20m` | +| `autoscaler.resources.limits.memory` | Container maximum memory for cluster-proportional-autoscaler | `10Mi` | +| `autoscaler.resources.requests.cpu` | Container requested CPU for cluster-proportional-autoscaler | `20m` | +| `autoscaler.resources.requests.memory` | Container requested memory for cluster-proportional-autoscaler | `10Mi` | +| `autoscaler.configmap.annotations` | Annotations to add to autoscaler config map. For example to stop CI renaming them | {} | +| `autoscaler.livenessProbe.enabled` | Enable/disable the Liveness probe | `true` | +| `autoscaler.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `10` | +| `autoscaler.livenessProbe.periodSeconds` | How often to perform the probe | `5` | +| `autoscaler.livenessProbe.timeoutSeconds` | When the probe times out | `5` | +| `autoscaler.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `3` | +| `autoscaler.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `autoscaler.extraContainers` | Optional array of sidecar containers | [] | +| `deployment.enabled` | Optionally disable the main deployment and its respective resources. | `true` | +| `deployment.name` | Name of the deployment if `deployment.enabled` is true. Otherwise the name of an existing deployment for the autoscaler or HPA to target. | `""` | +| `deployment.annotations` | Annotations to add to the main deployment | `{}` | + +See `values.yaml` for configuration notes. 
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```console
+$ helm install coredns \
+  coredns/coredns \
+  --set rbac.create=false
+```
+
+The above command disables automatic creation of RBAC rules.
+
+Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,
+
+```console
+$ helm install coredns coredns/coredns -f values.yaml
+```
+
+> **Tip**: You can use the default [values.yaml](/charts/coredns/values.yaml)
+
+## Caveats
+
+The chart will automatically determine which protocols to listen on based on
+the protocols you define in your zones. This means that you could potentially
+use both "TCP" and "UDP" on a single port.
+Some cloud environments like "GCE" or "Azure container service" cannot
+create external load balancers with both "TCP" and "UDP" protocols. So
+when deploying CoreDNS with `serviceType="LoadBalancer"` on such cloud
+environments, make sure you do not attempt to use both protocols at the same
+time.
+
+## Autoscaling
+
+By setting `autoscaler.enabled = true` a
+[cluster-proportional-autoscaler](https://github.com/kubernetes-incubator/cluster-proportional-autoscaler)
+will be deployed. This will default to a CoreDNS replica for every 256 cores, or
+16 nodes in the cluster. These can be changed with `autoscaler.coresPerReplica`
+and `autoscaler.nodesPerReplica`. When the cluster is using large nodes (with more
+cores), `coresPerReplica` should dominate. If using small nodes,
+`nodesPerReplica` should dominate.
+
+This also creates a ServiceAccount, ClusterRole, and ClusterRoleBinding for
+the autoscaler deployment.
+
+`replicaCount` is ignored if this is enabled.
+
+By setting `hpa.enabled = true` a [Horizontal Pod Autoscaler](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/)
+is enabled for the CoreDNS deployment. This can scale the number of replicas based on metrics
+such as CPU utilization, memory utilization, or custom metrics.
+
+## Adopting existing CoreDNS resources
+
+If you do not want to delete the existing CoreDNS resources in your cluster, you can adopt the resources into a release as of Helm 3.2.0.
+
+You will also need to annotate and label your existing resources to allow Helm to assume control of them. See: https://github.com/helm/helm/pull/7649
+
+```
+annotations:
+  meta.helm.sh/release-name: your-release-name
+  meta.helm.sh/release-namespace: your-release-namespace
+labels:
+  app.kubernetes.io/managed-by: Helm
+```
+
+Once you have annotated and labeled all the resources this chart specifies, you may need to locally template the chart and compare the output against the existing manifests to ensure there are no changes/diffs. If
+you have been careful, this should produce no diff and leave all the resources unmodified and now under the management of Helm.
+
+Some values to investigate to help adopt your existing manifests to the Helm release are:
+
+- k8sAppLabelOverride
+- service.name
+- customLabels
+
+In some cases, you will need to orphan-delete your existing deployment since selector labels are immutable.
+
+```
+kubectl delete deployment coredns --cascade=orphan
+```
+
+This will delete the deployment and leave the ReplicaSet to ensure no downtime in the cluster. You will need to manually delete the ReplicaSet AFTER Helm has released a new deployment.
+
+Here is an example script to modify the annotations and labels of existing resources:
+
+WARNING: Substitute YOUR_HELM_RELEASE_NAME_HERE with the name of your Helm release.
+ +``` +#!/usr/bin/env bash + +set -euo pipefail + +for kind in config service serviceAccount; do + echo "setting annotations and labels on $kind/coredns" + kubectl -n kube-system annotate --overwrite $kind coredns meta.helm.sh/release-name=YOUR_HELM_RELEASE_NAME_HERE + kubectl -n kube-system annotate --overwrite $kind coredns meta.helm.sh/release-namespace=kube-system + kubectl -n kube-system label --overwrite $kind coredns app.kubernetes.io/managed-by=Helm +done +``` + +NOTE: Sometimes, previous deployments of kube-dns that have been migrated to CoreDNS still use kube-dns for the service name as well. + +``` +echo "setting annotations and labels on service/kube-dns" +kubectl -n kube-system annotate --overwrite service kube-dns meta.helm.sh/release-name=YOUR_HELM_RELEASE_NAME_HERE +kubectl -n kube-system annotate --overwrite service kube-dns meta.helm.sh/release-namespace=kube-system +kubectl -n kube-system label --overwrite service kube-dns app.kubernetes.io/managed-by=Helm +``` diff --git a/charts/rke2-coredns/rke2-coredns/1.29.007/templates/NOTES.txt b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/NOTES.txt new file mode 100755 index 00000000..3a1883b3 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/NOTES.txt @@ -0,0 +1,30 @@ +{{- if .Values.isClusterService }} +CoreDNS is now running in the cluster as a cluster-service. +{{- else }} +CoreDNS is now running in the cluster. +It can be accessed using the below endpoint +{{- if contains "NodePort" .Values.serviceType }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "coredns.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo "$NODE_IP:$NODE_PORT" +{{- else if contains "LoadBalancer" .Values.serviceType }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status by running 'kubectl get svc -w {{ template "coredns.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "coredns.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo $SERVICE_IP +{{- else if contains "ClusterIP" .Values.serviceType }} + "{{ template "coredns.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local" + from within the cluster +{{- end }} +{{- end }} + +It can be tested with the following: + +1. Launch a Pod with DNS tools: + +kubectl run -it --rm --restart=Never --image=infoblox/dnstools:latest dnstools + +2. Query the DNS server: + +/ # host kubernetes diff --git a/charts/rke2-coredns/rke2-coredns/1.29.007/templates/_helpers.tpl b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/_helpers.tpl new file mode 100755 index 00000000..4e2e926c --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/_helpers.tpl @@ -0,0 +1,291 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "coredns.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "coredns.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "coredns.labels" -}} +app.kubernetes.io/managed-by: {{ .Release.Service | quote }} +app.kubernetes.io/instance: {{ .Release.Name | quote }} +helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" +{{- if .Values.isClusterService }} +k8s-app: {{ template "coredns.k8sapplabel" . }} +kubernetes.io/cluster-service: "true" +kubernetes.io/name: "CoreDNS" +{{- end }} +app.kubernetes.io/name: {{ template "coredns.name" . }} +{{- end -}} + +{{/* +Common labels with autoscaler +*/}} +{{- define "coredns.labels.autoscaler" -}} +app.kubernetes.io/managed-by: {{ .Release.Service | quote }} +app.kubernetes.io/instance: {{ .Release.Name | quote }} +helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" +{{- if .Values.isClusterService }} +k8s-app: {{ template "coredns.k8sapplabel" . }}-autoscaler +kubernetes.io/cluster-service: "true" +kubernetes.io/name: "CoreDNS" +{{- end }} +app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler +{{- end -}} + +{{/* +Allow k8s-app label to be overridden +*/}} +{{- define "coredns.k8sapplabel" -}} +{{- coalesce .Values.k8sApp .Values.k8sAppLabelOverride .Chart.Name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Generate the list of ports automatically from the server definitions +*/}} +{{- define "coredns.servicePorts" -}} + {{/* Set ports to be an empty dict */}} + {{- $ports := dict -}} + {{/* Iterate through each of the server blocks */}} + {{- range .Values.servers -}} + {{/* Capture port to avoid scoping awkwardness */}} + {{- $port := toString .port -}} + + {{/* If none of the server blocks has mentioned this port yet take note of it */}} + {{- if not (hasKey $ports $port) -}} + {{- $ports := set $ports $port (dict "istcp" false "isudp" false) -}} + {{- end -}} + {{/* Retrieve the inner dict that holds the protocols for a given port */}} + {{- $innerdict := index $ports $port -}} + + {{/* + Look at each of the zones and check which protocol they serve + At the moment the following are supported by CoreDNS: + UDP: dns:// + TCP: tls://, grpc:// + */}} + {{- range .zones -}} + {{- if has (default "" .scheme) (list "dns://") -}} + {{/* Optionally enable tcp for this service as well */}} + {{- if eq (default false .use_tcp) true }} + {{- $innerdict := set $innerdict "istcp" true -}} + {{- end }} + {{- $innerdict := set $innerdict "isudp" true -}} + {{- end -}} + + {{- if has (default "" .scheme) (list "tls://" "grpc://") -}} + {{- $innerdict := set $innerdict "istcp" true -}} + {{- end -}} + {{- end -}} + + {{/* If none of the zones specify scheme, default to dns:// on both tcp & udp */}} + {{- if and (not (index $innerdict "istcp")) (not (index $innerdict "isudp")) -}} + {{- $innerdict := set $innerdict "isudp" true -}} + {{- $innerdict := set $innerdict "istcp" true -}} + {{- end -}} + + {{- if .nodePort -}} + {{- $innerdict := set $innerdict "nodePort" .nodePort -}} + {{- end -}} + + {{/* Write the dict back into the outer dict */}} + {{- $ports := set $ports $port $innerdict -}} + {{- end -}} + + {{/* Write out the ports according to the info collected above */}} + {{- range $port, $innerdict := $ports -}} + {{- $portList := list -}} + {{- if index $innerdict 
"isudp" -}} + {{- $portList = append $portList (dict "port" ($port | int) "protocol" "UDP" "name" (printf "udp-%s" $port)) -}} + {{- end -}} + {{- if index $innerdict "istcp" -}} + {{- $portList = append $portList (dict "port" ($port | int) "protocol" "TCP" "name" (printf "tcp-%s" $port)) -}} + {{- end -}} + + {{- range $portDict := $portList -}} + {{- if index $innerdict "nodePort" -}} + {{- $portDict := set $portDict "nodePort" (get $innerdict "nodePort" | int) -}} + {{- end -}} + + {{- printf "- %s\n" (toJson $portDict) -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Generate the list of ports automatically from the server definitions +*/}} +{{- define "coredns.containerPorts" -}} + {{/* Set ports to be an empty dict */}} + {{- $ports := dict -}} + {{/* Iterate through each of the server blocks */}} + {{- range .Values.servers -}} + {{/* Capture port to avoid scoping awkwardness */}} + {{- $port := toString .port -}} + + {{/* If none of the server blocks has mentioned this port yet take note of it */}} + {{- if not (hasKey $ports $port) -}} + {{- $ports := set $ports $port (dict "istcp" false "isudp" false) -}} + {{- end -}} + {{/* Retrieve the inner dict that holds the protocols for a given port */}} + {{- $innerdict := index $ports $port -}} + + {{/* + Look at each of the zones and check which protocol they serve + At the moment the following are supported by CoreDNS: + UDP: dns:// + TCP: tls://, grpc:// + */}} + {{- range .zones -}} + {{- if has (default "" .scheme) (list "dns://") -}} + {{/* Optionally enable tcp for this service as well */}} + {{- if eq (default false .use_tcp) true }} + {{- $innerdict := set $innerdict "istcp" true -}} + {{- end }} + {{- $innerdict := set $innerdict "isudp" true -}} + {{- end -}} + + {{- if has (default "" .scheme) (list "tls://" "grpc://") -}} + {{- $innerdict := set $innerdict "istcp" true -}} + {{- end -}} + {{- end -}} + + {{/* If none of the zones specify scheme, default to dns:// on both tcp & udp */}} + {{- if and (not (index $innerdict "istcp")) (not (index $innerdict "isudp")) -}} + {{- $innerdict := set $innerdict "isudp" true -}} + {{- $innerdict := set $innerdict "istcp" true -}} + {{- end -}} + + {{- if .hostPort -}} + {{- $innerdict := set $innerdict "hostPort" .hostPort -}} + {{- end -}} + + {{/* Write the dict back into the outer dict */}} + {{- $ports := set $ports $port $innerdict -}} + + {{/* Fetch port from the configuration if the prometheus section exists */}} + {{- range .plugins -}} + {{- if eq .name "prometheus" -}} + {{- $prometheus_addr := toString .parameters -}} + {{- $prometheus_addr_list := regexSplit ":" $prometheus_addr -1 -}} + {{- $prometheus_port := index $prometheus_addr_list 1 -}} + {{- $ports := set $ports $prometheus_port (dict "istcp" true "isudp" false) -}} + {{- end -}} + {{- end -}} + {{- end -}} + + {{/* Write out the ports according to the info collected above */}} + {{- range $port, $innerdict := $ports -}} + {{- $portList := list -}} + {{- if index $innerdict "isudp" -}} + {{- $portList = append $portList (dict "containerPort" ($port | int) "protocol" "UDP" "name" (printf "udp-%s" $port)) -}} + {{- end -}} + {{- if index $innerdict "istcp" -}} + {{- $portList = append $portList (dict "containerPort" ($port | int) "protocol" "TCP" "name" (printf "tcp-%s" $port)) -}} + {{- end -}} + + {{- range $portDict := $portList -}} + {{- if index $innerdict "hostPort" -}} + {{- $portDict := set $portDict "hostPort" (get $innerdict "hostPort" | int) -}} + {{- end -}} + + {{- printf "- %s\n" (toJson 
$portDict) -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "coredns.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "coredns.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{- define "system_default_registry" -}} +{{- if .Values.global.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} + +{{/* +Set the clusterDNS service IP +*/}} +{{- define "clusterDNSServerIP" -}} +{{- if .Values.service.clusterIP }} + {{- .Values.service.clusterIP }} +{{ else }} + {{- $dnsIPs := split "," .Values.global.clusterDNS }} + {{- $dnsCount := len $dnsIPs }} + {{- if eq $dnsCount 1 }} + {{- .Values.global.clusterDNS -}} + {{- else }} + {{- if gt $dnsCount 1 }} + {{- $dnsIPs._0 -}} + {{ else }} + {{- "10.43.0.10" }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} + +{{/* +Pass the clusterDNS service IP for the nodelocal config +*/}} +{{- define "nodelocalUpstreamDNSServerIP" -}} +{{- if .Values.nodelocal.ipvs }} +{{- "" -}} +{{ else }} +{{- (include "clusterDNSServerIP" .) -}} +{{- end }} +{{- end }} + +{{/* +Fill the localip flag in the nodelocal CLI +*/}} +{{- define "nodelocalLocalIPFlag" -}} +{{- if .Values.nodelocal.ipvs }} +{{- "" -}} +{{ else }} +{{- printf ",%s" (include "clusterDNSServerIP" .) -}} +{{- end }} +{{- end }} + +{{/* +Fill the ipFamily correctly +*/}} +{{- define "ipFamilyPolicy" -}} +{{- if .Values.service.ipFamilyPolicy }} + {{- .Values.service.ipFamilyPolicy }} +{{ else }} + {{- $dnsIPs := split "," .Values.global.clusterDNS }} + {{- $dnsCount := len $dnsIPs }} + {{- if gt $dnsCount 1 }} + {{- "PreferDualStack" }} + {{ else }} + {{- "SingleStack" }} + {{- end }} +{{- end }} +{{- end }} + diff --git a/charts/rke2-coredns/rke2-coredns/1.29.007/templates/clusterrole-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/clusterrole-autoscaler.yaml new file mode 100755 index 00000000..9bf57d23 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/clusterrole-autoscaler.yaml @@ -0,0 +1,30 @@ +{{- if and .Values.autoscaler.enabled .Values.rbac.create }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "coredns.fullname" . }}-autoscaler + labels: {{- include "coredns.labels.autoscaler" . | nindent 4 }} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +{{- with .Values.customAnnotations }} + annotations: +{{- toYaml . 
| nindent 4 }} +{{- end }} +rules: + - apiGroups: [""] + resources: ["nodes"] + verbs: ["list","watch"] + - apiGroups: [""] + resources: ["replicationcontrollers/scale"] + verbs: ["get", "update"] + - apiGroups: ["extensions", "apps"] + resources: ["deployments/scale", "replicasets/scale"] + verbs: ["get", "update"] +# Remove the configmaps rule once below issue is fixed: +# kubernetes-incubator/cluster-proportional-autoscaler#16 + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "create"] +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.29.007/templates/clusterrole.yaml b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/clusterrole.yaml new file mode 100755 index 00000000..c33762c4 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/clusterrole.yaml @@ -0,0 +1,36 @@ +{{- if and .Values.deployment.enabled .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "coredns.fullname" . }} + labels: {{- include "coredns.labels" . | nindent 4 }} +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - namespaces + verbs: + - list + - watch +- apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch +{{- if .Values.rbac.pspEnable }} +- apiGroups: + - policy + - extensions + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "coredns.fullname" . }} +{{- end }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.29.007/templates/clusterrolebinding-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/clusterrolebinding-autoscaler.yaml new file mode 100755 index 00000000..ef32306f --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/clusterrolebinding-autoscaler.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.autoscaler.enabled .Values.rbac.create }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "coredns.fullname" . }}-autoscaler + labels: {{- include "coredns.labels.autoscaler" . | nindent 4 }} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +{{- with .Values.customAnnotations }} + annotations: +{{- toYaml . | nindent 4 }} +{{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "coredns.fullname" . }}-autoscaler +subjects: +- kind: ServiceAccount + name: {{ template "coredns.fullname" . }}-autoscaler + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.29.007/templates/clusterrolebinding.yaml b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/clusterrolebinding.yaml new file mode 100755 index 00000000..36fa21c0 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/clusterrolebinding.yaml @@ -0,0 +1,15 @@ +{{- if and .Values.deployment.enabled .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "coredns.fullname" . }} + labels: {{- include "coredns.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "coredns.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "coredns.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.29.007/templates/configmap-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/configmap-autoscaler.yaml new file mode 100755 index 00000000..b10eb59e --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/configmap-autoscaler.yaml @@ -0,0 +1,33 @@ +{{- if .Values.autoscaler.enabled }} +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ template "coredns.fullname" . }}-autoscaler + namespace: {{ .Release.Namespace }} + labels: {{- include "coredns.labels.autoscaler" . | nindent 4 }} + {{- if .Values.customLabels }} + {{- toYaml .Values.customLabels | nindent 4 }} + {{- end }} + {{- if or .Values.autoscaler.configmap.annotations .Values.customAnnotations }} + annotations: + {{- if .Values.customAnnotations }} + {{- toYaml .Values.customAnnotations | nindent 4 }} + {{- end }} + {{- if .Values.autoscaler.configmap.annotations -}} + {{ toYaml .Values.autoscaler.configmap.annotations | nindent 4 }} + {{- end }} + {{- end }} +data: + # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate. + # If using small nodes, "nodesPerReplica" should dominate. + linear: |- + { + "coresPerReplica": {{ .Values.autoscaler.coresPerReplica | float64 }}, + "nodesPerReplica": {{ .Values.autoscaler.nodesPerReplica | float64 }}, + "preventSinglePointFailure": {{ .Values.autoscaler.preventSinglePointFailure }}, + "min": {{ .Values.autoscaler.min | int }}, + "max": {{ .Values.autoscaler.max | int }}, + "includeUnschedulableNodes": {{ .Values.autoscaler.includeUnschedulableNodes }} + } +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.29.007/templates/configmap-nodelocal.yaml b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/configmap-nodelocal.yaml new file mode 100755 index 00000000..5e4b26ce --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/configmap-nodelocal.yaml @@ -0,0 +1,82 @@ +{{- if .Values.nodelocal.enabled }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: node-local-dns + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: Reconcile +data: + Corefile: | + {{ coalesce .Values.global.clusterDomain "cluster.local" }}:53 { + errors + cache { + success 9984 30 + denial 9984 5 + } + reload + loop +{{- if .Values.nodelocal.use_cilium_lrp }} + bind 0.0.0.0 +{{- else }} + bind {{ .Values.nodelocal.ip_address }} {{ template "nodelocalUpstreamDNSServerIP" . }} +{{- end}} + forward . {{ ternary (include "clusterDNSServerIP" .) "__PILLAR__CLUSTER__DNS__" .Values.nodelocal.ipvs }} { + force_tcp + } + prometheus :9253 +{{- if .Values.nodelocal.use_cilium_lrp }} + health +{{- else }} + health {{ .Values.nodelocal.ip_address }}:8080 +{{- end}} + + + } + in-addr.arpa:53 { + errors + cache 30 + reload + loop +{{- if .Values.nodelocal.use_cilium_lrp }} + bind 0.0.0.0 +{{- else }} + bind {{ .Values.nodelocal.ip_address }} {{ template "nodelocalUpstreamDNSServerIP" . }} +{{- end}} + forward . {{ ternary (include "clusterDNSServerIP" .) "__PILLAR__CLUSTER__DNS__" .Values.nodelocal.ipvs }} { + force_tcp + } + prometheus :9253 + } + ip6.arpa:53 { + errors + cache 30 + reload + loop +{{- if .Values.nodelocal.use_cilium_lrp }} + bind 0.0.0.0 +{{- else }} + bind {{ .Values.nodelocal.ip_address }} {{ template "nodelocalUpstreamDNSServerIP" . }} +{{- end}} + forward . {{ ternary (include "clusterDNSServerIP" .) 
"__PILLAR__CLUSTER__DNS__" .Values.nodelocal.ipvs }} { + force_tcp + } + prometheus :9253 + } + .:53 { + errors + cache 30 + reload + loop +{{- if .Values.nodelocal.use_cilium_lrp }} + bind 0.0.0.0 +{{- else }} + bind {{ .Values.nodelocal.ip_address }} {{ template "nodelocalUpstreamDNSServerIP" . }} +{{- end}} + forward . __PILLAR__UPSTREAM__SERVERS__ { + force_tcp + } + prometheus :9253 + } +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.29.007/templates/configmap.yaml b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/configmap.yaml new file mode 100755 index 00000000..e0190e1f --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/configmap.yaml @@ -0,0 +1,35 @@ +{{- if .Values.deployment.enabled }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "coredns.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "coredns.labels" . | nindent 4 }} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +{{- with .Values.customAnnotations }} + annotations: +{{- toYaml . | nindent 4 }} +{{- end }} +data: + Corefile: |- + {{- range $name, $conf := .Values.extraConfig }} + {{ $name }}{{ if $conf.parameters }} {{ $conf.parameters }}{{ end }} + {{- end }} + {{ range .Values.servers }} + {{- range $idx, $zone := .zones }}{{ if $idx }} {{ else }}{{ end }}{{ default "" $zone.scheme }}{{ default "." $zone.zone }}{{ else }}.{{ end -}} + {{- if .port }}:{{ .port }} {{ end -}} + { + {{- range .plugins }} + {{ .name }}{{ if .parameters }} {{if eq .name "kubernetes" }} {{ coalesce $.Values.global.clusterDomain "cluster.local" }} {{ end }} {{.parameters}}{{ end }}{{ if .configBlock }} { +{{ .configBlock | indent 12 }} + }{{ end }} + {{- end }} + } + {{ end }} + {{- range .Values.zoneFiles }} + {{ .filename }}: {{ toYaml .contents | indent 4 }} + {{- end }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.29.007/templates/daemonset-nodelocal.yaml b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/daemonset-nodelocal.yaml new file mode 100755 index 00000000..90529962 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/daemonset-nodelocal.yaml @@ -0,0 +1,107 @@ +{{- if .Values.nodelocal.enabled }} +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: node-local-dns + namespace: kube-system + labels: + k8s-app: node-local-dns + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +spec: + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + k8s-app: node-local-dns + template: + metadata: + labels: + k8s-app: node-local-dns + spec: + priorityClassName: system-node-critical +{{- if .Values.rbac.create }} + serviceAccountName: node-local-dns +{{- end }} +{{- if .Values.nodelocal.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodelocal.nodeSelector | indent 8 }} +{{- end }} + hostNetwork: {{not .Values.nodelocal.use_cilium_lrp }} + dnsPolicy: Default # Don't use cluster DNS. + tolerations: + - operator: Exists +{{- if eq .Values.nodelocal.ipvs false }} + initContainers: + - name: wait-coredns + image: {{ template "system_default_registry" . }}{{ .Values.nodelocal.initimage.repository }}:{{ .Values.nodelocal.initimage.tag }} + command: ['sh', '-c', "until nc -zv {{ template "clusterDNSServerIP" . }} 53; do echo waiting for dns service; sleep 2; done"] +{{- end }} + containers: + - name: node-cache + image: {{ template "system_default_registry" . 
}}{{ .Values.nodelocal.image.repository }}:{{ .Values.nodelocal.image.tag }} + resources: + requests: + cpu: 25m + memory: 5Mi + args: + - "-localip" + - "{{.Values.nodelocal.ip_address}}{{ template "nodelocalLocalIPFlag" . }}" + - "-conf" + - "/etc/Corefile" + - "-upstreamsvc" + - "kube-dns-upstream" +{{- if .Values.nodelocal.use_cilium_lrp }} + - "-skipteardown=true" + - "-setupinterface=false" + - "-setupiptables=false" +{{- end}} + securityContext: + privileged: true + ports: + - containerPort: 53 + name: udp-53 + protocol: UDP + - containerPort: 53 + name: tcp-53 + protocol: TCP + - containerPort: 9253 + name: metrics + protocol: TCP + livenessProbe: + httpGet: +{{- if not .Values.nodelocal.use_cilium_lrp }} + host: {{.Values.nodelocal.ip_address}} +{{- end}} + path: /health + port: 8080 + initialDelaySeconds: 60 + timeoutSeconds: 5 + volumeMounts: +{{- if not .Values.nodelocal.use_cilium_lrp }} + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false +{{- end}} + - name: config-volume + mountPath: /etc/coredns + - name: kube-dns-config + mountPath: /etc/kube-dns + volumes: + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + - name: kube-dns-config + configMap: + name: kube-dns + optional: true + - name: config-volume + configMap: + name: node-local-dns + items: + - key: Corefile + path: Corefile.base +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.29.007/templates/deployment-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/deployment-autoscaler.yaml new file mode 100755 index 00000000..667af8ea --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/deployment-autoscaler.yaml @@ -0,0 +1,98 @@ +{{- if and (.Values.autoscaler.enabled) (not .Values.hpa.enabled) }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "coredns.fullname" . }}-autoscaler + namespace: {{ .Release.Namespace }} + labels: {{- include "coredns.labels.autoscaler" . | nindent 4 }} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +{{- with .Values.customAnnotations }} + annotations: +{{- toYaml . | nindent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.isClusterService }} + k8s-app: {{ template "coredns.k8sapplabel" . }}-autoscaler + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler + template: + metadata: + labels: + {{- if .Values.isClusterService }} + {{- if not (hasKey .Values.customLabels "k8s-app")}} + k8s-app: {{ template "coredns.k8sapplabel" . }}-autoscaler + {{- end }} + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }}-autoscaler + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.customLabels }} + {{ toYaml .Values.customLabels | nindent 8 }} + {{- end }} + annotations: + checksum/configmap: {{ include (print $.Template.BasePath "/configmap-autoscaler.yaml") . | sha256sum }} + {{- if .Values.isClusterService }} + scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' + {{- end }} + {{- with .Values.autoscaler.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "coredns.fullname" . 
}}-autoscaler + {{- $priorityClassName := default .Values.priorityClassName .Values.autoscaler.priorityClassName }} + {{- if $priorityClassName }} + priorityClassName: {{ $priorityClassName | quote }} + {{- end }} + {{- if .Values.autoscaler.affinity }} + affinity: +{{ toYaml .Values.autoscaler.affinity | indent 8 }} + {{- end }} + {{- if .Values.autoscaler.tolerations }} + tolerations: +{{ toYaml .Values.autoscaler.tolerations | indent 8 }} + {{- end }} + {{- if .Values.autoscaler.nodeSelector }} + nodeSelector: +{{ toYaml .Values.autoscaler.nodeSelector | indent 8 }} + {{- end }} + {{- if not (empty .Values.autoscaler.image.pullSecrets) }} + imagePullSecrets: +{{ toYaml .Values.autoscaler.image.pullSecrets | indent 8 }} + {{- end }} + containers: + - name: autoscaler + image: {{ template "system_default_registry" . }}{{ .Values.autoscaler.image.repository }}:{{ .Values.autoscaler.image.tag }} + imagePullPolicy: {{ .Values.autoscaler.image.pullPolicy }} + resources: +{{ toYaml .Values.autoscaler.resources | indent 10 }} + {{- if .Values.autoscaler.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: /healthz + port: 8080 + scheme: HTTP + initialDelaySeconds: {{ .Values.autoscaler.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.autoscaler.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.autoscaler.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.autoscaler.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.autoscaler.livenessProbe.failureThreshold }} + {{- end }} + command: + - /cluster-proportional-autoscaler + - --namespace={{ .Release.Namespace }} + - --configmap={{ template "coredns.fullname" . }}-autoscaler + - --target=Deployment/{{ default (include "coredns.fullname" .) .Values.deployment.name }} + - --logtostderr=true + - --v=2 + {{- if .Values.autoscaler.customFlags }} +{{ toYaml .Values.autoscaler.customFlags | indent 10 }} + {{- end }} +{{- if .Values.autoscaler.extraContainers }} +{{ toYaml .Values.autoscaler.extraContainers | indent 6 }} +{{- end }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.29.007/templates/deployment.yaml b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/deployment.yaml new file mode 100755 index 00000000..bd5f4d15 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/deployment.yaml @@ -0,0 +1,165 @@ +{{- if .Values.deployment.enabled }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ default (include "coredns.fullname" .) .Values.deployment.name }} + namespace: {{ .Release.Namespace }} + labels: {{- include "coredns.labels" . 
| nindent 4 }} + app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} + {{- if or .Values.deployment.annotations .Values.customAnnotations }} + annotations: + {{- if .Values.customAnnotations }} + {{- toYaml .Values.customAnnotations | nindent 4 }} + {{- end }} + {{- if .Values.deployment.annotations }} + {{- toYaml .Values.deployment.annotations | nindent 4 }} + {{- end }} + {{- end }} +spec: + {{- if and (not .Values.autoscaler.enabled) (not .Values.hpa.enabled) }} + replicas: {{ .Values.replicaCount }} + {{- end }} + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: {{ .Values.rollingUpdate.maxUnavailable }} + maxSurge: {{ .Values.rollingUpdate.maxSurge }} + selector: + matchLabels: + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.isClusterService }} + k8s-app: {{ template "coredns.k8sapplabel" . }} + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + template: + metadata: + labels: + {{- if .Values.isClusterService }} + k8s-app: {{ template "coredns.k8sapplabel" . }} + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + app.kubernetes.io/instance: {{ .Release.Name | quote }} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 8 }} +{{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} +{{- if .Values.podAnnotations }} +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} + spec: + {{- if .Values.podSecurityContext }} + securityContext: {{ toYaml .Values.podSecurityContext | nindent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + serviceAccountName: {{ template "coredns.serviceAccountName" . }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName | quote }} + {{- end }} + {{- if .Values.isClusterService }} + dnsPolicy: Default + {{- end }} + {{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: +{{ tpl (toYaml .Values.topologySpreadConstraints) $ | indent 8 }} + {{- end }} + {{- if or (.Values.isClusterService) (.Values.tolerations) }} + tolerations: + {{- if .Values.isClusterService }} + - key: CriticalAddonsOnly + operator: Exists + {{- end }} + {{- if .Values.tolerations }} +{{ toYaml .Values.tolerations | indent 8 }} + {{- end }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeSelector | indent 8 }} + {{- end }} + {{- if not (empty .Values.image.pullSecrets) }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} + {{- end }} + containers: + - name: "coredns" + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: [ "-conf", "/etc/coredns/Corefile" ] + volumeMounts: + - name: config-volume + mountPath: /etc/coredns +{{- range .Values.extraSecrets }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: true +{{- end }} +{{- if .Values.extraVolumeMounts }} +{{- toYaml .Values.extraVolumeMounts | nindent 8}} +{{- end }} + resources: +{{ toYaml .Values.resources | indent 10 }} + ports: +{{ include "coredns.containerPorts" . 
| indent 8 }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: /health + port: 8080 + scheme: HTTP + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: /ready + port: 8181 + scheme: HTTP + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} +{{- if .Values.securityContext }} + securityContext: +{{- toYaml .Values.securityContext | nindent 10 }} +{{- end }} +{{- if .Values.extraContainers }} +{{ toYaml .Values.extraContainers | indent 6 }} +{{- end }} + volumes: + - name: config-volume + configMap: + name: {{ template "coredns.fullname" . }} + items: + - key: Corefile + path: Corefile + {{ range .Values.zoneFiles }} + - key: {{ .filename }} + path: {{ .filename }} + {{ end }} +{{- range .Values.extraSecrets }} + - name: {{ .name }} + secret: + secretName: {{ .name }} + defaultMode: {{ default 400 .defaultMode }} +{{- end }} +{{- if .Values.extraVolumes }} +{{ toYaml .Values.extraVolumes | indent 8 }} +{{- end }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.29.007/templates/hpa.yaml b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/hpa.yaml new file mode 100755 index 00000000..7fcc9931 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/hpa.yaml @@ -0,0 +1,33 @@ +{{- if and (.Values.hpa.enabled) (not .Values.autoscaler.enabled) }} +--- +{{- if .Capabilities.APIVersions.Has "autoscaling/v2" }} +apiVersion: autoscaling/v2 +{{- else }} +apiVersion: autoscaling/v2beta2 +{{- end }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "coredns.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "coredns.labels" . | nindent 4 }} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +{{- with .Values.customAnnotations }} + annotations: +{{- toYaml . | nindent 4 }} +{{- end }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ default (include "coredns.fullname" .) .Values.deployment.name }} + minReplicas: {{ .Values.hpa.minReplicas }} + maxReplicas: {{ .Values.hpa.maxReplicas }} + metrics: +{{ toYaml .Values.hpa.metrics | indent 4 }} +{{- if .Values.hpa.behavior }} + behavior: +{{ toYaml .Values.hpa.behavior | indent 4 }} +{{- end }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.29.007/templates/lrp-nodelocal.yaml b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/lrp-nodelocal.yaml new file mode 100755 index 00000000..bcfb1eb1 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/lrp-nodelocal.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.nodelocal.enabled .Values.nodelocal.use_cilium_lrp }} +apiVersion: "cilium.io/v2" +kind: CiliumLocalRedirectPolicy +metadata: + name: "lrp-nodelocal" +spec: + redirectFrontend: + serviceMatcher: + serviceName: {{ default (include "coredns.fullname" .) 
.Values.service.name }} + namespace: {{ .Release.Namespace }} + redirectBackend: + localEndpointSelector: + matchLabels: + k8s-app: node-local-dns + toPorts: + - port: "53" + name: udp-53 + protocol: UDP + - port: "53" + name: tcp-53 + protocol: TCP +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.29.007/templates/poddisruptionbudget.yaml b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/poddisruptionbudget.yaml new file mode 100755 index 00000000..9cc62c1b --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/poddisruptionbudget.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.deployment.enabled .Values.podDisruptionBudget -}} +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: {{ template "coredns.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "coredns.labels" . | nindent 4 }} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +{{- with .Values.customAnnotations }} + annotations: +{{- toYaml . | nindent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.isClusterService }} + k8s-app: {{ template "coredns.k8sapplabel" . }} + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} +{{ toYaml .Values.podDisruptionBudget | indent 2 }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.29.007/templates/podsecuritypolicy.yaml b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/podsecuritypolicy.yaml new file mode 100755 index 00000000..6e02e00d --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/podsecuritypolicy.yaml @@ -0,0 +1,47 @@ +{{- if and .Values.deployment.enabled .Values.rbac.pspEnable }} +{{ if .Capabilities.APIVersions.Has "policy/v1beta1" }} +apiVersion: policy/v1beta1 +{{ else }} +apiVersion: extensions/v1beta1 +{{ end -}} +kind: PodSecurityPolicy +metadata: + name: {{ template "coredns.fullname" . }} + labels: {{- include "coredns.labels" . | nindent 4 }} +spec: + privileged: false + # Required to prevent escalations to root. + allowPrivilegeEscalation: false + # Add back CAP_NET_BIND_SERVICE so that coredns can run on port 53 + allowedCapabilities: + - NET_BIND_SERVICE + # Allow core volume types. + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + # Require the container to run without root privileges. + rule: 'RunAsAny' + seLinux: + # This policy assumes the nodes are using AppArmor rather than SELinux. + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.29.007/templates/service-metrics.yaml b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/service-metrics.yaml new file mode 100755 index 00000000..1fc7b2d8 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/service-metrics.yaml @@ -0,0 +1,35 @@ +{{- if and .Values.deployment.enabled .Values.prometheus.service.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "coredns.fullname" . }}-metrics + namespace: {{ .Release.Namespace }} + labels: {{- include "coredns.labels" . 
| nindent 4 }} + app.kubernetes.io/component: metrics +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} + {{- if or .Values.prometheus.service.annotations .Values.service.annotations .Values.customAnnotations }} + annotations: + {{- if .Values.prometheus.service.annotations }} + {{- toYaml .Values.prometheus.service.annotations | nindent 4 }} + {{- end }} + {{- if .Values.service.annotations }} + {{- toYaml .Values.service.annotations | nindent 4 }} + {{- end }} + {{- if .Values.customAnnotations }} + {{- toYaml .Values.customAnnotations | nindent 4 }} + {{- end }} + {{- end }} +spec: + selector: + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.isClusterService }} + k8s-app: {{ template "coredns.k8sapplabel" . }} + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + ports: + - name: metrics + port: 9153 + targetPort: 9153 +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.29.007/templates/service-nodelocal.yaml b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/service-nodelocal.yaml new file mode 100755 index 00000000..bad0b52e --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/service-nodelocal.yaml @@ -0,0 +1,24 @@ +{{- if .Values.nodelocal.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: kube-dns-upstream + namespace: kube-system + labels: + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile + kubernetes.io/name: "KubeDNSUpstream" +spec: + ports: + - name: udp-53 + port: 53 + protocol: UDP + targetPort: 53 + - name: tcp-53 + port: 53 + protocol: TCP + targetPort: 53 + selector: + k8s-app: kube-dns +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.29.007/templates/service.yaml b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/service.yaml new file mode 100755 index 00000000..c8303ea1 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/service.yaml @@ -0,0 +1,51 @@ +{{- if .Values.deployment.enabled }} +{{- $dnsIPs := split "," .Values.global.clusterDNS }} +{{- $dnsCount := len $dnsIPs }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ default (include "coredns.fullname" .) .Values.service.name }} + namespace: {{ .Release.Namespace }} + labels: {{- include "coredns.labels" . | nindent 4 }} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} + {{- if or .Values.service.annotations .Values.customAnnotations }} + annotations: + {{- if .Values.service.annotations }} + {{- toYaml .Values.service.annotations | nindent 4 }} + {{- end }} + {{- if .Values.customAnnotations }} + {{- toYaml .Values.customAnnotations | nindent 4 }} + {{- end }} + {{- end }} +spec: + selector: + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.isClusterService }} + k8s-app: {{ template "coredns.k8sapplabel" . }} + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + clusterIP: {{ template "clusterDNSServerIP" . 
}} + {{- if gt $dnsCount 1 }} + clusterIPs: + {{- range $dnsIP := $dnsIPs }} + - {{ $dnsIP }} + {{- end }} + {{- end }} + {{- if .Values.service.externalIPs }} + externalIPs: + {{- toYaml .Values.service.externalIPs | nindent 4 }} + {{- end }} + {{- if .Values.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy }} + {{- end }} + {{- if .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + ports: +{{ include "coredns.servicePorts" . | indent 2 -}} + type: {{ default "ClusterIP" .Values.serviceType }} + ipFamilyPolicy: {{ template "ipFamilyPolicy" . }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.29.007/templates/serviceaccount-autoscaler.yaml b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/serviceaccount-autoscaler.yaml new file mode 100755 index 00000000..8b0e9c7e --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/serviceaccount-autoscaler.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.autoscaler.enabled .Values.rbac.create }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "coredns.fullname" . }}-autoscaler + namespace: {{ .Release.Namespace }} + labels: {{- include "coredns.labels.autoscaler" . | nindent 4 }} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +{{- with .Values.customAnnotations }} + annotations: +{{- toYaml . | nindent 4 }} +{{- end }} +{{- if .Values.autoscaler.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.autoscaler.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.29.007/templates/serviceaccount-nodelocal.yaml b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/serviceaccount-nodelocal.yaml new file mode 100755 index 00000000..1088a869 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/serviceaccount-nodelocal.yaml @@ -0,0 +1,11 @@ +{{- if and .Values.nodelocal.enabled .Values.rbac.create }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: node-local-dns + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.29.007/templates/serviceaccount.yaml b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/serviceaccount.yaml new file mode 100755 index 00000000..e4ee52c8 --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/serviceaccount.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.deployment.enabled .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "coredns.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "coredns.labels" . | nindent 4 }} + {{- if or .Values.serviceAccount.annotations .Values.customAnnotations }} + annotations: + {{- if .Values.customAnnotations }} + {{- toYaml .Values.customAnnotations | nindent 4 }} + {{- end }} + {{- if .Values.serviceAccount.annotations }} + {{- toYaml .Values.serviceAccount.annotations | nindent 4 }} + {{- end }} + {{- end }} +{{- if .Values.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . 
}} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.29.007/templates/servicemonitor.yaml b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/servicemonitor.yaml new file mode 100755 index 00000000..b5fc642e --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.29.007/templates/servicemonitor.yaml @@ -0,0 +1,36 @@ +{{- if and .Values.deployment.enabled .Values.prometheus.monitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "coredns.fullname" . }} + {{- if .Values.prometheus.monitor.namespace }} + namespace: {{ .Values.prometheus.monitor.namespace }} + {{- end }} + labels: {{- include "coredns.labels" . | nindent 4 }} + {{- if .Values.prometheus.monitor.additionalLabels }} +{{ toYaml .Values.prometheus.monitor.additionalLabels | indent 4 }} + {{- end }} +{{- with .Values.customAnnotations }} + annotations: +{{- toYaml . | nindent 4 }} +{{- end }} +spec: + {{- if ne .Values.prometheus.monitor.namespace .Release.Namespace }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + {{- end }} + selector: + matchLabels: + app.kubernetes.io/instance: {{ .Release.Name | quote }} + {{- if .Values.isClusterService }} + k8s-app: {{ template "coredns.k8sapplabel" . }} + {{- end }} + app.kubernetes.io/name: {{ template "coredns.name" . }} + app.kubernetes.io/component: metrics + endpoints: + - port: metrics + {{- if .Values.prometheus.monitor.interval }} + interval: {{ .Values.prometheus.monitor.interval }} + {{- end }} +{{- end }} diff --git a/charts/rke2-coredns/rke2-coredns/1.29.007/values.yaml b/charts/rke2-coredns/rke2-coredns/1.29.007/values.yaml new file mode 100755 index 00000000..2ef7a49b --- /dev/null +++ b/charts/rke2-coredns/rke2-coredns/1.29.007/values.yaml @@ -0,0 +1,404 @@ +# Default values for coredns. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +image: + repository: rancher/hardened-coredns + # Overrides the image tag whose default is the chart appVersion. + tag: "v1.11.1-build20240910" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + pullSecrets: [] + # pullSecrets: + # - name: myRegistryKeySecretName + +replicaCount: 1 + +resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi + +rollingUpdate: + maxUnavailable: 1 + maxSurge: 25% + +terminationGracePeriodSeconds: 30 + +podAnnotations: {} +# cluster-autoscaler.kubernetes.io/safe-to-evict: "false" + +serviceType: "ClusterIP" + +prometheus: + service: + enabled: false + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9153" + monitor: + enabled: false + additionalLabels: {} + namespace: "" + interval: "" + +service: +# clusterIP: "" +# loadBalancerIP: "" +# externalIPs: [] +# externalTrafficPolicy: "" + ipFamilyPolicy: "" + # The name of the Service + # If not set, a name is generated using the fullname template + name: "" + annotations: {} + +serviceAccount: + create: true + # The name of the ServiceAccount to use + # If not set and create is true, a name is generated using the fullname template + name: "coredns" + annotations: {} + +rbac: + # If true, create & use RBAC resources + create: true + # If true, create and use PodSecurityPolicy + pspEnable: false + # The name of the ServiceAccount to use. 
+ # If not set and create is true, a name is generated using the fullname template + # name: + +# isClusterService specifies whether chart should be deployed as cluster-service or normal k8s app. +isClusterService: true + +# Optional priority class to be used for the coredns pods. Used for autoscaler if autoscaler.priorityClassName not set. +priorityClassName: "system-cluster-critical" + +# Configure the pod level securityContext. +podSecurityContext: {} + +# Configure SecurityContext for Pod. +# Ensure that required linux capability to bind port number below 1024 is assigned (`CAP_NET_BIND_SERVICE`). +securityContext: + capabilities: + add: + - NET_BIND_SERVICE + +# Default zone is what Kubernetes recommends: +# https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/#coredns-configmap-options +servers: +- zones: + - zone: . + port: 53 + # If serviceType is nodePort you can specify nodePort here + # nodePort: 30053 + # hostPort: 53 + plugins: + - name: errors + # Serves a /health endpoint on :8080, required for livenessProbe + - name: health + configBlock: |- + lameduck 5s + # Serves a /ready endpoint on :8181, required for readinessProbe + - name: ready + # Required to query kubernetes API for data + - name: kubernetes + parameters: cluster.local in-addr.arpa ip6.arpa + configBlock: |- + pods insecure + fallthrough in-addr.arpa ip6.arpa + ttl 30 + # Serves a /metrics endpoint on :9153, required for serviceMonitor + - name: prometheus + parameters: 0.0.0.0:9153 + - name: forward + parameters: . /etc/resolv.conf + - name: cache + parameters: 30 + - name: loop + - name: reload + - name: loadbalance + +# Complete example with all the options: +# - zones: # the `zones` block can be left out entirely, defaults to "." +# - zone: hello.world. # optional, defaults to "." +# scheme: tls:// # optional, defaults to "" (which equals "dns://" in CoreDNS) +# - zone: foo.bar. +# scheme: dns:// +# use_tcp: true # set this parameter to optionally expose the port on tcp as well as udp for the DNS protocol +# # Note that this will not work if you are also exposing tls or grpc on the same server +# port: 12345 # optional, defaults to "" (which equals 53 in CoreDNS) +# plugins: # the plugins to use for this server block +# - name: kubernetes # name of plugin, if used multiple times ensure that the plugin supports it! +# parameters: foo bar # list of parameters after the plugin +# configBlock: |- # if the plugin supports extra block style config, supply it here +# hello world +# foo bar + +# Extra configuration that is applied outside of the default zone block. 
+# Example to include additional config files, which may come from extraVolumes: +# extraConfig: +# import: +# parameters: /opt/coredns/*.conf +extraConfig: {} + +# To use the livenessProbe, the health plugin needs to be enabled in CoreDNS' server config +livenessProbe: + enabled: true + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 +# To use the readinessProbe, the ready plugin needs to be enabled in CoreDNS' server config +readinessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + +# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core +affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: "kubernetes.io/hostname" + labelSelector: + matchExpressions: + - key: k8s-app + operator: In + values: + - kube-dns + +# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#topologyspreadconstraint-v1-core +# and supports Helm templating. +# For example: +# topologySpreadConstraints: +# - labelSelector: +# matchLabels: +# app.kubernetes.io/name: '{{ template "coredns.name" . }}' +# app.kubernetes.io/instance: '{{ .Release.Name }}' +# topologyKey: topology.kubernetes.io/zone +# maxSkew: 1 +# whenUnsatisfiable: ScheduleAnyway +# - labelSelector: +# matchLabels: +# app.kubernetes.io/name: '{{ template "coredns.name" . }}' +# app.kubernetes.io/instance: '{{ .Release.Name }}' +# topologyKey: kubernetes.io/hostname +# maxSkew: 1 +# whenUnsatisfiable: ScheduleAnyway +topologySpreadConstraints: [] + +# Node labels for pod assignment +# Ref: https://kubernetes.io/docs/user-guide/node-selection/ +nodeSelector: + kubernetes.io/os: linux + +# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core +tolerations: +- key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" +- key: "node-role.kubernetes.io/etcd" + operator: "Exists" + effect: "NoExecute" + +# https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget +podDisruptionBudget: {} + +# configure custom zone files as per https://coredns.io/2017/05/08/custom-dns-entries-for-kubernetes/ +zoneFiles: [] +# - filename: example.db +# domain: example.com +# contents: | +# example.com. IN SOA sns.dns.icann.com. noc.dns.icann.com. 2015082541 7200 3600 1209600 3600 +# example.com. IN NS b.iana-servers.net. +# example.com. IN NS a.iana-servers.net. +# example.com. IN A 192.168.99.102 +# *.example.com. IN A 192.168.99.102 + +# optional array of sidecar containers +extraContainers: [] +# - name: some-container-name +# image: some-image:latest +# imagePullPolicy: Always +# optional array of extra volumes to create +extraVolumes: [] +# - name: some-volume-name +# emptyDir: {} +# optional array of mount points for extraVolumes +extraVolumeMounts: [] +# - name: some-volume-name +# mountPath: /etc/wherever + +# optional array of secrets to mount inside coredns container +# possible usecase: need for secure connection with etcd backend +extraSecrets: [] +# - name: etcd-client-certs +# mountPath: /etc/coredns/tls/etcd +# defaultMode: 420 +# - name: some-fancy-secret +# mountPath: /etc/wherever +# defaultMode: 440 + +# To support legacy deployments using CoreDNS with the "k8s-app: kube-dns" label selectors. 
+# See https://github.com/coredns/helm/blob/master/charts/coredns/README.md#adopting-existing-coredns-resources +# k8sAppLabelOverride: "kube-dns" + +# Custom labels to apply to Deployment, Pod, Configmap, Service, ServiceMonitor. Including autoscaler if enabled. +customLabels: {} + +# Custom annotations to apply to Deployment, Pod, Configmap, Service, ServiceMonitor. Including autoscaler if enabled. +customAnnotations: {} + +## Alternative configuration for HPA deployment if wanted +## Create HorizontalPodAutoscaler object. +## +# hpa: +# enabled: false +# minReplicas: 1 +# maxReplicas: 10 +# metrics: +# metrics: +# - type: Resource +# resource: +# name: memory +# target: +# type: Utilization +# averageUtilization: 60 +# - type: Resource +# resource: +# name: cpu +# target: +# type: Utilization +# averageUtilization: 60 + +hpa: + enabled: false + minReplicas: 1 + maxReplicas: 2 + metrics: [] + +## Configue a cluster-proportional-autoscaler for coredns +# See https://github.com/kubernetes-incubator/cluster-proportional-autoscaler +autoscaler: + # Enabled the cluster-proportional-autoscaler + enabled: true + + # Number of cores in the cluster per coredns replica + coresPerReplica: 256 + # Number of nodes in the cluster per coredns replica + nodesPerReplica: 16 + # Min size of replicaCount + min: 0 + # Max size of replicaCount (default of 0 is no max) + max: 0 + # Whether to include unschedulable nodes in the nodes/cores calculations - this requires version 1.8.0+ of the autoscaler + includeUnschedulableNodes: false + # If true does not allow single points of failure to form + preventSinglePointFailure: true + + # Annotations for the coredns proportional autoscaler pods + podAnnotations: {} + + ## Optionally specify some extra flags to pass to cluster-proprtional-autoscaler. + ## Useful for e.g. the nodelabels flag. + # customFlags: + # - --nodelabels=topology.kubernetes.io/zone=us-east-1a + + image: + repository: rancher/hardened-cluster-autoscaler + tag: "v1.8.11-build20240910" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + pullSecrets: [] + # pullSecrets: + # - name: myRegistryKeySecretName + + # Optional priority class to be used for the autoscaler pods. priorityClassName used if not set. + priorityClassName: "" + + # expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core + affinity: {} + + # Node labels for pod assignment + # Ref: https://kubernetes.io/docs/user-guide/node-selection/ + nodeSelector: + kubernetes.io/os: linux + + # expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core + tolerations: + - key: "node-role.kubernetes.io/control-plane" + operator: "Exists" + effect: "NoSchedule" + - key: "node-role.kubernetes.io/etcd" + operator: "Exists" + effect: "NoExecute" + + # resources for autoscaler pod + resources: + requests: + cpu: "25m" + memory: "16Mi" + limits: + cpu: "100m" + memory: "64Mi" + + # Options for autoscaler configmap + configmap: + ## Annotations for the coredns-autoscaler configmap + # i.e. 
strategy.spinnaker.io/versioned: "false" to ensure configmap isn't renamed + annotations: {} + + # Enables the livenessProbe for cluster-proportional-autoscaler - this requires version 1.8.0+ of the autoscaler + livenessProbe: + enabled: true + initialDelaySeconds: 10 + periodSeconds: 30 + timeoutSeconds: 10 + failureThreshold: 3 + successThreshold: 1 + + # optional array of sidecar containers + extraContainers: [] + # - name: some-container-name + # image: some-image:latest + # imagePullPolicy: Always + +deployment: + enabled: true + name: "" + ## Annotations for the coredns deployment + annotations: {} + +k8sApp: "kube-dns" + +nodelocal: + enabled: false + ip_address: "169.254.20.10" + ipvs: false + # set to true, if you wish to use nodelocal with cilium in kube-proxy replacement mode. + # This sets up a Cilium Local Redirect Policy (LRP) to steer DNS traffic to the nodelocal dns cache. + # See https://docs.cilium.io/en/v1.15/network/kubernetes/local-redirect-policy/#node-local-dns-cache for reference + use_cilium_lrp: false + image: + repository: rancher/hardened-dns-node-cache + tag: "1.23.1-build20240910" + initimage: + repository: rancher/hardened-dns-node-cache + tag: "1.23.1-build20240910" + nodeSelector: + kubernetes.io/os: linux + diff --git a/index.yaml b/index.yaml index f6299ede..8b60eae5 100755 --- a/index.yaml +++ b/index.yaml @@ -8711,6 +8711,36 @@ entries: - assets/rke2-cilium/rke2-cilium-1.9.401.tgz version: 1.9.401 rke2-coredns: + - annotations: + artifacthub.io/changes: | + - kind: changed + description: Ignore duplicate strings in the fullname helper template + - kind: removed + description: Removed deprecated "engine: gotpl" from the Chart.yaml + apiVersion: v2 + appVersion: 1.11.1 + created: "2024-09-25T18:03:39.810567193Z" + description: CoreDNS is a DNS server that chains plugins and provides Kubernetes + DNS Services + digest: 6934ae207a671f8bcf87c5838bc01406b514d12bd631c3f1e58e8838baf31278 + home: https://coredns.io + icon: https://coredns.io/images/CoreDNS_Colour_Horizontal.png + keywords: + - coredns + - dns + - kubedns + maintainers: + - name: mrueg + - name: haad + - name: hagaibarel + - name: shubham-cmyk + name: rke2-coredns + sources: + - https://github.com/coredns/coredns + type: application + urls: + - assets/rke2-coredns/rke2-coredns-1.29.007.tgz + version: 1.29.007 - annotations: artifacthub.io/changes: | - kind: changed