diff --git a/k8s/manifests/charts/Makefile b/k8s/manifests/charts/Makefile new file mode 100644 index 000000000..1f54bbac2 --- /dev/null +++ b/k8s/manifests/charts/Makefile @@ -0,0 +1,76 @@ +default: help + +SOURCES_FILE_NAME = sources.txt +TMP_SOURCES_FILE_NAME := tmp_${SOURCES_FILE_NAME} +COREDNS_FILE_NAME := coredns-1.36.0 +CK_LOADBALANCER_FILE_NAME = ck-loadbalancer +CILIUM_FILE_NAME := cilium-1.16.3 +METALLB_FILE_NAME := metallb-0.14.8 +RAWFILE_CSI_FILE_NAME := rawfile-csi-0.9.0 +METRICS_SERVER_FILE_NAME := metrics-server-3.12.2 +TARGET_DIR := ../../../src/k8s/pkg/k8sd/features/values + +.PHONY: help +help: + @echo "Available targets:" + @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf " %-15s %s\n", $$1, $$2}' + + + +.PHONY: extract-values +extract-values: ## Extract values.yaml files and update sources.txt + > ${TMP_SOURCES_FILE_NAME} + + @echo "Extracting CoreDNS values..." + @tar --transform='s|coredns/values.yaml|${COREDNS_FILE_NAME}_values.yaml|' -zxf ${COREDNS_FILE_NAME}.tgz coredns/values.yaml + @echo ${COREDNS_FILE_NAME}_values.yaml >> ${TMP_SOURCES_FILE_NAME} + + @echo "Extracting Cilium values..." + @tar --transform='s|cilium/values.yaml|${CILIUM_FILE_NAME}_values.yaml|' -zxf ${CILIUM_FILE_NAME}.tgz cilium/values.yaml + @echo ${CILIUM_FILE_NAME}_values.yaml >> ${TMP_SOURCES_FILE_NAME} + + @echo "Copying CK Loadbalancer values..." + @cp ck-loadbalancer/values.yaml ${CK_LOADBALANCER_FILE_NAME}_values.yaml + @echo "${CK_LOADBALANCER_FILE_NAME}_values.yaml" >> ${TMP_SOURCES_FILE_NAME} + + @echo "Extracting MetalLB values..." + @tar --transform='s|metallb/values.yaml|${METALLB_FILE_NAME}_values.yaml|' -zxf ${METALLB_FILE_NAME}.tgz metallb/values.yaml + @echo ${METALLB_FILE_NAME}_values.yaml >> ${TMP_SOURCES_FILE_NAME} + + @echo "Extracting Rawfile CSI values..." 
+ @tar --transform='s|rawfile-csi/values.yaml|${RAWFILE_CSI_FILE_NAME}_values.yaml|' -zxf ${RAWFILE_CSI_FILE_NAME}.tgz rawfile-csi/values.yaml + @echo ${RAWFILE_CSI_FILE_NAME}_values.yaml >> ${TMP_SOURCES_FILE_NAME} + + @echo "Extracting Metrics Server values..." + @tar --transform='s|metrics-server/values.yaml|${METRICS_SERVER_FILE_NAME}_values.yaml|' -zxf ${METRICS_SERVER_FILE_NAME}.tgz metrics-server/values.yaml + @echo ${METRICS_SERVER_FILE_NAME}_values.yaml >> ${TMP_SOURCES_FILE_NAME} + + @mv ${TMP_SOURCES_FILE_NAME} ${SOURCES_FILE_NAME} + +.PHONY: clean +clean: ## Clean up. + rm -f ${SOURCES_FILE_NAME} + + rm -f ${COREDNS_FILE_NAME}_values.yaml + rm -f ${TARGET_DIR}/${COREDNS_FILE_NAME}_values.go + + rm -f ${CILIUM_FILE_NAME}_values.yaml + rm -f ${TARGET_DIR}/${CILIUM_FILE_NAME}_values.go + + rm -f ${CK_LOADBALANCER_FILE_NAME}_values.yaml + rm -f ${TARGET_DIR}/${CK_LOADBALANCER_FILE_NAME}_values.go + + rm -f ${METALLB_FILE_NAME}_values.yaml + rm -f ${TARGET_DIR}/${METALLB_FILE_NAME}_values.go + + rm -f ${RAWFILE_CSI_FILE_NAME}_values.yaml + rm -f ${TARGET_DIR}/${RAWFILE_CSI_FILE_NAME}_values.go + + rm -f ${METRICS_SERVER_FILE_NAME}_values.yaml + rm -f ${TARGET_DIR}/${METRICS_SERVER_FILE_NAME}_values.go + +.PHONY: gen +gen: ${SOURCES_FILE_NAME} + @go run generator.go -sources=${SOURCES_FILE_NAME} -pkg=values -dir=${TARGET_DIR} -advanced-types=true -unsafe-field=true + +$(SOURCES_FILE_NAME): extract-values diff --git a/k8s/manifests/charts/cilium-1.16.3_values.yaml b/k8s/manifests/charts/cilium-1.16.3_values.yaml new file mode 100644 index 000000000..a90fa5764 --- /dev/null +++ b/k8s/manifests/charts/cilium-1.16.3_values.yaml @@ -0,0 +1,3557 @@ +# File generated by install/kubernetes/Makefile; DO NOT EDIT. +# This file is based on install/kubernetes/cilium/*values.yaml.tmpl. 
+ + +# @schema +# type: [null, string] +# @schema +# -- upgradeCompatibility helps users upgrading to ensure that the configMap for +# Cilium will not change critical values to ensure continued operation +# This flag is not required for new installations. +# For example: '1.7', '1.8', '1.9' +upgradeCompatibility: null +debug: + # -- Enable debug logging + enabled: false + # @schema + # type: [null, string] + # @schema + # -- Configure verbosity levels for debug logging + # This option is used to enable debug messages for operations related to such + # sub-system such as (e.g. kvstore, envoy, datapath or policy), and flow is + # for enabling debug messages emitted per request, message and connection. + # Multiple values can be set via a space-separated string (e.g. "datapath envoy"). + # + # Applicable values: + # - flow + # - kvstore + # - envoy + # - datapath + # - policy + verbose: ~ +rbac: + # -- Enable creation of Resource-Based Access Control configuration. + create: true +# -- Configure image pull secrets for pulling container images +imagePullSecrets: [] +# - name: "image-pull-secret" + +# -- (string) Kubernetes config path +# @default -- `"~/.kube/config"` +kubeConfigPath: "" +# -- (string) Kubernetes service host - use "auto" for automatic lookup from the cluster-info ConfigMap (kubeadm-based clusters only) +k8sServiceHost: "" +# @schema +# type: [string, integer] +# @schema +# -- (string) Kubernetes service port +k8sServicePort: "" +# -- Configure the client side rate limit for the agent and operator +# +# If the amount of requests to the Kubernetes API server exceeds the configured +# rate limit, the agent and operator will start to throttle requests by delaying +# them until there is budget or the request times out. +k8sClientRateLimit: + # @schema + # type: [null, integer] + # @schema + # -- (int) The sustained request rate in requests per second. + # @default -- 5 for k8s up to 1.26. 
10 for k8s version 1.27+ + qps: + # @schema + # type: [null, integer] + # @schema + # -- (int) The burst request rate in requests per second. + # The rate limiter will allow short bursts with a higher rate. + # @default -- 10 for k8s up to 1.26. 20 for k8s version 1.27+ + burst: +cluster: + # -- Name of the cluster. Only required for Cluster Mesh and mutual authentication with SPIRE. + # It must respect the following constraints: + # * It must contain at most 32 characters; + # * It must begin and end with a lower case alphanumeric character; + # * It may contain lower case alphanumeric characters and dashes between. + # The "default" name cannot be used if the Cluster ID is different from 0. + name: default + # -- (int) Unique ID of the cluster. Must be unique across all connected + # clusters and in the range of 1 to 255. Only required for Cluster Mesh, + # may be 0 if Cluster Mesh is not used. + id: 0 +# -- Define serviceAccount names for components. +# @default -- Component's fully qualified name. +serviceAccounts: + cilium: + create: true + name: cilium + automount: true + annotations: {} + nodeinit: + create: true + # -- Enabled is temporary until https://github.com/cilium/cilium-cli/issues/1396 is implemented. + # Cilium CLI doesn't create the SAs for node-init, thus the workaround. Helm is not affected by + # this issue. Name and automount can be configured, if enabled is set to true. + # Otherwise, they are ignored. Enabled can be removed once the issue is fixed. + # Cilium-nodeinit DS must also be fixed. 
+ enabled: false + name: cilium-nodeinit + automount: true + annotations: {} + envoy: + create: true + name: cilium-envoy + automount: true + annotations: {} + operator: + create: true + name: cilium-operator + automount: true + annotations: {} + preflight: + create: true + name: cilium-pre-flight + automount: true + annotations: {} + relay: + create: true + name: hubble-relay + automount: false + annotations: {} + ui: + create: true + name: hubble-ui + automount: true + annotations: {} + clustermeshApiserver: + create: true + name: clustermesh-apiserver + automount: true + annotations: {} + # -- Clustermeshcertgen is used if clustermesh.apiserver.tls.auto.method=cronJob + clustermeshcertgen: + create: true + name: clustermesh-apiserver-generate-certs + automount: true + annotations: {} + # -- Hubblecertgen is used if hubble.tls.auto.method=cronJob + hubblecertgen: + create: true + name: hubble-generate-certs + automount: true + annotations: {} +# -- Configure termination grace period for cilium-agent DaemonSet. +terminationGracePeriodSeconds: 1 +# -- Install the cilium agent resources. +agent: true +# -- Agent container name. +name: cilium +# -- Roll out cilium agent pods automatically when configmap is updated. +rollOutCiliumPods: false +# -- Agent container image. +image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/cilium" + tag: "v1.16.3" + pullPolicy: "IfNotPresent" + # cilium-digest + digest: "sha256:62d2a09bbef840a46099ac4c69421c90f84f28d018d479749049011329aa7f28" + useDigest: true +# -- Affinity for cilium-agent. +affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + k8s-app: cilium +# -- Node selector for cilium-agent. 
+nodeSelector: + kubernetes.io/os: linux +# -- Node tolerations for agent scheduling to nodes with taints +# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ +tolerations: + - operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" +# -- The priority class to use for cilium-agent. +priorityClassName: "" +# -- DNS policy for Cilium agent pods. +# Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy +dnsPolicy: "" +# -- Additional containers added to the cilium DaemonSet. +extraContainers: [] +# -- Additional initContainers added to the cilium Daemonset. +extraInitContainers: [] +# -- Additional agent container arguments. +extraArgs: [] +# -- Additional agent container environment variables. +extraEnv: [] +# -- Additional agent hostPath mounts. +extraHostPathMounts: [] +# - name: host-mnt-data +# mountPath: /host/mnt/data +# hostPath: /mnt/data +# hostPathType: Directory +# readOnly: true +# mountPropagation: HostToContainer + +# -- Additional agent volumes. +extraVolumes: [] +# -- Additional agent volumeMounts. +extraVolumeMounts: [] +# -- extraConfig allows you to specify additional configuration parameters to be +# included in the cilium-config configmap. +extraConfig: {} +# my-config-a: "1234" +# my-config-b: |- +# test 1 +# test 2 +# test 3 + +# -- Annotations to be added to all top-level cilium-agent objects (resources under templates/cilium-agent) +annotations: {} +# -- Security Context for cilium-agent pods. 
+podSecurityContext: + # -- AppArmorProfile options for the `cilium-agent` and init containers + appArmorProfile: + type: "Unconfined" +# -- Annotations to be added to agent pods +podAnnotations: {} +# -- Labels to be added to agent pods +podLabels: {} +# -- Agent resource limits & requests +# ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ +resources: {} +# limits: +# cpu: 4000m +# memory: 4Gi +# requests: +# cpu: 100m +# memory: 512Mi + +# -- resources & limits for the agent init containers +initResources: {} +securityContext: + # -- User to run the pod with + # runAsUser: 0 + # -- Run the pod with elevated privileges + privileged: false + # -- SELinux options for the `cilium-agent` and init containers + seLinuxOptions: + level: 's0' + # Running with spc_t since we have removed the privileged mode. + # Users can change it to a different type as long as they have the + # type available on the system. + type: 'spc_t' + capabilities: + # -- Capabilities for the `cilium-agent` container + ciliumAgent: + # Use to set socket permission + - CHOWN + # Used to terminate envoy child process + - KILL + # Used since cilium modifies routing tables, etc... + - NET_ADMIN + # Used since cilium creates raw sockets, etc... + - NET_RAW + # Used since cilium monitor uses mmap + - IPC_LOCK + # Used in iptables. Consider removing once we are iptables-free + - SYS_MODULE + # Needed to switch network namespaces (used for health endpoint, socket-LB). + # We need it for now but might not need it for >= 5.11 specially + # for the 'SYS_RESOURCE'. + # In >= 5.8 there's already BPF and PERMON capabilities + - SYS_ADMIN + # Could be an alternative for the SYS_ADMIN for the RLIMIT_NPROC + - SYS_RESOURCE + # Both PERFMON and BPF requires kernel 5.8, container runtime + # cri-o >= v1.22.0 or containerd >= v1.5.0. + # If available, SYS_ADMIN can be removed. + #- PERFMON + #- BPF + # Allow discretionary access control (e.g. 
required for package installation) + - DAC_OVERRIDE + # Allow to set Access Control Lists (ACLs) on arbitrary files (e.g. required for package installation) + - FOWNER + # Allow to execute program that changes GID (e.g. required for package installation) + - SETGID + # Allow to execute program that changes UID (e.g. required for package installation) + - SETUID + # -- Capabilities for the `mount-cgroup` init container + mountCgroup: + # Only used for 'mount' cgroup + - SYS_ADMIN + # Used for nsenter + - SYS_CHROOT + - SYS_PTRACE + # -- capabilities for the `apply-sysctl-overwrites` init container + applySysctlOverwrites: + # Required in order to access host's /etc/sysctl.d dir + - SYS_ADMIN + # Used for nsenter + - SYS_CHROOT + - SYS_PTRACE + # -- Capabilities for the `clean-cilium-state` init container + cleanCiliumState: + # Most of the capabilities here are the same ones used in the + # cilium-agent's container because this container can be used to + # uninstall all Cilium resources, and therefore it is likely that + # will need the same capabilities. + # Used since cilium modifies routing tables, etc... + - NET_ADMIN + # Used in iptables. Consider removing once we are iptables-free + - SYS_MODULE + # We need it for now but might not need it for >= 5.11 specially + # for the 'SYS_RESOURCE'. + # In >= 5.8 there's already BPF and PERMON capabilities + - SYS_ADMIN + # Could be an alternative for the SYS_ADMIN for the RLIMIT_NPROC + - SYS_RESOURCE + # Both PERFMON and BPF requires kernel 5.8, container runtime + # cri-o >= v1.22.0 or containerd >= v1.5.0. + # If available, SYS_ADMIN can be removed. + #- PERFMON + #- BPF +# -- Cilium agent update strategy +updateStrategy: + type: RollingUpdate + rollingUpdate: + # @schema + # type: [integer, string] + # @schema + maxUnavailable: 2 +# Configuration Values for cilium-agent +aksbyocni: + # -- Enable AKS BYOCNI integration. 
+ # Note that this is incompatible with AKS clusters not created in BYOCNI mode: + # use Azure integration (`azure.enabled`) instead. + enabled: false +# @schema +# type: [boolean, string] +# @schema +# -- Enable installation of PodCIDR routes between worker +# nodes if worker nodes share a common L2 network segment. +autoDirectNodeRoutes: false +# -- Enable skipping of PodCIDR routes between worker +# nodes if the worker nodes are in a different L2 network segment. +directRoutingSkipUnreachable: false +# -- Annotate k8s node upon initialization with Cilium's metadata. +annotateK8sNode: false +azure: + # -- Enable Azure integration. + # Note that this is incompatible with AKS clusters created in BYOCNI mode: use + # AKS BYOCNI integration (`aksbyocni.enabled`) instead. + enabled: false + # usePrimaryAddress: false + # resourceGroup: group1 + # subscriptionID: 00000000-0000-0000-0000-000000000000 + # tenantID: 00000000-0000-0000-0000-000000000000 + # clientID: 00000000-0000-0000-0000-000000000000 + # clientSecret: 00000000-0000-0000-0000-000000000000 + # userAssignedIdentityID: 00000000-0000-0000-0000-000000000000 +alibabacloud: + # -- Enable AlibabaCloud ENI integration + enabled: false +# -- Enable bandwidth manager to optimize TCP and UDP workloads and allow +# for rate-limiting traffic from individual Pods with EDT (Earliest Departure +# Time) through the "kubernetes.io/egress-bandwidth" Pod annotation. +bandwidthManager: + # -- Enable bandwidth manager infrastructure (also prerequirement for BBR) + enabled: false + # -- Activate BBR TCP congestion control for Pods + bbr: false +# -- Configure standalone NAT46/NAT64 gateway +nat46x64Gateway: + # -- Enable RFC8215-prefixed translation + enabled: false +# -- EnableHighScaleIPcache enables the special ipcache mode for high scale +# clusters. The ipcache content will be reduced to the strict minimum and +# traffic will be encapsulated to carry security identities. 
+highScaleIPcache: + # -- Enable the high scale mode for the ipcache. + enabled: false +# -- Configure L2 announcements +l2announcements: + # -- Enable L2 announcements + enabled: false + # -- If a lease is not renewed for X duration, the current leader is considered dead, a new leader is picked + # leaseDuration: 15s + # -- The interval at which the leader will renew the lease + # leaseRenewDeadline: 5s + # -- The timeout between retries if renewal fails + # leaseRetryPeriod: 2s +# -- Configure L2 pod announcements +l2podAnnouncements: + # -- Enable L2 pod announcements + enabled: false + # -- Interface used for sending Gratuitous ARP pod announcements + interface: "eth0" +# -- Configure BGP +bgp: + # -- Enable BGP support inside Cilium; embeds a new ConfigMap for BGP inside + # cilium-agent and cilium-operator + enabled: false + announce: + # -- Enable allocation and announcement of service LoadBalancer IPs + loadbalancerIP: false + # -- Enable announcement of node pod CIDR + podCIDR: false +# -- This feature set enables virtual BGP routers to be created via +# CiliumBGPPeeringPolicy CRDs. +bgpControlPlane: + # -- Enables the BGP control plane. + enabled: false + # -- SecretsNamespace is the namespace which BGP support will retrieve secrets from. + secretsNamespace: + # -- Create secrets namespace for BGP secrets. + create: false + # -- The name of the secret namespace to which Cilium agents are given read access + name: kube-system +pmtuDiscovery: + # -- Enable path MTU discovery to send ICMP fragmentation-needed replies to + # the client. + enabled: false +bpf: + autoMount: + # -- Enable automatic mount of BPF filesystem + # When `autoMount` is enabled, the BPF filesystem is mounted at + # `bpf.root` path on the underlying host and inside the cilium agent pod. 
+ # If users disable `autoMount`, it's expected that users have mounted + # bpffs filesystem at the specified `bpf.root` volume, and then the + # volume will be mounted inside the cilium agent pod at the same path. + enabled: true + # -- Configure the mount point for the BPF filesystem + root: /sys/fs/bpf + # -- Enables pre-allocation of eBPF map values. This increases + # memory usage but can reduce latency. + preallocateMaps: false + # @schema + # type: [null, integer] + # @schema + # -- (int) Configure the maximum number of entries in auth map. + # @default -- `524288` + authMapMax: ~ + # @schema + # type: [null, integer] + # @schema + # -- (int) Configure the maximum number of entries in the TCP connection tracking + # table. + # @default -- `524288` + ctTcpMax: ~ + # @schema + # type: [null, integer] + # @schema + # -- (int) Configure the maximum number of entries for the non-TCP connection + # tracking table. + # @default -- `262144` + ctAnyMax: ~ + # -- Control events generated by the Cilium datapath exposed to Cilium monitor and Hubble. + events: + drop: + # -- Enable drop events. + enabled: true + policyVerdict: + # -- Enable policy verdict events. + enabled: true + trace: + # -- Enable trace events. + enabled: true + # @schema + # type: [null, integer] + # @schema + # -- Configure the maximum number of service entries in the + # load balancer maps. + lbMapMax: 65536 + # @schema + # type: [null, integer] + # @schema + # -- (int) Configure the maximum number of entries for the NAT table. + # @default -- `524288` + natMax: ~ + # @schema + # type: [null, integer] + # @schema + # -- (int) Configure the maximum number of entries for the neighbor table. + # @default -- `524288` + neighMax: ~ + # @schema + # type: [null, integer] + # @schema + # @default -- `16384` + # -- (int) Configures the maximum number of entries for the node table. + nodeMapMax: ~ + # -- Configure the maximum number of entries in endpoint policy map (per endpoint). 
+ # @schema + # type: [null, integer] + # @schema + policyMapMax: 16384 + # @schema + # type: [null, number] + # @schema + # -- (float64) Configure auto-sizing for all BPF maps based on available memory. + # ref: https://docs.cilium.io/en/stable/network/ebpf/maps/ + # @default -- `0.0025` + mapDynamicSizeRatio: ~ + # -- Configure the level of aggregation for monitor notifications. + # Valid options are none, low, medium, maximum. + monitorAggregation: medium + # -- Configure the typical time between monitor notifications for + # active connections. + monitorInterval: "5s" + # -- Configure which TCP flags trigger notifications when seen for the + # first time in a connection. + monitorFlags: "all" + # -- Allow cluster external access to ClusterIP services. + lbExternalClusterIP: false + # @schema + # type: [null, boolean] + # @schema + # -- (bool) Enable native IP masquerade support in eBPF + # @default -- `false` + masquerade: ~ + # @schema + # type: [null, boolean] + # @schema + # -- (bool) Configure whether direct routing mode should route traffic via + # host stack (true) or directly and more efficiently out of BPF (false) if + # the kernel supports it. The latter has the implication that it will also + # bypass netfilter in the host namespace. + # @default -- `false` + hostLegacyRouting: ~ + # @schema + # type: [null, boolean] + # @schema + # -- (bool) Configure the eBPF-based TPROXY to reduce reliance on iptables rules + # for implementing Layer 7 policy. + # @default -- `false` + tproxy: ~ + # @schema + # type: [null, array] + # @schema + # -- (list) Configure explicitly allowed VLAN id's for bpf logic bypass. + # [0] will allow all VLAN id's without any filtering. + # @default -- `[]` + vlanBypass: ~ + # -- (bool) Disable ExternalIP mitigation (CVE-2020-8554) + # @default -- `false` + disableExternalIPMitigation: false + # -- (bool) Attach endpoint programs using tcx instead of legacy tc hooks on + # supported kernels. 
+ # @default -- `true` + enableTCX: true + # -- (string) Mode for Pod devices for the core datapath (veth, netkit, netkit-l2, lb-only) + # @default -- `veth` + datapathMode: veth +# -- Enable BPF clock source probing for more efficient tick retrieval. +bpfClockProbe: false +# -- Clean all eBPF datapath state from the initContainer of the cilium-agent +# DaemonSet. +# +# WARNING: Use with care! +cleanBpfState: false +# -- Clean all local Cilium state from the initContainer of the cilium-agent +# DaemonSet. Implies cleanBpfState: true. +# +# WARNING: Use with care! +cleanState: false +# -- Wait for KUBE-PROXY-CANARY iptables rule to appear in "wait-for-kube-proxy" +# init container before launching cilium-agent. +# More context can be found in the commit message of below PR +# https://github.com/cilium/cilium/pull/20123 +waitForKubeProxy: false +cni: + # -- Install the CNI configuration and binary files into the filesystem. + install: true + # -- Remove the CNI configuration and binary files on agent shutdown. Enable this + # if you're removing Cilium from the cluster. Disable this to prevent the CNI + # configuration file from being removed during agent upgrade, which can cause + # nodes to go unmanageable. + uninstall: false + # @schema + # type: [null, string] + # @schema + # -- Configure chaining on top of other CNI plugins. Possible values: + # - none + # - aws-cni + # - flannel + # - generic-veth + # - portmap + chainingMode: ~ + # @schema + # type: [null, string] + # @schema + # -- A CNI network name in to which the Cilium plugin should be added as a chained plugin. + # This will cause the agent to watch for a CNI network with this network name. When it is + # found, this will be used as the basis for Cilium's CNI configuration file. If this is + # set, it assumes a chaining mode of generic-veth. As a special case, a chaining mode + # of aws-cni implies a chainingTarget of aws-cni. 
+ chainingTarget: ~ + # -- Make Cilium take ownership over the `/etc/cni/net.d` directory on the + # node, renaming all non-Cilium CNI configurations to `*.cilium_bak`. + # This ensures no Pods can be scheduled using other CNI plugins during Cilium + # agent downtime. + exclusive: true + # -- Configure the log file for CNI logging with retention policy of 7 days. + # Disable CNI file logging by setting this field to empty explicitly. + logFile: /var/run/cilium/cilium-cni.log + # -- Skip writing of the CNI configuration. This can be used if + # writing of the CNI configuration is performed by external automation. + customConf: false + # -- Configure the path to the CNI configuration directory on the host. + confPath: /etc/cni/net.d + # -- Configure the path to the CNI binary directory on the host. + binPath: /opt/cni/bin + # -- Specify the path to a CNI config to read from on agent start. + # This can be useful if you want to manage your CNI + # configuration outside of a Kubernetes environment. This parameter is + # mutually exclusive with the 'cni.configMap' parameter. The agent will + # write this to 05-cilium.conflist on startup. + # readCniConf: /host/etc/cni/net.d/05-sample.conflist.input + + # -- When defined, configMap will mount the provided value as ConfigMap and + # interpret the cniConf variable as CNI configuration file and write it + # when the agent starts up + # configMap: cni-configuration + + # -- Configure the key in the CNI ConfigMap to read the contents of + # the CNI configuration from. + configMapKey: cni-config + # -- Configure the path to where to mount the ConfigMap inside the agent pod. + confFileMountPath: /tmp/cni-configuration + # -- Configure the path to where the CNI configuration directory is mounted + # inside the agent pod. 
+ hostConfDirMountPath: /host/etc/cni/net.d + # -- Specifies the resources for the cni initContainer + resources: + requests: + cpu: 100m + memory: 10Mi + # -- Enable route MTU for pod netns when CNI chaining is used + enableRouteMTUForCNIChaining: false +# -- (string) Configure how frequently garbage collection should occur for the datapath +# connection tracking table. +# @default -- `"0s"` +conntrackGCInterval: "" +# -- (string) Configure the maximum frequency for the garbage collection of the +# connection tracking table. Only affects the automatic computation for the frequency +# and has no effect when 'conntrackGCInterval' is set. This can be set to more frequently +# clean up unused identities created from ToFQDN policies. +conntrackGCMaxInterval: "" +# -- (string) Configure timeout in which Cilium will exit if CRDs are not available +# @default -- `"5m"` +crdWaitTimeout: "" +# -- Tail call hooks for custom eBPF programs. +customCalls: + # -- Enable tail call hooks for custom eBPF programs. + enabled: false +daemon: + # -- Configure where Cilium runtime state should be stored. + runPath: "/var/run/cilium" + # @schema + # type: [null, string] + # @schema + # -- Configure a custom list of possible configuration override sources + # The default is "config-map:cilium-config,cilium-node-config". For supported + # values, see the help text for the build-config subcommand. + # Note that this value should be a comma-separated string. + configSources: ~ + # @schema + # type: [null, string] + # @schema + # -- allowedConfigOverrides is a list of config-map keys that can be overridden. + # That is to say, if this value is set, config sources (excepting the first one) can + # only override keys in this list. + # + # This takes precedence over blockedConfigOverrides. + # + # By default, all keys may be overridden. To disable overrides, set this to "none" or + # change the configSources variable. 
+ allowedConfigOverrides: ~ + # @schema + # type: [null, string] + # @schema + # -- blockedConfigOverrides is a list of config-map keys that may not be overridden. + # In other words, if any of these keys appear in a configuration source excepting the + # first one, they will be ignored + # + # This is ignored if allowedConfigOverrides is set. + # + # By default, all keys may be overridden. + blockedConfigOverrides: ~ +# -- Specify which network interfaces can run the eBPF datapath. This means +# that a packet sent from a pod to a destination outside the cluster will be +# masqueraded (to an output device IPv4 address), if the output device runs the +# program. When not specified, probing will automatically detect devices that have +# a non-local route. This should be used only when autodetection is not suitable. +# devices: "" + +# -- Enables experimental support for the detection of new and removed datapath +# devices. When devices change the eBPF datapath is reloaded and services updated. +# If "devices" is set then only those devices, or devices matching a wildcard will +# be considered. +# +# This option has been deprecated and is a no-op. +enableRuntimeDeviceDetection: true +# -- Forces the auto-detection of devices, even if specific devices are explicitly listed +forceDeviceDetection: false +# -- Chains to ignore when installing feeder rules. +# disableIptablesFeederRules: "" + +# -- Limit iptables-based egress masquerading to interface selector. +# egressMasqueradeInterfaces: "" + +# -- Enable setting identity mark for local traffic. +# enableIdentityMark: true + +# -- Enable Kubernetes EndpointSlice feature in Cilium if the cluster supports it. +# enableK8sEndpointSlice: true + +# -- Enable CiliumEndpointSlice feature (deprecated, please use `ciliumEndpointSlice.enabled` instead). +enableCiliumEndpointSlice: false +ciliumEndpointSlice: + # -- Enable Cilium EndpointSlice feature. 
+ enabled: false + # -- List of rate limit options to be used for the CiliumEndpointSlice controller. + # Each object in the list must have the following fields: + # nodes: Count of nodes at which to apply the rate limit. + # limit: The sustained request rate in requests per second. The maximum rate that can be configured is 50. + # burst: The burst request rate in requests per second. The maximum burst that can be configured is 100. + rateLimits: + - nodes: 0 + limit: 10 + burst: 20 + - nodes: 100 + limit: 7 + burst: 15 + - nodes: 500 + limit: 5 + burst: 10 +envoyConfig: + # -- Enable CiliumEnvoyConfig CRD + # CiliumEnvoyConfig CRD can also be implicitly enabled by other options. + enabled: false + # -- SecretsNamespace is the namespace in which envoy SDS will retrieve secrets from. + secretsNamespace: + # -- Create secrets namespace for CiliumEnvoyConfig CRDs. + create: true + # -- The name of the secret namespace to which Cilium agents are given read access. + name: cilium-secrets + # -- Interval in which an attempt is made to reconcile failed EnvoyConfigs. If the duration is zero, the retry is deactivated. + retryInterval: 15s +ingressController: + # -- Enable cilium ingress controller + # This will automatically set enable-envoy-config as well. + enabled: false + # -- Set cilium ingress controller to be the default ingress controller + # This will let cilium ingress controller route entries without ingress class set + default: false + # -- Default ingress load balancer mode + # Supported values: shared, dedicated + # For granular control, use the following annotations on the ingress resource: + # "ingress.cilium.io/loadbalancer-mode: dedicated" (or "shared"). + loadbalancerMode: dedicated + # -- Enforce https for host having matching TLS host in Ingress. + # Incoming traffic to http listener will return 308 http error code with respective location in header. + enforceHttps: true + # -- Enable proxy protocol for all Ingress listeners. 
Note that _only_ Proxy protocol traffic will be accepted once this is enabled. + enableProxyProtocol: false + # -- IngressLBAnnotations are the annotation and label prefixes, which are used to filter annotations and/or labels to propagate from Ingress to the Load Balancer service + ingressLBAnnotationPrefixes: ['lbipam.cilium.io', 'nodeipam.cilium.io', 'service.beta.kubernetes.io', 'service.kubernetes.io', 'cloud.google.com'] + # @schema + # type: [null, string] + # @schema + # -- Default secret namespace for ingresses without .spec.tls[].secretName set. + defaultSecretNamespace: + # @schema + # type: [null, string] + # @schema + # -- Default secret name for ingresses without .spec.tls[].secretName set. + defaultSecretName: + # -- SecretsNamespace is the namespace in which envoy SDS will retrieve TLS secrets from. + secretsNamespace: + # -- Create secrets namespace for Ingress. + create: true + # -- Name of Ingress secret namespace. + name: cilium-secrets + # -- Enable secret sync, which will make sure all TLS secrets used by Ingress are synced to secretsNamespace.name. + # If disabled, TLS secrets must be maintained externally. + sync: true + # -- Load-balancer service in shared mode. + # This is a single load-balancer service for all Ingress resources. 
+ service: + # -- Service name + name: cilium-ingress + # -- Labels to be added for the shared LB service + labels: {} + # -- Annotations to be added for the shared LB service + annotations: {} + # -- Service type for the shared LB service + type: LoadBalancer + # @schema + # type: [null, integer] + # @schema + # -- Configure a specific nodePort for insecure HTTP traffic on the shared LB service + insecureNodePort: ~ + # @schema + # type: [null, integer] + # @schema + # -- Configure a specific nodePort for secure HTTPS traffic on the shared LB service + secureNodePort: ~ + # @schema + # type: [null, string] + # @schema + # -- Configure a specific loadBalancerClass on the shared LB service (requires Kubernetes 1.24+) + loadBalancerClass: ~ + # @schema + # type: [null, string] + # @schema + # -- Configure a specific loadBalancerIP on the shared LB service + loadBalancerIP: ~ + # @schema + # type: [null, boolean] + # @schema + # -- Configure if node port allocation is required for LB service + # ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation + allocateLoadBalancerNodePorts: ~ + # -- Control how traffic from external sources is routed to the LoadBalancer Kubernetes Service for Cilium Ingress in shared mode. + # Valid values are "Cluster" and "Local". + # ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#external-traffic-policy + externalTrafficPolicy: Cluster + # Host Network related configuration + hostNetwork: + # -- Configure whether the Envoy listeners should be exposed on the host network. + enabled: false + # -- Configure a specific port on the host network that gets used for the shared listener. 
+ sharedListenerPort: 8080 + # Specify the nodes where the Ingress listeners should be exposed + nodes: + # -- Specify the labels of the nodes where the Ingress listeners should be exposed + # + # matchLabels: + # kubernetes.io/os: linux + # kubernetes.io/hostname: kind-worker + matchLabels: {} +gatewayAPI: + # -- Enable support for Gateway API in cilium + # This will automatically set enable-envoy-config as well. + enabled: false + # -- Enable proxy protocol for all GatewayAPI listeners. Note that _only_ Proxy protocol traffic will be accepted once this is enabled. + enableProxyProtocol: false + # -- Enable Backend Protocol selection support (GEP-1911) for Gateway API via appProtocol. + enableAppProtocol: false + # -- Enable ALPN for all listeners configured with Gateway API. ALPN will attempt HTTP/2, then HTTP 1.1. + # Note that this will also enable `appProtocol` support, and services that wish to use HTTP/2 will need to indicate that via their `appProtocol`. + enableAlpn: false + # -- The number of additional GatewayAPI proxy hops from the right side of the HTTP header to trust when determining the origin client's IP address. + xffNumTrustedHops: 0 + # -- Control how traffic from external sources is routed to the LoadBalancer Kubernetes Service for all Cilium GatewayAPI Gateway instances. Valid values are "Cluster" and "Local". + # Note that this value will be ignored when `hostNetwork.enabled == true`. + # ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#external-traffic-policy + externalTrafficPolicy: Cluster + gatewayClass: + # -- Enable creation of GatewayClass resource + # The default value is 'auto' which decides according to presence of gateway.networking.k8s.io/v1/GatewayClass in the cluster. + # Other possible values are 'true' and 'false', which will either always or never create the GatewayClass, respectively. + create: auto + # -- SecretsNamespace is the namespace in which envoy SDS will retrieve TLS secrets from. 
+ secretsNamespace: + # -- Create secrets namespace for Gateway API. + create: true + # -- Name of Gateway API secret namespace. + name: cilium-secrets + # -- Enable secret sync, which will make sure all TLS secrets used by Ingress are synced to secretsNamespace.name. + # If disabled, TLS secrets must be maintained externally. + sync: true + # Host Network related configuration + hostNetwork: + # -- Configure whether the Envoy listeners should be exposed on the host network. + enabled: false + # Specify the nodes where the Ingress listeners should be exposed + nodes: + # -- Specify the labels of the nodes where the Ingress listeners should be exposed + # + # matchLabels: + # kubernetes.io/os: linux + # kubernetes.io/hostname: kind-worker + matchLabels: {} +# -- Enables the fallback compatibility solution for when the xt_socket kernel +# module is missing and it is needed for the datapath L7 redirection to work +# properly. See documentation for details on when this can be disabled: +# https://docs.cilium.io/en/stable/operations/system_requirements/#linux-kernel. +enableXTSocketFallback: true +encryption: + # -- Enable transparent network encryption. + enabled: false + # -- Encryption method. Can be either ipsec or wireguard. + type: ipsec + # -- Enable encryption for pure node to node traffic. + # This option is only effective when encryption.type is set to "wireguard". + nodeEncryption: false + # -- Configure the WireGuard Pod2Pod strict mode. + strictMode: + # -- Enable WireGuard Pod2Pod strict mode. + enabled: false + # -- CIDR for the WireGuard Pod2Pod strict mode. + cidr: "" + # -- Allow dynamic lookup of remote node identities. + # This is required when tunneling is used or direct routing is used and the node CIDR and pod CIDR overlap. + allowRemoteNodeIdentities: false + ipsec: + # -- Name of the key file inside the Kubernetes secret configured via secretName. + keyFile: keys + # -- Path to mount the secret inside the Cilium pod. 
+ mountPath: /etc/ipsec + # -- Name of the Kubernetes secret containing the encryption keys. + secretName: cilium-ipsec-keys + # -- The interface to use for encrypted traffic. + interface: "" + # -- Enable the key watcher. If disabled, a restart of the agent will be + # necessary on key rotations. + keyWatcher: true + # -- Maximum duration of the IPsec key rotation. The previous key will be + # removed after that delay. + keyRotationDuration: "5m" + # -- Enable IPsec encrypted overlay + encryptedOverlay: false + wireguard: + # -- Enables the fallback to the user-space implementation (deprecated). + userspaceFallback: false + # -- Controls WireGuard PersistentKeepalive option. Set 0s to disable. + persistentKeepalive: 0s +endpointHealthChecking: + # -- Enable connectivity health checking between virtual endpoints. + enabled: true +endpointRoutes: + # @schema + # type: [boolean, string] + # @schema + # -- Enable use of per endpoint routes instead of routing via + # the cilium_host interface. + enabled: false +k8sNetworkPolicy: + # -- Enable support for K8s NetworkPolicy + enabled: true +eni: + # -- Enable Elastic Network Interface (ENI) integration. + enabled: false + # -- Update ENI Adapter limits from the EC2 API + updateEC2AdapterLimitViaAPI: true + # -- Release IPs not used from the ENI + awsReleaseExcessIPs: false + # -- Enable ENI prefix delegation + awsEnablePrefixDelegation: false + # -- EC2 API endpoint to use + ec2APIEndpoint: "" + # -- Tags to apply to the newly created ENIs + eniTags: {} + # -- Interval for garbage collection of unattached ENIs. Set to "0s" to disable. + # @default -- `"5m"` + gcInterval: "" + # -- Additional tags attached to ENIs created by Cilium. + # Dangling ENIs with this tag will be garbage collected + # @default -- `{"io.cilium/cilium-managed":"true,"io.cilium/cluster-name":""}` + gcTags: {} + # -- If using IAM role for Service Accounts will not try to + # inject identity values from cilium-aws kubernetes secret. 
+ # Adds annotation to service account if managed by Helm. + # See https://github.com/aws/amazon-eks-pod-identity-webhook + iamRole: "" + # -- Filter via subnet IDs which will dictate which subnets are going to be used to create new ENIs + # Important note: This requires that each instance has an ENI with a matching subnet attached + # when Cilium is deployed. If you only want to control subnets for ENIs attached by Cilium, + # use the CNI configuration file settings (cni.customConf) instead. + subnetIDsFilter: [] + # -- Filter via tags (k=v) which will dictate which subnets are going to be used to create new ENIs + # Important note: This requires that each instance has an ENI with a matching subnet attached + # when Cilium is deployed. If you only want to control subnets for ENIs attached by Cilium, + # use the CNI configuration file settings (cni.customConf) instead. + subnetTagsFilter: [] + # -- Filter via AWS EC2 Instance tags (k=v) which will dictate which AWS EC2 Instances + # are going to be used to create new ENIs + instanceTagsFilter: [] +externalIPs: + # -- Enable ExternalIPs service support. + enabled: false +# fragmentTracking enables IPv4 fragment tracking support in the datapath. +# fragmentTracking: true +gke: + # -- Enable Google Kubernetes Engine integration + enabled: false +# -- Enable connectivity health checking. +healthChecking: true +# -- TCP port for the agent health API. This is not the port for cilium-health. +healthPort: 9879 +# -- Configure the host firewall. +hostFirewall: + # -- Enables the enforcement of host policies in the eBPF datapath. + enabled: false +hostPort: + # -- Enable hostPort service support. + enabled: false +# -- Configure socket LB +socketLB: + # -- Enable socket LB + enabled: false + # -- Disable socket lb for non-root ns. This is used to enable Istio routing rules. + # hostNamespaceOnly: false + # -- Enable terminating pod connections to deleted service backends. 
+ # terminatePodConnections: true +# -- Configure certificate generation for Hubble integration. +# If hubble.tls.auto.method=cronJob, these values are used +# for the Kubernetes CronJob which will be scheduled regularly to +# (re)generate any certificates not provided manually. +certgen: + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/certgen" + tag: "v0.2.0" + digest: "sha256:169d93fd8f2f9009db3b9d5ccd37c2b753d0989e1e7cd8fe79f9160c459eef4f" + useDigest: true + pullPolicy: "IfNotPresent" + # -- Seconds after which the completed job pod will be deleted + ttlSecondsAfterFinished: 1800 + # -- Labels to be added to hubble-certgen pods + podLabels: {} + # -- Annotations to be added to the hubble-certgen initial Job and CronJob + annotations: + job: {} + cronJob: {} + # -- Node tolerations for pod assignment on nodes with taints + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: [] + # -- Additional certgen volumes. + extraVolumes: [] + # -- Additional certgen volumeMounts. + extraVolumeMounts: [] + # -- Affinity for certgen + affinity: {} +hubble: + # -- Enable Hubble (true by default). + enabled: true + # -- Annotations to be added to all top-level hubble objects (resources under templates/hubble) + annotations: {} + # -- Buffer size of the channel Hubble uses to receive monitor events. If this + # value is not set, the queue size is set to the default monitor queue size. + # eventQueueSize: "" + + # -- Number of recent flows for Hubble to cache. Defaults to 4095. + # Possible values are: + # 1, 3, 7, 15, 31, 63, 127, 255, 511, 1023, + # 2047, 4095, 8191, 16383, 32767, 65535 + # eventBufferCapacity: "4095" + + # -- Hubble metrics configuration. + # See https://docs.cilium.io/en/stable/observability/metrics/#hubble-metrics + # for more comprehensive documentation about Hubble metrics. 
+ metrics: + # @schema + # type: [null, array] + # @schema + # -- Configures the list of metrics to collect. If empty or null, metrics + # are disabled. + # Example: + # + # enabled: + # - dns:query;ignoreAAAA + # - drop + # - tcp + # - flow + # - icmp + # - http + # + # You can specify the list of metrics from the helm CLI: + # + # --set hubble.metrics.enabled="{dns:query;ignoreAAAA,drop,tcp,flow,icmp,http}" + # + enabled: ~ + # -- Enables exporting hubble metrics in OpenMetrics format. + enableOpenMetrics: false + # -- Configure the port the hubble metric server listens on. + port: 9965 + tls: + # Enable hubble metrics server TLS. + enabled: false + # Configure hubble metrics server TLS. + server: + # -- Name of the Secret containing the certificate and key for the Hubble metrics server. + # If specified, cert and key are ignored. + existingSecret: "" + # -- base64 encoded PEM values for the Hubble metrics server certificate (deprecated). + # Use existingSecret instead. + cert: "" + # -- base64 encoded PEM values for the Hubble metrics server key (deprecated). + # Use existingSecret instead. + key: "" + # -- Extra DNS names added to certificate when it's auto generated + extraDnsNames: [] + # -- Extra IP addresses added to certificate when it's auto generated + extraIpAddresses: [] + # -- Configure mTLS for the Hubble metrics server. + mtls: + # When set to true enforces mutual TLS between Hubble Metrics server and its clients. + # False allow non-mutual TLS connections. + # This option has no effect when TLS is disabled. + enabled: false + useSecret: false + # -- Name of the ConfigMap containing the CA to validate client certificates against. + # If mTLS is enabled and this is unspecified, it will default to the + # same CA used for Hubble metrics server certificates. + name: ~ + # -- Entry of the ConfigMap containing the CA. + key: ca.crt + # -- Annotations to be added to hubble-metrics service. 
+ serviceAnnotations: {} + serviceMonitor: + # -- Create ServiceMonitor resources for Prometheus Operator. + # This requires the prometheus CRDs to be available. + # ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) + enabled: false + # -- Labels to add to ServiceMonitor hubble + labels: {} + # -- Annotations to add to ServiceMonitor hubble + annotations: {} + # -- jobLabel to add for ServiceMonitor hubble + jobLabel: "" + # -- Interval for scrape metrics. + interval: "10s" + # -- Relabeling configs for the ServiceMonitor hubble + relabelings: + - sourceLabels: + - __meta_kubernetes_pod_node_name + targetLabel: node + replacement: ${1} + # @schema + # type: [null, array] + # @schema + # -- Metrics relabeling configs for the ServiceMonitor hubble + metricRelabelings: ~ + # Configure TLS for the ServiceMonitor. + # Note, when using TLS you will either need to specify + # tlsConfig.insecureSkipVerify or specify a CA to use. + tlsConfig: {} + # -- Grafana dashboards for hubble + # grafana can import dashboards based on the label and value + # ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards + dashboards: + enabled: false + label: grafana_dashboard + # @schema + # type: [null, string] + # @schema + namespace: ~ + labelValue: "1" + annotations: {} + # -- Unix domain socket path to listen to when Hubble is enabled. + socketPath: /var/run/cilium/hubble.sock + # -- Enables redacting sensitive information present in Layer 7 flows. + redact: + enabled: false + http: + # -- Enables redacting URL query (GET) parameters. + # Example: + # + # redact: + # enabled: true + # http: + # urlQuery: true + # + # You can specify the options from the helm CLI: + # + # --set hubble.redact.enabled="true" + # --set hubble.redact.http.urlQuery="true" + urlQuery: false + # -- Enables redacting user info, e.g., password when basic auth is used. 
+ # Example: + # + # redact: + # enabled: true + # http: + # userInfo: true + # + # You can specify the options from the helm CLI: + # + # --set hubble.redact.enabled="true" + # --set hubble.redact.http.userInfo="true" + userInfo: true + headers: + # -- List of HTTP headers to allow: headers not matching will be redacted. Note: `allow` and `deny` lists cannot be used both at the same time, only one can be present. + # Example: + # redact: + # enabled: true + # http: + # headers: + # allow: + # - traceparent + # - tracestate + # - Cache-Control + # + # You can specify the options from the helm CLI: + # --set hubble.redact.enabled="true" + # --set hubble.redact.http.headers.allow="traceparent,tracestate,Cache-Control" + allow: [] + # -- List of HTTP headers to deny: matching headers will be redacted. Note: `allow` and `deny` lists cannot be used both at the same time, only one can be present. + # Example: + # redact: + # enabled: true + # http: + # headers: + # deny: + # - Authorization + # - Proxy-Authorization + # + # You can specify the options from the helm CLI: + # --set hubble.redact.enabled="true" + # --set hubble.redact.http.headers.deny="Authorization,Proxy-Authorization" + deny: [] + kafka: + # -- Enables redacting Kafka's API key. + # Example: + # + # redact: + # enabled: true + # kafka: + # apiKey: true + # + # You can specify the options from the helm CLI: + # + # --set hubble.redact.enabled="true" + # --set hubble.redact.kafka.apiKey="true" + apiKey: false + # -- An additional address for Hubble to listen to. + # Set this field ":4244" if you are enabling Hubble Relay, as it assumes that + # Hubble is listening on port 4244. + listenAddress: ":4244" + # -- Whether Hubble should prefer to announce IPv6 or IPv4 addresses if both are available. 
+ preferIpv6: false + # @schema + # type: [null, boolean] + # @schema + # -- (bool) Skip Hubble events with unknown cgroup ids + # @default -- `true` + skipUnknownCGroupIDs: ~ + peerService: + # -- Service Port for the Peer service. + # If not set, it is dynamically assigned to port 443 if TLS is enabled and to + # port 80 if not. + # servicePort: 80 + # -- Target Port for the Peer service, must match the hubble.listenAddress' + # port. + targetPort: 4244 + # -- The cluster domain to use to query the Hubble Peer service. It should + # be the local cluster. + clusterDomain: cluster.local + # -- TLS configuration for Hubble + tls: + # -- Enable mutual TLS for listenAddress. Setting this value to false is + # highly discouraged as the Hubble API provides access to potentially + # sensitive network flow metadata and is exposed on the host network. + enabled: true + # -- Configure automatic TLS certificates generation. + auto: + # -- Auto-generate certificates. + # When set to true, automatically generate a CA and certificates to + # enable mTLS between Hubble server and Hubble Relay instances. If set to + # false, the certs for Hubble server need to be provided by setting + # appropriate values below. + enabled: true + # -- Set the method to auto-generate certificates. Supported values: + # - helm: This method uses Helm to generate all certificates. + # - cronJob: This method uses a Kubernetes CronJob the generate any + # certificates not provided by the user at installation + # time. + # - certmanager: This method use cert-manager to generate & rotate certificates. + method: helm + # -- Generated certificates validity duration in days. + certValidityDuration: 1095 + # -- Schedule for certificates regeneration (regardless of their expiration date). + # Only used if method is "cronJob". If nil, then no recurring job will be created. + # Instead, only the one-shot job is deployed to generate the certificates at + # installation time. 
+ # + # Defaults to midnight of the first day of every fourth month. For syntax, see + # https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#schedule-syntax + schedule: "0 0 1 */4 *" + # [Example] + # certManagerIssuerRef: + # group: cert-manager.io + # kind: ClusterIssuer + # name: ca-issuer + # -- certmanager issuer used when hubble.tls.auto.method=certmanager. + certManagerIssuerRef: {} + # -- The Hubble server certificate and private key + server: + # -- Name of the Secret containing the certificate and key for the Hubble server. + # If specified, cert and key are ignored. + existingSecret: "" + # -- base64 encoded PEM values for the Hubble server certificate (deprecated). + # Use existingSecret instead. + cert: "" + # -- base64 encoded PEM values for the Hubble server key (deprecated). + # Use existingSecret instead. + key: "" + # -- Extra DNS names added to certificate when it's auto generated + extraDnsNames: [] + # -- Extra IP addresses added to certificate when it's auto generated + extraIpAddresses: [] + relay: + # -- Enable Hubble Relay (requires hubble.enabled=true) + enabled: false + # -- Roll out Hubble Relay pods automatically when configmap is updated. + rollOutPods: false + # -- Hubble-relay container image. + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/hubble-relay" + tag: "v1.16.3" + # hubble-relay-digest + digest: "sha256:feb60efd767e0e7863a94689f4a8db56a0acc7c1d2b307dee66422e3dc25a089" + useDigest: true + pullPolicy: "IfNotPresent" + # -- Specifies the resources for the hubble-relay pods + resources: {} + # -- Number of replicas run for the hubble-relay deployment. 
+ replicas: 1 + # -- Affinity for hubble-replay + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + k8s-app: cilium + # -- Pod topology spread constraints for hubble-relay + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + + # -- Node labels for pod assignment + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + nodeSelector: + kubernetes.io/os: linux + # -- Node tolerations for pod assignment on nodes with taints + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: [] + # -- Additional hubble-relay environment variables. + extraEnv: [] + # -- Annotations to be added to all top-level hubble-relay objects (resources under templates/hubble-relay) + annotations: {} + # -- Annotations to be added to hubble-relay pods + podAnnotations: {} + # -- Labels to be added to hubble-relay pods + podLabels: {} + # PodDisruptionBudget settings + podDisruptionBudget: + # -- enable PodDisruptionBudget + # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + enabled: false + # @schema + # type: [null, integer, string] + # @schema + # -- Minimum number/percentage of pods that should remain scheduled. + # When it's set, maxUnavailable must be disabled by `maxUnavailable: null` + minAvailable: null + # @schema + # type: [null, integer, string] + # @schema + # -- Maximum number/percentage of pods that may be made unavailable + maxUnavailable: 1 + # -- The priority class to use for hubble-relay + priorityClassName: "" + # -- Configure termination grace period for hubble relay Deployment. 
+ terminationGracePeriodSeconds: 1 + # -- hubble-relay update strategy + updateStrategy: + type: RollingUpdate + rollingUpdate: + # @schema + # type: [integer, string] + # @schema + maxUnavailable: 1 + # -- Additional hubble-relay volumes. + extraVolumes: [] + # -- Additional hubble-relay volumeMounts. + extraVolumeMounts: [] + # -- hubble-relay pod security context + podSecurityContext: + fsGroup: 65532 + # -- hubble-relay container security context + securityContext: + # readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 65532 + runAsGroup: 65532 + capabilities: + drop: + - ALL + # -- hubble-relay service configuration. + service: + # --- The type of service used for Hubble Relay access, either ClusterIP or NodePort. + type: ClusterIP + # --- The port to use when the service type is set to NodePort. + nodePort: 31234 + # -- Host to listen to. Specify an empty string to bind to all the interfaces. + listenHost: "" + # -- Port to listen to. + listenPort: "4245" + # -- TLS configuration for Hubble Relay + tls: + # -- The hubble-relay client certificate and private key. + # This keypair is presented to Hubble server instances for mTLS + # authentication and is required when hubble.tls.enabled is true. + # These values need to be set manually if hubble.tls.auto.enabled is false. + client: + # -- Name of the Secret containing the certificate and key for the Hubble metrics server. + # If specified, cert and key are ignored. + existingSecret: "" + # -- base64 encoded PEM values for the Hubble relay client certificate (deprecated). + # Use existingSecret instead. + cert: "" + # -- base64 encoded PEM values for the Hubble relay client key (deprecated). + # Use existingSecret instead. + key: "" + # -- The hubble-relay server certificate and private key + server: + # When set to true, enable TLS on for Hubble Relay server + # (ie: for clients connecting to the Hubble Relay API). 
+ enabled: false + # When set to true enforces mutual TLS between Hubble Relay server and its clients. + # False allow non-mutual TLS connections. + # This option has no effect when TLS is disabled. + mtls: false + # -- Name of the Secret containing the certificate and key for the Hubble relay server. + # If specified, cert and key are ignored. + existingSecret: "" + # -- base64 encoded PEM values for the Hubble relay server certificate (deprecated). + # Use existingSecret instead. + cert: "" + # -- base64 encoded PEM values for the Hubble relay server key (deprecated). + # Use existingSecret instead. + key: "" + # -- extra DNS names added to certificate when its auto gen + extraDnsNames: [] + # -- extra IP addresses added to certificate when its auto gen + extraIpAddresses: [] + # DNS name used by the backend to connect to the relay + # This is a simple workaround as the relay certificates are currently hardcoded to + # *.hubble-relay.cilium.io + # See https://github.com/cilium/cilium/pull/28709#discussion_r1371792546 + # For GKE Dataplane V2 this should be set to relay.kube-system.svc.cluster.local + relayName: "ui.hubble-relay.cilium.io" + # @schema + # type: [null, string] + # @schema + # -- Dial timeout to connect to the local hubble instance to receive peer information (e.g. "30s"). + dialTimeout: ~ + # @schema + # type: [null, string] + # @schema + # -- Backoff duration to retry connecting to the local hubble instance in case of failure (e.g. "30s"). + retryTimeout: ~ + # @schema + # type: [null, integer] + # @schema + # -- (int) Max number of flows that can be buffered for sorting before being sent to the + # client (per request) (e.g. 100). + sortBufferLenMax: ~ + # @schema + # type: [null, string] + # @schema + # -- When the per-request flows sort buffer is not full, a flow is drained every + # time this timeout is reached (only affects requests in follow-mode) (e.g. "1s"). 
+ sortBufferDrainTimeout: ~ + # -- Port to use for the k8s service backed by hubble-relay pods. + # If not set, it is dynamically assigned to port 443 if TLS is enabled and to + # port 80 if not. + # servicePort: 80 + + # -- Enable prometheus metrics for hubble-relay on the configured port at + # /metrics + prometheus: + enabled: false + port: 9966 + serviceMonitor: + # -- Enable service monitors. + # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) + enabled: false + # -- Labels to add to ServiceMonitor hubble-relay + labels: {} + # -- Annotations to add to ServiceMonitor hubble-relay + annotations: {} + # -- Interval for scrape metrics. + interval: "10s" + # -- Specify the Kubernetes namespace where Prometheus expects to find + # service monitors configured. + # namespace: "" + # @schema + # type: [null, array] + # @schema + # -- Relabeling configs for the ServiceMonitor hubble-relay + relabelings: ~ + # @schema + # type: [null, array] + # @schema + # -- Metrics relabeling configs for the ServiceMonitor hubble-relay + metricRelabelings: ~ + gops: + # -- Enable gops for hubble-relay + enabled: true + # -- Configure gops listen port for hubble-relay + port: 9893 + pprof: + # -- Enable pprof for hubble-relay + enabled: false + # -- Configure pprof listen address for hubble-relay + address: localhost + # -- Configure pprof listen port for hubble-relay + port: 6062 + ui: + # -- Whether to enable the Hubble UI. + enabled: false + standalone: + # -- When true, it will allow installing the Hubble UI only, without checking dependencies. + # It is useful if a cluster already has cilium and Hubble relay installed and you just + # want Hubble UI to be deployed. 
+ # When installed via helm, installing UI should be done via `helm upgrade` and when installed via the cilium cli, then `cilium hubble enable --ui` + enabled: false + tls: + # -- When deploying Hubble UI in standalone, with tls enabled for Hubble relay, it is required + # to provide a volume for mounting the client certificates. + certsVolume: {} + # projected: + # defaultMode: 0400 + # sources: + # - secret: + # name: hubble-ui-client-certs + # items: + # - key: tls.crt + # path: client.crt + # - key: tls.key + # path: client.key + # - key: ca.crt + # path: hubble-relay-ca.crt + # -- Roll out Hubble-ui pods automatically when configmap is updated. + rollOutPods: false + tls: + client: + # -- Name of the Secret containing the client certificate and key for Hubble UI + # If specified, cert and key are ignored. + existingSecret: "" + # -- base64 encoded PEM values for the Hubble UI client certificate (deprecated). + # Use existingSecret instead. + cert: "" + # -- base64 encoded PEM values for the Hubble UI client key (deprecated). + # Use existingSecret instead. + key: "" + backend: + # -- Hubble-ui backend image. + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/hubble-ui-backend" + tag: "v0.13.1" + digest: "sha256:0e0eed917653441fded4e7cdb096b7be6a3bddded5a2dd10812a27b1fc6ed95b" + useDigest: true + pullPolicy: "IfNotPresent" + # -- Hubble-ui backend security context. + securityContext: {} + # -- Additional hubble-ui backend environment variables. + extraEnv: [] + # -- Additional hubble-ui backend volumes. + extraVolumes: [] + # -- Additional hubble-ui backend volumeMounts. + extraVolumeMounts: [] + livenessProbe: + # -- Enable liveness probe for Hubble-ui backend (requires Hubble-ui 0.12+) + enabled: false + readinessProbe: + # -- Enable readiness probe for Hubble-ui backend (requires Hubble-ui 0.12+) + enabled: false + # -- Resource requests and limits for the 'backend' container of the 'hubble-ui' deployment. 
+ resources: {} + # limits: + # cpu: 1000m + # memory: 1024M + # requests: + # cpu: 100m + # memory: 64Mi + frontend: + # -- Hubble-ui frontend image. + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/hubble-ui" + tag: "v0.13.1" + digest: "sha256:e2e9313eb7caf64b0061d9da0efbdad59c6c461f6ca1752768942bfeda0796c6" + useDigest: true + pullPolicy: "IfNotPresent" + # -- Hubble-ui frontend security context. + securityContext: {} + # -- Additional hubble-ui frontend environment variables. + extraEnv: [] + # -- Additional hubble-ui frontend volumes. + extraVolumes: [] + # -- Additional hubble-ui frontend volumeMounts. + extraVolumeMounts: [] + # -- Resource requests and limits for the 'frontend' container of the 'hubble-ui' deployment. + resources: {} + # limits: + # cpu: 1000m + # memory: 1024M + # requests: + # cpu: 100m + # memory: 64Mi + server: + # -- Controls server listener for ipv6 + ipv6: + enabled: true + # -- The number of replicas of Hubble UI to deploy. + replicas: 1 + # -- Annotations to be added to all top-level hubble-ui objects (resources under templates/hubble-ui) + annotations: {} + # -- Annotations to be added to hubble-ui pods + podAnnotations: {} + # -- Labels to be added to hubble-ui pods + podLabels: {} + # PodDisruptionBudget settings + podDisruptionBudget: + # -- enable PodDisruptionBudget + # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + enabled: false + # @schema + # type: [null, integer, string] + # @schema + # -- Minimum number/percentage of pods that should remain scheduled. 
+ # When it's set, maxUnavailable must be disabled by `maxUnavailable: null` + minAvailable: null + # @schema + # type: [null, integer, string] + # @schema + # -- Maximum number/percentage of pods that may be made unavailable + maxUnavailable: 1 + # -- Affinity for hubble-ui + affinity: {} + # -- Pod topology spread constraints for hubble-ui + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + + # -- Node labels for pod assignment + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + nodeSelector: + kubernetes.io/os: linux + # -- Node tolerations for pod assignment on nodes with taints + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: [] + # -- The priority class to use for hubble-ui + priorityClassName: "" + # -- hubble-ui update strategy. + updateStrategy: + type: RollingUpdate + rollingUpdate: + # @schema + # type: [integer, string] + # @schema + maxUnavailable: 1 + # -- Security context to be added to Hubble UI pods + securityContext: + runAsUser: 1001 + runAsGroup: 1001 + fsGroup: 1001 + # -- hubble-ui service configuration. + service: + # -- Annotations to be added for the Hubble UI service + annotations: {} + # --- The type of service used for Hubble UI access, either ClusterIP or NodePort. + type: ClusterIP + # --- The port to use when the service type is set to NodePort. + nodePort: 31235 + # -- Defines base url prefix for all hubble-ui http requests. + # It needs to be changed in case if ingress for hubble-ui is configured under some sub-path. + # Trailing `/` is required for custom path, ex. `/service-map/` + baseUrl: "/" + # -- hubble-ui ingress configuration. 
+ ingress: + enabled: false + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + className: "" + hosts: + - chart-example.local + labels: {} + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + # -- Hubble flows export. + export: + # --- Defines max file size of output file before it gets rotated. + fileMaxSizeMb: 10 + # --- Defines max number of backup/rotated files. + fileMaxBackups: 5 + # --- Static exporter configuration. + # Static exporter is bound to agent lifecycle. + static: + enabled: false + filePath: /var/run/cilium/hubble/events.log + fieldMask: [] + # - time + # - source + # - destination + # - verdict + allowList: [] + # - '{"verdict":["DROPPED","ERROR"]}' + denyList: [] + # - '{"source_pod":["kube-system/"]}' + # - '{"destination_pod":["kube-system/"]}' + # --- Dynamic exporters configuration. + # Dynamic exporters may be reconfigured without a need of agent restarts. + dynamic: + enabled: false + config: + # ---- Name of configmap with configuration that may be altered to reconfigure exporters within a running agents. + configMapName: cilium-flowlog-config + # ---- True if helm installer should create config map. + # Switch to false if you want to self maintain the file content. + createConfigMap: true + # ---- Exporters configuration in YAML format. + content: + - name: all + fieldMask: [] + includeFilters: [] + excludeFilters: [] + filePath: "/var/run/cilium/hubble/events.log" + # - name: "test002" + # filePath: "/var/log/network/flow-log/pa/test002.log" + # fieldMask: ["source.namespace", "source.pod_name", "destination.namespace", "destination.pod_name", "verdict"] + # includeFilters: + # - source_pod: ["default/"] + # event_type: + # - type: 1 + # - destination_pod: ["frontend/nginx-975996d4c-7hhgt"] + # excludeFilters: [] + # end: "2023-10-09T23:59:59-07:00" + # -- Emit v1.Events related to pods on detection of packet drops. 
+ # This feature is alpha, please provide feedback at https://github.com/cilium/cilium/issues/33975. + dropEventEmitter: + enabled: false + # --- Minimum time between emitting same events. + interval: 2m + # --- Drop reasons to emit events for. + # ref: https://docs.cilium.io/en/stable/_api/v1/flow/README/#dropreason + reasons: + - auth_required + - policy_denied +# -- Method to use for identity allocation (`crd` or `kvstore`). +identityAllocationMode: "crd" +# -- (string) Time to wait before using new identity on endpoint identity change. +# @default -- `"5s"` +identityChangeGracePeriod: "" +# -- Install Iptables rules to skip netfilter connection tracking on all pod +# traffic. This option is only effective when Cilium is running in direct +# routing and full KPR mode. Moreover, this option cannot be enabled when Cilium +# is running in a managed Kubernetes environment or in a chained CNI setup. +installNoConntrackIptablesRules: false +ipam: + # -- Configure IP Address Management mode. + # ref: https://docs.cilium.io/en/stable/network/concepts/ipam/ + mode: "cluster-pool" + # -- Maximum rate at which the CiliumNode custom resource is updated. + ciliumNodeUpdateRate: "15s" + operator: + # @schema + # type: [array, string] + # @schema + # -- IPv4 CIDR list range to delegate to individual nodes for IPAM. + clusterPoolIPv4PodCIDRList: ["10.0.0.0/8"] + # -- IPv4 CIDR mask size to delegate to individual nodes for IPAM. + clusterPoolIPv4MaskSize: 24 + # @schema + # type: [array, string] + # @schema + # -- IPv6 CIDR list range to delegate to individual nodes for IPAM. + clusterPoolIPv6PodCIDRList: ["fd00::/104"] + # -- IPv6 CIDR mask size to delegate to individual nodes for IPAM. + clusterPoolIPv6MaskSize: 120 + # -- IP pools to auto-create in multi-pool IPAM mode. 
+ autoCreateCiliumPodIPPools: {} + # default: + # ipv4: + # cidrs: + # - 10.10.0.0/8 + # maskSize: 24 + # other: + # ipv6: + # cidrs: + # - fd00:100::/80 + # maskSize: 96 + # @schema + # type: [null, integer] + # @schema + # -- (int) The maximum burst size when rate limiting access to external APIs. + # Also known as the token bucket capacity. + # @default -- `20` + externalAPILimitBurstSize: ~ + # @schema + # type: [null, number] + # @schema + # -- (float) The maximum queries per second when rate limiting access to + # external APIs. Also known as the bucket refill rate, which is used to + # refill the bucket up to the burst size capacity. + # @default -- `4.0` + externalAPILimitQPS: ~ +nodeIPAM: + # -- Configure Node IPAM + # ref: https://docs.cilium.io/en/stable/network/node-ipam/ + enabled: false +# @schema +# type: [null, string] +# @schema +# -- The api-rate-limit option can be used to overwrite individual settings of the default configuration for rate limiting calls to the Cilium Agent API +apiRateLimit: ~ +# -- Configure the eBPF-based ip-masq-agent +ipMasqAgent: + enabled: false +# the config of nonMasqueradeCIDRs +# config: +# nonMasqueradeCIDRs: [] +# masqLinkLocal: false +# masqLinkLocalIPv6: false + +# iptablesLockTimeout defines the iptables "--wait" option when invoked from Cilium. +# iptablesLockTimeout: "5s" +ipv4: + # -- Enable IPv4 support. + enabled: true +ipv6: + # -- Enable IPv6 support. + enabled: false +# -- Configure Kubernetes specific configuration +k8s: + # -- requireIPv4PodCIDR enables waiting for Kubernetes to provide the PodCIDR + # range via the Kubernetes node resource + requireIPv4PodCIDR: false + # -- requireIPv6PodCIDR enables waiting for Kubernetes to provide the PodCIDR + # range via the Kubernetes node resource + requireIPv6PodCIDR: false +# -- Keep the deprecated selector labels when deploying Cilium DaemonSet. 
+keepDeprecatedLabels: false +# -- Keep the deprecated probes when deploying Cilium DaemonSet +keepDeprecatedProbes: false +startupProbe: + # -- failure threshold of startup probe. + # 105 x 2s translates to the old behaviour of the readiness probe (120s delay + 30 x 3s) + failureThreshold: 105 + # -- interval between checks of the startup probe + periodSeconds: 2 +livenessProbe: + # -- failure threshold of liveness probe + failureThreshold: 10 + # -- interval between checks of the liveness probe + periodSeconds: 30 +readinessProbe: + # -- failure threshold of readiness probe + failureThreshold: 3 + # -- interval between checks of the readiness probe + periodSeconds: 30 +# -- Configure the kube-proxy replacement in Cilium BPF datapath +# Valid options are "true" or "false". +# ref: https://docs.cilium.io/en/stable/network/kubernetes/kubeproxy-free/ +#kubeProxyReplacement: "false" + +# -- healthz server bind address for the kube-proxy replacement. +# To enable set the value to '0.0.0.0:10256' for all ipv4 +# addresses and this '[::]:10256' for all ipv6 addresses. +# By default it is disabled. +kubeProxyReplacementHealthzBindAddr: "" +l2NeighDiscovery: + # -- Enable L2 neighbor discovery in the agent + enabled: true + # -- Override the agent's default neighbor resolution refresh period. + refreshPeriod: "30s" +# -- Enable Layer 7 network policy. +l7Proxy: true +# -- Enable Local Redirect Policy. +localRedirectPolicy: false +# To include or exclude matched resources from cilium identity evaluation +# labels: "" + +# logOptions allows you to define logging options. 
eg: +# logOptions: +# format: json + +# -- Enables periodic logging of system load +logSystemLoad: false +# -- Configure maglev consistent hashing +maglev: {} +# -- tableSize is the size (parameter M) for the backend table of one +# service entry +# tableSize: + +# -- hashSeed is the cluster-wide base64 encoded seed for the hashing +# hashSeed: + +# -- Enables masquerading of IPv4 traffic leaving the node from endpoints. +enableIPv4Masquerade: true +# -- Enables masquerading of IPv6 traffic leaving the node from endpoints. +enableIPv6Masquerade: true +# -- Enables masquerading to the source of the route for traffic leaving the node from endpoints. +enableMasqueradeRouteSource: false +# -- Enables IPv4 BIG TCP support which increases maximum IPv4 GSO/GRO limits for nodes and pods +enableIPv4BIGTCP: false +# -- Enables IPv6 BIG TCP support which increases maximum IPv6 GSO/GRO limits for nodes and pods +enableIPv6BIGTCP: false +nat: + # -- Number of the top-k SNAT map connections to track in Cilium statedb. + mapStatsEntries: 32 + # -- Interval between how often SNAT map is counted for stats. + mapStatsInterval: 30s +egressGateway: + # -- Enables egress gateway to redirect and SNAT the traffic that leaves the + # cluster. + enabled: false + # -- Time between triggers of egress gateway state reconciliations + reconciliationTriggerInterval: 1s + # -- Maximum number of entries in egress gateway policy map + # maxPolicyEntries: 16384 +vtep: + # -- Enables VXLAN Tunnel Endpoint (VTEP) Integration (beta) to allow + # Cilium-managed pods to talk to third party VTEP devices over Cilium tunnel. 
+ enabled: false + # -- A space separated list of VTEP device endpoint IPs, for example "1.1.1.1 1.1.2.1" + endpoint: "" + # -- A space separated list of VTEP device CIDRs, for example "1.1.1.0/24 1.1.2.0/24" + cidr: "" + # -- VTEP CIDRs Mask that applies to all VTEP CIDRs, for example "255.255.255.0" + mask: "" + # -- A space separated list of VTEP device MAC addresses (VTEP MAC), for example "x:x:x:x:x:x y:y:y:y:y:y:y" + mac: "" +# -- (string) Allows to explicitly specify the IPv4 CIDR for native routing. +# When specified, Cilium assumes networking for this CIDR is preconfigured and +# hands traffic destined for that range to the Linux network stack without +# applying any SNAT. +# Generally speaking, specifying a native routing CIDR implies that Cilium can +# depend on the underlying networking stack to route packets to their +# destination. To offer a concrete example, if Cilium is configured to use +# direct routing and the Kubernetes CIDR is included in the native routing CIDR, +# the user must configure the routes to reach pods, either manually or by +# setting the auto-direct-node-routes flag. +ipv4NativeRoutingCIDR: "" +# -- (string) Allows to explicitly specify the IPv6 CIDR for native routing. +# When specified, Cilium assumes networking for this CIDR is preconfigured and +# hands traffic destined for that range to the Linux network stack without +# applying any SNAT. +# Generally speaking, specifying a native routing CIDR implies that Cilium can +# depend on the underlying networking stack to route packets to their +# destination. To offer a concrete example, if Cilium is configured to use +# direct routing and the Kubernetes CIDR is included in the native routing CIDR, +# the user must configure the routes to reach pods, either manually or by +# setting the auto-direct-node-routes flag. +ipv6NativeRoutingCIDR: "" +# -- cilium-monitor sidecar. +monitor: + # -- Enable the cilium-monitor sidecar. 
+ enabled: false +# -- Configure service load balancing +loadBalancer: + # -- standalone enables the standalone L4LB which does not connect to + # kube-apiserver. + # standalone: false + + # -- algorithm is the name of the load balancing algorithm for backend + # selection e.g. random or maglev + # algorithm: random + + # -- mode is the operation mode of load balancing for remote backends + # e.g. snat, dsr, hybrid + # mode: snat + + # -- acceleration is the option to accelerate service handling via XDP + # Applicable values can be: disabled (do not use XDP), native (XDP BPF + # program is run directly out of the networking driver's early receive + # path), or best-effort (use native mode XDP acceleration on devices + # that support it). + acceleration: disabled + # -- dsrDispatch configures whether IP option or IPIP encapsulation is + # used to pass a service IP and port to remote backend + # dsrDispatch: opt + + # -- serviceTopology enables K8s Topology Aware Hints -based service + # endpoints filtering + # serviceTopology: false + + # -- L7 LoadBalancer + l7: + # -- Enable L7 service load balancing via envoy proxy. + # The request to a k8s service, which has specific annotation e.g. service.cilium.io/lb-l7, + # will be forwarded to the local backend proxy to be load balanced to the service endpoints. + # Please refer to docs for supported annotations for more configuration. + # + # Applicable values: + # - envoy: Enable L7 load balancing via envoy proxy. This will automatically set enable-envoy-config as well. + # - disabled: Disable L7 load balancing by way of service annotation. + backend: disabled + # -- List of ports from service to be automatically redirected to above backend. + # Any service exposing one of these ports will be automatically redirected. + # Fine-grained control can be achieved by using the service annotation. 
+ ports: [] + # -- Default LB algorithm + # The default LB algorithm to be used for services, which can be overridden by the + # service annotation (e.g. service.cilium.io/lb-l7-algorithm) + # Applicable values: round_robin, least_request, random + algorithm: round_robin +# -- Configure N-S k8s service loadbalancing +nodePort: + # -- Enable the Cilium NodePort service implementation. + enabled: false + # -- Port range to use for NodePort services. + # range: "30000,32767" + + # @schema + # type: [null, string, array] + # @schema + # -- List of CIDRs for choosing which IP addresses assigned to native devices are used for NodePort load-balancing. + # By default this is empty and the first suitable, preferably private, IPv4 and IPv6 address assigned to each device is used. + # + # Example: + # + # addresses: ["192.168.1.0/24", "2001::/64"] + # + addresses: ~ + # -- Set to true to prevent applications binding to service ports. + bindProtection: true + # -- Append NodePort range to ip_local_reserved_ports if clash with ephemeral + # ports is detected. + autoProtectPortRange: true + # -- Enable healthcheck nodePort server for NodePort services + enableHealthCheck: true + # -- Enable access of the healthcheck nodePort on the LoadBalancerIP. Needs + # EnableHealthCheck to be enabled + enableHealthCheckLoadBalancerIP: false +# policyAuditMode: false + +# -- The agent can be put into one of the three policy enforcement modes: +# default, always and never. +# ref: https://docs.cilium.io/en/stable/security/policy/intro/#policy-enforcement-modes +policyEnforcementMode: "default" +# @schema +# type: [null, string, array] +# @schema +# -- policyCIDRMatchMode is a list of entities that may be selected by CIDR selector. +# The possible value is "nodes". 
+policyCIDRMatchMode: +pprof: + # -- Enable pprof for cilium-agent + enabled: false + # -- Configure pprof listen address for cilium-agent + address: localhost + # -- Configure pprof listen port for cilium-agent + port: 6060 +# -- Configure prometheus metrics on the configured port at /metrics +prometheus: + enabled: false + port: 9962 + serviceMonitor: + # -- Enable service monitors. + # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) + enabled: false + # -- Labels to add to ServiceMonitor cilium-agent + labels: {} + # -- Annotations to add to ServiceMonitor cilium-agent + annotations: {} + # -- jobLabel to add for ServiceMonitor cilium-agent + jobLabel: "" + # -- Interval for scrape metrics. + interval: "10s" + # -- Specify the Kubernetes namespace where Prometheus expects to find + # service monitors configured. + # namespace: "" + # -- Relabeling configs for the ServiceMonitor cilium-agent + relabelings: + - sourceLabels: + - __meta_kubernetes_pod_node_name + targetLabel: node + replacement: ${1} + # @schema + # type: [null, array] + # @schema + # -- Metrics relabeling configs for the ServiceMonitor cilium-agent + metricRelabelings: ~ + # -- Set to `true` and helm will not check for monitoring.coreos.com/v1 CRDs before deploying + trustCRDsExist: false + # @schema + # type: [null, array] + # @schema + # -- Metrics that should be enabled or disabled from the default metric list. + # The list is expected to be separated by a space. (+metric_foo to enable + # metric_foo , -metric_bar to disable metric_bar). + # ref: https://docs.cilium.io/en/stable/observability/metrics/ + metrics: ~ + # --- Enable controller group metrics for monitoring specific Cilium + # subsystems. The list is a list of controller group names. The special + # values of "all" and "none" are supported. 
The set of controller + # group names is not guaranteed to be stable between Cilium versions. + controllerGroupMetrics: + - write-cni-file + - sync-host-ips + - sync-lb-maps-with-k8s-services +# -- Grafana dashboards for cilium-agent +# grafana can import dashboards based on the label and value +# ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards +dashboards: + enabled: false + label: grafana_dashboard + # @schema + # type: [null, string] + # @schema + namespace: ~ + labelValue: "1" + annotations: {} +# Configure Cilium Envoy options. +envoy: + # @schema + # type: [null, boolean] + # @schema + # -- Enable Envoy Proxy in standalone DaemonSet. + # This field is enabled by default for new installation. + # @default -- `true` for new installation + enabled: ~ + # -- (int) + # Set Envoy'--base-id' to use when allocating shared memory regions. + # Only needs to be changed if multiple Envoy instances will run on the same node and may have conflicts. Supported values: 0 - 4294967295. Defaults to '0' + baseID: 0 + log: + # -- The format string to use for laying out the log message metadata of Envoy. + format: "[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v" + # -- Path to a separate Envoy log file, if any. Defaults to /dev/stdout. + path: "" + # -- Time in seconds after which a TCP connection attempt times out + connectTimeoutSeconds: 2 + # -- ProxyMaxRequestsPerConnection specifies the max_requests_per_connection setting for Envoy + maxRequestsPerConnection: 0 + # -- Set Envoy HTTP option max_connection_duration seconds. Default 0 (disable) + maxConnectionDurationSeconds: 0 + # -- Set Envoy upstream HTTP idle connection timeout seconds. + # Does not apply to connections with pending requests. Default 60s + idleTimeoutDurationSeconds: 60 + # -- Number of trusted hops regarding the x-forwarded-for and related HTTP headers for the ingress L7 policy enforcement Envoy listeners. 
+ xffNumTrustedHopsL7PolicyIngress: 0 + # -- Number of trusted hops regarding the x-forwarded-for and related HTTP headers for the egress L7 policy enforcement Envoy listeners. + xffNumTrustedHopsL7PolicyEgress: 0 + # -- Envoy container image. + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/cilium-envoy" + tag: "v1.29.9-1728346947-0d05e48bfbb8c4737ec40d5781d970a550ed2bbd" + pullPolicy: "IfNotPresent" + digest: "sha256:42614a44e508f70d03a04470df5f61e3cffd22462471a0be0544cf116f2c50ba" + useDigest: true + # -- Additional containers added to the cilium Envoy DaemonSet. + extraContainers: [] + # -- Additional envoy container arguments. + extraArgs: [] + # -- Additional envoy container environment variables. + extraEnv: [] + # -- Additional envoy hostPath mounts. + extraHostPathMounts: [] + # - name: host-mnt-data + # mountPath: /host/mnt/data + # hostPath: /mnt/data + # hostPathType: Directory + # readOnly: true + # mountPropagation: HostToContainer + + # -- Additional envoy volumes. + extraVolumes: [] + # -- Additional envoy volumeMounts. + extraVolumeMounts: [] + # -- Configure termination grace period for cilium-envoy DaemonSet. + terminationGracePeriodSeconds: 1 + # -- TCP port for the health API. + healthPort: 9878 + # -- cilium-envoy update strategy + # ref: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/#updating-a-daemonset + updateStrategy: + type: RollingUpdate + rollingUpdate: + # @schema + # type: [integer, string] + # @schema + maxUnavailable: 2 + # -- Roll out cilium envoy pods automatically when configmap is updated. + rollOutPods: false + # -- Annotations to be added to all top-level cilium-envoy objects (resources under templates/cilium-envoy) + annotations: {} + # -- Security Context for cilium-envoy pods. 
+ podSecurityContext: + # -- AppArmorProfile options for the `cilium-agent` and init containers + appArmorProfile: + type: "Unconfined" + # -- Annotations to be added to envoy pods + podAnnotations: {} + # -- Labels to be added to envoy pods + podLabels: {} + # -- Envoy resource limits & requests + # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: {} + # limits: + # cpu: 4000m + # memory: 4Gi + # requests: + # cpu: 100m + # memory: 512Mi + + startupProbe: + # -- failure threshold of startup probe. + # 105 x 2s translates to the old behaviour of the readiness probe (120s delay + 30 x 3s) + failureThreshold: 105 + # -- interval between checks of the startup probe + periodSeconds: 2 + livenessProbe: + # -- failure threshold of liveness probe + failureThreshold: 10 + # -- interval between checks of the liveness probe + periodSeconds: 30 + readinessProbe: + # -- failure threshold of readiness probe + failureThreshold: 3 + # -- interval between checks of the readiness probe + periodSeconds: 30 + securityContext: + # -- User to run the pod with + # runAsUser: 0 + # -- Run the pod with elevated privileges + privileged: false + # -- SELinux options for the `cilium-envoy` container + seLinuxOptions: + level: 's0' + # Running with spc_t since we have removed the privileged mode. + # Users can change it to a different type as long as they have the + # type available on the system. + type: 'spc_t' + capabilities: + # -- Capabilities for the `cilium-envoy` container. + # Even though granted to the container, the cilium-envoy-starter wrapper drops + # all capabilities after forking the actual Envoy process. + # `NET_BIND_SERVICE` is the only capability that can be passed to the Envoy process by + # setting `envoy.securityContext.capabilities.keepNetBindService=true` (in addition to granting the + # capability to the container). + # Note: In case of embedded envoy, the capability must be granted to the cilium-agent container. 
+ envoy: + # Used since cilium proxy uses setting IPPROTO_IP/IP_TRANSPARENT + - NET_ADMIN + # We need it for now but might not need it for >= 5.11 specially + # for the 'SYS_RESOURCE'. + # In >= 5.8 there's already BPF and PERMON capabilities + - SYS_ADMIN + # Both PERFMON and BPF requires kernel 5.8, container runtime + # cri-o >= v1.22.0 or containerd >= v1.5.0. + # If available, SYS_ADMIN can be removed. + #- PERFMON + #- BPF + # -- Keep capability `NET_BIND_SERVICE` for Envoy process. + keepCapNetBindService: false + # -- Affinity for cilium-envoy. + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + k8s-app: cilium-envoy + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + k8s-app: cilium + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: cilium.io/no-schedule + operator: NotIn + values: + - "true" + # -- Node selector for cilium-envoy. + nodeSelector: + kubernetes.io/os: linux + # -- Node tolerations for envoy scheduling to nodes with taints + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: + - operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + # @schema + # type: [null, string] + # @schema + # -- The priority class to use for cilium-envoy. + priorityClassName: ~ + # @schema + # type: [null, string] + # @schema + # -- DNS policy for Cilium envoy pods. + # Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy + dnsPolicy: ~ + debug: + admin: + # -- Enable admin interface for cilium-envoy. + # This is useful for debugging and should not be enabled in production. 
+ enabled: false + # -- Port number (bound to loopback interface). + # kubectl port-forward can be used to access the admin interface. + port: 9901 + # -- Configure Cilium Envoy Prometheus options. + # Note that some of these apply to either cilium-agent or cilium-envoy. + prometheus: + # -- Enable prometheus metrics for cilium-envoy + enabled: true + serviceMonitor: + # -- Enable service monitors. + # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) + # Note that this setting applies to both cilium-envoy _and_ cilium-agent + # with Envoy enabled. + enabled: false + # -- Labels to add to ServiceMonitor cilium-envoy + labels: {} + # -- Annotations to add to ServiceMonitor cilium-envoy + annotations: {} + # -- Interval for scrape metrics. + interval: "10s" + # -- Specify the Kubernetes namespace where Prometheus expects to find + # service monitors configured. + # namespace: "" + # -- Relabeling configs for the ServiceMonitor cilium-envoy + # or for cilium-agent with Envoy configured. + relabelings: + - sourceLabels: + - __meta_kubernetes_pod_node_name + targetLabel: node + replacement: ${1} + # @schema + # type: [null, array] + # @schema + # -- Metrics relabeling configs for the ServiceMonitor cilium-envoy + # or for cilium-agent with Envoy configured. + metricRelabelings: ~ + # -- Serve prometheus metrics for cilium-envoy on the configured port + port: "9964" +# -- Enable/Disable use of node label based identity +nodeSelectorLabels: false +# -- Enable resource quotas for priority classes used in the cluster. 
+resourceQuotas: + enabled: false + cilium: + hard: + # 5k nodes * 2 DaemonSets (Cilium and cilium node init) + pods: "10k" + operator: + hard: + # 15 "clusterwide" Cilium Operator pods for HA + pods: "15" +# Need to document default +################## +#sessionAffinity: false + +# -- Do not run Cilium agent when running with clean mode. Useful to completely +# uninstall Cilium as it will stop Cilium from starting and create artifacts +# in the node. +sleepAfterInit: false +# -- Enable check of service source ranges (currently, only for LoadBalancer). +svcSourceRangeCheck: true +# -- Synchronize Kubernetes nodes to kvstore and perform CNP GC. +synchronizeK8sNodes: true +# -- Configure TLS configuration in the agent. +tls: + # -- This configures how the Cilium agent loads the secrets used TLS-aware CiliumNetworkPolicies + # (namely the secrets referenced by terminatingTLS and originatingTLS). + # Possible values: + # - local + # - k8s + secretsBackend: local + # -- Base64 encoded PEM values for the CA certificate and private key. + # This can be used as common CA to generate certificates used by hubble and clustermesh components. + # It is neither required nor used when cert-manager is used to generate the certificates. + ca: + # -- Optional CA cert. If it is provided, it will be used by cilium to + # generate all other certificates. Otherwise, an ephemeral CA is generated. + cert: "" + # -- Optional CA private key. If it is provided, it will be used by cilium to + # generate all other certificates. Otherwise, an ephemeral CA is generated. + key: "" + # -- Generated certificates validity duration in days. This will be used for auto generated CA. + certValidityDuration: 1095 + # -- Configure the CA trust bundle used for the validation of the certificates + # leveraged by hubble and clustermesh. When enabled, it overrides the content of the + # 'ca.crt' field of the respective certificates, allowing for CA rotation with no down-time. 
+ caBundle: + # -- Enable the use of the CA trust bundle. + enabled: false + # -- Name of the ConfigMap containing the CA trust bundle. + name: cilium-root-ca.crt + # -- Entry of the ConfigMap containing the CA trust bundle. + key: ca.crt + # -- Use a Secret instead of a ConfigMap. + useSecret: false + # If uncommented, creates the ConfigMap and fills it with the specified content. + # Otherwise, the ConfigMap is assumed to be already present in .Release.Namespace. + # + # content: | + # -----BEGIN CERTIFICATE----- + # ... + # -----END CERTIFICATE----- + # -----BEGIN CERTIFICATE----- + # ... + # -----END CERTIFICATE----- +# -- Tunneling protocol to use in tunneling mode and for ad-hoc tunnels. +# Possible values: +# - "" +# - vxlan +# - geneve +# @default -- `"vxlan"` +tunnelProtocol: "" +# -- Enable native-routing mode or tunneling mode. +# Possible values: +# - "" +# - native +# - tunnel +# @default -- `"tunnel"` +routingMode: "" +# -- Configure VXLAN and Geneve tunnel port. +# @default -- Port 8472 for VXLAN, Port 6081 for Geneve +tunnelPort: 0 +# -- Configure what the response should be to traffic for a service without backends. +# "reject" only works on kernels >= 5.10, on lower kernels we fallback to "drop". +# Possible values: +# - reject (default) +# - drop +serviceNoBackendResponse: reject +# -- Configure the underlying network MTU to overwrite auto-detected MTU. +# This value doesn't change the host network interface MTU i.e. eth0 or ens0. +# It changes the MTU for cilium_net@cilium_host, cilium_host@cilium_net, +# cilium_vxlan and lxc_health interfaces. +MTU: 0 +# -- Disable the usage of CiliumEndpoint CRD. +disableEndpointCRD: false +wellKnownIdentities: + # -- Enable the use of well-known identities. + enabled: false +etcd: + # -- Enable etcd mode for the agent. + enabled: false + # -- List of etcd endpoints + endpoints: + - https://CHANGE-ME:2379 + # -- Enable use of TLS/SSL for connectivity to etcd. 
+ ssl: false +operator: + # -- Enable the cilium-operator component (required). + enabled: true + # -- Roll out cilium-operator pods automatically when configmap is updated. + rollOutPods: false + # -- cilium-operator image. + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/operator" + tag: "v1.16.3" + # operator-generic-digest + genericDigest: "sha256:6e2925ef47a1c76e183c48f95d4ce0d34a1e5e848252f910476c3e11ce1ec94b" + # operator-azure-digest + azureDigest: "sha256:2882aaf03c32525a99181b7c065b2bb19c03eba6626fc736aebe368d90791542" + # operator-aws-digest + awsDigest: "sha256:47f5abc5fa528472d3509c3199d7aab1e120833fb68df455e3b4476916385916" + # operator-alibabacloud-digest + alibabacloudDigest: "sha256:d80a785c0e807fc708264a3fcb19be404114f619fd756dd5214f4cad5a281898" + useDigest: true + pullPolicy: "IfNotPresent" + suffix: "" + # -- Number of replicas to run for the cilium-operator deployment + replicas: 2 + # -- The priority class to use for cilium-operator + priorityClassName: "" + # -- DNS policy for Cilium operator pods. 
+ # Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy + dnsPolicy: "" + # -- cilium-operator update strategy + updateStrategy: + type: RollingUpdate + rollingUpdate: + # @schema + # type: [integer, string] + # @schema + maxSurge: 25% + # @schema + # type: [integer, string] + # @schema + maxUnavailable: 50% + # -- Affinity for cilium-operator + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + io.cilium/app: operator + # -- Pod topology spread constraints for cilium-operator + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + + # -- Node labels for cilium-operator pod assignment + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + nodeSelector: + kubernetes.io/os: linux + # -- Node tolerations for cilium-operator scheduling to nodes with taints + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: + - operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + # -- Additional cilium-operator container arguments. + extraArgs: [] + # -- Additional cilium-operator environment variables. + extraEnv: [] + # -- Additional cilium-operator hostPath mounts. + extraHostPathMounts: [] + # - name: host-mnt-data + # mountPath: /host/mnt/data + # hostPath: /mnt/data + # hostPathType: Directory + # readOnly: true + # mountPropagation: HostToContainer + + # -- Additional cilium-operator volumes. + extraVolumes: [] + # -- Additional cilium-operator volumeMounts. 
+ extraVolumeMounts: [] + # -- Annotations to be added to all top-level cilium-operator objects (resources under templates/cilium-operator) + annotations: {} + # -- HostNetwork setting + hostNetwork: true + # -- Security context to be added to cilium-operator pods + podSecurityContext: {} + # -- Annotations to be added to cilium-operator pods + podAnnotations: {} + # -- Labels to be added to cilium-operator pods + podLabels: {} + # PodDisruptionBudget settings + podDisruptionBudget: + # -- enable PodDisruptionBudget + # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + enabled: false + # @schema + # type: [null, integer, string] + # @schema + # -- Minimum number/percentage of pods that should remain scheduled. + # When it's set, maxUnavailable must be disabled by `maxUnavailable: null` + minAvailable: null + # @schema + # type: [null, integer, string] + # @schema + # -- Maximum number/percentage of pods that may be made unavailable + maxUnavailable: 1 + # -- cilium-operator resource limits & requests + # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: {} + # limits: + # cpu: 1000m + # memory: 1Gi + # requests: + # cpu: 100m + # memory: 128Mi + + # -- Security context to be added to cilium-operator pods + securityContext: {} + # runAsUser: 0 + + # -- Interval for endpoint garbage collection. + endpointGCInterval: "5m0s" + # -- Interval for cilium node garbage collection. + nodeGCInterval: "5m0s" + # -- Interval for identity garbage collection. + identityGCInterval: "15m0s" + # -- Timeout for identity heartbeats. 
+ identityHeartbeatTimeout: "30m0s" + pprof: + # -- Enable pprof for cilium-operator + enabled: false + # -- Configure pprof listen address for cilium-operator + address: localhost + # -- Configure pprof listen port for cilium-operator + port: 6061 + # -- Enable prometheus metrics for cilium-operator on the configured port at + # /metrics + prometheus: + enabled: true + port: 9963 + serviceMonitor: + # -- Enable service monitors. + # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) + enabled: false + # -- Labels to add to ServiceMonitor cilium-operator + labels: {} + # -- Annotations to add to ServiceMonitor cilium-operator + annotations: {} + # -- jobLabel to add for ServiceMonitor cilium-operator + jobLabel: "" + # -- Interval for scrape metrics. + interval: "10s" + # @schema + # type: [null, array] + # @schema + # -- Relabeling configs for the ServiceMonitor cilium-operator + relabelings: ~ + # @schema + # type: [null, array] + # @schema + # -- Metrics relabeling configs for the ServiceMonitor cilium-operator + metricRelabelings: ~ + # -- Grafana dashboards for cilium-operator + # grafana can import dashboards based on the label and value + # ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards + dashboards: + enabled: false + label: grafana_dashboard + # @schema + # type: [null, string] + # @schema + namespace: ~ + labelValue: "1" + annotations: {} + # -- Skip CRDs creation for cilium-operator + skipCRDCreation: false + # -- Remove Cilium node taint from Kubernetes nodes that have a healthy Cilium + # pod running. + removeNodeTaints: true + # @schema + # type: [null, boolean] + # @schema + # -- Taint nodes where Cilium is scheduled but not running. This prevents pods + # from being scheduled to nodes where Cilium is not the default CNI provider. 
+ # @default -- same as removeNodeTaints + setNodeTaints: ~ + # -- Set Node condition NetworkUnavailable to 'false' with the reason + # 'CiliumIsUp' for nodes that have a healthy Cilium pod. + setNodeNetworkStatus: true + unmanagedPodWatcher: + # -- Restart any pod that are not managed by Cilium. + restart: true + # -- Interval, in seconds, to check if there are any pods that are not + # managed by Cilium. + intervalSeconds: 15 +nodeinit: + # -- Enable the node initialization DaemonSet + enabled: false + # -- node-init image. + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/startup-script" + tag: "c54c7edeab7fde4da68e59acd319ab24af242c3f" + digest: "sha256:8d7b41c4ca45860254b3c19e20210462ef89479bb6331d6760c4e609d651b29c" + useDigest: true + pullPolicy: "IfNotPresent" + # -- The priority class to use for the nodeinit pod. + priorityClassName: "" + # -- node-init update strategy + updateStrategy: + type: RollingUpdate + # -- Additional nodeinit environment variables. + extraEnv: [] + # -- Additional nodeinit volumes. + extraVolumes: [] + # -- Additional nodeinit volumeMounts. + extraVolumeMounts: [] + # -- Affinity for cilium-nodeinit + affinity: {} + # -- Node labels for nodeinit pod assignment + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + nodeSelector: + kubernetes.io/os: linux + # -- Node tolerations for nodeinit scheduling to nodes with taints + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: + - operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + # -- Annotations to be added to all top-level nodeinit objects (resources under templates/cilium-nodeinit) + annotations: {} + # -- Annotations to be added to node-init pods. + podAnnotations: {} + # -- Labels to be added to node-init pods. 
+ podLabels: {} + # -- Security Context for cilium-node-init pods. + podSecurityContext: + # -- AppArmorProfile options for the `cilium-node-init` and init containers + appArmorProfile: + type: "Unconfined" + # -- nodeinit resource limits & requests + # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: + requests: + cpu: 100m + memory: 100Mi + # -- Security context to be added to nodeinit pods. + securityContext: + privileged: false + seLinuxOptions: + level: 's0' + # Running with spc_t since we have removed the privileged mode. + # Users can change it to a different type as long as they have the + # type available on the system. + type: 'spc_t' + capabilities: + add: + # Used in iptables. Consider removing once we are iptables-free + - SYS_MODULE + # Used for nsenter + - NET_ADMIN + - SYS_ADMIN + - SYS_CHROOT + - SYS_PTRACE + # -- bootstrapFile is the location of the file where the bootstrap timestamp is + # written by the node-init DaemonSet + bootstrapFile: "/tmp/cilium-bootstrap.d/cilium-bootstrap-time" + # -- startup offers way to customize startup nodeinit script (pre and post position) + startup: + preScript: "" + postScript: "" + # -- prestop offers way to customize prestop nodeinit script (pre and post position) + prestop: + preScript: "" + postScript: "" +preflight: + # -- Enable Cilium pre-flight resources (required for upgrade) + enabled: false + # -- Cilium pre-flight image. + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/cilium" + tag: "v1.16.3" + # cilium-digest + digest: "sha256:62d2a09bbef840a46099ac4c69421c90f84f28d018d479749049011329aa7f28" + useDigest: true + pullPolicy: "IfNotPresent" + # -- The priority class to use for the preflight pod. + priorityClassName: "" + # -- preflight update strategy + updateStrategy: + type: RollingUpdate + # -- Additional preflight environment variables. + extraEnv: [] + # -- Additional preflight volumes. 
+ extraVolumes: [] + # -- Additional preflight volumeMounts. + extraVolumeMounts: [] + # -- Affinity for cilium-preflight + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: kubernetes.io/hostname + labelSelector: + matchLabels: + k8s-app: cilium + # -- Node labels for preflight pod assignment + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + nodeSelector: + kubernetes.io/os: linux + # -- Node tolerations for preflight scheduling to nodes with taints + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: + - operator: Exists + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + # -- Annotations to be added to all top-level preflight objects (resources under templates/cilium-preflight) + annotations: {} + # -- Security context to be added to preflight pods. + podSecurityContext: {} + # -- Annotations to be added to preflight pods + podAnnotations: {} + # -- Labels to be added to the preflight pod. + podLabels: {} + # PodDisruptionBudget settings + podDisruptionBudget: + # -- enable PodDisruptionBudget + # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + enabled: false + # @schema + # type: [null, integer, string] + # @schema + # -- Minimum number/percentage of pods that should remain scheduled. 
+ # When it's set, maxUnavailable must be disabled by `maxUnavailable: null` + minAvailable: null + # @schema + # type: [null, integer, string] + # @schema + # -- Maximum number/percentage of pods that may be made unavailable + maxUnavailable: 1 + # -- preflight resource limits & requests + # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: {} + # limits: + # cpu: 4000m + # memory: 4Gi + # requests: + # cpu: 100m + # memory: 512Mi + + readinessProbe: + # -- For how long kubelet should wait before performing the first probe + initialDelaySeconds: 5 + # -- interval between checks of the readiness probe + periodSeconds: 5 + # -- Security context to be added to preflight pods + securityContext: {} + # runAsUser: 0 + + # -- Path to write the `--tofqdns-pre-cache` file to. + tofqdnsPreCache: "" + # -- Configure termination grace period for preflight Deployment and DaemonSet. + terminationGracePeriodSeconds: 1 + # -- By default we should always validate the installed CNPs before upgrading + # Cilium. This will make sure the user will have the policies deployed in the + # cluster with the right schema. + validateCNPs: true +# -- Explicitly enable or disable priority class. +# .Capabilities.KubeVersion is unsettable in `helm template` calls, +# it depends on k8s libraries version that Helm was compiled against. +# This option allows to explicitly disable setting the priority class, which +# is useful for rendering charts for gke clusters in advance. +enableCriticalPriorityClass: true +# disableEnvoyVersionCheck removes the check for Envoy, which can be useful +# on AArch64 as the images do not currently ship a version of Envoy. +#disableEnvoyVersionCheck: false +clustermesh: + # -- Deploy clustermesh-apiserver for clustermesh + useAPIServer: false + # -- The maximum number of clusters to support in a ClusterMesh. 
This value + # cannot be changed on running clusters, and all clusters in a ClusterMesh + # must be configured with the same value. Values > 255 will decrease the + # maximum allocatable cluster-local identities. + # Supported values are 255 and 511. + maxConnectedClusters: 255 + # -- Enable the synchronization of Kubernetes EndpointSlices corresponding to + # the remote endpoints of appropriately-annotated global services through ClusterMesh + enableEndpointSliceSynchronization: false + # -- Enable Multi-Cluster Services API support + enableMCSAPISupport: false + # -- Annotations to be added to all top-level clustermesh objects (resources under templates/clustermesh-apiserver and templates/clustermesh-config) + annotations: {} + # -- Clustermesh explicit configuration. + config: + # -- Enable the Clustermesh explicit configuration. + enabled: false + # -- Default dns domain for the Clustermesh API servers + # This is used in the case cluster addresses are not provided + # and IPs are used. + domain: mesh.cilium.io + # -- List of clusters to be peered in the mesh. + clusters: [] + # clusters: + # # -- Name of the cluster + # - name: cluster1 + # # -- Address of the cluster, use this if you created DNS records for + # # the cluster Clustermesh API server. + # address: cluster1.mesh.cilium.io + # # -- Port of the cluster Clustermesh API server. + # port: 2379 + # # -- IPs of the cluster Clustermesh API server, use multiple ones when + # # you have multiple IPs to access the Clustermesh API server. + # ips: + # - 172.18.255.201 + # # -- base64 encoded PEM values for the cluster client certificate, private key and certificate authority. + # # These fields can (and should) be omitted in case the CA is shared across clusters. In that case, the + # # "remote" private key and certificate available in the local cluster are automatically used instead. + # tls: + # cert: "" + # key: "" + # caCert: "" + apiserver: + # -- Clustermesh API server image. 
+ image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "quay.io/cilium/clustermesh-apiserver" + tag: "v1.16.3" + # clustermesh-apiserver-digest + digest: "sha256:598cb4fd30b47bf2bc229cd6a011e451cf14753e56a80bb9ef01a09a519f52fb" + useDigest: true + pullPolicy: "IfNotPresent" + # -- TCP port for the clustermesh-apiserver health API. + healthPort: 9880 + # -- Configuration for the clustermesh-apiserver readiness probe. + readinessProbe: {} + etcd: + # The etcd binary is included in the clustermesh API server image, so the same image from above is reused. + # Independent override isn't supported, because clustermesh-apiserver is tested against the etcd version it is + # built with. + + # -- Specifies the resources for etcd container in the apiserver + resources: {} + # requests: + # cpu: 200m + # memory: 256Mi + # limits: + # cpu: 1000m + # memory: 256Mi + + # -- Security context to be added to clustermesh-apiserver etcd containers + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + # -- lifecycle setting for the etcd container + lifecycle: {} + init: + # -- Specifies the resources for etcd init container in the apiserver + resources: {} + # requests: + # cpu: 100m + # memory: 100Mi + # limits: + # cpu: 100m + # memory: 100Mi + + # -- Additional arguments to `clustermesh-apiserver etcdinit`. + extraArgs: [] + # -- Additional environment variables to `clustermesh-apiserver etcdinit`. + extraEnv: [] + # @schema + # enum: [Disk, Memory] + # @schema + # -- Specifies whether etcd data is stored in a temporary volume backed by + # the node's default medium, such as disk, SSD or network storage (Disk), or + # RAM (Memory). The Memory option enables improved etcd read and write + # performance at the cost of additional memory usage, which counts against + # the memory limits of the container. + storageMedium: Disk + kvstoremesh: + # -- Enable KVStoreMesh. 
KVStoreMesh caches the information retrieved + # from the remote clusters in the local etcd instance. + enabled: true + # -- TCP port for the KVStoreMesh health API. + healthPort: 9881 + # -- Configuration for the KVStoreMesh readiness probe. + readinessProbe: {} + # -- Additional KVStoreMesh arguments. + extraArgs: [] + # -- Additional KVStoreMesh environment variables. + extraEnv: [] + # -- Resource requests and limits for the KVStoreMesh container + resources: {} + # requests: + # cpu: 100m + # memory: 64Mi + # limits: + # cpu: 1000m + # memory: 1024M + + # -- Additional KVStoreMesh volumeMounts. + extraVolumeMounts: [] + # -- KVStoreMesh Security context + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + # -- lifecycle setting for the KVStoreMesh container + lifecycle: {} + service: + # -- The type of service used for apiserver access. + type: NodePort + # -- Optional port to use as the node port for apiserver access. + # + # WARNING: make sure to configure a different NodePort in each cluster if + # kube-proxy replacement is enabled, as Cilium is currently affected by a known + # bug (#24692) when NodePorts are handled by the KPR implementation. If a service + # with the same NodePort exists both in the local and the remote cluster, all + # traffic originating from inside the cluster and targeting the corresponding + # NodePort will be redirected to a local backend, regardless of whether the + # destination node belongs to the local or the remote cluster. + nodePort: 32379 + # -- Annotations for the clustermesh-apiserver + # For GKE LoadBalancer, use annotation cloud.google.com/load-balancer-type: "Internal" + # For EKS LoadBalancer, use annotation service.beta.kubernetes.io/aws-load-balancer-internal: "true" + annotations: {} + # @schema + # enum: [Local, Cluster] + # @schema + # -- The externalTrafficPolicy of service used for apiserver access. 
+ externalTrafficPolicy: Cluster + # @schema + # enum: [Local, Cluster] + # @schema + # -- The internalTrafficPolicy of service used for apiserver access. + internalTrafficPolicy: Cluster + # @schema + # enum: [HAOnly, Always, Never] + # @schema + # -- Defines when to enable session affinity. + # Each replica in a clustermesh-apiserver deployment runs its own discrete + # etcd cluster. Remote clients connect to one of the replicas through a + # shared Kubernetes Service. A client reconnecting to a different backend + # will require a full resync to ensure data integrity. Session affinity + # can reduce the likelihood of this happening, but may not be supported + # by all cloud providers. + # Possible values: + # - "HAOnly" (default) Only enable session affinity for deployments with more than 1 replica. + # - "Always" Always enable session affinity. + # - "Never" Never enable session affinity. Useful in environments where + # session affinity is not supported, but may lead to slightly + # degraded performance due to more frequent reconnections. + enableSessionAffinity: "HAOnly" + # @schema + # type: [null, string] + # @schema + # -- Configure a loadBalancerClass. + # Allows to configure the loadBalancerClass on the clustermesh-apiserver + # LB service in case the Service type is set to LoadBalancer + # (requires Kubernetes 1.24+). + loadBalancerClass: ~ + # @schema + # type: [null, string] + # @schema + # -- Configure a specific loadBalancerIP. + # Allows to configure a specific loadBalancerIP on the clustermesh-apiserver + # LB service in case the Service type is set to LoadBalancer. + loadBalancerIP: ~ + # -- Number of replicas run for the clustermesh-apiserver deployment. + replicas: 1 + # -- lifecycle setting for the apiserver container + lifecycle: {} + # -- terminationGracePeriodSeconds for the clustermesh-apiserver deployment + terminationGracePeriodSeconds: 30 + # -- Additional clustermesh-apiserver arguments. 
+ extraArgs: [] + # -- Additional clustermesh-apiserver environment variables. + extraEnv: [] + # -- Additional clustermesh-apiserver volumes. + extraVolumes: [] + # -- Additional clustermesh-apiserver volumeMounts. + extraVolumeMounts: [] + # -- Security context to be added to clustermesh-apiserver containers + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + # -- Security context to be added to clustermesh-apiserver pods + podSecurityContext: + runAsNonRoot: true + runAsUser: 65532 + runAsGroup: 65532 + fsGroup: 65532 + # -- Annotations to be added to clustermesh-apiserver pods + podAnnotations: {} + # -- Labels to be added to clustermesh-apiserver pods + podLabels: {} + # PodDisruptionBudget settings + podDisruptionBudget: + # -- enable PodDisruptionBudget + # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + enabled: false + # @schema + # type: [null, integer, string] + # @schema + # -- Minimum number/percentage of pods that should remain scheduled. 
+ # When it's set, maxUnavailable must be disabled by `maxUnavailable: null` + minAvailable: null + # @schema + # type: [null, integer, string] + # @schema + # -- Maximum number/percentage of pods that may be made unavailable + maxUnavailable: 1 + # -- Resource requests and limits for the clustermesh-apiserver + resources: {} + # requests: + # cpu: 100m + # memory: 64Mi + # limits: + # cpu: 1000m + # memory: 1024M + + # -- Affinity for clustermesh.apiserver + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchLabels: + k8s-app: clustermesh-apiserver + topologyKey: kubernetes.io/hostname + # -- Pod topology spread constraints for clustermesh-apiserver + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + + # -- Node labels for pod assignment + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + nodeSelector: + kubernetes.io/os: linux + # -- Node tolerations for pod assignment on nodes with taints + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: [] + # -- clustermesh-apiserver update strategy + updateStrategy: + type: RollingUpdate + rollingUpdate: + # @schema + # type: [integer, string] + # @schema + maxSurge: 1 + # @schema + # type: [integer, string] + # @schema + maxUnavailable: 0 + # -- The priority class to use for clustermesh-apiserver + priorityClassName: "" + tls: + # -- Configure the clustermesh authentication mode. + # Supported values: + # - legacy: All clusters access remote clustermesh instances with the same + # username (i.e., remote). The "remote" certificate must be + # generated with CN=remote if provided manually. + # - migration: Intermediate mode required to upgrade from legacy to cluster + # (and vice versa) with no disruption. 
Specifically, it enables + # the creation of the per-cluster usernames, while still using + # the common one for authentication. The "remote" certificate must + # be generated with CN=remote if provided manually (same as legacy). + # - cluster: Each cluster accesses remote etcd instances with a username + # depending on the local cluster name (i.e., remote-). + # The "remote" certificate must be generated with CN=remote- + # if provided manually. Cluster mode is meaningful only when the same + # CA is shared across all clusters part of the mesh. + authMode: legacy + # -- Allow users to provide their own certificates + # Users may need to provide their certificates using + # a mechanism that requires they provide their own secrets. + # This setting does not apply to any of the auto-generated + # mechanisms below, it only restricts the creation of secrets + # via the `tls-provided` templates. + enableSecrets: true + # -- Configure automatic TLS certificates generation. + # A Kubernetes CronJob is used the generate any + # certificates not provided by the user at installation + # time. + auto: + # -- When set to true, automatically generate a CA and certificates to + # enable mTLS between clustermesh-apiserver and external workload instances. + # If set to false, the certs to be provided by setting appropriate values below. + enabled: true + # Sets the method to auto-generate certificates. Supported values: + # - helm: This method uses Helm to generate all certificates. + # - cronJob: This method uses a Kubernetes CronJob the generate any + # certificates not provided by the user at installation + # time. + # - certmanager: This method use cert-manager to generate & rotate certificates. + method: helm + # -- Generated certificates validity duration in days. + certValidityDuration: 1095 + # -- Schedule for certificates regeneration (regardless of their expiration date). + # Only used if method is "cronJob". If nil, then no recurring job will be created. 
+ # Instead, only the one-shot job is deployed to generate the certificates at + # installation time. + # + # Due to the out-of-band distribution of client certs to external workloads the + # CA is (re)regenerated only if it is not provided as a helm value and the k8s + # secret is manually deleted. + # + # Defaults to none. Commented syntax gives midnight of the first day of every + # fourth month. For syntax, see + # https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#schedule-syntax + # schedule: "0 0 1 */4 *" + + # [Example] + # certManagerIssuerRef: + # group: cert-manager.io + # kind: ClusterIssuer + # name: ca-issuer + # -- certmanager issuer used when clustermesh.apiserver.tls.auto.method=certmanager. + certManagerIssuerRef: {} + # -- base64 encoded PEM values for the clustermesh-apiserver server certificate and private key. + # Used if 'auto' is not enabled. + server: + cert: "" + key: "" + # -- Extra DNS names added to certificate when it's auto generated + extraDnsNames: [] + # -- Extra IP addresses added to certificate when it's auto generated + extraIpAddresses: [] + # -- base64 encoded PEM values for the clustermesh-apiserver admin certificate and private key. + # Used if 'auto' is not enabled. + admin: + cert: "" + key: "" + # -- base64 encoded PEM values for the clustermesh-apiserver client certificate and private key. + # Used if 'auto' is not enabled. + client: + cert: "" + key: "" + # -- base64 encoded PEM values for the clustermesh-apiserver remote cluster certificate and private key. + # Used if 'auto' is not enabled. + remote: + cert: "" + key: "" + # clustermesh-apiserver Prometheus metrics configuration + metrics: + # -- Enables exporting apiserver metrics in OpenMetrics format. + enabled: true + # -- Configure the port the apiserver metric server listens on. + port: 9962 + kvstoremesh: + # -- Enables exporting KVStoreMesh metrics in OpenMetrics format. 
+ enabled: true + # -- Configure the port the KVStoreMesh metric server listens on. + port: 9964 + etcd: + # -- Enables exporting etcd metrics in OpenMetrics format. + enabled: true + # -- Set level of detail for etcd metrics; specify 'extensive' to include server side gRPC histogram metrics. + mode: basic + # -- Configure the port the etcd metric server listens on. + port: 9963 + serviceMonitor: + # -- Enable service monitor. + # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) + enabled: false + # -- Labels to add to ServiceMonitor clustermesh-apiserver + labels: {} + # -- Annotations to add to ServiceMonitor clustermesh-apiserver + annotations: {} + # -- Specify the Kubernetes namespace where Prometheus expects to find + # service monitors configured. + # namespace: "" + + # -- Interval for scrape metrics (apiserver metrics) + interval: "10s" + # @schema + # type: [null, array] + # @schema + # -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (apiserver metrics) + relabelings: ~ + # @schema + # type: [null, array] + # @schema + # -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (apiserver metrics) + metricRelabelings: ~ + kvstoremesh: + # -- Interval for scrape metrics (KVStoreMesh metrics) + interval: "10s" + # @schema + # type: [null, array] + # @schema + # -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (KVStoreMesh metrics) + relabelings: ~ + # @schema + # type: [null, array] + # @schema + # -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (KVStoreMesh metrics) + metricRelabelings: ~ + etcd: + # -- Interval for scrape metrics (etcd metrics) + interval: "10s" + # @schema + # type: [null, array] + # @schema + # -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (etcd metrics) + relabelings: ~ + # @schema + # 
type: [null, array] + # @schema + # -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (etcd metrics) + metricRelabelings: ~ +# -- Configure external workloads support +externalWorkloads: + # -- Enable support for external workloads, such as VMs (false by default). + enabled: false +# -- Configure cgroup related configuration +cgroup: + autoMount: + # -- Enable auto mount of cgroup2 filesystem. + # When `autoMount` is enabled, cgroup2 filesystem is mounted at + # `cgroup.hostRoot` path on the underlying host and inside the cilium agent pod. + # If users disable `autoMount`, it's expected that users have mounted + # cgroup2 filesystem at the specified `cgroup.hostRoot` volume, and then the + # volume will be mounted inside the cilium agent pod at the same path. + enabled: true + # -- Init Container Cgroup Automount resource limits & requests + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + # -- Configure cgroup root where cgroup2 filesystem is mounted on the host (see also: `cgroup.autoMount`) + hostRoot: /run/cilium/cgroupv2 +# -- Configure sysctl override described in #20072. +sysctlfix: + # -- Enable the sysctl override. When enabled, the init container will mount the /proc of the host so that the `sysctlfix` utility can execute. + enabled: true +# -- Configure whether to enable auto detect of terminating state for endpoints +# in order to support graceful termination. +enableK8sTerminatingEndpoint: true +# -- Configure whether to unload DNS policy rules on graceful shutdown +# dnsPolicyUnloadOnShutdown: false + +# -- Configure the key of the taint indicating that Cilium is not ready on the node. +# When set to a value starting with `ignore-taint.cluster-autoscaler.kubernetes.io/`, the Cluster Autoscaler will ignore the taint on its decisions, allowing the cluster to scale up. 
+agentNotReadyTaintKey: "node.cilium.io/agent-not-ready" +dnsProxy: + # -- Timeout (in seconds) when closing the connection between the DNS proxy and the upstream server. If set to 0, the connection is closed immediately (with TCP RST). If set to -1, the connection is closed asynchronously in the background. + socketLingerTimeout: 10 + # -- DNS response code for rejecting DNS requests, available options are '[nameError refused]'. + dnsRejectResponseCode: refused + # -- Allow the DNS proxy to compress responses to endpoints that are larger than 512 Bytes or the EDNS0 option, if present. + enableDnsCompression: true + # -- Maximum number of IPs to maintain per FQDN name for each endpoint. + endpointMaxIpPerHostname: 50 + # -- Time during which idle but previously active connections with expired DNS lookups are still considered alive. + idleConnectionGracePeriod: 0s + # -- Maximum number of IPs to retain for expired DNS lookups with still-active connections. + maxDeferredConnectionDeletes: 10000 + # -- The minimum time, in seconds, to use DNS data for toFQDNs policies. If + # the upstream DNS server returns a DNS record with a shorter TTL, Cilium + # overwrites the TTL with this value. Setting this value to zero means that + # Cilium will honor the TTLs returned by the upstream DNS server. + minTtl: 0 + # -- DNS cache data at this path is preloaded on agent startup. + preCache: "" + # -- Global port on which the in-agent DNS proxy should listen. Default 0 is a OS-assigned port. + proxyPort: 0 + # -- The maximum time the DNS proxy holds an allowed DNS response before sending it along. Responses are sent as soon as the datapath is updated with the new IP information. + proxyResponseMaxDelay: 100ms + # -- DNS proxy operation mode (true/false, or unset to use version dependent defaults) + # enableTransparentMode: true +# -- SCTP Configuration Values +sctp: + # -- Enable SCTP support. NOTE: Currently, SCTP support does not support rewriting ports or multihoming. 
+ enabled: false +# Configuration for types of authentication for Cilium (beta) +authentication: + # -- Enable authentication processing and garbage collection. + # Note that if disabled, policy enforcement will still block requests that require authentication. + # But the resulting authentication requests for these requests will not be processed, therefore the requests not be allowed. + enabled: true + # -- Buffer size of the channel Cilium uses to receive authentication events from the signal map. + queueSize: 1024 + # -- Buffer size of the channel Cilium uses to receive certificate expiration events from auth handlers. + rotatedIdentitiesQueueSize: 1024 + # -- Interval for garbage collection of auth map entries. + gcInterval: "5m0s" + # Configuration for Cilium's service-to-service mutual authentication using TLS handshakes. + # Note that this is not full mTLS support without also enabling encryption of some form. + # Current encryption options are WireGuard or IPsec, configured in encryption block above. + mutual: + # -- Port on the agent where mutual authentication handshakes between agents will be performed + port: 4250 + # -- Timeout for connecting to the remote node TCP socket + connectTimeout: 5s + # Settings for SPIRE + spire: + # -- Enable SPIRE integration (beta) + enabled: false + # -- Annotations to be added to all top-level spire objects (resources under templates/spire) + annotations: {} + # Settings to control the SPIRE installation and configuration + install: + # -- Enable SPIRE installation. + # This will only take effect only if authentication.mutual.spire.enabled is true + enabled: true + # -- SPIRE namespace to install into + namespace: cilium-spire + # -- SPIRE namespace already exists. Set to true if Helm should not create, manage, and import the SPIRE namespace. 
+ existingNamespace: false + # -- init container image of SPIRE agent and server + initImage: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "docker.io/library/busybox" + tag: "1.36.1" + digest: "sha256:c230832bd3b0be59a6c47ed64294f9ce71e91b327957920b6929a0caa8353140" + useDigest: true + pullPolicy: "IfNotPresent" + # SPIRE agent configuration + agent: + # -- SPIRE agent image + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "ghcr.io/spiffe/spire-agent" + tag: "1.9.6" + digest: "sha256:5106ac601272a88684db14daf7f54b9a45f31f77bb16a906bd5e87756ee7b97c" + useDigest: true + pullPolicy: "IfNotPresent" + # -- SPIRE agent service account + serviceAccount: + create: true + name: spire-agent + # -- SPIRE agent annotations + annotations: {} + # -- SPIRE agent labels + labels: {} + # -- SPIRE Workload Attestor kubelet verification. + skipKubeletVerification: true + # -- SPIRE agent tolerations configuration + # By default it follows the same tolerations as the agent itself + # to allow the Cilium agent on this node to connect to SPIRE. + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: + - key: node.kubernetes.io/not-ready + effect: NoSchedule + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule + - key: node.cloudprovider.kubernetes.io/uninitialized + effect: NoSchedule + value: "true" + - key: CriticalAddonsOnly + operator: "Exists" + # -- SPIRE agent affinity configuration + affinity: {} + # -- SPIRE agent nodeSelector configuration + # ref: ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + nodeSelector: {} + # -- Security context to be added to spire agent pods. + # SecurityContext holds pod-level security attributes and common container settings. 
+ # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + podSecurityContext: {} + # -- Security context to be added to spire agent containers. + # SecurityContext holds pod-level security attributes and common container settings. + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + securityContext: {} + server: + # -- SPIRE server image + image: + # @schema + # type: [null, string] + # @schema + override: ~ + repository: "ghcr.io/spiffe/spire-server" + tag: "1.9.6" + digest: "sha256:59a0b92b39773515e25e68a46c40d3b931b9c1860bc445a79ceb45a805cab8b4" + useDigest: true + pullPolicy: "IfNotPresent" + # -- SPIRE server service account + serviceAccount: + create: true + name: spire-server + # -- SPIRE server init containers + initContainers: [] + # -- SPIRE server annotations + annotations: {} + # -- SPIRE server labels + labels: {} + # SPIRE server service configuration + service: + # -- Service type for the SPIRE server service + type: ClusterIP + # -- Annotations to be added to the SPIRE server service + annotations: {} + # -- Labels to be added to the SPIRE server service + labels: {} + # -- SPIRE server affinity configuration + affinity: {} + # -- SPIRE server nodeSelector configuration + # ref: ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + nodeSelector: {} + # -- SPIRE server tolerations configuration + # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + tolerations: [] + # SPIRE server datastorage configuration + dataStorage: + # -- Enable SPIRE server data storage + enabled: true + # -- Size of the SPIRE server data storage + size: 1Gi + # -- Access mode of the SPIRE server data storage + accessMode: ReadWriteOnce + # @schema + # type: [null, string] + # @schema + # -- StorageClass of the SPIRE server data storage + storageClass: null + # -- 
Security context to be added to spire server pods. + # SecurityContext holds pod-level security attributes and common container settings. + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + podSecurityContext: {} + # -- Security context to be added to spire server containers. + # SecurityContext holds pod-level security attributes and common container settings. + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + securityContext: {} + # SPIRE CA configuration + ca: + # -- SPIRE CA key type + # AWS requires the use of RSA. EC cryptography is not supported + keyType: "rsa-4096" + # -- SPIRE CA Subject + subject: + country: "US" + organization: "SPIRE" + commonName: "Cilium SPIRE CA" + # @schema + # type: [null, string] + # @schema + # -- SPIRE server address used by Cilium Operator + # + # If k8s Service DNS along with port number is used (e.g. ..svc(.*): format), + # Cilium Operator will resolve its address by looking up the clusterIP from Service resource. + # + # Example values: 10.0.0.1:8081, spire-server.cilium-spire.svc:8081 + serverAddress: ~ + # -- SPIFFE trust domain to use for fetching certificates + trustDomain: spiffe.cilium + # -- SPIRE socket path where the SPIRE delegated api agent is listening + adminSocketPath: /run/spire/sockets/admin.sock + # -- SPIRE socket path where the SPIRE workload agent is listening. 
+ # Applies to both the Cilium Agent and Operator + agentSocketPath: /run/spire/sockets/agent/agent.sock + # -- SPIRE connection timeout + connectionTimeout: 30s diff --git a/k8s/manifests/charts/ck-loadbalancer_values.yaml b/k8s/manifests/charts/ck-loadbalancer_values.yaml new file mode 100644 index 000000000..e1a9eef69 --- /dev/null +++ b/k8s/manifests/charts/ck-loadbalancer_values.yaml @@ -0,0 +1,21 @@ +driver: + +l2: + enabled: true + # interfaces: + # - "^eth[0-9]+" + interfaces: [] + +ipPool: + # cidrs: + # - cidr: "10.42.254.176/28" + cidrs: [] + +bgp: + enabled: false + localASN: 64512 + # neighbors: + # - peerAddress: '10.0.0.60/24' + # peerASN: 65100 + # peerPort: 179 + neighbors: [] diff --git a/k8s/manifests/charts/coredns-1.36.0_values.yaml b/k8s/manifests/charts/coredns-1.36.0_values.yaml new file mode 100644 index 000000000..4c1333f76 --- /dev/null +++ b/k8s/manifests/charts/coredns-1.36.0_values.yaml @@ -0,0 +1,400 @@ +# Default values for coredns. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +image: + repository: coredns/coredns + # Overrides the image tag whose default is the chart appVersion. + tag: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + pullSecrets: [] + # pullSecrets: + # - name: myRegistryKeySecretName + +replicaCount: 1 + +resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 100m + memory: 128Mi + +rollingUpdate: + maxUnavailable: 1 + maxSurge: 25% + +terminationGracePeriodSeconds: 30 + +podAnnotations: {} +# cluster-autoscaler.kubernetes.io/safe-to-evict: "false" + +serviceType: "ClusterIP" + +prometheus: + service: + enabled: false + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9153" + selector: {} + monitor: + enabled: false + additionalLabels: {} + namespace: "" + interval: "" + selector: {} + +service: +# clusterIP: "" +# clusterIPs: [] +# loadBalancerIP: "" +# loadBalancerClass: "" +# externalIPs: [] +# externalTrafficPolicy: "" +# ipFamilyPolicy: "" +# trafficDistribution: PreferClose + # The name of the Service + # If not set, a name is generated using the fullname template + name: "" + annotations: {} + # Pod selector + selector: {} + +serviceAccount: + create: false + # The name of the ServiceAccount to use + # If not set and create is true, a name is generated using the fullname template + name: "" + annotations: {} + +rbac: + # If true, create & use RBAC resources + create: true + # If true, create and use PodSecurityPolicy + pspEnable: false + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + # name: + +# isClusterService specifies whether chart should be deployed as cluster-service or normal k8s app. +isClusterService: true + +# Optional priority class to be used for the coredns pods. Used for autoscaler if autoscaler.priorityClassName not set. +priorityClassName: "" + +# Configure the pod level securityContext. +podSecurityContext: {} + +# Configure SecurityContext for Pod. 
+# Ensure that required linux capability to bind port number below 1024 is assigned (`CAP_NET_BIND_SERVICE`). +securityContext: + capabilities: + add: + - NET_BIND_SERVICE + +# Default zone is what Kubernetes recommends: +# https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/#coredns-configmap-options +servers: +- zones: + - zone: . + port: 53 + # -- expose the service on a different port + # servicePort: 5353 + # If serviceType is nodePort you can specify nodePort here + # nodePort: 30053 + # hostPort: 53 + plugins: + - name: errors + # Serves a /health endpoint on :8080, required for livenessProbe + - name: health + configBlock: |- + lameduck 5s + # Serves a /ready endpoint on :8181, required for readinessProbe + - name: ready + # Required to query kubernetes API for data + - name: kubernetes + parameters: cluster.local in-addr.arpa ip6.arpa + configBlock: |- + pods insecure + fallthrough in-addr.arpa ip6.arpa + ttl 30 + # Serves a /metrics endpoint on :9153, required for serviceMonitor + - name: prometheus + parameters: 0.0.0.0:9153 + - name: forward + parameters: . /etc/resolv.conf + - name: cache + parameters: 30 + - name: loop + - name: reload + - name: loadbalance + +# Complete example with all the options: +# - zones: # the `zones` block can be left out entirely, defaults to "." +# - zone: hello.world. # optional, defaults to "." +# scheme: tls:// # optional, defaults to "" (which equals "dns://" in CoreDNS) +# - zone: foo.bar. +# scheme: dns:// +# use_tcp: true # set this parameter to optionally expose the port on tcp as well as udp for the DNS protocol +# # Note that this will not work if you are also exposing tls or grpc on the same server +# port: 12345 # optional, defaults to "" (which equals 53 in CoreDNS) +# plugins: # the plugins to use for this server block +# - name: kubernetes # name of plugin, if used multiple times ensure that the plugin supports it! 
+# parameters: foo bar # list of parameters after the plugin +# configBlock: |- # if the plugin supports extra block style config, supply it here +# hello world +# foo bar + +# Extra configuration that is applied outside of the default zone block. +# Example to include additional config files, which may come from extraVolumes: +# extraConfig: +# import: +# parameters: /opt/coredns/*.conf +extraConfig: {} + +# To use the livenessProbe, the health plugin needs to be enabled in CoreDNS' server config +livenessProbe: + enabled: true + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 +# To use the readinessProbe, the ready plugin needs to be enabled in CoreDNS' server config +readinessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + +# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core +# for example: +# affinity: +# nodeAffinity: +# requiredDuringSchedulingIgnoredDuringExecution: +# nodeSelectorTerms: +# - matchExpressions: +# - key: foo.bar.com/role +# operator: In +# values: +# - master +affinity: {} + +# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#topologyspreadconstraint-v1-core +# and supports Helm templating. +# For example: +# topologySpreadConstraints: +# - labelSelector: +# matchLabels: +# app.kubernetes.io/name: '{{ template "coredns.name" . }}' +# app.kubernetes.io/instance: '{{ .Release.Name }}' +# topologyKey: topology.kubernetes.io/zone +# maxSkew: 1 +# whenUnsatisfiable: ScheduleAnyway +# - labelSelector: +# matchLabels: +# app.kubernetes.io/name: '{{ template "coredns.name" . 
}}' +# app.kubernetes.io/instance: '{{ .Release.Name }}' +# topologyKey: kubernetes.io/hostname +# maxSkew: 1 +# whenUnsatisfiable: ScheduleAnyway +topologySpreadConstraints: [] + +# Node labels for pod assignment +# Ref: https://kubernetes.io/docs/user-guide/node-selection/ +nodeSelector: {} + +# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core +# for example: +# tolerations: +# - key: foo.bar.com/role +# operator: Equal +# value: master +# effect: NoSchedule +tolerations: [] + +# https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget +podDisruptionBudget: {} + +# configure custom zone files as per https://coredns.io/2017/05/08/custom-dns-entries-for-kubernetes/ +zoneFiles: [] +# - filename: example.db +# domain: example.com +# contents: | +# example.com. IN SOA sns.dns.icann.com. noc.dns.icann.com. 2015082541 7200 3600 1209600 3600 +# example.com. IN NS b.iana-servers.net. +# example.com. IN NS a.iana-servers.net. +# example.com. IN A 192.168.99.102 +# *.example.com. 
IN A 192.168.99.102 + +# optional array of sidecar containers +extraContainers: [] +# - name: some-container-name +# image: some-image:latest +# imagePullPolicy: Always +# optional array of extra volumes to create +extraVolumes: [] +# - name: some-volume-name +# emptyDir: {} +# optional array of mount points for extraVolumes +extraVolumeMounts: [] +# - name: some-volume-name +# mountPath: /etc/wherever + +# optional array of secrets to mount inside coredns container +# possible usecase: need for secure connection with etcd backend +extraSecrets: [] +# - name: etcd-client-certs +# mountPath: /etc/coredns/tls/etcd +# defaultMode: 420 +# - name: some-fancy-secret +# mountPath: /etc/wherever +# defaultMode: 440 + +# optional array of environment variables for coredns container +# possible usecase: provides username and password for etcd user authentications +env: [] +# - name: WHATEVER_ENV +# value: whatever +# - name: SOME_SECRET_ENV +# valueFrom: +# secretKeyRef: +# name: some-secret-name +# key: secret-key + +# To support legacy deployments using CoreDNS with the "k8s-app: kube-dns" label selectors. +# See https://github.com/coredns/helm/blob/master/charts/coredns/README.md#adopting-existing-coredns-resources +# k8sAppLabelOverride: "kube-dns" + +# Custom labels to apply to Deployment, Pod, Configmap, Service, ServiceMonitor. Including autoscaler if enabled. +customLabels: {} + +# Custom annotations to apply to Deployment, Pod, Configmap, Service, ServiceMonitor. Including autoscaler if enabled. +customAnnotations: {} + +## Alternative configuration for HPA deployment if wanted +## Create HorizontalPodAutoscaler object. 
+## +# hpa: +# enabled: false +# minReplicas: 1 +# maxReplicas: 10 +# metrics: +# metrics: +# - type: Resource +# resource: +# name: memory +# target: +# type: Utilization +# averageUtilization: 60 +# - type: Resource +# resource: +# name: cpu +# target: +# type: Utilization +# averageUtilization: 60 + +hpa: + enabled: false + minReplicas: 1 + maxReplicas: 2 + metrics: [] + +## Configue a cluster-proportional-autoscaler for coredns +# See https://github.com/kubernetes-incubator/cluster-proportional-autoscaler +autoscaler: + # Enabled the cluster-proportional-autoscaler + enabled: false + + # Number of cores in the cluster per coredns replica + coresPerReplica: 256 + # Number of nodes in the cluster per coredns replica + nodesPerReplica: 16 + # Min size of replicaCount + min: 0 + # Max size of replicaCount (default of 0 is no max) + max: 0 + # Whether to include unschedulable nodes in the nodes/cores calculations - this requires version 1.8.0+ of the autoscaler + includeUnschedulableNodes: false + # If true does not allow single points of failure to form + preventSinglePointFailure: true + + # Annotations for the coredns proportional autoscaler pods + podAnnotations: {} + + ## Optionally specify some extra flags to pass to cluster-proprtional-autoscaler. + ## Useful for e.g. the nodelabels flag. + # customFlags: + # - --nodelabels=topology.kubernetes.io/zone=us-east-1a + + image: + repository: registry.k8s.io/cpa/cluster-proportional-autoscaler + tag: "1.8.5" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + pullSecrets: [] + # pullSecrets: + # - name: myRegistryKeySecretName + + # Optional priority class to be used for the autoscaler pods. priorityClassName used if not set. 
+ priorityClassName: "" + + # expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core + affinity: {} + + # Node labels for pod assignment + # Ref: https://kubernetes.io/docs/user-guide/node-selection/ + nodeSelector: {} + + # expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core + tolerations: [] + + # resources for autoscaler pod + resources: + requests: + cpu: "20m" + memory: "10Mi" + limits: + cpu: "20m" + memory: "10Mi" + + # Options for autoscaler configmap + configmap: + ## Annotations for the coredns-autoscaler configmap + # i.e. strategy.spinnaker.io/versioned: "false" to ensure configmap isn't renamed + annotations: {} + + # Enables the livenessProbe for cluster-proportional-autoscaler - this requires version 1.8.0+ of the autoscaler + livenessProbe: + enabled: true + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 5 + failureThreshold: 3 + successThreshold: 1 + + # optional array of sidecar containers + extraContainers: [] + # - name: some-container-name + # image: some-image:latest + # imagePullPolicy: Always + +deployment: + skipConfig: false + enabled: true + name: "" + ## Annotations for the coredns deployment + annotations: {} + ## Pod selector + selector: {} diff --git a/k8s/manifests/charts/generator.go b/k8s/manifests/charts/generator.go new file mode 100644 index 000000000..aba0502d1 --- /dev/null +++ b/k8s/manifests/charts/generator.go @@ -0,0 +1,439 @@ +package main + +import ( + "bufio" + "flag" + "fmt" + "log" + "os" + "os/exec" + "path" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/iancoleman/strcase" + "gopkg.in/yaml.v3" +) + +const ( + toolName = "CHART_VALUES_STRUCT_GENERATOR" + unsafeFieldName = "UNSAFE_MISC_FIELDS" +) + +type structMeta struct { + // isRoot is true if the struct is the root struct of the file + isRoot bool + + name string + docString 
string + // inner can be either a fieldMeta or a structMeta + fields []*fieldMeta +} + +type fieldMeta struct { + name string + originalYamlName string + docString string + typ string +} + +type goRecipe struct { + advancedTypesEnabled bool + unsafeFieldEnabled bool + + pkgName string + structs []*structMeta +} + +// fill recursively generates Go recipe definitions from a YAML Node +func (recipe *goRecipe) fill(structName string, node *yaml.Node, docString string, isRoot bool) { + stMeta := &structMeta{ + isRoot: isRoot, + name: structName, + docString: docString, + } + + for i := 0; i < len(node.Content); i += 2 { + keyNode := node.Content[i] + valueNode := node.Content[i+1] + fieldName := strcase.ToCamel(keyNode.Value) + + field := &fieldMeta{ + name: fieldName, + originalYamlName: keyNode.Value, + docString: strings.Join(extractComments(keyNode), "\n"), + } + + supportingComments := []string{} + + // TODO: handle such cases: + // controller: + // <<: *defaults + + switch valueNode.Kind { + case yaml.MappingNode: + // nested struct + if len(valueNode.Content) == 0 { + // struct of unknown type, e.g. a map[string]any + field.typ = "map[string]any" + } else { + // struct of known type, the type will be the name of the struct + nestedStructName := structName + "_" + fieldName + field.typ = nestedStructName + recipe.fill(nestedStructName, valueNode, field.docString, false) + } + case yaml.SequenceNode: + if len(valueNode.Content) == 0 || len(valueNode.Content[0].Content) == 0 { + // if the list has items, but the items are not mappings, include them as comment + // to act as a hint for the user + if len(valueNode.Content) > 0 { + for _, c := range valueNode.Content { + supportingComments = append(supportingComments, extractComments(c)...) 
+ supportingComments = append(supportingComments, fmt.Sprintf("// - %s", c.Value)) + } + } + if recipe.advancedTypesEnabled { + field.typ = infereTypeString(valueNode) + } else { + field.typ = "[]any" + } + + } else { + // list with its own struct + nestedListName := structName + "_" + fieldName + "Item" + field.typ = "[]" + nestedListName + recipe.fill(nestedListName, valueNode.Content[0], field.docString, false) + } + case yaml.ScalarNode: + // scalar value + if recipe.advancedTypesEnabled { + field.typ = infereTypeString(valueNode) + } else { + field.typ = "any" + } + } + + if len(supportingComments) > 0 { + supDocString := strings.Join(supportingComments, "\n") + if field.docString != "" { + field.docString = fmt.Sprintf("%s\n%s", field.docString, supDocString) + } else { + field.docString = supDocString + } + } + stMeta.fields = append(stMeta.fields, field) + } + + recipe.structs = append(recipe.structs, stMeta) +} + +// extractComments extracts comments from a YAML node +func extractComments(n *yaml.Node) []string { + totalLines := []string{} + if hc := n.HeadComment; hc != "" { + lines := strings.Split(hc, "\n") + for _, l := range lines { + l = strings.TrimSpace(l) + l = strings.TrimLeft(l, "#") + l = strings.TrimSpace(l) + totalLines = append(totalLines, fmt.Sprintf("// %s", l)) + } + } + if lc := n.LineComment; lc != "" { + lines := strings.Split(lc, "\n") + for _, l := range lines { + l = strings.TrimSpace(l) + l = strings.TrimLeft(l, "#") + l = strings.TrimSpace(l) + totalLines = append(totalLines, fmt.Sprintf("// %s", l)) + } + } + if fc := n.FootComment; fc != "" { + lines := strings.Split(fc, "\n") + for _, l := range lines { + l = strings.TrimSpace(l) + l = strings.TrimLeft(l, "#") + l = strings.TrimSpace(l) + totalLines = append(totalLines, fmt.Sprintf("// %s", l)) + } + } + + return totalLines +} + +func (recipe *goRecipe) generateGoFile() string { + out := []string{ + fmt.Sprintf(`// Code generated by %[1]s. DO NOT EDIT. 
+ // + // This file was autogenerated by the %[1]s tool on %[2]s. + // Any changes will be overwritten. + // + // These files are generated from the values.yaml files in the k8s/manifests/charts directory. + // Head to the k8s/manifests/charts/Makefile to see how to generate these files. + // + // Package %[3]s contains the Go structs representing the values of the Helm chart. + package %[3]s + + import ( + "fmt" + "github.com/mitchellh/mapstructure" + ) + `, toolName, time.Now().Format(time.DateOnly), recipe.pkgName), + } + + for _, st := range recipe.structs { + // add struct docstring + if st.docString != "" { + out = append(out, st.docString) + } + + // add struct definition + out = append(out, fmt.Sprintf("type %s struct {", st.name)) + for _, f := range st.fields { + // TODO: theoretically this should be removed if all cases are handled (e.g. <<: *defaults) + if f.name == "" || f.typ == "" { + continue + } + + if f.docString != "" { + out = append(out, f.docString) + } + out = append(out, fmt.Sprintf("\t%s %s `mapstructure:\"%s,omitempty\"`", f.name, f.typ, f.originalYamlName)) + } + // unsafe miscellanous fields + if st.isRoot && recipe.unsafeFieldEnabled { + out = append(out, fmt.Sprintf(`// UNSAFE. USE WITH CAUTION + // + // %[1]s is a place for any additional fields that are not handled by the generator + // The value of this field is going to be available as is in the output of .ToMap() + // The fields in this map will overwrite other fields if their names match. + // Field A has the same name as field B in the %[1]s map, if the mapstructure format + // of field A is exactly equal to the actual string literal of field B. 
+ // Example: + // type Values struct { + // FieldA string`+" `mapstructure:\"myField\"`"+` + // %[1]s map[string]any + // } + // v := Values{ + // FieldA: "originalValue" + // %[1]s: map[string]any{ + // "myField": "newValue", // same as FieldA mapstructure format + // "anotherField": "anotherValue", // new field that will be included in the map output + // } + // } + // v.ToMap() // returns map[string]any{"myField": "newValue", "anotherField": "anotherValue"} + //`, unsafeFieldName)) + out = append(out, fmt.Sprintf("\t%s map[string]any `mapstructure:\"-\"`", unsafeFieldName)) + } + out = append(out, "}") + + // add ToMap() method + out = append(out, fmt.Sprintf(` + func (v *%[1]s) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %%w", err) + }`, st.name)) + if st.isRoot && recipe.unsafeFieldEnabled { + out = append(out, fmt.Sprintf(` + // handle UNSAFE fields + for k, v := range v.%s { + result[k] = v + } + `, unsafeFieldName)) + } + out = append(out, "return result, nil }") + } + + return strings.Join(out, "\n") +} + +// processYAMLFile reads a YAML file and generates the corresponding .go file +func processYAMLFile(filePath, structName, outputFilePath string, recipe *goRecipe) error { + data, err := os.ReadFile(filePath) + if err != nil { + return fmt.Errorf("failed to read YAML file %s: %w", filePath, err) + } + + var rootNode yaml.Node + if err := yaml.Unmarshal(data, &rootNode); err != nil { + return fmt.Errorf("failed to parse YAML file %s: %w", filePath, err) + } + + docString := fmt.Sprintf("// %s represents the values of the %s chart", structName, filePath) + recipe.fill(structName, rootNode.Content[0], docString, true) + + if err := os.WriteFile(outputFilePath, []byte(recipe.generateGoFile()), 0644); err != nil { + return fmt.Errorf("failed to write Go file %s: %w", outputFilePath, err) + } + + if err := 
formatGoFile(outputFilePath); err != nil { + return fmt.Errorf("failed to format Go file %s: %w", outputFilePath, err) + } + + fmt.Printf("Generated %s\n", outputFilePath) + return nil +} + +// formatGoFile formats a Go file using gofmt +func formatGoFile(filePath string) error { + if err := runCmd("gofmt", "-w", filePath); err != nil { + return fmt.Errorf("failed to format %s: %w", filePath, err) + } + return nil +} + +// runCmd runs a command +func runCmd(parts ...string) error { + if len(parts) == 0 { + return fmt.Errorf("no command provided") + } + + cmd := exec.Command(parts[0], parts[1:]...) + var out strings.Builder + cmd.Stdout = &out + cmd.Stderr = &out + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to run command: %w\nOutput: %s", err, out.String()) + } + + return nil +} + +func infereTypeString(n *yaml.Node) string { + switch n.Tag { + case "!!bool": + return "bool" + case "!!int": + return "int64" + case "!!float": + return "float64" + case "!!null": + return "any" + case "!!str": + return "string" + case "!!seq": + if len(n.Content) == 0 { + return "[]any" + } + return "[]" + infereTypeString(n.Content[0]) + case "!!map": + return "map[string]any" + } + + switch n.Kind { + case yaml.ScalarNode: + if isBool(n.Value) { + return "bool" + } else if isInt(n.Value) { + return "int64" + } else if isFloat(n.Value) { + return "float64" + } else if isNull(n.Value) { + return "any" + } else if strings.HasPrefix(n.Value, "\"") { + return "string" + } + return "string" + case yaml.SequenceNode: + if len(n.Content) == 0 { + return "[]any" + } + return "[]" + infereTypeString(n.Content[0]) + default: + return "any" + } +} + +func isBool(s string) bool { + s = strings.TrimSpace(s) + if strings.HasPrefix(s, "\"") { + return false + } + return s == "true" || s == "false" || + s == "True" || s == "False" || + s == "TRUE" || s == "FALSE" || + s == "yes" || s == "no" || + s == "Yes" || s == "No" || + s == "YES" || s == "NO" || + s == "on" || s == "off" || 
+ s == "On" || s == "Off" || + s == "ON" || s == "OFF" +} + +func isInt(s string) bool { + s = strings.TrimSpace(s) + _, err := strconv.ParseInt(s, 10, 64) + return err == nil +} + +func isFloat(s string) bool { + s = strings.TrimSpace(s) + _, err := strconv.ParseFloat(s, 64) + return err == nil +} + +func isNull(s string) bool { + s = strings.TrimSpace(strings.ToLower(s)) + return len(s) == 0 || s == "null" || s == "~" || s == "\"\"" +} + +func main() { + var ( + sourcesFile string + pkgName string + targetDir string + advancedTypesEnabled bool + unsafeFieldEnabled bool + ) + + flag.StringVar(&sourcesFile, "sources", "sources.txt", "Path to the file containing the list of YAML sources") + flag.StringVar(&pkgName, "pkg", "main", "Name of the package to generate") + flag.StringVar(&targetDir, "dir", ".", "Directory where the generated files will be saved") + flag.BoolVar(&advancedTypesEnabled, "advanced-types", false, "Enable advanced types (e.g. string instead of any where possible)") + flag.BoolVar(&unsafeFieldEnabled, "unsafe-field", false, "Add a map[string]any field to the root struct to handle any additional fields") + flag.Parse() + + if _, err := os.Stat(targetDir); os.IsNotExist(err) { + err := os.Mkdir(targetDir, 0755) + if err != nil { + log.Fatalf("Failed to create directory: %v\n", err) + } + } + + file, err := os.Open(sourcesFile) + if err != nil { + log.Fatalf("Failed to open sources file: %v\n", err) + } + defer file.Close() + + scanner := bufio.NewScanner(file) + for scanner.Scan() { + yamlFile := scanner.Text() + + if yamlFile == "" { + // empty line + continue + } + + baseName := strings.TrimSuffix(filepath.Base(yamlFile), filepath.Ext(yamlFile)) + structName := strcase.ToCamel(strings.ReplaceAll(strings.ReplaceAll(baseName, ".", "_"), "-", "_")) + outputFilePath := path.Join(targetDir, fmt.Sprintf("%s.go", baseName)) + + recipe := &goRecipe{ + pkgName: pkgName, + advancedTypesEnabled: advancedTypesEnabled, + unsafeFieldEnabled: 
unsafeFieldEnabled, + } + if err := processYAMLFile(yamlFile, structName, outputFilePath, recipe); err != nil { + log.Fatalf("Error processing file %s: %v\n", yamlFile, err) + } + } + + if err := scanner.Err(); err != nil { + log.Fatalf("Error reading sources file: %v\n", err) + } +} diff --git a/k8s/manifests/charts/go.mod b/k8s/manifests/charts/go.mod new file mode 100644 index 000000000..06645d625 --- /dev/null +++ b/k8s/manifests/charts/go.mod @@ -0,0 +1,8 @@ +module generator + +go 1.22.6 + +require ( + github.com/iancoleman/strcase v0.3.0 + gopkg.in/yaml.v3 v3.0.1 +) diff --git a/k8s/manifests/charts/go.sum b/k8s/manifests/charts/go.sum new file mode 100644 index 000000000..2f23dde48 --- /dev/null +++ b/k8s/manifests/charts/go.sum @@ -0,0 +1,6 @@ +github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= +github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/k8s/manifests/charts/metallb-0.14.8_values.yaml b/k8s/manifests/charts/metallb-0.14.8_values.yaml new file mode 100644 index 000000000..bc96d3550 --- /dev/null +++ b/k8s/manifests/charts/metallb-0.14.8_values.yaml @@ -0,0 +1,365 @@ +# Default values for metallb. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" +loadBalancerClass: "" + +# To configure MetalLB, you must specify ONE of the following two +# options. + +rbac: + # create specifies whether to install and use RBAC rules. 
+ create: true + +prometheus: + # scrape annotations specifies whether to add Prometheus metric + # auto-collection annotations to pods. See + # https://github.com/prometheus/prometheus/blob/release-2.1/documentation/examples/prometheus-kubernetes.yml + # for a corresponding Prometheus configuration. Alternatively, you + # may want to use the Prometheus Operator + # (https://github.com/coreos/prometheus-operator) for more powerful + # monitoring configuration. If you use the Prometheus operator, this + # can be left at false. + scrapeAnnotations: false + + # port both controller and speaker will listen on for metrics + metricsPort: 7472 + + # if set, enables rbac proxy on the controller and speaker to expose + # the metrics via tls. + # secureMetricsPort: 9120 + + # the name of the secret to be mounted in the speaker pod + # to expose the metrics securely. If not present, a self signed + # certificate to be used. + speakerMetricsTLSSecret: "" + + # the name of the secret to be mounted in the controller pod + # to expose the metrics securely. If not present, a self signed + # certificate to be used. 
+ controllerMetricsTLSSecret: "" + + # prometheus doens't have the permission to scrape all namespaces so we give it permission to scrape metallb's one + rbacPrometheus: true + + # the service account used by prometheus + # required when " .Values.prometheus.rbacPrometheus == true " and " .Values.prometheus.podMonitor.enabled=true or prometheus.serviceMonitor.enabled=true " + serviceAccount: "" + + # the namespace where prometheus is deployed + # required when " .Values.prometheus.rbacPrometheus == true " and " .Values.prometheus.podMonitor.enabled=true or prometheus.serviceMonitor.enabled=true " + namespace: "" + + # the image to be used for the kuberbacproxy container + rbacProxy: + repository: gcr.io/kubebuilder/kube-rbac-proxy + tag: v0.12.0 + pullPolicy: + + # Prometheus Operator PodMonitors + podMonitor: + # enable support for Prometheus Operator + enabled: false + + # optional additionnal labels for podMonitors + additionalLabels: {} + + # optional annotations for podMonitors + annotations: {} + + # Job label for scrape target + jobLabel: "app.kubernetes.io/name" + + # Scrape interval. If not set, the Prometheus default scrape interval is used. + interval: + + # metric relabel configs to apply to samples before ingestion. + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + # relabel configs to apply to samples before ingestion. + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # target_label: nodename + # replacement: $1 + # action: replace + + # Prometheus Operator ServiceMonitors. To be used as an alternative + # to podMonitor, supports secure metrics. 
+ serviceMonitor: + # enable support for Prometheus Operator + enabled: false + + speaker: + # optional additional labels for the speaker serviceMonitor + additionalLabels: {} + # optional additional annotations for the speaker serviceMonitor + annotations: {} + # optional tls configuration for the speaker serviceMonitor, in case + # secure metrics are enabled. + tlsConfig: + insecureSkipVerify: true + + controller: + # optional additional labels for the controller serviceMonitor + additionalLabels: {} + # optional additional annotations for the controller serviceMonitor + annotations: {} + # optional tls configuration for the controller serviceMonitor, in case + # secure metrics are enabled. + tlsConfig: + insecureSkipVerify: true + + # Job label for scrape target + jobLabel: "app.kubernetes.io/name" + + # Scrape interval. If not set, the Prometheus default scrape interval is used. + interval: + + # metric relabel configs to apply to samples before ingestion. + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + # relabel configs to apply to samples before ingestion. 
+ relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # target_label: nodename + # replacement: $1 + # action: replace + + # Prometheus Operator alertmanager alerts + prometheusRule: + # enable alertmanager alerts + enabled: false + + # optional additionnal labels for prometheusRules + additionalLabels: {} + + # optional annotations for prometheusRules + annotations: {} + + # MetalLBStaleConfig + staleConfig: + enabled: true + labels: + severity: warning + + # MetalLBConfigNotLoaded + configNotLoaded: + enabled: true + labels: + severity: warning + + # MetalLBAddressPoolExhausted + addressPoolExhausted: + enabled: true + labels: + severity: alert + + addressPoolUsage: + enabled: true + thresholds: + - percent: 75 + labels: + severity: warning + - percent: 85 + labels: + severity: warning + - percent: 95 + labels: + severity: alert + + # MetalLBBGPSessionDown + bgpSessionDown: + enabled: true + labels: + severity: alert + + extraAlerts: [] + +# controller contains configuration specific to the MetalLB cluster +# controller. +controller: + enabled: true + # -- Controller log level. Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none` + logLevel: info + # command: /controller + # webhookMode: enabled + image: + repository: quay.io/metallb/controller + tag: + pullPolicy: + ## @param controller.updateStrategy.type Metallb controller deployment strategy type. + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy + ## e.g: + ## strategy: + ## type: RollingUpdate + ## rollingUpdate: + ## maxSurge: 25% + ## maxUnavailable: 25% + ## + strategy: + type: RollingUpdate + serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. 
If not set and create is + # true, a name is generated using the fullname template + name: "" + annotations: {} + securityContext: + runAsNonRoot: true + # nobody + runAsUser: 65534 + fsGroup: 65534 + resources: {} + # limits: + # cpu: 100m + # memory: 100Mi + nodeSelector: {} + tolerations: [] + priorityClassName: "" + runtimeClassName: "" + affinity: {} + podAnnotations: {} + labels: {} + livenessProbe: + enabled: true + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + enabled: true + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + tlsMinVersion: "VersionTLS12" + tlsCipherSuites: "" + + extraContainers: [] + +# speaker contains configuration specific to the MetalLB speaker +# daemonset. +speaker: + enabled: true + # command: /speaker + # -- Speaker log level. Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none` + logLevel: info + tolerateMaster: true + memberlist: + enabled: true + mlBindPort: 7946 + mlBindAddrOverride: "" + mlSecretKeyPath: "/etc/ml_secret_key" + excludeInterfaces: + enabled: true + # ignore the exclude-from-external-loadbalancer label + ignoreExcludeLB: false + + image: + repository: quay.io/metallb/speaker + tag: + pullPolicy: + ## @param speaker.updateStrategy.type Speaker daemonset strategy type + ## ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/ + ## + updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate or OnDelete + ## + type: RollingUpdate + serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. 
If not set and create is + # true, a name is generated using the fullname template + name: "" + annotations: {} + securityContext: {} + ## Defines a secret name for the controller to generate a memberlist encryption secret + ## By default secretName: {{ "metallb.fullname" }}-memberlist + ## + # secretName: + resources: {} + # limits: + # cpu: 100m + # memory: 100Mi + nodeSelector: {} + tolerations: [] + priorityClassName: "" + affinity: {} + ## Selects which runtime class will be used by the pod. + runtimeClassName: "" + podAnnotations: {} + labels: {} + livenessProbe: + enabled: true + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + enabled: true + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + startupProbe: + enabled: true + failureThreshold: 30 + periodSeconds: 5 + # frr contains configuration specific to the MetalLB FRR container, + # for speaker running alongside FRR. + frr: + enabled: true + image: + repository: quay.io/frrouting/frr + tag: 9.1.0 + pullPolicy: + metricsPort: 7473 + resources: {} + + # if set, enables a rbac proxy sidecar container on the speaker to + # expose the frr metrics via tls. + # secureMetricsPort: 9121 + + + reloader: + resources: {} + + frrMetrics: + resources: {} + + extraContainers: [] + +crds: + enabled: true + validationFailurePolicy: Fail + +# frrk8s contains the configuration related to using an frrk8s instance +# (github.com/metallb/frr-k8s) as the backend for the BGP implementation. +# This allows configuring additional frr parameters in combination to those +# applied by MetalLB. +frrk8s: + # if set, enables frrk8s as a backend. This is mutually exclusive to frr + # mode. 
+ enabled: false + external: false + namespace: "" diff --git a/k8s/manifests/charts/metrics-server-3.12.2_values.yaml b/k8s/manifests/charts/metrics-server-3.12.2_values.yaml new file mode 100644 index 000000000..be843db41 --- /dev/null +++ b/k8s/manifests/charts/metrics-server-3.12.2_values.yaml @@ -0,0 +1,200 @@ +# Default values for metrics-server. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +image: + repository: registry.k8s.io/metrics-server/metrics-server + # Overrides the image tag whose default is v{{ .Chart.AppVersion }} + tag: "" + pullPolicy: IfNotPresent + +imagePullSecrets: [] +# - name: registrySecretName + +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + # The list of secrets mountable by this service account. + # See https://kubernetes.io/docs/reference/labels-annotations-taints/#enforce-mountable-secrets + secrets: [] + +rbac: + # Specifies whether RBAC resources should be created + create: true + # Note: PodSecurityPolicy will not be created when Kubernetes version is 1.25 or later. + pspEnabled: false + +apiService: + # Specifies if the v1beta1.metrics.k8s.io API service should be created. + # + # You typically want this enabled! If you disable API service creation you have to + # manage it outside of this chart for e.g horizontal pod autoscaling to + # work with this release. 
+ create: true + # Annotations to add to the API service + annotations: {} + # Specifies whether to skip TLS verification + insecureSkipTLSVerify: true + # The PEM encoded CA bundle for TLS verification + caBundle: "" + +commonLabels: {} +podLabels: {} +podAnnotations: {} + +podSecurityContext: {} + +securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + +priorityClassName: system-cluster-critical + +containerPort: 10250 + +hostNetwork: + # Specifies if metrics-server should be started in hostNetwork mode. + # + # You would require this enabled if you use alternate overlay networking for pods and + # API server unable to communicate with metrics-server. As an example, this is required + # if you use Weave network on EKS + enabled: false + +replicas: 1 + +revisionHistoryLimit: + +updateStrategy: {} +# type: RollingUpdate +# rollingUpdate: +# maxSurge: 0 +# maxUnavailable: 1 + +podDisruptionBudget: + # https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + enabled: false + minAvailable: + maxUnavailable: + +defaultArgs: + - --cert-dir=/tmp + - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + - --kubelet-use-node-status-port + - --metric-resolution=15s + +args: [] + +livenessProbe: + httpGet: + path: /livez + port: https + scheme: HTTPS + initialDelaySeconds: 0 + periodSeconds: 10 + failureThreshold: 3 + +readinessProbe: + httpGet: + path: /readyz + port: https + scheme: HTTPS + initialDelaySeconds: 20 + periodSeconds: 10 + failureThreshold: 3 + +service: + type: ClusterIP + port: 443 + annotations: {} + labels: {} + # Add these labels to have metrics-server show up in `kubectl cluster-info` + # kubernetes.io/cluster-service: "true" + # kubernetes.io/name: "Metrics-server" + +addonResizer: + enabled: false + image: + repository: registry.k8s.io/autoscaling/addon-resizer + tag: 1.8.21 + 
securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + resources: + requests: + cpu: 40m + memory: 25Mi + limits: + cpu: 40m + memory: 25Mi + nanny: + cpu: 0m + extraCpu: 1m + memory: 0Mi + extraMemory: 2Mi + minClusterSize: 100 + pollPeriod: 300000 + threshold: 5 + +metrics: + enabled: false + +serviceMonitor: + enabled: false + additionalLabels: {} + interval: 1m + scrapeTimeout: 10s + metricRelabelings: [] + relabelings: [] + +# See https://github.com/kubernetes-sigs/metrics-server#scaling +resources: + requests: + cpu: 100m + memory: 200Mi + # limits: + # cpu: + # memory: + +extraVolumeMounts: [] + +extraVolumes: [] + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +topologySpreadConstraints: [] + +dnsConfig: {} + +# Annotations to add to the deployment +deploymentAnnotations: {} + +schedulerName: "" + +tmpVolume: + emptyDir: {} diff --git a/k8s/manifests/charts/rawfile-csi-0.9.0_values.yaml b/k8s/manifests/charts/rawfile-csi-0.9.0_values.yaml new file mode 100644 index 000000000..f555c2aa0 --- /dev/null +++ b/k8s/manifests/charts/rawfile-csi-0.9.0_values.yaml @@ -0,0 +1,45 @@ +provisionerName: "rawfile.csi.openebs.io" + +defaults: &defaults + image: + repository: docker.io/openebs/rawfile-localpv + tag: 0.8.0 + pullPolicy: Always + resources: + limits: + cpu: 1 + memory: 100Mi + requests: + cpu: 10m + memory: 100Mi + +controller: + <<: *defaults + csiDriverArgs: + - csi-driver + - --disable-metrics + +images: + csiNodeDriverRegistrar: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.1 + csiProvisioner: registry.k8s.io/sig-storage/csi-provisioner:v5.0.1 + csiResizer: registry.k8s.io/sig-storage/csi-resizer:v1.11.1 + csiSnapshotter: registry.k8s.io/sig-storage/csi-snapshotter:v8.0.1 + +node: + <<: *defaults + storage: + path: /var/csi/rawfile + metrics: + enabled: false + +storageClass: + enabled: false + 
name: "csi-rawfile-default" + isDefault: true + reclaimPolicy: Delete + volumeBindingMode: WaitForFirstConsumer + +imagePullSecrets: [] +serviceMonitor: + enabled: true + interval: 1m diff --git a/k8s/manifests/charts/sources.txt b/k8s/manifests/charts/sources.txt new file mode 100644 index 000000000..02dae0c63 --- /dev/null +++ b/k8s/manifests/charts/sources.txt @@ -0,0 +1,6 @@ +coredns-1.36.0_values.yaml +cilium-1.16.3_values.yaml +ck-loadbalancer_values.yaml +metallb-0.14.8_values.yaml +rawfile-csi-0.9.0_values.yaml +metrics-server-3.12.2_values.yaml diff --git a/src/k8s/pkg/k8sd/features/metallb/test/main.go b/src/k8s/pkg/k8sd/features/metallb/test/main.go new file mode 100644 index 000000000..0c0f4af1a --- /dev/null +++ b/src/k8s/pkg/k8sd/features/metallb/test/main.go @@ -0,0 +1,99 @@ +package main + +import ( + "fmt" + "log" + + "github.com/canonical/k8s/pkg/k8sd/features" + "github.com/canonical/k8s/pkg/k8sd/features/values" + "github.com/canonical/k8s/pkg/k8sd/types" + "k8s.io/utils/ptr" +) + +func main() { + loadbalancer := &types.LoadBalancer{ + Enabled: ptr.To(true), + L2Mode: ptr.To(true), + L2Interfaces: ptr.To([]string{"eth0", "eth1"}), + BGPMode: ptr.To(true), + BGPLocalASN: ptr.To(64512), + BGPPeerAddress: ptr.To("10.0.0.1/32"), + BGPPeerASN: ptr.To(64513), + BGPPeerPort: ptr.To(179), + CIDRs: ptr.To([]string{"192.0.2.0/24"}), + IPRanges: ptr.To([]types.LoadBalancer_IPRange{ + {Start: "20.0.20.100", Stop: "20.0.20.200"}, + }), + } + + cidrs := []map[string]any{} + for _, cidr := range loadbalancer.GetCIDRs() { + cidrs = append(cidrs, map[string]any{"cidr": cidr}) + } + for _, ipRange := range loadbalancer.GetIPRanges() { + cidrs = append(cidrs, map[string]any{"start": ipRange.Start, "stop": ipRange.Stop}) + } + + oldValues := map[string]any{ + // "driver": "metallb", + "l2": map[string]any{ + "enabled": loadbalancer.GetL2Mode(), + "interfaces": loadbalancer.GetL2Interfaces(), + }, + "ipPool": map[string]any{ + "cidrs": cidrs, + }, + "bgp": 
map[string]any{ + "enabled": loadbalancer.GetBGPMode(), + "localASN": loadbalancer.GetBGPLocalASN(), + "neighbors": []map[string]any{ + { + "peerAddress": loadbalancer.GetBGPPeerAddress(), + "peerASN": loadbalancer.GetBGPPeerASN(), + "peerPort": loadbalancer.GetBGPPeerPort(), + }, + }, + }, + } + + lbValues := values.CkLoadbalancerValues{ + Driver: "metallb", + L2: values.CkLoadbalancerValues_L2{ + Enabled: loadbalancer.GetL2Mode(), + Interfaces: features.ToAnyList(loadbalancer.GetL2Interfaces()), + }, + IpPool: values.CkLoadbalancerValues_IpPool{ + Cidrs: features.ToAnyList(cidrs), + }, + Bgp: values.CkLoadbalancerValues_Bgp{ + Enabled: loadbalancer.GetBGPMode(), + LocalAsn: int64(loadbalancer.GetBGPLocalASN()), + Neighbors: features.ToAnyList([]map[string]any{ + { + "peerAddress": loadbalancer.GetBGPPeerAddress(), + "peerASN": loadbalancer.GetBGPPeerASN(), + "peerPort": loadbalancer.GetBGPPeerPort(), + }, + }), + }, + UNSAFE_MISC_FIELDS: map[string]any{ + "random-key": "random-val", + "another": map[string]any{ + "key": "val", + }, + "driver": "overwritten", + }, + } + + lbValuesMap, err := lbValues.ToMap() + if err != nil { + log.Fatalf("failed to convert LoadBalancer values to map: %v", err) + } + + fmt.Println(lbValuesMap) + /* output: + map[bgp:map[enabled:true localAsn:64512 neighbors:[map[peerASN:64513 peerAddress:10.0.0.1/32 peerPort:179]]] driver:metallb ipPool:map[cidrs:[map[cidr:192.0.2.0/24] map[start:20.0.20.100 stop:20.0.20.200]]] l2:map[enabled:true interfaces:[eth0 eth1]]] + */ + fmt.Println() + fmt.Println(oldValues) +} diff --git a/src/k8s/pkg/k8sd/features/utils.go b/src/k8s/pkg/k8sd/features/utils.go new file mode 100644 index 000000000..96cdf3582 --- /dev/null +++ b/src/k8s/pkg/k8sd/features/utils.go @@ -0,0 +1,9 @@ +package features + +func ToAnyList[T any](l []T) []any { + out := make([]any, len(l)) + for i, v := range l { + out[i] = v + } + return out +} diff --git a/src/k8s/pkg/k8sd/features/values/cilium-1.16.3_values.go 
b/src/k8s/pkg/k8sd/features/values/cilium-1.16.3_values.go new file mode 100644 index 000000000..e23269176 --- /dev/null +++ b/src/k8s/pkg/k8sd/features/values/cilium-1.16.3_values.go @@ -0,0 +1,7627 @@ +// Code generated by CHART_VALUES_STRUCT_GENERATOR. DO NOT EDIT. +// +// This file was autogenerated by the CHART_VALUES_STRUCT_GENERATOR tool on 2024-12-12. +// Any changes will be overwritten. +// +// These files are generated from the values.yaml files in the k8s/manifests/charts directory. +// Head to the k8s/manifests/charts/Makefile to see how to generate these files. +// +// Package values contains the Go structs representing the values of the Helm chart. +package values + +import ( + "fmt" + "github.com/mitchellh/mapstructure" +) + +type Cilium1163Values_Debug struct { + // -- Enable debug logging + Enabled bool `mapstructure:"enabled,omitempty"` + // @schema + // type: [null, string] + // @schema + // -- Configure verbosity levels for debug logging + // This option is used to enable debug messages for operations related to such + // sub-system such as (e.g. kvstore, envoy, datapath or policy), and flow is + // for enabling debug messages emitted per request, message and connection. + // Multiple values can be set via a space-separated string (e.g. "datapath envoy"). + // + // Applicable values: + // - flow + // - kvstore + // - envoy + // - datapath + // - policy + Verbose any `mapstructure:"verbose,omitempty"` +} + +func (v *Cilium1163Values_Debug) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Rbac struct { + // -- Enable creation of Resource-Based Access Control configuration. 
+ Create bool `mapstructure:"create,omitempty"` +} + +func (v *Cilium1163Values_Rbac) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Configure the client side rate limit for the agent and operator +// +// If the amount of requests to the Kubernetes API server exceeds the configured +// rate limit, the agent and operator will start to throttle requests by delaying +// them until there is budget or the request times out. +type Cilium1163Values_K8SClientRateLimit struct { + // @schema + // type: [null, integer] + // @schema + // -- (int) The sustained request rate in requests per second. + // @default -- 5 for k8s up to 1.26. 10 for k8s version 1.27+ + Qps any `mapstructure:"qps,omitempty"` + // @schema + // type: [null, integer] + // @schema + // -- (int) The burst request rate in requests per second. + // The rate limiter will allow short bursts with a higher rate. + // @default -- 10 for k8s up to 1.26. 20 for k8s version 1.27+ + Burst any `mapstructure:"burst,omitempty"` +} + +func (v *Cilium1163Values_K8SClientRateLimit) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Cluster struct { + // -- Name of the cluster. Only required for Cluster Mesh and mutual authentication with SPIRE. + // It must respect the following constraints: + // * It must contain at most 32 characters; + // * It must begin and end with a lower case alphanumeric character; + // * It may contain lower case alphanumeric characters and dashes between. + // The "default" name cannot be used if the Cluster ID is different from 0. + Name string `mapstructure:"name,omitempty"` + // -- (int) Unique ID of the cluster. 
Must be unique across all connected + // clusters and in the range of 1 to 255. Only required for Cluster Mesh, + // may be 0 if Cluster Mesh is not used. + Id int64 `mapstructure:"id,omitempty"` +} + +func (v *Cilium1163Values_Cluster) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_ServiceAccounts_Cilium struct { + Create bool `mapstructure:"create,omitempty"` + Name string `mapstructure:"name,omitempty"` + Automount bool `mapstructure:"automount,omitempty"` + Annotations map[string]any `mapstructure:"annotations,omitempty"` +} + +func (v *Cilium1163Values_ServiceAccounts_Cilium) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_ServiceAccounts_Nodeinit struct { + Create bool `mapstructure:"create,omitempty"` + // -- Enabled is temporary until https://github.com/cilium/cilium-cli/issues/1396 is implemented. + // Cilium CLI doesn't create the SAs for node-init, thus the workaround. Helm is not affected by + // this issue. Name and automount can be configured, if enabled is set to true. + // Otherwise, they are ignored. Enabled can be removed once the issue is fixed. + // Cilium-nodeinit DS must also be fixed. 
+ Enabled bool `mapstructure:"enabled,omitempty"` + Name string `mapstructure:"name,omitempty"` + Automount bool `mapstructure:"automount,omitempty"` + Annotations map[string]any `mapstructure:"annotations,omitempty"` +} + +func (v *Cilium1163Values_ServiceAccounts_Nodeinit) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_ServiceAccounts_Envoy struct { + Create bool `mapstructure:"create,omitempty"` + Name string `mapstructure:"name,omitempty"` + Automount bool `mapstructure:"automount,omitempty"` + Annotations map[string]any `mapstructure:"annotations,omitempty"` +} + +func (v *Cilium1163Values_ServiceAccounts_Envoy) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_ServiceAccounts_Operator struct { + Create bool `mapstructure:"create,omitempty"` + Name string `mapstructure:"name,omitempty"` + Automount bool `mapstructure:"automount,omitempty"` + Annotations map[string]any `mapstructure:"annotations,omitempty"` +} + +func (v *Cilium1163Values_ServiceAccounts_Operator) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_ServiceAccounts_Preflight struct { + Create bool `mapstructure:"create,omitempty"` + Name string `mapstructure:"name,omitempty"` + Automount bool `mapstructure:"automount,omitempty"` + Annotations map[string]any `mapstructure:"annotations,omitempty"` +} + +func (v *Cilium1163Values_ServiceAccounts_Preflight) ToMap() (map[string]any, error) { + var result map[string]any + 
err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_ServiceAccounts_Relay struct { + Create bool `mapstructure:"create,omitempty"` + Name string `mapstructure:"name,omitempty"` + Automount bool `mapstructure:"automount,omitempty"` + Annotations map[string]any `mapstructure:"annotations,omitempty"` +} + +func (v *Cilium1163Values_ServiceAccounts_Relay) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_ServiceAccounts_Ui struct { + Create bool `mapstructure:"create,omitempty"` + Name string `mapstructure:"name,omitempty"` + Automount bool `mapstructure:"automount,omitempty"` + Annotations map[string]any `mapstructure:"annotations,omitempty"` +} + +func (v *Cilium1163Values_ServiceAccounts_Ui) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_ServiceAccounts_ClustermeshApiserver struct { + Create bool `mapstructure:"create,omitempty"` + Name string `mapstructure:"name,omitempty"` + Automount bool `mapstructure:"automount,omitempty"` + Annotations map[string]any `mapstructure:"annotations,omitempty"` +} + +func (v *Cilium1163Values_ServiceAccounts_ClustermeshApiserver) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Clustermeshcertgen is used if clustermesh.apiserver.tls.auto.method=cronJob +type Cilium1163Values_ServiceAccounts_Clustermeshcertgen struct { + Create bool 
`mapstructure:"create,omitempty"` + Name string `mapstructure:"name,omitempty"` + Automount bool `mapstructure:"automount,omitempty"` + Annotations map[string]any `mapstructure:"annotations,omitempty"` +} + +func (v *Cilium1163Values_ServiceAccounts_Clustermeshcertgen) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Hubblecertgen is used if hubble.tls.auto.method=cronJob +type Cilium1163Values_ServiceAccounts_Hubblecertgen struct { + Create bool `mapstructure:"create,omitempty"` + Name string `mapstructure:"name,omitempty"` + Automount bool `mapstructure:"automount,omitempty"` + Annotations map[string]any `mapstructure:"annotations,omitempty"` +} + +func (v *Cilium1163Values_ServiceAccounts_Hubblecertgen) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Define serviceAccount names for components. +// @default -- Component's fully qualified name. 
+type Cilium1163Values_ServiceAccounts struct { + Cilium Cilium1163Values_ServiceAccounts_Cilium `mapstructure:"cilium,omitempty"` + Nodeinit Cilium1163Values_ServiceAccounts_Nodeinit `mapstructure:"nodeinit,omitempty"` + Envoy Cilium1163Values_ServiceAccounts_Envoy `mapstructure:"envoy,omitempty"` + Operator Cilium1163Values_ServiceAccounts_Operator `mapstructure:"operator,omitempty"` + Preflight Cilium1163Values_ServiceAccounts_Preflight `mapstructure:"preflight,omitempty"` + Relay Cilium1163Values_ServiceAccounts_Relay `mapstructure:"relay,omitempty"` + Ui Cilium1163Values_ServiceAccounts_Ui `mapstructure:"ui,omitempty"` + ClustermeshApiserver Cilium1163Values_ServiceAccounts_ClustermeshApiserver `mapstructure:"clustermeshApiserver,omitempty"` + // -- Clustermeshcertgen is used if clustermesh.apiserver.tls.auto.method=cronJob + Clustermeshcertgen Cilium1163Values_ServiceAccounts_Clustermeshcertgen `mapstructure:"clustermeshcertgen,omitempty"` + // -- Hubblecertgen is used if hubble.tls.auto.method=cronJob + Hubblecertgen Cilium1163Values_ServiceAccounts_Hubblecertgen `mapstructure:"hubblecertgen,omitempty"` +} + +func (v *Cilium1163Values_ServiceAccounts) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Agent container image. 
// NOTE(review): generated code (charts Makefile "gen" target / generator.go) — do not hand-edit.
type Cilium1163Values_Image struct {
	// @schema
	// type: [null, string]
	// @schema
	Override any `mapstructure:"override,omitempty"`
	Repository string `mapstructure:"repository,omitempty"`
	Tag string `mapstructure:"tag,omitempty"`
	PullPolicy string `mapstructure:"pullPolicy,omitempty"`
	// cilium-digest
	Digest string `mapstructure:"digest,omitempty"`
	UseDigest bool `mapstructure:"useDigest,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_Image) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type Cilium1163Values_Affinity_PodAntiAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector_MatchLabels struct {
	K8SApp string `mapstructure:"k8s-app,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_Affinity_PodAntiAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector_MatchLabels) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type Cilium1163Values_Affinity_PodAntiAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector struct {
	MatchLabels Cilium1163Values_Affinity_PodAntiAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector_MatchLabels `mapstructure:"matchLabels,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_Affinity_PodAntiAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type Cilium1163Values_Affinity_PodAntiAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem struct {
	TopologyKey string `mapstructure:"topologyKey,omitempty"`
	LabelSelector Cilium1163Values_Affinity_PodAntiAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector `mapstructure:"labelSelector,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_Affinity_PodAntiAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type Cilium1163Values_Affinity_PodAntiAffinity struct {
	RequiredDuringSchedulingIgnoredDuringExecution []Cilium1163Values_Affinity_PodAntiAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem `mapstructure:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_Affinity_PodAntiAffinity) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- Affinity for cilium-agent.
type Cilium1163Values_Affinity struct {
	PodAntiAffinity Cilium1163Values_Affinity_PodAntiAffinity `mapstructure:"podAntiAffinity,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_Affinity) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- Node selector for cilium-agent.
// NOTE(review): generated code (charts Makefile "gen" target / generator.go) — do not hand-edit.
type Cilium1163Values_NodeSelector struct {
	KubernetesIoos string `mapstructure:"kubernetes.io/os,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_NodeSelector) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- Node tolerations for agent scheduling to nodes with taints
// ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
type Cilium1163Values_TolerationsItem struct {
	// - key: "key"
	// operator: "Equal|Exists"
	// value: "value"
	// effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
	Operator string `mapstructure:"operator,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_TolerationsItem) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- AppArmorProfile options for the `cilium-agent` and init containers
type Cilium1163Values_PodSecurityContext_AppArmorProfile struct {
	Type string `mapstructure:"type,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_PodSecurityContext_AppArmorProfile) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- Security Context for cilium-agent pods.
// NOTE(review): generated code (charts Makefile "gen" target / generator.go) — do not hand-edit.
type Cilium1163Values_PodSecurityContext struct {
	// -- AppArmorProfile options for the `cilium-agent` and init containers
	AppArmorProfile Cilium1163Values_PodSecurityContext_AppArmorProfile `mapstructure:"appArmorProfile,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_PodSecurityContext) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- SELinux options for the `cilium-agent` and init containers
type Cilium1163Values_SecurityContext_SeLinuxOptions struct {
	Level string `mapstructure:"level,omitempty"`
	// Running with spc_t since we have removed the privileged mode.
	// Users can change it to a different type as long as they have the
	// type available on the system.
	Type string `mapstructure:"type,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_SecurityContext_SeLinuxOptions) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type Cilium1163Values_SecurityContext_Capabilities struct {
	// -- Capabilities for the `cilium-agent` container
	// Use to set socket permission
	// - CHOWN
	// Used to terminate envoy child process
	// - KILL
	// Used since cilium modifies routing tables, etc...
	// - NET_ADMIN
	// Used since cilium creates raw sockets, etc...
	// - NET_RAW
	// Used since cilium monitor uses mmap
	// - IPC_LOCK
	// Used in iptables. Consider removing once we are iptables-free
	// - SYS_MODULE
	// Needed to switch network namespaces (used for health endpoint, socket-LB).
	// We need it for now but might not need it for >= 5.11 specially
	// for the 'SYS_RESOURCE'.
	// In >= 5.8 there's already BPF and PERFMON capabilities
	// - SYS_ADMIN
	// Could be an alternative for the SYS_ADMIN for the RLIMIT_NPROC
	// - SYS_RESOURCE
	// Both PERFMON and BPF requires kernel 5.8, container runtime
	// cri-o >= v1.22.0 or containerd >= v1.5.0.
	// If available, SYS_ADMIN can be removed.
	// - PERFMON
	// - BPF
	// Allow discretionary access control (e.g. required for package installation)
	// - DAC_OVERRIDE
	// Allow to set Access Control Lists (ACLs) on arbitrary files (e.g. required for package installation)
	// - FOWNER
	// Allow to execute program that changes GID (e.g. required for package installation)
	// - SETGID
	// Allow to execute program that changes UID (e.g. required for package installation)
	// - SETUID
	CiliumAgent []string `mapstructure:"ciliumAgent,omitempty"`
	// -- Capabilities for the `mount-cgroup` init container
	// Only used for 'mount' cgroup
	// - SYS_ADMIN
	// Used for nsenter
	// - SYS_CHROOT
	// - SYS_PTRACE
	MountCgroup []string `mapstructure:"mountCgroup,omitempty"`
	// -- capabilities for the `apply-sysctl-overwrites` init container
	// Required in order to access host's /etc/sysctl.d dir
	// - SYS_ADMIN
	// Used for nsenter
	// - SYS_CHROOT
	// - SYS_PTRACE
	ApplySysctlOverwrites []string `mapstructure:"applySysctlOverwrites,omitempty"`
	// -- Capabilities for the `clean-cilium-state` init container
	// Most of the capabilities here are the same ones used in the
	// cilium-agent's container because this container can be used to
	// uninstall all Cilium resources, and therefore it is likely that
	// will need the same capabilities.
	// Used since cilium modifies routing tables, etc...
	// - NET_ADMIN
	// Used in iptables. Consider removing once we are iptables-free
	// - SYS_MODULE
	// We need it for now but might not need it for >= 5.11 specially
	// for the 'SYS_RESOURCE'.
	// In >= 5.8 there's already BPF and PERFMON capabilities
	// - SYS_ADMIN
	// Could be an alternative for the SYS_ADMIN for the RLIMIT_NPROC
	// Both PERFMON and BPF requires kernel 5.8, container runtime
	// cri-o >= v1.22.0 or containerd >= v1.5.0.
	// If available, SYS_ADMIN can be removed.
	// - PERFMON
	// - BPF
	// - SYS_RESOURCE
	CleanCiliumState []string `mapstructure:"cleanCiliumState,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_SecurityContext_Capabilities) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type Cilium1163Values_SecurityContext struct {
	// -- User to run the pod with
	// runAsUser: 0
	// -- Run the pod with elevated privileges
	Privileged bool `mapstructure:"privileged,omitempty"`
	// -- SELinux options for the `cilium-agent` and init containers
	SeLinuxOptions Cilium1163Values_SecurityContext_SeLinuxOptions `mapstructure:"seLinuxOptions,omitempty"`
	Capabilities Cilium1163Values_SecurityContext_Capabilities `mapstructure:"capabilities,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_SecurityContext) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type Cilium1163Values_UpdateStrategy_RollingUpdate struct {
	// @schema
	// type: [integer, string]
	// @schema
	// NOTE(review): the chart schema allows integer OR string (e.g. "25%"), but the
	// generator emitted int64 here — percent strings cannot be represented; confirm upstream.
	MaxUnavailable int64 `mapstructure:"maxUnavailable,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_UpdateStrategy_RollingUpdate) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- Cilium agent update strategy
type Cilium1163Values_UpdateStrategy struct {
	Type string `mapstructure:"type,omitempty"`
	RollingUpdate Cilium1163Values_UpdateStrategy_RollingUpdate `mapstructure:"rollingUpdate,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_UpdateStrategy) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// Configuration Values for cilium-agent
type Cilium1163Values_Aksbyocni struct {
	// -- Enable AKS BYOCNI integration.
	// Note that this is incompatible with AKS clusters not created in BYOCNI mode:
	// use Azure integration (`azure.enabled`) instead.
	Enabled bool `mapstructure:"enabled,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_Aksbyocni) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type Cilium1163Values_Azure struct {
	// -- Enable Azure integration.
	// Note that this is incompatible with AKS clusters created in BYOCNI mode: use
	// AKS BYOCNI integration (`aksbyocni.enabled`) instead.
	// usePrimaryAddress: false
	// resourceGroup: group1
	// subscriptionID: 00000000-0000-0000-0000-000000000000
	// tenantID: 00000000-0000-0000-0000-000000000000
	// clientID: 00000000-0000-0000-0000-000000000000
	// clientSecret: 00000000-0000-0000-0000-000000000000
	// userAssignedIdentityID: 00000000-0000-0000-0000-000000000000
	Enabled bool `mapstructure:"enabled,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_Azure) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type Cilium1163Values_Alibabacloud struct {
	// -- Enable AlibabaCloud ENI integration
	Enabled bool `mapstructure:"enabled,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_Alibabacloud) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- Enable bandwidth manager to optimize TCP and UDP workloads and allow
// for rate-limiting traffic from individual Pods with EDT (Earliest Departure
// Time) through the "kubernetes.io/egress-bandwidth" Pod annotation.
// NOTE(review): generated code (charts Makefile "gen" target / generator.go) — do not hand-edit.
type Cilium1163Values_BandwidthManager struct {
	// -- Enable bandwidth manager infrastructure (also prerequirement for BBR)
	Enabled bool `mapstructure:"enabled,omitempty"`
	// -- Activate BBR TCP congestion control for Pods
	Bbr bool `mapstructure:"bbr,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_BandwidthManager) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- Configure standalone NAT46/NAT64 gateway
type Cilium1163Values_Nat46X64Gateway struct {
	// -- Enable RFC8215-prefixed translation
	Enabled bool `mapstructure:"enabled,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_Nat46X64Gateway) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- EnableHighScaleIPcache enables the special ipcache mode for high scale
// clusters. The ipcache content will be reduced to the strict minimum and
// traffic will be encapsulated to carry security identities.
type Cilium1163Values_HighScaleIpcache struct {
	// -- Enable the high scale mode for the ipcache.
	Enabled bool `mapstructure:"enabled,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_HighScaleIpcache) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- Configure L2 announcements
type Cilium1163Values_L2Announcements struct {
	// -- Enable L2 announcements
	// -- If a lease is not renewed for X duration, the current leader is considered dead, a new leader is picked
	// leaseDuration: 15s
	// -- The interval at which the leader will renew the lease
	// leaseRenewDeadline: 5s
	// -- The timeout between retries if renewal fails
	// leaseRetryPeriod: 2s
	Enabled bool `mapstructure:"enabled,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_L2Announcements) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- Configure L2 pod announcements
type Cilium1163Values_L2PodAnnouncements struct {
	// -- Enable L2 pod announcements
	Enabled bool `mapstructure:"enabled,omitempty"`
	// -- Interface used for sending Gratuitous ARP pod announcements
	Interface string `mapstructure:"interface,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_L2PodAnnouncements) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type Cilium1163Values_Bgp_Announce struct {
	// -- Enable allocation and announcement of service LoadBalancer IPs
	LoadbalancerIp bool `mapstructure:"loadbalancerIP,omitempty"`
	// -- Enable announcement of node pod CIDR
	PodCidr bool `mapstructure:"podCIDR,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_Bgp_Announce) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- Configure BGP
type Cilium1163Values_Bgp struct {
	// -- Enable BGP support inside Cilium; embeds a new ConfigMap for BGP inside
	// cilium-agent and cilium-operator
	Enabled bool `mapstructure:"enabled,omitempty"`
	Announce Cilium1163Values_Bgp_Announce `mapstructure:"announce,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_Bgp) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- SecretsNamespace is the namespace which BGP support will retrieve secrets from.
type Cilium1163Values_BgpControlPlane_SecretsNamespace struct {
	// -- Create secrets namespace for BGP secrets.
	Create bool `mapstructure:"create,omitempty"`
	// -- The name of the secret namespace to which Cilium agents are given read access
	Name string `mapstructure:"name,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_BgpControlPlane_SecretsNamespace) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- This feature set enables virtual BGP routers to be created via
// CiliumBGPPeeringPolicy CRDs.
type Cilium1163Values_BgpControlPlane struct {
	// -- Enables the BGP control plane.
	Enabled bool `mapstructure:"enabled,omitempty"`
	// -- SecretsNamespace is the namespace which BGP support will retrieve secrets from.
	SecretsNamespace Cilium1163Values_BgpControlPlane_SecretsNamespace `mapstructure:"secretsNamespace,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_BgpControlPlane) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type Cilium1163Values_PmtuDiscovery struct {
	// -- Enable path MTU discovery to send ICMP fragmentation-needed replies to
	// the client.
	Enabled bool `mapstructure:"enabled,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_PmtuDiscovery) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type Cilium1163Values_Bpf_AutoMount struct {
	// -- Enable automatic mount of BPF filesystem
	// When `autoMount` is enabled, the BPF filesystem is mounted at
	// `bpf.root` path on the underlying host and inside the cilium agent pod.
	// If users disable `autoMount`, it's expected that users have mounted
	// bpffs filesystem at the specified `bpf.root` volume, and then the
	// volume will be mounted inside the cilium agent pod at the same path.
	Enabled bool `mapstructure:"enabled,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_Bpf_AutoMount) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type Cilium1163Values_Bpf_Events_Drop struct {
	// -- Enable drop events.
	Enabled bool `mapstructure:"enabled,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_Bpf_Events_Drop) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type Cilium1163Values_Bpf_Events_PolicyVerdict struct {
	// -- Enable policy verdict events.
	Enabled bool `mapstructure:"enabled,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_Bpf_Events_PolicyVerdict) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type Cilium1163Values_Bpf_Events_Trace struct {
	// -- Enable trace events.
	Enabled bool `mapstructure:"enabled,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_Bpf_Events_Trace) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- Control events generated by the Cilium datapath exposed to Cilium monitor and Hubble.
// NOTE(review): generated code (charts Makefile "gen" target / generator.go) — do not hand-edit.
type Cilium1163Values_Bpf_Events struct {
	Drop Cilium1163Values_Bpf_Events_Drop `mapstructure:"drop,omitempty"`
	PolicyVerdict Cilium1163Values_Bpf_Events_PolicyVerdict `mapstructure:"policyVerdict,omitempty"`
	Trace Cilium1163Values_Bpf_Events_Trace `mapstructure:"trace,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_Bpf_Events) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type Cilium1163Values_Bpf struct {
	AutoMount Cilium1163Values_Bpf_AutoMount `mapstructure:"autoMount,omitempty"`
	// -- Configure the mount point for the BPF filesystem
	Root string `mapstructure:"root,omitempty"`
	// -- Enables pre-allocation of eBPF map values. This increases
	// memory usage but can reduce latency.
	PreallocateMaps bool `mapstructure:"preallocateMaps,omitempty"`
	// @schema
	// type: [null, integer]
	// @schema
	// -- (int) Configure the maximum number of entries in auth map.
	// @default -- `524288`
	AuthMapMax any `mapstructure:"authMapMax,omitempty"`
	// @schema
	// type: [null, integer]
	// @schema
	// -- (int) Configure the maximum number of entries in the TCP connection tracking
	// table.
	// @default -- `524288`
	CtTcpMax any `mapstructure:"ctTcpMax,omitempty"`
	// @schema
	// type: [null, integer]
	// @schema
	// -- (int) Configure the maximum number of entries for the non-TCP connection
	// tracking table.
	// @default -- `262144`
	CtAnyMax any `mapstructure:"ctAnyMax,omitempty"`
	// -- Control events generated by the Cilium datapath exposed to Cilium monitor and Hubble.
	Events Cilium1163Values_Bpf_Events `mapstructure:"events,omitempty"`
	// @schema
	// type: [null, integer]
	// @schema
	// -- Configure the maximum number of service entries in the
	// load balancer maps.
	// NOTE(review): schema allows null but the generator emitted int64 (not any) here,
	// unlike the sibling *Max fields — presumably because a concrete default exists; confirm.
	LbMapMax int64 `mapstructure:"lbMapMax,omitempty"`
	// @schema
	// type: [null, integer]
	// @schema
	// -- (int) Configure the maximum number of entries for the NAT table.
	// @default -- `524288`
	NatMax any `mapstructure:"natMax,omitempty"`
	// @schema
	// type: [null, integer]
	// @schema
	// -- (int) Configure the maximum number of entries for the neighbor table.
	// @default -- `524288`
	NeighMax any `mapstructure:"neighMax,omitempty"`
	// @schema
	// type: [null, integer]
	// @schema
	// @default -- `16384`
	// -- (int) Configures the maximum number of entries for the node table.
	NodeMapMax any `mapstructure:"nodeMapMax,omitempty"`
	// -- Configure the maximum number of entries in endpoint policy map (per endpoint).
	// @schema
	// type: [null, integer]
	// @schema
	PolicyMapMax int64 `mapstructure:"policyMapMax,omitempty"`
	// @schema
	// type: [null, number]
	// @schema
	// -- (float64) Configure auto-sizing for all BPF maps based on available memory.
	// ref: https://docs.cilium.io/en/stable/network/ebpf/maps/
	// @default -- `0.0025`
	MapDynamicSizeRatio any `mapstructure:"mapDynamicSizeRatio,omitempty"`
	// -- Configure the level of aggregation for monitor notifications.
	// Valid options are none, low, medium, maximum.
	MonitorAggregation string `mapstructure:"monitorAggregation,omitempty"`
	// -- Configure the typical time between monitor notifications for
	// active connections.
	MonitorInterval string `mapstructure:"monitorInterval,omitempty"`
	// -- Configure which TCP flags trigger notifications when seen for the
	// first time in a connection.
	MonitorFlags string `mapstructure:"monitorFlags,omitempty"`
	// -- Allow cluster external access to ClusterIP services.
	LbExternalClusterIp bool `mapstructure:"lbExternalClusterIP,omitempty"`
	// @schema
	// type: [null, boolean]
	// @schema
	// -- (bool) Enable native IP masquerade support in eBPF
	// @default -- `false`
	Masquerade any `mapstructure:"masquerade,omitempty"`
	// @schema
	// type: [null, boolean]
	// @schema
	// -- (bool) Configure whether direct routing mode should route traffic via
	// host stack (true) or directly and more efficiently out of BPF (false) if
	// the kernel supports it. The latter has the implication that it will also
	// bypass netfilter in the host namespace.
	// @default -- `false`
	HostLegacyRouting any `mapstructure:"hostLegacyRouting,omitempty"`
	// @schema
	// type: [null, boolean]
	// @schema
	// -- (bool) Configure the eBPF-based TPROXY to reduce reliance on iptables rules
	// for implementing Layer 7 policy.
	// @default -- `false`
	Tproxy any `mapstructure:"tproxy,omitempty"`
	// @schema
	// type: [null, array]
	// @schema
	// -- (list) Configure explicitly allowed VLAN id's for bpf logic bypass.
	// [0] will allow all VLAN id's without any filtering.
	// @default -- `[]`
	VlanBypass any `mapstructure:"vlanBypass,omitempty"`
	// -- (bool) Disable ExternalIP mitigation (CVE-2020-8554)
	// @default -- `false`
	DisableExternalIpmitigation bool `mapstructure:"disableExternalIPMitigation,omitempty"`
	// -- (bool) Attach endpoint programs using tcx instead of legacy tc hooks on
	// supported kernels.
	// @default -- `true`
	EnableTcx bool `mapstructure:"enableTCX,omitempty"`
	// -- (string) Mode for Pod devices for the core datapath (veth, netkit, netkit-l2, lb-only)
	// @default -- `veth`
	DatapathMode string `mapstructure:"datapathMode,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_Bpf) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type Cilium1163Values_Cni_Resources_Requests struct {
	Cpu string `mapstructure:"cpu,omitempty"`
	Memory string `mapstructure:"memory,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_Cni_Resources_Requests) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- Specifies the resources for the cni initContainer
type Cilium1163Values_Cni_Resources struct {
	Requests Cilium1163Values_Cni_Resources_Requests `mapstructure:"requests,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_Cni_Resources) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type Cilium1163Values_Cni struct {
	// -- Install the CNI configuration and binary files into the filesystem.
	Install bool `mapstructure:"install,omitempty"`
	// -- Remove the CNI configuration and binary files on agent shutdown. Enable this
	// if you're removing Cilium from the cluster. Disable this to prevent the CNI
	// configuration file from being removed during agent upgrade, which can cause
	// nodes to go unmanageable.
	Uninstall bool `mapstructure:"uninstall,omitempty"`
	// @schema
	// type: [null, string]
	// @schema
	// -- Configure chaining on top of other CNI plugins. Possible values:
	// - none
	// - aws-cni
	// - flannel
	// - generic-veth
	// - portmap
	ChainingMode any `mapstructure:"chainingMode,omitempty"`
	// @schema
	// type: [null, string]
	// @schema
	// -- A CNI network name in to which the Cilium plugin should be added as a chained plugin.
	// This will cause the agent to watch for a CNI network with this network name. When it is
	// found, this will be used as the basis for Cilium's CNI configuration file. If this is
	// set, it assumes a chaining mode of generic-veth. As a special case, a chaining mode
	// of aws-cni implies a chainingTarget of aws-cni.
	ChainingTarget any `mapstructure:"chainingTarget,omitempty"`
	// -- Make Cilium take ownership over the `/etc/cni/net.d` directory on the
	// node, renaming all non-Cilium CNI configurations to `*.cilium_bak`.
	// This ensures no Pods can be scheduled using other CNI plugins during Cilium
	// agent downtime.
	Exclusive bool `mapstructure:"exclusive,omitempty"`
	// -- Configure the log file for CNI logging with retention policy of 7 days.
	// Disable CNI file logging by setting this field to empty explicitly.
	LogFile string `mapstructure:"logFile,omitempty"`
	// -- Skip writing of the CNI configuration. This can be used if
	// writing of the CNI configuration is performed by external automation.
	CustomConf bool `mapstructure:"customConf,omitempty"`
	// -- Configure the path to the CNI configuration directory on the host.
	ConfPath string `mapstructure:"confPath,omitempty"`
	// -- Configure the path to the CNI binary directory on the host.
	// -- Specify the path to a CNI config to read from on agent start.
	// This can be useful if you want to manage your CNI
	// configuration outside of a Kubernetes environment. This parameter is
	// mutually exclusive with the 'cni.configMap' parameter. The agent will
	// write this to 05-cilium.conflist on startup.
	// readCniConf: /host/etc/cni/net.d/05-sample.conflist.input
	// NOTE(review): the two comment blocks above were fused by the generator; only the
	// first ("CNI binary directory") describes BinPath — confirm against the chart values.
	BinPath string `mapstructure:"binPath,omitempty"`
	// -- When defined, configMap will mount the provided value as ConfigMap and
	// interpret the cniConf variable as CNI configuration file and write it
	// when the agent starts up
	// configMap: cni-configuration
	//
	// -- Configure the key in the CNI ConfigMap to read the contents of
	// the CNI configuration from.
	ConfigMapKey string `mapstructure:"configMapKey,omitempty"`
	// -- Configure the path to where to mount the ConfigMap inside the agent pod.
	ConfFileMountPath string `mapstructure:"confFileMountPath,omitempty"`
	// -- Configure the path to where the CNI configuration directory is mounted
	// inside the agent pod.
	HostConfDirMountPath string `mapstructure:"hostConfDirMountPath,omitempty"`
	// -- Specifies the resources for the cni initContainer
	Resources Cilium1163Values_Cni_Resources `mapstructure:"resources,omitempty"`
	// -- Enable route MTU for pod netns when CNI chaining is used
	EnableRouteMtuforCnichaining bool `mapstructure:"enableRouteMTUForCNIChaining,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_Cni) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- Tail call hooks for custom eBPF programs.
type Cilium1163Values_CustomCalls struct {
	// -- Enable tail call hooks for custom eBPF programs.
	Enabled bool `mapstructure:"enabled,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_CustomCalls) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- Specify which network interfaces can run the eBPF datapath. This means
// that a packet sent from a pod to a destination outside the cluster will be
// masqueraded (to an output device IPv4 address), if the output device runs the
// program. When not specified, probing will automatically detect devices that have
// a non-local route. This should be used only when autodetection is not suitable.
// devices: ""
type Cilium1163Values_Daemon struct {
	// -- Configure where Cilium runtime state should be stored.
	RunPath string `mapstructure:"runPath,omitempty"`
	// @schema
	// type: [null, string]
	// @schema
	// -- Configure a custom list of possible configuration override sources
	// The default is "config-map:cilium-config,cilium-node-config". For supported
	// values, see the help text for the build-config subcommand.
	// Note that this value should be a comma-separated string.
	ConfigSources any `mapstructure:"configSources,omitempty"`
	// @schema
	// type: [null, string]
	// @schema
	// -- allowedConfigOverrides is a list of config-map keys that can be overridden.
	// That is to say, if this value is set, config sources (excepting the first one) can
	// only override keys in this list.
	//
	// This takes precedence over blockedConfigOverrides.
	//
	// By default, all keys may be overridden. To disable overrides, set this to "none" or
	// change the configSources variable.
	AllowedConfigOverrides any `mapstructure:"allowedConfigOverrides,omitempty"`
	// @schema
	// type: [null, string]
	// @schema
	// -- blockedConfigOverrides is a list of config-map keys that may not be overridden.
	// In other words, if any of these keys appear in a configuration source excepting the
	// first one, they will be ignored
	//
	// This is ignored if allowedConfigOverrides is set.
	//
	// By default, all keys may be overridden.
	BlockedConfigOverrides any `mapstructure:"blockedConfigOverrides,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_Daemon) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- List of rate limit options to be used for the CiliumEndpointSlice controller.
// Each object in the list must have the following fields:
// nodes: Count of nodes at which to apply the rate limit.
// limit: The sustained request rate in requests per second. The maximum rate that can be configured is 50.
// burst: The burst request rate in requests per second. The maximum burst that can be configured is 100.
type Cilium1163Values_CiliumEndpointSlice_RateLimitsItem struct {
	Nodes int64 `mapstructure:"nodes,omitempty"`
	Limit int64 `mapstructure:"limit,omitempty"`
	Burst int64 `mapstructure:"burst,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_CiliumEndpointSlice_RateLimitsItem) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type Cilium1163Values_CiliumEndpointSlice struct {
	// -- Enable Cilium EndpointSlice feature.
	Enabled bool `mapstructure:"enabled,omitempty"`
	// -- List of rate limit options to be used for the CiliumEndpointSlice controller.
	// Each object in the list must have the following fields:
	// nodes: Count of nodes at which to apply the rate limit.
	// limit: The sustained request rate in requests per second. The maximum rate that can be configured is 50.
	// burst: The burst request rate in requests per second. The maximum burst that can be configured is 100.
	RateLimits []Cilium1163Values_CiliumEndpointSlice_RateLimitsItem `mapstructure:"rateLimits,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_CiliumEndpointSlice) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- SecretsNamespace is the namespace in which envoy SDS will retrieve secrets from.
type Cilium1163Values_EnvoyConfig_SecretsNamespace struct {
	// -- Create secrets namespace for CiliumEnvoyConfig CRDs.
	Create bool `mapstructure:"create,omitempty"`
	// -- The name of the secret namespace to which Cilium agents are given read access.
	Name string `mapstructure:"name,omitempty"`
}

// ToMap decodes the struct into a map[string]any via mapstructure.
func (v *Cilium1163Values_EnvoyConfig_SecretsNamespace) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type Cilium1163Values_EnvoyConfig struct {
	// -- Enable CiliumEnvoyConfig CRD
	// CiliumEnvoyConfig CRD can also be implicitly enabled by other options.
	Enabled bool `mapstructure:"enabled,omitempty"`
	// -- SecretsNamespace is the namespace in which envoy SDS will retrieve secrets from.
	SecretsNamespace Cilium1163Values_EnvoyConfig_SecretsNamespace `mapstructure:"secretsNamespace,omitempty"`
	// -- Interval in which an attempt is made to reconcile failed EnvoyConfigs. If the duration is zero, the retry is deactivated.
+ RetryInterval string `mapstructure:"retryInterval,omitempty"` +} + +func (v *Cilium1163Values_EnvoyConfig) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- SecretsNamespace is the namespace in which envoy SDS will retrieve TLS secrets from. +type Cilium1163Values_IngressController_SecretsNamespace struct { + // -- Create secrets namespace for Ingress. + Create bool `mapstructure:"create,omitempty"` + // -- Name of Ingress secret namespace. + Name string `mapstructure:"name,omitempty"` + // -- Enable secret sync, which will make sure all TLS secrets used by Ingress are synced to secretsNamespace.name. + // If disabled, TLS secrets must be maintained externally. + Sync bool `mapstructure:"sync,omitempty"` +} + +func (v *Cilium1163Values_IngressController_SecretsNamespace) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Load-balancer service in shared mode. +// This is a single load-balancer service for all Ingress resources. 
+type Cilium1163Values_IngressController_Service struct { + // -- Service name + Name string `mapstructure:"name,omitempty"` + // -- Labels to be added for the shared LB service + Labels map[string]any `mapstructure:"labels,omitempty"` + // -- Annotations to be added for the shared LB service + Annotations map[string]any `mapstructure:"annotations,omitempty"` + // -- Service type for the shared LB service + Type string `mapstructure:"type,omitempty"` + // @schema + // type: [null, integer] + // @schema + // -- Configure a specific nodePort for insecure HTTP traffic on the shared LB service + InsecureNodePort any `mapstructure:"insecureNodePort,omitempty"` + // @schema + // type: [null, integer] + // @schema + // -- Configure a specific nodePort for secure HTTPS traffic on the shared LB service + SecureNodePort any `mapstructure:"secureNodePort,omitempty"` + // @schema + // type: [null, string] + // @schema + // -- Configure a specific loadBalancerClass on the shared LB service (requires Kubernetes 1.24+) + LoadBalancerClass any `mapstructure:"loadBalancerClass,omitempty"` + // @schema + // type: [null, string] + // @schema + // -- Configure a specific loadBalancerIP on the shared LB service + LoadBalancerIp any `mapstructure:"loadBalancerIP,omitempty"` + // @schema + // type: [null, boolean] + // @schema + // -- Configure if node port allocation is required for LB service + // ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation + AllocateLoadBalancerNodePorts any `mapstructure:"allocateLoadBalancerNodePorts,omitempty"` + // -- Control how traffic from external sources is routed to the LoadBalancer Kubernetes Service for Cilium Ingress in shared mode. + // Valid values are "Cluster" and "Local". 
+ // ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#external-traffic-policy + ExternalTrafficPolicy string `mapstructure:"externalTrafficPolicy,omitempty"` +} + +func (v *Cilium1163Values_IngressController_Service) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// Specify the nodes where the Ingress listeners should be exposed +type Cilium1163Values_IngressController_HostNetwork_Nodes struct { + // -- Specify the labels of the nodes where the Ingress listeners should be exposed + // + // matchLabels: + // kubernetes.io/os: linux + // kubernetes.io/hostname: kind-worker + MatchLabels map[string]any `mapstructure:"matchLabels,omitempty"` +} + +func (v *Cilium1163Values_IngressController_HostNetwork_Nodes) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// Host Network related configuration +type Cilium1163Values_IngressController_HostNetwork struct { + // -- Configure whether the Envoy listeners should be exposed on the host network. + Enabled bool `mapstructure:"enabled,omitempty"` + // -- Configure a specific port on the host network that gets used for the shared listener. 
+ SharedListenerPort int64 `mapstructure:"sharedListenerPort,omitempty"` + // Specify the nodes where the Ingress listeners should be exposed + Nodes Cilium1163Values_IngressController_HostNetwork_Nodes `mapstructure:"nodes,omitempty"` +} + +func (v *Cilium1163Values_IngressController_HostNetwork) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_IngressController struct { + // -- Enable cilium ingress controller + // This will automatically set enable-envoy-config as well. + Enabled bool `mapstructure:"enabled,omitempty"` + // -- Set cilium ingress controller to be the default ingress controller + // This will let cilium ingress controller route entries without ingress class set + Default bool `mapstructure:"default,omitempty"` + // -- Default ingress load balancer mode + // Supported values: shared, dedicated + // For granular control, use the following annotations on the ingress resource: + // "ingress.cilium.io/loadbalancer-mode: dedicated" (or "shared"). + LoadbalancerMode string `mapstructure:"loadbalancerMode,omitempty"` + // -- Enforce https for host having matching TLS host in Ingress. + // Incoming traffic to http listener will return 308 http error code with respective location in header. + EnforceHttps bool `mapstructure:"enforceHttps,omitempty"` + // -- Enable proxy protocol for all Ingress listeners. Note that _only_ Proxy protocol traffic will be accepted once this is enabled. 
+ EnableProxyProtocol bool `mapstructure:"enableProxyProtocol,omitempty"` + // -- IngressLBAnnotations are the annotation and label prefixes, which are used to filter annotations and/or labels to propagate from Ingress to the Load Balancer service + // - lbipam.cilium.io + // - nodeipam.cilium.io + // - service.beta.kubernetes.io + // - service.kubernetes.io + // - cloud.google.com + IngressLbannotationPrefixes []string `mapstructure:"ingressLBAnnotationPrefixes,omitempty"` + // @schema + // type: [null, string] + // @schema + // -- Default secret namespace for ingresses without .spec.tls[].secretName set. + DefaultSecretNamespace any `mapstructure:"defaultSecretNamespace,omitempty"` + // @schema + // type: [null, string] + // @schema + // -- Default secret name for ingresses without .spec.tls[].secretName set. + DefaultSecretName any `mapstructure:"defaultSecretName,omitempty"` + // -- SecretsNamespace is the namespace in which envoy SDS will retrieve TLS secrets from. + SecretsNamespace Cilium1163Values_IngressController_SecretsNamespace `mapstructure:"secretsNamespace,omitempty"` + // -- Load-balancer service in shared mode. + // This is a single load-balancer service for all Ingress resources. + Service Cilium1163Values_IngressController_Service `mapstructure:"service,omitempty"` + // Host Network related configuration + HostNetwork Cilium1163Values_IngressController_HostNetwork `mapstructure:"hostNetwork,omitempty"` +} + +func (v *Cilium1163Values_IngressController) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_GatewayApi_GatewayClass struct { + // -- Enable creation of GatewayClass resource + // The default value is 'auto' which decides according to presence of gateway.networking.k8s.io/v1/GatewayClass in the cluster. 
+ // Other possible values are 'true' and 'false', which will either always or never create the GatewayClass, respectively. + Create string `mapstructure:"create,omitempty"` +} + +func (v *Cilium1163Values_GatewayApi_GatewayClass) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- SecretsNamespace is the namespace in which envoy SDS will retrieve TLS secrets from. +type Cilium1163Values_GatewayApi_SecretsNamespace struct { + // -- Create secrets namespace for Gateway API. + Create bool `mapstructure:"create,omitempty"` + // -- Name of Gateway API secret namespace. + Name string `mapstructure:"name,omitempty"` + // -- Enable secret sync, which will make sure all TLS secrets used by Ingress are synced to secretsNamespace.name. + // If disabled, TLS secrets must be maintained externally. + Sync bool `mapstructure:"sync,omitempty"` +} + +func (v *Cilium1163Values_GatewayApi_SecretsNamespace) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// Specify the nodes where the Ingress listeners should be exposed +type Cilium1163Values_GatewayApi_HostNetwork_Nodes struct { + // -- Specify the labels of the nodes where the Ingress listeners should be exposed + // + // matchLabels: + // kubernetes.io/os: linux + // kubernetes.io/hostname: kind-worker + MatchLabels map[string]any `mapstructure:"matchLabels,omitempty"` +} + +func (v *Cilium1163Values_GatewayApi_HostNetwork_Nodes) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// Host Network related 
configuration +type Cilium1163Values_GatewayApi_HostNetwork struct { + // -- Configure whether the Envoy listeners should be exposed on the host network. + Enabled bool `mapstructure:"enabled,omitempty"` + // Specify the nodes where the Ingress listeners should be exposed + Nodes Cilium1163Values_GatewayApi_HostNetwork_Nodes `mapstructure:"nodes,omitempty"` +} + +func (v *Cilium1163Values_GatewayApi_HostNetwork) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_GatewayApi struct { + // -- Enable support for Gateway API in cilium + // This will automatically set enable-envoy-config as well. + Enabled bool `mapstructure:"enabled,omitempty"` + // -- Enable proxy protocol for all GatewayAPI listeners. Note that _only_ Proxy protocol traffic will be accepted once this is enabled. + EnableProxyProtocol bool `mapstructure:"enableProxyProtocol,omitempty"` + // -- Enable Backend Protocol selection support (GEP-1911) for Gateway API via appProtocol. + EnableAppProtocol bool `mapstructure:"enableAppProtocol,omitempty"` + // -- Enable ALPN for all listeners configured with Gateway API. ALPN will attempt HTTP/2, then HTTP 1.1. + // Note that this will also enable `appProtocol` support, and services that wish to use HTTP/2 will need to indicate that via their `appProtocol`. + EnableAlpn bool `mapstructure:"enableAlpn,omitempty"` + // -- The number of additional GatewayAPI proxy hops from the right side of the HTTP header to trust when determining the origin client's IP address. + XffNumTrustedHops int64 `mapstructure:"xffNumTrustedHops,omitempty"` + // -- Control how traffic from external sources is routed to the LoadBalancer Kubernetes Service for all Cilium GatewayAPI Gateway instances. Valid values are "Cluster" and "Local". 
+ // Note that this value will be ignored when `hostNetwork.enabled == true`. + // ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#external-traffic-policy + ExternalTrafficPolicy string `mapstructure:"externalTrafficPolicy,omitempty"` + GatewayClass Cilium1163Values_GatewayApi_GatewayClass `mapstructure:"gatewayClass,omitempty"` + // -- SecretsNamespace is the namespace in which envoy SDS will retrieve TLS secrets from. + SecretsNamespace Cilium1163Values_GatewayApi_SecretsNamespace `mapstructure:"secretsNamespace,omitempty"` + // Host Network related configuration + HostNetwork Cilium1163Values_GatewayApi_HostNetwork `mapstructure:"hostNetwork,omitempty"` +} + +func (v *Cilium1163Values_GatewayApi) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Configure the WireGuard Pod2Pod strict mode. +type Cilium1163Values_Encryption_StrictMode struct { + // -- Enable WireGuard Pod2Pod strict mode. + Enabled bool `mapstructure:"enabled,omitempty"` + // -- CIDR for the WireGuard Pod2Pod strict mode. + Cidr string `mapstructure:"cidr,omitempty"` + // -- Allow dynamic lookup of remote node identities. + // This is required when tunneling is used or direct routing is used and the node CIDR and pod CIDR overlap. + AllowRemoteNodeIdentities bool `mapstructure:"allowRemoteNodeIdentities,omitempty"` +} + +func (v *Cilium1163Values_Encryption_StrictMode) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Encryption_Ipsec struct { + // -- Name of the key file inside the Kubernetes secret configured via secretName. 
+ KeyFile string `mapstructure:"keyFile,omitempty"` + // -- Path to mount the secret inside the Cilium pod. + MountPath string `mapstructure:"mountPath,omitempty"` + // -- Name of the Kubernetes secret containing the encryption keys. + SecretName string `mapstructure:"secretName,omitempty"` + // -- The interface to use for encrypted traffic. + Interface string `mapstructure:"interface,omitempty"` + // -- Enable the key watcher. If disabled, a restart of the agent will be + // necessary on key rotations. + KeyWatcher bool `mapstructure:"keyWatcher,omitempty"` + // -- Maximum duration of the IPsec key rotation. The previous key will be + // removed after that delay. + KeyRotationDuration string `mapstructure:"keyRotationDuration,omitempty"` + // -- Enable IPsec encrypted overlay + EncryptedOverlay bool `mapstructure:"encryptedOverlay,omitempty"` +} + +func (v *Cilium1163Values_Encryption_Ipsec) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Encryption_Wireguard struct { + // -- Enables the fallback to the user-space implementation (deprecated). + UserspaceFallback bool `mapstructure:"userspaceFallback,omitempty"` + // -- Controls WireGuard PersistentKeepalive option. Set 0s to disable. + PersistentKeepalive string `mapstructure:"persistentKeepalive,omitempty"` +} + +func (v *Cilium1163Values_Encryption_Wireguard) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Encryption struct { + // -- Enable transparent network encryption. + Enabled bool `mapstructure:"enabled,omitempty"` + // -- Encryption method. Can be either ipsec or wireguard. 
+ Type string `mapstructure:"type,omitempty"` + // -- Enable encryption for pure node to node traffic. + // This option is only effective when encryption.type is set to "wireguard". + NodeEncryption bool `mapstructure:"nodeEncryption,omitempty"` + // -- Configure the WireGuard Pod2Pod strict mode. + StrictMode Cilium1163Values_Encryption_StrictMode `mapstructure:"strictMode,omitempty"` + Ipsec Cilium1163Values_Encryption_Ipsec `mapstructure:"ipsec,omitempty"` + Wireguard Cilium1163Values_Encryption_Wireguard `mapstructure:"wireguard,omitempty"` +} + +func (v *Cilium1163Values_Encryption) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_EndpointHealthChecking struct { + // -- Enable connectivity health checking between virtual endpoints. + Enabled bool `mapstructure:"enabled,omitempty"` +} + +func (v *Cilium1163Values_EndpointHealthChecking) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_EndpointRoutes struct { + // @schema + // type: [boolean, string] + // @schema + // -- Enable use of per endpoint routes instead of routing via + // the cilium_host interface. 
+ Enabled bool `mapstructure:"enabled,omitempty"` +} + +func (v *Cilium1163Values_EndpointRoutes) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_K8SNetworkPolicy struct { + // -- Enable support for K8s NetworkPolicy + Enabled bool `mapstructure:"enabled,omitempty"` +} + +func (v *Cilium1163Values_K8SNetworkPolicy) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Eni struct { + // -- Enable Elastic Network Interface (ENI) integration. + Enabled bool `mapstructure:"enabled,omitempty"` + // -- Update ENI Adapter limits from the EC2 API + UpdateEc2AdapterLimitViaApi bool `mapstructure:"updateEC2AdapterLimitViaAPI,omitempty"` + // -- Release IPs not used from the ENI + AwsReleaseExcessIps bool `mapstructure:"awsReleaseExcessIPs,omitempty"` + // -- Enable ENI prefix delegation + AwsEnablePrefixDelegation bool `mapstructure:"awsEnablePrefixDelegation,omitempty"` + // -- EC2 API endpoint to use + Ec2Apiendpoint string `mapstructure:"ec2APIEndpoint,omitempty"` + // -- Tags to apply to the newly created ENIs + EniTags map[string]any `mapstructure:"eniTags,omitempty"` + // -- Interval for garbage collection of unattached ENIs. Set to "0s" to disable. + // @default -- `"5m"` + GcInterval string `mapstructure:"gcInterval,omitempty"` + // -- Additional tags attached to ENIs created by Cilium. 
+ // Dangling ENIs with this tag will be garbage collected + // @default -- `{"io.cilium/cilium-managed":"true,"io.cilium/cluster-name":""}` + GcTags map[string]any `mapstructure:"gcTags,omitempty"` + // -- If using IAM role for Service Accounts will not try to + // inject identity values from cilium-aws kubernetes secret. + // Adds annotation to service account if managed by Helm. + // See https://github.com/aws/amazon-eks-pod-identity-webhook + IamRole string `mapstructure:"iamRole,omitempty"` + // -- Filter via subnet IDs which will dictate which subnets are going to be used to create new ENIs + // Important note: This requires that each instance has an ENI with a matching subnet attached + // when Cilium is deployed. If you only want to control subnets for ENIs attached by Cilium, + // use the CNI configuration file settings (cni.customConf) instead. + SubnetIdsFilter []any `mapstructure:"subnetIDsFilter,omitempty"` + // -- Filter via tags (k=v) which will dictate which subnets are going to be used to create new ENIs + // Important note: This requires that each instance has an ENI with a matching subnet attached + // when Cilium is deployed. If you only want to control subnets for ENIs attached by Cilium, + // use the CNI configuration file settings (cni.customConf) instead. + SubnetTagsFilter []any `mapstructure:"subnetTagsFilter,omitempty"` + // -- Filter via AWS EC2 Instance tags (k=v) which will dictate which AWS EC2 Instances + // are going to be used to create new ENIs + InstanceTagsFilter []any `mapstructure:"instanceTagsFilter,omitempty"` +} + +func (v *Cilium1163Values_Eni) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_ExternalIps struct { + // -- Enable ExternalIPs service support. 
+ Enabled bool `mapstructure:"enabled,omitempty"` +} + +func (v *Cilium1163Values_ExternalIps) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// fragmentTracking enables IPv4 fragment tracking support in the datapath. +// fragmentTracking: true +type Cilium1163Values_Gke struct { + // -- Enable Google Kubernetes Engine integration + Enabled bool `mapstructure:"enabled,omitempty"` +} + +func (v *Cilium1163Values_Gke) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Configure the host firewall. +type Cilium1163Values_HostFirewall struct { + // -- Enables the enforcement of host policies in the eBPF datapath. + Enabled bool `mapstructure:"enabled,omitempty"` +} + +func (v *Cilium1163Values_HostFirewall) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_HostPort struct { + // -- Enable hostPort service support. + Enabled bool `mapstructure:"enabled,omitempty"` +} + +func (v *Cilium1163Values_HostPort) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Configure socket LB +type Cilium1163Values_SocketLb struct { + // -- Enable socket LB + // -- Disable socket lb for non-root ns. This is used to enable Istio routing rules. + // hostNamespaceOnly: false + // -- Enable terminating pod connections to deleted service backends. 
+ // terminatePodConnections: true + Enabled bool `mapstructure:"enabled,omitempty"` +} + +func (v *Cilium1163Values_SocketLb) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Certgen_Image struct { + // @schema + // type: [null, string] + // @schema + Override any `mapstructure:"override,omitempty"` + Repository string `mapstructure:"repository,omitempty"` + Tag string `mapstructure:"tag,omitempty"` + Digest string `mapstructure:"digest,omitempty"` + UseDigest bool `mapstructure:"useDigest,omitempty"` + PullPolicy string `mapstructure:"pullPolicy,omitempty"` +} + +func (v *Cilium1163Values_Certgen_Image) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Annotations to be added to the hubble-certgen initial Job and CronJob +type Cilium1163Values_Certgen_Annotations struct { + Job map[string]any `mapstructure:"job,omitempty"` + CronJob map[string]any `mapstructure:"cronJob,omitempty"` +} + +func (v *Cilium1163Values_Certgen_Annotations) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Configure certificate generation for Hubble integration. +// If hubble.tls.auto.method=cronJob, these values are used +// for the Kubernetes CronJob which will be scheduled regularly to +// (re)generate any certificates not provided manually. 
+type Cilium1163Values_Certgen struct { + Image Cilium1163Values_Certgen_Image `mapstructure:"image,omitempty"` + // -- Seconds after which the completed job pod will be deleted + TtlSecondsAfterFinished int64 `mapstructure:"ttlSecondsAfterFinished,omitempty"` + // -- Labels to be added to hubble-certgen pods + PodLabels map[string]any `mapstructure:"podLabels,omitempty"` + // -- Annotations to be added to the hubble-certgen initial Job and CronJob + Annotations Cilium1163Values_Certgen_Annotations `mapstructure:"annotations,omitempty"` + // -- Node tolerations for pod assignment on nodes with taints + // ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + Tolerations []any `mapstructure:"tolerations,omitempty"` + // -- Additional certgen volumes. + ExtraVolumes []any `mapstructure:"extraVolumes,omitempty"` + // -- Additional certgen volumeMounts. + ExtraVolumeMounts []any `mapstructure:"extraVolumeMounts,omitempty"` + // -- Affinity for certgen + Affinity map[string]any `mapstructure:"affinity,omitempty"` +} + +func (v *Cilium1163Values_Certgen) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Configure mTLS for the Hubble metrics server. +type Cilium1163Values_Hubble_Metrics_Tls_Server_Mtls struct { + // When set to true enforces mutual TLS between Hubble Metrics server and its clients. + // False allow non-mutual TLS connections. + // This option has no effect when TLS is disabled. + Enabled bool `mapstructure:"enabled,omitempty"` + UseSecret bool `mapstructure:"useSecret,omitempty"` + // -- Name of the ConfigMap containing the CA to validate client certificates against. + // If mTLS is enabled and this is unspecified, it will default to the + // same CA used for Hubble metrics server certificates. 
+ Name any `mapstructure:"name,omitempty"` + // -- Entry of the ConfigMap containing the CA. + Key string `mapstructure:"key,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Metrics_Tls_Server_Mtls) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// Configure hubble metrics server TLS. +type Cilium1163Values_Hubble_Metrics_Tls_Server struct { + // -- Name of the Secret containing the certificate and key for the Hubble metrics server. + // If specified, cert and key are ignored. + ExistingSecret string `mapstructure:"existingSecret,omitempty"` + // -- base64 encoded PEM values for the Hubble metrics server certificate (deprecated). + // Use existingSecret instead. + Cert string `mapstructure:"cert,omitempty"` + // -- base64 encoded PEM values for the Hubble metrics server key (deprecated). + // Use existingSecret instead. + Key string `mapstructure:"key,omitempty"` + // -- Extra DNS names added to certificate when it's auto generated + ExtraDnsNames []any `mapstructure:"extraDnsNames,omitempty"` + // -- Extra IP addresses added to certificate when it's auto generated + ExtraIpAddresses []any `mapstructure:"extraIpAddresses,omitempty"` + // -- Configure mTLS for the Hubble metrics server. + Mtls Cilium1163Values_Hubble_Metrics_Tls_Server_Mtls `mapstructure:"mtls,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Metrics_Tls_Server) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Hubble_Metrics_Tls struct { + // Enable hubble metrics server TLS. + Enabled bool `mapstructure:"enabled,omitempty"` + // Configure hubble metrics server TLS. 
+ Server Cilium1163Values_Hubble_Metrics_Tls_Server `mapstructure:"server,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Metrics_Tls) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Relabeling configs for the ServiceMonitor hubble +type Cilium1163Values_Hubble_Metrics_ServiceMonitor_RelabelingsItem struct { + // - __meta_kubernetes_pod_node_name + SourceLabels []string `mapstructure:"sourceLabels,omitempty"` + TargetLabel string `mapstructure:"targetLabel,omitempty"` + Replacement string `mapstructure:"replacement,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Metrics_ServiceMonitor_RelabelingsItem) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Hubble_Metrics_ServiceMonitor struct { + // -- Create ServiceMonitor resources for Prometheus Operator. + // This requires the prometheus CRDs to be available. + // ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) + Enabled bool `mapstructure:"enabled,omitempty"` + // -- Labels to add to ServiceMonitor hubble + Labels map[string]any `mapstructure:"labels,omitempty"` + // -- Annotations to add to ServiceMonitor hubble + Annotations map[string]any `mapstructure:"annotations,omitempty"` + // -- jobLabel to add for ServiceMonitor hubble + JobLabel string `mapstructure:"jobLabel,omitempty"` + // -- Interval for scrape metrics. 
+ Interval string `mapstructure:"interval,omitempty"` + // -- Relabeling configs for the ServiceMonitor hubble + Relabelings []Cilium1163Values_Hubble_Metrics_ServiceMonitor_RelabelingsItem `mapstructure:"relabelings,omitempty"` + // @schema + // type: [null, array] + // @schema + // -- Metrics relabeling configs for the ServiceMonitor hubble + MetricRelabelings any `mapstructure:"metricRelabelings,omitempty"` + // Configure TLS for the ServiceMonitor. + // Note, when using TLS you will either need to specify + // tlsConfig.insecureSkipVerify or specify a CA to use. + TlsConfig map[string]any `mapstructure:"tlsConfig,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Metrics_ServiceMonitor) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Grafana dashboards for hubble +// grafana can import dashboards based on the label and value +// ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards +type Cilium1163Values_Hubble_Metrics_Dashboards struct { + Enabled bool `mapstructure:"enabled,omitempty"` + Label string `mapstructure:"label,omitempty"` + // @schema + // type: [null, string] + // @schema + Namespace any `mapstructure:"namespace,omitempty"` + LabelValue string `mapstructure:"labelValue,omitempty"` + Annotations map[string]any `mapstructure:"annotations,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Metrics_Dashboards) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Number of recent flows for Hubble to cache. Defaults to 4095. 
+// Possible values are: +// 1, 3, 7, 15, 31, 63, 127, 255, 511, 1023, +// 2047, 4095, 8191, 16383, 32767, 65535 +// eventBufferCapacity: "4095" +// +// -- Hubble metrics configuration. +// See https://docs.cilium.io/en/stable/observability/metrics/#hubble-metrics +// for more comprehensive documentation about Hubble metrics. +type Cilium1163Values_Hubble_Metrics struct { + // @schema + // type: [null, array] + // @schema + // -- Configures the list of metrics to collect. If empty or null, metrics + // are disabled. + // Example: + // + // enabled: + // - dns:query;ignoreAAAA + // - drop + // - tcp + // - flow + // - icmp + // - http + // + // You can specify the list of metrics from the helm CLI: + // + // --set hubble.metrics.enabled="{dns:query;ignoreAAAA,drop,tcp,flow,icmp,http}" + // + Enabled any `mapstructure:"enabled,omitempty"` + // -- Enables exporting hubble metrics in OpenMetrics format. + EnableOpenMetrics bool `mapstructure:"enableOpenMetrics,omitempty"` + // -- Configure the port the hubble metric server listens on. + Port int64 `mapstructure:"port,omitempty"` + Tls Cilium1163Values_Hubble_Metrics_Tls `mapstructure:"tls,omitempty"` + // -- Annotations to be added to hubble-metrics service. 
+ ServiceAnnotations map[string]any `mapstructure:"serviceAnnotations,omitempty"` + ServiceMonitor Cilium1163Values_Hubble_Metrics_ServiceMonitor `mapstructure:"serviceMonitor,omitempty"` + // -- Grafana dashboards for hubble + // grafana can import dashboards based on the label and value + // ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards + Dashboards Cilium1163Values_Hubble_Metrics_Dashboards `mapstructure:"dashboards,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Metrics) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Hubble_Redact_Http_Headers struct { + // -- List of HTTP headers to allow: headers not matching will be redacted. Note: `allow` and `deny` lists cannot be used both at the same time, only one can be present. + // Example: + // redact: + // enabled: true + // http: + // headers: + // allow: + // - traceparent + // - tracestate + // - Cache-Control + // + // You can specify the options from the helm CLI: + // --set hubble.redact.enabled="true" + // --set hubble.redact.http.headers.allow="traceparent,tracestate,Cache-Control" + Allow []any `mapstructure:"allow,omitempty"` + // -- List of HTTP headers to deny: matching headers will be redacted. Note: `allow` and `deny` lists cannot be used both at the same time, only one can be present. 
+ // Example: + // redact: + // enabled: true + // http: + // headers: + // deny: + // - Authorization + // - Proxy-Authorization + // + // You can specify the options from the helm CLI: + // --set hubble.redact.enabled="true" + // --set hubble.redact.http.headers.deny="Authorization,Proxy-Authorization" + Deny []any `mapstructure:"deny,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Redact_Http_Headers) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Hubble_Redact_Http struct { + // -- Enables redacting URL query (GET) parameters. + // Example: + // + // redact: + // enabled: true + // http: + // urlQuery: true + // + // You can specify the options from the helm CLI: + // + // --set hubble.redact.enabled="true" + // --set hubble.redact.http.urlQuery="true" + UrlQuery bool `mapstructure:"urlQuery,omitempty"` + // -- Enables redacting user info, e.g., password when basic auth is used. + // Example: + // + // redact: + // enabled: true + // http: + // userInfo: true + // + // You can specify the options from the helm CLI: + // + // --set hubble.redact.enabled="true" + // --set hubble.redact.http.userInfo="true" + UserInfo bool `mapstructure:"userInfo,omitempty"` + Headers Cilium1163Values_Hubble_Redact_Http_Headers `mapstructure:"headers,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Redact_Http) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Hubble_Redact_Kafka struct { + // -- Enables redacting Kafka's API key. 
+ // Example: + // + // redact: + // enabled: true + // kafka: + // apiKey: true + // + // You can specify the options from the helm CLI: + // + // --set hubble.redact.enabled="true" + // --set hubble.redact.kafka.apiKey="true" + ApiKey bool `mapstructure:"apiKey,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Redact_Kafka) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Enables redacting sensitive information present in Layer 7 flows. +type Cilium1163Values_Hubble_Redact struct { + Enabled bool `mapstructure:"enabled,omitempty"` + Http Cilium1163Values_Hubble_Redact_Http `mapstructure:"http,omitempty"` + Kafka Cilium1163Values_Hubble_Redact_Kafka `mapstructure:"kafka,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Redact) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Hubble_PeerService struct { + // -- Service Port for the Peer service. + // If not set, it is dynamically assigned to port 443 if TLS is enabled and to + // port 80 if not. + // servicePort: 80 + // -- Target Port for the Peer service, must match the hubble.listenAddress' + // port. + TargetPort int64 `mapstructure:"targetPort,omitempty"` + // -- The cluster domain to use to query the Hubble Peer service. It should + // be the local cluster. 
+ ClusterDomain string `mapstructure:"clusterDomain,omitempty"` +} + +func (v *Cilium1163Values_Hubble_PeerService) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Configure automatic TLS certificates generation. +type Cilium1163Values_Hubble_Tls_Auto struct { + // -- Auto-generate certificates. + // When set to true, automatically generate a CA and certificates to + // enable mTLS between Hubble server and Hubble Relay instances. If set to + // false, the certs for Hubble server need to be provided by setting + // appropriate values below. + Enabled bool `mapstructure:"enabled,omitempty"` + // -- Set the method to auto-generate certificates. Supported values: + // - helm: This method uses Helm to generate all certificates. + // - cronJob: This method uses a Kubernetes CronJob the generate any + // certificates not provided by the user at installation + // time. + // - certmanager: This method use cert-manager to generate & rotate certificates. + Method string `mapstructure:"method,omitempty"` + // -- Generated certificates validity duration in days. + CertValidityDuration int64 `mapstructure:"certValidityDuration,omitempty"` + // -- Schedule for certificates regeneration (regardless of their expiration date). + // Only used if method is "cronJob". If nil, then no recurring job will be created. + // Instead, only the one-shot job is deployed to generate the certificates at + // installation time. + // + // Defaults to midnight of the first day of every fourth month. 
For syntax, see + // https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#schedule-syntax + Schedule string `mapstructure:"schedule,omitempty"` + // [Example] + // certManagerIssuerRef: + // group: cert-manager.io + // kind: ClusterIssuer + // name: ca-issuer + // -- certmanager issuer used when hubble.tls.auto.method=certmanager. + CertManagerIssuerRef map[string]any `mapstructure:"certManagerIssuerRef,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Tls_Auto) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- The Hubble server certificate and private key +type Cilium1163Values_Hubble_Tls_Server struct { + // -- Name of the Secret containing the certificate and key for the Hubble server. + // If specified, cert and key are ignored. + ExistingSecret string `mapstructure:"existingSecret,omitempty"` + // -- base64 encoded PEM values for the Hubble server certificate (deprecated). + // Use existingSecret instead. + Cert string `mapstructure:"cert,omitempty"` + // -- base64 encoded PEM values for the Hubble server key (deprecated). + // Use existingSecret instead. + Key string `mapstructure:"key,omitempty"` + // -- Extra DNS names added to certificate when it's auto generated + ExtraDnsNames []any `mapstructure:"extraDnsNames,omitempty"` + // -- Extra IP addresses added to certificate when it's auto generated + ExtraIpAddresses []any `mapstructure:"extraIpAddresses,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Tls_Server) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- TLS configuration for Hubble +type Cilium1163Values_Hubble_Tls struct { + // -- Enable mutual TLS for listenAddress. 
Setting this value to false is + // highly discouraged as the Hubble API provides access to potentially + // sensitive network flow metadata and is exposed on the host network. + Enabled bool `mapstructure:"enabled,omitempty"` + // -- Configure automatic TLS certificates generation. + Auto Cilium1163Values_Hubble_Tls_Auto `mapstructure:"auto,omitempty"` + // -- The Hubble server certificate and private key + Server Cilium1163Values_Hubble_Tls_Server `mapstructure:"server,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Tls) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Hubble-relay container image. +type Cilium1163Values_Hubble_Relay_Image struct { + // @schema + // type: [null, string] + // @schema + Override any `mapstructure:"override,omitempty"` + Repository string `mapstructure:"repository,omitempty"` + Tag string `mapstructure:"tag,omitempty"` + // hubble-relay-digest + Digest string `mapstructure:"digest,omitempty"` + UseDigest bool `mapstructure:"useDigest,omitempty"` + PullPolicy string `mapstructure:"pullPolicy,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Relay_Image) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Hubble_Relay_Affinity_PodAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector_MatchLabels struct { + K8SApp string `mapstructure:"k8s-app,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Relay_Affinity_PodAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector_MatchLabels) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, 
fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Hubble_Relay_Affinity_PodAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector struct { + MatchLabels Cilium1163Values_Hubble_Relay_Affinity_PodAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector_MatchLabels `mapstructure:"matchLabels,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Relay_Affinity_PodAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Hubble_Relay_Affinity_PodAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem struct { + TopologyKey string `mapstructure:"topologyKey,omitempty"` + LabelSelector Cilium1163Values_Hubble_Relay_Affinity_PodAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector `mapstructure:"labelSelector,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Relay_Affinity_PodAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Hubble_Relay_Affinity_PodAffinity struct { + RequiredDuringSchedulingIgnoredDuringExecution []Cilium1163Values_Hubble_Relay_Affinity_PodAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem `mapstructure:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Relay_Affinity_PodAffinity) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) 
+ } + return result, nil +} + +// -- Affinity for hubble-replay +type Cilium1163Values_Hubble_Relay_Affinity struct { + PodAffinity Cilium1163Values_Hubble_Relay_Affinity_PodAffinity `mapstructure:"podAffinity,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Relay_Affinity) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Node labels for pod assignment +// ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector +type Cilium1163Values_Hubble_Relay_NodeSelector struct { + KubernetesIoos string `mapstructure:"kubernetes.io/os,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Relay_NodeSelector) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// PodDisruptionBudget settings +type Cilium1163Values_Hubble_Relay_PodDisruptionBudget struct { + // -- enable PodDisruptionBudget + // ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + Enabled bool `mapstructure:"enabled,omitempty"` + // @schema + // type: [null, integer, string] + // @schema + // -- Minimum number/percentage of pods that should remain scheduled. 
+ // When it's set, maxUnavailable must be disabled by `maxUnavailable: null` + MinAvailable any `mapstructure:"minAvailable,omitempty"` + // @schema + // type: [null, integer, string] + // @schema + // -- Maximum number/percentage of pods that may be made unavailable + MaxUnavailable int64 `mapstructure:"maxUnavailable,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Relay_PodDisruptionBudget) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Hubble_Relay_UpdateStrategy_RollingUpdate struct { + // @schema + // type: [integer, string] + // @schema + MaxUnavailable int64 `mapstructure:"maxUnavailable,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Relay_UpdateStrategy_RollingUpdate) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- hubble-relay update strategy +type Cilium1163Values_Hubble_Relay_UpdateStrategy struct { + Type string `mapstructure:"type,omitempty"` + RollingUpdate Cilium1163Values_Hubble_Relay_UpdateStrategy_RollingUpdate `mapstructure:"rollingUpdate,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Relay_UpdateStrategy) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- hubble-relay pod security context +type Cilium1163Values_Hubble_Relay_PodSecurityContext struct { + FsGroup int64 `mapstructure:"fsGroup,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Relay_PodSecurityContext) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { 
+ return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Hubble_Relay_SecurityContext_Capabilities struct { + // - ALL + Drop []string `mapstructure:"drop,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Relay_SecurityContext_Capabilities) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- hubble-relay container security context +type Cilium1163Values_Hubble_Relay_SecurityContext struct { + // readOnlyRootFilesystem: true + RunAsNonRoot bool `mapstructure:"runAsNonRoot,omitempty"` + RunAsUser int64 `mapstructure:"runAsUser,omitempty"` + RunAsGroup int64 `mapstructure:"runAsGroup,omitempty"` + Capabilities Cilium1163Values_Hubble_Relay_SecurityContext_Capabilities `mapstructure:"capabilities,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Relay_SecurityContext) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- hubble-relay service configuration. +type Cilium1163Values_Hubble_Relay_Service struct { + // --- The type of service used for Hubble Relay access, either ClusterIP or NodePort. + Type string `mapstructure:"type,omitempty"` + // --- The port to use when the service type is set to NodePort. + NodePort int64 `mapstructure:"nodePort,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Relay_Service) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- The hubble-relay client certificate and private key. 
+// This keypair is presented to Hubble server instances for mTLS +// authentication and is required when hubble.tls.enabled is true. +// These values need to be set manually if hubble.tls.auto.enabled is false. +type Cilium1163Values_Hubble_Relay_Tls_Client struct { + // -- Name of the Secret containing the certificate and key for the Hubble metrics server. + // If specified, cert and key are ignored. + ExistingSecret string `mapstructure:"existingSecret,omitempty"` + // -- base64 encoded PEM values for the Hubble relay client certificate (deprecated). + // Use existingSecret instead. + Cert string `mapstructure:"cert,omitempty"` + // -- base64 encoded PEM values for the Hubble relay client key (deprecated). + // Use existingSecret instead. + Key string `mapstructure:"key,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Relay_Tls_Client) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- The hubble-relay server certificate and private key +type Cilium1163Values_Hubble_Relay_Tls_Server struct { + // When set to true, enable TLS on for Hubble Relay server + // (ie: for clients connecting to the Hubble Relay API). + Enabled bool `mapstructure:"enabled,omitempty"` + // When set to true enforces mutual TLS between Hubble Relay server and its clients. + // False allow non-mutual TLS connections. + // This option has no effect when TLS is disabled. + Mtls bool `mapstructure:"mtls,omitempty"` + // -- Name of the Secret containing the certificate and key for the Hubble relay server. + // If specified, cert and key are ignored. + ExistingSecret string `mapstructure:"existingSecret,omitempty"` + // -- base64 encoded PEM values for the Hubble relay server certificate (deprecated). + // Use existingSecret instead. 
+ Cert string `mapstructure:"cert,omitempty"` + // -- base64 encoded PEM values for the Hubble relay server key (deprecated). + // Use existingSecret instead. + Key string `mapstructure:"key,omitempty"` + // -- extra DNS names added to certificate when its auto gen + ExtraDnsNames []any `mapstructure:"extraDnsNames,omitempty"` + // -- extra IP addresses added to certificate when its auto gen + ExtraIpAddresses []any `mapstructure:"extraIpAddresses,omitempty"` + // DNS name used by the backend to connect to the relay + // This is a simple workaround as the relay certificates are currently hardcoded to + // *.hubble-relay.cilium.io + // See https://github.com/cilium/cilium/pull/28709#discussion_r1371792546 + // For GKE Dataplane V2 this should be set to relay.kube-system.svc.cluster.local + RelayName string `mapstructure:"relayName,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Relay_Tls_Server) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- TLS configuration for Hubble Relay +type Cilium1163Values_Hubble_Relay_Tls struct { + // -- The hubble-relay client certificate and private key. + // This keypair is presented to Hubble server instances for mTLS + // authentication and is required when hubble.tls.enabled is true. + // These values need to be set manually if hubble.tls.auto.enabled is false. 
+ Client Cilium1163Values_Hubble_Relay_Tls_Client `mapstructure:"client,omitempty"` + // -- The hubble-relay server certificate and private key + Server Cilium1163Values_Hubble_Relay_Tls_Server `mapstructure:"server,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Relay_Tls) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Hubble_Relay_Prometheus_ServiceMonitor struct { + // -- Enable service monitors. + // This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) + Enabled bool `mapstructure:"enabled,omitempty"` + // -- Labels to add to ServiceMonitor hubble-relay + Labels map[string]any `mapstructure:"labels,omitempty"` + // -- Annotations to add to ServiceMonitor hubble-relay + Annotations map[string]any `mapstructure:"annotations,omitempty"` + // -- Interval for scrape metrics. + Interval string `mapstructure:"interval,omitempty"` + // -- Specify the Kubernetes namespace where Prometheus expects to find + // service monitors configured. 
+ // namespace: "" + // @schema + // type: [null, array] + // @schema + // -- Relabeling configs for the ServiceMonitor hubble-relay + Relabelings any `mapstructure:"relabelings,omitempty"` + // @schema + // type: [null, array] + // @schema + // -- Metrics relabeling configs for the ServiceMonitor hubble-relay + MetricRelabelings any `mapstructure:"metricRelabelings,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Relay_Prometheus_ServiceMonitor) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Enable prometheus metrics for hubble-relay on the configured port at +// /metrics +type Cilium1163Values_Hubble_Relay_Prometheus struct { + Enabled bool `mapstructure:"enabled,omitempty"` + Port int64 `mapstructure:"port,omitempty"` + ServiceMonitor Cilium1163Values_Hubble_Relay_Prometheus_ServiceMonitor `mapstructure:"serviceMonitor,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Relay_Prometheus) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Hubble_Relay_Gops struct { + // -- Enable gops for hubble-relay + Enabled bool `mapstructure:"enabled,omitempty"` + // -- Configure gops listen port for hubble-relay + Port int64 `mapstructure:"port,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Relay_Gops) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Hubble_Relay_Pprof struct { + // -- Enable pprof for hubble-relay + Enabled bool `mapstructure:"enabled,omitempty"` + // -- Configure pprof listen address 
for hubble-relay + Address string `mapstructure:"address,omitempty"` + // -- Configure pprof listen port for hubble-relay + Port int64 `mapstructure:"port,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Relay_Pprof) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Hubble_Relay struct { + // -- Enable Hubble Relay (requires hubble.enabled=true) + Enabled bool `mapstructure:"enabled,omitempty"` + // -- Roll out Hubble Relay pods automatically when configmap is updated. + RollOutPods bool `mapstructure:"rollOutPods,omitempty"` + // -- Hubble-relay container image. + Image Cilium1163Values_Hubble_Relay_Image `mapstructure:"image,omitempty"` + // -- Specifies the resources for the hubble-relay pods + Resources map[string]any `mapstructure:"resources,omitempty"` + // -- Number of replicas run for the hubble-relay deployment. + Replicas int64 `mapstructure:"replicas,omitempty"` + // -- Affinity for hubble-replay + Affinity Cilium1163Values_Hubble_Relay_Affinity `mapstructure:"affinity,omitempty"` + // -- Pod topology spread constraints for hubble-relay + // - maxSkew: 1 + // topologyKey: topology.kubernetes.io/zone + // whenUnsatisfiable: DoNotSchedule + TopologySpreadConstraints []any `mapstructure:"topologySpreadConstraints,omitempty"` + // -- Node labels for pod assignment + // ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + NodeSelector Cilium1163Values_Hubble_Relay_NodeSelector `mapstructure:"nodeSelector,omitempty"` + // -- Node tolerations for pod assignment on nodes with taints + // ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + Tolerations []any `mapstructure:"tolerations,omitempty"` + // -- Additional hubble-relay environment variables. 
+ ExtraEnv []any `mapstructure:"extraEnv,omitempty"` + // -- Annotations to be added to all top-level hubble-relay objects (resources under templates/hubble-relay) + Annotations map[string]any `mapstructure:"annotations,omitempty"` + // -- Annotations to be added to hubble-relay pods + PodAnnotations map[string]any `mapstructure:"podAnnotations,omitempty"` + // -- Labels to be added to hubble-relay pods + PodLabels map[string]any `mapstructure:"podLabels,omitempty"` + // PodDisruptionBudget settings + PodDisruptionBudget Cilium1163Values_Hubble_Relay_PodDisruptionBudget `mapstructure:"podDisruptionBudget,omitempty"` + // -- The priority class to use for hubble-relay + PriorityClassName string `mapstructure:"priorityClassName,omitempty"` + // -- Configure termination grace period for hubble relay Deployment. + TerminationGracePeriodSeconds int64 `mapstructure:"terminationGracePeriodSeconds,omitempty"` + // -- hubble-relay update strategy + UpdateStrategy Cilium1163Values_Hubble_Relay_UpdateStrategy `mapstructure:"updateStrategy,omitempty"` + // -- Additional hubble-relay volumes. + ExtraVolumes []any `mapstructure:"extraVolumes,omitempty"` + // -- Additional hubble-relay volumeMounts. + ExtraVolumeMounts []any `mapstructure:"extraVolumeMounts,omitempty"` + // -- hubble-relay pod security context + PodSecurityContext Cilium1163Values_Hubble_Relay_PodSecurityContext `mapstructure:"podSecurityContext,omitempty"` + // -- hubble-relay container security context + SecurityContext Cilium1163Values_Hubble_Relay_SecurityContext `mapstructure:"securityContext,omitempty"` + // -- hubble-relay service configuration. + Service Cilium1163Values_Hubble_Relay_Service `mapstructure:"service,omitempty"` + // -- Host to listen to. Specify an empty string to bind to all the interfaces. + ListenHost string `mapstructure:"listenHost,omitempty"` + // -- Port to listen to. 
+ // NOTE(review): listenPort is a string here (the upstream chart quotes it,
+ // e.g. "4245"), unlike pprof.Port which is int64 — confirm against cilium's
+ // values.yaml before changing this in the generator.
+ ListenPort string `mapstructure:"listenPort,omitempty"`
+ // -- TLS configuration for Hubble Relay
+ Tls Cilium1163Values_Hubble_Relay_Tls `mapstructure:"tls,omitempty"`
+ // @schema
+ // type: [null, string]
+ // @schema
+ // -- Dial timeout to connect to the local hubble instance to receive peer information (e.g. "30s").
+ DialTimeout any `mapstructure:"dialTimeout,omitempty"`
+ // @schema
+ // type: [null, string]
+ // @schema
+ // -- Backoff duration to retry connecting to the local hubble instance in case of failure (e.g. "30s").
+ RetryTimeout any `mapstructure:"retryTimeout,omitempty"`
+ // @schema
+ // type: [null, integer]
+ // @schema
+ // -- (int) Max number of flows that can be buffered for sorting before being sent to the
+ // client (per request) (e.g. 100).
+ SortBufferLenMax any `mapstructure:"sortBufferLenMax,omitempty"`
+ // @schema
+ // type: [null, string]
+ // @schema
+ // -- When the per-request flows sort buffer is not full, a flow is drained every
+ // time this timeout is reached (only affects requests in follow-mode) (e.g. "1s").
+ // NOTE(review): the three comment lines below describe relay.service.servicePort
+ // in the upstream chart, not the field that follows — presumably a
+ // comment-placement artifact of generator.go; verify against the chart's
+ // values.yaml and fix the generator rather than this file.
+ // -- Port to use for the k8s service backed by hubble-relay pods.
+ // If not set, it is dynamically assigned to port 443 if TLS is enabled and to
+ // port 80 if not.
+ // servicePort: 80 + SortBufferDrainTimeout any `mapstructure:"sortBufferDrainTimeout,omitempty"` + // -- Enable prometheus metrics for hubble-relay on the configured port at + // /metrics + Prometheus Cilium1163Values_Hubble_Relay_Prometheus `mapstructure:"prometheus,omitempty"` + Gops Cilium1163Values_Hubble_Relay_Gops `mapstructure:"gops,omitempty"` + Pprof Cilium1163Values_Hubble_Relay_Pprof `mapstructure:"pprof,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Relay) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Hubble_Ui_Standalone_Tls struct { + // -- When deploying Hubble UI in standalone, with tls enabled for Hubble relay, it is required + // to provide a volume for mounting the client certificates. + // projected: + // defaultMode: 0400 + // sources: + // - secret: + // name: hubble-ui-client-certs + // items: + // - key: tls.crt + // path: client.crt + // - key: tls.key + // path: client.key + // - key: ca.crt + // path: hubble-relay-ca.crt + CertsVolume map[string]any `mapstructure:"certsVolume,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Ui_Standalone_Tls) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Hubble_Ui_Standalone struct { + // -- When true, it will allow installing the Hubble UI only, without checking dependencies. + // It is useful if a cluster already has cilium and Hubble relay installed and you just + // want Hubble UI to be deployed. 
+ // When installed via helm, installing UI should be done via `helm upgrade` and when installed via the cilium cli, then `cilium hubble enable --ui` + Enabled bool `mapstructure:"enabled,omitempty"` + Tls Cilium1163Values_Hubble_Ui_Standalone_Tls `mapstructure:"tls,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Ui_Standalone) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Hubble_Ui_Tls_Client struct { + // -- Name of the Secret containing the client certificate and key for Hubble UI + // If specified, cert and key are ignored. + ExistingSecret string `mapstructure:"existingSecret,omitempty"` + // -- base64 encoded PEM values for the Hubble UI client certificate (deprecated). + // Use existingSecret instead. + Cert string `mapstructure:"cert,omitempty"` + // -- base64 encoded PEM values for the Hubble UI client key (deprecated). + // Use existingSecret instead. + Key string `mapstructure:"key,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Ui_Tls_Client) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Hubble_Ui_Tls struct { + Client Cilium1163Values_Hubble_Ui_Tls_Client `mapstructure:"client,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Ui_Tls) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Hubble-ui backend image. 
+type Cilium1163Values_Hubble_Ui_Backend_Image struct { + // @schema + // type: [null, string] + // @schema + Override any `mapstructure:"override,omitempty"` + Repository string `mapstructure:"repository,omitempty"` + Tag string `mapstructure:"tag,omitempty"` + Digest string `mapstructure:"digest,omitempty"` + UseDigest bool `mapstructure:"useDigest,omitempty"` + PullPolicy string `mapstructure:"pullPolicy,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Ui_Backend_Image) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Hubble_Ui_Backend_LivenessProbe struct { + // -- Enable liveness probe for Hubble-ui backend (requires Hubble-ui 0.12+) + Enabled bool `mapstructure:"enabled,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Ui_Backend_LivenessProbe) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Hubble_Ui_Backend_ReadinessProbe struct { + // -- Enable readiness probe for Hubble-ui backend (requires Hubble-ui 0.12+) + Enabled bool `mapstructure:"enabled,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Ui_Backend_ReadinessProbe) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Hubble_Ui_Backend struct { + // -- Hubble-ui backend image. + Image Cilium1163Values_Hubble_Ui_Backend_Image `mapstructure:"image,omitempty"` + // -- Hubble-ui backend security context. 
+ SecurityContext map[string]any `mapstructure:"securityContext,omitempty"` + // -- Additional hubble-ui backend environment variables. + ExtraEnv []any `mapstructure:"extraEnv,omitempty"` + // -- Additional hubble-ui backend volumes. + ExtraVolumes []any `mapstructure:"extraVolumes,omitempty"` + // -- Additional hubble-ui backend volumeMounts. + ExtraVolumeMounts []any `mapstructure:"extraVolumeMounts,omitempty"` + LivenessProbe Cilium1163Values_Hubble_Ui_Backend_LivenessProbe `mapstructure:"livenessProbe,omitempty"` + ReadinessProbe Cilium1163Values_Hubble_Ui_Backend_ReadinessProbe `mapstructure:"readinessProbe,omitempty"` + // -- Resource requests and limits for the 'backend' container of the 'hubble-ui' deployment. + // limits: + // cpu: 1000m + // memory: 1024M + // requests: + // cpu: 100m + // memory: 64Mi + Resources map[string]any `mapstructure:"resources,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Ui_Backend) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Hubble-ui frontend image. 
+type Cilium1163Values_Hubble_Ui_Frontend_Image struct { + // @schema + // type: [null, string] + // @schema + Override any `mapstructure:"override,omitempty"` + Repository string `mapstructure:"repository,omitempty"` + Tag string `mapstructure:"tag,omitempty"` + Digest string `mapstructure:"digest,omitempty"` + UseDigest bool `mapstructure:"useDigest,omitempty"` + PullPolicy string `mapstructure:"pullPolicy,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Ui_Frontend_Image) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Controls server listener for ipv6 +type Cilium1163Values_Hubble_Ui_Frontend_Server_Ipv6 struct { + Enabled bool `mapstructure:"enabled,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Ui_Frontend_Server_Ipv6) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// limits: +// cpu: 1000m +// memory: 1024M +// requests: +// cpu: 100m +// memory: 64Mi +type Cilium1163Values_Hubble_Ui_Frontend_Server struct { + // -- Controls server listener for ipv6 + Ipv6 Cilium1163Values_Hubble_Ui_Frontend_Server_Ipv6 `mapstructure:"ipv6,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Ui_Frontend_Server) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Hubble_Ui_Frontend struct { + // -- Hubble-ui frontend image. + Image Cilium1163Values_Hubble_Ui_Frontend_Image `mapstructure:"image,omitempty"` + // -- Hubble-ui frontend security context. 
+ SecurityContext map[string]any `mapstructure:"securityContext,omitempty"` + // -- Additional hubble-ui frontend environment variables. + ExtraEnv []any `mapstructure:"extraEnv,omitempty"` + // -- Additional hubble-ui frontend volumes. + ExtraVolumes []any `mapstructure:"extraVolumes,omitempty"` + // -- Additional hubble-ui frontend volumeMounts. + ExtraVolumeMounts []any `mapstructure:"extraVolumeMounts,omitempty"` + // -- Resource requests and limits for the 'frontend' container of the 'hubble-ui' deployment. + Resources map[string]any `mapstructure:"resources,omitempty"` + // limits: + // cpu: 1000m + // memory: 1024M + // requests: + // cpu: 100m + // memory: 64Mi + Server Cilium1163Values_Hubble_Ui_Frontend_Server `mapstructure:"server,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Ui_Frontend) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// PodDisruptionBudget settings +type Cilium1163Values_Hubble_Ui_PodDisruptionBudget struct { + // -- enable PodDisruptionBudget + // ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + Enabled bool `mapstructure:"enabled,omitempty"` + // @schema + // type: [null, integer, string] + // @schema + // -- Minimum number/percentage of pods that should remain scheduled. 
+ // When it's set, maxUnavailable must be disabled by `maxUnavailable: null`
+ MinAvailable any `mapstructure:"minAvailable,omitempty"`
+ // @schema
+ // type: [null, integer, string]
+ // @schema
+ // -- Maximum number/percentage of pods that may be made unavailable
+ // NOTE(review): the schema above admits null/integer/string (Kubernetes
+ // IntOrString, e.g. "50%"), yet this field is int64 while the sibling
+ // MinAvailable with the identical schema is `any`. A string percentage or an
+ // explicit null cannot round-trip through int64, and `omitempty` makes an
+ // explicit 0 indistinguishable from unset. Likely a generator.go bug — fix
+ // it there and regenerate; do not hand-edit this file.
+ MaxUnavailable int64 `mapstructure:"maxUnavailable,omitempty"`
+}
+
+// ToMap decodes the struct back into a map[string]any via mapstructure,
+// for merging into Helm values.
+func (v *Cilium1163Values_Hubble_Ui_PodDisruptionBudget) ToMap() (map[string]any, error) {
+ var result map[string]any
+ err := mapstructure.Decode(v, &result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
+ }
+ return result, nil
+}
+
+// -- Node labels for pod assignment
+// ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+type Cilium1163Values_Hubble_Ui_NodeSelector struct {
+ // Field name is the generator's mangling of the literal label key
+ // "kubernetes.io/os" (see the mapstructure tag).
+ KubernetesIoos string `mapstructure:"kubernetes.io/os,omitempty"`
+}
+
+// ToMap decodes the struct back into a map[string]any via mapstructure,
+// for merging into Helm values.
+func (v *Cilium1163Values_Hubble_Ui_NodeSelector) ToMap() (map[string]any, error) {
+ var result map[string]any
+ err := mapstructure.Decode(v, &result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
+ }
+ return result, nil
+}
+
+type Cilium1163Values_Hubble_Ui_UpdateStrategy_RollingUpdate struct {
+ // @schema
+ // type: [integer, string]
+ // @schema
+ // NOTE(review): same int64-vs-schema mismatch as flagged on
+ // Cilium1163Values_Hubble_Ui_PodDisruptionBudget.MaxUnavailable in this file —
+ // the schema admits a string percentage (e.g. "25%") that int64 cannot hold;
+ // generator.go should emit `any` for multi-type schemas.
+ MaxUnavailable int64 `mapstructure:"maxUnavailable,omitempty"`
+}
+
+// ToMap decodes the struct back into a map[string]any via mapstructure,
+// for merging into Helm values.
+func (v *Cilium1163Values_Hubble_Ui_UpdateStrategy_RollingUpdate) ToMap() (map[string]any, error) {
+ var result map[string]any
+ err := mapstructure.Decode(v, &result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
+ }
+ return result, nil
+}
+
+// -- hubble-ui update strategy.
+type Cilium1163Values_Hubble_Ui_UpdateStrategy struct { + Type string `mapstructure:"type,omitempty"` + RollingUpdate Cilium1163Values_Hubble_Ui_UpdateStrategy_RollingUpdate `mapstructure:"rollingUpdate,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Ui_UpdateStrategy) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Security context to be added to Hubble UI pods +type Cilium1163Values_Hubble_Ui_SecurityContext struct { + RunAsUser int64 `mapstructure:"runAsUser,omitempty"` + RunAsGroup int64 `mapstructure:"runAsGroup,omitempty"` + FsGroup int64 `mapstructure:"fsGroup,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Ui_SecurityContext) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- hubble-ui service configuration. +type Cilium1163Values_Hubble_Ui_Service struct { + // -- Annotations to be added for the Hubble UI service + Annotations map[string]any `mapstructure:"annotations,omitempty"` + // --- The type of service used for Hubble UI access, either ClusterIP or NodePort. + Type string `mapstructure:"type,omitempty"` + // --- The port to use when the service type is set to NodePort. + NodePort int64 `mapstructure:"nodePort,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Ui_Service) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- hubble-ui ingress configuration. 
+type Cilium1163Values_Hubble_Ui_Ingress struct { + Enabled bool `mapstructure:"enabled,omitempty"` + Annotations map[string]any `mapstructure:"annotations,omitempty"` + // kubernetes.io/ingress.class: nginx + // kubernetes.io/tls-acme: "true" + ClassName string `mapstructure:"className,omitempty"` + // - chart-example.local + Hosts []string `mapstructure:"hosts,omitempty"` + Labels map[string]any `mapstructure:"labels,omitempty"` + // - secretName: chart-example-tls + // hosts: + // - chart-example.local + Tls []any `mapstructure:"tls,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Ui_Ingress) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Hubble_Ui struct { + // -- Whether to enable the Hubble UI. + Enabled bool `mapstructure:"enabled,omitempty"` + Standalone Cilium1163Values_Hubble_Ui_Standalone `mapstructure:"standalone,omitempty"` + // -- Roll out Hubble-ui pods automatically when configmap is updated. + RollOutPods bool `mapstructure:"rollOutPods,omitempty"` + Tls Cilium1163Values_Hubble_Ui_Tls `mapstructure:"tls,omitempty"` + Backend Cilium1163Values_Hubble_Ui_Backend `mapstructure:"backend,omitempty"` + Frontend Cilium1163Values_Hubble_Ui_Frontend `mapstructure:"frontend,omitempty"` + // -- The number of replicas of Hubble UI to deploy. 
+ Replicas int64 `mapstructure:"replicas,omitempty"` + // -- Annotations to be added to all top-level hubble-ui objects (resources under templates/hubble-ui) + Annotations map[string]any `mapstructure:"annotations,omitempty"` + // -- Annotations to be added to hubble-ui pods + PodAnnotations map[string]any `mapstructure:"podAnnotations,omitempty"` + // -- Labels to be added to hubble-ui pods + PodLabels map[string]any `mapstructure:"podLabels,omitempty"` + // PodDisruptionBudget settings + PodDisruptionBudget Cilium1163Values_Hubble_Ui_PodDisruptionBudget `mapstructure:"podDisruptionBudget,omitempty"` + // -- Affinity for hubble-ui + Affinity map[string]any `mapstructure:"affinity,omitempty"` + // -- Pod topology spread constraints for hubble-ui + // - maxSkew: 1 + // topologyKey: topology.kubernetes.io/zone + // whenUnsatisfiable: DoNotSchedule + TopologySpreadConstraints []any `mapstructure:"topologySpreadConstraints,omitempty"` + // -- Node labels for pod assignment + // ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + NodeSelector Cilium1163Values_Hubble_Ui_NodeSelector `mapstructure:"nodeSelector,omitempty"` + // -- Node tolerations for pod assignment on nodes with taints + // ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + Tolerations []any `mapstructure:"tolerations,omitempty"` + // -- The priority class to use for hubble-ui + PriorityClassName string `mapstructure:"priorityClassName,omitempty"` + // -- hubble-ui update strategy. + UpdateStrategy Cilium1163Values_Hubble_Ui_UpdateStrategy `mapstructure:"updateStrategy,omitempty"` + // -- Security context to be added to Hubble UI pods + SecurityContext Cilium1163Values_Hubble_Ui_SecurityContext `mapstructure:"securityContext,omitempty"` + // -- hubble-ui service configuration. + Service Cilium1163Values_Hubble_Ui_Service `mapstructure:"service,omitempty"` + // -- Defines base url prefix for all hubble-ui http requests. 
+ // It needs to be changed in case if ingress for hubble-ui is configured under some sub-path. + // Trailing `/` is required for custom path, ex. `/service-map/` + BaseUrl string `mapstructure:"baseUrl,omitempty"` + // -- hubble-ui ingress configuration. + Ingress Cilium1163Values_Hubble_Ui_Ingress `mapstructure:"ingress,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Ui) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// --- Static exporter configuration. +// Static exporter is bound to agent lifecycle. +type Cilium1163Values_Hubble_Export_Static struct { + Enabled bool `mapstructure:"enabled,omitempty"` + FilePath string `mapstructure:"filePath,omitempty"` + FieldMask []any `mapstructure:"fieldMask,omitempty"` + // - time + // - source + // - destination + // - verdict + AllowList []any `mapstructure:"allowList,omitempty"` + // - '{"verdict":["DROPPED","ERROR"]}' + // - '{"source_pod":["kube-system/"]}' + // - '{"destination_pod":["kube-system/"]}' + DenyList []any `mapstructure:"denyList,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Export_Static) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// ---- Exporters configuration in YAML format. 
+type Cilium1163Values_Hubble_Export_Dynamic_Config_ContentItem struct { + Name string `mapstructure:"name,omitempty"` + FieldMask []any `mapstructure:"fieldMask,omitempty"` + IncludeFilters []any `mapstructure:"includeFilters,omitempty"` + ExcludeFilters []any `mapstructure:"excludeFilters,omitempty"` + // - name: "test002" + // filePath: "/var/log/network/flow-log/pa/test002.log" + // fieldMask: ["source.namespace", "source.pod_name", "destination.namespace", "destination.pod_name", "verdict"] + // includeFilters: + // - source_pod: ["default/"] + // event_type: + // - type: 1 + // - destination_pod: ["frontend/nginx-975996d4c-7hhgt"] + // excludeFilters: [] + // end: "2023-10-09T23:59:59-07:00" + FilePath string `mapstructure:"filePath,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Export_Dynamic_Config_ContentItem) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Hubble_Export_Dynamic_Config struct { + // ---- Name of configmap with configuration that may be altered to reconfigure exporters within a running agents. + ConfigMapName string `mapstructure:"configMapName,omitempty"` + // ---- True if helm installer should create config map. + // Switch to false if you want to self maintain the file content. + CreateConfigMap bool `mapstructure:"createConfigMap,omitempty"` + // ---- Exporters configuration in YAML format. + Content []Cilium1163Values_Hubble_Export_Dynamic_Config_ContentItem `mapstructure:"content,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Export_Dynamic_Config) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// --- Dynamic exporters configuration. 
+// Dynamic exporters may be reconfigured without a need of agent restarts. +type Cilium1163Values_Hubble_Export_Dynamic struct { + Enabled bool `mapstructure:"enabled,omitempty"` + Config Cilium1163Values_Hubble_Export_Dynamic_Config `mapstructure:"config,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Export_Dynamic) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Hubble flows export. +type Cilium1163Values_Hubble_Export struct { + // --- Defines max file size of output file before it gets rotated. + FileMaxSizeMb int64 `mapstructure:"fileMaxSizeMb,omitempty"` + // --- Defines max number of backup/rotated files. + FileMaxBackups int64 `mapstructure:"fileMaxBackups,omitempty"` + // --- Static exporter configuration. + // Static exporter is bound to agent lifecycle. + Static Cilium1163Values_Hubble_Export_Static `mapstructure:"static,omitempty"` + // --- Dynamic exporters configuration. + // Dynamic exporters may be reconfigured without a need of agent restarts. + Dynamic Cilium1163Values_Hubble_Export_Dynamic `mapstructure:"dynamic,omitempty"` +} + +func (v *Cilium1163Values_Hubble_Export) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Emit v1.Events related to pods on detection of packet drops. +// This feature is alpha, please provide feedback at https://github.com/cilium/cilium/issues/33975. +type Cilium1163Values_Hubble_DropEventEmitter struct { + Enabled bool `mapstructure:"enabled,omitempty"` + // --- Minimum time between emitting same events. + Interval string `mapstructure:"interval,omitempty"` + // --- Drop reasons to emit events for. 
+ // ref: https://docs.cilium.io/en/stable/_api/v1/flow/README/#dropreason + // - auth_required + // - policy_denied + Reasons []string `mapstructure:"reasons,omitempty"` +} + +func (v *Cilium1163Values_Hubble_DropEventEmitter) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Hubble struct { + // -- Enable Hubble (true by default). + Enabled bool `mapstructure:"enabled,omitempty"` + // -- Annotations to be added to all top-level hubble objects (resources under templates/hubble) + // -- Buffer size of the channel Hubble uses to receive monitor events. If this + // value is not set, the queue size is set to the default monitor queue size. + // eventQueueSize: "" + Annotations map[string]any `mapstructure:"annotations,omitempty"` + // -- Number of recent flows for Hubble to cache. Defaults to 4095. + // Possible values are: + // 1, 3, 7, 15, 31, 63, 127, 255, 511, 1023, + // 2047, 4095, 8191, 16383, 32767, 65535 + // eventBufferCapacity: "4095" + // + // -- Hubble metrics configuration. + // See https://docs.cilium.io/en/stable/observability/metrics/#hubble-metrics + // for more comprehensive documentation about Hubble metrics. + Metrics Cilium1163Values_Hubble_Metrics `mapstructure:"metrics,omitempty"` + // -- Unix domain socket path to listen to when Hubble is enabled. + SocketPath string `mapstructure:"socketPath,omitempty"` + // -- Enables redacting sensitive information present in Layer 7 flows. + Redact Cilium1163Values_Hubble_Redact `mapstructure:"redact,omitempty"` + // -- An additional address for Hubble to listen to. + // Set this field ":4244" if you are enabling Hubble Relay, as it assumes that + // Hubble is listening on port 4244. 
+ ListenAddress string `mapstructure:"listenAddress,omitempty"` + // -- Whether Hubble should prefer to announce IPv6 or IPv4 addresses if both are available. + PreferIpv6 bool `mapstructure:"preferIpv6,omitempty"` + // @schema + // type: [null, boolean] + // @schema + // -- (bool) Skip Hubble events with unknown cgroup ids + // @default -- `true` + SkipUnknownCgroupIds any `mapstructure:"skipUnknownCGroupIDs,omitempty"` + PeerService Cilium1163Values_Hubble_PeerService `mapstructure:"peerService,omitempty"` + // -- TLS configuration for Hubble + Tls Cilium1163Values_Hubble_Tls `mapstructure:"tls,omitempty"` + Relay Cilium1163Values_Hubble_Relay `mapstructure:"relay,omitempty"` + Ui Cilium1163Values_Hubble_Ui `mapstructure:"ui,omitempty"` + // -- Hubble flows export. + Export Cilium1163Values_Hubble_Export `mapstructure:"export,omitempty"` + // -- Emit v1.Events related to pods on detection of packet drops. + // This feature is alpha, please provide feedback at https://github.com/cilium/cilium/issues/33975. + DropEventEmitter Cilium1163Values_Hubble_DropEventEmitter `mapstructure:"dropEventEmitter,omitempty"` +} + +func (v *Cilium1163Values_Hubble) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Ipam_Operator struct { + // @schema + // type: [array, string] + // @schema + // -- IPv4 CIDR list range to delegate to individual nodes for IPAM. + // - 10.0.0.0/8 + ClusterPoolIpv4PodCidrlist []string `mapstructure:"clusterPoolIPv4PodCIDRList,omitempty"` + // -- IPv4 CIDR mask size to delegate to individual nodes for IPAM. + ClusterPoolIpv4MaskSize int64 `mapstructure:"clusterPoolIPv4MaskSize,omitempty"` + // @schema + // type: [array, string] + // @schema + // -- IPv6 CIDR list range to delegate to individual nodes for IPAM. 
+ // - fd00::/104 + ClusterPoolIpv6PodCidrlist []string `mapstructure:"clusterPoolIPv6PodCIDRList,omitempty"` + // -- IPv6 CIDR mask size to delegate to individual nodes for IPAM. + ClusterPoolIpv6MaskSize int64 `mapstructure:"clusterPoolIPv6MaskSize,omitempty"` + // -- IP pools to auto-create in multi-pool IPAM mode. + AutoCreateCiliumPodIppools map[string]any `mapstructure:"autoCreateCiliumPodIPPools,omitempty"` + // default: + // ipv4: + // cidrs: + // - 10.10.0.0/8 + // maskSize: 24 + // other: + // ipv6: + // cidrs: + // - fd00:100::/80 + // maskSize: 96 + // @schema + // type: [null, integer] + // @schema + // -- (int) The maximum burst size when rate limiting access to external APIs. + // Also known as the token bucket capacity. + // @default -- `20` + ExternalApilimitBurstSize any `mapstructure:"externalAPILimitBurstSize,omitempty"` + // @schema + // type: [null, number] + // @schema + // -- (float) The maximum queries per second when rate limiting access to + // external APIs. Also known as the bucket refill rate, which is used to + // refill the bucket up to the burst size capacity. + // @default -- `4.0` + ExternalApilimitQps any `mapstructure:"externalAPILimitQPS,omitempty"` +} + +func (v *Cilium1163Values_Ipam_Operator) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Ipam struct { + // -- Configure IP Address Management mode. + // ref: https://docs.cilium.io/en/stable/network/concepts/ipam/ + Mode string `mapstructure:"mode,omitempty"` + // -- Maximum rate at which the CiliumNode custom resource is updated. 
+ CiliumNodeUpdateRate string `mapstructure:"ciliumNodeUpdateRate,omitempty"` + Operator Cilium1163Values_Ipam_Operator `mapstructure:"operator,omitempty"` +} + +func (v *Cilium1163Values_Ipam) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_NodeIpam struct { + // -- Configure Node IPAM + // ref: https://docs.cilium.io/en/stable/network/node-ipam/ + Enabled bool `mapstructure:"enabled,omitempty"` +} + +func (v *Cilium1163Values_NodeIpam) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Configure the eBPF-based ip-masq-agent +// the config of nonMasqueradeCIDRs +// config: +// nonMasqueradeCIDRs: [] +// masqLinkLocal: false +// masqLinkLocalIPv6: false +type Cilium1163Values_IpMasqAgent struct { + Enabled bool `mapstructure:"enabled,omitempty"` +} + +func (v *Cilium1163Values_IpMasqAgent) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// iptablesLockTimeout defines the iptables "--wait" option when invoked from Cilium. +// iptablesLockTimeout: "5s" +type Cilium1163Values_Ipv4 struct { + // -- Enable IPv4 support. + Enabled bool `mapstructure:"enabled,omitempty"` +} + +func (v *Cilium1163Values_Ipv4) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Ipv6 struct { + // -- Enable IPv6 support. 
+ Enabled bool `mapstructure:"enabled,omitempty"` +} + +func (v *Cilium1163Values_Ipv6) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Configure Kubernetes specific configuration +type Cilium1163Values_K8S struct { + // -- requireIPv4PodCIDR enables waiting for Kubernetes to provide the PodCIDR + // range via the Kubernetes node resource + RequireIpv4PodCidr bool `mapstructure:"requireIPv4PodCIDR,omitempty"` + // -- requireIPv6PodCIDR enables waiting for Kubernetes to provide the PodCIDR + // range via the Kubernetes node resource + RequireIpv6PodCidr bool `mapstructure:"requireIPv6PodCIDR,omitempty"` +} + +func (v *Cilium1163Values_K8S) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_StartupProbe struct { + // -- failure threshold of startup probe. 
+ // 105 x 2s translates to the old behaviour of the readiness probe (120s delay + 30 x 3s) + FailureThreshold int64 `mapstructure:"failureThreshold,omitempty"` + // -- interval between checks of the startup probe + PeriodSeconds int64 `mapstructure:"periodSeconds,omitempty"` +} + +func (v *Cilium1163Values_StartupProbe) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_LivenessProbe struct { + // -- failure threshold of liveness probe + FailureThreshold int64 `mapstructure:"failureThreshold,omitempty"` + // -- interval between checks of the liveness probe + PeriodSeconds int64 `mapstructure:"periodSeconds,omitempty"` +} + +func (v *Cilium1163Values_LivenessProbe) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Configure the kube-proxy replacement in Cilium BPF datapath +// Valid options are "true" or "false". 
+// ref: https://docs.cilium.io/en/stable/network/kubernetes/kubeproxy-free/ +// kubeProxyReplacement: "false" +type Cilium1163Values_ReadinessProbe struct { + // -- failure threshold of readiness probe + FailureThreshold int64 `mapstructure:"failureThreshold,omitempty"` + // -- interval between checks of the readiness probe + PeriodSeconds int64 `mapstructure:"periodSeconds,omitempty"` +} + +func (v *Cilium1163Values_ReadinessProbe) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_L2NeighDiscovery struct { + // -- Enable L2 neighbor discovery in the agent + Enabled bool `mapstructure:"enabled,omitempty"` + // -- Override the agent's default neighbor resolution refresh period. + RefreshPeriod string `mapstructure:"refreshPeriod,omitempty"` +} + +func (v *Cilium1163Values_L2NeighDiscovery) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Nat struct { + // -- Number of the top-k SNAT map connections to track in Cilium statedb. + MapStatsEntries int64 `mapstructure:"mapStatsEntries,omitempty"` + // -- Interval between how often SNAT map is counted for stats. + MapStatsInterval string `mapstructure:"mapStatsInterval,omitempty"` +} + +func (v *Cilium1163Values_Nat) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_EgressGateway struct { + // -- Enables egress gateway to redirect and SNAT the traffic that leaves the + // cluster. 
+ Enabled bool `mapstructure:"enabled,omitempty"` + // -- Time between triggers of egress gateway state reconciliations + // -- Maximum number of entries in egress gateway policy map + // maxPolicyEntries: 16384 + ReconciliationTriggerInterval string `mapstructure:"reconciliationTriggerInterval,omitempty"` +} + +func (v *Cilium1163Values_EgressGateway) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Vtep struct { + // -- Enables VXLAN Tunnel Endpoint (VTEP) Integration (beta) to allow + // Cilium-managed pods to talk to third party VTEP devices over Cilium tunnel. + Enabled bool `mapstructure:"enabled,omitempty"` + // -- A space separated list of VTEP device endpoint IPs, for example "1.1.1.1 1.1.2.1" + Endpoint string `mapstructure:"endpoint,omitempty"` + // -- A space separated list of VTEP device CIDRs, for example "1.1.1.0/24 1.1.2.0/24" + Cidr string `mapstructure:"cidr,omitempty"` + // -- VTEP CIDRs Mask that applies to all VTEP CIDRs, for example "255.255.255.0" + Mask string `mapstructure:"mask,omitempty"` + // -- A space separated list of VTEP device MAC addresses (VTEP MAC), for example "x:x:x:x:x:x y:y:y:y:y:y:y" + Mac string `mapstructure:"mac,omitempty"` +} + +func (v *Cilium1163Values_Vtep) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- cilium-monitor sidecar. +type Cilium1163Values_Monitor struct { + // -- Enable the cilium-monitor sidecar. 
+ Enabled bool `mapstructure:"enabled,omitempty"` +} + +func (v *Cilium1163Values_Monitor) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- serviceTopology enables K8s Topology Aware Hints -based service +// endpoints filtering +// serviceTopology: false +// +// -- L7 LoadBalancer +type Cilium1163Values_LoadBalancer_L7 struct { + // -- Enable L7 service load balancing via envoy proxy. + // The request to a k8s service, which has specific annotation e.g. service.cilium.io/lb-l7, + // will be forwarded to the local backend proxy to be load balanced to the service endpoints. + // Please refer to docs for supported annotations for more configuration. + // + // Applicable values: + // - envoy: Enable L7 load balancing via envoy proxy. This will automatically set enable-envoy-config as well. + // - disabled: Disable L7 load balancing by way of service annotation. + Backend string `mapstructure:"backend,omitempty"` + // -- List of ports from service to be automatically redirected to above backend. + // Any service exposing one of these ports will be automatically redirected. + // Fine-grained control can be achieved by using the service annotation. + Ports []any `mapstructure:"ports,omitempty"` + // -- Default LB algorithm + // The default LB algorithm to be used for services, which can be overridden by the + // service annotation (e.g. 
service.cilium.io/lb-l7-algorithm) + // Applicable values: round_robin, least_request, random + Algorithm string `mapstructure:"algorithm,omitempty"` +} + +func (v *Cilium1163Values_LoadBalancer_L7) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Configure service load balancing +type Cilium1163Values_LoadBalancer struct { + // -- standalone enables the standalone L4LB which does not connect to + // kube-apiserver. + // standalone: false + // + // -- algorithm is the name of the load balancing algorithm for backend + // selection e.g. random or maglev + // algorithm: random + // + // -- mode is the operation mode of load balancing for remote backends + // e.g. snat, dsr, hybrid + // mode: snat + // + // -- acceleration is the option to accelerate service handling via XDP + // Applicable values can be: disabled (do not use XDP), native (XDP BPF + // program is run directly out of the networking driver's early receive + // path), or best-effort (use native mode XDP acceleration on devices + // that support it). 
+ // -- dsrDispatch configures whether IP option or IPIP encapsulation is + // used to pass a service IP and port to remote backend + // dsrDispatch: opt + Acceleration string `mapstructure:"acceleration,omitempty"` + // -- serviceTopology enables K8s Topology Aware Hints -based service + // endpoints filtering + // serviceTopology: false + // + // -- L7 LoadBalancer + L7 Cilium1163Values_LoadBalancer_L7 `mapstructure:"l7,omitempty"` +} + +func (v *Cilium1163Values_LoadBalancer) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Configure N-S k8s service loadbalancing +// policyAuditMode: false +type Cilium1163Values_NodePort struct { + // -- Enable the Cilium NodePort service implementation. + // -- Port range to use for NodePort services. + // range: "30000,32767" + Enabled bool `mapstructure:"enabled,omitempty"` + // @schema + // type: [null, string, array] + // @schema + // -- List of CIDRs for choosing which IP addresses assigned to native devices are used for NodePort load-balancing. + // By default this is empty and the first suitable, preferably private, IPv4 and IPv6 address assigned to each device is used. + // + // Example: + // + // addresses: ["192.168.1.0/24", "2001::/64"] + // + Addresses any `mapstructure:"addresses,omitempty"` + // -- Set to true to prevent applications binding to service ports. + BindProtection bool `mapstructure:"bindProtection,omitempty"` + // -- Append NodePort range to ip_local_reserved_ports if clash with ephemeral + // ports is detected. + AutoProtectPortRange bool `mapstructure:"autoProtectPortRange,omitempty"` + // -- Enable healthcheck nodePort server for NodePort services + EnableHealthCheck bool `mapstructure:"enableHealthCheck,omitempty"` + // -- Enable access of the healthcheck nodePort on the LoadBalancerIP. 
Needs + // EnableHealthCheck to be enabled + EnableHealthCheckLoadBalancerIp bool `mapstructure:"enableHealthCheckLoadBalancerIP,omitempty"` +} + +func (v *Cilium1163Values_NodePort) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Pprof struct { + // -- Enable pprof for cilium-agent + Enabled bool `mapstructure:"enabled,omitempty"` + // -- Configure pprof listen address for cilium-agent + Address string `mapstructure:"address,omitempty"` + // -- Configure pprof listen port for cilium-agent + Port int64 `mapstructure:"port,omitempty"` +} + +func (v *Cilium1163Values_Pprof) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Specify the Kubernetes namespace where Prometheus expects to find +// service monitors configured. +// namespace: "" +// -- Relabeling configs for the ServiceMonitor cilium-agent +type Cilium1163Values_Prometheus_ServiceMonitor_RelabelingsItem struct { + // - __meta_kubernetes_pod_node_name + SourceLabels []string `mapstructure:"sourceLabels,omitempty"` + TargetLabel string `mapstructure:"targetLabel,omitempty"` + Replacement string `mapstructure:"replacement,omitempty"` +} + +func (v *Cilium1163Values_Prometheus_ServiceMonitor_RelabelingsItem) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Prometheus_ServiceMonitor struct { + // -- Enable service monitors. 
+ // This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) + Enabled bool `mapstructure:"enabled,omitempty"` + // -- Labels to add to ServiceMonitor cilium-agent + Labels map[string]any `mapstructure:"labels,omitempty"` + // -- Annotations to add to ServiceMonitor cilium-agent + Annotations map[string]any `mapstructure:"annotations,omitempty"` + // -- jobLabel to add for ServiceMonitor cilium-agent + JobLabel string `mapstructure:"jobLabel,omitempty"` + // -- Interval for scrape metrics. + Interval string `mapstructure:"interval,omitempty"` + // -- Specify the Kubernetes namespace where Prometheus expects to find + // service monitors configured. + // namespace: "" + // -- Relabeling configs for the ServiceMonitor cilium-agent + Relabelings []Cilium1163Values_Prometheus_ServiceMonitor_RelabelingsItem `mapstructure:"relabelings,omitempty"` + // @schema + // type: [null, array] + // @schema + // -- Metrics relabeling configs for the ServiceMonitor cilium-agent + MetricRelabelings any `mapstructure:"metricRelabelings,omitempty"` + // -- Set to `true` and helm will not check for monitoring.coreos.com/v1 CRDs before deploying + TrustCrdsExist bool `mapstructure:"trustCRDsExist,omitempty"` +} + +func (v *Cilium1163Values_Prometheus_ServiceMonitor) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Configure prometheus metrics on the configured port at /metrics +type Cilium1163Values_Prometheus struct { + Enabled bool `mapstructure:"enabled,omitempty"` + Port int64 `mapstructure:"port,omitempty"` + ServiceMonitor Cilium1163Values_Prometheus_ServiceMonitor `mapstructure:"serviceMonitor,omitempty"` + // @schema + // type: [null, array] + // @schema + // 
-- Metrics that should be enabled or disabled from the default metric list. + // The list is expected to be separated by a space. (+metric_foo to enable + // metric_foo , -metric_bar to disable metric_bar). + // ref: https://docs.cilium.io/en/stable/observability/metrics/ + Metrics any `mapstructure:"metrics,omitempty"` + // --- Enable controller group metrics for monitoring specific Cilium + // subsystems. The list is a list of controller group names. The special + // values of "all" and "none" are supported. The set of controller + // group names is not guaranteed to be stable between Cilium versions. + // - write-cni-file + // - sync-host-ips + // - sync-lb-maps-with-k8s-services + ControllerGroupMetrics []string `mapstructure:"controllerGroupMetrics,omitempty"` +} + +func (v *Cilium1163Values_Prometheus) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Grafana dashboards for cilium-agent +// grafana can import dashboards based on the label and value +// ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards +type Cilium1163Values_Dashboards struct { + Enabled bool `mapstructure:"enabled,omitempty"` + Label string `mapstructure:"label,omitempty"` + // @schema + // type: [null, string] + // @schema + Namespace any `mapstructure:"namespace,omitempty"` + LabelValue string `mapstructure:"labelValue,omitempty"` + Annotations map[string]any `mapstructure:"annotations,omitempty"` +} + +func (v *Cilium1163Values_Dashboards) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Envoy_Log struct { + // -- The format string to use for laying out the log message metadata 
of Envoy. + Format string `mapstructure:"format,omitempty"` + // -- Path to a separate Envoy log file, if any. Defaults to /dev/stdout. + Path string `mapstructure:"path,omitempty"` +} + +func (v *Cilium1163Values_Envoy_Log) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Envoy container image. +type Cilium1163Values_Envoy_Image struct { + // @schema + // type: [null, string] + // @schema + Override any `mapstructure:"override,omitempty"` + Repository string `mapstructure:"repository,omitempty"` + Tag string `mapstructure:"tag,omitempty"` + PullPolicy string `mapstructure:"pullPolicy,omitempty"` + Digest string `mapstructure:"digest,omitempty"` + UseDigest bool `mapstructure:"useDigest,omitempty"` +} + +func (v *Cilium1163Values_Envoy_Image) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Envoy_UpdateStrategy_RollingUpdate struct { + // @schema + // type: [integer, string] + // @schema + MaxUnavailable int64 `mapstructure:"maxUnavailable,omitempty"` +} + +func (v *Cilium1163Values_Envoy_UpdateStrategy_RollingUpdate) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- cilium-envoy update strategy +// ref: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/#updating-a-daemonset +type Cilium1163Values_Envoy_UpdateStrategy struct { + Type string `mapstructure:"type,omitempty"` + RollingUpdate Cilium1163Values_Envoy_UpdateStrategy_RollingUpdate `mapstructure:"rollingUpdate,omitempty"` +} + +func (v 
*Cilium1163Values_Envoy_UpdateStrategy) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- AppArmorProfile options for the `cilium-agent` and init containers +type Cilium1163Values_Envoy_PodSecurityContext_AppArmorProfile struct { + Type string `mapstructure:"type,omitempty"` +} + +func (v *Cilium1163Values_Envoy_PodSecurityContext_AppArmorProfile) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Security Context for cilium-envoy pods. +type Cilium1163Values_Envoy_PodSecurityContext struct { + // -- AppArmorProfile options for the `cilium-agent` and init containers + AppArmorProfile Cilium1163Values_Envoy_PodSecurityContext_AppArmorProfile `mapstructure:"appArmorProfile,omitempty"` +} + +func (v *Cilium1163Values_Envoy_PodSecurityContext) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Envoy_StartupProbe struct { + // -- failure threshold of startup probe. 
+ // 105 x 2s translates to the old behaviour of the readiness probe (120s delay + 30 x 3s) + FailureThreshold int64 `mapstructure:"failureThreshold,omitempty"` + // -- interval between checks of the startup probe + PeriodSeconds int64 `mapstructure:"periodSeconds,omitempty"` +} + +func (v *Cilium1163Values_Envoy_StartupProbe) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Envoy_LivenessProbe struct { + // -- failure threshold of liveness probe + FailureThreshold int64 `mapstructure:"failureThreshold,omitempty"` + // -- interval between checks of the liveness probe + PeriodSeconds int64 `mapstructure:"periodSeconds,omitempty"` +} + +func (v *Cilium1163Values_Envoy_LivenessProbe) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Envoy_ReadinessProbe struct { + // -- failure threshold of readiness probe + FailureThreshold int64 `mapstructure:"failureThreshold,omitempty"` + // -- interval between checks of the readiness probe + PeriodSeconds int64 `mapstructure:"periodSeconds,omitempty"` +} + +func (v *Cilium1163Values_Envoy_ReadinessProbe) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- SELinux options for the `cilium-envoy` container +type Cilium1163Values_Envoy_SecurityContext_SeLinuxOptions struct { + Level string `mapstructure:"level,omitempty"` + // Running with spc_t since we have removed the privileged mode. 
+ // Users can change it to a different type as long as they have the + // type available on the system. + Type string `mapstructure:"type,omitempty"` +} + +func (v *Cilium1163Values_Envoy_SecurityContext_SeLinuxOptions) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Envoy_SecurityContext_Capabilities struct { + // -- Capabilities for the `cilium-envoy` container. + // Even though granted to the container, the cilium-envoy-starter wrapper drops + // all capabilities after forking the actual Envoy process. + // `NET_BIND_SERVICE` is the only capability that can be passed to the Envoy process by + // setting `envoy.securityContext.capabilities.keepNetBindService=true` (in addition to granting the + // capability to the container). + // Note: In case of embedded envoy, the capability must be granted to the cilium-agent container. + // Used since cilium proxy uses setting IPPROTO_IP/IP_TRANSPARENT + // - NET_ADMIN + // We need it for now but might not need it for >= 5.11 specially + // for the 'SYS_RESOURCE'. + // In >= 5.8 there's already BPF and PERMON capabilities + // Both PERFMON and BPF requires kernel 5.8, container runtime + // cri-o >= v1.22.0 or containerd >= v1.5.0. + // If available, SYS_ADMIN can be removed. + // - PERFMON + // - BPF + // - SYS_ADMIN + Envoy []string `mapstructure:"envoy,omitempty"` + // -- Keep capability `NET_BIND_SERVICE` for Envoy process. 
+ KeepCapNetBindService bool `mapstructure:"keepCapNetBindService,omitempty"` +} + +func (v *Cilium1163Values_Envoy_SecurityContext_Capabilities) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Envoy_SecurityContext struct { + // -- User to run the pod with + // runAsUser: 0 + // -- Run the pod with elevated privileges + Privileged bool `mapstructure:"privileged,omitempty"` + // -- SELinux options for the `cilium-envoy` container + SeLinuxOptions Cilium1163Values_Envoy_SecurityContext_SeLinuxOptions `mapstructure:"seLinuxOptions,omitempty"` + Capabilities Cilium1163Values_Envoy_SecurityContext_Capabilities `mapstructure:"capabilities,omitempty"` +} + +func (v *Cilium1163Values_Envoy_SecurityContext) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Envoy_Affinity_PodAntiAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector_MatchLabels struct { + K8SApp string `mapstructure:"k8s-app,omitempty"` +} + +func (v *Cilium1163Values_Envoy_Affinity_PodAntiAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector_MatchLabels) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Envoy_Affinity_PodAntiAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector struct { + MatchLabels Cilium1163Values_Envoy_Affinity_PodAntiAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector_MatchLabels `mapstructure:"matchLabels,omitempty"` +} + 
+func (v *Cilium1163Values_Envoy_Affinity_PodAntiAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Envoy_Affinity_PodAntiAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem struct { + TopologyKey string `mapstructure:"topologyKey,omitempty"` + LabelSelector Cilium1163Values_Envoy_Affinity_PodAntiAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector `mapstructure:"labelSelector,omitempty"` +} + +func (v *Cilium1163Values_Envoy_Affinity_PodAntiAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Envoy_Affinity_PodAntiAffinity struct { + RequiredDuringSchedulingIgnoredDuringExecution []Cilium1163Values_Envoy_Affinity_PodAntiAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem `mapstructure:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"` +} + +func (v *Cilium1163Values_Envoy_Affinity_PodAntiAffinity) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Envoy_Affinity_PodAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector_MatchLabels struct { + K8SApp string `mapstructure:"k8s-app,omitempty"` +} + +func (v *Cilium1163Values_Envoy_Affinity_PodAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector_MatchLabels) ToMap() (map[string]any, error) { + var result map[string]any + err := 
mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Envoy_Affinity_PodAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector struct { + MatchLabels Cilium1163Values_Envoy_Affinity_PodAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector_MatchLabels `mapstructure:"matchLabels,omitempty"` +} + +func (v *Cilium1163Values_Envoy_Affinity_PodAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Envoy_Affinity_PodAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem struct { + TopologyKey string `mapstructure:"topologyKey,omitempty"` + LabelSelector Cilium1163Values_Envoy_Affinity_PodAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector `mapstructure:"labelSelector,omitempty"` +} + +func (v *Cilium1163Values_Envoy_Affinity_PodAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Envoy_Affinity_PodAffinity struct { + RequiredDuringSchedulingIgnoredDuringExecution []Cilium1163Values_Envoy_Affinity_PodAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem `mapstructure:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"` +} + +func (v *Cilium1163Values_Envoy_Affinity_PodAffinity) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) 
+ } + return result, nil +} + +type Cilium1163Values_Envoy_Affinity_NodeAffinity_RequiredDuringSchedulingIgnoredDuringExecution_NodeSelectorTermsItem_MatchExpressionsItem struct { + Key string `mapstructure:"key,omitempty"` + Operator string `mapstructure:"operator,omitempty"` + // - true + Values []string `mapstructure:"values,omitempty"` +} + +func (v *Cilium1163Values_Envoy_Affinity_NodeAffinity_RequiredDuringSchedulingIgnoredDuringExecution_NodeSelectorTermsItem_MatchExpressionsItem) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Envoy_Affinity_NodeAffinity_RequiredDuringSchedulingIgnoredDuringExecution_NodeSelectorTermsItem struct { + MatchExpressions []Cilium1163Values_Envoy_Affinity_NodeAffinity_RequiredDuringSchedulingIgnoredDuringExecution_NodeSelectorTermsItem_MatchExpressionsItem `mapstructure:"matchExpressions,omitempty"` +} + +func (v *Cilium1163Values_Envoy_Affinity_NodeAffinity_RequiredDuringSchedulingIgnoredDuringExecution_NodeSelectorTermsItem) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Envoy_Affinity_NodeAffinity_RequiredDuringSchedulingIgnoredDuringExecution struct { + NodeSelectorTerms []Cilium1163Values_Envoy_Affinity_NodeAffinity_RequiredDuringSchedulingIgnoredDuringExecution_NodeSelectorTermsItem `mapstructure:"nodeSelectorTerms,omitempty"` +} + +func (v *Cilium1163Values_Envoy_Affinity_NodeAffinity_RequiredDuringSchedulingIgnoredDuringExecution) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return 
result, nil +} + +type Cilium1163Values_Envoy_Affinity_NodeAffinity struct { + RequiredDuringSchedulingIgnoredDuringExecution Cilium1163Values_Envoy_Affinity_NodeAffinity_RequiredDuringSchedulingIgnoredDuringExecution `mapstructure:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"` +} + +func (v *Cilium1163Values_Envoy_Affinity_NodeAffinity) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Affinity for cilium-envoy. +type Cilium1163Values_Envoy_Affinity struct { + PodAntiAffinity Cilium1163Values_Envoy_Affinity_PodAntiAffinity `mapstructure:"podAntiAffinity,omitempty"` + PodAffinity Cilium1163Values_Envoy_Affinity_PodAffinity `mapstructure:"podAffinity,omitempty"` + NodeAffinity Cilium1163Values_Envoy_Affinity_NodeAffinity `mapstructure:"nodeAffinity,omitempty"` +} + +func (v *Cilium1163Values_Envoy_Affinity) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Node selector for cilium-envoy. 
+type Cilium1163Values_Envoy_NodeSelector struct { + KubernetesIoos string `mapstructure:"kubernetes.io/os,omitempty"` +} + +func (v *Cilium1163Values_Envoy_NodeSelector) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Node tolerations for envoy scheduling to nodes with taints +// ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ +type Cilium1163Values_Envoy_TolerationsItem struct { + // - key: "key" + // operator: "Equal|Exists" + // value: "value" + // effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + Operator string `mapstructure:"operator,omitempty"` +} + +func (v *Cilium1163Values_Envoy_TolerationsItem) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Envoy_Debug_Admin struct { + // -- Enable admin interface for cilium-envoy. + // This is useful for debugging and should not be enabled in production. + Enabled bool `mapstructure:"enabled,omitempty"` + // -- Port number (bound to loopback interface). + // kubectl port-forward can be used to access the admin interface. 
+ Port int64 `mapstructure:"port,omitempty"` +} + +func (v *Cilium1163Values_Envoy_Debug_Admin) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Envoy_Debug struct { + Admin Cilium1163Values_Envoy_Debug_Admin `mapstructure:"admin,omitempty"` +} + +func (v *Cilium1163Values_Envoy_Debug) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Specify the Kubernetes namespace where Prometheus expects to find +// service monitors configured. +// namespace: "" +// -- Relabeling configs for the ServiceMonitor cilium-envoy +// or for cilium-agent with Envoy configured. +type Cilium1163Values_Envoy_Prometheus_ServiceMonitor_RelabelingsItem struct { + // - __meta_kubernetes_pod_node_name + SourceLabels []string `mapstructure:"sourceLabels,omitempty"` + TargetLabel string `mapstructure:"targetLabel,omitempty"` + Replacement string `mapstructure:"replacement,omitempty"` +} + +func (v *Cilium1163Values_Envoy_Prometheus_ServiceMonitor_RelabelingsItem) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Envoy_Prometheus_ServiceMonitor struct { + // -- Enable service monitors. + // This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) + // Note that this setting applies to both cilium-envoy _and_ cilium-agent + // with Envoy enabled. 
+ Enabled bool `mapstructure:"enabled,omitempty"` + // -- Labels to add to ServiceMonitor cilium-envoy + Labels map[string]any `mapstructure:"labels,omitempty"` + // -- Annotations to add to ServiceMonitor cilium-envoy + Annotations map[string]any `mapstructure:"annotations,omitempty"` + // -- Interval for scrape metrics. + Interval string `mapstructure:"interval,omitempty"` + // -- Specify the Kubernetes namespace where Prometheus expects to find + // service monitors configured. + // namespace: "" + // -- Relabeling configs for the ServiceMonitor cilium-envoy + // or for cilium-agent with Envoy configured. + Relabelings []Cilium1163Values_Envoy_Prometheus_ServiceMonitor_RelabelingsItem `mapstructure:"relabelings,omitempty"` + // @schema + // type: [null, array] + // @schema + // -- Metrics relabeling configs for the ServiceMonitor cilium-envoy + // or for cilium-agent with Envoy configured. + MetricRelabelings any `mapstructure:"metricRelabelings,omitempty"` +} + +func (v *Cilium1163Values_Envoy_Prometheus_ServiceMonitor) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Configure Cilium Envoy Prometheus options. +// Note that some of these apply to either cilium-agent or cilium-envoy. 
+type Cilium1163Values_Envoy_Prometheus struct { + // -- Enable prometheus metrics for cilium-envoy + Enabled bool `mapstructure:"enabled,omitempty"` + ServiceMonitor Cilium1163Values_Envoy_Prometheus_ServiceMonitor `mapstructure:"serviceMonitor,omitempty"` + // -- Serve prometheus metrics for cilium-envoy on the configured port + Port string `mapstructure:"port,omitempty"` +} + +func (v *Cilium1163Values_Envoy_Prometheus) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// Configure Cilium Envoy options. +type Cilium1163Values_Envoy struct { + // @schema + // type: [null, boolean] + // @schema + // -- Enable Envoy Proxy in standalone DaemonSet. + // This field is enabled by default for new installation. + // @default -- `true` for new installation + Enabled any `mapstructure:"enabled,omitempty"` + // -- (int) + // Set Envoy'--base-id' to use when allocating shared memory regions. + // Only needs to be changed if multiple Envoy instances will run on the same node and may have conflicts. Supported values: 0 - 4294967295. Defaults to '0' + BaseId int64 `mapstructure:"baseID,omitempty"` + Log Cilium1163Values_Envoy_Log `mapstructure:"log,omitempty"` + // -- Time in seconds after which a TCP connection attempt times out + ConnectTimeoutSeconds int64 `mapstructure:"connectTimeoutSeconds,omitempty"` + // -- ProxyMaxRequestsPerConnection specifies the max_requests_per_connection setting for Envoy + MaxRequestsPerConnection int64 `mapstructure:"maxRequestsPerConnection,omitempty"` + // -- Set Envoy HTTP option max_connection_duration seconds. Default 0 (disable) + MaxConnectionDurationSeconds int64 `mapstructure:"maxConnectionDurationSeconds,omitempty"` + // -- Set Envoy upstream HTTP idle connection timeout seconds. + // Does not apply to connections with pending requests. 
Default 60s + IdleTimeoutDurationSeconds int64 `mapstructure:"idleTimeoutDurationSeconds,omitempty"` + // -- Number of trusted hops regarding the x-forwarded-for and related HTTP headers for the ingress L7 policy enforcement Envoy listeners. + XffNumTrustedHopsL7PolicyIngress int64 `mapstructure:"xffNumTrustedHopsL7PolicyIngress,omitempty"` + // -- Number of trusted hops regarding the x-forwarded-for and related HTTP headers for the egress L7 policy enforcement Envoy listeners. + XffNumTrustedHopsL7PolicyEgress int64 `mapstructure:"xffNumTrustedHopsL7PolicyEgress,omitempty"` + // -- Envoy container image. + Image Cilium1163Values_Envoy_Image `mapstructure:"image,omitempty"` + // -- Additional containers added to the cilium Envoy DaemonSet. + ExtraContainers []any `mapstructure:"extraContainers,omitempty"` + // -- Additional envoy container arguments. + ExtraArgs []any `mapstructure:"extraArgs,omitempty"` + // -- Additional envoy container environment variables. + ExtraEnv []any `mapstructure:"extraEnv,omitempty"` + // -- Additional envoy hostPath mounts. + // - name: host-mnt-data + // mountPath: /host/mnt/data + // hostPath: /mnt/data + // hostPathType: Directory + // readOnly: true + // mountPropagation: HostToContainer + ExtraHostPathMounts []any `mapstructure:"extraHostPathMounts,omitempty"` + // -- Additional envoy volumes. + ExtraVolumes []any `mapstructure:"extraVolumes,omitempty"` + // -- Additional envoy volumeMounts. + ExtraVolumeMounts []any `mapstructure:"extraVolumeMounts,omitempty"` + // -- Configure termination grace period for cilium-envoy DaemonSet. + TerminationGracePeriodSeconds int64 `mapstructure:"terminationGracePeriodSeconds,omitempty"` + // -- TCP port for the health API. 
+ HealthPort int64 `mapstructure:"healthPort,omitempty"` + // -- cilium-envoy update strategy + // ref: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/#updating-a-daemonset + UpdateStrategy Cilium1163Values_Envoy_UpdateStrategy `mapstructure:"updateStrategy,omitempty"` + // -- Roll out cilium envoy pods automatically when configmap is updated. + RollOutPods bool `mapstructure:"rollOutPods,omitempty"` + // -- Annotations to be added to all top-level cilium-envoy objects (resources under templates/cilium-envoy) + Annotations map[string]any `mapstructure:"annotations,omitempty"` + // -- Security Context for cilium-envoy pods. + PodSecurityContext Cilium1163Values_Envoy_PodSecurityContext `mapstructure:"podSecurityContext,omitempty"` + // -- Annotations to be added to envoy pods + PodAnnotations map[string]any `mapstructure:"podAnnotations,omitempty"` + // -- Labels to be added to envoy pods + PodLabels map[string]any `mapstructure:"podLabels,omitempty"` + // -- Envoy resource limits & requests + // ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // limits: + // cpu: 4000m + // memory: 4Gi + // requests: + // cpu: 100m + // memory: 512Mi + Resources map[string]any `mapstructure:"resources,omitempty"` + StartupProbe Cilium1163Values_Envoy_StartupProbe `mapstructure:"startupProbe,omitempty"` + LivenessProbe Cilium1163Values_Envoy_LivenessProbe `mapstructure:"livenessProbe,omitempty"` + ReadinessProbe Cilium1163Values_Envoy_ReadinessProbe `mapstructure:"readinessProbe,omitempty"` + SecurityContext Cilium1163Values_Envoy_SecurityContext `mapstructure:"securityContext,omitempty"` + // -- Affinity for cilium-envoy. + Affinity Cilium1163Values_Envoy_Affinity `mapstructure:"affinity,omitempty"` + // -- Node selector for cilium-envoy. 
+ NodeSelector Cilium1163Values_Envoy_NodeSelector `mapstructure:"nodeSelector,omitempty"` + // -- Node tolerations for envoy scheduling to nodes with taints + // ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + Tolerations []Cilium1163Values_Envoy_TolerationsItem `mapstructure:"tolerations,omitempty"` + // @schema + // type: [null, string] + // @schema + // -- The priority class to use for cilium-envoy. + PriorityClassName any `mapstructure:"priorityClassName,omitempty"` + // @schema + // type: [null, string] + // @schema + // -- DNS policy for Cilium envoy pods. + // Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy + DnsPolicy any `mapstructure:"dnsPolicy,omitempty"` + Debug Cilium1163Values_Envoy_Debug `mapstructure:"debug,omitempty"` + // -- Configure Cilium Envoy Prometheus options. + // Note that some of these apply to either cilium-agent or cilium-envoy. + Prometheus Cilium1163Values_Envoy_Prometheus `mapstructure:"prometheus,omitempty"` +} + +func (v *Cilium1163Values_Envoy) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_ResourceQuotas_Cilium_Hard struct { + // 5k nodes * 2 DaemonSets (Cilium and cilium node init) + Pods string `mapstructure:"pods,omitempty"` +} + +func (v *Cilium1163Values_ResourceQuotas_Cilium_Hard) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_ResourceQuotas_Cilium struct { + Hard Cilium1163Values_ResourceQuotas_Cilium_Hard `mapstructure:"hard,omitempty"` +} + +func (v *Cilium1163Values_ResourceQuotas_Cilium) ToMap() (map[string]any, error) { + var result 
map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_ResourceQuotas_Operator_Hard struct { + // 15 "clusterwide" Cilium Operator pods for HA + Pods string `mapstructure:"pods,omitempty"` +} + +func (v *Cilium1163Values_ResourceQuotas_Operator_Hard) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_ResourceQuotas_Operator struct { + Hard Cilium1163Values_ResourceQuotas_Operator_Hard `mapstructure:"hard,omitempty"` +} + +func (v *Cilium1163Values_ResourceQuotas_Operator) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Enable resource quotas for priority classes used in the cluster. +// Need to document default +// +// sessionAffinity: false +type Cilium1163Values_ResourceQuotas struct { + Enabled bool `mapstructure:"enabled,omitempty"` + Cilium Cilium1163Values_ResourceQuotas_Cilium `mapstructure:"cilium,omitempty"` + Operator Cilium1163Values_ResourceQuotas_Operator `mapstructure:"operator,omitempty"` +} + +func (v *Cilium1163Values_ResourceQuotas) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Base64 encoded PEM values for the CA certificate and private key. +// This can be used as common CA to generate certificates used by hubble and clustermesh components. +// It is neither required nor used when cert-manager is used to generate the certificates. 
+type Cilium1163Values_Tls_Ca struct { + // -- Optional CA cert. If it is provided, it will be used by cilium to + // generate all other certificates. Otherwise, an ephemeral CA is generated. + Cert string `mapstructure:"cert,omitempty"` + // -- Optional CA private key. If it is provided, it will be used by cilium to + // generate all other certificates. Otherwise, an ephemeral CA is generated. + Key string `mapstructure:"key,omitempty"` + // -- Generated certificates validity duration in days. This will be used for auto generated CA. + CertValidityDuration int64 `mapstructure:"certValidityDuration,omitempty"` +} + +func (v *Cilium1163Values_Tls_Ca) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Configure the CA trust bundle used for the validation of the certificates +// leveraged by hubble and clustermesh. When enabled, it overrides the content of the +// 'ca.crt' field of the respective certificates, allowing for CA rotation with no down-time. +type Cilium1163Values_Tls_CaBundle struct { + // -- Enable the use of the CA trust bundle. + Enabled bool `mapstructure:"enabled,omitempty"` + // -- Name of the ConfigMap containing the CA trust bundle. + Name string `mapstructure:"name,omitempty"` + // -- Entry of the ConfigMap containing the CA trust bundle. + Key string `mapstructure:"key,omitempty"` + // -- Use a Secret instead of a ConfigMap. + // If uncommented, creates the ConfigMap and fills it with the specified content. + // Otherwise, the ConfigMap is assumed to be already present in .Release.Namespace. + // + // content: | + // -----BEGIN CERTIFICATE----- + // ... + // -----END CERTIFICATE----- + // -----BEGIN CERTIFICATE----- + // ... 
+ // -----END CERTIFICATE----- + UseSecret bool `mapstructure:"useSecret,omitempty"` +} + +func (v *Cilium1163Values_Tls_CaBundle) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Configure TLS configuration in the agent. +type Cilium1163Values_Tls struct { + // -- This configures how the Cilium agent loads the secrets used TLS-aware CiliumNetworkPolicies + // (namely the secrets referenced by terminatingTLS and originatingTLS). + // Possible values: + // - local + // - k8s + SecretsBackend string `mapstructure:"secretsBackend,omitempty"` + // -- Base64 encoded PEM values for the CA certificate and private key. + // This can be used as common CA to generate certificates used by hubble and clustermesh components. + // It is neither required nor used when cert-manager is used to generate the certificates. + Ca Cilium1163Values_Tls_Ca `mapstructure:"ca,omitempty"` + // -- Configure the CA trust bundle used for the validation of the certificates + // leveraged by hubble and clustermesh. When enabled, it overrides the content of the + // 'ca.crt' field of the respective certificates, allowing for CA rotation with no down-time. + CaBundle Cilium1163Values_Tls_CaBundle `mapstructure:"caBundle,omitempty"` +} + +func (v *Cilium1163Values_Tls) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_WellKnownIdentities struct { + // -- Enable the use of well-known identities. 
+ Enabled bool `mapstructure:"enabled,omitempty"` +} + +func (v *Cilium1163Values_WellKnownIdentities) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Etcd struct { + // -- Enable etcd mode for the agent. + Enabled bool `mapstructure:"enabled,omitempty"` + // -- List of etcd endpoints + // - https://CHANGE-ME:2379 + Endpoints []string `mapstructure:"endpoints,omitempty"` + // -- Enable use of TLS/SSL for connectivity to etcd. + Ssl bool `mapstructure:"ssl,omitempty"` +} + +func (v *Cilium1163Values_Etcd) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- cilium-operator image. +type Cilium1163Values_Operator_Image struct { + // @schema + // type: [null, string] + // @schema + Override any `mapstructure:"override,omitempty"` + Repository string `mapstructure:"repository,omitempty"` + Tag string `mapstructure:"tag,omitempty"` + // operator-generic-digest + GenericDigest string `mapstructure:"genericDigest,omitempty"` + // operator-azure-digest + AzureDigest string `mapstructure:"azureDigest,omitempty"` + // operator-aws-digest + AwsDigest string `mapstructure:"awsDigest,omitempty"` + // operator-alibabacloud-digest + AlibabacloudDigest string `mapstructure:"alibabacloudDigest,omitempty"` + UseDigest bool `mapstructure:"useDigest,omitempty"` + PullPolicy string `mapstructure:"pullPolicy,omitempty"` + Suffix string `mapstructure:"suffix,omitempty"` +} + +func (v *Cilium1163Values_Operator_Image) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return 
result, nil +} + +type Cilium1163Values_Operator_UpdateStrategy_RollingUpdate struct { + // @schema + // type: [integer, string] + // @schema + MaxSurge string `mapstructure:"maxSurge,omitempty"` + // @schema + // type: [integer, string] + // @schema + MaxUnavailable string `mapstructure:"maxUnavailable,omitempty"` +} + +func (v *Cilium1163Values_Operator_UpdateStrategy_RollingUpdate) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- cilium-operator update strategy +type Cilium1163Values_Operator_UpdateStrategy struct { + Type string `mapstructure:"type,omitempty"` + RollingUpdate Cilium1163Values_Operator_UpdateStrategy_RollingUpdate `mapstructure:"rollingUpdate,omitempty"` +} + +func (v *Cilium1163Values_Operator_UpdateStrategy) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Operator_Affinity_PodAntiAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector_MatchLabels struct { + IoCiliumapp string `mapstructure:"io.cilium/app,omitempty"` +} + +func (v *Cilium1163Values_Operator_Affinity_PodAntiAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector_MatchLabels) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Operator_Affinity_PodAntiAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector struct { + MatchLabels Cilium1163Values_Operator_Affinity_PodAntiAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector_MatchLabels 
`mapstructure:"matchLabels,omitempty"` +} + +func (v *Cilium1163Values_Operator_Affinity_PodAntiAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Operator_Affinity_PodAntiAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem struct { + TopologyKey string `mapstructure:"topologyKey,omitempty"` + LabelSelector Cilium1163Values_Operator_Affinity_PodAntiAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector `mapstructure:"labelSelector,omitempty"` +} + +func (v *Cilium1163Values_Operator_Affinity_PodAntiAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Operator_Affinity_PodAntiAffinity struct { + RequiredDuringSchedulingIgnoredDuringExecution []Cilium1163Values_Operator_Affinity_PodAntiAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem `mapstructure:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"` +} + +func (v *Cilium1163Values_Operator_Affinity_PodAntiAffinity) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Affinity for cilium-operator +type Cilium1163Values_Operator_Affinity struct { + PodAntiAffinity Cilium1163Values_Operator_Affinity_PodAntiAffinity `mapstructure:"podAntiAffinity,omitempty"` +} + +func (v *Cilium1163Values_Operator_Affinity) ToMap() (map[string]any, error) { + var result map[string]any + err := 
mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Node labels for cilium-operator pod assignment +// ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector +type Cilium1163Values_Operator_NodeSelector struct { + KubernetesIoos string `mapstructure:"kubernetes.io/os,omitempty"` +} + +func (v *Cilium1163Values_Operator_NodeSelector) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Node tolerations for cilium-operator scheduling to nodes with taints +// ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ +type Cilium1163Values_Operator_TolerationsItem struct { + // - key: "key" + // operator: "Equal|Exists" + // value: "value" + // effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + Operator string `mapstructure:"operator,omitempty"` +} + +func (v *Cilium1163Values_Operator_TolerationsItem) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// PodDisruptionBudget settings +type Cilium1163Values_Operator_PodDisruptionBudget struct { + // -- enable PodDisruptionBudget + // ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + Enabled bool `mapstructure:"enabled,omitempty"` + // @schema + // type: [null, integer, string] + // @schema + // -- Minimum number/percentage of pods that should remain scheduled. 
+ // When it's set, maxUnavailable must be disabled by `maxUnavailable: null` + MinAvailable any `mapstructure:"minAvailable,omitempty"` + // @schema + // type: [null, integer, string] + // @schema + // -- Maximum number/percentage of pods that may be made unavailable + MaxUnavailable int64 `mapstructure:"maxUnavailable,omitempty"` +} + +func (v *Cilium1163Values_Operator_PodDisruptionBudget) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Operator_Pprof struct { + // -- Enable pprof for cilium-operator + Enabled bool `mapstructure:"enabled,omitempty"` + // -- Configure pprof listen address for cilium-operator + Address string `mapstructure:"address,omitempty"` + // -- Configure pprof listen port for cilium-operator + Port int64 `mapstructure:"port,omitempty"` +} + +func (v *Cilium1163Values_Operator_Pprof) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Operator_Prometheus_ServiceMonitor struct { + // -- Enable service monitors. + // This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) + Enabled bool `mapstructure:"enabled,omitempty"` + // -- Labels to add to ServiceMonitor cilium-operator + Labels map[string]any `mapstructure:"labels,omitempty"` + // -- Annotations to add to ServiceMonitor cilium-operator + Annotations map[string]any `mapstructure:"annotations,omitempty"` + // -- jobLabel to add for ServiceMonitor cilium-operator + JobLabel string `mapstructure:"jobLabel,omitempty"` + // -- Interval for scrape metrics. 
+ Interval string `mapstructure:"interval,omitempty"` + // @schema + // type: [null, array] + // @schema + // -- Relabeling configs for the ServiceMonitor cilium-operator + Relabelings any `mapstructure:"relabelings,omitempty"` + // @schema + // type: [null, array] + // @schema + // -- Metrics relabeling configs for the ServiceMonitor cilium-operator + MetricRelabelings any `mapstructure:"metricRelabelings,omitempty"` +} + +func (v *Cilium1163Values_Operator_Prometheus_ServiceMonitor) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Enable prometheus metrics for cilium-operator on the configured port at +// /metrics +type Cilium1163Values_Operator_Prometheus struct { + Enabled bool `mapstructure:"enabled,omitempty"` + Port int64 `mapstructure:"port,omitempty"` + ServiceMonitor Cilium1163Values_Operator_Prometheus_ServiceMonitor `mapstructure:"serviceMonitor,omitempty"` +} + +func (v *Cilium1163Values_Operator_Prometheus) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Grafana dashboards for cilium-operator +// grafana can import dashboards based on the label and value +// ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards +type Cilium1163Values_Operator_Dashboards struct { + Enabled bool `mapstructure:"enabled,omitempty"` + Label string `mapstructure:"label,omitempty"` + // @schema + // type: [null, string] + // @schema + Namespace any `mapstructure:"namespace,omitempty"` + LabelValue string `mapstructure:"labelValue,omitempty"` + Annotations map[string]any `mapstructure:"annotations,omitempty"` +} + +func (v *Cilium1163Values_Operator_Dashboards) ToMap() 
(map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Operator_UnmanagedPodWatcher struct { + // -- Restart any pod that are not managed by Cilium. + Restart bool `mapstructure:"restart,omitempty"` + // -- Interval, in seconds, to check if there are any pods that are not + // managed by Cilium. + IntervalSeconds int64 `mapstructure:"intervalSeconds,omitempty"` +} + +func (v *Cilium1163Values_Operator_UnmanagedPodWatcher) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Operator struct { + // -- Enable the cilium-operator component (required). + Enabled bool `mapstructure:"enabled,omitempty"` + // -- Roll out cilium-operator pods automatically when configmap is updated. + RollOutPods bool `mapstructure:"rollOutPods,omitempty"` + // -- cilium-operator image. + Image Cilium1163Values_Operator_Image `mapstructure:"image,omitempty"` + // -- Number of replicas to run for the cilium-operator deployment + Replicas int64 `mapstructure:"replicas,omitempty"` + // -- The priority class to use for cilium-operator + PriorityClassName string `mapstructure:"priorityClassName,omitempty"` + // -- DNS policy for Cilium operator pods. 
+ // Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy + DnsPolicy string `mapstructure:"dnsPolicy,omitempty"` + // -- cilium-operator update strategy + UpdateStrategy Cilium1163Values_Operator_UpdateStrategy `mapstructure:"updateStrategy,omitempty"` + // -- Affinity for cilium-operator + Affinity Cilium1163Values_Operator_Affinity `mapstructure:"affinity,omitempty"` + // -- Pod topology spread constraints for cilium-operator + // - maxSkew: 1 + // topologyKey: topology.kubernetes.io/zone + // whenUnsatisfiable: DoNotSchedule + TopologySpreadConstraints []any `mapstructure:"topologySpreadConstraints,omitempty"` + // -- Node labels for cilium-operator pod assignment + // ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + NodeSelector Cilium1163Values_Operator_NodeSelector `mapstructure:"nodeSelector,omitempty"` + // -- Node tolerations for cilium-operator scheduling to nodes with taints + // ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + Tolerations []Cilium1163Values_Operator_TolerationsItem `mapstructure:"tolerations,omitempty"` + // -- Additional cilium-operator container arguments. + ExtraArgs []any `mapstructure:"extraArgs,omitempty"` + // -- Additional cilium-operator environment variables. + ExtraEnv []any `mapstructure:"extraEnv,omitempty"` + // -- Additional cilium-operator hostPath mounts. + // - name: host-mnt-data + // mountPath: /host/mnt/data + // hostPath: /mnt/data + // hostPathType: Directory + // readOnly: true + // mountPropagation: HostToContainer + ExtraHostPathMounts []any `mapstructure:"extraHostPathMounts,omitempty"` + // -- Additional cilium-operator volumes. + ExtraVolumes []any `mapstructure:"extraVolumes,omitempty"` + // -- Additional cilium-operator volumeMounts. 
+ ExtraVolumeMounts []any `mapstructure:"extraVolumeMounts,omitempty"` + // -- Annotations to be added to all top-level cilium-operator objects (resources under templates/cilium-operator) + Annotations map[string]any `mapstructure:"annotations,omitempty"` + // -- HostNetwork setting + HostNetwork bool `mapstructure:"hostNetwork,omitempty"` + // -- Security context to be added to cilium-operator pods + PodSecurityContext map[string]any `mapstructure:"podSecurityContext,omitempty"` + // -- Annotations to be added to cilium-operator pods + PodAnnotations map[string]any `mapstructure:"podAnnotations,omitempty"` + // -- Labels to be added to cilium-operator pods + PodLabels map[string]any `mapstructure:"podLabels,omitempty"` + // PodDisruptionBudget settings + PodDisruptionBudget Cilium1163Values_Operator_PodDisruptionBudget `mapstructure:"podDisruptionBudget,omitempty"` + // -- cilium-operator resource limits & requests + // ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // limits: + // cpu: 1000m + // memory: 1Gi + // requests: + // cpu: 100m + // memory: 128Mi + Resources map[string]any `mapstructure:"resources,omitempty"` + // -- Security context to be added to cilium-operator pods + // runAsUser: 0 + SecurityContext map[string]any `mapstructure:"securityContext,omitempty"` + // -- Interval for endpoint garbage collection. + EndpointGcinterval string `mapstructure:"endpointGCInterval,omitempty"` + // -- Interval for cilium node garbage collection. + NodeGcinterval string `mapstructure:"nodeGCInterval,omitempty"` + // -- Interval for identity garbage collection. + IdentityGcinterval string `mapstructure:"identityGCInterval,omitempty"` + // -- Timeout for identity heartbeats. 
+ IdentityHeartbeatTimeout string `mapstructure:"identityHeartbeatTimeout,omitempty"` + Pprof Cilium1163Values_Operator_Pprof `mapstructure:"pprof,omitempty"` + // -- Enable prometheus metrics for cilium-operator on the configured port at + // /metrics + Prometheus Cilium1163Values_Operator_Prometheus `mapstructure:"prometheus,omitempty"` + // -- Grafana dashboards for cilium-operator + // grafana can import dashboards based on the label and value + // ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards + Dashboards Cilium1163Values_Operator_Dashboards `mapstructure:"dashboards,omitempty"` + // -- Skip CRDs creation for cilium-operator + SkipCrdcreation bool `mapstructure:"skipCRDCreation,omitempty"` + // -- Remove Cilium node taint from Kubernetes nodes that have a healthy Cilium + // pod running. + RemoveNodeTaints bool `mapstructure:"removeNodeTaints,omitempty"` + // @schema + // type: [null, boolean] + // @schema + // -- Taint nodes where Cilium is scheduled but not running. This prevents pods + // from being scheduled to nodes where Cilium is not the default CNI provider. + // @default -- same as removeNodeTaints + SetNodeTaints any `mapstructure:"setNodeTaints,omitempty"` + // -- Set Node condition NetworkUnavailable to 'false' with the reason + // 'CiliumIsUp' for nodes that have a healthy Cilium pod. + SetNodeNetworkStatus bool `mapstructure:"setNodeNetworkStatus,omitempty"` + UnmanagedPodWatcher Cilium1163Values_Operator_UnmanagedPodWatcher `mapstructure:"unmanagedPodWatcher,omitempty"` +} + +func (v *Cilium1163Values_Operator) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- node-init image. 
// NOTE(review): these value types appear to be machine-generated (generator.go,
// per this directory's Makefile) — prefer regenerating over hand-editing; confirm.
type Cilium1163Values_Nodeinit_Image struct {
	// @schema
	// type: [null, string]
	// @schema
	Override   any    `mapstructure:"override,omitempty"`
	Repository string `mapstructure:"repository,omitempty"`
	Tag        string `mapstructure:"tag,omitempty"`
	Digest     string `mapstructure:"digest,omitempty"`
	UseDigest  bool   `mapstructure:"useDigest,omitempty"`
	PullPolicy string `mapstructure:"pullPolicy,omitempty"`
}

// ToMap decodes v into a map[string]any via mapstructure; returns an error if
// decoding fails.
func (v *Cilium1163Values_Nodeinit_Image) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- node-init update strategy
type Cilium1163Values_Nodeinit_UpdateStrategy struct {
	Type string `mapstructure:"type,omitempty"`
}

// ToMap decodes v into a map[string]any via mapstructure.
func (v *Cilium1163Values_Nodeinit_UpdateStrategy) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- Node labels for nodeinit pod assignment
// ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
type Cilium1163Values_Nodeinit_NodeSelector struct {
	KubernetesIoos string `mapstructure:"kubernetes.io/os,omitempty"`
}

// ToMap decodes v into a map[string]any via mapstructure.
func (v *Cilium1163Values_Nodeinit_NodeSelector) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- Node tolerations for nodeinit scheduling to nodes with taints
// ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
type Cilium1163Values_Nodeinit_TolerationsItem struct {
	// - key: "key"
	// operator: "Equal|Exists"
	// value: "value"
	// effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
	Operator string `mapstructure:"operator,omitempty"`
}

// ToMap decodes v into a map[string]any via mapstructure.
func (v *Cilium1163Values_Nodeinit_TolerationsItem) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- AppArmorProfile options for the `cilium-node-init` and init containers
type Cilium1163Values_Nodeinit_PodSecurityContext_AppArmorProfile struct {
	Type string `mapstructure:"type,omitempty"`
}

// ToMap decodes v into a map[string]any via mapstructure.
func (v *Cilium1163Values_Nodeinit_PodSecurityContext_AppArmorProfile) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- Security Context for cilium-node-init pods.
type Cilium1163Values_Nodeinit_PodSecurityContext struct {
	// -- AppArmorProfile options for the `cilium-node-init` and init containers
	AppArmorProfile Cilium1163Values_Nodeinit_PodSecurityContext_AppArmorProfile `mapstructure:"appArmorProfile,omitempty"`
}

// ToMap decodes v into a map[string]any via mapstructure.
func (v *Cilium1163Values_Nodeinit_PodSecurityContext) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type Cilium1163Values_Nodeinit_Resources_Requests struct {
	Cpu    string `mapstructure:"cpu,omitempty"`
	Memory string `mapstructure:"memory,omitempty"`
}

// ToMap decodes v into a map[string]any via mapstructure.
func (v *Cilium1163Values_Nodeinit_Resources_Requests) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- nodeinit resource limits & requests
// ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
type Cilium1163Values_Nodeinit_Resources struct {
	Requests Cilium1163Values_Nodeinit_Resources_Requests `mapstructure:"requests,omitempty"`
}

// ToMap decodes v into a map[string]any via mapstructure.
func (v *Cilium1163Values_Nodeinit_Resources) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type Cilium1163Values_Nodeinit_SecurityContext_SeLinuxOptions struct {
	Level string `mapstructure:"level,omitempty"`
	// Running with spc_t since we have removed the privileged mode.
	// Users can change it to a different type as long as they have the
	// type available on the system.
	Type string `mapstructure:"type,omitempty"`
}

// ToMap decodes v into a map[string]any via mapstructure.
func (v *Cilium1163Values_Nodeinit_SecurityContext_SeLinuxOptions) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type Cilium1163Values_Nodeinit_SecurityContext_Capabilities struct {
	// Used in iptables. Consider removing once we are iptables-free
	// - SYS_MODULE
	// Used for nsenter
	// - NET_ADMIN
	// - SYS_ADMIN
	// - SYS_CHROOT
	// - SYS_PTRACE
	Add []string `mapstructure:"add,omitempty"`
}

// ToMap decodes v into a map[string]any via mapstructure.
func (v *Cilium1163Values_Nodeinit_SecurityContext_Capabilities) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- Security context to be added to nodeinit pods.
type Cilium1163Values_Nodeinit_SecurityContext struct {
	Privileged     bool                                                      `mapstructure:"privileged,omitempty"`
	SeLinuxOptions Cilium1163Values_Nodeinit_SecurityContext_SeLinuxOptions `mapstructure:"seLinuxOptions,omitempty"`
	Capabilities   Cilium1163Values_Nodeinit_SecurityContext_Capabilities   `mapstructure:"capabilities,omitempty"`
}

// ToMap decodes v into a map[string]any via mapstructure; returns an error if
// decoding fails.
func (v *Cilium1163Values_Nodeinit_SecurityContext) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- startup offers way to customize startup nodeinit script (pre and post position)
type Cilium1163Values_Nodeinit_Startup struct {
	PreScript  string `mapstructure:"preScript,omitempty"`
	PostScript string `mapstructure:"postScript,omitempty"`
}

// ToMap decodes v into a map[string]any via mapstructure.
func (v *Cilium1163Values_Nodeinit_Startup) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- prestop offers way to customize prestop nodeinit script (pre and post position)
type Cilium1163Values_Nodeinit_Prestop struct {
	PreScript  string `mapstructure:"preScript,omitempty"`
	PostScript string `mapstructure:"postScript,omitempty"`
}

// ToMap decodes v into a map[string]any via mapstructure.
func (v *Cilium1163Values_Nodeinit_Prestop) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type Cilium1163Values_Nodeinit struct {
	// -- Enable the node initialization DaemonSet
	Enabled bool `mapstructure:"enabled,omitempty"`
	// -- node-init image.
	Image Cilium1163Values_Nodeinit_Image `mapstructure:"image,omitempty"`
	// -- The priority class to use for the nodeinit pod.
	PriorityClassName string `mapstructure:"priorityClassName,omitempty"`
	// -- node-init update strategy
	UpdateStrategy Cilium1163Values_Nodeinit_UpdateStrategy `mapstructure:"updateStrategy,omitempty"`
	// -- Additional nodeinit environment variables.
	ExtraEnv []any `mapstructure:"extraEnv,omitempty"`
	// -- Additional nodeinit volumes.
	ExtraVolumes []any `mapstructure:"extraVolumes,omitempty"`
	// -- Additional nodeinit volumeMounts.
	ExtraVolumeMounts []any `mapstructure:"extraVolumeMounts,omitempty"`
	// -- Affinity for cilium-nodeinit
	Affinity map[string]any `mapstructure:"affinity,omitempty"`
	// -- Node labels for nodeinit pod assignment
	// ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
	NodeSelector Cilium1163Values_Nodeinit_NodeSelector `mapstructure:"nodeSelector,omitempty"`
	// -- Node tolerations for nodeinit scheduling to nodes with taints
	// ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
	Tolerations []Cilium1163Values_Nodeinit_TolerationsItem `mapstructure:"tolerations,omitempty"`
	// -- Annotations to be added to all top-level nodeinit objects (resources under templates/cilium-nodeinit)
	Annotations map[string]any `mapstructure:"annotations,omitempty"`
	// -- Annotations to be added to node-init pods.
	PodAnnotations map[string]any `mapstructure:"podAnnotations,omitempty"`
	// -- Labels to be added to node-init pods.
	PodLabels map[string]any `mapstructure:"podLabels,omitempty"`
	// -- Security Context for cilium-node-init pods.
	PodSecurityContext Cilium1163Values_Nodeinit_PodSecurityContext `mapstructure:"podSecurityContext,omitempty"`
	// -- nodeinit resource limits & requests
	// ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
	Resources Cilium1163Values_Nodeinit_Resources `mapstructure:"resources,omitempty"`
	// -- Security context to be added to nodeinit pods.
	SecurityContext Cilium1163Values_Nodeinit_SecurityContext `mapstructure:"securityContext,omitempty"`
	// -- bootstrapFile is the location of the file where the bootstrap timestamp is
	// written by the node-init DaemonSet
	BootstrapFile string `mapstructure:"bootstrapFile,omitempty"`
	// -- startup offers way to customize startup nodeinit script (pre and post position)
	Startup Cilium1163Values_Nodeinit_Startup `mapstructure:"startup,omitempty"`
	// -- prestop offers way to customize prestop nodeinit script (pre and post position)
	Prestop Cilium1163Values_Nodeinit_Prestop `mapstructure:"prestop,omitempty"`
}

// ToMap decodes v into a map[string]any via mapstructure.
func (v *Cilium1163Values_Nodeinit) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- Cilium pre-flight image.
type Cilium1163Values_Preflight_Image struct {
	// @schema
	// type: [null, string]
	// @schema
	Override   any    `mapstructure:"override,omitempty"`
	Repository string `mapstructure:"repository,omitempty"`
	Tag        string `mapstructure:"tag,omitempty"`
	// cilium-digest
	Digest     string `mapstructure:"digest,omitempty"`
	UseDigest  bool   `mapstructure:"useDigest,omitempty"`
	PullPolicy string `mapstructure:"pullPolicy,omitempty"`
}

// ToMap decodes v into a map[string]any via mapstructure.
func (v *Cilium1163Values_Preflight_Image) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- preflight update strategy
type Cilium1163Values_Preflight_UpdateStrategy struct {
	Type string `mapstructure:"type,omitempty"`
}

// ToMap decodes v into a map[string]any via mapstructure.
func (v *Cilium1163Values_Preflight_UpdateStrategy) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type Cilium1163Values_Preflight_Affinity_PodAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector_MatchLabels struct {
	K8SApp string `mapstructure:"k8s-app,omitempty"`
}

// ToMap decodes v into a map[string]any via mapstructure.
func (v *Cilium1163Values_Preflight_Affinity_PodAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector_MatchLabels) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type Cilium1163Values_Preflight_Affinity_PodAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector struct {
	MatchLabels Cilium1163Values_Preflight_Affinity_PodAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector_MatchLabels `mapstructure:"matchLabels,omitempty"`
}

// ToMap decodes v into a map[string]any via mapstructure.
func (v *Cilium1163Values_Preflight_Affinity_PodAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type Cilium1163Values_Preflight_Affinity_PodAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem struct {
	TopologyKey   string                                                                                                      `mapstructure:"topologyKey,omitempty"`
	LabelSelector Cilium1163Values_Preflight_Affinity_PodAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem_LabelSelector `mapstructure:"labelSelector,omitempty"`
}

// ToMap decodes v into a map[string]any via mapstructure.
func (v *Cilium1163Values_Preflight_Affinity_PodAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type Cilium1163Values_Preflight_Affinity_PodAffinity struct {
	RequiredDuringSchedulingIgnoredDuringExecution []Cilium1163Values_Preflight_Affinity_PodAffinity_RequiredDuringSchedulingIgnoredDuringExecutionItem `mapstructure:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"`
}

// ToMap decodes v into a map[string]any via mapstructure.
func (v *Cilium1163Values_Preflight_Affinity_PodAffinity) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- Affinity for cilium-preflight
type Cilium1163Values_Preflight_Affinity struct {
	PodAffinity Cilium1163Values_Preflight_Affinity_PodAffinity `mapstructure:"podAffinity,omitempty"`
}

// ToMap decodes v into a map[string]any via mapstructure.
func (v *Cilium1163Values_Preflight_Affinity) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- Node labels for preflight pod assignment
// ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
type Cilium1163Values_Preflight_NodeSelector struct {
	KubernetesIoos string `mapstructure:"kubernetes.io/os,omitempty"`
}

// ToMap decodes v into a map[string]any via mapstructure.
func (v *Cilium1163Values_Preflight_NodeSelector) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- Node tolerations for preflight scheduling to nodes with taints
// ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
type Cilium1163Values_Preflight_TolerationsItem struct {
	// - key: "key"
	// operator: "Equal|Exists"
	// value: "value"
	// effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
	Operator string `mapstructure:"operator,omitempty"`
}

// ToMap decodes v into a map[string]any via mapstructure.
func (v *Cilium1163Values_Preflight_TolerationsItem) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// PodDisruptionBudget settings
type Cilium1163Values_Preflight_PodDisruptionBudget struct {
	// -- enable PodDisruptionBudget
	// ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
	Enabled bool `mapstructure:"enabled,omitempty"`
	// @schema
	// type: [null, integer, string]
	// @schema
	// -- Minimum number/percentage of pods that should remain scheduled.
	// When it's set, maxUnavailable must be disabled by `maxUnavailable: null`
	MinAvailable any `mapstructure:"minAvailable,omitempty"`
	// @schema
	// type: [null, integer, string]
	// @schema
	// -- Maximum number/percentage of pods that may be made unavailable
	// NOTE(review): the @schema above allows null/integer/string, yet the
	// generated Go type here is int64 (MinAvailable, with the same schema, is
	// `any`) — a percentage string like "50%" cannot be represented; confirm
	// against generator.go whether this narrowing is intended.
	MaxUnavailable int64 `mapstructure:"maxUnavailable,omitempty"`
}

// ToMap decodes v into a map[string]any via mapstructure.
func (v *Cilium1163Values_Preflight_PodDisruptionBudget) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type Cilium1163Values_Preflight_ReadinessProbe struct {
	// -- For how long kubelet should wait before performing the first probe
	InitialDelaySeconds int64 `mapstructure:"initialDelaySeconds,omitempty"`
	// -- interval between checks of the readiness probe
	PeriodSeconds int64 `mapstructure:"periodSeconds,omitempty"`
}

// ToMap decodes v into a map[string]any via mapstructure.
func (v *Cilium1163Values_Preflight_ReadinessProbe) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type Cilium1163Values_Preflight struct {
	// -- Enable Cilium pre-flight resources (required for upgrade)
	Enabled bool `mapstructure:"enabled,omitempty"`
	// -- Cilium pre-flight image.
	Image Cilium1163Values_Preflight_Image `mapstructure:"image,omitempty"`
	// -- The priority class to use for the preflight pod.
	PriorityClassName string `mapstructure:"priorityClassName,omitempty"`
	// -- preflight update strategy
	UpdateStrategy Cilium1163Values_Preflight_UpdateStrategy `mapstructure:"updateStrategy,omitempty"`
	// -- Additional preflight environment variables.
	ExtraEnv []any `mapstructure:"extraEnv,omitempty"`
	// -- Additional preflight volumes.
	ExtraVolumes []any `mapstructure:"extraVolumes,omitempty"`
	// -- Additional preflight volumeMounts.
	ExtraVolumeMounts []any `mapstructure:"extraVolumeMounts,omitempty"`
	// -- Affinity for cilium-preflight
	Affinity Cilium1163Values_Preflight_Affinity `mapstructure:"affinity,omitempty"`
	// -- Node labels for preflight pod assignment
	// ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
	NodeSelector Cilium1163Values_Preflight_NodeSelector `mapstructure:"nodeSelector,omitempty"`
	// -- Node tolerations for preflight scheduling to nodes with taints
	// ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
	Tolerations []Cilium1163Values_Preflight_TolerationsItem `mapstructure:"tolerations,omitempty"`
	// -- Annotations to be added to all top-level preflight objects (resources under templates/cilium-preflight)
	Annotations map[string]any `mapstructure:"annotations,omitempty"`
	// -- Security context to be added to preflight pods.
	PodSecurityContext map[string]any `mapstructure:"podSecurityContext,omitempty"`
	// -- Annotations to be added to preflight pods
	PodAnnotations map[string]any `mapstructure:"podAnnotations,omitempty"`
	// -- Labels to be added to the preflight pod.
	PodLabels map[string]any `mapstructure:"podLabels,omitempty"`
	// PodDisruptionBudget settings
	PodDisruptionBudget Cilium1163Values_Preflight_PodDisruptionBudget `mapstructure:"podDisruptionBudget,omitempty"`
	// -- preflight resource limits & requests
	// ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
	// limits:
	// cpu: 4000m
	// memory: 4Gi
	// requests:
	// cpu: 100m
	// memory: 512Mi
	Resources map[string]any `mapstructure:"resources,omitempty"`
	ReadinessProbe Cilium1163Values_Preflight_ReadinessProbe `mapstructure:"readinessProbe,omitempty"`
	// -- Security context to be added to preflight pods
	// runAsUser: 0
	SecurityContext map[string]any `mapstructure:"securityContext,omitempty"`
	// -- Path to write the `--tofqdns-pre-cache` file to.
	TofqdnsPreCache string `mapstructure:"tofqdnsPreCache,omitempty"`
	// -- Configure termination grace period for preflight Deployment and DaemonSet.
	TerminationGracePeriodSeconds int64 `mapstructure:"terminationGracePeriodSeconds,omitempty"`
	// -- By default we should always validate the installed CNPs before upgrading
	// Cilium. This will make sure the user will have the policies deployed in the
	// cluster with the right schema.
	ValidateCnps bool `mapstructure:"validateCNPs,omitempty"`
}

// ToMap decodes v into a map[string]any via mapstructure.
func (v *Cilium1163Values_Preflight) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- Clustermesh explicit configuration.
type Cilium1163Values_Clustermesh_Config struct {
	// -- Enable the Clustermesh explicit configuration.
	Enabled bool `mapstructure:"enabled,omitempty"`
	// -- Default dns domain for the Clustermesh API servers
	// This is used in the case cluster addresses are not provided
	// and IPs are used.
	Domain string `mapstructure:"domain,omitempty"`
	// -- List of clusters to be peered in the mesh.
	// clusters:
	// # -- Name of the cluster
	// - name: cluster1
	// # -- Address of the cluster, use this if you created DNS records for
	// # the cluster Clustermesh API server.
	// address: cluster1.mesh.cilium.io
	// # -- Port of the cluster Clustermesh API server.
	// port: 2379
	// # -- IPs of the cluster Clustermesh API server, use multiple ones when
	// # you have multiple IPs to access the Clustermesh API server.
	// ips:
	// - 172.18.255.201
	// # -- base64 encoded PEM values for the cluster client certificate, private key and certificate authority.
	// # These fields can (and should) be omitted in case the CA is shared across clusters. In that case, the
	// # "remote" private key and certificate available in the local cluster are automatically used instead.
	// tls:
	// cert: ""
	// key: ""
	// caCert: ""
	Clusters []any `mapstructure:"clusters,omitempty"`
}

// ToMap decodes v into a map[string]any via mapstructure.
func (v *Cilium1163Values_Clustermesh_Config) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// -- Clustermesh API server image.
+type Cilium1163Values_Clustermesh_Apiserver_Image struct { + // @schema + // type: [null, string] + // @schema + Override any `mapstructure:"override,omitempty"` + Repository string `mapstructure:"repository,omitempty"` + Tag string `mapstructure:"tag,omitempty"` + // clustermesh-apiserver-digest + Digest string `mapstructure:"digest,omitempty"` + UseDigest bool `mapstructure:"useDigest,omitempty"` + PullPolicy string `mapstructure:"pullPolicy,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_Image) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Clustermesh_Apiserver_Etcd_SecurityContext_Capabilities struct { + // - ALL + Drop []string `mapstructure:"drop,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_Etcd_SecurityContext_Capabilities) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Security context to be added to clustermesh-apiserver etcd containers +type Cilium1163Values_Clustermesh_Apiserver_Etcd_SecurityContext struct { + AllowPrivilegeEscalation bool `mapstructure:"allowPrivilegeEscalation,omitempty"` + Capabilities Cilium1163Values_Clustermesh_Apiserver_Etcd_SecurityContext_Capabilities `mapstructure:"capabilities,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_Etcd_SecurityContext) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Clustermesh_Apiserver_Etcd_Init struct { + // -- Specifies the resources for etcd init container in the 
apiserver + // requests: + // cpu: 100m + // memory: 100Mi + // limits: + // cpu: 100m + // memory: 100Mi + Resources map[string]any `mapstructure:"resources,omitempty"` + // -- Additional arguments to `clustermesh-apiserver etcdinit`. + ExtraArgs []any `mapstructure:"extraArgs,omitempty"` + // -- Additional environment variables to `clustermesh-apiserver etcdinit`. + ExtraEnv []any `mapstructure:"extraEnv,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_Etcd_Init) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Clustermesh_Apiserver_Etcd struct { + // The etcd binary is included in the clustermesh API server image, so the same image from above is reused. + // Independent override isn't supported, because clustermesh-apiserver is tested against the etcd version it is + // built with. + // + // -- Specifies the resources for etcd container in the apiserver + // requests: + // cpu: 200m + // memory: 256Mi + // limits: + // cpu: 1000m + // memory: 256Mi + Resources map[string]any `mapstructure:"resources,omitempty"` + // -- Security context to be added to clustermesh-apiserver etcd containers + SecurityContext Cilium1163Values_Clustermesh_Apiserver_Etcd_SecurityContext `mapstructure:"securityContext,omitempty"` + // -- lifecycle setting for the etcd container + Lifecycle map[string]any `mapstructure:"lifecycle,omitempty"` + Init Cilium1163Values_Clustermesh_Apiserver_Etcd_Init `mapstructure:"init,omitempty"` + // @schema + // enum: [Disk, Memory] + // @schema + // -- Specifies whether etcd data is stored in a temporary volume backed by + // the node's default medium, such as disk, SSD or network storage (Disk), or + // RAM (Memory). 
The Memory option enables improved etcd read and write + // performance at the cost of additional memory usage, which counts against + // the memory limits of the container. + StorageMedium string `mapstructure:"storageMedium,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_Etcd) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Clustermesh_Apiserver_Kvstoremesh_SecurityContext_Capabilities struct { + // - ALL + Drop []string `mapstructure:"drop,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_Kvstoremesh_SecurityContext_Capabilities) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- KVStoreMesh Security context +type Cilium1163Values_Clustermesh_Apiserver_Kvstoremesh_SecurityContext struct { + AllowPrivilegeEscalation bool `mapstructure:"allowPrivilegeEscalation,omitempty"` + Capabilities Cilium1163Values_Clustermesh_Apiserver_Kvstoremesh_SecurityContext_Capabilities `mapstructure:"capabilities,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_Kvstoremesh_SecurityContext) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Clustermesh_Apiserver_Kvstoremesh struct { + // -- Enable KVStoreMesh. KVStoreMesh caches the information retrieved + // from the remote clusters in the local etcd instance. + Enabled bool `mapstructure:"enabled,omitempty"` + // -- TCP port for the KVStoreMesh health API. 
+ HealthPort int64 `mapstructure:"healthPort,omitempty"` + // -- Configuration for the KVStoreMesh readiness probe. + ReadinessProbe map[string]any `mapstructure:"readinessProbe,omitempty"` + // -- Additional KVStoreMesh arguments. + ExtraArgs []any `mapstructure:"extraArgs,omitempty"` + // -- Additional KVStoreMesh environment variables. + ExtraEnv []any `mapstructure:"extraEnv,omitempty"` + // -- Resource requests and limits for the KVStoreMesh container + // requests: + // cpu: 100m + // memory: 64Mi + // limits: + // cpu: 1000m + // memory: 1024M + Resources map[string]any `mapstructure:"resources,omitempty"` + // -- Additional KVStoreMesh volumeMounts. + ExtraVolumeMounts []any `mapstructure:"extraVolumeMounts,omitempty"` + // -- KVStoreMesh Security context + SecurityContext Cilium1163Values_Clustermesh_Apiserver_Kvstoremesh_SecurityContext `mapstructure:"securityContext,omitempty"` + // -- lifecycle setting for the KVStoreMesh container + Lifecycle map[string]any `mapstructure:"lifecycle,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_Kvstoremesh) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Clustermesh_Apiserver_Service struct { + // -- The type of service used for apiserver access. + Type string `mapstructure:"type,omitempty"` + // -- Optional port to use as the node port for apiserver access. + // + // WARNING: make sure to configure a different NodePort in each cluster if + // kube-proxy replacement is enabled, as Cilium is currently affected by a known + // bug (#24692) when NodePorts are handled by the KPR implementation. 
If a service + // with the same NodePort exists both in the local and the remote cluster, all + // traffic originating from inside the cluster and targeting the corresponding + // NodePort will be redirected to a local backend, regardless of whether the + // destination node belongs to the local or the remote cluster. + NodePort int64 `mapstructure:"nodePort,omitempty"` + // -- Annotations for the clustermesh-apiserver + // For GKE LoadBalancer, use annotation cloud.google.com/load-balancer-type: "Internal" + // For EKS LoadBalancer, use annotation service.beta.kubernetes.io/aws-load-balancer-internal: "true" + Annotations map[string]any `mapstructure:"annotations,omitempty"` + // @schema + // enum: [Local, Cluster] + // @schema + // -- The externalTrafficPolicy of service used for apiserver access. + ExternalTrafficPolicy string `mapstructure:"externalTrafficPolicy,omitempty"` + // @schema + // enum: [Local, Cluster] + // @schema + // -- The internalTrafficPolicy of service used for apiserver access. + InternalTrafficPolicy string `mapstructure:"internalTrafficPolicy,omitempty"` + // @schema + // enum: [HAOnly, Always, Never] + // @schema + // -- Defines when to enable session affinity. + // Each replica in a clustermesh-apiserver deployment runs its own discrete + // etcd cluster. Remote clients connect to one of the replicas through a + // shared Kubernetes Service. A client reconnecting to a different backend + // will require a full resync to ensure data integrity. Session affinity + // can reduce the likelihood of this happening, but may not be supported + // by all cloud providers. + // Possible values: + // - "HAOnly" (default) Only enable session affinity for deployments with more than 1 replica. + // - "Always" Always enable session affinity. + // - "Never" Never enable session affinity. Useful in environments where + // session affinity is not supported, but may lead to slightly + // degraded performance due to more frequent reconnections. 
+ EnableSessionAffinity string `mapstructure:"enableSessionAffinity,omitempty"` + // @schema + // type: [null, string] + // @schema + // -- Configure a loadBalancerClass. + // Allows to configure the loadBalancerClass on the clustermesh-apiserver + // LB service in case the Service type is set to LoadBalancer + // (requires Kubernetes 1.24+). + LoadBalancerClass any `mapstructure:"loadBalancerClass,omitempty"` + // @schema + // type: [null, string] + // @schema + // -- Configure a specific loadBalancerIP. + // Allows to configure a specific loadBalancerIP on the clustermesh-apiserver + // LB service in case the Service type is set to LoadBalancer. + LoadBalancerIp any `mapstructure:"loadBalancerIP,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_Service) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Clustermesh_Apiserver_SecurityContext_Capabilities struct { + // - ALL + Drop []string `mapstructure:"drop,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_SecurityContext_Capabilities) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Security context to be added to clustermesh-apiserver containers +type Cilium1163Values_Clustermesh_Apiserver_SecurityContext struct { + AllowPrivilegeEscalation bool `mapstructure:"allowPrivilegeEscalation,omitempty"` + Capabilities Cilium1163Values_Clustermesh_Apiserver_SecurityContext_Capabilities `mapstructure:"capabilities,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_SecurityContext) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + 
return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Security context to be added to clustermesh-apiserver pods +type Cilium1163Values_Clustermesh_Apiserver_PodSecurityContext struct { + RunAsNonRoot bool `mapstructure:"runAsNonRoot,omitempty"` + RunAsUser int64 `mapstructure:"runAsUser,omitempty"` + RunAsGroup int64 `mapstructure:"runAsGroup,omitempty"` + FsGroup int64 `mapstructure:"fsGroup,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_PodSecurityContext) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// PodDisruptionBudget settings +type Cilium1163Values_Clustermesh_Apiserver_PodDisruptionBudget struct { + // -- enable PodDisruptionBudget + // ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + Enabled bool `mapstructure:"enabled,omitempty"` + // @schema + // type: [null, integer, string] + // @schema + // -- Minimum number/percentage of pods that should remain scheduled. 
+ // When it's set, maxUnavailable must be disabled by `maxUnavailable: null` + MinAvailable any `mapstructure:"minAvailable,omitempty"` + // @schema + // type: [null, integer, string] + // @schema + // -- Maximum number/percentage of pods that may be made unavailable + MaxUnavailable int64 `mapstructure:"maxUnavailable,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_PodDisruptionBudget) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Clustermesh_Apiserver_Affinity_PodAntiAffinity_PreferredDuringSchedulingIgnoredDuringExecutionItem_PodAffinityTerm_LabelSelector_MatchLabels struct { + K8SApp string `mapstructure:"k8s-app,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_Affinity_PodAntiAffinity_PreferredDuringSchedulingIgnoredDuringExecutionItem_PodAffinityTerm_LabelSelector_MatchLabels) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Clustermesh_Apiserver_Affinity_PodAntiAffinity_PreferredDuringSchedulingIgnoredDuringExecutionItem_PodAffinityTerm_LabelSelector struct { + MatchLabels Cilium1163Values_Clustermesh_Apiserver_Affinity_PodAntiAffinity_PreferredDuringSchedulingIgnoredDuringExecutionItem_PodAffinityTerm_LabelSelector_MatchLabels `mapstructure:"matchLabels,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_Affinity_PodAntiAffinity_PreferredDuringSchedulingIgnoredDuringExecutionItem_PodAffinityTerm_LabelSelector) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return 
result, nil +} + +type Cilium1163Values_Clustermesh_Apiserver_Affinity_PodAntiAffinity_PreferredDuringSchedulingIgnoredDuringExecutionItem_PodAffinityTerm struct { + LabelSelector Cilium1163Values_Clustermesh_Apiserver_Affinity_PodAntiAffinity_PreferredDuringSchedulingIgnoredDuringExecutionItem_PodAffinityTerm_LabelSelector `mapstructure:"labelSelector,omitempty"` + TopologyKey string `mapstructure:"topologyKey,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_Affinity_PodAntiAffinity_PreferredDuringSchedulingIgnoredDuringExecutionItem_PodAffinityTerm) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Clustermesh_Apiserver_Affinity_PodAntiAffinity_PreferredDuringSchedulingIgnoredDuringExecutionItem struct { + Weight int64 `mapstructure:"weight,omitempty"` + PodAffinityTerm Cilium1163Values_Clustermesh_Apiserver_Affinity_PodAntiAffinity_PreferredDuringSchedulingIgnoredDuringExecutionItem_PodAffinityTerm `mapstructure:"podAffinityTerm,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_Affinity_PodAntiAffinity_PreferredDuringSchedulingIgnoredDuringExecutionItem) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Clustermesh_Apiserver_Affinity_PodAntiAffinity struct { + PreferredDuringSchedulingIgnoredDuringExecution []Cilium1163Values_Clustermesh_Apiserver_Affinity_PodAntiAffinity_PreferredDuringSchedulingIgnoredDuringExecutionItem `mapstructure:"preferredDuringSchedulingIgnoredDuringExecution,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_Affinity_PodAntiAffinity) ToMap() (map[string]any, error) { + var result map[string]any + err := 
mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Affinity for clustermesh.apiserver +type Cilium1163Values_Clustermesh_Apiserver_Affinity struct { + PodAntiAffinity Cilium1163Values_Clustermesh_Apiserver_Affinity_PodAntiAffinity `mapstructure:"podAntiAffinity,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_Affinity) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Node labels for pod assignment +// ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector +type Cilium1163Values_Clustermesh_Apiserver_NodeSelector struct { + KubernetesIoos string `mapstructure:"kubernetes.io/os,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_NodeSelector) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Clustermesh_Apiserver_UpdateStrategy_RollingUpdate struct { + // @schema + // type: [integer, string] + // @schema + MaxSurge int64 `mapstructure:"maxSurge,omitempty"` + // @schema + // type: [integer, string] + // @schema + MaxUnavailable int64 `mapstructure:"maxUnavailable,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_UpdateStrategy_RollingUpdate) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- clustermesh-apiserver update strategy +type Cilium1163Values_Clustermesh_Apiserver_UpdateStrategy struct { + Type string 
`mapstructure:"type,omitempty"` + RollingUpdate Cilium1163Values_Clustermesh_Apiserver_UpdateStrategy_RollingUpdate `mapstructure:"rollingUpdate,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_UpdateStrategy) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- Configure automatic TLS certificates generation. +// A Kubernetes CronJob is used the generate any +// certificates not provided by the user at installation +// time. +type Cilium1163Values_Clustermesh_Apiserver_Tls_Auto struct { + // -- When set to true, automatically generate a CA and certificates to + // enable mTLS between clustermesh-apiserver and external workload instances. + // If set to false, the certs to be provided by setting appropriate values below. + Enabled bool `mapstructure:"enabled,omitempty"` + // Sets the method to auto-generate certificates. Supported values: + // - helm: This method uses Helm to generate all certificates. + // - cronJob: This method uses a Kubernetes CronJob the generate any + // certificates not provided by the user at installation + // time. + // - certmanager: This method use cert-manager to generate & rotate certificates. + Method string `mapstructure:"method,omitempty"` + // -- Generated certificates validity duration in days. + // -- Schedule for certificates regeneration (regardless of their expiration date). + // Only used if method is "cronJob". If nil, then no recurring job will be created. + // Instead, only the one-shot job is deployed to generate the certificates at + // installation time. + // + // Due to the out-of-band distribution of client certs to external workloads the + // CA is (re)regenerated only if it is not provided as a helm value and the k8s + // secret is manually deleted. + // + // Defaults to none. 
Commented syntax gives midnight of the first day of every + // fourth month. For syntax, see + // https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#schedule-syntax + // schedule: "0 0 1 */4 *" + CertValidityDuration int64 `mapstructure:"certValidityDuration,omitempty"` + // [Example] + // certManagerIssuerRef: + // group: cert-manager.io + // kind: ClusterIssuer + // name: ca-issuer + // -- certmanager issuer used when clustermesh.apiserver.tls.auto.method=certmanager. + CertManagerIssuerRef map[string]any `mapstructure:"certManagerIssuerRef,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_Tls_Auto) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- base64 encoded PEM values for the clustermesh-apiserver server certificate and private key. +// Used if 'auto' is not enabled. +type Cilium1163Values_Clustermesh_Apiserver_Tls_Server struct { + Cert string `mapstructure:"cert,omitempty"` + Key string `mapstructure:"key,omitempty"` + // -- Extra DNS names added to certificate when it's auto generated + ExtraDnsNames []any `mapstructure:"extraDnsNames,omitempty"` + // -- Extra IP addresses added to certificate when it's auto generated + ExtraIpAddresses []any `mapstructure:"extraIpAddresses,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_Tls_Server) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- base64 encoded PEM values for the clustermesh-apiserver admin certificate and private key. +// Used if 'auto' is not enabled. 
+type Cilium1163Values_Clustermesh_Apiserver_Tls_Admin struct { + Cert string `mapstructure:"cert,omitempty"` + Key string `mapstructure:"key,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_Tls_Admin) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- base64 encoded PEM values for the clustermesh-apiserver client certificate and private key. +// Used if 'auto' is not enabled. +type Cilium1163Values_Clustermesh_Apiserver_Tls_Client struct { + Cert string `mapstructure:"cert,omitempty"` + Key string `mapstructure:"key,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_Tls_Client) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- base64 encoded PEM values for the clustermesh-apiserver remote cluster certificate and private key. +// Used if 'auto' is not enabled. +type Cilium1163Values_Clustermesh_Apiserver_Tls_Remote struct { + Cert string `mapstructure:"cert,omitempty"` + Key string `mapstructure:"key,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_Tls_Remote) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Clustermesh_Apiserver_Tls struct { + // -- Configure the clustermesh authentication mode. + // Supported values: + // - legacy: All clusters access remote clustermesh instances with the same + // username (i.e., remote). The "remote" certificate must be + // generated with CN=remote if provided manually. 
+ // - migration: Intermediate mode required to upgrade from legacy to cluster + // (and vice versa) with no disruption. Specifically, it enables + // the creation of the per-cluster usernames, while still using + // the common one for authentication. The "remote" certificate must + // be generated with CN=remote if provided manually (same as legacy). + // - cluster: Each cluster accesses remote etcd instances with a username + // depending on the local cluster name (i.e., remote-). + // The "remote" certificate must be generated with CN=remote- + // if provided manually. Cluster mode is meaningful only when the same + // CA is shared across all clusters part of the mesh. + AuthMode string `mapstructure:"authMode,omitempty"` + // -- Allow users to provide their own certificates + // Users may need to provide their certificates using + // a mechanism that requires they provide their own secrets. + // This setting does not apply to any of the auto-generated + // mechanisms below, it only restricts the creation of secrets + // via the `tls-provided` templates. + EnableSecrets bool `mapstructure:"enableSecrets,omitempty"` + // -- Configure automatic TLS certificates generation. + // A Kubernetes CronJob is used the generate any + // certificates not provided by the user at installation + // time. + Auto Cilium1163Values_Clustermesh_Apiserver_Tls_Auto `mapstructure:"auto,omitempty"` + // -- base64 encoded PEM values for the clustermesh-apiserver server certificate and private key. + // Used if 'auto' is not enabled. + Server Cilium1163Values_Clustermesh_Apiserver_Tls_Server `mapstructure:"server,omitempty"` + // -- base64 encoded PEM values for the clustermesh-apiserver admin certificate and private key. + // Used if 'auto' is not enabled. + Admin Cilium1163Values_Clustermesh_Apiserver_Tls_Admin `mapstructure:"admin,omitempty"` + // -- base64 encoded PEM values for the clustermesh-apiserver client certificate and private key. + // Used if 'auto' is not enabled. 
+ Client Cilium1163Values_Clustermesh_Apiserver_Tls_Client `mapstructure:"client,omitempty"` + // -- base64 encoded PEM values for the clustermesh-apiserver remote cluster certificate and private key. + // Used if 'auto' is not enabled. + Remote Cilium1163Values_Clustermesh_Apiserver_Tls_Remote `mapstructure:"remote,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_Tls) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Clustermesh_Apiserver_Metrics_Kvstoremesh struct { + // -- Enables exporting KVStoreMesh metrics in OpenMetrics format. + Enabled bool `mapstructure:"enabled,omitempty"` + // -- Configure the port the KVStoreMesh metric server listens on. + Port int64 `mapstructure:"port,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_Metrics_Kvstoremesh) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Clustermesh_Apiserver_Metrics_Etcd struct { + // -- Enables exporting etcd metrics in OpenMetrics format. + Enabled bool `mapstructure:"enabled,omitempty"` + // -- Set level of detail for etcd metrics; specify 'extensive' to include server side gRPC histogram metrics. + Mode string `mapstructure:"mode,omitempty"` + // -- Configure the port the etcd metric server listens on. 
+ Port int64 `mapstructure:"port,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_Metrics_Etcd) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Clustermesh_Apiserver_Metrics_ServiceMonitor_Kvstoremesh struct { + // -- Interval for scrape metrics (KVStoreMesh metrics) + Interval string `mapstructure:"interval,omitempty"` + // @schema + // type: [null, array] + // @schema + // -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (KVStoreMesh metrics) + Relabelings any `mapstructure:"relabelings,omitempty"` + // @schema + // type: [null, array] + // @schema + // -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (KVStoreMesh metrics) + MetricRelabelings any `mapstructure:"metricRelabelings,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_Metrics_ServiceMonitor_Kvstoremesh) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Clustermesh_Apiserver_Metrics_ServiceMonitor_Etcd struct { + // -- Interval for scrape metrics (etcd metrics) + Interval string `mapstructure:"interval,omitempty"` + // @schema + // type: [null, array] + // @schema + // -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (etcd metrics) + Relabelings any `mapstructure:"relabelings,omitempty"` + // @schema + // type: [null, array] + // @schema + // -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (etcd metrics) + MetricRelabelings any `mapstructure:"metricRelabelings,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_Metrics_ServiceMonitor_Etcd) ToMap() (map[string]any, error) { + var 
result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Clustermesh_Apiserver_Metrics_ServiceMonitor struct { + // -- Enable service monitor. + // This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) + Enabled bool `mapstructure:"enabled,omitempty"` + // -- Labels to add to ServiceMonitor clustermesh-apiserver + Labels map[string]any `mapstructure:"labels,omitempty"` + // -- Annotations to add to ServiceMonitor clustermesh-apiserver + // -- Specify the Kubernetes namespace where Prometheus expects to find + // service monitors configured. + // namespace: "" + Annotations map[string]any `mapstructure:"annotations,omitempty"` + // -- Interval for scrape metrics (apiserver metrics) + Interval string `mapstructure:"interval,omitempty"` + // @schema + // type: [null, array] + // @schema + // -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (apiserver metrics) + Relabelings any `mapstructure:"relabelings,omitempty"` + // @schema + // type: [null, array] + // @schema + // -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (apiserver metrics) + MetricRelabelings any `mapstructure:"metricRelabelings,omitempty"` + Kvstoremesh Cilium1163Values_Clustermesh_Apiserver_Metrics_ServiceMonitor_Kvstoremesh `mapstructure:"kvstoremesh,omitempty"` + Etcd Cilium1163Values_Clustermesh_Apiserver_Metrics_ServiceMonitor_Etcd `mapstructure:"etcd,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_Metrics_ServiceMonitor) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// 
clustermesh-apiserver Prometheus metrics configuration +type Cilium1163Values_Clustermesh_Apiserver_Metrics struct { + // -- Enables exporting apiserver metrics in OpenMetrics format. + Enabled bool `mapstructure:"enabled,omitempty"` + // -- Configure the port the apiserver metric server listens on. + Port int64 `mapstructure:"port,omitempty"` + Kvstoremesh Cilium1163Values_Clustermesh_Apiserver_Metrics_Kvstoremesh `mapstructure:"kvstoremesh,omitempty"` + Etcd Cilium1163Values_Clustermesh_Apiserver_Metrics_Etcd `mapstructure:"etcd,omitempty"` + ServiceMonitor Cilium1163Values_Clustermesh_Apiserver_Metrics_ServiceMonitor `mapstructure:"serviceMonitor,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver_Metrics) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Cilium1163Values_Clustermesh_Apiserver struct { + // -- Clustermesh API server image. + Image Cilium1163Values_Clustermesh_Apiserver_Image `mapstructure:"image,omitempty"` + // -- TCP port for the clustermesh-apiserver health API. + HealthPort int64 `mapstructure:"healthPort,omitempty"` + // -- Configuration for the clustermesh-apiserver readiness probe. + ReadinessProbe map[string]any `mapstructure:"readinessProbe,omitempty"` + Etcd Cilium1163Values_Clustermesh_Apiserver_Etcd `mapstructure:"etcd,omitempty"` + Kvstoremesh Cilium1163Values_Clustermesh_Apiserver_Kvstoremesh `mapstructure:"kvstoremesh,omitempty"` + Service Cilium1163Values_Clustermesh_Apiserver_Service `mapstructure:"service,omitempty"` + // -- Number of replicas run for the clustermesh-apiserver deployment. 
+ Replicas int64 `mapstructure:"replicas,omitempty"` + // -- lifecycle setting for the apiserver container + Lifecycle map[string]any `mapstructure:"lifecycle,omitempty"` + // -- terminationGracePeriodSeconds for the clustermesh-apiserver deployment + TerminationGracePeriodSeconds int64 `mapstructure:"terminationGracePeriodSeconds,omitempty"` + // -- Additional clustermesh-apiserver arguments. + ExtraArgs []any `mapstructure:"extraArgs,omitempty"` + // -- Additional clustermesh-apiserver environment variables. + ExtraEnv []any `mapstructure:"extraEnv,omitempty"` + // -- Additional clustermesh-apiserver volumes. + ExtraVolumes []any `mapstructure:"extraVolumes,omitempty"` + // -- Additional clustermesh-apiserver volumeMounts. + ExtraVolumeMounts []any `mapstructure:"extraVolumeMounts,omitempty"` + // -- Security context to be added to clustermesh-apiserver containers + SecurityContext Cilium1163Values_Clustermesh_Apiserver_SecurityContext `mapstructure:"securityContext,omitempty"` + // -- Security context to be added to clustermesh-apiserver pods + PodSecurityContext Cilium1163Values_Clustermesh_Apiserver_PodSecurityContext `mapstructure:"podSecurityContext,omitempty"` + // -- Annotations to be added to clustermesh-apiserver pods + PodAnnotations map[string]any `mapstructure:"podAnnotations,omitempty"` + // -- Labels to be added to clustermesh-apiserver pods + PodLabels map[string]any `mapstructure:"podLabels,omitempty"` + // PodDisruptionBudget settings + PodDisruptionBudget Cilium1163Values_Clustermesh_Apiserver_PodDisruptionBudget `mapstructure:"podDisruptionBudget,omitempty"` + // -- Resource requests and limits for the clustermesh-apiserver + // requests: + // cpu: 100m + // memory: 64Mi + // limits: + // cpu: 1000m + // memory: 1024M + Resources map[string]any `mapstructure:"resources,omitempty"` + // -- Affinity for clustermesh.apiserver + Affinity Cilium1163Values_Clustermesh_Apiserver_Affinity `mapstructure:"affinity,omitempty"` + // -- Pod topology spread 
constraints for clustermesh-apiserver + // - maxSkew: 1 + // topologyKey: topology.kubernetes.io/zone + // whenUnsatisfiable: DoNotSchedule + TopologySpreadConstraints []any `mapstructure:"topologySpreadConstraints,omitempty"` + // -- Node labels for pod assignment + // ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + NodeSelector Cilium1163Values_Clustermesh_Apiserver_NodeSelector `mapstructure:"nodeSelector,omitempty"` + // -- Node tolerations for pod assignment on nodes with taints + // ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + Tolerations []any `mapstructure:"tolerations,omitempty"` + // -- clustermesh-apiserver update strategy + UpdateStrategy Cilium1163Values_Clustermesh_Apiserver_UpdateStrategy `mapstructure:"updateStrategy,omitempty"` + // -- The priority class to use for clustermesh-apiserver + PriorityClassName string `mapstructure:"priorityClassName,omitempty"` + Tls Cilium1163Values_Clustermesh_Apiserver_Tls `mapstructure:"tls,omitempty"` + // clustermesh-apiserver Prometheus metrics configuration + Metrics Cilium1163Values_Clustermesh_Apiserver_Metrics `mapstructure:"metrics,omitempty"` +} + +func (v *Cilium1163Values_Clustermesh_Apiserver) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// disableEnvoyVersionCheck removes the check for Envoy, which can be useful +// on AArch64 as the images do not currently ship a version of Envoy. +// disableEnvoyVersionCheck: false +type Cilium1163Values_Clustermesh struct { + // -- Deploy clustermesh-apiserver for clustermesh + UseApiserver bool `mapstructure:"useAPIServer,omitempty"` + // -- The maximum number of clusters to support in a ClusterMesh. 
This value
+ // cannot be changed on running clusters, and all clusters in a ClusterMesh
+ // must be configured with the same value. Values > 255 will decrease the
+ // maximum allocatable cluster-local identities.
+ // Supported values are 255 and 511.
+ MaxConnectedClusters int64 `mapstructure:"maxConnectedClusters,omitempty"`
+ // -- Enable the synchronization of Kubernetes EndpointSlices corresponding to
+ // the remote endpoints of appropriately-annotated global services through ClusterMesh
+ EnableEndpointSliceSynchronization bool `mapstructure:"enableEndpointSliceSynchronization,omitempty"`
+ // -- Enable Multi-Cluster Services API support
+ EnableMcsapisupport bool `mapstructure:"enableMCSAPISupport,omitempty"`
+ // -- Annotations to be added to all top-level clustermesh objects (resources under templates/clustermesh-apiserver and templates/clustermesh-config)
+ Annotations map[string]any `mapstructure:"annotations,omitempty"`
+ // -- Clustermesh explicit configuration.
+ Config Cilium1163Values_Clustermesh_Config `mapstructure:"config,omitempty"`
+ Apiserver Cilium1163Values_Clustermesh_Apiserver `mapstructure:"apiserver,omitempty"`
+}
+
+func (v *Cilium1163Values_Clustermesh) ToMap() (map[string]any, error) {
+ var result map[string]any
+ err := mapstructure.Decode(v, &result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
+ }
+ return result, nil
+}
+
+// -- Configure external workloads support
+type Cilium1163Values_ExternalWorkloads struct {
+ // -- Enable support for external workloads, such as VMs (false by default).
+ Enabled bool `mapstructure:"enabled,omitempty"`
+}
+
+func (v *Cilium1163Values_ExternalWorkloads) ToMap() (map[string]any, error) {
+ var result map[string]any
+ err := mapstructure.Decode(v, &result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
+ }
+ return result, nil
+}
+
+type Cilium1163Values_Cgroup_AutoMount struct {
+ // -- Enable auto mount of cgroup2 filesystem.
+ // When `autoMount` is enabled, cgroup2 filesystem is mounted at
+ // `cgroup.hostRoot` path on the underlying host and inside the cilium agent pod.
+ // If users disable `autoMount`, it's expected that users have mounted
+ // cgroup2 filesystem at the specified `cgroup.hostRoot` volume, and then the
+ // volume will be mounted inside the cilium agent pod at the same path.
+ Enabled bool `mapstructure:"enabled,omitempty"`
+ // -- Init Container Cgroup Automount resource limits & requests
+ // limits:
+ // cpu: 100m
+ // memory: 128Mi
+ // requests:
+ // cpu: 100m
+ // memory: 128Mi
+ Resources map[string]any `mapstructure:"resources,omitempty"`
+}
+
+func (v *Cilium1163Values_Cgroup_AutoMount) ToMap() (map[string]any, error) {
+ var result map[string]any
+ err := mapstructure.Decode(v, &result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
+ }
+ return result, nil
+}
+
+// -- Configure cgroup related configuration
+type Cilium1163Values_Cgroup struct {
+ AutoMount Cilium1163Values_Cgroup_AutoMount `mapstructure:"autoMount,omitempty"`
+ // -- Configure cgroup root where cgroup2 filesystem is mounted on the host (see also: `cgroup.autoMount`)
+ HostRoot string `mapstructure:"hostRoot,omitempty"`
+}
+
+func (v *Cilium1163Values_Cgroup) ToMap() (map[string]any, error) {
+ var result map[string]any
+ err := mapstructure.Decode(v, &result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
+ }
+ return result, nil
+}
+
+// -- Configure sysctl override described in #20072.
+type Cilium1163Values_Sysctlfix struct {
+ // -- Enable the sysctl override. When enabled, the init container will mount the /proc of the host so that the `sysctlfix` utility can execute.
+ Enabled bool `mapstructure:"enabled,omitempty"`
+}
+
+func (v *Cilium1163Values_Sysctlfix) ToMap() (map[string]any, error) {
+ var result map[string]any
+ err := mapstructure.Decode(v, &result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
+ }
+ return result, nil
+}
+
+type Cilium1163Values_DnsProxy struct {
+ // -- Timeout (in seconds) when closing the connection between the DNS proxy and the upstream server. If set to 0, the connection is closed immediately (with TCP RST). If set to -1, the connection is closed asynchronously in the background.
+ SocketLingerTimeout int64 `mapstructure:"socketLingerTimeout,omitempty"`
+ // -- DNS response code for rejecting DNS requests, available options are '[nameError refused]'.
+ DnsRejectResponseCode string `mapstructure:"dnsRejectResponseCode,omitempty"`
+ // -- Allow the DNS proxy to compress responses to endpoints that are larger than 512 Bytes or the EDNS0 option, if present.
+ EnableDnsCompression bool `mapstructure:"enableDnsCompression,omitempty"`
+ // -- Maximum number of IPs to maintain per FQDN name for each endpoint.
+ EndpointMaxIpPerHostname int64 `mapstructure:"endpointMaxIpPerHostname,omitempty"`
+ // -- Time during which idle but previously active connections with expired DNS lookups are still considered alive.
+ IdleConnectionGracePeriod string `mapstructure:"idleConnectionGracePeriod,omitempty"`
+ // -- Maximum number of IPs to retain for expired DNS lookups with still-active connections.
+ MaxDeferredConnectionDeletes int64 `mapstructure:"maxDeferredConnectionDeletes,omitempty"`
+ // -- The minimum time, in seconds, to use DNS data for toFQDNs policies. If
+ // the upstream DNS server returns a DNS record with a shorter TTL, Cilium
+ // overwrites the TTL with this value. Setting this value to zero means that
+ // Cilium will honor the TTLs returned by the upstream DNS server.
+ MinTtl int64 `mapstructure:"minTtl,omitempty"`
+ // -- DNS cache data at this path is preloaded on agent startup.
+ PreCache string `mapstructure:"preCache,omitempty"`
+ // -- Global port on which the in-agent DNS proxy should listen. Default 0 is a OS-assigned port.
+ ProxyPort int64 `mapstructure:"proxyPort,omitempty"`
+ // -- The maximum time the DNS proxy holds an allowed DNS response before sending it along. Responses are sent as soon as the datapath is updated with the new IP information.
+ // -- DNS proxy operation mode (true/false, or unset to use version dependent defaults)
+ // enableTransparentMode: true
+ ProxyResponseMaxDelay string `mapstructure:"proxyResponseMaxDelay,omitempty"`
+}
+
+func (v *Cilium1163Values_DnsProxy) ToMap() (map[string]any, error) {
+ var result map[string]any
+ err := mapstructure.Decode(v, &result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
+ }
+ return result, nil
+}
+
+// -- SCTP Configuration Values
+type Cilium1163Values_Sctp struct {
+ // -- Enable SCTP support. NOTE: Currently, SCTP support does not support rewriting ports or multihoming.
+ Enabled bool `mapstructure:"enabled,omitempty"`
+}
+
+func (v *Cilium1163Values_Sctp) ToMap() (map[string]any, error) {
+ var result map[string]any
+ err := mapstructure.Decode(v, &result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
+ }
+ return result, nil
+}
+
+// -- init container image of SPIRE agent and server
+type Cilium1163Values_Authentication_Mutual_Spire_Install_InitImage struct {
+ // @schema
+ // type: [null, string]
+ // @schema
+ Override any `mapstructure:"override,omitempty"`
+ Repository string `mapstructure:"repository,omitempty"`
+ Tag string `mapstructure:"tag,omitempty"`
+ Digest string `mapstructure:"digest,omitempty"`
+ UseDigest bool `mapstructure:"useDigest,omitempty"`
+ PullPolicy string `mapstructure:"pullPolicy,omitempty"`
+}
+
+func (v *Cilium1163Values_Authentication_Mutual_Spire_Install_InitImage) ToMap() (map[string]any, error) {
+ var result map[string]any
+ err := mapstructure.Decode(v, &result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
+ }
+ return result, nil
+}
+
+// -- SPIRE agent image
+type Cilium1163Values_Authentication_Mutual_Spire_Install_Agent_Image struct {
+ // @schema
+ // type: [null, string]
+ // @schema
+ Override any `mapstructure:"override,omitempty"`
+ Repository string `mapstructure:"repository,omitempty"`
+ Tag string `mapstructure:"tag,omitempty"`
+ Digest string `mapstructure:"digest,omitempty"`
+ UseDigest bool `mapstructure:"useDigest,omitempty"`
+ PullPolicy string `mapstructure:"pullPolicy,omitempty"`
+}
+
+func (v *Cilium1163Values_Authentication_Mutual_Spire_Install_Agent_Image) ToMap() (map[string]any, error) {
+ var result map[string]any
+ err := mapstructure.Decode(v, &result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
+ }
+ return result, nil
+}
+
+// -- SPIRE agent service account
+type Cilium1163Values_Authentication_Mutual_Spire_Install_Agent_ServiceAccount struct {
+ Create bool `mapstructure:"create,omitempty"`
+ Name string `mapstructure:"name,omitempty"`
+}
+
+func (v *Cilium1163Values_Authentication_Mutual_Spire_Install_Agent_ServiceAccount) ToMap() (map[string]any, error) {
+ var result map[string]any
+ err := mapstructure.Decode(v, &result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
+ }
+ return result, nil
+}
+
+// -- SPIRE agent tolerations configuration
+// By default it follows the same tolerations as the agent itself
+// to allow the Cilium agent on this node to connect to SPIRE.
+// ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+type Cilium1163Values_Authentication_Mutual_Spire_Install_Agent_TolerationsItem struct {
+ Key string `mapstructure:"key,omitempty"`
+ Effect string `mapstructure:"effect,omitempty"`
+}
+
+func (v *Cilium1163Values_Authentication_Mutual_Spire_Install_Agent_TolerationsItem) ToMap() (map[string]any, error) {
+ var result map[string]any
+ err := mapstructure.Decode(v, &result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
+ }
+ return result, nil
+}
+
+// SPIRE agent configuration
+type Cilium1163Values_Authentication_Mutual_Spire_Install_Agent struct {
+ // -- SPIRE agent image
+ Image Cilium1163Values_Authentication_Mutual_Spire_Install_Agent_Image `mapstructure:"image,omitempty"`
+ // -- SPIRE agent service account
+ ServiceAccount Cilium1163Values_Authentication_Mutual_Spire_Install_Agent_ServiceAccount `mapstructure:"serviceAccount,omitempty"`
+ // -- SPIRE agent annotations
+ Annotations map[string]any `mapstructure:"annotations,omitempty"`
+ // -- SPIRE agent labels
+ Labels map[string]any `mapstructure:"labels,omitempty"`
+ // -- SPIRE Workload Attestor kubelet verification.
+ SkipKubeletVerification bool `mapstructure:"skipKubeletVerification,omitempty"`
+ // -- SPIRE agent tolerations configuration
+ // By default it follows the same tolerations as the agent itself
+ // to allow the Cilium agent on this node to connect to SPIRE.
+ // ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ Tolerations []Cilium1163Values_Authentication_Mutual_Spire_Install_Agent_TolerationsItem `mapstructure:"tolerations,omitempty"`
+ // -- SPIRE agent affinity configuration
+ Affinity map[string]any `mapstructure:"affinity,omitempty"`
+ // -- SPIRE agent nodeSelector configuration
+ // ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+ NodeSelector map[string]any `mapstructure:"nodeSelector,omitempty"`
+ // -- Security context to be added to spire agent pods.
+ // SecurityContext holds pod-level security attributes and common container settings.
+ // ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+ PodSecurityContext map[string]any `mapstructure:"podSecurityContext,omitempty"`
+ // -- Security context to be added to spire agent containers.
+ // SecurityContext holds pod-level security attributes and common container settings.
+ // ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+ SecurityContext map[string]any `mapstructure:"securityContext,omitempty"`
+}
+
+func (v *Cilium1163Values_Authentication_Mutual_Spire_Install_Agent) ToMap() (map[string]any, error) {
+ var result map[string]any
+ err := mapstructure.Decode(v, &result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
+ }
+ return result, nil
+}
+
+// -- SPIRE server image
+type Cilium1163Values_Authentication_Mutual_Spire_Install_Server_Image struct {
+ // @schema
+ // type: [null, string]
+ // @schema
+ Override any `mapstructure:"override,omitempty"`
+ Repository string `mapstructure:"repository,omitempty"`
+ Tag string `mapstructure:"tag,omitempty"`
+ Digest string `mapstructure:"digest,omitempty"`
+ UseDigest bool `mapstructure:"useDigest,omitempty"`
+ PullPolicy string `mapstructure:"pullPolicy,omitempty"`
+}
+
+func (v *Cilium1163Values_Authentication_Mutual_Spire_Install_Server_Image) ToMap() (map[string]any, error) {
+ var result map[string]any
+ err := mapstructure.Decode(v, &result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
+ }
+ return result, nil
+}
+
+// -- SPIRE server service account
+type Cilium1163Values_Authentication_Mutual_Spire_Install_Server_ServiceAccount struct {
+ Create bool `mapstructure:"create,omitempty"`
+ Name string `mapstructure:"name,omitempty"`
+}
+
+func (v *Cilium1163Values_Authentication_Mutual_Spire_Install_Server_ServiceAccount) ToMap() (map[string]any, error) {
+ var result map[string]any
+ err := mapstructure.Decode(v, &result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
+ }
+ return result, nil
+}
+
+// SPIRE server service configuration
+type Cilium1163Values_Authentication_Mutual_Spire_Install_Server_Service struct {
+ // -- Service type for the SPIRE server service
+ Type string `mapstructure:"type,omitempty"`
+ // -- Annotations to be added to the SPIRE server service
+ Annotations map[string]any `mapstructure:"annotations,omitempty"`
+ // -- Labels to be added to the SPIRE server service
+ Labels map[string]any `mapstructure:"labels,omitempty"`
+}
+
+func (v *Cilium1163Values_Authentication_Mutual_Spire_Install_Server_Service) ToMap() (map[string]any, error) {
+ var result map[string]any
+ err := mapstructure.Decode(v, &result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
+ }
+ return result, nil
+}
+
+// SPIRE server datastorage configuration
+type Cilium1163Values_Authentication_Mutual_Spire_Install_Server_DataStorage struct {
+ // -- Enable SPIRE server data storage
+ Enabled bool `mapstructure:"enabled,omitempty"`
+ // -- Size of the SPIRE server data storage
+ Size string `mapstructure:"size,omitempty"`
+ // -- Access mode of the SPIRE server data storage
+ AccessMode string `mapstructure:"accessMode,omitempty"`
+ // @schema
+ // type: [null, string]
+ // @schema
+ // -- StorageClass of the SPIRE server data storage
+ StorageClass any `mapstructure:"storageClass,omitempty"`
+}
+
+func (v *Cilium1163Values_Authentication_Mutual_Spire_Install_Server_DataStorage) ToMap() (map[string]any, error) {
+ var result map[string]any
+ err := mapstructure.Decode(v, &result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
+ }
+ return result, nil
+}
+
+// -- SPIRE CA Subject
+type Cilium1163Values_Authentication_Mutual_Spire_Install_Server_Ca_Subject struct {
+ Country string `mapstructure:"country,omitempty"`
+ Organization string `mapstructure:"organization,omitempty"`
+ CommonName string `mapstructure:"commonName,omitempty"`
+}
+
+func (v *Cilium1163Values_Authentication_Mutual_Spire_Install_Server_Ca_Subject) ToMap() (map[string]any, error) {
+ var result map[string]any
+ err := mapstructure.Decode(v, &result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
+ }
+ return result, nil
+}
+
+// SPIRE CA configuration
+type Cilium1163Values_Authentication_Mutual_Spire_Install_Server_Ca struct {
+ // -- SPIRE CA key type
+ // AWS requires the use of RSA. EC cryptography is not supported
+ KeyType string `mapstructure:"keyType,omitempty"`
+ // -- SPIRE CA Subject
+ Subject Cilium1163Values_Authentication_Mutual_Spire_Install_Server_Ca_Subject `mapstructure:"subject,omitempty"`
+}
+
+func (v *Cilium1163Values_Authentication_Mutual_Spire_Install_Server_Ca) ToMap() (map[string]any, error) {
+ var result map[string]any
+ err := mapstructure.Decode(v, &result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
+ }
+ return result, nil
+}
+
+type Cilium1163Values_Authentication_Mutual_Spire_Install_Server struct {
+ // -- SPIRE server image
+ Image Cilium1163Values_Authentication_Mutual_Spire_Install_Server_Image `mapstructure:"image,omitempty"`
+ // -- SPIRE server service account
+ ServiceAccount Cilium1163Values_Authentication_Mutual_Spire_Install_Server_ServiceAccount `mapstructure:"serviceAccount,omitempty"`
+ // -- SPIRE server init containers
+ InitContainers []any `mapstructure:"initContainers,omitempty"`
+ // -- SPIRE server annotations
+ Annotations map[string]any `mapstructure:"annotations,omitempty"`
+ // -- SPIRE server labels
+ Labels map[string]any `mapstructure:"labels,omitempty"`
+ // SPIRE server service configuration
+ Service Cilium1163Values_Authentication_Mutual_Spire_Install_Server_Service `mapstructure:"service,omitempty"`
+ // -- SPIRE server affinity configuration
+ Affinity map[string]any `mapstructure:"affinity,omitempty"`
+ // -- SPIRE server nodeSelector configuration
+ // ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
+ NodeSelector map[string]any `mapstructure:"nodeSelector,omitempty"`
+ // -- SPIRE server tolerations configuration
+ // ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ Tolerations []any `mapstructure:"tolerations,omitempty"`
+ // SPIRE server datastorage configuration
+ DataStorage Cilium1163Values_Authentication_Mutual_Spire_Install_Server_DataStorage `mapstructure:"dataStorage,omitempty"`
+ // -- Security context to be added to spire server pods.
+ // SecurityContext holds pod-level security attributes and common container settings.
+ // ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+ PodSecurityContext map[string]any `mapstructure:"podSecurityContext,omitempty"`
+ // -- Security context to be added to spire server containers.
+ // SecurityContext holds pod-level security attributes and common container settings.
+ // ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+ SecurityContext map[string]any `mapstructure:"securityContext,omitempty"`
+ // SPIRE CA configuration
+ Ca Cilium1163Values_Authentication_Mutual_Spire_Install_Server_Ca `mapstructure:"ca,omitempty"`
+}
+
+func (v *Cilium1163Values_Authentication_Mutual_Spire_Install_Server) ToMap() (map[string]any, error) {
+ var result map[string]any
+ err := mapstructure.Decode(v, &result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
+ }
+ return result, nil
+}
+
+// Settings to control the SPIRE installation and configuration
+type Cilium1163Values_Authentication_Mutual_Spire_Install struct {
+ // -- Enable SPIRE installation.
+ // This will only take effect if authentication.mutual.spire.enabled is true
+ Enabled bool `mapstructure:"enabled,omitempty"`
+ // -- SPIRE namespace to install into
+ Namespace string `mapstructure:"namespace,omitempty"`
+ // -- SPIRE namespace already exists. Set to true if Helm should not create, manage, and import the SPIRE namespace.
+ ExistingNamespace bool `mapstructure:"existingNamespace,omitempty"`
+ // -- init container image of SPIRE agent and server
+ InitImage Cilium1163Values_Authentication_Mutual_Spire_Install_InitImage `mapstructure:"initImage,omitempty"`
+ // SPIRE agent configuration
+ Agent Cilium1163Values_Authentication_Mutual_Spire_Install_Agent `mapstructure:"agent,omitempty"`
+ Server Cilium1163Values_Authentication_Mutual_Spire_Install_Server `mapstructure:"server,omitempty"`
+}
+
+func (v *Cilium1163Values_Authentication_Mutual_Spire_Install) ToMap() (map[string]any, error) {
+ var result map[string]any
+ err := mapstructure.Decode(v, &result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
+ }
+ return result, nil
+}
+
+// Settings for SPIRE
+type Cilium1163Values_Authentication_Mutual_Spire struct {
+ // -- Enable SPIRE integration (beta)
+ Enabled bool `mapstructure:"enabled,omitempty"`
+ // -- Annotations to be added to all top-level spire objects (resources under templates/spire)
+ Annotations map[string]any `mapstructure:"annotations,omitempty"`
+ // Settings to control the SPIRE installation and configuration
+ Install Cilium1163Values_Authentication_Mutual_Spire_Install `mapstructure:"install,omitempty"`
+ // @schema
+ // type: [null, string]
+ // @schema
+ // -- SPIRE server address used by Cilium Operator
+ //
+ // If k8s Service DNS along with port number is used (e.g. ..svc(.*): format),
+ // Cilium Operator will resolve its address by looking up the clusterIP from Service resource.
+ //
+ // Example values: 10.0.0.1:8081, spire-server.cilium-spire.svc:8081
+ ServerAddress any `mapstructure:"serverAddress,omitempty"`
+ // -- SPIFFE trust domain to use for fetching certificates
+ TrustDomain string `mapstructure:"trustDomain,omitempty"`
+ // -- SPIRE socket path where the SPIRE delegated api agent is listening
+ AdminSocketPath string `mapstructure:"adminSocketPath,omitempty"`
+ // -- SPIRE socket path where the SPIRE workload agent is listening.
+ // Applies to both the Cilium Agent and Operator
+ AgentSocketPath string `mapstructure:"agentSocketPath,omitempty"`
+ // -- SPIRE connection timeout
+ ConnectionTimeout string `mapstructure:"connectionTimeout,omitempty"`
+}
+
+func (v *Cilium1163Values_Authentication_Mutual_Spire) ToMap() (map[string]any, error) {
+ var result map[string]any
+ err := mapstructure.Decode(v, &result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
+ }
+ return result, nil
+}
+
+// Configuration for Cilium's service-to-service mutual authentication using TLS handshakes.
+// Note that this is not full mTLS support without also enabling encryption of some form.
+// Current encryption options are WireGuard or IPsec, configured in encryption block above.
+type Cilium1163Values_Authentication_Mutual struct {
+ // -- Port on the agent where mutual authentication handshakes between agents will be performed
+ Port int64 `mapstructure:"port,omitempty"`
+ // -- Timeout for connecting to the remote node TCP socket
+ ConnectTimeout string `mapstructure:"connectTimeout,omitempty"`
+ // Settings for SPIRE
+ Spire Cilium1163Values_Authentication_Mutual_Spire `mapstructure:"spire,omitempty"`
+}
+
+func (v *Cilium1163Values_Authentication_Mutual) ToMap() (map[string]any, error) {
+ var result map[string]any
+ err := mapstructure.Decode(v, &result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
+ }
+ return result, nil
+}
+
+// Configuration for types of authentication for Cilium (beta)
+type Cilium1163Values_Authentication struct {
+ // -- Enable authentication processing and garbage collection.
+ // Note that if disabled, policy enforcement will still block requests that require authentication.
+ // But the resulting authentication requests for these requests will not be processed, therefore the requests will not be allowed.
+ Enabled bool `mapstructure:"enabled,omitempty"`
+ // -- Buffer size of the channel Cilium uses to receive authentication events from the signal map.
+ QueueSize int64 `mapstructure:"queueSize,omitempty"`
+ // -- Buffer size of the channel Cilium uses to receive certificate expiration events from auth handlers.
+ RotatedIdentitiesQueueSize int64 `mapstructure:"rotatedIdentitiesQueueSize,omitempty"`
+ // -- Interval for garbage collection of auth map entries.
+ GcInterval string `mapstructure:"gcInterval,omitempty"`
+ // Configuration for Cilium's service-to-service mutual authentication using TLS handshakes.
+ // Note that this is not full mTLS support without also enabling encryption of some form.
+ // Current encryption options are WireGuard or IPsec, configured in encryption block above.
+ Mutual Cilium1163Values_Authentication_Mutual `mapstructure:"mutual,omitempty"`
+}
+
+func (v *Cilium1163Values_Authentication) ToMap() (map[string]any, error) {
+ var result map[string]any
+ err := mapstructure.Decode(v, &result)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
+ }
+ return result, nil
+}
+
+// Cilium1163Values represents the values of the cilium-1.16.3_values.yaml chart
+type Cilium1163Values struct {
+ // @schema
+ // type: [null, string]
+ // @schema
+ // -- upgradeCompatibility helps users upgrading to ensure that the configMap for
+ // Cilium will not change critical values to ensure continued operation
+ // This flag is not required for new installations.
+ // For example: '1.7', '1.8', '1.9'
+ UpgradeCompatibility any `mapstructure:"upgradeCompatibility,omitempty"`
+ Debug Cilium1163Values_Debug `mapstructure:"debug,omitempty"`
+ Rbac Cilium1163Values_Rbac `mapstructure:"rbac,omitempty"`
+ // -- Configure image pull secrets for pulling container images
+ // - name: "image-pull-secret"
+ ImagePullSecrets []any `mapstructure:"imagePullSecrets,omitempty"`
+ // -- (string) Kubernetes config path
+ // @default -- `"~/.kube/config"`
+ KubeConfigPath string `mapstructure:"kubeConfigPath,omitempty"`
+ // -- (string) Kubernetes service host - use "auto" for automatic lookup from the cluster-info ConfigMap (kubeadm-based clusters only)
+ K8SServiceHost string `mapstructure:"k8sServiceHost,omitempty"`
+ // @schema
+ // type: [string, integer]
+ // @schema
+ // -- (string) Kubernetes service port
+ K8SServicePort string `mapstructure:"k8sServicePort,omitempty"`
+ // -- Configure the client side rate limit for the agent and operator
+ //
+ // If the amount of requests to the Kubernetes API server exceeds the configured
+ // rate limit, the agent and operator will start to throttle requests by delaying
+ // them until there is budget or the request times out.
+ K8SClientRateLimit Cilium1163Values_K8SClientRateLimit `mapstructure:"k8sClientRateLimit,omitempty"` + Cluster Cilium1163Values_Cluster `mapstructure:"cluster,omitempty"` + // -- Define serviceAccount names for components. + // @default -- Component's fully qualified name. + ServiceAccounts Cilium1163Values_ServiceAccounts `mapstructure:"serviceAccounts,omitempty"` + // -- Configure termination grace period for cilium-agent DaemonSet. + TerminationGracePeriodSeconds int64 `mapstructure:"terminationGracePeriodSeconds,omitempty"` + // -- Install the cilium agent resources. + Agent bool `mapstructure:"agent,omitempty"` + // -- Agent container name. + Name string `mapstructure:"name,omitempty"` + // -- Roll out cilium agent pods automatically when configmap is updated. + RollOutCiliumPods bool `mapstructure:"rollOutCiliumPods,omitempty"` + // -- Agent container image. + Image Cilium1163Values_Image `mapstructure:"image,omitempty"` + // -- Affinity for cilium-agent. + Affinity Cilium1163Values_Affinity `mapstructure:"affinity,omitempty"` + // -- Node selector for cilium-agent. + NodeSelector Cilium1163Values_NodeSelector `mapstructure:"nodeSelector,omitempty"` + // -- Node tolerations for agent scheduling to nodes with taints + // ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + Tolerations []Cilium1163Values_TolerationsItem `mapstructure:"tolerations,omitempty"` + // -- The priority class to use for cilium-agent. + PriorityClassName string `mapstructure:"priorityClassName,omitempty"` + // -- DNS policy for Cilium agent pods. + // Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy + DnsPolicy string `mapstructure:"dnsPolicy,omitempty"` + // -- Additional containers added to the cilium DaemonSet. + ExtraContainers []any `mapstructure:"extraContainers,omitempty"` + // -- Additional initContainers added to the cilium Daemonset. 
+ ExtraInitContainers []any `mapstructure:"extraInitContainers,omitempty"` + // -- Additional agent container arguments. + ExtraArgs []any `mapstructure:"extraArgs,omitempty"` + // -- Additional agent container environment variables. + ExtraEnv []any `mapstructure:"extraEnv,omitempty"` + // -- Additional agent hostPath mounts. + // - name: host-mnt-data + // mountPath: /host/mnt/data + // hostPath: /mnt/data + // hostPathType: Directory + // readOnly: true + // mountPropagation: HostToContainer + ExtraHostPathMounts []any `mapstructure:"extraHostPathMounts,omitempty"` + // -- Additional agent volumes. + ExtraVolumes []any `mapstructure:"extraVolumes,omitempty"` + // -- Additional agent volumeMounts. + ExtraVolumeMounts []any `mapstructure:"extraVolumeMounts,omitempty"` + // -- extraConfig allows you to specify additional configuration parameters to be + // included in the cilium-config configmap. + // my-config-a: "1234" + // my-config-b: |- + // test 1 + // test 2 + // test 3 + ExtraConfig map[string]any `mapstructure:"extraConfig,omitempty"` + // -- Annotations to be added to all top-level cilium-agent objects (resources under templates/cilium-agent) + Annotations map[string]any `mapstructure:"annotations,omitempty"` + // -- Security Context for cilium-agent pods. 
+ PodSecurityContext Cilium1163Values_PodSecurityContext `mapstructure:"podSecurityContext,omitempty"` + // -- Annotations to be added to agent pods + PodAnnotations map[string]any `mapstructure:"podAnnotations,omitempty"` + // -- Labels to be added to agent pods + PodLabels map[string]any `mapstructure:"podLabels,omitempty"` + // -- Agent resource limits & requests + // ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // limits: + // cpu: 4000m + // memory: 4Gi + // requests: + // cpu: 100m + // memory: 512Mi + Resources map[string]any `mapstructure:"resources,omitempty"` + // -- resources & limits for the agent init containers + InitResources map[string]any `mapstructure:"initResources,omitempty"` + SecurityContext Cilium1163Values_SecurityContext `mapstructure:"securityContext,omitempty"` + // -- Cilium agent update strategy + UpdateStrategy Cilium1163Values_UpdateStrategy `mapstructure:"updateStrategy,omitempty"` + // Configuration Values for cilium-agent + Aksbyocni Cilium1163Values_Aksbyocni `mapstructure:"aksbyocni,omitempty"` + // @schema + // type: [boolean, string] + // @schema + // -- Enable installation of PodCIDR routes between worker + // nodes if worker nodes share a common L2 network segment. + AutoDirectNodeRoutes bool `mapstructure:"autoDirectNodeRoutes,omitempty"` + // -- Enable skipping of PodCIDR routes between worker + // nodes if the worker nodes are in a different L2 network segment. + DirectRoutingSkipUnreachable bool `mapstructure:"directRoutingSkipUnreachable,omitempty"` + // -- Annotate k8s node upon initialization with Cilium's metadata. 
+ AnnotateK8SNode bool `mapstructure:"annotateK8sNode,omitempty"` + Azure Cilium1163Values_Azure `mapstructure:"azure,omitempty"` + Alibabacloud Cilium1163Values_Alibabacloud `mapstructure:"alibabacloud,omitempty"` + // -- Enable bandwidth manager to optimize TCP and UDP workloads and allow + // for rate-limiting traffic from individual Pods with EDT (Earliest Departure + // Time) through the "kubernetes.io/egress-bandwidth" Pod annotation. + BandwidthManager Cilium1163Values_BandwidthManager `mapstructure:"bandwidthManager,omitempty"` + // -- Configure standalone NAT46/NAT64 gateway + Nat46X64Gateway Cilium1163Values_Nat46X64Gateway `mapstructure:"nat46x64Gateway,omitempty"` + // -- EnableHighScaleIPcache enables the special ipcache mode for high scale + // clusters. The ipcache content will be reduced to the strict minimum and + // traffic will be encapsulated to carry security identities. + HighScaleIpcache Cilium1163Values_HighScaleIpcache `mapstructure:"highScaleIPcache,omitempty"` + // -- Configure L2 announcements + L2Announcements Cilium1163Values_L2Announcements `mapstructure:"l2announcements,omitempty"` + // -- Configure L2 pod announcements + L2PodAnnouncements Cilium1163Values_L2PodAnnouncements `mapstructure:"l2podAnnouncements,omitempty"` + // -- Configure BGP + Bgp Cilium1163Values_Bgp `mapstructure:"bgp,omitempty"` + // -- This feature set enables virtual BGP routers to be created via + // CiliumBGPPeeringPolicy CRDs. + BgpControlPlane Cilium1163Values_BgpControlPlane `mapstructure:"bgpControlPlane,omitempty"` + PmtuDiscovery Cilium1163Values_PmtuDiscovery `mapstructure:"pmtuDiscovery,omitempty"` + Bpf Cilium1163Values_Bpf `mapstructure:"bpf,omitempty"` + // -- Enable BPF clock source probing for more efficient tick retrieval. + BpfClockProbe bool `mapstructure:"bpfClockProbe,omitempty"` + // -- Clean all eBPF datapath state from the initContainer of the cilium-agent + // DaemonSet. + // + // WARNING: Use with care! 
+ CleanBpfState bool `mapstructure:"cleanBpfState,omitempty"` + // -- Clean all local Cilium state from the initContainer of the cilium-agent + // DaemonSet. Implies cleanBpfState: true. + // + // WARNING: Use with care! + CleanState bool `mapstructure:"cleanState,omitempty"` + // -- Wait for KUBE-PROXY-CANARY iptables rule to appear in "wait-for-kube-proxy" + // init container before launching cilium-agent. + // More context can be found in the commit message of below PR + // https://github.com/cilium/cilium/pull/20123 + WaitForKubeProxy bool `mapstructure:"waitForKubeProxy,omitempty"` + Cni Cilium1163Values_Cni `mapstructure:"cni,omitempty"` + // -- (string) Configure how frequently garbage collection should occur for the datapath + // connection tracking table. + // @default -- `"0s"` + ConntrackGcinterval string `mapstructure:"conntrackGCInterval,omitempty"` + // -- (string) Configure the maximum frequency for the garbage collection of the + // connection tracking table. Only affects the automatic computation for the frequency + // and has no effect when 'conntrackGCInterval' is set. This can be set to more frequently + // clean up unused identities created from ToFQDN policies. + ConntrackGcmaxInterval string `mapstructure:"conntrackGCMaxInterval,omitempty"` + // -- (string) Configure timeout in which Cilium will exit if CRDs are not available + // @default -- `"5m"` + CrdWaitTimeout string `mapstructure:"crdWaitTimeout,omitempty"` + // -- Tail call hooks for custom eBPF programs. + CustomCalls Cilium1163Values_CustomCalls `mapstructure:"customCalls,omitempty"` + // -- Specify which network interfaces can run the eBPF datapath. This means + // that a packet sent from a pod to a destination outside the cluster will be + // masqueraded (to an output device IPv4 address), if the output device runs the + // program. When not specified, probing will automatically detect devices that have + // a non-local route. 
This should be used only when autodetection is not suitable. + // devices: "" + Daemon Cilium1163Values_Daemon `mapstructure:"daemon,omitempty"` + // -- Enables experimental support for the detection of new and removed datapath + // devices. When devices change the eBPF datapath is reloaded and services updated. + // If "devices" is set then only those devices, or devices matching a wildcard will + // be considered. + // + // This option has been deprecated and is a no-op. + EnableRuntimeDeviceDetection bool `mapstructure:"enableRuntimeDeviceDetection,omitempty"` + // -- Forces the auto-detection of devices, even if specific devices are explicitly listed + // -- Chains to ignore when installing feeder rules. + // disableIptablesFeederRules: "" + ForceDeviceDetection bool `mapstructure:"forceDeviceDetection,omitempty"` + // -- Limit iptables-based egress masquerading to interface selector. + // egressMasqueradeInterfaces: "" + // + // -- Enable setting identity mark for local traffic. + // enableIdentityMark: true + // + // -- Enable Kubernetes EndpointSlice feature in Cilium if the cluster supports it. + // enableK8sEndpointSlice: true + // + // -- Enable CiliumEndpointSlice feature (deprecated, please use `ciliumEndpointSlice.enabled` instead). + EnableCiliumEndpointSlice bool `mapstructure:"enableCiliumEndpointSlice,omitempty"` + CiliumEndpointSlice Cilium1163Values_CiliumEndpointSlice `mapstructure:"ciliumEndpointSlice,omitempty"` + EnvoyConfig Cilium1163Values_EnvoyConfig `mapstructure:"envoyConfig,omitempty"` + IngressController Cilium1163Values_IngressController `mapstructure:"ingressController,omitempty"` + GatewayApi Cilium1163Values_GatewayApi `mapstructure:"gatewayAPI,omitempty"` + // -- Enables the fallback compatibility solution for when the xt_socket kernel + // module is missing and it is needed for the datapath L7 redirection to work + // properly. 
See documentation for details on when this can be disabled: + // https://docs.cilium.io/en/stable/operations/system_requirements/#linux-kernel. + EnableXtsocketFallback bool `mapstructure:"enableXTSocketFallback,omitempty"` + Encryption Cilium1163Values_Encryption `mapstructure:"encryption,omitempty"` + EndpointHealthChecking Cilium1163Values_EndpointHealthChecking `mapstructure:"endpointHealthChecking,omitempty"` + EndpointRoutes Cilium1163Values_EndpointRoutes `mapstructure:"endpointRoutes,omitempty"` + K8SNetworkPolicy Cilium1163Values_K8SNetworkPolicy `mapstructure:"k8sNetworkPolicy,omitempty"` + Eni Cilium1163Values_Eni `mapstructure:"eni,omitempty"` + ExternalIps Cilium1163Values_ExternalIps `mapstructure:"externalIPs,omitempty"` + // fragmentTracking enables IPv4 fragment tracking support in the datapath. + // fragmentTracking: true + Gke Cilium1163Values_Gke `mapstructure:"gke,omitempty"` + // -- Enable connectivity health checking. + HealthChecking bool `mapstructure:"healthChecking,omitempty"` + // -- TCP port for the agent health API. This is not the port for cilium-health. + HealthPort int64 `mapstructure:"healthPort,omitempty"` + // -- Configure the host firewall. + HostFirewall Cilium1163Values_HostFirewall `mapstructure:"hostFirewall,omitempty"` + HostPort Cilium1163Values_HostPort `mapstructure:"hostPort,omitempty"` + // -- Configure socket LB + SocketLb Cilium1163Values_SocketLb `mapstructure:"socketLB,omitempty"` + // -- Configure certificate generation for Hubble integration. + // If hubble.tls.auto.method=cronJob, these values are used + // for the Kubernetes CronJob which will be scheduled regularly to + // (re)generate any certificates not provided manually. + Certgen Cilium1163Values_Certgen `mapstructure:"certgen,omitempty"` + Hubble Cilium1163Values_Hubble `mapstructure:"hubble,omitempty"` + // -- Method to use for identity allocation (`crd` or `kvstore`). 
+ IdentityAllocationMode string `mapstructure:"identityAllocationMode,omitempty"` + // -- (string) Time to wait before using new identity on endpoint identity change. + // @default -- `"5s"` + IdentityChangeGracePeriod string `mapstructure:"identityChangeGracePeriod,omitempty"` + // -- Install Iptables rules to skip netfilter connection tracking on all pod + // traffic. This option is only effective when Cilium is running in direct + // routing and full KPR mode. Moreover, this option cannot be enabled when Cilium + // is running in a managed Kubernetes environment or in a chained CNI setup. + InstallNoConntrackIptablesRules bool `mapstructure:"installNoConntrackIptablesRules,omitempty"` + Ipam Cilium1163Values_Ipam `mapstructure:"ipam,omitempty"` + NodeIpam Cilium1163Values_NodeIpam `mapstructure:"nodeIPAM,omitempty"` + // @schema + // type: [null, string] + // @schema + // -- The api-rate-limit option can be used to overwrite individual settings of the default configuration for rate limiting calls to the Cilium Agent API + ApiRateLimit any `mapstructure:"apiRateLimit,omitempty"` + // -- Configure the eBPF-based ip-masq-agent + // the config of nonMasqueradeCIDRs + // config: + // nonMasqueradeCIDRs: [] + // masqLinkLocal: false + // masqLinkLocalIPv6: false + IpMasqAgent Cilium1163Values_IpMasqAgent `mapstructure:"ipMasqAgent,omitempty"` + // iptablesLockTimeout defines the iptables "--wait" option when invoked from Cilium. + // iptablesLockTimeout: "5s" + Ipv4 Cilium1163Values_Ipv4 `mapstructure:"ipv4,omitempty"` + Ipv6 Cilium1163Values_Ipv6 `mapstructure:"ipv6,omitempty"` + // -- Configure Kubernetes specific configuration + K8S Cilium1163Values_K8S `mapstructure:"k8s,omitempty"` + // -- Keep the deprecated selector labels when deploying Cilium DaemonSet. 
+ KeepDeprecatedLabels bool `mapstructure:"keepDeprecatedLabels,omitempty"` + // -- Keep the deprecated probes when deploying Cilium DaemonSet + KeepDeprecatedProbes bool `mapstructure:"keepDeprecatedProbes,omitempty"` + StartupProbe Cilium1163Values_StartupProbe `mapstructure:"startupProbe,omitempty"` + LivenessProbe Cilium1163Values_LivenessProbe `mapstructure:"livenessProbe,omitempty"` + // -- Configure the kube-proxy replacement in Cilium BPF datapath + // Valid options are "true" or "false". + // ref: https://docs.cilium.io/en/stable/network/kubernetes/kubeproxy-free/ + // kubeProxyReplacement: "false" + ReadinessProbe Cilium1163Values_ReadinessProbe `mapstructure:"readinessProbe,omitempty"` + // -- healthz server bind address for the kube-proxy replacement. + // To enable set the value to '0.0.0.0:10256' for all ipv4 + // addresses and this '[::]:10256' for all ipv6 addresses. + // By default it is disabled. + KubeProxyReplacementHealthzBindAddr string `mapstructure:"kubeProxyReplacementHealthzBindAddr,omitempty"` + L2NeighDiscovery Cilium1163Values_L2NeighDiscovery `mapstructure:"l2NeighDiscovery,omitempty"` + // -- Enable Layer 7 network policy. + L7Proxy bool `mapstructure:"l7Proxy,omitempty"` + // -- Enable Local Redirect Policy. + // To include or exclude matched resources from cilium identity evaluation + // labels: "" + LocalRedirectPolicy bool `mapstructure:"localRedirectPolicy,omitempty"` + // logOptions allows you to define logging options. 
eg: + // logOptions: + // format: json + // + // -- Enables periodic logging of system load + LogSystemLoad bool `mapstructure:"logSystemLoad,omitempty"` + // -- Configure maglev consistent hashing + // -- tableSize is the size (parameter M) for the backend table of one + // service entry + // tableSize: + Maglev map[string]any `mapstructure:"maglev,omitempty"` + // -- hashSeed is the cluster-wide base64 encoded seed for the hashing + // hashSeed: + // + // -- Enables masquerading of IPv4 traffic leaving the node from endpoints. + EnableIpv4Masquerade bool `mapstructure:"enableIPv4Masquerade,omitempty"` + // -- Enables masquerading of IPv6 traffic leaving the node from endpoints. + EnableIpv6Masquerade bool `mapstructure:"enableIPv6Masquerade,omitempty"` + // -- Enables masquerading to the source of the route for traffic leaving the node from endpoints. + EnableMasqueradeRouteSource bool `mapstructure:"enableMasqueradeRouteSource,omitempty"` + // -- Enables IPv4 BIG TCP support which increases maximum IPv4 GSO/GRO limits for nodes and pods + EnableIpv4Bigtcp bool `mapstructure:"enableIPv4BIGTCP,omitempty"` + // -- Enables IPv6 BIG TCP support which increases maximum IPv6 GSO/GRO limits for nodes and pods + EnableIpv6Bigtcp bool `mapstructure:"enableIPv6BIGTCP,omitempty"` + Nat Cilium1163Values_Nat `mapstructure:"nat,omitempty"` + EgressGateway Cilium1163Values_EgressGateway `mapstructure:"egressGateway,omitempty"` + Vtep Cilium1163Values_Vtep `mapstructure:"vtep,omitempty"` + // -- (string) Allows to explicitly specify the IPv4 CIDR for native routing. + // When specified, Cilium assumes networking for this CIDR is preconfigured and + // hands traffic destined for that range to the Linux network stack without + // applying any SNAT. + // Generally speaking, specifying a native routing CIDR implies that Cilium can + // depend on the underlying networking stack to route packets to their + // destination. 
To offer a concrete example, if Cilium is configured to use + // direct routing and the Kubernetes CIDR is included in the native routing CIDR, + // the user must configure the routes to reach pods, either manually or by + // setting the auto-direct-node-routes flag. + Ipv4NativeRoutingCidr string `mapstructure:"ipv4NativeRoutingCIDR,omitempty"` + // -- (string) Allows to explicitly specify the IPv6 CIDR for native routing. + // When specified, Cilium assumes networking for this CIDR is preconfigured and + // hands traffic destined for that range to the Linux network stack without + // applying any SNAT. + // Generally speaking, specifying a native routing CIDR implies that Cilium can + // depend on the underlying networking stack to route packets to their + // destination. To offer a concrete example, if Cilium is configured to use + // direct routing and the Kubernetes CIDR is included in the native routing CIDR, + // the user must configure the routes to reach pods, either manually or by + // setting the auto-direct-node-routes flag. + Ipv6NativeRoutingCidr string `mapstructure:"ipv6NativeRoutingCIDR,omitempty"` + // -- cilium-monitor sidecar. + Monitor Cilium1163Values_Monitor `mapstructure:"monitor,omitempty"` + // -- Configure service load balancing + LoadBalancer Cilium1163Values_LoadBalancer `mapstructure:"loadBalancer,omitempty"` + // -- Configure N-S k8s service loadbalancing + // policyAuditMode: false + NodePort Cilium1163Values_NodePort `mapstructure:"nodePort,omitempty"` + // -- The agent can be put into one of the three policy enforcement modes: + // default, always and never. + // ref: https://docs.cilium.io/en/stable/security/policy/intro/#policy-enforcement-modes + PolicyEnforcementMode string `mapstructure:"policyEnforcementMode,omitempty"` + // @schema + // type: [null, string, array] + // @schema + // -- policyCIDRMatchMode is a list of entities that may be selected by CIDR selector. + // The possible value is "nodes". 
+ PolicyCidrmatchMode any `mapstructure:"policyCIDRMatchMode,omitempty"` + Pprof Cilium1163Values_Pprof `mapstructure:"pprof,omitempty"` + // -- Configure prometheus metrics on the configured port at /metrics + Prometheus Cilium1163Values_Prometheus `mapstructure:"prometheus,omitempty"` + // -- Grafana dashboards for cilium-agent + // grafana can import dashboards based on the label and value + // ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards + Dashboards Cilium1163Values_Dashboards `mapstructure:"dashboards,omitempty"` + // Configure Cilium Envoy options. + Envoy Cilium1163Values_Envoy `mapstructure:"envoy,omitempty"` + // -- Enable/Disable use of node label based identity + NodeSelectorLabels bool `mapstructure:"nodeSelectorLabels,omitempty"` + // -- Enable resource quotas for priority classes used in the cluster. + // Need to document default + // + // sessionAffinity: false + ResourceQuotas Cilium1163Values_ResourceQuotas `mapstructure:"resourceQuotas,omitempty"` + // -- Do not run Cilium agent when running with clean mode. Useful to completely + // uninstall Cilium as it will stop Cilium from starting and create artifacts + // in the node. + SleepAfterInit bool `mapstructure:"sleepAfterInit,omitempty"` + // -- Enable check of service source ranges (currently, only for LoadBalancer). + SvcSourceRangeCheck bool `mapstructure:"svcSourceRangeCheck,omitempty"` + // -- Synchronize Kubernetes nodes to kvstore and perform CNP GC. + SynchronizeK8SNodes bool `mapstructure:"synchronizeK8sNodes,omitempty"` + // -- Configure TLS configuration in the agent. + Tls Cilium1163Values_Tls `mapstructure:"tls,omitempty"` + // -- Tunneling protocol to use in tunneling mode and for ad-hoc tunnels. + // Possible values: + // - "" + // - vxlan + // - geneve + // @default -- `"vxlan"` + TunnelProtocol string `mapstructure:"tunnelProtocol,omitempty"` + // -- Enable native-routing mode or tunneling mode. 
+ // Possible values: + // - "" + // - native + // - tunnel + // @default -- `"tunnel"` + RoutingMode string `mapstructure:"routingMode,omitempty"` + // -- Configure VXLAN and Geneve tunnel port. + // @default -- Port 8472 for VXLAN, Port 6081 for Geneve + TunnelPort int64 `mapstructure:"tunnelPort,omitempty"` + // -- Configure what the response should be to traffic for a service without backends. + // "reject" only works on kernels >= 5.10, on lower kernels we fallback to "drop". + // Possible values: + // - reject (default) + // - drop + ServiceNoBackendResponse string `mapstructure:"serviceNoBackendResponse,omitempty"` + // -- Configure the underlying network MTU to overwrite auto-detected MTU. + // This value doesn't change the host network interface MTU i.e. eth0 or ens0. + // It changes the MTU for cilium_net@cilium_host, cilium_host@cilium_net, + // cilium_vxlan and lxc_health interfaces. + Mtu int64 `mapstructure:"MTU,omitempty"` + // -- Disable the usage of CiliumEndpoint CRD. + DisableEndpointCrd bool `mapstructure:"disableEndpointCRD,omitempty"` + WellKnownIdentities Cilium1163Values_WellKnownIdentities `mapstructure:"wellKnownIdentities,omitempty"` + Etcd Cilium1163Values_Etcd `mapstructure:"etcd,omitempty"` + Operator Cilium1163Values_Operator `mapstructure:"operator,omitempty"` + Nodeinit Cilium1163Values_Nodeinit `mapstructure:"nodeinit,omitempty"` + Preflight Cilium1163Values_Preflight `mapstructure:"preflight,omitempty"` + // -- Explicitly enable or disable priority class. + // .Capabilities.KubeVersion is unsettable in `helm template` calls, + // it depends on k8s libraries version that Helm was compiled against. + // This option allows to explicitly disable setting the priority class, which + // is useful for rendering charts for gke clusters in advance. 
+ EnableCriticalPriorityClass bool `mapstructure:"enableCriticalPriorityClass,omitempty"` + // disableEnvoyVersionCheck removes the check for Envoy, which can be useful + // on AArch64 as the images do not currently ship a version of Envoy. + // disableEnvoyVersionCheck: false + Clustermesh Cilium1163Values_Clustermesh `mapstructure:"clustermesh,omitempty"` + // -- Configure external workloads support + ExternalWorkloads Cilium1163Values_ExternalWorkloads `mapstructure:"externalWorkloads,omitempty"` + // -- Configure cgroup related configuration + Cgroup Cilium1163Values_Cgroup `mapstructure:"cgroup,omitempty"` + // -- Configure sysctl override described in #20072. + Sysctlfix Cilium1163Values_Sysctlfix `mapstructure:"sysctlfix,omitempty"` + // -- Configure whether to enable auto detect of terminating state for endpoints + // in order to support graceful termination. + // -- Configure whether to unload DNS policy rules on graceful shutdown + // dnsPolicyUnloadOnShutdown: false + EnableK8STerminatingEndpoint bool `mapstructure:"enableK8sTerminatingEndpoint,omitempty"` + // -- Configure the key of the taint indicating that Cilium is not ready on the node. + // When set to a value starting with `ignore-taint.cluster-autoscaler.kubernetes.io/`, the Cluster Autoscaler will ignore the taint on its decisions, allowing the cluster to scale up. + AgentNotReadyTaintKey string `mapstructure:"agentNotReadyTaintKey,omitempty"` + DnsProxy Cilium1163Values_DnsProxy `mapstructure:"dnsProxy,omitempty"` + // -- SCTP Configuration Values + Sctp Cilium1163Values_Sctp `mapstructure:"sctp,omitempty"` + // Configuration for types of authentication for Cilium (beta) + Authentication Cilium1163Values_Authentication `mapstructure:"authentication,omitempty"` + // UNSAFE. 
USE WITH CAUTION + // + // UNSAFE_MISC_FIELDS is a place for any additional fields that are not handled by the generator + // The value of this field is going to be available as is in the output of .ToMap() + // The fields in this map will overwrite other fields if their names match. + // Field A has the same name as field B in the UNSAFE_MISC_FIELDS map, if the mapstructure format + // of field A is exactly equal to the actual string literal of field B. + // Example: + // type Values struct { + // FieldA string `mapstructure:"myField"` + // UNSAFE_MISC_FIELDS map[string]any + // } + // v := Values{ + // FieldA: "originalValue" + // UNSAFE_MISC_FIELDS: map[string]any{ + // "myField": "newValue", // same as FieldA mapstructure format + // "anotherField": "anotherValue", // new field that will be included in the map output + // } + // } + // v.ToMap() // returns map[string]any{"myField": "newValue", "anotherField": "anotherValue"} + // + UNSAFE_MISC_FIELDS map[string]any `mapstructure:"-"` +} + +func (v *Cilium1163Values) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + + // handle UNSAFE fields + for k, v := range v.UNSAFE_MISC_FIELDS { + result[k] = v + } + + return result, nil +} diff --git a/src/k8s/pkg/k8sd/features/values/ck-loadbalancer_values.go b/src/k8s/pkg/k8sd/features/values/ck-loadbalancer_values.go new file mode 100644 index 000000000..eb0aee847 --- /dev/null +++ b/src/k8s/pkg/k8sd/features/values/ck-loadbalancer_values.go @@ -0,0 +1,110 @@ +// Code generated by CHART_VALUES_STRUCT_GENERATOR. DO NOT EDIT. +// +// This file was autogenerated by the CHART_VALUES_STRUCT_GENERATOR tool on 2024-12-12. +// Any changes will be overwritten. +// +// These files are generated from the values.yaml files in the k8s/manifests/charts directory. 
+// Head to the k8s/manifests/charts/Makefile to see how to generate these files. +// +// Package values contains the Go structs representing the values of the Helm chart. +package values + +import ( + "fmt" + "github.com/mitchellh/mapstructure" +) + +type CkLoadbalancerValues_L2 struct { + Enabled bool `mapstructure:"enabled,omitempty"` + // interfaces: + // - "^eth[0-9]+" + Interfaces []any `mapstructure:"interfaces,omitempty"` +} + +func (v *CkLoadbalancerValues_L2) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type CkLoadbalancerValues_IpPool struct { + // cidrs: + // - cidr: "10.42.254.176/28" + Cidrs []any `mapstructure:"cidrs,omitempty"` +} + +func (v *CkLoadbalancerValues_IpPool) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type CkLoadbalancerValues_Bgp struct { + Enabled bool `mapstructure:"enabled,omitempty"` + LocalAsn int64 `mapstructure:"localASN,omitempty"` + // neighbors: + // - peerAddress: '10.0.0.60/24' + // peerASN: 65100 + // peerPort: 179 + Neighbors []any `mapstructure:"neighbors,omitempty"` +} + +func (v *CkLoadbalancerValues_Bgp) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// CkLoadbalancerValues represents the values of the ck-loadbalancer_values.yaml chart +type CkLoadbalancerValues struct { + Driver any `mapstructure:"driver,omitempty"` + L2 CkLoadbalancerValues_L2 `mapstructure:"l2,omitempty"` + IpPool CkLoadbalancerValues_IpPool `mapstructure:"ipPool,omitempty"` + Bgp CkLoadbalancerValues_Bgp 
`mapstructure:"bgp,omitempty"` + // UNSAFE. USE WITH CAUTION + // + // UNSAFE_MISC_FIELDS is a place for any additional fields that are not handled by the generator + // The value of this field is going to be available as is in the output of .ToMap() + // The fields in this map will overwrite other fields if their names match. + // Field A has the same name as field B in the UNSAFE_MISC_FIELDS map, if the mapstructure format + // of field A is exactly equal to the actual string literal of field B. + // Example: + // type Values struct { + // FieldA string `mapstructure:"myField"` + // UNSAFE_MISC_FIELDS map[string]any + // } + // v := Values{ + // FieldA: "originalValue" + // UNSAFE_MISC_FIELDS: map[string]any{ + // "myField": "newValue", // same as FieldA mapstructure format + // "anotherField": "anotherValue", // new field that will be included in the map output + // } + // } + // v.ToMap() // returns map[string]any{"myField": "newValue", "anotherField": "anotherValue"} + // + UNSAFE_MISC_FIELDS map[string]any `mapstructure:"-"` +} + +func (v *CkLoadbalancerValues) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + + // handle UNSAFE fields + for k, v := range v.UNSAFE_MISC_FIELDS { + result[k] = v + } + + return result, nil +} diff --git a/src/k8s/pkg/k8sd/features/values/coredns-1.36.0_values.go b/src/k8s/pkg/k8sd/features/values/coredns-1.36.0_values.go new file mode 100644 index 000000000..32ba86477 --- /dev/null +++ b/src/k8s/pkg/k8sd/features/values/coredns-1.36.0_values.go @@ -0,0 +1,764 @@ +// Code generated by CHART_VALUES_STRUCT_GENERATOR. DO NOT EDIT. +// +// This file was autogenerated by the CHART_VALUES_STRUCT_GENERATOR tool on 2024-12-12. +// Any changes will be overwritten. +// +// These files are generated from the values.yaml files in the k8s/manifests/charts directory. 
+// Head to the k8s/manifests/charts/Makefile to see how to generate these files. +// +// Package values contains the Go structs representing the values of the Helm chart. +package values + +import ( + "fmt" + "github.com/mitchellh/mapstructure" +) + +type Coredns1360Values_Image struct { + Repository string `mapstructure:"repository,omitempty"` + // Overrides the image tag whose default is the chart appVersion. + Tag string `mapstructure:"tag,omitempty"` + PullPolicy string `mapstructure:"pullPolicy,omitempty"` + // Optionally specify an array of imagePullSecrets. + // Secrets must be manually created in the namespace. + // ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + // + // pullSecrets: + // - name: myRegistryKeySecretName + PullSecrets []any `mapstructure:"pullSecrets,omitempty"` +} + +func (v *Coredns1360Values_Image) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Coredns1360Values_Resources_Limits struct { + Cpu string `mapstructure:"cpu,omitempty"` + Memory string `mapstructure:"memory,omitempty"` +} + +func (v *Coredns1360Values_Resources_Limits) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Coredns1360Values_Resources_Requests struct { + Cpu string `mapstructure:"cpu,omitempty"` + Memory string `mapstructure:"memory,omitempty"` +} + +func (v *Coredns1360Values_Resources_Requests) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Coredns1360Values_Resources struct { + Limits 
Coredns1360Values_Resources_Limits `mapstructure:"limits,omitempty"` + Requests Coredns1360Values_Resources_Requests `mapstructure:"requests,omitempty"` +} + +func (v *Coredns1360Values_Resources) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Coredns1360Values_RollingUpdate struct { + MaxUnavailable int64 `mapstructure:"maxUnavailable,omitempty"` + MaxSurge string `mapstructure:"maxSurge,omitempty"` +} + +func (v *Coredns1360Values_RollingUpdate) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Coredns1360Values_Prometheus_Service_Annotations struct { + PrometheusIoscrape string `mapstructure:"prometheus.io/scrape,omitempty"` + PrometheusIoport string `mapstructure:"prometheus.io/port,omitempty"` +} + +func (v *Coredns1360Values_Prometheus_Service_Annotations) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Coredns1360Values_Prometheus_Service struct { + Enabled bool `mapstructure:"enabled,omitempty"` + Annotations Coredns1360Values_Prometheus_Service_Annotations `mapstructure:"annotations,omitempty"` + Selector map[string]any `mapstructure:"selector,omitempty"` +} + +func (v *Coredns1360Values_Prometheus_Service) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Coredns1360Values_Prometheus_Monitor struct { + Enabled bool 
`mapstructure:"enabled,omitempty"` + AdditionalLabels map[string]any `mapstructure:"additionalLabels,omitempty"` + Namespace string `mapstructure:"namespace,omitempty"` + Interval string `mapstructure:"interval,omitempty"` + Selector map[string]any `mapstructure:"selector,omitempty"` +} + +func (v *Coredns1360Values_Prometheus_Monitor) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Coredns1360Values_Prometheus struct { + Service Coredns1360Values_Prometheus_Service `mapstructure:"service,omitempty"` + Monitor Coredns1360Values_Prometheus_Monitor `mapstructure:"monitor,omitempty"` +} + +func (v *Coredns1360Values_Prometheus) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Coredns1360Values_Service struct { + // clusterIP: "" + // clusterIPs: [] + // loadBalancerIP: "" + // loadBalancerClass: "" + // externalIPs: [] + // externalTrafficPolicy: "" + // ipFamilyPolicy: "" + // trafficDistribution: PreferClose + // The name of the Service + // If not set, a name is generated using the fullname template + Name string `mapstructure:"name,omitempty"` + Annotations map[string]any `mapstructure:"annotations,omitempty"` + // Pod selector + Selector map[string]any `mapstructure:"selector,omitempty"` +} + +func (v *Coredns1360Values_Service) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Coredns1360Values_ServiceAccount struct { + Create bool `mapstructure:"create,omitempty"` + // The name of the ServiceAccount to use + // If not set and 
create is true, a name is generated using the fullname template + Name string `mapstructure:"name,omitempty"` + Annotations map[string]any `mapstructure:"annotations,omitempty"` +} + +func (v *Coredns1360Values_ServiceAccount) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Coredns1360Values_Rbac struct { + // If true, create & use RBAC resources + Create bool `mapstructure:"create,omitempty"` + // If true, create and use PodSecurityPolicy + // The name of the ServiceAccount to use. + // If not set and create is true, a name is generated using the fullname template + // name: + PspEnable bool `mapstructure:"pspEnable,omitempty"` +} + +func (v *Coredns1360Values_Rbac) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Coredns1360Values_SecurityContext_Capabilities struct { + // - NET_BIND_SERVICE + Add []string `mapstructure:"add,omitempty"` +} + +func (v *Coredns1360Values_SecurityContext_Capabilities) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// Configure SecurityContext for Pod. +// Ensure that required linux capability to bind port number below 1024 is assigned (`CAP_NET_BIND_SERVICE`). 
+type Coredns1360Values_SecurityContext struct { + Capabilities Coredns1360Values_SecurityContext_Capabilities `mapstructure:"capabilities,omitempty"` +} + +func (v *Coredns1360Values_SecurityContext) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Coredns1360Values_ServersItem_ZonesItem struct { + Zone string `mapstructure:"zone,omitempty"` +} + +func (v *Coredns1360Values_ServersItem_ZonesItem) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// -- expose the service on a different port +// servicePort: 5353 +// If serviceType is nodePort you can specify nodePort here +// nodePort: 30053 +// hostPort: 53 +type Coredns1360Values_ServersItem_PluginsItem struct { + Name string `mapstructure:"name,omitempty"` +} + +func (v *Coredns1360Values_ServersItem_PluginsItem) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// Default zone is what Kubernetes recommends: +// https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/#coredns-configmap-options +type Coredns1360Values_ServersItem struct { + Zones []Coredns1360Values_ServersItem_ZonesItem `mapstructure:"zones,omitempty"` + Port int64 `mapstructure:"port,omitempty"` + // -- expose the service on a different port + // servicePort: 5353 + // If serviceType is nodePort you can specify nodePort here + // nodePort: 30053 + // hostPort: 53 + Plugins []Coredns1360Values_ServersItem_PluginsItem `mapstructure:"plugins,omitempty"` +} + +func (v *Coredns1360Values_ServersItem) ToMap() 
(map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// To use the livenessProbe, the health plugin needs to be enabled in CoreDNS' server config +type Coredns1360Values_LivenessProbe struct { + Enabled bool `mapstructure:"enabled,omitempty"` + InitialDelaySeconds int64 `mapstructure:"initialDelaySeconds,omitempty"` + PeriodSeconds int64 `mapstructure:"periodSeconds,omitempty"` + TimeoutSeconds int64 `mapstructure:"timeoutSeconds,omitempty"` + FailureThreshold int64 `mapstructure:"failureThreshold,omitempty"` + SuccessThreshold int64 `mapstructure:"successThreshold,omitempty"` +} + +func (v *Coredns1360Values_LivenessProbe) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// To use the readinessProbe, the ready plugin needs to be enabled in CoreDNS' server config +type Coredns1360Values_ReadinessProbe struct { + Enabled bool `mapstructure:"enabled,omitempty"` + InitialDelaySeconds int64 `mapstructure:"initialDelaySeconds,omitempty"` + PeriodSeconds int64 `mapstructure:"periodSeconds,omitempty"` + TimeoutSeconds int64 `mapstructure:"timeoutSeconds,omitempty"` + FailureThreshold int64 `mapstructure:"failureThreshold,omitempty"` + SuccessThreshold int64 `mapstructure:"successThreshold,omitempty"` +} + +func (v *Coredns1360Values_ReadinessProbe) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// Alternative configuration for HPA deployment if wanted +// Create HorizontalPodAutoscaler object. 
+// +// hpa: +// enabled: false +// minReplicas: 1 +// maxReplicas: 10 +// metrics: +// metrics: +// - type: Resource +// resource: +// name: memory +// target: +// type: Utilization +// averageUtilization: 60 +// - type: Resource +// resource: +// name: cpu +// target: +// type: Utilization +// averageUtilization: 60 +type Coredns1360Values_Hpa struct { + Enabled bool `mapstructure:"enabled,omitempty"` + MinReplicas int64 `mapstructure:"minReplicas,omitempty"` + MaxReplicas int64 `mapstructure:"maxReplicas,omitempty"` + Metrics []any `mapstructure:"metrics,omitempty"` +} + +func (v *Coredns1360Values_Hpa) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// Optionally specify some extra flags to pass to cluster-proprtional-autoscaler. +// Useful for e.g. the nodelabels flag. +// customFlags: +// - --nodelabels=topology.kubernetes.io/zone=us-east-1a +type Coredns1360Values_Autoscaler_Image struct { + Repository string `mapstructure:"repository,omitempty"` + Tag string `mapstructure:"tag,omitempty"` + PullPolicy string `mapstructure:"pullPolicy,omitempty"` + // Optionally specify an array of imagePullSecrets. + // Secrets must be manually created in the namespace. 
+ // ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + // + // pullSecrets: + // - name: myRegistryKeySecretName + PullSecrets []any `mapstructure:"pullSecrets,omitempty"` +} + +func (v *Coredns1360Values_Autoscaler_Image) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Coredns1360Values_Autoscaler_Resources_Requests struct { + Cpu string `mapstructure:"cpu,omitempty"` + Memory string `mapstructure:"memory,omitempty"` +} + +func (v *Coredns1360Values_Autoscaler_Resources_Requests) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Coredns1360Values_Autoscaler_Resources_Limits struct { + Cpu string `mapstructure:"cpu,omitempty"` + Memory string `mapstructure:"memory,omitempty"` +} + +func (v *Coredns1360Values_Autoscaler_Resources_Limits) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// resources for autoscaler pod +type Coredns1360Values_Autoscaler_Resources struct { + Requests Coredns1360Values_Autoscaler_Resources_Requests `mapstructure:"requests,omitempty"` + Limits Coredns1360Values_Autoscaler_Resources_Limits `mapstructure:"limits,omitempty"` +} + +func (v *Coredns1360Values_Autoscaler_Resources) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// Options for autoscaler configmap +type 
Coredns1360Values_Autoscaler_Configmap struct { + // Annotations for the coredns-autoscaler configmap + // i.e. strategy.spinnaker.io/versioned: "false" to ensure configmap isn't renamed + Annotations map[string]any `mapstructure:"annotations,omitempty"` +} + +func (v *Coredns1360Values_Autoscaler_Configmap) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// Enables the livenessProbe for cluster-proportional-autoscaler - this requires version 1.8.0+ of the autoscaler +type Coredns1360Values_Autoscaler_LivenessProbe struct { + Enabled bool `mapstructure:"enabled,omitempty"` + InitialDelaySeconds int64 `mapstructure:"initialDelaySeconds,omitempty"` + PeriodSeconds int64 `mapstructure:"periodSeconds,omitempty"` + TimeoutSeconds int64 `mapstructure:"timeoutSeconds,omitempty"` + FailureThreshold int64 `mapstructure:"failureThreshold,omitempty"` + SuccessThreshold int64 `mapstructure:"successThreshold,omitempty"` +} + +func (v *Coredns1360Values_Autoscaler_LivenessProbe) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// Configue a cluster-proportional-autoscaler for coredns +// See https://github.com/kubernetes-incubator/cluster-proportional-autoscaler +type Coredns1360Values_Autoscaler struct { + // Enabled the cluster-proportional-autoscaler + Enabled bool `mapstructure:"enabled,omitempty"` + // Number of cores in the cluster per coredns replica + CoresPerReplica int64 `mapstructure:"coresPerReplica,omitempty"` + // Number of nodes in the cluster per coredns replica + NodesPerReplica int64 `mapstructure:"nodesPerReplica,omitempty"` + // Min size of replicaCount + Min int64 `mapstructure:"min,omitempty"` + // Max size of 
replicaCount (default of 0 is no max) + Max int64 `mapstructure:"max,omitempty"` + // Whether to include unschedulable nodes in the nodes/cores calculations - this requires version 1.8.0+ of the autoscaler + IncludeUnschedulableNodes bool `mapstructure:"includeUnschedulableNodes,omitempty"` + // If true does not allow single points of failure to form + PreventSinglePointFailure bool `mapstructure:"preventSinglePointFailure,omitempty"` + // Annotations for the coredns proportional autoscaler pods + PodAnnotations map[string]any `mapstructure:"podAnnotations,omitempty"` + // Optionally specify some extra flags to pass to cluster-proprtional-autoscaler. + // Useful for e.g. the nodelabels flag. + // customFlags: + // - --nodelabels=topology.kubernetes.io/zone=us-east-1a + // + Image Coredns1360Values_Autoscaler_Image `mapstructure:"image,omitempty"` + // Optional priority class to be used for the autoscaler pods. priorityClassName used if not set. + PriorityClassName string `mapstructure:"priorityClassName,omitempty"` + // expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core + Affinity map[string]any `mapstructure:"affinity,omitempty"` + // Node labels for pod assignment + // Ref: https://kubernetes.io/docs/user-guide/node-selection/ + NodeSelector map[string]any `mapstructure:"nodeSelector,omitempty"` + // expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core + Tolerations []any `mapstructure:"tolerations,omitempty"` + // resources for autoscaler pod + Resources Coredns1360Values_Autoscaler_Resources `mapstructure:"resources,omitempty"` + // Options for autoscaler configmap + Configmap Coredns1360Values_Autoscaler_Configmap `mapstructure:"configmap,omitempty"` + // Enables the livenessProbe for cluster-proportional-autoscaler - this requires version 1.8.0+ of the autoscaler + LivenessProbe 
Coredns1360Values_Autoscaler_LivenessProbe `mapstructure:"livenessProbe,omitempty"` + // optional array of sidecar containers + // - name: some-container-name + // image: some-image:latest + // imagePullPolicy: Always + ExtraContainers []any `mapstructure:"extraContainers,omitempty"` +} + +func (v *Coredns1360Values_Autoscaler) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Coredns1360Values_Deployment struct { + SkipConfig bool `mapstructure:"skipConfig,omitempty"` + Enabled bool `mapstructure:"enabled,omitempty"` + Name string `mapstructure:"name,omitempty"` + // Annotations for the coredns deployment + Annotations map[string]any `mapstructure:"annotations,omitempty"` + // Pod selector + Selector map[string]any `mapstructure:"selector,omitempty"` +} + +func (v *Coredns1360Values_Deployment) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// Coredns1360Values represents the values of the coredns-1.36.0_values.yaml chart +type Coredns1360Values struct { + Image Coredns1360Values_Image `mapstructure:"image,omitempty"` + ReplicaCount int64 `mapstructure:"replicaCount,omitempty"` + Resources Coredns1360Values_Resources `mapstructure:"resources,omitempty"` + RollingUpdate Coredns1360Values_RollingUpdate `mapstructure:"rollingUpdate,omitempty"` + TerminationGracePeriodSeconds int64 `mapstructure:"terminationGracePeriodSeconds,omitempty"` + // cluster-autoscaler.kubernetes.io/safe-to-evict: "false" + PodAnnotations map[string]any `mapstructure:"podAnnotations,omitempty"` + ServiceType string `mapstructure:"serviceType,omitempty"` + Prometheus Coredns1360Values_Prometheus `mapstructure:"prometheus,omitempty"` + Service 
Coredns1360Values_Service `mapstructure:"service,omitempty"` + ServiceAccount Coredns1360Values_ServiceAccount `mapstructure:"serviceAccount,omitempty"` + Rbac Coredns1360Values_Rbac `mapstructure:"rbac,omitempty"` + // isClusterService specifies whether chart should be deployed as cluster-service or normal k8s app. + IsClusterService bool `mapstructure:"isClusterService,omitempty"` + // Optional priority class to be used for the coredns pods. Used for autoscaler if autoscaler.priorityClassName not set. + PriorityClassName string `mapstructure:"priorityClassName,omitempty"` + // Configure the pod level securityContext. + PodSecurityContext map[string]any `mapstructure:"podSecurityContext,omitempty"` + // Configure SecurityContext for Pod. + // Ensure that required linux capability to bind port number below 1024 is assigned (`CAP_NET_BIND_SERVICE`). + SecurityContext Coredns1360Values_SecurityContext `mapstructure:"securityContext,omitempty"` + // Default zone is what Kubernetes recommends: + // https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/#coredns-configmap-options + Servers []Coredns1360Values_ServersItem `mapstructure:"servers,omitempty"` + // Complete example with all the options: + // - zones: # the `zones` block can be left out entirely, defaults to "." + // - zone: hello.world. # optional, defaults to "." + // scheme: tls:// # optional, defaults to "" (which equals "dns://" in CoreDNS) + // - zone: foo.bar. + // scheme: dns:// + // use_tcp: true # set this parameter to optionally expose the port on tcp as well as udp for the DNS protocol + // # Note that this will not work if you are also exposing tls or grpc on the same server + // port: 12345 # optional, defaults to "" (which equals 53 in CoreDNS) + // plugins: # the plugins to use for this server block + // - name: kubernetes # name of plugin, if used multiple times ensure that the plugin supports it! 
+ // parameters: foo bar # list of parameters after the plugin + // configBlock: |- # if the plugin supports extra block style config, supply it here + // hello world + // foo bar + // + // Extra configuration that is applied outside of the default zone block. + // Example to include additional config files, which may come from extraVolumes: + // extraConfig: + // import: + // parameters: /opt/coredns/*.conf + ExtraConfig map[string]any `mapstructure:"extraConfig,omitempty"` + // To use the livenessProbe, the health plugin needs to be enabled in CoreDNS' server config + LivenessProbe Coredns1360Values_LivenessProbe `mapstructure:"livenessProbe,omitempty"` + // To use the readinessProbe, the ready plugin needs to be enabled in CoreDNS' server config + ReadinessProbe Coredns1360Values_ReadinessProbe `mapstructure:"readinessProbe,omitempty"` + // expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core + // for example: + // affinity: + // nodeAffinity: + // requiredDuringSchedulingIgnoredDuringExecution: + // nodeSelectorTerms: + // - matchExpressions: + // - key: foo.bar.com/role + // operator: In + // values: + // - master + Affinity map[string]any `mapstructure:"affinity,omitempty"` + // expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#topologyspreadconstraint-v1-core + // and supports Helm templating. + // For example: + // topologySpreadConstraints: + // - labelSelector: + // matchLabels: + // app.kubernetes.io/name: '{{ template "coredns.name" . }}' + // app.kubernetes.io/instance: '{{ .Release.Name }}' + // topologyKey: topology.kubernetes.io/zone + // maxSkew: 1 + // whenUnsatisfiable: ScheduleAnyway + // - labelSelector: + // matchLabels: + // app.kubernetes.io/name: '{{ template "coredns.name" . 
}}' + // app.kubernetes.io/instance: '{{ .Release.Name }}' + // topologyKey: kubernetes.io/hostname + // maxSkew: 1 + // whenUnsatisfiable: ScheduleAnyway + TopologySpreadConstraints []any `mapstructure:"topologySpreadConstraints,omitempty"` + // Node labels for pod assignment + // Ref: https://kubernetes.io/docs/user-guide/node-selection/ + NodeSelector map[string]any `mapstructure:"nodeSelector,omitempty"` + // expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core + // for example: + // tolerations: + // - key: foo.bar.com/role + // operator: Equal + // value: master + // effect: NoSchedule + Tolerations []any `mapstructure:"tolerations,omitempty"` + // https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + PodDisruptionBudget map[string]any `mapstructure:"podDisruptionBudget,omitempty"` + // configure custom zone files as per https://coredns.io/2017/05/08/custom-dns-entries-for-kubernetes/ + // - filename: example.db + // domain: example.com + // contents: | + // example.com. IN SOA sns.dns.icann.com. noc.dns.icann.com. 2015082541 7200 3600 1209600 3600 + // example.com. IN NS b.iana-servers.net. + // example.com. IN NS a.iana-servers.net. + // example.com. IN A 192.168.99.102 + // *.example.com. 
IN A 192.168.99.102 + ZoneFiles []any `mapstructure:"zoneFiles,omitempty"` + // optional array of sidecar containers + ExtraContainers []any `mapstructure:"extraContainers,omitempty"` + // - name: some-container-name + // image: some-image:latest + // imagePullPolicy: Always + // optional array of extra volumes to create + ExtraVolumes []any `mapstructure:"extraVolumes,omitempty"` + // - name: some-volume-name + // emptyDir: {} + // optional array of mount points for extraVolumes + // - name: some-volume-name + // mountPath: /etc/wherever + ExtraVolumeMounts []any `mapstructure:"extraVolumeMounts,omitempty"` + // optional array of secrets to mount inside coredns container + // possible usecase: need for secure connection with etcd backend + // - name: etcd-client-certs + // mountPath: /etc/coredns/tls/etcd + // defaultMode: 420 + // - name: some-fancy-secret + // mountPath: /etc/wherever + // defaultMode: 440 + ExtraSecrets []any `mapstructure:"extraSecrets,omitempty"` + // optional array of environment variables for coredns container + // possible usecase: provides username and password for etcd user authentications + // - name: WHATEVER_ENV + // value: whatever + // - name: SOME_SECRET_ENV + // valueFrom: + // secretKeyRef: + // name: some-secret-name + // key: secret-key + Env []any `mapstructure:"env,omitempty"` + // To support legacy deployments using CoreDNS with the "k8s-app: kube-dns" label selectors. + // See https://github.com/coredns/helm/blob/master/charts/coredns/README.md#adopting-existing-coredns-resources + // k8sAppLabelOverride: "kube-dns" + // + // Custom labels to apply to Deployment, Pod, Configmap, Service, ServiceMonitor. Including autoscaler if enabled. + CustomLabels map[string]any `mapstructure:"customLabels,omitempty"` + // Custom annotations to apply to Deployment, Pod, Configmap, Service, ServiceMonitor. Including autoscaler if enabled. 
+ CustomAnnotations map[string]any `mapstructure:"customAnnotations,omitempty"` + // Alternative configuration for HPA deployment if wanted + // Create HorizontalPodAutoscaler object. + // + // hpa: + // enabled: false + // minReplicas: 1 + // maxReplicas: 10 + // metrics: + // metrics: + // - type: Resource + // resource: + // name: memory + // target: + // type: Utilization + // averageUtilization: 60 + // - type: Resource + // resource: + // name: cpu + // target: + // type: Utilization + // averageUtilization: 60 + // + Hpa Coredns1360Values_Hpa `mapstructure:"hpa,omitempty"` + // Configue a cluster-proportional-autoscaler for coredns + // See https://github.com/kubernetes-incubator/cluster-proportional-autoscaler + Autoscaler Coredns1360Values_Autoscaler `mapstructure:"autoscaler,omitempty"` + Deployment Coredns1360Values_Deployment `mapstructure:"deployment,omitempty"` + // UNSAFE. USE WITH CAUTION + // + // UNSAFE_MISC_FIELDS is a place for any additional fields that are not handled by the generator + // The value of this field is going to be available as is in the output of .ToMap() + // The fields in this map will overwrite other fields if their names match. + // Field A has the same name as field B in the UNSAFE_MISC_FIELDS map, if the mapstructure format + // of field A is exactly equal to the actual string literal of field B. 
+ // Example: + // type Values struct { + // FieldA string `mapstructure:"myField"` + // UNSAFE_MISC_FIELDS map[string]any + // } + // v := Values{ + // FieldA: "originalValue" + // UNSAFE_MISC_FIELDS: map[string]any{ + // "myField": "newValue", // same as FieldA mapstructure format + // "anotherField": "anotherValue", // new field that will be included in the map output + // } + // } + // v.ToMap() // returns map[string]any{"myField": "newValue", "anotherField": "anotherValue"} + // + UNSAFE_MISC_FIELDS map[string]any `mapstructure:"-"` +} + +func (v *Coredns1360Values) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + + // handle UNSAFE fields + for k, v := range v.UNSAFE_MISC_FIELDS { + result[k] = v + } + + return result, nil +} diff --git a/src/k8s/pkg/k8sd/features/values/metallb-0.14.8_values.go b/src/k8s/pkg/k8sd/features/values/metallb-0.14.8_values.go new file mode 100644 index 000000000..dc405f1dd --- /dev/null +++ b/src/k8s/pkg/k8sd/features/values/metallb-0.14.8_values.go @@ -0,0 +1,922 @@ +// Code generated by CHART_VALUES_STRUCT_GENERATOR. DO NOT EDIT. +// +// This file was autogenerated by the CHART_VALUES_STRUCT_GENERATOR tool on 2024-12-12. +// Any changes will be overwritten. +// +// These files are generated from the values.yaml files in the k8s/manifests/charts directory. +// Head to the k8s/manifests/charts/Makefile to see how to generate these files. +// +// Package values contains the Go structs representing the values of the Helm chart. +package values + +import ( + "fmt" + "github.com/mitchellh/mapstructure" +) + +// To configure MetalLB, you must specify ONE of the following two +// options. +type Metallb0148Values_Rbac struct { + // create specifies whether to install and use RBAC rules. 
+ Create bool `mapstructure:"create,omitempty"` +} + +func (v *Metallb0148Values_Rbac) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// the image to be used for the kuberbacproxy container +type Metallb0148Values_Prometheus_RbacProxy struct { + Repository string `mapstructure:"repository,omitempty"` + Tag string `mapstructure:"tag,omitempty"` + PullPolicy any `mapstructure:"pullPolicy,omitempty"` +} + +func (v *Metallb0148Values_Prometheus_RbacProxy) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// Prometheus Operator PodMonitors +type Metallb0148Values_Prometheus_PodMonitor struct { + // enable support for Prometheus Operator + Enabled bool `mapstructure:"enabled,omitempty"` + // optional additionnal labels for podMonitors + AdditionalLabels map[string]any `mapstructure:"additionalLabels,omitempty"` + // optional annotations for podMonitors + Annotations map[string]any `mapstructure:"annotations,omitempty"` + // Job label for scrape target + JobLabel string `mapstructure:"jobLabel,omitempty"` + // Scrape interval. If not set, the Prometheus default scrape interval is used. + Interval any `mapstructure:"interval,omitempty"` + // metric relabel configs to apply to samples before ingestion. + // - action: keep + // regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + // sourceLabels: [__name__] + MetricRelabelings []any `mapstructure:"metricRelabelings,omitempty"` + // relabel configs to apply to samples before ingestion. 
+ // - sourceLabels: [__meta_kubernetes_pod_node_name] + // separator: ; + // regex: ^(.*)$ + // target_label: nodename + // replacement: $1 + // action: replace + Relabelings []any `mapstructure:"relabelings,omitempty"` +} + +func (v *Metallb0148Values_Prometheus_PodMonitor) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// optional tls configuration for the speaker serviceMonitor, in case +// secure metrics are enabled. +type Metallb0148Values_Prometheus_ServiceMonitor_Speaker_TlsConfig struct { + InsecureSkipVerify bool `mapstructure:"insecureSkipVerify,omitempty"` +} + +func (v *Metallb0148Values_Prometheus_ServiceMonitor_Speaker_TlsConfig) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Metallb0148Values_Prometheus_ServiceMonitor_Speaker struct { + // optional additional labels for the speaker serviceMonitor + AdditionalLabels map[string]any `mapstructure:"additionalLabels,omitempty"` + // optional additional annotations for the speaker serviceMonitor + Annotations map[string]any `mapstructure:"annotations,omitempty"` + // optional tls configuration for the speaker serviceMonitor, in case + // secure metrics are enabled. 
+ TlsConfig Metallb0148Values_Prometheus_ServiceMonitor_Speaker_TlsConfig `mapstructure:"tlsConfig,omitempty"` +} + +func (v *Metallb0148Values_Prometheus_ServiceMonitor_Speaker) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// optional tls configuration for the controller serviceMonitor, in case +// secure metrics are enabled. +type Metallb0148Values_Prometheus_ServiceMonitor_Controller_TlsConfig struct { + InsecureSkipVerify bool `mapstructure:"insecureSkipVerify,omitempty"` +} + +func (v *Metallb0148Values_Prometheus_ServiceMonitor_Controller_TlsConfig) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Metallb0148Values_Prometheus_ServiceMonitor_Controller struct { + // optional additional labels for the controller serviceMonitor + AdditionalLabels map[string]any `mapstructure:"additionalLabels,omitempty"` + // optional additional annotations for the controller serviceMonitor + Annotations map[string]any `mapstructure:"annotations,omitempty"` + // optional tls configuration for the controller serviceMonitor, in case + // secure metrics are enabled. + TlsConfig Metallb0148Values_Prometheus_ServiceMonitor_Controller_TlsConfig `mapstructure:"tlsConfig,omitempty"` +} + +func (v *Metallb0148Values_Prometheus_ServiceMonitor_Controller) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// Prometheus Operator ServiceMonitors. To be used as an alternative +// to podMonitor, supports secure metrics. 
+type Metallb0148Values_Prometheus_ServiceMonitor struct { + // enable support for Prometheus Operator + Enabled bool `mapstructure:"enabled,omitempty"` + Speaker Metallb0148Values_Prometheus_ServiceMonitor_Speaker `mapstructure:"speaker,omitempty"` + Controller Metallb0148Values_Prometheus_ServiceMonitor_Controller `mapstructure:"controller,omitempty"` + // Job label for scrape target + JobLabel string `mapstructure:"jobLabel,omitempty"` + // Scrape interval. If not set, the Prometheus default scrape interval is used. + Interval any `mapstructure:"interval,omitempty"` + // metric relabel configs to apply to samples before ingestion. + // - action: keep + // regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + // sourceLabels: [__name__] + MetricRelabelings []any `mapstructure:"metricRelabelings,omitempty"` + // relabel configs to apply to samples before ingestion. + // - sourceLabels: [__meta_kubernetes_pod_node_name] + // separator: ; + // regex: ^(.*)$ + // target_label: nodename + // replacement: $1 + // action: replace + Relabelings []any `mapstructure:"relabelings,omitempty"` +} + +func (v *Metallb0148Values_Prometheus_ServiceMonitor) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Metallb0148Values_Prometheus_PrometheusRule_StaleConfig_Labels struct { + Severity string `mapstructure:"severity,omitempty"` +} + +func (v *Metallb0148Values_Prometheus_PrometheusRule_StaleConfig_Labels) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// MetalLBStaleConfig +type Metallb0148Values_Prometheus_PrometheusRule_StaleConfig struct { + Enabled bool `mapstructure:"enabled,omitempty"` + Labels 
Metallb0148Values_Prometheus_PrometheusRule_StaleConfig_Labels `mapstructure:"labels,omitempty"` +} + +func (v *Metallb0148Values_Prometheus_PrometheusRule_StaleConfig) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Metallb0148Values_Prometheus_PrometheusRule_ConfigNotLoaded_Labels struct { + Severity string `mapstructure:"severity,omitempty"` +} + +func (v *Metallb0148Values_Prometheus_PrometheusRule_ConfigNotLoaded_Labels) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// MetalLBConfigNotLoaded +type Metallb0148Values_Prometheus_PrometheusRule_ConfigNotLoaded struct { + Enabled bool `mapstructure:"enabled,omitempty"` + Labels Metallb0148Values_Prometheus_PrometheusRule_ConfigNotLoaded_Labels `mapstructure:"labels,omitempty"` +} + +func (v *Metallb0148Values_Prometheus_PrometheusRule_ConfigNotLoaded) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Metallb0148Values_Prometheus_PrometheusRule_AddressPoolExhausted_Labels struct { + Severity string `mapstructure:"severity,omitempty"` +} + +func (v *Metallb0148Values_Prometheus_PrometheusRule_AddressPoolExhausted_Labels) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// MetalLBAddressPoolExhausted +type Metallb0148Values_Prometheus_PrometheusRule_AddressPoolExhausted struct { + Enabled bool 
`mapstructure:"enabled,omitempty"` + Labels Metallb0148Values_Prometheus_PrometheusRule_AddressPoolExhausted_Labels `mapstructure:"labels,omitempty"` +} + +func (v *Metallb0148Values_Prometheus_PrometheusRule_AddressPoolExhausted) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Metallb0148Values_Prometheus_PrometheusRule_AddressPoolUsage_ThresholdsItem_Labels struct { + Severity string `mapstructure:"severity,omitempty"` +} + +func (v *Metallb0148Values_Prometheus_PrometheusRule_AddressPoolUsage_ThresholdsItem_Labels) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Metallb0148Values_Prometheus_PrometheusRule_AddressPoolUsage_ThresholdsItem struct { + Percent int64 `mapstructure:"percent,omitempty"` + Labels Metallb0148Values_Prometheus_PrometheusRule_AddressPoolUsage_ThresholdsItem_Labels `mapstructure:"labels,omitempty"` +} + +func (v *Metallb0148Values_Prometheus_PrometheusRule_AddressPoolUsage_ThresholdsItem) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Metallb0148Values_Prometheus_PrometheusRule_AddressPoolUsage struct { + Enabled bool `mapstructure:"enabled,omitempty"` + Thresholds []Metallb0148Values_Prometheus_PrometheusRule_AddressPoolUsage_ThresholdsItem `mapstructure:"thresholds,omitempty"` +} + +func (v *Metallb0148Values_Prometheus_PrometheusRule_AddressPoolUsage) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to 
decode to map[string]any: %w", err) + } + return result, nil +} + +type Metallb0148Values_Prometheus_PrometheusRule_BgpSessionDown_Labels struct { + Severity string `mapstructure:"severity,omitempty"` +} + +func (v *Metallb0148Values_Prometheus_PrometheusRule_BgpSessionDown_Labels) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// MetalLBBGPSessionDown +type Metallb0148Values_Prometheus_PrometheusRule_BgpSessionDown struct { + Enabled bool `mapstructure:"enabled,omitempty"` + Labels Metallb0148Values_Prometheus_PrometheusRule_BgpSessionDown_Labels `mapstructure:"labels,omitempty"` +} + +func (v *Metallb0148Values_Prometheus_PrometheusRule_BgpSessionDown) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// Prometheus Operator alertmanager alerts +type Metallb0148Values_Prometheus_PrometheusRule struct { + // enable alertmanager alerts + Enabled bool `mapstructure:"enabled,omitempty"` + // optional additionnal labels for prometheusRules + AdditionalLabels map[string]any `mapstructure:"additionalLabels,omitempty"` + // optional annotations for prometheusRules + Annotations map[string]any `mapstructure:"annotations,omitempty"` + // MetalLBStaleConfig + StaleConfig Metallb0148Values_Prometheus_PrometheusRule_StaleConfig `mapstructure:"staleConfig,omitempty"` + // MetalLBConfigNotLoaded + ConfigNotLoaded Metallb0148Values_Prometheus_PrometheusRule_ConfigNotLoaded `mapstructure:"configNotLoaded,omitempty"` + // MetalLBAddressPoolExhausted + AddressPoolExhausted Metallb0148Values_Prometheus_PrometheusRule_AddressPoolExhausted `mapstructure:"addressPoolExhausted,omitempty"` + AddressPoolUsage 
Metallb0148Values_Prometheus_PrometheusRule_AddressPoolUsage `mapstructure:"addressPoolUsage,omitempty"` + // MetalLBBGPSessionDown + BgpSessionDown Metallb0148Values_Prometheus_PrometheusRule_BgpSessionDown `mapstructure:"bgpSessionDown,omitempty"` + ExtraAlerts []any `mapstructure:"extraAlerts,omitempty"` +} + +func (v *Metallb0148Values_Prometheus_PrometheusRule) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Metallb0148Values_Prometheus struct { + // scrape annotations specifies whether to add Prometheus metric + // auto-collection annotations to pods. See + // https://github.com/prometheus/prometheus/blob/release-2.1/documentation/examples/prometheus-kubernetes.yml + // for a corresponding Prometheus configuration. Alternatively, you + // may want to use the Prometheus Operator + // (https://github.com/coreos/prometheus-operator) for more powerful + // monitoring configuration. If you use the Prometheus operator, this + // can be left at false. + ScrapeAnnotations bool `mapstructure:"scrapeAnnotations,omitempty"` + // port both controller and speaker will listen on for metrics + MetricsPort int64 `mapstructure:"metricsPort,omitempty"` + // if set, enables rbac proxy on the controller and speaker to expose + // the metrics via tls. + // secureMetricsPort: 9120 + // + // the name of the secret to be mounted in the speaker pod + // to expose the metrics securely. If not present, a self signed + // certificate to be used. + SpeakerMetricsTlssecret string `mapstructure:"speakerMetricsTLSSecret,omitempty"` + // the name of the secret to be mounted in the controller pod + // to expose the metrics securely. If not present, a self signed + // certificate to be used. 
+ ControllerMetricsTlssecret string `mapstructure:"controllerMetricsTLSSecret,omitempty"` + // prometheus doens't have the permission to scrape all namespaces so we give it permission to scrape metallb's one + RbacPrometheus bool `mapstructure:"rbacPrometheus,omitempty"` + // the service account used by prometheus + // required when " .Values.prometheus.rbacPrometheus == true " and " .Values.prometheus.podMonitor.enabled=true or prometheus.serviceMonitor.enabled=true " + ServiceAccount string `mapstructure:"serviceAccount,omitempty"` + // the namespace where prometheus is deployed + // required when " .Values.prometheus.rbacPrometheus == true " and " .Values.prometheus.podMonitor.enabled=true or prometheus.serviceMonitor.enabled=true " + Namespace string `mapstructure:"namespace,omitempty"` + // the image to be used for the kuberbacproxy container + RbacProxy Metallb0148Values_Prometheus_RbacProxy `mapstructure:"rbacProxy,omitempty"` + // Prometheus Operator PodMonitors + PodMonitor Metallb0148Values_Prometheus_PodMonitor `mapstructure:"podMonitor,omitempty"` + // Prometheus Operator ServiceMonitors. To be used as an alternative + // to podMonitor, supports secure metrics. 
+ ServiceMonitor Metallb0148Values_Prometheus_ServiceMonitor `mapstructure:"serviceMonitor,omitempty"` + // Prometheus Operator alertmanager alerts + PrometheusRule Metallb0148Values_Prometheus_PrometheusRule `mapstructure:"prometheusRule,omitempty"` +} + +func (v *Metallb0148Values_Prometheus) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// command: /controller +// webhookMode: enabled +type Metallb0148Values_Controller_Image struct { + Repository string `mapstructure:"repository,omitempty"` + Tag any `mapstructure:"tag,omitempty"` + PullPolicy any `mapstructure:"pullPolicy,omitempty"` +} + +func (v *Metallb0148Values_Controller_Image) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// @param controller.updateStrategy.type Metallb controller deployment strategy type. +// ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy +// e.g: +// strategy: +// type: RollingUpdate +// rollingUpdate: +// maxSurge: 25% +// maxUnavailable: 25% +type Metallb0148Values_Controller_Strategy struct { + Type string `mapstructure:"type,omitempty"` +} + +func (v *Metallb0148Values_Controller_Strategy) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Metallb0148Values_Controller_ServiceAccount struct { + // Specifies whether a ServiceAccount should be created + Create bool `mapstructure:"create,omitempty"` + // The name of the ServiceAccount to use. 
If not set and create is + // true, a name is generated using the fullname template + Name string `mapstructure:"name,omitempty"` + Annotations map[string]any `mapstructure:"annotations,omitempty"` +} + +func (v *Metallb0148Values_Controller_ServiceAccount) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Metallb0148Values_Controller_SecurityContext struct { + RunAsNonRoot bool `mapstructure:"runAsNonRoot,omitempty"` + // nobody + RunAsUser int64 `mapstructure:"runAsUser,omitempty"` + FsGroup int64 `mapstructure:"fsGroup,omitempty"` +} + +func (v *Metallb0148Values_Controller_SecurityContext) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Metallb0148Values_Controller_LivenessProbe struct { + Enabled bool `mapstructure:"enabled,omitempty"` + FailureThreshold int64 `mapstructure:"failureThreshold,omitempty"` + InitialDelaySeconds int64 `mapstructure:"initialDelaySeconds,omitempty"` + PeriodSeconds int64 `mapstructure:"periodSeconds,omitempty"` + SuccessThreshold int64 `mapstructure:"successThreshold,omitempty"` + TimeoutSeconds int64 `mapstructure:"timeoutSeconds,omitempty"` +} + +func (v *Metallb0148Values_Controller_LivenessProbe) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Metallb0148Values_Controller_ReadinessProbe struct { + Enabled bool `mapstructure:"enabled,omitempty"` + FailureThreshold int64 `mapstructure:"failureThreshold,omitempty"` + InitialDelaySeconds int64 `mapstructure:"initialDelaySeconds,omitempty"` + 
PeriodSeconds int64 `mapstructure:"periodSeconds,omitempty"` + SuccessThreshold int64 `mapstructure:"successThreshold,omitempty"` + TimeoutSeconds int64 `mapstructure:"timeoutSeconds,omitempty"` +} + +func (v *Metallb0148Values_Controller_ReadinessProbe) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// controller contains configuration specific to the MetalLB cluster +// controller. +type Metallb0148Values_Controller struct { + Enabled bool `mapstructure:"enabled,omitempty"` + // -- Controller log level. Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none` + LogLevel string `mapstructure:"logLevel,omitempty"` + // command: /controller + // webhookMode: enabled + Image Metallb0148Values_Controller_Image `mapstructure:"image,omitempty"` + // @param controller.updateStrategy.type Metallb controller deployment strategy type. 
+ // ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy + // e.g: + // strategy: + // type: RollingUpdate + // rollingUpdate: + // maxSurge: 25% + // maxUnavailable: 25% + // + Strategy Metallb0148Values_Controller_Strategy `mapstructure:"strategy,omitempty"` + ServiceAccount Metallb0148Values_Controller_ServiceAccount `mapstructure:"serviceAccount,omitempty"` + SecurityContext Metallb0148Values_Controller_SecurityContext `mapstructure:"securityContext,omitempty"` + Resources map[string]any `mapstructure:"resources,omitempty"` + // limits: + // cpu: 100m + // memory: 100Mi + NodeSelector map[string]any `mapstructure:"nodeSelector,omitempty"` + Tolerations []any `mapstructure:"tolerations,omitempty"` + PriorityClassName string `mapstructure:"priorityClassName,omitempty"` + RuntimeClassName string `mapstructure:"runtimeClassName,omitempty"` + Affinity map[string]any `mapstructure:"affinity,omitempty"` + PodAnnotations map[string]any `mapstructure:"podAnnotations,omitempty"` + Labels map[string]any `mapstructure:"labels,omitempty"` + LivenessProbe Metallb0148Values_Controller_LivenessProbe `mapstructure:"livenessProbe,omitempty"` + ReadinessProbe Metallb0148Values_Controller_ReadinessProbe `mapstructure:"readinessProbe,omitempty"` + TlsMinVersion string `mapstructure:"tlsMinVersion,omitempty"` + TlsCipherSuites string `mapstructure:"tlsCipherSuites,omitempty"` + ExtraContainers []any `mapstructure:"extraContainers,omitempty"` +} + +func (v *Metallb0148Values_Controller) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Metallb0148Values_Speaker_Memberlist struct { + Enabled bool `mapstructure:"enabled,omitempty"` + MlBindPort int64 `mapstructure:"mlBindPort,omitempty"` + MlBindAddrOverride string `mapstructure:"mlBindAddrOverride,omitempty"` + MlSecretKeyPath 
string `mapstructure:"mlSecretKeyPath,omitempty"` +} + +func (v *Metallb0148Values_Speaker_Memberlist) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Metallb0148Values_Speaker_ExcludeInterfaces struct { + Enabled bool `mapstructure:"enabled,omitempty"` +} + +func (v *Metallb0148Values_Speaker_ExcludeInterfaces) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Metallb0148Values_Speaker_Image struct { + Repository string `mapstructure:"repository,omitempty"` + Tag any `mapstructure:"tag,omitempty"` + PullPolicy any `mapstructure:"pullPolicy,omitempty"` +} + +func (v *Metallb0148Values_Speaker_Image) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// @param speaker.updateStrategy.type Speaker daemonset strategy type +// ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/ +type Metallb0148Values_Speaker_UpdateStrategy struct { + // StrategyType + // Can be set to RollingUpdate or OnDelete + // + Type string `mapstructure:"type,omitempty"` +} + +func (v *Metallb0148Values_Speaker_UpdateStrategy) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Metallb0148Values_Speaker_ServiceAccount struct { + // Specifies whether a ServiceAccount should be created + Create bool `mapstructure:"create,omitempty"` + // The name of the ServiceAccount to use. 
If not set and create is + // true, a name is generated using the fullname template + Name string `mapstructure:"name,omitempty"` + Annotations map[string]any `mapstructure:"annotations,omitempty"` +} + +func (v *Metallb0148Values_Speaker_ServiceAccount) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Metallb0148Values_Speaker_LivenessProbe struct { + Enabled bool `mapstructure:"enabled,omitempty"` + FailureThreshold int64 `mapstructure:"failureThreshold,omitempty"` + InitialDelaySeconds int64 `mapstructure:"initialDelaySeconds,omitempty"` + PeriodSeconds int64 `mapstructure:"periodSeconds,omitempty"` + SuccessThreshold int64 `mapstructure:"successThreshold,omitempty"` + TimeoutSeconds int64 `mapstructure:"timeoutSeconds,omitempty"` +} + +func (v *Metallb0148Values_Speaker_LivenessProbe) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Metallb0148Values_Speaker_ReadinessProbe struct { + Enabled bool `mapstructure:"enabled,omitempty"` + FailureThreshold int64 `mapstructure:"failureThreshold,omitempty"` + InitialDelaySeconds int64 `mapstructure:"initialDelaySeconds,omitempty"` + PeriodSeconds int64 `mapstructure:"periodSeconds,omitempty"` + SuccessThreshold int64 `mapstructure:"successThreshold,omitempty"` + TimeoutSeconds int64 `mapstructure:"timeoutSeconds,omitempty"` +} + +func (v *Metallb0148Values_Speaker_ReadinessProbe) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Metallb0148Values_Speaker_StartupProbe struct { + Enabled bool 
`mapstructure:"enabled,omitempty"` + FailureThreshold int64 `mapstructure:"failureThreshold,omitempty"` + PeriodSeconds int64 `mapstructure:"periodSeconds,omitempty"` +} + +func (v *Metallb0148Values_Speaker_StartupProbe) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Metallb0148Values_Speaker_Frr_Image struct { + Repository string `mapstructure:"repository,omitempty"` + Tag string `mapstructure:"tag,omitempty"` + PullPolicy any `mapstructure:"pullPolicy,omitempty"` +} + +func (v *Metallb0148Values_Speaker_Frr_Image) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// frr contains configuration specific to the MetalLB FRR container, +// for speaker running alongside FRR. +type Metallb0148Values_Speaker_Frr struct { + Enabled bool `mapstructure:"enabled,omitempty"` + Image Metallb0148Values_Speaker_Frr_Image `mapstructure:"image,omitempty"` + MetricsPort int64 `mapstructure:"metricsPort,omitempty"` + // if set, enables a rbac proxy sidecar container on the speaker to + // expose the frr metrics via tls. 
+ // secureMetricsPort: 9121 + // + Resources map[string]any `mapstructure:"resources,omitempty"` +} + +func (v *Metallb0148Values_Speaker_Frr) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Metallb0148Values_Speaker_Reloader struct { + Resources map[string]any `mapstructure:"resources,omitempty"` +} + +func (v *Metallb0148Values_Speaker_Reloader) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Metallb0148Values_Speaker_FrrMetrics struct { + Resources map[string]any `mapstructure:"resources,omitempty"` +} + +func (v *Metallb0148Values_Speaker_FrrMetrics) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// speaker contains configuration specific to the MetalLB speaker +// daemonset. +type Metallb0148Values_Speaker struct { + Enabled bool `mapstructure:"enabled,omitempty"` + // command: /speaker + // -- Speaker log level. 
Must be one of: `all`, `debug`, `info`, `warn`, `error` or `none` + LogLevel string `mapstructure:"logLevel,omitempty"` + TolerateMaster bool `mapstructure:"tolerateMaster,omitempty"` + Memberlist Metallb0148Values_Speaker_Memberlist `mapstructure:"memberlist,omitempty"` + ExcludeInterfaces Metallb0148Values_Speaker_ExcludeInterfaces `mapstructure:"excludeInterfaces,omitempty"` + // ignore the exclude-from-external-loadbalancer label + IgnoreExcludeLb bool `mapstructure:"ignoreExcludeLB,omitempty"` + Image Metallb0148Values_Speaker_Image `mapstructure:"image,omitempty"` + // @param speaker.updateStrategy.type Speaker daemonset strategy type + // ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/ + // + UpdateStrategy Metallb0148Values_Speaker_UpdateStrategy `mapstructure:"updateStrategy,omitempty"` + ServiceAccount Metallb0148Values_Speaker_ServiceAccount `mapstructure:"serviceAccount,omitempty"` + SecurityContext map[string]any `mapstructure:"securityContext,omitempty"` + // Defines a secret name for the controller to generate a memberlist encryption secret + // By default secretName: {{ "metallb.fullname" }}-memberlist + // + // secretName: + Resources map[string]any `mapstructure:"resources,omitempty"` + // limits: + // cpu: 100m + // memory: 100Mi + NodeSelector map[string]any `mapstructure:"nodeSelector,omitempty"` + Tolerations []any `mapstructure:"tolerations,omitempty"` + PriorityClassName string `mapstructure:"priorityClassName,omitempty"` + Affinity map[string]any `mapstructure:"affinity,omitempty"` + // Selects which runtime class will be used by the pod. 
+ RuntimeClassName string `mapstructure:"runtimeClassName,omitempty"` + PodAnnotations map[string]any `mapstructure:"podAnnotations,omitempty"` + Labels map[string]any `mapstructure:"labels,omitempty"` + LivenessProbe Metallb0148Values_Speaker_LivenessProbe `mapstructure:"livenessProbe,omitempty"` + ReadinessProbe Metallb0148Values_Speaker_ReadinessProbe `mapstructure:"readinessProbe,omitempty"` + StartupProbe Metallb0148Values_Speaker_StartupProbe `mapstructure:"startupProbe,omitempty"` + // frr contains configuration specific to the MetalLB FRR container, + // for speaker running alongside FRR. + Frr Metallb0148Values_Speaker_Frr `mapstructure:"frr,omitempty"` + Reloader Metallb0148Values_Speaker_Reloader `mapstructure:"reloader,omitempty"` + FrrMetrics Metallb0148Values_Speaker_FrrMetrics `mapstructure:"frrMetrics,omitempty"` + ExtraContainers []any `mapstructure:"extraContainers,omitempty"` +} + +func (v *Metallb0148Values_Speaker) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type Metallb0148Values_Crds struct { + Enabled bool `mapstructure:"enabled,omitempty"` + ValidationFailurePolicy string `mapstructure:"validationFailurePolicy,omitempty"` +} + +func (v *Metallb0148Values_Crds) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// frrk8s contains the configuration related to using an frrk8s instance +// (github.com/metallb/frr-k8s) as the backend for the BGP implementation. +// This allows configuring additional frr parameters in combination to those +// applied by MetalLB. +type Metallb0148Values_Frrk8S struct { + // if set, enables frrk8s as a backend. This is mutually exclusive to frr + // mode. 
+ Enabled bool `mapstructure:"enabled,omitempty"` + External bool `mapstructure:"external,omitempty"` + Namespace string `mapstructure:"namespace,omitempty"` +} + +func (v *Metallb0148Values_Frrk8S) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +// Metallb0148Values represents the values of the metallb-0.14.8_values.yaml chart +type Metallb0148Values struct { + ImagePullSecrets []any `mapstructure:"imagePullSecrets,omitempty"` + NameOverride string `mapstructure:"nameOverride,omitempty"` + FullnameOverride string `mapstructure:"fullnameOverride,omitempty"` + LoadBalancerClass string `mapstructure:"loadBalancerClass,omitempty"` + // To configure MetalLB, you must specify ONE of the following two + // options. + // + Rbac Metallb0148Values_Rbac `mapstructure:"rbac,omitempty"` + Prometheus Metallb0148Values_Prometheus `mapstructure:"prometheus,omitempty"` + // controller contains configuration specific to the MetalLB cluster + // controller. + Controller Metallb0148Values_Controller `mapstructure:"controller,omitempty"` + // speaker contains configuration specific to the MetalLB speaker + // daemonset. + Speaker Metallb0148Values_Speaker `mapstructure:"speaker,omitempty"` + Crds Metallb0148Values_Crds `mapstructure:"crds,omitempty"` + // frrk8s contains the configuration related to using an frrk8s instance + // (github.com/metallb/frr-k8s) as the backend for the BGP implementation. + // This allows configuring additional frr parameters in combination to those + // applied by MetalLB. + Frrk8S Metallb0148Values_Frrk8S `mapstructure:"frrk8s,omitempty"` + // UNSAFE. 
USE WITH CAUTION + // + // UNSAFE_MISC_FIELDS is a place for any additional fields that are not handled by the generator + // The value of this field is going to be available as is in the output of .ToMap() + // The fields in this map will overwrite other fields if their names match. + // Field A has the same name as field B in the UNSAFE_MISC_FIELDS map, if the mapstructure format + // of field A is exactly equal to the actual string literal of field B. + // Example: + // type Values struct { + // FieldA string `mapstructure:"myField"` + // UNSAFE_MISC_FIELDS map[string]any + // } + // v := Values{ + // FieldA: "originalValue" + // UNSAFE_MISC_FIELDS: map[string]any{ + // "myField": "newValue", // same as FieldA mapstructure format + // "anotherField": "anotherValue", // new field that will be included in the map output + // } + // } + // v.ToMap() // returns map[string]any{"myField": "newValue", "anotherField": "anotherValue"} + // + UNSAFE_MISC_FIELDS map[string]any `mapstructure:"-"` +} + +func (v *Metallb0148Values) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + + // handle UNSAFE fields + for k, v := range v.UNSAFE_MISC_FIELDS { + result[k] = v + } + + return result, nil +} diff --git a/src/k8s/pkg/k8sd/features/values/metrics-server-3.12.2_values.go b/src/k8s/pkg/k8sd/features/values/metrics-server-3.12.2_values.go new file mode 100644 index 000000000..9394f3bd4 --- /dev/null +++ b/src/k8s/pkg/k8sd/features/values/metrics-server-3.12.2_values.go @@ -0,0 +1,558 @@ +// Code generated by CHART_VALUES_STRUCT_GENERATOR. DO NOT EDIT. +// +// This file was autogenerated by the CHART_VALUES_STRUCT_GENERATOR tool on 2024-12-12. +// Any changes will be overwritten. +// +// These files are generated from the values.yaml files in the k8s/manifests/charts directory. 
+// Head to the k8s/manifests/charts/Makefile to see how to generate these files. +// +// Package values contains the Go structs representing the values of the Helm chart. +package values + +import ( + "fmt" + "github.com/mitchellh/mapstructure" +) + +type MetricsServer3122Values_Image struct { + Repository string `mapstructure:"repository,omitempty"` + // Overrides the image tag whose default is v{{ .Chart.AppVersion }} + Tag string `mapstructure:"tag,omitempty"` + PullPolicy string `mapstructure:"pullPolicy,omitempty"` +} + +func (v *MetricsServer3122Values_Image) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type MetricsServer3122Values_ServiceAccount struct { + // Specifies whether a service account should be created + Create bool `mapstructure:"create,omitempty"` + // Annotations to add to the service account + Annotations map[string]any `mapstructure:"annotations,omitempty"` + // The name of the service account to use. + // If not set and create is true, a name is generated using the fullname template + Name string `mapstructure:"name,omitempty"` + // The list of secrets mountable by this service account. + // See https://kubernetes.io/docs/reference/labels-annotations-taints/#enforce-mountable-secrets + Secrets []any `mapstructure:"secrets,omitempty"` +} + +func (v *MetricsServer3122Values_ServiceAccount) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type MetricsServer3122Values_Rbac struct { + // Specifies whether RBAC resources should be created + Create bool `mapstructure:"create,omitempty"` + // Note: PodSecurityPolicy will not be created when Kubernetes version is 1.25 or later. 
+ PspEnabled bool `mapstructure:"pspEnabled,omitempty"` +} + +func (v *MetricsServer3122Values_Rbac) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type MetricsServer3122Values_ApiService struct { + // Specifies if the v1beta1.metrics.k8s.io API service should be created. + // + // You typically want this enabled! If you disable API service creation you have to + // manage it outside of this chart for e.g horizontal pod autoscaling to + // work with this release. + Create bool `mapstructure:"create,omitempty"` + // Annotations to add to the API service + Annotations map[string]any `mapstructure:"annotations,omitempty"` + // Specifies whether to skip TLS verification + InsecureSkipTlsverify bool `mapstructure:"insecureSkipTLSVerify,omitempty"` + // The PEM encoded CA bundle for TLS verification + CaBundle string `mapstructure:"caBundle,omitempty"` +} + +func (v *MetricsServer3122Values_ApiService) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type MetricsServer3122Values_SecurityContext_SeccompProfile struct { + Type string `mapstructure:"type,omitempty"` +} + +func (v *MetricsServer3122Values_SecurityContext_SeccompProfile) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type MetricsServer3122Values_SecurityContext_Capabilities struct { + // - ALL + Drop []string `mapstructure:"drop,omitempty"` +} + +func (v *MetricsServer3122Values_SecurityContext_Capabilities) ToMap() (map[string]any, error) { + var result map[string]any + err := 
mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type MetricsServer3122Values_SecurityContext struct { + AllowPrivilegeEscalation bool `mapstructure:"allowPrivilegeEscalation,omitempty"` + ReadOnlyRootFilesystem bool `mapstructure:"readOnlyRootFilesystem,omitempty"` + RunAsNonRoot bool `mapstructure:"runAsNonRoot,omitempty"` + RunAsUser int64 `mapstructure:"runAsUser,omitempty"` + SeccompProfile MetricsServer3122Values_SecurityContext_SeccompProfile `mapstructure:"seccompProfile,omitempty"` + Capabilities MetricsServer3122Values_SecurityContext_Capabilities `mapstructure:"capabilities,omitempty"` +} + +func (v *MetricsServer3122Values_SecurityContext) ToMap() (map[string]any, error) { + var result map[string]any + err := mapstructure.Decode(v, &result) + if err != nil { + return nil, fmt.Errorf("failed to decode to map[string]any: %w", err) + } + return result, nil +} + +type MetricsServer3122Values_HostNetwork struct { + // Specifies if metrics-server should be started in hostNetwork mode. + // + // You would require this enabled if you use alternate overlay networking for pods and + // API server unable to communicate with metrics-server. 
	// As an example, this is required
	// if you use Weave network on EKS
	Enabled bool `mapstructure:"enabled,omitempty"`
}

// ToMap decodes the struct into a map[string]any keyed by the fields'
// mapstructure tags. The same generated pattern repeats for every type below.
func (v *MetricsServer3122Values_HostNetwork) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type MetricsServer3122Values_PodDisruptionBudget struct {
	// https://kubernetes.io/docs/tasks/run-application/configure-pdb/
	Enabled bool `mapstructure:"enabled,omitempty"`
	MinAvailable any `mapstructure:"minAvailable,omitempty"`
	MaxUnavailable any `mapstructure:"maxUnavailable,omitempty"`
}

func (v *MetricsServer3122Values_PodDisruptionBudget) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type MetricsServer3122Values_LivenessProbe_HttpGet struct {
	Path string `mapstructure:"path,omitempty"`
	Port string `mapstructure:"port,omitempty"`
	Scheme string `mapstructure:"scheme,omitempty"`
}

func (v *MetricsServer3122Values_LivenessProbe_HttpGet) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type MetricsServer3122Values_LivenessProbe struct {
	HttpGet MetricsServer3122Values_LivenessProbe_HttpGet `mapstructure:"httpGet,omitempty"`
	InitialDelaySeconds int64 `mapstructure:"initialDelaySeconds,omitempty"`
	PeriodSeconds int64 `mapstructure:"periodSeconds,omitempty"`
	FailureThreshold int64 `mapstructure:"failureThreshold,omitempty"`
}

func (v *MetricsServer3122Values_LivenessProbe) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type MetricsServer3122Values_ReadinessProbe_HttpGet struct {
	Path string `mapstructure:"path,omitempty"`
	Port string `mapstructure:"port,omitempty"`
	Scheme string `mapstructure:"scheme,omitempty"`
}

func (v *MetricsServer3122Values_ReadinessProbe_HttpGet) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type MetricsServer3122Values_ReadinessProbe struct {
	HttpGet MetricsServer3122Values_ReadinessProbe_HttpGet `mapstructure:"httpGet,omitempty"`
	InitialDelaySeconds int64 `mapstructure:"initialDelaySeconds,omitempty"`
	PeriodSeconds int64 `mapstructure:"periodSeconds,omitempty"`
	FailureThreshold int64 `mapstructure:"failureThreshold,omitempty"`
}

func (v *MetricsServer3122Values_ReadinessProbe) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type MetricsServer3122Values_Service struct {
	Type string `mapstructure:"type,omitempty"`
	Port int64 `mapstructure:"port,omitempty"`
	Annotations map[string]any `mapstructure:"annotations,omitempty"`
	// Add these labels to have metrics-server show up in `kubectl cluster-info`
	// kubernetes.io/cluster-service: "true"
	// kubernetes.io/name: "Metrics-server"
	Labels map[string]any `mapstructure:"labels,omitempty"`
}

func (v *MetricsServer3122Values_Service) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type MetricsServer3122Values_AddonResizer_Image struct {
	Repository string `mapstructure:"repository,omitempty"`
	Tag string `mapstructure:"tag,omitempty"`
}

func (v *MetricsServer3122Values_AddonResizer_Image) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type MetricsServer3122Values_AddonResizer_SecurityContext_SeccompProfile struct {
	Type string `mapstructure:"type,omitempty"`
}

func (v *MetricsServer3122Values_AddonResizer_SecurityContext_SeccompProfile) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type MetricsServer3122Values_AddonResizer_SecurityContext_Capabilities struct {
	// - ALL
	Drop []string `mapstructure:"drop,omitempty"`
}

func (v *MetricsServer3122Values_AddonResizer_SecurityContext_Capabilities) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type MetricsServer3122Values_AddonResizer_SecurityContext struct {
	AllowPrivilegeEscalation bool `mapstructure:"allowPrivilegeEscalation,omitempty"`
	ReadOnlyRootFilesystem bool `mapstructure:"readOnlyRootFilesystem,omitempty"`
	RunAsNonRoot bool `mapstructure:"runAsNonRoot,omitempty"`
	RunAsUser int64 `mapstructure:"runAsUser,omitempty"`
	SeccompProfile MetricsServer3122Values_AddonResizer_SecurityContext_SeccompProfile `mapstructure:"seccompProfile,omitempty"`
	Capabilities MetricsServer3122Values_AddonResizer_SecurityContext_Capabilities `mapstructure:"capabilities,omitempty"`
}

func (v *MetricsServer3122Values_AddonResizer_SecurityContext) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type MetricsServer3122Values_AddonResizer_Resources_Requests struct {
	Cpu string `mapstructure:"cpu,omitempty"`
	Memory string `mapstructure:"memory,omitempty"`
}

func (v *MetricsServer3122Values_AddonResizer_Resources_Requests) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type MetricsServer3122Values_AddonResizer_Resources_Limits struct {
	Cpu string `mapstructure:"cpu,omitempty"`
	Memory string `mapstructure:"memory,omitempty"`
}

func (v *MetricsServer3122Values_AddonResizer_Resources_Limits) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type MetricsServer3122Values_AddonResizer_Resources struct {
	Requests MetricsServer3122Values_AddonResizer_Resources_Requests `mapstructure:"requests,omitempty"`
	Limits MetricsServer3122Values_AddonResizer_Resources_Limits `mapstructure:"limits,omitempty"`
}

func (v *MetricsServer3122Values_AddonResizer_Resources) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type MetricsServer3122Values_AddonResizer_Nanny struct {
	Cpu string `mapstructure:"cpu,omitempty"`
	ExtraCpu string `mapstructure:"extraCpu,omitempty"`
	Memory string `mapstructure:"memory,omitempty"`
	ExtraMemory string `mapstructure:"extraMemory,omitempty"`
	MinClusterSize int64 `mapstructure:"minClusterSize,omitempty"`
	PollPeriod int64 `mapstructure:"pollPeriod,omitempty"`
	Threshold int64 `mapstructure:"threshold,omitempty"`
}

func (v *MetricsServer3122Values_AddonResizer_Nanny) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type MetricsServer3122Values_AddonResizer struct {
	Enabled bool `mapstructure:"enabled,omitempty"`
	Image MetricsServer3122Values_AddonResizer_Image `mapstructure:"image,omitempty"`
	SecurityContext MetricsServer3122Values_AddonResizer_SecurityContext `mapstructure:"securityContext,omitempty"`
	Resources MetricsServer3122Values_AddonResizer_Resources `mapstructure:"resources,omitempty"`
	Nanny MetricsServer3122Values_AddonResizer_Nanny `mapstructure:"nanny,omitempty"`
}

func (v *MetricsServer3122Values_AddonResizer) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type MetricsServer3122Values_Metrics struct {
	Enabled bool `mapstructure:"enabled,omitempty"`
}

func (v *MetricsServer3122Values_Metrics) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type MetricsServer3122Values_ServiceMonitor struct {
	Enabled bool `mapstructure:"enabled,omitempty"`
	AdditionalLabels map[string]any `mapstructure:"additionalLabels,omitempty"`
	Interval string `mapstructure:"interval,omitempty"`
	ScrapeTimeout string `mapstructure:"scrapeTimeout,omitempty"`
	MetricRelabelings []any `mapstructure:"metricRelabelings,omitempty"`
	Relabelings []any `mapstructure:"relabelings,omitempty"`
}

func (v *MetricsServer3122Values_ServiceMonitor) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// limits:
// cpu:
// memory:
type MetricsServer3122Values_Resources_Requests struct {
	Cpu string `mapstructure:"cpu,omitempty"`
	Memory string `mapstructure:"memory,omitempty"`
}

func (v *MetricsServer3122Values_Resources_Requests) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// See https://github.com/kubernetes-sigs/metrics-server#scaling
type MetricsServer3122Values_Resources struct {
	// limits:
	// cpu:
	// memory:
	Requests MetricsServer3122Values_Resources_Requests `mapstructure:"requests,omitempty"`
}

func (v *MetricsServer3122Values_Resources) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type MetricsServer3122Values_TmpVolume struct {
	EmptyDir map[string]any `mapstructure:"emptyDir,omitempty"`
}

func (v *MetricsServer3122Values_TmpVolume) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// MetricsServer3122Values represents the values of the metrics-server-3.12.2_values.yaml chart
type MetricsServer3122Values struct {
	Image MetricsServer3122Values_Image `mapstructure:"image,omitempty"`
	// - name: registrySecretName
	ImagePullSecrets []any `mapstructure:"imagePullSecrets,omitempty"`
	NameOverride string `mapstructure:"nameOverride,omitempty"`
	FullnameOverride string `mapstructure:"fullnameOverride,omitempty"`
	ServiceAccount MetricsServer3122Values_ServiceAccount `mapstructure:"serviceAccount,omitempty"`
	Rbac MetricsServer3122Values_Rbac `mapstructure:"rbac,omitempty"`
	ApiService MetricsServer3122Values_ApiService `mapstructure:"apiService,omitempty"`
	CommonLabels map[string]any `mapstructure:"commonLabels,omitempty"`
	PodLabels map[string]any `mapstructure:"podLabels,omitempty"`
	PodAnnotations map[string]any `mapstructure:"podAnnotations,omitempty"`
	PodSecurityContext map[string]any `mapstructure:"podSecurityContext,omitempty"`
	SecurityContext MetricsServer3122Values_SecurityContext `mapstructure:"securityContext,omitempty"`
	PriorityClassName string `mapstructure:"priorityClassName,omitempty"`
	ContainerPort int64 `mapstructure:"containerPort,omitempty"`
	HostNetwork MetricsServer3122Values_HostNetwork `mapstructure:"hostNetwork,omitempty"`
	Replicas int64 `mapstructure:"replicas,omitempty"`
	RevisionHistoryLimit any `mapstructure:"revisionHistoryLimit,omitempty"`
	// type: RollingUpdate
	// rollingUpdate:
	// maxSurge: 0
	// maxUnavailable: 1
	UpdateStrategy map[string]any `mapstructure:"updateStrategy,omitempty"`
	PodDisruptionBudget MetricsServer3122Values_PodDisruptionBudget `mapstructure:"podDisruptionBudget,omitempty"`
	// - --cert-dir=/tmp
	// - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
	// - --kubelet-use-node-status-port
	// - --metric-resolution=15s
	DefaultArgs []string `mapstructure:"defaultArgs,omitempty"`
	Args []any `mapstructure:"args,omitempty"`
	LivenessProbe MetricsServer3122Values_LivenessProbe `mapstructure:"livenessProbe,omitempty"`
	ReadinessProbe MetricsServer3122Values_ReadinessProbe `mapstructure:"readinessProbe,omitempty"`
	Service MetricsServer3122Values_Service `mapstructure:"service,omitempty"`
	AddonResizer MetricsServer3122Values_AddonResizer `mapstructure:"addonResizer,omitempty"`
	Metrics MetricsServer3122Values_Metrics `mapstructure:"metrics,omitempty"`
	ServiceMonitor MetricsServer3122Values_ServiceMonitor `mapstructure:"serviceMonitor,omitempty"`
	// See https://github.com/kubernetes-sigs/metrics-server#scaling
	Resources MetricsServer3122Values_Resources `mapstructure:"resources,omitempty"`
	ExtraVolumeMounts []any `mapstructure:"extraVolumeMounts,omitempty"`
	ExtraVolumes []any `mapstructure:"extraVolumes,omitempty"`
	NodeSelector map[string]any `mapstructure:"nodeSelector,omitempty"`
	Tolerations []any `mapstructure:"tolerations,omitempty"`
	Affinity map[string]any `mapstructure:"affinity,omitempty"`
	TopologySpreadConstraints []any `mapstructure:"topologySpreadConstraints,omitempty"`
	DnsConfig map[string]any `mapstructure:"dnsConfig,omitempty"`
	// Annotations to add to the deployment
	DeploymentAnnotations map[string]any `mapstructure:"deploymentAnnotations,omitempty"`
	SchedulerName string `mapstructure:"schedulerName,omitempty"`
	TmpVolume MetricsServer3122Values_TmpVolume `mapstructure:"tmpVolume,omitempty"`
	// UNSAFE. USE WITH CAUTION
	//
	// UNSAFE_MISC_FIELDS is a place for any additional fields that are not handled by the generator
	// The value of this field is going to be available as is in the output of .ToMap()
	// The fields in this map will overwrite other fields if their names match:
	// a struct field A is considered to have the same name as an entry B of this map
	// when A's mapstructure tag name is exactly equal to B's map key.
	// Example:
	// type Values struct {
	//     FieldA string `mapstructure:"myField"`
	//     UNSAFE_MISC_FIELDS map[string]any
	// }
	// v := Values{
	//     FieldA: "originalValue"
	//     UNSAFE_MISC_FIELDS: map[string]any{
	//         "myField": "newValue", // same as FieldA mapstructure format
	//         "anotherField": "anotherValue", // new field that will be included in the map output
	//     }
	// }
	// v.ToMap() // returns map[string]any{"myField": "newValue", "anotherField": "anotherValue"}
	//
	UNSAFE_MISC_FIELDS map[string]any `mapstructure:"-"`
}

// ToMap decodes the struct via its mapstructure tags and then overlays the
// entries of UNSAFE_MISC_FIELDS, which win on key collisions.
func (v *MetricsServer3122Values) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}

	// handle UNSAFE fields
	// NOTE(review): the loop variable v shadows the receiver v. This is safe —
	// the range expression is evaluated once, before the body runs — but a
	// distinct name in the generator would read better.
	for k, v := range v.UNSAFE_MISC_FIELDS {
		result[k] = v
	}

	return result, nil
}
diff --git a/src/k8s/pkg/k8sd/features/values/rawfile-csi-0.9.0_values.go b/src/k8s/pkg/k8sd/features/values/rawfile-csi-0.9.0_values.go
new file mode 100644
index 000000000..ec259dab5
--- /dev/null
+++ b/src/k8s/pkg/k8sd/features/values/rawfile-csi-0.9.0_values.go
@@ -0,0 +1,237 @@
// Code generated by CHART_VALUES_STRUCT_GENERATOR. DO NOT EDIT.
//
// This file was autogenerated by the CHART_VALUES_STRUCT_GENERATOR tool on 2024-12-12.
// Any changes will be overwritten.
//
// These files are generated from the values.yaml files in the k8s/manifests/charts directory.
// Head to the k8s/manifests/charts/Makefile to see how to generate these files.
//
// Package values contains the Go structs representing the values of the Helm chart.
package values

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type RawfileCsi090Values_Defaults_Image struct {
	Repository string `mapstructure:"repository,omitempty"`
	Tag string `mapstructure:"tag,omitempty"`
	PullPolicy string `mapstructure:"pullPolicy,omitempty"`
}

// ToMap decodes the struct into a map[string]any keyed by the fields'
// mapstructure tags. The same generated pattern repeats for every type below.
func (v *RawfileCsi090Values_Defaults_Image) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type RawfileCsi090Values_Defaults_Resources_Limits struct {
	// NOTE(review): the cpu limit is typed int64 here while the request cpu
	// below is a string — presumably this mirrors the literal YAML values in
	// the chart's values.yaml; confirm against the generator's type inference.
	Cpu int64 `mapstructure:"cpu,omitempty"`
	Memory string `mapstructure:"memory,omitempty"`
}

func (v *RawfileCsi090Values_Defaults_Resources_Limits) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type RawfileCsi090Values_Defaults_Resources_Requests struct {
	Cpu string `mapstructure:"cpu,omitempty"`
	Memory string `mapstructure:"memory,omitempty"`
}

func (v *RawfileCsi090Values_Defaults_Resources_Requests) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type RawfileCsi090Values_Defaults_Resources struct {
	Limits RawfileCsi090Values_Defaults_Resources_Limits `mapstructure:"limits,omitempty"`
	Requests RawfileCsi090Values_Defaults_Resources_Requests `mapstructure:"requests,omitempty"`
}

func (v *RawfileCsi090Values_Defaults_Resources) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type RawfileCsi090Values_Defaults struct {
	Image RawfileCsi090Values_Defaults_Image `mapstructure:"image,omitempty"`
	Resources RawfileCsi090Values_Defaults_Resources `mapstructure:"resources,omitempty"`
}

func (v *RawfileCsi090Values_Defaults) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type RawfileCsi090Values_Controller struct {
	// - csi-driver
	// - --disable-metrics
	CsiDriverArgs []string `mapstructure:"csiDriverArgs,omitempty"`
}

func (v *RawfileCsi090Values_Controller) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type RawfileCsi090Values_Images struct {
	CsiNodeDriverRegistrar string `mapstructure:"csiNodeDriverRegistrar,omitempty"`
	CsiProvisioner string `mapstructure:"csiProvisioner,omitempty"`
	CsiResizer string `mapstructure:"csiResizer,omitempty"`
	CsiSnapshotter string `mapstructure:"csiSnapshotter,omitempty"`
}

func (v *RawfileCsi090Values_Images) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type RawfileCsi090Values_Node_Storage struct {
	Path string `mapstructure:"path,omitempty"`
}

func (v *RawfileCsi090Values_Node_Storage) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type RawfileCsi090Values_Node_Metrics struct {
	Enabled bool `mapstructure:"enabled,omitempty"`
}

func (v *RawfileCsi090Values_Node_Metrics) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type RawfileCsi090Values_Node struct {
	Storage RawfileCsi090Values_Node_Storage `mapstructure:"storage,omitempty"`
	Metrics RawfileCsi090Values_Node_Metrics `mapstructure:"metrics,omitempty"`
}

func (v *RawfileCsi090Values_Node) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type RawfileCsi090Values_StorageClass struct {
	Enabled bool `mapstructure:"enabled,omitempty"`
	Name string `mapstructure:"name,omitempty"`
	IsDefault bool `mapstructure:"isDefault,omitempty"`
	ReclaimPolicy string `mapstructure:"reclaimPolicy,omitempty"`
	VolumeBindingMode string `mapstructure:"volumeBindingMode,omitempty"`
}

func (v *RawfileCsi090Values_StorageClass) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

type RawfileCsi090Values_ServiceMonitor struct {
	Enabled bool `mapstructure:"enabled,omitempty"`
	Interval string `mapstructure:"interval,omitempty"`
}

func (v *RawfileCsi090Values_ServiceMonitor) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}
	return result, nil
}

// RawfileCsi090Values represents the values of the rawfile-csi-0.9.0_values.yaml chart
type RawfileCsi090Values struct {
	ProvisionerName string `mapstructure:"provisionerName,omitempty"`
	Defaults RawfileCsi090Values_Defaults `mapstructure:"defaults,omitempty"`
	Controller RawfileCsi090Values_Controller `mapstructure:"controller,omitempty"`
	Images RawfileCsi090Values_Images `mapstructure:"images,omitempty"`
	Node RawfileCsi090Values_Node `mapstructure:"node,omitempty"`
	StorageClass RawfileCsi090Values_StorageClass `mapstructure:"storageClass,omitempty"`
	ImagePullSecrets []any `mapstructure:"imagePullSecrets,omitempty"`
	ServiceMonitor RawfileCsi090Values_ServiceMonitor `mapstructure:"serviceMonitor,omitempty"`
	// UNSAFE. USE WITH CAUTION
	//
	// UNSAFE_MISC_FIELDS is a place for any additional fields that are not handled by the generator
	// The value of this field is going to be available as is in the output of .ToMap()
	// The fields in this map will overwrite other fields if their names match:
	// a struct field A is considered to have the same name as an entry B of this map
	// when A's mapstructure tag name is exactly equal to B's map key.
	// Example:
	// type Values struct {
	//     FieldA string `mapstructure:"myField"`
	//     UNSAFE_MISC_FIELDS map[string]any
	// }
	// v := Values{
	//     FieldA: "originalValue"
	//     UNSAFE_MISC_FIELDS: map[string]any{
	//         "myField": "newValue", // same as FieldA mapstructure format
	//         "anotherField": "anotherValue", // new field that will be included in the map output
	//     }
	// }
	// v.ToMap() // returns map[string]any{"myField": "newValue", "anotherField": "anotherValue"}
	//
	UNSAFE_MISC_FIELDS map[string]any `mapstructure:"-"`
}

// ToMap decodes the struct via its mapstructure tags and then overlays the
// entries of UNSAFE_MISC_FIELDS, which win on key collisions.
func (v *RawfileCsi090Values) ToMap() (map[string]any, error) {
	var result map[string]any
	err := mapstructure.Decode(v, &result)
	if err != nil {
		return nil, fmt.Errorf("failed to decode to map[string]any: %w", err)
	}

	// handle UNSAFE fields
	// NOTE(review): the loop variable v shadows the receiver v. This is safe —
	// the range expression is evaluated once, before the body runs — but a
	// distinct name in the generator would read better.
	for k, v := range v.UNSAFE_MISC_FIELDS {
		result[k] = v
	}

	return result, nil
}