From d69acf41da6d58e59bcb1602e4dc8dad97a4eda4 Mon Sep 17 00:00:00 2001 From: gardener-robot-ci-3 Date: Tue, 10 Dec 2024 16:52:03 +0000 Subject: [PATCH] Automatic build triggered by last commit --- docs/404.html | 2 +- docs/_print/adopter/index.html | 2 +- docs/_print/community/index.html | 2 +- docs/_print/contribute/docs/index.html | 2 +- docs/_print/docs/contribute/code/index.html | 2 +- docs/adopter/index.html | 2 +- docs/blog/2018/06.11-anti-patterns/index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- .../2018/06.11-namespace-isolation/index.html | 2 +- .../2018/06.11-namespace-scope/index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- .../12.22-cookies-are-dangerous/index.html | 2 +- .../2018/12.25-gardener_cookies/index.html | 2 +- docs/blog/2018/_print/index.html | 2 +- docs/blog/2018/index.html | 2 +- docs/blog/2018/page/2/index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- docs/blog/2019/_print/index.html | 2 +- docs/blog/2019/index.html | 2 +- .../index.html | 2 +- .../2020/05.27-pingcaps-experience/index.html | 2 +- .../08.06-gardener-v1.8.0-released/index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- .../11.23-gardener-v1.13-released/index.html | 2 +- .../index.html | 2 +- docs/blog/2020/_print/index.html | 2 +- docs/blog/2020/index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- docs/blog/2021/_print/index.html | 2 +- docs/blog/2021/index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- docs/blog/2022/_print/index.html | 2 +- docs/blog/2022/index.html | 2 +- .../index.html | 2 +- docs/blog/2023/_print/index.html | 2 +- docs/blog/2023/index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- .../2024/11-06-promcon-eu-2024/index.html | 2 +- docs/blog/2024/11-09-demo/index.html | 2 +- .../index.html | 2 +- docs/blog/2024/_print/index.html | 2 +- docs/blog/2024/index.html | 2 +- docs/blog/_print/index.html | 2 +- docs/blog/index.html | 2 +- docs/blog/page/2/index.html | 2 +- docs/blog/page/3/index.html | 2 +- docs/blog/page/4/index.html | 2 +- docs/blog/page/5/index.html | 2 +- docs/community/index.html | 2 +- docs/contribute/docs/index.html | 2 +- docs/curated-links/index.html | 2 +- docs/docs/_print/index.html | 52 +- docs/docs/contribute/_print/index.html | 2 +- docs/docs/contribute/code/cicd/index.html | 2 +- .../contributing-bigger-changes/index.html | 2 +- .../contribute/code/dependencies/index.html | 2 +- docs/docs/contribute/code/index.html | 2 +- .../code/security-guide/_print/index.html | 2 +- .../contribute/code/security-guide/index.html | 2 +- .../adding-existing-documentation/index.html | 2 +- .../documentation/formatting-guide/index.html | 2 +- .../documentation/images/index.html | 2 +- .../documentation/markup/index.html | 2 +- .../documentation/organization/index.html | 2 +- .../documentation/pr-description/index.html | 2 +- .../documentation/shortcodes/index.html | 2 +- .../style-guide/_print/index.html | 2 +- .../style-guide/concept_template/index.html | 2 +- .../documentation/style-guide/index.html | 2 +- .../style-guide/reference_template/index.html | 2 +- .../style-guide/task_template/index.html | 2 +- docs/docs/contribute/index.html | 2 +- docs/docs/dashboard/_print/index.html | 2 +- 
.../dashboard/access-restrictions/index.html | 2 +- docs/docs/dashboard/architecture/index.html | 2 +- .../automated-resource-management/index.html | 2 +- .../docs/dashboard/connect-kubectl/index.html | 2 +- docs/docs/dashboard/custom-fields/index.html | 2 +- docs/docs/dashboard/customization/index.html | 2 +- docs/docs/dashboard/index.html | 2 +- docs/docs/dashboard/local-setup/index.html | 2 +- docs/docs/dashboard/process/index.html | 2 +- .../dashboard/project-operations/index.html | 2 +- .../dashboard/terminal-shortcuts/index.html | 2 +- docs/docs/dashboard/testing/index.html | 2 +- docs/docs/dashboard/using-terminal/index.html | 2 +- docs/docs/dashboard/webterminals/index.html | 2 +- .../working-with-projects/index.html | 2 +- docs/docs/extensions/_print/index.html | 2 +- .../_print/index.html | 2 +- .../_print/index.html | 2 +- .../index.html | 2 +- .../container-runtime-extensions/index.html | 2 +- docs/docs/extensions/index.html | 2 +- .../_print/index.html | 2 +- .../_print/index.html | 2 +- .../deployment/index.html | 2 +- .../index.html | 2 +- .../local-setup/index.html | 2 +- .../operations/index.html | 2 +- .../tutorials/_print/index.html | 2 +- .../tutorials/index.html | 2 +- .../index.html | 2 +- .../usage/index.html | 2 +- .../_print/index.html | 2 +- .../deployment/index.html | 2 +- .../dual-stack-ingress/index.html | 2 +- .../index.html | 2 +- .../ipv6/index.html | 2 +- .../local-setup/index.html | 2 +- .../operations/index.html | 2 +- .../index.html | 2 +- .../usage/index.html | 2 +- .../_print/index.html | 2 +- .../azure-permissions/index.html | 2 +- .../deployment/index.html | 2 +- .../index.html | 2 +- .../local-setup/index.html | 2 +- .../operations/index.html | 2 +- .../tutorials/_print/index.html | 2 +- .../tutorials/index.html | 2 +- .../index.html | 2 +- .../usage/index.html | 2 +- .../_print/index.html | 2 +- .../index.html | 2 +- .../operations/index.html | 2 +- .../usage/index.html | 2 +- .../_print/index.html | 2 +- .../deployment/index.html | 2 +- .../index.html | 2 +- .../local-setup/index.html | 2 +- .../operations/index.html | 2 +- .../datadisk-image-restore/index.html | 2 +- .../tutorials/_print/index.html | 2 +- .../tutorials/index.html | 2 +- .../index.html | 2 +- .../usage/index.html | 2 +- .../_print/index.html | 2 +- .../deployment/index.html | 2 +- .../index.html | 2 +- .../local-setup/index.html | 2 +- .../operations/index.html | 2 +- .../usage/index.html | 2 +- .../infrastructure-extensions/index.html | 2 +- .../network-extensions/_print/index.html | 2 +- .../_print/index.html | 2 +- .../deployment/index.html | 2 +- .../index.html | 2 +- .../operations/index.html | 2 +- .../shoot_overlay_network/index.html | 2 +- .../usage/index.html | 2 +- .../_print/index.html | 2 +- .../index.html | 2 +- .../usage/index.html | 2 +- .../extensions/network-extensions/index.html | 2 +- .../os-extensions/_print/index.html | 2 +- .../_print/index.html | 2 +- .../gardener-extension-os-coreos/index.html | 2 +- .../usage/index.html | 2 +- .../_print/index.html | 2 +- .../index.html | 2 +- .../_print/index.html | 2 +- .../index.html | 2 +- .../usage/index.html | 2 +- .../_print/index.html | 2 +- .../gardener-extension-os-ubuntu/index.html | 2 +- .../usage/index.html | 2 +- docs/docs/extensions/os-extensions/index.html | 2 +- docs/docs/extensions/others/_print/index.html | 2 +- .../_print/index.html | 2 +- .../extension-registry-cache/index.html | 2 +- .../getting-started-locally/index.html | 2 +- .../getting-started-remotely/index.html | 2 +- .../index.html | 2 +- 
.../registry-cache/configuration/index.html | 2 +- .../upstream-credentials/index.html | 2 +- .../registry-mirror/configuration/index.html | 2 +- .../_print/index.html | 2 +- .../alerting/index.html | 2 +- .../custom_shoot_issuer/index.html | 2 +- .../deployment/index.html | 2 +- .../index.html | 2 +- .../request_cert/index.html | 2 +- .../request_default_domain_cert/index.html | 2 +- .../tutorials/gateway-api-gateways/index.html | 2 +- .../tutorials/istio-gateways/index.html | 2 +- .../index.html | 2 +- .../_print/index.html | 2 +- .../configuration/index.html | 2 +- .../deployment/index.html | 2 +- .../dns_names/index.html | 2 +- .../dns_providers/index.html | 2 +- .../index.html | 2 +- .../tutorials/gateway-api-gateways/index.html | 2 +- .../tutorials/istio-gateways/index.html | 2 +- .../_print/index.html | 2 +- .../deployment/index.html | 2 +- .../index.html | 2 +- .../lakom/index.html | 2 +- .../shoot-extension/index.html | 2 +- .../_print/index.html | 2 +- .../deployment/index.html | 2 +- .../index.html | 2 +- .../shoot-networking-filter/index.html | 2 +- .../_print/index.html | 2 +- .../deployment/index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- .../_print/index.html | 2 +- .../deployment/index.html | 2 +- .../index.html | 2 +- .../openidconnects/index.html | 2 +- .../_print/index.html | 2 +- .../configuration/index.html | 2 +- .../getting-started-remotely/index.html | 2 +- .../getting-started/index.html | 2 +- .../index.html | 2 +- .../monitoring/index.html | 2 +- .../shoot-rsyslog-relp/index.html | 2 +- docs/docs/extensions/others/index.html | 2 +- docs/docs/faq/_print/index.html | 2 +- docs/docs/faq/add-feature-gates/index.html | 2 +- docs/docs/faq/automatic-migrate/index.html | 2 +- docs/docs/faq/automatic-upgrade/index.html | 2 +- docs/docs/faq/backup/index.html | 2 +- docs/docs/faq/clusterhealthz/index.html | 2 +- .../faq/configure-worker-pools/index.html | 2 +- docs/docs/faq/dns-config/index.html | 2 +- docs/docs/faq/index.html | 2 +- .../docs/faq/privileged-containers/index.html | 2 +- .../docs/faq/reconciliation-impact/index.html | 2 +- docs/docs/faq/rotate-iaas-keys/index.html | 2 +- docs/docs/gardenctl-v2/index.html | 8 +- docs/docs/gardener/_print/index.html | 46 +- docs/docs/gardener/advanced/_print/index.html | 2 +- .../index.html | 2 +- .../index.html | 2 +- .../advanced/csi_components/index.html | 2 +- .../custom-containerd-config/index.html | 2 +- docs/docs/gardener/advanced/index.html | 2 +- .../advanced/node-readiness/index.html | 2 +- .../advanced/shoot_cleanup/index.html | 2 +- .../gardener/advanced/tolerations/index.html | 2 +- .../gardener/api-reference/_print/index.html | 12 +- .../api-reference/authentication/index.html | 2 +- .../gardener/api-reference/core-v1/index.html | 2 +- .../gardener/api-reference/core/index.html | 13 +- .../api-reference/extensions/index.html | 12 +- docs/docs/gardener/api-reference/index.html | 2 +- .../api-reference/operations/index.html | 2 +- .../api-reference/operator/index.html | 2 +- .../api-reference/provider-local/index.html | 2 +- .../api-reference/resources/index.html | 2 +- .../api-reference/security/index.html | 2 +- .../api-reference/seedmanagement/index.html | 2 +- .../api-reference/settings/index.html | 2 +- .../index.html | 2 +- .../gardener/autoscaling/_print/index.html | 4 +- .../autoscaling/dns-autoscaling/index.html | 2 +- docs/docs/gardener/autoscaling/index.html | 2 +- .../autoscaling/shoot_autoscaling/index.html | 2 +- .../index.html | 5 +- .../docs/gardener/changing-the-api/index.html | 2 +- 
.../gardener/component-checklist/index.html | 2 +- docs/docs/gardener/concepts/_print/index.html | 8 +- .../concepts/admission-controller/index.html | 2 +- .../apiserver-admission-plugins/index.html | 2 +- .../gardener/concepts/apiserver/index.html | 2 +- .../gardener/concepts/architecture/index.html | 2 +- .../concepts/backup-restore/index.html | 2 +- .../gardener/concepts/cluster-api/index.html | 2 +- .../concepts/controller-manager/index.html | 2 +- docs/docs/gardener/concepts/etcd/index.html | 2 +- .../gardener/concepts/gardenadm/index.html | 2 +- .../gardener/concepts/gardenlet/index.html | 2 +- docs/docs/gardener/concepts/index.html | 2 +- .../gardener/concepts/node-agent/index.html | 2 +- .../gardener/concepts/operator/index.html | 2 +- .../concepts/resource-manager/index.html | 15 +- .../gardener/concepts/scheduler/index.html | 2 +- docs/docs/gardener/configuration/index.html | 2 +- .../control_plane_migration/index.html | 2 +- docs/docs/gardener/defaulting/index.html | 2 +- docs/docs/gardener/dependencies/index.html | 2 +- .../gardener/deployment/_print/index.html | 6 +- .../index.html | 2 +- .../deployment/configuring_logging/index.html | 2 +- .../deployment/deploy_gardenlet/index.html | 2 +- .../deploy_gardenlet_automatically/index.html | 2 +- .../deploy_gardenlet_manually/index.html | 2 +- .../deploy_gardenlet_via_operator/index.html | 2 +- .../deployment/feature_gates/index.html | 7 +- .../gardenlet_api_access/index.html | 2 +- .../getting_started_locally/index.html | 2 +- .../index.html | 2 +- .../deployment/image_vector/index.html | 2 +- docs/docs/gardener/deployment/index.html | 2 +- .../deployment/migration_v0_to_v1/index.html | 2 +- .../index.html | 2 +- .../deployment/setup_gardener/index.html | 5 +- .../deployment/version_skew_policy/index.html | 2 +- .../gardener/extensions/_print/index.html | 2 +- .../gardener/extensions/admission/index.html | 2 +- .../extensions/ca-rotation/index.html | 2 +- .../gardener/extensions/cluster/index.html | 2 +- .../controllerregistration/index.html | 2 +- .../controlplane-webhooks/index.html | 2 +- .../extensions/conventions/index.html | 2 +- .../extensions/force-deletion/index.html | 2 +- .../extensions/garden-api-access/index.html | 2 +- .../extensions/healthcheck-library/index.html | 2 +- .../gardener/extensions/heartbeat/index.html | 2 +- docs/docs/gardener/extensions/index.html | 2 +- .../logging-and-monitoring/index.html | 2 +- .../index.html | 2 +- .../extensions/managedresources/index.html | 2 +- .../gardener/extensions/migration/index.html | 2 +- .../gardener/extensions/overview/index.html | 2 +- .../extensions/project-roles/index.html | 2 +- .../extensions/provider-local/index.html | 2 +- .../extensions/reconcile-trigger/index.html | 2 +- .../referenced-resources/index.html | 2 +- .../extensions/resources/_print/index.html | 2 +- .../resources/backupbucket/index.html | 2 +- .../resources/backupentry/index.html | 2 +- .../extensions/resources/bastion/index.html | 2 +- .../resources/containerruntime/index.html | 2 +- .../controlplane-exposure/index.html | 2 +- .../resources/controlplane/index.html | 2 +- .../extensions/resources/dnsrecord/index.html | 2 +- .../extensions/resources/extension/index.html | 2 +- .../gardener/extensions/resources/index.html | 2 +- .../resources/infrastructure/index.html | 2 +- .../extensions/resources/network/index.html | 2 +- .../operatingsystemconfig/index.html | 2 +- .../extensions/resources/worker/index.html | 2 +- .../shoot-health-status-conditions/index.html | 2 +- 
.../extensions/shoot-maintenance/index.html | 2 +- .../extensions/shoot-webhooks/index.html | 2 +- .../getting_started_locally/index.html | 2 +- .../index.html | 2 +- .../high-availability/_print/index.html | 2 +- .../gardener/high-availability/index.html | 2 +- .../shoot_high_availability/index.html | 2 +- .../index.html | 2 +- docs/docs/gardener/index.html | 2 +- docs/docs/gardener/index.xml | 2 +- docs/docs/gardener/ipv6/index.html | 2 +- docs/docs/gardener/istio/index.html | 2 +- .../gardener/kubernetes-clients/index.html | 2 +- docs/docs/gardener/local_setup/index.html | 2 +- docs/docs/gardener/log_parsers/index.html | 2 +- docs/docs/gardener/logging/index.html | 5 +- docs/docs/gardener/managed_seed/index.html | 5 +- .../docs/gardener/monitoring-stack/index.html | 5 +- .../gardener/monitoring/_print/index.html | 4 +- .../gardener/monitoring/alerting/index.html | 5 +- .../monitoring/connectivity/index.html | 2 +- docs/docs/gardener/monitoring/index.html | 2 +- .../gardener/monitoring/profiling/index.html | 2 +- .../docs/gardener/network_policies/index.html | 13 +- .../gardener/networking/_print/index.html | 2 +- .../networking/custom-dns-config/index.html | 2 +- .../dns-search-path-optimization/index.html | 2 +- .../networking/exposureclasses/index.html | 2 +- docs/docs/gardener/networking/index.html | 2 +- .../networking/node-local-dns/index.html | 2 +- .../index.html | 2 +- .../networking/shoot_networking/index.html | 2 +- .../gardener/new-cloud-provider/index.html | 5 +- .../new-kubernetes-version/index.html | 5 +- .../gardener/observability/_print/index.html | 4 +- docs/docs/gardener/observability/index.html | 2 +- .../gardener/observability/logging/index.html | 7 +- .../docs/gardener/priority-classes/index.html | 2 +- docs/docs/gardener/process/index.html | 2 +- docs/docs/gardener/project/_print/index.html | 2 +- docs/docs/gardener/project/index.html | 2 +- .../namespaced-cloud-profiles/index.html | 2 +- .../docs/gardener/project/projects/index.html | 2 +- .../service-account-manager/index.html | 2 +- .../gardener/reversed-vpn-tunnel/index.html | 2 +- .../gardener/secrets_management/index.html | 2 +- docs/docs/gardener/security/_print/index.html | 2 +- .../default_seccomp_profile/index.html | 2 +- .../etcd_encryption_config/index.html | 2 +- docs/docs/gardener/security/index.html | 2 +- .../security/openidconnect-presets/index.html | 2 +- .../gardener/security/pod-security/index.html | 2 +- .../security/shoot_auditpolicy/index.html | 2 +- .../security/shoot_serviceaccounts/index.html | 2 +- .../gardener/seed_bootstrapping/index.html | 2 +- docs/docs/gardener/seed_settings/index.html | 5 +- .../shoot-operations/_print/index.html | 2 +- .../docs/gardener/shoot-operations/index.html | 2 +- .../shoot_credentials_rotation/index.html | 2 +- .../shoot_operations/index.html | 2 +- .../shoot-operations/shoot_updates/index.html | 2 +- .../shoot_versions/index.html | 2 +- .../supported_k8s_versions/index.html | 2 +- .../worker_pool_k8s_versions/index.html | 2 +- docs/docs/gardener/shoot/_print/index.html | 2 +- .../shoot/access_restrictions/index.html | 2 +- docs/docs/gardener/shoot/index.html | 2 +- .../gardener/shoot/shoot_access/index.html | 2 +- .../gardener/shoot/shoot_hibernate/index.html | 2 +- .../shoot/shoot_info_configmap/index.html | 2 +- .../shoot/shoot_maintenance/index.html | 2 +- .../gardener/shoot/shoot_purposes/index.html | 2 +- .../shoot_scheduling_profiles/index.html | 2 +- .../gardener/shoot/shoot_status/index.html | 2 +- .../shoot_supported_architectures/index.html | 2 
+- .../shoot/shoot_workerless/index.html | 2 +- .../shoot/shoot_workers_settings/index.html | 2 +- docs/docs/gardener/testing/index.html | 2 +- .../gardener/testmachinery_tests/index.html | 7 +- .../topology_aware_routing/index.html | 2 +- .../trusted-tls-for-control-planes/index.html | 2 +- .../trusted-tls-for-garden-runtime/index.html | 2 +- docs/docs/getting-started/_print/index.html | 2 +- .../getting-started/architecture/index.html | 2 +- .../getting-started/ca-components/index.html | 2 +- .../common-pitfalls/index.html | 2 +- .../features/_print/index.html | 2 +- .../certificate-management/index.html | 2 +- .../features/cluster-autoscaler/index.html | 2 +- .../features/credential-rotation/index.html | 2 +- .../features/dns-management/index.html | 2 +- .../features/hibernation/index.html | 2 +- docs/docs/getting-started/features/index.html | 2 +- .../getting-started/features/vpa/index.html | 2 +- .../features/workerless-shoots/index.html | 2 +- docs/docs/getting-started/index.html | 2 +- .../getting-started/introduction/index.html | 2 +- .../docs/getting-started/lifecycle/index.html | 2 +- .../observability/_print/index.html | 2 +- .../observability/alerts/index.html | 2 +- .../observability/components/index.html | 2 +- .../getting-started/observability/index.html | 2 +- .../observability/shoot-status/index.html | 2 +- docs/docs/getting-started/project/index.html | 2 +- docs/docs/getting-started/shoots/index.html | 2 +- docs/docs/glossary/_print/index.html | 2 +- docs/docs/glossary/index.html | 2 +- docs/docs/guides/_print/index.html | 4 +- .../administer-shoots/_print/index.html | 2 +- .../backup-restore/index.html | 2 +- .../conversion-webhook/index.html | 2 +- .../create-delete-shoot/index.html | 2 +- .../index.html | 2 +- .../guides/administer-shoots/gpu/index.html | 2 +- docs/docs/guides/administer-shoots/index.html | 2 +- .../maintain-shoot/index.html | 2 +- .../administer-shoots/oidc-login/index.html | 2 +- .../administer-shoots/scalability/index.html | 2 +- .../administer-shoots/tailscale/index.html | 2 +- .../guides/applications/_print/index.html | 4 +- .../access-pod-from-local/index.html | 2 +- .../applications/antipattern/index.html | 2 +- .../commit-secret-fail/index.html | 2 +- .../applications/container-startup/index.html | 2 +- .../applications/content_trust/index.html | 2 +- .../dockerfile-pitfall/index.html | 2 +- .../applications/dynamic-pvc/index.html | 2 +- .../applications/image-pull-policy/index.html | 2 +- docs/docs/guides/applications/index.html | 2 +- .../insecure-configuration/index.html | 2 +- .../applications/knative-install/index.html | 2 +- .../missing-registry-permission/index.html | 2 +- .../applications/network-isolation/index.html | 2 +- .../pod-disruption-budget/index.html | 2 +- .../guides/applications/prometheus/index.html | 2 +- .../applications/secure-seccomp/index.html | 2 +- .../service-cache-control/index.html | 2 +- .../index.html | 5 +- .../guides/client-tools/_print/index.html | 2 +- .../client-tools/bash-kubeconfig/index.html | 2 +- .../guides/client-tools/bash-tips/index.html | 2 +- docs/docs/guides/client-tools/index.html | 2 +- .../working-with-kubeconfig/index.html | 2 +- .../high-availability/_print/index.html | 2 +- .../best-practices/index.html | 2 +- .../chaos-engineering/index.html | 2 +- .../control-plane/index.html | 2 +- docs/docs/guides/high-availability/index.html | 2 +- docs/docs/guides/index.html | 2 +- .../_print/index.html | 2 +- .../analyzing-node-failures/index.html | 2 +- .../debug-a-pod/index.html | 2 +- 
.../monitoring-and-troubleshooting/index.html | 2 +- .../shell-to-node/index.html | 2 +- .../tail-logfile/index.html | 2 +- docs/docs/guides/networking/_print/index.html | 2 +- .../index.html | 2 +- .../certificate-extension/index.html | 2 +- .../networking/dns-extension/index.html | 2 +- .../index.html | 2 +- docs/docs/guides/networking/index.html | 2 +- docs/docs/index.html | 2 +- docs/docs/other-components/_print/index.html | 2 +- .../dependency-watchdog/_print/index.html | 2 +- .../concepts/_print/index.html | 2 +- .../dependency-watchdog/concepts/index.html | 2 +- .../concepts/prober/index.html | 2 +- .../concepts/weeder/index.html | 2 +- .../contribution/index.html | 2 +- .../deployment/_print/index.html | 2 +- .../deployment/configure/index.html | 2 +- .../dependency-watchdog/deployment/index.html | 2 +- .../deployment/monitor/index.html | 2 +- .../dependency-watchdog/index.html | 2 +- .../setup/dwd-using-local-garden/index.html | 2 +- .../dependency-watchdog/testing/index.html | 2 +- .../etcd-druid/_print/index.html | 2 +- .../add-new-etcd-cluster-component/index.html | 2 +- .../api-reference/etcd-druid-api/index.html | 2 +- .../etcd-druid/api-reference/index.html | 2 +- .../benchmark/etcd-network-latency/index.html | 2 +- .../etcd-druid/changing-api/index.html | 2 +- .../etcd-cluster-components/index.html | 2 +- .../index.html | 2 +- .../etcd-druid/contribution/index.html | 2 +- .../etcd-druid/controllers/index.html | 2 +- .../dependency-management/index.html | 2 +- .../configure-etcd-druid/index.html | 2 +- .../deployment/feature-gates/index.html | 2 +- .../getting-started-locally/index.html | 2 +- .../manage-azurite-emulator/index.html | 2 +- .../manage-s3-emulator/index.html | 2 +- .../index.html | 2 +- .../version-compatibility-matrix/index.html | 2 +- .../getting-started-locally/index.html | 2 +- .../other-components/etcd-druid/index.html | 2 +- .../managing-etcd-clusters/index.html | 2 +- .../etcd-druid/monitoring/metrics/index.html | 2 +- .../prepare-dev-environment/index.html | 2 +- .../proposals/00-template/index.html | 2 +- .../01-multi-node-etcd-clusters/index.html | 2 +- .../02-snapshot-compaction/index.html | 2 +- .../03-scaling-up-an-etcd-cluster/index.html | 2 +- .../04-etcd-member-custom-resource/index.html | 2 +- .../05-etcd-operator-tasks/index.html | 2 +- .../etcd-druid/raising-a-pr/index.html | 2 +- .../recovering-etcd-clusters/index.html | 2 +- .../etcd-druid/running-e2e-tests/index.html | 2 +- .../securing-etcd-clusters/index.html | 2 +- .../etcd-druid/testing/index.html | 2 +- .../updating-documentation/index.html | 2 +- docs/docs/other-components/index.html | 2 +- .../_print/index.html | 2 +- .../cp_support_new/index.html | 2 +- .../deployment/index.html | 2 +- .../documents/_print/index.html | 2 +- .../documents/apis/index.html | 2 +- .../documents/index.html | 2 +- .../machine-controller-manager/faq/index.html | 2 +- .../machine-controller-manager/index.html | 2 +- .../integration_tests/index.html | 2 +- .../local_setup/index.html | 2 +- .../machine/index.html | 2 +- .../machine_deployment/index.html | 2 +- .../machine_error_codes/index.html | 2 +- .../machine_set/index.html | 2 +- .../prerequisite/index.html | 2 +- .../proposals/_print/index.html | 2 +- .../excess_reserve_capacity/index.html | 2 +- .../external_providers_grpc/index.html | 2 +- .../proposals/hotupdate-instances/index.html | 2 +- .../proposals/index.html | 2 +- .../proposals/initialize-machine/index.html | 2 +- .../testing_and_dependencies/index.html | 2 +- .../todo/_print/index.html | 2 
+- .../todo/index.html | 2 +- .../todo/outline/index.html | 2 +- docs/docs/resources/_print/index.html | 2 +- docs/docs/resources/index.html | 2 +- docs/docs/resources/videos/_print/index.html | 2 +- .../resources/videos/fairy-tail/index.html | 2 +- .../videos/gardener-teaser/index.html | 2 +- .../videos/in-out-networking/index.html | 2 +- docs/docs/resources/videos/index.html | 2 +- .../videos/livecheck-readiness/index.html | 2 +- .../microservices-in_kubernetes/index.html | 2 +- .../resources/videos/namespace/index.html | 2 +- .../videos/small-container/index.html | 2 +- .../videos/why-kubernetes/index.html | 2 +- .../security-and-compliance/_print/index.html | 2 +- .../credential-rotation/index.html | 2 +- .../disa-k8s-stig-shoot/index.html | 2 +- docs/docs/security-and-compliance/index.html | 2 +- .../kubernetes-hardening/index.html | 2 +- .../regional-restrictions/index.html | 2 +- .../security-and-compliance/report/index.html | 2 +- docs/index.html | 2 +- docs/js/404.js | 730 +++++++++--------- ...ndex.118284ff37843555829ad9329f456bb7.json | 1 - ...ndex.a1f6bf31729e283f2b3ff771c29dd781.json | 1 + docs/tags/task/index.html | 2 +- 618 files changed, 1103 insertions(+), 1079 deletions(-) delete mode 100644 docs/offline-search-index.118284ff37843555829ad9329f456bb7.json create mode 100644 docs/offline-search-index.a1f6bf31729e283f2b3ff771c29dd781.json diff --git a/docs/404.html b/docs/404.html index 91e97a74f15..0bf3bb7be12 100644 --- a/docs/404.html +++ b/docs/404.html @@ -2,5 +2,5 @@

Page Not Found

We dug around, but couldn't find the page that you were looking for.

You could go back to our home page or use the search bar to find what you were looking for.

\ No newline at end of file diff --git a/docs/_print/adopter/index.html b/docs/_print/adopter/index.html index 4c9b955214a..1068e024b5b 100644 --- a/docs/_print/adopter/index.html +++ b/docs/_print/adopter/index.html @@ -2,5 +2,5 @@

See who is using Gardener

Gardener adopters in production environments that have publicly shared details of their usage.

teaser

SAP: SAP BTP, Kubernetes environment (internal) uses Gardener to deploy and manage Kubernetes clusters at scale in a uniform way across infrastructures (AWS, Azure, GCP, Alicloud, as well as generic interfaces to OpenStack and vSphere). Workloads include databases (SAP HANA Cloud), big data (SAP Data Intelligence), Kyma, many other cloud-native applications, and diverse business workloads.
OVHcloud: Gardener can now be run by customers on the Public Cloud Platform of the leading European cloud provider OVHcloud.
ScaleUp Technologies: ScaleUp Technologies runs Gardener within their public OpenStack clouds (Hamburg, Berlin, Düsseldorf). Their clients run all kinds of workloads on top of Gardener-maintained Kubernetes clusters, ranging from databases to Software-as-a-Service applications.
Finanz Informatik Technologie Services GmbH: Finanz Informatik Technologie Services GmbH uses Gardener to offer k8s as a service for customers in the financial industry in Germany. It is built on top of a “metal as a service” infrastructure implemented from scratch with k8s workloads in mind. The result is k8s on top of bare metal in minutes.
PingCAP: PingCAP’s TiDB is a cloud-native distributed SQL database with MySQL compatibility, and one of the most popular open-source database projects - with 23.5K+ stars and 400+ contributors. Its sister project TiKV is a Cloud Native Interactive Landscape project. PingCAP envisioned their managed TiDB service, known as TiDB Cloud, to be multi-tenant, secure, cost-efficient, and compatible with different cloud providers, and they chose Gardener.
Beezlabs: Beezlabs uses Gardener to deliver its Intelligent Process Automation platform on multiple cloud providers and to reduce costs and lock-in risks.
b’nerd: b’nerd uses Gardener as the core technology for its own managed Kubernetes as a Service solution and operates multiple Gardener installations for several cloud hosting service providers.
STACKIT: STACKIT is a digital brand of Europe’s biggest retailer, the Schwarz Group, which includes Lidl and Kaufland, but also production and recycling companies. It uses Gardener to offer public and private Kubernetes as a service in its own data centers in Europe and aims to become the cloud provider for German and European small and mid-sized companies.
T-Systems: Supporting and managing multiple application landscapes on-premises and across different hyperscaler infrastructures can be painful. At T-Systems we use Gardener both for internal usage and to manage clusters for our customers. We love the openness of the project, the flexibility, and the architecture that allows us to manage clusters around the world with only one team from one single pane of glass and to meet industry-specific certification standards. Sovereignty by design is another great value the technology implicitly brings along.
23 Technologies: The German-based company 23 Technologies uses Gardener to offer an enterprise-class Kubernetes engine for industrial use cases as well as cloud service providers, and offers managed and professional services for it. 23T is also the team behind okeanos.dev, a public service that can be used by anyone to try out Gardener.
B1 Systems GmbH: B1 Systems GmbH is an international provider of Linux & Open Source consulting, training, managed service & support. We were founded in 2004 and are based in Germany. Our team of 140 Linux experts offers tailor-made solutions based on cloud & container technologies, virtualization & high availability, as well as monitoring, system & configuration management. B1 is using Gardener internally and has also set up solutions/environments for customers.
finleap connect GmbH: finleap connect GmbH is the leading independent Open Banking platform provider in Europe. It enables companies across a multitude of industries to provide the next generation of financial services by understanding how customers transact and interact. With its “full-stack” platform of solutions, finleap connect makes it possible for its clients to compliantly access the financial transactions data of customers, enrich said data with analytics tools, provide digital banking services, and deliver high-quality digital financial products and services to customers. Gardener uniquely enables us to deploy our platform in Europe and across the globe in a uniform way on the providers preferred by our customers.
Codesphere: Codesphere is a Cloud IDE with integrated and automated deployment of web apps. It uses Gardener internally to manage clusters that host customer deployments and internal systems all over the world.
plusserver: plusserver combines its own cloud offerings with hyperscaler platforms to provide individually tailored multi-cloud solutions. The plusserver Kubernetes Engine (PSKE), based on Gardener, reduces the complexity of managing multi-cloud environments and enables companies to orchestrate their containers and cloud-native applications across a variety of platforms, such as plusserver’s pluscloud open or hyperscalers such as AWS, either by mouse click or via an API. With PSKE, companies remain vendor-independent and profit from guaranteed data sovereignty and data security due to GDPR-compliant cloud platforms in the certified plusserver data centers in Germany.
Fuga Cloud: Fuga Cloud uses Gardener as the basis for its Enterprise Managed Kubernetes (EMK), a platform that simplifies the management of your k8s and provides insight into usage and performance. The other Fuga Cloud services can be added with a mouse click, and the choice of another cloud provider is a negotiable option. Fuga Cloud stands for Digital Sovereignty, Data Portability and GDPR compatibility.
Metalstack Cloud: metalstack.cloud uses Gardener and is based on the open-source software metal-stack.io, which is developed for regulated financial institutions. The focus here is on the highest possible security and compliance conformity. This makes metalstack.cloud perfect for running enterprise-grade container applications and provides your workloads with the highest possible performance.
Cleura: Cleura uses Gardener to power its Container Orchestration Engine for Cleura Public Cloud and Cleura Compliant Cloud. The Cleura Container Orchestration Engine simplifies the creation and management of Kubernetes clusters through the user-friendly Cleura Cloud Management Panel or API, allowing users to focus on deploying applications instead of maintaining the underlying infrastructure.
PITS Globale Datenrettungsdienste: PITS Globale Datenrettungsdienste is a data recovery company located in Germany, specializing in recovering lost or damaged files from hard drives, solid-state drives, flash drives, and other storage media. Gardener is used to handle highly loaded internal infrastructure and provide reliable, fully managed K8s cluster solutions.

If you’re using Gardener and you aren’t on this list, submit a pull request!

\ No newline at end of file diff --git a/docs/_print/community/index.html b/docs/_print/community/index.html index f3046265716..1dfb722ac28 100644 --- a/docs/_print/community/index.html +++ b/docs/_print/community/index.html @@ -14,7 +14,7 @@ Gardener Google Group The recordings are published on the Gardener Project YouTube channel. Topic Speaker Date and Time Link Get more computing power in Gardener by overcoming Kubelet limitations with CRI-resource-manager Pawel Palucki, Alexander D. Kanevskiy October 20, 2022 Recording Summary Cilium / Isovalent Presentation Raymond de Jong October 6, 2022 Recording Summary Gardener Extension Development - From scratch to the gardener-extension-shoot-flux Jens Schneider, Lothar Gesslein June 9, 2022 Recording Summary Deploying and Developing Gardener Locally (Without Any External Infrastructure!) Tim Ebert, Rafael Franzke March 17, 2022 Recording Summary Gardenctl-v2 Holger Kosser, Lukas Gross, Peter Sutter February 17, 2022 Recording Summary Google Calendar">

Gardener Community

Follow - Engage - Contribute

Community Calls

Join our community calls to connect with other Gardener enthusiasts and watch cool presentations.

What content can you expect?

  • Gardener core developers roll out new information, share knowledge with the members and demonstrate new service capabilities.
  • Adopters and contributors share their use cases and experience, and exchange on future requirements.

If you want to receive updates, sign up here:

Topic | Speaker | Date and Time | Link
Get more computing power in Gardener by overcoming Kubelet limitations with CRI-resource-manager | Pawel Palucki, Alexander D. Kanevskiy | October 20, 2022 | Recording, Summary
Cilium / Isovalent Presentation | Raymond de Jong | October 6, 2022 | Recording, Summary
Gardener Extension Development - From scratch to the gardener-extension-shoot-flux | Jens Schneider, Lothar Gesslein | June 9, 2022 | Recording, Summary
Deploying and Developing Gardener Locally (Without Any External Infrastructure!) | Tim Ebert, Rafael Franzke | March 17, 2022 | Recording, Summary
Gardenctl-v2 | Holger Kosser, Lukas Gross, Peter Sutter | February 17, 2022 | Recording, Summary

Google Calendar

Presenting a Topic

If there is a topic you would like to present, message us in our #gardener slack channel or get in touch with Jessica Katz.

Get in Touch

@GardenerProject Follow the latest project updates on Twitter
GitHub diff --git a/docs/_print/contribute/docs/index.html b/docs/_print/contribute/docs/index.html index 914864de051..792553eb276 100644 --- a/docs/_print/contribute/docs/index.html +++ b/docs/_print/contribute/docs/index.html @@ -10,7 +10,7 @@ Contributions must be licensed under the Creative Commons Attribution 4.0 International License You need to sign the Contributor License Agreement. We are using CLA assistant providing a click-through workflow for accepting the CLA. For company contributors additionally the company needs to sign a corporate license agreement. See the following sections for details.">

This is the multi-page printable view of this section. Click here to print.

Return to the regular view of this page.

Contributing Documentation

You are welcome to contribute documentation to Gardener.

The following rules govern documentation contributions:

  • Contributions must be licensed under the Creative Commons Attribution 4.0 International License
  • You need to sign the Contributor License Agreement. We are using CLA assistant, which provides a click-through workflow for accepting the CLA. For company contributors, the company additionally needs to sign a corporate license agreement. See the following sections for details.

1 - Working with Images

Images used on the website have to contribute to the aesthetics and comprehensibility of the materials, without compromising the experience when loading and browsing pages. That means crisp, clear images with a consistent layout and color scheme, sensible dimensions and aspect ratios, and flicker-free, fast loading (or at least the feeling of it), even on unreliable mobile networks and devices.

Image Production Guidelines

A good, detailed reference for optimal use of images for the web can be found at web.dev’s Fast Load Times topic. The following summarizes some key points plus suggestions for tools support.

You are strongly encouraged to use vector images (SVG) as much as possible. They scale seamlessly without compromising the quality and are easier to maintain.

If you are just now starting with SVG authoring, here are some tool suggestions: Figma (online/Win/Mac), Sketch (Mac only).

For raster images (JPG, PNG, GIF), consider the following requirements and choose a tool that enables you to conform to them:

  • Be mindful about image size, the total page size and loading times.
  • Larger images (>10K) need to support progressive rendering. Consult with your favorite authoring tool’s documentation to find out if and how it supports that.
  • The site delivers the optimal media content format and size depending on the device screen size. You need to provide several variants (large screen, laptop, tablet, phone). Your authoring tool should be able to resize and resample images. Always save the largest size first and then downscale from it to avoid image quality loss.

If you are looking for a tool that conforms to those guidelines, IrfanView is a very good option.

Screenshots can be taken with whatever tool you have available. A simple Alt+PrtSc (Win) and a paste into an image processing tool to save the result does the job. If you need to add emphasized steps (1, 2, 3) when you describe a process on a screenshot, you can use Snagit. Use red color and numbers. Mind the requirements for raster images laid out above.

Diagrams can be exported as PNG/JPG from a diagramming tool such as Visio or even PowerPoint. Pick whichever you are comfortable with to design the diagram and make sure you comply with the requirements for raster image production above. Diagrams produced as SVG are welcome too if your authoring tool supports exporting in that format. In any case, ensure that your diagrams “blend” with the content on the site - use the same color scheme and geometry style. Do not complicate diagrams too much. The site also supports Mermaid diagrams produced with markdown and rendered as SVG, as sketched below. You don’t need special tools for them, but for more complex ones you might want to prototype your diagram with Mermaid’s online live editor before encoding it in your markdown. More tips on using Mermaid can be found in the Shortcodes documentation.
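
For example, a small Mermaid definition like the one below is rendered as an SVG flowchart. This is only a minimal sketch - the node names are invented for illustration, and how the block is embedded (fenced code block or shortcode) depends on the site setup described in the Shortcodes documentation:

graph LR
    A[Garden cluster] --> B[Seed cluster]
    B --> C[Shoot cluster]
    C --> D[Workloads]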

Using Images in Markdown

The standard for adding images to a topic is to use markdown’s ![caption](image-path). If the image is not showing properly, or if you wish to serve images close to their natural size and avoid scaling, then you can use HTML5’s <picture> tag.

Example:

<picture>
     <!-- default, laptop-width-L max 1200px -->
     <source srcset="https://github.tools.sap/kubernetes/documentation/tree/master/website/documentation/015-tutorials/my-guide/images/overview-XL.png"
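
A complete <picture> block typically lists one <source> element per screen-size variant and ends with an <img> fallback. The following is only a minimal sketch - the file names and breakpoints are hypothetical and not taken from the original example:

<picture>
     <!-- large screens: serve the XL variant -->
     <source srcset="images/overview-XL.png" media="(min-width: 1200px)">
     <!-- laptops and tablets: serve a smaller variant -->
     <source srcset="images/overview-L.png" media="(min-width: 800px)">
     <!-- fallback for phones and for browsers without <picture> support -->
     <img src="images/overview-M.png" alt="overview">
</picture>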
diff --git a/docs/_print/docs/contribute/code/index.html b/docs/_print/docs/contribute/code/index.html
index e2c56e2869d..8c32c1fb70f 100644
--- a/docs/_print/docs/contribute/code/index.html
+++ b/docs/_print/docs/contribute/code/index.html
@@ -10,7 +10,7 @@
 Contributions must be licensed under the Apache 2.0 License You need to sign the Contributor License Agreement. We are using CLA assistant providing a click-through workflow for accepting the CLA. For company contributors additionally the company needs to sign a corporate license agreement. See the following sections for details.">

This is the multi-page printable view of this section. Click here to print.

Return to the regular view of this page.

Contributing Code

You are welcome to contribute code to Gardener in order to fix a bug or to implement a new feature.

The following rules govern code contributions:

  • Contributions must be licensed under the Apache 2.0 License
  • You need to sign the Contributor License Agreement. We are using CLA assistant, which provides a click-through workflow for accepting the CLA. For company contributors, the company additionally needs to sign a corporate license agreement. See the following sections for details.

1 - Contributing Bigger Changes

Contributing Bigger Changes

Here are the guidelines you should follow when contributing larger changes to Gardener:

  • We strongly recommend writing a Gardener Enhancement Proposal (GEP) to establish a common understanding of what you want to achieve. This makes it easier for reviewers to understand the big picture.

  • Avoid proposing a big change in one single PR. Instead, split your work into multiple stages which are independently mergeable and create one PR for each stage. For example, if introducing a new API resource and its controller, these stages could be:

    • API resource types, including defaults and generated code.
    • API resource validation.
    • API server storage.
    • Admission plugin(s), if any.
    • Controller(s), including changes to existing controllers. Split this phase further into different functional subsets if appropriate.
  • If you realize later that changes to artifacts introduced in a previous stage are required, by all means make them and explain in the PR why they were needed.

  • Consider splitting a big PR further into multiple commits to allow for more focused reviews. For example, you could add unit tests / documentation in separate commits from the rest of the code. If you have to adapt your PR to review feedback, prefer doing that also in a separate commit to make it easier for reviewers to check how their feedback has been addressed.

  • To make the review process more efficient and avoid too many long discussions in the PR itself, ask for a “main reviewer” to be assigned to your change, then work with this person to make sure he or she understands it in detail, and agree together on any improvements that may be needed. If you can’t reach an agreement on certain topics, comment on the PR and invite other people to join the discussion.

  • Even if you have a “main reviewer” assigned, you may still get feedback from other reviewers. In general, these “non-main reviewers” are advised to focus more on the design and overall approach rather than the implementation details. Make sure that you address any concerns on this level appropriately.

2 - CI/CD

CI/CD

As an execution environment for CI/CD workloads, we use Concourse. We however abstract from the underlying “build executor” and instead offer a Pipeline Definition Contract, through which components declare their build pipelines as diff --git a/docs/adopter/index.html b/docs/adopter/index.html index db85b1a0729..865dabd3b25 100644 --- a/docs/adopter/index.html +++ b/docs/adopter/index.html @@ -2,5 +2,5 @@

See who is using Gardener

Gardener adopters in production environments that have publicly shared details of their usage.

teaser

SAP: SAP BTP, Kubernetes environment (internal) uses Gardener to deploy and manage Kubernetes clusters at scale in a uniform way across infrastructures (AWS, Azure, GCP, Alicloud, as well as generic interfaces to OpenStack and vSphere). Workloads include Databases (SAP HANA Cloud), Big Data (SAP Data Intelligence), Kyma, many other cloud native applications, and diverse business workloads.
OVHcloud: Gardener can now be run by customers on the Public Cloud Platform of the leading European cloud provider OVHcloud.
ScaleUp Technologies: ScaleUp Technologies runs Gardener within their public OpenStack clouds (Hamburg, Berlin, Düsseldorf). Their clients run all kinds of workloads on top of Gardener-maintained Kubernetes clusters, ranging from databases to Software-as-a-Service applications.
Finanz Informatik Technologie Services GmbH: Finanz Informatik Technologie Services GmbH uses Gardener to offer k8s as a service for customers in the financial industry in Germany. It is built on top of a “metal as a service” infrastructure implemented from scratch with k8s workloads in mind. The result is k8s on top of bare metal in minutes.
PingCAP: PingCAP’s TiDB is a cloud-native distributed SQL database with MySQL compatibility, and one of the most popular open-source database projects, with 23.5K+ stars and 400+ contributors. Its sister project TiKV is a Cloud Native Interactive Landscape project. PingCAP envisioned their managed TiDB service, known as TiDB Cloud, to be multi-tenant, secure, cost-efficient, and compatible with different cloud providers, and they chose Gardener.
Beezlabs: Beezlabs uses Gardener to deliver its Intelligent Process Automation platform on multiple cloud providers and to reduce costs and lock-in risks.
b’nerd: b’nerd uses Gardener as the core technology for its own managed Kubernetes as a Service solution and operates multiple Gardener installations for several cloud hosting service providers.
STACKIT: STACKIT is a digital brand of Europe’s biggest retailer, the Schwarz Group, which includes Lidl and Kaufland as well as production and recycling companies. It uses Gardener to offer public and private Kubernetes as a service in its own data centers in Europe and aims to become the cloud provider for German and European small and mid-sized companies.
T-Systems: Supporting and managing multiple application landscapes on-premises and across different hyperscaler infrastructures can be painful. At T-Systems we use Gardener both for internal usage and to manage clusters for our customers. We love the openness of the project, the flexibility, and the architecture that allows us to manage clusters around the world with only one team from one single pane of glass and to meet industry-specific certification standards. The sovereignty by design that the technology implicitly brings along is another great value.
23 Technologies: The German-based company 23 Technologies uses Gardener to offer an enterprise-class Kubernetes engine for industrial use cases as well as cloud service providers, and offers managed and professional services for it. 23T is also the team behind okeanos.dev, a public service that can be used by anyone to try out Gardener.
B1 Systems GmbH: B1 Systems GmbH is an international provider of Linux & Open Source consulting, training, managed services & support. Founded in 2004 and based in Germany, our team of 140 Linux experts offers tailor-made solutions based on cloud & container technologies, virtualization & high availability, as well as monitoring, system & configuration management. B1 uses Gardener internally and also sets up solutions/environments for customers.
finleap connect GmbH: finleap connect GmbH is the leading independent Open Banking platform provider in Europe. It enables companies across a multitude of industries to provide the next generation of financial services by understanding how customers transact and interact. With its “full-stack” platform of solutions, finleap connect makes it possible for its clients to compliantly access the financial transactions data of customers, enrich said data with analytics tools, provide digital banking services, and deliver high-quality digital financial products and services to customers. Gardener uniquely enables us to deploy our platform in Europe and across the globe in a uniform way on the providers preferred by our customers.
Codesphere: Codesphere is a Cloud IDE with integrated and automated deployment of web apps. It uses Gardener internally to manage clusters that host customer deployments and internal systems all over the world.
plusserver: plusserver combines its own cloud offerings with hyperscaler platforms to provide individually tailored multi-cloud solutions. The plusserver Kubernetes Engine (PSKE), based on Gardener, reduces the complexity of managing multi-cloud environments and enables companies to orchestrate their containers and cloud-native applications across a variety of platforms, such as plusserver’s pluscloud open or hyperscalers such as AWS, either by mouse click or via an API. With PSKE, companies remain vendor-independent and profit from guaranteed data sovereignty and data security due to GDPR-compliant cloud platforms in the certified plusserver data centers in Germany.
Fuga Cloud: Fuga Cloud uses Gardener as the basis for its Enterprise Managed Kubernetes (EMK), a platform that simplifies the management of your k8s and provides insight into usage and performance. The other Fuga Cloud services can be added with a mouse click, and the choice of another cloud provider is a negotiable option. Fuga Cloud stands for Digital Sovereignty, Data Portability and GDPR compatibility.
Metalstack Cloud: metalstack.cloud uses Gardener and is based on the open-source software metal-stack.io, which is developed for regulated financial institutions. The focus is on the highest possible security and compliance conformity. This makes metalstack.cloud perfect for running enterprise-grade container applications and provides your workloads with the highest possible performance.
Cleura: Cleura uses Gardener to power its Container Orchestration Engine for Cleura Public Cloud and Cleura Compliant Cloud. The Cleura Container Orchestration Engine simplifies the creation and management of Kubernetes clusters through the user-friendly Cleura Cloud Management Panel or API, allowing users to focus on deploying applications instead of maintaining the underlying infrastructure.
PITS Globale Datenrettungsdienste: PITS Globale Datenrettungsdienste is a data recovery company located in Germany, specializing in recovering lost or damaged files from hard drives, solid-state drives, flash drives, and other storage media. Gardener is used to handle highly loaded internal infrastructure and provide reliable, fully managed K8s cluster solutions.

If you’re using Gardener and you aren’t on this list, submit a pull request!

See who is using Gardener

Gardener adopters in production environments that have publicly shared details of their usage.


SAP: SAP BTP, Kubernetes environment (internal) uses Gardener to deploy and manage Kubernetes clusters at scale in a uniform way across infrastructures (AWS, Azure, GCP, Alicloud, as well as generic interfaces to OpenStack and vSphere). Workloads include Databases (SAP HANA Cloud), Big Data (SAP Data Intelligence), Kyma, many other cloud native applications, and diverse business workloads.
OVHcloud: Gardener can now be run by customers on the Public Cloud Platform of the leading European cloud provider OVHcloud.
ScaleUp Technologies: ScaleUp Technologies runs Gardener within their public OpenStack clouds (Hamburg, Berlin, Düsseldorf). Their clients run all kinds of workloads on top of Gardener-maintained Kubernetes clusters, ranging from databases to Software-as-a-Service applications.
Finanz Informatik Technologie Services GmbH: Finanz Informatik Technologie Services GmbH uses Gardener to offer k8s as a service for customers in the financial industry in Germany. It is built on top of a “metal as a service” infrastructure implemented from scratch with k8s workloads in mind. The result is k8s on top of bare metal in minutes.
PingCAP: PingCAP’s TiDB is a cloud-native distributed SQL database with MySQL compatibility, and one of the most popular open-source database projects, with 23.5K+ stars and 400+ contributors. Its sister project TiKV is a Cloud Native Interactive Landscape project. PingCAP envisioned their managed TiDB service, known as TiDB Cloud, to be multi-tenant, secure, cost-efficient, and compatible with different cloud providers, and they chose Gardener.
Beezlabs: Beezlabs uses Gardener to deliver its Intelligent Process Automation platform on multiple cloud providers and to reduce costs and lock-in risks.
b’nerd: b’nerd uses Gardener as the core technology for its own managed Kubernetes as a Service solution and operates multiple Gardener installations for several cloud hosting service providers.
STACKIT: STACKIT is a digital brand of Europe’s biggest retailer, the Schwarz Group, which includes Lidl and Kaufland as well as production and recycling companies. It uses Gardener to offer public and private Kubernetes as a service in its own data centers in Europe and aims to become the cloud provider for German and European small and mid-sized companies.
T-Systems: Supporting and managing multiple application landscapes on-premises and across different hyperscaler infrastructures can be painful. At T-Systems we use Gardener both for internal usage and to manage clusters for our customers. We love the openness of the project, the flexibility, and the architecture that allows us to manage clusters around the world with only one team from one single pane of glass and to meet industry-specific certification standards. The sovereignty by design that the technology implicitly brings along is another great value.
23 Technologies: The German-based company 23 Technologies uses Gardener to offer an enterprise-class Kubernetes engine for industrial use cases as well as cloud service providers, and offers managed and professional services for it. 23T is also the team behind okeanos.dev, a public service that can be used by anyone to try out Gardener.
B1 Systems GmbH: B1 Systems GmbH is an international provider of Linux & Open Source consulting, training, managed services & support. Founded in 2004 and based in Germany, our team of 140 Linux experts offers tailor-made solutions based on cloud & container technologies, virtualization & high availability, as well as monitoring, system & configuration management. B1 uses Gardener internally and also sets up solutions/environments for customers.
finleap connect GmbH: finleap connect GmbH is the leading independent Open Banking platform provider in Europe. It enables companies across a multitude of industries to provide the next generation of financial services by understanding how customers transact and interact. With its “full-stack” platform of solutions, finleap connect makes it possible for its clients to compliantly access the financial transactions data of customers, enrich said data with analytics tools, provide digital banking services, and deliver high-quality digital financial products and services to customers. Gardener uniquely enables us to deploy our platform in Europe and across the globe in a uniform way on the providers preferred by our customers.
Codesphere: Codesphere is a Cloud IDE with integrated and automated deployment of web apps. It uses Gardener internally to manage clusters that host customer deployments and internal systems all over the world.
plusserver: plusserver combines its own cloud offerings with hyperscaler platforms to provide individually tailored multi-cloud solutions. The plusserver Kubernetes Engine (PSKE), based on Gardener, reduces the complexity of managing multi-cloud environments and enables companies to orchestrate their containers and cloud-native applications across a variety of platforms, such as plusserver’s pluscloud open or hyperscalers such as AWS, either by mouse click or via an API. With PSKE, companies remain vendor-independent and profit from guaranteed data sovereignty and data security due to GDPR-compliant cloud platforms in the certified plusserver data centers in Germany.
Fuga Cloud: Fuga Cloud uses Gardener as the basis for its Enterprise Managed Kubernetes (EMK), a platform that simplifies the management of your k8s and provides insight into usage and performance. The other Fuga Cloud services can be added with a mouse click, and the choice of another cloud provider is a negotiable option. Fuga Cloud stands for Digital Sovereignty, Data Portability and GDPR compatibility.
Metalstack Cloud: metalstack.cloud uses Gardener and is based on the open-source software metal-stack.io, which is developed for regulated financial institutions. The focus is on the highest possible security and compliance conformity. This makes metalstack.cloud perfect for running enterprise-grade container applications and provides your workloads with the highest possible performance.
Cleura: Cleura uses Gardener to power its Container Orchestration Engine for Cleura Public Cloud and Cleura Compliant Cloud. The Cleura Container Orchestration Engine simplifies the creation and management of Kubernetes clusters through the user-friendly Cleura Cloud Management Panel or API, allowing users to focus on deploying applications instead of maintaining the underlying infrastructure.
PITS Globale Datenrettungsdienste: PITS Globale Datenrettungsdienste is a data recovery company located in Germany, specializing in recovering lost or damaged files from hard drives, solid-state drives, flash drives, and other storage media. Gardener is used to handle highly loaded internal infrastructure and provide reliable, fully managed K8s cluster solutions.

If you’re using Gardener and you aren’t on this list, submit a pull request!

\ No newline at end of file diff --git a/docs/blog/2018/06.11-anti-patterns/index.html b/docs/blog/2018/06.11-anti-patterns/index.html index 0fedcd80821..d13c7f6f2e3 100644 --- a/docs/blog/2018/06.11-anti-patterns/index.html +++ b/docs/blog/2018/06.11-anti-patterns/index.html @@ -6,7 +6,7 @@ Instead of running a root user, use RUN groupadd -r anygroup && useradd -r -g anygroup myuser to create a group and a user in it. Use the USER command to switch to this user.">
If you have defined relative limits (related to the requests), the default policy to scale the limits proportionally with the requests is fine, but the gap between requests and limits must be zero for QoS Guaranteed and should best be small for QoS Burstable to avoid useless or absurd limits, e.g. prefer limits that are 5 to at most 20% larger than requests as opposed to 100% larger or more.
  • As a rule of thumb, set minAllowed to the highest observed VPA recommendation (usually during the initialization phase or during any periodical activity) for an otherwise practically idle container, so that you avoid needless thrashing (e.g. resource usage calms down over time and recommendations drop consecutively until eviction, which will then lead again to initialization or later periodical activity, higher recommendations, and new evictions).
    ⚠️ You may want to provide higher minAllowed values if you observe that up-scaling takes too long for CPU or memory for too large a percentile of your workload. This will get you out of the danger zone of too few resources for too many pods at the expense of providing too many resources for a few pods. Memory may react faster than CPU, because CPU throttling is not visible and memory gets aided by OOM bump-up incidents, but still, if you observe that up-scaling takes too long, you may want to increase minAllowed accordingly.
  • As a rule of thumb, set maxAllowed to your theoretical maximum load, flanked with alerts to detect erroneous run-away usage or the actual nearing of your practical maximum load, so that you can intervene. However, VPA can easily recommend requests larger than what is allocatable on a node, so you must either ensure large enough nodes (Gardener can scale up from zero, in case you like to define a low-priority worker pool with more resources for very large pods) and/or cap VPA’s target recommendations using maxAllowed at the node allocatable remainder (after daemon set pods) of the largest eligible machine type (may result in under-provisioning resources for a pod). Use your monitoring and check maximum pod usage to decide about the maximum machine type (an example VPA manifest follows after the note below).
  • Recommendations in a Box

    ContainerWhen to useValue
    Requests- Set them (recommended) unless:
    - Do not set requests for QoS BestEffort; useful only if pod can be evicted as often as needed and pod can pick up where it left off without any penalty
    Set requests to 95th percentile (w/o VPA) of the actually observed CPU resp. memory usage in production resp. 5th percentile (w/ VPA) (see below)
    Limits- Avoid them (recommended) unless:
    - Set limits for QoS Guaranteed; useful only if pod has strictly static resource requirements
    - Set CPU limits if you want to throttle CPU usage for containers that can be throttled w/o any other disadvantage than processing time (never do that when time-critical operations like leases are involved)
    - Set limits if you know the healthy range and want to shield against unbound busy loops, unbound memory leaks, or similar
    If you really can (otherwise not), set limits to healthy theoretical max load
    ScalerWhen to useInitialMinimumMaximum
    HPAUse for pods that support horizontal scalingSet initial replicas to 5th percentile of the actually observed replica count in production (prefer scaling on usage, not utilization) and make sure to never overwrite it later when controlled by HPASet minReplicas to 0 (requires feature gate and custom/external metrics), to 1 (regular HPA minimum), or whatever the high availability requirements of the workload demandSet maxReplicas to healthy theoretical max load
    VPAUse for containers that have a significant usage (>50m/100M) and a significant usage spread over time (>2x)Set initial requests to 5th percentile of the actually observed CPU resp. memory usage in productionSet minAllowed to highest observed VPA recommendation (includes start-up phase) for an otherwise practically idle container (avoids pod thrashing when pod gets evicted after idling)Set maxAllowed to fresh node allocatable remainder after daemonset pods (avoids pending pods when requests exceed fresh node allocatable remainder) or, if you really can (otherwise not), to healthy theoretical max load (less disruptive than limits as no throttling or OOM happens on under-utilized nodes)
    CAUse for dynamic workloads, definitely if you use HPA and/or VPAN/ASet minimum to 0 or number of nodes required right after cluster creation or wake-upSet maximum to healthy theoretical max load

    Note

    Theoretical max load may be very difficult to ascertain, especially with modern software that consists of building blocks you do not own or know in detail. If you have comprehensive monitoring in place, you may be tempted to pick the observed maximum and add a safety margin or even factor on top (2x, 4x, or any other number), but this is not to be confused with “theoretical max load” (solely depending on the code, not observations from the outside). At any point in time, your numbers may change, e.g. because you updated a software component or your usage increased. If you decide to use numbers that are set based only on observations, make sure to flank those numbers with monitoring alerts, so that you have sufficient time to investigate, revise, and readjust if necessary.
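    To make the guidance above concrete, here is a minimal sketch of a VerticalPodAutoscaler manifest that applies minAllowed, maxAllowed, and controlledValues: RequestsOnly; the workload name and the concrete values are illustrative assumptions, not recommendations from this guide.

        apiVersion: autoscaling.k8s.io/v1
        kind: VerticalPodAutoscaler
        metadata:
          name: my-app                           # hypothetical workload name
        spec:
          targetRef:
            apiVersion: apps/v1
            kind: Deployment
            name: my-app
          updatePolicy:
            updateMode: Auto
          resourcePolicy:
            containerPolicies:
            - containerName: my-app
              controlledValues: RequestsOnly     # keep relative limits, scale only the requests
              minAllowed:                        # highest recommendation observed for an idle container
                cpu: 50m
                memory: 100Mi
              maxAllowed:                        # node allocatable remainder or theoretical max load
                cpu: 4
                memory: 16Gi

    Adjust minAllowed and maxAllowed per container based on your own monitoring, as described in the bullets above.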

    Conclusion

    Pod autoscaling is a dynamic and complex aspect of Kubernetes, but it is also one of the most powerful tools at your disposal for maintaining efficient, reliable, and cost-effective applications. By carefully selecting the appropriate autoscaler, setting well-considered thresholds, and continuously monitoring and adjusting your strategies, you can ensure that your Kubernetes deployments are well-equipped to handle your resource demands while not over-paying for the provided resources at the same time.

    As Kubernetes continues to evolve (e.g. in-place updates) and as new patterns and practices emerge, the approaches to autoscaling may also change. However, the principles discussed above will remain foundational to creating scalable and resilient Kubernetes workloads. Whether you’re a developer or operations engineer, a solid understanding of pod autoscaling will be instrumental in the successful deployment and management of containerized applications.

    2.6.2 - Specifying a Disruption Budget for Kubernetes Controllers

    Introduction of Disruptions

    We need to understand that some kinds of voluntary disruptions can happen to pods. For example, they can be caused by cluster administrators who want to perform automated cluster actions, like upgrading and autoscaling clusters. Typical application owner actions include:

    • deleting the deployment or other controller that manages the pod
    • updating a deployment’s pod template, causing a restart
    • directly deleting a pod (e.g., by accident)

    Setting Up Pod Disruption Budgets

    Kubernetes offers a feature called PodDisruptionBudget (PDB) for each application. A PDB limits the number of pods of a replicated application that are down simultaneously from voluntary disruptions.

    The most common use case is when you want to protect an application specified by one of the built-in Kubernetes controllers:

    • Deployment
    • ReplicationController
    • ReplicaSet
    • StatefulSet

    A PodDisruptionBudget has three fields:

    • A label selector .spec.selector to specify the set of pods to which it applies.
    • .spec.minAvailable which is a description of the number of pods from that set that must still be available after the eviction, even in the absence of the evicted pod. minAvailable can be either an absolute number or a percentage.
    • .spec.maxUnavailable which is a description of the number of pods from that set that can be unavailable after the eviction. It can be either an absolute number or a percentage.
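    As a minimal sketch, a PodDisruptionBudget using the fields listed above might look like the following; the name, the app label, and the concrete numbers are illustrative assumptions.

        apiVersion: policy/v1
        kind: PodDisruptionBudget
        metadata:
          name: my-app-pdb                # hypothetical name
        spec:
          minAvailable: 2                 # absolute number; a percentage such as "50%" also works
          # maxUnavailable: 1             # alternative to minAvailable; do not set both
          selector:
            matchLabels:
              app: my-app                 # must match the pods of the protected controller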

    Cluster Upgrade or Node Deletion Failed due to PDB Violation

    Misconfiguration of the PDB could block the cluster upgrade or node deletion processes. There are two main cases that can cause a misconfiguration.

    Case 1: The replica count of the Kubernetes controller is 1

    • Only 1 replica is running: replicaCount is not set, or replicaCount for the Kubernetes controller is set to 1

    • PDB configuration

        spec:
      @@ -3360,7 +3360,7 @@
       by the Shoot and try to find respective credentials there (primary provider only). Specifying this field may override
       this behavior, i.e. forcing the Gardener to only look into the given secret.

      type
      string(Optional)

      Type is the DNS provider type.

      zones
      DNSIncludeExclude(Optional)

      Zones contains information about which hosted zones shall be included/excluded for this provider.

      Deprecated: This field is deprecated and will be removed in a future release. Please use the DNS extension provider config (e.g. shoot-dns-service) for additional configuration.

      DataVolume

      (Appears on: -Worker)

      DataVolume contains information about a data volume.

      FieldDescription
      name
      string

      Name of the volume to make it referencable.

      type
      string
      (Optional)

      Type is the type of the volume.

      size
      string

      VolumeSize is the size of the volume.

      encrypted
      bool
      (Optional)

      Encrypted determines if the volume should be encrypted.

      DeploymentRef

      (Appears on: +Worker)

      DataVolume contains information about a data volume.

      FieldDescription
      name
      string

      Name of the volume to make it referenceable.

      type
      string
      (Optional)

      Type is the type of the volume.

      size
      string

      VolumeSize is the size of the volume.

      encrypted
      bool
      (Optional)

      Encrypted determines if the volume should be encrypted.

      DeploymentRef

      (Appears on: ControllerRegistrationDeployment)

      DeploymentRef contains information about ControllerDeployment references.

      FieldDescription
      name
      string

      Name is the name of the ControllerDeployment that is being referred to.

      DualApprovalForDeletion

      (Appears on: ProjectSpec)

      DualApprovalForDeletion contains configuration for the dual approval concept for resource deletion.

      FieldDescription
      resource
      string

      Resource is the name of the resource this applies to.

      selector
      Kubernetes meta/v1.LabelSelector

      Selector is the label selector for the resources.

      includeServiceAccounts
      bool
      (Optional)

      IncludeServiceAccounts specifies whether the concept also applies when deletion is triggered by ServiceAccounts. Defaults to true.

      ETCDEncryptionKeyRotation

      (Appears on: @@ -3437,7 +3437,7 @@ This field is only available for Kubernetes v1.30 or later.

      KubeControllerManagerConfig

      (Appears on: Kubernetes)

      KubeControllerManagerConfig contains configuration settings for the kube-controller-manager.

      FieldDescription
      KubernetesConfig
      KubernetesConfig

      (Members of KubernetesConfig are embedded into this type.)

      horizontalPodAutoscaler
      HorizontalPodAutoscalerConfig
      (Optional)

      HorizontalPodAutoscalerConfig contains horizontal pod autoscaler configuration settings for the kube-controller-manager.

      nodeCIDRMaskSize
      int32
      (Optional)

      NodeCIDRMaskSize defines the mask size for the node CIDR in the cluster (default is 24). This field is immutable.

      podEvictionTimeout
      Kubernetes meta/v1.Duration
      (Optional)

      PodEvictionTimeout defines the grace period for deleting pods on failed nodes. Defaults to 2m.

      Deprecated: The corresponding kube-controller-manager flag --pod-eviction-timeout is deprecated in favor of the kube-apiserver flags --default-not-ready-toleration-seconds and --default-unreachable-toleration-seconds. -The --pod-eviction-timeout flag does not have effect when the taint besed eviction is enabled. The taint +The --pod-eviction-timeout flag does not have effect when the taint based eviction is enabled. The taint based eviction is beta (enabled by default) since Kubernetes 1.13 and GA since Kubernetes 1.18. Hence, instead of setting this field, set the spec.kubernetes.kubeAPIServer.defaultNotReadyTolerationSeconds and spec.kubernetes.kubeAPIServer.defaultUnreachableTolerationSeconds.

      nodeMonitorGracePeriod
      Kubernetes meta/v1.Duration
      (Optional)

      NodeMonitorGracePeriod defines the grace period before an unresponsive node is marked unhealthy.
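      A hedged sketch of how these fields might be set in a Shoot manifest, assuming the usual spec.kubernetes.kubeControllerManager path; the values are illustrative only.

          apiVersion: core.gardener.cloud/v1beta1
          kind: Shoot
          spec:
            kubernetes:
              kubeControllerManager:
                nodeCIDRMaskSize: 24            # immutable once set
                nodeMonitorGracePeriod: 40s     # grace period before an unresponsive node is marked unhealthy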

      KubeProxyConfig

      (Appears on: @@ -3777,7 +3777,7 @@ (default: 0.9)

      recommendationLowerBoundMemoryPercentile
      float64(Optional)

      RecommendationLowerBoundMemoryPercentile is the usage percentile that will be used for the lower bound on memory recommendation. (default: 0.5)

      recommendationUpperBoundMemoryPercentile
      float64(Optional)

      RecommendationUpperBoundMemoryPercentile is the usage percentile that will be used for the upper bound on memory recommendation. (default: 0.95)

      Volume

      (Appears on: -Worker)

      Volume contains information about the volume type, size, and encryption.

      FieldDescription
      name
      string
      (Optional)

      Name of the volume to make it referencable.

      type
      string
      (Optional)

      Type is the type of the volume.

      size
      string

      VolumeSize is the size of the volume.

      encrypted
      bool
      (Optional)

      Encrypted determines if the volume should be encrypted.

      VolumeType

      (Appears on: +Worker)

      Volume contains information about the volume type, size, and encryption.

      FieldDescription
      name
      string
      (Optional)

      Name of the volume to make it referenceable.

      type
      string
      (Optional)

      Type is the type of the volume.

      size
      string

      VolumeSize is the size of the volume.

      encrypted
      bool
      (Optional)

      Encrypted determines if the volume should be encrypted.

      VolumeType

      (Appears on: CloudProfileSpec, NamespacedCloudProfileSpec)

      VolumeType contains certain properties of a volume type.

      FieldDescription
      class
      string

      Class is the class of the volume type.

      name
      string

      Name is the name of the volume type.

      usable
      bool
      (Optional)

      Usable defines if the volume type can be used for shoot clusters.

      minSize
      k8s.io/apimachinery/pkg/api/resource.Quantity
      (Optional)

      MinSize is the minimal supported storage size.
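      For illustration, volume types could appear in a CloudProfile manifest roughly as sketched below; the field path spec.volumeTypes and the concrete entries are assumptions for this example.

          apiVersion: core.gardener.cloud/v1beta1
          kind: CloudProfile
          metadata:
            name: example-profile       # hypothetical
          spec:
            volumeTypes:
            - name: gp3                 # name of the volume type
              class: standard           # class of the volume type
              usable: true              # may be used for shoot clusters
              minSize: 20Gi             # minimal supported storage size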

      WatchCacheSizes

      (Appears on: KubeAPIServerConfig)

      WatchCacheSizes contains configuration of the API server’s watch cache sizes.

      FieldDescription
      default
      int32
      (Optional)

      Default configures the default watch cache size of the kube-apiserver @@ -3890,7 +3890,7 @@ DNSRecord)

      DNSRecordStatus is the status of a DNSRecord resource.

      FieldDescription
      DefaultStatus
      DefaultStatus

      (Members of DefaultStatus are embedded into this type.)

      DefaultStatus is a structure containing common fields used by all extension resources.

      zone
      string
      (Optional)

      Zone is the DNS hosted zone of this DNS record.

      DNSRecordType (string alias)

      (Appears on: DNSRecordSpec)

      DNSRecordType is a string alias.

      DataVolume

      (Appears on: -WorkerPool)

      DataVolume contains information about a data volume.

      FieldDescription
      name
      string

      Name of the volume to make it referencable.

      type
      string
      (Optional)

      Type is the type of the volume.

      size
      string

      Size is the size of the root volume.

      encrypted
      bool
      (Optional)

      Encrypted determines if the volume should be encrypted.

      DefaultSpec

      (Appears on: +WorkerPool)

      DataVolume contains information about a data volume.

      FieldDescription
      name
      string

      Name of the volume to make it referenceable.

      type
      string
      (Optional)

      Type is the type of the volume.

      size
      string

      Size is the size of the root volume.

      encrypted
      bool
      (Optional)

      Encrypted determines if the volume should be encrypted.

      DefaultSpec

      (Appears on: BackupBucketSpec, BackupEntrySpec, BastionSpec, @@ -3973,7 +3973,7 @@ triggered. For each FilePath there must exist a File with matching Path in OperatingSystemConfig.Spec.Files.

      UnitCommand (string alias)

      (Appears on: Unit)

      UnitCommand is a string alias.

      Volume

      (Appears on: -WorkerPool)

      Volume contains information about the root disks that should be used for worker pools.

      FieldDescription
      name
      string
      (Optional)

      Name of the volume to make it referencable.

      type
      string
      (Optional)

      Type is the type of the volume.

      size
      string

      Size is the size of the root volume.

      encrypted
      bool
      (Optional)

      Encrypted determines if the volume should be encrypted.

      WorkerPool

      (Appears on: +WorkerPool)

      Volume contains information about the root disks that should be used for worker pools.

      FieldDescription
      name
      string
      (Optional)

      Name of the volume to make it referenceable.

      type
      string
      (Optional)

      Type is the type of the volume.

      size
      string

      Size is the size of the root volume.

      encrypted
      bool
      (Optional)

      Encrypted determines if the volume should be encrypted.

      WorkerPool

      (Appears on: WorkerSpec)

      WorkerPool is the definition of a specific worker pool.

      FieldDescription
      machineType
      string

      MachineType contains information about the machine type that should be used for this worker pool.

      maximum
      int32

      Maximum is the maximum size of the worker pool.

      maxSurge
      k8s.io/apimachinery/pkg/util/intstr.IntOrString

      MaxSurge is the maximum number of VMs that are created during an update.

      maxUnavailable
      k8s.io/apimachinery/pkg/util/intstr.IntOrString

      MaxUnavailable is the maximum number of VMs that can be unavailable during an update.

      annotations
      map[string]string
      (Optional)

      Annotations is a map of key/value pairs for annotations for all the Node objects in this worker pool.

      labels
      map[string]string
      (Optional)

      Labels is a map of key/value pairs for labels for all the Node objects in this worker pool.

      taints
      []Kubernetes core/v1.Taint
      (Optional)

      Taints is a list of taints for all the Node objects in this worker pool.

      machineImage
      MachineImage

      MachineImage contains logical information about the name and the version of the machine image that should be used. The logical information must be mapped to the provider-specific information (e.g., AMIs, …) by the provider itself.

      minimum
      int32

      Minimum is the minimum size of the worker pool.

      name
      string

      Name is the name of this worker pool.

      nodeAgentSecretName
      string
      (Optional)

      NodeAgentSecretName is uniquely identifying selected aspects of the OperatingSystemConfig. If it changes, then the @@ -4368,7 +4368,7 @@ containerPolicies: - controlledValues: RequestsOnly ... -If you have defined relative limits (related to the requests), the default policy to scale the limits proportionally with the requests is fine, but the gap between requests and limits must be zero for QoS Guaranteed and should best be small for QoS Burstable to avoid useless or absurd limits either, e.g. prefer limits being 5 to at most 20% larger than requests as opposed to being 100% larger or more.

    • As a rule of thumb, set minAllowed to the highest observed VPA recommendation (usually during the initialization phase or during any periodical activity) for an otherwise practically idle container, so that you avoid needless trashing (e.g. resource usage calms down over time and recommendations drop consecutively until eviction, which will then lead again to initialization or later periodical activity and higher recommendations and new evictions).
      ⚠️ You may want to provide higher minAllowed values, if you observe that up-scaling takes too long for CPU or memory for a too large percentile of your workload. This will get you out of the danger zone of too few resources for too many pods at the expense of providing too many resources for a few pods. Memory may react faster than CPU, because CPU throttling is not visible and memory gets aided by OOM bump-up incidents, but still, if you observe that up-scaling takes too long, you may want to increase minAllowed accordingly.
    • As a rule of thumb, set maxAllowed to your theoretical maximum load, flanked with alerts to detect erroneous run-away usage or the actual nearing of your practical maximum load, so that you can intervene. However, VPA can easily recommend requests larger than what is allocatable on a node, so you must either ensure large enough nodes (Gardener can scale up from zero, in case you like to define a low-priority worker pool with more resources for very large pods) and/or cap VPA’s target recommendations using maxAllowed at the node allocatable remainder (after daemon set pods) of the largest eligible machine type (may result in under-provisioning resources for a pod). Use your monitoring and check maximum pod usage to decide about the maximum machine type.
    • Recommendations in a Box

      ContainerWhen to useValue
      Requests- Set them (recommended) unless:
      - Do not set requests for QoS BestEffort; useful only if pod can be evicted as often as needed and pod can pick up where it left off without any penalty
      Set requests to 95th percentile (w/o VPA) of the actually observed CPU resp. memory usage in production resp. 5th percentile (w/ VPA) (see below)
      Limits- Avoid them (recommended) unless:
      - Set limits for QoS Guaranteed; useful only if pod has strictly static resource requirements
      - Set CPU limits if you want to throttle CPU usage for containers that can be throttled w/o any other disadvantage than processing time (never do that when time-critical operations like leases are involved)
      - Set limits if you know the healthy range and want to shield against unbound busy loops, unbound memory leaks, or similar
      If you really can (otherwise not), set limits to healthy theoretical max load
      ScalerWhen to useInitialMinimumMaximum
      HPAUse for pods that support horizontal scalingSet initial replicas to 5th percentile of the actually observed replica count in production (prefer scaling on usage, not utilization) and make sure to never overwrite it later when controlled by HPASet minReplicas to 0 (requires feature gate and custom/external metrics), to 1 (regular HPA minimum), or whatever the high availability requirements of the workload demandSet maxReplicas to healthy theoretical max load
      VPAUse for containers that have a significant usage (>50m/100M) and a significant usage spread over time (>2x)Set initial requests to 5th percentile of the actually observed CPU resp. memory usage in productionSet minAllowed to highest observed VPA recommendation (includes start-up phase) for an otherwise practically idle container (avoids pod trashing when pod gets evicted after idling)Set maxAllowed to fresh node allocatable remainder after daemonset pods (avoids pending pods when requests exeed fresh node allocatable remainder) or, if you really can (otherwise not), to healthy theoretical max load (less disruptive than limits as no throttling or OOM happens on under-utilized nodes)
      CAUse for dynamic workloads, definitely if you use HPA and/or VPAN/ASet minimum to 0 or number of nodes required right after cluster creation or wake-upSet maximum to healthy theoretical max load
      information-outline

      Note

      Theoretical max load may be very difficult to ascertain, especially with modern software that consists of building blocks you do not own or know in detail. If you have comprehensive monitoring in place, you may be tempted to pick the observed maximum and add a safety margin or even factor on top (2x, 4x, or any other number), but this is not to be confused with “theoretical max load” (solely depending on the code, not observations from the outside). At any point in time, your numbers may change, e.g. because you updated a software component or your usage increased. If you decide to use numbers that are set based only on observations, make sure to flank those numbers with monitoring alerts, so that you have sufficient time to investigate, revise, and readjust if necessary.

      Conclusion

      Pod autoscaling is a dynamic and complex aspect of Kubernetes, but it is also one of the most powerful tools at your disposal for maintaining efficient, reliable, and cost-effective applications. By carefully selecting the appropriate autoscaler, setting well-considered thresholds, and continuously monitoring and adjusting your strategies, you can ensure that your Kubernetes deployments are well-equipped to handle your resource demands while not over-paying for the provided resources at the same time.

      As Kubernetes continues to evolve (e.g. in-place updates) and as new patterns and practices emerge, the approaches to autoscaling may also change. However, the principles discussed above will remain foundational to creating scalable and resilient Kubernetes workloads. Whether you’re a developer or operations engineer, a solid understanding of pod autoscaling will be instrumental in the successful deployment and management of containerized applications.

      4.4 - Concepts

      4.4.1 - APIServer Admission Plugins

      A list of all gardener managed admission plugins together with their responsibilities

      Overview

      Similar to the kube-apiserver, the gardener-apiserver comes with a few in-tree managed admission plugins. +If you have defined relative limits (related to the requests), the default policy to scale the limits proportionally with the requests is fine, but the gap between requests and limits must be zero for QoS Guaranteed and should best be small for QoS Burstable to avoid useless or absurd limits either, e.g. prefer limits being 5 to at most 20% larger than requests as opposed to being 100% larger or more.

    • As a rule of thumb, set minAllowed to the highest observed VPA recommendation (usually during the initialization phase or during any periodical activity) for an otherwise practically idle container, so that you avoid needless trashing (e.g. resource usage calms down over time and recommendations drop consecutively until eviction, which will then lead again to initialization or later periodical activity and higher recommendations and new evictions).
      ⚠️ You may want to provide higher minAllowed values, if you observe that up-scaling takes too long for CPU or memory for a too large percentile of your workload. This will get you out of the danger zone of too few resources for too many pods at the expense of providing too many resources for a few pods. Memory may react faster than CPU, because CPU throttling is not visible and memory gets aided by OOM bump-up incidents, but still, if you observe that up-scaling takes too long, you may want to increase minAllowed accordingly.
    • As a rule of thumb, set maxAllowed to your theoretical maximum load, flanked with alerts to detect erroneous run-away usage or the actual nearing of your practical maximum load, so that you can intervene. However, VPA can easily recommend requests larger than what is allocatable on a node, so you must either ensure large enough nodes (Gardener can scale up from zero, in case you like to define a low-priority worker pool with more resources for very large pods) and/or cap VPA’s target recommendations using maxAllowed at the node allocatable remainder (after daemon set pods) of the largest eligible machine type (may result in under-provisioning resources for a pod). Use your monitoring and check maximum pod usage to decide about the maximum machine type.
    • Recommendations in a Box

      ContainerWhen to useValue
      Requests- Set them (recommended) unless:
      - Do not set requests for QoS BestEffort; useful only if pod can be evicted as often as needed and pod can pick up where it left off without any penalty
      Set requests to 95th percentile (w/o VPA) of the actually observed CPU resp. memory usage in production resp. 5th percentile (w/ VPA) (see below)
      Limits- Avoid them (recommended) unless:
      - Set limits for QoS Guaranteed; useful only if pod has strictly static resource requirements
      - Set CPU limits if you want to throttle CPU usage for containers that can be throttled w/o any other disadvantage than processing time (never do that when time-critical operations like leases are involved)
      - Set limits if you know the healthy range and want to shield against unbound busy loops, unbound memory leaks, or similar
      If you really can (otherwise not), set limits to healthy theoretical max load
      ScalerWhen to useInitialMinimumMaximum
      HPAUse for pods that support horizontal scalingSet initial replicas to 5th percentile of the actually observed replica count in production (prefer scaling on usage, not utilization) and make sure to never overwrite it later when controlled by HPASet minReplicas to 0 (requires feature gate and custom/external metrics), to 1 (regular HPA minimum), or whatever the high availability requirements of the workload demandSet maxReplicas to healthy theoretical max load
      VPAUse for containers that have a significant usage (>50m/100M) and a significant usage spread over time (>2x)Set initial requests to 5th percentile of the actually observed CPU resp. memory usage in productionSet minAllowed to highest observed VPA recommendation (includes start-up phase) for an otherwise practically idle container (avoids pod trashing when pod gets evicted after idling)Set maxAllowed to fresh node allocatable remainder after daemonset pods (avoids pending pods when requests exceed fresh node allocatable remainder) or, if you really can (otherwise not), to healthy theoretical max load (less disruptive than limits as no throttling or OOM happens on under-utilized nodes)
      CAUse for dynamic workloads, definitely if you use HPA and/or VPAN/ASet minimum to 0 or number of nodes required right after cluster creation or wake-upSet maximum to healthy theoretical max load
      information-outline

      Note

      Theoretical max load may be very difficult to ascertain, especially with modern software that consists of building blocks you do not own or know in detail. If you have comprehensive monitoring in place, you may be tempted to pick the observed maximum and add a safety margin or even factor on top (2x, 4x, or any other number), but this is not to be confused with “theoretical max load” (solely depending on the code, not observations from the outside). At any point in time, your numbers may change, e.g. because you updated a software component or your usage increased. If you decide to use numbers that are set based only on observations, make sure to flank those numbers with monitoring alerts, so that you have sufficient time to investigate, revise, and readjust if necessary.

      Conclusion

      Pod autoscaling is a dynamic and complex aspect of Kubernetes, but it is also one of the most powerful tools at your disposal for maintaining efficient, reliable, and cost-effective applications. By carefully selecting the appropriate autoscaler, setting well-considered thresholds, and continuously monitoring and adjusting your strategies, you can ensure that your Kubernetes deployments are well-equipped to handle your resource demands while not over-paying for the provided resources at the same time.

      As Kubernetes continues to evolve (e.g. in-place updates) and as new patterns and practices emerge, the approaches to autoscaling may also change. However, the principles discussed above will remain foundational to creating scalable and resilient Kubernetes workloads. Whether you’re a developer or operations engineer, a solid understanding of pod autoscaling will be instrumental in the successful deployment and management of containerized applications.

      4.4 - Concepts

      4.4.1 - APIServer Admission Plugins

      A list of all gardener managed admission plugins together with their responsibilities

      Overview

      Similar to the kube-apiserver, the gardener-apiserver comes with a few in-tree managed admission plugins. If you want to get an overview of the what and why of admission plugins then this document might be a good start.

      This document lists all existing admission plugins with a short explanation of what each is responsible for.

      ClusterOpenIDConnectPreset, OpenIDConnectPreset

      (both enabled by default)

      These admission controllers react on CREATE operations for Shoots. If the Shoot does not specify any OIDC configuration (.spec.kubernetes.kubeAPIServer.oidcConfig=nil), then it tries to find a matching ClusterOpenIDConnectPreset or OpenIDConnectPreset, respectively. If there are multiple matches, then the one with the highest weight “wins”. @@ -5044,7 +5044,7 @@

      In this example, the label foo=bar will be injected into the Deployment, as well as into all created ReplicaSets and Pods.

      Preventing Reconciliations

      If a ManagedResource is annotated with resources.gardener.cloud/ignore=true, then it will be skipped entirely by the controller (no reconciliations or deletions of managed resources at all). However, when the ManagedResource itself is deleted (for example when a shoot is deleted), then the annotation is not respected and all resources will be deleted as usual. This feature can be helpful to temporarily patch/change resources managed as part of such ManagedResource. -Condition checks will be skipped for such ManagedResources.

      Modes

      The gardener-resource-manager can manage a resource in the following supported modes:

      • Ignore
        • The corresponding resource is removed from the ManagedResource status (.status.resources). No action is performed on the cluster.
        • The resource is no longer “managed” (updated or deleted).
        • The primary use case is a migration of a resource from one ManagedResource to another one.

      The mode for a resource can be specified with the resources.gardener.cloud/mode annotation. The annotation should be specified in the encoded resource manifest in the Secret that is referenced by the ManagedResource.

      Resource Class and Reconcilation Scope

      By default, the gardener-resource-manager controller watches for ManagedResources in all namespaces. +Condition checks will be skipped for such ManagedResources.

      Modes

      The gardener-resource-manager can manage a resource in the following supported modes:

      • Ignore
        • The corresponding resource is removed from the ManagedResource status (.status.resources). No action is performed on the cluster.
        • The resource is no longer “managed” (updated or deleted).
        • The primary use case is a migration of a resource from one ManagedResource to another one.

      The mode for a resource can be specified with the resources.gardener.cloud/mode annotation. The annotation should be specified in the encoded resource manifest in the Secret that is referenced by the ManagedResource.
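      For illustration, a resource manifest carrying the mode annotation could look like the following sketch before it is encoded into the Secret referenced by the ManagedResource; the ConfigMap name and namespace are assumptions.

          apiVersion: v1
          kind: ConfigMap
          metadata:
            name: example-config                       # hypothetical resource managed via a ManagedResource
            namespace: default
            annotations:
              resources.gardener.cloud/mode: Ignore    # removed from the ManagedResource status, no longer managed
          data:
            key: value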

      Resource Class and Reconciliation Scope

      By default, the gardener-resource-manager controller watches for ManagedResources in all namespaces. The .sourceClientConnection.namespace field in the component configuration restricts the watch to ManagedResources in a single namespace only. Note that this setting also affects all other controllers and webhooks since it’s a central configuration.

      A ManagedResource has an optional .spec.class field that allows it to indicate that it belongs to a given class of resources. The .controllers.resourceClass field in the component configuration restricts the watch to ManagedResources with the given .spec.class. @@ -5209,7 +5209,9 @@ - name: shoot--foo--bar-token user: token: "" -

      then the .users[0].user.token field of the kubeconfig will be updated accordingly.

      The controller also adds an annotation to the Secret to keep track when to renew the token before it expires. +

      then the .users[0].user.token field of the kubeconfig will be updated accordingly.

      The TokenRequestor can also optionally inject the current CA bundle if the secret is annotated with

      serviceaccount.resources.gardener.cloud/inject-ca-bundle: "true"
      +

      If a kubeconfig is present in the secret, the CA bundle is set in the cluster.certificate-authority-data field of the cluster of the current context. +Otherwise, the bundle is stored in an additional secret key bundle.crt.

      The controller also adds an annotation to the Secret to keep track when to renew the token before it expires. By default, the tokens are issued to expire after 12 hours. The expiration time can be set with the following annotation:

      serviceaccount.resources.gardener.cloud/token-expiration-duration: 6h
       

      It automatically renews once 80% of the lifetime is reached, or after 24h.

      Optionally, the controller can also populate the token into a Secret in the target cluster. This can be requested by annotating the Secret in the source cluster with:

      token-requestor.resources.gardener.cloud/target-secret-name: "foo"
       token-requestor.resources.gardener.cloud/target-secret-namespace: "bar"
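      Putting the annotations from this section together, a source-cluster Secret handled by the token-requestor might look roughly like the sketch below. The purpose label and the serviceaccount name/namespace annotations are assumptions not shown in the text above; the names are hypothetical.

          apiVersion: v1
          kind: Secret
          metadata:
            name: shoot--foo--bar-token
            namespace: example-namespace                                        # hypothetical
            labels:
              resources.gardener.cloud/purpose: token-requestor                 # assumption: marks the Secret for the controller
            annotations:
              serviceaccount.resources.gardener.cloud/name: my-serviceaccount   # assumption
              serviceaccount.resources.gardener.cloud/namespace: kube-system    # assumption
              serviceaccount.resources.gardener.cloud/token-expiration-duration: 6h
              serviceaccount.resources.gardener.cloud/inject-ca-bundle: "true"
              token-requestor.resources.gardener.cloud/target-secret-name: "foo"
              token-requestor.resources.gardener.cloud/target-secret-namespace: "bar"
          type: Opaque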
      @@ -8391,7 +8393,7 @@
       re-creating API objects. The editing process may require some thought.
       This may require downtime for applications that rely on the feature.
    • Recommended for only non-critical uses because of potential for incompatible changes in subsequent releases.
    • Please do try Beta features and give feedback on them! -After they exit beta, it may not be practical for us to make more changes.

      A General Availability (GA) feature is also referred to as a stable feature. It means:

      • The feature is always enabled; you cannot disable it.
      • The corresponding feature gate is no longer needed.
      • Stable versions of features will appear in released software for many subsequent versions.

      List of Feature Gates

      FeatureRelevant ComponentsDescription
      DefaultSeccompProfilegardenlet, gardener-operatorEnables the defaulting of the seccomp profile for Gardener managed workload in the garden or seed to RuntimeDefault.
      ShootForceDeletiongardener-apiserverAllows forceful deletion of Shoots by annotating them with the confirmation.gardener.cloud/force-deletion annotation.
      UseNamespacedCloudProfilegardener-apiserverEnables usage of NamespacedCloudProfiles in Shoots.
      ShootManagedIssuergardenletEnables the shoot managed issuer functionality described in GEP 24.
      ShootCredentialsBindinggardener-apiserverEnables usage of CredentialsBindingName in Shoots.
      NewWorkerPoolHashgardenletEnables usage of the new worker pool hash calculation. The new calculation supports rolling worker pools if kubeReserved, systemReserved, evicitonHard or cpuManagerPolicy in the kubelet configuration are changed. All provider extensions must be upgraded to support this feature first. Existing worker pools are not immediately migrated to the new hash variant, since this would trigger the replacement of all nodes. The migration happens when a rolling update is triggered according to the old or new hash version calculation.
      NewVPNgardenletEnables usage of the new implementation of the VPN (go rewrite) using an IPv6 transfer network.
      NodeAgentAuthorizergardenlet, gardener-node-agentEnables authorization of gardener-node-agent to kube-apiserver of shoot clusters using an authorization webhook. It restricts the permissions of each gardener-node-agent instance to the objects belonging to its own node only.

      4.7.8 - Getting Started Locally

      Deploying Gardener Locally

      This document will walk you through deploying Gardener on your local machine. +After they exit beta, it may not be practical for us to make more changes.

      A General Availability (GA) feature is also referred to as a stable feature. It means:

      • The feature is always enabled; you cannot disable it.
      • The corresponding feature gate is no longer needed.
      • Stable versions of features will appear in released software for many subsequent versions.

      List of Feature Gates

      FeatureRelevant ComponentsDescription
      DefaultSeccompProfilegardenlet, gardener-operatorEnables the defaulting of the seccomp profile for Gardener managed workload in the garden or seed to RuntimeDefault.
      ShootForceDeletiongardener-apiserverAllows forceful deletion of Shoots by annotating them with the confirmation.gardener.cloud/force-deletion annotation.
      UseNamespacedCloudProfilegardener-apiserverEnables usage of NamespacedCloudProfiles in Shoots.
      ShootManagedIssuergardenletEnables the shoot managed issuer functionality described in GEP 24.
      ShootCredentialsBindinggardener-apiserverEnables usage of CredentialsBindingName in Shoots.
      NewWorkerPoolHashgardenletEnables usage of the new worker pool hash calculation. The new calculation supports rolling worker pools if kubeReserved, systemReserved, evictionHard or cpuManagerPolicy in the kubelet configuration are changed. All provider extensions must be upgraded to support this feature first. Existing worker pools are not immediately migrated to the new hash variant, since this would trigger the replacement of all nodes. The migration happens when a rolling update is triggered according to the old or new hash version calculation.
      NewVPNgardenletEnables usage of the new implementation of the VPN (go rewrite) using an IPv6 transfer network.
      NodeAgentAuthorizergardenlet, gardener-node-agentEnables authorization of gardener-node-agent to kube-apiserver of shoot clusters using an authorization webhook. It restricts the permissions of each gardener-node-agent instance to the objects belonging to its own node only.
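      Feature gates are toggled in the component configuration of the respective component. As a sketch, a gardenlet configuration might contain a featureGates map like the following, assuming the GardenletConfiguration schema; the selected gates are purely illustrative.

          apiVersion: gardenlet.config.gardener.cloud/v1alpha1
          kind: GardenletConfiguration
          featureGates:
            DefaultSeccompProfile: true      # enable an alpha/beta feature
            NewWorkerPoolHash: false         # explicitly keep a gate disabled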

      4.7.8 - Getting Started Locally

      Deploying Gardener Locally

      This document will walk you through deploying Gardener on your local machine. If you encounter difficulties, please open an issue so that we can make this process easier.

      Overview

      Gardener runs in any Kubernetes cluster. In this guide, we will start a KinD cluster which is used as both garden and seed cluster (please refer to the architecture overview) for simplicity.

      Based on Skaffold, the container images for all required components will be built and deployed into the cluster (via their Helm charts).

      Architecture Diagram

      Alternatives

      When deploying Gardener on your local machine you might face several limitations:

      • Your machine doesn’t have enough compute resources (see prerequisites) for hosting a second seed cluster or multiple shoot clusters.
      • Testing Gardener’s IPv6 features requires a Linux machine and native IPv6 connectivity to the internet, but you’re on macOS or don’t have IPv6 connectivity in your office environment or via your home ISP.

      In these cases, you might want to check out one of the following options that run the setup described in this guide elsewhere for circumventing these limitations:

      Prerequisites

      • Make sure that you have followed the Local Setup guide up until the Get the sources step.
      • Make sure your Docker daemon is up-to-date, up and running and has enough resources (at least 8 CPUs and 8Gi memory; see here how to configure the resources for Docker for Mac).

        Please note that 8 CPU / 8Gi memory might not be enough for more than two Shoot clusters, i.e., you might need to increase these values if you want to run additional Shoots. If you plan on following the optional steps to create a second seed cluster, the required resources will be more - at least 10 CPUs and 18Gi memory. @@ -8772,7 +8774,7 @@ The control plane is deployed in the so-called garden cluster, while the agent is installed into every seed cluster. Please note that it is possible to use the garden cluster as seed cluster by simply deploying the gardenlet into it.

        We are providing Helm charts in order to manage the various resources of the components. Please always make sure that you use the Helm chart version that matches the Gardener version you want to deploy.

        Deploying the Gardener Control Plane (API Server, Admission Controller, Controller Manager, Scheduler)

        In order to deploy the control plane components, please first deploy gardener-operator and create a Garden resource.

        alert-octagon-outline

        Caution

        The approach below is deprecated and will be removed after v1.135 of Gardener has been released (around the beginning of 2026).

The configuration values depict the various options to configure the different components. Please consult Gardener Configuration and Usage for component-specific configurations and Authentication of Gardener Control Plane Components Against the Garden Cluster for authentication-related specifics.

Also, note that all resources and deployments need to be created in the garden namespace (not overridable). If you enable the Gardener admission controller as part of your setup, please make sure the garden namespace is labelled with app: gardener. Otherwise, the backing service account for the admission controller Pod might not be created successfully. No action is necessary if you deploy the garden namespace with the Gardener control plane Helm chart.

        After preparing your values in a separate controlplane-values.yaml file (values.yaml can be used as starting point), you can run the following command against your garden cluster:

        helm install charts/gardener/controlplane \
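  --namespace garden \
  --name gardener-controlplane \
  -f controlplane-values.yaml
# Note: the original command is cut off in this extract. The continuation above is an
# assumption based on common Helm 2 usage (--name); with Helm 3, the release name would
# instead be passed as a positional argument (helm install gardener-controlplane charts/gardener/controlplane ...).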
         
           # Basic Auth
           auth_type: base64(basic)
  url: base64(external.alertmanager.foo)
           username: base64(admin)
           password: base64(password)
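For context, these values are typically provided through a Kubernetes Secret in the garden cluster; a minimal sketch, assuming an illustrative secret name and the garden namespace (the exact name and labels Gardener expects are not shown in this extract):

apiVersion: v1
kind: Secret
metadata:
  name: alerting            # illustrative name
  namespace: garden
type: Opaque
stringData:                 # stringData accepts plain values; Kubernetes stores them base64-encoded
  auth_type: basic
  url: external.alertmanager.foo
  username: admin
  password: password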
         
           enableContentionProfiling: true
         

        However, the handlers are served on the same port as configured in server.metrics.port via HTTP.

        For example (gardener-admission-controller):

        $ curl http://localhost:2723/debug/pprof/heap > /tmp/heap
         $ go tool pprof /tmp/heap

      4.10 - Observability

      4.10.1 - Logging

      Logging Stack

      Motivation

      Kubernetes uses the underlying container runtime logging, which does not persist logs for stopped and destroyed containers. This makes it difficult to investigate issues in the very common case of not running containers. Gardener provides a solution to this problem for the managed cluster components by introducing its own logging stack.

      Components

      • A fluent-bit DaemonSet, which works as a log collector, and a custom Golang plugin, which distributes the log messages to the respective Vali instances.
      • One Vali StatefulSet in the garden namespace, which contains the logs for the seed cluster, and one per shoot namespace, which contains the logs for the shoot's control plane.
      • One Plutono Deployment in the garden namespace and two Deployments per shoot namespace (one exposed to the end users and one for the operators). Plutono is the UI component used in the logging stack.

      Container Logs Rotation and Retention

      Container log rotation in Kubernetes describes a subtle but important implementation detail depending on the type of the used high-level container runtime. When the used container runtime is not CRI compliant (such as dockershim), then the kubelet does not provide any rotation or retention implementations, hence leaving those aspects to the downstream components. When the used container runtime is CRI compliant (such as containerd), then the kubelet provides the necessary implementation with two configuration options:

      • ContainerLogMaxSize for rotation
      • ContainerLogMaxFiles for retention

      ContainerD Runtime

In this case, it is possible to configure the containerLogMaxSize and containerLogMaxFiles fields in the Shoot specification. Both fields are optional, and if nothing is specified, the kubelet rotates logs at a size of 100M. Those fields are part of the provider's workers definition. Here is an example:

      spec:
         provider:
           workers:
             - cri:
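           name: containerd              # completion below is an assumption; the original example is cut off here
         kubernetes:
           kubelet:
             containerLogMaxSize: 150Mi  # rotation threshold (illustrative value)
             containerLogMaxFiles: 10    # retention: number of rotated log files to keep (illustrative value)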
       

      The logger is injected by controller-runtime’s Controller implementation. The logger returned by logf.FromContext is never nil. If the context doesn’t carry a logger, it falls back to the global logger (logf.Log), which might discard logs if not configured, but is also never nil.

      ⚠️ Make sure that you don’t overwrite the name or namespace value keys for such loggers, otherwise you will lose information about the reconciled object.

The controller implementation (controller-runtime) itself takes care of logging the error returned by reconcilers. Hence, don't log an error that you are returning. Generally, functions should not return an error if they have already logged it, because that means the error is already handled and not an error anymore. See Dave Cheney's post for more on this.

        Messages

        • Log messages should be static. Don’t put variable content in there, i.e., no fmt.Sprintf or string concatenation (+). Use key-value pairs instead.
        • Log messages should be capitalized. Note: This contrasts with error messages, that should not be capitalized. However, both should not end with a punctuation mark.

        Keys and Values

        • Use WithValues instead of repeatedly adding key-value pairs for multiple log statements. WithValues creates a new logger from the parent, that carries the given key-value pairs. E.g., use it when acting on one object in multiple steps and logging something for each step:

          log := parentLog.WithValues("infrastructure", client.ObjectKeyFromObject(infrastructure))
           // ...
           log.Info("Creating Infrastructure")
           // ...
           

        Logging in Test Code

        • If the tested production code requires a logger, you can pass logr.Discard() or logf.NullLogger{} in your test, which simply discards all logs.

        • logf.Log is safe to use in tests and will not cause a nil pointer deref, even if it’s not initialized via logf.SetLogger. It is initially set to a NullLogger by default, which means all logs are discarded, unless logf.SetLogger is called in the first 30 seconds of execution.

        • Pass zap.WriteTo(GinkgoWriter) in tests where you want to see the logs on test failure but not on success, for example:

          logf.SetLogger(logger.MustNewZapLogger(logger.DebugLevel, logger.FormatJSON, zap.WriteTo(GinkgoWriter)))
           log := logf.Log.WithName("test")

      4.30 - Managed Seed

      ManagedSeeds: Register Shoot as Seed

      An existing shoot can be registered as a seed by creating a ManagedSeed resource. This resource contains:

      • The name of the shoot that should be registered as seed.
      • A gardenlet section that contains:
        • gardenlet deployment parameters, such as the number of replicas, the image, etc.
        • The GardenletConfiguration resource that contains controllers configuration, feature gates, and a seedConfig section that contains the Seed spec and parts of its metadata.
        • Additional configuration parameters, such as the garden connection bootstrap mechanism (see TLS Bootstrapping), and whether to merge the provided configuration with the configuration of the parent gardenlet.

      gardenlet is deployed to the shoot, and it registers a new seed upon startup based on the seedConfig section.

Note: Earlier Gardener versions allowed specifying a seedTemplate directly in the ManagedSeed resource. This feature is discontinued; any seed configuration must be supplied via the GardenletConfiguration.

      Note the following important aspects:

      • Unlike the Seed resource, the ManagedSeed resource is namespaced. Currently, managed seeds are restricted to the garden namespace.
      • The newly created Seed resource always has the same name as the ManagedSeed resource. Attempting to specify a different name in the seedConfig will fail.
      • The ManagedSeed resource must always refer to an existing shoot. Attempting to create a ManagedSeed referring to a non-existing shoot will fail.
      • A shoot that is being referred to by a ManagedSeed cannot be deleted. Attempting to delete such a shoot will fail.
      • You can omit practically everything from the gardenlet section, including all or most of the Seed spec fields. Proper defaults will be supplied in all cases, based either on the most common use cases or the information already available in the Shoot resource.
      • Also, if your seed is configured to host HA shoot control planes, then gardenlet will be deployed with multiple replicas across nodes or availability zones by default.
      • Some Seed spec fields, for example the provider type and region, networking CIDRs for pods, services, and nodes, etc., must be the same as the corresponding Shoot spec fields of the shoot that is being registered as seed. Attempting to use different values (except empty ones, so that they are supplied by the defaulting mechanism) will fail.

      Deploying gardenlet to the Shoot

      To register a shoot as a seed and deploy gardenlet to the shoot using a default configuration, create a ManagedSeed resource similar to the following:

      apiVersion: seedmanagement.gardener.cloud/v1alpha1
       kind: ManagedSeed
       metadata:
         name: my-managed-seed
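   namespace: garden         # completion from here on is an assumption; the original example is cut off
 spec:
   shoot:
     name: crazy-botany      # illustrative: name of the existing shoot to be registered as a seed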
         * metrics_name_1
         * metrics_name_2
         ...

      Adding Alerts

      The alert definitions are located in charts/seed-monitoring/charts/core/charts/prometheus/rules. There are two approaches for adding new alerts.

      1. Adding additional alerts for a component which already has a set of alerts. In this case you have to extend the existing rule file for the component.
      2. Adding alerts for a new component. In this case a new rule file with name scheme example-component.rules.yaml needs to be added.
      3. Add the new alert to alertInhibitionGraph.dot, add any required inhibition flows and render the new graph. To render the graph, run:
      dot -Tpng ./content/alertInhibitionGraph.dot -o ./content/alertInhibitionGraph.png
       
4. Create a test for the new alert. See Alert Tests.

      Example alert:

      groups:
       * name: example.rules
         rules:
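   - alert: ExampleAlert                                # the rule below is an illustrative sketch; the
     expr: absent(up{job="example-component"} == 1)     # original example is cut off at this point
     for: 20m
     labels:
       service: example-component
       severity: critical      # assumption: labels such as severity/type/visibility are used by the
       type: shoot             # seed monitoring stack, but their exact values are not shown here
       visibility: operator
     annotations:
       summary: Example component is down.
       description: The example component has been unreachable for more than 20 minutes.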

      4.32 - Network Policies

      NetworkPolicys In Garden, Seed, Shoot Clusters

This document describes which Kubernetes NetworkPolicys are deployed by Gardener into the various clusters.

      Garden Cluster

      (via gardener-operator and gardener-resource-manager)

      The gardener-operator runs a NetworkPolicy controller which is responsible for the following namespaces:

      • garden
      • istio-system
      • *istio-ingress-*
      • shoot-*
      • extension-* (in case the garden cluster is a seed cluster at the same time)

      It deploys the following so-called “general NetworkPolicys”:

• deny-all: Denies all ingress and egress traffic for all pods in this namespace. Hence, all traffic must be explicitly allowed.
• allow-to-dns: Allows egress traffic from pods labeled with networking.gardener.cloud/to-dns=allowed to DNS pods running in the kube-system namespace. In practice, most of the pods performing network egress traffic need this label.
• allow-to-runtime-apiserver: Allows egress traffic from pods labeled with networking.gardener.cloud/to-runtime-apiserver=allowed to the API server of the runtime cluster.
• allow-to-blocked-cidrs: Allows egress traffic from pods labeled with networking.gardener.cloud/to-blocked-cidrs=allowed to explicitly blocked addresses configured by human operators (configured via .spec.networking.blockedCIDRs in the Seed). For instance, this can be used to block the cloud provider's metadata service.
• allow-to-public-networks: Allows egress traffic from pods labeled with networking.gardener.cloud/to-public-networks=allowed to all public network IPs, except for private networks (RFC1918), carrier-grade NAT (RFC6598), and explicitly blocked addresses configured by human operators. In practice, this blocks egress traffic to all networks in the cluster and only allows egress traffic to public IPv4 addresses.
• allow-to-private-networks: Allows egress traffic from pods labeled with networking.gardener.cloud/to-private-networks=allowed to the private networks (RFC1918) and carrier-grade NAT (RFC6598), except for cluster-specific networks (configured via .spec.networks in the Seed).
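To illustrate how a workload opts into these policies, a pod that needs DNS resolution and internet access simply carries the corresponding labels; a minimal sketch (pod name, namespace, and image are illustrative):

apiVersion: v1
kind: Pod
metadata:
  name: example-client
  namespace: garden
  labels:
    networking.gardener.cloud/to-dns: allowed                 # matched by the allow-to-dns policy
    networking.gardener.cloud/to-public-networks: allowed     # matched by the allow-to-public-networks policy
spec:
  containers:
    - name: client
      image: alpine:3.20
      command: ["sleep", "infinity"]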

Apart from those, the gardener-operator also enables the NetworkPolicy controller of gardener-resource-manager. Please find more information in the linked document. In summary, most of the pods that initiate connections with other pods will have labels with networking.resources.gardener.cloud/ prefixes. This way, they leverage the automatically created NetworkPolicys by the controller.

      Implications for Gardener Extensions

Gardener extensions sometimes need to deploy additional components into the shoot namespace in the seed cluster hosting the control plane. For example, the gardener-extension-provider-aws deploys the cloud-controller-manager into the shoot namespace. In most cases, such pods require network policy labels to allow the traffic they are initiating.

      For components deployed in the kube-system namespace of the shoots (e.g., CNI plugins or CSI drivers, etc.), custom NetworkPolicys might be required to ensure the respective components can still communicate in case the user creates a deny-all policy.
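As a sketch of what such a custom policy could look like (names, selectors, and the allowed peers are purely illustrative), a NetworkPolicy in kube-system that keeps an extension-managed component reachable despite a user-created deny-all policy might be:

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-example-component      # illustrative name
  namespace: kube-system
spec:
  podSelector:
    matchLabels:
      app: example-csi-driver        # illustrative selector for the extension's component
  policyTypes:
    - Ingress
    - Egress
  ingress:
    - from:
        - podSelector: {}            # allow ingress from other pods in kube-system
  egress:
    - to:
        - podSelector: {}            # allow egress to other pods in kube-system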

      4.33 - New Cloud Provider

      Adding Cloud Providers

      This document provides an overview of how to integrate a new cloud provider into Gardener. Each component that requires integration has a detailed description of how to integrate it and the steps required.

      Cloud Components

      Gardener is composed of 2 or more Kubernetes clusters:

      • Shoot: These are the end-user clusters, the regular Kubernetes clusters you have seen. They provide places for your workloads to run.
      • Seed: This is the “management” cluster. It manages the control planes of shoots by running them as native Kubernetes workloads.

      These two clusters can run in the same cloud provider, but they do not need to. For example, you could run your Seed in AWS, while having one shoot in Azure, two in Google, two in Alicloud, and three in Equinix Metal.

      The Seed cluster deploys and manages the Shoot clusters. Importantly, for this discussion, the etcd data store backing each Shoot runs as workloads inside the Seed. Thus, to use the above example, the clusters in Azure, Google, Alicloud and Equinix Metal will have their worker nodes and master nodes running in those clouds, but the etcd clusters backing them will run as separate deployments in the Seed Kubernetes cluster on AWS.

      This distinction becomes important when preparing the integration to a new cloud provider.

      Gardener Cloud Integration

      Gardener and its related components integrate with cloud providers at the following key lifecycle elements:

      • Create/destroy/get/list machines for the Shoot.
      • Create/destroy/get/list infrastructure components for the Shoot, e.g. VPCs, subnets, routes, etc.
      • Backup/restore etcd for the Seed via writing files to and reading them from object storage.

      Thus, the integrations you need for your cloud provider depend on whether you want to deploy Shoot clusters to the provider, Seed or both.

      • Shoot Only: machine lifecycle management, infrastructure
      • Seed: etcd backup/restore

      Gardener API

      In addition to the requirements to integrate with the cloud provider, you also need to enable the core Gardener app to receive, validate, and process requests to use that cloud provider.

      • Expose the cloud provider to the consumers of the Gardener API, so it can be told to use that cloud provider as an option.
      • Validate that API as requests come in.
      • Write cloud provider specific implementation (called “provider extension”).

      Cloud Provider API Requirements

      In order for a cloud provider to integrate with Gardener, the provider must have an API to perform machine lifecycle events, specifically:

      • Create a machine
      • Destroy a machine
      • Get information about a machine and its state
      • List machines

      In addition, if the Seed is to run on the given provider, it also must have an API to save files to block storage and retrieve them, for etcd backup/restore.

The current integration with cloud providers is to add their API calls to Gardener and the Machine Controller Manager. As both Gardener and the Machine Controller Manager are written in Go, the cloud provider should have a Go SDK. However, if it has an API that is wrappable in Go, e.g. a REST API, then you can use that for the integration.

      The Gardener team is working on bringing cloud provider integrations out-of-tree, making them pluggable, which should simplify the process and make it possible to use other SDKs.

      Summary

      To add a new cloud provider, you need some or all of the following. Each repository contains instructions on how to extend it to a new cloud provider.

Type          | Purpose                  | Location                   | Documentation
Seed or Shoot | Machine Lifecycle        | machine-controller-manager | MCM new cloud provider
Seed only     | etcd backup/restore      | etcd-backup-restore        | In process
All           | Extension implementation | gardener                   | Extension controller

      4.34 - New Kubernetes Version

      Adding Support For a New Kubernetes Version

This document describes the steps that need to be performed in order to confidently add support for a new Kubernetes minor version.

      ⚠️ Typically, once a minor Kubernetes version vX.Y is supported by Gardener, then all patch versions vX.Y.Z are also automatically supported without any required action. This is because patch versions do not introduce any new feature or API changes, so there is nothing that needs to be adapted in gardener/gardener code.

The Kubernetes community releases a new minor version roughly every 4 months. Please refer to the official documentation about their release cycles for any additional information.

Shortly before a new release, an “umbrella” issue should be opened, which is used to collect the required adaptations and to track the work items. For example, #5102 can be used as a template for the issue description. The tasks are split into two groups: the first group contains tasks specific to the changes in the given Kubernetes release, while the second group contains Kubernetes release-independent tasks.

      ℹ️ Upgrading the k8s.io/* and sigs.k8s.io/controller-runtime Golang dependencies is typically tracked and worked on separately (see e.g. #4772 or #5282).

      Deriving Release-Specific Tasks

      Most new minor Kubernetes releases incorporate API changes, deprecations, or new features. The community announces them via their change logs. In order to derive the release-specific tasks, the respective change log for the new version vX.Y has to be read and understood (for example, the changelog for v1.24).

      As already mentioned, typical changes to watch out for are:

      • API version promotions or deprecations
      • Feature gate promotions or deprecations
      • CLI flag changes for Kubernetes components
      • New default values in resources
      • New available fields in resources
      • New features potentially relevant for the Gardener system
      • Changes of labels or annotations Gardener relies on

Obviously, this requires a certain experience and understanding of the Gardener project so that all “relevant changes” can be identified. While reading the change log, add the tasks (along with the respective PR in kubernetes/kubernetes) to the umbrella issue.

      ℹ️ Some of the changes might be specific to certain cloud providers. Pay attention to those as well and add related tasks to the issue.

      List Of Release-Independent Tasks

      The following paragraphs describe recurring tasks that need to be performed for each new release.

      Make Sure a New hyperkube Image Is Released

      The gardener/hyperkube repository is used to release container images consisting of the kubectl and kubelet binaries.

There is a CI/CD job that runs periodically and releases a new hyperkube image when there is a new Kubernetes release. Before proceeding with the next steps, make sure that a new hyperkube image is released for the corresponding new Kubernetes minor version. Make sure that the container image is present in GCR.

      Adapting Gardener

      • Allow instantiation of a Kubernetes client for the new minor version and update the README.md:
        • See this example commit.
        • The list of supported versions is meanwhile maintained here in the SupportedVersions variable.
      • Maintain the Kubernetes feature gates used for validation of Shoot resources:
        • The feature gates are maintained in this file.
        • To maintain this list for new Kubernetes versions, run hack/compare-k8s-feature-gates.sh <old-version> <new-version> (e.g. hack/compare-k8s-feature-gates.sh v1.26 v1.27).
        • It will present 3 lists of feature gates: those added and those removed in <new-version> compared to <old-version> and feature gates that got locked to default in <new-version>.
        • Add all added feature gates to the map with <new-version> as AddedInVersion and no RemovedInVersion.
        • For any removed feature gates, add <new-version> as RemovedInVersion to the already existing feature gate in the map.
        • For feature gates locked to default, add <new-version> as LockedToDefaultInVersion to the already existing feature gate in the map.
        • See this example commit.
      • Maintain the Kubernetes kube-apiserver admission plugins used for validation of Shoot resources:
        • The admission plugins are maintained in this file.
        • To maintain this list for new Kubernetes versions, run hack/compare-k8s-admission-plugins.sh <old-version> <new-version> (e.g. hack/compare-k8s-admission-plugins.sh 1.26 1.27).
        • It will present 2 lists of admission plugins: those added and those removed in <new-version> compared to <old-version>.
        • Add all added admission plugins to the admissionPluginsVersionRanges map with <new-version> as AddedInVersion and no RemovedInVersion.
        • For any removed admission plugins, add <new-version> as RemovedInVersion to the already existing admission plugin in the map.
        • Flag any admission plugins that are required (plugins that must not be disabled in the Shoot spec) by setting the Required boolean variable to true for the admission plugin in the map.
        • Flag any admission plugins that are forbidden by setting the Forbidden boolean variable to true for the admission plugin in the map.
      • Maintain the Kubernetes kube-apiserver API groups used for validation of Shoot resources:
        • The API groups are maintained in this file.
        • To maintain this list for new Kubernetes versions, run hack/compare-k8s-api-groups.sh <old-version> <new-version> (e.g. hack/compare-k8s-api-groups.sh 1.26 1.27).
        • It will present 2 lists of API GroupVersions and 2 lists of API GroupVersionResources: those added and those removed in <new-version> compared to <old-version>.
        • Add all added group versions to the apiGroupVersionRanges map and group version resources to the apiGVRVersionRanges map with <new-version> as AddedInVersion and no RemovedInVersion.
        • For any removed APIs, add <new-version> as RemovedInVersion to the already existing API in the corresponding map.
        • Flag any APIs that are required (APIs that must not be disabled in the Shoot spec) by setting the Required boolean variable to true for the API in the apiGVRVersionRanges map. If this API also should not be disabled for Workerless Shoots, then set RequiredForWorkerless boolean variable also to true. If the API is required for both Shoot types, then both of these booleans need to be set to true. If the whole API Group is required, then mark it correspondingly in the apiGroupVersionRanges map.
      • Maintain the Kubernetes kube-controller-manager controllers for each API group used in deploying required KCM controllers based on active APIs:
        • The API groups are maintained in this file.
        • To maintain this list for new Kubernetes versions, run hack/compute-k8s-controllers.sh <old-version> <new-version> (e.g. hack/compute-k8s-controllers.sh 1.28 1.29).
        • If it complains that the path for the controller is not present in the map, check the release branch of the new Kubernetes version and find the correct path for the missing/wrong controller. You can do so by checking the file cmd/kube-controller-manager/app/controllermanager.go and where the controller is initialized from. As of now, there is no straight-forward way to map each controller to its file. If this has improved, please enhance the script.
        • If the paths are correct, it will present 2 lists of controllers: those added and those removed for each API group in <new-version> compared to <old-version>.
        • Add all added controllers to the APIGroupControllerMap map and under the corresponding API group with <new-version> as AddedInVersion and no RemovedInVersion.
  • For any removed controllers, add <new-version> as RemovedInVersion to the already existing controller in the corresponding API group map. If you are unable to find the removed controller name, then check for its alias, either in the staging/src/k8s.io/cloud-provider/names/controller_names.go file (example) or in the cmd/kube-controller-manager/app/* files (example for apps API group). This is because for Kubernetes versions starting from v1.28, we don't maintain the aliases in the controller, but the controller names themselves, since some controllers can be initialized without aliases as well (example). The old alias should still work since it should be backwards compatible, as explained here. Once the support for Kubernetes versions < v1.28 is dropped, we can drop the usages of these aliases and move completely to controller names.
        • Make sure that the API groups in this file are in sync with the groups in this file. For example, core/v1 is replaced by the script as v1 and apiserverinternal as internal. This is because the API groups registered by the apiserver (example) and the file path imported by the controllers (example) might be slightly different in some cases.
      • Maintain the ServiceAccount names for the controllers part of kube-controller-manager:
        • The names are maintained in this file.
        • To maintain this list for new Kubernetes versions, run hack/compare-k8s-controllers.sh <old-version> <new-version> (e.g. hack/compare-k8s-controllers.sh 1.26 1.27).
        • It will present 2 lists of controllers: those added and those removed in <new-version> compared to <old-version>.
  • Double check whether such a ServiceAccount indeed appears in the kube-system namespace when creating a cluster with <new-version>. Note that it sometimes might be hidden behind a default-off feature gate. You can create a local cluster with the new version using the local provider. It could happen that the name of the controller is used in the form of a constant and not a string (see this example); in that case, note the value of the constant separately. You could also cross-check the names with the result of the compute-k8s-controllers.sh script used in the previous step.
        • If it appears, add all added controllers to the list based on the Kubernetes version (example).
        • For any removed controllers, add them only to the Kubernetes version if it is low enough.
      • Maintain the names of controllers used for workerless Shoots, here after carefully evaluating whether they are needed if there are no workers.
      • Maintain copies of the DaemonSet controller’s scheduling logic:
        • gardener-resource-manager’s Node controller uses a copy of parts of the DaemonSet controller’s logic for determining whether a specific Node should run a daemon pod of a given DaemonSet: see this file.
        • Check the referenced upstream files for changes to the DaemonSet controller’s logic and adapt our copies accordingly. This might include introducing version-specific checks in our codebase to handle different shoot cluster versions.
      • Maintain version specific defaulting logic in shoot admission plugin:
        • Sometimes default values for shoots are intentionally changed with the introduction of a new Kubernetes version.
        • The final Kubernetes version for a shoot is determined in the Shoot Validator Admission Plugin.
        • Any defaulting logic that depends on the version should be placed in this admission plugin (example).
      • Ensure that maintenance-controller is able to auto-update shoots to the new Kubernetes version. Changes to the shoot spec required for the Kubernetes update should be enforced in such cases (examples).
      • Add the new Kubernetes version to the CloudProfile in local setup.
        • See this example commit.
      • In the next Gardener release, file a PR that bumps the used Kubernetes version for local e2e test.
  • This step must be performed in a PR that targets the next Gardener release because of the e2e upgrade tests. The e2e upgrade tests deploy the previous Gardener version, where the new Kubernetes version is not present in the CloudProfile. If the e2e tests are adapted in the same PR that adds the support for the Kubernetes version, then the e2e upgrade tests for that PR will fail because the newly added Kubernetes version is missing in the local CloudProfile from the old release.
        • See this example commit PR.

      Filing the Pull Request

      Work on all the tasks you have collected and validate them using the local provider. Execute the e2e tests and if everything looks good, then go ahead and file the PR (example PR). Generally, it is great if you add the PRs also to the umbrella issue so that they can be tracked more easily.

      Adapting Provider Extensions

      After the PR in gardener/gardener for the support of the new version has been merged, you can go ahead and work on the provider extensions.

      Actually, you can already start even if the PR is not yet merged and use the branch of your fork.

      • Update the github.com/gardener/gardener dependency in the extension and update the README.md.
      • Work on release-specific tasks related to this provider.

      Maintaining the cloud-controller-manager Images

      Provider extensions are using upstream cloud-controller-manager images. Make sure to adopt the new cloud-controller-manager release for the new Kubernetes minor version (example PR).

Some of the cloud providers are not using upstream cloud-controller-manager images for some of the supported Kubernetes versions.

In case the seed cluster runs at its capacity, there is no waiting time required during the scale-up. Instead, the low-priority pause pods will be preempted and allow newly created shoot control plane pods to be scheduled fast. In the meantime, the cluster-autoscaler will trigger the scale-up because the preempted pause pods want to run again. However, this delay doesn't affect the important shoot control plane pods, which will improve the user experience.

Use .spec.settings.excessCapacityReservation.configs to create excess capacity reservation deployments, which allow you to specify custom values for resources, nodeSelector, and tolerations. Each config creates a deployment with a minimum number of 2 replicas and a maximum equal to the number of zones configured for this seed. It defaults to a config reserving 2 CPUs and 6Gi of memory for each pod, with no nodeSelector and no tolerations.

      Excess capacity reservation is enabled when .spec.settings.excessCapacityReservation.enabled is true or not specified while configs are present. It can be disabled by setting the field to false.
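A minimal sketch of such a configuration in the Seed specification (the field layout is assumed from the description above; resource values, selector, and toleration are illustrative):

spec:
  settings:
    excessCapacityReservation:
      enabled: true
      configs:
        - resources:
            cpu: "2"
            memory: 6Gi
          nodeSelector:
            example.gardener.cloud/pool: control-plane     # illustrative node selector
          tolerations:
            - key: example.gardener.cloud/dedicated        # illustrative toleration
              operator: Equal
              value: control-plane
              effect: NoSchedule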

      Scheduling

By default, the Gardener Scheduler will consider all seed clusters when a new shoot cluster shall be created. However, administrators/operators might want to exclude some of them from being considered by the scheduler. Therefore, seed clusters can be marked as “invisible”.

│ ├── operatingsystem
│ ├── operations
│ └── vpntunnel
├── suites        # suites that run against a running garden or shoot cluster
│ ├── gardener
│ └── shoot
└── system        # suites that are used for building a full test flow

to control the execution of specific labeled tests. See the example below:

      go test -timeout=0 ./test/testmachinery/suites/shoot \
             --v -ginkgo.v -ginkgo.show-node-events -ginkgo.no-color \
             --report-file=/tmp/report.json \                     # write elasticsearch formatted output to a file
       --disable-dump=false \                               # disables dumping of the current state if a test fails
             -kubecfg=/path/to/gardener/kubeconfig \
             -shoot-name=<shoot-name> \                           # Name of the shoot to test
             -project-namespace=<gardener project namespace> \    # Name of the gardener project the test shoot resides
       your shell completion command. Example:

      gardenctl completion bash --help
       

      Usage

      Targeting

      You can set a target to use it in subsequent commands. You can also overwrite the target for each command individually.

Note that this will not affect your KUBECONFIG env variable. To update the KUBECONFIG env for your current target, see the Configure KUBECONFIG section.

      Example:

      # target control plane
       gardenctl target --garden landscape-dev --project my-project --shoot my-shoot --control-plane

      Find more information in the documentation.

      Configure KUBECONFIG for Shoot Clusters

      Generate a script that points KUBECONFIG to the targeted cluster for the specified shell. Use together with eval to configure your shell. Example for bash:

      eval "$(gardenctl kubectl-env bash)"

      To load the kubectl configuration for each bash session add the command at the end of the ~/.bashrc file.

      Configure Cloud Provider CLIs

      Generate the cloud provider CLI configuration script for the specified shell. Use together with eval to configure your shell. Example for bash:

      eval "$(gardenctl provider-env bash)"
       

      SSH

      Establish an SSH connection to a Shoot cluster’s node.

      gardenctl ssh my-node
       

      9 - FAQ

      Commonly asked questions about Gardener

      9.1 - Can I run privileged containers?

While it is possible, we highly recommend not using privileged containers in your productive environment.

      9.2 - Can Kubernetes upgrade automatically?

      There is no automatic migration of major/minor versions of Kubernetes. You need to update your clusters manually or press the Upgrade button in the Dashboard.

      Before updating a cluster you should be aware of the potential errors this might cause. The following video will dive into a Kubernetes outage in production that Monzo experienced, its causes and effects, and the architectural and operational lessons learned.

      It is therefore recommended to first update your test cluster and validate it before performing changes on a productive environment.

      9.3 - Can you backup your Kubernetes cluster resources?

      Backing up your Kubernetes cluster is possible through the use of specialized software like Velero. Velero consists of a server side component and a client tool that allow you to backup or restore all objects in your cluster, as well as the cluster resources and persistent volumes.

      9.4 - Can you migrate the content of one cluster to another cluster?

      The migration of clusters or content from one cluster to another is out of scope for the Gardener project. For such scenarios you may consider using tools like Velero.

      9.5 - How can you get the status of a shoot API server?

      There are two ways to get the health information of a shoot API server.

      • Try to reach the public endpoint of the shoot API server via "https://api.<shoot-name>.<project-name>.shoot.<canary|office|live>.k8s-hana.ondemand.com/healthz"

      The endpoint is secured, therefore you need to authenticate via basic auth or client cert. Both are available in the admin kubeconfig of the shoot cluster. Note that with those credentials you have full (admin) access to the cluster, therefore it is highly recommended to create custom credentials with some RBAC rules and bindings which only allow access to the /healthz endpoint.

• Fetch the shoot resource of your cluster via the programmatic API of the Gardener and get the availability information from the status.


      Contribute

      Contributors guides for code and documentation

      Contributing to Gardener

      Welcome

      Welcome to the Contributor section of Gardener. Here you can learn how it is possible for you to contribute your ideas and expertise to the project and have it grow even more.

      Prerequisites

      Before you begin contributing to Gardener, there are a couple of things you should become familiar with and complete first.

      Code of Conduct

All members of the Gardener community must abide by the Contributor Covenant. Only by respecting each other can we develop a productive, collaborative community.

      type
      string
      (Optional)

      Type is the DNS provider type.

      zones
      DNSIncludeExclude
      (Optional)

      Zones contains information about which hosted zones shall be included/excluded for this provider.

      Deprecated: This field is deprecated and will be removed in a future release. Please use the DNS extension provider config (e.g. shoot-dns-service) for additional configuration.

      DataVolume

(Appears on: Worker)

      DataVolume contains information about a data volume.

      FieldDescription
      name
      string

Name of the volume to make it referenceable.

      type
      string
      (Optional)

      Type is the type of the volume.

      size
      string

      VolumeSize is the size of the volume.

      encrypted
      bool
      (Optional)

      Encrypted determines if the volume should be encrypted.


      DeploymentRef

      (Appears on: ControllerRegistrationDeployment)

      DeploymentRef contains information about ControllerDeployment references.

      FieldDescription
      name
      string

      Name is the name of the ControllerDeployment that is being referred to.

      DualApprovalForDeletion

      (Appears on: ProjectSpec)

      DualApprovalForDeletion contains configuration for the dual approval concept for resource deletion.

      FieldDescription
      resource
      string

      Resource is the name of the resource this applies to.

      selector
      Kubernetes meta/v1.LabelSelector

      Selector is the label selector for the resources.

      includeServiceAccounts
      bool
      (Optional)

      IncludeServiceAccounts specifies whether the concept also applies when deletion is triggered by ServiceAccounts. Defaults to true.

      ETCDEncryptionKeyRotation

(Appears on: …)

This field is only available for Kubernetes v1.30 or later.

      KubeControllerManagerConfig

      (Appears on: Kubernetes)

      KubeControllerManagerConfig contains configuration settings for the kube-controller-manager.

      FieldDescription
      KubernetesConfig
      KubernetesConfig

      (Members of KubernetesConfig are embedded into this type.)

      horizontalPodAutoscaler
      HorizontalPodAutoscalerConfig
      (Optional)

      HorizontalPodAutoscalerConfig contains horizontal pod autoscaler configuration settings for the kube-controller-manager.

      nodeCIDRMaskSize
      int32
      (Optional)

      NodeCIDRMaskSize defines the mask size for node cidr in cluster (default is 24). This field is immutable.

      podEvictionTimeout
      Kubernetes meta/v1.Duration
      (Optional)

      PodEvictionTimeout defines the grace period for deleting pods on failed nodes. Defaults to 2m.

Deprecated: The corresponding kube-controller-manager flag --pod-eviction-timeout is deprecated in favor of the kube-apiserver flags --default-not-ready-toleration-seconds and --default-unreachable-toleration-seconds. The --pod-eviction-timeout flag has no effect when taint based eviction is enabled. Taint based eviction is beta (enabled by default) since Kubernetes 1.13 and GA since Kubernetes 1.18. Hence, instead of setting this field, set the spec.kubernetes.kubeAPIServer.defaultNotReadyTolerationSeconds and spec.kubernetes.kubeAPIServer.defaultUnreachableTolerationSeconds.

      nodeMonitorGracePeriod
      Kubernetes meta/v1.Duration
      (Optional)

      NodeMonitorGracePeriod defines the grace period before an unresponsive node is marked unhealthy.

      KubeProxyConfig

(Appears on: …)

(default: 0.9)

      recommendationLowerBoundMemoryPercentile
float64
(Optional)

      RecommendationLowerBoundMemoryPercentile is the usage percentile that will be used for the lower bound on memory recommendation. (default: 0.5)

      recommendationUpperBoundMemoryPercentile
float64
(Optional)

      RecommendationUpperBoundMemoryPercentile is the usage percentile that will be used for the upper bound on memory recommendation. (default: 0.95)

      Volume

(Appears on: Worker)

      Volume contains information about the volume type, size, and encryption.

      FieldDescription
      name
      string
      (Optional)

Name of the volume to make it referenceable.

      type
      string
      (Optional)

      Type is the type of the volume.

      size
      string

      VolumeSize is the size of the volume.

      encrypted
      bool
      (Optional)

      Encrypted determines if the volume should be encrypted.


      VolumeType

      (Appears on: CloudProfileSpec, NamespacedCloudProfileSpec)

      VolumeType contains certain properties of a volume type.

      FieldDescription
      class
      string

      Class is the class of the volume type.

      name
      string

      Name is the name of the volume type.

      usable
      bool
      (Optional)

      Usable defines if the volume type can be used for shoot clusters.

      minSize
      k8s.io/apimachinery/pkg/api/resource.Quantity
      (Optional)

      MinSize is the minimal supported storage size.

      WatchCacheSizes

      (Appears on: KubeAPIServerConfig)

      WatchCacheSizes contains configuration of the API server’s watch cache sizes.

      FieldDescription
      default
      int32
      (Optional)

Default configures the default watch cache size of the kube-apiserver

DNSRecordStatus

(Appears on: DNSRecord)

      DNSRecordStatus is the status of a DNSRecord resource.

      FieldDescription
      DefaultStatus
      DefaultStatus

      (Members of DefaultStatus are embedded into this type.)

      DefaultStatus is a structure containing common fields used by all extension resources.

      zone
      string
      (Optional)

      Zone is the DNS hosted zone of this DNS record.

      DNSRecordType (string alias)

      (Appears on: DNSRecordSpec)

      DNSRecordType is a string alias.

      DataVolume

(Appears on: WorkerPool)

      DataVolume contains information about a data volume.

      FieldDescription
      name
      string

Name of the volume to make it referenceable.

      type
      string
      (Optional)

      Type is the type of the volume.

      size
      string

Size is the size of the root volume.

      encrypted
      bool
      (Optional)

      Encrypted determines if the volume should be encrypted.


      DefaultSpec

(Appears on: BackupBucketSpec, BackupEntrySpec, BastionSpec, …)

triggered. For each FilePath there must exist a File with matching Path in OperatingSystemConfig.Spec.Files.

      UnitCommand (string alias)

      (Appears on: Unit)

      UnitCommand is a string alias.

      Volume

(Appears on: WorkerPool)

      Volume contains information about the root disks that should be used for worker pools.

      FieldDescription
      name
      string
      (Optional)

Name of the volume to make it referenceable.

      type
      string
      (Optional)

      Type is the type of the volume.

      size
      string

Size is the size of the root volume.

      encrypted
      bool
      (Optional)

      Encrypted determines if the volume should be encrypted.


      WorkerPool

      (Appears on: WorkerSpec)

      WorkerPool is the definition of a specific worker pool.

      FieldDescription
      machineType
      string

      MachineType contains information about the machine type that should be used for this worker pool.

      maximum
      int32

      Maximum is the maximum size of the worker pool.

      maxSurge
      k8s.io/apimachinery/pkg/util/intstr.IntOrString

MaxSurge is the maximum number of VMs that are created during an update.

      maxUnavailable
      k8s.io/apimachinery/pkg/util/intstr.IntOrString

      MaxUnavailable is the maximum number of VMs that can be unavailable during an update.

      annotations
      map[string]string
      (Optional)

      Annotations is a map of key/value pairs for annotations for all the Node objects in this worker pool.

      labels
      map[string]string
      (Optional)

      Labels is a map of key/value pairs for labels for all the Node objects in this worker pool.

      taints
      []Kubernetes core/v1.Taint
      (Optional)

      Taints is a list of taints for all the Node objects in this worker pool.

      machineImage
      MachineImage

MachineImage contains logical information about the name and the version of the machine image that should be used. The logical information must be mapped to the provider-specific information (e.g., AMIs, …) by the provider itself.

      minimum
      int32

      Minimum is the minimum size of the worker pool.

      name
      string

      Name is the name of this worker pool.

      nodeAgentSecretName
      string
      (Optional)

NodeAgentSecretName is uniquely identifying selected aspects of the OperatingSystemConfig. If it changes, then the …

containerPolicies:
- controlledValues: RequestsOnly
...

If you have defined relative limits (related to the requests), the default policy to scale the limits proportionally with the requests is fine, but the gap between requests and limits must be zero for QoS Guaranteed and should best be small for QoS Burstable to avoid useless or absurd limits either, e.g. prefer limits being 5 to at most 20% larger than requests as opposed to being 100% larger or more.

    • As a rule of thumb, set minAllowed to the highest observed VPA recommendation (usually during the initialization phase or during any periodical activity) for an otherwise practically idle container, so that you avoid needless trashing (e.g. resource usage calms down over time and recommendations drop consecutively until eviction, which will then lead again to initialization or later periodical activity and higher recommendations and new evictions).
      ⚠️ You may want to provide higher minAllowed values, if you observe that up-scaling takes too long for CPU or memory for a too large percentile of your workload. This will get you out of the danger zone of too few resources for too many pods at the expense of providing too many resources for a few pods. Memory may react faster than CPU, because CPU throttling is not visible and memory gets aided by OOM bump-up incidents, but still, if you observe that up-scaling takes too long, you may want to increase minAllowed accordingly.
    • As a rule of thumb, set maxAllowed to your theoretical maximum load, flanked with alerts to detect erroneous run-away usage or the actual nearing of your practical maximum load, so that you can intervene. However, VPA can easily recommend requests larger than what is allocatable on a node, so you must either ensure large enough nodes (Gardener can scale up from zero, in case you like to define a low-priority worker pool with more resources for very large pods) and/or cap VPA’s target recommendations using maxAllowed at the node allocatable remainder (after daemon set pods) of the largest eligible machine type (may result in under-provisioning resources for a pod). Use your monitoring and check maximum pod usage to decide about the maximum machine type.
    • Recommendations in a Box

      ContainerWhen to useValue
      Requests- Set them (recommended) unless:
      - Do not set requests for QoS BestEffort; useful only if pod can be evicted as often as needed and pod can pick up where it left off without any penalty
      Set requests to 95th percentile (w/o VPA) of the actually observed CPU resp. memory usage in production resp. 5th percentile (w/ VPA) (see below)
      Limits- Avoid them (recommended) unless:
      - Set limits for QoS Guaranteed; useful only if pod has strictly static resource requirements
      - Set CPU limits if you want to throttle CPU usage for containers that can be throttled w/o any other disadvantage than processing time (never do that when time-critical operations like leases are involved)
      - Set limits if you know the healthy range and want to shield against unbound busy loops, unbound memory leaks, or similar
      If you really can (otherwise not), set limits to healthy theoretical max load
      ScalerWhen to useInitialMinimumMaximum
      HPAUse for pods that support horizontal scalingSet initial replicas to 5th percentile of the actually observed replica count in production (prefer scaling on usage, not utilization) and make sure to never overwrite it later when controlled by HPASet minReplicas to 0 (requires feature gate and custom/external metrics), to 1 (regular HPA minimum), or whatever the high availability requirements of the workload demandSet maxReplicas to healthy theoretical max load
      VPAUse for containers that have a significant usage (>50m/100M) and a significant usage spread over time (>2x)Set initial requests to 5th percentile of the actually observed CPU resp. memory usage in productionSet minAllowed to highest observed VPA recommendation (includes start-up phase) for an otherwise practically idle container (avoids pod trashing when pod gets evicted after idling)Set maxAllowed to fresh node allocatable remainder after daemonset pods (avoids pending pods when requests exceed fresh node allocatable remainder) or, if you really can (otherwise not), to healthy theoretical max load (less disruptive than limits as no throttling or OOM happens on under-utilized nodes)
      CAUse for dynamic workloads, definitely if you use HPA and/or VPAN/ASet minimum to 0 or number of nodes required right after cluster creation or wake-upSet maximum to healthy theoretical max load
      information-outline

      Note

      Theoretical max load may be very difficult to ascertain, especially with modern software that consists of building blocks you do not own or know in detail. If you have comprehensive monitoring in place, you may be tempted to pick the observed maximum and add a safety margin or even factor on top (2x, 4x, or any other number), but this is not to be confused with “theoretical max load” (solely depending on the code, not observations from the outside). At any point in time, your numbers may change, e.g. because you updated a software component or your usage increased. If you decide to use numbers that are set based only on observations, make sure to flank those numbers with monitoring alerts, so that you have sufficient time to investigate, revise, and readjust if necessary.

      Conclusion

      Pod autoscaling is a dynamic and complex aspect of Kubernetes, but it is also one of the most powerful tools at your disposal for maintaining efficient, reliable, and cost-effective applications. By carefully selecting the appropriate autoscaler, setting well-considered thresholds, and continuously monitoring and adjusting your strategies, you can ensure that your Kubernetes deployments are well-equipped to handle your resource demands while not over-paying for the provided resources at the same time.

      As Kubernetes continues to evolve (e.g. in-place updates) and as new patterns and practices emerge, the approaches to autoscaling may also change. However, the principles discussed above will remain foundational to creating scalable and resilient Kubernetes workloads. Whether you’re a developer or operations engineer, a solid understanding of pod autoscaling will be instrumental in the successful deployment and management of containerized applications.

      4 - Concepts

      4.1 - APIServer Admission Plugins

      A list of all gardener managed admission plugins together with their responsibilities

      Overview

      Similar to the kube-apiserver, the gardener-apiserver comes with a few in-tree managed admission plugins. If you want to get an overview of the what and why of admission plugins then this document might be a good start.

      This document lists all existing admission plugins with a short explanation of what it is responsible for.

      ClusterOpenIDConnectPreset, OpenIDConnectPreset

      (both enabled by default)

These admission controllers react on CREATE operations for Shoots. If the Shoot does not specify any OIDC configuration (.spec.kubernetes.kubeAPIServer.oidcConfig=nil), then it tries to find a matching ClusterOpenIDConnectPreset or OpenIDConnectPreset, respectively. If there are multiple matches, then the one with the highest weight “wins”.
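
To make this concrete, below is a minimal sketch of an OpenIDConnectPreset; the name, selector labels, client ID, and issuer URL are purely illustrative:

apiVersion: settings.gardener.cloud/v1alpha1
kind: OpenIDConnectPreset
metadata:
  name: demo-preset                # hypothetical name
  namespace: garden-my-project
spec:
  shootSelector:
    matchLabels:
      oidc: enabled                # only Shoots with this label are matched
  server:
    clientID: my-client            # illustrative values
    issuerURL: https://idp.example.com
  weight: 90                       # highest weight wins if multiple presets match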

      In this example, the label foo=bar will be injected into the Deployment, as well as into all created ReplicaSets and Pods.

      Preventing Reconciliations

If a ManagedResource is annotated with resources.gardener.cloud/ignore=true, then it will be skipped entirely by the controller (no reconciliations or deletions of managed resources at all). However, when the ManagedResource itself is deleted (for example when a shoot is deleted), then the annotation is not respected and all resources will be deleted as usual. This feature can be helpful to temporarily patch/change resources managed as part of such ManagedResource. Condition checks will be skipped for such ManagedResources.
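
For illustration, a hedged sketch of a ManagedResource carrying this annotation (the resource name, namespace, and secret reference are made up):

apiVersion: resources.gardener.cloud/v1alpha1
kind: ManagedResource
metadata:
  name: example                                    # hypothetical name
  namespace: shoot--foo--bar
  annotations:
    resources.gardener.cloud/ignore: "true"        # controller skips reconciliation and deletion
spec:
  secretRefs:
  - name: managedresource-example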


      Modes

      The gardener-resource-manager can manage a resource in the following supported modes:

      • Ignore
        • The corresponding resource is removed from the ManagedResource status (.status.resources). No action is performed on the cluster.
        • The resource is no longer “managed” (updated or deleted).
        • The primary use case is a migration of a resource from one ManagedResource to another one.

      The mode for a resource can be specified with the resources.gardener.cloud/mode annotation. The annotation should be specified in the encoded resource manifest in the Secret that is referenced by the ManagedResource.
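
As an example, a hedged sketch of a manifest (as it would be encoded inside the referenced Secret) carrying the mode annotation; the ConfigMap itself is made up:

apiVersion: v1
kind: ConfigMap
metadata:
  name: migrated-config                        # hypothetical resource being moved to another ManagedResource
  namespace: kube-system
  annotations:
    resources.gardener.cloud/mode: Ignore      # resource is dropped from the status and no longer managed
data:
  key: value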

      Resource Class and Reconciliation Scope

      By default, the gardener-resource-manager controller watches for ManagedResources in all namespaces. The .sourceClientConnection.namespace field in the component configuration restricts the watch to ManagedResources in a single namespace only. Note that this setting also affects all other controllers and webhooks since it’s a central configuration.
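
A hedged fragment of the gardener-resource-manager component configuration illustrating the two fields discussed here and in the next paragraph; the apiVersion/kind and surrounding structure are assumptions, only the field paths are taken from the text:

apiVersion: resourcemanager.config.gardener.cloud/v1alpha1
kind: ResourceManagerConfiguration
sourceClientConnection:
  namespace: shoot--foo--bar    # restrict the watch to a single namespace
controllers:
  resourceClass: seed           # only ManagedResources with .spec.class=seed are reconciled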

A ManagedResource has an optional .spec.class field that allows it to indicate that it belongs to a given class of resources. The .controllers.resourceClass field in the component configuration restricts the watch to ManagedResources with the given .spec.class.

- name: shoot--foo--bar-token
  user:
    token: ""

      then the .users[0].user.token field of the kubeconfig will be updated accordingly.

      The TokenRequestor can also optionally inject the current CA bundle if the secret is annotated with

      serviceaccount.resources.gardener.cloud/inject-ca-bundle: "true"

If a kubeconfig is present in the secret, the CA bundle is set in the cluster.certificate-authority-data field of the cluster of the current context. Otherwise, the bundle is stored in an additional secret key bundle.crt.

      The controller also adds an annotation to the Secret to keep track when to renew the token before it expires. By default, the tokens are issued to expire after 12 hours. The expiration time can be set with the following annotation:

      serviceaccount.resources.gardener.cloud/token-expiration-duration: 6h
       

      It automatically renews once 80% of the lifetime is reached, or after 24h.

      Optionally, the controller can also populate the token into a Secret in the target cluster. This can be requested by annotating the Secret in the source cluster with:

      token-requestor.resources.gardener.cloud/target-secret-name: "foo"
       token-requestor.resources.gardener.cloud/target-secret-namespace: "bar"
       re-creating API objects. The editing process may require some thought.
       This may require downtime for applications that rely on the feature.
    • Recommended for only non-critical uses because of potential for incompatible changes in subsequent releases.
• Please do try Beta features and give feedback on them! After they exit beta, it may not be practical for us to make more changes.


      A General Availability (GA) feature is also referred to as a stable feature. It means:

      • The feature is always enabled; you cannot disable it.
      • The corresponding feature gate is no longer needed.
      • Stable versions of features will appear in released software for many subsequent versions.

      List of Feature Gates

      FeatureRelevant ComponentsDescription
      DefaultSeccompProfilegardenlet, gardener-operatorEnables the defaulting of the seccomp profile for Gardener managed workload in the garden or seed to RuntimeDefault.
      ShootForceDeletiongardener-apiserverAllows forceful deletion of Shoots by annotating them with the confirmation.gardener.cloud/force-deletion annotation.
      UseNamespacedCloudProfilegardener-apiserverEnables usage of NamespacedCloudProfiles in Shoots.
      ShootManagedIssuergardenletEnables the shoot managed issuer functionality described in GEP 24.
      ShootCredentialsBindinggardener-apiserverEnables usage of CredentialsBindingName in Shoots.
      NewWorkerPoolHashgardenletEnables usage of the new worker pool hash calculation. The new calculation supports rolling worker pools if kubeReserved, systemReserved, evictionHard or cpuManagerPolicy in the kubelet configuration are changed. All provider extensions must be upgraded to support this feature first. Existing worker pools are not immediately migrated to the new hash variant, since this would trigger the replacement of all nodes. The migration happens when a rolling update is triggered according to the old or new hash version calculation.
      NewVPNgardenletEnables usage of the new implementation of the VPN (go rewrite) using an IPv6 transfer network.
      NodeAgentAuthorizergardenlet, gardener-node-agentEnables authorization of gardener-node-agent to kube-apiserver of shoot clusters using an authorization webhook. It restricts the permissions of each gardener-node-agent instance to the objects belonging to its own node only.
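
For orientation, a hedged sketch of how such gates are typically toggled in the gardenlet component configuration (the surrounding structure is an assumption; gate names are taken from the table above):

apiVersion: gardenlet.config.gardener.cloud/v1alpha1
kind: GardenletConfiguration
featureGates:
  DefaultSeccompProfile: true    # enable defaulting of the seccomp profile
  NewVPN: false                  # keep the previous VPN implementation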

      7.8 - Getting Started Locally

      Deploying Gardener Locally

      This document will walk you through deploying Gardener on your local machine. If you encounter difficulties, please open an issue so that we can make this process easier.

      Overview

      Gardener runs in any Kubernetes cluster. In this guide, we will start a KinD cluster which is used as both garden and seed cluster (please refer to the architecture overview) for simplicity.

      Based on Skaffold, the container images for all required components will be built and deployed into the cluster (via their Helm charts).

      Architecture Diagram

      Alternatives

      When deploying Gardener on your local machine you might face several limitations:

      • Your machine doesn’t have enough compute resources (see prerequisites) for hosting a second seed cluster or multiple shoot clusters.
      • Testing Gardener’s IPv6 features requires a Linux machine and native IPv6 connectivity to the internet, but you’re on macOS or don’t have IPv6 connectivity in your office environment or via your home ISP.

      In these cases, you might want to check out one of the following options that run the setup described in this guide elsewhere for circumventing these limitations:

      Prerequisites

      • Make sure that you have followed the Local Setup guide up until the Get the sources step.
      • Make sure your Docker daemon is up-to-date, up and running and has enough resources (at least 8 CPUs and 8Gi memory; see here how to configure the resources for Docker for Mac).

Please note that 8 CPU / 8Gi memory might not be enough for more than two Shoot clusters, i.e., you might need to increase these values if you want to run additional Shoots. If you plan on following the optional steps to create a second seed cluster, the required resources will be more - at least 10 CPUs and 18Gi memory.

The control plane is deployed in the so-called garden cluster, while the agent is installed into every seed cluster. Please note that it is possible to use the garden cluster as seed cluster by simply deploying the gardenlet into it.

        We are providing Helm charts in order to manage the various resources of the components. Please always make sure that you use the Helm chart version that matches the Gardener version you want to deploy.

        Deploying the Gardener Control Plane (API Server, Admission Controller, Controller Manager, Scheduler)

        In order to deploy the control plane components, please first deploy gardener-operator and create a Garden resource.

        alert-octagon-outline

        Caution

        Below approach is deprecated and will be removed after v1.135 of Gardener has been released (around beginning of 2026).

The configuration values depict the various options to configure the different components. Please consult Gardener Configuration and Usage for component specific configurations and Authentication of Gardener Control Plane Components Against the Garden Cluster for authentication related specifics.

Also, note that all resources and deployments need to be created in the garden namespace (not overridable). If you enable the Gardener admission controller as part of your setup, please make sure the garden namespace is labelled with app: gardener. Otherwise, the backing service account for the admission controller Pod might not be created successfully. No action is necessary if you deploy the garden namespace with the Gardener control plane Helm chart.

        After preparing your values in a separate controlplane-values.yaml file (values.yaml can be used as starting point), you can run the following command against your garden cluster:

        helm install charts/gardener/controlplane \
         
           # Basic Auth
           auth_type: base64(basic)
  url: base64(external.alertmanager.foo)
           username: base64(admin)
           password: base64(password)
         
           enableContentionProfiling: true
         

        However, the handlers are served on the same port as configured in server.metrics.port via HTTP.

        For example (gardener-admission-controller):

        $ curl http://localhost:2723/debug/pprof/heap > /tmp/heap
         $ go tool pprof /tmp/heap

      10 - Observability

      10.1 - Logging

      Logging Stack

      Motivation

      Kubernetes uses the underlying container runtime logging, which does not persist logs for stopped and destroyed containers. This makes it difficult to investigate issues in the very common case of not running containers. Gardener provides a solution to this problem for the managed cluster components by introducing its own logging stack.

      Components

      • A Fluent-bit daemonset which works like a log collector and custom Golang plugin which spreads log messages to their Vali instances.
      • One Vali Statefulset in the garden namespace which contains logs for the seed cluster and one per shoot namespace which contains logs for shoot’s controlplane.
      • One Plutono Deployment in garden namespace and two Deployments per shoot namespace (one exposed to the end users and one for the operators). Plutono is the UI component used in the logging stack.

      Container Logs Rotation and Retention

      Container log rotation in Kubernetes describes a subtle but important implementation detail depending on the type of the used high-level container runtime. When the used container runtime is not CRI compliant (such as dockershim), then the kubelet does not provide any rotation or retention implementations, hence leaving those aspects to the downstream components. When the used container runtime is CRI compliant (such as containerd), then the kubelet provides the necessary implementation with two configuration options:

      • ContainerLogMaxSize for rotation
      • ContainerLogMaxFiles for retention

      ContainerD Runtime

      In this case, it is possible to configure the containerLogMaxSize and containerLogMaxFiles fields in the Shoot specification. Both fields are optional and if nothing is specified, then the kubelet rotates on the size 100M. Those fields are part of provider’s workers definition. Here is an example:

      spec:
         provider:
           workers:
             - cri:
       

      The logger is injected by controller-runtime’s Controller implementation. The logger returned by logf.FromContext is never nil. If the context doesn’t carry a logger, it falls back to the global logger (logf.Log), which might discard logs if not configured, but is also never nil.

      ⚠️ Make sure that you don’t overwrite the name or namespace value keys for such loggers, otherwise you will lose information about the reconciled object.

The controller implementation (controller-runtime) itself takes care of logging the error returned by reconcilers. Hence, don’t log an error that you are returning. Generally, functions should not return an error if they already logged it, because that means the error is already handled and not an error anymore. See Dave Cheney’s post for more on this.


        Messages

        • Log messages should be static. Don’t put variable content in there, i.e., no fmt.Sprintf or string concatenation (+). Use key-value pairs instead.
        • Log messages should be capitalized. Note: This contrasts with error messages, that should not be capitalized. However, both should not end with a punctuation mark.

        Keys and Values

        • Use WithValues instead of repeatedly adding key-value pairs for multiple log statements. WithValues creates a new logger from the parent, that carries the given key-value pairs. E.g., use it when acting on one object in multiple steps and logging something for each step:

          log := parentLog.WithValues("infrastructure", client.ObjectKeyFromObject(infrastructure))
           // ...
           log.Info("Creating Infrastructure")
           // ...
           

        Logging in Test Code

        • If the tested production code requires a logger, you can pass logr.Discard() or logf.NullLogger{} in your test, which simply discards all logs.

        • logf.Log is safe to use in tests and will not cause a nil pointer deref, even if it’s not initialized via logf.SetLogger. It is initially set to a NullLogger by default, which means all logs are discarded, unless logf.SetLogger is called in the first 30 seconds of execution.

        • Pass zap.WriteTo(GinkgoWriter) in tests where you want to see the logs on test failure but not on success, for example:

          logf.SetLogger(logger.MustNewZapLogger(logger.DebugLevel, logger.FormatJSON, zap.WriteTo(GinkgoWriter)))
           log := logf.Log.WithName("test")

      30 - Managed Seed

      ManagedSeeds: Register Shoot as Seed

      An existing shoot can be registered as a seed by creating a ManagedSeed resource. This resource contains:

      • The name of the shoot that should be registered as seed.
      • A gardenlet section that contains:
        • gardenlet deployment parameters, such as the number of replicas, the image, etc.
        • The GardenletConfiguration resource that contains controllers configuration, feature gates, and a seedConfig section that contains the Seed spec and parts of its metadata.
        • Additional configuration parameters, such as the garden connection bootstrap mechanism (see TLS Bootstrapping), and whether to merge the provided configuration with the configuration of the parent gardenlet.

      gardenlet is deployed to the shoot, and it registers a new seed upon startup based on the seedConfig section.

      Note: Earlier Gardener allowed specifying a seedTemplate directly in the ManagedSeed resource. This feature is discontinued, any seed configuration must be via the GardenletConfiguration.

      Note the following important aspects:

      • Unlike the Seed resource, the ManagedSeed resource is namespaced. Currently, managed seeds are restricted to the garden namespace.
      • The newly created Seed resource always has the same name as the ManagedSeed resource. Attempting to specify a different name in the seedConfig will fail.
      • The ManagedSeed resource must always refer to an existing shoot. Attempting to create a ManagedSeed referring to a non-existing shoot will fail.
      • A shoot that is being referred to by a ManagedSeed cannot be deleted. Attempting to delete such a shoot will fail.
      • You can omit practically everything from the gardenlet section, including all or most of the Seed spec fields. Proper defaults will be supplied in all cases, based either on the most common use cases or the information already available in the Shoot resource.
      • Also, if your seed is configured to host HA shoot control planes, then gardenlet will be deployed with multiple replicas across nodes or availability zones by default.
      • Some Seed spec fields, for example the provider type and region, networking CIDRs for pods, services, and nodes, etc., must be the same as the corresponding Shoot spec fields of the shoot that is being registered as seed. Attempting to use different values (except empty ones, so that they are supplied by the defaulting mechanism) will fail.

      Deploying gardenlet to the Shoot

      To register a shoot as a seed and deploy gardenlet to the shoot using a default configuration, create a ManagedSeed resource similar to the following:

      apiVersion: seedmanagement.gardener.cloud/v1alpha1
      kind: ManagedSeed
      metadata:
        name: my-managed-seed
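      # The example is truncated at this point; an illustrative continuation
      # (the shoot name below is a placeholder, not taken from this document) could be:
      spec:
        shoot:
          name: my-shoot            # existing shoot that should be registered as seed
        # A gardenlet section with a GardenletConfiguration (including its seedConfig)
        # may be added here; omitted values are defaulted as described above.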


      Adding Alerts

      The alert definitions are located in charts/seed-monitoring/charts/core/charts/prometheus/rules. There are two approaches for adding new alerts.

      1. Adding additional alerts for a component which already has a set of alerts. In this case you have to extend the existing rule file for the component.
      2. Adding alerts for a new component. In this case a new rule file with name scheme example-component.rules.yaml needs to be added.
      3. Add the new alert to alertInhibitionGraph.dot, add any required inhibition flows and render the new graph. To render the graph, run:
      dot -Tpng ./content/alertInhibitionGraph.dot -o ./content/alertInhibitionGraph.png
       
      4. Create a test for the new alert. See Alert Tests.

      Example alert:

      groups:
      - name: example.rules
        rules:
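        # The original example is cut off here by the page diff. The lines below are a
        # purely illustrative continuation; the alert name, expression, and threshold
        # are made up and not taken from this document:
        - alert: ExampleComponentDown
          expr: 'up{job="example-component"} == 0'
          for: 5m
          labels:
            severity: critical
          annotations:
            summary: Example component is down
            description: The example component has been unreachable for 5 minutes.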

      32 - Network Policies

      NetworkPolicys In Garden, Seed, Shoot Clusters

      This document describes which Kubernetes NetworkPolicys are deployed by Gardener into the various clusters.

      Garden Cluster

      (via gardener-operator and gardener-resource-manager)

      The gardener-operator runs a NetworkPolicy controller which is responsible for the following namespaces:

      • garden
      • istio-system
      • *istio-ingress-*
      • shoot-*
      • extension-* (in case the garden cluster is a seed cluster at the same time)

      It deploys the following so-called “general NetworkPolicys”:

      • deny-all: Denies all ingress and egress traffic for all pods in this namespace. Hence, all traffic must be explicitly allowed.
      • allow-to-dns: Allows egress traffic from pods labeled with networking.gardener.cloud/to-dns=allowed to DNS pods running in the kube-system namespace. In practice, most of the pods performing network egress traffic need this label.
      • allow-to-runtime-apiserver: Allows egress traffic from pods labeled with networking.gardener.cloud/to-runtime-apiserver=allowed to the API server of the runtime cluster.
      • allow-to-blocked-cidrs: Allows egress traffic from pods labeled with networking.gardener.cloud/to-blocked-cidrs=allowed to explicitly blocked addresses configured by human operators (configured via .spec.networking.blockedCIDRs in the Seed). For instance, this can be used to block the cloud provider’s metadata service.
      • allow-to-public-networks: Allows egress traffic from pods labeled with networking.gardener.cloud/to-public-networks=allowed to all public network IPs, except for private networks (RFC1918), carrier-grade NAT (RFC6598), and explicitly blocked addresses configured by human operators. In practice, this blocks egress traffic to all networks in the cluster and only allows egress traffic to public IPv4 addresses.
      • allow-to-private-networks: Allows egress traffic from pods labeled with networking.gardener.cloud/to-private-networks=allowed to the private networks (RFC1918) and carrier-grade NAT (RFC6598) except for cluster-specific networks (configured via .spec.networks in the Seed).

      Apart from those, the gardener-operator also enables the NetworkPolicy controller of gardener-resource-manager. Please find more information in the linked document. In summary, most of the pods that initiate connections with other pods will have labels with networking.resources.gardener.cloud/ prefixes. This way, they leverage the automatically created NetworkPolicys by the controller.
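      Purely as an illustration of how these labels are consumed (the pod name, image, and namespace choice are made up, not taken from this document), a pod that needs DNS resolution and egress to public networks would carry the corresponding labels:

      apiVersion: v1
      kind: Pod
      metadata:
        name: example-client                                     # illustrative
        namespace: garden
        labels:
          networking.gardener.cloud/to-dns: allowed              # matched by allow-to-dns
          networking.gardener.cloud/to-public-networks: allowed  # matched by allow-to-public-networks
      spec:
        containers:
        - name: client
          image: registry.example.com/client:latest              # illustrative

      Without such labels, the deny-all policy described above blocks the pod’s egress traffic.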

      Implications for Gardener Extensions

      Gardener extensions sometimes need to deploy additional components into the shoot namespace in the seed cluster hosting the control plane. For example, the gardener-extension-provider-aws deploys the cloud-controller-manager into the shoot namespace. In most cases, such pods require network policy labels to allow the traffic they are initiating.

      For components deployed in the kube-system namespace of the shoots (e.g., CNI plugins or CSI drivers, etc.), custom NetworkPolicys might be required to ensure the respective components can still communicate in case the user creates a deny-all policy.
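      As a sketch only (the name and pod selector are placeholders, and the blanket allow rules are just one possible shape; this is not a policy shipped by Gardener), such a custom policy in the shoot’s kube-system namespace might look like:

      apiVersion: networking.k8s.io/v1
      kind: NetworkPolicy
      metadata:
        name: allow-example-csi-driver     # illustrative
        namespace: kube-system
      spec:
        podSelector:
          matchLabels:
            app: example-csi-driver        # illustrative selector
        policyTypes:
        - Ingress
        - Egress
        ingress:
        - {}                               # allow all ingress to the selected pods
        egress:
        - {}                               # allow all egress from the selected pods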

      33 - New Cloud Provider

      Adding Cloud Providers

      This document provides an overview of how to integrate a new cloud provider into Gardener. Each component that requires integration has a detailed description of how to integrate it and the steps required.

      Cloud Components

      Gardener is composed of 2 or more Kubernetes clusters:

      • Shoot: These are the end-user clusters, the regular Kubernetes clusters you have seen. They provide places for your workloads to run.
      • Seed: This is the “management” cluster. It manages the control planes of shoots by running them as native Kubernetes workloads.

      These two clusters can run in the same cloud provider, but they do not need to. For example, you could run your Seed in AWS, while having one shoot in Azure, two in Google, two in Alicloud, and three in Equinix Metal.

      The Seed cluster deploys and manages the Shoot clusters. Importantly, for this discussion, the etcd data store backing each Shoot runs as workloads inside the Seed. Thus, to use the above example, the clusters in Azure, Google, Alicloud and Equinix Metal will have their worker nodes and master nodes running in those clouds, but the etcd clusters backing them will run as separate deployments in the Seed Kubernetes cluster on AWS.

      This distinction becomes important when preparing the integration to a new cloud provider.

      Gardener Cloud Integration

      Gardener and its related components integrate with cloud providers at the following key lifecycle elements:

      • Create/destroy/get/list machines for the Shoot.
      • Create/destroy/get/list infrastructure components for the Shoot, e.g. VPCs, subnets, routes, etc.
      • Backup/restore etcd for the Seed via writing files to and reading them from object storage.

      Thus, the integrations you need for your cloud provider depend on whether you want to deploy Shoot clusters to the provider, Seed or both.

      • Shoot Only: machine lifecycle management, infrastructure
      • Seed: etcd backup/restore

      Gardener API

      In addition to the requirements to integrate with the cloud provider, you also need to enable the core Gardener app to receive, validate, and process requests to use that cloud provider.

      • Expose the cloud provider to the consumers of the Gardener API, so it can be told to use that cloud provider as an option.
      • Validate that API as requests come in.
      • Write cloud provider specific implementation (called “provider extension”); a sketch of how such an extension is registered follows this list.
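      Provider extensions are registered with Gardener via a ControllerRegistration resource. As a rough sketch (the provider type example and all names are placeholders, and a real registration typically also references a ControllerDeployment), such a registration could look like:

      apiVersion: core.gardener.cloud/v1beta1
      kind: ControllerRegistration
      metadata:
        name: provider-example             # illustrative
      spec:
        resources:
        - kind: Infrastructure
          type: example                    # the provider type exposed via the Gardener API
        - kind: ControlPlane
          type: example
        - kind: Worker
          type: example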

      Cloud Provider API Requirements

      In order for a cloud provider to integrate with Gardener, the provider must have an API to perform machine lifecycle events, specifically:

      • Create a machine
      • Destroy a machine
      • Get information about a machine and its state
      • List machines

      In addition, if the Seed is to run on the given provider, it also must have an API to save files to object storage and retrieve them, for etcd backup/restore.

      The current integration with cloud providers is to add their API calls to Gardener and the Machine Controller Manager. As both Gardener and the Machine Controller Manager are written in Go, the cloud provider should have a Go SDK. However, if it has an API that is wrappable in Go, e.g. a REST API, then you can use that to integrate.

      The Gardener team is working on bringing cloud provider integrations out-of-tree, making them pluggable, which should simplify the process and make it possible to use other SDKs.

      Summary

      To add a new cloud provider, you need some or all of the following. Each repository contains instructions on how to extend it to a new cloud provider.

      • Seed or Shoot: Machine Lifecycle, implemented in machine-controller-manager (documentation: MCM new cloud provider)
      • Seed only: etcd backup/restore, implemented in etcd-backup-restore (documentation: in process)
      • All: Extension implementation, implemented in gardener (documentation: Extension controller)

      34 - New Kubernetes Version

      Adding Support For a New Kubernetes Version

      This document describes the steps needed to perform in order to confidently add support for a new Kubernetes minor version.

      ⚠️ Typically, once a minor Kubernetes version vX.Y is supported by Gardener, then all patch versions vX.Y.Z are also automatically supported without any required action. This is because patch versions do not introduce any new feature or API changes, so there is nothing that needs to be adapted in gardener/gardener code.

      The Kubernetes community releases a new minor version roughly every 4 months. Please refer to the official documentation about their release cycles for any additional information.

      Shortly before a new release, an “umbrella” issue should be opened which is used to collect the required adaptations and to track the work items. For example, #5102 can be used as a template for the issue description. The work items can be split into two groups: the first group contains tasks specific to the changes in the given Kubernetes release; the second group contains Kubernetes release-independent tasks.

      ℹ️ Upgrading the k8s.io/* and sigs.k8s.io/controller-runtime Golang dependencies is typically tracked and worked on separately (see e.g. #4772 or #5282).

      Deriving Release-Specific Tasks

      Most new minor Kubernetes releases incorporate API changes, deprecations, or new features. The community announces them via their change logs. In order to derive the release-specific tasks, the respective change log for the new version vX.Y has to be read and understood (for example, the changelog for v1.24).

      As already mentioned, typical changes to watch out for are:

      • API version promotions or deprecations
      • Feature gate promotions or deprecations
      • CLI flag changes for Kubernetes components
      • New default values in resources
      • New available fields in resources
      • New features potentially relevant for the Gardener system
      • Changes of labels or annotations Gardener relies on

      Obviously, this requires a certain experience and understanding of the Gardener project so that all “relevant changes” can be identified. While reading the change log, add the tasks (along with the respective PR in kubernetes/kubernetes) to the umbrella issue.

      ℹ️ Some of the changes might be specific to certain cloud providers. Pay attention to those as well and add related tasks to the issue.

      List Of Release-Independent Tasks

      The following paragraphs describe recurring tasks that need to be performed for each new release.

      Make Sure a New hyperkube Image Is Released

      The gardener/hyperkube repository is used to release container images consisting of the kubectl and kubelet binaries.

      There is a CI/CD job that runs periodically and releases a new hyperkube image when there is a new Kubernetes release. Before proceeding with the next steps, make sure that a new hyperkube image is released for the corresponding new Kubernetes minor version. Make sure that container image is present in GCR.

      Adapting Gardener

      • Allow instantiation of a Kubernetes client for the new minor version and update the README.md:
        • See this example commit.
        • The list of supported versions is meanwhile maintained here in the SupportedVersions variable.
      • Maintain the Kubernetes feature gates used for validation of Shoot resources:
        • The feature gates are maintained in this file.
        • To maintain this list for new Kubernetes versions, run hack/compare-k8s-feature-gates.sh <old-version> <new-version> (e.g. hack/compare-k8s-feature-gates.sh v1.26 v1.27).
        • It will present 3 lists of feature gates: those added and those removed in <new-version> compared to <old-version> and feature gates that got locked to default in <new-version>.
        • Add all added feature gates to the map with <new-version> as AddedInVersion and no RemovedInVersion.
        • For any removed feature gates, add <new-version> as RemovedInVersion to the already existing feature gate in the map.
        • For feature gates locked to default, add <new-version> as LockedToDefaultInVersion to the already existing feature gate in the map.
        • See this example commit.
      • Maintain the Kubernetes kube-apiserver admission plugins used for validation of Shoot resources:
        • The admission plugins are maintained in this file.
        • To maintain this list for new Kubernetes versions, run hack/compare-k8s-admission-plugins.sh <old-version> <new-version> (e.g. hack/compare-k8s-admission-plugins.sh 1.26 1.27).
        • It will present 2 lists of admission plugins: those added and those removed in <new-version> compared to <old-version>.
        • Add all added admission plugins to the admissionPluginsVersionRanges map with <new-version> as AddedInVersion and no RemovedInVersion.
        • For any removed admission plugins, add <new-version> as RemovedInVersion to the already existing admission plugin in the map.
        • Flag any admission plugins that are required (plugins that must not be disabled in the Shoot spec) by setting the Required boolean variable to true for the admission plugin in the map.
        • Flag any admission plugins that are forbidden by setting the Forbidden boolean variable to true for the admission plugin in the map.
      • Maintain the Kubernetes kube-apiserver API groups used for validation of Shoot resources:
        • The API groups are maintained in this file.
        • To maintain this list for new Kubernetes versions, run hack/compare-k8s-api-groups.sh <old-version> <new-version> (e.g. hack/compare-k8s-api-groups.sh 1.26 1.27).
        • It will present 2 lists of API GroupVersions and 2 lists of API GroupVersionResources: those added and those removed in <new-version> compared to <old-version>.
        • Add all added group versions to the apiGroupVersionRanges map and group version resources to the apiGVRVersionRanges map with <new-version> as AddedInVersion and no RemovedInVersion.
        • For any removed APIs, add <new-version> as RemovedInVersion to the already existing API in the corresponding map.
        • Flag any APIs that are required (APIs that must not be disabled in the Shoot spec) by setting the Required boolean variable to true for the API in the apiGVRVersionRanges map. If this API also should not be disabled for Workerless Shoots, then set RequiredForWorkerless boolean variable also to true. If the API is required for both Shoot types, then both of these booleans need to be set to true. If the whole API Group is required, then mark it correspondingly in the apiGroupVersionRanges map.
      • Maintain the Kubernetes kube-controller-manager controllers for each API group used in deploying required KCM controllers based on active APIs:
        • The API groups are maintained in this file.
        • To maintain this list for new Kubernetes versions, run hack/compute-k8s-controllers.sh <old-version> <new-version> (e.g. hack/compute-k8s-controllers.sh 1.28 1.29).
        • If it complains that the path for the controller is not present in the map, check the release branch of the new Kubernetes version and find the correct path for the missing/wrong controller. You can do so by checking the file cmd/kube-controller-manager/app/controllermanager.go and where the controller is initialized from. As of now, there is no straight-forward way to map each controller to its file. If this has improved, please enhance the script.
        • If the paths are correct, it will present 2 lists of controllers: those added and those removed for each API group in <new-version> compared to <old-version>.
        • Add all added controllers to the APIGroupControllerMap map and under the corresponding API group with <new-version> as AddedInVersion and no RemovedInVersion.
        • For any removed controllers, add <new-version> as RemovedInVersion to the already existing controller in the corresponding API group map. If you are unable to find the removed controller name, then check for its alias. Either in the staging/src/k8s.io/cloud-provider/names/controller_names.go file (example) or in the cmd/kube-controller-manager/app/* files (example for apps API group). This is because for kubernetes versions starting from v1.28, we don’t maintain the aliases in the controller, but the controller names itself since some controllers can be initialized without aliases as well (example). The old alias should still be working since it should be backwards compatible as explained here. Once the support for kubernetes version < v1.28 is dropped, we can drop the usages of these aliases and move completely to controller names.
        • Make sure that the API groups in this file are in sync with the groups in this file. For example, core/v1 is replaced by the script as v1 and apiserverinternal as internal. This is because the API groups registered by the apiserver (example) and the file path imported by the controllers (example) might be slightly different in some cases.
      • Maintain the ServiceAccount names for the controllers part of kube-controller-manager:
        • The names are maintained in this file.
        • To maintain this list for new Kubernetes versions, run hack/compare-k8s-controllers.sh <old-version> <new-version> (e.g. hack/compare-k8s-controllers.sh 1.26 1.27).
        • It will present 2 lists of controllers: those added and those removed in <new-version> compared to <old-version>.
        • Double check whether such ServiceAccount indeed appears in the kube-system namespace when creating a cluster with <new-version>. Note that it sometimes might be hidden behind a default-off feature gate. You can create a local cluster with the new version using the local provider. It could so happen that the name of the controller is used in the form of a constant and not a string (see example); in that case, note the value of the constant separately. You could also cross-check the names with the result of the compute-k8s-controllers.sh script used in the previous step.
        • If it appears, add all added controllers to the list based on the Kubernetes version (example).
        • For any removed controllers, add them only to the Kubernetes version if it is low enough.
      • Maintain the names of controllers used for workerless Shoots, here after carefully evaluating whether they are needed if there are no workers.
      • Maintain copies of the DaemonSet controller’s scheduling logic:
        • gardener-resource-manager’s Node controller uses a copy of parts of the DaemonSet controller’s logic for determining whether a specific Node should run a daemon pod of a given DaemonSet: see this file.
        • Check the referenced upstream files for changes to the DaemonSet controller’s logic and adapt our copies accordingly. This might include introducing version-specific checks in our codebase to handle different shoot cluster versions.
      • Maintain version specific defaulting logic in shoot admission plugin:
        • Sometimes default values for shoots are intentionally changed with the introduction of a new Kubernetes version.
        • The final Kubernetes version for a shoot is determined in the Shoot Validator Admission Plugin.
        • Any defaulting logic that depends on the version should be placed in this admission plugin (example).
      • Ensure that maintenance-controller is able to auto-update shoots to the new Kubernetes version. Changes to the shoot spec required for the Kubernetes update should be enforced in such cases (examples).
      • Add the new Kubernetes version to the CloudProfile in local setup (a sketch of such a version entry is shown after this list).
        • See this example commit.
      • In the next Gardener release, file a PR that bumps the used Kubernetes version for local e2e test.
        • This step must be performed in a PR that targets the next Gardener release because of the e2e upgrade tests. The e2e upgrade tests deploy the previous Gardener version where the new Kubernetes version is not present in the CloudProfile. If the e2e tests are adapted in the same PR that adds the support for the Kubernetes version, then the e2e upgrade tests for that PR will fail because the newly added Kubernetes version is missing in the local CloudProfile from the old release.
        • See this example commit PR.
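      Referring to the CloudProfile item in the list above, a sketch of such a version entry in the local CloudProfile (the version number and classification are illustrative):

      spec:
        kubernetes:
          versions:
          - version: "1.31.0"          # the newly supported minor version
            classification: preview    # illustrative; pick the appropriate classification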

      Filing the Pull Request

      Work on all the tasks you have collected and validate them using the local provider. Execute the e2e tests and if everything looks good, then go ahead and file the PR (example PR). Generally, it is great if you add the PRs also to the umbrella issue so that they can be tracked more easily.

      Adapting Provider Extensions

      After the PR in gardener/gardener for the support of the new version has been merged, you can go ahead and work on the provider extensions.

      Actually, you can already start even if the PR is not yet merged and use the branch of your fork.

      • Update the github.com/gardener/gardener dependency in the extension and update the README.md.
      • Work on release-specific tasks related to this provider.

      Maintaining the cloud-controller-manager Images

      Provider extensions are using upstream cloud-controller-manager images. Make sure to adopt the new cloud-controller-manager release for the new Kubernetes minor version (example PR).

      Some of the cloud providers are not using upstream cloud-controller-manager images for some of the supported Kubernetes versions.

      In case the seed cluster runs at its capacity, then there is no waiting time required during the scale-up. Instead, the low-priority pause pods will be preempted and allow newly created shoot control plane pods to be scheduled fast. In the meantime, the cluster-autoscaler will trigger the scale-up because the preempted pause pods want to run again. However, this delay doesn’t affect the important shoot control plane pods, which will improve the user experience.
      Use .spec.settings.excessCapacityReservation.configs to create excess capacity reservation deployments, which allow specifying custom values for resources, nodeSelector and tolerations. Each config creates a deployment with a minimum number of 2 replicas and a maximum equal to the number of zones configured for this seed. It defaults to a config reserving 2 CPUs and 6Gi of memory for each pod, with no nodeSelector and no tolerations.

      Excess capacity reservation is enabled when .spec.settings.excessCapacityReservation.enabled is true or not specified while configs are present. It can be disabled by setting the field to false.
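      A minimal sketch of such a setting on a Seed (the seed name and node selector are illustrative; the resource values correspond to the defaults mentioned above):

      apiVersion: core.gardener.cloud/v1beta1
      kind: Seed
      metadata:
        name: my-seed                                  # illustrative
      spec:
        settings:
          excessCapacityReservation:
            enabled: true
            configs:
            - resources:
                cpu: "2"
                memory: 6Gi
              nodeSelector:                            # optional, illustrative
                node.kubernetes.io/instance-type: m5.xlarge
              tolerations: []                          # optional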

      Scheduling

      By default, the Gardener Scheduler will consider all seed clusters when a new shoot cluster shall be created. However, administrators/operators might want to exclude some of them from being considered by the scheduler. Therefore, seed clusters can be marked as “invisible”.

      │ ├── operatingsystem
      │ ├── operations
      │ └── vpntunnel
      ├── suites          # suites that run against a running garden or shoot cluster
      │ ├── gardener
      │ └── shoot
      └── system          # suites that are used for building a full test flow

      …to control the execution of specific labeled tests. See the example below:

      go test -timeout=0 ./test/testmachinery/suites/shoot \
             --v -ginkgo.v -ginkgo.show-node-events -ginkgo.no-color \
             --report-file=/tmp/report.json \                     # write elasticsearch formatted output to a file
             --disable-dump=false \                               # disables dumping of the current state if a test fails
             -kubecfg=/path/to/gardener/kubeconfig \
             -shoot-name=<shoot-name> \                           # Name of the shoot to test
             -project-namespace=<gardener project namespace> \    # Name of the gardener project the test shoot resides
      Advanced

      1 - Cleanup of Shoot Clusters in Deletion

      Cleanup of Shoot Clusters in Deletion

      When a shoot cluster is deleted, Gardener tries to gracefully remove most of the Kubernetes resources inside the cluster. This is to prevent any infrastructure or other artifacts from remaining after the shoot deletion.

      The cleanup is performed in four steps. Some resources are deleted with a grace period, and all resources are forcefully deleted (by removing blocking finalizers) after some time to not block the cluster deletion entirely.

      Cleanup steps:

      1. All ValidatingWebhookConfigurations and MutatingWebhookConfigurations are deleted with a 5m grace period. Forceful finalization happens after 5m.
      2. All APIServices and CustomResourceDefinitions are deleted with a 5m grace period. Forceful finalization happens after 1h.
      3. All CronJobs, DaemonSets, Deployments, Ingresss, Jobs, Pods, ReplicaSets, ReplicationControllers, Services, StatefulSets, PersistentVolumeClaims are deleted with a 5m grace period. Forceful finalization happens after 5m.

        If the Shoot is annotated with shoot.gardener.cloud/skip-cleanup=true, then only Services and PersistentVolumeClaims are considered.

      4. All VolumeSnapshots and VolumeSnapshotContents are deleted with a 5m grace period. Forceful finalization happens after 1h.

      It is possible to override the finalization grace periods via annotations on the Shoot:

      • shoot.gardener.cloud/cleanup-webhooks-finalize-grace-period-seconds (for the resources handled in step 1)
      • shoot.gardener.cloud/cleanup-extended-apis-finalize-grace-period-seconds (for the resources handled in step 2)
      • shoot.gardener.cloud/cleanup-kubernetes-resources-finalize-grace-period-seconds (for the resources handled in step 3)

      ⚠️ If "0" is provided, then all resources are finalized immediately without waiting for any graceful deletion.
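      A minimal sketch of overriding these grace periods on a Shoot (names and values are arbitrary examples):

      apiVersion: core.gardener.cloud/v1beta1
      kind: Shoot
      metadata:
        name: my-shoot                 # illustrative
        namespace: garden-dev          # illustrative
        annotations:
          shoot.gardener.cloud/cleanup-webhooks-finalize-grace-period-seconds: "60"
          shoot.gardener.cloud/cleanup-extended-apis-finalize-grace-period-seconds: "600"
          shoot.gardener.cloud/cleanup-kubernetes-resources-finalize-grace-period-seconds: "300"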

      KubeControllerManagerConfig

      (Appears on: Kubernetes)

      KubeControllerManagerConfig contains configuration settings for the kube-controller-manager.

      FieldDescription
      KubernetesConfig
      KubernetesConfig

      (Members of KubernetesConfig are embedded into this type.)

      horizontalPodAutoscaler
      HorizontalPodAutoscalerConfig
      (Optional)

      HorizontalPodAutoscalerConfig contains horizontal pod autoscaler configuration settings for the kube-controller-manager.

      nodeCIDRMaskSize
      int32
      (Optional)

      NodeCIDRMaskSize defines the mask size for node cidr in cluster (default is 24). This field is immutable.

      podEvictionTimeout
      Kubernetes meta/v1.Duration
      (Optional)

      PodEvictionTimeout defines the grace period for deleting pods on failed nodes. Defaults to 2m.

      Deprecated: The corresponding kube-controller-manager flag --pod-eviction-timeout is deprecated in favor of the kube-apiserver flags --default-not-ready-toleration-seconds and --default-unreachable-toleration-seconds. The --pod-eviction-timeout flag does not have effect when the taint based eviction is enabled. The taint based eviction is beta (enabled by default) since Kubernetes 1.13 and GA since Kubernetes 1.18. Hence, instead of setting this field, set the spec.kubernetes.kubeAPIServer.defaultNotReadyTolerationSeconds and spec.kubernetes.kubeAPIServer.defaultUnreachableTolerationSeconds.

      nodeMonitorGracePeriod
      Kubernetes meta/v1.Duration
      (Optional)

      NodeMonitorGracePeriod defines the grace period before an unresponsive node is marked unhealthy.
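      Referring to the deprecation note on podEvictionTimeout above, a minimal sketch of the recommended replacement in a Shoot spec (the values are illustrative):

      spec:
        kubernetes:
          kubeAPIServer:
            defaultNotReadyTolerationSeconds: 120          # illustrative
            defaultUnreachableTolerationSeconds: 120       # illustrative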

      KubeProxyConfig

      (Appears on: …)

      recommendationLowerBoundMemoryPercentile
      float64(Optional)

      RecommendationLowerBoundMemoryPercentile is the usage percentile that will be used for the lower bound on memory recommendation. (default: 0.5)

      recommendationUpperBoundMemoryPercentile
      float64(Optional)

      RecommendationUpperBoundMemoryPercentile is the usage percentile that will be used for the upper bound on memory recommendation. (default: 0.95)

      Volume

      (Appears on: Worker)

      Volume contains information about the volume type, size, and encryption.

      FieldDescription
      name
      string
      (Optional)

      Name of the volume to make it referenceable.

      type
      string
      (Optional)

      Type is the type of the volume.

      size
      string

      VolumeSize is the size of the volume.

      encrypted
      bool
      (Optional)

      Encrypted determines if the volume should be encrypted.
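      Purely for illustration (the name, volume type, and size are arbitrary, provider-specific example values), a worker volume using these fields could look like:

      volume:
        name: root-disk      # optional
        type: gp3            # illustrative provider-specific volume type
        size: 50Gi
        encrypted: true      # optional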

      VolumeType

      (Appears on: CloudProfileSpec, NamespacedCloudProfileSpec)

      VolumeType contains certain properties of a volume type.

      FieldDescription
      class
      string

      Class is the class of the volume type.

      name
      string

      Name is the name of the volume type.

      usable
      bool
      (Optional)

      Usable defines if the volume type can be used for shoot clusters.

      minSize
      k8s.io/apimachinery/pkg/api/resource.Quantity
      (Optional)

      MinSize is the minimal supported storage size.

      WatchCacheSizes

      (Appears on: KubeAPIServerConfig)

      WatchCacheSizes contains configuration of the API server’s watch cache sizes.

      FieldDescription
      default
      int32
      (Optional)

      Default configures the default watch cache size of the kube-apiserver

      DNSRecordStatus

      (Appears on: DNSRecord)

      DNSRecordStatus is the status of a DNSRecord resource.

      FieldDescription
      DefaultStatus
      DefaultStatus

      (Members of DefaultStatus are embedded into this type.)

      DefaultStatus is a structure containing common fields used by all extension resources.

      zone
      string
      (Optional)

      Zone is the DNS hosted zone of this DNS record.

      DNSRecordType (string alias)

      (Appears on: DNSRecordSpec)

      DNSRecordType is a string alias.

      DataVolume

      (Appears on: WorkerPool)

      DataVolume contains information about a data volume.

      FieldDescription
      name
      string

      Name of the volume to make it referenceable.

      type
      string
      (Optional)

      Type is the type of the volume.

      size
      string

      Size is the size of the volume.

      encrypted
      bool
      (Optional)

      Encrypted determines if the volume should be encrypted.

      DefaultSpec

      (Appears on: BackupBucketSpec, BackupEntrySpec, BastionSpec, …)

      …triggered. For each FilePath there must exist a File with matching Path in OperatingSystemConfig.Spec.Files.

      UnitCommand (string alias)

      (Appears on: Unit)

      UnitCommand is a string alias.

      Volume

      (Appears on: WorkerPool)

      Volume contains information about the root disks that should be used for worker pools.

      FieldDescription
      name
      string
      (Optional)

      Name of the volume to make it referenceable.

      type
      string
      (Optional)

      Type is the type of the volume.

      size
      string

      Size is the size of the root volume.

      encrypted
      bool
      (Optional)

      Encrypted determines if the volume should be encrypted.

      WorkerPool

      (Appears on: WorkerSpec)

      WorkerPool is the definition of a specific worker pool.

      FieldDescription
      machineType
      string

      MachineType contains information about the machine type that should be used for this worker pool.

      maximum
      int32

      Maximum is the maximum size of the worker pool.

      maxSurge
      k8s.io/apimachinery/pkg/util/intstr.IntOrString

      MaxSurge is the maximum number of VMs that are created during an update.

      maxUnavailable
      k8s.io/apimachinery/pkg/util/intstr.IntOrString

      MaxUnavailable is the maximum number of VMs that can be unavailable during an update.

      annotations
      map[string]string
      (Optional)

      Annotations is a map of key/value pairs for annotations for all the Node objects in this worker pool.

      labels
      map[string]string
      (Optional)

      Labels is a map of key/value pairs for labels for all the Node objects in this worker pool.

      taints
      []Kubernetes core/v1.Taint
      (Optional)

      Taints is a list of taints for all the Node objects in this worker pool.

      machineImage
      MachineImage

      MachineImage contains logical information about the name and the version of the machine image that should be used. The logical information must be mapped to the provider-specific information (e.g., AMIs, …) by the provider itself.

      minimum
      int32

      Minimum is the minimum size of the worker pool.

      name
      string

      Name is the name of this worker pool.

      nodeAgentSecretName
      string
      (Optional)

      NodeAgentSecretName is uniquely identifying selected aspects of the OperatingSystemConfig. If it changes, then the …

      type
      string
      (Optional)

      Type is the DNS provider type.

      zones
      DNSIncludeExclude
      (Optional)

      Zones contains information about which hosted zones shall be included/excluded for this provider.

      Deprecated: This field is deprecated and will be removed in a future release. Please use the DNS extension provider config (e.g. shoot-dns-service) for additional configuration.

      DataVolume

      (Appears on: Worker)

      DataVolume contains information about a data volume.

      FieldDescription
      name
      string

      Name of the volume to make it referenceable.

      type
      string
      (Optional)

      Type is the type of the volume.

      size
      string

      VolumeSize is the size of the volume.

      encrypted
      bool
      (Optional)

      Encrypted determines if the volume should be encrypted.
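
      As a hedged illustration, data volumes are typically attached per worker entry in the Shoot specification; the spec.provider.workers path and the values below are assumptions:

        spec:
          provider:
            workers:
            - name: worker-a
              dataVolumes:
              - name: extra-data
                type: gp3
                size: 25Gi
                encrypted: true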

      DeploymentRef

      (Appears on: ControllerRegistrationDeployment)

      DeploymentRef contains information about ControllerDeployment references.

      FieldDescription
      name
      string

      Name is the name of the ControllerDeployment that is being referred to.
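
      For context, a hedged sketch of a ControllerRegistration that references a ControllerDeployment by name via deploymentRefs; the resource names are illustrative:

        apiVersion: core.gardener.cloud/v1beta1
        kind: ControllerRegistration
        metadata:
          name: provider-example
        spec:
          deployment:
            deploymentRefs:
            - name: provider-example   # name of the referenced ControllerDeployment
          resources:
          - kind: ControlPlane
            type: example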

      DualApprovalForDeletion

      (Appears on: ProjectSpec)

      DualApprovalForDeletion contains configuration for the dual approval concept for resource deletion.

      FieldDescription
      resource
      string

      Resource is the name of the resource this applies to.

      selector
      Kubernetes meta/v1.LabelSelector

      Selector is the label selector for the resources.

      includeServiceAccounts
      bool
      (Optional)

      IncludeServiceAccounts specifies whether the concept also applies when deletion is triggered by ServiceAccounts. Defaults to true.
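
      A hedged example of how dual approval for deletion could be configured on a Project; the list form under spec.dualApprovalForDeletion and the label selector are assumptions for illustration:

        apiVersion: core.gardener.cloud/v1beta1
        kind: Project
        metadata:
          name: my-project
        spec:
          dualApprovalForDeletion:
          - resource: shoots
            selector:
              matchLabels:
                environment: production
            includeServiceAccounts: false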

      ETCDEncryptionKeyRotation

      (Appears on:

      KubeControllerManagerConfig

      (Appears on: Kubernetes)

      KubeControllerManagerConfig contains configuration settings for the kube-controller-manager.

      FieldDescription
      KubernetesConfig
      KubernetesConfig

      (Members of KubernetesConfig are embedded into this type.)

      horizontalPodAutoscaler
      HorizontalPodAutoscalerConfig
      (Optional)

      HorizontalPodAutoscalerConfig contains horizontal pod autoscaler configuration settings for the kube-controller-manager.

      nodeCIDRMaskSize
      int32
      (Optional)

      NodeCIDRMaskSize defines the mask size for the node CIDRs in the cluster (default is 24). This field is immutable.

      podEvictionTimeout
      Kubernetes meta/v1.Duration
      (Optional)

      PodEvictionTimeout defines the grace period for deleting pods on failed nodes. Defaults to 2m.

      Deprecated: The corresponding kube-controller-manager flag --pod-eviction-timeout is deprecated in favor of the kube-apiserver flags --default-not-ready-toleration-seconds and --default-unreachable-toleration-seconds. The --pod-eviction-timeout flag has no effect when taint-based eviction is enabled. Taint-based eviction is beta (enabled by default) since Kubernetes 1.13 and GA since Kubernetes 1.18. Hence, instead of setting this field, set spec.kubernetes.kubeAPIServer.defaultNotReadyTolerationSeconds and spec.kubernetes.kubeAPIServer.defaultUnreachableTolerationSeconds.

      nodeMonitorGracePeriod
      Kubernetes meta/v1.Duration
      (Optional)

      NodeMonitorGracePeriod defines the grace period before an unresponsive node is marked unhealthy.
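
      As a rough, hedged illustration, these settings are configured per Shoot under spec.kubernetes.kubeControllerManager; the horizontalPodAutoscaler sub-field shown and all values are examples only:

        spec:
          kubernetes:
            kubeControllerManager:
              nodeCIDRMaskSize: 24
              nodeMonitorGracePeriod: 40s
              horizontalPodAutoscaler:
                tolerance: 0.1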

      KubeProxyConfig

      (Appears on:

      recommendationLowerBoundMemoryPercentile
      float64
      (Optional)

      RecommendationLowerBoundMemoryPercentile is the usage percentile that will be used for the lower bound on memory recommendation. (default: 0.5)

      recommendationUpperBoundMemoryPercentile
      float64
      (Optional)

      RecommendationUpperBoundMemoryPercentile is the usage percentile that will be used for the upper bound on memory recommendation. (default: 0.95)
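
      A hedged sketch of where these recommender percentiles might be set in a Shoot manifest, assuming they live under spec.kubernetes.verticalPodAutoscaler as the surrounding defaults suggest:

        spec:
          kubernetes:
            verticalPodAutoscaler:
              enabled: true
              recommendationLowerBoundMemoryPercentile: 0.5
              recommendationUpperBoundMemoryPercentile: 0.95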

      Volume

      (Appears on: Worker)

      Volume contains information about the volume type, size, and encryption.

      FieldDescription
      name
      string
      (Optional)

      Name of the volume to make it referenceable.

      type
      string
      (Optional)

      Type is the type of the volume.

      size
      string

      VolumeSize is the size of the volume.

      encrypted
      bool
      (Optional)

      Encrypted determines if the volume should be encrypted.
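
      In a Shoot manifest, such a volume is attached to a worker entry; a hedged sketch, assuming the usual spec.provider.workers[].volume path and illustrative values:

        spec:
          provider:
            workers:
            - name: worker-a
              volume:
                type: gp3
                size: 50Gi
                encrypted: true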

      VolumeType

      (Appears on: CloudProfileSpec, NamespacedCloudProfileSpec)

      VolumeType contains certain properties of a volume type.

      FieldDescription
      class
      string

      Class is the class of the volume type.

      name
      string

      Name is the name of the volume type.

      usable
      bool
      (Optional)

      Usable defines if the volume type can be used for shoot clusters.

      minSize
      k8s.io/apimachinery/pkg/api/resource.Quantity
      (Optional)

      MinSize is the minimal supported storage size.
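
      A hedged example of volume type entries as they might appear in a CloudProfile; the spec.volumeTypes path follows the CloudProfileSpec reference, while the concrete names and classes are illustrative:

        spec:
          volumeTypes:
          - name: gp3
            class: standard
            usable: true
            minSize: 20Gi
          - name: io1
            class: premium
            usable: true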

      WatchCacheSizes

      (Appears on: KubeAPIServerConfig)

      WatchCacheSizes contains configuration of the API server’s watch cache sizes.

      FieldDescription
      default
      int32
      (Optional)

      Default configures the default watch cache size of the kube-apiserver
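
      A hedged sketch of setting the default watch cache size in a Shoot manifest, assuming the block sits under spec.kubernetes.kubeAPIServer.watchCacheSizes; per-resource overrides exist alongside the default but are omitted here:

        spec:
          kubernetes:
            kubeAPIServer:
              watchCacheSizes:
                default: 100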

      WorkerSystemComponents

      (Appears on: Worker)

      WorkerSystemComponents contains configuration for system components related to this worker pool

      FieldDescription
      allow
      bool

      Allow determines whether the pool should be allowed to host system components or not (defaults to true)
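
      For illustration, a hedged sketch of excluding a worker pool from hosting system components, assuming the setting lives in the worker entry's systemComponents block:

        spec:
          provider:
            workers:
            - name: gpu-pool
              systemComponents:
                allow: false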

      WorkersSettings

      (Appears on: Provider)

      WorkersSettings contains settings for all workers.

      FieldDescription
      sshAccess
      SSHAccess
      (Optional)

      SSHAccess contains settings regarding ssh access to the worker nodes.
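
      A hedged example of disabling SSH access for all worker nodes of a Shoot, assuming WorkersSettings is exposed at spec.provider.workersSettings and SSHAccess carries an enabled flag:

        spec:
          provider:
            workersSettings:
              sshAccess:
                enabled: false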


      Generated with gen-crd-api-reference-docs
