From af6894eba6c7e424fd0e9e20809362d18f8cea30 Mon Sep 17 00:00:00 2001 From: Christian Schlotter Date: Mon, 29 Apr 2024 09:51:35 +0200 Subject: [PATCH] Prepare main for development of release v1.11 --- Makefile | 12 +- metadata.yaml | 3 + test/e2e/clusterctl_upgrade_test.go | 20 +- test/e2e/config/vsphere.yaml | 26 +- .../clusterclass-quick-start.yaml | 126 +- .../clusterclass/kustomization.yaml | 3 + .../clusterclass/patch-namingstrategy.yaml | 8 + .../clusterclass/patch-prekubeadmscript.yaml | 14 +- .../clusterclass/patch-vsphere-template.yaml | 45 + .../commons/cluster-network-CIDR.yaml | 0 .../cluster-resource-set-csi-insecure.yaml | 6 +- .../commons/cluster-resource-set-label.yaml | 0 .../commons/cluster-resource-set.yaml | 0 .../topology/cluster-template-topology.yaml | 1286 +++++++++++++++++ .../topology/kustomization.yaml | 8 +- .../workload/kustomization.yaml | 0 .../workload-control-plane-endpoint-ip.yaml | 0 .../v1.8/base/cluster-template-topology.yaml | 834 ----------- .../clusterclass/patch-vsphere-template.yaml | 37 - .../v1.8/commons/remove-storage-policy.yaml | 2 - test/e2e/data/shared/capv/main/metadata.yaml | 6 +- .../shared/capv/{v1.8 => v1.10}/metadata.yaml | 5 +- test/e2e/data/shared/capv/v1.9/metadata.yaml | 4 +- 23 files changed, 1496 insertions(+), 949 deletions(-) rename test/e2e/data/infrastructure-vsphere-govmomi/{v1.8 => v1.10}/clusterclass/clusterclass-quick-start.yaml (71%) rename test/e2e/data/infrastructure-vsphere-govmomi/{v1.8 => v1.10}/clusterclass/kustomization.yaml (78%) create mode 100644 test/e2e/data/infrastructure-vsphere-govmomi/v1.10/clusterclass/patch-namingstrategy.yaml rename test/e2e/data/infrastructure-vsphere-govmomi/{v1.8 => v1.10}/clusterclass/patch-prekubeadmscript.yaml (70%) create mode 100644 test/e2e/data/infrastructure-vsphere-govmomi/v1.10/clusterclass/patch-vsphere-template.yaml rename test/e2e/data/infrastructure-vsphere-govmomi/{v1.8 => v1.10}/commons/cluster-network-CIDR.yaml (100%) rename test/e2e/data/infrastructure-vsphere-govmomi/{v1.8 => v1.10}/commons/cluster-resource-set-csi-insecure.yaml (84%) rename test/e2e/data/infrastructure-vsphere-govmomi/{v1.8 => v1.10}/commons/cluster-resource-set-label.yaml (100%) rename test/e2e/data/infrastructure-vsphere-govmomi/{v1.8 => v1.10}/commons/cluster-resource-set.yaml (100%) create mode 100644 test/e2e/data/infrastructure-vsphere-govmomi/v1.10/topology/cluster-template-topology.yaml rename test/e2e/data/infrastructure-vsphere-govmomi/{v1.8 => v1.10}/topology/kustomization.yaml (53%) rename test/e2e/data/infrastructure-vsphere-govmomi/{v1.8 => v1.10}/workload/kustomization.yaml (100%) rename test/e2e/data/infrastructure-vsphere-govmomi/{v1.8 => v1.10}/workload/workload-control-plane-endpoint-ip.yaml (100%) delete mode 100644 test/e2e/data/infrastructure-vsphere-govmomi/v1.8/base/cluster-template-topology.yaml delete mode 100644 test/e2e/data/infrastructure-vsphere-govmomi/v1.8/clusterclass/patch-vsphere-template.yaml delete mode 100644 test/e2e/data/infrastructure-vsphere-govmomi/v1.8/commons/remove-storage-policy.yaml rename test/e2e/data/shared/capv/{v1.8 => v1.10}/metadata.yaml (96%) diff --git a/Makefile b/Makefile index 8682fe272a..43db367d00 100644 --- a/Makefile +++ b/Makefile @@ -346,7 +346,7 @@ generate-doctoc: TRACE=$(TRACE) ./hack/generate-doctoc.sh .PHONY: generate-e2e-templates -generate-e2e-templates: $(KUSTOMIZE) $(addprefix generate-e2e-templates-, v1.8 v1.9 main) ## Generate test templates for all branches +generate-e2e-templates: $(KUSTOMIZE) $(addprefix 
generate-e2e-templates-, v1.9 v1.10 main) ## Generate test templates for all branches .PHONY: generate-e2e-templates-main generate-e2e-templates-main: $(KUSTOMIZE) ## Generate test templates for the main branch @@ -386,16 +386,16 @@ generate-e2e-templates-main: $(KUSTOMIZE) ## Generate test templates for the mai "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/conformance" > "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/cluster-template-conformance-supervisor.yaml" "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/install-on-bootstrap" > "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/cluster-template-install-on-bootstrap-supervisor.yaml" +.PHONY: generate-e2e-templates-v1.10 +generate-e2e-templates-v1.10: $(KUSTOMIZE) + "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/v1.10/clusterclass" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/v1.10/clusterclass-quick-start.yaml" + "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/v1.10/workload" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/v1.10/cluster-template-workload.yaml" + .PHONY: generate-e2e-templates-v1.9 generate-e2e-templates-v1.9: $(KUSTOMIZE) "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/v1.9/clusterclass" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/v1.9/clusterclass-quick-start.yaml" "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/v1.9/workload" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/v1.9/cluster-template-workload.yaml" -.PHONY: generate-e2e-templates-v1.8 -generate-e2e-templates-v1.8: $(KUSTOMIZE) - "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/v1.8/clusterclass" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/v1.8/clusterclass-quick-start.yaml" - "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/v1.8/workload" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/v1.8/cluster-template-workload.yaml" - .PHONY: generate-test-infra-prowjobs generate-test-infra-prowjobs: $(PROWJOB_GEN) ## Generates the prowjob configurations in test-infra @if [ -z "${TEST_INFRA_DIR}" ]; then echo "TEST_INFRA_DIR is not set"; exit 1; fi diff --git a/metadata.yaml b/metadata.yaml index bddb340f3f..5a959dabca 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -39,3 +39,6 @@ releaseSeries: - major: 1 minor: 10 contract: v1beta1 + - major: 1 + minor: 11 + contract: v1beta1 diff --git a/test/e2e/clusterctl_upgrade_test.go b/test/e2e/clusterctl_upgrade_test.go index 703fb833e6..286176a939 100644 --- a/test/e2e/clusterctl_upgrade_test.go +++ b/test/e2e/clusterctl_upgrade_test.go @@ -35,8 +35,8 @@ var ( capvReleaseMarkerPrefix = "go://sigs.k8s.io/cluster-api-provider-vsphere@v%s" ) -var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.9=>current, CAPI 1.6=>1.7) [ClusterClass]", func() { - const specName = "clusterctl-upgrade-1.9-current" // prefix (clusterctl-upgrade) copied from CAPI +var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.10=>current, CAPI 1.6=>1.7) [ClusterClass]", func() { + const specName = "clusterctl-upgrade-1.10-current" // prefix (clusterctl-upgrade) copied from CAPI Setup(specName, func(testSpecificSettingsGetter func() testSettings) { capi_e2e.ClusterctlUpgradeSpec(ctx, func() capi_e2e.ClusterctlUpgradeSpecInput { // Get CAPI v1.6 latest stable release @@ -44,7 +44,7 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.9= 
capiStableRelease16, err := getStableReleaseOfMinor(ctx, capiReleaseMarkerPrefix, capiVersion16) Expect(err).ToNot(HaveOccurred(), "Failed to get stable version for minor release : %s", capiVersion16) // Get CAPV v1.9 latest stable release - capvVersion19 := "1.9" + capvVersion19 := "1.10" capvStableRelease19, err := getStableReleaseOfMinor(ctx, capvReleaseMarkerPrefix, capvVersion19) Expect(err).ToNot(HaveOccurred(), "Failed to get stable version for minor release : %s", capvVersion19) return capi_e2e.ClusterctlUpgradeSpecInput{ @@ -65,16 +65,16 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.9= // InitWithKubernetesVersion should be the highest kubernetes version supported by the init Cluster API version. // This is to guarantee that both, the old and new CAPI version, support the defined version. // Ensure all Kubernetes versions used here are covered in patch-vsphere-template.yaml - InitWithKubernetesVersion: "v1.29.0", - WorkloadKubernetesVersion: "v1.29.0", + InitWithKubernetesVersion: "v1.30.0", + WorkloadKubernetesVersion: "v1.30.0", WorkloadFlavor: testSpecificSettingsGetter().FlavorForMode("workload"), } }) }, WithIP("WORKLOAD_CONTROL_PLANE_ENDPOINT_IP")) }) -var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.8=>current, CAPI 1.5=>1.7) [ClusterClass]", func() { - const specName = "clusterctl-upgrade-1.8-current" // prefix (clusterctl-upgrade) copied from CAPI +var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.9=>current, CAPI 1.5=>1.7) [ClusterClass]", func() { + const specName = "clusterctl-upgrade-1.9-current" // prefix (clusterctl-upgrade) copied from CAPI Setup(specName, func(testSpecificSettingsGetter func() testSettings) { capi_e2e.ClusterctlUpgradeSpec(ctx, func() capi_e2e.ClusterctlUpgradeSpecInput { // Get CAPI v1.5 latest stable release @@ -82,7 +82,7 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.8= capiStableRelease15, err := getStableReleaseOfMinor(ctx, capiReleaseMarkerPrefix, capiVersion15) Expect(err).ToNot(HaveOccurred(), "Failed to get stable version for minor release : %s", capiVersion15) // Get CAPV v1.8 latest stable release - capvVersion18 := "1.8" + capvVersion18 := "1.9" capvStableRelease18, err := getStableReleaseOfMinor(ctx, capvReleaseMarkerPrefix, capvVersion18) Expect(err).ToNot(HaveOccurred(), "Failed to get stable version for minor release : %s", capvVersion18) return capi_e2e.ClusterctlUpgradeSpecInput{ @@ -103,8 +103,8 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.8= // InitWithKubernetesVersion should be the highest kubernetes version supported by the init Cluster API version. // This is to guarantee that both, the old and new CAPI version, support the defined version. 
// Ensure all Kubernetes versions used here are covered in patch-vsphere-template.yaml - InitWithKubernetesVersion: "v1.28.0", - WorkloadKubernetesVersion: "v1.28.0", + InitWithKubernetesVersion: "v1.29.0", + WorkloadKubernetesVersion: "v1.29.0", WorkloadFlavor: testSpecificSettingsGetter().FlavorForMode("workload"), } }) diff --git a/test/e2e/config/vsphere.yaml b/test/e2e/config/vsphere.yaml index 3f1a16da50..1e7dd2618b 100644 --- a/test/e2e/config/vsphere.yaml +++ b/test/e2e/config/vsphere.yaml @@ -152,7 +152,7 @@ providers: - name: vsphere type: InfrastructureProvider versions: - - name: v1.10.99 # next release + - name: v1.11.99 # next release # Use manifest from source files value: ../../../../cluster-api-provider-vsphere/config/default contract: v1beta1 @@ -180,31 +180,31 @@ providers: - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-supervisor/main/cluster-template-install-on-bootstrap-supervisor.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-supervisor/main/cluster-template-conformance-supervisor.yaml" - sourcePath: "../data/shared/capv/main/metadata.yaml" - - name: "{go://sigs.k8s.io/cluster-api-provider-vsphere@v1.9}" # supported release in the v1beta1 series + - name: "{go://sigs.k8s.io/cluster-api-provider-vsphere@v1.10}" # supported release in the v1beta1 series # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/releases/download/{go://sigs.k8s.io/cluster-api-provider-vsphere@v1.9}/infrastructure-components.yaml" + value: "https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/releases/download/{go://sigs.k8s.io/cluster-api-provider-vsphere@v1.10}/infrastructure-components.yaml" type: "url" contract: v1beta1 files: # Add a cluster template - - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/v1.9/cluster-template-workload.yaml" - - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/v1.9/clusterclass-quick-start.yaml" - - sourcePath: "../data/shared/capv/v1.9/metadata.yaml" - - name: "{go://sigs.k8s.io/cluster-api-provider-vsphere@v1.8}" # supported release in the v1beta1 series + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/v1.10/cluster-template-workload.yaml" + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/v1.10/clusterclass-quick-start.yaml" + - sourcePath: "../data/shared/capv/v1.10/metadata.yaml" + - name: "{go://sigs.k8s.io/cluster-api-provider-vsphere@v1.9}" # supported release in the v1beta1 series # Use manifest from source files - value: "https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/releases/download/{go://sigs.k8s.io/cluster-api-provider-vsphere@v1.8}/infrastructure-components.yaml" + value: "https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/releases/download/{go://sigs.k8s.io/cluster-api-provider-vsphere@v1.9}/infrastructure-components.yaml" type: "url" contract: v1beta1 files: # Add a cluster template - - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/v1.8/cluster-template-workload.yaml" - - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/v1.8/clusterclass-quick-start.yaml" - - sourcePath: "../data/shared/capv/v1.8/metadata.yaml" + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/v1.9/cluster-template-workload.yaml" + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/v1.9/clusterclass-quick-start.yaml" + - sourcePath: "../data/shared/capv/v1.9/metadata.yaml" - name: vcsim type: 
RuntimeExtensionProvider # vcsim isn't a provider, but we fake it is so it can be handled by the clusterctl machinery. versions: - - name: v1.10.99 + - name: v1.11.99 # Use manifest from source files value: ../../../../cluster-api-provider-vsphere/test/infrastructure/vcsim/config/default contract: v1beta1 @@ -229,7 +229,7 @@ providers: - name: net-operator type: RuntimeExtensionProvider # net-operator isn't a provider, but we fake it is so it can be handled by the clusterctl machinery. versions: - - name: v1.10.99 + - name: v1.11.99 # Use manifest from source files value: ../../../../cluster-api-provider-vsphere/test/infrastructure/net-operator/config/default contract: v1beta1 diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/v1.8/clusterclass/clusterclass-quick-start.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/v1.10/clusterclass/clusterclass-quick-start.yaml similarity index 71% rename from test/e2e/data/infrastructure-vsphere-govmomi/v1.8/clusterclass/clusterclass-quick-start.yaml rename to test/e2e/data/infrastructure-vsphere-govmomi/v1.10/clusterclass/clusterclass-quick-start.yaml index a9aeb15574..1451376109 100644 --- a/test/e2e/data/infrastructure-vsphere-govmomi/v1.8/clusterclass/clusterclass-quick-start.yaml +++ b/test/e2e/data/infrastructure-vsphere-govmomi/v1.10/clusterclass/clusterclass-quick-start.yaml @@ -37,6 +37,9 @@ spec: - op: add path: /spec/template/spec/kubeadmConfigSpec/files value: [] + - op: add + path: /spec/template/spec/kubeadmConfigSpec/postKubeadmCommands + value: [] selector: apiVersion: controlplane.cluster.x-k8s.io/v1beta1 kind: KubeadmControlPlaneTemplate @@ -46,6 +49,9 @@ spec: - op: add path: /spec/template/spec/files value: [] + - op: add + path: /spec/template/spec/postKubeadmCommands + value: [] selector: apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 kind: KubeadmConfigTemplate @@ -53,7 +59,7 @@ spec: machineDeploymentClass: names: - ${CLUSTER_CLASS_NAME}-worker - name: createFilesArray + name: createEmptyArrays - definitions: - jsonPatches: - op: add @@ -94,7 +100,7 @@ spec: valueFrom: template: | host: '{{ .controlPlaneIpAddr }}' - port: 6443 + port: {{ .controlPlanePort }} - op: add path: /spec/template/spec/identityRef valueFrom: @@ -121,9 +127,69 @@ spec: path: /spec/template/spec/kubeadmConfigSpec/files/- valueFrom: template: |- - owner: root:root - path: "/etc/kubernetes/manifests/kube-vip.yaml" + owner: "root:root" + path: "/etc/kubernetes/manifests/kube-vip.yaml" content: {{ printf "%q" (regexReplaceAll "(name: address\n +value:).*" .kubeVipPodManifest (printf "$1 %s" .controlPlaneIpAddr)) }} + permissions: "0644" + - op: add + path: /spec/template/spec/kubeadmConfigSpec/files/- + valueFrom: + template: | + content: 127.0.0.1 localhost kubernetes + owner: root:root + path: /etc/kube-vip.hosts + permissions: "0644" + - op: add + path: /spec/template/spec/kubeadmConfigSpec/files/- + valueFrom: + template: | + content: | + #!/bin/bash + + # Copyright 2020 The Kubernetes Authors. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ # See the License for the specific language governing permissions and + # limitations under the License. + + set -e + + # Configure the workaround required for kubeadm init with kube-vip: + # xref: https://github.com/kube-vip/kube-vip/issues/684 + + # Nothing to do for kubernetes < v1.29 + KUBEADM_MINOR="$(kubeadm version -o short | cut -d '.' -f 2)" + if [[ "$KUBEADM_MINOR" -lt "29" ]]; then + exit 0 + fi + + IS_KUBEADM_INIT="false" + + # cloud-init kubeadm init + if [[ -f /run/kubeadm/kubeadm.yaml ]]; then + IS_KUBEADM_INIT="true" + fi + + # ignition kubeadm init + if [[ -f /etc/kubeadm.sh ]] && grep -q -e "kubeadm init" /etc/kubeadm.sh; then + IS_KUBEADM_INIT="true" + fi + + if [[ "$IS_KUBEADM_INIT" == "true" ]]; then + sed -i 's#path: /etc/kubernetes/admin.conf#path: /etc/kubernetes/super-admin.conf#' \ + /etc/kubernetes/manifests/kube-vip.yaml + fi + owner: root:root + path: /etc/pre-kubeadm-commands/50-kube-vip-prepare.sh + permissions: "0700" selector: apiVersion: controlplane.cluster.x-k8s.io/v1beta1 kind: KubeadmControlPlaneTemplate @@ -131,39 +197,51 @@ spec: controlPlane: true name: kubeVipPodManifest variables: - - name: sshKey + - metadata: {} + name: sshKey required: false schema: openAPIV3Schema: description: Public key to SSH onto the cluster nodes. type: string - - name: infraServer + - metadata: {} + name: controlPlaneIpAddr required: true schema: openAPIV3Schema: - properties: - thumbprint: - type: string - url: - type: string - type: object - - name: controlPlaneIpAddr + description: Floating VIP for the control plane. + type: string + - metadata: {} + name: controlPlanePort required: true schema: openAPIV3Schema: - description: Floating VIP for the control plane. - type: string - - name: credsSecretName + description: Port for the control plane endpoint. + type: integer + - metadata: {} + name: kubeVipPodManifest required: true schema: openAPIV3Schema: - description: Secret containing the credentials for the infra cluster. + description: kube-vip manifest for the control plane. type: string - - name: kubeVipPodManifest + - metadata: {} + name: infraServer required: true schema: openAPIV3Schema: - description: kube-vip manifest for the control plane. + properties: + thumbprint: + type: string + url: + type: string + type: object + - metadata: {} + name: credsSecretName + required: true + schema: + openAPIV3Schema: + description: Secret containing the credentials for the infra cluster. 
type: string workers: machineDeployments: @@ -203,7 +281,7 @@ spec: networkName: '${VSPHERE_NETWORK}' numCPUs: 2 os: Linux - powerOffMode: hard + powerOffMode: trySoft resourcePool: '${VSPHERE_RESOURCE_POOL}' server: '${VSPHERE_SERVER}' storagePolicyName: '${VSPHERE_STORAGE_POLICY}' @@ -230,7 +308,7 @@ spec: networkName: '${VSPHERE_NETWORK}' numCPUs: 2 os: Linux - powerOffMode: hard + powerOffMode: trySoft resourcePool: '${VSPHERE_RESOURCE_POOL}' server: '${VSPHERE_SERVER}' storagePolicyName: '${VSPHERE_STORAGE_POLICY}' @@ -271,6 +349,9 @@ spec: >/etc/hosts - echo "127.0.0.1 {{ ds.meta_data.hostname }} {{ local_hostname }} localhost localhost.localdomain localhost4 localhost4.localdomain4" >>/etc/hosts + - mkdir -p /etc/pre-kubeadm-commands + - for script in $(find /etc/pre-kubeadm-commands/ -name '*.sh' -type f | sort); + do echo "Running script $script"; "$script"; done users: - name: capv sshAuthorizedKeys: @@ -297,3 +378,6 @@ spec: >/etc/hosts - echo "127.0.0.1 {{ ds.meta_data.hostname }} {{ local_hostname }} localhost localhost.localdomain localhost4 localhost4.localdomain4" >>/etc/hosts + - mkdir -p /etc/pre-kubeadm-commands + - for script in $(find /etc/pre-kubeadm-commands/ -name '*.sh' -type f | sort); + do echo "Running script $script"; "$script"; done diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/v1.8/clusterclass/kustomization.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/v1.10/clusterclass/kustomization.yaml similarity index 78% rename from test/e2e/data/infrastructure-vsphere-govmomi/v1.8/clusterclass/kustomization.yaml rename to test/e2e/data/infrastructure-vsphere-govmomi/v1.10/clusterclass/kustomization.yaml index 820776eeaa..4c0e41b050 100644 --- a/test/e2e/data/infrastructure-vsphere-govmomi/v1.8/clusterclass/kustomization.yaml +++ b/test/e2e/data/infrastructure-vsphere-govmomi/v1.10/clusterclass/kustomization.yaml @@ -9,3 +9,6 @@ patches: - target: kind: ClusterClass path: ./patch-prekubeadmscript.yaml + - target: + kind: ClusterClass + path: ./patch-namingstrategy.yaml diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/v1.10/clusterclass/patch-namingstrategy.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/v1.10/clusterclass/patch-namingstrategy.yaml new file mode 100644 index 0000000000..1877801c3c --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-govmomi/v1.10/clusterclass/patch-namingstrategy.yaml @@ -0,0 +1,8 @@ +- op: add + path: /spec/controlPlane/namingStrategy + value: + template: '{{ .cluster.name }}-cp-{{ .random }}' +- op: add + path: /spec/workers/machineDeployments/0/namingStrategy + value: + template: '{{ .cluster.name }}-md-{{ .machineDeployment.topologyName }}-{{ .random }}' diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/v1.8/clusterclass/patch-prekubeadmscript.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/v1.10/clusterclass/patch-prekubeadmscript.yaml similarity index 70% rename from test/e2e/data/infrastructure-vsphere-govmomi/v1.8/clusterclass/patch-prekubeadmscript.yaml rename to test/e2e/data/infrastructure-vsphere-govmomi/v1.10/clusterclass/patch-prekubeadmscript.yaml index 3e6e63b28d..3345f5b6d2 100644 --- a/test/e2e/data/infrastructure-vsphere-govmomi/v1.8/clusterclass/patch-prekubeadmscript.yaml +++ b/test/e2e/data/infrastructure-vsphere-govmomi/v1.10/clusterclass/patch-prekubeadmscript.yaml @@ -3,34 +3,28 @@ value: definitions: - jsonPatches: - - op: add - path: /spec/template/spec/kubeadmConfigSpec/preKubeadmCommands/- - value: "/opt/prekubeadmscript.sh" - op: add path: 
/spec/template/spec/kubeadmConfigSpec/files/- valueFrom: template: | owner: root:root - path: "/opt/prekubeadmscript.sh" + path: "/etc/pre-kubeadm-commands/10-prekubeadmscript.sh" permissions: "0755" - content: {{ printf "%q" .preKubeadmScript }} + content: {{ printf "%q" (regexReplaceAll "(KUBERNETES_VERSION=.*)" .preKubeadmScript (printf "KUBERNETES_VERSION=%s" .builtin.controlPlane.version)) }} selector: apiVersion: controlplane.cluster.x-k8s.io/v1beta1 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true - jsonPatches: - - op: add - path: /spec/template/spec/preKubeadmCommands/- - value: "/opt/prekubeadmscript.sh" - op: add path: /spec/template/spec/files/- valueFrom: template: | owner: root:root - path: "/opt/prekubeadmscript.sh" + path: "/etc/pre-kubeadm-commands/10-prekubeadmscript.sh" permissions: "0755" - content: {{ printf "%q" .preKubeadmScript }} + content: {{ printf "%q" (regexReplaceAll "(KUBERNETES_VERSION=.*)" .preKubeadmScript (printf "KUBERNETES_VERSION=%s" .builtin.machineDeployment.version)) }} selector: apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 kind: KubeadmConfigTemplate diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/v1.10/clusterclass/patch-vsphere-template.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/v1.10/clusterclass/patch-vsphere-template.yaml new file mode 100644 index 0000000000..1e9bff36e8 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-govmomi/v1.10/clusterclass/patch-vsphere-template.yaml @@ -0,0 +1,45 @@ +- op: add + path: /spec/patches/- + value: + definitions: + - jsonPatches: + - op: replace + path: /spec/template/spec/template + valueFrom: + # We have to fall back to v1.30.0 for the conformance latest ci test which uses + # versions without corresponding templates like "v1.30.0-alpha.0.525+09a5049ca78502". + template: |- + {{- if eq .builtin.controlPlane.version "v1.28.0" -}} + ubuntu-2204-kube-v1.28.0 + {{- else -}}{{- if eq .builtin.controlPlane.version "v1.29.0" -}} + ubuntu-2204-kube-v1.29.0 + {{- else -}} + ubuntu-2204-kube-v1.30.0 + {{- end -}}{{- end -}} + selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + matchResources: + controlPlane: true + - jsonPatches: + - op: replace + path: /spec/template/spec/template + valueFrom: + # We have to fall back to v1.30.0 for the conformance latest ci test which uses + # versions without corresponding templates like "v1.30.0-alpha.0.525+09a5049ca78502". 
+ template: |- + {{- if eq .builtin.machineDeployment.version "v1.28.0" -}} + ubuntu-2204-kube-v1.28.0 + {{- else -}}{{- if eq .builtin.machineDeployment.version "v1.29.0" -}} + ubuntu-2204-kube-v1.29.0 + {{- else -}} + ubuntu-2204-kube-v1.30.0 + {{- end -}}{{- end -}} + selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + matchResources: + machineDeploymentClass: + names: + - ${CLUSTER_CLASS_NAME}-worker + name: vSphereTemplate diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/v1.8/commons/cluster-network-CIDR.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/v1.10/commons/cluster-network-CIDR.yaml similarity index 100% rename from test/e2e/data/infrastructure-vsphere-govmomi/v1.8/commons/cluster-network-CIDR.yaml rename to test/e2e/data/infrastructure-vsphere-govmomi/v1.10/commons/cluster-network-CIDR.yaml diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/v1.8/commons/cluster-resource-set-csi-insecure.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/v1.10/commons/cluster-resource-set-csi-insecure.yaml similarity index 84% rename from test/e2e/data/infrastructure-vsphere-govmomi/v1.8/commons/cluster-resource-set-csi-insecure.yaml rename to test/e2e/data/infrastructure-vsphere-govmomi/v1.10/commons/cluster-resource-set-csi-insecure.yaml index be6ed3601b..c22ddb314b 100644 --- a/test/e2e/data/infrastructure-vsphere-govmomi/v1.8/commons/cluster-resource-set-csi-insecure.yaml +++ b/test/e2e/data/infrastructure-vsphere-govmomi/v1.10/commons/cluster-resource-set-csi-insecure.yaml @@ -1,15 +1,15 @@ apiVersion: v1 kind: Secret metadata: - name: csi-vsphere-config + name: vsphere-config-secret namespace: '${NAMESPACE}' stringData: data: | apiVersion: v1 kind: Secret metadata: - name: csi-vsphere-config - namespace: kube-system + name: vsphere-config-secret + namespace: vmware-system-csi stringData: csi-vsphere.conf: |+ [Global] diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/v1.8/commons/cluster-resource-set-label.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/v1.10/commons/cluster-resource-set-label.yaml similarity index 100% rename from test/e2e/data/infrastructure-vsphere-govmomi/v1.8/commons/cluster-resource-set-label.yaml rename to test/e2e/data/infrastructure-vsphere-govmomi/v1.10/commons/cluster-resource-set-label.yaml diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/v1.8/commons/cluster-resource-set.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/v1.10/commons/cluster-resource-set.yaml similarity index 100% rename from test/e2e/data/infrastructure-vsphere-govmomi/v1.8/commons/cluster-resource-set.yaml rename to test/e2e/data/infrastructure-vsphere-govmomi/v1.10/commons/cluster-resource-set.yaml diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/v1.10/topology/cluster-template-topology.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/v1.10/topology/cluster-template-topology.yaml new file mode 100644 index 0000000000..72ee1f4df8 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-govmomi/v1.10/topology/cluster-template-topology.yaml @@ -0,0 +1,1286 @@ +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +spec: + topology: + class: '${CLUSTER_CLASS_NAME}' + controlPlane: + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + variables: + - name: sshKey + value: '${VSPHERE_SSH_AUTHORIZED_KEY}' + - name: kubeVipPodManifest + value: | + apiVersion: v1 + kind: Pod + 
metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_interface + value: ${VIP_NETWORK_INTERFACE:=""} + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: svc_enable + value: "true" + - name: svc_leasename + value: plndr-svcs-lock + - name: svc_election + value: "true" + - name: vip_leaderelection + value: "true" + - name: vip_leasename + value: plndr-cp-lock + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: address + value: ${CONTROL_PLANE_ENDPOINT_IP} + - name: prometheus_server + value: :2112 + image: ghcr.io/kube-vip/kube-vip:v0.6.4 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + - mountPath: /etc/hosts + name: etchosts + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + name: kubeconfig + - hostPath: + path: /etc/kube-vip.hosts + type: File + name: etchosts + status: {} + - name: controlPlaneIpAddr + value: ${CONTROL_PLANE_ENDPOINT_IP} + - name: controlPlanePort + value: ${CONTROL_PLANE_ENDPOINT_PORT:=6443} + - name: infraServer + value: + thumbprint: '${VSPHERE_TLS_THUMBPRINT}' + url: '${VSPHERE_SERVER}' + - name: credsSecretName + value: '${CLUSTER_NAME}' + version: '${KUBERNETES_VERSION}' + workers: + machineDeployments: + - class: ${CLUSTER_CLASS_NAME}-worker + metadata: {} + name: md-0 + replicas: ${WORKER_MACHINE_COUNT} +--- +apiVersion: v1 +kind: Secret +metadata: + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +stringData: + password: ${VSPHERE_PASSWORD} + username: ${VSPHERE_USERNAME} +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + labels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + name: ${CLUSTER_NAME}-crs-0 + namespace: '${NAMESPACE}' +spec: + clusterSelector: + matchLabels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + resources: + - kind: Secret + name: vsphere-config-secret + - kind: ConfigMap + name: csi-manifests + - kind: Secret + name: cloud-provider-vsphere-credentials + - kind: ConfigMap + name: cpi-manifests +--- +apiVersion: v1 +kind: Secret +metadata: + name: vsphere-config-secret + namespace: '${NAMESPACE}' +stringData: + data: | + apiVersion: v1 + kind: Secret + metadata: + name: vsphere-config-secret + namespace: vmware-system-csi + stringData: + csi-vsphere.conf: |+ + [Global] + thumbprint = "${VSPHERE_TLS_THUMBPRINT}" + + [VirtualCenter "${VSPHERE_SERVER}"] + user = "${VSPHERE_USERNAME}" + password = "${VSPHERE_PASSWORD}" + datacenters = "${VSPHERE_DATACENTER}" + + [Network] + public-network = "${VSPHERE_NETWORK}" + + type: Opaque +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: | + --- + apiVersion: v1 + kind: Namespace + metadata: + name: vmware-system-csi + --- + apiVersion: storage.k8s.io/v1 + kind: CSIDriver + metadata: + name: csi.vsphere.vmware.com + spec: + attachRequired: true + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: vsphere-csi-controller + namespace: vmware-system-csi + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: vsphere-csi-controller-role + rules: + - 
apiGroups: + - "" + resources: + - nodes + - pods + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - persistentvolumeclaims/status + verbs: + - patch + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - create + - update + - delete + - patch + - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + - create + - update + - patch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - delete + - update + - create + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + - csinodes + verbs: + - get + - list + - watch + - apiGroups: + - storage.k8s.io + resources: + - volumeattachments + verbs: + - get + - list + - watch + - patch + - apiGroups: + - cns.vmware.com + resources: + - triggercsifullsyncs + verbs: + - create + - get + - update + - watch + - list + - apiGroups: + - cns.vmware.com + resources: + - cnsvspherevolumemigrations + verbs: + - create + - get + - list + - watch + - update + - delete + - apiGroups: + - cns.vmware.com + resources: + - cnsvolumeinfoes + verbs: + - create + - get + - list + - watch + - delete + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - create + - update + - apiGroups: + - storage.k8s.io + resources: + - volumeattachments/status + verbs: + - patch + - apiGroups: + - cns.vmware.com + resources: + - cnsvolumeoperationrequests + verbs: + - create + - get + - list + - update + - delete + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - get + - list + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotclasses + verbs: + - watch + - get + - list + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents + verbs: + - create + - get + - list + - watch + - update + - delete + - patch + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents/status + verbs: + - update + - patch + - apiGroups: + - cns.vmware.com + resources: + - csinodetopologies + verbs: + - get + - update + - watch + - list + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: vsphere-csi-controller-binding + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: vsphere-csi-controller-role + subjects: + - kind: ServiceAccount + name: vsphere-csi-controller + namespace: vmware-system-csi + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: vsphere-csi-node + namespace: vmware-system-csi + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: vsphere-csi-node-cluster-role + rules: + - apiGroups: + - cns.vmware.com + resources: + - csinodetopologies + verbs: + - create + - watch + - get + - patch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: vsphere-csi-node-cluster-role-binding + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: vsphere-csi-node-cluster-role + subjects: + - kind: ServiceAccount + name: vsphere-csi-node + namespace: vmware-system-csi + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: vsphere-csi-node-role 
+ namespace: vmware-system-csi + rules: + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: vsphere-csi-node-binding + namespace: vmware-system-csi + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: vsphere-csi-node-role + subjects: + - kind: ServiceAccount + name: vsphere-csi-node + namespace: vmware-system-csi + --- + apiVersion: v1 + data: + async-query-volume: "true" + block-volume-snapshot: "true" + cnsmgr-suspend-create-volume: "true" + csi-auth-check: "true" + csi-internal-generated-cluster-id: "true" + csi-migration: "true" + csi-windows-support: "true" + list-volumes: "true" + listview-tasks: "true" + max-pvscsi-targets-per-vm: "true" + multi-vcenter-csi-topology: "true" + online-volume-extend: "true" + pv-to-backingdiskobjectid-mapping: "false" + topology-preferential-datastores: "true" + trigger-csi-fullsync: "false" + kind: ConfigMap + metadata: + name: internal-feature-states.csi.vsphere.vmware.com + namespace: vmware-system-csi + --- + apiVersion: v1 + kind: Service + metadata: + labels: + app: vsphere-csi-controller + name: vsphere-csi-controller + namespace: vmware-system-csi + spec: + ports: + - name: ctlr + port: 2112 + protocol: TCP + targetPort: 2112 + - name: syncer + port: 2113 + protocol: TCP + targetPort: 2113 + selector: + app: vsphere-csi-controller + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: vsphere-csi-controller + namespace: vmware-system-csi + spec: + replicas: 1 + selector: + matchLabels: + app: vsphere-csi-controller + strategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + app: vsphere-csi-controller + role: vsphere-csi + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - vsphere-csi-controller + topologyKey: kubernetes.io/hostname + containers: + - args: + - --v=4 + - --timeout=300s + - --csi-address=$(ADDRESS) + - --leader-election + - --leader-election-lease-duration=120s + - --leader-election-renew-deadline=60s + - --leader-election-retry-period=30s + - --kube-api-qps=100 + - --kube-api-burst=100 + env: + - name: ADDRESS + value: /csi/csi.sock + image: registry.k8s.io/sig-storage/csi-attacher:v4.3.0 + name: csi-attacher + volumeMounts: + - mountPath: /csi + name: socket-dir + - args: + - --v=4 + - --timeout=300s + - --handle-volume-inuse-error=false + - --csi-address=$(ADDRESS) + - --kube-api-qps=100 + - --kube-api-burst=100 + - --leader-election + - --leader-election-lease-duration=120s + - --leader-election-renew-deadline=60s + - --leader-election-retry-period=30s + env: + - name: ADDRESS + value: /csi/csi.sock + image: registry.k8s.io/sig-storage/csi-resizer:v1.8.0 + name: csi-resizer + volumeMounts: + - mountPath: /csi + name: socket-dir + - args: + - --fss-name=internal-feature-states.csi.vsphere.vmware.com + - --fss-namespace=$(CSI_NAMESPACE) + env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: X_CSI_MODE + value: controller + - name: X_CSI_SPEC_DISABLE_LEN_CHECK + value: "true" + - name: X_CSI_SERIAL_VOL_ACCESS_TIMEOUT + value: 3m + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + - name: LOGGER_LEVEL + value: PRODUCTION + - name: INCLUSTER_CLIENT_QPS + value: "100" + - name: INCLUSTER_CLIENT_BURST + value: "100" + - name: CSI_NAMESPACE + valueFrom: + fieldRef: + fieldPath: 
metadata.namespace + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v3.1.0 + imagePullPolicy: Always + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + periodSeconds: 180 + timeoutSeconds: 10 + name: vsphere-csi-controller + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + - containerPort: 2112 + name: prometheus + protocol: TCP + securityContext: + runAsGroup: 65532 + runAsNonRoot: true + runAsUser: 65532 + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - mountPath: /csi + name: socket-dir + - args: + - --v=4 + - --csi-address=/csi/csi.sock + image: registry.k8s.io/sig-storage/livenessprobe:v2.10.0 + name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir + - args: + - --leader-election + - --leader-election-lease-duration=30s + - --leader-election-renew-deadline=20s + - --leader-election-retry-period=10s + - --fss-name=internal-feature-states.csi.vsphere.vmware.com + - --fss-namespace=$(CSI_NAMESPACE) + env: + - name: FULL_SYNC_INTERVAL_MINUTES + value: "30" + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + - name: LOGGER_LEVEL + value: PRODUCTION + - name: INCLUSTER_CLIENT_QPS + value: "100" + - name: INCLUSTER_CLIENT_BURST + value: "100" + - name: GODEBUG + value: x509sha1=1 + - name: CSI_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v3.1.0 + imagePullPolicy: Always + name: vsphere-syncer + ports: + - containerPort: 2113 + name: prometheus + protocol: TCP + securityContext: + runAsGroup: 65532 + runAsNonRoot: true + runAsUser: 65532 + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - args: + - --v=4 + - --timeout=300s + - --csi-address=$(ADDRESS) + - --kube-api-qps=100 + - --kube-api-burst=100 + - --leader-election + - --leader-election-lease-duration=120s + - --leader-election-renew-deadline=60s + - --leader-election-retry-period=30s + - --default-fstype=ext4 + env: + - name: ADDRESS + value: /csi/csi.sock + image: registry.k8s.io/sig-storage/csi-provisioner:v3.5.0 + name: csi-provisioner + volumeMounts: + - mountPath: /csi + name: socket-dir + - args: + - --v=4 + - --kube-api-qps=100 + - --kube-api-burst=100 + - --timeout=300s + - --csi-address=$(ADDRESS) + - --leader-election + - --leader-election-lease-duration=120s + - --leader-election-renew-deadline=60s + - --leader-election-retry-period=30s + env: + - name: ADDRESS + value: /csi/csi.sock + image: registry.k8s.io/sig-storage/csi-snapshotter:v6.2.2 + name: csi-snapshotter + volumeMounts: + - mountPath: /csi + name: socket-dir + dnsPolicy: Default + priorityClassName: system-cluster-critical + serviceAccountName: vsphere-csi-controller + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + volumes: + - name: vsphere-config-volume + secret: + secretName: vsphere-config-secret + - emptyDir: {} + name: socket-dir + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: vsphere-csi-node + namespace: vmware-system-csi + spec: + selector: + matchLabels: + app: vsphere-csi-node + template: + metadata: + labels: + app: vsphere-csi-node + role: vsphere-csi + spec: + containers: + - args: + - --v=5 + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + env: + - name: ADDRESS + value: 
/csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock + image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.8.0 + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - --kubelet-registration-path=/var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock + - --mode=kubelet-registration-probe + initialDelaySeconds: 3 + name: node-driver-registrar + volumeMounts: + - mountPath: /csi + name: plugin-dir + - mountPath: /registration + name: registration-dir + - args: + - --fss-name=internal-feature-states.csi.vsphere.vmware.com + - --fss-namespace=$(CSI_NAMESPACE) + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: MAX_VOLUMES_PER_NODE + value: "59" + - name: X_CSI_MODE + value: node + - name: X_CSI_SPEC_REQ_VALIDATION + value: "false" + - name: X_CSI_SPEC_DISABLE_LEN_CHECK + value: "true" + - name: LOGGER_LEVEL + value: PRODUCTION + - name: GODEBUG + value: x509sha1=1 + - name: CSI_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: NODEGETINFO_WATCH_TIMEOUT_MINUTES + value: "1" + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v3.1.0 + imagePullPolicy: Always + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 5 + name: vsphere-csi-node + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + securityContext: + allowPrivilegeEscalation: true + capabilities: + add: + - SYS_ADMIN + privileged: true + volumeMounts: + - mountPath: /csi + name: plugin-dir + - mountPath: /var/lib/kubelet + mountPropagation: Bidirectional + name: pods-mount-dir + - mountPath: /dev + name: device-dir + - mountPath: /sys/block + name: blocks-dir + - mountPath: /sys/devices + name: sys-devices-dir + - args: + - --v=4 + - --csi-address=/csi/csi.sock + image: registry.k8s.io/sig-storage/livenessprobe:v2.10.0 + name: liveness-probe + volumeMounts: + - mountPath: /csi + name: plugin-dir + dnsPolicy: ClusterFirstWithHostNet + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + serviceAccountName: vsphere-csi-node + tolerations: + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists + volumes: + - hostPath: + path: /var/lib/kubelet/plugins_registry + type: Directory + name: registration-dir + - hostPath: + path: /var/lib/kubelet/plugins/csi.vsphere.vmware.com + type: DirectoryOrCreate + name: plugin-dir + - hostPath: + path: /var/lib/kubelet + type: Directory + name: pods-mount-dir + - hostPath: + path: /dev + name: device-dir + - hostPath: + path: /sys/block + type: Directory + name: blocks-dir + - hostPath: + path: /sys/devices + type: Directory + name: sys-devices-dir + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: vsphere-csi-node-windows + namespace: vmware-system-csi + spec: + selector: + matchLabels: + app: vsphere-csi-node-windows + template: + metadata: + labels: + app: vsphere-csi-node-windows + role: vsphere-csi-windows + spec: + containers: + - args: + - --v=5 + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + env: + - name: ADDRESS + value: unix://C:\\csi\\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: C:\\var\\lib\\kubelet\\plugins\\csi.vsphere.vmware.com\\csi.sock + image: 
registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.8.0 + livenessProbe: + exec: + command: + - /csi-node-driver-registrar.exe + - --kubelet-registration-path=C:\\var\\lib\\kubelet\\plugins\\csi.vsphere.vmware.com\\csi.sock + - --mode=kubelet-registration-probe + initialDelaySeconds: 3 + name: node-driver-registrar + volumeMounts: + - mountPath: /csi + name: plugin-dir + - mountPath: /registration + name: registration-dir + - args: + - --fss-name=internal-feature-states.csi.vsphere.vmware.com + - --fss-namespace=$(CSI_NAMESPACE) + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + - name: MAX_VOLUMES_PER_NODE + value: "59" + - name: X_CSI_MODE + value: node + - name: X_CSI_SPEC_REQ_VALIDATION + value: "false" + - name: X_CSI_SPEC_DISABLE_LEN_CHECK + value: "true" + - name: LOGGER_LEVEL + value: PRODUCTION + - name: X_CSI_LOG_LEVEL + value: DEBUG + - name: CSI_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: NODEGETINFO_WATCH_TIMEOUT_MINUTES + value: "1" + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v3.1.0 + imagePullPolicy: Always + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 5 + name: vsphere-csi-node + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + volumeMounts: + - mountPath: C:\csi + name: plugin-dir + - mountPath: C:\var\lib\kubelet + name: pods-mount-dir + - mountPath: \\.\pipe\csi-proxy-volume-v1 + name: csi-proxy-volume-v1 + - mountPath: \\.\pipe\csi-proxy-filesystem-v1 + name: csi-proxy-filesystem-v1 + - mountPath: \\.\pipe\csi-proxy-disk-v1 + name: csi-proxy-disk-v1 + - mountPath: \\.\pipe\csi-proxy-system-v1alpha1 + name: csi-proxy-system-v1alpha1 + - args: + - --v=4 + - --csi-address=/csi/csi.sock + image: registry.k8s.io/sig-storage/livenessprobe:v2.10.0 + name: liveness-probe + volumeMounts: + - mountPath: /csi + name: plugin-dir + nodeSelector: + kubernetes.io/os: windows + priorityClassName: system-node-critical + serviceAccountName: vsphere-csi-node + tolerations: + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists + volumes: + - hostPath: + path: C:\var\lib\kubelet\plugins_registry\ + type: Directory + name: registration-dir + - hostPath: + path: C:\var\lib\kubelet\plugins\csi.vsphere.vmware.com\ + type: DirectoryOrCreate + name: plugin-dir + - hostPath: + path: \var\lib\kubelet + type: Directory + name: pods-mount-dir + - hostPath: + path: \\.\pipe\csi-proxy-disk-v1 + type: "" + name: csi-proxy-disk-v1 + - hostPath: + path: \\.\pipe\csi-proxy-volume-v1 + type: "" + name: csi-proxy-volume-v1 + - hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1 + type: "" + name: csi-proxy-filesystem-v1 + - hostPath: + path: \\.\pipe\csi-proxy-system-v1alpha1 + type: "" + name: csi-proxy-system-v1alpha1 + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate +kind: ConfigMap +metadata: + name: csi-manifests + namespace: '${NAMESPACE}' +--- +apiVersion: v1 +kind: Secret +metadata: + name: cloud-provider-vsphere-credentials + namespace: '${NAMESPACE}' +stringData: + data: | + apiVersion: v1 + kind: Secret + metadata: + labels: + component: cloud-controller-manager + vsphere-cpi-infra: secret + name: cloud-provider-vsphere-credentials + namespace: kube-system + stringData: + ${VSPHERE_SERVER}.password: ${VSPHERE_PASSWORD} + ${VSPHERE_SERVER}.username: ${VSPHERE_USERNAME} + type: 
Opaque +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: | + --- + # Source: vsphere-cpi/templates/service-account.yaml + apiVersion: v1 + kind: ServiceAccount + metadata: + name: cloud-controller-manager + labels: + app: vsphere-cpi + vsphere-cpi-infra: service-account + component: cloud-controller-manager + namespace: kube-system + --- + # Source: vsphere-cpi/templates/role.yaml + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: cloud-controller-manager + labels: + app: vsphere-cpi + vsphere-cpi-infra: role + component: cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - "*" + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - services + verbs: + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - services/status + verbs: + - patch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - update + - watch + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - "coordination.k8s.io" + resources: + - leases + verbs: + - create + - get + - list + - watch + - update + --- + # Source: vsphere-cpi/templates/daemonset.yaml + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: vsphere-cpi + labels: + app: vsphere-cpi + vsphere-cpi-infra: daemonset + component: cloud-controller-manager + tier: control-plane + namespace: kube-system + annotations: + spec: + selector: + matchLabels: + app: vsphere-cpi + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app: vsphere-cpi + component: cloud-controller-manager + tier: control-plane + release: release-name + vsphere-cpi-infra: daemonset + spec: + tolerations: + - key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + effect: NoSchedule + - key: node-role.kubernetes.io/master + effect: NoSchedule + operator: Exists + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule + operator: Exists + - key: node.kubernetes.io/not-ready + effect: NoSchedule + operator: Exists + - key: CriticalAddonsOnly + effect: NoExecute + operator: Exists + securityContext: + fsGroup: 1001 + runAsUser: 1001 + serviceAccountName: cloud-controller-manager + hostNetwork: true + dnsPolicy: ClusterFirst + priorityClassName: system-node-critical + containers: + - name: vsphere-cpi + image: gcr.io/cloud-provider-vsphere/cpi/release/manager:${CPI_IMAGE_K8S_VERSION} + imagePullPolicy: IfNotPresent + args: + - --cloud-provider=vsphere + - --v=2 + - --cloud-config=/etc/cloud/vsphere.conf + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + volumes: + - name: vsphere-config-volume + configMap: + name: cloud-config + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + labels: + app: vsphere-cpi + component: cloud-controller-manager + vsphere-cpi-infra: 
role-binding + name: servicecatalog.k8s.io:apiserver-authentication-reader + namespace: kube-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader + subjects: + - apiGroup: "" + kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - apiGroup: "" + kind: User + name: cloud-controller-manager + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app: vsphere-cpi + component: cloud-controller-manager + vsphere-cpi-infra: cluster-role-binding + name: cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager + --- + apiVersion: v1 + data: + vsphere.conf: | + global: + port: 443 + secretName: cloud-provider-vsphere-credentials + secretNamespace: kube-system + thumbprint: '${VSPHERE_TLS_THUMBPRINT}' + vcenter: + ${VSPHERE_SERVER}: + datacenters: + - '${VSPHERE_DATACENTER}' + server: '${VSPHERE_SERVER}' + kind: ConfigMap + metadata: + name: cloud-config + namespace: kube-system +kind: ConfigMap +metadata: + name: cpi-manifests + namespace: '${NAMESPACE}' diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/v1.8/topology/kustomization.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/v1.10/topology/kustomization.yaml similarity index 53% rename from test/e2e/data/infrastructure-vsphere-govmomi/v1.8/topology/kustomization.yaml rename to test/e2e/data/infrastructure-vsphere-govmomi/v1.10/topology/kustomization.yaml index de372e3513..76ce47f3e9 100644 --- a/test/e2e/data/infrastructure-vsphere-govmomi/v1.8/topology/kustomization.yaml +++ b/test/e2e/data/infrastructure-vsphere-govmomi/v1.10/topology/kustomization.yaml @@ -1,13 +1,9 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - - ../base/cluster-template-topology.yaml + - cluster-template-topology.yaml - ../commons/cluster-resource-set.yaml patchesStrategicMerge: - ../commons/cluster-resource-set-label.yaml - ../commons/cluster-network-CIDR.yaml - - ../commons/cluster-resource-set-csi-insecure.yaml -patches: - - target: - kind: VSphereMachineTemplate - path: ../commons/remove-storage-policy.yaml + - ../commons/cluster-resource-set-csi-insecure.yaml \ No newline at end of file diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/v1.8/workload/kustomization.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/v1.10/workload/kustomization.yaml similarity index 100% rename from test/e2e/data/infrastructure-vsphere-govmomi/v1.8/workload/kustomization.yaml rename to test/e2e/data/infrastructure-vsphere-govmomi/v1.10/workload/kustomization.yaml diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/v1.8/workload/workload-control-plane-endpoint-ip.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/v1.10/workload/workload-control-plane-endpoint-ip.yaml similarity index 100% rename from test/e2e/data/infrastructure-vsphere-govmomi/v1.8/workload/workload-control-plane-endpoint-ip.yaml rename to test/e2e/data/infrastructure-vsphere-govmomi/v1.10/workload/workload-control-plane-endpoint-ip.yaml diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/v1.8/base/cluster-template-topology.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/v1.8/base/cluster-template-topology.yaml deleted file mode 100644 index 9379dd9215..0000000000 --- 
a/test/e2e/data/infrastructure-vsphere-govmomi/v1.8/base/cluster-template-topology.yaml +++ /dev/null @@ -1,834 +0,0 @@ ---- -apiVersion: cluster.x-k8s.io/v1beta1 -kind: Cluster -metadata: - labels: - cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' - name: '${CLUSTER_NAME}' - namespace: '${NAMESPACE}' -spec: - topology: - class: '${CLUSTER_CLASS_NAME}' - controlPlane: - replicas: ${CONTROL_PLANE_MACHINE_COUNT} - variables: - - name: sshKey - value: '${VSPHERE_SSH_AUTHORIZED_KEY}' - - name: infraServer - value: - thumbprint: '${VSPHERE_TLS_THUMBPRINT}' - url: '${VSPHERE_SERVER}' - - name: kubeVipPodManifest - value: | - apiVersion: v1 - kind: Pod - metadata: - name: kube-vip - namespace: kube-system - spec: - containers: - - args: - - manager - env: - - name: cp_enable - value: "true" - - name: vip_interface - value: ${VIP_NETWORK_INTERFACE=""} - - name: address - value: ${CONTROL_PLANE_ENDPOINT_IP} - - name: port - value: "6443" - - name: vip_arp - value: "true" - - name: vip_leaderelection - value: "true" - - name: vip_leaseduration - value: "15" - - name: vip_renewdeadline - value: "10" - - name: vip_retryperiod - value: "2" - image: ghcr.io/kube-vip/kube-vip:v0.5.11 - imagePullPolicy: IfNotPresent - name: kube-vip - resources: {} - securityContext: - capabilities: - add: - - NET_ADMIN - - NET_RAW - volumeMounts: - - mountPath: /etc/kubernetes/admin.conf - name: kubeconfig - hostAliases: - - hostnames: - - kubernetes - ip: 127.0.0.1 - hostNetwork: true - volumes: - - hostPath: - path: /etc/kubernetes/admin.conf - type: FileOrCreate - name: kubeconfig - - name: controlPlaneIpAddr - value: ${CONTROL_PLANE_ENDPOINT_IP} - - name: credsSecretName - value: '${CLUSTER_NAME}' - version: '${KUBERNETES_VERSION}' - workers: - machineDeployments: - - class: ${CLUSTER_CLASS_NAME}-worker - metadata: {} - name: md-0 - replicas: ${WORKER_MACHINE_COUNT} ---- -apiVersion: v1 -kind: Secret -metadata: - name: '${CLUSTER_NAME}' - namespace: '${NAMESPACE}' -stringData: - password: ${VSPHERE_PASSWORD} - username: ${VSPHERE_USERNAME} ---- -apiVersion: addons.cluster.x-k8s.io/v1beta1 -kind: ClusterResourceSet -metadata: - labels: - cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' - name: ${CLUSTER_NAME}-crs-0 - namespace: '${NAMESPACE}' -spec: - clusterSelector: - matchLabels: - cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' - resources: - - kind: Secret - name: vsphere-csi-controller - - kind: ConfigMap - name: vsphere-csi-controller-role - - kind: ConfigMap - name: vsphere-csi-controller-binding - - kind: Secret - name: csi-vsphere-config - - kind: ConfigMap - name: csi.vsphere.vmware.com - - kind: ConfigMap - name: vsphere-csi-node - - kind: ConfigMap - name: vsphere-csi-controller - - kind: Secret - name: cloud-controller-manager - - kind: Secret - name: cloud-provider-vsphere-credentials - - kind: ConfigMap - name: cpi-manifests ---- -apiVersion: v1 -kind: Secret -metadata: - name: vsphere-csi-controller - namespace: '${NAMESPACE}' -stringData: - data: | - apiVersion: v1 - kind: ServiceAccount - metadata: - name: vsphere-csi-controller - namespace: kube-system -type: addons.cluster.x-k8s.io/resource-set ---- -apiVersion: v1 -data: - data: | - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: vsphere-csi-controller-role - rules: - - apiGroups: - - storage.k8s.io - resources: - - csidrivers - verbs: - - create - - delete - - apiGroups: - - "" - resources: - - nodes - - pods - - secrets - - configmaps - verbs: - - get - - list - - watch - - apiGroups: - - "" - resources: - - 
persistentvolumes - verbs: - - get - - list - - watch - - update - - create - - delete - - patch - - apiGroups: - - storage.k8s.io - resources: - - volumeattachments - verbs: - - get - - list - - watch - - update - - patch - - apiGroups: - - storage.k8s.io - resources: - - volumeattachments/status - verbs: - - patch - - apiGroups: - - "" - resources: - - persistentvolumeclaims - verbs: - - get - - list - - watch - - update - - apiGroups: - - storage.k8s.io - resources: - - storageclasses - - csinodes - verbs: - - get - - list - - watch - - apiGroups: - - "" - resources: - - events - verbs: - - list - - watch - - create - - update - - patch - - apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - get - - watch - - list - - delete - - update - - create - - apiGroups: - - snapshot.storage.k8s.io - resources: - - volumesnapshots - verbs: - - get - - list - - apiGroups: - - snapshot.storage.k8s.io - resources: - - volumesnapshotcontents - verbs: - - get - - list -kind: ConfigMap -metadata: - name: vsphere-csi-controller-role - namespace: '${NAMESPACE}' ---- -apiVersion: v1 -data: - data: | - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - name: vsphere-csi-controller-binding - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: vsphere-csi-controller-role - subjects: - - kind: ServiceAccount - name: vsphere-csi-controller - namespace: kube-system -kind: ConfigMap -metadata: - name: vsphere-csi-controller-binding - namespace: '${NAMESPACE}' ---- -apiVersion: v1 -kind: Secret -metadata: - name: csi-vsphere-config - namespace: '${NAMESPACE}' -stringData: - data: | - apiVersion: v1 - kind: Secret - metadata: - name: csi-vsphere-config - namespace: kube-system - stringData: - csi-vsphere.conf: |+ - [Global] - thumbprint = "${VSPHERE_TLS_THUMBPRINT}" - - [VirtualCenter "${VSPHERE_SERVER}"] - user = "${VSPHERE_USERNAME}" - password = "${VSPHERE_PASSWORD}" - datacenters = "${VSPHERE_DATACENTER}" - - [Network] - public-network = "${VSPHERE_NETWORK}" - - type: Opaque -type: addons.cluster.x-k8s.io/resource-set ---- -apiVersion: v1 -data: - data: | - apiVersion: storage.k8s.io/v1 - kind: CSIDriver - metadata: - name: csi.vsphere.vmware.com - spec: - attachRequired: true -kind: ConfigMap -metadata: - name: csi.vsphere.vmware.com - namespace: '${NAMESPACE}' ---- -apiVersion: v1 -data: - data: | - apiVersion: apps/v1 - kind: DaemonSet - metadata: - name: vsphere-csi-node - namespace: kube-system - spec: - selector: - matchLabels: - app: vsphere-csi-node - template: - metadata: - labels: - app: vsphere-csi-node - role: vsphere-csi - spec: - containers: - - args: - - --v=5 - - --csi-address=$(ADDRESS) - - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) - env: - - name: ADDRESS - value: /csi/csi.sock - - name: DRIVER_REG_SOCK_PATH - value: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock - image: quay.io/k8scsi/csi-node-driver-registrar:v2.0.1 - lifecycle: - preStop: - exec: - command: - - /bin/sh - - -c - - rm -rf /registration/csi.vsphere.vmware.com-reg.sock /csi/csi.sock - name: node-driver-registrar - resources: {} - securityContext: - privileged: true - volumeMounts: - - mountPath: /csi - name: plugin-dir - - mountPath: /registration - name: registration-dir - - env: - - name: CSI_ENDPOINT - value: unix:///csi/csi.sock - - name: X_CSI_MODE - value: node - - name: X_CSI_SPEC_REQ_VALIDATION - value: "false" - - name: VSPHERE_CSI_CONFIG - value: /etc/cloud/csi-vsphere.conf - - name: LOGGER_LEVEL - value: PRODUCTION - - 
name: X_CSI_LOG_LEVEL - value: INFO - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.1.0 - livenessProbe: - failureThreshold: 3 - httpGet: - path: /healthz - port: healthz - initialDelaySeconds: 10 - periodSeconds: 5 - timeoutSeconds: 3 - name: vsphere-csi-node - ports: - - containerPort: 9808 - name: healthz - protocol: TCP - resources: {} - securityContext: - allowPrivilegeEscalation: true - capabilities: - add: - - SYS_ADMIN - privileged: true - volumeMounts: - - mountPath: /etc/cloud - name: vsphere-config-volume - - mountPath: /csi - name: plugin-dir - - mountPath: /var/lib/kubelet - mountPropagation: Bidirectional - name: pods-mount-dir - - mountPath: /dev - name: device-dir - - args: - - --csi-address=/csi/csi.sock - image: quay.io/k8scsi/livenessprobe:v2.1.0 - name: liveness-probe - resources: {} - volumeMounts: - - mountPath: /csi - name: plugin-dir - dnsPolicy: Default - tolerations: - - effect: NoSchedule - operator: Exists - - effect: NoExecute - operator: Exists - volumes: - - name: vsphere-config-volume - secret: - secretName: csi-vsphere-config - - hostPath: - path: /var/lib/kubelet/plugins_registry - type: Directory - name: registration-dir - - hostPath: - path: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/ - type: DirectoryOrCreate - name: plugin-dir - - hostPath: - path: /var/lib/kubelet - type: Directory - name: pods-mount-dir - - hostPath: - path: /dev - name: device-dir - updateStrategy: - type: RollingUpdate -kind: ConfigMap -metadata: - name: vsphere-csi-node - namespace: '${NAMESPACE}' ---- -apiVersion: v1 -data: - data: | - apiVersion: apps/v1 - kind: Deployment - metadata: - name: vsphere-csi-controller - namespace: kube-system - spec: - replicas: 1 - selector: - matchLabels: - app: vsphere-csi-controller - template: - metadata: - labels: - app: vsphere-csi-controller - role: vsphere-csi - spec: - containers: - - args: - - --v=4 - - --timeout=300s - - --csi-address=$(ADDRESS) - - --leader-election - env: - - name: ADDRESS - value: /csi/csi.sock - image: quay.io/k8scsi/csi-attacher:v3.0.0 - name: csi-attacher - resources: {} - volumeMounts: - - mountPath: /csi - name: socket-dir - - env: - - name: CSI_ENDPOINT - value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock - - name: X_CSI_MODE - value: controller - - name: VSPHERE_CSI_CONFIG - value: /etc/cloud/csi-vsphere.conf - - name: LOGGER_LEVEL - value: PRODUCTION - - name: X_CSI_LOG_LEVEL - value: INFO - image: gcr.io/cloud-provider-vsphere/csi/release/driver:v2.1.0 - livenessProbe: - failureThreshold: 3 - httpGet: - path: /healthz - port: healthz - initialDelaySeconds: 10 - periodSeconds: 5 - timeoutSeconds: 3 - name: vsphere-csi-controller - ports: - - containerPort: 9808 - name: healthz - protocol: TCP - resources: {} - volumeMounts: - - mountPath: /etc/cloud - name: vsphere-config-volume - readOnly: true - - mountPath: /var/lib/csi/sockets/pluginproxy/ - name: socket-dir - - args: - - --csi-address=$(ADDRESS) - env: - - name: ADDRESS - value: /var/lib/csi/sockets/pluginproxy/csi.sock - image: quay.io/k8scsi/livenessprobe:v2.1.0 - name: liveness-probe - resources: {} - volumeMounts: - - mountPath: /var/lib/csi/sockets/pluginproxy/ - name: socket-dir - - args: - - --leader-election - env: - - name: X_CSI_FULL_SYNC_INTERVAL_MINUTES - value: "30" - - name: LOGGER_LEVEL - value: PRODUCTION - - name: VSPHERE_CSI_CONFIG - value: /etc/cloud/csi-vsphere.conf - image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v2.1.0 - name: 
vsphere-syncer - resources: {} - volumeMounts: - - mountPath: /etc/cloud - name: vsphere-config-volume - readOnly: true - - args: - - --v=4 - - --timeout=300s - - --csi-address=$(ADDRESS) - - --leader-election - - --default-fstype=ext4 - env: - - name: ADDRESS - value: /csi/csi.sock - image: quay.io/k8scsi/csi-provisioner:v2.0.0 - name: csi-provisioner - resources: {} - volumeMounts: - - mountPath: /csi - name: socket-dir - dnsPolicy: Default - serviceAccountName: vsphere-csi-controller - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/master - operator: Exists - - effect: NoSchedule - key: node-role.kubernetes.io/control-plane - operator: Exists - volumes: - - name: vsphere-config-volume - secret: - secretName: csi-vsphere-config - - emptyDir: {} - name: socket-dir -kind: ConfigMap -metadata: - name: vsphere-csi-controller - namespace: '${NAMESPACE}' ---- -apiVersion: v1 -kind: Secret -metadata: - name: cloud-controller-manager - namespace: '${NAMESPACE}' -stringData: - data: | - apiVersion: v1 - kind: ServiceAccount - metadata: - labels: - component: cloud-controller-manager - vsphere-cpi-infra: service-account - name: cloud-controller-manager - namespace: kube-system -type: addons.cluster.x-k8s.io/resource-set ---- -apiVersion: v1 -kind: Secret -metadata: - name: cloud-provider-vsphere-credentials - namespace: '${NAMESPACE}' -stringData: - data: | - apiVersion: v1 - kind: Secret - metadata: - labels: - component: cloud-controller-manager - vsphere-cpi-infra: secret - name: cloud-provider-vsphere-credentials - namespace: kube-system - stringData: - ${VSPHERE_SERVER}.password: ${VSPHERE_PASSWORD} - ${VSPHERE_SERVER}.username: ${VSPHERE_USERNAME} - type: Opaque -type: addons.cluster.x-k8s.io/resource-set ---- -apiVersion: v1 -data: - data: | - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - labels: - component: cloud-controller-manager - vsphere-cpi-infra: role - name: system:cloud-controller-manager - rules: - - apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - update - - apiGroups: - - "" - resources: - - nodes - verbs: - - '*' - - apiGroups: - - "" - resources: - - nodes/status - verbs: - - patch - - apiGroups: - - "" - resources: - - services - verbs: - - list - - patch - - update - - watch - - apiGroups: - - "" - resources: - - services/status - verbs: - - patch - - apiGroups: - - "" - resources: - - serviceaccounts - verbs: - - create - - get - - list - - watch - - update - - apiGroups: - - "" - resources: - - persistentvolumes - verbs: - - get - - list - - watch - - update - - apiGroups: - - "" - resources: - - endpoints - verbs: - - create - - get - - list - - watch - - update - - apiGroups: - - "" - resources: - - secrets - verbs: - - get - - list - - watch - - apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - get - - watch - - list - - update - - create - --- - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRoleBinding - metadata: - labels: - component: cloud-controller-manager - vsphere-cpi-infra: cluster-role-binding - name: system:cloud-controller-manager - roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:cloud-controller-manager - subjects: - - kind: ServiceAccount - name: cloud-controller-manager - namespace: kube-system - - kind: User - name: cloud-controller-manager - --- - apiVersion: v1 - data: - vsphere.conf: | - global: - port: 443 - secretName: cloud-provider-vsphere-credentials - secretNamespace: kube-system - thumbprint: 
'${VSPHERE_TLS_THUMBPRINT}'
-        vcenter:
-          ${VSPHERE_SERVER}:
-            datacenters:
-              - '${VSPHERE_DATACENTER}'
-            server: '${VSPHERE_SERVER}'
-    kind: ConfigMap
-    metadata:
-      name: vsphere-cloud-config
-      namespace: kube-system
-    ---
-    apiVersion: rbac.authorization.k8s.io/v1
-    kind: RoleBinding
-    metadata:
-      labels:
-        component: cloud-controller-manager
-        vsphere-cpi-infra: role-binding
-      name: servicecatalog.k8s.io:apiserver-authentication-reader
-      namespace: kube-system
-    roleRef:
-      apiGroup: rbac.authorization.k8s.io
-      kind: Role
-      name: extension-apiserver-authentication-reader
-    subjects:
-      - kind: ServiceAccount
-        name: cloud-controller-manager
-        namespace: kube-system
-      - kind: User
-        name: cloud-controller-manager
-    ---
-    apiVersion: apps/v1
-    kind: DaemonSet
-    metadata:
-      labels:
-        component: cloud-controller-manager
-        tier: control-plane
-      name: vsphere-cloud-controller-manager
-      namespace: kube-system
-    spec:
-      selector:
-        matchLabels:
-          name: vsphere-cloud-controller-manager
-      template:
-        metadata:
-          labels:
-            component: cloud-controller-manager
-            name: vsphere-cloud-controller-manager
-            tier: control-plane
-        spec:
-          affinity:
-            nodeAffinity:
-              requiredDuringSchedulingIgnoredDuringExecution:
-                nodeSelectorTerms:
-                  - matchExpressions:
-                      - key: node-role.kubernetes.io/control-plane
-                        operator: Exists
-                  - matchExpressions:
-                      - key: node-role.kubernetes.io/master
-                        operator: Exists
-          containers:
-            - args:
-                - --v=2
-                - --cloud-provider=vsphere
-                - --cloud-config=/etc/cloud/vsphere.conf
-              image: gcr.io/cloud-provider-vsphere/cpi/release/manager:${CPI_IMAGE_K8S_VERSION}
-              name: vsphere-cloud-controller-manager
-              resources:
-                requests:
-                  cpu: 200m
-              volumeMounts:
-                - mountPath: /etc/cloud
-                  name: vsphere-config-volume
-                  readOnly: true
-          hostNetwork: true
-          priorityClassName: system-node-critical
-          securityContext:
-            runAsUser: 1001
-          serviceAccountName: cloud-controller-manager
-          tolerations:
-            - effect: NoSchedule
-              key: node.cloudprovider.kubernetes.io/uninitialized
-              value: "true"
-            - effect: NoSchedule
-              key: node-role.kubernetes.io/master
-              operator: Exists
-            - effect: NoSchedule
-              key: node-role.kubernetes.io/control-plane
-              operator: Exists
-            - effect: NoSchedule
-              key: node.kubernetes.io/not-ready
-              operator: Exists
-          volumes:
-            - configMap:
-                name: vsphere-cloud-config
-              name: vsphere-config-volume
-      updateStrategy:
-        type: RollingUpdate
-kind: ConfigMap
-metadata:
-  name: cpi-manifests
-  namespace: '${NAMESPACE}'
diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/v1.8/clusterclass/patch-vsphere-template.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/v1.8/clusterclass/patch-vsphere-template.yaml
deleted file mode 100644
index 5f7f38db63..0000000000
--- a/test/e2e/data/infrastructure-vsphere-govmomi/v1.8/clusterclass/patch-vsphere-template.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-- op: add
-  path: /spec/patches/-
-  value:
-    definitions:
-    - jsonPatches:
-      - op: replace
-        path: /spec/template/spec/template
-        valueFrom:
-          template: |-
-            {{- if semverCompare ">= v1.28" .builtin.controlPlane.version -}}
-            ubuntu-2204-kube-{{ .builtin.controlPlane.version }}
-            {{- else -}}
-            ubuntu-2004-kube-{{ .builtin.controlPlane.version }}
-            {{- end -}}
-      selector:
-        apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-        kind: VSphereMachineTemplate
-        matchResources:
-          controlPlane: true
-    - jsonPatches:
-      - op: replace
-        path: /spec/template/spec/template
-        valueFrom:
-          template: |-
-            {{- if semverCompare ">= v1.28" .builtin.machineDeployment.version -}}
-            ubuntu-2204-kube-{{ .builtin.machineDeployment.version }}
-            {{- else -}}
-            ubuntu-2004-kube-{{ .builtin.machineDeployment.version }}
-            {{- end -}}
-      selector:
-        apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-        kind: VSphereMachineTemplate
-        matchResources:
-          machineDeploymentClass:
-            names:
-            - ${CLUSTER_CLASS_NAME}-worker
-    name: vSphereTemplate
diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/v1.8/commons/remove-storage-policy.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/v1.8/commons/remove-storage-policy.yaml
deleted file mode 100644
index 9e0cac085c..0000000000
--- a/test/e2e/data/infrastructure-vsphere-govmomi/v1.8/commons/remove-storage-policy.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-- op: remove
-  path: /spec/template/spec/storagePolicyName
diff --git a/test/e2e/data/shared/capv/main/metadata.yaml b/test/e2e/data/shared/capv/main/metadata.yaml
index b34a660d3c..227004519f 100644
--- a/test/e2e/data/shared/capv/main/metadata.yaml
+++ b/test/e2e/data/shared/capv/main/metadata.yaml
@@ -6,13 +6,13 @@
 
 apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3
 kind: Metadata
 releaseSeries:
-  - major: 1
-    minor: 8
-    contract: v1beta1
   - major: 1
     minor: 9
     contract: v1beta1
   - major: 1
     minor: 10
     contract: v1beta1
+  - major: 1
+    minor: 11
+    contract: v1beta1
diff --git a/test/e2e/data/shared/capv/v1.8/metadata.yaml b/test/e2e/data/shared/capv/v1.10/metadata.yaml
similarity index 96%
rename from test/e2e/data/shared/capv/v1.8/metadata.yaml
rename to test/e2e/data/shared/capv/v1.10/metadata.yaml
index cdb9ec77c3..77204dba7a 100644
--- a/test/e2e/data/shared/capv/v1.8/metadata.yaml
+++ b/test/e2e/data/shared/capv/v1.10/metadata.yaml
@@ -7,8 +7,9 @@ apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3
 kind: Metadata
 releaseSeries:
   - major: 1
-    minor: 7
+    minor: 8
     contract: v1beta1
   - major: 1
-    minor: 8
+    minor: 9
     contract: v1beta1
+
diff --git a/test/e2e/data/shared/capv/v1.9/metadata.yaml b/test/e2e/data/shared/capv/v1.9/metadata.yaml
index 77204dba7a..c1d130d5e4 100644
--- a/test/e2e/data/shared/capv/v1.9/metadata.yaml
+++ b/test/e2e/data/shared/capv/v1.9/metadata.yaml
@@ -7,9 +7,9 @@ apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3
 kind: Metadata
 releaseSeries:
   - major: 1
-    minor: 8
+    minor: 9
     contract: v1beta1
   - major: 1
-    minor: 9
+    minor: 10
     contract: v1beta1