From 624a9fca0ff689a8bdcbccfa203b68a848e3d31f Mon Sep 17 00:00:00 2001
From: Stefan Bueringer
Date: Thu, 15 Aug 2024 08:25:39 +0200
Subject: [PATCH] Prepare main branch for v1.12 development
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Stefan Büringer buringerst@vmware.com
---
 Makefile                                      |   11 +-
 clusterctl-settings.json                      |    2 +-
 docs/release/release-tasks.md                 |   17 +-
 metadata.yaml                                 |    3 +
 test/e2e/clusterctl_upgrade_test.go           |   68 +-
 test/e2e/config/vsphere.yaml                  |   46 +-
 .../clusterclass-quick-start.yaml             |  382 +++++
 .../v1.11/clusterclass/kustomization.yaml     |   17 +
 .../patch-k8s-install-script.yaml             |  221 +++
 .../clusterclass/patch-namingstrategy.yaml    |    8 +
 .../clusterclass/patch-prekubeadmscript.yaml  |   45 +
 .../clusterclass/patch-vsphere-template.yaml  |   49 +
 .../v1.11/commons/cluster-network-CIDR.yaml   |   10 +
 .../commons/cluster-resource-set-label.yaml   |    7 +
 .../v1.11/commons/cluster-resource-set.yaml   |   18 +
 .../topology/cluster-template-topology.yaml   | 1273 +++++++++++++++++
 .../v1.11/topology/kustomization.yaml         |    8 +
 .../v1.11/workload/kustomization.yaml         |    8 +
 .../workload-control-plane-endpoint-ip.yaml   |    5 +
 .../clusterclass-quick-start-supervisor.yaml  |  324 +++++
 .../v1.11/clusterclass/kustomization.yaml     |   20 +
 .../patch-k8s-install-script.yaml             |  221 +++
 .../clusterclass/patch-namingstrategy.yaml    |    8 +
 .../clusterclass/patch-prekubeadmscript.yaml  |   45 +
 .../clusterclass/patch-vm-namingstrategy.yaml |    4 +
 .../clusterclass/patch-vsphere-template.yaml  |   49 +
 .../v1.11/commons/cluster-network-CIDR.yaml   |   10 +
 .../commons/cluster-resource-set-label.yaml   |    7 +
 .../v1.11/commons/cluster-resource-set.yaml   |   18 +
 .../cluster-template-topology-supervisor.yaml | 1258 ++++++++++++++++
 .../v1.11/topology/kustomization.yaml         |    8 +
 .../v1.11/workload/kustomization.yaml         |    8 +
 .../workload-control-plane-endpoint-ip.yaml   |    5 +
 test/e2e/data/shared/capv/main/metadata.yaml  |    3 +
 .../{capi/v1.5 => capv/v1.11}/metadata.yaml   |   28 +-
 test/extension/tilt-provider.yaml             |    2 +-
 .../net-operator/tilt-provider.yaml           |    2 +-
 test/infrastructure/vcsim/tilt-provider.yaml  |    2 +-
 tilt-provider.yaml                            |    4 +-
 39 files changed, 4162 insertions(+), 62 deletions(-)
 create mode 100644 test/e2e/data/infrastructure-vsphere-govmomi/v1.11/clusterclass/clusterclass-quick-start.yaml
 create mode 100644 test/e2e/data/infrastructure-vsphere-govmomi/v1.11/clusterclass/kustomization.yaml
 create mode 100644 test/e2e/data/infrastructure-vsphere-govmomi/v1.11/clusterclass/patch-k8s-install-script.yaml
 create mode 100644 test/e2e/data/infrastructure-vsphere-govmomi/v1.11/clusterclass/patch-namingstrategy.yaml
 create mode 100644 test/e2e/data/infrastructure-vsphere-govmomi/v1.11/clusterclass/patch-prekubeadmscript.yaml
 create mode 100644 test/e2e/data/infrastructure-vsphere-govmomi/v1.11/clusterclass/patch-vsphere-template.yaml
 create mode 100644 test/e2e/data/infrastructure-vsphere-govmomi/v1.11/commons/cluster-network-CIDR.yaml
 create mode 100644 test/e2e/data/infrastructure-vsphere-govmomi/v1.11/commons/cluster-resource-set-label.yaml
 create mode 100644 test/e2e/data/infrastructure-vsphere-govmomi/v1.11/commons/cluster-resource-set.yaml
 create mode 100644 test/e2e/data/infrastructure-vsphere-govmomi/v1.11/topology/cluster-template-topology.yaml
 create mode 100644 test/e2e/data/infrastructure-vsphere-govmomi/v1.11/topology/kustomization.yaml
 create mode 100644 test/e2e/data/infrastructure-vsphere-govmomi/v1.11/workload/kustomization.yaml
 create mode 100644 test/e2e/data/infrastructure-vsphere-govmomi/v1.11/workload/workload-control-plane-endpoint-ip.yaml
 create mode 100644 test/e2e/data/infrastructure-vsphere-supervisor/v1.11/clusterclass/clusterclass-quick-start-supervisor.yaml
 create mode 100644 test/e2e/data/infrastructure-vsphere-supervisor/v1.11/clusterclass/kustomization.yaml
 create mode 100644 test/e2e/data/infrastructure-vsphere-supervisor/v1.11/clusterclass/patch-k8s-install-script.yaml
 create mode 100644 test/e2e/data/infrastructure-vsphere-supervisor/v1.11/clusterclass/patch-namingstrategy.yaml
 create mode 100644 test/e2e/data/infrastructure-vsphere-supervisor/v1.11/clusterclass/patch-prekubeadmscript.yaml
 create mode 100644 test/e2e/data/infrastructure-vsphere-supervisor/v1.11/clusterclass/patch-vm-namingstrategy.yaml
 create mode 100644 test/e2e/data/infrastructure-vsphere-supervisor/v1.11/clusterclass/patch-vsphere-template.yaml
 create mode 100644 test/e2e/data/infrastructure-vsphere-supervisor/v1.11/commons/cluster-network-CIDR.yaml
 create mode 100644 test/e2e/data/infrastructure-vsphere-supervisor/v1.11/commons/cluster-resource-set-label.yaml
 create mode 100644 test/e2e/data/infrastructure-vsphere-supervisor/v1.11/commons/cluster-resource-set.yaml
 create mode 100644 test/e2e/data/infrastructure-vsphere-supervisor/v1.11/topology/cluster-template-topology-supervisor.yaml
 create mode 100644 test/e2e/data/infrastructure-vsphere-supervisor/v1.11/topology/kustomization.yaml
 create mode 100644 test/e2e/data/infrastructure-vsphere-supervisor/v1.11/workload/kustomization.yaml
 create mode 100644 test/e2e/data/infrastructure-vsphere-supervisor/v1.11/workload/workload-control-plane-endpoint-ip.yaml
 rename test/e2e/data/shared/{capi/v1.5 => capv/v1.11}/metadata.yaml (67%)

diff --git a/Makefile b/Makefile
index 746728c5bd..749b3f2524 100644
--- a/Makefile
+++ b/Makefile
@@ -369,7 +369,7 @@ generate-doctoc:
 	TRACE=$(TRACE) ./hack/generate-doctoc.sh
 
 .PHONY: generate-e2e-templates
-generate-e2e-templates: $(KUSTOMIZE) $(addprefix generate-e2e-templates-, v1.9 v1.10 main) ## Generate test templates for all branches
+generate-e2e-templates: $(KUSTOMIZE) $(addprefix generate-e2e-templates-, v1.9 v1.10 v1.11 main) ## Generate test templates for all branches
 
 .PHONY: generate-e2e-templates-main
 generate-e2e-templates-main: $(KUSTOMIZE) ## Generate test templates for the main branch
@@ -413,6 +413,15 @@ generate-e2e-templates-main: $(KUSTOMIZE) ## Generate test templates for the mai
 	"$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/fast-rollout" > "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/cluster-template-fast-rollout-supervisor.yaml"
 	"$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/ownerrefs-finalizers" > "$(E2E_SUPERVISOR_TEMPLATE_DIR)/main/cluster-template-ownerrefs-finalizers-supervisor.yaml"
+.PHONY: generate-e2e-templates-v1.11
+generate-e2e-templates-v1.11: $(KUSTOMIZE)
+	"$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/v1.11/clusterclass" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/v1.11/clusterclass-quick-start.yaml"
+	"$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/v1.11/workload" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/v1.11/cluster-template-workload.yaml"
+
+	"$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_SUPERVISOR_TEMPLATE_DIR)/v1.11/clusterclass" > "$(E2E_SUPERVISOR_TEMPLATE_DIR)/v1.11/clusterclass-quick-start-supervisor.yaml"
+	"$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_SUPERVISOR_TEMPLATE_DIR)/v1.11/workload" > "$(E2E_SUPERVISOR_TEMPLATE_DIR)/v1.11/cluster-template-workload-supervisor.yaml"
+
+
 .PHONY: generate-e2e-templates-v1.10
 generate-e2e-templates-v1.10: $(KUSTOMIZE)
 	"$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/v1.10/clusterclass" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/v1.10/clusterclass-quick-start.yaml"
diff --git a/clusterctl-settings.json b/clusterctl-settings.json
index ed240d0ec0..f91d971760 100644
--- a/clusterctl-settings.json
+++ b/clusterctl-settings.json
@@ -2,6 +2,6 @@
   "name": "infrastructure-vsphere",
   "config": {
     "componentsFile": "infrastructure-components.yaml",
-    "nextVersion": "v1.11.99"
+    "nextVersion": "v1.12.99"
   }
 }
diff --git a/docs/release/release-tasks.md b/docs/release/release-tasks.md
index d69190addf..ad156ae043 100644
--- a/docs/release/release-tasks.md
+++ b/docs/release/release-tasks.md
@@ -24,21 +24,20 @@ is used for e.g. local development and e2e tests. We also modify tests so that t
 This comes down to changing occurrences of the old version to the new version, e.g. `v1.10` to `v1.11`:
 
 1. Setup E2E tests for the new release:
-   1. Goal is that our clusterctl upgrade tests are testing the right versions. For `v1.11` this means:
-      - v1beta1: `v1.10 => current` (will change with each new release)
-      - v1beta1: `v1.9 => current` (will change with each new release)
-   2. Modify the test specs in `test/e2e/clusterctl_upgrade_test.go` (according to the versions we want to test described above).
-      Please note that both `InitWithKubernetesVersion` and `WorkloadKubernetesVersion` should be the highest mgmt cluster version supported by the respective Cluster API version.
-   3. Update providers in `vsphere.yaml`:
+   1. The goal is that we have clusterctl upgrade tests for all relevant upgrade cases:
+      - Modify the test specs in `test/e2e/clusterctl_upgrade_test.go`. Please note the comments above each test case (look for `This test should be changed during "prepare main branch"`).
+        Please note that both `InitWithKubernetesVersion` and `WorkloadKubernetesVersion` should be the highest mgmt cluster version supported by the respective Cluster API version.
+      - Please ping maintainers after these changes are made for a first round of feedback before continuing with the steps below.
+   2. Update providers in `vsphere.yaml`:
       1. Add a new `v1.10` entry.
      2. Remove providers that are not used anymore in clusterctl upgrade tests.
      3. Change `v1.10.99` to `v1.11.99`.
-   4. Adjust `metadata.yaml`'s:
+   3. Adjust `metadata.yaml`'s:
      1. Create a new `v1.10` `metadata.yaml` (`test/e2e/data/shared/capv/v1.10/metadata.yaml`) by copying the top-level `metadata.yaml`.
      2. Add new release to the top-level `metadata.yaml`
      3. Add the new v1.11 release to the main `metadata.yaml` (`test/e2e/data/shared/capv/main/metadata.yaml`).
      4. Remove old `metadata.yaml`'s that are not used anymore in clusterctl upgrade tests.
-   5. Adjust cluster templates in `test/e2e/data/infrastructure-vsphere-govmomi` and `test/e2e/data/infrastructure-vsphere-supervisor`:
+   4. Adjust cluster templates in `test/e2e/data/infrastructure-vsphere-govmomi` and `test/e2e/data/infrastructure-vsphere-supervisor`:
      1. Regenerate templates via `make generate-e2e-templates`.
      2. Create a new `v1.10` folder.
         It should be created based on the `main` folder and only contain the templates we use in the clusterctl upgrade tests, as of today:
@@ -48,7 +47,7 @@ This comes down to changing occurrences of the old version to the new version, e
         - `workload`
      3. Remove old folders that are not used anymore in clusterctl upgrade tests.
      4. Add a `generate-e2e-templates-v1.10` target in `Makefile` and remove the old ones.
-2. Update `clusterctl-settings.json`: `v1.10.99` => `v1.11.99`.
+2. Update `clusterctl-settings.json` and all `tilt-provider.yaml` files: `v1.10.99` => `v1.11.99`.
 3. Make sure all tests are green.
 
 Prior art: [🌱 Prepare main for development of release v1.11](https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/pull/2950)
diff --git a/metadata.yaml b/metadata.yaml
index 5a959dabca..b4c095f206 100644
--- a/metadata.yaml
+++ b/metadata.yaml
@@ -42,3 +42,6 @@ releaseSeries:
   - major: 1
     minor: 11
     contract: v1beta1
+  - major: 1
+    minor: 12
+    contract: v1beta1
diff --git a/test/e2e/clusterctl_upgrade_test.go b/test/e2e/clusterctl_upgrade_test.go
index b612a7972d..441b4a0272 100644
--- a/test/e2e/clusterctl_upgrade_test.go
+++ b/test/e2e/clusterctl_upgrade_test.go
@@ -39,16 +39,19 @@ var (
 	capvReleaseMarkerPrefix = "go://sigs.k8s.io/cluster-api-provider-vsphere@v%s"
 )
 
-var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.10=>current, CAPI 1.7=>1.8) [vcsim] [supervisor] [ClusterClass]", func() {
-	const specName = "clusterctl-upgrade-1.10-current" // prefix (clusterctl-upgrade) copied from CAPI
+// Note: This test should be changed during "prepare main branch", it should test CAPV n-1 => current (and then corresponding CAPI versions if already available).
+var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.11=>current, CAPI 1.8=>1.8) on K8S latest ci mgmt cluster [vcsim] [supervisor] [ClusterClass]", func() {
+	const specName = "clusterctl-upgrade-1.11-current-latest-ci" // prefix (clusterctl-upgrade) copied from CAPI
 	Setup(specName, func(testSpecificSettingsGetter func() testSettings) {
 		capi_e2e.ClusterctlUpgradeSpec(ctx, func() capi_e2e.ClusterctlUpgradeSpecInput {
-			capiVersion := "1.7"
+			capiVersion := "1.8"
 			capiStableRelease, err := getStableReleaseOfMinor(ctx, capiReleaseMarkerPrefix, capiVersion)
 			Expect(err).ToNot(HaveOccurred(), "Failed to get stable version for minor release : %s", capiVersion)
-			capvVersion := "1.10"
+			capvVersion := "1.11"
 			capvStableRelease, err := getStableReleaseOfMinor(ctx, capvReleaseMarkerPrefix, capvVersion)
 			Expect(err).ToNot(HaveOccurred(), "Failed to get stable version for minor release : %s", capvVersion)
+			initKubernetesVersion, err := kubernetesversions.ResolveVersion(ctx, e2eConfig.GetVariable("KUBERNETES_VERSION_LATEST_CI"))
+			Expect(err).ToNot(HaveOccurred())
 			return capi_e2e.ClusterctlUpgradeSpecInput{
 				E2EConfig:            e2eConfig,
 				ClusterctlConfigPath: testSpecificSettingsGetter().ClusterctlConfigPath,
@@ -67,8 +70,53 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.10
 				// InitWithKubernetesVersion should be the highest kubernetes version supported by the init Cluster API version.
 				// This is to guarantee that both, the old and new CAPI version, support the defined version.
 				// Ensure all Kubernetes versions used here are covered in patch-vsphere-template.yaml
-				InitWithKubernetesVersion: "v1.30.0",
-				WorkloadKubernetesVersion: "v1.30.0",
+				InitWithKubernetesVersion: initKubernetesVersion,
+				WorkloadKubernetesVersion: "v1.31.0",
+				WorkloadFlavor:            testSpecificSettingsGetter().FlavorForMode("workload"),
+				// We are using a separate management cluster. For running in VCSim we also have to pass WithAdditionalVCSimServer
+				// below otherwise there will be no VCSim instance created in the management cluster.
+				UseKindForManagementCluster:              true,
+				KindManagementClusterNewClusterProxyFunc: kindManagementClusterNewClusterProxyFunc,
+			}
+		})
+	},
+		WithIP("WORKLOAD_CONTROL_PLANE_ENDPOINT_IP"),
+		// This is required because we are using a separate management cluster with kind by passing `UseKindForManagementCluster` above.
+		WithAdditionalVCSimServer(true),
+	)
+})
+
+// Note: This test should be changed during "prepare main branch", it should test CAPV n-1 => current (and then corresponding CAPI versions if already available).
+var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.11=>current, CAPI 1.8=>1.8) [vcsim] [supervisor] [ClusterClass]", func() {
+	const specName = "clusterctl-upgrade-1.11-current" // prefix (clusterctl-upgrade) copied from CAPI
+	Setup(specName, func(testSpecificSettingsGetter func() testSettings) {
+		capi_e2e.ClusterctlUpgradeSpec(ctx, func() capi_e2e.ClusterctlUpgradeSpecInput {
+			capiVersion := "1.8"
+			capiStableRelease, err := getStableReleaseOfMinor(ctx, capiReleaseMarkerPrefix, capiVersion)
+			Expect(err).ToNot(HaveOccurred(), "Failed to get stable version for minor release : %s", capiVersion)
+			capvVersion := "1.11"
+			capvStableRelease, err := getStableReleaseOfMinor(ctx, capvReleaseMarkerPrefix, capvVersion)
+			Expect(err).ToNot(HaveOccurred(), "Failed to get stable version for minor release : %s", capvVersion)
+			return capi_e2e.ClusterctlUpgradeSpecInput{
+				E2EConfig:                         e2eConfig,
+				ClusterctlConfigPath:              testSpecificSettingsGetter().ClusterctlConfigPath,
+				BootstrapClusterProxy:             bootstrapClusterProxy,
+				ArtifactFolder:                    artifactFolder,
+				SkipCleanup:                       skipCleanup,
+				MgmtFlavor:                        testSpecificSettingsGetter().FlavorForMode("topology"),
+				PostNamespaceCreated:              testSpecificSettingsGetter().PostNamespaceCreatedFunc,
+				InitWithBinary:                    fmt.Sprintf(clusterctlDownloadURL, capiStableRelease),
+				InitWithCoreProvider:              fmt.Sprintf(providerCAPIPrefix, capiStableRelease),
+				InitWithBootstrapProviders:        []string{fmt.Sprintf(providerKubeadmPrefix, capiStableRelease)},
+				InitWithControlPlaneProviders:     []string{fmt.Sprintf(providerKubeadmPrefix, capiStableRelease)},
+				InitWithInfrastructureProviders:   []string{fmt.Sprintf(providerVSpherePrefix, capvStableRelease)},
+				InitWithRuntimeExtensionProviders: testSpecificSettingsGetter().RuntimeExtensionProviders,
+				InitWithIPAMProviders:             []string{},
+				// InitWithKubernetesVersion should be the highest kubernetes version supported by the init Cluster API version.
+				// This is to guarantee that both, the old and new CAPI version, support the defined version.
+				// Ensure all Kubernetes versions used here are covered in patch-vsphere-template.yaml
+				InitWithKubernetesVersion: "v1.31.0",
+				WorkloadKubernetesVersion: "v1.31.0",
 				WorkloadFlavor:            testSpecificSettingsGetter().FlavorForMode("workload"),
 				// We are using a separate management cluster. For running in VCSim we also have to pass WithAdditionalVCSimServer
 				// below otherwise there will be no VCSim instance created in the management cluster.
@@ -83,7 +131,8 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.10
 	)
 })
 
-var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.10=>current, CAPI 1.7=>1.8) on K8S latest ci mgmt cluster [vcsim] [supervisor] [ClusterClass]", func() {
+// Note: This test should be changed during "prepare main branch", it should test CAPV n-2 => current (and then corresponding CAPI versions if already available).
+var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.10=>current, CAPI 1.7=>1.8) [vcsim] [supervisor] [ClusterClass]", func() {
 	const specName = "clusterctl-upgrade-1.10-current" // prefix (clusterctl-upgrade) copied from CAPI
 	Setup(specName, func(testSpecificSettingsGetter func() testSettings) {
 		capi_e2e.ClusterctlUpgradeSpec(ctx, func() capi_e2e.ClusterctlUpgradeSpecInput {
@@ -93,8 +142,6 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.10
 			capvVersion := "1.10"
 			capvStableRelease, err := getStableReleaseOfMinor(ctx, capvReleaseMarkerPrefix, capvVersion)
 			Expect(err).ToNot(HaveOccurred(), "Failed to get stable version for minor release : %s", capvVersion)
-			initKubernetesVersion, err := kubernetesversions.ResolveVersion(ctx, e2eConfig.GetVariable("KUBERNETES_VERSION_LATEST_CI"))
-			Expect(err).ToNot(HaveOccurred())
 			return capi_e2e.ClusterctlUpgradeSpecInput{
 				E2EConfig:            e2eConfig,
 				ClusterctlConfigPath: testSpecificSettingsGetter().ClusterctlConfigPath,
@@ -113,7 +160,7 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.10
 				// InitWithKubernetesVersion should be the highest kubernetes version supported by the init Cluster API version.
 				// This is to guarantee that both, the old and new CAPI version, support the defined version.
 				// Ensure all Kubernetes versions used here are covered in patch-vsphere-template.yaml
-				InitWithKubernetesVersion: initKubernetesVersion,
+				InitWithKubernetesVersion: "v1.30.0",
 				WorkloadKubernetesVersion: "v1.30.0",
 				WorkloadFlavor:            testSpecificSettingsGetter().FlavorForMode("workload"),
 				// We are using a separate management cluster. For running in VCSim we also have to pass WithAdditionalVCSimServer
@@ -129,6 +176,7 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.10
 	)
 })
 
+// Note: This test should be changed during "prepare main branch", it should test CAPV n-3 => current (and then corresponding CAPI versions if already available).
var _ = Describe("When testing clusterctl upgrades using ClusterClass (CAPV 1.9=>current, CAPI 1.6=>1.8) [vcsim] [supervisor] [ClusterClass]", func() { const specName = "clusterctl-upgrade-1.9-current" // prefix (clusterctl-upgrade) copied from CAPI Setup(specName, func(testSpecificSettingsGetter func() testSettings) { diff --git a/test/e2e/config/vsphere.yaml b/test/e2e/config/vsphere.yaml index 2d0ae582f7..b8b88ee97f 100644 --- a/test/e2e/config/vsphere.yaml +++ b/test/e2e/config/vsphere.yaml @@ -53,15 +53,6 @@ providers: replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" - - name: "{go://sigs.k8s.io/cluster-api@v1.5}" # supported release in the v1beta1 series - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v1.5}/core-components.yaml" - type: "url" - contract: v1beta1 - files: - - sourcePath: "../data/shared/capi/v1.5/metadata.yaml" - replacements: - - old: "imagePullPolicy: Always" - new: "imagePullPolicy: IfNotPresent" - name: kubeadm type: BootstrapProvider @@ -93,15 +84,6 @@ providers: replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" - - name: "{go://sigs.k8s.io/cluster-api@v1.5}" # supported release in the v1beta1 series - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v1.5}/bootstrap-components.yaml" - type: "url" - contract: v1beta1 - files: - - sourcePath: "../data/shared/capi/v1.5/metadata.yaml" - replacements: - - old: "imagePullPolicy: Always" - new: "imagePullPolicy: IfNotPresent" - name: kubeadm type: ControlPlaneProvider @@ -133,15 +115,6 @@ providers: replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" - - name: "{go://sigs.k8s.io/cluster-api@v1.5}" # supported release in the v1beta1 series - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v1.5}/control-plane-components.yaml" - type: "url" - contract: v1beta1 - files: - - sourcePath: "../data/shared/capi/v1.5/metadata.yaml" - replacements: - - old: "imagePullPolicy: Always" - new: "imagePullPolicy: IfNotPresent" - name: in-cluster type: IPAMProvider @@ -159,7 +132,7 @@ providers: - name: vsphere type: InfrastructureProvider versions: - - name: v1.11.99 # next release + - name: v1.12.99 # next release # Use manifest from source files value: ../../../../cluster-api-provider-vsphere/config/default contract: v1beta1 @@ -191,6 +164,17 @@ providers: - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-supervisor/main/cluster-template-conformance-supervisor.yaml" - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-supervisor/main/cluster-template-ownerrefs-finalizers-supervisor.yaml" - sourcePath: "../data/shared/capv/main/metadata.yaml" + - name: "{go://sigs.k8s.io/cluster-api-provider-vsphere@v1.11}" # supported release in the v1beta1 series + value: "https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/releases/download/{go://sigs.k8s.io/cluster-api-provider-vsphere@v1.11}/infrastructure-components.yaml" + type: "url" + contract: v1beta1 + files: + # Add a cluster template + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/v1.11/cluster-template-workload.yaml" + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-govmomi/v1.11/clusterclass-quick-start.yaml" + - sourcePath: "../../../test/e2e/data/infrastructure-vsphere-supervisor/v1.11/cluster-template-workload-supervisor.yaml" + - sourcePath: 
"../../../test/e2e/data/infrastructure-vsphere-supervisor/v1.11/clusterclass-quick-start-supervisor.yaml" + - sourcePath: "../data/shared/capv/v1.11/metadata.yaml" - name: "{go://sigs.k8s.io/cluster-api-provider-vsphere@v1.10}" # supported release in the v1beta1 series value: "https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/releases/download/{go://sigs.k8s.io/cluster-api-provider-vsphere@v1.10}/infrastructure-components.yaml" type: "url" @@ -217,7 +201,7 @@ providers: - name: vcsim type: RuntimeExtensionProvider # vcsim isn't a provider, but we fake it is so it can be handled by the clusterctl machinery. versions: - - name: v1.11.99 + - name: v1.12.99 # Use manifest from source files value: ../../../../cluster-api-provider-vsphere/test/infrastructure/vcsim/config/default contract: v1beta1 @@ -242,7 +226,7 @@ providers: - name: net-operator type: RuntimeExtensionProvider # net-operator isn't a provider, but we fake it is so it can be handled by the clusterctl machinery. versions: - - name: v1.11.99 + - name: v1.12.99 # Use manifest from source files value: ../../../../cluster-api-provider-vsphere/test/infrastructure/net-operator/config/default contract: v1beta1 @@ -255,7 +239,7 @@ providers: - name: capv-test-extension type: RuntimeExtensionProvider versions: - - name: v1.11.99 + - name: v1.12.99 # Use manifest from source files value: ../../../../cluster-api-provider-vsphere/test/extension/config/default contract: v1beta1 diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/clusterclass/clusterclass-quick-start.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/clusterclass/clusterclass-quick-start.yaml new file mode 100644 index 0000000000..dad9f0c707 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/clusterclass/clusterclass-quick-start.yaml @@ -0,0 +1,382 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereClusterTemplate +metadata: + name: '${CLUSTER_CLASS_NAME}' + namespace: '${NAMESPACE}' +spec: + template: + spec: {} +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: ClusterClass +metadata: + name: '${CLUSTER_CLASS_NAME}' +spec: + controlPlane: + machineInfrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: ${CLUSTER_CLASS_NAME}-template + namespace: '${NAMESPACE}' + ref: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + name: ${CLUSTER_CLASS_NAME}-controlplane + namespace: '${NAMESPACE}' + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereClusterTemplate + name: '${CLUSTER_CLASS_NAME}' + namespace: '${NAMESPACE}' + patches: + - definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/kubeadmConfigSpec/files + value: [] + - op: add + path: /spec/template/spec/kubeadmConfigSpec/postKubeadmCommands + value: [] + selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + - jsonPatches: + - op: add + path: /spec/template/spec/files + value: [] + - op: add + path: /spec/template/spec/postKubeadmCommands + value: [] + selector: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + matchResources: + machineDeploymentClass: + names: + - ${CLUSTER_CLASS_NAME}-worker + name: createEmptyArrays + - definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/kubeadmConfigSpec/users + valueFrom: + template: | + - name: capv + sshAuthorizedKeys: + - '{{ 
.sshKey }}' + sudo: ALL=(ALL) NOPASSWD:ALL + selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + - jsonPatches: + - op: add + path: /spec/template/spec/users + valueFrom: + template: | + - name: capv + sshAuthorizedKeys: + - '{{ .sshKey }}' + sudo: ALL=(ALL) NOPASSWD:ALL + selector: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + matchResources: + machineDeploymentClass: + names: + - ${CLUSTER_CLASS_NAME}-worker + enabledIf: '{{ if .sshKey }}true{{end}}' + name: enableSSHIntoNodes + - definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/controlPlaneEndpoint + valueFrom: + template: | + host: '{{ .controlPlaneIpAddr }}' + port: {{ .controlPlanePort }} + - op: add + path: /spec/template/spec/identityRef + valueFrom: + template: | + kind: Secret + name: '{{ .credsSecretName }}' + - op: add + path: /spec/template/spec/server + valueFrom: + variable: infraServer.url + - op: add + path: /spec/template/spec/thumbprint + valueFrom: + variable: infraServer.thumbprint + selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereClusterTemplate + matchResources: + infrastructureCluster: true + name: infraClusterSubstitutions + - definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/kubeadmConfigSpec/files/- + valueFrom: + template: |- + owner: "root:root" + path: "/etc/kubernetes/manifests/kube-vip.yaml" + content: {{ printf "%q" (regexReplaceAll "(name: address\n +value:).*" .kubeVipPodManifest (printf "$1 %s" .controlPlaneIpAddr)) }} + permissions: "0644" + - op: add + path: /spec/template/spec/kubeadmConfigSpec/files/- + valueFrom: + template: | + content: 127.0.0.1 localhost kubernetes + owner: root:root + path: /etc/kube-vip.hosts + permissions: "0644" + - op: add + path: /spec/template/spec/kubeadmConfigSpec/files/- + valueFrom: + template: | + content: | + #!/bin/bash + + # Copyright 2020 The Kubernetes Authors. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + set -e + + # Configure the workaround required for kubeadm init with kube-vip: + # xref: https://github.com/kube-vip/kube-vip/issues/684 + + # Nothing to do for kubernetes < v1.29 + KUBEADM_MINOR="$(kubeadm version -o short | cut -d '.' 
-f 2)" + if [[ "$KUBEADM_MINOR" -lt "29" ]]; then + exit 0 + fi + + IS_KUBEADM_INIT="false" + + # cloud-init kubeadm init + if [[ -f /run/kubeadm/kubeadm.yaml ]]; then + IS_KUBEADM_INIT="true" + fi + + # ignition kubeadm init + if [[ -f /etc/kubeadm.sh ]] && grep -q -e "kubeadm init" /etc/kubeadm.sh; then + IS_KUBEADM_INIT="true" + fi + + if [[ "$IS_KUBEADM_INIT" == "true" ]]; then + sed -i 's#path: /etc/kubernetes/admin.conf#path: /etc/kubernetes/super-admin.conf#' \ + /etc/kubernetes/manifests/kube-vip.yaml + fi + owner: root:root + path: /etc/pre-kubeadm-commands/50-kube-vip-prepare.sh + permissions: "0700" + selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + name: kubeVipPodManifest + variables: + - metadata: {} + name: sshKey + required: false + schema: + openAPIV3Schema: + description: Public key to SSH onto the cluster nodes. + type: string + - metadata: {} + name: controlPlaneIpAddr + required: true + schema: + openAPIV3Schema: + description: Floating VIP for the control plane. + type: string + - metadata: {} + name: controlPlanePort + required: true + schema: + openAPIV3Schema: + description: Port for the control plane endpoint. + type: integer + - metadata: {} + name: kubeVipPodManifest + required: true + schema: + openAPIV3Schema: + description: kube-vip manifest for the control plane. + type: string + - metadata: {} + name: infraServer + required: true + schema: + openAPIV3Schema: + properties: + thumbprint: + type: string + url: + type: string + type: object + - metadata: {} + name: credsSecretName + required: true + schema: + openAPIV3Schema: + description: Secret containing the credentials for the infra cluster. + type: string + workers: + machineDeployments: + - class: ${CLUSTER_CLASS_NAME}-worker + template: + bootstrap: + ref: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: ${CLUSTER_CLASS_NAME}-worker-bootstrap-template + namespace: '${NAMESPACE}' + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: ${CLUSTER_CLASS_NAME}-worker-machinetemplate + namespace: '${NAMESPACE}' + metadata: {} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: ${CLUSTER_CLASS_NAME}-template + namespace: '${NAMESPACE}' +spec: + template: + spec: + cloneMode: linkedClone + datacenter: '${VSPHERE_DATACENTER}' + datastore: '${VSPHERE_DATASTORE}' + diskGiB: 25 + folder: '${VSPHERE_FOLDER}' + memoryMiB: 8192 + network: + devices: + - dhcp4: true + networkName: '${VSPHERE_NETWORK}' + numCPUs: 2 + os: Linux + powerOffMode: trySoft + resourcePool: '${VSPHERE_RESOURCE_POOL}' + server: '${VSPHERE_SERVER}' + storagePolicyName: '${VSPHERE_STORAGE_POLICY}' + template: '${VSPHERE_TEMPLATE}' + thumbprint: '${VSPHERE_TLS_THUMBPRINT}' +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: ${CLUSTER_CLASS_NAME}-worker-machinetemplate + namespace: '${NAMESPACE}' +spec: + template: + spec: + cloneMode: linkedClone + datacenter: '${VSPHERE_DATACENTER}' + datastore: '${VSPHERE_DATASTORE}' + diskGiB: 25 + folder: '${VSPHERE_FOLDER}' + memoryMiB: 8192 + network: + devices: + - dhcp4: true + networkName: '${VSPHERE_NETWORK}' + numCPUs: 2 + os: Linux + powerOffMode: trySoft + resourcePool: '${VSPHERE_RESOURCE_POOL}' + server: '${VSPHERE_SERVER}' + storagePolicyName: '${VSPHERE_STORAGE_POLICY}' + template: '${VSPHERE_TEMPLATE}' 
+ thumbprint: '${VSPHERE_TLS_THUMBPRINT}' +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlaneTemplate +metadata: + name: ${CLUSTER_CLASS_NAME}-controlplane + namespace: '${NAMESPACE}' +spec: + template: + spec: + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + extraArgs: + cloud-provider: external + controllerManager: + extraArgs: + cloud-provider: external + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + name: '{{ local_hostname }}' + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + name: '{{ local_hostname }}' + preKubeadmCommands: + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback localhost6 localhost6.localdomain6" + >/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }} {{ local_hostname }} localhost + localhost.localdomain localhost4 localhost4.localdomain4" >>/etc/hosts + - mkdir -p /etc/pre-kubeadm-commands + - for script in $(find /etc/pre-kubeadm-commands/ -name '*.sh' -type f | sort); + do echo "Running script $script"; "$script"; done + users: + - name: capv + sshAuthorizedKeys: + - '${VSPHERE_SSH_AUTHORIZED_KEY}' + sudo: ALL=(ALL) NOPASSWD:ALL +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: ${CLUSTER_CLASS_NAME}-worker-bootstrap-template + namespace: '${NAMESPACE}' +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + name: '{{ local_hostname }}' + preKubeadmCommands: + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback localhost6 localhost6.localdomain6" + >/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }} {{ local_hostname }} localhost + localhost.localdomain localhost4 localhost4.localdomain4" >>/etc/hosts + - mkdir -p /etc/pre-kubeadm-commands + - for script in $(find /etc/pre-kubeadm-commands/ -name '*.sh' -type f | sort); + do echo "Running script $script"; "$script"; done \ No newline at end of file diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/clusterclass/kustomization.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/clusterclass/kustomization.yaml new file mode 100644 index 0000000000..4128896681 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/clusterclass/kustomization.yaml @@ -0,0 +1,17 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- ./clusterclass-quick-start.yaml +patches: + - target: + kind: ClusterClass + path: ./patch-vsphere-template.yaml + - target: + kind: ClusterClass + path: ./patch-prekubeadmscript.yaml + - target: + kind: ClusterClass + path: ./patch-k8s-install-script.yaml + - target: + kind: ClusterClass + path: ./patch-namingstrategy.yaml diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/clusterclass/patch-k8s-install-script.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/clusterclass/patch-k8s-install-script.yaml new file mode 100644 index 0000000000..51d92c895f --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/clusterclass/patch-k8s-install-script.yaml @@ -0,0 +1,221 @@ +- op: add + path: /spec/patches/- + value: + definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/kubeadmConfigSpec/files/- + valueFrom: + 
template: | + owner: root:root + path: "/etc/pre-kubeadm-commands/20-k8s-install.sh" + permissions: "0755" + content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + + DISTRO="ubuntu" + KUBE_BINARY_DIR="/usr/bin" + + source /etc/lsb-release + if [[ "$${DISTRIB_ID}" == *Flatcar* ]]; then + # Overrides for flatcar + DISTRO="flatcar" + KUBE_BINARY_DIR="/opt/bin" + fi + + function retry { + attempt=0 + max_attempts=$${1} + interval=$${2} + shift; shift + until [[ $${attempt} -ge "$${max_attempts}" ]] ; do + attempt=$((attempt+1)) + set +e + eval "$*" && return || echo "failed $${attempt} times: $*" + set -e + sleep "$${interval}" + done + echo "error: reached max attempts at retry($*)" + return 1 + } + + [[ $(id -u) != 0 ]] && SUDO="sudo" || SUDO="" + + # This test installs release packages or binaries that are a result of the CI and release builds. + # It runs '... --version' commands to verify that the binaries are correctly installed + # and finally uninstalls the packages. + # For the release packages it tests all versions in the support skew. + LINE_SEPARATOR="*************************************************" + echo "$${LINE_SEPARATOR}" + + ## Variables (replaced by JSON patching) + KUBERNETES_VERSION={{ .builtin.controlPlane.version }} + ## + + # Note: We assume if kubectl has the right version, everything else has as well + if [[ $(kubectl version --client=true -o json | jq '.clientVersion.gitVersion' -r) = "$${KUBERNETES_VERSION}" ]]; then + echo "Detected Kubernetes $${KUBERNETES_VERSION} via kubectl version, nothing to do" + exit 0 + fi + + if [[ "$${KUBERNETES_VERSION}" != "" ]]; then + CI_DIR=/tmp/k8s-ci + mkdir -p "$${CI_DIR}" + declare -a PACKAGES_TO_TEST=("kubectl" "kubelet" "kubeadm") + # Let's just also download the control plane images for worker nodes. It's easier then optimizing it. + declare -a CONTAINERS_TO_TEST=("kube-apiserver" "kube-controller-manager" "kube-proxy" "kube-scheduler") + CONTAINER_EXT="tar" + echo "* testing version $${KUBERNETES_VERSION}" + CI_URL="https://dl.k8s.io/ci/$${KUBERNETES_VERSION}/bin/linux/amd64" + # Set CI_URL to the released binaries for actually released versions. 
+ if [[ "$${KUBERNETES_VERSION}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]] || [[ "$${KUBERNETES_VERSION}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+-(beta|rc).[0-9]+$ ]]; then + CI_URL="https://dl.k8s.io/release/$${KUBERNETES_VERSION}/bin/linux/amd64" + fi + for CI_PACKAGE in "$${PACKAGES_TO_TEST[@]}"; do + # Browser: https://console.cloud.google.com/storage/browser/k8s-release-dev?project=k8s-release-dev + # e.g.: https://storage.googleapis.com/k8s-release-dev/ci/v1.21.0-beta.1.378+cf3374e43491c5/bin/linux/amd64/kubectl + echo "* downloading binary: $${CI_URL}/$${CI_PACKAGE}" + wget "$${CI_URL}/$${CI_PACKAGE}" -O "$${CI_DIR}/$${CI_PACKAGE}" + chmod +x "$${CI_DIR}/$${CI_PACKAGE}" + mv "$${CI_DIR}/$${CI_PACKAGE}" "$${KUBE_BINARY_DIR}/$${CI_PACKAGE}" + done + systemctl restart kubelet + IMAGE_REGISTRY_PREFIX=registry.k8s.io + # Kubernetes builds from 1.20 through 1.24 are tagged with k8s.gcr.io + if [[ "$${KUBERNETES_VERSION}" =~ ^v1\.(1[0-9]|2[0-4])[\.[0-9]+ ]]; then + IMAGE_REGISTRY_PREFIX=k8s.gcr.io + fi + for CI_CONTAINER in "$${CONTAINERS_TO_TEST[@]}"; do + echo "* downloading package: $${CI_URL}/$${CI_CONTAINER}.$${CONTAINER_EXT}" + wget "$${CI_URL}/$${CI_CONTAINER}.$${CONTAINER_EXT}" -O "$${CI_DIR}/$${CI_CONTAINER}.$${CONTAINER_EXT}" + $${SUDO} ctr -n k8s.io images import "$${CI_DIR}/$${CI_CONTAINER}.$${CONTAINER_EXT}" || echo "* ignoring expected 'ctr images import' result" + $${SUDO} ctr -n k8s.io images tag "$${IMAGE_REGISTRY_PREFIX}/$${CI_CONTAINER}-amd64:$${KUBERNETES_VERSION//+/_}" "$${IMAGE_REGISTRY_PREFIX}/$${CI_CONTAINER}:$${KUBERNETES_VERSION//+/_}" + $${SUDO} ctr -n k8s.io images tag "$${IMAGE_REGISTRY_PREFIX}/$${CI_CONTAINER}-amd64:$${KUBERNETES_VERSION//+/_}" "gcr.io/k8s-staging-ci-images/$${CI_CONTAINER}:$${KUBERNETES_VERSION//+/_}" + done + fi + echo "* checking binary versions" + echo "ctr version: " "$(ctr version)" + echo "kubeadm version: " "$(kubeadm version -o=short)" + echo "kubectl version: " "$(kubectl version --client=true)" + echo "kubelet version: " "$(kubelet --version)" + echo "$${LINE_SEPARATOR}" + + selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + - jsonPatches: + - op: add + path: /spec/template/spec/files/- + valueFrom: + template: | + owner: root:root + path: "/etc/pre-kubeadm-commands/20-k8s-install.sh" + permissions: "0755" + content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + + DISTRO="ubuntu" + KUBE_BINARY_DIR="/usr/bin" + + source /etc/lsb-release + if [[ "$${DISTRIB_ID}" == *Flatcar* ]]; then + # Overrides for flatcar + DISTRO="flatcar" + KUBE_BINARY_DIR="/opt/bin" + fi + + function retry { + attempt=0 + max_attempts=$${1} + interval=$${2} + shift; shift + until [[ $${attempt} -ge "$${max_attempts}" ]] ; do + attempt=$((attempt+1)) + set +e + eval "$*" && return || echo "failed $${attempt} times: $*" + set -e + sleep "$${interval}" + done + echo "error: reached max attempts at retry($*)" + return 1 + } + + [[ $(id -u) != 0 ]] && SUDO="sudo" || SUDO="" + + # This test installs release packages or binaries that are a result of the CI and release builds. + # It runs '... --version' commands to verify that the binaries are correctly installed + # and finally uninstalls the packages. + # For the release packages it tests all versions in the support skew. 
+ LINE_SEPARATOR="*************************************************" + echo "$${LINE_SEPARATOR}" + + ## Variables (replaced by JSON patching) + KUBERNETES_VERSION={{ .builtin.machineDeployment.version }} + ## + + # Note: We assume if kubectl has the right version, everything else has as well + if [[ $(kubectl version --client=true -o json | jq '.clientVersion.gitVersion' -r) = "$${KUBERNETES_VERSION}" ]]; then + echo "Detected Kubernetes $${KUBERNETES_VERSION} via kubectl version, nothing to do" + exit 0 + fi + + if [[ "$${KUBERNETES_VERSION}" != "" ]]; then + CI_DIR=/tmp/k8s-ci + mkdir -p "$${CI_DIR}" + declare -a PACKAGES_TO_TEST=("kubectl" "kubelet" "kubeadm") + # Let's just also download the control plane images for worker nodes. It's easier then optimizing it. + declare -a CONTAINERS_TO_TEST=("kube-apiserver" "kube-controller-manager" "kube-proxy" "kube-scheduler") + CONTAINER_EXT="tar" + echo "* testing version $${KUBERNETES_VERSION}" + CI_URL="https://dl.k8s.io/ci/$${KUBERNETES_VERSION}/bin/linux/amd64" + # Set CI_URL to the released binaries for actually released versions. + if [[ "$${KUBERNETES_VERSION}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]] || [[ "$${KUBERNETES_VERSION}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+-(beta|rc).[0-9]+$ ]]; then + CI_URL="https://dl.k8s.io/release/$${KUBERNETES_VERSION}/bin/linux/amd64" + fi + for CI_PACKAGE in "$${PACKAGES_TO_TEST[@]}"; do + # Browser: https://console.cloud.google.com/storage/browser/k8s-release-dev?project=k8s-release-dev + # e.g.: https://storage.googleapis.com/k8s-release-dev/ci/v1.21.0-beta.1.378+cf3374e43491c5/bin/linux/amd64/kubectl + echo "* downloading binary: $${CI_URL}/$${CI_PACKAGE}" + wget "$${CI_URL}/$${CI_PACKAGE}" -O "$${CI_DIR}/$${CI_PACKAGE}" + chmod +x "$${CI_DIR}/$${CI_PACKAGE}" + mv "$${CI_DIR}/$${CI_PACKAGE}" "$${KUBE_BINARY_DIR}/$${CI_PACKAGE}" + done + systemctl restart kubelet + IMAGE_REGISTRY_PREFIX=registry.k8s.io + # Kubernetes builds from 1.20 through 1.24 are tagged with k8s.gcr.io + if [[ "$${KUBERNETES_VERSION}" =~ ^v1\.(1[0-9]|2[0-4])[\.[0-9]+ ]]; then + IMAGE_REGISTRY_PREFIX=k8s.gcr.io + fi + for CI_CONTAINER in "$${CONTAINERS_TO_TEST[@]}"; do + echo "* downloading package: $${CI_URL}/$${CI_CONTAINER}.$${CONTAINER_EXT}" + wget "$${CI_URL}/$${CI_CONTAINER}.$${CONTAINER_EXT}" -O "$${CI_DIR}/$${CI_CONTAINER}.$${CONTAINER_EXT}" + $${SUDO} ctr -n k8s.io images import "$${CI_DIR}/$${CI_CONTAINER}.$${CONTAINER_EXT}" || echo "* ignoring expected 'ctr images import' result" + $${SUDO} ctr -n k8s.io images tag "$${IMAGE_REGISTRY_PREFIX}/$${CI_CONTAINER}-amd64:$${KUBERNETES_VERSION//+/_}" "$${IMAGE_REGISTRY_PREFIX}/$${CI_CONTAINER}:$${KUBERNETES_VERSION//+/_}" + $${SUDO} ctr -n k8s.io images tag "$${IMAGE_REGISTRY_PREFIX}/$${CI_CONTAINER}-amd64:$${KUBERNETES_VERSION//+/_}" "gcr.io/k8s-staging-ci-images/$${CI_CONTAINER}:$${KUBERNETES_VERSION//+/_}" + done + fi + echo "* checking binary versions" + echo "ctr version: " "$(ctr version)" + echo "kubeadm version: " "$(kubeadm version -o=short)" + echo "kubectl version: " "$(kubectl version --client=true)" + echo "kubelet version: " "$(kubelet --version)" + echo "$${LINE_SEPARATOR}" + + selector: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + matchResources: + machineDeploymentClass: + names: + - ${CLUSTER_CLASS_NAME}-worker + name: k8sInstallScript diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/clusterclass/patch-namingstrategy.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/clusterclass/patch-namingstrategy.yaml new file mode 
100644 index 0000000000..1877801c3c --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/clusterclass/patch-namingstrategy.yaml @@ -0,0 +1,8 @@ +- op: add + path: /spec/controlPlane/namingStrategy + value: + template: '{{ .cluster.name }}-cp-{{ .random }}' +- op: add + path: /spec/workers/machineDeployments/0/namingStrategy + value: + template: '{{ .cluster.name }}-md-{{ .machineDeployment.topologyName }}-{{ .random }}' diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/clusterclass/patch-prekubeadmscript.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/clusterclass/patch-prekubeadmscript.yaml new file mode 100644 index 0000000000..3345f5b6d2 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/clusterclass/patch-prekubeadmscript.yaml @@ -0,0 +1,45 @@ +- op: add + path: /spec/patches/- + value: + definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/kubeadmConfigSpec/files/- + valueFrom: + template: | + owner: root:root + path: "/etc/pre-kubeadm-commands/10-prekubeadmscript.sh" + permissions: "0755" + content: {{ printf "%q" (regexReplaceAll "(KUBERNETES_VERSION=.*)" .preKubeadmScript (printf "KUBERNETES_VERSION=%s" .builtin.controlPlane.version)) }} + selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + - jsonPatches: + - op: add + path: /spec/template/spec/files/- + valueFrom: + template: | + owner: root:root + path: "/etc/pre-kubeadm-commands/10-prekubeadmscript.sh" + permissions: "0755" + content: {{ printf "%q" (regexReplaceAll "(KUBERNETES_VERSION=.*)" .preKubeadmScript (printf "KUBERNETES_VERSION=%s" .builtin.machineDeployment.version)) }} + selector: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + matchResources: + machineDeploymentClass: + names: + - ${CLUSTER_CLASS_NAME}-worker + enabledIf: '{{ if .preKubeadmScript }}true{{ end }}' + name: preKubeadmScript +- op: add + path: /spec/variables/- + value: + name: preKubeadmScript + required: false + schema: + openAPIV3Schema: + type: string + description: Script to run in preKubeadmCommands. diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/clusterclass/patch-vsphere-template.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/clusterclass/patch-vsphere-template.yaml new file mode 100644 index 0000000000..0212e88f8a --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/clusterclass/patch-vsphere-template.yaml @@ -0,0 +1,49 @@ +- op: add + path: /spec/patches/- + value: + definitions: + - jsonPatches: + - op: replace + path: /spec/template/spec/template + valueFrom: + # We have to fall back to v1.30.0 for the conformance latest ci test which uses + # versions without corresponding templates like "v1.30.0-alpha.0.525+09a5049ca78502". 
+ template: |- + {{- if eq .builtin.controlPlane.version "v1.28.0" -}} + ubuntu-2204-kube-v1.28.0 + {{- else if eq .builtin.controlPlane.version "v1.29.0" -}} + ubuntu-2204-kube-v1.29.0 + {{- else if eq .builtin.controlPlane.version "v1.30.0" -}} + ubuntu-2204-kube-v1.30.0 + {{- else -}} + ubuntu-2404-kube-v1.31.0 + {{- end -}} + selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + matchResources: + controlPlane: true + - jsonPatches: + - op: replace + path: /spec/template/spec/template + valueFrom: + # We have to fall back to v1.30.0 for the conformance latest ci test which uses + # versions without corresponding templates like "v1.30.0-alpha.0.525+09a5049ca78502". + template: |- + {{- if eq .builtin.machineDeployment.version "v1.28.0" -}} + ubuntu-2204-kube-v1.28.0 + {{- else if eq .builtin.machineDeployment.version "v1.29.0" -}} + ubuntu-2204-kube-v1.29.0 + {{- else if eq .builtin.machineDeployment.version "v1.30.0" -}} + ubuntu-2204-kube-v1.30.0 + {{- else -}} + ubuntu-2404-kube-v1.31.0 + {{- end -}} + selector: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + matchResources: + machineDeploymentClass: + names: + - ${CLUSTER_CLASS_NAME}-worker + name: vSphereTemplate diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/commons/cluster-network-CIDR.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/commons/cluster-network-CIDR.yaml new file mode 100644 index 0000000000..24d0253cef --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/commons/cluster-network-CIDR.yaml @@ -0,0 +1,10 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +spec: + clusterNetwork: + pods: + cidrBlocks: + - 192.168.30.0/24 diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/commons/cluster-resource-set-label.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/commons/cluster-resource-set-label.yaml new file mode 100644 index 0000000000..1447050b04 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/commons/cluster-resource-set-label.yaml @@ -0,0 +1,7 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' + labels: + cni: "${CLUSTER_NAME}-crs-cni" diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/commons/cluster-resource-set.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/commons/cluster-resource-set.yaml new file mode 100644 index 0000000000..6507eed65e --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/commons/cluster-resource-set.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: "cni-${CLUSTER_NAME}-crs-cni" +data: ${CNI_RESOURCES} +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: "${CLUSTER_NAME}-crs-cni" +spec: + strategy: ApplyOnce + clusterSelector: + matchLabels: + cni: "${CLUSTER_NAME}-crs-cni" + resources: + - name: "cni-${CLUSTER_NAME}-crs-cni" + kind: ConfigMap diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/topology/cluster-template-topology.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/topology/cluster-template-topology.yaml new file mode 100644 index 0000000000..d4c548b0e4 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/topology/cluster-template-topology.yaml @@ -0,0 +1,1273 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + 
cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +spec: + topology: + class: '${CLUSTER_CLASS_NAME}' + controlPlane: + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + variables: + - name: sshKey + value: '${VSPHERE_SSH_AUTHORIZED_KEY}' + - name: kubeVipPodManifest + value: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_interface + value: ${VIP_NETWORK_INTERFACE:=""} + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: svc_enable + value: "true" + - name: svc_leasename + value: plndr-svcs-lock + - name: svc_election + value: "true" + - name: vip_leaderelection + value: "true" + - name: vip_leasename + value: plndr-cp-lock + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: address + value: ${CONTROL_PLANE_ENDPOINT_IP} + - name: prometheus_server + value: :2112 + image: ghcr.io/kube-vip/kube-vip:v0.6.4 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + - mountPath: /etc/hosts + name: etchosts + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + name: kubeconfig + - hostPath: + path: /etc/kube-vip.hosts + type: File + name: etchosts + status: {} + - name: controlPlaneIpAddr + value: ${CONTROL_PLANE_ENDPOINT_IP} + - name: controlPlanePort + value: ${CONTROL_PLANE_ENDPOINT_PORT:=6443} + - name: infraServer + value: + thumbprint: '${VSPHERE_TLS_THUMBPRINT}' + url: '${VSPHERE_SERVER}' + - name: credsSecretName + value: '${CLUSTER_NAME}' + version: '${KUBERNETES_VERSION}' + workers: + machineDeployments: + - class: ${CLUSTER_CLASS_NAME}-worker + metadata: {} + name: md-0 + replicas: ${WORKER_MACHINE_COUNT} +--- +apiVersion: v1 +kind: Secret +metadata: + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +stringData: + password: "${VSPHERE_PASSWORD}" + username: "${VSPHERE_USERNAME}" +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + labels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + name: ${CLUSTER_NAME}-crs-0 + namespace: '${NAMESPACE}' +spec: + clusterSelector: + matchLabels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + resources: + - kind: Secret + name: vsphere-config-secret + - kind: ConfigMap + name: csi-manifests + - kind: Secret + name: cloud-provider-vsphere-credentials + - kind: ConfigMap + name: cpi-manifests +--- +apiVersion: v1 +kind: Secret +metadata: + name: vsphere-config-secret + namespace: '${NAMESPACE}' +stringData: + data: |- + apiVersion: v1 + kind: Secret + metadata: + name: vsphere-config-secret + namespace: vmware-system-csi + stringData: + csi-vsphere.conf: |+ + [Global] + thumbprint = "${VSPHERE_TLS_THUMBPRINT}" + + [VirtualCenter "${VSPHERE_SERVER}"] + user = "${VSPHERE_USERNAME}" + password = "${VSPHERE_PASSWORD}" + datacenters = "${VSPHERE_DATACENTER}" + + [Network] + public-network = "${VSPHERE_NETWORK}" + + type: Opaque +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: |- + apiVersion: v1 + kind: Namespace + metadata: + name: vmware-system-csi + --- + apiVersion: 
storage.k8s.io/v1 + kind: CSIDriver + metadata: + name: csi.vsphere.vmware.com + spec: + attachRequired: true + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: vsphere-csi-controller + namespace: vmware-system-csi + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: vsphere-csi-controller-role + rules: + - apiGroups: + - "" + resources: + - nodes + - pods + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - persistentvolumeclaims/status + verbs: + - patch + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - create + - update + - delete + - patch + - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + - create + - update + - patch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - delete + - update + - create + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + - csinodes + verbs: + - get + - list + - watch + - apiGroups: + - storage.k8s.io + resources: + - volumeattachments + verbs: + - get + - list + - watch + - patch + - apiGroups: + - cns.vmware.com + resources: + - triggercsifullsyncs + verbs: + - create + - get + - update + - watch + - list + - apiGroups: + - cns.vmware.com + resources: + - cnsvspherevolumemigrations + verbs: + - create + - get + - list + - watch + - update + - delete + - apiGroups: + - cns.vmware.com + resources: + - cnsvolumeinfoes + verbs: + - create + - get + - list + - watch + - delete + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - create + - update + - apiGroups: + - storage.k8s.io + resources: + - volumeattachments/status + verbs: + - patch + - apiGroups: + - cns.vmware.com + resources: + - cnsvolumeoperationrequests + verbs: + - create + - get + - list + - update + - delete + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - get + - list + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotclasses + verbs: + - watch + - get + - list + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents + verbs: + - create + - get + - list + - watch + - update + - delete + - patch + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents/status + verbs: + - update + - patch + - apiGroups: + - cns.vmware.com + resources: + - csinodetopologies + verbs: + - get + - update + - watch + - list + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: vsphere-csi-controller-binding + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: vsphere-csi-controller-role + subjects: + - kind: ServiceAccount + name: vsphere-csi-controller + namespace: vmware-system-csi + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: vsphere-csi-node + namespace: vmware-system-csi + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: vsphere-csi-node-cluster-role + rules: + - apiGroups: + - cns.vmware.com + resources: + - csinodetopologies + verbs: + - create + - watch + - get + - patch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + 
metadata: + name: vsphere-csi-node-cluster-role-binding + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: vsphere-csi-node-cluster-role + subjects: + - kind: ServiceAccount + name: vsphere-csi-node + namespace: vmware-system-csi + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: vsphere-csi-node-role + namespace: vmware-system-csi + rules: + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: vsphere-csi-node-binding + namespace: vmware-system-csi + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: vsphere-csi-node-role + subjects: + - kind: ServiceAccount + name: vsphere-csi-node + namespace: vmware-system-csi + --- + apiVersion: v1 + data: + pv-to-backingdiskobjectid-mapping: "false" + trigger-csi-fullsync: "false" + kind: ConfigMap + metadata: + name: internal-feature-states.csi.vsphere.vmware.com + namespace: vmware-system-csi + --- + apiVersion: v1 + kind: Service + metadata: + labels: + app: vsphere-csi-controller + name: vsphere-csi-controller + namespace: vmware-system-csi + spec: + ports: + - name: ctlr + port: 2112 + protocol: TCP + targetPort: 2112 + - name: syncer + port: 2113 + protocol: TCP + targetPort: 2113 + selector: + app: vsphere-csi-controller + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: vsphere-csi-controller + namespace: vmware-system-csi + spec: + replicas: 1 + selector: + matchLabels: + app: vsphere-csi-controller + strategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + app: vsphere-csi-controller + role: vsphere-csi + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/controlplane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - vsphere-csi-controller + topologyKey: kubernetes.io/hostname + containers: + - args: + - --v=4 + - --timeout=300s + - --csi-address=$(ADDRESS) + - --leader-election + - --leader-election-lease-duration=120s + - --leader-election-renew-deadline=60s + - --leader-election-retry-period=30s + - --kube-api-qps=100 + - --kube-api-burst=100 + env: + - name: ADDRESS + value: /csi/csi.sock + image: registry.k8s.io/sig-storage/csi-attacher:v4.5.1 + name: csi-attacher + volumeMounts: + - mountPath: /csi + name: socket-dir + - args: + - --v=4 + - --timeout=300s + - --handle-volume-inuse-error=false + - --csi-address=$(ADDRESS) + - --kube-api-qps=100 + - --kube-api-burst=100 + - --leader-election + - --leader-election-lease-duration=120s + - --leader-election-renew-deadline=60s + - --leader-election-retry-period=30s + env: + - name: ADDRESS + value: /csi/csi.sock + image: registry.k8s.io/sig-storage/csi-resizer:v1.10.1 + name: csi-resizer + volumeMounts: + - mountPath: /csi + name: socket-dir + - args: + - --fss-name=internal-feature-states.csi.vsphere.vmware.com + - --fss-namespace=$(CSI_NAMESPACE) + env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: X_CSI_MODE + value: controller + - name: X_CSI_SPEC_DISABLE_LEN_CHECK + value: "true" + - name: X_CSI_SERIAL_VOL_ACCESS_TIMEOUT + 
value: 3m + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + - name: LOGGER_LEVEL + value: PRODUCTION + - name: INCLUSTER_CLIENT_QPS + value: "100" + - name: INCLUSTER_CLIENT_BURST + value: "100" + - name: CSI_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v3.3.0 + imagePullPolicy: Always + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + periodSeconds: 180 + timeoutSeconds: 10 + name: vsphere-csi-controller + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + - containerPort: 2112 + name: prometheus + protocol: TCP + securityContext: + runAsGroup: 65532 + runAsNonRoot: true + runAsUser: 65532 + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - mountPath: /csi + name: socket-dir + - args: + - --v=4 + - --csi-address=/csi/csi.sock + image: registry.k8s.io/sig-storage/livenessprobe:v2.12.0 + name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir + - args: + - --leader-election + - --leader-election-lease-duration=30s + - --leader-election-renew-deadline=20s + - --leader-election-retry-period=10s + - --fss-name=internal-feature-states.csi.vsphere.vmware.com + - --fss-namespace=$(CSI_NAMESPACE) + env: + - name: FULL_SYNC_INTERVAL_MINUTES + value: "30" + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + - name: LOGGER_LEVEL + value: PRODUCTION + - name: INCLUSTER_CLIENT_QPS + value: "100" + - name: INCLUSTER_CLIENT_BURST + value: "100" + - name: CSI_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v3.3.0 + imagePullPolicy: Always + name: vsphere-syncer + ports: + - containerPort: 2113 + name: prometheus + protocol: TCP + securityContext: + runAsGroup: 65532 + runAsNonRoot: true + runAsUser: 65532 + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - args: + - --v=4 + - --timeout=300s + - --csi-address=$(ADDRESS) + - --kube-api-qps=100 + - --kube-api-burst=100 + - --leader-election + - --leader-election-lease-duration=120s + - --leader-election-renew-deadline=60s + - --leader-election-retry-period=30s + - --default-fstype=ext4 + env: + - name: ADDRESS + value: /csi/csi.sock + image: registry.k8s.io/sig-storage/csi-provisioner:v4.0.1 + name: csi-provisioner + volumeMounts: + - mountPath: /csi + name: socket-dir + - args: + - --v=4 + - --kube-api-qps=100 + - --kube-api-burst=100 + - --timeout=300s + - --csi-address=$(ADDRESS) + - --leader-election + - --leader-election-lease-duration=120s + - --leader-election-renew-deadline=60s + - --leader-election-retry-period=30s + env: + - name: ADDRESS + value: /csi/csi.sock + image: registry.k8s.io/sig-storage/csi-snapshotter:v7.0.2 + name: csi-snapshotter + volumeMounts: + - mountPath: /csi + name: socket-dir + dnsPolicy: Default + priorityClassName: system-cluster-critical + serviceAccountName: vsphere-csi-controller + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + volumes: + - name: vsphere-config-volume + secret: + secretName: vsphere-config-secret + - emptyDir: {} + name: socket-dir + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: vsphere-csi-node + namespace: vmware-system-csi + spec: + selector: + matchLabels: + app: vsphere-csi-node + template: 
+ metadata: + labels: + app: vsphere-csi-node + role: vsphere-csi + spec: + containers: + - args: + - --v=5 + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock + image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.1 + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - --kubelet-registration-path=/var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock + - --mode=kubelet-registration-probe + initialDelaySeconds: 3 + name: node-driver-registrar + volumeMounts: + - mountPath: /csi + name: plugin-dir + - mountPath: /registration + name: registration-dir + - args: + - --fss-name=internal-feature-states.csi.vsphere.vmware.com + - --fss-namespace=$(CSI_NAMESPACE) + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: MAX_VOLUMES_PER_NODE + value: "59" + - name: X_CSI_MODE + value: node + - name: X_CSI_SPEC_REQ_VALIDATION + value: "false" + - name: X_CSI_SPEC_DISABLE_LEN_CHECK + value: "true" + - name: LOGGER_LEVEL + value: PRODUCTION + - name: CSI_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: NODEGETINFO_WATCH_TIMEOUT_MINUTES + value: "1" + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v3.3.0 + imagePullPolicy: Always + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 5 + name: vsphere-csi-node + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + securityContext: + allowPrivilegeEscalation: true + capabilities: + add: + - SYS_ADMIN + privileged: true + volumeMounts: + - mountPath: /csi + name: plugin-dir + - mountPath: /var/lib/kubelet + mountPropagation: Bidirectional + name: pods-mount-dir + - mountPath: /dev + name: device-dir + - mountPath: /sys/block + name: blocks-dir + - mountPath: /sys/devices + name: sys-devices-dir + - args: + - --v=4 + - --csi-address=/csi/csi.sock + image: registry.k8s.io/sig-storage/livenessprobe:v2.12.0 + name: liveness-probe + volumeMounts: + - mountPath: /csi + name: plugin-dir + dnsPolicy: ClusterFirstWithHostNet + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + serviceAccountName: vsphere-csi-node + tolerations: + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists + volumes: + - hostPath: + path: /var/lib/kubelet/plugins_registry + type: Directory + name: registration-dir + - hostPath: + path: /var/lib/kubelet/plugins/csi.vsphere.vmware.com + type: DirectoryOrCreate + name: plugin-dir + - hostPath: + path: /var/lib/kubelet + type: Directory + name: pods-mount-dir + - hostPath: + path: /dev + name: device-dir + - hostPath: + path: /sys/block + type: Directory + name: blocks-dir + - hostPath: + path: /sys/devices + type: Directory + name: sys-devices-dir + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: vsphere-csi-node-windows + namespace: vmware-system-csi + spec: + selector: + matchLabels: + app: vsphere-csi-node-windows + template: + metadata: + labels: + app: vsphere-csi-node-windows + role: vsphere-csi-windows + spec: + containers: + - args: + - --v=5 + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + env: + 
- name: ADDRESS + value: unix://C:\\csi\\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: C:\\var\\lib\\kubelet\\plugins\\csi.vsphere.vmware.com\\csi.sock + image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.0 + livenessProbe: + exec: + command: + - /csi-node-driver-registrar.exe + - --kubelet-registration-path=C:\\var\\lib\\kubelet\\plugins\\csi.vsphere.vmware.com\\csi.sock + - --mode=kubelet-registration-probe + initialDelaySeconds: 3 + name: node-driver-registrar + volumeMounts: + - mountPath: /csi + name: plugin-dir + - mountPath: /registration + name: registration-dir + - args: + - --fss-name=internal-feature-states.csi.vsphere.vmware.com + - --fss-namespace=$(CSI_NAMESPACE) + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + - name: MAX_VOLUMES_PER_NODE + value: "59" + - name: X_CSI_MODE + value: node + - name: X_CSI_SPEC_REQ_VALIDATION + value: "false" + - name: X_CSI_SPEC_DISABLE_LEN_CHECK + value: "true" + - name: LOGGER_LEVEL + value: PRODUCTION + - name: X_CSI_LOG_LEVEL + value: DEBUG + - name: CSI_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: NODEGETINFO_WATCH_TIMEOUT_MINUTES + value: "1" + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v3.3.0 + imagePullPolicy: Always + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 5 + name: vsphere-csi-node + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + volumeMounts: + - mountPath: C:\csi + name: plugin-dir + - mountPath: C:\var\lib\kubelet + name: pods-mount-dir + - mountPath: \\.\pipe\csi-proxy-volume-v1 + name: csi-proxy-volume-v1 + - mountPath: \\.\pipe\csi-proxy-filesystem-v1 + name: csi-proxy-filesystem-v1 + - mountPath: \\.\pipe\csi-proxy-disk-v1 + name: csi-proxy-disk-v1 + - mountPath: \\.\pipe\csi-proxy-system-v1alpha1 + name: csi-proxy-system-v1alpha1 + - args: + - --v=4 + - --csi-address=/csi/csi.sock + image: registry.k8s.io/sig-storage/livenessprobe:v2.12.0 + name: liveness-probe + volumeMounts: + - mountPath: /csi + name: plugin-dir + nodeSelector: + kubernetes.io/os: windows + priorityClassName: system-node-critical + serviceAccountName: vsphere-csi-node + tolerations: + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists + volumes: + - hostPath: + path: C:\var\lib\kubelet\plugins_registry\ + type: Directory + name: registration-dir + - hostPath: + path: C:\var\lib\kubelet\plugins\csi.vsphere.vmware.com\ + type: DirectoryOrCreate + name: plugin-dir + - hostPath: + path: \var\lib\kubelet + type: Directory + name: pods-mount-dir + - hostPath: + path: \\.\pipe\csi-proxy-disk-v1 + type: "" + name: csi-proxy-disk-v1 + - hostPath: + path: \\.\pipe\csi-proxy-volume-v1 + type: "" + name: csi-proxy-volume-v1 + - hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1 + type: "" + name: csi-proxy-filesystem-v1 + - hostPath: + path: \\.\pipe\csi-proxy-system-v1alpha1 + type: "" + name: csi-proxy-system-v1alpha1 + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate +kind: ConfigMap +metadata: + name: csi-manifests + namespace: '${NAMESPACE}' +--- +apiVersion: v1 +kind: Secret +metadata: + name: cloud-provider-vsphere-credentials + namespace: '${NAMESPACE}' +stringData: + data: |- + apiVersion: v1 + kind: Secret + metadata: + labels: + component: cloud-controller-manager + vsphere-cpi-infra: secret + name: 
cloud-provider-vsphere-credentials + namespace: kube-system + stringData: + ${VSPHERE_SERVER}.password: "${VSPHERE_PASSWORD}" + ${VSPHERE_SERVER}.username: "${VSPHERE_USERNAME}" + type: Opaque +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: |- + --- + # Source: vsphere-cpi/templates/service-account.yaml + apiVersion: v1 + kind: ServiceAccount + metadata: + name: cloud-controller-manager + labels: + app: vsphere-cpi + vsphere-cpi-infra: service-account + component: cloud-controller-manager + namespace: kube-system + --- + # Source: vsphere-cpi/templates/role.yaml + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: cloud-controller-manager + labels: + app: vsphere-cpi + vsphere-cpi-infra: role + component: cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - "*" + - apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - services + verbs: + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - services/status + verbs: + - patch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - update + - watch + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - "coordination.k8s.io" + resources: + - leases + verbs: + - create + - get + - list + - watch + - update + --- + # Source: vsphere-cpi/templates/daemonset.yaml + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: vsphere-cpi + labels: + app: vsphere-cpi + vsphere-cpi-infra: daemonset + component: cloud-controller-manager + tier: control-plane + namespace: kube-system + annotations: + spec: + selector: + matchLabels: + app: vsphere-cpi + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app: vsphere-cpi + component: cloud-controller-manager + tier: control-plane + release: release-name + vsphere-cpi-infra: daemonset + spec: + tolerations: + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + - effect: NoExecute + key: CriticalAddonsOnly + operator: Exists + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + securityContext: + fsGroup: 1001 + runAsUser: 1001 + serviceAccountName: cloud-controller-manager + hostNetwork: true + dnsPolicy: ClusterFirst + priorityClassName: system-node-critical + containers: + - name: vsphere-cpi + image: registry.k8s.io/cloud-pv-vsphere/cloud-provider-vsphere:${CPI_IMAGE_K8S_VERSION} + imagePullPolicy: IfNotPresent + args: + - --cloud-provider=vsphere + - --v=2 + - --cloud-config=/etc/cloud/vsphere.conf + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + volumes: + - name: vsphere-config-volume + configMap: + name: cloud-config + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + labels: + app: vsphere-cpi + component: 
cloud-controller-manager + vsphere-cpi-infra: role-binding + name: servicecatalog.k8s.io:apiserver-authentication-reader + namespace: kube-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader + subjects: + - apiGroup: "" + kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - apiGroup: "" + kind: User + name: cloud-controller-manager + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app: vsphere-cpi + component: cloud-controller-manager + vsphere-cpi-infra: cluster-role-binding + name: cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager + --- + apiVersion: v1 + data: + vsphere.conf: | + global: + port: 443 + secretName: cloud-provider-vsphere-credentials + secretNamespace: kube-system + thumbprint: '${VSPHERE_TLS_THUMBPRINT}' + vcenter: + ${VSPHERE_SERVER}: + datacenters: + - '${VSPHERE_DATACENTER}' + server: '${VSPHERE_SERVER}' + kind: ConfigMap + metadata: + name: cloud-config + namespace: kube-system +kind: ConfigMap +metadata: + name: cpi-manifests + namespace: '${NAMESPACE}' \ No newline at end of file diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/topology/kustomization.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/topology/kustomization.yaml new file mode 100644 index 0000000000..1e265e778b --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/topology/kustomization.yaml @@ -0,0 +1,8 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - cluster-template-topology.yaml + - ../commons/cluster-resource-set.yaml +patchesStrategicMerge: + - ../commons/cluster-resource-set-label.yaml + - ../commons/cluster-network-CIDR.yaml diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/workload/kustomization.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/workload/kustomization.yaml new file mode 100644 index 0000000000..e191dff830 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/workload/kustomization.yaml @@ -0,0 +1,8 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- ../topology +patches: + - target: + kind: Cluster + path: workload-control-plane-endpoint-ip.yaml diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/workload/workload-control-plane-endpoint-ip.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/workload/workload-control-plane-endpoint-ip.yaml new file mode 100644 index 0000000000..f36926df68 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-govmomi/v1.11/workload/workload-control-plane-endpoint-ip.yaml @@ -0,0 +1,5 @@ +- op: replace + path: /spec/topology/variables/2 + value: + name: controlPlaneIpAddr + value: "${WORKLOAD_CONTROL_PLANE_ENDPOINT_IP}" diff --git a/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/clusterclass/clusterclass-quick-start-supervisor.yaml b/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/clusterclass/clusterclass-quick-start-supervisor.yaml new file mode 100644 index 0000000000..10561ef024 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/clusterclass/clusterclass-quick-start-supervisor.yaml @@ -0,0 +1,324 @@ +apiVersion: vmware.infrastructure.cluster.x-k8s.io/v1beta1 +kind: 
VSphereClusterTemplate +metadata: + name: '${CLUSTER_CLASS_NAME}' + namespace: '${NAMESPACE}' +spec: + template: + spec: {} +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: ClusterClass +metadata: + name: '${CLUSTER_CLASS_NAME}' +spec: + controlPlane: + machineInfrastructure: + ref: + apiVersion: vmware.infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: ${CLUSTER_CLASS_NAME}-template + namespace: '${NAMESPACE}' + ref: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + name: ${CLUSTER_CLASS_NAME}-controlplane + namespace: '${NAMESPACE}' + infrastructure: + ref: + apiVersion: vmware.infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereClusterTemplate + name: '${CLUSTER_CLASS_NAME}' + namespace: '${NAMESPACE}' + patches: + - definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/kubeadmConfigSpec/files + value: [] + - op: add + path: /spec/template/spec/kubeadmConfigSpec/postKubeadmCommands + value: [] + selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + - jsonPatches: + - op: add + path: /spec/template/spec/files + value: [] + - op: add + path: /spec/template/spec/postKubeadmCommands + value: [] + selector: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + matchResources: + machineDeploymentClass: + names: + - ${CLUSTER_CLASS_NAME}-worker + name: createEmptyArrays + - definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/kubeadmConfigSpec/users + valueFrom: + template: | + - name: capv + sshAuthorizedKeys: + - '{{ .sshKey }}' + sudo: ALL=(ALL) NOPASSWD:ALL + selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + - jsonPatches: + - op: add + path: /spec/template/spec/users + valueFrom: + template: | + - name: capv + sshAuthorizedKeys: + - '{{ .sshKey }}' + sudo: ALL=(ALL) NOPASSWD:ALL + selector: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + matchResources: + machineDeploymentClass: + names: + - ${CLUSTER_CLASS_NAME}-worker + enabledIf: '{{ if .sshKey }}true{{end}}' + name: enableSSHIntoNodes + - definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/controlPlaneEndpoint + valueFrom: + template: | + host: '{{ .controlPlaneIpAddr }}' + port: {{ .controlPlanePort }} + selector: + apiVersion: vmware.infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereClusterTemplate + matchResources: + infrastructureCluster: true + name: infraClusterSubstitutions + - definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/kubeadmConfigSpec/files/- + valueFrom: + template: |- + owner: "root:root" + path: "/etc/kubernetes/manifests/kube-vip.yaml" + content: {{ printf "%q" (regexReplaceAll "(name: address\n +value:).*" .kubeVipPodManifest (printf "$1 %s" .controlPlaneIpAddr)) }} + permissions: "0644" + - op: add + path: /spec/template/spec/kubeadmConfigSpec/files/- + valueFrom: + template: | + content: 127.0.0.1 localhost kubernetes + owner: root:root + path: /etc/kube-vip.hosts + permissions: "0644" + - op: add + path: /spec/template/spec/kubeadmConfigSpec/files/- + valueFrom: + template: | + content: | + #!/bin/bash + + # Copyright 2020 The Kubernetes Authors. + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. 
+ # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + + set -e + + # Configure the workaround required for kubeadm init with kube-vip: + # xref: https://github.com/kube-vip/kube-vip/issues/684 + + # Nothing to do for kubernetes < v1.29 + KUBEADM_MINOR="$(kubeadm version -o short | cut -d '.' -f 2)" + if [[ "$KUBEADM_MINOR" -lt "29" ]]; then + exit 0 + fi + + IS_KUBEADM_INIT="false" + + # cloud-init kubeadm init + if [[ -f /run/kubeadm/kubeadm.yaml ]]; then + IS_KUBEADM_INIT="true" + fi + + # ignition kubeadm init + if [[ -f /etc/kubeadm.sh ]] && grep -q -e "kubeadm init" /etc/kubeadm.sh; then + IS_KUBEADM_INIT="true" + fi + + if [[ "$IS_KUBEADM_INIT" == "true" ]]; then + sed -i 's#path: /etc/kubernetes/admin.conf#path: /etc/kubernetes/super-admin.conf#' \ + /etc/kubernetes/manifests/kube-vip.yaml + fi + owner: root:root + path: /etc/pre-kubeadm-commands/50-kube-vip-prepare.sh + permissions: "0700" + selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + name: kubeVipPodManifest + variables: + - metadata: {} + name: sshKey + required: false + schema: + openAPIV3Schema: + description: Public key to SSH onto the cluster nodes. + type: string + - metadata: {} + name: controlPlaneIpAddr + required: true + schema: + openAPIV3Schema: + description: Floating VIP for the control plane. + type: string + - metadata: {} + name: controlPlanePort + required: true + schema: + openAPIV3Schema: + description: Port for the control plane endpoint. + type: integer + - metadata: {} + name: kubeVipPodManifest + required: true + schema: + openAPIV3Schema: + description: kube-vip manifest for the control plane. 
+ type: string + workers: + machineDeployments: + - class: ${CLUSTER_CLASS_NAME}-worker + template: + bootstrap: + ref: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: ${CLUSTER_CLASS_NAME}-worker-bootstrap-template + namespace: '${NAMESPACE}' + infrastructure: + ref: + apiVersion: vmware.infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + name: ${CLUSTER_CLASS_NAME}-worker-machinetemplate + namespace: '${NAMESPACE}' + metadata: {} +--- +apiVersion: vmware.infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: ${CLUSTER_CLASS_NAME}-template + namespace: '${NAMESPACE}' +spec: + template: + spec: + className: ${VSPHERE_MACHINE_CLASS_NAME} + imageName: ${VSPHERE_IMAGE_NAME} + powerOffMode: ${VSPHERE_POWER_OFF_MODE:=trySoft} + storageClass: ${VSPHERE_STORAGE_CLASS} +--- +apiVersion: vmware.infrastructure.cluster.x-k8s.io/v1beta1 +kind: VSphereMachineTemplate +metadata: + name: ${CLUSTER_CLASS_NAME}-worker-machinetemplate + namespace: '${NAMESPACE}' +spec: + template: + spec: + className: ${VSPHERE_MACHINE_CLASS_NAME} + imageName: ${VSPHERE_IMAGE_NAME} + powerOffMode: ${VSPHERE_POWER_OFF_MODE:=trySoft} + storageClass: ${VSPHERE_STORAGE_CLASS} +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: KubeadmControlPlaneTemplate +metadata: + name: ${CLUSTER_CLASS_NAME}-controlplane + namespace: '${NAMESPACE}' +spec: + template: + spec: + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + extraArgs: + cloud-provider: external + controllerManager: + extraArgs: + cloud-provider: external + initConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + name: '{{ local_hostname }}' + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + name: '{{ local_hostname }}' + preKubeadmCommands: + - dhclient eth0 + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback localhost6 localhost6.localdomain6" + >/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }} {{ local_hostname }} localhost + localhost.localdomain localhost4 localhost4.localdomain4" >>/etc/hosts + - mkdir -p /etc/pre-kubeadm-commands + - for script in $(find /etc/pre-kubeadm-commands/ -name '*.sh' -type f | sort); + do echo "Running script $script"; "$script"; done + users: + - name: capv + sshAuthorizedKeys: + - '${VSPHERE_SSH_AUTHORIZED_KEY}' + sudo: ALL=(ALL) NOPASSWD:ALL +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: ${CLUSTER_CLASS_NAME}-worker-bootstrap-template + namespace: '${NAMESPACE}' +spec: + template: + spec: + joinConfiguration: + nodeRegistration: + criSocket: /var/run/containerd/containerd.sock + kubeletExtraArgs: + cloud-provider: external + name: '{{ local_hostname }}' + preKubeadmCommands: + - dhclient eth0 + - hostnamectl set-hostname "{{ ds.meta_data.hostname }}" + - echo "::1 ipv6-localhost ipv6-loopback localhost6 localhost6.localdomain6" + >/etc/hosts + - echo "127.0.0.1 {{ ds.meta_data.hostname }} {{ local_hostname }} localhost + localhost.localdomain localhost4 localhost4.localdomain4" >>/etc/hosts + - mkdir -p /etc/pre-kubeadm-commands + - for script in $(find /etc/pre-kubeadm-commands/ -name '*.sh' -type f | sort); + do echo "Running script $script"; "$script"; done \ No newline at end of file diff --git 
a/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/clusterclass/kustomization.yaml b/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/clusterclass/kustomization.yaml new file mode 100644 index 0000000000..6858997f52 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/clusterclass/kustomization.yaml @@ -0,0 +1,20 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- ./clusterclass-quick-start-supervisor.yaml +patches: + - target: + kind: ClusterClass + path: ./patch-vsphere-template.yaml + - target: + kind: ClusterClass + path: ./patch-prekubeadmscript.yaml + - target: + kind: ClusterClass + path: ./patch-k8s-install-script.yaml + - target: + kind: ClusterClass + path: ./patch-namingstrategy.yaml + - target: + kind: VSphereMachineTemplate + path: ./patch-vm-namingstrategy.yaml diff --git a/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/clusterclass/patch-k8s-install-script.yaml b/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/clusterclass/patch-k8s-install-script.yaml new file mode 100644 index 0000000000..51d92c895f --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/clusterclass/patch-k8s-install-script.yaml @@ -0,0 +1,221 @@ +- op: add + path: /spec/patches/- + value: + definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/kubeadmConfigSpec/files/- + valueFrom: + template: | + owner: root:root + path: "/etc/pre-kubeadm-commands/20-k8s-install.sh" + permissions: "0755" + content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + + DISTRO="ubuntu" + KUBE_BINARY_DIR="/usr/bin" + + source /etc/lsb-release + if [[ "$${DISTRIB_ID}" == *Flatcar* ]]; then + # Overrides for flatcar + DISTRO="flatcar" + KUBE_BINARY_DIR="/opt/bin" + fi + + function retry { + attempt=0 + max_attempts=$${1} + interval=$${2} + shift; shift + until [[ $${attempt} -ge "$${max_attempts}" ]] ; do + attempt=$((attempt+1)) + set +e + eval "$*" && return || echo "failed $${attempt} times: $*" + set -e + sleep "$${interval}" + done + echo "error: reached max attempts at retry($*)" + return 1 + } + + [[ $(id -u) != 0 ]] && SUDO="sudo" || SUDO="" + + # This test installs release packages or binaries that are a result of the CI and release builds. + # It runs '... --version' commands to verify that the binaries are correctly installed + # and finally uninstalls the packages. + # For the release packages it tests all versions in the support skew. + LINE_SEPARATOR="*************************************************" + echo "$${LINE_SEPARATOR}" + + ## Variables (replaced by JSON patching) + KUBERNETES_VERSION={{ .builtin.controlPlane.version }} + ## + + # Note: We assume if kubectl has the right version, everything else has as well + if [[ $(kubectl version --client=true -o json | jq '.clientVersion.gitVersion' -r) = "$${KUBERNETES_VERSION}" ]]; then + echo "Detected Kubernetes $${KUBERNETES_VERSION} via kubectl version, nothing to do" + exit 0 + fi + + if [[ "$${KUBERNETES_VERSION}" != "" ]]; then + CI_DIR=/tmp/k8s-ci + mkdir -p "$${CI_DIR}" + declare -a PACKAGES_TO_TEST=("kubectl" "kubelet" "kubeadm") + # Let's just also download the control plane images for worker nodes. It's easier then optimizing it. 
+ declare -a CONTAINERS_TO_TEST=("kube-apiserver" "kube-controller-manager" "kube-proxy" "kube-scheduler") + CONTAINER_EXT="tar" + echo "* testing version $${KUBERNETES_VERSION}" + CI_URL="https://dl.k8s.io/ci/$${KUBERNETES_VERSION}/bin/linux/amd64" + # Set CI_URL to the released binaries for actually released versions. + if [[ "$${KUBERNETES_VERSION}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]] || [[ "$${KUBERNETES_VERSION}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+-(beta|rc).[0-9]+$ ]]; then + CI_URL="https://dl.k8s.io/release/$${KUBERNETES_VERSION}/bin/linux/amd64" + fi + for CI_PACKAGE in "$${PACKAGES_TO_TEST[@]}"; do + # Browser: https://console.cloud.google.com/storage/browser/k8s-release-dev?project=k8s-release-dev + # e.g.: https://storage.googleapis.com/k8s-release-dev/ci/v1.21.0-beta.1.378+cf3374e43491c5/bin/linux/amd64/kubectl + echo "* downloading binary: $${CI_URL}/$${CI_PACKAGE}" + wget "$${CI_URL}/$${CI_PACKAGE}" -O "$${CI_DIR}/$${CI_PACKAGE}" + chmod +x "$${CI_DIR}/$${CI_PACKAGE}" + mv "$${CI_DIR}/$${CI_PACKAGE}" "$${KUBE_BINARY_DIR}/$${CI_PACKAGE}" + done + systemctl restart kubelet + IMAGE_REGISTRY_PREFIX=registry.k8s.io + # Kubernetes builds from 1.20 through 1.24 are tagged with k8s.gcr.io + if [[ "$${KUBERNETES_VERSION}" =~ ^v1\.(1[0-9]|2[0-4])[\.[0-9]+ ]]; then + IMAGE_REGISTRY_PREFIX=k8s.gcr.io + fi + for CI_CONTAINER in "$${CONTAINERS_TO_TEST[@]}"; do + echo "* downloading package: $${CI_URL}/$${CI_CONTAINER}.$${CONTAINER_EXT}" + wget "$${CI_URL}/$${CI_CONTAINER}.$${CONTAINER_EXT}" -O "$${CI_DIR}/$${CI_CONTAINER}.$${CONTAINER_EXT}" + $${SUDO} ctr -n k8s.io images import "$${CI_DIR}/$${CI_CONTAINER}.$${CONTAINER_EXT}" || echo "* ignoring expected 'ctr images import' result" + $${SUDO} ctr -n k8s.io images tag "$${IMAGE_REGISTRY_PREFIX}/$${CI_CONTAINER}-amd64:$${KUBERNETES_VERSION//+/_}" "$${IMAGE_REGISTRY_PREFIX}/$${CI_CONTAINER}:$${KUBERNETES_VERSION//+/_}" + $${SUDO} ctr -n k8s.io images tag "$${IMAGE_REGISTRY_PREFIX}/$${CI_CONTAINER}-amd64:$${KUBERNETES_VERSION//+/_}" "gcr.io/k8s-staging-ci-images/$${CI_CONTAINER}:$${KUBERNETES_VERSION//+/_}" + done + fi + echo "* checking binary versions" + echo "ctr version: " "$(ctr version)" + echo "kubeadm version: " "$(kubeadm version -o=short)" + echo "kubectl version: " "$(kubectl version --client=true)" + echo "kubelet version: " "$(kubelet --version)" + echo "$${LINE_SEPARATOR}" + + selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + - jsonPatches: + - op: add + path: /spec/template/spec/files/- + valueFrom: + template: | + owner: root:root + path: "/etc/pre-kubeadm-commands/20-k8s-install.sh" + permissions: "0755" + content: | + #!/bin/bash + + set -o nounset + set -o pipefail + set -o errexit + + DISTRO="ubuntu" + KUBE_BINARY_DIR="/usr/bin" + + source /etc/lsb-release + if [[ "$${DISTRIB_ID}" == *Flatcar* ]]; then + # Overrides for flatcar + DISTRO="flatcar" + KUBE_BINARY_DIR="/opt/bin" + fi + + function retry { + attempt=0 + max_attempts=$${1} + interval=$${2} + shift; shift + until [[ $${attempt} -ge "$${max_attempts}" ]] ; do + attempt=$((attempt+1)) + set +e + eval "$*" && return || echo "failed $${attempt} times: $*" + set -e + sleep "$${interval}" + done + echo "error: reached max attempts at retry($*)" + return 1 + } + + [[ $(id -u) != 0 ]] && SUDO="sudo" || SUDO="" + + # This test installs release packages or binaries that are a result of the CI and release builds. + # It runs '... 
--version' commands to verify that the binaries are correctly installed + # and finally uninstalls the packages. + # For the release packages it tests all versions in the support skew. + LINE_SEPARATOR="*************************************************" + echo "$${LINE_SEPARATOR}" + + ## Variables (replaced by JSON patching) + KUBERNETES_VERSION={{ .builtin.machineDeployment.version }} + ## + + # Note: We assume if kubectl has the right version, everything else has as well + if [[ $(kubectl version --client=true -o json | jq '.clientVersion.gitVersion' -r) = "$${KUBERNETES_VERSION}" ]]; then + echo "Detected Kubernetes $${KUBERNETES_VERSION} via kubectl version, nothing to do" + exit 0 + fi + + if [[ "$${KUBERNETES_VERSION}" != "" ]]; then + CI_DIR=/tmp/k8s-ci + mkdir -p "$${CI_DIR}" + declare -a PACKAGES_TO_TEST=("kubectl" "kubelet" "kubeadm") + # Let's just also download the control plane images for worker nodes. It's easier then optimizing it. + declare -a CONTAINERS_TO_TEST=("kube-apiserver" "kube-controller-manager" "kube-proxy" "kube-scheduler") + CONTAINER_EXT="tar" + echo "* testing version $${KUBERNETES_VERSION}" + CI_URL="https://dl.k8s.io/ci/$${KUBERNETES_VERSION}/bin/linux/amd64" + # Set CI_URL to the released binaries for actually released versions. + if [[ "$${KUBERNETES_VERSION}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]] || [[ "$${KUBERNETES_VERSION}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+-(beta|rc).[0-9]+$ ]]; then + CI_URL="https://dl.k8s.io/release/$${KUBERNETES_VERSION}/bin/linux/amd64" + fi + for CI_PACKAGE in "$${PACKAGES_TO_TEST[@]}"; do + # Browser: https://console.cloud.google.com/storage/browser/k8s-release-dev?project=k8s-release-dev + # e.g.: https://storage.googleapis.com/k8s-release-dev/ci/v1.21.0-beta.1.378+cf3374e43491c5/bin/linux/amd64/kubectl + echo "* downloading binary: $${CI_URL}/$${CI_PACKAGE}" + wget "$${CI_URL}/$${CI_PACKAGE}" -O "$${CI_DIR}/$${CI_PACKAGE}" + chmod +x "$${CI_DIR}/$${CI_PACKAGE}" + mv "$${CI_DIR}/$${CI_PACKAGE}" "$${KUBE_BINARY_DIR}/$${CI_PACKAGE}" + done + systemctl restart kubelet + IMAGE_REGISTRY_PREFIX=registry.k8s.io + # Kubernetes builds from 1.20 through 1.24 are tagged with k8s.gcr.io + if [[ "$${KUBERNETES_VERSION}" =~ ^v1\.(1[0-9]|2[0-4])[\.[0-9]+ ]]; then + IMAGE_REGISTRY_PREFIX=k8s.gcr.io + fi + for CI_CONTAINER in "$${CONTAINERS_TO_TEST[@]}"; do + echo "* downloading package: $${CI_URL}/$${CI_CONTAINER}.$${CONTAINER_EXT}" + wget "$${CI_URL}/$${CI_CONTAINER}.$${CONTAINER_EXT}" -O "$${CI_DIR}/$${CI_CONTAINER}.$${CONTAINER_EXT}" + $${SUDO} ctr -n k8s.io images import "$${CI_DIR}/$${CI_CONTAINER}.$${CONTAINER_EXT}" || echo "* ignoring expected 'ctr images import' result" + $${SUDO} ctr -n k8s.io images tag "$${IMAGE_REGISTRY_PREFIX}/$${CI_CONTAINER}-amd64:$${KUBERNETES_VERSION//+/_}" "$${IMAGE_REGISTRY_PREFIX}/$${CI_CONTAINER}:$${KUBERNETES_VERSION//+/_}" + $${SUDO} ctr -n k8s.io images tag "$${IMAGE_REGISTRY_PREFIX}/$${CI_CONTAINER}-amd64:$${KUBERNETES_VERSION//+/_}" "gcr.io/k8s-staging-ci-images/$${CI_CONTAINER}:$${KUBERNETES_VERSION//+/_}" + done + fi + echo "* checking binary versions" + echo "ctr version: " "$(ctr version)" + echo "kubeadm version: " "$(kubeadm version -o=short)" + echo "kubectl version: " "$(kubectl version --client=true)" + echo "kubelet version: " "$(kubelet --version)" + echo "$${LINE_SEPARATOR}" + + selector: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + matchResources: + machineDeploymentClass: + names: + - ${CLUSTER_CLASS_NAME}-worker + name: k8sInstallScript diff --git 
a/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/clusterclass/patch-namingstrategy.yaml b/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/clusterclass/patch-namingstrategy.yaml new file mode 100644 index 0000000000..1877801c3c --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/clusterclass/patch-namingstrategy.yaml @@ -0,0 +1,8 @@ +- op: add + path: /spec/controlPlane/namingStrategy + value: + template: '{{ .cluster.name }}-cp-{{ .random }}' +- op: add + path: /spec/workers/machineDeployments/0/namingStrategy + value: + template: '{{ .cluster.name }}-md-{{ .machineDeployment.topologyName }}-{{ .random }}' diff --git a/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/clusterclass/patch-prekubeadmscript.yaml b/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/clusterclass/patch-prekubeadmscript.yaml new file mode 100644 index 0000000000..3345f5b6d2 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/clusterclass/patch-prekubeadmscript.yaml @@ -0,0 +1,45 @@ +- op: add + path: /spec/patches/- + value: + definitions: + - jsonPatches: + - op: add + path: /spec/template/spec/kubeadmConfigSpec/files/- + valueFrom: + template: | + owner: root:root + path: "/etc/pre-kubeadm-commands/10-prekubeadmscript.sh" + permissions: "0755" + content: {{ printf "%q" (regexReplaceAll "(KUBERNETES_VERSION=.*)" .preKubeadmScript (printf "KUBERNETES_VERSION=%s" .builtin.controlPlane.version)) }} + selector: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + matchResources: + controlPlane: true + - jsonPatches: + - op: add + path: /spec/template/spec/files/- + valueFrom: + template: | + owner: root:root + path: "/etc/pre-kubeadm-commands/10-prekubeadmscript.sh" + permissions: "0755" + content: {{ printf "%q" (regexReplaceAll "(KUBERNETES_VERSION=.*)" .preKubeadmScript (printf "KUBERNETES_VERSION=%s" .builtin.machineDeployment.version)) }} + selector: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + matchResources: + machineDeploymentClass: + names: + - ${CLUSTER_CLASS_NAME}-worker + enabledIf: '{{ if .preKubeadmScript }}true{{ end }}' + name: preKubeadmScript +- op: add + path: /spec/variables/- + value: + name: preKubeadmScript + required: false + schema: + openAPIV3Schema: + type: string + description: Script to run in preKubeadmCommands. 
diff --git a/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/clusterclass/patch-vm-namingstrategy.yaml b/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/clusterclass/patch-vm-namingstrategy.yaml new file mode 100644 index 0000000000..0ec61e9df0 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/clusterclass/patch-vm-namingstrategy.yaml @@ -0,0 +1,4 @@ +- op: add + path: /spec/template/spec/namingStrategy + value: + template: '{{ if le (len .machine.name) 20 }}{{ .machine.name }}{{else}}{{ trimSuffix "-" (trunc 14 .machine.name) }}-{{ trunc -5 .machine.name }}{{end}}' diff --git a/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/clusterclass/patch-vsphere-template.yaml b/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/clusterclass/patch-vsphere-template.yaml new file mode 100644 index 0000000000..769ede6045 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/clusterclass/patch-vsphere-template.yaml @@ -0,0 +1,49 @@ +- op: add + path: /spec/patches/- + value: + definitions: + - jsonPatches: + - op: replace + path: /spec/template/spec/imageName + valueFrom: + # We have to fall back to v1.30.0 for the conformance latest ci test which uses + # versions without corresponding templates like "v1.30.0-alpha.0.525+09a5049ca78502". + template: |- + {{- if eq .builtin.controlPlane.version "v1.28.0" -}} + ubuntu-2204-kube-v1.28.0 + {{- else if eq .builtin.controlPlane.version "v1.29.0" -}} + ubuntu-2204-kube-v1.29.0 + {{- else if eq .builtin.controlPlane.version "v1.30.0" -}} + ubuntu-2204-kube-v1.30.0 + {{- else -}} + ubuntu-2404-kube-v1.31.0 + {{- end -}} + selector: + apiVersion: vmware.infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + matchResources: + controlPlane: true + - jsonPatches: + - op: replace + path: /spec/template/spec/imageName + valueFrom: + # We have to fall back to v1.30.0 for the conformance latest ci test which uses + # versions without corresponding templates like "v1.30.0-alpha.0.525+09a5049ca78502". 
+ template: |- + {{- if eq .builtin.machineDeployment.version "v1.28.0" -}} + ubuntu-2204-kube-v1.28.0 + {{- else if eq .builtin.machineDeployment.version "v1.29.0" -}} + ubuntu-2204-kube-v1.29.0 + {{- else if eq .builtin.machineDeployment.version "v1.30.0" -}} + ubuntu-2204-kube-v1.30.0 + {{- else -}} + ubuntu-2404-kube-v1.31.0 + {{- end -}} + selector: + apiVersion: vmware.infrastructure.cluster.x-k8s.io/v1beta1 + kind: VSphereMachineTemplate + matchResources: + machineDeploymentClass: + names: + - ${CLUSTER_CLASS_NAME}-worker + name: vSphereTemplate diff --git a/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/commons/cluster-network-CIDR.yaml b/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/commons/cluster-network-CIDR.yaml new file mode 100644 index 0000000000..24d0253cef --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/commons/cluster-network-CIDR.yaml @@ -0,0 +1,10 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +spec: + clusterNetwork: + pods: + cidrBlocks: + - 192.168.30.0/24 diff --git a/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/commons/cluster-resource-set-label.yaml b/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/commons/cluster-resource-set-label.yaml new file mode 100644 index 0000000000..1447050b04 --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/commons/cluster-resource-set-label.yaml @@ -0,0 +1,7 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' + labels: + cni: "${CLUSTER_NAME}-crs-cni" diff --git a/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/commons/cluster-resource-set.yaml b/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/commons/cluster-resource-set.yaml new file mode 100644 index 0000000000..6507eed65e --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/commons/cluster-resource-set.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: "cni-${CLUSTER_NAME}-crs-cni" +data: ${CNI_RESOURCES} +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + name: "${CLUSTER_NAME}-crs-cni" +spec: + strategy: ApplyOnce + clusterSelector: + matchLabels: + cni: "${CLUSTER_NAME}-crs-cni" + resources: + - name: "cni-${CLUSTER_NAME}-crs-cni" + kind: ConfigMap diff --git a/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/topology/cluster-template-topology-supervisor.yaml b/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/topology/cluster-template-topology-supervisor.yaml new file mode 100644 index 0000000000..0dc8ef8b9b --- /dev/null +++ b/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/topology/cluster-template-topology-supervisor.yaml @@ -0,0 +1,1258 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + name: '${CLUSTER_NAME}' + namespace: '${NAMESPACE}' +spec: + topology: + class: '${CLUSTER_CLASS_NAME}' + controlPlane: + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + variables: + - name: sshKey + value: '${VSPHERE_SSH_AUTHORIZED_KEY}' + - name: kubeVipPodManifest + value: | + apiVersion: v1 + kind: Pod + metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system + spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_interface + value: ${VIP_NETWORK_INTERFACE:=""} + - name: vip_cidr + value: "32" 
+ - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: svc_enable + value: "true" + - name: svc_leasename + value: plndr-svcs-lock + - name: svc_election + value: "true" + - name: vip_leaderelection + value: "true" + - name: vip_leasename + value: plndr-cp-lock + - name: vip_leaseduration + value: "15" + - name: vip_renewdeadline + value: "10" + - name: vip_retryperiod + value: "2" + - name: address + value: ${CONTROL_PLANE_ENDPOINT_IP} + - name: prometheus_server + value: :2112 + image: ghcr.io/kube-vip/kube-vip:v0.6.4 + imagePullPolicy: IfNotPresent + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + - mountPath: /etc/hosts + name: etchosts + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + name: kubeconfig + - hostPath: + path: /etc/kube-vip.hosts + type: File + name: etchosts + status: {} + - name: controlPlaneIpAddr + value: ${CONTROL_PLANE_ENDPOINT_IP} + - name: controlPlanePort + value: ${CONTROL_PLANE_ENDPOINT_PORT:=6443} + version: '${KUBERNETES_VERSION}' + workers: + machineDeployments: + - class: ${CLUSTER_CLASS_NAME}-worker + metadata: {} + name: md-0 + replicas: ${WORKER_MACHINE_COUNT} +--- +apiVersion: addons.cluster.x-k8s.io/v1beta1 +kind: ClusterResourceSet +metadata: + labels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + name: ${CLUSTER_NAME}-crs-0 + namespace: '${NAMESPACE}' +spec: + clusterSelector: + matchLabels: + cluster.x-k8s.io/cluster-name: '${CLUSTER_NAME}' + resources: + - kind: Secret + name: vsphere-config-secret + - kind: ConfigMap + name: csi-manifests + - kind: Secret + name: cloud-provider-vsphere-credentials + - kind: ConfigMap + name: cpi-manifests +--- +apiVersion: v1 +kind: Secret +metadata: + name: vsphere-config-secret + namespace: '${NAMESPACE}' +stringData: + data: |- + apiVersion: v1 + kind: Secret + metadata: + name: vsphere-config-secret + namespace: vmware-system-csi + stringData: + csi-vsphere.conf: |+ + [Global] + thumbprint = "${VSPHERE_TLS_THUMBPRINT}" + + [VirtualCenter "${VSPHERE_SERVER}"] + user = "${VSPHERE_USERNAME}" + password = "${VSPHERE_PASSWORD}" + datacenters = "${VSPHERE_DATACENTER}" + + [Network] + public-network = "${VSPHERE_NETWORK}" + + type: Opaque +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: |- + apiVersion: v1 + kind: Namespace + metadata: + name: vmware-system-csi + --- + apiVersion: storage.k8s.io/v1 + kind: CSIDriver + metadata: + name: csi.vsphere.vmware.com + spec: + attachRequired: true + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: vsphere-csi-controller + namespace: vmware-system-csi + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: vsphere-csi-controller-role + rules: + - apiGroups: + - "" + resources: + - nodes + - pods + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - apiGroups: + - "" + resources: + - persistentvolumeclaims + verbs: + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - persistentvolumeclaims/status + verbs: + - patch + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - watch + - create + - update + - delete + - patch + - apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - watch + - create + - update + - patch 
+ - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - watch + - list + - delete + - update + - create + - apiGroups: + - storage.k8s.io + resources: + - storageclasses + - csinodes + verbs: + - get + - list + - watch + - apiGroups: + - storage.k8s.io + resources: + - volumeattachments + verbs: + - get + - list + - watch + - patch + - apiGroups: + - cns.vmware.com + resources: + - triggercsifullsyncs + verbs: + - create + - get + - update + - watch + - list + - apiGroups: + - cns.vmware.com + resources: + - cnsvspherevolumemigrations + verbs: + - create + - get + - list + - watch + - update + - delete + - apiGroups: + - cns.vmware.com + resources: + - cnsvolumeinfoes + verbs: + - create + - get + - list + - watch + - delete + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - create + - update + - apiGroups: + - storage.k8s.io + resources: + - volumeattachments/status + verbs: + - patch + - apiGroups: + - cns.vmware.com + resources: + - cnsvolumeoperationrequests + verbs: + - create + - get + - list + - update + - delete + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - get + - list + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotclasses + verbs: + - watch + - get + - list + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents + verbs: + - create + - get + - list + - watch + - update + - delete + - patch + - apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshotcontents/status + verbs: + - update + - patch + - apiGroups: + - cns.vmware.com + resources: + - csinodetopologies + verbs: + - get + - update + - watch + - list + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: vsphere-csi-controller-binding + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: vsphere-csi-controller-role + subjects: + - kind: ServiceAccount + name: vsphere-csi-controller + namespace: vmware-system-csi + --- + apiVersion: v1 + kind: ServiceAccount + metadata: + name: vsphere-csi-node + namespace: vmware-system-csi + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: vsphere-csi-node-cluster-role + rules: + - apiGroups: + - cns.vmware.com + resources: + - csinodetopologies + verbs: + - create + - watch + - get + - patch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + name: vsphere-csi-node-cluster-role-binding + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: vsphere-csi-node-cluster-role + subjects: + - kind: ServiceAccount + name: vsphere-csi-node + namespace: vmware-system-csi + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: Role + metadata: + name: vsphere-csi-node-role + namespace: vmware-system-csi + rules: + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + name: vsphere-csi-node-binding + namespace: vmware-system-csi + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: vsphere-csi-node-role + subjects: + - kind: ServiceAccount + name: vsphere-csi-node + namespace: vmware-system-csi + --- + apiVersion: v1 + data: + pv-to-backingdiskobjectid-mapping: "false" + trigger-csi-fullsync: "false" + kind: ConfigMap + metadata: + name: 
internal-feature-states.csi.vsphere.vmware.com + namespace: vmware-system-csi + --- + apiVersion: v1 + kind: Service + metadata: + labels: + app: vsphere-csi-controller + name: vsphere-csi-controller + namespace: vmware-system-csi + spec: + ports: + - name: ctlr + port: 2112 + protocol: TCP + targetPort: 2112 + - name: syncer + port: 2113 + protocol: TCP + targetPort: 2113 + selector: + app: vsphere-csi-controller + --- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: vsphere-csi-controller + namespace: vmware-system-csi + spec: + replicas: 1 + selector: + matchLabels: + app: vsphere-csi-controller + strategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + app: vsphere-csi-controller + role: vsphere-csi + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/controlplane + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - vsphere-csi-controller + topologyKey: kubernetes.io/hostname + containers: + - args: + - --v=4 + - --timeout=300s + - --csi-address=$(ADDRESS) + - --leader-election + - --leader-election-lease-duration=120s + - --leader-election-renew-deadline=60s + - --leader-election-retry-period=30s + - --kube-api-qps=100 + - --kube-api-burst=100 + env: + - name: ADDRESS + value: /csi/csi.sock + image: registry.k8s.io/sig-storage/csi-attacher:v4.5.1 + name: csi-attacher + volumeMounts: + - mountPath: /csi + name: socket-dir + - args: + - --v=4 + - --timeout=300s + - --handle-volume-inuse-error=false + - --csi-address=$(ADDRESS) + - --kube-api-qps=100 + - --kube-api-burst=100 + - --leader-election + - --leader-election-lease-duration=120s + - --leader-election-renew-deadline=60s + - --leader-election-retry-period=30s + env: + - name: ADDRESS + value: /csi/csi.sock + image: registry.k8s.io/sig-storage/csi-resizer:v1.10.1 + name: csi-resizer + volumeMounts: + - mountPath: /csi + name: socket-dir + - args: + - --fss-name=internal-feature-states.csi.vsphere.vmware.com + - --fss-namespace=$(CSI_NAMESPACE) + env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: X_CSI_MODE + value: controller + - name: X_CSI_SPEC_DISABLE_LEN_CHECK + value: "true" + - name: X_CSI_SERIAL_VOL_ACCESS_TIMEOUT + value: 3m + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + - name: LOGGER_LEVEL + value: PRODUCTION + - name: INCLUSTER_CLIENT_QPS + value: "100" + - name: INCLUSTER_CLIENT_BURST + value: "100" + - name: CSI_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v3.3.0 + imagePullPolicy: Always + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 30 + periodSeconds: 180 + timeoutSeconds: 10 + name: vsphere-csi-controller + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + - containerPort: 2112 + name: prometheus + protocol: TCP + securityContext: + runAsGroup: 65532 + runAsNonRoot: true + runAsUser: 65532 + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - mountPath: /csi + name: socket-dir + - args: + - --v=4 + - --csi-address=/csi/csi.sock + image: 
registry.k8s.io/sig-storage/livenessprobe:v2.12.0 + name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir + - args: + - --leader-election + - --leader-election-lease-duration=30s + - --leader-election-renew-deadline=20s + - --leader-election-retry-period=10s + - --fss-name=internal-feature-states.csi.vsphere.vmware.com + - --fss-namespace=$(CSI_NAMESPACE) + env: + - name: FULL_SYNC_INTERVAL_MINUTES + value: "30" + - name: VSPHERE_CSI_CONFIG + value: /etc/cloud/csi-vsphere.conf + - name: LOGGER_LEVEL + value: PRODUCTION + - name: INCLUSTER_CLIENT_QPS + value: "100" + - name: INCLUSTER_CLIENT_BURST + value: "100" + - name: CSI_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: gcr.io/cloud-provider-vsphere/csi/release/syncer:v3.3.0 + imagePullPolicy: Always + name: vsphere-syncer + ports: + - containerPort: 2113 + name: prometheus + protocol: TCP + securityContext: + runAsGroup: 65532 + runAsNonRoot: true + runAsUser: 65532 + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + - args: + - --v=4 + - --timeout=300s + - --csi-address=$(ADDRESS) + - --kube-api-qps=100 + - --kube-api-burst=100 + - --leader-election + - --leader-election-lease-duration=120s + - --leader-election-renew-deadline=60s + - --leader-election-retry-period=30s + - --default-fstype=ext4 + env: + - name: ADDRESS + value: /csi/csi.sock + image: registry.k8s.io/sig-storage/csi-provisioner:v4.0.1 + name: csi-provisioner + volumeMounts: + - mountPath: /csi + name: socket-dir + - args: + - --v=4 + - --kube-api-qps=100 + - --kube-api-burst=100 + - --timeout=300s + - --csi-address=$(ADDRESS) + - --leader-election + - --leader-election-lease-duration=120s + - --leader-election-renew-deadline=60s + - --leader-election-retry-period=30s + env: + - name: ADDRESS + value: /csi/csi.sock + image: registry.k8s.io/sig-storage/csi-snapshotter:v7.0.2 + name: csi-snapshotter + volumeMounts: + - mountPath: /csi + name: socket-dir + dnsPolicy: Default + priorityClassName: system-cluster-critical + serviceAccountName: vsphere-csi-controller + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + volumes: + - name: vsphere-config-volume + secret: + secretName: vsphere-config-secret + - emptyDir: {} + name: socket-dir + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: vsphere-csi-node + namespace: vmware-system-csi + spec: + selector: + matchLabels: + app: vsphere-csi-node + template: + metadata: + labels: + app: vsphere-csi-node + role: vsphere-csi + spec: + containers: + - args: + - --v=5 + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock + image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.1 + livenessProbe: + exec: + command: + - /csi-node-driver-registrar + - --kubelet-registration-path=/var/lib/kubelet/plugins/csi.vsphere.vmware.com/csi.sock + - --mode=kubelet-registration-probe + initialDelaySeconds: 3 + name: node-driver-registrar + volumeMounts: + - mountPath: /csi + name: plugin-dir + - mountPath: /registration + name: registration-dir + - args: + - --fss-name=internal-feature-states.csi.vsphere.vmware.com + - --fss-namespace=$(CSI_NAMESPACE) + env: + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - 
name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: MAX_VOLUMES_PER_NODE + value: "59" + - name: X_CSI_MODE + value: node + - name: X_CSI_SPEC_REQ_VALIDATION + value: "false" + - name: X_CSI_SPEC_DISABLE_LEN_CHECK + value: "true" + - name: LOGGER_LEVEL + value: PRODUCTION + - name: CSI_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: NODEGETINFO_WATCH_TIMEOUT_MINUTES + value: "1" + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v3.3.0 + imagePullPolicy: Always + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 5 + name: vsphere-csi-node + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + securityContext: + allowPrivilegeEscalation: true + capabilities: + add: + - SYS_ADMIN + privileged: true + volumeMounts: + - mountPath: /csi + name: plugin-dir + - mountPath: /var/lib/kubelet + mountPropagation: Bidirectional + name: pods-mount-dir + - mountPath: /dev + name: device-dir + - mountPath: /sys/block + name: blocks-dir + - mountPath: /sys/devices + name: sys-devices-dir + - args: + - --v=4 + - --csi-address=/csi/csi.sock + image: registry.k8s.io/sig-storage/livenessprobe:v2.12.0 + name: liveness-probe + volumeMounts: + - mountPath: /csi + name: plugin-dir + dnsPolicy: ClusterFirstWithHostNet + hostNetwork: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-node-critical + serviceAccountName: vsphere-csi-node + tolerations: + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists + volumes: + - hostPath: + path: /var/lib/kubelet/plugins_registry + type: Directory + name: registration-dir + - hostPath: + path: /var/lib/kubelet/plugins/csi.vsphere.vmware.com + type: DirectoryOrCreate + name: plugin-dir + - hostPath: + path: /var/lib/kubelet + type: Directory + name: pods-mount-dir + - hostPath: + path: /dev + name: device-dir + - hostPath: + path: /sys/block + type: Directory + name: blocks-dir + - hostPath: + path: /sys/devices + type: Directory + name: sys-devices-dir + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + --- + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: vsphere-csi-node-windows + namespace: vmware-system-csi + spec: + selector: + matchLabels: + app: vsphere-csi-node-windows + template: + metadata: + labels: + app: vsphere-csi-node-windows + role: vsphere-csi-windows + spec: + containers: + - args: + - --v=5 + - --csi-address=$(ADDRESS) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + env: + - name: ADDRESS + value: unix://C:\\csi\\csi.sock + - name: DRIVER_REG_SOCK_PATH + value: C:\\var\\lib\\kubelet\\plugins\\csi.vsphere.vmware.com\\csi.sock + image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.0 + livenessProbe: + exec: + command: + - /csi-node-driver-registrar.exe + - --kubelet-registration-path=C:\\var\\lib\\kubelet\\plugins\\csi.vsphere.vmware.com\\csi.sock + - --mode=kubelet-registration-probe + initialDelaySeconds: 3 + name: node-driver-registrar + volumeMounts: + - mountPath: /csi + name: plugin-dir + - mountPath: /registration + name: registration-dir + - args: + - --fss-name=internal-feature-states.csi.vsphere.vmware.com + - --fss-namespace=$(CSI_NAMESPACE) + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: CSI_ENDPOINT + value: unix://C:\\csi\\csi.sock + - name: MAX_VOLUMES_PER_NODE + value: "59" + - name: X_CSI_MODE + value: node + - name: 
X_CSI_SPEC_REQ_VALIDATION + value: "false" + - name: X_CSI_SPEC_DISABLE_LEN_CHECK + value: "true" + - name: LOGGER_LEVEL + value: PRODUCTION + - name: X_CSI_LOG_LEVEL + value: DEBUG + - name: CSI_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: NODEGETINFO_WATCH_TIMEOUT_MINUTES + value: "1" + image: gcr.io/cloud-provider-vsphere/csi/release/driver:v3.3.0 + imagePullPolicy: Always + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + periodSeconds: 5 + timeoutSeconds: 5 + name: vsphere-csi-node + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + volumeMounts: + - mountPath: C:\csi + name: plugin-dir + - mountPath: C:\var\lib\kubelet + name: pods-mount-dir + - mountPath: \\.\pipe\csi-proxy-volume-v1 + name: csi-proxy-volume-v1 + - mountPath: \\.\pipe\csi-proxy-filesystem-v1 + name: csi-proxy-filesystem-v1 + - mountPath: \\.\pipe\csi-proxy-disk-v1 + name: csi-proxy-disk-v1 + - mountPath: \\.\pipe\csi-proxy-system-v1alpha1 + name: csi-proxy-system-v1alpha1 + - args: + - --v=4 + - --csi-address=/csi/csi.sock + image: registry.k8s.io/sig-storage/livenessprobe:v2.12.0 + name: liveness-probe + volumeMounts: + - mountPath: /csi + name: plugin-dir + nodeSelector: + kubernetes.io/os: windows + priorityClassName: system-node-critical + serviceAccountName: vsphere-csi-node + tolerations: + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists + volumes: + - hostPath: + path: C:\var\lib\kubelet\plugins_registry\ + type: Directory + name: registration-dir + - hostPath: + path: C:\var\lib\kubelet\plugins\csi.vsphere.vmware.com\ + type: DirectoryOrCreate + name: plugin-dir + - hostPath: + path: \var\lib\kubelet + type: Directory + name: pods-mount-dir + - hostPath: + path: \\.\pipe\csi-proxy-disk-v1 + type: "" + name: csi-proxy-disk-v1 + - hostPath: + path: \\.\pipe\csi-proxy-volume-v1 + type: "" + name: csi-proxy-volume-v1 + - hostPath: + path: \\.\pipe\csi-proxy-filesystem-v1 + type: "" + name: csi-proxy-filesystem-v1 + - hostPath: + path: \\.\pipe\csi-proxy-system-v1alpha1 + type: "" + name: csi-proxy-system-v1alpha1 + updateStrategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate +kind: ConfigMap +metadata: + name: csi-manifests + namespace: '${NAMESPACE}' +--- +apiVersion: v1 +kind: Secret +metadata: + name: cloud-provider-vsphere-credentials + namespace: '${NAMESPACE}' +stringData: + data: |- + apiVersion: v1 + kind: Secret + metadata: + labels: + component: cloud-controller-manager + vsphere-cpi-infra: secret + name: cloud-provider-vsphere-credentials + namespace: kube-system + stringData: + ${VSPHERE_SERVER}.password: "${VSPHERE_PASSWORD}" + ${VSPHERE_SERVER}.username: "${VSPHERE_USERNAME}" + type: Opaque +type: addons.cluster.x-k8s.io/resource-set +--- +apiVersion: v1 +data: + data: |- + --- + # Source: vsphere-cpi/templates/service-account.yaml + apiVersion: v1 + kind: ServiceAccount + metadata: + name: cloud-controller-manager + labels: + app: vsphere-cpi + vsphere-cpi-infra: service-account + component: cloud-controller-manager + namespace: kube-system + --- + # Source: vsphere-cpi/templates/role.yaml + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRole + metadata: + name: cloud-controller-manager + labels: + app: vsphere-cpi + vsphere-cpi-infra: role + component: cloud-controller-manager + rules: + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - update + - apiGroups: + - "" + resources: + - nodes + verbs: + - "*" + - 
apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch + - apiGroups: + - "" + resources: + - services + verbs: + - list + - patch + - update + - watch + - apiGroups: + - "" + resources: + - services/status + verbs: + - patch + - apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - persistentvolumes + verbs: + - get + - list + - update + - watch + - apiGroups: + - "" + resources: + - endpoints + verbs: + - create + - get + - list + - watch + - update + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch + - apiGroups: + - "coordination.k8s.io" + resources: + - leases + verbs: + - create + - get + - list + - watch + - update + --- + # Source: vsphere-cpi/templates/daemonset.yaml + apiVersion: apps/v1 + kind: DaemonSet + metadata: + name: vsphere-cpi + labels: + app: vsphere-cpi + vsphere-cpi-infra: daemonset + component: cloud-controller-manager + tier: control-plane + namespace: kube-system + annotations: + spec: + selector: + matchLabels: + app: vsphere-cpi + updateStrategy: + type: RollingUpdate + template: + metadata: + labels: + app: vsphere-cpi + component: cloud-controller-manager + tier: control-plane + release: release-name + vsphere-cpi-infra: daemonset + spec: + tolerations: + - effect: NoSchedule + key: node.cloudprovider.kubernetes.io/uninitialized + value: "true" + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + - effect: NoSchedule + key: node.kubernetes.io/not-ready + operator: Exists + - effect: NoExecute + key: CriticalAddonsOnly + operator: Exists + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + securityContext: + fsGroup: 1001 + runAsUser: 1001 + serviceAccountName: cloud-controller-manager + hostNetwork: true + dnsPolicy: ClusterFirst + priorityClassName: system-node-critical + containers: + - name: vsphere-cpi + image: registry.k8s.io/cloud-pv-vsphere/cloud-provider-vsphere:${CPI_IMAGE_K8S_VERSION} + imagePullPolicy: IfNotPresent + args: + - --cloud-provider=vsphere + - --v=2 + - --cloud-config=/etc/cloud/vsphere.conf + volumeMounts: + - mountPath: /etc/cloud + name: vsphere-config-volume + readOnly: true + volumes: + - name: vsphere-config-volume + configMap: + name: cloud-config + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: RoleBinding + metadata: + labels: + app: vsphere-cpi + component: cloud-controller-manager + vsphere-cpi-infra: role-binding + name: servicecatalog.k8s.io:apiserver-authentication-reader + namespace: kube-system + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader + subjects: + - apiGroup: "" + kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - apiGroup: "" + kind: User + name: cloud-controller-manager + --- + apiVersion: rbac.authorization.k8s.io/v1 + kind: ClusterRoleBinding + metadata: + labels: + app: vsphere-cpi + component: cloud-controller-manager + vsphere-cpi-infra: cluster-role-binding + name: cloud-controller-manager + roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cloud-controller-manager + subjects: + - kind: ServiceAccount + name: cloud-controller-manager + namespace: kube-system + - kind: User + name: cloud-controller-manager + --- + apiVersion: v1 + data: + vsphere.conf: | + global: + port: 443 
+          secretName: cloud-provider-vsphere-credentials
+          secretNamespace: kube-system
+          thumbprint: '${VSPHERE_TLS_THUMBPRINT}'
+        vcenter:
+          ${VSPHERE_SERVER}:
+            datacenters:
+              - '${VSPHERE_DATACENTER}'
+            server: '${VSPHERE_SERVER}'
+    kind: ConfigMap
+    metadata:
+      name: cloud-config
+      namespace: kube-system
+kind: ConfigMap
+metadata:
+  name: cpi-manifests
+  namespace: '${NAMESPACE}'
\ No newline at end of file
diff --git a/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/topology/kustomization.yaml b/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/topology/kustomization.yaml
new file mode 100644
index 0000000000..efdd2348c0
--- /dev/null
+++ b/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/topology/kustomization.yaml
@@ -0,0 +1,8 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+  - cluster-template-topology-supervisor.yaml
+  - ../commons/cluster-resource-set.yaml
+patchesStrategicMerge:
+  - ../commons/cluster-resource-set-label.yaml
+  - ../commons/cluster-network-CIDR.yaml
diff --git a/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/workload/kustomization.yaml b/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/workload/kustomization.yaml
new file mode 100644
index 0000000000..e191dff830
--- /dev/null
+++ b/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/workload/kustomization.yaml
@@ -0,0 +1,8 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+- ../topology
+patches:
+  - target:
+      kind: Cluster
+    path: workload-control-plane-endpoint-ip.yaml
diff --git a/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/workload/workload-control-plane-endpoint-ip.yaml b/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/workload/workload-control-plane-endpoint-ip.yaml
new file mode 100644
index 0000000000..f36926df68
--- /dev/null
+++ b/test/e2e/data/infrastructure-vsphere-supervisor/v1.11/workload/workload-control-plane-endpoint-ip.yaml
@@ -0,0 +1,5 @@
+- op: replace
+  path: /spec/topology/variables/2
+  value:
+    name: controlPlaneIpAddr
+    value: "${WORKLOAD_CONTROL_PLANE_ENDPOINT_IP}"
diff --git a/test/e2e/data/shared/capv/main/metadata.yaml b/test/e2e/data/shared/capv/main/metadata.yaml
index 5a959dabca..b4c095f206 100644
--- a/test/e2e/data/shared/capv/main/metadata.yaml
+++ b/test/e2e/data/shared/capv/main/metadata.yaml
@@ -42,3 +42,6 @@ releaseSeries:
   - major: 1
     minor: 11
     contract: v1beta1
+  - major: 1
+    minor: 12
+    contract: v1beta1
diff --git a/test/e2e/data/shared/capi/v1.5/metadata.yaml b/test/e2e/data/shared/capv/v1.11/metadata.yaml
similarity index 67%
rename from test/e2e/data/shared/capi/v1.5/metadata.yaml
rename to test/e2e/data/shared/capv/v1.11/metadata.yaml
index ee985c1588..5a959dabca 100644
--- a/test/e2e/data/shared/capi/v1.5/metadata.yaml
+++ b/test/e2e/data/shared/capv/v1.11/metadata.yaml
@@ -7,20 +7,38 @@ apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3
 kind: Metadata
 releaseSeries:
   - major: 1
-    minor: 5
+    minor: 0
     contract: v1beta1
   - major: 1
-    minor: 4
+    minor: 1
+    contract: v1beta1
+  - major: 1
+    minor: 2
     contract: v1beta1
   - major: 1
     minor: 3
     contract: v1beta1
   - major: 1
-    minor: 2
+    minor: 4
     contract: v1beta1
   - major: 1
-    minor: 1
+    minor: 5
     contract: v1beta1
   - major: 1
-    minor: 0
+    minor: 6
+    contract: v1beta1
+  - major: 1
+    minor: 7
+    contract: v1beta1
+  - major: 1
+    minor: 8
+    contract: v1beta1
+  - major: 1
+    minor: 9
+    contract: v1beta1
+  - major: 1
+    minor: 10
+    contract: v1beta1
+  - major: 1
+    minor: 11
     contract: v1beta1
diff --git a/test/extension/tilt-provider.yaml b/test/extension/tilt-provider.yaml
index 8723800266..2174c6969c 100644
--- a/test/extension/tilt-provider.yaml
+++ b/test/extension/tilt-provider.yaml
@@ -1,7 +1,7 @@
 ---
 - name: capv-test-extension
   config:
-    version: v1.11.99
+    version: v1.12.99
     image: gcr.io/k8s-staging-capi-vsphere/cluster-api-vsphere-test-extension
     live_reload_deps:
     - main.go
diff --git a/test/infrastructure/net-operator/tilt-provider.yaml b/test/infrastructure/net-operator/tilt-provider.yaml
index 05afa8f8b5..f9931e88a4 100644
--- a/test/infrastructure/net-operator/tilt-provider.yaml
+++ b/test/infrastructure/net-operator/tilt-provider.yaml
@@ -1,7 +1,7 @@
 ---
 - name: net-operator
   config:
-    version: v1.11.99
+    version: v1.12.99
     image: gcr.io/k8s-staging-capi-vsphere/cluster-api-net-operator
     live_reload_deps:
     - main.go
diff --git a/test/infrastructure/vcsim/tilt-provider.yaml b/test/infrastructure/vcsim/tilt-provider.yaml
index edc92ca8b3..a128670b93 100644
--- a/test/infrastructure/vcsim/tilt-provider.yaml
+++ b/test/infrastructure/vcsim/tilt-provider.yaml
@@ -1,7 +1,7 @@
 ---
 - name: vcsim
   config:
-    version: v1.11.99
+    version: v1.12.99
     image: gcr.io/k8s-staging-capi-vsphere/cluster-api-vcsim-controller
     live_reload_deps:
     - main.go
diff --git a/tilt-provider.yaml b/tilt-provider.yaml
index b8ec74a2b9..1165865568 100644
--- a/tilt-provider.yaml
+++ b/tilt-provider.yaml
@@ -1,7 +1,7 @@
 ---
 - name: vsphere
   config:
-    version: v1.11.99
+    version: v1.12.99
     image: gcr.io/k8s-staging-capi-vsphere/cluster-api-vsphere-controller
     live_reload_deps:
     - main.go
@@ -15,7 +15,7 @@ label: CAPV
 
 - name: vsphere-supervisor
   config:
-    version: v1.11.99
+    version: v1.12.99
     image: gcr.io/k8s-staging-capi-vsphere/cluster-api-vsphere-controller
     live_reload_deps:
     - main.go