From 0b799370d97ab02ce4fe4fdcad49130c79599faf Mon Sep 17 00:00:00 2001 From: Stefan Bueringer Date: Tue, 11 Jun 2024 17:50:12 +0200 Subject: [PATCH] test/e2e: use vSphere projects from Boskos MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Stefan Büringer buringerst@vmware.com --- Makefile | 4 +- hack/e2e.sh | 68 ++++++++++++--- test/e2e/README.md | 16 ++-- test/e2e/config/vsphere.yaml | 4 +- test/e2e/e2e_setup_test.go | 10 ++- test/e2e/e2e_suite_test.go | 12 +-- test/e2e/ipam_test.go | 3 + test/framework/ip/addressmanager.go | 8 ++ test/framework/ip/incluster.go | 129 ++++++++++++++++++++-------- 9 files changed, 187 insertions(+), 67 deletions(-) diff --git a/Makefile b/Makefile index 84cfec8e16..7237148c77 100644 --- a/Makefile +++ b/Makefile @@ -83,7 +83,7 @@ GINKGO_NODES ?= 1 GINKGO_TIMEOUT ?= 3h E2E_CONF_FILE ?= $(abspath test/e2e/config/vsphere.yaml) E2E_CONF_OVERRIDE_FILE ?= $(abspath test/e2e/config/config-overrides.yaml) -E2E_IPAM_KUBECONFIG ?= +E2E_VSPHERE_IP_POOL ?= E2E_TEMPLATE_DIR := $(abspath test/e2e/data/) E2E_GOVMOMI_TEMPLATE_DIR := $(E2E_TEMPLATE_DIR)/infrastructure-vsphere-govmomi E2E_SUPERVISOR_TEMPLATE_DIR := $(E2E_TEMPLATE_DIR)/infrastructure-vsphere-supervisor @@ -616,7 +616,7 @@ e2e: $(GINKGO) $(KUSTOMIZE) $(KIND) $(GOVC) ## Run e2e tests --e2e.artifacts-folder="$(ARTIFACTS)" \ --e2e.skip-resource-cleanup=$(SKIP_RESOURCE_CLEANUP) \ --e2e.use-existing-cluster="$(USE_EXISTING_CLUSTER)" \ - --e2e.ipam-kubeconfig="$(E2E_IPAM_KUBECONFIG)" + --e2e.ip-pool='$(E2E_VSPHERE_IP_POOL)' ## -------------------------------------- ## Release diff --git a/hack/e2e.sh b/hack/e2e.sh index 5b7273fb8a..85b9cc12eb 100755 --- a/hack/e2e.sh +++ b/hack/e2e.sh @@ -24,7 +24,7 @@ REPO_ROOT=$(git rev-parse --show-toplevel) RE_VCSIM='\[vcsim\\]' # In CI, ARTIFACTS is set to a different directory. 
This stores the value of -# ARTIFACTS i1n ORIGINAL_ARTIFACTS and replaces ARTIFACTS by a temporary directory +# ARTIFACTS in ORIGINAL_ARTIFACTS and replaces ARTIFACTS by a temporary directory # which gets cleaned up from credentials at the end of the test. export ORIGINAL_ARTIFACTS="" export ARTIFACTS="${ARTIFACTS:-${REPO_ROOT}/_artifacts}" @@ -33,10 +33,25 @@ if [[ "${ARTIFACTS}" != "${REPO_ROOT}/_artifacts" ]]; then ARTIFACTS=$(mktemp -d) fi -# shellcheck source=./hack/ensure-kubectl.sh -source "${REPO_ROOT}/hack/ensure-kubectl.sh" +# shellcheck source=./hack/ensure-go.sh +source "${REPO_ROOT}/hack/ensure-go.sh" + +export BOSKOS_RESOURCE_OWNER=cluster-api-provider-vsphere +if [[ "${JOB_NAME}" != "" ]]; then + export BOSKOS_RESOURCE_OWNER="${JOB_NAME}/${BUILD_ID}" +fi +export BOSKOS_RESOURCE_TYPE=vsphere-project-cluster-api-provider on_exit() { + # Only handle Boskos when we have to (not for vcsim) + if [[ ! "${GINKGO_FOCUS:-}" =~ $RE_VCSIM ]]; then + # Stop boskos heartbeat + [[ -z ${HEART_BEAT_PID:-} ]] || kill -9 "${HEART_BEAT_PID}" + + # If Boskos is being used then release the vsphere project. + [ -z "${BOSKOS_HOST:-}" ] || docker run -e VSPHERE_USERNAME -e VSPHERE_PASSWORD gcr.io/k8s-staging-capi-vsphere/extra/boskosctl:latest release --boskos-host="${BOSKOS_HOST}" --resource-owner="${BOSKOS_RESOURCE_OWNER}" --resource-name="${BOSKOS_RESOURCE_NAME}" --vsphere-server="${VSPHERE_SERVER}" --vsphere-tls-thumbprint="${VSPHERE_TLS_THUMBPRINT}" --vsphere-folder="${BOSKOS_RESOURCE_FOLDER}" --vsphere-resource-pool="${BOSKOS_RESOURCE_POOL}" + fi + # kill the VPN only when we started it (not vcsim) if [[ ! 
"${GINKGO_FOCUS:-}" =~ $RE_VCSIM ]]; then docker kill vpn @@ -83,18 +98,13 @@ export VSPHERE_SSH_PRIVATE_KEY="/root/ssh/.private-key/private-key" export E2E_CONF_FILE="${REPO_ROOT}/test/e2e/config/vsphere.yaml" export E2E_CONF_OVERRIDE_FILE="" export E2E_VM_OPERATOR_VERSION="${VM_OPERATOR_VERSION:-v1.8.6-0-gde75746a}" -export ARTIFACTS="${ARTIFACTS:-${REPO_ROOT}/_artifacts}" export DOCKER_IMAGE_TAR="/tmp/images/image.tar" export GC_KIND="false" # Make tests run in-parallel export GINKGO_NODES=5 -# Set the kubeconfig to the IPAM cluster so the e2e tests can claim ip addresses -# for kube-vip. -export E2E_IPAM_KUBECONFIG="/root/ipam-conf/capv-services.conf" - -# Only run the vpn/check for IPAM when we need them (not vcsim) +# Only run the vpn/check for IPAM when we need them (not for vcsim) if [[ ! "${GINKGO_FOCUS:-}" =~ $RE_VCSIM ]]; then # Run the vpn client in container docker run --rm -d --name vpn -v "${HOME}/.openvpn/:${HOME}/.openvpn/" \ @@ -104,11 +114,11 @@ if [[ ! "${GINKGO_FOCUS:-}" =~ $RE_VCSIM ]]; then # Tail the vpn logs docker logs vpn - # Wait until the VPN connection is active and we are able to reach the ipam cluster - function wait_for_ipam_reachable() { + # Wait until the VPN connection is active. + function wait_for_vpn_up() { local n=0 until [ $n -ge 30 ]; do - kubectl --kubeconfig="${E2E_IPAM_KUBECONFIG}" --request-timeout=2s get inclusterippools.ipam.cluster.x-k8s.io && RET=$? || RET=$? + curl "https://${VSPHERE_SERVER}" --connect-timeout 2 -k && RET=$? || RET=$? if [[ "$RET" -eq 0 ]]; then break fi @@ -117,7 +127,39 @@ if [[ ! "${GINKGO_FOCUS:-}" =~ $RE_VCSIM ]]; then done return "$RET" } - wait_for_ipam_reachable + wait_for_vpn_up + + # If BOSKOS_HOST is set then acquire a vsphere-project from Boskos. + if [ -n "${BOSKOS_HOST:-}" ]; then + # Check out the account from Boskos and store the produced environment + # variables in a temporary file. 
+ account_env_var_file="$(mktemp)" + docker run gcr.io/k8s-staging-capi-vsphere/extra/boskosctl:latest acquire --boskos-host="${BOSKOS_HOST}" --resource-owner="${BOSKOS_RESOURCE_OWNER}" --resource-type="${BOSKOS_RESOURCE_TYPE}" 1>"${account_env_var_file}" + checkout_account_status="${?}" + + # If the checkout process was a success then load the account's + # environment variables into this process. + # shellcheck disable=SC1090 + [ "${checkout_account_status}" = "0" ] && . "${account_env_var_file}" + export BOSKOS_RESOURCE_NAME=${BOSKOS_RESOURCE_NAME} + export VSPHERE_FOLDER=${BOSKOS_RESOURCE_FOLDER} + export VSPHERE_RESOURCE_POOL=${BOSKOS_RESOURCE_POOL} + export E2E_VSPHERE_IP_POOL="${BOSKOS_RESOURCE_IP_POOL}" + + # Always remove the account environment variable file. It contains + # sensitive information. + rm -f "${account_env_var_file}" + + if [ ! "${checkout_account_status}" = "0" ]; then + echo "error getting vsphere project from Boskos" 1>&2 + exit "${checkout_account_status}" + fi + + # Run the heartbeat to tell boskos periodically that we are still + # using the checked out account. + docker run gcr.io/k8s-staging-capi-vsphere/extra/boskosctl:latest heartbeat --boskos-host="${BOSKOS_HOST}" --resource-owner="${BOSKOS_RESOURCE_OWNER}" --resource-name="${BOSKOS_RESOURCE_NAME}" >>"${ARTIFACTS}/boskos-heartbeat.log" 2>&1 & + HEART_BEAT_PID=$! 
+ fi fi make envsubst diff --git a/test/e2e/README.md b/test/e2e/README.md index ed897de0db..4adedf40d7 100644 --- a/test/e2e/README.md +++ b/test/e2e/README.md @@ -33,18 +33,18 @@ The first step to running the e2e tests is setting up the required environment v | `VSPHERE_SSH_PRIVATE_KEY` | The file path of the private key used to ssh into the CAPV VMs | `/home/foo/bar-ssh.key` | | `VSPHERE_SSH_AUTHORIZED_KEY` | The public key that is added to the CAPV VMs | `ssh-rsa ABCDEF...XYZ=` | | `VSPHERE_TLS_THUMBPRINT` | The TLS thumbprint of the vSphere server's certificate which should be trusted | `2A:3F:BC:CA:C0:96:35:D4:B7:A2:AA:3C:C1:33:D9:D7:BE:EC:31:55` | -| `CONTROL_PLANE_ENDPOINT_IP` | The IP that kube-vip should use as a control plane endpoint. It will not be used if `E2E_IPAM_KUBECONFIG` is set. | `10.10.123.100` | +| `CONTROL_PLANE_ENDPOINT_IP` | The IP that kube-vip should use as a control plane endpoint. It will not be used if `E2E_VSPHERE_IP_POOL` is set. | `10.10.123.100` | | `VSPHERE_STORAGE_POLICY` | The name of an existing vSphere storage policy to be assigned to created VMs | `my-test-sp` | ### Flags -| Flag | Description | Default Value | -|-------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------| -| `SKIP_RESOURCE_CLEANUP` | This flags skips cleanup of the resources created during the tests as well as the kind/bootstrap cluster | `false` | -| `USE_EXISTING_CLUSTER` | This flag enables the usage of an existing K8S cluster as the management cluster to run tests against. | `false` | -| `GINKGO_TEST_TIMEOUT` | This sets the timeout for the E2E test suite. | `2h` | -| `GINKGO_FOCUS` | This populates the `-focus` flag of the `ginkgo` run command. 
| `""` | -| `E2E_IPAM_KUBECONFIG` | This flag points to a kubeconfig where the in-cluster IPAM provider is running to dynamically claim IP addresses for tests. If this is set, the environment variable `CONTROL_PLANE_ENDPOINT_IP` gets ignored. | `""` | +| Flag | Description | Default Value | +|-------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------| +| `SKIP_RESOURCE_CLEANUP` | This flags skips cleanup of the resources created during the tests as well as the kind/bootstrap cluster | `false` | +| `USE_EXISTING_CLUSTER` | This flag enables the usage of an existing K8S cluster as the management cluster to run tests against. | `false` | +| `GINKGO_TEST_TIMEOUT` | This sets the timeout for the E2E test suite. | `2h` | +| `GINKGO_FOCUS` | This populates the `-focus` flag of the `ginkgo` run command. | `""` | +| `E2E_VSPHERE_IP_POOL` | This allows to configure the IPPool to use for the e2e test. Supports the addresses, gateway and prefix fields from the InClusterIPPool CRD https://github.com/kubernetes-sigs/cluster-api-ipam-provider-in-cluster/blob/main/api/v1alpha2/inclusterippool_types.go. If this is set, the environment variable `CONTROL_PLANE_ENDPOINT_IP` gets ignored. 
| `""` | ### Running the e2e tests diff --git a/test/e2e/config/vsphere.yaml b/test/e2e/config/vsphere.yaml index a5b0eafed6..307ce0e3d1 100644 --- a/test/e2e/config/vsphere.yaml +++ b/test/e2e/config/vsphere.yaml @@ -268,8 +268,8 @@ variables: VSPHERE_CONTENT_LIBRARY: "capv" VSPHERE_CONTENT_LIBRARY_ITEMS: "ubuntu-2204-kube-v1.28.0,ubuntu-2204-kube-v1.29.0,ubuntu-2204-kube-v1.30.0" VSPHERE_IMAGE_NAME: "ubuntu-2204-kube-v1.30.0" - VSPHERE_NETWORK: "sddc-cgw-network-6" - VSPHERE_DISTRIBUTED_PORT_GROUP: "/SDDC-Datacenter/network/sddc-cgw-network-6" + VSPHERE_NETWORK: "sddc-cgw-network-10" + VSPHERE_DISTRIBUTED_PORT_GROUP: "/SDDC-Datacenter/network/sddc-cgw-network-10" VSPHERE_TEMPLATE: "ubuntu-2204-kube-v1.30.0" FLATCAR_VSPHERE_TEMPLATE: "flatcar-stable-3815.2.2-kube-v1.30.0" VSPHERE_INSECURE_CSI: "true" diff --git a/test/e2e/e2e_setup_test.go b/test/e2e/e2e_setup_test.go index 0d5f0dc012..82b126ed6d 100644 --- a/test/e2e/e2e_setup_test.go +++ b/test/e2e/e2e_setup_test.go @@ -45,6 +45,7 @@ import ( type setupOptions struct { additionalIPVariableNames []string gatewayIPVariableName string + prefixVariableName string } // SetupOption is a configuration option supplied to Setup. @@ -65,6 +66,13 @@ func WithGateway(variableName string) SetupOption { } } +// WithPrefix instructs Setup to store the prefix from IPAM into the provided variableName. 
+func WithPrefix(variableName string) SetupOption { + return func(o *setupOptions) { + o.prefixVariableName = variableName + } +} + type testSettings struct { ClusterctlConfigPath string PostNamespaceCreatedFunc func(managementClusterProxy framework.ClusterProxy, workloadClusterNamespace string) @@ -90,7 +98,7 @@ func Setup(specName string, f func(testSpecificSettings func() testSettings), op case VCenterTestTarget: Byf("Getting IP for %s", strings.Join(append([]string{vsphereip.ControlPlaneEndpointIPVariable}, options.additionalIPVariableNames...), ",")) // get IPs from the in cluster address manager - testSpecificIPAddressClaims, testSpecificVariables = inClusterAddressManager.ClaimIPs(ctx, vsphereip.WithGateway(options.gatewayIPVariableName), vsphereip.WithIP(options.additionalIPVariableNames...)) + testSpecificIPAddressClaims, testSpecificVariables = inClusterAddressManager.ClaimIPs(ctx, vsphereip.WithGateway(options.gatewayIPVariableName), vsphereip.WithPrefix(options.prefixVariableName), vsphereip.WithIP(options.additionalIPVariableNames...)) case VCSimTestTarget: c := bootstrapClusterProxy.GetClient() diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index 537e9d4d7a..4aa073b9ce 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -33,6 +33,7 @@ import ( storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/klog/v2" + ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/bootstrap" "sigs.k8s.io/cluster-api/test/framework/clusterctl" @@ -119,9 +120,8 @@ var ( namespaces map[*corev1.Namespace]context.CancelFunc - // e2eIPAMKubeconfig is a kubeconfig to a cluster which provides IP address management via an in-cluster - // IPAM provider to claim IPs for the control plane IPs of created clusters. - e2eIPAMKubeconfig string + // e2eIPPool to be used for the e2e test. 
+ e2eIPPool string // inClusterAddressManager is used to claim and cleanup IP addresses used for kubernetes control plane API Servers. inClusterAddressManager vsphereip.AddressManager @@ -137,7 +137,7 @@ func init() { flag.BoolVar(&alsoLogToFile, "e2e.also-log-to-file", true, "if true, ginkgo logs are additionally written to the `ginkgo-log.txt` file in the artifacts folder (including timestamps)") flag.BoolVar(&skipCleanup, "e2e.skip-resource-cleanup", false, "if true, the resource cleanup after tests will be skipped") flag.BoolVar(&useExistingCluster, "e2e.use-existing-cluster", false, "if true, the test uses the current cluster instead of creating a new one (default discovery rules apply)") - flag.StringVar(&e2eIPAMKubeconfig, "e2e.ipam-kubeconfig", "", "path to the kubeconfig for the IPAM cluster") + flag.StringVar(&e2eIPPool, "e2e.ip-pool", "", "IPPool to use for the e2e test. Supports the addresses, gateway and prefix fields from the InClusterIPPool CRD https://github.com/kubernetes-sigs/cluster-api-ipam-provider-in-cluster/blob/main/api/v1alpha2/inclusterippool_types.go") } func TestE2E(t *testing.T) { @@ -282,7 +282,7 @@ var _ = SynchronizedBeforeSuite(func() []byte { switch testTarget { case VCenterTestTarget: // Create the in cluster address manager - inClusterAddressManager, err = vsphereip.InClusterAddressManager(e2eIPAMKubeconfig, ipClaimLabels, skipCleanup) + inClusterAddressManager, err = vsphereip.InClusterAddressManager(ctx, bootstrapClusterProxy.GetClient(), e2eIPPool, ipClaimLabels, skipCleanup) Expect(err).ToNot(HaveOccurred()) case VCSimTestTarget: @@ -332,6 +332,8 @@ var _ = SynchronizedAfterSuite(func() { func initScheme() *runtime.Scheme { sc := runtime.NewScheme() framework.TryAddDefaultSchemes(sc) + // TODO: should probably be added to TryAddDefaultSchemes in core CAPI. 
+ _ = ipamv1.AddToScheme(sc) if testTarget == VCSimTestTarget { _ = vcsimv1.AddToScheme(sc) diff --git a/test/e2e/ipam_test.go b/test/e2e/ipam_test.go index 85372cfccd..e98b43efd1 100644 --- a/test/e2e/ipam_test.go +++ b/test/e2e/ipam_test.go @@ -42,6 +42,9 @@ var _ = Describe("ClusterClass Creation using Cluster API quick-start test and I // Set the WithGateway option to write the gateway ip address to the variable. // This variable is required for creating the InClusterIPPool for the ipam provider. WithGateway("IPAM_GATEWAY"), + // Set the WithPrefix option to set the prefix to the variable. + // This variable is required for creating the InClusterIPPool for the ipam provider. + WithPrefix("IPAM_PREFIX"), // Claim two IPs from the CI's IPAM provider to use in the InClusterIPPool of // the ipam provider. The IPs then get claimed during provisioning to configure // static IPs for the control-plane and worker node. diff --git a/test/framework/ip/addressmanager.go b/test/framework/ip/addressmanager.go index 15cbf1e1c2..88ffa61fb0 100644 --- a/test/framework/ip/addressmanager.go +++ b/test/framework/ip/addressmanager.go @@ -44,6 +44,7 @@ type AddressClaims []AddressClaim type claimOptions struct { additionalIPVariableNames []string gatewayIPVariableName string + prefixVariableName string } type ClaimOption func(*claimOptions) @@ -63,6 +64,13 @@ func WithGateway(variableName string) ClaimOption { } } +// WithPrefix instructs Setup to store the prefix from IPAM into the provided variableName. 
+func WithPrefix(variableName string) ClaimOption { + return func(o *claimOptions) { + o.prefixVariableName = variableName + } +} + type teardownOptions struct { folderName string vSphereClient *govmomi.Client diff --git a/test/framework/ip/incluster.go b/test/framework/ip/incluster.go index 48db2e2d39..2cc993b7ef 100644 --- a/test/framework/ip/incluster.go +++ b/test/framework/ip/incluster.go @@ -18,9 +18,10 @@ package ip import ( "context" + "encoding/json" "fmt" "os" - "path/filepath" + "strconv" "time" . "github.com/onsi/ginkgo/v2" @@ -30,12 +31,13 @@ import ( "github.com/vmware/govmomi/find" "github.com/vmware/govmomi/vim25/mo" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/rand" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" "k8s.io/utils/ptr" ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" @@ -43,42 +45,104 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -func init() { - ipamScheme = runtime.NewScheme() - _ = ipamv1.AddToScheme(ipamScheme) -} - var _ AddressManager = &inCluster{} +var ipPoolName = "capv-e2e-ippool" + type inCluster struct { client client.Client labels map[string]string skipCleanup bool + ipPool *unstructured.Unstructured +} + +// inClusterIPPoolSpec defines the desired state of InClusterIPPool. +// Note: This is a copy of the relevant fields from: https://github.com/kubernetes-sigs/cluster-api-ipam-provider-in-cluster/blob/main/api/v1alpha2/inclusterippool_types.go +// This was copied to avoid a go dependency on this provider. +type inClusterIPPoolSpec struct { + // Addresses is a list of IP addresses that can be assigned. This set of + // addresses can be non-contiguous. 
+ Addresses []string `json:"addresses"` + + // Prefix is the network prefix to use. + // +kubebuilder:validation:Maximum=128 + Prefix int `json:"prefix"` + + // Gateway + // +optional + Gateway string `json:"gateway,omitempty"` } // InClusterAddressManager returns an ip.AddressManager implementation that leverage on the IPAM provider installed into the management cluster. // If e2eIPAMKubeconfig is an empty string it will return a noop AddressManager which does nothing so we can fallback on setting environment variables. -func InClusterAddressManager(e2eIPAMKubeconfig string, labels map[string]string, skipCleanup bool) (AddressManager, error) { +func InClusterAddressManager(ctx context.Context, client client.Client, e2eIPPool string, labels map[string]string, skipCleanup bool) (AddressManager, error) { if len(labels) == 0 { return nil, fmt.Errorf("expecting labels to be set to prevent deletion of other IPAddressClaims") } - if e2eIPAMKubeconfig == "" { + if e2eIPPool == "" { return &noop{}, nil } - ipamClient, err := getClient(e2eIPAMKubeconfig) + ipPool, err := createIPPool(ctx, client, e2eIPPool) if err != nil { - return nil, err + return nil, errors.Wrapf(err, "failed to create IPPool") } return &inCluster{ labels: labels, - client: ipamClient, + client: client, + ipPool: ipPool, skipCleanup: skipCleanup, }, nil } +func createIPPool(ctx context.Context, c client.Client, e2eIPPool string) (*unstructured.Unstructured, error) { + ipPoolSpec := inClusterIPPoolSpec{} + if err := json.Unmarshal([]byte(e2eIPPool), &ipPoolSpec); err != nil { + return nil, fmt.Errorf("failed to unmarshal IP Pool configuration") + } + + ipPool := &unstructured.Unstructured{} + ipPool.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "ipam.cluster.x-k8s.io", + Version: "v1alpha2", + Kind: "InClusterIPPool", + }) + ipPool.SetNamespace(metav1.NamespaceDefault) + ipPool.SetName(ipPoolName) + // Note: We have to convert ipPoolSpec to a map[string]interface{}, otherwise SetNestedField 
panics in DeepCopyJSONValue. + addresses := []interface{}{} + for _, a := range ipPoolSpec.Addresses { + addresses = append(addresses, a) + } + spec := map[string]interface{}{ + "addresses": addresses, + "prefix": int64(ipPoolSpec.Prefix), // DeepCopyJSONValue only supports int64. + "gateway": ipPoolSpec.Gateway, + } + if err := unstructured.SetNestedField(ipPool.Object, spec, "spec"); err != nil { + return nil, fmt.Errorf("failed to set InClusterIPPool spec") + } + + // InClusterAddressManager is called on multiple ginkgo workers at the same time. + // So some of them will hit AlreadyExists errors. + // In this case we are just retrieving the already existing InClusterIPPool. + // Note: The InClusterIPPool is intentionally not deleted on TearDown, because at + // this time IPAddressClaim are still in deleting (so we would get an error when triggering deletion). + if err := c.Create(ctx, ipPool); err != nil { + if !apierrors.IsAlreadyExists(err) { + return nil, err + } + + if err := c.Get(ctx, client.ObjectKey{Namespace: metav1.NamespaceDefault, Name: ipPoolName}, ipPool); err != nil { + return nil, err + } + } + + return ipPool, nil +} + func (h *inCluster) ClaimIPs(ctx context.Context, opts ...ClaimOption) (AddressClaims, map[string]string) { options := &claimOptions{} for _, o := range opts { @@ -98,12 +162,19 @@ func (h *inCluster) ClaimIPs(ctx context.Context, opts ...ClaimOption) (AddressC }) Byf("Setting clusterctl variable %s to %s", variable, ip.Spec.Address) variables[variable] = ip.Spec.Address - if variable == ControlPlaneEndpointIPVariable && options.gatewayIPVariableName != "" { - // Set the gateway variable if requested to the gateway of the control plane IP. - // This is required in ipam scenarios, otherwise the VMs will not be able to - // connect to the public internet to pull images. 
-			Byf("Setting clusterctl variable %s to %s", variable, ip.Spec.Gateway)
-			variables[options.gatewayIPVariableName] = ip.Spec.Gateway
+		if variable == ControlPlaneEndpointIPVariable {
+			if options.gatewayIPVariableName != "" {
+				// Set the gateway variable if requested to the gateway of the control plane IP.
+				// This is required in ipam scenarios, otherwise the VMs will not be able to
+				// connect to the public internet to pull images.
+				Byf("Setting clusterctl variable %s to %s", options.gatewayIPVariableName, ip.Spec.Gateway)
+				variables[options.gatewayIPVariableName] = ip.Spec.Gateway
+			}
+			if options.prefixVariableName != "" {
+				// Set the prefix variable if requested to the prefix of the control plane IP.
+				Byf("Setting clusterctl variable %s to %s", options.prefixVariableName, strconv.Itoa(ip.Spec.Prefix))
+				variables[options.prefixVariableName] = strconv.Itoa(ip.Spec.Prefix)
+			}
 		}
 	}
 
@@ -225,20 +296,6 @@ func (h *inCluster) Teardown(ctx context.Context, opts ...TearDownOption) error
 	return nil
 }
 
-func getClient(e2eIPAMKubeconfig string) (client.Client, error) {
-	kubeConfig, err := os.ReadFile(filepath.Clean(e2eIPAMKubeconfig))
-	if err != nil {
-		return nil, err
-	}
-
-	restConfig, err := clientcmd.RESTConfigFromKubeConfig(kubeConfig)
-	if err != nil {
-		return nil, err
-	}
-
-	return client.New(restConfig, client.Options{Scheme: ipamScheme})
-}
-
 // getVirtualMachineIPAddresses lists all VirtualMachines in the given folder and
 // returns a map which contains the IP addresses of all machines.
 // If the given folder is not found it will return an error.
@@ -292,9 +349,9 @@ func (h *inCluster) claimIPAddress(ctx context.Context) (_ *ipamv1.IPAddress, _
 		},
 		Spec: ipamv1.IPAddressClaimSpec{
 			PoolRef: corev1.TypedLocalObjectReference{
-				APIGroup: ptr.To("ipam.cluster.x-k8s.io"),
-				Kind:     "InClusterIPPool",
-				Name:     "capv-e2e-ippool",
+				APIGroup: ptr.To(h.ipPool.GroupVersionKind().Group),
+				Kind:     h.ipPool.GroupVersionKind().Kind,
+				Name:     h.ipPool.GetName(),
 			},
 		},
 	}