diff --git a/Makefile b/Makefile index b74345ea3b..fd66b0a5bc 100644 --- a/Makefile +++ b/Makefile @@ -60,6 +60,7 @@ BUILD_DIR := .build TEST_DIR := test VCSIM_DIR := test/infrastructure/vcsim NETOP_DIR := test/infrastructure/net-operator +TEST_EXTENSION_DIR := test/infrastructure/test-extension TOOLS_DIR := hack/tools TOOLS_BIN_DIR := $(abspath $(TOOLS_DIR)/$(BIN_DIR)) FLAVOR_DIR := $(ROOT_DIR)/templates @@ -233,6 +234,10 @@ VM_OPERATOR_ALL_ARCH = amd64 arm64 NET_OPERATOR_IMAGE_NAME ?= cluster-api-net-operator NET_OPERATOR_IMG ?= $(STAGING_REGISTRY)/$(NET_OPERATOR_IMAGE_NAME) +# test-extension +TEST_EXTENSION_IMAGE_NAME ?= cluster-api-test-extension +TEST_EXTENSION_IMG ?= $(STAGING_REGISTRY)/$(TEST_EXTENSION_IMAGE_NAME) + # boskosctl BOSKOSCTL_IMG ?= gcr.io/k8s-staging-capi-vsphere/extra/boskosctl BOSKOSCTL_IMG_TAG ?= $(shell git describe --always --dirty) @@ -271,6 +276,7 @@ SUPERVISOR_WEBHOOK_ROOT ?= $(MANIFEST_ROOT)/supervisor/webhook RBAC_ROOT ?= $(MANIFEST_ROOT)/rbac VCSIM_RBAC_ROOT ?= $(VCSIM_DIR)/config/rbac NETOP_RBAC_ROOT ?= $(NETOP_DIR)/config/rbac +TEST_EXTENSION_RBAC_ROOT ?= $(TEST_EXTENSION_DIR)/config/rbac JANITOR_DIR ?= ./$(TOOLS_DIR)/janitor @@ -318,6 +324,11 @@ generate-manifests: $(CONTROLLER_GEN) ## Generate manifests e.g. CRD, RBAC etc. paths=./$(NETOP_DIR)/controllers/... \ output:rbac:dir=$(NETOP_RBAC_ROOT) \ rbac:roleName=manager-role + # test-extension is used for Runtime SDK tests + $(CONTROLLER_GEN) \ + paths=./$(TEST_EXTENSION_DIR)/... \ + output:rbac:dir=$(TEST_EXTENSION_RBAC_ROOT) \ + rbac:roleName=manager-role # vcsim crds are used for tests. 
$(CONTROLLER_GEN) \ paths=./$(VCSIM_DIR)/api/v1alpha1 \ @@ -377,6 +388,7 @@ generate-e2e-templates-main: $(KUSTOMIZE) ## Generate test templates for the mai # generate clusterclass and cluster topology cp "$(RELEASE_DIR)/main/clusterclass-template.yaml" "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/clusterclass/clusterclass-quick-start.yaml" "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/clusterclass" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/clusterclass-quick-start.yaml" + "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/clusterclass-runtimesdk" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/clusterclass-runtimesdk-quick-start.yaml" cp "$(RELEASE_DIR)/main/cluster-template-topology.yaml" "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/topology/cluster-template-topology.yaml" "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/topology" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/cluster-template-topology.yaml" "$(KUSTOMIZE)" --load-restrictor LoadRestrictionsNone build "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/install-on-bootstrap" > "$(E2E_GOVMOMI_TEMPLATE_DIR)/main/cluster-template-install-on-bootstrap.yaml" @@ -569,6 +581,15 @@ docker-build-net-operator: docker-pull-prerequisites ## Build the docker image f $(MAKE) set-manifest-pull-policy TARGET_RESOURCE="./$(NETOP_DIR)/config/default/manager_pull_policy.yaml"; \ fi +.PHONY: docker-build-test-extension +docker-build-test-extension: docker-pull-prerequisites ## Build the docker image for test-extension controller manager +## reads Dockerfile from stdin to avoid an incorrectly cached Dockerfile (https://github.com/moby/buildkit/issues/1368) + cat $(TEST_EXTENSION_DIR)/Dockerfile | DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg ldflags="$(LDFLAGS)" . 
-t $(TEST_EXTENSION_IMG)-$(ARCH):$(TAG) --file - + @if [ "${DOCKER_BUILD_MODIFY_MANIFESTS}" = "true" ]; then \ + $(MAKE) set-manifest-image MANIFEST_IMG=$(TEST_EXTENSION_IMG)-$(ARCH) MANIFEST_TAG=$(TAG) TARGET_RESOURCE="./$(TEST_EXTENSION_DIR)/config/default/manager_image_patch.yaml"; \ + $(MAKE) set-manifest-pull-policy TARGET_RESOURCE="./$(TEST_EXTENSION_DIR)/config/default/manager_pull_policy.yaml"; \ + fi + .PHONY: docker-build-boskosctl docker-build-boskosctl: cat hack/tools/boskosctl/Dockerfile | DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) . -t $(BOSKOSCTL_IMG):$(BOSKOSCTL_IMG_TAG) --file - @@ -636,6 +657,7 @@ e2e-images: ## Build the e2e manager image $(MAKE) REGISTRY=gcr.io/k8s-staging-capi-vsphere PULL_POLICY=IfNotPresent TAG=dev docker-build $(MAKE) REGISTRY=gcr.io/k8s-staging-capi-vsphere PULL_POLICY=IfNotPresent TAG=dev docker-build-vcsim $(MAKE) REGISTRY=gcr.io/k8s-staging-capi-vsphere PULL_POLICY=IfNotPresent TAG=dev docker-build-net-operator + $(MAKE) REGISTRY=gcr.io/k8s-staging-capi-vsphere PULL_POLICY=IfNotPresent TAG=dev docker-build-test-extension .PHONY: e2e e2e: e2e-images generate-e2e-templates diff --git a/test/e2e/config/vsphere.yaml b/test/e2e/config/vsphere.yaml index 0b29ace34f..5508e3f085 100644 --- a/test/e2e/config/vsphere.yaml +++ b/test/e2e/config/vsphere.yaml @@ -16,6 +16,8 @@ images: loadBehavior: mustLoad - name: gcr.io/k8s-staging-capi-vsphere/cluster-api-net-operator-{ARCH}:dev loadBehavior: mustLoad + - name: gcr.io/k8s-staging-capi-vsphere/cluster-api-test-extension-{ARCH}:dev + loadBehavior: mustLoad - name: gcr.io/k8s-staging-capi-vsphere/extra/vm-operator:v1.8.6-0-gde75746a loadBehavior: tryLoad @@ -226,6 +228,19 @@ providers: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" + - name: test-extension + type: RuntimeExtensionProvider # test-extension isn't a provider, but we fake it is so it can be handled by the clusterctl machinery. 
+ versions: + - name: v1.11.99 + # Use manifest from source files + value: ../../../../cluster-api-provider-vsphere/test/infrastructure/test-extension/config/default + contract: v1beta1 + files: + - sourcePath: "../data/shared/capv/main/metadata.yaml" + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" + variables: # Ensure all Kubernetes versions used here are covered in patch-vsphere-template.yaml KUBERNETES_VERSION: "v1.30.0" diff --git a/test/infrastructure/test-extension/Dockerfile b/test/infrastructure/test-extension/Dockerfile new file mode 100644 index 0000000000..9e6086d2b8 --- /dev/null +++ b/test/infrastructure/test-extension/Dockerfile @@ -0,0 +1,83 @@ +# syntax=docker/dockerfile:1.4 + +# Copyright 2024 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Build the manager binary +# Run this with docker build --build-arg builder_image= +ARG builder_image + +# Build architecture +ARG ARCH + +# Ignore Hadolint rule "Always tag the version of an image explicitly." +# It's an invalid finding since the image is explicitly set in the Makefile. 
+# https://github.com/hadolint/hadolint/wiki/DL3006 +# hadolint ignore=DL3006 +FROM ${builder_image} as builder +WORKDIR /workspace + +# Run this with docker build --build-arg goproxy=$(go env GOPROXY) to override the goproxy +ARG goproxy=https://proxy.golang.org +ENV GOPROXY=$goproxy + +# Copy the Go Modules manifests +COPY go.mod go.mod +COPY go.sum go.sum + +# Change directories into the test go module +WORKDIR /workspace/test + +# Copy the Go Modules manifests +COPY test/go.mod go.mod +COPY test/go.sum go.sum + +# Cache deps before building and copying source so that we don't need to re-download as much +# and so that source changes don't invalidate our downloaded layer +RUN --mount=type=cache,target=/go/pkg/mod \ + go mod download + +# This needs to build with the entire CAPV context +WORKDIR /workspace + +# Copy the sources (which includes the test/infrastructure/vcsim subdirectory) +COPY ./ ./ + +# Change directories into test-extension +WORKDIR /workspace/test/infrastructure/test-extension + +# Cache the go build into the Go’s compiler cache folder so we take benefits of compiler caching across docker build calls +RUN --mount=type=cache,target=/root/.cache/go-build \ + --mount=type=cache,target=/go/pkg/mod \ + go build . + +# Build +ARG package=. +ARG ARCH +ARG ldflags + +# Do not force rebuild of up-to-date packages (do not use -a) and use the compiler cache folder +RUN --mount=type=cache,target=/root/.cache/go-build \ + --mount=type=cache,target=/go/pkg/mod \ + CGO_ENABLED=0 GOOS=linux GOARCH=${ARCH} \ + go build -trimpath -ldflags "${ldflags} -extldflags '-static'" \ + -o manager ${package} + + +FROM gcr.io/distroless/static:nonroot-${ARCH} +WORKDIR / +COPY --from=builder /workspace/test/infrastructure/test-extension/manager . 
+# Use uid of nonroot user (65532) because kubernetes expects numeric user when applying pod security policies +USER 65532 +ENTRYPOINT ["/manager"] diff --git a/test/infrastructure/test-extension/README.md b/test/infrastructure/test-extension/README.md new file mode 100644 index 0000000000..342c2156a5 --- /dev/null +++ b/test/infrastructure/test-extension/README.md @@ -0,0 +1,3 @@ +# test-extension + +Provide a minimal implementation of a Runtime SDK test-extension. diff --git a/test/infrastructure/test-extension/config/certmanager/certificate.yaml b/test/infrastructure/test-extension/config/certmanager/certificate.yaml new file mode 100644 index 0000000000..4079986e89 --- /dev/null +++ b/test/infrastructure/test-extension/config/certmanager/certificate.yaml @@ -0,0 +1,24 @@ +# The following manifests contain a self-signed issuer CR and a certificate CR. +# More document can be found at https://docs.cert-manager.io +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: selfsigned-issuer + namespace: system +spec: + selfSigned: {} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml + namespace: system +spec: + # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize + dnsNames: + - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc + - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local + issuerRef: + kind: Issuer + name: selfsigned-issuer + secretName: $(SERVICE_NAME)-cert # this secret will not be prefixed, since it's not managed by kustomize diff --git a/test/infrastructure/test-extension/config/certmanager/kustomization.yaml b/test/infrastructure/test-extension/config/certmanager/kustomization.yaml new file mode 100644 index 0000000000..95f333f3f7 --- /dev/null +++ b/test/infrastructure/test-extension/config/certmanager/kustomization.yaml @@ -0,0 +1,5 @@ +resources: + - certificate.yaml + +configurations: + - kustomizeconfig.yaml diff --git 
a/test/infrastructure/test-extension/config/certmanager/kustomizeconfig.yaml b/test/infrastructure/test-extension/config/certmanager/kustomizeconfig.yaml new file mode 100644 index 0000000000..c6a6c0f1e0 --- /dev/null +++ b/test/infrastructure/test-extension/config/certmanager/kustomizeconfig.yaml @@ -0,0 +1,19 @@ +# This configuration is for teaching kustomize how to update name ref and var substitution +nameReference: + - kind: Issuer + group: cert-manager.io + fieldSpecs: + - kind: Certificate + group: cert-manager.io + path: spec/issuerRef/name + +varReference: + - kind: Certificate + group: cert-manager.io + path: spec/commonName + - kind: Certificate + group: cert-manager.io + path: spec/dnsNames + - kind: Certificate + group: cert-manager.io + path: spec/secretName diff --git a/test/infrastructure/test-extension/config/default/kustomization.yaml b/test/infrastructure/test-extension/config/default/kustomization.yaml new file mode 100644 index 0000000000..095c5ac85b --- /dev/null +++ b/test/infrastructure/test-extension/config/default/kustomization.yaml @@ -0,0 +1,54 @@ +namespace: vmware-system-test-extension + +namePrefix: vmware-system-test-extension- + +commonLabels: + # test-extension is not a provider, but by adding this label + # we can get this installed by Cluster APIs Tiltfile and by the clusterctl machinery we use in E2E tests. + cluster.x-k8s.io/provider: "runtime-extension-capv-test" + +resources: + - namespace.yaml + +bases: + - ../rbac + - ../manager + - ../webhook + - ../certmanager + +patchesStrategicMerge: + # Provide customizable hook for make targets. 
+ - manager_image_patch.yaml + - manager_pull_policy.yaml + - manager_webhook_patch.yaml + +vars: + - name: CERTIFICATE_NAMESPACE # namespace of the certificate CR + objref: + kind: Certificate + group: cert-manager.io + version: v1 + name: serving-cert # this name should match the one in certificate.yaml + fieldref: + fieldpath: metadata.namespace + - name: CERTIFICATE_NAME + objref: + kind: Certificate + group: cert-manager.io + version: v1 + name: serving-cert # this name should match the one in certificate.yaml + - name: SERVICE_NAMESPACE # namespace of the service + objref: + kind: Service + version: v1 + name: webhook-service + fieldref: + fieldpath: metadata.namespace + - name: SERVICE_NAME + objref: + kind: Service + version: v1 + name: webhook-service + +configurations: + - kustomizeconfig.yaml diff --git a/test/infrastructure/test-extension/config/default/kustomizeconfig.yaml b/test/infrastructure/test-extension/config/default/kustomizeconfig.yaml new file mode 100644 index 0000000000..eb191e64d0 --- /dev/null +++ b/test/infrastructure/test-extension/config/default/kustomizeconfig.yaml @@ -0,0 +1,4 @@ +# This configuration is for teaching kustomize how to update name ref and var substitution +varReference: +- kind: Deployment + path: spec/template/spec/volumes/secret/secretName diff --git a/test/infrastructure/test-extension/config/default/manager_image_patch.yaml b/test/infrastructure/test-extension/config/default/manager_image_patch.yaml new file mode 100644 index 0000000000..cd52fe1759 --- /dev/null +++ b/test/infrastructure/test-extension/config/default/manager_image_patch.yaml @@ -0,0 +1,11 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - image: gcr.io/k8s-staging-capi-vsphere/cluster-api-test-extension:dev + name: manager diff --git a/test/infrastructure/test-extension/config/default/manager_pull_policy.yaml 
b/test/infrastructure/test-extension/config/default/manager_pull_policy.yaml new file mode 100644 index 0000000000..74a0879c60 --- /dev/null +++ b/test/infrastructure/test-extension/config/default/manager_pull_policy.yaml @@ -0,0 +1,11 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + imagePullPolicy: Always diff --git a/test/infrastructure/test-extension/config/default/manager_webhook_patch.yaml b/test/infrastructure/test-extension/config/default/manager_webhook_patch.yaml new file mode 100644 index 0000000000..f18fd10f99 --- /dev/null +++ b/test/infrastructure/test-extension/config/default/manager_webhook_patch.yaml @@ -0,0 +1,23 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + ports: + - containerPort: 9443 + name: webhook-server + protocol: TCP + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true + volumes: + - name: cert + secret: + secretName: $(SERVICE_NAME)-cert # this secret will not be prefixed, since it's not managed by kustomize + diff --git a/test/infrastructure/test-extension/config/default/namespace.yaml b/test/infrastructure/test-extension/config/default/namespace.yaml new file mode 100644 index 0000000000..8b55c3cd89 --- /dev/null +++ b/test/infrastructure/test-extension/config/default/namespace.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + control-plane: controller-manager + name: system diff --git a/test/infrastructure/test-extension/config/manager/kustomization.yaml b/test/infrastructure/test-extension/config/manager/kustomization.yaml new file mode 100644 index 0000000000..5c5f0b84cb --- /dev/null +++ b/test/infrastructure/test-extension/config/manager/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- manager.yaml diff --git 
a/test/infrastructure/test-extension/config/manager/manager.yaml b/test/infrastructure/test-extension/config/manager/manager.yaml new file mode 100644 index 0000000000..71697ed8e3 --- /dev/null +++ b/test/infrastructure/test-extension/config/manager/manager.yaml @@ -0,0 +1,61 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system + labels: + control-plane: controller-manager +spec: + selector: + matchLabels: + control-plane: controller-manager + replicas: 1 + template: + metadata: + labels: + control-plane: controller-manager + spec: + containers: + - command: + - /manager + args: + - "--leader-elect" + - "--diagnostics-address=${CAPI_DIAGNOSTICS_ADDRESS:=:8443}" + - "--insecure-diagnostics=${CAPI_INSECURE_DIAGNOSTICS:=false}" + image: controller:latest + name: manager + ports: + - containerPort: 9440 + name: healthz + protocol: TCP + - containerPort: 8443 + name: metrics + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: healthz + livenessProbe: + httpGet: + path: /healthz + port: healthz + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + runAsUser: 65532 + runAsGroup: 65532 + terminationMessagePolicy: FallbackToLogsOnError + terminationGracePeriodSeconds: 10 + serviceAccountName: manager + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/test/infrastructure/test-extension/config/rbac/kustomization.yaml b/test/infrastructure/test-extension/config/rbac/kustomization.yaml new file mode 100644 index 0000000000..e82521ffdc --- /dev/null +++ b/test/infrastructure/test-extension/config/rbac/kustomization.yaml @@ -0,0 +1,8 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- role.yaml +- role_binding.yaml +- service_account.yaml +- 
leader_election_role.yaml +- leader_election_role_binding.yaml diff --git a/test/infrastructure/test-extension/config/rbac/leader_election_role.yaml b/test/infrastructure/test-extension/config/rbac/leader_election_role.yaml new file mode 100644 index 0000000000..23055e187d --- /dev/null +++ b/test/infrastructure/test-extension/config/rbac/leader_election_role.yaml @@ -0,0 +1,24 @@ +# permissions to do leader election. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: leader-election-role +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - create +- apiGroups: + - "coordination.k8s.io" + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete diff --git a/test/infrastructure/test-extension/config/rbac/leader_election_role_binding.yaml b/test/infrastructure/test-extension/config/rbac/leader_election_role_binding.yaml new file mode 100644 index 0000000000..d5e0044679 --- /dev/null +++ b/test/infrastructure/test-extension/config/rbac/leader_election_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: leader-election-role +subjects: +- kind: ServiceAccount + name: manager + namespace: system diff --git a/test/infrastructure/test-extension/config/rbac/role.yaml b/test/infrastructure/test-extension/config/rbac/role.yaml new file mode 100644 index 0000000000..8e90952e79 --- /dev/null +++ b/test/infrastructure/test-extension/config/rbac/role.yaml @@ -0,0 +1,29 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: manager-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - 
subjectaccessreviews + verbs: + - create diff --git a/test/infrastructure/test-extension/config/rbac/role_binding.yaml b/test/infrastructure/test-extension/config/rbac/role_binding.yaml new file mode 100644 index 0000000000..5a95f66d6f --- /dev/null +++ b/test/infrastructure/test-extension/config/rbac/role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: manager-role +subjects: +- kind: ServiceAccount + name: manager + namespace: system diff --git a/test/infrastructure/test-extension/config/rbac/service_account.yaml b/test/infrastructure/test-extension/config/rbac/service_account.yaml new file mode 100644 index 0000000000..77f747b53c --- /dev/null +++ b/test/infrastructure/test-extension/config/rbac/service_account.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: manager + namespace: system diff --git a/test/infrastructure/test-extension/config/tilt/extensionconfig.yaml b/test/infrastructure/test-extension/config/tilt/extensionconfig.yaml new file mode 100644 index 0000000000..d5ee765c97 --- /dev/null +++ b/test/infrastructure/test-extension/config/tilt/extensionconfig.yaml @@ -0,0 +1,18 @@ +apiVersion: runtime.cluster.x-k8s.io/v1alpha1 +kind: ExtensionConfig +metadata: + annotations: + runtime.cluster.x-k8s.io/inject-ca-from-secret: vmware-system-test-extension/vmware-system-test-extension-webhook-service-cert + name: test-extension +spec: + clientConfig: + service: + name: vmware-system-test-extension-webhook-service + namespace: vmware-system-test-extension # Note: this assumes the test extension get deployed in the default namespace defined in its own runtime-extensions-components.yaml + port: 443 + namespaceSelector: + matchExpressions: + - key: kubernetes.io/metadata.name + operator: In + values: + - default # Note: this assumes the test extension is used by Cluster in 
the default namespace only \ No newline at end of file diff --git a/test/infrastructure/test-extension/config/webhook/kustomization.yaml b/test/infrastructure/test-extension/config/webhook/kustomization.yaml new file mode 100644 index 0000000000..66157d5d5f --- /dev/null +++ b/test/infrastructure/test-extension/config/webhook/kustomization.yaml @@ -0,0 +1,5 @@ +resources: +- service.yaml + +configurations: +- kustomizeconfig.yaml diff --git a/test/infrastructure/test-extension/config/webhook/kustomizeconfig.yaml b/test/infrastructure/test-extension/config/webhook/kustomizeconfig.yaml new file mode 100644 index 0000000000..25e21e3c96 --- /dev/null +++ b/test/infrastructure/test-extension/config/webhook/kustomizeconfig.yaml @@ -0,0 +1,25 @@ +# the following config is for teaching kustomize where to look at when substituting vars. +# It requires kustomize v2.1.0 or newer to work properly. +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: MutatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/name + - kind: ValidatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/name + +namespace: +- kind: MutatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/namespace + create: true +- kind: ValidatingWebhookConfiguration + group: admissionregistration.k8s.io + path: webhooks/clientConfig/service/namespace + create: true + +varReference: +- path: metadata/annotations diff --git a/test/infrastructure/test-extension/config/webhook/service.yaml b/test/infrastructure/test-extension/config/webhook/service.yaml new file mode 100644 index 0000000000..711977f54f --- /dev/null +++ b/test/infrastructure/test-extension/config/webhook/service.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Service +metadata: + name: webhook-service + namespace: system +spec: + ports: + - port: 443 + targetPort: webhook-server diff --git 
a/test/infrastructure/test-extension/main.go b/test/infrastructure/test-extension/main.go new file mode 100644 index 0000000000..70e025dcdc --- /dev/null +++ b/test/infrastructure/test-extension/main.go @@ -0,0 +1,400 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// main is the main package for the test extension. +// The test extension serves two goals: +// - to provide a reference implementation of Runtime Extension +// - to implement the Runtime Extension used by Cluster API E2E tests. 
+package main + +import ( + "context" + "flag" + "os" + goruntime "runtime" + "time" + + "github.com/spf13/pflag" + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/tools/leaderelection/resourcelock" + cliflag "k8s.io/component-base/cli/flag" + "k8s.io/component-base/logs" + logsv1 "k8s.io/component-base/logs/api/v1" + _ "k8s.io/component-base/logs/json/register" + "k8s.io/klog/v2" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + "sigs.k8s.io/cluster-api/controllers/remote" + controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog" + runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" + "sigs.k8s.io/cluster-api/exp/runtime/server" + "sigs.k8s.io/cluster-api/test/extension/handlers/lifecycle" + "sigs.k8s.io/cluster-api/util/flags" + "sigs.k8s.io/cluster-api/version" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + + infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" + vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" + "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/test-extension/topologymutation" +) + +var ( + // catalog contains all information about RuntimeHooks. + catalog = runtimecatalog.New() + + // scheme is a Kubernetes runtime scheme containing all the information about API types used by the test extension. + // NOTE: it is not mandatory to use scheme in custom RuntimeExtension, but working with typed API objects makes code + // easier to read and less error-prone than using unstructured or working with raw json/yaml. 
+ scheme = runtime.NewScheme() + // Creates a logger to be used during the main func using controller runtime utilities + // NOTE: it is not mandatory to use controller runtime utilities in custom RuntimeExtension, but it is recommended + // because it makes log from those components similar to log from controllers. + setupLog = ctrl.Log.WithName("setup") + controllerName = "cluster-api-test-extension-manager" + + // flags. + enableLeaderElection bool + leaderElectionLeaseDuration time.Duration + leaderElectionRenewDeadline time.Duration + leaderElectionRetryPeriod time.Duration + profilerAddress string + enableContentionProfiling bool + syncPeriod time.Duration + restConfigQPS float32 + restConfigBurst int + webhookPort int + webhookCertDir string + webhookCertName string + webhookKeyName string + healthAddr string + tlsOptions = flags.TLSOptions{} + diagnosticsOptions = flags.DiagnosticsOptions{} + logOptions = logs.NewOptions() +) + +func init() { + // Adds to the scheme all the API types we used by the test extension. + _ = clientgoscheme.AddToScheme(scheme) + _ = apiextensionsv1.AddToScheme(scheme) + + _ = clusterv1.AddToScheme(scheme) + _ = bootstrapv1.AddToScheme(scheme) + _ = controlplanev1.AddToScheme(scheme) + + _ = infrav1.AddToScheme(scheme) + _ = vmwarev1.AddToScheme(scheme) + + // Register the RuntimeHook types into the catalog. + _ = runtimehooksv1.AddToCatalog(catalog) +} + +// InitFlags initializes the flags. +func InitFlags(fs *pflag.FlagSet) { + // Initialize logs flags using Kubernetes component-base machinery. + // NOTE: it is not mandatory to use Kubernetes component-base machinery in custom RuntimeExtension, but it is + // recommended because it helps in ensuring consistency across different components in the cluster. + logsv1.AddFlags(logOptions, fs) + + fs.BoolVar(&enableLeaderElection, "leader-elect", false, + "Enable leader election for controller manager. 
Enabling this will ensure there is only one active controller manager.") + + fs.DurationVar(&leaderElectionLeaseDuration, "leader-elect-lease-duration", 15*time.Second, + "Interval at which non-leader candidates will wait to force acquire leadership (duration string)") + + fs.DurationVar(&leaderElectionRenewDeadline, "leader-elect-renew-deadline", 10*time.Second, + "Duration that the leading controller manager will retry refreshing leadership before giving up (duration string)") + + fs.DurationVar(&leaderElectionRetryPeriod, "leader-elect-retry-period", 2*time.Second, + "Duration the LeaderElector clients should wait between tries of actions (duration string)") + + fs.StringVar(&profilerAddress, "profiler-address", "", + "Bind address to expose the pprof profiler (e.g. localhost:6060)") + + fs.BoolVar(&enableContentionProfiling, "contention-profiling", false, + "Enable block profiling") + + fs.DurationVar(&syncPeriod, "sync-period", 10*time.Minute, + "The minimum interval at which watched resources are reconciled (e.g. 15m)") + + fs.Float32Var(&restConfigQPS, "kube-api-qps", 20, + "Maximum queries per second from the controller client to the Kubernetes API server. Defaults to 20") + + fs.IntVar(&restConfigBurst, "kube-api-burst", 30, + "Maximum number of queries that should be allowed in one burst from the controller client to the Kubernetes API server. 
Default 30") + + fs.IntVar(&webhookPort, "webhook-port", 9443, + "Webhook Server port") + + fs.StringVar(&webhookCertDir, "webhook-cert-dir", "/tmp/k8s-webhook-server/serving-certs/", + "Webhook cert dir.") + + fs.StringVar(&webhookCertName, "webhook-cert-name", "tls.crt", + "Webhook cert name.") + + fs.StringVar(&webhookKeyName, "webhook-key-name", "tls.key", + "Webhook key name.") + + fs.StringVar(&healthAddr, "health-addr", ":9440", + "The address the health endpoint binds to.") + + flags.AddDiagnosticsOptions(fs, &diagnosticsOptions) + flags.AddTLSOptions(fs, &tlsOptions) + + // Add test-extension specific flags + // NOTE: it is not mandatory to use the same flag names in all RuntimeExtension, but it is recommended when + // addressing common concerns like profiler-address, webhook-port, webhook-cert-dir etc. because it helps in ensuring + // consistency across different components in the cluster. +} + +// Add RBAC for the authorized diagnostics endpoint. +// +kubebuilder:rbac:groups=authentication.k8s.io,resources=tokenreviews,verbs=create +// +kubebuilder:rbac:groups=authorization.k8s.io,resources=subjectaccessreviews,verbs=create + +func main() { + // Initialize and parse command line flags. + InitFlags(pflag.CommandLine) + pflag.CommandLine.SetNormalizeFunc(cliflag.WordSepNormalizeFunc) + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + // Set log level 2 as default. + if err := pflag.CommandLine.Set("v", "2"); err != nil { + setupLog.Error(err, "Failed to set default log level") + os.Exit(1) + } + pflag.Parse() + + // Validates logs flags using Kubernetes component-base machinery and apply them + // so klog will automatically use the right logger. + // NOTE: klog is the log of choice of component-base machinery. + if err := logsv1.ValidateAndApply(logOptions, nil); err != nil { + setupLog.Error(err, "Unable to start extension") + os.Exit(1) + } + + // Add the klog logger in the context. 
+ // NOTE: it is not mandatory to use contextual logging in custom RuntimeExtension, but it is recommended + // because it allows using a logger stored in the context across the entire chain of calls (without + // requiring an additional log parameter in all the functions). + ctrl.SetLogger(klog.Background()) + + restConfig := ctrl.GetConfigOrDie() + restConfig.QPS = restConfigQPS + restConfig.Burst = restConfigBurst + restConfig.UserAgent = remote.DefaultClusterAPIUserAgent(controllerName) + + tlsOptionOverrides, err := flags.GetTLSOptionOverrideFuncs(tlsOptions) + if err != nil { + setupLog.Error(err, "Unable to add TLS settings to the webhook server") + os.Exit(1) + } + + diagnosticsOpts := flags.GetDiagnosticsOptions(diagnosticsOptions) + + if enableContentionProfiling { + goruntime.SetBlockProfileRate(1) + } + + // Create an HTTP server for serving Runtime Extensions. + runtimeExtensionWebhookServer, err := server.New(server.Options{ + Port: webhookPort, + CertDir: webhookCertDir, + CertName: webhookCertName, + KeyName: webhookKeyName, + TLSOpts: tlsOptionOverrides, + Catalog: catalog, + }) + if err != nil { + setupLog.Error(err, "Error creating runtime extension webhook server") + os.Exit(1) + } + + ctrlOptions := ctrl.Options{ + Scheme: scheme, + LeaderElection: enableLeaderElection, + LeaderElectionID: "controller-leader-election-capv-test-extension", + LeaseDuration: &leaderElectionLeaseDuration, + RenewDeadline: &leaderElectionRenewDeadline, + RetryPeriod: &leaderElectionRetryPeriod, + LeaderElectionResourceLock: resourcelock.LeasesResourceLock, + HealthProbeBindAddress: healthAddr, + PprofBindAddress: profilerAddress, + Metrics: diagnosticsOpts, + Cache: cache.Options{ + SyncPeriod: &syncPeriod, + }, + Client: client.Options{ + Cache: &client.CacheOptions{ + DisableFor: []client.Object{ + &corev1.ConfigMap{}, + &corev1.Secret{}, + }, + // Use the cache for all Unstructured get/list calls. 
+ Unstructured: true, + }, + }, + WebhookServer: runtimeExtensionWebhookServer, + } + + // Create the manager. + mgr, err := ctrl.NewManager(restConfig, ctrlOptions) + if err != nil { + setupLog.Error(err, "Unable to start manager") + os.Exit(1) + } + + // Set up a context listening for SIGINT. + ctx := ctrl.SetupSignalHandler() + + // Setup Runtime Extensions. + setupTopologyMutationHookHandlers(runtimeExtensionWebhookServer) + setupLifecycleHookHandlers(mgr, runtimeExtensionWebhookServer) + + // Setup checks, indexes, reconcilers and webhooks. + setupChecks(mgr) + setupIndexes(ctx, mgr) + setupReconcilers(ctx, mgr) + setupWebhooks(mgr) + + setupLog.Info("Starting manager", "version", version.Get().String()) + if err := mgr.Start(ctx); err != nil { + setupLog.Error(err, "Problem running manager") + os.Exit(1) + } +} + +// setupTopologyMutationHookHandlers sets up Topology Mutation Hooks (Runtime Patches). +func setupTopologyMutationHookHandlers(runtimeExtensionWebhookServer *server.Server) { + _ = runtimeExtensionWebhookServer + // Create the ExtensionHandlers for the Topology Mutation Hooks. + // NOTE: it is not mandatory to group all the ExtensionHandlers using a struct, what is important + // is to have HandlerFunc with the signature defined in sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1. 
+ topologyMutationExtensionHandlers := topologymutation.NewExtensionHandlers(scheme) + + if err := runtimeExtensionWebhookServer.AddExtensionHandler(server.ExtensionHandler{ + Hook: runtimehooksv1.GeneratePatches, + Name: "generate-patches", + HandlerFunc: topologyMutationExtensionHandlers.GeneratePatches, + }); err != nil { + setupLog.Error(err, "Error adding handler") + os.Exit(1) + } + + if err := runtimeExtensionWebhookServer.AddExtensionHandler(server.ExtensionHandler{ + Hook: runtimehooksv1.ValidateTopology, + Name: "validate-topology", + HandlerFunc: topologyMutationExtensionHandlers.ValidateTopology, + }); err != nil { + setupLog.Error(err, "Error adding handler") + os.Exit(1) + } + + if err := runtimeExtensionWebhookServer.AddExtensionHandler(server.ExtensionHandler{ + Hook: runtimehooksv1.DiscoverVariables, + Name: "discover-variables", + HandlerFunc: topologyMutationExtensionHandlers.DiscoverVariables, + }); err != nil { + setupLog.Error(err, "Error adding handler") + os.Exit(1) + } +} + +// setupLifecycleHookHandlers sets up Lifecycle Hooks. +func setupLifecycleHookHandlers(mgr ctrl.Manager, runtimeExtensionWebhookServer *server.Server) { + // Create the ExtensionHandlers for the lifecycle hooks + // NOTE: it is not mandatory to group all the ExtensionHandlers using a struct, what is important + // is to have HandlerFunc with the signature defined in sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1. 
+ lifecycleExtensionHandlers := lifecycle.NewExtensionHandlers(mgr.GetClient()) + + if err := runtimeExtensionWebhookServer.AddExtensionHandler(server.ExtensionHandler{ + Hook: runtimehooksv1.BeforeClusterCreate, + Name: "before-cluster-create", + HandlerFunc: lifecycleExtensionHandlers.DoBeforeClusterCreate, + }); err != nil { + setupLog.Error(err, "Error adding handler") + os.Exit(1) + } + + if err := runtimeExtensionWebhookServer.AddExtensionHandler(server.ExtensionHandler{ + Hook: runtimehooksv1.AfterControlPlaneInitialized, + Name: "after-control-plane-initialized", + HandlerFunc: lifecycleExtensionHandlers.DoAfterControlPlaneInitialized, + }); err != nil { + setupLog.Error(err, "Error adding handler") + os.Exit(1) + } + + if err := runtimeExtensionWebhookServer.AddExtensionHandler(server.ExtensionHandler{ + Hook: runtimehooksv1.BeforeClusterUpgrade, + Name: "before-cluster-upgrade", + HandlerFunc: lifecycleExtensionHandlers.DoBeforeClusterUpgrade, + }); err != nil { + setupLog.Error(err, "Error adding handler") + os.Exit(1) + } + + if err := runtimeExtensionWebhookServer.AddExtensionHandler(server.ExtensionHandler{ + Hook: runtimehooksv1.AfterControlPlaneUpgrade, + Name: "after-control-plane-upgrade", + HandlerFunc: lifecycleExtensionHandlers.DoAfterControlPlaneUpgrade, + }); err != nil { + setupLog.Error(err, "Error adding handler") + os.Exit(1) + } + + if err := runtimeExtensionWebhookServer.AddExtensionHandler(server.ExtensionHandler{ + Hook: runtimehooksv1.AfterClusterUpgrade, + Name: "after-cluster-upgrade", + HandlerFunc: lifecycleExtensionHandlers.DoAfterClusterUpgrade, + }); err != nil { + setupLog.Error(err, "Error adding handler") + os.Exit(1) + } + + if err := runtimeExtensionWebhookServer.AddExtensionHandler(server.ExtensionHandler{ + Hook: runtimehooksv1.BeforeClusterDelete, + Name: "before-cluster-delete", + HandlerFunc: lifecycleExtensionHandlers.DoBeforeClusterDelete, + }); err != nil { + setupLog.Error(err, "Error adding handler") + 
os.Exit(1) + } +} + +func setupChecks(mgr ctrl.Manager) { + if err := mgr.AddReadyzCheck("webhook", mgr.GetWebhookServer().StartedChecker()); err != nil { + setupLog.Error(err, "Unable to create ready check") + os.Exit(1) + } + + if err := mgr.AddHealthzCheck("webhook", mgr.GetWebhookServer().StartedChecker()); err != nil { + setupLog.Error(err, "Unable to create health check") + os.Exit(1) + } +} + +func setupIndexes(_ context.Context, _ ctrl.Manager) { +} + +func setupReconcilers(_ context.Context, _ ctrl.Manager) { +} + +func setupWebhooks(_ ctrl.Manager) { +} diff --git a/test/infrastructure/test-extension/tilt-provider.yaml b/test/infrastructure/test-extension/tilt-provider.yaml new file mode 100644 index 0000000000..c23ce01609 --- /dev/null +++ b/test/infrastructure/test-extension/tilt-provider.yaml @@ -0,0 +1,11 @@ +--- +- name: vsphere-test-extension + config: + version: v1.11.99 + image: gcr.io/k8s-staging-capi-vsphere/cluster-api-test-extension + live_reload_deps: + - main.go + - controllers + label: VSPHERE_EXTENSION + additional_resources: + - config/tilt/extensionconfig.yaml diff --git a/test/infrastructure/test-extension/topologymutation/handler.go b/test/infrastructure/test-extension/topologymutation/handler.go new file mode 100644 index 0000000000..e4cc5b4db0 --- /dev/null +++ b/test/infrastructure/test-extension/topologymutation/handler.go @@ -0,0 +1,164 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package topologymutation contains the handlers for the topologymutation webhook. +// +// The implementation of the handlers is specifically designed for Cluster API E2E tests use cases. +// When implementing custom RuntimeExtension, it is only required to expose HandlerFunc with the +// signature defined in sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1. +package topologymutation + +import ( + "context" + + "github.com/pkg/errors" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" + "sigs.k8s.io/cluster-api/exp/runtime/topologymutation" + ctrl "sigs.k8s.io/controller-runtime" + + infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" + vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" +) + +// +kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;patch;update;create + +// ExtensionHandlers provides a common struct shared across the topology mutation hooks handlers; +// this is convenient because in Cluster API's E2E tests all of them are using a decoder for working with typed +// API objects, which makes code easier to read and less error prone than using unstructured or working with raw json/yaml. +// NOTE: it is not mandatory to use an ExtensionHandlers in custom RuntimeExtension, what is important +// is to expose HandlerFunc with the signature defined in sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1. +type ExtensionHandlers struct { + decoder runtime.Decoder +} + +// NewExtensionHandlers returns a new ExtensionHandlers for the topology mutation hook handlers. 
+func NewExtensionHandlers(scheme *runtime.Scheme) *ExtensionHandlers { + return &ExtensionHandlers{ + // Add the apiGroups being handled to the decoder + decoder: serializer.NewCodecFactory(scheme).UniversalDecoder( + infrav1.GroupVersion, + controlplanev1.GroupVersion, + bootstrapv1.GroupVersion, + ), + } +} + +// GeneratePatches implements the HandlerFunc for the GeneratePatches hook. +// The hook adds to the response the patches we are using in Cluster API E2E tests. +// NOTE: custom RuntimeExtension must implement the body of this func according to the specific use case. +func (h *ExtensionHandlers) GeneratePatches(ctx context.Context, req *runtimehooksv1.GeneratePatchesRequest, resp *runtimehooksv1.GeneratePatchesResponse) { + log := ctrl.LoggerFrom(ctx) + log.Info("GeneratePatches is called") + + // TODO: validate variables. + + // By using WalkTemplates it is possible to implement patches using typed API objects, which makes code + // easier to read and less error prone than using unstructured or working with raw json/yaml. + // IMPORTANT: by unit testing this func/nested func properly, it is possible to prevent unexpected rollouts when patches are modified. 
+ topologymutation.WalkTemplates(ctx, h.decoder, req, resp, func(ctx context.Context, obj runtime.Object, variables map[string]apiextensionsv1.JSON, _ runtimehooksv1.HolderReference) error { + log := ctrl.LoggerFrom(ctx) + + switch obj := obj.(type) { + case *infrav1.VSphereClusterTemplate: + if err := patchGovmomiClusterTemplate(ctx, obj, variables); err != nil { + log.Error(err, "Error patching VSphereClusterTemplate") + return errors.Wrap(err, "error patching VSphereClusterTemplate") + } + case *infrav1.VSphereMachineTemplate: + if err := patchGovmomiMachineTemplate(ctx, obj, variables); err != nil { + log.Error(err, "Error patching VSphereMachineTemplate") + return errors.Wrap(err, "error patching VSphereMachineTemplate") + } + case *vmwarev1.VSphereClusterTemplate: + if err := patchSupervisorClusterTemplate(ctx, obj, variables); err != nil { + log.Error(err, "Error patching VSphereClusterTemplate") + return errors.Wrap(err, "error patching VSphereClusterTemplate") + } + case *vmwarev1.VSphereMachineTemplate: + if err := patchSupervisorMachineTemplate(ctx, obj, variables); err != nil { + log.Error(err, "Error patching VSphereMachineTemplate") + return errors.Wrap(err, "error patching VSphereMachineTemplate") + } + } + return nil + }) +} + +// patchGovmomiClusterTemplate patches the govmomi VSphereClusterTemplate. +// NOTE: this patch is not required for any special reason, it is used for testing the patch machinery itself. +func patchGovmomiClusterTemplate(_ context.Context, _ *infrav1.VSphereClusterTemplate, _ map[string]apiextensionsv1.JSON) error { + // TODO(chrischdi): implement some patch + return nil +} + +// patchSupervisorClusterTemplate patches the supervisor VSphereClusterTemplate. +// NOTE: this patch is not required for any special reason, it is used for testing the patch machinery itself. 
+func patchSupervisorClusterTemplate(_ context.Context, _ *vmwarev1.VSphereClusterTemplate, _ map[string]apiextensionsv1.JSON) error { + // TODO(chrischdi): implement some patch + return nil +} + +// patchGovmomiMachineTemplate patches the govmomi VSphereMachineTemplate. +// NOTE: this patch is not required for any special reason, it is used for testing the patch machinery itself. +func patchGovmomiMachineTemplate(_ context.Context, _ *infrav1.VSphereMachineTemplate, _ map[string]apiextensionsv1.JSON) error { + // TODO(chrischdi): implement some patch + return nil +} + +// patchSupervisorMachineTemplate patches the supervisor VSphereMachineTemplate. +// NOTE: this patch is not required for any special reason, it is used for testing the patch machinery itself. +func patchSupervisorMachineTemplate(_ context.Context, _ *vmwarev1.VSphereMachineTemplate, _ map[string]apiextensionsv1.JSON) error { + // TODO(chrischdi): implement some patch + return nil +} + +// ValidateTopology implements the HandlerFunc for the ValidateTopology hook. +// Cluster API E2E tests currently only validate that the hook gets called. +// NOTE: custom RuntimeExtension must implement the body of this func according to the specific use case. +func (h *ExtensionHandlers) ValidateTopology(ctx context.Context, _ *runtimehooksv1.ValidateTopologyRequest, resp *runtimehooksv1.ValidateTopologyResponse) { + log := ctrl.LoggerFrom(ctx) + log.Info("ValidateTopology called") + + resp.Status = runtimehooksv1.ResponseStatusSuccess +} + +// DiscoverVariables implements the HandlerFunc for the DiscoverVariables hook. 
+func (h *ExtensionHandlers) DiscoverVariables(ctx context.Context, _ *runtimehooksv1.DiscoverVariablesRequest, resp *runtimehooksv1.DiscoverVariablesResponse) { + log := ctrl.LoggerFrom(ctx) + log.Info("DiscoverVariables called") + + resp.Status = runtimehooksv1.ResponseStatusSuccess + resp.Variables = []clusterv1.ClusterClassVariable{ + { + Name: "unusedTest", + Required: false, + Schema: clusterv1.VariableSchema{ + OpenAPIV3Schema: clusterv1.JSONSchemaProps{ + Type: "string", + Default: &apiextensionsv1.JSON{Raw: []byte(`""`)}, + Example: &apiextensionsv1.JSON{Raw: []byte(`""`)}, + Description: "unusedTest is not used.", + }, + }, + }, + } +}