From 8c80dcc596d8a5580aaf2da02dfd6e5e0d7060ee Mon Sep 17 00:00:00 2001 From: Christian Schlotter Date: Fri, 21 Jun 2024 19:40:18 +0200 Subject: [PATCH] test: implement test runtime extension --- .dockerignore | 3 + Makefile | 21 + test/infrastructure/test-extension/Dockerfile | 83 ++++ test/infrastructure/test-extension/README.md | 3 + .../config/certmanager/certificate.yaml | 24 ++ .../config/certmanager/kustomization.yaml | 5 + .../config/certmanager/kustomizeconfig.yaml | 19 + .../config/default/kustomization.yaml | 54 +++ .../config/default/kustomizeconfig.yaml | 4 + .../config/default/manager_image_patch.yaml | 11 + .../config/default/manager_pull_policy.yaml | 11 + .../config/default/manager_webhook_patch.yaml | 23 + .../config/default/namespace.yaml | 6 + .../config/manager/kustomization.yaml | 2 + .../config/manager/manager.yaml | 61 +++ .../config/rbac/kustomization.yaml | 8 + .../config/rbac/leader_election_role.yaml | 24 ++ .../rbac/leader_election_role_binding.yaml | 12 + .../test-extension/config/rbac/role.yaml | 29 ++ .../config/rbac/role_binding.yaml | 12 + .../config/rbac/service_account.yaml | 5 + .../config/tilt/extensionconfig.yaml | 18 + .../config/webhook/kustomization.yaml | 5 + .../config/webhook/kustomizeconfig.yaml | 5 + .../config/webhook/service.yaml | 9 + test/infrastructure/test-extension/main.go | 400 +++++++++++++++++ .../test-extension/tilt-provider.yaml | 11 + .../topologymutation/handler.go | 408 ++++++++++++++++++ 28 files changed, 1276 insertions(+) create mode 100644 test/infrastructure/test-extension/Dockerfile create mode 100644 test/infrastructure/test-extension/README.md create mode 100644 test/infrastructure/test-extension/config/certmanager/certificate.yaml create mode 100644 test/infrastructure/test-extension/config/certmanager/kustomization.yaml create mode 100644 test/infrastructure/test-extension/config/certmanager/kustomizeconfig.yaml create mode 100644 test/infrastructure/test-extension/config/default/kustomization.yaml create mode 100644 test/infrastructure/test-extension/config/default/kustomizeconfig.yaml create mode 100644 test/infrastructure/test-extension/config/default/manager_image_patch.yaml create mode 100644 test/infrastructure/test-extension/config/default/manager_pull_policy.yaml create mode 100644 test/infrastructure/test-extension/config/default/manager_webhook_patch.yaml create mode 100644 test/infrastructure/test-extension/config/default/namespace.yaml create mode 100644 test/infrastructure/test-extension/config/manager/kustomization.yaml create mode 100644 test/infrastructure/test-extension/config/manager/manager.yaml create mode 100644 test/infrastructure/test-extension/config/rbac/kustomization.yaml create mode 100644 test/infrastructure/test-extension/config/rbac/leader_election_role.yaml create mode 100644 test/infrastructure/test-extension/config/rbac/leader_election_role_binding.yaml create mode 100644 test/infrastructure/test-extension/config/rbac/role.yaml create mode 100644 test/infrastructure/test-extension/config/rbac/role_binding.yaml create mode 100644 test/infrastructure/test-extension/config/rbac/service_account.yaml create mode 100644 test/infrastructure/test-extension/config/tilt/extensionconfig.yaml create mode 100644 test/infrastructure/test-extension/config/webhook/kustomization.yaml create mode 100644 test/infrastructure/test-extension/config/webhook/kustomizeconfig.yaml create mode 100644 test/infrastructure/test-extension/config/webhook/service.yaml create mode 100644 
test/infrastructure/test-extension/main.go create mode 100644 test/infrastructure/test-extension/tilt-provider.yaml create mode 100644 test/infrastructure/test-extension/topologymutation/handler.go diff --git a/.dockerignore b/.dockerignore index 42b1005a11..7e03d2ea17 100644 --- a/.dockerignore +++ b/.dockerignore @@ -17,6 +17,9 @@ _artifacts Makefile **/Makefile +# add yaml files from internal/kubevip which are required for embedding. +!internal/kubevip/*.yaml + # ignores changes to test-only code to avoid extra rebuilds test/e2e/** diff --git a/Makefile b/Makefile index b74345ea3b..bb2d831a8d 100644 --- a/Makefile +++ b/Makefile @@ -60,6 +60,7 @@ BUILD_DIR := .build TEST_DIR := test VCSIM_DIR := test/infrastructure/vcsim NETOP_DIR := test/infrastructure/net-operator +TEST_EXTENSION_DIR := test/infrastructure/test-extension TOOLS_DIR := hack/tools TOOLS_BIN_DIR := $(abspath $(TOOLS_DIR)/$(BIN_DIR)) FLAVOR_DIR := $(ROOT_DIR)/templates @@ -233,6 +234,10 @@ VM_OPERATOR_ALL_ARCH = amd64 arm64 NET_OPERATOR_IMAGE_NAME ?= cluster-api-net-operator NET_OPERATOR_IMG ?= $(STAGING_REGISTRY)/$(NET_OPERATOR_IMAGE_NAME) +# test-extension +TEST_EXTENSION_IMAGE_NAME ?= cluster-api-test-extension +TEST_EXTENSION_IMG ?= $(STAGING_REGISTRY)/$(TEST_EXTENSION_IMAGE_NAME) + # boskosctl BOSKOSCTL_IMG ?= gcr.io/k8s-staging-capi-vsphere/extra/boskosctl BOSKOSCTL_IMG_TAG ?= $(shell git describe --always --dirty) @@ -271,6 +276,7 @@ SUPERVISOR_WEBHOOK_ROOT ?= $(MANIFEST_ROOT)/supervisor/webhook RBAC_ROOT ?= $(MANIFEST_ROOT)/rbac VCSIM_RBAC_ROOT ?= $(VCSIM_DIR)/config/rbac NETOP_RBAC_ROOT ?= $(NETOP_DIR)/config/rbac +TEST_EXTENSION_RBAC_ROOT ?= $(TEST_EXTENSION_DIR)/config/rbac JANITOR_DIR ?= ./$(TOOLS_DIR)/janitor @@ -318,6 +324,11 @@ generate-manifests: $(CONTROLLER_GEN) ## Generate manifests e.g. CRD, RBAC etc. paths=./$(NETOP_DIR)/controllers/... \ output:rbac:dir=$(NETOP_RBAC_ROOT) \ rbac:roleName=manager-role + # test-extension is used for Runtime SDK tests + $(CONTROLLER_GEN) \ + paths=./$(TEST_EXTENSION_DIR)/... \ + output:rbac:dir=$(TEST_EXTENSION_RBAC_ROOT) \ + rbac:roleName=manager-role # vcsim crds are used for tests. $(CONTROLLER_GEN) \ paths=./$(VCSIM_DIR)/api/v1alpha1 \ @@ -569,6 +580,15 @@ docker-build-net-operator: docker-pull-prerequisites ## Build the docker image f $(MAKE) set-manifest-pull-policy TARGET_RESOURCE="./$(NETOP_DIR)/config/default/manager_pull_policy.yaml"; \ fi +.PHONY: docker-build-test-extension +docker-build-test-extension: docker-pull-prerequisites ## Build the docker image for test-extension controller manager +## reads Dockerfile from stdin to avoid an incorrectly cached Dockerfile (https://github.com/moby/buildkit/issues/1368) + cat $(TEST_EXTENSION_DIR)/Dockerfile | DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) --build-arg ARCH=$(ARCH) --build-arg ldflags="$(LDFLAGS)" . -t $(TEST_EXTENSION_IMG)-$(ARCH):$(TAG) --file - + @if [ "${DOCKER_BUILD_MODIFY_MANIFESTS}" = "true" ]; then \ + $(MAKE) set-manifest-image MANIFEST_IMG=$(TEST_EXTENSION_IMG)-$(ARCH) MANIFEST_TAG=$(TAG) TARGET_RESOURCE="./$(TEST_EXTENSION_DIR)/config/default/manager_image_patch.yaml"; \ + $(MAKE) set-manifest-pull-policy TARGET_RESOURCE="./$(TEST_EXTENSION_DIR)/config/default/manager_pull_policy.yaml"; \ + fi + .PHONY: docker-build-boskosctl docker-build-boskosctl: cat hack/tools/boskosctl/Dockerfile | DOCKER_BUILDKIT=1 docker build --build-arg builder_image=$(GO_CONTAINER_IMAGE) --build-arg goproxy=$(GOPROXY) . 
-t $(BOSKOSCTL_IMG):$(BOSKOSCTL_IMG_TAG) --file - @@ -636,6 +656,7 @@ e2e-images: ## Build the e2e manager image $(MAKE) REGISTRY=gcr.io/k8s-staging-capi-vsphere PULL_POLICY=IfNotPresent TAG=dev docker-build $(MAKE) REGISTRY=gcr.io/k8s-staging-capi-vsphere PULL_POLICY=IfNotPresent TAG=dev docker-build-vcsim $(MAKE) REGISTRY=gcr.io/k8s-staging-capi-vsphere PULL_POLICY=IfNotPresent TAG=dev docker-build-net-operator + $(MAKE) REGISTRY=gcr.io/k8s-staging-capi-vsphere PULL_POLICY=IfNotPresent TAG=dev docker-build-test-extension .PHONY: e2e e2e: e2e-images generate-e2e-templates diff --git a/test/infrastructure/test-extension/Dockerfile b/test/infrastructure/test-extension/Dockerfile new file mode 100644 index 0000000000..9e6086d2b8 --- /dev/null +++ b/test/infrastructure/test-extension/Dockerfile @@ -0,0 +1,83 @@ +# syntax=docker/dockerfile:1.4 + +# Copyright 2024 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Build the manager binary +# Run this with docker build --build-arg builder_image= +ARG builder_image + +# Build architecture +ARG ARCH + +# Ignore Hadolint rule "Always tag the version of an image explicitly." +# It's an invalid finding since the image is explicitly set in the Makefile. +# https://github.com/hadolint/hadolint/wiki/DL3006 +# hadolint ignore=DL3006 +FROM ${builder_image} as builder +WORKDIR /workspace + +# Run this with docker build --build-arg goproxy=$(go env GOPROXY) to override the goproxy +ARG goproxy=https://proxy.golang.org +ENV GOPROXY=$goproxy + +# Copy the Go Modules manifests +COPY go.mod go.mod +COPY go.sum go.sum + +# Change directories into the test go module +WORKDIR /workspace/test + +# Copy the Go Modules manifests +COPY test/go.mod go.mod +COPY test/go.sum go.sum + +# Cache deps before building and copying source so that we don't need to re-download as much +# and so that source changes don't invalidate our downloaded layer +RUN --mount=type=cache,target=/go/pkg/mod \ go mod download + +# This needs to build with the entire CAPV context +WORKDIR /workspace + +# Copy the sources (which include the test/infrastructure/test-extension subdirectory) +COPY ./ ./ + +# Change directories into test-extension +WORKDIR /workspace/test/infrastructure/test-extension + +# Cache the go build in Go's compiler cache folder so we benefit from compiler caching across docker build calls +RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ go build . + +# Build +ARG package=. +ARG ARCH +ARG ldflags + +# Do not force rebuild of up-to-date packages (do not use -a) and use the compiler cache folder +RUN --mount=type=cache,target=/root/.cache/go-build \ --mount=type=cache,target=/go/pkg/mod \ CGO_ENABLED=0 GOOS=linux GOARCH=${ARCH} \ go build -trimpath -ldflags "${ldflags} -extldflags '-static'" \ -o manager ${package} + + +FROM gcr.io/distroless/static:nonroot-${ARCH} +WORKDIR / +COPY --from=builder /workspace/test/infrastructure/test-extension/manager .
+# Use uid of nonroot user (65532) because kubernetes expects numeric user when applying pod security policies +USER 65532 +ENTRYPOINT ["/manager"] diff --git a/test/infrastructure/test-extension/README.md b/test/infrastructure/test-extension/README.md new file mode 100644 index 0000000000..15e7bece51 --- /dev/null +++ b/test/infrastructure/test-extension/README.md @@ -0,0 +1,3 @@ +# CAPV test-extension + +Provides a minimal implementation of a Runtime SDK test-extension. diff --git a/test/infrastructure/test-extension/config/certmanager/certificate.yaml b/test/infrastructure/test-extension/config/certmanager/certificate.yaml new file mode 100644 index 0000000000..4079986e89 --- /dev/null +++ b/test/infrastructure/test-extension/config/certmanager/certificate.yaml @@ -0,0 +1,24 @@ +# The following manifests contain a self-signed issuer CR and a certificate CR. +# More documentation can be found at https://docs.cert-manager.io +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: selfsigned-issuer + namespace: system +spec: + selfSigned: {} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: serving-cert # this name should match the one that appears in kustomizeconfig.yaml + namespace: system +spec: + # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize + dnsNames: + - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc + - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local + issuerRef: + kind: Issuer + name: selfsigned-issuer + secretName: $(SERVICE_NAME)-cert # this secret will not be prefixed, since it's not managed by kustomize diff --git a/test/infrastructure/test-extension/config/certmanager/kustomization.yaml b/test/infrastructure/test-extension/config/certmanager/kustomization.yaml new file mode 100644 index 0000000000..95f333f3f7 --- /dev/null +++ b/test/infrastructure/test-extension/config/certmanager/kustomization.yaml @@ -0,0 +1,5 @@ +resources: + - certificate.yaml + +configurations: + - kustomizeconfig.yaml diff --git a/test/infrastructure/test-extension/config/certmanager/kustomizeconfig.yaml b/test/infrastructure/test-extension/config/certmanager/kustomizeconfig.yaml new file mode 100644 index 0000000000..c6a6c0f1e0 --- /dev/null +++ b/test/infrastructure/test-extension/config/certmanager/kustomizeconfig.yaml @@ -0,0 +1,19 @@ +# This configuration is for teaching kustomize how to update name ref and var substitution +nameReference: + - kind: Issuer + group: cert-manager.io + fieldSpecs: + - kind: Certificate + group: cert-manager.io + path: spec/issuerRef/name + +varReference: + - kind: Certificate + group: cert-manager.io + path: spec/commonName + - kind: Certificate + group: cert-manager.io + path: spec/dnsNames + - kind: Certificate + group: cert-manager.io + path: spec/secretName diff --git a/test/infrastructure/test-extension/config/default/kustomization.yaml b/test/infrastructure/test-extension/config/default/kustomization.yaml new file mode 100644 index 0000000000..2c33c4391b --- /dev/null +++ b/test/infrastructure/test-extension/config/default/kustomization.yaml @@ -0,0 +1,54 @@ +namespace: capv-test-extension + +namePrefix: capv-test-extension- + +commonLabels: + # test-extension is not a provider, but by adding this label + # we can get this installed by Cluster API's Tiltfile and by the clusterctl machinery we use in E2E tests.
+ cluster.x-k8s.io/provider: "runtime-extension-capv-test" + +resources: + - namespace.yaml + +bases: + - ../rbac + - ../manager + - ../webhook + - ../certmanager + +patchesStrategicMerge: + # Provide customizable hook for make targets. + - manager_image_patch.yaml + - manager_pull_policy.yaml + - manager_webhook_patch.yaml + +vars: + - name: CERTIFICATE_NAMESPACE # namespace of the certificate CR + objref: + kind: Certificate + group: cert-manager.io + version: v1 + name: serving-cert # this name should match the one in certificate.yaml + fieldref: + fieldpath: metadata.namespace + - name: CERTIFICATE_NAME + objref: + kind: Certificate + group: cert-manager.io + version: v1 + name: serving-cert # this name should match the one in certificate.yaml + - name: SERVICE_NAMESPACE # namespace of the service + objref: + kind: Service + version: v1 + name: webhook-service + fieldref: + fieldpath: metadata.namespace + - name: SERVICE_NAME + objref: + kind: Service + version: v1 + name: webhook-service + +configurations: + - kustomizeconfig.yaml diff --git a/test/infrastructure/test-extension/config/default/kustomizeconfig.yaml b/test/infrastructure/test-extension/config/default/kustomizeconfig.yaml new file mode 100644 index 0000000000..eb191e64d0 --- /dev/null +++ b/test/infrastructure/test-extension/config/default/kustomizeconfig.yaml @@ -0,0 +1,4 @@ +# This configuration is for teaching kustomize how to update name ref and var substitution +varReference: +- kind: Deployment + path: spec/template/spec/volumes/secret/secretName diff --git a/test/infrastructure/test-extension/config/default/manager_image_patch.yaml b/test/infrastructure/test-extension/config/default/manager_image_patch.yaml new file mode 100644 index 0000000000..cd52fe1759 --- /dev/null +++ b/test/infrastructure/test-extension/config/default/manager_image_patch.yaml @@ -0,0 +1,11 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - image: gcr.io/k8s-staging-capi-vsphere/cluster-api-test-extension:dev + name: manager diff --git a/test/infrastructure/test-extension/config/default/manager_pull_policy.yaml b/test/infrastructure/test-extension/config/default/manager_pull_policy.yaml new file mode 100644 index 0000000000..74a0879c60 --- /dev/null +++ b/test/infrastructure/test-extension/config/default/manager_pull_policy.yaml @@ -0,0 +1,11 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + imagePullPolicy: Always diff --git a/test/infrastructure/test-extension/config/default/manager_webhook_patch.yaml b/test/infrastructure/test-extension/config/default/manager_webhook_patch.yaml new file mode 100644 index 0000000000..f18fd10f99 --- /dev/null +++ b/test/infrastructure/test-extension/config/default/manager_webhook_patch.yaml @@ -0,0 +1,23 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + ports: + - containerPort: 9443 + name: webhook-server + protocol: TCP + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true + volumes: + - name: cert + secret: + secretName: $(SERVICE_NAME)-cert # this secret will not be prefixed, since it's not managed by kustomize + diff --git a/test/infrastructure/test-extension/config/default/namespace.yaml 
b/test/infrastructure/test-extension/config/default/namespace.yaml new file mode 100644 index 0000000000..8b55c3cd89 --- /dev/null +++ b/test/infrastructure/test-extension/config/default/namespace.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + control-plane: controller-manager + name: system diff --git a/test/infrastructure/test-extension/config/manager/kustomization.yaml b/test/infrastructure/test-extension/config/manager/kustomization.yaml new file mode 100644 index 0000000000..5c5f0b84cb --- /dev/null +++ b/test/infrastructure/test-extension/config/manager/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- manager.yaml diff --git a/test/infrastructure/test-extension/config/manager/manager.yaml b/test/infrastructure/test-extension/config/manager/manager.yaml new file mode 100644 index 0000000000..71697ed8e3 --- /dev/null +++ b/test/infrastructure/test-extension/config/manager/manager.yaml @@ -0,0 +1,61 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system + labels: + control-plane: controller-manager +spec: + selector: + matchLabels: + control-plane: controller-manager + replicas: 1 + template: + metadata: + labels: + control-plane: controller-manager + spec: + containers: + - command: + - /manager + args: + - "--leader-elect" + - "--diagnostics-address=${CAPI_DIAGNOSTICS_ADDRESS:=:8443}" + - "--insecure-diagnostics=${CAPI_INSECURE_DIAGNOSTICS:=false}" + image: controller:latest + name: manager + ports: + - containerPort: 9440 + name: healthz + protocol: TCP + - containerPort: 8443 + name: metrics + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: healthz + livenessProbe: + httpGet: + path: /healthz + port: healthz + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + runAsUser: 65532 + runAsGroup: 65532 + terminationMessagePolicy: FallbackToLogsOnError + terminationGracePeriodSeconds: 10 + serviceAccountName: manager + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault diff --git a/test/infrastructure/test-extension/config/rbac/kustomization.yaml b/test/infrastructure/test-extension/config/rbac/kustomization.yaml new file mode 100644 index 0000000000..e82521ffdc --- /dev/null +++ b/test/infrastructure/test-extension/config/rbac/kustomization.yaml @@ -0,0 +1,8 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- role.yaml +- role_binding.yaml +- service_account.yaml +- leader_election_role.yaml +- leader_election_role_binding.yaml diff --git a/test/infrastructure/test-extension/config/rbac/leader_election_role.yaml b/test/infrastructure/test-extension/config/rbac/leader_election_role.yaml new file mode 100644 index 0000000000..23055e187d --- /dev/null +++ b/test/infrastructure/test-extension/config/rbac/leader_election_role.yaml @@ -0,0 +1,24 @@ +# permissions to do leader election. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: leader-election-role +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - create +- apiGroups: + - "coordination.k8s.io" + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete diff --git a/test/infrastructure/test-extension/config/rbac/leader_election_role_binding.yaml b/test/infrastructure/test-extension/config/rbac/leader_election_role_binding.yaml new file mode 100644 index 0000000000..d5e0044679 --- /dev/null +++ b/test/infrastructure/test-extension/config/rbac/leader_election_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: leader-election-role +subjects: +- kind: ServiceAccount + name: manager + namespace: system diff --git a/test/infrastructure/test-extension/config/rbac/role.yaml b/test/infrastructure/test-extension/config/rbac/role.yaml new file mode 100644 index 0000000000..8e90952e79 --- /dev/null +++ b/test/infrastructure/test-extension/config/rbac/role.yaml @@ -0,0 +1,29 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: manager-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - get + - list + - patch + - update + - watch +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create diff --git a/test/infrastructure/test-extension/config/rbac/role_binding.yaml b/test/infrastructure/test-extension/config/rbac/role_binding.yaml new file mode 100644 index 0000000000..5a95f66d6f --- /dev/null +++ b/test/infrastructure/test-extension/config/rbac/role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: manager-role +subjects: +- kind: ServiceAccount + name: manager + namespace: system diff --git a/test/infrastructure/test-extension/config/rbac/service_account.yaml b/test/infrastructure/test-extension/config/rbac/service_account.yaml new file mode 100644 index 0000000000..77f747b53c --- /dev/null +++ b/test/infrastructure/test-extension/config/rbac/service_account.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: manager + namespace: system diff --git a/test/infrastructure/test-extension/config/tilt/extensionconfig.yaml b/test/infrastructure/test-extension/config/tilt/extensionconfig.yaml new file mode 100644 index 0000000000..795af86cff --- /dev/null +++ b/test/infrastructure/test-extension/config/tilt/extensionconfig.yaml @@ -0,0 +1,18 @@ +apiVersion: runtime.cluster.x-k8s.io/v1alpha1 +kind: ExtensionConfig +metadata: + annotations: + runtime.cluster.x-k8s.io/inject-ca-from-secret: capv-test-extension/capv-test-extension-webhook-service-cert + name: capv-test-extension +spec: + clientConfig: + service: + name: capv-test-extension-webhook-service + namespace: capv-test-extension # Note: this assumes the test extension gets deployed in the default namespace defined in its own runtime-extensions-components.yaml + port: 443 + namespaceSelector: + matchExpressions: + - key: kubernetes.io/metadata.name + operator: In + values: + - default # Note: this assumes the test extension is used by Clusters in the default
namespace only \ No newline at end of file diff --git a/test/infrastructure/test-extension/config/webhook/kustomization.yaml b/test/infrastructure/test-extension/config/webhook/kustomization.yaml new file mode 100644 index 0000000000..66157d5d5f --- /dev/null +++ b/test/infrastructure/test-extension/config/webhook/kustomization.yaml @@ -0,0 +1,5 @@ +resources: +- service.yaml + +configurations: +- kustomizeconfig.yaml diff --git a/test/infrastructure/test-extension/config/webhook/kustomizeconfig.yaml b/test/infrastructure/test-extension/config/webhook/kustomizeconfig.yaml new file mode 100644 index 0000000000..345ca49278 --- /dev/null +++ b/test/infrastructure/test-extension/config/webhook/kustomizeconfig.yaml @@ -0,0 +1,5 @@ +# The following config is for teaching kustomize where to look when substituting vars. +# It requires kustomize v2.1.0 or newer to work properly. + +varReference: +- path: metadata/annotations diff --git a/test/infrastructure/test-extension/config/webhook/service.yaml b/test/infrastructure/test-extension/config/webhook/service.yaml new file mode 100644 index 0000000000..711977f54f --- /dev/null +++ b/test/infrastructure/test-extension/config/webhook/service.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Service +metadata: + name: webhook-service + namespace: system +spec: + ports: + - port: 443 + targetPort: webhook-server diff --git a/test/infrastructure/test-extension/main.go b/test/infrastructure/test-extension/main.go new file mode 100644 index 0000000000..b0258ea713 --- /dev/null +++ b/test/infrastructure/test-extension/main.go @@ -0,0 +1,400 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// main is the main package for the test extension. +// The test extension serves two goals: +// - to provide a reference implementation of a Runtime Extension +// - to implement the Runtime Extension used by Cluster API E2E tests.
+package main + +import ( + "context" + "flag" + "os" + goruntime "runtime" + "time" + + "github.com/spf13/pflag" + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/tools/leaderelection/resourcelock" + cliflag "k8s.io/component-base/cli/flag" + "k8s.io/component-base/logs" + logsv1 "k8s.io/component-base/logs/api/v1" + _ "k8s.io/component-base/logs/json/register" + "k8s.io/klog/v2" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + "sigs.k8s.io/cluster-api/controllers/remote" + controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog" + runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" + "sigs.k8s.io/cluster-api/exp/runtime/server" + "sigs.k8s.io/cluster-api/test/extension/handlers/lifecycle" + "sigs.k8s.io/cluster-api/util/flags" + "sigs.k8s.io/cluster-api/version" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + + infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" + vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" + "sigs.k8s.io/cluster-api-provider-vsphere/test/infrastructure/test-extension/topologymutation" +) + +var ( + // catalog contains all information about RuntimeHooks. + catalog = runtimecatalog.New() + + // scheme is a Kubernetes runtime scheme containing all the information about API types used by the test extension. + // NOTE: it is not mandatory to use a scheme in a custom RuntimeExtension, but working with typed API objects makes code + // easier to read and less error-prone than using unstructured or working with raw json/yaml. + scheme = runtime.NewScheme() + // Creates a logger to be used during the main func using controller runtime utilities. + // NOTE: it is not mandatory to use controller runtime utilities in a custom RuntimeExtension, but it is recommended + // because it makes logs from those components similar to logs from controllers. + setupLog = ctrl.Log.WithName("setup") + controllerName = "capv-test-extension-manager" + + // flags. + enableLeaderElection bool + leaderElectionLeaseDuration time.Duration + leaderElectionRenewDeadline time.Duration + leaderElectionRetryPeriod time.Duration + profilerAddress string + enableContentionProfiling bool + syncPeriod time.Duration + restConfigQPS float32 + restConfigBurst int + webhookPort int + webhookCertDir string + webhookCertName string + webhookKeyName string + healthAddr string + tlsOptions = flags.TLSOptions{} + diagnosticsOptions = flags.DiagnosticsOptions{} + logOptions = logs.NewOptions() +) + +func init() { + // Adds to the scheme all the API types used by the test extension. + _ = clientgoscheme.AddToScheme(scheme) + _ = apiextensionsv1.AddToScheme(scheme) + + _ = clusterv1.AddToScheme(scheme) + _ = bootstrapv1.AddToScheme(scheme) + _ = controlplanev1.AddToScheme(scheme) + + _ = infrav1.AddToScheme(scheme) + _ = vmwarev1.AddToScheme(scheme) + + // Register the RuntimeHook types into the catalog. + _ = runtimehooksv1.AddToCatalog(catalog) +} + +// InitFlags initializes the flags. +func InitFlags(fs *pflag.FlagSet) { + // Initialize logs flags using Kubernetes component-base machinery.
+ // NOTE: it is not mandatory to use Kubernetes component-base machinery in a custom RuntimeExtension, but it is + // recommended because it helps in ensuring consistency across different components in the cluster. + logsv1.AddFlags(logOptions, fs) + + fs.BoolVar(&enableLeaderElection, "leader-elect", false, + "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.") + + fs.DurationVar(&leaderElectionLeaseDuration, "leader-elect-lease-duration", 15*time.Second, + "Interval at which non-leader candidates will wait to force acquire leadership (duration string)") + + fs.DurationVar(&leaderElectionRenewDeadline, "leader-elect-renew-deadline", 10*time.Second, + "Duration that the leading controller manager will retry refreshing leadership before giving up (duration string)") + + fs.DurationVar(&leaderElectionRetryPeriod, "leader-elect-retry-period", 2*time.Second, + "Duration the LeaderElector clients should wait between tries of actions (duration string)") + + fs.StringVar(&profilerAddress, "profiler-address", "", + "Bind address to expose the pprof profiler (e.g. localhost:6060)") + + fs.BoolVar(&enableContentionProfiling, "contention-profiling", false, + "Enable block profiling") + + fs.DurationVar(&syncPeriod, "sync-period", 10*time.Minute, + "The minimum interval at which watched resources are reconciled (e.g. 15m)") + + fs.Float32Var(&restConfigQPS, "kube-api-qps", 20, + "Maximum queries per second from the controller client to the Kubernetes API server. Defaults to 20") + + fs.IntVar(&restConfigBurst, "kube-api-burst", 30, + "Maximum number of queries that should be allowed in one burst from the controller client to the Kubernetes API server. Default 30") + + fs.IntVar(&webhookPort, "webhook-port", 9443, + "Webhook Server port") + + fs.StringVar(&webhookCertDir, "webhook-cert-dir", "/tmp/k8s-webhook-server/serving-certs/", + "Webhook cert dir.") + + fs.StringVar(&webhookCertName, "webhook-cert-name", "tls.crt", + "Webhook cert name.") + + fs.StringVar(&webhookKeyName, "webhook-key-name", "tls.key", + "Webhook key name.") + + fs.StringVar(&healthAddr, "health-addr", ":9440", + "The address the health endpoint binds to.") + + flags.AddDiagnosticsOptions(fs, &diagnosticsOptions) + flags.AddTLSOptions(fs, &tlsOptions) + + // Add test-extension-specific flags. + // NOTE: it is not mandatory to use the same flag names in all RuntimeExtensions, but it is recommended when + // addressing common concerns like profiler-address, webhook-port, webhook-cert-dir etc. because it helps in ensuring + // consistency across different components in the cluster. +} + +// Add RBAC for the authorized diagnostics endpoint. +// +kubebuilder:rbac:groups=authentication.k8s.io,resources=tokenreviews,verbs=create +// +kubebuilder:rbac:groups=authorization.k8s.io,resources=subjectaccessreviews,verbs=create + +func main() { + // Initialize and parse command line flags. + InitFlags(pflag.CommandLine) + pflag.CommandLine.SetNormalizeFunc(cliflag.WordSepNormalizeFunc) + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + // Set log level 2 as default. + if err := pflag.CommandLine.Set("v", "2"); err != nil { + setupLog.Error(err, "Failed to set default log level") + os.Exit(1) + } + pflag.Parse() + + // Validate logs flags using Kubernetes component-base machinery and apply them + // so that klog will automatically use the right logger. + // NOTE: klog is the logger of choice of component-base machinery.
+ if err := logsv1.ValidateAndApply(logOptions, nil); err != nil { + setupLog.Error(err, "Unable to start extension") + os.Exit(1) + } + + // Add the klog logger to the context. + // NOTE: it is not mandatory to use contextual logging in a custom RuntimeExtension, but it is recommended + // because it allows using a logger stored in the context across the entire chain of calls (without + // requiring an additional logger parameter in all the functions). + ctrl.SetLogger(klog.Background()) + + restConfig := ctrl.GetConfigOrDie() + restConfig.QPS = restConfigQPS + restConfig.Burst = restConfigBurst + restConfig.UserAgent = remote.DefaultClusterAPIUserAgent(controllerName) + + tlsOptionOverrides, err := flags.GetTLSOptionOverrideFuncs(tlsOptions) + if err != nil { + setupLog.Error(err, "Unable to add TLS settings to the webhook server") + os.Exit(1) + } + + diagnosticsOpts := flags.GetDiagnosticsOptions(diagnosticsOptions) + + if enableContentionProfiling { + goruntime.SetBlockProfileRate(1) + } + + // Create an HTTP server for serving Runtime Extensions. + runtimeExtensionWebhookServer, err := server.New(server.Options{ + Port: webhookPort, + CertDir: webhookCertDir, + CertName: webhookCertName, + KeyName: webhookKeyName, + TLSOpts: tlsOptionOverrides, + Catalog: catalog, + }) + if err != nil { + setupLog.Error(err, "Error creating runtime extension webhook server") + os.Exit(1) + } + + ctrlOptions := ctrl.Options{ + Scheme: scheme, + LeaderElection: enableLeaderElection, + LeaderElectionID: "controller-leader-election-capv-test-extension", + LeaseDuration: &leaderElectionLeaseDuration, + RenewDeadline: &leaderElectionRenewDeadline, + RetryPeriod: &leaderElectionRetryPeriod, + LeaderElectionResourceLock: resourcelock.LeasesResourceLock, + HealthProbeBindAddress: healthAddr, + PprofBindAddress: profilerAddress, + Metrics: diagnosticsOpts, + Cache: cache.Options{ + SyncPeriod: &syncPeriod, + }, + Client: client.Options{ + Cache: &client.CacheOptions{ + DisableFor: []client.Object{ + &corev1.ConfigMap{}, + &corev1.Secret{}, + }, + // Use the cache for all Unstructured get/list calls. + Unstructured: true, + }, + }, + WebhookServer: runtimeExtensionWebhookServer, + } + + // Start the manager + mgr, err := ctrl.NewManager(restConfig, ctrlOptions) + if err != nil { + setupLog.Error(err, "Unable to start manager") + os.Exit(1) + } + + // Set up a context listening for SIGINT. + ctx := ctrl.SetupSignalHandler() + + // Setup Runtime Extensions. + setupTopologyMutationHookHandlers(runtimeExtensionWebhookServer) + setupLifecycleHookHandlers(mgr, runtimeExtensionWebhookServer) + + // Setup checks, indexes, reconcilers and webhooks. + setupChecks(mgr) + setupIndexes(ctx, mgr) + setupReconcilers(ctx, mgr) + setupWebhooks(mgr) + + setupLog.Info("Starting manager", "version", version.Get().String()) + if err := mgr.Start(ctx); err != nil { + setupLog.Error(err, "Problem running manager") + os.Exit(1) + } +} + +// setupTopologyMutationHookHandlers sets up Topology Mutation Hooks (Runtime Patches). +func setupTopologyMutationHookHandlers(runtimeExtensionWebhookServer *server.Server) { + // Create the ExtensionHandlers for the Topology Mutation Hooks. + // NOTE: it is not mandatory to group all the ExtensionHandlers using a struct, what is important + // is to have HandlerFunc with the signature defined in sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.
+ topologyMutationExtensionHandlers := topologymutation.NewExtensionHandlers(scheme) + + if err := runtimeExtensionWebhookServer.AddExtensionHandler(server.ExtensionHandler{ + Hook: runtimehooksv1.GeneratePatches, + Name: "generate-patches", + HandlerFunc: topologyMutationExtensionHandlers.GeneratePatches, + }); err != nil { + setupLog.Error(err, "Error adding handler") + os.Exit(1) + } + + if err := runtimeExtensionWebhookServer.AddExtensionHandler(server.ExtensionHandler{ + Hook: runtimehooksv1.ValidateTopology, + Name: "validate-topology", + HandlerFunc: topologyMutationExtensionHandlers.ValidateTopology, + }); err != nil { + setupLog.Error(err, "Error adding handler") + os.Exit(1) + } + + if err := runtimeExtensionWebhookServer.AddExtensionHandler(server.ExtensionHandler{ + Hook: runtimehooksv1.DiscoverVariables, + Name: "discover-variables", + HandlerFunc: topologyMutationExtensionHandlers.DiscoverVariables, + }); err != nil { + setupLog.Error(err, "Error adding handler") + os.Exit(1) + } +} + +// setupLifecycleHookHandlers sets up Lifecycle Hooks. +func setupLifecycleHookHandlers(mgr ctrl.Manager, runtimeExtensionWebhookServer *server.Server) { + // Create the ExtensionHandlers for the lifecycle hooks + // NOTE: it is not mandatory to group all the ExtensionHandlers using a struct, what is important + // is to have HandlerFunc with the signature defined in sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1. + lifecycleExtensionHandlers := lifecycle.NewExtensionHandlers(mgr.GetClient()) + + if err := runtimeExtensionWebhookServer.AddExtensionHandler(server.ExtensionHandler{ + Hook: runtimehooksv1.BeforeClusterCreate, + Name: "before-cluster-create", + HandlerFunc: lifecycleExtensionHandlers.DoBeforeClusterCreate, + }); err != nil { + setupLog.Error(err, "Error adding handler") + os.Exit(1) + } + + if err := runtimeExtensionWebhookServer.AddExtensionHandler(server.ExtensionHandler{ + Hook: runtimehooksv1.AfterControlPlaneInitialized, + Name: "after-control-plane-initialized", + HandlerFunc: lifecycleExtensionHandlers.DoAfterControlPlaneInitialized, + }); err != nil { + setupLog.Error(err, "Error adding handler") + os.Exit(1) + } + + if err := runtimeExtensionWebhookServer.AddExtensionHandler(server.ExtensionHandler{ + Hook: runtimehooksv1.BeforeClusterUpgrade, + Name: "before-cluster-upgrade", + HandlerFunc: lifecycleExtensionHandlers.DoBeforeClusterUpgrade, + }); err != nil { + setupLog.Error(err, "Error adding handler") + os.Exit(1) + } + + if err := runtimeExtensionWebhookServer.AddExtensionHandler(server.ExtensionHandler{ + Hook: runtimehooksv1.AfterControlPlaneUpgrade, + Name: "after-control-plane-upgrade", + HandlerFunc: lifecycleExtensionHandlers.DoAfterControlPlaneUpgrade, + }); err != nil { + setupLog.Error(err, "Error adding handler") + os.Exit(1) + } + + if err := runtimeExtensionWebhookServer.AddExtensionHandler(server.ExtensionHandler{ + Hook: runtimehooksv1.AfterClusterUpgrade, + Name: "after-cluster-upgrade", + HandlerFunc: lifecycleExtensionHandlers.DoAfterClusterUpgrade, + }); err != nil { + setupLog.Error(err, "Error adding handler") + os.Exit(1) + } + + if err := runtimeExtensionWebhookServer.AddExtensionHandler(server.ExtensionHandler{ + Hook: runtimehooksv1.BeforeClusterDelete, + Name: "before-cluster-delete", + HandlerFunc: lifecycleExtensionHandlers.DoBeforeClusterDelete, + }); err != nil { + setupLog.Error(err, "Error adding handler") + os.Exit(1) + } +} + +func setupChecks(mgr ctrl.Manager) { + if err := mgr.AddReadyzCheck("webhook", 
mgr.GetWebhookServer().StartedChecker()); err != nil { + setupLog.Error(err, "Unable to create ready check") + os.Exit(1) + } + + if err := mgr.AddHealthzCheck("webhook", mgr.GetWebhookServer().StartedChecker()); err != nil { + setupLog.Error(err, "Unable to create health check") + os.Exit(1) + } +} + +func setupIndexes(_ context.Context, _ ctrl.Manager) { +} + +func setupReconcilers(_ context.Context, _ ctrl.Manager) { +} + +func setupWebhooks(_ ctrl.Manager) { +} diff --git a/test/infrastructure/test-extension/tilt-provider.yaml b/test/infrastructure/test-extension/tilt-provider.yaml new file mode 100644 index 0000000000..95d67842ab --- /dev/null +++ b/test/infrastructure/test-extension/tilt-provider.yaml @@ -0,0 +1,11 @@ +--- +- name: capv-test-extension + config: + version: v1.11.99 + image: gcr.io/k8s-staging-capi-vsphere/cluster-api-test-extension + live_reload_deps: + - main.go + - topologymutation + label: CAPV_EXTENSION + additional_resources: + - config/tilt/extensionconfig.yaml diff --git a/test/infrastructure/test-extension/topologymutation/handler.go b/test/infrastructure/test-extension/topologymutation/handler.go new file mode 100644 index 0000000000..1abff0ec84 --- /dev/null +++ b/test/infrastructure/test-extension/topologymutation/handler.go @@ -0,0 +1,408 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package topologymutation contains the handlers for the topologymutation webhook. +// +// The implementation of the handlers is specifically designed for Cluster API E2E test use cases. +// When implementing a custom RuntimeExtension, it is only required to expose HandlerFunc with the +// signature defined in sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1. +package topologymutation + +import ( + "context" + "fmt" + "regexp" + + "github.com/pkg/errors" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/utils/ptr" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" + "sigs.k8s.io/cluster-api/exp/runtime/topologymutation" + ctrl "sigs.k8s.io/controller-runtime" + + infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" + vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" + "sigs.k8s.io/cluster-api-provider-vsphere/internal/kubevip" +) + +// +kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;patch;update;create + +// ExtensionHandlers provides a common struct shared across the topology mutation hooks handlers; +// this is convenient because in Cluster API's E2E tests all of them use a decoder for working with typed +// API objects, which makes code easier to read and less error-prone than using unstructured or working with raw json/yaml.
+// NOTE: it is not mandatory to use an ExtensionHandlers in a custom RuntimeExtension, what is important +// is to expose HandlerFunc with the signature defined in sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1. +type ExtensionHandlers struct { + decoder runtime.Decoder +} + +// NewExtensionHandlers returns a new ExtensionHandlers for the topology mutation hook handlers. +func NewExtensionHandlers(scheme *runtime.Scheme) *ExtensionHandlers { + return &ExtensionHandlers{ + // Add the apiGroups being handled to the decoder + decoder: serializer.NewCodecFactory(scheme).UniversalDecoder( + infrav1.GroupVersion, + vmwarev1.GroupVersion, + controlplanev1.GroupVersion, + bootstrapv1.GroupVersion, + ), + } +} + +// GeneratePatches implements the HandlerFunc for the GeneratePatches hook. +// The hook adds to the response the patches we are using in Cluster API E2E tests. +// NOTE: a custom RuntimeExtension must implement the body of this func according to the specific use case. +func (h *ExtensionHandlers) GeneratePatches(ctx context.Context, req *runtimehooksv1.GeneratePatchesRequest, resp *runtimehooksv1.GeneratePatchesResponse) { + log := ctrl.LoggerFrom(ctx) + log.Info("GeneratePatches is called") + + // By using WalkTemplates it is possible to implement patches using typed API objects, which makes code + // easier to read and less error-prone than using unstructured or working with raw json/yaml. + // IMPORTANT: by unit testing this func/nested func properly, it is possible to prevent unexpected rollouts when patches are modified. + topologymutation.WalkTemplates(ctx, h.decoder, req, resp, func(ctx context.Context, obj runtime.Object, variables map[string]apiextensionsv1.JSON, holderRef runtimehooksv1.HolderReference) error { + log := ctrl.LoggerFrom(ctx) + + isControlPlane := holderRef.Kind == "KubeadmControlPlane" + + switch obj := obj.(type) { + case *controlplanev1.KubeadmControlPlaneTemplate: + if err := patchKubeadmControlPlaneTemplate(ctx, obj, variables); err != nil { + log.Error(err, "Error patching KubeadmControlPlaneTemplate") + return errors.Wrap(err, "error patching KubeadmControlPlaneTemplate") + } + case *bootstrapv1.KubeadmConfigTemplate: + if err := patchKubeadmConfigTemplate(ctx, obj, variables); err != nil { + log.Error(err, "Error patching KubeadmConfigTemplate") + return errors.Wrap(err, "error patching KubeadmConfigTemplate") + } + case *infrav1.VSphereClusterTemplate: + if err := patchGovmomiClusterTemplate(ctx, obj, variables); err != nil { + log.Error(err, "Error patching VSphereClusterTemplate") + return errors.Wrap(err, "error patching VSphereClusterTemplate") + } + case *infrav1.VSphereMachineTemplate: + if err := patchGovmomiMachineTemplate(ctx, obj, variables, isControlPlane); err != nil { + log.Error(err, "Error patching VSphereMachineTemplate") + return errors.Wrap(err, "error patching VSphereMachineTemplate") + } + case *vmwarev1.VSphereClusterTemplate: + if err := patchSupervisorClusterTemplate(ctx, obj, variables); err != nil { + log.Error(err, "Error patching VSphereClusterTemplate") + return errors.Wrap(err, "error patching VSphereClusterTemplate") + } + case *vmwarev1.VSphereMachineTemplate: + if err := patchSupervisorMachineTemplate(ctx, obj, variables, isControlPlane); err != nil { + log.Error(err, "Error patching VSphereMachineTemplate") + return errors.Wrap(err, "error patching VSphereMachineTemplate") + } + } + return nil + }) +} + +// patchKubeadmControlPlaneTemplate patches the KubeadmControlPlaneTemplate.
+func patchKubeadmControlPlaneTemplate(_ context.Context, tpl *controlplanev1.KubeadmControlPlaneTemplate, templateVariables map[string]apiextensionsv1.JSON) error { + // patch enableSSHIntoNodes + if err := patchUsers(&tpl.Spec.Template.Spec.KubeadmConfigSpec, templateVariables); err != nil { + return err + } + + // patch kubeVipPodManifest + kubeVipPodManifest, err := topologymutation.GetStringVariable(templateVariables, "kubeVipPodManifest") + if err != nil { + // Skip patch if the kubeVipPodManifest variable is not set + if topologymutation.IsNotFoundError(err) { + return nil + } + return err + } + controlPlaneIPAddr, err := topologymutation.GetStringVariable(templateVariables, "controlPlaneIpAddr") + if err != nil { + return err + } + kubeVipPodManifestModified := regexp.MustCompile("(name: address\n +value:).*").ReplaceAllString(kubeVipPodManifest, fmt.Sprintf("$1 %s", controlPlaneIPAddr)) + + for _, file := range kubevip.Files() { + if file.Path == "/etc/kubernetes/manifests/kube-vip.yaml" { + file.Content = kubeVipPodManifestModified + } + tpl.Spec.Template.Spec.KubeadmConfigSpec.Files = append(tpl.Spec.Template.Spec.KubeadmConfigSpec.Files, file) + } + + // patch preKubeadmScript + preKubeadmScript, err := topologymutation.GetStringVariable(templateVariables, "preKubeadmScript") + if err != nil { + return err + } + version, err := topologymutation.GetStringVariable(templateVariables, "builtin.controlPlane.version") + if err != nil { + return err + } + + versionRegex := regexp.MustCompile("(KUBERNETES_VERSION=.*)") + tpl.Spec.Template.Spec.KubeadmConfigSpec.Files = append(tpl.Spec.Template.Spec.KubeadmConfigSpec.Files, + bootstrapv1.File{ + Owner: "root:root", + Path: "/etc/pre-kubeadm-commands/10-prekubeadmscript.sh", + Permissions: "0755", + Content: versionRegex.ReplaceAllString(preKubeadmScript, fmt.Sprintf("KUBERNETES_VERSION=%s", version)), + }, + ) + + return nil +} + +// patchKubeadmConfigTemplate patches the KubeadmConfigTemplate.
+func patchKubeadmConfigTemplate(_ context.Context, tpl *bootstrapv1.KubeadmConfigTemplate, templateVariables map[string]apiextensionsv1.JSON) error { + // patch enableSSHIntoNodes + if err := patchUsers(&tpl.Spec.Template.Spec, templateVariables); err != nil { + return err + } + + // patch preKubeadmScript + preKubeadmScript, err := topologymutation.GetStringVariable(templateVariables, "preKubeadmScript") + if err != nil { + return err + } + version, err := topologymutation.GetStringVariable(templateVariables, "builtin.machineDeployment.version") + if err != nil { + return err + } + + versionRegex := regexp.MustCompile("(KUBERNETES_VERSION=.*)") + tpl.Spec.Template.Spec.Files = append(tpl.Spec.Template.Spec.Files, + bootstrapv1.File{ + Owner: "root:root", + Path: "/etc/pre-kubeadm-commands/10-prekubeadmscript.sh", + Permissions: "0755", + Content: versionRegex.ReplaceAllString(preKubeadmScript, fmt.Sprintf("KUBERNETES_VERSION=%s", version)), + }, + ) + + return nil +} + +func patchUsers(kubeadmConfigSpec *bootstrapv1.KubeadmConfigSpec, templateVariables map[string]apiextensionsv1.JSON) error { + sshKey, err := topologymutation.GetStringVariable(templateVariables, "sshKey") + if err != nil { + // Skip patch if sshKey variable is not set + if topologymutation.IsNotFoundError(err) { + return nil + } + return err + } + + kubeadmConfigSpec.Users = append(kubeadmConfigSpec.Users, + bootstrapv1.User{ + Name: "capv", + SSHAuthorizedKeys: []string{sshKey}, + Sudo: ptr.To("ALL=(ALL) NOPASSWD:ALL"), + }) + return nil +} + +// patchGovmomiClusterTemplate patches the govmomi VSphereClusterTemplate. +// NOTE: this patch is not required for any special reason, it is used for testing the patch machinery itself. +func patchGovmomiClusterTemplate(_ context.Context, vsphereCluster *infrav1.VSphereClusterTemplate, templateVariables map[string]apiextensionsv1.JSON) error { + // patch infraClusterSubstitutions + controlPlaneIPAddr, err := topologymutation.GetStringVariable(templateVariables, "controlPlaneIpAddr") + if err != nil { + return err + } + var controlPlanePort int32 + if err := topologymutation.GetObjectVariableInto(templateVariables, "controlPlanePort", &controlPlanePort); err != nil { + return err + } + + vsphereCluster.Spec.Template.Spec.ControlPlaneEndpoint.Host = controlPlaneIPAddr + vsphereCluster.Spec.Template.Spec.ControlPlaneEndpoint.Port = controlPlanePort + + credsSecretName, err := topologymutation.GetStringVariable(templateVariables, "credsSecretName") + if err != nil { + return err + } + + vsphereCluster.Spec.Template.Spec.IdentityRef = &infrav1.VSphereIdentityReference{ + Kind: infrav1.SecretKind, + Name: credsSecretName, + } + + infraServerURL, err := topologymutation.GetStringVariable(templateVariables, "infraServer.url") + if err != nil { + return err + } + + vsphereCluster.Spec.Template.Spec.Server = infraServerURL + + infraServerThumbprint, err := topologymutation.GetStringVariable(templateVariables, "infraServer.thumbprint") + if err != nil { + return err + } + + vsphereCluster.Spec.Template.Spec.Thumbprint = infraServerThumbprint + + return nil +} + +// patchSupervisorClusterTemplate patches the supervisor VSphereClusterTemplate. +// NOTE: this patch is not required for any special reason, it is used for testing the patch machinery itself. 
+func patchSupervisorClusterTemplate(_ context.Context, vsphereCluster *vmwarev1.VSphereClusterTemplate, templateVariables map[string]apiextensionsv1.JSON) error { + // patch infraClusterSubstitutions + controlPlaneIPAddr, err := topologymutation.GetStringVariable(templateVariables, "controlPlaneIpAddr") + if err != nil { + return err + } + var controlPlanePort int32 + if err := topologymutation.GetObjectVariableInto(templateVariables, "controlPlanePort", &controlPlanePort); err != nil { + return err + } + + vsphereCluster.Spec.Template.Spec.ControlPlaneEndpoint.Host = controlPlaneIPAddr + vsphereCluster.Spec.Template.Spec.ControlPlaneEndpoint.Port = controlPlanePort + + return nil +} + +// patchGovmomiMachineTemplate patches the govmomi VSphereMachineTemplate. +// NOTE: this patch is not required for any special reason, it is used for testing the patch machinery itself. +func patchGovmomiMachineTemplate(_ context.Context, vsphereMachineTemplate *infrav1.VSphereMachineTemplate, templateVariables map[string]apiextensionsv1.JSON, isControlPlane bool) error { + // patch vSphereTemplate + + var err error + vsphereMachineTemplate.Spec.Template.Spec.Template, err = calculateImageName(templateVariables, isControlPlane) + + return err +} + +// patchSupervisorMachineTemplate patches the supervisor VSphereMachineTemplate. +// NOTE: this patch is not required for any special reason, it is used for testing the patch machinery itself. +func patchSupervisorMachineTemplate(_ context.Context, vsphereMachineTemplate *vmwarev1.VSphereMachineTemplate, templateVariables map[string]apiextensionsv1.JSON, isControlPlane bool) error { + // patch vSphereTemplate + + var err error + vsphereMachineTemplate.Spec.Template.Spec.ImageName, err = calculateImageName(templateVariables, isControlPlane) + + return err +} + +func calculateImageName(templateVariables map[string]apiextensionsv1.JSON, isControlPlane bool) (string, error) { + // Select the version variable depending on the template being patched. + versionVariable := "builtin.controlPlane.version" + if !isControlPlane { + versionVariable = "builtin.machineDeployment.version" + } + + version, err := topologymutation.GetStringVariable(templateVariables, versionVariable) + if err != nil { + return "", err + } + + // Fallback to the v1.30.0 image, except for v1.28.0 and v1.29.0. + if version != "v1.28.0" && version != "v1.29.0" { + version = "v1.30.0" + } + + return fmt.Sprintf("ubuntu-2204-kube-%s", version), nil +} + +// ValidateTopology implements the HandlerFunc for the ValidateTopology hook. +// Cluster API E2E tests currently just validate that the hook is called. +// NOTE: a custom RuntimeExtension must implement the body of this func according to the specific use case. +func (h *ExtensionHandlers) ValidateTopology(ctx context.Context, _ *runtimehooksv1.ValidateTopologyRequest, resp *runtimehooksv1.ValidateTopologyResponse) { + log := ctrl.LoggerFrom(ctx) + log.Info("ValidateTopology called") + + resp.Status = runtimehooksv1.ResponseStatusSuccess +} + +// DiscoverVariables implements the HandlerFunc for the DiscoverVariables hook.
+func (h *ExtensionHandlers) DiscoverVariables(ctx context.Context, _ *runtimehooksv1.DiscoverVariablesRequest, resp *runtimehooksv1.DiscoverVariablesResponse) { + log := ctrl.LoggerFrom(ctx) + log.Info("DiscoverVariables called") + + resp.Status = runtimehooksv1.ResponseStatusSuccess + + // The variables are a copy of what flavorgen generates in `packaging/flavorgen/flavors/clusterclass_generators.go` + resp.Variables = []clusterv1.ClusterClassVariable{ + { + Name: "sshKey", + Required: false, + Schema: clusterv1.VariableSchema{ + OpenAPIV3Schema: clusterv1.JSONSchemaProps{ + Description: "Public key to SSH onto the cluster nodes.", + Type: "string", + }, + }, + }, + { + Name: "controlPlaneIpAddr", + Required: true, + Schema: clusterv1.VariableSchema{ + OpenAPIV3Schema: clusterv1.JSONSchemaProps{ + Type: "string", + Description: "Floating VIP for the control plane.", + }, + }, + }, + { + Name: "controlPlanePort", + Required: true, + Schema: clusterv1.VariableSchema{ + OpenAPIV3Schema: clusterv1.JSONSchemaProps{ + Type: "integer", + Description: "Port for the control plane endpoint.", + }, + }, + }, + { + Name: "kubeVipPodManifest", + Required: true, + Schema: clusterv1.VariableSchema{ + OpenAPIV3Schema: clusterv1.JSONSchemaProps{ + Type: "string", + Description: "kube-vip manifest for the control plane.", + }, + }, + }, + { + Name: "infraServer", + Required: true, + Schema: clusterv1.VariableSchema{ + OpenAPIV3Schema: clusterv1.JSONSchemaProps{ + Type: "object", + Properties: map[string]clusterv1.JSONSchemaProps{ + "url": {Type: "string"}, + "thumbprint": {Type: "string"}, + }, + }, + }, + }, + { + Name: "credsSecretName", + Required: true, + Schema: clusterv1.VariableSchema{ + OpenAPIV3Schema: clusterv1.JSONSchemaProps{ + Type: "string", + Description: "Secret containing the credentials for the infra cluster.", + }, + }, + }, + } +}
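
As a quick illustration of the patch helpers above, here is a minimal sketch of how patchUsers could be unit tested (a hypothetical test file alongside handler.go; the SSH key material is a placeholder). Variable values arrive JSON-encoded, which is why the string inside Raw is quoted:

```go
package topologymutation

import (
	"testing"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
)

func TestPatchUsers(t *testing.T) {
	spec := &bootstrapv1.KubeadmConfigSpec{}
	vars := map[string]apiextensionsv1.JSON{
		// Placeholder key; real values are provided via Cluster topology variables.
		"sshKey": {Raw: []byte(`"ssh-ed25519 AAAA... user@example.com"`)},
	}

	if err := patchUsers(spec, vars); err != nil {
		t.Fatalf("patchUsers returned an error: %v", err)
	}
	if len(spec.Users) != 1 || spec.Users[0].Name != "capv" {
		t.Fatalf("expected a single capv user, got %+v", spec.Users)
	}

	// When the sshKey variable is not set, the patch must be skipped without error.
	if err := patchUsers(&bootstrapv1.KubeadmConfigSpec{}, map[string]apiextensionsv1.JSON{}); err != nil {
		t.Fatalf("expected missing sshKey to be a no-op, got %v", err)
	}
}
```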
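The kube-vip substitution in patchKubeadmControlPlaneTemplate relies on a small regex that rewrites the value of the address env var inside the static pod manifest; a self-contained sketch of just that substitution (the manifest snippet and IP address are made up):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Trimmed-down stand-in for a kube-vip static pod manifest.
	manifest := "env:\n- name: address\n  value: REPLACE\n"
	// Same expression used by patchKubeadmControlPlaneTemplate: capture the
	// "name: address" / "value:" pair and replace everything after "value:".
	re := regexp.MustCompile("(name: address\n +value:).*")
	fmt.Print(re.ReplaceAllString(manifest, "$1 192.168.1.10"))
	// Prints the manifest with the address env var set to 192.168.1.10.
}
```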
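For context on how the three topology mutation handlers are consumed: a ClusterClass references them as external patches using the handler-name.ExtensionConfig-name convention. A sketch assuming the ExtensionConfig above (the ClusterClass name and the omitted sections are placeholders):

```yaml
apiVersion: cluster.x-k8s.io/v1beta1
kind: ClusterClass
metadata:
  name: quick-start # placeholder name
spec:
  # infrastructure, controlPlane and workers omitted for brevity
  patches:
    - name: test-extension-patches
      external:
        generateExtension: generate-patches.capv-test-extension
        validateExtension: validate-topology.capv-test-extension
        discoverVariablesExtension: discover-variables.capv-test-extension
```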
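The lifecycle handlers wired up in setupLifecycleHookHandlers come from sigs.k8s.io/cluster-api/test/extension/handlers/lifecycle; for orientation only, a minimal sketch (not the vendored implementation) of the HandlerFunc shape for a blocking lifecycle hook:

```go
package lifecycle

import (
	"context"

	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
	ctrl "sigs.k8s.io/controller-runtime"
)

// DoBeforeClusterCreate matches the HandlerFunc signature registered in main.go.
func DoBeforeClusterCreate(ctx context.Context, request *runtimehooksv1.BeforeClusterCreateRequest, response *runtimehooksv1.BeforeClusterCreateResponse) {
	log := ctrl.LoggerFrom(ctx)
	log.Info("BeforeClusterCreate is called", "cluster", request.Cluster.Name)

	response.Status = runtimehooksv1.ResponseStatusSuccess
	// BeforeClusterCreate is a blocking hook: a RetryAfterSeconds > 0 would make
	// Cluster API hold cluster creation and call the hook again later.
	response.RetryAfterSeconds = 0
}
```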