From 09ff688ddb29e6a8900c1aa02241f5b26de15713 Mon Sep 17 00:00:00 2001 From: Stefan Bueringer Date: Tue, 12 Sep 2023 22:30:41 +0200 Subject: [PATCH] Improve logging for VSphereCluster controllers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Stefan Büringer buringerst@vmware.com --- .golangci.yml | 1 + controllers/clustermodule_reconciler.go | 66 +++-- controllers/clustermodule_reconciler_test.go | 82 +++--- controllers/controllers_suite_test.go | 4 +- controllers/serviceaccount_controller.go | 255 ++++++++++-------- .../serviceaccount_controller_suite_test.go | 4 +- .../serviceaccount_controller_unit_test.go | 46 ++-- controllers/servicediscovery_controller.go | 109 ++++---- ... servicediscovery_controller_intg_test.go} | 0 ...servicediscovery_controller_suite_test.go} | 0 ... servicediscovery_controller_unit_test.go} | 16 +- .../vmware/vspherecluster_reconciler.go | 133 +++++---- .../vmware/vspherecluster_reconciler_test.go | 35 ++- controllers/vspherecluster_controller.go | 55 ++-- controllers/vspherecluster_reconciler.go | 186 +++++++------ controllers/vspherecluster_reconciler_test.go | 15 +- .../vsphereclusteridentity_controller.go | 6 +- .../vspheredeploymentzone_controller.go | 13 +- ...vspheredeploymentzone_controller_domain.go | 3 +- controllers/vspheremachine_controller.go | 12 +- controllers/vspherevm_controller.go | 8 +- main.go | 22 +- pkg/clustermodule/fake/service.go | 16 +- pkg/clustermodule/interfaces.go | 12 +- pkg/clustermodule/service.go | 77 +++--- pkg/clustermodule/service_test.go | 13 +- pkg/clustermodule/session.go | 30 +-- pkg/clustermodule/util_test.go | 2 - pkg/context/cluster_context.go | 8 +- pkg/context/fake/fake_cluster_context.go | 6 +- .../fake/fake_guest_cluster_context.go | 11 +- pkg/context/fake/fake_machine_context.go | 22 +- .../fake/fake_vmware_cluster_context.go | 14 +- pkg/context/vmware/cluster_context.go | 13 +- pkg/context/vmware/machine_context.go | 6 +- pkg/manager/network.go | 24 +- pkg/services/interfaces.go | 34 +-- pkg/services/network/dummy_provider.go | 12 +- pkg/services/network/netop_provider.go | 38 +-- pkg/services/network/network_test.go | 73 ++--- pkg/services/network/nsxt_provider.go | 52 ++-- pkg/services/vimmachine_test.go | 8 +- .../vmoperator/control_plane_endpoint.go | 62 +++-- .../vmoperator/control_plane_endpoint_test.go | 79 +++--- pkg/services/vmoperator/resource_policy.go | 46 ++-- .../vmoperator/resource_policy_test.go | 13 +- pkg/services/vmoperator/vmopmachine_test.go | 4 +- pkg/session/session.go | 1 + pkg/util/testutil.go | 15 +- test/helpers/vmware/unit_test_context.go | 21 +- 50 files changed, 969 insertions(+), 814 deletions(-) rename controllers/{svcdiscovery_controller_intg_test.go => servicediscovery_controller_intg_test.go} (100%) rename controllers/{svcdiscovery_controller_suite_test.go => servicediscovery_controller_suite_test.go} (100%) rename controllers/{svcdiscovery_controller_unit_test.go => servicediscovery_controller_unit_test.go} (92%) diff --git a/.golangci.yml b/.golangci.yml index f508b31c77..f7d1fb7f5f 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -33,6 +33,7 @@ linters: - govet - importas - ineffassign + - loggercheck - misspell - nakedret - nilerr diff --git a/controllers/clustermodule_reconciler.go b/controllers/clustermodule_reconciler.go index c7bb217b94..8978586c44 100644 --- a/controllers/clustermodule_reconciler.go +++ b/controllers/clustermodule_reconciler.go @@ -23,10 +23,12 @@ import ( "github.com/pkg/errors" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/event" @@ -47,29 +49,31 @@ import ( // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=vspheremachinetemplates,verbs=get;list;watch type Reconciler struct { - *capvcontext.ControllerContext + Client client.Client ClusterModuleService clustermodule.Service } -func NewReconciler(controllerCtx *capvcontext.ControllerContext) Reconciler { +func NewReconciler(controllerManagerCtx *capvcontext.ControllerManagerContext) Reconciler { return Reconciler{ - ControllerContext: controllerCtx, - ClusterModuleService: clustermodule.NewService(), + Client: controllerManagerCtx.Client, + ClusterModuleService: clustermodule.NewService(controllerManagerCtx, controllerManagerCtx.Client), } } -func (r Reconciler) Reconcile(clusterCtx *capvcontext.ClusterContext) (reconcile.Result, error) { - clusterCtx.Logger.Info("reconcile anti affinity setup") +func (r Reconciler) Reconcile(ctx context.Context, clusterCtx *capvcontext.ClusterContext) (reconcile.Result, error) { + log := ctrl.LoggerFrom(ctx) + log.Info("Reconciling cluster modules") + if !clustermodule.IsClusterCompatible(clusterCtx) { conditions.MarkFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition, infrav1.VCenterVersionIncompatibleReason, clusterv1.ConditionSeverityInfo, "vCenter API version %s is not compatible with cluster modules", clusterCtx.VSphereCluster.Status.VCenterVersion) - clusterCtx.Logger.Info("cluster is not compatible for anti affinity", - "api version", clusterCtx.VSphereCluster.Status.VCenterVersion) + log.Info("Cluster is not compatible for anti affinity (vCenter >= 7 required)", + "vCenterVersion", clusterCtx.VSphereCluster.Status.VCenterVersion) return reconcile.Result{}, nil } - objectMap, err := r.fetchMachineOwnerObjects(clusterCtx) + objectMap, err := r.fetchMachineOwnerObjects(ctx, clusterCtx) if err != nil { return reconcile.Result{}, err } @@ -78,6 +82,10 @@ func (r Reconciler) Reconcile(clusterCtx *capvcontext.ClusterContext) (reconcile clusterModuleSpecs := []infrav1.ClusterModule{} for _, mod := range clusterCtx.VSphereCluster.Spec.ClusterModules { + // Note: We have to use := here to not overwrite log & ctx outside the for loop. + log := log.WithValues("targetObjectName", mod.TargetObjectName, "moduleUUID", mod.ModuleUUID) + ctx := ctrl.LoggerInto(ctx, log) + curr := mod.TargetObjectName if mod.ControlPlane { curr = appendKCPKey(curr) @@ -85,19 +93,17 @@ func (r Reconciler) Reconcile(clusterCtx *capvcontext.ClusterContext) (reconcile if obj, ok := objectMap[curr]; !ok { // delete the cluster module as the object is marked for deletion // or already deleted. 
- if err := r.ClusterModuleService.Remove(clusterCtx, mod.ModuleUUID); err != nil { - clusterCtx.Logger.Error(err, "failed to delete cluster module for object", - "name", mod.TargetObjectName, "moduleUUID", mod.ModuleUUID) + if err := r.ClusterModuleService.Remove(ctx, clusterCtx, mod.ModuleUUID); err != nil { + log.Error(err, "failed to delete cluster module for object") } delete(objectMap, curr) } else { // verify the cluster module - exists, err := r.ClusterModuleService.DoesExist(clusterCtx, obj, mod.ModuleUUID) + exists, err := r.ClusterModuleService.DoesExist(ctx, clusterCtx, obj, mod.ModuleUUID) if err != nil { // Add the error to modErrs so it gets handled below. modErrs = append(modErrs, clusterModError{obj.GetName(), errors.Wrapf(err, "failed to verify cluster module %q", mod.ModuleUUID)}) - clusterCtx.Logger.Error(err, "failed to verify cluster module for object", - "name", mod.TargetObjectName, "moduleUUID", mod.ModuleUUID) + log.Error(err, "failed to verify cluster module for object") // Append the module and remove it from objectMap to not create new ones instead. clusterModuleSpecs = append(clusterModuleSpecs, infrav1.ClusterModule{ ControlPlane: obj.IsControlPlane(), @@ -119,17 +125,15 @@ func (r Reconciler) Reconcile(clusterCtx *capvcontext.ClusterContext) (reconcile }) delete(objectMap, curr) } else { - clusterCtx.Logger.Info("module for object not found", - "moduleUUID", mod.ModuleUUID, - "object", mod.TargetObjectName) + log.Info("module for object not found") } } } for _, obj := range objectMap { - moduleUUID, err := r.ClusterModuleService.Create(clusterCtx, obj) + moduleUUID, err := r.ClusterModuleService.Create(ctx, clusterCtx, obj) if err != nil { - clusterCtx.Logger.Error(err, "failed to create cluster module for target object", "name", obj.GetName()) + log.Error(err, "failed to create cluster module for target object", "targetObjectName", obj.GetName()) modErrs = append(modErrs, clusterModError{obj.GetName(), err}) continue } @@ -166,28 +170,34 @@ func (r Reconciler) Reconcile(clusterCtx *capvcontext.ClusterContext) (reconcile } func (r Reconciler) toAffinityInput(ctx context.Context, obj client.Object) []reconcile.Request { + log := ctrl.LoggerFrom(ctx) + cluster, err := util.GetClusterFromMetadata(ctx, r.Client, metav1.ObjectMeta{ Namespace: obj.GetNamespace(), Labels: obj.GetLabels(), OwnerReferences: obj.GetOwnerReferences(), }) if err != nil { - r.Logger.Error(err, "failed to get owner cluster") + log.Error(err, "failed to get owner cluster") return nil } + log = log.WithValues("Cluster", klog.KObj(cluster)) + ctx = ctrl.LoggerInto(ctx, log) if cluster.Spec.InfrastructureRef == nil { - r.Logger.Error(err, "cluster infrastructureRef not set. Requeing", - "namespace", cluster.Namespace, "cluster_name", cluster.Name) + log.Error(err, "Cluster infrastructureRef not set. 
Requeueing") return nil } + + log = log.WithValues("VSphereCluster", klog.KRef(cluster.Namespace, cluster.Spec.InfrastructureRef.Name)) + ctx = ctrl.LoggerInto(ctx, log) + vsphereCluster := &infrav1.VSphereCluster{} if err := r.Client.Get(ctx, client.ObjectKey{ Name: cluster.Spec.InfrastructureRef.Name, Namespace: cluster.Namespace, }, vsphereCluster); err != nil { - r.Logger.Error(err, "failed to get vSphereCluster object", - "namespace", cluster.Namespace, "name", cluster.Spec.InfrastructureRef.Name) + log.Error(err, "Failed to get vSphereCluster object") return nil } @@ -226,7 +236,7 @@ func (r Reconciler) PopulateWatchesOnController(mgr manager.Manager, controller ) } -func (r Reconciler) fetchMachineOwnerObjects(clusterCtx *capvcontext.ClusterContext) (map[string]clustermodule.Wrapper, error) { +func (r Reconciler) fetchMachineOwnerObjects(ctx context.Context, clusterCtx *capvcontext.ClusterContext) (map[string]clustermodule.Wrapper, error) { objects := map[string]clustermodule.Wrapper{} name, ok := clusterCtx.VSphereCluster.GetLabels()[clusterv1.ClusterNameLabel] @@ -237,7 +247,7 @@ func (r Reconciler) fetchMachineOwnerObjects(clusterCtx *capvcontext.ClusterCont labels := map[string]string{clusterv1.ClusterNameLabel: name} kcpList := &controlplanev1.KubeadmControlPlaneList{} if err := r.Client.List( - clusterCtx, kcpList, + ctx, kcpList, client.InNamespace(clusterCtx.VSphereCluster.GetNamespace()), client.MatchingLabels(labels)); err != nil { return nil, errors.Wrapf(err, "failed to list control plane objects") @@ -254,7 +264,7 @@ func (r Reconciler) fetchMachineOwnerObjects(clusterCtx *capvcontext.ClusterCont mdList := &clusterv1.MachineDeploymentList{} if err := r.Client.List( - clusterCtx, mdList, + ctx, mdList, client.InNamespace(clusterCtx.VSphereCluster.GetNamespace()), client.MatchingLabels(labels)); err != nil { return nil, errors.Wrapf(err, "failed to list machine deployment objects") diff --git a/controllers/clustermodule_reconciler_test.go b/controllers/clustermodule_reconciler_test.go index 7cfc18a8f6..dd80233a67 100644 --- a/controllers/clustermodule_reconciler_test.go +++ b/controllers/clustermodule_reconciler_test.go @@ -66,8 +66,8 @@ func TestReconciler_Reconcile(t *testing.T) { }, }, setupMocks: func(svc *cmodfake.CMService) { - svc.On("DoesExist", mock.Anything, mock.Anything, kcpUUID).Return(true, nil) - svc.On("DoesExist", mock.Anything, mock.Anything, mdUUID).Return(true, nil) + svc.On("DoesExist", mock.Anything, mock.Anything, mock.Anything, kcpUUID).Return(true, nil) + svc.On("DoesExist", mock.Anything, mock.Anything, mock.Anything, mdUUID).Return(true, nil) }, customAssert: func(g *gomega.WithT, clusterCtx *capvcontext.ClusterContext) { g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(2)) @@ -77,8 +77,8 @@ func TestReconciler_Reconcile(t *testing.T) { name: "when no cluster modules exist", clusterModules: []infrav1.ClusterModule{}, setupMocks: func(svc *cmodfake.CMService) { - svc.On("Create", mock.Anything, clustermodule.NewWrapper(kcp)).Return(kcpUUID, nil) - svc.On("Create", mock.Anything, clustermodule.NewWrapper(md)).Return(mdUUID, nil) + svc.On("Create", mock.Anything, mock.Anything, clustermodule.NewWrapper(kcp)).Return(kcpUUID, nil) + svc.On("Create", mock.Anything, mock.Anything, clustermodule.NewWrapper(md)).Return(mdUUID, nil) }, customAssert: func(g *gomega.WithT, clusterCtx *capvcontext.ClusterContext) { g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(2)) @@ -103,8 +103,8 @@ func 
TestReconciler_Reconcile(t *testing.T) { }, }, setupMocks: func(svc *cmodfake.CMService) { - svc.On("DoesExist", mock.Anything, mock.Anything, kcpUUID).Return(true, nil) - svc.On("Create", mock.Anything, clustermodule.NewWrapper(md)).Return(mdUUID, nil) + svc.On("DoesExist", mock.Anything, mock.Anything, mock.Anything, kcpUUID).Return(true, nil) + svc.On("Create", mock.Anything, mock.Anything, clustermodule.NewWrapper(md)).Return(mdUUID, nil) }, customAssert: func(g *gomega.WithT, clusterCtx *capvcontext.ClusterContext) { g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(2)) @@ -134,10 +134,10 @@ func TestReconciler_Reconcile(t *testing.T) { }, }, setupMocks: func(svc *cmodfake.CMService) { - svc.On("DoesExist", mock.Anything, mock.Anything, kcpUUID).Return(false, nil) - svc.On("DoesExist", mock.Anything, mock.Anything, mdUUID).Return(false, nil) - svc.On("Create", mock.Anything, clustermodule.NewWrapper(kcp)).Return(kcpUUID+"a", nil) - svc.On("Create", mock.Anything, clustermodule.NewWrapper(md)).Return(mdUUID+"a", nil) + svc.On("DoesExist", mock.Anything, mock.Anything, mock.Anything, kcpUUID).Return(false, nil) + svc.On("DoesExist", mock.Anything, mock.Anything, mock.Anything, mdUUID).Return(false, nil) + svc.On("Create", mock.Anything, mock.Anything, clustermodule.NewWrapper(kcp)).Return(kcpUUID+"a", nil) + svc.On("Create", mock.Anything, mock.Anything, clustermodule.NewWrapper(md)).Return(mdUUID+"a", nil) }, customAssert: func(g *gomega.WithT, clusterCtx *capvcontext.ClusterContext) { g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(2)) @@ -165,8 +165,8 @@ func TestReconciler_Reconcile(t *testing.T) { }, haveError: true, setupMocks: func(svc *cmodfake.CMService) { - svc.On("DoesExist", mock.Anything, mock.Anything, kcpUUID).Return(false, vCenter500err) - svc.On("DoesExist", mock.Anything, mock.Anything, mdUUID).Return(false, vCenter500err) + svc.On("DoesExist", mock.Anything, mock.Anything, mock.Anything, kcpUUID).Return(false, vCenter500err) + svc.On("DoesExist", mock.Anything, mock.Anything, mock.Anything, mdUUID).Return(false, vCenter500err) }, customAssert: func(g *gomega.WithT, clusterCtx *capvcontext.ClusterContext) { g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(2)) @@ -195,8 +195,8 @@ func TestReconciler_Reconcile(t *testing.T) { }, haveError: true, setupMocks: func(svc *cmodfake.CMService) { - svc.On("DoesExist", mock.Anything, mock.Anything, kcpUUID).Return(true, nil) - svc.On("DoesExist", mock.Anything, mock.Anything, mdUUID).Return(false, vCenter500err) + svc.On("DoesExist", mock.Anything, mock.Anything, mock.Anything, kcpUUID).Return(true, nil) + svc.On("DoesExist", mock.Anything, mock.Anything, mock.Anything, mdUUID).Return(false, vCenter500err) }, customAssert: func(g *gomega.WithT, clusterCtx *capvcontext.ClusterContext) { g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(2)) @@ -225,9 +225,9 @@ func TestReconciler_Reconcile(t *testing.T) { }, haveError: true, setupMocks: func(svc *cmodfake.CMService) { - svc.On("DoesExist", mock.Anything, mock.Anything, kcpUUID).Return(false, nil) - svc.On("DoesExist", mock.Anything, mock.Anything, mdUUID).Return(false, vCenter500err) - svc.On("Create", mock.Anything, clustermodule.NewWrapper(kcp)).Return(kcpUUID+"a", nil) + svc.On("DoesExist", mock.Anything, mock.Anything, mock.Anything, kcpUUID).Return(false, nil) + svc.On("DoesExist", mock.Anything, mock.Anything, mock.Anything, mdUUID).Return(false, vCenter500err) + 
svc.On("Create", mock.Anything, mock.Anything, clustermodule.NewWrapper(kcp)).Return(kcpUUID+"a", nil) }, customAssert: func(g *gomega.WithT, clusterCtx *capvcontext.ClusterContext) { g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(2)) @@ -244,8 +244,8 @@ func TestReconciler_Reconcile(t *testing.T) { name: "when cluster module creation is called for a resource pool owned by non compute cluster resource", clusterModules: []infrav1.ClusterModule{}, setupMocks: func(svc *cmodfake.CMService) { - svc.On("Create", mock.Anything, clustermodule.NewWrapper(kcp)).Return("", clustermodule.NewIncompatibleOwnerError("foo-123")) - svc.On("Create", mock.Anything, clustermodule.NewWrapper(md)).Return(mdUUID, nil) + svc.On("Create", mock.Anything, mock.Anything, clustermodule.NewWrapper(kcp)).Return("", clustermodule.NewIncompatibleOwnerError("foo-123")) + svc.On("Create", mock.Anything, mock.Anything, clustermodule.NewWrapper(md)).Return(mdUUID, nil) }, customAssert: func(g *gomega.WithT, clusterCtx *capvcontext.ClusterContext) { g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(1)) @@ -262,8 +262,8 @@ func TestReconciler_Reconcile(t *testing.T) { name: "when cluster module creation fails", clusterModules: []infrav1.ClusterModule{}, setupMocks: func(svc *cmodfake.CMService) { - svc.On("Create", mock.Anything, clustermodule.NewWrapper(kcp)).Return(kcpUUID, nil) - svc.On("Create", mock.Anything, clustermodule.NewWrapper(md)).Return("", errors.New("failed to reach API")) + svc.On("Create", mock.Anything, mock.Anything, clustermodule.NewWrapper(kcp)).Return(kcpUUID, nil) + svc.On("Create", mock.Anything, mock.Anything, clustermodule.NewWrapper(md)).Return("", errors.New("failed to reach API")) }, // if cluster module creation fails for any reason apart from incompatibility, error should be returned haveError: true, @@ -282,8 +282,8 @@ func TestReconciler_Reconcile(t *testing.T) { name: "when all cluster module creations fail for a resource pool owned by non compute cluster resource", clusterModules: []infrav1.ClusterModule{}, setupMocks: func(svc *cmodfake.CMService) { - svc.On("Create", mock.Anything, clustermodule.NewWrapper(kcp)).Return("", clustermodule.NewIncompatibleOwnerError("foo-123")) - svc.On("Create", mock.Anything, clustermodule.NewWrapper(md)).Return("", clustermodule.NewIncompatibleOwnerError("bar-123")) + svc.On("Create", mock.Anything, mock.Anything, clustermodule.NewWrapper(kcp)).Return("", clustermodule.NewIncompatibleOwnerError("foo-123")) + svc.On("Create", mock.Anything, mock.Anything, clustermodule.NewWrapper(md)).Return("", clustermodule.NewIncompatibleOwnerError("bar-123")) }, // if cluster module creation fails due to resource pool owner incompatibility, vSphereCluster object is set to Ready haveError: false, @@ -298,9 +298,9 @@ func TestReconciler_Reconcile(t *testing.T) { name: "when some cluster module creations are skipped", clusterModules: []infrav1.ClusterModule{}, setupMocks: func(svc *cmodfake.CMService) { - svc.On("Create", mock.Anything, clustermodule.NewWrapper(kcp)).Return(kcpUUID, nil) + svc.On("Create", mock.Anything, mock.Anything, clustermodule.NewWrapper(kcp)).Return(kcpUUID, nil) // mimics cluster module creation was skipped - svc.On("Create", mock.Anything, clustermodule.NewWrapper(md)).Return("", nil) + svc.On("Create", mock.Anything, mock.Anything, clustermodule.NewWrapper(md)).Return("", nil) }, customAssert: func(g *gomega.WithT, clusterCtx *capvcontext.ClusterContext) { 
g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(1)) @@ -324,7 +324,7 @@ func TestReconciler_Reconcile(t *testing.T) { }, }, setupMocks: func(svc *cmodfake.CMService) { - svc.On("DoesExist", mock.Anything, mock.Anything, kcpUUID).Return(true, nil) + svc.On("DoesExist", mock.Anything, mock.Anything, mock.Anything, kcpUUID).Return(true, nil) }, customAssert: func(g *gomega.WithT, clusterCtx *capvcontext.ClusterContext) { g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(1)) @@ -353,8 +353,8 @@ func TestReconciler_Reconcile(t *testing.T) { }, }, setupMocks: func(svc *cmodfake.CMService) { - svc.On("DoesExist", mock.Anything, mock.Anything, kcpUUID).Return(true, nil) - svc.On("Remove", mock.Anything, mdUUID).Return(nil) + svc.On("DoesExist", mock.Anything, mock.Anything, mock.Anything, kcpUUID).Return(true, nil) + svc.On("Remove", mock.Anything, mock.Anything, mdUUID).Return(nil) }, customAssert: func(g *gomega.WithT, clusterCtx *capvcontext.ClusterContext) { g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(1)) @@ -383,8 +383,8 @@ func TestReconciler_Reconcile(t *testing.T) { }, }, setupMocks: func(svc *cmodfake.CMService) { - svc.On("Remove", mock.Anything, kcpUUID).Return(nil) - svc.On("Remove", mock.Anything, mdUUID).Return(nil) + svc.On("Remove", mock.Anything, mock.Anything, kcpUUID).Return(nil) + svc.On("Remove", mock.Anything, mock.Anything, mdUUID).Return(nil) }, customAssert: func(g *gomega.WithT, clusterCtx *capvcontext.ClusterContext) { g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules).To(gomega.BeEmpty()) @@ -410,9 +410,9 @@ func TestReconciler_Reconcile(t *testing.T) { tt.beforeFn(md) } controllerCtx := fake.NewControllerContext(fake.NewControllerManagerContext(kcp, md)) - ctx := fake.NewClusterContext(controllerCtx) - ctx.VSphereCluster.Spec.ClusterModules = tt.clusterModules - ctx.VSphereCluster.Status = infrav1.VSphereClusterStatus{VCenterVersion: infrav1.NewVCenterVersion("7.0.0")} + clusterCtx := fake.NewClusterContext(controllerCtx) + clusterCtx.VSphereCluster.Spec.ClusterModules = tt.clusterModules + clusterCtx.VSphereCluster.Status = infrav1.VSphereClusterStatus{VCenterVersion: infrav1.NewVCenterVersion("7.0.0")} svc := new(cmodfake.CMService) if tt.setupMocks != nil { @@ -420,16 +420,16 @@ func TestReconciler_Reconcile(t *testing.T) { } r := Reconciler{ - ControllerContext: controllerCtx, + Client: controllerCtx.Client, ClusterModuleService: svc, } - _, err := r.Reconcile(ctx) + _, err := r.Reconcile(ctx, clusterCtx) if tt.haveError { g.Expect(err).To(gomega.HaveOccurred()) } else { g.Expect(err).ToNot(gomega.HaveOccurred()) } - tt.customAssert(g, ctx) + tt.customAssert(g, clusterCtx) svc.AssertExpectations(t) }) @@ -486,9 +486,9 @@ func TestReconciler_fetchMachineOwnerObjects(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := gomega.NewWithT(t) controllerCtx := fake.NewControllerContext(fake.NewControllerManagerContext(tt.initObjs...)) - ctx := fake.NewClusterContext(controllerCtx) - r := Reconciler{ControllerContext: controllerCtx} - objMap, err := r.fetchMachineOwnerObjects(ctx) + clusterCtx := fake.NewClusterContext(controllerCtx) + r := Reconciler{Client: controllerCtx.Client} + objMap, err := r.fetchMachineOwnerObjects(ctx, clusterCtx) if tt.hasError { g.Expect(err).To(gomega.HaveOccurred()) return @@ -511,8 +511,8 @@ func TestReconciler_fetchMachineOwnerObjects(t *testing.T) { machineDeployment("foo", metav1.NamespaceDefault, fake.Clusterv1a2Name), mdToBeDeleted, )) - ctx := 
fake.NewClusterContext(controllerCtx) - objMap, err := Reconciler{ControllerContext: controllerCtx}.fetchMachineOwnerObjects(ctx) + clusterCtx := fake.NewClusterContext(controllerCtx) + objMap, err := Reconciler{Client: controllerCtx.Client}.fetchMachineOwnerObjects(ctx, clusterCtx) g.Expect(err).NotTo(gomega.HaveOccurred()) g.Expect(objMap).To(gomega.HaveLen(2)) }) diff --git a/controllers/controllers_suite_test.go b/controllers/controllers_suite_test.go index 8554b5dcad..b97f15ed67 100644 --- a/controllers/controllers_suite_test.go +++ b/controllers/controllers_suite_test.go @@ -108,10 +108,10 @@ func setup() { if err := AddVSphereDeploymentZoneControllerToManager(testEnv.GetContext(), testEnv.Manager, controllerOpts); err != nil { panic(fmt.Sprintf("unable to setup VSphereDeploymentZone controller: %v", err)) } - if err := AddServiceAccountProviderControllerToManager(testEnv.GetContext(), testEnv.Manager, tracker, controllerOpts); err != nil { + if err := AddServiceAccountProviderControllerToManager(ctx, testEnv.GetContext(), testEnv.Manager, tracker, controllerOpts); err != nil { panic(fmt.Sprintf("unable to setup ServiceAccount controller: %v", err)) } - if err := AddServiceDiscoveryControllerToManager(testEnv.GetContext(), testEnv.Manager, tracker, controllerOpts); err != nil { + if err := AddServiceDiscoveryControllerToManager(ctx, testEnv.GetContext(), testEnv.Manager, tracker, controllerOpts); err != nil { panic(fmt.Sprintf("unable to setup SvcDiscovery controller: %v", err)) } diff --git a/controllers/serviceaccount_controller.go b/controllers/serviceaccount_controller.go index f5afd6ff18..144cf7a44a 100644 --- a/controllers/serviceaccount_controller.go +++ b/controllers/serviceaccount_controller.go @@ -30,6 +30,8 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog/v2" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/controllers/remote" clusterutilv1 "sigs.k8s.io/cluster-api/util" @@ -66,27 +68,22 @@ const ( ) // AddServiceAccountProviderControllerToManager adds this controller to the provided manager. 
-func AddServiceAccountProviderControllerToManager(controllerCtx *capvcontext.ControllerManagerContext, mgr manager.Manager, tracker *remote.ClusterCacheTracker, options controller.Options) error { +func AddServiceAccountProviderControllerToManager(ctx context.Context, controllerManagerCtx *capvcontext.ControllerManagerContext, mgr manager.Manager, tracker *remote.ClusterCacheTracker, options controller.Options) error { var ( controlledType = &vmwarev1.ProviderServiceAccount{} controlledTypeName = reflect.TypeOf(controlledType).Elem().Name() controllerNameShort = fmt.Sprintf("%s-controller", strings.ToLower(controlledTypeName)) - controllerNameLong = fmt.Sprintf("%s/%s/%s", controllerCtx.Namespace, controllerCtx.Name, controllerNameShort) + controllerNameLong = fmt.Sprintf("%s/%s/%s", controllerManagerCtx.Namespace, controllerManagerCtx.Name, controllerNameShort) ) - controllerContext := &capvcontext.ControllerContext{ - ControllerManagerContext: controllerCtx, - Name: controllerNameShort, - Recorder: record.New(mgr.GetEventRecorderFor(controllerNameLong)), - Logger: controllerCtx.Logger.WithName(controllerNameShort), - } - r := ServiceAccountReconciler{ - ControllerContext: controllerContext, + r := &ServiceAccountReconciler{ + Client: controllerManagerCtx.Client, + Recorder: record.New(mgr.GetEventRecorderFor(controllerNameLong)), remoteClusterCacheTracker: tracker, } - clusterToInfraFn := clusterToSupervisorInfrastructureMapFunc(controllerCtx) + clusterToInfraFn := clusterToSupervisorInfrastructureMapFunc(ctx, controllerManagerCtx.Client) return ctrl.NewControllerManagedBy(mgr).For(controlledType). WithOptions(options). @@ -105,47 +102,56 @@ func AddServiceAccountProviderControllerToManager(controllerCtx *capvcontext.Con &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, o client.Object) []reconcile.Request { requests := clusterToInfraFn(ctx, o) - if requests == nil { + if len(requests) == 0 { return nil } + log := ctrl.LoggerFrom(ctx).WithValues("VSphereCluster", klog.KRef(requests[0].Namespace, requests[0].Name)) + ctx = ctrl.LoggerInto(ctx, log) + c := &vmwarev1.VSphereCluster{} if err := r.Client.Get(ctx, requests[0].NamespacedName, c); err != nil { - r.Logger.V(4).Error(err, "Failed to get VSphereCluster") + log.V(4).Error(err, "Failed to get VSphereCluster") return nil } if annotations.IsExternallyManaged(c) { - r.Logger.V(4).Info("VSphereCluster is externally managed, skipping mapping.") + log.V(4).Info("VSphereCluster is externally managed, skipping mapping.") return nil } return requests }), ). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(controllerCtx), controllerCtx.WatchFilterValue)). + WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), controllerManagerCtx.WatchFilterValue)). 
Complete(r) } -func clusterToSupervisorInfrastructureMapFunc(controllerCtx *capvcontext.ControllerManagerContext) handler.MapFunc { +func clusterToSupervisorInfrastructureMapFunc(ctx context.Context, c client.Client) handler.MapFunc { gvk := vmwarev1.GroupVersion.WithKind(reflect.TypeOf(&vmwarev1.VSphereCluster{}).Elem().Name()) - return clusterutilv1.ClusterToInfrastructureMapFunc(controllerCtx, gvk, controllerCtx.Client, &vmwarev1.VSphereCluster{}) + return clusterutilv1.ClusterToInfrastructureMapFunc(ctx, gvk, c, &vmwarev1.VSphereCluster{}) } type ServiceAccountReconciler struct { - *capvcontext.ControllerContext - + Client client.Client + Recorder record.Recorder remoteClusterCacheTracker *remote.ClusterCacheTracker } -func (r ServiceAccountReconciler) Reconcile(_ context.Context, req reconcile.Request) (_ reconcile.Result, reterr error) { - r.ControllerContext.Logger.V(4).Info("Starting Reconcile") +func (r *ServiceAccountReconciler) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, reterr error) { + log := ctrl.LoggerFrom(ctx) + log.V(4).Info("Starting Reconcile") + + vsphereClusterKey := client.ObjectKey{Namespace: req.Namespace, Name: req.Name} + + // Note: This reconciler reconciles ProviderServiceAccounts so we have to add VSphereCluster ourselves. + log = log.WithValues("VSphereCluster", klog.KRef(vsphereClusterKey.Namespace, vsphereClusterKey.Name)) + ctx = ctrl.LoggerInto(ctx, log) // Get the vSphereCluster for this request. vsphereCluster := &vmwarev1.VSphereCluster{} - clusterKey := client.ObjectKey{Namespace: req.Namespace, Name: req.Name} - if err := r.Client.Get(r, clusterKey, vsphereCluster); err != nil { + if err := r.Client.Get(ctx, vsphereClusterKey, vsphereCluster); err != nil { if apierrors.IsNotFound(err) { - r.Logger.Info("vSphereCluster not found, won't reconcile", "cluster", clusterKey) + log.Info("VSphereCluster not found, won't reconcile") return reconcile.Result{}, nil } return reconcile.Result{}, err @@ -164,36 +170,33 @@ func (r ServiceAccountReconciler) Reconcile(_ context.Context, req reconcile.Req // Create the cluster context for this request. clusterContext := &vmwarecontext.ClusterContext{ - ControllerContext: r.ControllerContext, - VSphereCluster: vsphereCluster, - Logger: r.Logger.WithName(req.Namespace).WithName(req.Name), - PatchHelper: patchHelper, + VSphereCluster: vsphereCluster, + PatchHelper: patchHelper, } // Always issue a patch when exiting this function so changes to the // resource are patched back to the API server. 
defer func() { - if err := clusterContext.Patch(); err != nil { - if reterr == nil { - reterr = err - } else { - clusterContext.Logger.Error(err, "patch failed", "cluster", clusterContext.String()) - } + if err := clusterContext.Patch(ctx); err != nil { + reterr = kerrors.NewAggregate([]error{reterr, err}) } }() + if !vsphereCluster.DeletionTimestamp.IsZero() { - return r.ReconcileDelete(clusterContext) + return r.ReconcileDelete(ctx, clusterContext) } - cluster, err := clusterutilv1.GetClusterFromMetadata(r, r.Client, vsphereCluster.ObjectMeta) + cluster, err := clusterutilv1.GetClusterFromMetadata(ctx, r.Client, vsphereCluster.ObjectMeta) if err != nil { - r.Logger.Info("unable to get capi cluster from vsphereCluster", "err", err) + log.Error(err, "Failed to get Cluster from VSphereCluster") return reconcile.Result{}, nil } + log = log.WithValues("Cluster", klog.KObj(cluster)) + ctx = ctrl.LoggerInto(ctx, log) // Pause reconciliation if entire vSphereCluster or Cluster is paused if annotations.IsPaused(cluster, vsphereCluster) { - r.Logger.V(4).Info("VSphereCluster %s/%s linked to a cluster that is paused", - vsphereCluster.Namespace, vsphereCluster.Name) + // Note: Cluster and VSphereCluster are already set on the logger via WithValues above. + log.V(4).Info("VSphereCluster linked to a cluster that is paused") return reconcile.Result{}, nil } @@ -202,35 +205,36 @@ func (r ServiceAccountReconciler) Reconcile(_ context.Context, req reconcile.Req // then just return a no-op and wait for the next sync. This will occur when // the Cluster's status is updated with a reference to the secret that has // the Kubeconfig data used to access the target cluster. - guestClient, err := r.remoteClusterCacheTracker.GetClient(clusterContext, client.ObjectKeyFromObject(cluster)) + guestClient, err := r.remoteClusterCacheTracker.GetClient(ctx, client.ObjectKeyFromObject(cluster)) if err != nil { if errors.Is(err, remote.ErrClusterLocked) { - r.Logger.V(5).Info("Requeuing because another worker has the lock on the ClusterCacheTracker") + log.V(5).Info("Requeuing because another worker has the lock on the ClusterCacheTracker") return ctrl.Result{Requeue: true}, nil } - clusterContext.Logger.Info("The control plane is not ready yet", "err", err) + log.Error(err, "The control plane is not ready yet") return reconcile.Result{RequeueAfter: clusterNotReadyRequeueTime}, nil } // Defer to the Reconciler for reconciling a non-delete event.
- return r.ReconcileNormal(&vmwarecontext.GuestClusterContext{ + return r.ReconcileNormal(ctx, &vmwarecontext.GuestClusterContext{ ClusterContext: clusterContext, GuestClient: guestClient, }) } -func (r ServiceAccountReconciler) ReconcileDelete(clusterCtx *vmwarecontext.ClusterContext) (reconcile.Result, error) { - clusterCtx.Logger.V(4).Info("Reconciling deleting Provider ServiceAccounts", "cluster", clusterCtx.VSphereCluster.Name) +func (r *ServiceAccountReconciler) ReconcileDelete(ctx context.Context, clusterCtx *vmwarecontext.ClusterContext) (reconcile.Result, error) { + log := ctrl.LoggerFrom(ctx) + log.V(4).Info("Reconciling deleting Provider ServiceAccounts") - pSvcAccounts, err := getProviderServiceAccounts(clusterCtx) + pSvcAccounts, err := r.getProviderServiceAccounts(ctx, clusterCtx) if err != nil { - clusterCtx.Logger.Error(err, "Error fetching provider serviceaccounts") + log.Error(err, "Error fetching ProviderServiceAccounts") return reconcile.Result{}, err } for _, pSvcAccount := range pSvcAccounts { // Delete entries for configmap with serviceaccount - if err := r.deleteServiceAccountConfigMap(clusterCtx, pSvcAccount); err != nil { + if err := r.deleteServiceAccountConfigMap(ctx, pSvcAccount); err != nil { return reconcile.Result{}, errors.Wrapf(err, "unable to delete configmap entry for provider serviceaccount %s", pSvcAccount.Name) } } @@ -238,8 +242,10 @@ func (r ServiceAccountReconciler) ReconcileDelete(clusterCtx *vmwarecontext.Clus return reconcile.Result{}, nil } -func (r ServiceAccountReconciler) ReconcileNormal(guestClusterCtx *vmwarecontext.GuestClusterContext) (_ reconcile.Result, reterr error) { - guestClusterCtx.Logger.V(4).Info("Reconciling Provider ServiceAccount", "cluster", guestClusterCtx.VSphereCluster.Name) +func (r *ServiceAccountReconciler) ReconcileNormal(ctx context.Context, guestClusterCtx *vmwarecontext.GuestClusterContext) (_ reconcile.Result, reterr error) { + log := ctrl.LoggerFrom(ctx) + log.V(4).Info("Reconciling ProviderServiceAccount") + defer func() { if reterr != nil { conditions.MarkFalse(guestClusterCtx.VSphereCluster, vmwarev1.ProviderServiceAccountsReadyCondition, vmwarev1.ProviderServiceAccountsReconciliationFailedReason, @@ -249,14 +255,14 @@ func (r ServiceAccountReconciler) ReconcileNormal(guestClusterCtx *vmwarecontext } }() - pSvcAccounts, err := getProviderServiceAccounts(guestClusterCtx.ClusterContext) + pSvcAccounts, err := r.getProviderServiceAccounts(ctx, guestClusterCtx.ClusterContext) if err != nil { - guestClusterCtx.Logger.Error(err, "Error fetching provider serviceaccounts") + log.Error(err, "Error fetching ProviderServiceAccounts") return reconcile.Result{}, err } - err = r.ensureProviderServiceAccounts(guestClusterCtx, pSvcAccounts) + err = r.ensureProviderServiceAccounts(ctx, guestClusterCtx, pSvcAccounts) if err != nil { - guestClusterCtx.Logger.Error(err, "Error ensuring provider serviceaccounts") + log.Error(err, "Error ensuring ProviderServiceAccounts") return reconcile.Result{}, err } @@ -264,59 +270,66 @@ func (r ServiceAccountReconciler) ReconcileNormal(guestClusterCtx *vmwarecontext } // Ensure service accounts from provider spec is created. 
-func (r ServiceAccountReconciler) ensureProviderServiceAccounts(guestClusterCtx *vmwarecontext.GuestClusterContext, pSvcAccounts []vmwarev1.ProviderServiceAccount) error { +func (r *ServiceAccountReconciler) ensureProviderServiceAccounts(ctx context.Context, guestClusterCtx *vmwarecontext.GuestClusterContext, pSvcAccounts []vmwarev1.ProviderServiceAccount) error { + log := ctrl.LoggerFrom(ctx) + for i, pSvcAccount := range pSvcAccounts { + // Note: We have to use := here to not overwrite log & ctx outside the for loop. + log := log.WithValues("ProviderServiceAccount", klog.KRef(pSvcAccount.Namespace, pSvcAccount.Name)) + ctx := ctrl.LoggerInto(ctx, log) + if guestClusterCtx.Cluster != nil && annotations.IsPaused(guestClusterCtx.Cluster, &(pSvcAccounts[i])) { - r.Logger.V(4).Info("ProviderServiceAccount %s/%s linked to a cluster that is paused or has pause annotation", - pSvcAccount.Namespace, pSvcAccount.Name) + log.V(4).Info("ProviderServiceAccount linked to a cluster that is paused or has pause annotation") continue } // 1. Create service accounts by the name specified in Provider Spec - if err := r.ensureServiceAccount(guestClusterCtx.ClusterContext, pSvcAccount); err != nil { + if err := r.ensureServiceAccount(ctx, pSvcAccount); err != nil { return errors.Wrapf(err, "unable to create provider serviceaccount %s", pSvcAccount.Name) } // 2. Update configmap with serviceaccount - if err := r.ensureServiceAccountConfigMap(guestClusterCtx.ClusterContext, pSvcAccount); err != nil { + if err := r.ensureServiceAccountConfigMap(ctx, pSvcAccount); err != nil { return errors.Wrapf(err, "unable to sync configmap for provider serviceaccount %s", pSvcAccount.Name) } // 3. Create secret of Service account token type for the service account - if err := r.ensureServiceAccountSecret(guestClusterCtx.ClusterContext, pSvcAccount); err != nil { + if err := r.ensureServiceAccountSecret(ctx, pSvcAccount); err != nil { return errors.Wrapf(err, "unable to create provider serviceaccount secret %s", getServiceAccountSecretName(pSvcAccount)) } // 4. Create the associated role for the service account - if err := r.ensureRole(guestClusterCtx.ClusterContext, pSvcAccount); err != nil { + if err := r.ensureRole(ctx, pSvcAccount); err != nil { return errors.Wrapf(err, "unable to create role for provider serviceaccount %s", pSvcAccount.Name) } // 5. Create the associated roleBinding for the service account - if err := r.ensureRoleBinding(guestClusterCtx.ClusterContext, pSvcAccount); err != nil { + if err := r.ensureRoleBinding(ctx, pSvcAccount); err != nil { return errors.Wrapf(err, "unable to create rolebinding for provider serviceaccount %s", pSvcAccount.Name) } // 6. 
Sync the service account with the target - if err := r.syncServiceAccountSecret(guestClusterCtx, pSvcAccount); err != nil { + if err := r.syncServiceAccountSecret(ctx, guestClusterCtx, pSvcAccount); err != nil { return errors.Wrapf(err, "unable to sync secret for provider serviceaccount %s", pSvcAccount.Name) } } return nil } -func (r ServiceAccountReconciler) ensureServiceAccount(clusterCtx *vmwarecontext.ClusterContext, pSvcAccount vmwarev1.ProviderServiceAccount) error { +func (r *ServiceAccountReconciler) ensureServiceAccount(ctx context.Context, pSvcAccount vmwarev1.ProviderServiceAccount) error { + log := ctrl.LoggerFrom(ctx) + svcAccount := corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: getServiceAccountName(pSvcAccount), Namespace: pSvcAccount.Namespace, }, } - logger := clusterCtx.Logger.WithValues("providerserviceaccount", pSvcAccount.Name, "serviceaccount", svcAccount.Name) - err := util.SetControllerReferenceWithOverride(&pSvcAccount, &svcAccount, clusterCtx.Scheme) + + err := util.SetControllerReferenceWithOverride(&pSvcAccount, &svcAccount, r.Client.Scheme()) if err != nil { return err } - logger.V(4).Info("Creating service account") - err = clusterCtx.Client.Create(clusterCtx, &svcAccount) + log.V(4).Info("Creating service account") + err = r.Client.Create(ctx, &svcAccount) if err != nil && !apierrors.IsAlreadyExists(err) { // Note: We skip updating the service account because the token controller updates the service account with a // secret and we don't want to overwrite it with an empty secret. @@ -325,8 +338,10 @@ func (r ServiceAccountReconciler) ensureServiceAccount(clusterCtx *vmwarecontext return nil } -func (r ServiceAccountReconciler) ensureServiceAccountSecret(clusterCtx *vmwarecontext.ClusterContext, pSvcAccount vmwarev1.ProviderServiceAccount) error { - secret := corev1.Secret{ +func (r *ServiceAccountReconciler) ensureServiceAccountSecret(ctx context.Context, pSvcAccount vmwarev1.ProviderServiceAccount) error { + log := ctrl.LoggerFrom(ctx) + + secret := &corev1.Secret{ Type: corev1.SecretTypeServiceAccountToken, ObjectMeta: metav1.ObjectMeta{ Name: getServiceAccountSecretName(pSvcAccount), @@ -337,14 +352,15 @@ func (r ServiceAccountReconciler) ensureServiceAccountSecret(clusterCtx *vmwarec }, }, } + log = log.WithValues("Secret", klog.KObj(secret)) + ctx = ctrl.LoggerInto(ctx, log) - logger := clusterCtx.Logger.WithValues("providerserviceaccount", pSvcAccount.Name, "secret", secret.Name) - err := util.SetControllerReferenceWithOverride(&pSvcAccount, &secret, clusterCtx.Scheme) + err := util.SetControllerReferenceWithOverride(&pSvcAccount, secret, r.Client.Scheme()) if err != nil { return err } - logger.V(4).Info("Creating service account secret") - err = clusterCtx.Client.Create(clusterCtx, &secret) + log.V(4).Info("Creating ServiceAccount Secret") + err = r.Client.Create(ctx, secret) if err != nil && !apierrors.IsAlreadyExists(err) { // Note: We skip updating the service account because the token controller updates the service account with a // secret and we don't want to overwrite it with an empty secret. 
@@ -353,17 +369,21 @@ func (r ServiceAccountReconciler) ensureServiceAccountSecret(clusterCtx *vmwarec return nil } -func (r ServiceAccountReconciler) ensureRole(clusterCtx *vmwarecontext.ClusterContext, pSvcAccount vmwarev1.ProviderServiceAccount) error { - role := rbacv1.Role{ +func (r *ServiceAccountReconciler) ensureRole(ctx context.Context, pSvcAccount vmwarev1.ProviderServiceAccount) error { + log := ctrl.LoggerFrom(ctx) + + role := &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ Name: getRoleName(pSvcAccount), Namespace: pSvcAccount.Namespace, }, } - logger := clusterCtx.Logger.WithValues("providerserviceaccount", pSvcAccount.Name, "role", role.Name) - logger.V(4).Info("Creating or updating role") - _, err := controllerutil.CreateOrPatch(clusterCtx, clusterCtx.Client, &role, func() error { - if err := util.SetControllerReferenceWithOverride(&pSvcAccount, &role, clusterCtx.Scheme); err != nil { + log = log.WithValues("Role", klog.KObj(role)) + ctx = ctrl.LoggerInto(ctx, log) + + log.V(4).Info("Creating or updating Role") + _, err := controllerutil.CreateOrPatch(ctx, r.Client, role, func() error { + if err := util.SetControllerReferenceWithOverride(&pSvcAccount, role, r.Client.Scheme()); err != nil { return err } role.Rules = pSvcAccount.Spec.Rules @@ -372,19 +392,22 @@ func (r ServiceAccountReconciler) ensureRole(clusterCtx *vmwarecontext.ClusterCo return err } -func (r ServiceAccountReconciler) ensureRoleBinding(clusterCtx *vmwarecontext.ClusterContext, pSvcAccount vmwarev1.ProviderServiceAccount) error { +func (r *ServiceAccountReconciler) ensureRoleBinding(ctx context.Context, pSvcAccount vmwarev1.ProviderServiceAccount) error { + log := ctrl.LoggerFrom(ctx) + roleName := getRoleName(pSvcAccount) svcAccountName := getServiceAccountName(pSvcAccount) - roleBinding := rbacv1.RoleBinding{ + roleBinding := &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ Name: getRoleBindingName(pSvcAccount), Namespace: pSvcAccount.Namespace, }, } - logger := clusterCtx.Logger.WithValues("providerserviceaccount", pSvcAccount.Name, "rolebinding", roleBinding.Name) - logger.V(4).Info("Creating or updating rolebinding") + log = log.WithValues("RoleBinding", klog.KObj(roleBinding)) + ctx = ctrl.LoggerInto(ctx, log) + log.V(4).Info("Creating or updating RoleBinding") - err := clusterCtx.Client.Get(clusterCtx, types.NamespacedName{Name: getRoleBindingName(pSvcAccount), Namespace: pSvcAccount.Namespace}, &roleBinding) + err := r.Client.Get(ctx, types.NamespacedName{Name: getRoleBindingName(pSvcAccount), Namespace: pSvcAccount.Namespace}, roleBinding) if err != nil && !apierrors.IsNotFound(err) { return err } @@ -392,14 +415,14 @@ func (r ServiceAccountReconciler) ensureRoleBinding(clusterCtx *vmwarecontext.Cl if err == nil { // If the roleRef needs changing, we have to delete the rolebinding and recreate it. 
if roleBinding.RoleRef.Name != roleName || roleBinding.RoleRef.Kind != "Role" || roleBinding.RoleRef.APIGroup != rbacv1.GroupName { - if err := clusterCtx.Client.Delete(clusterCtx, &roleBinding); err != nil { + if err := r.Client.Delete(ctx, roleBinding); err != nil { return err } } } - _, err = controllerutil.CreateOrPatch(clusterCtx, clusterCtx.Client, &roleBinding, func() error { - if err := util.SetControllerReferenceWithOverride(&pSvcAccount, &roleBinding, clusterCtx.Scheme); err != nil { + _, err = controllerutil.CreateOrPatch(ctx, r.Client, roleBinding, func() error { + if err := util.SetControllerReferenceWithOverride(&pSvcAccount, roleBinding, r.Client.Scheme()); err != nil { return err } roleBinding.RoleRef = rbacv1.RoleRef{ @@ -420,14 +443,14 @@ func (r ServiceAccountReconciler) ensureRoleBinding(clusterCtx *vmwarecontext.Cl return err } -func (r ServiceAccountReconciler) syncServiceAccountSecret(guestClusterCtx *vmwarecontext.GuestClusterContext, pSvcAccount vmwarev1.ProviderServiceAccount) error { - logger := guestClusterCtx.Logger.WithValues("providerserviceaccount", pSvcAccount.Name) - logger.V(4).Info("Attempting to sync token secret for provider service account") +func (r *ServiceAccountReconciler) syncServiceAccountSecret(ctx context.Context, guestClusterCtx *vmwarecontext.GuestClusterContext, pSvcAccount vmwarev1.ProviderServiceAccount) error { + log := ctrl.LoggerFrom(ctx) + log.V(4).Info("Attempting to sync token secret for ProviderServiceAccount") secretName := getServiceAccountSecretName(pSvcAccount) - logger.V(4).Info("Fetching secret for service account token details", "secret", secretName) + log.V(4).Info("Fetching secret for service account token details", "Secret", klog.KRef(pSvcAccount.Namespace, secretName)) var svcAccountTokenSecret corev1.Secret - err := guestClusterCtx.Client.Get(guestClusterCtx, types.NamespacedName{Name: secretName, Namespace: pSvcAccount.Namespace}, &svcAccountTokenSecret) + err := r.Client.Get(ctx, types.NamespacedName{Name: secretName, Namespace: pSvcAccount.Namespace}, &svcAccountTokenSecret) if err != nil { return err } @@ -435,7 +458,7 @@ func (r ServiceAccountReconciler) syncServiceAccountSecret(guestClusterCtx *vmwa if len(svcAccountTokenSecret.Data) == 0 { // Note: We don't have to requeue here because we have a watch on the secret and the cluster should be reconciled // when a secret has token data populated. 
- logger.Info("Skipping sync secret for provider service account: secret has no data", "secret", secretName) + log.Info("Skipping sync secret for provider service account: secret has no data", "Secret", klog.KRef(pSvcAccount.Namespace, secretName)) return nil } @@ -446,9 +469,9 @@ func (r ServiceAccountReconciler) syncServiceAccountSecret(guestClusterCtx *vmwa }, } - if err = guestClusterCtx.GuestClient.Get(guestClusterCtx, client.ObjectKey{Name: pSvcAccount.Spec.TargetNamespace}, targetNamespace); err != nil { + if err = guestClusterCtx.GuestClient.Get(ctx, client.ObjectKey{Name: pSvcAccount.Spec.TargetNamespace}, targetNamespace); err != nil { if apierrors.IsNotFound(err) { - err = guestClusterCtx.GuestClient.Create(guestClusterCtx, targetNamespace) + err = guestClusterCtx.GuestClient.Create(ctx, targetNamespace) if err != nil { return err } @@ -463,18 +486,18 @@ func (r ServiceAccountReconciler) syncServiceAccountSecret(guestClusterCtx *vmwa Namespace: pSvcAccount.Spec.TargetNamespace, }, } - logger.V(4).Info("Creating or updating secret in cluster", "namespace", targetSecret.Namespace, "name", targetSecret.Name) - _, err = controllerutil.CreateOrPatch(guestClusterCtx, guestClusterCtx.GuestClient, targetSecret, func() error { + log.V(4).Info("Creating or updating Secret in cluster", "Secret", klog.KObj(targetSecret)) + _, err = controllerutil.CreateOrPatch(ctx, guestClusterCtx.GuestClient, targetSecret, func() error { targetSecret.Data = svcAccountTokenSecret.Data return nil }) return err } -func (r ServiceAccountReconciler) getConfigMapAndBuffer(clusterCtx *vmwarecontext.ClusterContext) (*corev1.ConfigMap, *corev1.ConfigMap, error) { +func (r *ServiceAccountReconciler) getConfigMapAndBuffer(ctx context.Context) (*corev1.ConfigMap, *corev1.ConfigMap, error) { configMap := &corev1.ConfigMap{} - if err := clusterCtx.Client.Get(clusterCtx, GetCMNamespaceName(), configMap); err != nil { + if err := r.Client.Get(ctx, GetCMNamespaceName(), configMap); err != nil { return nil, nil, err } @@ -484,11 +507,11 @@ func (r ServiceAccountReconciler) getConfigMapAndBuffer(clusterCtx *vmwarecontex return configMapBuffer, configMap, nil } -func (r ServiceAccountReconciler) deleteServiceAccountConfigMap(clusterCtx *vmwarecontext.ClusterContext, svcAccount vmwarev1.ProviderServiceAccount) error { - logger := clusterCtx.Logger.WithValues("providerserviceaccount", svcAccount.Name) +func (r *ServiceAccountReconciler) deleteServiceAccountConfigMap(ctx context.Context, svcAccount vmwarev1.ProviderServiceAccount) error { + log := ctrl.LoggerFrom(ctx) svcAccountName := getSystemServiceAccountFullName(svcAccount) - configMapBuffer, configMap, err := r.getConfigMapAndBuffer(clusterCtx) + configMapBuffer, configMap, err := r.getConfigMapAndBuffer(ctx) if err != nil { return err } @@ -496,8 +519,8 @@ func (r ServiceAccountReconciler) deleteServiceAccountConfigMap(clusterCtx *vmwa // Service account name is not in the config map return nil } - logger.Info("Deleting config map entry for provider service account") - _, err = controllerutil.CreateOrPatch(clusterCtx, clusterCtx.Client, configMapBuffer, func() error { + log.Info("Deleting config map entry for provider service account") + _, err = controllerutil.CreateOrPatch(ctx, r.Client, configMapBuffer, func() error { configMapBuffer.Data = configMap.Data delete(configMapBuffer.Data, svcAccountName) return nil @@ -505,11 +528,11 @@ func (r ServiceAccountReconciler) deleteServiceAccountConfigMap(clusterCtx *vmwa return err } -func (r ServiceAccountReconciler) 
ensureServiceAccountConfigMap(clusterCtx *vmwarecontext.ClusterContext, svcAccount vmwarev1.ProviderServiceAccount) error { - logger := clusterCtx.Logger.WithValues("providerserviceaccount", svcAccount.Name) +func (r *ServiceAccountReconciler) ensureServiceAccountConfigMap(ctx context.Context, svcAccount vmwarev1.ProviderServiceAccount) error { + log := ctrl.LoggerFrom(ctx) svcAccountName := getSystemServiceAccountFullName(svcAccount) - configMapBuffer, configMap, err := r.getConfigMapAndBuffer(clusterCtx) + configMapBuffer, configMap, err := r.getConfigMapAndBuffer(ctx) if err != nil { return err } @@ -517,8 +540,8 @@ func (r ServiceAccountReconciler) ensureServiceAccountConfigMap(clusterCtx *vmwa // Service account name is already in the config map return nil } - logger.Info("Updating config map for provider service account") - _, err = controllerutil.CreateOrPatch(clusterCtx, clusterCtx.Client, configMapBuffer, func() error { + log.Info("Updating config map for provider service account") + _, err = controllerutil.CreateOrPatch(ctx, r.Client, configMapBuffer, func() error { configMapBuffer.Data = configMap.Data configMapBuffer.Data[svcAccountName] = "true" return nil @@ -526,11 +549,11 @@ func (r ServiceAccountReconciler) ensureServiceAccountConfigMap(clusterCtx *vmwa return err } -func getProviderServiceAccounts(clusterCtx *vmwarecontext.ClusterContext) ([]vmwarev1.ProviderServiceAccount, error) { +func (r *ServiceAccountReconciler) getProviderServiceAccounts(ctx context.Context, clusterCtx *vmwarecontext.ClusterContext) ([]vmwarev1.ProviderServiceAccount, error) { var pSvcAccounts []vmwarev1.ProviderServiceAccount pSvcAccountList := vmwarev1.ProviderServiceAccountList{} - if err := clusterCtx.Client.List(clusterCtx, &pSvcAccountList, client.InNamespace(clusterCtx.VSphereCluster.Namespace)); err != nil { + if err := r.Client.List(ctx, &pSvcAccountList, client.InNamespace(clusterCtx.VSphereCluster.Namespace)); err != nil { return nil, err } @@ -584,7 +607,7 @@ func GetCMNamespaceName() types.NamespacedName { // secretToVSphereCluster is a mapper function used to enqueue reconcile.Request objects. // It accepts a Secret object owned by the controller and fetches the service account // that contains the token and creates a reconcile.Request for the vmwarev1.VSphereCluster object. -func (r ServiceAccountReconciler) secretToVSphereCluster(ctx context.Context, o client.Object) []reconcile.Request { +func (r *ServiceAccountReconciler) secretToVSphereCluster(ctx context.Context, o client.Object) []reconcile.Request { secret, ok := o.(*corev1.Secret) if !ok { return nil @@ -603,7 +626,7 @@ func (r ServiceAccountReconciler) secretToVSphereCluster(ctx context.Context, o }, svcAccount); err != nil { return nil } - return r.serviceAccountToVSphereCluster(svcAccount) + return r.serviceAccountToVSphereCluster(ctx, svcAccount) } return nil } @@ -611,14 +634,14 @@ func (r ServiceAccountReconciler) secretToVSphereCluster(ctx context.Context, o // serviceAccountToVSphereCluster is a mapper function used to enqueue reconcile.Request objects. // From the watched object owned by this controller, it creates reconcile.Request object // for the vmwarev1.VSphereCluster object that owns the watched object. 
-func (r ServiceAccountReconciler) serviceAccountToVSphereCluster(o client.Object) []reconcile.Request { +func (r *ServiceAccountReconciler) serviceAccountToVSphereCluster(ctx context.Context, o client.Object) []reconcile.Request { // We do this because this controller is effectively a vSphereCluster controller that reconciles its // dependent ProviderServiceAccount objects. ownerRef := metav1.GetControllerOf(o) if ownerRef != nil && ownerRef.Kind == kindProviderServiceAccount { key := types.NamespacedName{Namespace: o.GetNamespace(), Name: ownerRef.Name} pSvcAccount := &vmwarev1.ProviderServiceAccount{} - if err := r.Client.Get(r.Context, key, pSvcAccount); err != nil { + if err := r.Client.Get(ctx, key, pSvcAccount); err != nil { return nil } return toVSphereClusterRequest(pSvcAccount) @@ -627,7 +650,7 @@ func (r ServiceAccountReconciler) serviceAccountToVSphereCluster(o client.Object } // providerServiceAccountToVSphereCluster is a mapper function used to enqueue reconcile.Request objects. -func (r ServiceAccountReconciler) providerServiceAccountToVSphereCluster(_ context.Context, o client.Object) []reconcile.Request { +func (r *ServiceAccountReconciler) providerServiceAccountToVSphereCluster(_ context.Context, o client.Object) []reconcile.Request { providerServiceAccount, ok := o.(*vmwarev1.ProviderServiceAccount) if !ok { return nil diff --git a/controllers/serviceaccount_controller_suite_test.go b/controllers/serviceaccount_controller_suite_test.go index 358745cbdf..521c78ab83 100644 --- a/controllers/serviceaccount_controller_suite_test.go +++ b/controllers/serviceaccount_controller_suite_test.go @@ -112,7 +112,7 @@ func assertTargetSecret(ctx context.Context, guestClient client.Client, namespac }).Should(Equal([]byte(testSecretToken))) } -func assertTargetNamespace(ctx *helpers.UnitTestContextForController, guestClient client.Client, namespaceName string, isExist bool) { +func assertTargetNamespace(ctx context.Context, guestClient client.Client, namespaceName string, isExist bool) { namespace := &corev1.Namespace{} err := guestClient.Get(ctx, client.ObjectKey{Name: namespaceName}, namespace) if isExist { @@ -122,7 +122,7 @@ func assertTargetNamespace(ctx *helpers.UnitTestContextForController, guestClien } } -func assertRoleWithGetPVC(ctx *helpers.UnitTestContextForController, ctrlClient client.Client, namespace, name string) { +func assertRoleWithGetPVC(ctx context.Context, ctrlClient client.Client, namespace, name string) { var roleList rbacv1.RoleList opts := &client.ListOptions{ Namespace: namespace, diff --git a/controllers/serviceaccount_controller_unit_test.go b/controllers/serviceaccount_controller_unit_test.go index 85793e933e..ed19a24a80 100644 --- a/controllers/serviceaccount_controller_unit_test.go +++ b/controllers/serviceaccount_controller_unit_test.go @@ -17,6 +17,7 @@ limitations under the License. package controllers import ( + "context" "fmt" "os" @@ -43,16 +44,19 @@ func unitTestsReconcileNormal() { ) JustBeforeEach(func() { + controllerCtx = helpers.NewUnitTestContextForController(namespace, vsphereCluster, false, initObjects, nil) // Note: The service account provider requires a reference to the vSphereCluster hence the need to create // a fake vSphereCluster in the test and pass it to during context setup. 
- reconciler = ServiceAccountReconciler{} - controllerCtx = helpers.NewUnitTestContextForController(namespace, vsphereCluster, false, initObjects, nil) - _, err := reconciler.ReconcileNormal(controllerCtx.GuestClusterContext) + reconciler = ServiceAccountReconciler{ + Client: controllerCtx.ControllerContext.Client, + Recorder: controllerCtx.ControllerContext.Recorder, + } + _, err := reconciler.ReconcileNormal(ctx, controllerCtx.GuestClusterContext) Expect(err).NotTo(HaveOccurred()) // Update the VSphereCluster and its status in the fake client. - Expect(controllerCtx.Client.Update(controllerCtx, controllerCtx.VSphereCluster)).To(Succeed()) - Expect(controllerCtx.Client.Status().Update(controllerCtx, controllerCtx.VSphereCluster)).To(Succeed()) + Expect(controllerCtx.ControllerContext.Client.Update(ctx, controllerCtx.VSphereCluster)).To(Succeed()) + Expect(controllerCtx.ControllerContext.Client.Status().Update(ctx, controllerCtx.VSphereCluster)).To(Succeed()) }) AfterEach(func() { @@ -63,7 +67,7 @@ func unitTestsReconcileNormal() { namespace = capiutil.RandomString(6) It("Should reconcile", func() { By("Not creating any entities") - assertNoEntities(controllerCtx, controllerCtx.Client, namespace) + assertNoEntities(ctx, controllerCtx.ControllerContext.Client, namespace) assertProviderServiceAccountsCondition(controllerCtx.VSphereCluster, corev1.ConditionTrue, "", "", "") }) }) @@ -81,32 +85,32 @@ func unitTestsReconcileNormal() { } }) It("should create a service account and a secret", func() { - _, err := reconciler.ReconcileNormal(controllerCtx.GuestClusterContext) + _, err := reconciler.ReconcileNormal(ctx, controllerCtx.GuestClusterContext) Expect(err).NotTo(HaveOccurred()) svcAccount := &corev1.ServiceAccount{} - assertEventuallyExistsInNamespace(controllerCtx, controllerCtx.Client, namespace, vsphereCluster.GetName(), svcAccount) + assertEventuallyExistsInNamespace(ctx, controllerCtx.ControllerContext.Client, namespace, vsphereCluster.GetName(), svcAccount) secret := &corev1.Secret{} - assertEventuallyExistsInNamespace(controllerCtx, controllerCtx.Client, namespace, fmt.Sprintf("%s-secret", vsphereCluster.GetName()), secret) + assertEventuallyExistsInNamespace(ctx, controllerCtx.ControllerContext.Client, namespace, fmt.Sprintf("%s-secret", vsphereCluster.GetName()), secret) }) Context("When serviceaccount secret is created", func() { It("Should reconcile", func() { - assertTargetNamespace(controllerCtx, controllerCtx.GuestClient, testTargetNS, false) - updateServiceAccountSecretAndReconcileNormal(controllerCtx, reconciler, vsphereCluster) - assertTargetNamespace(controllerCtx, controllerCtx.GuestClient, testTargetNS, true) + assertTargetNamespace(ctx, controllerCtx.GuestClient, testTargetNS, false) + updateServiceAccountSecretAndReconcileNormal(ctx, controllerCtx, reconciler, vsphereCluster) + assertTargetNamespace(ctx, controllerCtx.GuestClient, testTargetNS, true) By("Creating the target secret in the target namespace") - assertTargetSecret(controllerCtx, controllerCtx.GuestClient, testTargetNS, testTargetSecret) + assertTargetSecret(ctx, controllerCtx.GuestClient, testTargetNS, testTargetSecret) assertProviderServiceAccountsCondition(controllerCtx.VSphereCluster, corev1.ConditionTrue, "", "", "") }) }) Context("When serviceaccount secret is modified", func() { It("Should reconcile", func() { // This is to simulate an outdated token that will be replaced when the serviceaccount secret is created. 
- createTargetSecretWithInvalidToken(controllerCtx, controllerCtx.GuestClient, testTargetNS) - updateServiceAccountSecretAndReconcileNormal(controllerCtx, reconciler, vsphereCluster) + createTargetSecretWithInvalidToken(ctx, controllerCtx.GuestClient, testTargetNS) + updateServiceAccountSecretAndReconcileNormal(ctx, controllerCtx, reconciler, vsphereCluster) By("Updating the target secret in the target namespace") - assertTargetSecret(controllerCtx, controllerCtx.GuestClient, testTargetNS, testTargetSecret) + assertTargetSecret(ctx, controllerCtx.GuestClient, testTargetNS, testTargetSecret) assertProviderServiceAccountsCondition(controllerCtx.VSphereCluster, corev1.ConditionTrue, "", "", "") }) }) @@ -115,7 +119,7 @@ func unitTestsReconcileNormal() { initObjects = append(initObjects, getTestRoleWithGetPod(namespace, vsphereCluster.GetName())) }) It("Should update role", func() { - assertRoleWithGetPVC(controllerCtx, controllerCtx.Client, namespace, vsphereCluster.GetName()) + assertRoleWithGetPVC(ctx, controllerCtx.ControllerContext.Client, namespace, vsphereCluster.GetName()) assertProviderServiceAccountsCondition(controllerCtx.VSphereCluster, corev1.ConditionTrue, "", "", "") }) }) @@ -124,7 +128,7 @@ func unitTestsReconcileNormal() { initObjects = append(initObjects, getTestRoleBindingWithInvalidRoleRef(namespace, vsphereCluster.GetName())) }) It("Should update rolebinding", func() { - assertRoleBinding(controllerCtx, controllerCtx.Client, namespace, vsphereCluster.GetName()) + assertRoleBinding(controllerCtx, controllerCtx.ControllerContext.Client, namespace, vsphereCluster.GetName()) assertProviderServiceAccountsCondition(controllerCtx.VSphereCluster, corev1.ConditionTrue, "", "", "") }) }) @@ -133,8 +137,8 @@ func unitTestsReconcileNormal() { // Updates the service account secret similar to how a token controller would act upon a service account // and then re-invokes reconcileNormal. -func updateServiceAccountSecretAndReconcileNormal(controllerCtx *helpers.UnitTestContextForController, reconciler ServiceAccountReconciler, object client.Object) { - assertServiceAccountAndUpdateSecret(controllerCtx, controllerCtx.Client, object.GetNamespace(), object.GetName()) - _, err := reconciler.ReconcileNormal(controllerCtx.GuestClusterContext) +func updateServiceAccountSecretAndReconcileNormal(ctx context.Context, controllerCtx *helpers.UnitTestContextForController, reconciler ServiceAccountReconciler, object client.Object) { + assertServiceAccountAndUpdateSecret(ctx, controllerCtx.ControllerContext.Client, object.GetNamespace(), object.GetName()) + _, err := reconciler.ReconcileNormal(ctx, controllerCtx.GuestClusterContext) Expect(err).NotTo(HaveOccurred()) } diff --git a/controllers/servicediscovery_controller.go b/controllers/servicediscovery_controller.go index 5899a64cd9..4091cd6434 100644 --- a/controllers/servicediscovery_controller.go +++ b/controllers/servicediscovery_controller.go @@ -28,6 +28,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" @@ -72,19 +73,15 @@ const ( // +kubebuilder:rbac:groups="",resources=configmaps/status,verbs=get // AddServiceDiscoveryControllerToManager adds the ServiceDiscovery controller to the provided manager. 
-func AddServiceDiscoveryControllerToManager(controllerCtx *capvcontext.ControllerManagerContext, mgr manager.Manager, tracker *remote.ClusterCacheTracker, options controller.Options) error { +func AddServiceDiscoveryControllerToManager(ctx context.Context, controllerManagerCtx *capvcontext.ControllerManagerContext, mgr manager.Manager, tracker *remote.ClusterCacheTracker, options controller.Options) error { var ( controllerNameShort = ServiceDiscoveryControllerName - controllerNameLong = fmt.Sprintf("%s/%s/%s", controllerCtx.Namespace, controllerCtx.Name, ServiceDiscoveryControllerName) + controllerNameLong = fmt.Sprintf("%s/%s/%s", controllerManagerCtx.Namespace, controllerManagerCtx.Name, controllerNameShort) ) - controllerContext := &capvcontext.ControllerContext{ - ControllerManagerContext: controllerCtx, - Name: controllerNameShort, - Recorder: record.New(mgr.GetEventRecorderFor(controllerNameLong)), - Logger: controllerCtx.Logger.WithName(controllerNameShort), - } - r := serviceDiscoveryReconciler{ - ControllerContext: controllerContext, + + r := &serviceDiscoveryReconciler{ + Client: controllerManagerCtx.Client, + Recorder: record.New(mgr.GetEventRecorderFor(controllerNameLong)), remoteClusterCacheTracker: tracker, } @@ -104,6 +101,7 @@ func AddServiceDiscoveryControllerToManager(controllerCtx *capvcontext.Controlle src := source.Kind(configMapCache, &corev1.ConfigMap{}) return ctrl.NewControllerManagedBy(mgr).For(&vmwarev1.VSphereCluster{}). + Named(ServiceDiscoveryControllerName). WithOptions(options). Watches( &corev1.Service{}, @@ -121,25 +119,28 @@ func AddServiceDiscoveryControllerToManager(controllerCtx *capvcontext.Controlle &vmwarev1.VSphereCluster{}, handler.OnlyControllerOwner(), )). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(controllerCtx), controllerCtx.WatchFilterValue)). + WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), controllerManagerCtx.WatchFilterValue)). Complete(r) } type serviceDiscoveryReconciler struct { - *capvcontext.ControllerContext + Client client.Client + Recorder record.Recorder remoteClusterCacheTracker *remote.ClusterCacheTracker } -func (r serviceDiscoveryReconciler) Reconcile(_ context.Context, req reconcile.Request) (_ reconcile.Result, reterr error) { - logger := r.Logger.WithName(req.Namespace).WithName(req.Name) - logger.V(4).Info("Starting Reconcile") +func (r *serviceDiscoveryReconciler) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, reterr error) { + log := ctrl.LoggerFrom(ctx) + log.V(4).Info("Starting Reconcile") // Get the vspherecluster for this request. vsphereCluster := &vmwarev1.VSphereCluster{} - if err := r.Client.Get(r, req.NamespacedName, vsphereCluster); err != nil { + // Note: VSphereCluster doesn't have to be added to the logger as controller-runtime + // already adds the reconciled object (which is VSphereCluster). + if err := r.Client.Get(ctx, req.NamespacedName, vsphereCluster); err != nil { if apierrors.IsNotFound(err) { - logger.Info("Cluster not found, won't reconcile", "key", req.NamespacedName) + log.Info("VSphereCluster not found, won't reconcile") return reconcile.Result{}, nil } return reconcile.Result{}, err @@ -158,21 +159,15 @@ func (r serviceDiscoveryReconciler) Reconcile(_ context.Context, req reconcile.R // Create the cluster context for this request. 
clusterContext := &vmwarecontext.ClusterContext{ - ControllerContext: r.ControllerContext, - VSphereCluster: vsphereCluster, - Logger: logger, - PatchHelper: patchHelper, + VSphereCluster: vsphereCluster, + PatchHelper: patchHelper, } // Always issue a patch when exiting this function so changes to the // resource are patched back to the API server. defer func() { - if err := clusterContext.Patch(); err != nil { - if reterr == nil { - reterr = err - } else { - clusterContext.Logger.Error(err, "patch failed", "cluster", clusterContext.String()) - } + if err := clusterContext.Patch(ctx); err != nil { + reterr = kerrors.NewAggregate([]error{reterr, err}) } }() @@ -181,34 +176,36 @@ func (r serviceDiscoveryReconciler) Reconcile(_ context.Context, req reconcile.R return reconcile.Result{}, nil } - cluster, err := clusterutilv1.GetClusterFromMetadata(r, r.Client, vsphereCluster.ObjectMeta) + cluster, err := clusterutilv1.GetClusterFromMetadata(ctx, r.Client, vsphereCluster.ObjectMeta) if err != nil { - logger.Info("unable to get capi cluster from vsphereCluster", "err", err) + log.Error(err, "unable to get Cluster from VSphereCluster") return reconcile.Result{RequeueAfter: clusterNotReadyRequeueTime}, nil } // We cannot proceed until we are able to access the target cluster. Until // then just return a no-op and wait for the next sync. - guestClient, err := r.remoteClusterCacheTracker.GetClient(clusterContext, client.ObjectKeyFromObject(cluster)) + guestClient, err := r.remoteClusterCacheTracker.GetClient(ctx, client.ObjectKeyFromObject(cluster)) if err != nil { if errors.Is(err, remote.ErrClusterLocked) { - r.Logger.V(5).Info("Requeuing because another worker has the lock on the ClusterCacheTracker") + log.V(5).Info("Requeuing because another worker has the lock on the ClusterCacheTracker") return ctrl.Result{Requeue: true}, nil } - logger.Info("The control plane is not ready yet", "err", err) + log.Error(err, "The control plane is not ready yet") return reconcile.Result{RequeueAfter: clusterNotReadyRequeueTime}, nil } // Defer to the Reconciler for reconciling a non-delete event. - return r.ReconcileNormal(&vmwarecontext.GuestClusterContext{ + return r.ReconcileNormal(ctx, &vmwarecontext.GuestClusterContext{ ClusterContext: clusterContext, GuestClient: guestClient, }) } -func (r serviceDiscoveryReconciler) ReconcileNormal(guestClusterCtx *vmwarecontext.GuestClusterContext) (reconcile.Result, error) { - guestClusterCtx.Logger.V(4).Info("Reconciling Service Discovery", "cluster", guestClusterCtx.VSphereCluster.Name) - if err := r.reconcileSupervisorHeadlessService(guestClusterCtx); err != nil { +func (r *serviceDiscoveryReconciler) ReconcileNormal(ctx context.Context, guestClusterCtx *vmwarecontext.GuestClusterContext) (reconcile.Result, error) { + log := ctrl.LoggerFrom(ctx) + log.V(4).Info("Reconciling Service Discovery") + + if err := r.reconcileSupervisorHeadlessService(ctx, guestClusterCtx); err != nil { conditions.MarkFalse(guestClusterCtx.VSphereCluster, vmwarev1.ServiceDiscoveryReadyCondition, vmwarev1.SupervisorHeadlessServiceSetupFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) return reconcile.Result{}, errors.Wrapf(err, "failed to configure supervisor headless service for %v", guestClusterCtx.VSphereCluster) @@ -219,15 +216,17 @@ func (r serviceDiscoveryReconciler) ReconcileNormal(guestClusterCtx *vmwareconte // Setup a local k8s service in the target cluster that proxies to the Supervisor Cluster API Server. 
The add-ons are // dependent on this local service to connect to the Supervisor Cluster. -func (r serviceDiscoveryReconciler) reconcileSupervisorHeadlessService(guestClusterCtx *vmwarecontext.GuestClusterContext) error { +func (r *serviceDiscoveryReconciler) reconcileSupervisorHeadlessService(ctx context.Context, guestClusterCtx *vmwarecontext.GuestClusterContext) error { + log := ctrl.LoggerFrom(ctx) + // Create the headless service to the supervisor api server on the target cluster. supervisorPort := vmwarev1.SupervisorAPIServerPort svc := NewSupervisorHeadlessService(vmwarev1.SupervisorHeadlessSvcPort, supervisorPort) - if err := guestClusterCtx.GuestClient.Create(guestClusterCtx, svc); err != nil && !apierrors.IsAlreadyExists(err) { + if err := guestClusterCtx.GuestClient.Create(ctx, svc); err != nil && !apierrors.IsAlreadyExists(err) { return errors.Wrapf(err, "cannot create k8s service %s/%s in ", svc.Namespace, svc.Name) } - supervisorHost, err := GetSupervisorAPIServerAddress(guestClusterCtx.ClusterContext) + supervisorHost, err := r.getSupervisorAPIServerAddress(ctx) if err != nil { // Note: We have watches on the LB Svc (VIP) & the cluster-info configmap (FIP). There is no need to return an error to keep // re-trying. @@ -236,7 +235,7 @@ func (r serviceDiscoveryReconciler) reconcileSupervisorHeadlessService(guestClus return nil } - guestClusterCtx.Logger.Info("Discovered supervisor apiserver address", "host", supervisorHost, "port", supervisorPort) + log.Info("Discovered supervisor apiserver address", "host", supervisorHost, "port", supervisorPort) // CreateOrPatch the newEndpoints with the discovered supervisor api server address newEndpoints := NewSupervisorHeadlessServiceEndpoints( supervisorHost, @@ -253,7 +252,7 @@ func (r serviceDiscoveryReconciler) reconcileSupervisorHeadlessService(guestClus }, } result, err := controllerutil.CreateOrPatch( - guestClusterCtx, + ctx, guestClusterCtx.GuestClient, endpoints, func() error { @@ -272,7 +271,7 @@ func (r serviceDiscoveryReconciler) reconcileSupervisorHeadlessService(guestClus switch result { case controllerutil.OperationResultNone: - guestClusterCtx.Logger.Info( + log.Info( "no update required for k8s service endpoints", "endpointsKey", endpointsKey, @@ -280,7 +279,7 @@ func (r serviceDiscoveryReconciler) reconcileSupervisorHeadlessService(guestClus endpointsSubsetsStr, ) case controllerutil.OperationResultCreated: - guestClusterCtx.Logger.Info( + log.Info( "created k8s service endpoints", "endpointsKey", endpointsKey, @@ -288,7 +287,7 @@ func (r serviceDiscoveryReconciler) reconcileSupervisorHeadlessService(guestClus endpointsSubsetsStr, ) case controllerutil.OperationResultUpdated: - guestClusterCtx.Logger.Info( + log.Info( "updated k8s service endpoints", "endpointsKey", endpointsKey, @@ -296,10 +295,9 @@ func (r serviceDiscoveryReconciler) reconcileSupervisorHeadlessService(guestClus endpointsSubsetsStr, ) default: - guestClusterCtx.Logger.Error( - fmt.Errorf( - "unexpected result during createOrPatch k8s service endpoints", - ), + log.Error( + nil, + "unexpected result during createOrPatch k8s service endpoints", "endpointsKey", endpointsKey, "endpointsSubsets", @@ -313,18 +311,19 @@ func (r serviceDiscoveryReconciler) reconcileSupervisorHeadlessService(guestClus return nil } -func GetSupervisorAPIServerAddress(clusterCtx *vmwarecontext.ClusterContext) (string, error) { +func (r *serviceDiscoveryReconciler) getSupervisorAPIServerAddress(ctx context.Context) (string, error) { + log := ctrl.LoggerFrom(ctx) + // Discover the 
supervisor api server address // 1. Check if a k8s service "kube-system/kube-apiserver-lb-svc" is available, if so, fetch the loadbalancer IP. // 2. If not, get the Supervisor Cluster Management Network Floating IP (FIP) from the cluster-info configmap. This is // to support non-NSX-T development usecases only. If we are unable to find the cluster-info configmap for some reason, // we log the error. - supervisorHost, err := GetSupervisorAPIServerVIP(clusterCtx.Client) + supervisorHost, err := GetSupervisorAPIServerVIP(r.Client) if err != nil { - clusterCtx.Logger.Info("Unable to discover supervisor apiserver virtual ip, fallback to floating ip", "reason", err.Error()) - supervisorHost, err = GetSupervisorAPIServerFIP(clusterCtx.Client) + log.Error(err, "Unable to discover supervisor apiserver virtual ip, fallback to floating ip") + supervisorHost, err = GetSupervisorAPIServerFIP(r.Client) if err != nil { - clusterCtx.Logger.Error(err, "Unable to discover supervisor apiserver address") return "", errors.Wrapf(err, "Unable to discover supervisor apiserver address") } } @@ -453,7 +452,7 @@ func getClusterFromKubeConfig(config *clientcmdapi.Config) *clientcmdapi.Cluster // serviceToClusters is a mapper function used to enqueue reconcile.Requests // It watches for Service objects of type LoadBalancer for the supervisor api-server. -func (r serviceDiscoveryReconciler) serviceToClusters(ctx context.Context, o client.Object) []reconcile.Request { +func (r *serviceDiscoveryReconciler) serviceToClusters(ctx context.Context, o client.Object) []reconcile.Request { if o.GetNamespace() != vmwarev1.SupervisorLoadBalancerSvcNamespace || o.GetName() != vmwarev1.SupervisorLoadBalancerSvcName { return nil } @@ -462,7 +461,7 @@ func (r serviceDiscoveryReconciler) serviceToClusters(ctx context.Context, o cli // configMapToClusters is a mapper function used to enqueue reconcile.Requests // It watches for cluster-info configmaps for the supervisor api-server. 
-func (r serviceDiscoveryReconciler) configMapToClusters(ctx context.Context, o client.Object) []reconcile.Request { +func (r *serviceDiscoveryReconciler) configMapToClusters(ctx context.Context, o client.Object) []reconcile.Request { if o.GetNamespace() != metav1.NamespacePublic || o.GetName() != bootstrapapi.ConfigMapClusterInfo { return nil } diff --git a/controllers/svcdiscovery_controller_intg_test.go b/controllers/servicediscovery_controller_intg_test.go similarity index 100% rename from controllers/svcdiscovery_controller_intg_test.go rename to controllers/servicediscovery_controller_intg_test.go diff --git a/controllers/svcdiscovery_controller_suite_test.go b/controllers/servicediscovery_controller_suite_test.go similarity index 100% rename from controllers/svcdiscovery_controller_suite_test.go rename to controllers/servicediscovery_controller_suite_test.go diff --git a/controllers/svcdiscovery_controller_unit_test.go b/controllers/servicediscovery_controller_unit_test.go similarity index 92% rename from controllers/svcdiscovery_controller_unit_test.go rename to controllers/servicediscovery_controller_unit_test.go index bb68c2f714..a631dc8226 100644 --- a/controllers/svcdiscovery_controller_unit_test.go +++ b/controllers/servicediscovery_controller_unit_test.go @@ -43,12 +43,16 @@ func serviceDiscoveryUnitTestsReconcileNormal() { JustBeforeEach(func() { vsphereCluster = fake.NewVSphereCluster(namespace) controllerCtx = helpers.NewUnitTestContextForController(namespace, &vsphereCluster, false, initObjects, nil) - _, err := reconciler.ReconcileNormal(controllerCtx.GuestClusterContext) + reconciler = serviceDiscoveryReconciler{ + Client: controllerCtx.ControllerContext.Client, + Recorder: controllerCtx.ControllerContext.Recorder, + } + _, err := reconciler.ReconcileNormal(ctx, controllerCtx.GuestClusterContext) Expect(err).NotTo(HaveOccurred()) // Update the VSphereCluster and its status in the fake client. 
- Expect(controllerCtx.Client.Update(controllerCtx, controllerCtx.VSphereCluster)).To(Succeed()) - Expect(controllerCtx.Client.Status().Update(controllerCtx, controllerCtx.VSphereCluster)).To(Succeed()) + Expect(controllerCtx.ControllerContext.Client.Update(ctx, controllerCtx.VSphereCluster)).To(Succeed()) + Expect(controllerCtx.ControllerContext.Client.Status().Update(ctx, controllerCtx.VSphereCluster)).To(Succeed()) }) JustAfterEach(func() { controllerCtx = nil @@ -74,7 +78,11 @@ func serviceDiscoveryUnitTestsReconcileNormal() { assertServiceDiscoveryCondition(controllerCtx.VSphereCluster, corev1.ConditionTrue, "", "", "") }) It("Should get supervisor master endpoint IP", func() { - supervisorEndpointIP, err := GetSupervisorAPIServerAddress(controllerCtx.ClusterContext) + r := &serviceDiscoveryReconciler{ + Client: controllerCtx.ControllerContext.Client, + Recorder: controllerCtx.ControllerContext.Recorder, + } + supervisorEndpointIP, err := r.getSupervisorAPIServerAddress(ctx) Expect(err).ShouldNot(HaveOccurred()) Expect(supervisorEndpointIP).To(Equal(testSupervisorAPIServerVIP)) }) diff --git a/controllers/vmware/vspherecluster_reconciler.go b/controllers/vmware/vspherecluster_reconciler.go index 8645e266ef..29e7f36fd1 100644 --- a/controllers/vmware/vspherecluster_reconciler.go +++ b/controllers/vmware/vspherecluster_reconciler.go @@ -25,6 +25,8 @@ import ( topologyv1 "github.com/vmware-tanzu/vm-operator/external/tanzu-topology/api/v1alpha1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog/v2" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" clusterutilv1 "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/collections" @@ -36,8 +38,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" - capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context/vmware" + "sigs.k8s.io/cluster-api-provider-vsphere/pkg/record" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/services" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/util" ) @@ -47,7 +49,8 @@ const ( ) type ClusterReconciler struct { - *capvcontext.ControllerContext + Client client.Client + Recorder record.Recorder NetworkProvider services.NetworkProvider ControlPlaneService services.ControlPlaneEndpointService ResourcePolicyService services.ResourcePolicyService @@ -63,26 +66,30 @@ type ClusterReconciler struct { // +kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;watch;update;create;delete // +kubebuilder:rbac:groups="",resources=persistentvolumeclaims/status,verbs=get;update;patch -func (r *ClusterReconciler) Reconcile(_ context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { - logger := r.Logger.WithName(req.Namespace).WithName(req.Name) - logger.V(3).Info("Starting Reconcile vsphereCluster") +func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { + log := ctrl.LoggerFrom(ctx) + log.V(4).Info("Starting Reconcile") // Fetch the vsphereCluster instance vsphereCluster := &vmwarev1.VSphereCluster{} - err := r.Client.Get(r, req.NamespacedName, vsphereCluster) + err := r.Client.Get(ctx, req.NamespacedName, vsphereCluster) if err != nil { if apierrors.IsNotFound(err) { - r.Logger.V(4).Info("VSphereCluster not found, won't reconcile", "key", req.NamespacedName) + log.V(4).Info("VSphereCluster not found, won't reconcile") 
return reconcile.Result{}, nil } return reconcile.Result{}, err } // Fetch the Cluster. - cluster, err := clusterutilv1.GetOwnerCluster(r, r.Client, vsphereCluster.ObjectMeta) + cluster, err := clusterutilv1.GetOwnerCluster(ctx, r.Client, vsphereCluster.ObjectMeta) if err != nil { return reconcile.Result{}, err } + if cluster != nil { + log = log.WithValues("Cluster", klog.KObj(cluster)) + ctx = ctrl.LoggerInto(ctx, log) + } // Build the patch helper. patchHelper, err := patch.NewHelper(vsphereCluster, r.Client) @@ -92,32 +99,27 @@ func (r *ClusterReconciler) Reconcile(_ context.Context, req ctrl.Request) (_ ct // Build the cluster context. clusterContext := &vmware.ClusterContext{ - ControllerContext: r.ControllerContext, - Cluster: cluster, - Logger: r.Logger.WithName(vsphereCluster.Namespace).WithName(vsphereCluster.Name), - VSphereCluster: vsphereCluster, - PatchHelper: patchHelper, + Cluster: cluster, + VSphereCluster: vsphereCluster, + PatchHelper: patchHelper, } // Always close the context when exiting this function so we can persist any vsphereCluster changes. defer func() { r.Recorder.EmitEvent(vsphereCluster, "Reconcile", reterr, true) - if err := clusterContext.Patch(); err != nil { - clusterContext.Logger.Error(err, "failed to patch vspherecluster") - if reterr == nil { - reterr = err - } + if err := clusterContext.Patch(ctx); err != nil { + reterr = kerrors.NewAggregate([]error{reterr, err}) } }() // Handle deleted clusters if !vsphereCluster.DeletionTimestamp.IsZero() { - r.reconcileDelete(clusterContext) + r.reconcileDelete(ctx, clusterContext) return ctrl.Result{}, nil } if cluster == nil { - logger.V(2).Info("waiting on Cluster controller to set OwnerRef on infra cluster") + log.Info("Waiting on Cluster controller to set OwnerRef on VSphereCluster") return reconcile.Result{}, nil } @@ -129,11 +131,12 @@ func (r *ClusterReconciler) Reconcile(_ context.Context, req ctrl.Request) (_ ct } // Handle non-deleted clusters - return ctrl.Result{}, r.reconcileNormal(clusterContext) + return ctrl.Result{}, r.reconcileNormal(ctx, clusterContext) } -func (r *ClusterReconciler) reconcileDelete(clusterCtx *vmware.ClusterContext) { - clusterCtx.Logger.Info("Reconciling vsphereCluster delete") +func (r *ClusterReconciler) reconcileDelete(ctx context.Context, clusterCtx *vmware.ClusterContext) { + log := ctrl.LoggerFrom(ctx) + log.Info("Reconciling VSphereCluster delete") deletingConditionTypes := []clusterv1.ConditionType{ vmwarev1.ResourcePolicyReadyCondition, @@ -151,11 +154,12 @@ func (r *ClusterReconciler) reconcileDelete(clusterCtx *vmware.ClusterContext) { controllerutil.RemoveFinalizer(clusterCtx.VSphereCluster, vmwarev1.ClusterFinalizer) } -func (r *ClusterReconciler) reconcileNormal(clusterCtx *vmware.ClusterContext) error { - clusterCtx.Logger.Info("Reconciling vsphereCluster") +func (r *ClusterReconciler) reconcileNormal(ctx context.Context, clusterCtx *vmware.ClusterContext) error { + log := ctrl.LoggerFrom(ctx) + log.Info("Reconciling VSphereCluster") // Get any failure domains to report back to the CAPI core controller. - failureDomains, err := r.getFailureDomains(clusterCtx) + failureDomains, err := r.getFailureDomains(ctx) if err != nil { return errors.Wrapf( err, @@ -164,9 +168,9 @@ func (r *ClusterReconciler) reconcileNormal(clusterCtx *vmware.ClusterContext) e clusterCtx.VSphereCluster.Status.FailureDomains = failureDomains // Reconcile ResourcePolicy before we create the machines. 
If the ResourcePolicy is not reconciled before we create the Node VMs, - // it will be handled by vm operator by relocating the VMs to the ResourcePool and Folder specified by the ResourcePolicy. + // it will be handled by vm operator by relocating the VMs to the ResourcePool and Folder specified by the ResourcePolicy. // Reconciling the ResourcePolicy early potentially saves us the extra relocate operation. - resourcePolicyName, err := r.ResourcePolicyService.ReconcileResourcePolicy(clusterCtx) + resourcePolicyName, err := r.ResourcePolicyService.ReconcileResourcePolicy(ctx, clusterCtx) if err != nil { conditions.MarkFalse(clusterCtx.VSphereCluster, vmwarev1.ResourcePolicyReadyCondition, vmwarev1.ResourcePolicyCreationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) return errors.Wrapf(err, @@ -177,32 +181,34 @@ func (r *ClusterReconciler) reconcileNormal(clusterCtx *vmware.ClusterContext) e clusterCtx.VSphereCluster.Status.ResourcePolicyName = resourcePolicyName // Configure the cluster for the cluster network - err = r.NetworkProvider.ProvisionClusterNetwork(clusterCtx) + err = r.NetworkProvider.ProvisionClusterNetwork(ctx, clusterCtx) if err != nil { return errors.Wrapf(err, "failed to configure cluster network for vsphereCluster %s/%s", clusterCtx.VSphereCluster.Namespace, clusterCtx.VSphereCluster.Name) } - if ok, err := r.reconcileControlPlaneEndpoint(clusterCtx); !ok { + if ok, err := r.reconcileControlPlaneEndpoint(ctx, clusterCtx); !ok { if err != nil { return errors.Wrapf(err, "unexpected error while reconciling control plane endpoint for %s", clusterCtx.VSphereCluster.Name) } } clusterCtx.VSphereCluster.Status.Ready = true - clusterCtx.Logger.V(2).Info("Reconcile completed, vsphereCluster is infrastructure-ready") + log.V(2).Info("Reconciling completed, VSphereCluster is ready") return nil } -func (r *ClusterReconciler) reconcileControlPlaneEndpoint(clusterCtx *vmware.ClusterContext) (bool, error) { +func (r *ClusterReconciler) reconcileControlPlaneEndpoint(ctx context.Context, clusterCtx *vmware.ClusterContext) (bool, error) { + log := ctrl.LoggerFrom(ctx) + if !clusterCtx.Cluster.Spec.ControlPlaneEndpoint.IsZero() { clusterCtx.VSphereCluster.Spec.ControlPlaneEndpoint.Host = clusterCtx.Cluster.Spec.ControlPlaneEndpoint.Host clusterCtx.VSphereCluster.Spec.ControlPlaneEndpoint.Port = clusterCtx.Cluster.Spec.ControlPlaneEndpoint.Port if r.NetworkProvider.HasLoadBalancer() { conditions.MarkTrue(clusterCtx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition) } - clusterCtx.Logger.Info("skipping control plane endpoint reconciliation", + log.Info("Skipping control plane endpoint reconciliation", "reason", "ControlPlaneEndpoint already set on Cluster", "controlPlaneEndpoint", clusterCtx.Cluster.Spec.ControlPlaneEndpoint.String()) return true, nil @@ -212,14 +218,14 @@ func (r *ClusterReconciler) reconcileControlPlaneEndpoint(clusterCtx *vmware.Clu if r.NetworkProvider.HasLoadBalancer() { conditions.MarkTrue(clusterCtx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition) } - clusterCtx.Logger.Info("skipping control plane endpoint reconciliation", - "reason", "ControlPlaneEndpoint already set on vsphereCluster", + log.Info("Skipping control plane endpoint reconciliation", + "reason", "ControlPlaneEndpoint already set on VSphereCluster", "controlPlaneEndpoint", clusterCtx.VSphereCluster.Spec.ControlPlaneEndpoint.String()) return true, nil } if r.NetworkProvider.HasLoadBalancer() { - if err := r.reconcileLoadBalancedEndpoint(clusterCtx); err != nil { + if err := 
r.reconcileLoadBalancedEndpoint(ctx, clusterCtx); err != nil { return false, errors.Wrapf(err, "failed to reconcile loadbalanced endpoint for vsphereCluster %s/%s", clusterCtx.VSphereCluster.Namespace, clusterCtx.VSphereCluster.Name) @@ -228,7 +234,7 @@ func (r *ClusterReconciler) reconcileControlPlaneEndpoint(clusterCtx *vmware.Clu return true, nil } - if err := r.reconcileAPIEndpoints(clusterCtx); err != nil { + if err := r.reconcileAPIEndpoints(ctx, clusterCtx); err != nil { return false, errors.Wrapf(err, "failed to reconcile API endpoints for vsphereCluster %s/%s", clusterCtx.VSphereCluster.Namespace, clusterCtx.VSphereCluster.Name) @@ -237,11 +243,12 @@ func (r *ClusterReconciler) reconcileControlPlaneEndpoint(clusterCtx *vmware.Clu return true, nil } -func (r *ClusterReconciler) reconcileLoadBalancedEndpoint(clusterCtx *vmware.ClusterContext) error { - clusterCtx.Logger.Info("Reconciling load-balanced control plane endpoint") +func (r *ClusterReconciler) reconcileLoadBalancedEndpoint(ctx context.Context, clusterCtx *vmware.ClusterContext) error { + log := ctrl.LoggerFrom(ctx) + log.Info("Reconciling load-balanced control plane endpoint") // Will create a VirtualMachineService for a NetworkProvider that supports load balancing - cpEndpoint, err := r.ControlPlaneService.ReconcileControlPlaneEndpointService(clusterCtx, r.NetworkProvider) + cpEndpoint, err := r.ControlPlaneService.ReconcileControlPlaneEndpointService(ctx, clusterCtx, r.NetworkProvider) if err != nil { // Likely the endpoint is not ready. Keep retrying. return errors.Wrapf(err, @@ -256,16 +263,18 @@ func (r *ClusterReconciler) reconcileLoadBalancedEndpoint(clusterCtx *vmware.Clu // If we've got here and we have a cpEndpoint, we're done. clusterCtx.VSphereCluster.Spec.ControlPlaneEndpoint = *cpEndpoint - clusterCtx.Logger.V(3).Info( + log.V(3).Info( "found API endpoint via virtual machine service", "host", cpEndpoint.Host, "port", cpEndpoint.Port) return nil } -func (r *ClusterReconciler) reconcileAPIEndpoints(clusterCtx *vmware.ClusterContext) error { - clusterCtx.Logger.Info("Reconciling control plane endpoint") - machines, err := collections.GetFilteredMachinesForCluster(clusterCtx, r.Client, clusterCtx.Cluster, collections.ControlPlaneMachines(clusterCtx.Cluster.Name)) +func (r *ClusterReconciler) reconcileAPIEndpoints(ctx context.Context, clusterCtx *vmware.ClusterContext) error { + log := ctrl.LoggerFrom(ctx) + log.Info("Reconciling control plane endpoint") + + machines, err := collections.GetFilteredMachinesForCluster(ctx, r.Client, clusterCtx.Cluster, collections.ControlPlaneMachines(clusterCtx.Cluster.Name)) if err != nil { return errors.Wrapf(err, "failed to get Machines for Cluster %s/%s", @@ -278,26 +287,31 @@ func (r *ClusterReconciler) reconcileAPIEndpoints(clusterCtx *vmware.ClusterCont // Iterate over the cluster's control plane CAPI machines. for _, machine := range machines { + // Note: We have to use := here to create a new variable and not overwrite log & ctx outside the for loop. + log := log.WithValues("Machine", klog.KObj(machine)) + ctx := ctrl.LoggerInto(ctx, log) + // Only machines with bootstrap data will have an IP address. if machine.Spec.Bootstrap.DataSecretName == nil { - clusterCtx.Logger.V(5).Info( + log.V(5).Info( "skipping machine while looking for IP address", - "reason", "bootstrap.DataSecretName is nil", - "machine-name", machine.Name) + "reason", "bootstrap.DataSecretName is nil") continue } // Get the vsphereMachine for the CAPI Machine resource. 
- vsphereMachine, err := util.GetVSphereMachine(clusterCtx, clusterCtx.Client, machine.Namespace, machine.Name) + vsphereMachine, err := util.GetVSphereMachine(ctx, r.Client, machine.Namespace, machine.Name) if err != nil { return errors.Wrapf(err, "failed to get vsphereMachine for Machine %s/%s/%s", clusterCtx.VSphereCluster.Namespace, clusterCtx.VSphereCluster.Name, machine.Name) } + log = log.WithValues("VSphereMachine", klog.KObj(vsphereMachine)) + ctx = ctrl.LoggerInto(ctx, log) //nolint:ineffassign,staticcheck // ensure the logger is up-to-date in ctx, even if we currently don't use ctx below. // If the machine has no IP address then skip it. if vsphereMachine.Status.IPAddr == "" { - clusterCtx.Logger.V(5).Info("skipping machine without IP address") + log.V(5).Info("skipping machine without IP address") continue } @@ -309,7 +323,7 @@ func (r *ClusterReconciler) reconcileAPIEndpoints(clusterCtx *vmware.ClusterCont Port: apiEndpointPort, } apiEndpointList = append(apiEndpointList, apiEndpoint) - clusterCtx.Logger.V(3).Info( + log.V(3).Info( "found API endpoint via control plane machine", "host", apiEndpoint.Host, "port", apiEndpoint.Port) @@ -331,29 +345,34 @@ func (r *ClusterReconciler) reconcileAPIEndpoints(clusterCtx *vmware.ClusterCont } func (r *ClusterReconciler) VSphereMachineToCluster(ctx context.Context, o client.Object) []reconcile.Request { + log := ctrl.LoggerFrom(ctx) + vsphereMachine, ok := o.(*vmwarev1.VSphereMachine) if !ok { - r.Logger.Error(errors.New("did not get vspheremachine"), "got", fmt.Sprintf("%T", o)) + log.Error(nil, fmt.Sprintf("expected a VSphereMachine but got a %T", o)) return nil } + log = log.WithValues("VSphereMachine", klog.KObj(vsphereMachine)) + ctx = ctrl.LoggerInto(ctx, log) + if !util.IsControlPlaneMachine(vsphereMachine) { - r.Logger.V(5).Info("rejecting vsphereCluster reconcile as not CP machine", "machineName", vsphereMachine.Name) + log.V(5).Info("rejecting vsphereCluster reconcile as not CP machine") return nil } // Only currently interested in updating Cluster from vsphereMachines with IP addresses if vsphereMachine.Status.IPAddr == "" { - r.Logger.V(5).Info("rejecting vsphereCluster reconcile as no IP address", "machineName", vsphereMachine.Name) + log.V(5).Info("rejecting vsphereCluster reconcile as no IP address") return nil } cluster, err := util.GetVSphereClusterFromVMwareMachine(ctx, r.Client, vsphereMachine) if err != nil { - r.Logger.Error(err, "failed to get cluster", "machine", vsphereMachine.Name, "namespace", vsphereMachine.Namespace) + log.Error(err, "failed to get cluster") return nil } // Can add further filters on Cluster state so that we don't keep reconciling Cluster - r.Logger.V(3).Info("triggering VSphereCluster reconcile from VSphereMachine", "machineName", vsphereMachine.Name) + log.V(3).Info("triggering VSphereCluster reconcile from VSphereMachine") return []ctrl.Request{{ NamespacedName: types.NamespacedName{ Namespace: cluster.Namespace, @@ -368,13 +387,13 @@ var isFaultDomainsFSSEnabled = func() bool { // Returns the failure domain information discovered on the cluster // hosting this controller. 
-func (r *ClusterReconciler) getFailureDomains(clusterCtx *vmware.ClusterContext) (clusterv1.FailureDomains, error) { +func (r *ClusterReconciler) getFailureDomains(ctx context.Context) (clusterv1.FailureDomains, error) { if !isFaultDomainsFSSEnabled() { return nil, nil } availabilityZoneList := &topologyv1.AvailabilityZoneList{} - if err := clusterCtx.Client.List(clusterCtx, availabilityZoneList); err != nil { + if err := r.Client.List(ctx, availabilityZoneList); err != nil { return nil, err } diff --git a/controllers/vmware/vspherecluster_reconciler_test.go b/controllers/vmware/vspherecluster_reconciler_test.go index 84fe750578..a7199f18e1 100644 --- a/controllers/vmware/vspherecluster_reconciler_test.go +++ b/controllers/vmware/vspherecluster_reconciler_test.go @@ -27,8 +27,10 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" + ctrl "sigs.k8s.io/controller-runtime" vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context/vmware" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/services/network" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/services/vmoperator" @@ -54,7 +56,9 @@ var _ = Describe("Cluster Controller Tests", func() { cluster *clusterv1.Cluster vsphereCluster *vmwarev1.VSphereCluster vsphereMachine *vmwarev1.VSphereMachine - ctx *vmware.ClusterContext + ctx = ctrl.SetupSignalHandler() + clusterCtx *vmware.ClusterContext + controllerCtx *capvcontext.ControllerContext reconciler *ClusterReconciler ) @@ -62,17 +66,20 @@ var _ = Describe("Cluster Controller Tests", func() { // Create all necessary dependencies cluster = util.CreateCluster(clusterName) vsphereCluster = util.CreateVSphereCluster(clusterName) - ctx = util.CreateClusterContext(cluster, vsphereCluster) + clusterCtx, controllerCtx = util.CreateClusterContext(cluster, vsphereCluster) vsphereMachine = util.CreateVSphereMachine(machineName, clusterName, className, imageName, storageClass, controlPlaneLabelTrue) reconciler = &ClusterReconciler{ - ControllerContext: ctx.ControllerContext, - NetworkProvider: network.DummyNetworkProvider(), - ControlPlaneService: vmoperator.CPService{}, + Client: controllerCtx.Client, + Recorder: controllerCtx.Recorder, + NetworkProvider: network.DummyNetworkProvider(), + ControlPlaneService: &vmoperator.CPService{ + Client: controllerCtx.Client, + }, } - Expect(ctx.Client.Create(ctx, cluster)).To(Succeed()) - Expect(ctx.Client.Create(ctx, vsphereCluster)).To(Succeed()) + Expect(controllerCtx.Client.Create(ctx, cluster)).To(Succeed()) + Expect(controllerCtx.Client.Create(ctx, vsphereCluster)).To(Succeed()) }) // Ensure that the mechanism for reconciling clusters when a control plane machine gets an IP works @@ -93,10 +100,10 @@ var _ = Describe("Cluster Controller Tests", func() { Context("Test reconcileDelete", func() { It("should mark specific resources to be in deleting conditions", func() { - ctx.VSphereCluster.Status.Conditions = append(ctx.VSphereCluster.Status.Conditions, + clusterCtx.VSphereCluster.Status.Conditions = append(clusterCtx.VSphereCluster.Status.Conditions, clusterv1.Condition{Type: vmwarev1.ResourcePolicyReadyCondition, Status: corev1.ConditionTrue}) - reconciler.reconcileDelete(ctx) - c := conditions.Get(ctx.VSphereCluster, vmwarev1.ResourcePolicyReadyCondition) + reconciler.reconcileDelete(ctx, clusterCtx) + c := 
conditions.Get(clusterCtx.VSphereCluster, vmwarev1.ResourcePolicyReadyCondition) Expect(c).NotTo(BeNil()) Expect(c.Status).To(Equal(corev1.ConditionFalse)) Expect(c.Reason).To(Equal(clusterv1.DeletingReason)) @@ -104,10 +111,10 @@ var _ = Describe("Cluster Controller Tests", func() { It("should not mark other resources to be in deleting conditions", func() { otherReady := clusterv1.ConditionType("OtherReady") - ctx.VSphereCluster.Status.Conditions = append(ctx.VSphereCluster.Status.Conditions, + clusterCtx.VSphereCluster.Status.Conditions = append(clusterCtx.VSphereCluster.Status.Conditions, clusterv1.Condition{Type: otherReady, Status: corev1.ConditionTrue}) - reconciler.reconcileDelete(ctx) - c := conditions.Get(ctx.VSphereCluster, otherReady) + reconciler.reconcileDelete(ctx, clusterCtx) + c := conditions.Get(clusterCtx.VSphereCluster, otherReady) Expect(c).NotTo(BeNil()) Expect(c.Status).NotTo(Equal(corev1.ConditionFalse)) Expect(c.Reason).NotTo(Equal(clusterv1.DeletingReason)) @@ -144,7 +151,7 @@ var _ = Describe("Cluster Controller Tests", func() { }, } - Expect(ctx.Client.Create(ctx, zone)).To(Succeed()) + Expect(controllerCtx.Client.Create(ctx, zone)).To(Succeed()) } fds, err := reconciler.getFailureDomains(ctx) diff --git a/controllers/vspherecluster_controller.go b/controllers/vspherecluster_controller.go index e4af2ab405..ed4e547192 100644 --- a/controllers/vspherecluster_controller.go +++ b/controllers/vspherecluster_controller.go @@ -57,7 +57,7 @@ import ( // AddClusterControllerToManager adds the cluster controller to the provided // manager. -func AddClusterControllerToManager(ctx context.Context, controllerCtx *capvcontext.ControllerManagerContext, mgr manager.Manager, clusterControlledType client.Object, options controller.Options) error { +func AddClusterControllerToManager(ctx context.Context, controllerManagerCtx *capvcontext.ControllerManagerContext, mgr manager.Manager, clusterControlledType client.Object, options controller.Options) error { supervisorBased, err := util.IsSupervisorType(clusterControlledType) if err != nil { return err @@ -66,51 +66,46 @@ func AddClusterControllerToManager(ctx context.Context, controllerCtx *capvconte var ( clusterControlledTypeName = reflect.TypeOf(clusterControlledType).Elem().Name() clusterControlledTypeGVK = infrav1.GroupVersion.WithKind(clusterControlledTypeName) - controllerNameShort = fmt.Sprintf("%s-controller", strings.ToLower(clusterControlledTypeName)) - controllerNameLong = fmt.Sprintf("%s/%s/%s", controllerCtx.Namespace, controllerCtx.Name, controllerNameShort) + controllerNameLong = fmt.Sprintf("%s/%s/%s", controllerManagerCtx.Namespace, controllerManagerCtx.Name, strings.ToLower(clusterControlledTypeName)) ) if supervisorBased { clusterControlledTypeGVK = vmwarev1.GroupVersion.WithKind(clusterControlledTypeName) - controllerNameShort = fmt.Sprintf("%s-supervisor-controller", strings.ToLower(clusterControlledTypeName)) - controllerNameLong = fmt.Sprintf("%s/%s/%s", controllerCtx.Namespace, controllerCtx.Name, controllerNameShort) - } - - // Build the controller context. 
- controllerContext := &capvcontext.ControllerContext{ - ControllerManagerContext: controllerCtx, - Name: controllerNameShort, - Recorder: record.New(mgr.GetEventRecorderFor(controllerNameLong)), - Logger: controllerCtx.Logger.WithName(controllerNameShort), + controllerNameLong = fmt.Sprintf("%s/%s/%s", controllerManagerCtx.Namespace, controllerManagerCtx.Name, strings.ToLower(clusterControlledTypeName)) } if supervisorBased { - networkProvider, err := inframanager.GetNetworkProvider(controllerCtx) + networkProvider, err := inframanager.GetNetworkProvider(ctx, controllerManagerCtx.Client, controllerManagerCtx.NetworkProvider) if err != nil { return errors.Wrap(err, "failed to create a network provider") } reconciler := &vmware.ClusterReconciler{ - ControllerContext: controllerContext, - ResourcePolicyService: vmoperator.RPService{}, - ControlPlaneService: vmoperator.CPService{}, - NetworkProvider: networkProvider, + Client: controllerManagerCtx.Client, + Recorder: record.New(mgr.GetEventRecorderFor(controllerNameLong)), + ResourcePolicyService: &vmoperator.RPService{ + Client: controllerManagerCtx.Client, + }, + ControlPlaneService: &vmoperator.CPService{ + Client: controllerManagerCtx.Client, + }, + NetworkProvider: networkProvider, } return ctrl.NewControllerManagedBy(mgr). - Named(controllerNameShort). For(clusterControlledType). WithOptions(options). Watches( &vmwarev1.VSphereMachine{}, handler.EnqueueRequestsFromMapFunc(reconciler.VSphereMachineToCluster), ). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(controllerCtx), controllerCtx.WatchFilterValue)). + WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), controllerManagerCtx.WatchFilterValue)). Complete(reconciler) } - reconciler := clusterReconciler{ - ControllerContext: controllerContext, - clusterModuleReconciler: NewReconciler(controllerContext), + reconciler := &clusterReconciler{ + ControllerManagerContext: controllerManagerCtx, + Client: controllerManagerCtx.Client, + clusterModuleReconciler: NewReconciler(controllerManagerCtx), } - clusterToInfraFn := clusterToInfrastructureMapFunc(ctx, controllerCtx) + clusterToInfraFn := clusterToInfrastructureMapFunc(ctx, controllerManagerCtx) c, err := ctrl.NewControllerManagedBy(mgr). // Watch the controlled, infrastructure resource. For(clusterControlledType). @@ -119,6 +114,8 @@ func AddClusterControllerToManager(ctx context.Context, controllerCtx *capvconte Watches( &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, o client.Object) []reconcile.Request { + log := ctrl.LoggerFrom(ctx) + requests := clusterToInfraFn(ctx, o) if requests == nil { return nil @@ -126,12 +123,12 @@ func AddClusterControllerToManager(ctx context.Context, controllerCtx *capvconte c := &infrav1.VSphereCluster{} if err := reconciler.Client.Get(ctx, requests[0].NamespacedName, c); err != nil { - reconciler.Logger.V(4).Error(err, "Failed to get VSphereCluster") + log.V(4).Error(err, "Failed to get VSphereCluster") return nil } if annotations.IsExternallyManaged(c) { - reconciler.Logger.V(4).Info("VSphereCluster is externally managed, skipping mapping.") + log.V(4).Info("VSphereCluster is externally managed, skipping mapping.") return nil } return requests @@ -157,11 +154,11 @@ func AddClusterControllerToManager(ctx context.Context, controllerCtx *capvconte // should cause a resource to be synchronized, such as a goroutine // waiting on some asynchronous, external task to complete. 
 		WatchesRawSource(
-			&source.Channel{Source: controllerCtx.GetGenericEventChannelFor(clusterControlledTypeGVK)},
+			&source.Channel{Source: controllerManagerCtx.GetGenericEventChannelFor(clusterControlledTypeGVK)},
 			&handler.EnqueueRequestForObject{},
 		).
-		WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(controllerCtx), controllerCtx.WatchFilterValue)).
-		WithEventFilter(predicates.ResourceIsNotExternallyManaged(reconciler.Logger)).
+		WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), controllerManagerCtx.WatchFilterValue)).
+		WithEventFilter(predicates.ResourceIsNotExternallyManaged(ctrl.LoggerFrom(ctx))).
 		Build(reconciler)
 	if err != nil {
 		return err
diff --git a/controllers/vspherecluster_reconciler.go b/controllers/vspherecluster_reconciler.go
index 0596928fda..34329ef614 100644
--- a/controllers/vspherecluster_reconciler.go
+++ b/controllers/vspherecluster_reconciler.go
@@ -30,6 +30,7 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	kerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/klog/v2"
 	"k8s.io/utils/pointer"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	clusterutilv1 "sigs.k8s.io/cluster-api/util"
@@ -55,18 +56,21 @@ import (
 const legacyIdentityFinalizer string = "identity/infrastructure.cluster.x-k8s.io"
 
 type clusterReconciler struct {
-	*capvcontext.ControllerContext
+	ControllerManagerContext *capvcontext.ControllerManagerContext
+	Client client.Client
 
 	clusterModuleReconciler Reconciler
 }
 
 // Reconcile ensures the back-end state reflects the Kubernetes resource state intent.
-func (r clusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) {
+func (r *clusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) {
+	log := ctrl.LoggerFrom(ctx)
+
 	// Get the VSphereCluster resource for this request.
 	vsphereCluster := &infrav1.VSphereCluster{}
 	if err := r.Client.Get(ctx, req.NamespacedName, vsphereCluster); err != nil {
 		if apierrors.IsNotFound(err) {
-			r.Logger.V(4).Info("VSphereCluster not found, won't reconcile", "key", req.NamespacedName)
+			log.V(4).Info("VSphereCluster not found, won't reconcile")
 			return reconcile.Result{}, nil
 		}
 		return reconcile.Result{}, err
@@ -78,12 +82,14 @@ func (r clusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ c
 		return reconcile.Result{}, err
 	}
 	if cluster == nil {
-		r.Logger.Info("Waiting for Cluster Controller to set OwnerRef on VSphereCluster")
+		log.Info("Waiting for Cluster Controller to set OwnerRef on VSphereCluster")
 		return reconcile.Result{}, nil
 	}
+	log = log.WithValues("Cluster", klog.KObj(cluster))
+	ctx = ctrl.LoggerInto(ctx, log)
+
 	if annotations.IsPaused(cluster, vsphereCluster) {
-		r.Logger.V(4).Info("VSphereCluster %s/%s linked to a cluster that is paused",
-			vsphereCluster.Namespace, vsphereCluster.Name)
+		log.V(4).Info("VSphereCluster linked to a Cluster that is paused")
 		return reconcile.Result{}, nil
 	}
 
@@ -100,25 +106,20 @@ func (r clusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ c
 	// Create the cluster context for this request.
clusterContext := &capvcontext.ClusterContext{ - ControllerContext: r.ControllerContext, - Cluster: cluster, - VSphereCluster: vsphereCluster, - Logger: r.Logger.WithName(req.Namespace).WithName(req.Name), - PatchHelper: patchHelper, + Cluster: cluster, + VSphereCluster: vsphereCluster, + PatchHelper: patchHelper, } // Always issue a patch when exiting this function so changes to the // resource are patched back to the API server. defer func() { - if err := clusterContext.Patch(); err != nil { - if reterr == nil { - reterr = err - } - clusterContext.Logger.Error(err, "patch failed", "cluster", clusterContext.String()) + if err := clusterContext.Patch(ctx); err != nil { + reterr = kerrors.NewAggregate([]error{reterr, err}) } }() - if err := setOwnerRefsOnVsphereMachines(ctx, clusterContext); err != nil { + if err := r.setOwnerRefsOnVsphereMachines(ctx, clusterContext); err != nil { return reconcile.Result{}, errors.Wrapf(err, "failed to set owner refs on VSphereMachine objects") } @@ -138,10 +139,11 @@ func (r clusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ c return r.reconcileNormal(ctx, clusterContext) } -func (r clusterReconciler) reconcileDelete(ctx context.Context, clusterCtx *capvcontext.ClusterContext) (reconcile.Result, error) { - clusterCtx.Logger.Info("Reconciling VSphereCluster delete") +func (r *clusterReconciler) reconcileDelete(ctx context.Context, clusterCtx *capvcontext.ClusterContext) (reconcile.Result, error) { + log := ctrl.LoggerFrom(ctx) + log.Info("Reconciling VSphereCluster delete") - vsphereMachines, err := infrautilv1.GetVSphereMachinesInCluster(ctx, clusterCtx.Client, clusterCtx.Cluster.Namespace, clusterCtx.Cluster.Name) + vsphereMachines, err := infrautilv1.GetVSphereMachinesInCluster(ctx, r.Client, clusterCtx.Cluster.Namespace, clusterCtx.Cluster.Name) if err != nil { return reconcile.Result{}, errors.Wrapf(err, "unable to list VSphereMachines part of VSphereCluster %s/%s", clusterCtx.VSphereCluster.Namespace, clusterCtx.VSphereCluster.Name) @@ -150,6 +152,10 @@ func (r clusterReconciler) reconcileDelete(ctx context.Context, clusterCtx *capv machineDeletionCount := 0 var deletionErrors []error for _, vsphereMachine := range vsphereMachines { + // Note: We have to use := here to not overwrite log & ctx outside the for loop. + log := log.WithValues("VSphereMachine", klog.KObj(vsphereMachine)) + ctx := ctrl.LoggerInto(ctx, log) + // If the VSphereMachine is not owned by the CAPI Machine object because the machine object was deleted // before setting the owner references, then proceed with the deletion of the VSphereMachine object. 
// This is required until CAPI has a solution for https://github.com/kubernetes-sigs/cluster-api/issues/5483
@@ -158,13 +164,13 @@ func (r clusterReconciler) reconcileDelete(ctx context.Context, clusterCtx *capv
 		}
 		machineDeletionCount++
 		// Remove the finalizer since VM creation wouldn't proceed
-		r.Logger.Info("Removing finalizer from VSphereMachine", "namespace", vsphereMachine.Namespace, "name", vsphereMachine.Name)
+		log.Info("Removing finalizer from VSphereMachine")
 		ctrlutil.RemoveFinalizer(vsphereMachine, infrav1.MachineFinalizer)
 		if err := r.Client.Update(ctx, vsphereMachine); err != nil {
 			return reconcile.Result{}, err
 		}
 		if err := r.Client.Delete(ctx, vsphereMachine); err != nil && !apierrors.IsNotFound(err) {
-			clusterCtx.Logger.Error(err, "Failed to delete for VSphereMachine", "namespace", vsphereMachine.Namespace, "name", vsphereMachine.Name)
+			log.Error(err, "Failed to delete VSphereMachine")
 			deletionErrors = append(deletionErrors, err)
 		}
 	}
@@ -173,14 +179,14 @@ func (r clusterReconciler) reconcileDelete(ctx context.Context, clusterCtx *capv
 	}
 
 	if len(vsphereMachines)-machineDeletionCount > 0 {
-		clusterCtx.Logger.Info("Waiting for VSphereMachines to be deleted", "count", len(vsphereMachines)-machineDeletionCount)
+		log.Info("Waiting for VSphereMachines to be deleted", "count", len(vsphereMachines)-machineDeletionCount)
 		return reconcile.Result{RequeueAfter: 10 * time.Second}, nil
 	}
 
 	// The cluster module info needs to be reconciled before the secret deletion
 	// since it needs access to the vCenter instance to be able to perform LCM operations
 	// on the cluster modules.
-	affinityReconcileResult, err := r.reconcileClusterModules(clusterCtx)
+	affinityReconcileResult, err := r.reconcileClusterModules(ctx, clusterCtx)
 	if err != nil {
 		return affinityReconcileResult, err
 	}
@@ -192,15 +198,14 @@ func (r clusterReconciler) reconcileDelete(ctx context.Context, clusterCtx *capv
 			Namespace: clusterCtx.VSphereCluster.Namespace,
 			Name: clusterCtx.VSphereCluster.Spec.IdentityRef.Name,
 		}
-		err := clusterCtx.Client.Get(ctx, secretKey, secret)
-		if err != nil {
+		if err := r.Client.Get(ctx, secretKey, secret); err != nil {
 			if apierrors.IsNotFound(err) {
 				ctrlutil.RemoveFinalizer(clusterCtx.VSphereCluster, infrav1.ClusterFinalizer)
 				return reconcile.Result{}, nil
 			}
 			return reconcile.Result{}, err
 		}
-		r.Logger.Info(fmt.Sprintf("Removing finalizer from Secret %s/%s having finalizers %v", secret.Namespace, secret.Name, secret.Finalizers))
+		log.Info(fmt.Sprintf("Removing finalizer from Secret %s/%s having finalizers %v", secret.Namespace, secret.Name, secret.Finalizers))
 		ctrlutil.RemoveFinalizer(secret, infrav1.SecretIdentitySetFinalizer)
 
 		// Check if the old finalizer(from v0.7) is present, if yes, delete it
@@ -208,10 +213,10 @@ func (r clusterReconciler) reconcileDelete(ctx context.Context, clusterCtx *capv
 		if ctrlutil.ContainsFinalizer(secret, legacyIdentityFinalizer) {
 			ctrlutil.RemoveFinalizer(secret, legacyIdentityFinalizer)
 		}
-		if err := clusterCtx.Client.Update(ctx, secret); err != nil {
+		if err := r.Client.Update(ctx, secret); err != nil {
 			return reconcile.Result{}, err
 		}
-		if err := clusterCtx.Client.Delete(ctx, secret); err != nil {
+		if err := r.Client.Delete(ctx, secret); err != nil {
 			return reconcile.Result{}, err
 		}
 	}
@@ -222,15 +227,16 @@ func (r clusterReconciler) reconcileDelete(ctx context.Context, clusterCtx *capv
 	return reconcile.Result{}, nil
 }
 
-func (r clusterReconciler) reconcileNormal(ctx context.Context, clusterCtx *capvcontext.ClusterContext) (reconcile.Result, error) {
-	
clusterCtx.Logger.Info("Reconciling VSphereCluster") +func (r *clusterReconciler) reconcileNormal(ctx context.Context, clusterCtx *capvcontext.ClusterContext) (reconcile.Result, error) { + log := ctrl.LoggerFrom(ctx) + log.Info("Reconciling VSphereCluster") ok, err := r.reconcileDeploymentZones(ctx, clusterCtx) if err != nil { return reconcile.Result{}, err } if !ok { - clusterCtx.Logger.Info("waiting for failure domains to be reconciled") + log.Info("Waiting for failure domains to be reconciled") return reconcile.Result{RequeueAfter: 10 * time.Second}, nil } @@ -250,10 +256,10 @@ func (r clusterReconciler) reconcileNormal(ctx context.Context, clusterCtx *capv err = r.reconcileVCenterVersion(clusterCtx, vcenterSession) if err != nil || clusterCtx.VSphereCluster.Status.VCenterVersion == "" { conditions.MarkFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition, infrav1.MissingVCenterVersionReason, clusterv1.ConditionSeverityWarning, "vCenter API version not set") - clusterCtx.Logger.Error(err, "could not reconcile vCenter version") + log.Error(err, "could not reconcile vCenter version") } - affinityReconcileResult, err := r.reconcileClusterModules(clusterCtx) + affinityReconcileResult, err := r.reconcileClusterModules(ctx, clusterCtx) if err != nil { conditions.MarkFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition, infrav1.ClusterModuleSetupFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) return affinityReconcileResult, err @@ -264,9 +270,9 @@ func (r clusterReconciler) reconcileNormal(ctx context.Context, clusterCtx *capv // Ensure the VSphereCluster is reconciled when the API server first comes online. // A reconcile event will only be triggered if the Cluster is not marked as // ControlPlaneInitialized. 
- r.reconcileVSphereClusterWhenAPIServerIsOnline(clusterCtx) + r.reconcileVSphereClusterWhenAPIServerIsOnline(ctx, clusterCtx) if clusterCtx.VSphereCluster.Spec.ControlPlaneEndpoint.IsZero() { - clusterCtx.Logger.Info("control plane endpoint is not reconciled") + log.Info("control plane endpoint is not reconciled") return reconcile.Result{}, nil } @@ -283,7 +289,7 @@ func (r clusterReconciler) reconcileNormal(ctx context.Context, clusterCtx *capv return reconcile.Result{}, nil } -func (r clusterReconciler) reconcileIdentitySecret(ctx context.Context, clusterCtx *capvcontext.ClusterContext) error { +func (r *clusterReconciler) reconcileIdentitySecret(ctx context.Context, clusterCtx *capvcontext.ClusterContext) error { vsphereCluster := clusterCtx.VSphereCluster if !identity.IsSecretIdentity(vsphereCluster) { return nil @@ -293,7 +299,7 @@ func (r clusterReconciler) reconcileIdentitySecret(ctx context.Context, clusterC Namespace: vsphereCluster.Namespace, Name: vsphereCluster.Spec.IdentityRef.Name, } - err := clusterCtx.Client.Get(ctx, secretKey, secret) + err := r.Client.Get(ctx, secretKey, secret) if err != nil { return err } @@ -303,7 +309,7 @@ func (r clusterReconciler) reconcileIdentitySecret(ctx context.Context, clusterC return fmt.Errorf("another cluster has set the OwnerRef for secret: %s/%s", secret.Namespace, secret.Name) } - helper, err := patch.NewHelper(secret, clusterCtx.Client) + helper, err := patch.NewHelper(secret, r.Client) if err != nil { return err } @@ -330,17 +336,17 @@ func (r clusterReconciler) reconcileIdentitySecret(ctx context.Context, clusterC return nil } -func (r clusterReconciler) reconcileVCenterConnectivity(ctx context.Context, clusterCtx *capvcontext.ClusterContext) (*session.Session, error) { +func (r *clusterReconciler) reconcileVCenterConnectivity(ctx context.Context, clusterCtx *capvcontext.ClusterContext) (*session.Session, error) { params := session.NewParams(). WithServer(clusterCtx.VSphereCluster.Spec.Server). WithThumbprint(clusterCtx.VSphereCluster.Spec.Thumbprint). 
WithFeatures(session.Feature{ - EnableKeepAlive: r.EnableKeepAlive, - KeepAliveDuration: r.KeepAliveDuration, + EnableKeepAlive: r.ControllerManagerContext.EnableKeepAlive, + KeepAliveDuration: r.ControllerManagerContext.KeepAliveDuration, }) if clusterCtx.VSphereCluster.Spec.IdentityRef != nil { - creds, err := identity.GetCredentials(ctx, r.Client, clusterCtx.VSphereCluster, r.Namespace) + creds, err := identity.GetCredentials(ctx, r.Client, clusterCtx.VSphereCluster, r.ControllerManagerContext.Namespace) if err != nil { return nil, err } @@ -349,11 +355,11 @@ func (r clusterReconciler) reconcileVCenterConnectivity(ctx context.Context, clu return session.GetOrCreate(ctx, params) } - params = params.WithUserInfo(clusterCtx.Username, clusterCtx.Password) - return session.GetOrCreate(clusterCtx, params) + params = params.WithUserInfo(r.ControllerManagerContext.Username, r.ControllerManagerContext.Password) + return session.GetOrCreate(ctx, params) } -func (r clusterReconciler) reconcileVCenterVersion(clusterCtx *capvcontext.ClusterContext, s *session.Session) error { +func (r *clusterReconciler) reconcileVCenterVersion(clusterCtx *capvcontext.ClusterContext, s *session.Session) error { version, err := s.GetVersion() if err != nil { return err @@ -362,7 +368,7 @@ func (r clusterReconciler) reconcileVCenterVersion(clusterCtx *capvcontext.Clust return nil } -func (r clusterReconciler) reconcileDeploymentZones(ctx context.Context, clusterCtx *capvcontext.ClusterContext) (bool, error) { +func (r *clusterReconciler) reconcileDeploymentZones(ctx context.Context, clusterCtx *capvcontext.ClusterContext) (bool, error) { // If there is no failure domain selector, we should simply skip it if clusterCtx.VSphereCluster.Spec.FailureDomainSelector == nil { return true, nil @@ -431,28 +437,33 @@ var ( apiServerTriggersMu sync.Mutex ) -func (r clusterReconciler) reconcileVSphereClusterWhenAPIServerIsOnline(clusterCtx *capvcontext.ClusterContext) { +func (r *clusterReconciler) reconcileVSphereClusterWhenAPIServerIsOnline(ctx context.Context, clusterCtx *capvcontext.ClusterContext) { + log := ctrl.LoggerFrom(ctx) + if conditions.IsTrue(clusterCtx.Cluster, clusterv1.ControlPlaneInitializedCondition) { - clusterCtx.Logger.Info("skipping reconcile when API server is online", + log.Info("Skipping reconcile when API server is online", "reason", "controlPlaneInitialized") return } apiServerTriggersMu.Lock() defer apiServerTriggersMu.Unlock() if _, ok := apiServerTriggers[clusterCtx.Cluster.UID]; ok { - clusterCtx.Logger.Info("skipping reconcile when API server is online", + log.Info("Skipping reconcile when API server is online", "reason", "alreadyPolling") return } apiServerTriggers[clusterCtx.Cluster.UID] = struct{}{} go func() { - ctx := context.Background() + // Note: we have to use a new context here so the ctx in this go routine is not canceled + // when the reconcile returns. + ctx := ctrl.LoggerInto(context.Background(), log) + // Block until the target API server is online. 
- clusterCtx.Logger.Info("start polling API server for online check") + log.Info("Start polling API server for online check") wait.PollUntilContextCancel(ctx, time.Second*1, true, func(context.Context) (bool, error) { return r.isAPIServerOnline(ctx, clusterCtx), nil }) //nolint:errcheck - clusterCtx.Logger.Info("stop polling API server for online check") - clusterCtx.Logger.Info("triggering GenericEvent", "reason", "api-server-online") - eventChannel := clusterCtx.GetGenericEventChannelFor(clusterCtx.VSphereCluster.GetObjectKind().GroupVersionKind()) + log.Info("Stop polling API server for online check") + log.Info("Triggering GenericEvent", "reason", "api-server-online") + eventChannel := r.ControllerManagerContext.GetGenericEventChannelFor(clusterCtx.VSphereCluster.GetObjectKind().GroupVersionKind()) eventChannel <- event.GenericEvent{ Object: clusterCtx.VSphereCluster, } @@ -460,17 +471,19 @@ func (r clusterReconciler) reconcileVSphereClusterWhenAPIServerIsOnline(clusterC // Once the control plane has been marked as initialized it is safe to // remove the key from the map that prevents multiple goroutines from // polling the API server to see if it is online. - clusterCtx.Logger.Info("start polling for control plane initialized") + log.Info("Start polling for control plane initialized") wait.PollUntilContextCancel(ctx, time.Second*1, true, func(context.Context) (bool, error) { return r.isControlPlaneInitialized(ctx, clusterCtx), nil }) //nolint:errcheck - clusterCtx.Logger.Info("stop polling for control plane initialized") + log.Info("Stop polling for control plane initialized") apiServerTriggersMu.Lock() delete(apiServerTriggers, clusterCtx.Cluster.UID) apiServerTriggersMu.Unlock() }() } -func (r clusterReconciler) isAPIServerOnline(ctx context.Context, clusterCtx *capvcontext.ClusterContext) bool { - if kubeClient, err := infrautilv1.NewKubeClient(ctx, clusterCtx.Client, clusterCtx.Cluster); err == nil { +func (r *clusterReconciler) isAPIServerOnline(ctx context.Context, clusterCtx *capvcontext.ClusterContext) bool { + log := ctrl.LoggerFrom(ctx) + + if kubeClient, err := infrautilv1.NewKubeClient(ctx, r.Client, clusterCtx.Cluster); err == nil { if _, err := kubeClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{}); err == nil { // The target cluster is online. To make sure the correct control // plane endpoint information is logged, it is necessary to fetch @@ -480,13 +493,13 @@ func (r clusterReconciler) isAPIServerOnline(ctx context.Context, clusterCtx *ca // if the API server is online. 
cluster := &clusterv1.Cluster{} clusterKey := client.ObjectKey{Namespace: clusterCtx.Cluster.Namespace, Name: clusterCtx.Cluster.Name} - if err := clusterCtx.Client.Get(ctx, clusterKey, cluster); err != nil { + if err := r.Client.Get(ctx, clusterKey, cluster); err != nil { cluster = clusterCtx.Cluster.DeepCopy() cluster.Spec.ControlPlaneEndpoint.Host = clusterCtx.VSphereCluster.Spec.ControlPlaneEndpoint.Host cluster.Spec.ControlPlaneEndpoint.Port = clusterCtx.VSphereCluster.Spec.ControlPlaneEndpoint.Port - clusterCtx.Logger.Error(err, "failed to get updated cluster object while checking if API server is online") + log.Error(err, "failed to get updated cluster object while checking if API server is online") } - clusterCtx.Logger.Info( + log.Info( "API server is online", "controlPlaneEndpoint", cluster.Spec.ControlPlaneEndpoint.String()) return true @@ -495,22 +508,24 @@ func (r clusterReconciler) isAPIServerOnline(ctx context.Context, clusterCtx *ca return false } -func (r clusterReconciler) isControlPlaneInitialized(ctx context.Context, clusterCtx *capvcontext.ClusterContext) bool { +func (r *clusterReconciler) isControlPlaneInitialized(ctx context.Context, clusterCtx *capvcontext.ClusterContext) bool { + log := ctrl.LoggerFrom(ctx) + cluster := &clusterv1.Cluster{} clusterKey := client.ObjectKey{Namespace: clusterCtx.Cluster.Namespace, Name: clusterCtx.Cluster.Name} - if err := clusterCtx.Client.Get(ctx, clusterKey, cluster); err != nil { + if err := r.Client.Get(ctx, clusterKey, cluster); err != nil { if !apierrors.IsNotFound(err) { - clusterCtx.Logger.Error(err, "failed to get updated cluster object while checking if control plane is initialized") + log.Error(err, "Failed to get updated cluster object while checking if control plane is initialized") return false } - clusterCtx.Logger.Info("exiting early because cluster no longer exists") + log.Info("Exiting early because cluster no longer exists") return true } return conditions.IsTrue(clusterCtx.Cluster, clusterv1.ControlPlaneInitializedCondition) } -func setOwnerRefsOnVsphereMachines(ctx context.Context, clusterCtx *capvcontext.ClusterContext) error { - vsphereMachines, err := infrautilv1.GetVSphereMachinesInCluster(ctx, clusterCtx.Client, clusterCtx.Cluster.Namespace, clusterCtx.Cluster.Name) +func (r *clusterReconciler) setOwnerRefsOnVsphereMachines(ctx context.Context, clusterCtx *capvcontext.ClusterContext) error { + vsphereMachines, err := infrautilv1.GetVSphereMachinesInCluster(ctx, r.Client, clusterCtx.Cluster.Namespace, clusterCtx.Cluster.Name) if err != nil { return errors.Wrapf(err, "unable to list VSphereMachines part of VSphereCluster %s/%s", clusterCtx.VSphereCluster.Namespace, clusterCtx.VSphereCluster.Name) @@ -518,7 +533,7 @@ func setOwnerRefsOnVsphereMachines(ctx context.Context, clusterCtx *capvcontext. var patchErrors []error for _, vsphereMachine := range vsphereMachines { - patchHelper, err := patch.NewHelper(vsphereMachine, clusterCtx.Client) + patchHelper, err := patch.NewHelper(vsphereMachine, r.Client) if err != nil { patchErrors = append(patchErrors, err) continue @@ -540,10 +555,9 @@ func setOwnerRefsOnVsphereMachines(ctx context.Context, clusterCtx *capvcontext. return kerrors.NewAggregate(patchErrors) } -// TODO: zhanggbj: Add golang context after refactoring ClusterModule controller and reconciler. 
-func (r clusterReconciler) reconcileClusterModules(clusterCtx *capvcontext.ClusterContext) (reconcile.Result, error) { +func (r *clusterReconciler) reconcileClusterModules(ctx context.Context, clusterCtx *capvcontext.ClusterContext) (reconcile.Result, error) { if feature.Gates.Enabled(feature.NodeAntiAffinity) { - return r.clusterModuleReconciler.Reconcile(clusterCtx) + return r.clusterModuleReconciler.Reconcile(ctx, clusterCtx) } return reconcile.Result{}, nil } @@ -551,12 +565,17 @@ func (r clusterReconciler) reconcileClusterModules(clusterCtx *capvcontext.Clust // controlPlaneMachineToCluster is a handler.ToRequestsFunc to be used // to enqueue requests for reconciliation for VSphereCluster to update // its status.apiEndpoints field. -func (r clusterReconciler) controlPlaneMachineToCluster(ctx context.Context, o client.Object) []ctrl.Request { +func (r *clusterReconciler) controlPlaneMachineToCluster(ctx context.Context, o client.Object) []ctrl.Request { + log := ctrl.LoggerFrom(ctx) + vsphereMachine, ok := o.(*infrav1.VSphereMachine) if !ok { - r.Logger.Error(nil, fmt.Sprintf("expected a VSphereMachine but got a %T", o)) + log.Error(nil, fmt.Sprintf("expected a VSphereMachine but got a %T", o)) return nil } + log = log.WithValues("VSphereMachine", klog.KObj(vsphereMachine)) + ctx = ctrl.LoggerInto(ctx, log) + if !infrautilv1.IsControlPlaneMachine(vsphereMachine) { return nil } @@ -568,18 +587,18 @@ func (r clusterReconciler) controlPlaneMachineToCluster(ctx context.Context, o c if err == infrautilv1.ErrNoMachineIPAddr { return nil } - r.Logger.Error(err, "failed to get preferred IP address for VSphereMachine", - "namespace", vsphereMachine.Namespace, "name", vsphereMachine.Name) + log.Error(err, "Failed to get preferred IP address for VSphereMachine") return nil } // Fetch the CAPI Cluster. 
cluster, err := clusterutilv1.GetClusterFromMetadata(ctx, r.Client, vsphereMachine.ObjectMeta) if err != nil { - r.Logger.Error(err, "VSphereMachine is missing cluster label or cluster does not exist", - "namespace", vsphereMachine.Namespace, "name", vsphereMachine.Name) + log.Error(err, "VSphereMachine is missing cluster label or cluster does not exist") return nil } + log = log.WithValues("Cluster", klog.KObj(cluster), "VSphereCluster", klog.KRef(cluster.Namespace, cluster.Spec.InfrastructureRef.Name)) + ctx = ctrl.LoggerInto(ctx, log) if conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { return nil @@ -596,8 +615,7 @@ func (r clusterReconciler) controlPlaneMachineToCluster(ctx context.Context, o c Name: cluster.Spec.InfrastructureRef.Name, } if err := r.Client.Get(ctx, vsphereClusterKey, vsphereCluster); err != nil { - r.Logger.Error(err, "failed to get VSphereCluster", - "namespace", vsphereClusterKey.Namespace, "name", vsphereClusterKey.Name) + log.Error(err, "Failed to get VSphereCluster") return nil } @@ -613,18 +631,20 @@ func (r clusterReconciler) controlPlaneMachineToCluster(ctx context.Context, o c }} } -func (r clusterReconciler) deploymentZoneToCluster(ctx context.Context, o client.Object) []ctrl.Request { +func (r *clusterReconciler) deploymentZoneToCluster(ctx context.Context, o client.Object) []ctrl.Request { + log := ctrl.LoggerFrom(ctx) + var requests []ctrl.Request obj, ok := o.(*infrav1.VSphereDeploymentZone) if !ok { - r.Logger.Error(nil, fmt.Sprintf("expected an infrav1.VSphereDeploymentZone but got a %T", o)) + log.Error(nil, fmt.Sprintf("expected an infrav1.VSphereDeploymentZone but got a %T", o)) return nil } var clusterList infrav1.VSphereClusterList err := r.Client.List(ctx, &clusterList) if err != nil { - r.Logger.Error(err, "unable to list clusters") + log.Error(err, "Unable to list clusters") return requests } diff --git a/controllers/vspherecluster_reconciler_test.go b/controllers/vspherecluster_reconciler_test.go index f1e050320e..003df5d1fe 100644 --- a/controllers/vspherecluster_reconciler_test.go +++ b/controllers/vspherecluster_reconciler_test.go @@ -703,7 +703,10 @@ func TestClusterReconciler_ReconcileDeploymentZones(t *testing.T) { clusterCtx := fake.NewClusterContext(controllerCtx) clusterCtx.VSphereCluster.Spec.Server = server - r := clusterReconciler{ControllerContext: controllerCtx} + r := clusterReconciler{ + ControllerManagerContext: controllerCtx.ControllerManagerContext, + Client: controllerCtx.Client, + } reconciled, err := r.reconcileDeploymentZones(ctx, clusterCtx) g.Expect(err).NotTo(HaveOccurred()) g.Expect(reconciled).To(Equal(tt.reconciled)) @@ -773,7 +776,10 @@ func TestClusterReconciler_ReconcileDeploymentZones(t *testing.T) { clusterCtx.VSphereCluster.Spec.Server = server clusterCtx.VSphereCluster.Spec.FailureDomainSelector = &metav1.LabelSelector{MatchLabels: map[string]string{}} - r := clusterReconciler{ControllerContext: controllerCtx} + r := clusterReconciler{ + ControllerManagerContext: controllerCtx.ControllerManagerContext, + Client: controllerCtx.Client, + } reconciled, err := r.reconcileDeploymentZones(ctx, clusterCtx) g.Expect(err).NotTo(HaveOccurred()) g.Expect(reconciled).To(Equal(tt.reconciled)) @@ -806,7 +812,10 @@ func TestClusterReconciler_ReconcileDeploymentZones(t *testing.T) { clusterCtx.VSphereCluster.Spec.Server = server clusterCtx.VSphereCluster.Spec.FailureDomainSelector = selector - r := clusterReconciler{ControllerContext: controllerCtx} + r := clusterReconciler{ + 
ControllerManagerContext: controllerCtx.ControllerManagerContext, + Client: controllerCtx.Client, + } _, err := r.reconcileDeploymentZones(ctx, clusterCtx) g.Expect(err).NotTo(HaveOccurred()) g.Expect(clusterCtx.VSphereCluster.Status.FailureDomains).To(HaveLen(selectedZones)) diff --git a/controllers/vsphereclusteridentity_controller.go b/controllers/vsphereclusteridentity_controller.go index 626938f559..2fd7e049f3 100644 --- a/controllers/vsphereclusteridentity_controller.go +++ b/controllers/vsphereclusteridentity_controller.go @@ -26,6 +26,7 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kerrors "k8s.io/apimachinery/pkg/util/errors" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" clusterutilv1 "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" @@ -108,10 +109,7 @@ func (r clusterIdentityReconciler) Reconcile(ctx context.Context, req reconcile. conditions.SetSummary(identity, conditions.WithConditions(infrav1.CredentialsAvailableCondidtion)) if err := patchHelper.Patch(ctx, identity); err != nil { - if reterr == nil { - reterr = err - } - r.Logger.Error(err, "patch failed", "namespace", identity.Namespace, "name", identity.Name) + reterr = kerrors.NewAggregate([]error{reterr, err}) } }() diff --git a/controllers/vspheredeploymentzone_controller.go b/controllers/vspheredeploymentzone_controller.go index f3a04822f3..4a47fc4d8d 100644 --- a/controllers/vspheredeploymentzone_controller.go +++ b/controllers/vspheredeploymentzone_controller.go @@ -26,6 +26,8 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog/v2" "k8s.io/utils/pointer" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" clusterutilv1 "sigs.k8s.io/cluster-api/util" @@ -98,7 +100,9 @@ type vsphereDeploymentZoneReconciler struct { } func (r vsphereDeploymentZoneReconciler) Reconcile(ctx context.Context, request reconcile.Request) (_ reconcile.Result, reterr error) { - logr := r.Logger.WithValues("vspheredeploymentzone", request.Name) + // Note: Just adding VSphereDeploymentZone here explicitly as this reconciler was not refactored + // to use the logger from ctx yet. Once we do that, controller-runtime adds VSphereDeploymentZone for us. + logr := r.Logger.WithValues("VSphereDeploymentZone", klog.KRef(request.Namespace, request.Name)) // Fetch the VSphereDeploymentZone for this request. 
vsphereDeploymentZone := &infrav1.VSphereDeploymentZone{} if err := r.Client.Get(ctx, request.NamespacedName, vsphereDeploymentZone); err != nil { @@ -138,10 +142,7 @@ func (r vsphereDeploymentZoneReconciler) Reconcile(ctx context.Context, request } defer func() { if err := vsphereDeploymentZoneContext.Patch(); err != nil { - if reterr == nil { - reterr = err - } - logr.Error(err, "patch failed", "vsphereDeploymentZone", vsphereDeploymentZoneContext.String()) + reterr = kerrors.NewAggregate([]error{reterr, err}) } }() @@ -228,7 +229,7 @@ func (r vsphereDeploymentZoneReconciler) getVCenterSession(deploymentZoneCtx *ca if deploymentZoneCtx.VSphereDeploymentZone.Spec.Server != vsphereCluster.Spec.Server || vsphereCluster.Spec.IdentityRef == nil { continue } - logger := deploymentZoneCtx.Logger.WithValues("cluster", vsphereCluster.Name) + logger := deploymentZoneCtx.Logger.WithValues("VSphereCluster", klog.KRef(vsphereCluster.Namespace, vsphereCluster.Name)) params = params.WithThumbprint(vsphereCluster.Spec.Thumbprint) clust := vsphereCluster creds, err := identity.GetCredentials(deploymentZoneCtx, r.Client, &clust, r.Namespace) diff --git a/controllers/vspheredeploymentzone_controller_domain.go b/controllers/vspheredeploymentzone_controller_domain.go index 5759b02825..5fbc6e2f48 100644 --- a/controllers/vspheredeploymentzone_controller_domain.go +++ b/controllers/vspheredeploymentzone_controller_domain.go @@ -20,6 +20,7 @@ import ( "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/klog/v2" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" clusterutilv1 "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" @@ -33,7 +34,7 @@ import ( ) func (r vsphereDeploymentZoneReconciler) reconcileFailureDomain(deploymentZoneCtx *capvcontext.VSphereDeploymentZoneContext) error { - logger := ctrl.LoggerFrom(deploymentZoneCtx).WithValues("failure domain", deploymentZoneCtx.VSphereFailureDomain.Name) + logger := ctrl.LoggerFrom(deploymentZoneCtx).WithValues("VSphereFailureDomain", klog.KObj(deploymentZoneCtx.VSphereFailureDomain)) // verify the failure domain for the region if err := r.reconcileInfraFailureDomain(deploymentZoneCtx, deploymentZoneCtx.VSphereFailureDomain.Spec.Region); err != nil { diff --git a/controllers/vspheremachine_controller.go b/controllers/vspheremachine_controller.go index 9edc24518c..aab3592f60 100644 --- a/controllers/vspheremachine_controller.go +++ b/controllers/vspheremachine_controller.go @@ -29,6 +29,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" apitypes "k8s.io/apimachinery/pkg/types" + kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/validation" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" clusterutilv1 "sigs.k8s.io/cluster-api/util" @@ -135,7 +136,7 @@ func AddMachineControllerToManager(controllerCtx *capvcontext.ControllerManagerC // Watch any VirtualMachine resources owned by this VSphereMachine builder.Owns(&vmoprv1.VirtualMachine{}) r.VMService = &vmoperator.VmopMachineService{} - networkProvider, err := inframanager.GetNetworkProvider(controllerCtx) + networkProvider, err := inframanager.GetNetworkProvider(context.TODO(), controllerCtx.Client, controllerCtx.NetworkProvider) if err != nil { return errors.Wrap(err, "failed to create a network provider") } @@ -179,7 +180,7 @@ type machineReconciler struct { func (r *machineReconciler) Reconcile(_ context.Context, req ctrl.Request) (_ ctrl.Result, reterr 
error) { var machineContext capvcontext.MachineContext logger := r.Logger.WithName(req.Namespace).WithName(req.Name) - logger.V(3).Info("Starting Reconcile VSphereMachine") + logger.V(4).Info("Starting Reconcile") // Fetch VSphereMachine object and populate the machine context machineContext, err := r.VMService.FetchVSphereMachine(r.Client, req.NamespacedName) @@ -229,10 +230,7 @@ func (r *machineReconciler) Reconcile(_ context.Context, req ctrl.Request) (_ ct // Patch the VSphereMachine resource. if err := machineContext.Patch(); err != nil { - if reterr == nil { - reterr = err - } - machineContext.GetLogger().Error(err, "patch failed", "machine", machineContext.String()) + reterr = kerrors.NewAggregate([]error{reterr, err}) } }() @@ -412,7 +410,7 @@ func (r *machineReconciler) setVMModifiers(machineCtx capvcontext.MachineContext // No need to check the type. We know this will be a VirtualMachine vm, _ := obj.(*vmoprv1.VirtualMachine) supervisorMachineCtx.Logger.V(3).Info("Applying network config to VM", "vm-name", vm.Name) - err := r.networkProvider.ConfigureVirtualMachine(supervisorMachineCtx.GetClusterContext(), vm) + err := r.networkProvider.ConfigureVirtualMachine(context.TODO(), supervisorMachineCtx.GetClusterContext(), vm) if err != nil { return nil, errors.Errorf("failed to configure machine network: %+v", err) } diff --git a/controllers/vspherevm_controller.go b/controllers/vspherevm_controller.go index be33963225..e9f9269abe 100644 --- a/controllers/vspherevm_controller.go +++ b/controllers/vspherevm_controller.go @@ -28,6 +28,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" apitypes "k8s.io/apimachinery/pkg/types" + kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/klog/v2" "k8s.io/utils/pointer" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -155,6 +156,8 @@ type vmReconciler struct { // Reconcile ensures the back-end state reflects the Kubernetes resource state intent. func (r vmReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { + r.Logger.V(4).Info("Starting Reconcile", "key", req.NamespacedName) + // Get the VSphereVM resource for this request. vsphereVM := &infrav1.VSphereVM{} if err := r.Client.Get(r, req.NamespacedName, vsphereVM); err != nil { @@ -257,10 +260,7 @@ func (r vmReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.R // Patch the VSphereVM resource. if err := vmContext.Patch(); err != nil { - if reterr == nil { - reterr = err - } - vmContext.Logger.Error(err, "patch failed", "vm", vmContext.String()) + reterr = kerrors.NewAggregate([]error{reterr, err}) } }() diff --git a/main.go b/main.go index 91fe11cb52..ce13739898 100644 --- a/main.go +++ b/main.go @@ -59,7 +59,7 @@ import ( ) var ( - setupLog = ctrl.Log.WithName("entrypoint") + setupLog = ctrl.Log.WithName("setup") logOptions = logs.NewOptions() controllerName = "cluster-api-vsphere-manager" @@ -216,8 +216,9 @@ func InitFlags(fs *pflag.FlagSet) { func main() { InitFlags(pflag.CommandLine) - pflag.CommandLine.AddGoFlagSet(flag.CommandLine) pflag.CommandLine.SetNormalizeFunc(cliflag.WordSepNormalizeFunc) + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + // Set log level 2 as default. 
if err := pflag.CommandLine.Set("v", "2"); err != nil { setupLog.Error(err, "failed to set log level: %v") os.Exit(1) @@ -239,16 +240,13 @@ func main() { if watchNamespace != "" { managerOpts.Cache.Namespaces = []string{watchNamespace} - setupLog.Info( - "Watching objects only in namespace for reconciliation", - "namespace", watchNamespace) } if profilerAddress != "" && enableContentionProfiling { goruntime.SetBlockProfileRate(1) } - setupLog.V(1).Info(fmt.Sprintf("feature gates: %+v\n", feature.Gates)) + setupLog.Info(fmt.Sprintf("Feature gates: %+v\n", feature.Gates)) managerOpts.Cache.SyncPeriod = &syncPeriod managerOpts.LeaseDuration = &leaderElectionLeaseDuration @@ -300,8 +298,6 @@ func main() { } webhookOpts.TLSOpts = tlsOptionOverrides managerOpts.WebhookServer = webhook.NewServer(webhookOpts) - - setupLog.Info("creating controller manager", "version", version.Get().String()) managerOpts.AddToManager = addToManager // Set up the context that's going to be used in controllers and for the manager. @@ -309,15 +305,15 @@ func main() { mgr, err := manager.New(ctx, managerOpts) if err != nil { - setupLog.Error(err, "problem creating controller manager") + setupLog.Error(err, "Error creating manager") os.Exit(1) } setupChecks(mgr) - setupLog.Info("starting controller manager") + setupLog.Info("Starting manager", "version", version.Get().String()) if err := mgr.Start(ctx); err != nil { - setupLog.Error(err, "problem running controller manager") + setupLog.Error(err, "Error starting manager") os.Exit(1) } @@ -383,11 +379,11 @@ func setupSupervisorControllers(ctx context.Context, controllerCtx *capvcontext. return err } - if err := controllers.AddServiceAccountProviderControllerToManager(controllerCtx, mgr, tracker, concurrency(providerServiceAccountConcurrency)); err != nil { + if err := controllers.AddServiceAccountProviderControllerToManager(ctx, controllerCtx, mgr, tracker, concurrency(providerServiceAccountConcurrency)); err != nil { return err } - return controllers.AddServiceDiscoveryControllerToManager(controllerCtx, mgr, tracker, concurrency(serviceDiscoveryConcurrency)) + return controllers.AddServiceDiscoveryControllerToManager(ctx, controllerCtx, mgr, tracker, concurrency(serviceDiscoveryConcurrency)) } func setupChecks(mgr ctrlmgr.Manager) { diff --git a/pkg/clustermodule/fake/service.go b/pkg/clustermodule/fake/service.go index f65cc521bd..3a81b992ff 100644 --- a/pkg/clustermodule/fake/service.go +++ b/pkg/clustermodule/fake/service.go @@ -17,27 +17,29 @@ limitations under the License. 
package fake import ( + "context" + "github.com/stretchr/testify/mock" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/clustermodule" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" ) type CMService struct { mock.Mock } -func (f *CMService) Create(ctx *context.ClusterContext, wrapper clustermodule.Wrapper) (string, error) { - args := f.Called(ctx, wrapper) +func (f *CMService) Create(ctx context.Context, clusterCtx *capvcontext.ClusterContext, wrapper clustermodule.Wrapper) (string, error) { + args := f.Called(ctx, clusterCtx, wrapper) return args.String(0), args.Error(1) } -func (f *CMService) DoesExist(ctx *context.ClusterContext, wrapper clustermodule.Wrapper, moduleUUID string) (bool, error) { - args := f.Called(ctx, wrapper, moduleUUID) +func (f *CMService) DoesExist(ctx context.Context, clusterCtx *capvcontext.ClusterContext, wrapper clustermodule.Wrapper, moduleUUID string) (bool, error) { + args := f.Called(ctx, clusterCtx, wrapper, moduleUUID) return args.Bool(0), args.Error(1) } -func (f *CMService) Remove(ctx *context.ClusterContext, moduleUUID string) error { - args := f.Called(ctx, moduleUUID) +func (f *CMService) Remove(ctx context.Context, clusterCtx *capvcontext.ClusterContext, moduleUUID string) error { + args := f.Called(ctx, clusterCtx, moduleUUID) return args.Error(0) } diff --git a/pkg/clustermodule/interfaces.go b/pkg/clustermodule/interfaces.go index cc1d75e4e2..bf4d77650e 100644 --- a/pkg/clustermodule/interfaces.go +++ b/pkg/clustermodule/interfaces.go @@ -16,12 +16,16 @@ limitations under the License. package clustermodule -import "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" +import ( + "context" + + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" +) type Service interface { - Create(ctx *context.ClusterContext, wrapper Wrapper) (string, error) + Create(ctx context.Context, clusterCtx *capvcontext.ClusterContext, wrapper Wrapper) (string, error) - DoesExist(ctx *context.ClusterContext, wrapper Wrapper, moduleUUID string) (bool, error) + DoesExist(ctx context.Context, clusterCtx *capvcontext.ClusterContext, wrapper Wrapper, moduleUUID string) (bool, error) - Remove(ctx *context.ClusterContext, moduleUUID string) error + Remove(ctx context.Context, clusterCtx *capvcontext.ClusterContext, moduleUUID string) error } diff --git a/pkg/clustermodule/service.go b/pkg/clustermodule/service.go index 42db4fd127..f5091b7b35 100644 --- a/pkg/clustermodule/service.go +++ b/pkg/clustermodule/service.go @@ -22,6 +22,9 @@ import ( "github.com/pkg/errors" "github.com/vmware/govmomi/find" "github.com/vmware/govmomi/vim25/types" + "k8s.io/klog/v2" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/services/govmomi/clustermodules" @@ -30,102 +33,110 @@ import ( const validMachineTemplate = "VSphereMachineTemplate" -type service struct{} +type service struct { + ControllerManagerContext *capvcontext.ControllerManagerContext + Client client.Client +} -func NewService() Service { - return service{} +func NewService(controllerManagerCtx *capvcontext.ControllerManagerContext, client client.Client) Service { + return &service{ + ControllerManagerContext: controllerManagerCtx, + Client: client, + } } -func (s service) Create(clusterCtx *capvcontext.ClusterContext, wrapper Wrapper) (string, error) { - logger := 
clusterCtx.Logger.WithValues("object", wrapper.GetName(), "namespace", wrapper.GetNamespace()) +func (s *service) Create(ctx context.Context, clusterCtx *capvcontext.ClusterContext, wrapper Wrapper) (string, error) { + log := ctrl.LoggerFrom(ctx).WithValues(wrapper.GetObjectKind().GroupVersionKind().Kind, klog.KObj(wrapper)) + ctx = ctrl.LoggerInto(ctx, log) - templateRef, err := fetchTemplateRef(clusterCtx, clusterCtx.Client, wrapper) + templateRef, err := s.fetchTemplateRef(ctx, wrapper) if err != nil { - logger.V(4).Error(err, "error fetching template for object") + log.V(4).Error(err, "error fetching template for object") return "", errors.Wrapf(err, "error fetching machine template for object %s/%s", wrapper.GetNamespace(), wrapper.GetName()) } if templateRef.Kind != validMachineTemplate { // since this is a heterogeneous cluster, we should skip cluster module creation for non VSphereMachine objects - logger.V(4).Info("skipping module creation for object") + log.V(4).Info("skipping module creation for object") return "", nil } - template, err := fetchMachineTemplate(clusterCtx, wrapper, templateRef.Name) + template, err := s.fetchMachineTemplate(ctx, wrapper, templateRef.Name) if err != nil { - logger.V(4).Error(err, "error fetching template") + log.V(4).Error(err, "error fetching template") return "", err } if server := template.Spec.Template.Spec.Server; server != clusterCtx.VSphereCluster.Spec.Server { - logger.V(4).Info("skipping module creation for object since template uses a different server", "server", server) + log.V(4).Info("skipping module creation for object since template uses a different server", "server", server) return "", nil } - vCenterSession, err := fetchSessionForObject(clusterCtx, template) + vCenterSession, err := s.fetchSessionForObject(ctx, clusterCtx, template) if err != nil { - logger.V(4).Error(err, "error fetching session") + log.V(4).Error(err, "error fetching session") return "", err } // Fetch the compute cluster resource by tracing the owner of the resource pool in use. 
// TODO (srm09): How do we support Multi AZ scenarios here - computeClusterRef, err := getComputeClusterResource(clusterCtx, vCenterSession, template.Spec.Template.Spec.ResourcePool) + computeClusterRef, err := getComputeClusterResource(ctx, vCenterSession, template.Spec.Template.Spec.ResourcePool) if err != nil { - logger.V(4).Error(err, "error fetching compute cluster resource") + log.V(4).Error(err, "error fetching compute cluster resource") return "", err } provider := clustermodules.NewProvider(vCenterSession.TagManager.Client) - moduleUUID, err := provider.CreateModule(clusterCtx, computeClusterRef) + moduleUUID, err := provider.CreateModule(ctx, computeClusterRef) if err != nil { - logger.V(4).Error(err, "error creating cluster module") + log.V(4).Error(err, "error creating cluster module") return "", err } - logger.V(4).Info("created cluster module for object", "moduleUUID", moduleUUID) + log.V(4).Info("created cluster module for object", "moduleUUID", moduleUUID) return moduleUUID, nil } -func (s service) DoesExist(clusterCtx *capvcontext.ClusterContext, wrapper Wrapper, moduleUUID string) (bool, error) { - logger := clusterCtx.Logger.WithValues("object", wrapper.GetName()) +func (s *service) DoesExist(ctx context.Context, clusterCtx *capvcontext.ClusterContext, wrapper Wrapper, moduleUUID string) (bool, error) { + log := ctrl.LoggerFrom(ctx).WithValues(wrapper.GetObjectKind().GroupVersionKind().Kind, klog.KObj(wrapper)) + ctx = ctrl.LoggerInto(ctx, log) - templateRef, err := fetchTemplateRef(clusterCtx, clusterCtx.Client, wrapper) + templateRef, err := s.fetchTemplateRef(ctx, wrapper) if err != nil { - logger.V(4).Error(err, "error fetching template for object") + log.V(4).Error(err, "error fetching template for object") return false, errors.Wrapf(err, "error fetching infrastructure machine template for object %s/%s", wrapper.GetNamespace(), wrapper.GetName()) } - template, err := fetchMachineTemplate(clusterCtx, wrapper, templateRef.Name) + template, err := s.fetchMachineTemplate(ctx, wrapper, templateRef.Name) if err != nil { - logger.V(4).Error(err, "error fetching template") + log.V(4).Error(err, "error fetching template") return false, err } - vCenterSession, err := fetchSessionForObject(clusterCtx, template) + vCenterSession, err := s.fetchSessionForObject(ctx, clusterCtx, template) if err != nil { - logger.V(4).Error(err, "error fetching session") + log.V(4).Error(err, "error fetching session") return false, err } // Fetch the compute cluster resource by tracing the owner of the resource pool in use. 
// TODO (srm09): How do we support Multi AZ scenarios here - computeClusterRef, err := getComputeClusterResource(clusterCtx, vCenterSession, template.Spec.Template.Spec.ResourcePool) + computeClusterRef, err := getComputeClusterResource(ctx, vCenterSession, template.Spec.Template.Spec.ResourcePool) if err != nil { - logger.V(4).Error(err, "error fetching compute cluster resource") + log.V(4).Error(err, "error fetching compute cluster resource") return false, err } provider := clustermodules.NewProvider(vCenterSession.TagManager.Client) - return provider.DoesModuleExist(clusterCtx, moduleUUID, computeClusterRef) + return provider.DoesModuleExist(ctx, moduleUUID, computeClusterRef) } -func (s service) Remove(clusterCtx *capvcontext.ClusterContext, moduleUUID string) error { - params := newParams(*clusterCtx) - vcenterSession, err := fetchSession(clusterCtx, params) +func (s *service) Remove(ctx context.Context, clusterCtx *capvcontext.ClusterContext, moduleUUID string) error { + params := s.newParams(*clusterCtx) + vcenterSession, err := s.fetchSession(ctx, clusterCtx, params) if err != nil { return err } provider := clustermodules.NewProvider(vcenterSession.TagManager.Client) - return provider.DeleteModule(clusterCtx, moduleUUID) + return provider.DeleteModule(ctx, moduleUUID) } func getComputeClusterResource(ctx context.Context, s *session.Session, resourcePool string) (types.ManagedObjectReference, error) { diff --git a/pkg/clustermodule/service_test.go b/pkg/clustermodule/service_test.go index 36374dbfbd..8fdaaa1792 100644 --- a/pkg/clustermodule/service_test.go +++ b/pkg/clustermodule/service_test.go @@ -17,6 +17,7 @@ limitations under the License. package clustermodule import ( + "context" "fmt" "testing" @@ -30,8 +31,6 @@ import ( ) func TestService_Create(t *testing.T) { - svc := NewService() - t.Run("creation is skipped", func(t *testing.T) { t.Run("when wrapper points to template != VSphereMachineTemplate", func(t *testing.T) { md := machineDeployment("md", fake.Namespace, fake.Clusterv1a2Name) @@ -43,9 +42,10 @@ func TestService_Create(t *testing.T) { g := gomega.NewWithT(t) controllerCtx := fake.NewControllerContext(fake.NewControllerManagerContext(md)) - ctx := fake.NewClusterContext(controllerCtx) + clusterCtx := fake.NewClusterContext(controllerCtx) + svc := NewService(controllerCtx.ControllerManagerContext, controllerCtx.Client) - moduleUUID, err := svc.Create(ctx, mdWrapper{md}) + moduleUUID, err := svc.Create(context.Background(), clusterCtx, mdWrapper{md}) g.Expect(err).ToNot(gomega.HaveOccurred()) g.Expect(moduleUUID).To(gomega.BeEmpty()) }) @@ -73,9 +73,10 @@ func TestService_Create(t *testing.T) { g := gomega.NewWithT(t) controllerCtx := fake.NewControllerContext(fake.NewControllerManagerContext(md, machineTemplate)) - ctx := fake.NewClusterContext(controllerCtx) + clusterCtx := fake.NewClusterContext(controllerCtx) + svc := NewService(controllerCtx.ControllerManagerContext, controllerCtx.Client) - moduleUUID, err := svc.Create(ctx, mdWrapper{md}) + moduleUUID, err := svc.Create(context.Background(), clusterCtx, mdWrapper{md}) g.Expect(err).ToNot(gomega.HaveOccurred()) g.Expect(moduleUUID).To(gomega.BeEmpty()) }) diff --git a/pkg/clustermodule/session.go b/pkg/clustermodule/session.go index f7e2798f78..ea34fa19c7 100644 --- a/pkg/clustermodule/session.go +++ b/pkg/clustermodule/session.go @@ -31,46 +31,46 @@ import ( "sigs.k8s.io/cluster-api-provider-vsphere/pkg/session" ) -func fetchSessionForObject(clusterCtx *capvcontext.ClusterContext, template 
*infrav1.VSphereMachineTemplate) (*session.Session, error) { - params := newParams(*clusterCtx) +func (s *service) fetchSessionForObject(ctx context.Context, clusterCtx *capvcontext.ClusterContext, template *infrav1.VSphereMachineTemplate) (*session.Session, error) { + params := s.newParams(*clusterCtx) // Datacenter is necessary since we use the finder. params = params.WithDatacenter(template.Spec.Template.Spec.Datacenter) - return fetchSession(clusterCtx, params) + return s.fetchSession(ctx, clusterCtx, params) } -func newParams(clusterCtx capvcontext.ClusterContext) *session.Params { +func (s *service) newParams(clusterCtx capvcontext.ClusterContext) *session.Params { return session.NewParams(). WithServer(clusterCtx.VSphereCluster.Spec.Server). WithThumbprint(clusterCtx.VSphereCluster.Spec.Thumbprint). WithFeatures(session.Feature{ - EnableKeepAlive: clusterCtx.EnableKeepAlive, - KeepAliveDuration: clusterCtx.KeepAliveDuration, + EnableKeepAlive: s.ControllerManagerContext.EnableKeepAlive, + KeepAliveDuration: s.ControllerManagerContext.KeepAliveDuration, }) } -func fetchSession(clusterCtx *capvcontext.ClusterContext, params *session.Params) (*session.Session, error) { +func (s *service) fetchSession(ctx context.Context, clusterCtx *capvcontext.ClusterContext, params *session.Params) (*session.Session, error) { if clusterCtx.VSphereCluster.Spec.IdentityRef != nil { - creds, err := identity.GetCredentials(clusterCtx, clusterCtx.Client, clusterCtx.VSphereCluster, clusterCtx.Namespace) + creds, err := identity.GetCredentials(ctx, s.Client, clusterCtx.VSphereCluster, s.ControllerManagerContext.Namespace) if err != nil { return nil, err } params = params.WithUserInfo(creds.Username, creds.Password) - return session.GetOrCreate(clusterCtx, params) + return session.GetOrCreate(ctx, params) } - params = params.WithUserInfo(clusterCtx.Username, clusterCtx.Password) - return session.GetOrCreate(clusterCtx, params) + params = params.WithUserInfo(s.ControllerManagerContext.Username, s.ControllerManagerContext.Password) + return session.GetOrCreate(ctx, params) } -func fetchTemplateRef(ctx context.Context, c client.Client, input Wrapper) (*corev1.ObjectReference, error) { +func (s *service) fetchTemplateRef(ctx context.Context, input Wrapper) (*corev1.ObjectReference, error) { obj := new(unstructured.Unstructured) obj.SetAPIVersion(input.GetObjectKind().GroupVersionKind().GroupVersion().String()) obj.SetKind(input.GetObjectKind().GroupVersionKind().Kind) obj.SetName(input.GetName()) key := client.ObjectKey{Name: obj.GetName(), Namespace: input.GetNamespace()} - if err := c.Get(ctx, key, obj); err != nil { + if err := s.Client.Get(ctx, key, obj); err != nil { return nil, errors.Wrapf(err, "failed to retrieve %s external object %q/%q", obj.GetKind(), key.Namespace, key.Name) } @@ -81,9 +81,9 @@ func fetchTemplateRef(ctx context.Context, c client.Client, input Wrapper) (*cor return &objRef, nil } -func fetchMachineTemplate(clusterCtx *capvcontext.ClusterContext, input Wrapper, templateName string) (*infrav1.VSphereMachineTemplate, error) { +func (s *service) fetchMachineTemplate(ctx context.Context, input Wrapper, templateName string) (*infrav1.VSphereMachineTemplate, error) { template := &infrav1.VSphereMachineTemplate{} - if err := clusterCtx.Client.Get(clusterCtx, client.ObjectKey{ + if err := s.Client.Get(ctx, client.ObjectKey{ Name: templateName, Namespace: input.GetNamespace(), }, template); err != nil { diff --git a/pkg/clustermodule/util_test.go b/pkg/clustermodule/util_test.go index 
cab13084c4..531c5cae93 100644 --- a/pkg/clustermodule/util_test.go +++ b/pkg/clustermodule/util_test.go @@ -19,7 +19,6 @@ package clustermodule import ( "testing" - "github.com/go-logr/logr" "github.com/google/uuid" "github.com/onsi/gomega" @@ -68,7 +67,6 @@ func Test_IsCompatible(t *testing.T) { g := gomega.NewWithT(t) isCompatible := IsClusterCompatible(&context.ClusterContext{ VSphereCluster: cluster, - Logger: logr.Discard(), }) g.Expect(isCompatible).To(gomega.Equal(tt.isCompatible)) }) diff --git a/pkg/context/cluster_context.go b/pkg/context/cluster_context.go index 46e16c0a2f..2b3faf6bcb 100644 --- a/pkg/context/cluster_context.go +++ b/pkg/context/cluster_context.go @@ -17,9 +17,9 @@ limitations under the License. package context import ( + "context" "fmt" - "github.com/go-logr/logr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" @@ -29,11 +29,9 @@ import ( // ClusterContext is a Go context used with a VSphereCluster. type ClusterContext struct { - *ControllerContext Cluster *clusterv1.Cluster VSphereCluster *infrav1.VSphereCluster PatchHelper *patch.Helper - Logger logr.Logger } // String returns VSphereClusterGroupVersionKind VSphereClusterNamespace/VSphereClusterName. @@ -42,7 +40,7 @@ func (c *ClusterContext) String() string { } // Patch updates the object and its status on the API server. -func (c *ClusterContext) Patch() error { +func (c *ClusterContext) Patch(ctx context.Context) error { // always update the readyCondition. conditions.SetSummary(c.VSphereCluster, conditions.WithConditions( @@ -50,5 +48,5 @@ func (c *ClusterContext) Patch() error { ), ) - return c.PatchHelper.Patch(c, c.VSphereCluster) + return c.PatchHelper.Patch(ctx, c.VSphereCluster) } diff --git a/pkg/context/fake/fake_cluster_context.go b/pkg/context/fake/fake_cluster_context.go index 3842abac66..9fed683791 100644 --- a/pkg/context/fake/fake_cluster_context.go +++ b/pkg/context/fake/fake_cluster_context.go @@ -40,10 +40,8 @@ func NewClusterContext(ctx *context.ControllerContext) *context.ClusterContext { } return &context.ClusterContext{ - ControllerContext: ctx, - Cluster: &cluster, - VSphereCluster: &vsphereCluster, - Logger: ctx.Logger.WithName(vsphereCluster.Name), + Cluster: &cluster, + VSphereCluster: &vsphereCluster, } } diff --git a/pkg/context/fake/fake_guest_cluster_context.go b/pkg/context/fake/fake_guest_cluster_context.go index 9072ac8fd4..ad1bfcef8c 100644 --- a/pkg/context/fake/fake_guest_cluster_context.go +++ b/pkg/context/fake/fake_guest_cluster_context.go @@ -17,26 +17,29 @@ limitations under the License. package fake import ( + "context" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context/vmware" ) // NewGuestClusterContext returns a fake GuestClusterContext for unit testing // guest cluster controllers with a fake client. 
-func NewGuestClusterContext(ctx *vmware.ClusterContext, prototypeCluster bool, gcInitObjects ...client.Object) *vmware.GuestClusterContext { +func NewGuestClusterContext(ctx context.Context, clusterCtx *vmware.ClusterContext, controllerCtx *capvcontext.ControllerContext, prototypeCluster bool, gcInitObjects ...client.Object) *vmware.GuestClusterContext { if prototypeCluster { - cluster := newCluster(ctx.VSphereCluster) - if err := ctx.Client.Create(ctx, cluster); err != nil { + cluster := newCluster(clusterCtx.VSphereCluster) + if err := controllerCtx.Client.Create(ctx, cluster); err != nil { panic(err) } } return &vmware.GuestClusterContext{ - ClusterContext: ctx, + ClusterContext: clusterCtx, GuestClient: NewFakeGuestClusterClient(gcInitObjects...), } } diff --git a/pkg/context/fake/fake_machine_context.go b/pkg/context/fake/fake_machine_context.go index 79818f9638..74bdaa8c4b 100644 --- a/pkg/context/fake/fake_machine_context.go +++ b/pkg/context/fake/fake_machine_context.go @@ -17,36 +17,38 @@ limitations under the License. package fake import ( + "context" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" ) // NewMachineContext returns a fake VIMMachineContext for unit testing // reconcilers with a fake client. -func NewMachineContext(ctx *context.ClusterContext) *context.VIMMachineContext { +func NewMachineContext(clusterCtx *capvcontext.ClusterContext, controllerCtx *capvcontext.ControllerContext) *capvcontext.VIMMachineContext { // Create the machine resources. machine := newMachineV1a4() vsphereMachine := newVSphereMachine(machine) // Add the cluster resources to the fake cluster client. - if err := ctx.Client.Create(ctx, &machine); err != nil { + if err := controllerCtx.Client.Create(context.TODO(), &machine); err != nil { panic(err) } - if err := ctx.Client.Create(ctx, &vsphereMachine); err != nil { + if err := controllerCtx.Client.Create(context.TODO(), &vsphereMachine); err != nil { panic(err) } - return &context.VIMMachineContext{ - BaseMachineContext: &context.BaseMachineContext{ - ControllerContext: ctx.ControllerContext, - Cluster: ctx.Cluster, + return &capvcontext.VIMMachineContext{ + BaseMachineContext: &capvcontext.BaseMachineContext{ + ControllerContext: controllerCtx, + Cluster: clusterCtx.Cluster, Machine: &machine, - Logger: ctx.Logger.WithName(vsphereMachine.Name), + Logger: controllerCtx.Logger.WithName(vsphereMachine.Name), }, - VSphereCluster: ctx.VSphereCluster, + VSphereCluster: clusterCtx.VSphereCluster, VSphereMachine: &vsphereMachine, } } diff --git a/pkg/context/fake/fake_vmware_cluster_context.go b/pkg/context/fake/fake_vmware_cluster_context.go index 570b15ed10..e572da3c9c 100644 --- a/pkg/context/fake/fake_vmware_cluster_context.go +++ b/pkg/context/fake/fake_vmware_cluster_context.go @@ -17,14 +17,16 @@ limitations under the License. package fake import ( + "context" + vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context/vmware" ) // NewVmwareClusterContext returns a fake ClusterContext for unit testing // reconcilers with a fake client. 
-func NewVmwareClusterContext(ctx *context.ControllerContext, namespace string, vsphereCluster *vmwarev1.VSphereCluster) *vmware.ClusterContext { +func NewVmwareClusterContext(controllerCtx *capvcontext.ControllerContext, namespace string, vsphereCluster *vmwarev1.VSphereCluster) *vmware.ClusterContext { // Create the cluster resources. cluster := newClusterV1() if vsphereCluster == nil { @@ -33,16 +35,14 @@ func NewVmwareClusterContext(ctx *context.ControllerContext, namespace string, v } // Add the cluster resources to the fake cluster client. - if err := ctx.Client.Create(ctx, &cluster); err != nil { + if err := controllerCtx.Client.Create(context.TODO(), &cluster); err != nil { panic(err) } - if err := ctx.Client.Create(ctx, vsphereCluster); err != nil { + if err := controllerCtx.Client.Create(context.TODO(), vsphereCluster); err != nil { panic(err) } return &vmware.ClusterContext{ - ControllerContext: ctx, - VSphereCluster: vsphereCluster, - Logger: ctx.Logger.WithName(vsphereCluster.Name), + VSphereCluster: vsphereCluster, } } diff --git a/pkg/context/vmware/cluster_context.go b/pkg/context/vmware/cluster_context.go index 42bccb6fe6..1d23e5293d 100644 --- a/pkg/context/vmware/cluster_context.go +++ b/pkg/context/vmware/cluster_context.go @@ -17,33 +17,30 @@ limitations under the License. package vmware import ( + "context" "fmt" - "github.com/go-logr/logr" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" ) // ClusterContext is a Go context used with a CAPI cluster. type ClusterContext struct { - *context.ControllerContext Cluster *clusterv1.Cluster VSphereCluster *vmwarev1.VSphereCluster PatchHelper *patch.Helper - Logger logr.Logger } -// String returns ControllerManagerName/ControllerName/ClusterAPIVersion/ClusterNamespace/ClusterName. +// String returns ClusterAPIVersion/ClusterNamespace/ClusterName. func (c *ClusterContext) String() string { - return fmt.Sprintf("%s/%s/%s/%s", c.ControllerContext.String(), c.VSphereCluster.APIVersion, c.VSphereCluster.Namespace, c.VSphereCluster.Name) + return fmt.Sprintf("%s %s/%s", c.VSphereCluster.GroupVersionKind(), c.VSphereCluster.Namespace, c.VSphereCluster.Name) } // Patch updates the object and its status on the API server. -func (c *ClusterContext) Patch() error { +func (c *ClusterContext) Patch(ctx context.Context) error { // always update the readyCondition. 
conditions.SetSummary(c.VSphereCluster, conditions.WithConditions( @@ -52,5 +49,5 @@ func (c *ClusterContext) Patch() error { vmwarev1.LoadBalancerReadyCondition, ), ) - return c.PatchHelper.Patch(c, c.VSphereCluster) + return c.PatchHelper.Patch(ctx, c.VSphereCluster) } diff --git a/pkg/context/vmware/machine_context.go b/pkg/context/vmware/machine_context.go index a54332ad96..648e406c37 100644 --- a/pkg/context/vmware/machine_context.go +++ b/pkg/context/vmware/machine_context.go @@ -58,10 +58,8 @@ func (c *SupervisorMachineContext) GetObjectMeta() metav1.ObjectMeta { func (c *SupervisorMachineContext) GetClusterContext() *ClusterContext { return &ClusterContext{ - ControllerContext: c.ControllerContext, - Cluster: c.Cluster, - VSphereCluster: c.VSphereCluster, - Logger: c.GetLogger(), + Cluster: c.Cluster, + VSphereCluster: c.VSphereCluster, } } diff --git a/pkg/manager/network.go b/pkg/manager/network.go index 829e2627af..9e1dd4edaf 100644 --- a/pkg/manager/network.go +++ b/pkg/manager/network.go @@ -17,7 +17,11 @@ limitations under the License. package manager import ( - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + "context" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/cluster-api-provider-vsphere/pkg/services" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/services/network" ) @@ -30,20 +34,22 @@ const ( // GetNetworkProvider will return a network provider instance based on the environment // the cfg is used to initialize a client that talks directly to api-server without using the cache. -func GetNetworkProvider(controllerCtx *context.ControllerManagerContext) (services.NetworkProvider, error) { - switch controllerCtx.NetworkProvider { +func GetNetworkProvider(ctx context.Context, client client.Client, networkProvider string) (services.NetworkProvider, error) { + log := ctrl.LoggerFrom(ctx) + + switch networkProvider { case NSXNetworkProvider: // TODO: disableFirewall not configurable - controllerCtx.Logger.Info("Pick NSX-T network provider") - return network.NsxtNetworkProvider(controllerCtx.Client, "false"), nil + log.Info("Pick NSX-T network provider") + return network.NsxtNetworkProvider(client, "false"), nil case VDSNetworkProvider: - controllerCtx.Logger.Info("Pick NetOp (VDS) network provider") - return network.NetOpNetworkProvider(controllerCtx.Client), nil + log.Info("Pick NetOp (VDS) network provider") + return network.NetOpNetworkProvider(client), nil case DummyLBNetworkProvider: - controllerCtx.Logger.Info("Pick Dummy network provider") + log.Info("Pick Dummy network provider") return network.DummyLBNetworkProvider(), nil default: - controllerCtx.Logger.Info("NetworkProvider not set. Pick Dummy network provider") + log.Info("NetworkProvider not set. Pick Dummy network provider") return network.DummyNetworkProvider(), nil } } diff --git a/pkg/services/interfaces.go b/pkg/services/interfaces.go index 5de7a98e3e..ae7fa466c1 100644 --- a/pkg/services/interfaces.go +++ b/pkg/services/interfaces.go @@ -17,6 +17,8 @@ limitations under the License. 
package services import ( + "context" + vmoprv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -25,42 +27,42 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context/vmware" ) // VSphereMachineService is used for vsphere VM lifecycle and syncing with VSphereMachine types. type VSphereMachineService interface { - FetchVSphereMachine(client client.Client, name types.NamespacedName) (context.MachineContext, error) - FetchVSphereCluster(client client.Client, cluster *clusterv1.Cluster, machineContext context.MachineContext) (context.MachineContext, error) - ReconcileDelete(ctx context.MachineContext) error - SyncFailureReason(ctx context.MachineContext) (bool, error) - ReconcileNormal(ctx context.MachineContext) (bool, error) - GetHostInfo(ctx context.MachineContext) (string, error) + FetchVSphereMachine(client client.Client, name types.NamespacedName) (capvcontext.MachineContext, error) + FetchVSphereCluster(client client.Client, cluster *clusterv1.Cluster, machineContext capvcontext.MachineContext) (capvcontext.MachineContext, error) + ReconcileDelete(ctx capvcontext.MachineContext) error + SyncFailureReason(ctx capvcontext.MachineContext) (bool, error) + ReconcileNormal(ctx capvcontext.MachineContext) (bool, error) + GetHostInfo(ctx capvcontext.MachineContext) (string, error) } // VirtualMachineService is a service for creating/updating/deleting virtual // machines on vSphere. type VirtualMachineService interface { // ReconcileVM reconciles a VM with the intended state. - ReconcileVM(ctx *context.VMContext) (infrav1.VirtualMachine, error) + ReconcileVM(ctx *capvcontext.VMContext) (infrav1.VirtualMachine, error) // DestroyVM powers off and removes a VM from the inventory. - DestroyVM(ctx *context.VMContext) (reconcile.Result, infrav1.VirtualMachine, error) + DestroyVM(ctx *capvcontext.VMContext) (reconcile.Result, infrav1.VirtualMachine, error) } // ControlPlaneEndpointService is a service for reconciling load balanced control plane endpoints. type ControlPlaneEndpointService interface { // ReconcileControlPlaneEndpointService manages the lifecycle of a // control plane endpoint managed by a vmoperator VirtualMachineService - ReconcileControlPlaneEndpointService(ctx *vmware.ClusterContext, netProvider NetworkProvider) (*clusterv1.APIEndpoint, error) + ReconcileControlPlaneEndpointService(ctx context.Context, clusterCtx *vmware.ClusterContext, netProvider NetworkProvider) (*clusterv1.APIEndpoint, error) } // ResourcePolicyService is a service for reconciling a VirtualMachineSetResourcePolicy for a cluster. type ResourcePolicyService interface { // ReconcileResourcePolicy ensures that a VirtualMachineSetResourcePolicy exists for the cluster // Returns the name of a policy if it exists, otherwise returns an error - ReconcileResourcePolicy(ctx *vmware.ClusterContext) (string, error) + ReconcileResourcePolicy(ctx context.Context, clusterCtx *vmware.ClusterContext) (string, error) } // NetworkProvider provision network resources and configures VM based on network type. 
@@ -70,18 +72,18 @@ type NetworkProvider interface { // ProvisionClusterNetwork creates network resource for a given cluster // This operation should be idempotent - ProvisionClusterNetwork(ctx *vmware.ClusterContext) error + ProvisionClusterNetwork(ctx context.Context, clusterCtx *vmware.ClusterContext) error // GetClusterNetworkName returns the name of a valid cluster network if one exists // Returns an empty string if the operation is not supported - GetClusterNetworkName(ctx *vmware.ClusterContext) (string, error) + GetClusterNetworkName(ctx context.Context, clusterCtx *vmware.ClusterContext) (string, error) // GetVMServiceAnnotations returns the annotations, if any, to place on a VM Service. - GetVMServiceAnnotations(ctx *vmware.ClusterContext) (map[string]string, error) + GetVMServiceAnnotations(ctx context.Context, clusterCtx *vmware.ClusterContext) (map[string]string, error) // ConfigureVirtualMachine configures a VM for the particular network - ConfigureVirtualMachine(ctx *vmware.ClusterContext, vm *vmoprv1.VirtualMachine) error + ConfigureVirtualMachine(ctx context.Context, clusterCtx *vmware.ClusterContext, vm *vmoprv1.VirtualMachine) error // VerifyNetworkStatus verifies the status of the network after vnet creation - VerifyNetworkStatus(ctx *vmware.ClusterContext, obj runtime.Object) error + VerifyNetworkStatus(ctx context.Context, clusterCtx *vmware.ClusterContext, obj runtime.Object) error } diff --git a/pkg/services/network/dummy_provider.go b/pkg/services/network/dummy_provider.go index 1144bb04c6..78e5ba85a4 100644 --- a/pkg/services/network/dummy_provider.go +++ b/pkg/services/network/dummy_provider.go @@ -18,6 +18,8 @@ limitations under the License. package network import ( + "context" + vmoprv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha1" "k8s.io/apimachinery/pkg/runtime" @@ -37,23 +39,23 @@ func (np *dummyNetworkProvider) HasLoadBalancer() bool { return false } -func (np *dummyNetworkProvider) ProvisionClusterNetwork(ctx *vmware.ClusterContext) error { +func (np *dummyNetworkProvider) ProvisionClusterNetwork(ctx context.Context, clusterCtx *vmware.ClusterContext) error { return nil } -func (np *dummyNetworkProvider) GetClusterNetworkName(ctx *vmware.ClusterContext) (string, error) { +func (np *dummyNetworkProvider) GetClusterNetworkName(ctx context.Context, clusterCtx *vmware.ClusterContext) (string, error) { return "", nil } -func (np *dummyNetworkProvider) ConfigureVirtualMachine(ctx *vmware.ClusterContext, vm *vmoprv1.VirtualMachine) error { +func (np *dummyNetworkProvider) ConfigureVirtualMachine(ctx context.Context, clusterCtx *vmware.ClusterContext, vm *vmoprv1.VirtualMachine) error { return nil } -func (np *dummyNetworkProvider) GetVMServiceAnnotations(ctx *vmware.ClusterContext) (map[string]string, error) { +func (np *dummyNetworkProvider) GetVMServiceAnnotations(ctx context.Context, clusterCtx *vmware.ClusterContext) (map[string]string, error) { return map[string]string{}, nil } -func (np *dummyNetworkProvider) VerifyNetworkStatus(ctx *vmware.ClusterContext, obj runtime.Object) error { +func (np *dummyNetworkProvider) VerifyNetworkStatus(ctx context.Context, clusterCtx *vmware.ClusterContext, obj runtime.Object) error { return nil } diff --git a/pkg/services/network/netop_provider.go b/pkg/services/network/netop_provider.go index d4922f9afa..4c471dcfff 100644 --- a/pkg/services/network/netop_provider.go +++ b/pkg/services/network/netop_provider.go @@ -17,12 +17,14 @@ limitations under the License. 
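The interface changes above follow the usual Go convention of taking context.Context as the first parameter, while the remaining CAPV cluster context shrinks to a plain data carrier. A minimal sketch of that shape, with invented placeholder types (clusterScope and noopProvider are illustrative, not taken from this repository):

package main

import (
	"context"
	"fmt"

	ctrl "sigs.k8s.io/controller-runtime"
)

// clusterScope stands in for the slimmed-down *vmware.ClusterContext: it only
// carries data, with no embedded controller context and no Logger field.
type clusterScope struct {
	ClusterName string
}

type networkProvider interface {
	ProvisionClusterNetwork(ctx context.Context, scope *clusterScope) error
}

type noopProvider struct{}

func (noopProvider) ProvisionClusterNetwork(ctx context.Context, scope *clusterScope) error {
	// Cancellation and logging both travel through ctx.
	if err := ctx.Err(); err != nil {
		return err
	}
	ctrl.LoggerFrom(ctx).Info("provisioning cluster network", "cluster", scope.ClusterName)
	return nil
}

func main() {
	var np networkProvider = noopProvider{}
	fmt.Println(np.ProvisionClusterNetwork(context.Background(), &clusterScope{ClusterName: "example"}))
}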
package network import ( + "context" "fmt" netopv1 "github.com/vmware-tanzu/net-operator-api/api/v1alpha1" vmoprv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/cluster-api/util/conditions" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" @@ -44,27 +46,29 @@ func (np *netopNetworkProvider) HasLoadBalancer() bool { return true } -func (np *netopNetworkProvider) ProvisionClusterNetwork(ctx *vmware.ClusterContext) error { - conditions.MarkTrue(ctx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition) +func (np *netopNetworkProvider) ProvisionClusterNetwork(_ context.Context, clusterCtx *vmware.ClusterContext) error { + conditions.MarkTrue(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition) return nil } -func (np *netopNetworkProvider) getDefaultClusterNetwork(ctx *vmware.ClusterContext) (*netopv1.Network, error) { - networkWithLabel, err := np.getDefaultClusterNetworkWithLabel(ctx, CAPVDefaultNetworkLabel) +func (np *netopNetworkProvider) getDefaultClusterNetwork(ctx context.Context, clusterCtx *vmware.ClusterContext) (*netopv1.Network, error) { + log := ctrl.LoggerFrom(ctx) + + networkWithLabel, err := np.getDefaultClusterNetworkWithLabel(ctx, clusterCtx, CAPVDefaultNetworkLabel) if networkWithLabel != nil && err == nil { return networkWithLabel, nil } - ctx.Logger.Info("falling back to legacy label to identify default network", "label", legacyDefaultNetworkLabel) - return np.getDefaultClusterNetworkWithLabel(ctx, legacyDefaultNetworkLabel) + log.Info("falling back to legacy label to identify default network", "label", legacyDefaultNetworkLabel) + return np.getDefaultClusterNetworkWithLabel(ctx, clusterCtx, legacyDefaultNetworkLabel) } -func (np *netopNetworkProvider) getDefaultClusterNetworkWithLabel(ctx *vmware.ClusterContext, label string) (*netopv1.Network, error) { +func (np *netopNetworkProvider) getDefaultClusterNetworkWithLabel(ctx context.Context, clusterCtx *vmware.ClusterContext, label string) (*netopv1.Network, error) { labels := map[string]string{ label: "true", } networkList := &netopv1.NetworkList{} - err := np.client.List(ctx, networkList, client.InNamespace(ctx.Cluster.Namespace), client.MatchingLabels(labels)) + err := np.client.List(ctx, networkList, client.InNamespace(clusterCtx.Cluster.Namespace), client.MatchingLabels(labels)) if err != nil { return nil, err } @@ -79,13 +83,13 @@ func (np *netopNetworkProvider) getDefaultClusterNetworkWithLabel(ctx *vmware.Cl } } -func (np *netopNetworkProvider) getClusterNetwork(ctx *vmware.ClusterContext) (*netopv1.Network, error) { +func (np *netopNetworkProvider) getClusterNetwork(ctx context.Context, clusterCtx *vmware.ClusterContext) (*netopv1.Network, error) { // A "NetworkName" can later be added to the Spec, but currently we only have a preselected default. 
- return np.getDefaultClusterNetwork(ctx) + return np.getDefaultClusterNetwork(ctx, clusterCtx) } -func (np *netopNetworkProvider) GetClusterNetworkName(ctx *vmware.ClusterContext) (string, error) { - network, err := np.getClusterNetwork(ctx) +func (np *netopNetworkProvider) GetClusterNetworkName(ctx context.Context, clusterCtx *vmware.ClusterContext) (string, error) { + network, err := np.getClusterNetwork(ctx, clusterCtx) if err != nil { return "", err } @@ -93,8 +97,8 @@ func (np *netopNetworkProvider) GetClusterNetworkName(ctx *vmware.ClusterContext return network.Name, nil } -func (np *netopNetworkProvider) GetVMServiceAnnotations(ctx *vmware.ClusterContext) (map[string]string, error) { - networkName, err := np.GetClusterNetworkName(ctx) +func (np *netopNetworkProvider) GetVMServiceAnnotations(ctx context.Context, clusterCtx *vmware.ClusterContext) (map[string]string, error) { + networkName, err := np.GetClusterNetworkName(ctx, clusterCtx) if err != nil { return nil, err } @@ -102,8 +106,8 @@ func (np *netopNetworkProvider) GetVMServiceAnnotations(ctx *vmware.ClusterConte return map[string]string{NetOpNetworkNameAnnotation: networkName}, nil } -func (np *netopNetworkProvider) ConfigureVirtualMachine(ctx *vmware.ClusterContext, vm *vmoprv1.VirtualMachine) error { - network, err := np.getClusterNetwork(ctx) +func (np *netopNetworkProvider) ConfigureVirtualMachine(ctx context.Context, clusterCtx *vmware.ClusterContext, vm *vmoprv1.VirtualMachine) error { + network, err := np.getClusterNetwork(ctx, clusterCtx) if err != nil { return err } @@ -123,7 +127,7 @@ func (np *netopNetworkProvider) ConfigureVirtualMachine(ctx *vmware.ClusterConte return nil } -func (np *netopNetworkProvider) VerifyNetworkStatus(_ *vmware.ClusterContext, obj runtime.Object) error { +func (np *netopNetworkProvider) VerifyNetworkStatus(_ context.Context, _ *vmware.ClusterContext, obj runtime.Object) error { if _, ok := obj.(*netopv1.Network); !ok { return fmt.Errorf("expected Net Operator Network but got %T", obj) } diff --git a/pkg/services/network/network_test.go b/pkg/services/network/network_test.go index 87cc59a963..049cb85814 100644 --- a/pkg/services/network/network_test.go +++ b/pkg/services/network/network_test.go @@ -17,6 +17,7 @@ limitations under the License. package network import ( + "context" "fmt" . 
"github.com/onsi/ginkgo/v2" @@ -65,7 +66,8 @@ var _ = Describe("Network provider", func() { fakeSNATIP = "192.168.10.2" clusterKind = "Cluster" infraClusterKind = "VSphereCluster" - ctx *vmware.ClusterContext + ctx = context.Background() + clusterCtx *vmware.ClusterContext err error np services.NetworkProvider cluster *clusterv1.Cluster @@ -101,7 +103,7 @@ var _ = Describe("Network provider", func() { Namespace: dummyNs, }, } - ctx = util.CreateClusterContext(cluster, vSphereCluster) + clusterCtx, _ = util.CreateClusterContext(cluster, vSphereCluster) vm = &vmoprv1.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Namespace: dummyNs, @@ -112,7 +114,7 @@ var _ = Describe("Network provider", func() { Context("ConfigureVirtualMachine", func() { JustBeforeEach(func() { - err = np.ConfigureVirtualMachine(ctx, vm) + err = np.ConfigureVirtualMachine(ctx, clusterCtx, vm) }) Context("with dummy network provider", func() { @@ -160,7 +162,7 @@ var _ = Describe("Network provider", func() { }) It("vds network interface already exists", func() { - err = np.ConfigureVirtualMachine(ctx, vm) + err = np.ConfigureVirtualMachine(ctx, clusterCtx, vm) }) }) } @@ -185,7 +187,7 @@ var _ = Describe("Network provider", func() { It("should add nsx-t type network interface", func() { }) It("nsx-t network interface already exists", func() { - err = np.ConfigureVirtualMachine(ctx, vm) + err = np.ConfigureVirtualMachine(ctx, clusterCtx, vm) }) AfterEach(func() { Expect(err).ToNot(HaveOccurred()) @@ -254,6 +256,7 @@ var _ = Describe("Network provider", func() { scheme = runtime.NewScheme() Expect(ncpv1.AddToScheme(scheme)).To(Succeed()) Expect(corev1.AddToScheme(scheme)).To(Succeed()) + Expect(vmwarev1.AddToScheme(scheme)).To(Succeed()) }) Context("with dummy network provider", func() { @@ -261,11 +264,11 @@ var _ = Describe("Network provider", func() { np = DummyNetworkProvider() }) JustBeforeEach(func() { - err = np.ProvisionClusterNetwork(ctx) + err = np.ProvisionClusterNetwork(ctx, clusterCtx) }) It("should succeed", func() { Expect(err).ToNot(HaveOccurred()) - vnet, localerr := np.GetClusterNetworkName(ctx) + vnet, localerr := np.GetClusterNetworkName(ctx, clusterCtx) Expect(localerr).ToNot(HaveOccurred()) Expect(vnet).To(BeEmpty()) }) @@ -280,11 +283,11 @@ var _ = Describe("Network provider", func() { }) JustBeforeEach(func() { // noop for netop - err = np.ProvisionClusterNetwork(ctx) + err = np.ProvisionClusterNetwork(ctx, clusterCtx) }) It("should succeed", func() { Expect(err).ToNot(HaveOccurred()) - Expect(conditions.IsTrue(ctx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) + Expect(conditions.IsTrue(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) }) }) @@ -293,14 +296,14 @@ var _ = Describe("Network provider", func() { client = fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(runtimeObjs...).Build() nsxNp, _ = NsxtNetworkProvider(client, "true").(*nsxtNetworkProvider) np = nsxNp - err = np.ProvisionClusterNetwork(ctx) + err = np.ProvisionClusterNetwork(ctx, clusterCtx) }) It("should not update vnet with whitelist_source_ranges in spec", func() { Expect(err).ToNot(HaveOccurred()) - vnet, localerr := np.GetClusterNetworkName(ctx) + vnet, localerr := np.GetClusterNetworkName(ctx, clusterCtx) Expect(localerr).ToNot(HaveOccurred()) - Expect(vnet).To(Equal(GetNSXTVirtualNetworkName(ctx.VSphereCluster.Name))) + Expect(vnet).To(Equal(GetNSXTVirtualNetworkName(clusterCtx.VSphereCluster.Name))) createdVNET := &ncpv1.VirtualNetwork{} err = client.Get(ctx, 
apitypes.NamespacedName{ @@ -315,10 +318,10 @@ var _ = Describe("Network provider", func() { // The organization of these tests are inverted so easiest to put this here because // NCP will eventually be removed. It("GetVMServiceAnnotations", func() { - annotations, err := np.GetVMServiceAnnotations(ctx) + annotations, err := np.GetVMServiceAnnotations(ctx, clusterCtx) Expect(err).ToNot(HaveOccurred()) - Expect(annotations).To(HaveKeyWithValue("ncp.vmware.com/virtual-network-name", GetNSXTVirtualNetworkName(ctx.VSphereCluster.Name))) - Expect(conditions.IsTrue(ctx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) + Expect(annotations).To(HaveKeyWithValue("ncp.vmware.com/virtual-network-name", GetNSXTVirtualNetworkName(clusterCtx.VSphereCluster.Name))) + Expect(conditions.IsTrue(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) }) }) @@ -328,14 +331,14 @@ var _ = Describe("Network provider", func() { client = fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(configmapObj, systemNamespaceObj).Build() nsxNp, _ = NsxtNetworkProvider(client, "true").(*nsxtNetworkProvider) np = nsxNp - err = np.ProvisionClusterNetwork(ctx) + err = np.ProvisionClusterNetwork(ctx, clusterCtx) }) It("should create vnet without whitelist_source_ranges in spec", func() { Expect(err).ToNot(HaveOccurred()) - vnet, localerr := np.GetClusterNetworkName(ctx) + vnet, localerr := np.GetClusterNetworkName(ctx, clusterCtx) Expect(localerr).ToNot(HaveOccurred()) - Expect(vnet).To(Equal(GetNSXTVirtualNetworkName(ctx.VSphereCluster.Name))) + Expect(vnet).To(Equal(GetNSXTVirtualNetworkName(clusterCtx.VSphereCluster.Name))) createdVNET := &ncpv1.VirtualNetwork{} err = client.Get(ctx, apitypes.NamespacedName{ @@ -345,7 +348,7 @@ var _ = Describe("Network provider", func() { Expect(err).ToNot(HaveOccurred()) Expect(createdVNET.Spec.WhitelistSourceRanges).To(BeEmpty()) - Expect(conditions.IsTrue(ctx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) + Expect(conditions.IsTrue(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) }) }) @@ -354,14 +357,14 @@ var _ = Describe("Network provider", func() { client = fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(runtimeObjs...).Build() nsxNp, _ = NsxtNetworkProvider(client, "false").(*nsxtNetworkProvider) np = nsxNp - err = np.ProvisionClusterNetwork(ctx) + err = np.ProvisionClusterNetwork(ctx, clusterCtx) }) It("should update vnet with whitelist_source_ranges in spec", func() { Expect(err).ToNot(HaveOccurred()) - vnet, localerr := np.GetClusterNetworkName(ctx) + vnet, localerr := np.GetClusterNetworkName(ctx, clusterCtx) Expect(localerr).ToNot(HaveOccurred()) - Expect(vnet).To(Equal(GetNSXTVirtualNetworkName(ctx.VSphereCluster.Name))) + Expect(vnet).To(Equal(GetNSXTVirtualNetworkName(clusterCtx.VSphereCluster.Name))) // Verify WhitelistSourceRanges have been updated createdVNET := &ncpv1.VirtualNetwork{} @@ -372,7 +375,7 @@ var _ = Describe("Network provider", func() { Expect(err).ToNot(HaveOccurred()) Expect(createdVNET.Spec.WhitelistSourceRanges).To(Equal(fakeSNATIP + "/32")) - Expect(conditions.IsTrue(ctx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) + Expect(conditions.IsTrue(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) }) }) @@ -382,14 +385,14 @@ var _ = Describe("Network provider", func() { client = fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(configmapObj, systemNamespaceObj).Build() nsxNp, 
_ = NsxtNetworkProvider(client, "false").(*nsxtNetworkProvider) np = nsxNp - err = np.ProvisionClusterNetwork(ctx) + err = np.ProvisionClusterNetwork(ctx, clusterCtx) }) It("should create new vnet with whitelist_source_ranges in spec", func() { Expect(err).ToNot(HaveOccurred()) - vnet, localerr := np.GetClusterNetworkName(ctx) + vnet, localerr := np.GetClusterNetworkName(ctx, clusterCtx) Expect(localerr).ToNot(HaveOccurred()) - Expect(vnet).To(Equal(GetNSXTVirtualNetworkName(ctx.VSphereCluster.Name))) + Expect(vnet).To(Equal(GetNSXTVirtualNetworkName(clusterCtx.VSphereCluster.Name))) // Verify WhitelistSourceRanges have been updated createdVNET := &ncpv1.VirtualNetwork{} @@ -401,7 +404,7 @@ var _ = Describe("Network provider", func() { Expect(createdVNET.Spec.WhitelistSourceRanges).To(Equal(fakeSNATIP + "/32")) // err is not empty, but it is because vnetObj does not have status mocked in this test - Expect(conditions.IsTrue(ctx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) + Expect(conditions.IsTrue(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) }) }) @@ -412,14 +415,14 @@ var _ = Describe("Network provider", func() { client = fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(runtimeObjs...).Build() nsxNp, _ = NsxtNetworkProvider(client, "false").(*nsxtNetworkProvider) np = nsxNp - err = np.ProvisionClusterNetwork(ctx) + err = np.ProvisionClusterNetwork(ctx, clusterCtx) }) It("should not update vnet with whitelist_source_ranges in spec", func() { Expect(err).ToNot(HaveOccurred()) - vnet, localerr := np.GetClusterNetworkName(ctx) + vnet, localerr := np.GetClusterNetworkName(ctx, clusterCtx) Expect(localerr).ToNot(HaveOccurred()) - Expect(vnet).To(Equal(GetNSXTVirtualNetworkName(ctx.VSphereCluster.Name))) + Expect(vnet).To(Equal(GetNSXTVirtualNetworkName(clusterCtx.VSphereCluster.Name))) // Verify WhitelistSourceRanges is not included createdVNET := &ncpv1.VirtualNetwork{} @@ -431,7 +434,7 @@ var _ = Describe("Network provider", func() { Expect(createdVNET.Spec.WhitelistSourceRanges).To(BeEmpty()) // err is not empty, but it is because vnetObj does not have status mocked in this test - Expect(conditions.IsTrue(ctx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) + Expect(conditions.IsTrue(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) }) AfterEach(func() { @@ -459,17 +462,17 @@ var _ = Describe("Network provider", func() { {Type: "Ready", Status: "False", Reason: testVnetNotRealizedReason, Message: testVnetNotRealizedMessage}, }, } - vnetObj = createUnReadyNsxtVirtualNetwork(ctx, status) + vnetObj = createUnReadyNsxtVirtualNetwork(clusterCtx, status) client = fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(vnetObj).Build() nsxNp, _ = NsxtNetworkProvider(client, "false").(*nsxtNetworkProvider) np = nsxNp - err = np.VerifyNetworkStatus(ctx, vnetObj) + err = np.VerifyNetworkStatus(ctx, clusterCtx, vnetObj) expectedErrorMessage := fmt.Sprintf("virtual network ready status is: '%s' in cluster %s. 
reason: %s, message: %s", "False", apitypes.NamespacedName{Namespace: dummyNs, Name: dummyCluster}, testVnetNotRealizedReason, testVnetNotRealizedMessage) Expect(err).To(MatchError(expectedErrorMessage)) - Expect(conditions.IsFalse(ctx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) + Expect(conditions.IsFalse(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) }) }) }) @@ -498,7 +501,7 @@ var _ = Describe("Network provider", func() { Context("with default network", func() { It("Should return expected annotations", func() { - annotations, err := np.GetVMServiceAnnotations(ctx) + annotations, err := np.GetVMServiceAnnotations(ctx, clusterCtx) Expect(err).ToNot(HaveOccurred()) Expect(annotations).To(HaveKeyWithValue("netoperator.vmware.com/network-name", defaultNetwork.Name)) }) diff --git a/pkg/services/network/nsxt_provider.go b/pkg/services/network/nsxt_provider.go index d55ea4a888..958f297b1f 100644 --- a/pkg/services/network/nsxt_provider.go +++ b/pkg/services/network/nsxt_provider.go @@ -17,6 +17,7 @@ limitations under the License. package network import ( + "context" "fmt" "github.com/pkg/errors" @@ -27,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/types" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -74,21 +76,22 @@ func (np *nsxtNetworkProvider) verifyNSXTVirtualNetworkStatus(ctx *vmware.Cluste return nil } -func (np *nsxtNetworkProvider) VerifyNetworkStatus(ctx *vmware.ClusterContext, obj runtime.Object) error { +func (np *nsxtNetworkProvider) VerifyNetworkStatus(_ context.Context, clusterCtx *vmware.ClusterContext, obj runtime.Object) error { vnet, ok := obj.(*ncpv1.VirtualNetwork) if !ok { return fmt.Errorf("expected NCP VirtualNetwork but got %T", obj) } - return np.verifyNSXTVirtualNetworkStatus(ctx, vnet) + return np.verifyNSXTVirtualNetworkStatus(clusterCtx, vnet) } -func (np *nsxtNetworkProvider) ProvisionClusterNetwork(ctx *vmware.ClusterContext) error { - cluster := ctx.VSphereCluster - clusterKey := types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace} +func (np *nsxtNetworkProvider) ProvisionClusterNetwork(ctx context.Context, clusterCtx *vmware.ClusterContext) error { + log := ctrl.LoggerFrom(ctx) - ctx.Logger.V(2).Info("Provisioning ", "vnet", GetNSXTVirtualNetworkName(cluster.Name), "namespace", cluster.Namespace) - defer ctx.Logger.V(2).Info("Finished provisioning", "vnet", GetNSXTVirtualNetworkName(cluster.Name), "namespace", cluster.Namespace) + cluster := clusterCtx.VSphereCluster + + log.V(2).Info("Provisioning ", "vnet", GetNSXTVirtualNetworkName(cluster.Name)) + defer log.V(2).Info("Finished provisioning", "vnet", GetNSXTVirtualNetworkName(cluster.Name)) vnet := &ncpv1.VirtualNetwork{ ObjectMeta: metav1.ObjectMeta{ @@ -102,7 +105,7 @@ func (np *nsxtNetworkProvider) ProvisionClusterNetwork(ctx *vmware.ClusterContex if np.disableFW != "true" && vnet.Spec.WhitelistSourceRanges == "" { supportFW, err := util.NCPSupportFW(ctx, np.client) if err != nil { - ctx.Logger.Error(err, "failed to check if NCP supports firewall rules enforcement on GC T1 router") + log.Error(err, "failed to check if NCP supports firewall rules enforcement on GC T1 router") return err } // specify whitelist_source_ranges if needed and if NCP supports it @@ -110,11 +113,10 @@ func (np *nsxtNetworkProvider) 
ProvisionClusterNetwork(ctx *vmware.ClusterContex // Find system namespace snat ip systemNSSnatIP, err := util.GetNamespaceNetSnatIP(ctx, np.client, SystemNamespace) if err != nil { - ctx.Logger.Error(err, "failed to get Snat IP for kube-system") + log.Error(err, "failed to get Snat IP for kube-system") return err } - ctx.Logger.V(4).Info("got system namespace snat ip", - "cluster", clusterKey, "ip", systemNSSnatIP) + log.V(4).Info("got system namespace snat ip", "ip", systemNSSnatIP) // WhitelistSourceRanges accept cidrs only vnet.Spec.WhitelistSourceRanges = systemNSSnatIP + "/32" @@ -122,15 +124,15 @@ func (np *nsxtNetworkProvider) ProvisionClusterNetwork(ctx *vmware.ClusterContex } if err := ctrlutil.SetOwnerReference( - ctx.VSphereCluster, + clusterCtx.VSphereCluster, vnet, - ctx.Scheme, + np.client.Scheme(), ); err != nil { return errors.Wrapf( err, "error setting %s/%s as owner of %s/%s", - ctx.VSphereCluster.Namespace, - ctx.VSphereCluster.Name, + clusterCtx.VSphereCluster.Namespace, + clusterCtx.VSphereCluster.Name, vnet.Namespace, vnet.Name, ) @@ -139,18 +141,18 @@ func (np *nsxtNetworkProvider) ProvisionClusterNetwork(ctx *vmware.ClusterContex return nil }) if err != nil { - conditions.MarkFalse(ctx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition, vmwarev1.ClusterNetworkProvisionFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) - ctx.Logger.V(2).Info("Failed to provision network", "cluster", clusterKey) + conditions.MarkFalse(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition, vmwarev1.ClusterNetworkProvisionFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + log.V(2).Info("Failed to provision network") return err } - return np.verifyNSXTVirtualNetworkStatus(ctx, vnet) + return np.verifyNSXTVirtualNetworkStatus(clusterCtx, vnet) } -// Returns the name of a valid cluster network if one exists. -func (np *nsxtNetworkProvider) GetClusterNetworkName(ctx *vmware.ClusterContext) (string, error) { +// GetClusterNetworkName returns the name of a valid cluster network if one exists. +func (np *nsxtNetworkProvider) GetClusterNetworkName(ctx context.Context, clusterCtx *vmware.ClusterContext) (string, error) { vnet := &ncpv1.VirtualNetwork{} - cluster := ctx.VSphereCluster + cluster := clusterCtx.VSphereCluster namespacedName := types.NamespacedName{ Namespace: cluster.Namespace, Name: GetNSXTVirtualNetworkName(cluster.Name), @@ -161,8 +163,8 @@ func (np *nsxtNetworkProvider) GetClusterNetworkName(ctx *vmware.ClusterContext) return namespacedName.Name, nil } -func (np *nsxtNetworkProvider) GetVMServiceAnnotations(ctx *vmware.ClusterContext) (map[string]string, error) { - vnetName, err := np.GetClusterNetworkName(ctx) +func (np *nsxtNetworkProvider) GetVMServiceAnnotations(ctx context.Context, clusterCtx *vmware.ClusterContext) (map[string]string, error) { + vnetName, err := np.GetClusterNetworkName(ctx, clusterCtx) if err != nil { return nil, err } @@ -171,8 +173,8 @@ func (np *nsxtNetworkProvider) GetVMServiceAnnotations(ctx *vmware.ClusterContex } // ConfigureVirtualMachine configures a VirtualMachine object based on the networking configuration. 
-func (np *nsxtNetworkProvider) ConfigureVirtualMachine(ctx *vmware.ClusterContext, vm *vmoprv1.VirtualMachine) error { - nsxtClusterNetworkName := GetNSXTVirtualNetworkName(ctx.VSphereCluster.Name) +func (np *nsxtNetworkProvider) ConfigureVirtualMachine(_ context.Context, clusterCtx *vmware.ClusterContext, vm *vmoprv1.VirtualMachine) error { + nsxtClusterNetworkName := GetNSXTVirtualNetworkName(clusterCtx.VSphereCluster.Name) for _, vnif := range vm.Spec.NetworkInterfaces { if vnif.NetworkType == NSXTTypeNetwork && vnif.NetworkName == nsxtClusterNetworkName { // expected network interface is already found diff --git a/pkg/services/vimmachine_test.go b/pkg/services/vimmachine_test.go index 7f4abf3f22..51d74c6c94 100644 --- a/pkg/services/vimmachine_test.go +++ b/pkg/services/vimmachine_test.go @@ -67,7 +67,7 @@ var _ = Describe("VimMachineService_GenerateOverrideFunc", func() { BeforeEach(func() { controllerCtx = fake.NewControllerContext(fake.NewControllerManagerContext(deplZone("one"), deplZone("two"), failureDomain("one"), failureDomain("two"))) - machineCtx = fake.NewMachineContext(fake.NewClusterContext(controllerCtx)) + machineCtx = fake.NewMachineContext(fake.NewClusterContext(controllerCtx), controllerCtx) vimMachineService = &VimMachineService{} }) @@ -215,7 +215,7 @@ var _ = Describe("VimMachineService_GetHostInfo", func() { Context("When VMProvisioned Condition is set", func() { BeforeEach(func() { controllerCtx = fake.NewControllerContext(fake.NewControllerManagerContext(getVSphereVM(hostAddr, corev1.ConditionTrue))) - machineCtx = fake.NewMachineContext(fake.NewClusterContext(controllerCtx)) + machineCtx = fake.NewMachineContext(fake.NewClusterContext(controllerCtx), controllerCtx) }) It("Fetches host address from the VSphereVM object", func() { host, err := vimMachineService.GetHostInfo(machineCtx) @@ -227,7 +227,7 @@ var _ = Describe("VimMachineService_GetHostInfo", func() { Context("When VMProvisioned Condition is unset", func() { BeforeEach(func() { controllerCtx = fake.NewControllerContext(fake.NewControllerManagerContext(getVSphereVM(hostAddr, corev1.ConditionFalse))) - machineCtx = fake.NewMachineContext(fake.NewClusterContext(controllerCtx)) + machineCtx = fake.NewMachineContext(fake.NewClusterContext(controllerCtx), controllerCtx) }) It("returns empty string", func() { host, err := vimMachineService.GetHostInfo(machineCtx) @@ -266,7 +266,7 @@ var _ = Describe("VimMachineService_createOrPatchVSphereVM", func() { } controllerCtx = fake.NewControllerContext(fake.NewControllerManagerContext(getVSphereVM(hostAddr, corev1.ConditionTrue))) - machineCtx = fake.NewMachineContext(fake.NewClusterContext(controllerCtx)) + machineCtx = fake.NewMachineContext(fake.NewClusterContext(controllerCtx), controllerCtx) machineCtx.Machine.SetName(fakeLongClusterName) Context("When VSphereMachine OS is Windows", func() { diff --git a/pkg/services/vmoperator/control_plane_endpoint.go b/pkg/services/vmoperator/control_plane_endpoint.go index a7ea39eef5..5aa5527f88 100644 --- a/pkg/services/vmoperator/control_plane_endpoint.go +++ b/pkg/services/vmoperator/control_plane_endpoint.go @@ -17,14 +17,17 @@ limitations under the License. 
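With no Scheme available on a context struct anymore, both the NSX-T provider above and the vm-operator services below wire owner references from the injected client's scheme inside CreateOrPatch, and log object references with klog.KRef. A small self-contained sketch of that combination, using a placeholder ConfigMap and the fake client (none of these objects exist in this provider):

package main

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/klog/v2"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
	ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// ensureChild creates or patches a child object owned by owner. The scheme for
// the owner reference comes from the client itself (c.Scheme()), so no extra
// context struct has to carry it.
func ensureChild(ctx context.Context, c client.Client, owner client.Object) error {
	log := ctrl.LoggerFrom(ctx)

	child := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: owner.GetNamespace(),
			Name:      owner.GetName() + "-child",
		},
	}

	_, err := ctrlutil.CreateOrPatch(ctx, c, child, func() error {
		child.Data = map[string]string{"example": "true"}
		return ctrlutil.SetOwnerReference(owner, child, c.Scheme())
	})
	if err != nil {
		return err
	}

	// klog.KRef renders a compact namespace/name value for structured logs.
	log.Info("Ensured child object", "ConfigMap", klog.KRef(child.Namespace, child.Name))
	return nil
}

func main() {
	scheme := runtime.NewScheme()
	if err := corev1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	c := fake.NewClientBuilder().WithScheme(scheme).Build()

	owner := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "owner"}}
	ctx := ctrl.LoggerInto(context.Background(), klog.Background())
	if err := ensureChild(ctx, c, owner); err != nil {
		panic(err)
	}
}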
package vmoperator import ( + "context" "fmt" "github.com/pkg/errors" vmoprv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -52,37 +55,40 @@ const ( ) // CPService represents the ability to reconcile a ControlPlaneEndpoint. -type CPService struct{} +type CPService struct { + Client client.Client +} // ReconcileControlPlaneEndpointService manages the lifecycle of a control plane endpoint managed by a vmoperator VirtualMachineService. -func (s CPService) ReconcileControlPlaneEndpointService(ctx *vmware.ClusterContext, netProvider services.NetworkProvider) (*clusterv1.APIEndpoint, error) { - ctx.Logger.V(4).Info("Reconciling control plane VirtualMachineService for cluster") +func (s *CPService) ReconcileControlPlaneEndpointService(ctx context.Context, clusterCtx *vmware.ClusterContext, netProvider services.NetworkProvider) (*clusterv1.APIEndpoint, error) { + log := ctrl.LoggerFrom(ctx) + log.V(4).Info("Reconciling control plane VirtualMachineService for cluster") // If the NetworkProvider does not support a load balancer, this should be a no-op if !netProvider.HasLoadBalancer() { return nil, nil } - vmService, err := s.getVMControlPlaneService(ctx) + vmService, err := s.getVMControlPlaneService(ctx, clusterCtx) if err != nil { if !apierrors.IsNotFound(err) { err = errors.Wrapf(err, "failed to check if VirtualMachineService exists") - conditions.MarkFalse(ctx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition, vmwarev1.LoadBalancerCreationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + conditions.MarkFalse(clusterCtx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition, vmwarev1.LoadBalancerCreationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) return nil, err } // Get the provider annotations for the ControlPlane Service. 
- annotations, err := netProvider.GetVMServiceAnnotations(ctx) + annotations, err := netProvider.GetVMServiceAnnotations(ctx, clusterCtx) if err != nil { err = errors.Wrapf(err, "failed to get provider VirtualMachineService annotations") - conditions.MarkFalse(ctx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition, vmwarev1.LoadBalancerCreationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + conditions.MarkFalse(clusterCtx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition, vmwarev1.LoadBalancerCreationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) return nil, err } - vmService, err = s.createVMControlPlaneService(ctx, annotations) + vmService, err = s.createVMControlPlaneService(ctx, clusterCtx, annotations) if err != nil { err = errors.Wrapf(err, "failed to create VirtualMachineService") - conditions.MarkFalse(ctx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition, vmwarev1.LoadBalancerCreationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + conditions.MarkFalse(clusterCtx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition, vmwarev1.LoadBalancerCreationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) return nil, err } } @@ -91,18 +97,18 @@ func (s CPService) ReconcileControlPlaneEndpointService(ctx *vmware.ClusterConte vip, err := getVMServiceVIP(vmService) if err != nil { err = errors.Wrapf(err, "VirtualMachineService LB does not yet have VIP assigned") - conditions.MarkFalse(ctx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition, vmwarev1.WaitingForLoadBalancerIPReason, clusterv1.ConditionSeverityInfo, err.Error()) + conditions.MarkFalse(clusterCtx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition, vmwarev1.WaitingForLoadBalancerIPReason, clusterv1.ConditionSeverityInfo, err.Error()) return nil, err } cpEndpoint, err := getAPIEndpointFromVIP(vmService, vip) if err != nil { err = errors.Wrapf(err, "VirtualMachineService LB does not have an apiserver endpoint") - conditions.MarkFalse(ctx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition, vmwarev1.WaitingForLoadBalancerIPReason, clusterv1.ConditionSeverityWarning, err.Error()) + conditions.MarkFalse(clusterCtx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition, vmwarev1.WaitingForLoadBalancerIPReason, clusterv1.ConditionSeverityWarning, err.Error()) return nil, err } - conditions.MarkTrue(ctx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition) + conditions.MarkTrue(clusterCtx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition) return cpEndpoint, nil } @@ -141,13 +147,13 @@ func newVirtualMachineService(ctx *vmware.ClusterContext) *vmoprv1.VirtualMachin } } -func (s CPService) createVMControlPlaneService(ctx *vmware.ClusterContext, annotations map[string]string) (*vmoprv1.VirtualMachineService, error) { +func (s *CPService) createVMControlPlaneService(ctx context.Context, clusterCtx *vmware.ClusterContext, annotations map[string]string) (*vmoprv1.VirtualMachineService, error) { // Note that the current implementation will only create a VirtualMachineService for a load balanced endpoint serviceType := vmoprv1.VirtualMachineServiceTypeLoadBalancer - vmService := newVirtualMachineService(ctx) + vmService := newVirtualMachineService(clusterCtx) - _, err := ctrlutil.CreateOrPatch(ctx, ctx.Client, vmService, func() error { + _, err := ctrlutil.CreateOrPatch(ctx, s.Client, vmService, func() error { if vmService.Annotations == nil { vmService.Annotations = annotations } else { @@ -166,19 +172,19 @@ func (s CPService) createVMControlPlaneService(ctx *vmware.ClusterContext, 
annot TargetPort: defaultAPIBindPort, }, }, - Selector: clusterRoleVMLabels(ctx, true), + Selector: clusterRoleVMLabels(clusterCtx, true), } if err := ctrlutil.SetOwnerReference( - ctx.VSphereCluster, + clusterCtx.VSphereCluster, vmService, - ctx.Scheme, + s.Client.Scheme(), ); err != nil { return errors.Wrapf( err, "error setting %s/%s as owner of %s/%s", - ctx.VSphereCluster.Namespace, - ctx.VSphereCluster.Name, + clusterCtx.VSphereCluster.Namespace, + clusterCtx.VSphereCluster.Name, vmService.Namespace, vmService.Name, ) @@ -193,18 +199,20 @@ func (s CPService) createVMControlPlaneService(ctx *vmware.ClusterContext, annot return vmService, nil } -func (s CPService) getVMControlPlaneService(ctx *vmware.ClusterContext) (*vmoprv1.VirtualMachineService, error) { +func (s *CPService) getVMControlPlaneService(ctx context.Context, clusterCtx *vmware.ClusterContext) (*vmoprv1.VirtualMachineService, error) { + log := ctrl.LoggerFrom(ctx) + vmService := &vmoprv1.VirtualMachineService{} - vmServiceName := client.ObjectKey{ - Namespace: ctx.Cluster.Namespace, - Name: controlPlaneVMServiceName(ctx), + vmServiceKey := client.ObjectKey{ + Namespace: clusterCtx.Cluster.Namespace, + Name: controlPlaneVMServiceName(clusterCtx), } - if err := ctx.Client.Get(ctx, vmServiceName, vmService); err != nil { + if err := s.Client.Get(ctx, vmServiceKey, vmService); err != nil { if !apierrors.IsNotFound(err) { - return nil, fmt.Errorf("failed to get VirtualMachineService %s: %v", vmServiceName.Name, err) + return nil, fmt.Errorf("failed to get VirtualMachineService %s: %v", vmServiceKey.Name, err) } - ctx.Logger.V(2).Info("VirtualMachineService was not found", "cluster", ctx.Cluster.Name, "service", vmServiceName.Name) + log.Info("VirtualMachineService was not found", "VirtualMachineService", klog.KRef(vmServiceKey.Namespace, vmServiceKey.Name)) return nil, err } diff --git a/pkg/services/vmoperator/control_plane_endpoint_test.go b/pkg/services/vmoperator/control_plane_endpoint_test.go index 1e8c5b50d3..ea7236cb49 100644 --- a/pkg/services/vmoperator/control_plane_endpoint_test.go +++ b/pkg/services/vmoperator/control_plane_endpoint_test.go @@ -17,6 +17,8 @@ limitations under the License. package vmoperator import ( + "context" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" netopv1 "github.com/vmware-tanzu/net-operator-api/api/v1alpha1" @@ -28,20 +30,22 @@ import ( "k8s.io/apimachinery/pkg/types" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context/vmware" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/services/network" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/util" ) -func getVirtualMachineService(_ CPService, ctx *vmware.ClusterContext) *vmoprv1.VirtualMachineService { - vms := newVirtualMachineService(ctx) +func getVirtualMachineService(ctx context.Context, clusterCtx *vmware.ClusterContext, c ctrlclient.Client, _ CPService) *vmoprv1.VirtualMachineService { + vms := newVirtualMachineService(clusterCtx) nsname := types.NamespacedName{ Namespace: vms.Namespace, Name: vms.Name, } - err := ctx.Client.Get(ctx, nsname, vms) + err := c.Get(ctx, nsname, vms) if apierrors.IsNotFound(err) { return nil } @@ -49,34 +53,34 @@ func getVirtualMachineService(_ CPService, ctx *vmware.ClusterContext) *vmoprv1. 
return vms } -func createVnet(ctx *vmware.ClusterContext) { +func createVnet(ctx context.Context, clusterCtx *vmware.ClusterContext, c ctrlclient.Client) { vnet := &ncpv1.VirtualNetwork{ ObjectMeta: metav1.ObjectMeta{ - Namespace: ctx.VSphereCluster.Namespace, - Name: network.GetNSXTVirtualNetworkName(ctx.VSphereCluster.Name), + Namespace: clusterCtx.VSphereCluster.Namespace, + Name: network.GetNSXTVirtualNetworkName(clusterCtx.VSphereCluster.Name), }, } - Expect(ctx.Client.Create(ctx, vnet)).To(Succeed()) + Expect(c.Create(ctx, vnet)).To(Succeed()) } -func createDefaultNetwork(ctx *vmware.ClusterContext) { +func createDefaultNetwork(ctx context.Context, clusterCtx *vmware.ClusterContext, c ctrlclient.Client) { defaultNetwork := &netopv1.Network{ ObjectMeta: metav1.ObjectMeta{ Name: "dummy-network", - Namespace: ctx.VSphereCluster.Namespace, + Namespace: clusterCtx.VSphereCluster.Namespace, Labels: map[string]string{network.CAPVDefaultNetworkLabel: "true"}, }, Spec: netopv1.NetworkSpec{ Type: netopv1.NetworkTypeVDS, }, } - Expect(ctx.Client.Create(ctx, defaultNetwork)).To(Succeed()) + Expect(c.Create(ctx, defaultNetwork)).To(Succeed()) } -func updateVMServiceWithVIP(cpService CPService, ctx *vmware.ClusterContext, vip string) { - vmService := getVirtualMachineService(cpService, ctx) +func updateVMServiceWithVIP(ctx context.Context, clusterCtx *vmware.ClusterContext, c ctrlclient.Client, cpService CPService, vip string) { + vmService := getVirtualMachineService(ctx, clusterCtx, c, cpService) vmService.Status.LoadBalancer.Ingress = []vmoprv1.LoadBalancerIngress{{IP: vip}} - err := ctx.Client.Status().Update(ctx, vmService) + err := c.Status().Update(ctx, vmService) Expect(err).ShouldNot(HaveOccurred()) } @@ -101,12 +105,15 @@ var _ = Describe("ControlPlaneEndpoint Tests", func() { cluster *clusterv1.Cluster vsphereCluster *vmwarev1.VSphereCluster - ctx *vmware.ClusterContext + ctx = context.Background() + clusterCtx *vmware.ClusterContext + controllerCtx *capvcontext.ControllerContext + c ctrlclient.Client apiEndpoint *clusterv1.APIEndpoint vms *vmoprv1.VirtualMachineService - cpService = CPService{} + cpService CPService ) BeforeEach(func() { @@ -119,8 +126,12 @@ var _ = Describe("ControlPlaneEndpoint Tests", func() { // Create all necessary dependencies cluster = util.CreateCluster(clusterName) vsphereCluster = util.CreateVSphereCluster(clusterName) - ctx = util.CreateClusterContext(cluster, vsphereCluster) - expectedClusterRoleVMLabels = clusterRoleVMLabels(ctx, true) + clusterCtx, controllerCtx = util.CreateClusterContext(cluster, vsphereCluster) + c = controllerCtx.Client + expectedClusterRoleVMLabels = clusterRoleVMLabels(clusterCtx, true) + cpService = CPService{ + Client: c, + } }) Context("Reconcile ControlPlaneEndpointService", func() { @@ -133,7 +144,7 @@ var _ = Describe("ControlPlaneEndpoint Tests", func() { Expect(apiEndpoint.Port).To(BeEquivalentTo(expectedPort)) } - vms = getVirtualMachineService(cpService, ctx) + vms = getVirtualMachineService(ctx, clusterCtx, c, cpService) Expect(vms != nil).Should(Equal(expectVMS)) if vms != nil { Expect(vms.Spec.Type).To(Equal(expectedType)) @@ -149,7 +160,7 @@ var _ = Describe("ControlPlaneEndpoint Tests", func() { } for _, expectedCondition := range expectedConditions { - c := conditions.Get(ctx.VSphereCluster, expectedCondition.Type) + c := conditions.Get(clusterCtx.VSphereCluster, expectedCondition.Type) Expect(c).NotTo(BeNil()) Expect(c.Status).To(Equal(expectedCondition.Status)) Expect(c.Reason).To(Equal(expectedCondition.Reason)) 
@@ -166,8 +177,8 @@ var _ = Describe("ControlPlaneEndpoint Tests", func() { expectReconcileError = false expectAPIEndpoint = false expectVMS = false - apiEndpoint, err = cpService.ReconcileControlPlaneEndpointService(ctx, network.DummyNetworkProvider()) - Expect(conditions.Get(ctx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition)).To(BeNil()) + apiEndpoint, err = cpService.ReconcileControlPlaneEndpointService(ctx, clusterCtx, network.DummyNetworkProvider()) + Expect(conditions.Get(clusterCtx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition)).To(BeNil()) verifyOutput() }) @@ -175,7 +186,7 @@ var _ = Describe("ControlPlaneEndpoint Tests", func() { expectReconcileError = true // VirtualMachineService LB does not yet have VIP assigned expectVMS = true expectedType = vmoprv1.VirtualMachineServiceTypeLoadBalancer - apiEndpoint, err = cpService.ReconcileControlPlaneEndpointService(ctx, network.DummyLBNetworkProvider()) + apiEndpoint, err = cpService.ReconcileControlPlaneEndpointService(ctx, clusterCtx, network.DummyLBNetworkProvider()) verifyOutput() }) @@ -190,7 +201,7 @@ var _ = Describe("ControlPlaneEndpoint Tests", func() { // The NetOp network provider looks a Network. If one does not exist, it will fail. By("NetOp NetworkProvider has no Network") - netOpProvider := network.NetOpNetworkProvider(ctx.Client) + netOpProvider := network.NetOpNetworkProvider(c) // we expect the reconciliation fail because lack of bootstrap data expectedConditions = append(expectedConditions, clusterv1.Condition{ Type: vmwarev1.LoadBalancerReadyCondition, @@ -198,7 +209,7 @@ var _ = Describe("ControlPlaneEndpoint Tests", func() { Reason: vmwarev1.LoadBalancerCreationFailedReason, Message: noNetworkFailure, }) - apiEndpoint, err = cpService.ReconcileControlPlaneEndpointService(ctx, netOpProvider) + apiEndpoint, err = cpService.ReconcileControlPlaneEndpointService(ctx, clusterCtx, netOpProvider) verifyOutput() // If a Network is present, a VirtualMachineService should be created @@ -206,10 +217,10 @@ var _ = Describe("ControlPlaneEndpoint Tests", func() { // A VirtualMachineService should be created and will wait for a VIP to be assigned expectedAnnotations["netoperator.vmware.com/network-name"] = "dummy-network" expectVMS = true - createDefaultNetwork(ctx) + createDefaultNetwork(ctx, clusterCtx, c) expectedConditions[0].Reason = vmwarev1.WaitingForLoadBalancerIPReason expectedConditions[0].Message = waitingForVIPFailure - apiEndpoint, err = cpService.ReconcileControlPlaneEndpointService(ctx, netOpProvider) + apiEndpoint, err = cpService.ReconcileControlPlaneEndpointService(ctx, clusterCtx, netOpProvider) verifyOutput() // Once a VIP has been created, a VirtualMachineService should exist with a valid endpoint @@ -218,11 +229,11 @@ var _ = Describe("ControlPlaneEndpoint Tests", func() { expectAPIEndpoint = true expectedPort = defaultAPIBindPort expectedHost = vip - updateVMServiceWithVIP(cpService, ctx, vip) + updateVMServiceWithVIP(ctx, clusterCtx, c, cpService, vip) expectedConditions[0].Status = corev1.ConditionTrue expectedConditions[0].Reason = "" expectedConditions[0].Message = "" - apiEndpoint, err = cpService.ReconcileControlPlaneEndpointService(ctx, netOpProvider) + apiEndpoint, err = cpService.ReconcileControlPlaneEndpointService(ctx, clusterCtx, netOpProvider) verifyOutput() }) @@ -243,8 +254,8 @@ var _ = Describe("ControlPlaneEndpoint Tests", func() { // The NSXT network provider looks for a real vnet. If one does not exist, it will fail. 
By("NSXT NetworkProvider has no vnet") - nsxtProvider := network.NsxtNetworkProvider(ctx.Client, "false") - apiEndpoint, err = cpService.ReconcileControlPlaneEndpointService(ctx, nsxtProvider) + nsxtProvider := network.NsxtNetworkProvider(c, "false") + apiEndpoint, err = cpService.ReconcileControlPlaneEndpointService(ctx, clusterCtx, nsxtProvider) verifyOutput() // If a vnet is present, a VirtualMachineService should be created @@ -255,8 +266,8 @@ var _ = Describe("ControlPlaneEndpoint Tests", func() { expectVMS = true expectedConditions[0].Reason = vmwarev1.WaitingForLoadBalancerIPReason expectedConditions[0].Message = waitingForVIPFailure - createVnet(ctx) - apiEndpoint, err = cpService.ReconcileControlPlaneEndpointService(ctx, nsxtProvider) + createVnet(ctx, clusterCtx, c) + apiEndpoint, err = cpService.ReconcileControlPlaneEndpointService(ctx, clusterCtx, nsxtProvider) verifyOutput() // Once a VIP has been created, a VirtualMachineService should exist with a valid endpoint @@ -268,8 +279,8 @@ var _ = Describe("ControlPlaneEndpoint Tests", func() { expectedConditions[0].Status = corev1.ConditionTrue expectedConditions[0].Reason = "" expectedConditions[0].Message = "" - updateVMServiceWithVIP(cpService, ctx, vip) - apiEndpoint, err = cpService.ReconcileControlPlaneEndpointService(ctx, nsxtProvider) + updateVMServiceWithVIP(ctx, clusterCtx, c, cpService, vip) + apiEndpoint, err = cpService.ReconcileControlPlaneEndpointService(ctx, clusterCtx, nsxtProvider) verifyOutput() }) }) diff --git a/pkg/services/vmoperator/resource_policy.go b/pkg/services/vmoperator/resource_policy.go index 39f64a40a0..2e51b88c59 100644 --- a/pkg/services/vmoperator/resource_policy.go +++ b/pkg/services/vmoperator/resource_policy.go @@ -17,6 +17,8 @@ limitations under the License. package vmoperator import ( + "context" + "github.com/pkg/errors" vmoprv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -29,17 +31,19 @@ import ( ) // RPService represents the ability to reconcile a VirtualMachineSetResourcePolicy via vmoperator. -type RPService struct{} +type RPService struct { + Client client.Client +} // ReconcileResourcePolicy ensures that a VirtualMachineSetResourcePolicy exists for the cluster // Returns the name of a policy if it exists, otherwise returns an error. 
-func (s RPService) ReconcileResourcePolicy(ctx *vmware.ClusterContext) (string, error) { - resourcePolicy, err := s.getVirtualMachineSetResourcePolicy(ctx) +func (s *RPService) ReconcileResourcePolicy(ctx context.Context, clusterCtx *vmware.ClusterContext) (string, error) { + resourcePolicy, err := s.getVirtualMachineSetResourcePolicy(ctx, clusterCtx) if err != nil { if !apierrors.IsNotFound(err) { return "", errors.Errorf("unexpected error in getting the Resource policy: %+v", err) } - resourcePolicy, err = s.createVirtualMachineSetResourcePolicy(ctx) + resourcePolicy, err = s.createVirtualMachineSetResourcePolicy(ctx, clusterCtx) if err != nil { return "", errors.Errorf("failed to create Resource Policy: %+v", err) } @@ -48,56 +52,56 @@ func (s RPService) ReconcileResourcePolicy(ctx *vmware.ClusterContext) (string, return resourcePolicy.Name, nil } -func (s RPService) newVirtualMachineSetResourcePolicy(ctx *vmware.ClusterContext) *vmoprv1.VirtualMachineSetResourcePolicy { +func (s *RPService) newVirtualMachineSetResourcePolicy(clusterCtx *vmware.ClusterContext) *vmoprv1.VirtualMachineSetResourcePolicy { return &vmoprv1.VirtualMachineSetResourcePolicy{ ObjectMeta: metav1.ObjectMeta{ - Namespace: ctx.Cluster.Namespace, - Name: ctx.Cluster.Name, + Namespace: clusterCtx.Cluster.Namespace, + Name: clusterCtx.Cluster.Name, }, } } -func (s RPService) getVirtualMachineSetResourcePolicy(ctx *vmware.ClusterContext) (*vmoprv1.VirtualMachineSetResourcePolicy, error) { +func (s *RPService) getVirtualMachineSetResourcePolicy(ctx context.Context, clusterCtx *vmware.ClusterContext) (*vmoprv1.VirtualMachineSetResourcePolicy, error) { vmResourcePolicy := &vmoprv1.VirtualMachineSetResourcePolicy{} vmResourcePolicyName := client.ObjectKey{ - Namespace: ctx.Cluster.Namespace, - Name: ctx.Cluster.Name, + Namespace: clusterCtx.Cluster.Namespace, + Name: clusterCtx.Cluster.Name, } - err := ctx.Client.Get(ctx, vmResourcePolicyName, vmResourcePolicy) + err := s.Client.Get(ctx, vmResourcePolicyName, vmResourcePolicy) return vmResourcePolicy, err } -func (s RPService) createVirtualMachineSetResourcePolicy(ctx *vmware.ClusterContext) (*vmoprv1.VirtualMachineSetResourcePolicy, error) { - vmResourcePolicy := s.newVirtualMachineSetResourcePolicy(ctx) +func (s *RPService) createVirtualMachineSetResourcePolicy(ctx context.Context, clusterCtx *vmware.ClusterContext) (*vmoprv1.VirtualMachineSetResourcePolicy, error) { + vmResourcePolicy := s.newVirtualMachineSetResourcePolicy(clusterCtx) - _, err := ctrlutil.CreateOrPatch(ctx, ctx.Client, vmResourcePolicy, func() error { + _, err := ctrlutil.CreateOrPatch(ctx, s.Client, vmResourcePolicy, func() error { vmResourcePolicy.Spec = vmoprv1.VirtualMachineSetResourcePolicySpec{ ResourcePool: vmoprv1.ResourcePoolSpec{ - Name: ctx.Cluster.Name, + Name: clusterCtx.Cluster.Name, }, Folder: vmoprv1.FolderSpec{ - Name: ctx.Cluster.Name, + Name: clusterCtx.Cluster.Name, }, ClusterModules: []vmoprv1.ClusterModuleSpec{ { GroupName: ControlPlaneVMClusterModuleGroupName, }, { - GroupName: vmwareutil.GetMachineDeploymentNameForCluster(ctx.Cluster), + GroupName: vmwareutil.GetMachineDeploymentNameForCluster(clusterCtx.Cluster), }, }, } // Ensure that the VirtualMachineSetResourcePolicy is owned by the VSphereCluster if err := ctrlutil.SetOwnerReference( - ctx.VSphereCluster, + clusterCtx.VSphereCluster, vmResourcePolicy, - ctx.Scheme, + s.Client.Scheme(), ); err != nil { return errors.Wrapf( err, "error setting %s/%s as owner of %s/%s", - ctx.VSphereCluster.Namespace, - 
ctx.VSphereCluster.Name, + clusterCtx.VSphereCluster.Namespace, + clusterCtx.VSphereCluster.Name, vmResourcePolicy.Namespace, vmResourcePolicy.Name, ) diff --git a/pkg/services/vmoperator/resource_policy_test.go b/pkg/services/vmoperator/resource_policy_test.go index 182174d5c4..25c5f9c95f 100644 --- a/pkg/services/vmoperator/resource_policy_test.go +++ b/pkg/services/vmoperator/resource_policy_test.go @@ -17,6 +17,7 @@ limitations under the License. package vmoperator import ( + "context" "fmt" "testing" @@ -31,17 +32,19 @@ func TestRPService(t *testing.T) { vsphereClusterName := fmt.Sprintf("%s-%s", clusterName, capi_util.RandomString((6))) cluster := util.CreateCluster(clusterName) vsphereCluster := util.CreateVSphereCluster(vsphereClusterName) - ctx := util.CreateClusterContext(cluster, vsphereCluster) - - rpService := RPService{} + clusterCtx, controllerCtx := util.CreateClusterContext(cluster, vsphereCluster) + ctx := context.Background() + rpService := RPService{ + Client: controllerCtx.Client, + } t.Run("Creates Resource Policy using the cluster name", func(t *testing.T) { g := NewWithT(t) - name, err := rpService.ReconcileResourcePolicy(ctx) + name, err := rpService.ReconcileResourcePolicy(ctx, clusterCtx) g.Expect(err).NotTo(HaveOccurred()) g.Expect(name).To(Equal(clusterName)) - resourcePolicy, err := rpService.getVirtualMachineSetResourcePolicy(ctx) + resourcePolicy, err := rpService.getVirtualMachineSetResourcePolicy(ctx, clusterCtx) g.Expect(err).NotTo(HaveOccurred()) g.Expect(resourcePolicy.Spec.ResourcePool.Name).To(Equal(clusterName)) g.Expect(resourcePolicy.Spec.Folder.Name).To(Equal(clusterName)) diff --git a/pkg/services/vmoperator/vmopmachine_test.go b/pkg/services/vmoperator/vmopmachine_test.go index 6dc8efe5fe..29fd13407e 100644 --- a/pkg/services/vmoperator/vmopmachine_test.go +++ b/pkg/services/vmoperator/vmopmachine_test.go @@ -108,9 +108,9 @@ var _ = Describe("VirtualMachine tests", func() { vsphereCluster = util.CreateVSphereCluster(clusterName) machine = util.CreateMachine(machineName, clusterName, k8sVersion, controlPlaneLabelTrue) vsphereMachine = util.CreateVSphereMachine(machineName, clusterName, className, imageName, storageClass, controlPlaneLabelTrue) - clusterContext := util.CreateClusterContext(cluster, vsphereCluster) + clusterContext, controllerCtx := util.CreateClusterContext(cluster, vsphereCluster) ctx = util.CreateMachineContext(clusterContext, machine, vsphereMachine) - ctx.ControllerContext = clusterContext.ControllerContext + ctx.ControllerContext = controllerCtx }) Context("Reconcile VirtualMachine", func() { diff --git a/pkg/session/session.go b/pkg/session/session.go index 6b223a5161..a6fbe110db 100644 --- a/pkg/session/session.go +++ b/pkg/session/session.go @@ -118,6 +118,7 @@ func GetOrCreate(ctx context.Context, params *Params) (*Session, error) { "server", params.server, "datacenter", params.datacenter, "username", params.userinfo.Username()) + ctx = ctrl.LoggerInto(ctx, logger) sessionMU.Lock() defer sessionMU.Unlock() diff --git a/pkg/util/testutil.go b/pkg/util/testutil.go index 5a3e11568f..71a4eff57e 100644 --- a/pkg/util/testutil.go +++ b/pkg/util/testutil.go @@ -149,7 +149,7 @@ func createScheme() *runtime.Scheme { return scheme } -func CreateClusterContext(cluster *clusterv1.Cluster, vsphereCluster *vmwarev1.VSphereCluster) *vmware.ClusterContext { +func CreateClusterContext(cluster *clusterv1.Cluster, vsphereCluster *vmwarev1.VSphereCluster) (*vmware.ClusterContext, *capvcontext.ControllerContext) { scheme := createScheme() 
 	controllerManagerContext := &capvcontext.ControllerManagerContext{
 		Context: context.Background(),
@@ -169,11 +169,9 @@ func CreateClusterContext(cluster *clusterv1.Cluster, vsphereCluster *vmwarev1.V
 
 	// Build the cluster context.
 	return &vmware.ClusterContext{
-		ControllerContext: controllerContext,
-		Logger: controllerContext.Logger.WithName("cluster-context-logger"),
-		Cluster: cluster,
-		VSphereCluster: vsphereCluster,
-	}
+		Cluster: cluster,
+		VSphereCluster: vsphereCluster,
+	}, controllerContext
 }
 
 func CreateMachineContext(clusterContext *vmware.ClusterContext, machine *clusterv1.Machine,
@@ -181,7 +179,10 @@ func CreateMachineContext(clusterContext *vmware.ClusterContext, machine *cluste
 	// Build the machine context.
 	return &vmware.SupervisorMachineContext{
 		BaseMachineContext: &capvcontext.BaseMachineContext{
-			Logger: clusterContext.Logger.WithName(vsphereMachine.Name),
+			// TODO(sbueringer): This SupervisorMachineContext is only used in tests and the Logger
+			// will be removed anyway once we refactor the machine controllers, so it's fine if the
+			// logger is not perfect.
+			Logger: klog.Background().WithName(vsphereMachine.Name),
 			Machine: machine,
 			Cluster: clusterContext.Cluster,
 		},
diff --git a/test/helpers/vmware/unit_test_context.go b/test/helpers/vmware/unit_test_context.go
index 6c187922ae..dbddba42b6 100644
--- a/test/helpers/vmware/unit_test_context.go
+++ b/test/helpers/vmware/unit_test_context.go
@@ -17,6 +17,8 @@ limitations under the License.
 package vmware
 
 import (
+	"context"
+
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 	vmoprv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha1"
@@ -25,7 +27,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1"
-	"sigs.k8s.io/cluster-api-provider-vsphere/pkg/context"
+	capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context"
 	"sigs.k8s.io/cluster-api-provider-vsphere/pkg/context/fake"
 	"sigs.k8s.io/cluster-api-provider-vsphere/pkg/context/vmware"
 )
@@ -40,6 +42,7 @@ type UnitTestContextForController struct {
 	Key client.ObjectKey
 
 	VirtualMachineImage *vmoprv1.VirtualMachineImage
+	ControllerContext *capvcontext.ControllerContext
 }
 
 // NewUnitTestContextForController returns a new UnitTestContextForController
@@ -47,26 +50,28 @@ type UnitTestContextForController struct {
 // invoke the VSphereCluster spec controller.
 func NewUnitTestContextForController( /*newReconcilerFn NewReconcilerFunc, */ namespace string, vSphereCluster *vmwarev1.VSphereCluster,
 	prototypeCluster bool, initObjects, gcInitObjects []client.Object) *UnitTestContextForController {
+	controllerCtx := fake.NewControllerContext(fake.NewControllerManagerContext(initObjects...))
+
 	ctx := &UnitTestContextForController{
-		GuestClusterContext: fake.NewGuestClusterContext(fake.NewVmwareClusterContext(
-			fake.NewControllerContext(
-				fake.NewControllerManagerContext(initObjects...)), namespace, vSphereCluster), prototypeCluster, gcInitObjects...),
+		GuestClusterContext: fake.NewGuestClusterContext(context.TODO(), fake.NewVmwareClusterContext(controllerCtx, namespace, vSphereCluster),
+			controllerCtx, prototypeCluster, gcInitObjects...),
+		ControllerContext: controllerCtx,
 	}
 	ctx.Key = client.ObjectKey{Namespace: ctx.VSphereCluster.Namespace, Name: ctx.VSphereCluster.Name}
 
-	CreatePrototypePrereqs(ctx, ctx.ControllerManagerContext)
+	CreatePrototypePrereqs(context.TODO(), controllerCtx.Client)
 
 	return ctx
 }
 
-func CreatePrototypePrereqs(_ *UnitTestContextForController, ctx *context.ControllerManagerContext) {
+func CreatePrototypePrereqs(ctx context.Context, c client.Client) {
 	By("Creating a prototype VirtualMachineClass", func() {
 		virtualMachineClass := FakeVirtualMachineClass()
 		virtualMachineClass.Name = "small"
-		Expect(ctx.Client.Create(ctx, virtualMachineClass)).To(Succeed())
+		Expect(c.Create(ctx, virtualMachineClass)).To(Succeed())
 
 		virtualMachineClassKey := client.ObjectKey{Name: virtualMachineClass.Name}
 		Eventually(func() error {
-			return ctx.Client.Get(ctx, virtualMachineClassKey, virtualMachineClass)
+			return c.Get(ctx, virtualMachineClassKey, virtualMachineClass)
 		}).Should(Succeed())
 	})
 }
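
Reviewer note (illustrative only, not part of the patch): after this change RPService no longer reads the client, scheme, and logger off the ClusterContext; it holds an injected client.Client and takes an explicit context.Context that carries the logger. A minimal sketch of a hypothetical caller follows; the wrapper function name and the way the client is obtained are assumptions made for illustration.

package example

import (
	"context"

	"k8s.io/klog/v2"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"sigs.k8s.io/cluster-api-provider-vsphere/pkg/context/vmware"
	"sigs.k8s.io/cluster-api-provider-vsphere/pkg/services/vmoperator"
)

// reconcileResourcePolicyExample is a hypothetical caller showing the expected
// pattern: enrich the logger, store it back into the context with
// ctrl.LoggerInto, and pass the context plus the ClusterContext to the service,
// which uses its own injected Client instead of ClusterContext.Client.
func reconcileResourcePolicyExample(ctx context.Context, c client.Client, clusterCtx *vmware.ClusterContext) (string, error) {
	log := ctrl.LoggerFrom(ctx).WithValues("VSphereCluster", klog.KObj(clusterCtx.VSphereCluster))
	ctx = ctrl.LoggerInto(ctx, log)

	rpService := vmoperator.RPService{Client: c}
	return rpService.ReconcileResourcePolicy(ctx, clusterCtx)
}

This mirrors the logging and dependency-injection pattern used elsewhere in the patch (ctrl.LoggerFrom/LoggerInto plus dependencies on the service struct) rather than the old ControllerContext plumbing.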