diff --git a/controllers/clustermodule_reconciler.go b/controllers/clustermodule_reconciler.go index f623224fbf..c7bb217b94 100644 --- a/controllers/clustermodule_reconciler.go +++ b/controllers/clustermodule_reconciler.go @@ -17,7 +17,7 @@ limitations under the License. package controllers import ( - goctx "context" + "context" "fmt" "strings" @@ -38,7 +38,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/clustermodule" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" ) // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinedeployments,verbs=get;list;watch @@ -47,29 +47,29 @@ import ( // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=vspheremachinetemplates,verbs=get;list;watch type Reconciler struct { - *context.ControllerContext + *capvcontext.ControllerContext ClusterModuleService clustermodule.Service } -func NewReconciler(ctx *context.ControllerContext) Reconciler { +func NewReconciler(controllerCtx *capvcontext.ControllerContext) Reconciler { return Reconciler{ - ControllerContext: ctx, + ControllerContext: controllerCtx, ClusterModuleService: clustermodule.NewService(), } } -func (r Reconciler) Reconcile(ctx *context.ClusterContext) (reconcile.Result, error) { - ctx.Logger.Info("reconcile anti affinity setup") - if !clustermodule.IsClusterCompatible(ctx) { - conditions.MarkFalse(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition, infrav1.VCenterVersionIncompatibleReason, clusterv1.ConditionSeverityInfo, - "vCenter API version %s is not compatible with cluster modules", ctx.VSphereCluster.Status.VCenterVersion) - ctx.Logger.Info("cluster is not compatible for anti affinity", - "api version", ctx.VSphereCluster.Status.VCenterVersion) +func (r Reconciler) Reconcile(clusterCtx *capvcontext.ClusterContext) (reconcile.Result, error) { + clusterCtx.Logger.Info("reconcile anti affinity setup") + if !clustermodule.IsClusterCompatible(clusterCtx) { + conditions.MarkFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition, infrav1.VCenterVersionIncompatibleReason, clusterv1.ConditionSeverityInfo, + "vCenter API version %s is not compatible with cluster modules", clusterCtx.VSphereCluster.Status.VCenterVersion) + clusterCtx.Logger.Info("cluster is not compatible for anti affinity", + "api version", clusterCtx.VSphereCluster.Status.VCenterVersion) return reconcile.Result{}, nil } - objectMap, err := r.fetchMachineOwnerObjects(ctx) + objectMap, err := r.fetchMachineOwnerObjects(clusterCtx) if err != nil { return reconcile.Result{}, err } @@ -77,7 +77,7 @@ func (r Reconciler) Reconcile(ctx *context.ClusterContext) (reconcile.Result, er modErrs := []clusterModError{} clusterModuleSpecs := []infrav1.ClusterModule{} - for _, mod := range ctx.VSphereCluster.Spec.ClusterModules { + for _, mod := range clusterCtx.VSphereCluster.Spec.ClusterModules { curr := mod.TargetObjectName if mod.ControlPlane { curr = appendKCPKey(curr) @@ -85,18 +85,18 @@ func (r Reconciler) Reconcile(ctx *context.ClusterContext) (reconcile.Result, er if obj, ok := objectMap[curr]; !ok { // delete the cluster module as the object is marked for deletion // or already deleted. 
- if err := r.ClusterModuleService.Remove(ctx, mod.ModuleUUID); err != nil { - ctx.Logger.Error(err, "failed to delete cluster module for object", + if err := r.ClusterModuleService.Remove(clusterCtx, mod.ModuleUUID); err != nil { + clusterCtx.Logger.Error(err, "failed to delete cluster module for object", "name", mod.TargetObjectName, "moduleUUID", mod.ModuleUUID) } delete(objectMap, curr) } else { // verify the cluster module - exists, err := r.ClusterModuleService.DoesExist(ctx, obj, mod.ModuleUUID) + exists, err := r.ClusterModuleService.DoesExist(clusterCtx, obj, mod.ModuleUUID) if err != nil { // Add the error to modErrs so it gets handled below. modErrs = append(modErrs, clusterModError{obj.GetName(), errors.Wrapf(err, "failed to verify cluster module %q", mod.ModuleUUID)}) - ctx.Logger.Error(err, "failed to verify cluster module for object", + clusterCtx.Logger.Error(err, "failed to verify cluster module for object", "name", mod.TargetObjectName, "moduleUUID", mod.ModuleUUID) // Append the module and remove it from objectMap to not create new ones instead. clusterModuleSpecs = append(clusterModuleSpecs, infrav1.ClusterModule{ @@ -119,7 +119,7 @@ func (r Reconciler) Reconcile(ctx *context.ClusterContext) (reconcile.Result, er }) delete(objectMap, curr) } else { - ctx.Logger.Info("module for object not found", + clusterCtx.Logger.Info("module for object not found", "moduleUUID", mod.ModuleUUID, "object", mod.TargetObjectName) } @@ -127,9 +127,9 @@ func (r Reconciler) Reconcile(ctx *context.ClusterContext) (reconcile.Result, er } for _, obj := range objectMap { - moduleUUID, err := r.ClusterModuleService.Create(ctx, obj) + moduleUUID, err := r.ClusterModuleService.Create(clusterCtx, obj) if err != nil { - ctx.Logger.Error(err, "failed to create cluster module for target object", "name", obj.GetName()) + clusterCtx.Logger.Error(err, "failed to create cluster module for target object", "name", obj.GetName()) modErrs = append(modErrs, clusterModError{obj.GetName(), err}) continue } @@ -143,7 +143,7 @@ func (r Reconciler) Reconcile(ctx *context.ClusterContext) (reconcile.Result, er ModuleUUID: moduleUUID, }) } - ctx.VSphereCluster.Spec.ClusterModules = clusterModuleSpecs + clusterCtx.VSphereCluster.Spec.ClusterModules = clusterModuleSpecs switch { case len(modErrs) > 0: @@ -155,17 +155,17 @@ func (r Reconciler) Reconcile(ctx *context.ClusterContext) (reconcile.Result, er } else { err = errors.New(generateClusterModuleErrorMessage(modErrs)) } - conditions.MarkFalse(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition, infrav1.ClusterModuleSetupFailedReason, + conditions.MarkFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition, infrav1.ClusterModuleSetupFailedReason, clusterv1.ConditionSeverityWarning, generateClusterModuleErrorMessage(modErrs)) case len(modErrs) == 0 && len(clusterModuleSpecs) > 0: - conditions.MarkTrue(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition) + conditions.MarkTrue(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition) default: - conditions.Delete(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition) + conditions.Delete(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition) } return reconcile.Result{}, err } -func (r Reconciler) toAffinityInput(ctx goctx.Context, obj client.Object) []reconcile.Request { +func (r Reconciler) toAffinityInput(ctx context.Context, obj client.Object) []reconcile.Request { cluster, err := util.GetClusterFromMetadata(ctx, r.Client, metav1.ObjectMeta{ Namespace: 
obj.GetNamespace(), Labels: obj.GetLabels(), @@ -226,10 +226,10 @@ func (r Reconciler) PopulateWatchesOnController(mgr manager.Manager, controller ) } -func (r Reconciler) fetchMachineOwnerObjects(ctx *context.ClusterContext) (map[string]clustermodule.Wrapper, error) { +func (r Reconciler) fetchMachineOwnerObjects(clusterCtx *capvcontext.ClusterContext) (map[string]clustermodule.Wrapper, error) { objects := map[string]clustermodule.Wrapper{} - name, ok := ctx.VSphereCluster.GetLabels()[clusterv1.ClusterNameLabel] + name, ok := clusterCtx.VSphereCluster.GetLabels()[clusterv1.ClusterNameLabel] if !ok { return nil, errors.Errorf("missing CAPI cluster label") } @@ -237,8 +237,8 @@ func (r Reconciler) fetchMachineOwnerObjects(ctx *context.ClusterContext) (map[s labels := map[string]string{clusterv1.ClusterNameLabel: name} kcpList := &controlplanev1.KubeadmControlPlaneList{} if err := r.Client.List( - ctx, kcpList, - client.InNamespace(ctx.VSphereCluster.GetNamespace()), + clusterCtx, kcpList, + client.InNamespace(clusterCtx.VSphereCluster.GetNamespace()), client.MatchingLabels(labels)); err != nil { return nil, errors.Wrapf(err, "failed to list control plane objects") } @@ -254,8 +254,8 @@ func (r Reconciler) fetchMachineOwnerObjects(ctx *context.ClusterContext) (map[s mdList := &clusterv1.MachineDeploymentList{} if err := r.Client.List( - ctx, mdList, - client.InNamespace(ctx.VSphereCluster.GetNamespace()), + clusterCtx, mdList, + client.InNamespace(clusterCtx.VSphereCluster.GetNamespace()), client.MatchingLabels(labels)); err != nil { return nil, errors.Wrapf(err, "failed to list machine deployment objects") } diff --git a/controllers/clustermodule_reconciler_test.go b/controllers/clustermodule_reconciler_test.go index 0f1244e5ad..7cfc18a8f6 100644 --- a/controllers/clustermodule_reconciler_test.go +++ b/controllers/clustermodule_reconciler_test.go @@ -33,7 +33,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/clustermodule" cmodfake "sigs.k8s.io/cluster-api-provider-vsphere/pkg/clustermodule/fake" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context/fake" ) @@ -49,7 +49,7 @@ func TestReconciler_Reconcile(t *testing.T) { clusterModules []infrav1.ClusterModule beforeFn func(object client.Object) setupMocks func(*cmodfake.CMService) - customAssert func(*gomega.WithT, *context.ClusterContext) + customAssert func(*gomega.WithT, *capvcontext.ClusterContext) }{ { name: "when cluster modules already exist", @@ -69,8 +69,8 @@ func TestReconciler_Reconcile(t *testing.T) { svc.On("DoesExist", mock.Anything, mock.Anything, kcpUUID).Return(true, nil) svc.On("DoesExist", mock.Anything, mock.Anything, mdUUID).Return(true, nil) }, - customAssert: func(g *gomega.WithT, ctx *context.ClusterContext) { - g.Expect(ctx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(2)) + customAssert: func(g *gomega.WithT, clusterCtx *capvcontext.ClusterContext) { + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(2)) }, }, { @@ -80,12 +80,12 @@ func TestReconciler_Reconcile(t *testing.T) { svc.On("Create", mock.Anything, clustermodule.NewWrapper(kcp)).Return(kcpUUID, nil) svc.On("Create", mock.Anything, clustermodule.NewWrapper(md)).Return(mdUUID, nil) }, - customAssert: func(g *gomega.WithT, ctx *context.ClusterContext) { - g.Expect(ctx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(2)) + 
customAssert: func(g *gomega.WithT, clusterCtx *capvcontext.ClusterContext) { + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(2)) var ( names, moduleUUIDs []string ) - for _, mod := range ctx.VSphereCluster.Spec.ClusterModules { + for _, mod := range clusterCtx.VSphereCluster.Spec.ClusterModules { names = append(names, mod.TargetObjectName) moduleUUIDs = append(moduleUUIDs, mod.ModuleUUID) } @@ -106,12 +106,12 @@ func TestReconciler_Reconcile(t *testing.T) { svc.On("DoesExist", mock.Anything, mock.Anything, kcpUUID).Return(true, nil) svc.On("Create", mock.Anything, clustermodule.NewWrapper(md)).Return(mdUUID, nil) }, - customAssert: func(g *gomega.WithT, ctx *context.ClusterContext) { - g.Expect(ctx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(2)) + customAssert: func(g *gomega.WithT, clusterCtx *capvcontext.ClusterContext) { + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(2)) var ( names, moduleUUIDs []string ) - for _, mod := range ctx.VSphereCluster.Spec.ClusterModules { + for _, mod := range clusterCtx.VSphereCluster.Spec.ClusterModules { names = append(names, mod.TargetObjectName) moduleUUIDs = append(moduleUUIDs, mod.ModuleUUID) } @@ -139,14 +139,14 @@ func TestReconciler_Reconcile(t *testing.T) { svc.On("Create", mock.Anything, clustermodule.NewWrapper(kcp)).Return(kcpUUID+"a", nil) svc.On("Create", mock.Anything, clustermodule.NewWrapper(md)).Return(mdUUID+"a", nil) }, - customAssert: func(g *gomega.WithT, ctx *context.ClusterContext) { - g.Expect(ctx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(2)) + customAssert: func(g *gomega.WithT, clusterCtx *capvcontext.ClusterContext) { + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(2)) // Ensure the new modules exist. - g.Expect(ctx.VSphereCluster.Spec.ClusterModules[0].ModuleUUID).To(gomega.BeElementOf(kcpUUID+"a", mdUUID+"a")) - g.Expect(ctx.VSphereCluster.Spec.ClusterModules[1].ModuleUUID).To(gomega.BeElementOf(kcpUUID+"a", mdUUID+"a")) + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[0].ModuleUUID).To(gomega.BeElementOf(kcpUUID+"a", mdUUID+"a")) + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[1].ModuleUUID).To(gomega.BeElementOf(kcpUUID+"a", mdUUID+"a")) // Check that condition got set. - g.Expect(conditions.Has(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) - g.Expect(conditions.IsTrue(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) + g.Expect(conditions.Has(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) + g.Expect(conditions.IsTrue(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) }, }, { @@ -168,15 +168,15 @@ func TestReconciler_Reconcile(t *testing.T) { svc.On("DoesExist", mock.Anything, mock.Anything, kcpUUID).Return(false, vCenter500err) svc.On("DoesExist", mock.Anything, mock.Anything, mdUUID).Return(false, vCenter500err) }, - customAssert: func(g *gomega.WithT, ctx *context.ClusterContext) { - g.Expect(ctx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(2)) + customAssert: func(g *gomega.WithT, clusterCtx *capvcontext.ClusterContext) { + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(2)) // Ensure the old modules still exist. 
- g.Expect(ctx.VSphereCluster.Spec.ClusterModules[0].ModuleUUID).To(gomega.BeElementOf(kcpUUID, mdUUID)) - g.Expect(ctx.VSphereCluster.Spec.ClusterModules[1].ModuleUUID).To(gomega.BeElementOf(kcpUUID, mdUUID)) + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[0].ModuleUUID).To(gomega.BeElementOf(kcpUUID, mdUUID)) + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[1].ModuleUUID).To(gomega.BeElementOf(kcpUUID, mdUUID)) // Check that condition got set. - g.Expect(conditions.Has(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) - g.Expect(conditions.IsFalse(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) - g.Expect(conditions.Get(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition).Message).To(gomega.ContainSubstring(vCenter500err.Error())) + g.Expect(conditions.Has(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) + g.Expect(conditions.IsFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) + g.Expect(conditions.Get(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition).Message).To(gomega.ContainSubstring(vCenter500err.Error())) }, }, { @@ -198,15 +198,15 @@ func TestReconciler_Reconcile(t *testing.T) { svc.On("DoesExist", mock.Anything, mock.Anything, kcpUUID).Return(true, nil) svc.On("DoesExist", mock.Anything, mock.Anything, mdUUID).Return(false, vCenter500err) }, - customAssert: func(g *gomega.WithT, ctx *context.ClusterContext) { - g.Expect(ctx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(2)) + customAssert: func(g *gomega.WithT, clusterCtx *capvcontext.ClusterContext) { + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(2)) // Ensure the old modules still exist. - g.Expect(ctx.VSphereCluster.Spec.ClusterModules[0].ModuleUUID).To(gomega.BeElementOf(kcpUUID, mdUUID)) - g.Expect(ctx.VSphereCluster.Spec.ClusterModules[1].ModuleUUID).To(gomega.BeElementOf(kcpUUID, mdUUID)) + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[0].ModuleUUID).To(gomega.BeElementOf(kcpUUID, mdUUID)) + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[1].ModuleUUID).To(gomega.BeElementOf(kcpUUID, mdUUID)) // Check that condition got set. 
- g.Expect(conditions.Has(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) - g.Expect(conditions.IsFalse(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) - g.Expect(conditions.Get(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition).Message).To(gomega.ContainSubstring(vCenter500err.Error())) + g.Expect(conditions.Has(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) + g.Expect(conditions.IsFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) + g.Expect(conditions.Get(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition).Message).To(gomega.ContainSubstring(vCenter500err.Error())) }, }, { @@ -229,15 +229,15 @@ func TestReconciler_Reconcile(t *testing.T) { svc.On("DoesExist", mock.Anything, mock.Anything, mdUUID).Return(false, vCenter500err) svc.On("Create", mock.Anything, clustermodule.NewWrapper(kcp)).Return(kcpUUID+"a", nil) }, - customAssert: func(g *gomega.WithT, ctx *context.ClusterContext) { - g.Expect(ctx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(2)) + customAssert: func(g *gomega.WithT, clusterCtx *capvcontext.ClusterContext) { + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(2)) // Ensure the errored and the new module exist. - g.Expect(ctx.VSphereCluster.Spec.ClusterModules[0].ModuleUUID).To(gomega.BeElementOf(kcpUUID+"a", mdUUID)) - g.Expect(ctx.VSphereCluster.Spec.ClusterModules[1].ModuleUUID).To(gomega.BeElementOf(kcpUUID+"a", mdUUID)) + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[0].ModuleUUID).To(gomega.BeElementOf(kcpUUID+"a", mdUUID)) + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[1].ModuleUUID).To(gomega.BeElementOf(kcpUUID+"a", mdUUID)) // Check that condition got set. 
- g.Expect(conditions.Has(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) - g.Expect(conditions.IsFalse(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) - g.Expect(conditions.Get(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition).Message).To(gomega.ContainSubstring(vCenter500err.Error())) + g.Expect(conditions.Has(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) + g.Expect(conditions.IsFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) + g.Expect(conditions.Get(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition).Message).To(gomega.ContainSubstring(vCenter500err.Error())) }, }, { @@ -247,15 +247,15 @@ func TestReconciler_Reconcile(t *testing.T) { svc.On("Create", mock.Anything, clustermodule.NewWrapper(kcp)).Return("", clustermodule.NewIncompatibleOwnerError("foo-123")) svc.On("Create", mock.Anything, clustermodule.NewWrapper(md)).Return(mdUUID, nil) }, - customAssert: func(g *gomega.WithT, ctx *context.ClusterContext) { - g.Expect(ctx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(1)) - g.Expect(ctx.VSphereCluster.Spec.ClusterModules[0].TargetObjectName).To(gomega.Equal("md")) - g.Expect(ctx.VSphereCluster.Spec.ClusterModules[0].ModuleUUID).To(gomega.Equal(mdUUID)) - g.Expect(ctx.VSphereCluster.Spec.ClusterModules[0].ControlPlane).To(gomega.BeFalse()) + customAssert: func(g *gomega.WithT, clusterCtx *capvcontext.ClusterContext) { + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(1)) + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[0].TargetObjectName).To(gomega.Equal("md")) + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[0].ModuleUUID).To(gomega.Equal(mdUUID)) + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[0].ControlPlane).To(gomega.BeFalse()) - g.Expect(conditions.Has(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) - g.Expect(conditions.IsFalse(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) - g.Expect(conditions.Get(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition).Message).To(gomega.ContainSubstring("kcp")) + g.Expect(conditions.Has(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) + g.Expect(conditions.IsFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) + g.Expect(conditions.Get(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition).Message).To(gomega.ContainSubstring("kcp")) }, }, { @@ -267,15 +267,15 @@ func TestReconciler_Reconcile(t *testing.T) { }, // if cluster module creation fails for any reason apart from incompatibility, error should be returned haveError: true, - customAssert: func(g *gomega.WithT, ctx *context.ClusterContext) { - g.Expect(ctx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(1)) - g.Expect(ctx.VSphereCluster.Spec.ClusterModules[0].TargetObjectName).To(gomega.Equal("kcp")) - g.Expect(ctx.VSphereCluster.Spec.ClusterModules[0].ModuleUUID).To(gomega.Equal(kcpUUID)) - g.Expect(ctx.VSphereCluster.Spec.ClusterModules[0].ControlPlane).To(gomega.BeTrue()) + customAssert: func(g *gomega.WithT, clusterCtx *capvcontext.ClusterContext) { + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(1)) + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[0].TargetObjectName).To(gomega.Equal("kcp")) + 
g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[0].ModuleUUID).To(gomega.Equal(kcpUUID)) + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[0].ControlPlane).To(gomega.BeTrue()) - g.Expect(conditions.Has(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) - g.Expect(conditions.IsFalse(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) - g.Expect(conditions.Get(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition).Message).To(gomega.ContainSubstring("md")) + g.Expect(conditions.Has(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) + g.Expect(conditions.IsFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) + g.Expect(conditions.Get(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition).Message).To(gomega.ContainSubstring("md")) }, }, { @@ -287,11 +287,11 @@ func TestReconciler_Reconcile(t *testing.T) { }, // if cluster module creation fails due to resource pool owner incompatibility, vSphereCluster object is set to Ready haveError: false, - customAssert: func(g *gomega.WithT, ctx *context.ClusterContext) { - g.Expect(ctx.VSphereCluster.Spec.ClusterModules).To(gomega.BeEmpty()) - g.Expect(conditions.Has(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) - g.Expect(conditions.IsFalse(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) - g.Expect(conditions.Get(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition).Message).To(gomega.ContainSubstring("kcp")) + customAssert: func(g *gomega.WithT, clusterCtx *capvcontext.ClusterContext) { + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules).To(gomega.BeEmpty()) + g.Expect(conditions.Has(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) + g.Expect(conditions.IsFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) + g.Expect(conditions.Get(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition).Message).To(gomega.ContainSubstring("kcp")) }, }, { @@ -302,11 +302,11 @@ func TestReconciler_Reconcile(t *testing.T) { // mimics cluster module creation was skipped svc.On("Create", mock.Anything, clustermodule.NewWrapper(md)).Return("", nil) }, - customAssert: func(g *gomega.WithT, ctx *context.ClusterContext) { - g.Expect(ctx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(1)) - g.Expect(ctx.VSphereCluster.Spec.ClusterModules[0].TargetObjectName).To(gomega.Equal("kcp")) - g.Expect(ctx.VSphereCluster.Spec.ClusterModules[0].ModuleUUID).To(gomega.Equal(kcpUUID)) - g.Expect(ctx.VSphereCluster.Spec.ClusterModules[0].ControlPlane).To(gomega.BeTrue()) + customAssert: func(g *gomega.WithT, clusterCtx *capvcontext.ClusterContext) { + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(1)) + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[0].TargetObjectName).To(gomega.Equal("kcp")) + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[0].ModuleUUID).To(gomega.Equal(kcpUUID)) + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[0].ControlPlane).To(gomega.BeTrue()) }, }, { @@ -326,11 +326,11 @@ func TestReconciler_Reconcile(t *testing.T) { setupMocks: func(svc *cmodfake.CMService) { svc.On("DoesExist", mock.Anything, mock.Anything, kcpUUID).Return(true, nil) }, - customAssert: func(g *gomega.WithT, ctx *context.ClusterContext) { - 
g.Expect(ctx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(1)) - g.Expect(ctx.VSphereCluster.Spec.ClusterModules[0].TargetObjectName).To(gomega.Equal("kcp")) - g.Expect(ctx.VSphereCluster.Spec.ClusterModules[0].ModuleUUID).To(gomega.Equal(kcpUUID)) - g.Expect(ctx.VSphereCluster.Spec.ClusterModules[0].ControlPlane).To(gomega.BeTrue()) + customAssert: func(g *gomega.WithT, clusterCtx *capvcontext.ClusterContext) { + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(1)) + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[0].TargetObjectName).To(gomega.Equal("kcp")) + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[0].ModuleUUID).To(gomega.Equal(kcpUUID)) + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[0].ControlPlane).To(gomega.BeTrue()) }, }, { @@ -356,11 +356,11 @@ func TestReconciler_Reconcile(t *testing.T) { svc.On("DoesExist", mock.Anything, mock.Anything, kcpUUID).Return(true, nil) svc.On("Remove", mock.Anything, mdUUID).Return(nil) }, - customAssert: func(g *gomega.WithT, ctx *context.ClusterContext) { - g.Expect(ctx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(1)) - g.Expect(ctx.VSphereCluster.Spec.ClusterModules[0].TargetObjectName).To(gomega.Equal("kcp")) - g.Expect(ctx.VSphereCluster.Spec.ClusterModules[0].ModuleUUID).To(gomega.Equal(kcpUUID)) - g.Expect(ctx.VSphereCluster.Spec.ClusterModules[0].ControlPlane).To(gomega.BeTrue()) + customAssert: func(g *gomega.WithT, clusterCtx *capvcontext.ClusterContext) { + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(1)) + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[0].TargetObjectName).To(gomega.Equal("kcp")) + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[0].ModuleUUID).To(gomega.Equal(kcpUUID)) + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[0].ControlPlane).To(gomega.BeTrue()) }, }, { @@ -386,8 +386,8 @@ func TestReconciler_Reconcile(t *testing.T) { svc.On("Remove", mock.Anything, kcpUUID).Return(nil) svc.On("Remove", mock.Anything, mdUUID).Return(nil) }, - customAssert: func(g *gomega.WithT, ctx *context.ClusterContext) { - g.Expect(ctx.VSphereCluster.Spec.ClusterModules).To(gomega.BeEmpty()) + customAssert: func(g *gomega.WithT, clusterCtx *capvcontext.ClusterContext) { + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules).To(gomega.BeEmpty()) }, }, { @@ -398,8 +398,8 @@ func TestReconciler_Reconcile(t *testing.T) { kcp.ObjectMeta.Finalizers = append(kcp.ObjectMeta.Finalizers, "keep-this-for-the-test") }, clusterModules: []infrav1.ClusterModule{}, - customAssert: func(g *gomega.WithT, ctx *context.ClusterContext) { - g.Expect(ctx.VSphereCluster.Spec.ClusterModules).To(gomega.BeEmpty()) + customAssert: func(g *gomega.WithT, clusterCtx *capvcontext.ClusterContext) { + g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules).To(gomega.BeEmpty()) }, }, } diff --git a/controllers/serviceaccount_controller.go b/controllers/serviceaccount_controller.go index 5284058b69..f5afd6ff18 100644 --- a/controllers/serviceaccount_controller.go +++ b/controllers/serviceaccount_controller.go @@ -17,7 +17,7 @@ limitations under the License. 
package controllers import ( - goctx "context" + "context" "fmt" "os" "reflect" @@ -46,7 +46,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" vmwarecontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context/vmware" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/record" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/util" @@ -66,27 +66,27 @@ const ( ) // AddServiceAccountProviderControllerToManager adds this controller to the provided manager. -func AddServiceAccountProviderControllerToManager(ctx *context.ControllerManagerContext, mgr manager.Manager, tracker *remote.ClusterCacheTracker, options controller.Options) error { +func AddServiceAccountProviderControllerToManager(controllerCtx *capvcontext.ControllerManagerContext, mgr manager.Manager, tracker *remote.ClusterCacheTracker, options controller.Options) error { var ( controlledType = &vmwarev1.ProviderServiceAccount{} controlledTypeName = reflect.TypeOf(controlledType).Elem().Name() controllerNameShort = fmt.Sprintf("%s-controller", strings.ToLower(controlledTypeName)) - controllerNameLong = fmt.Sprintf("%s/%s/%s", ctx.Namespace, ctx.Name, controllerNameShort) + controllerNameLong = fmt.Sprintf("%s/%s/%s", controllerCtx.Namespace, controllerCtx.Name, controllerNameShort) ) - controllerContext := &context.ControllerContext{ - ControllerManagerContext: ctx, + controllerContext := &capvcontext.ControllerContext{ + ControllerManagerContext: controllerCtx, Name: controllerNameShort, Recorder: record.New(mgr.GetEventRecorderFor(controllerNameLong)), - Logger: ctx.Logger.WithName(controllerNameShort), + Logger: controllerCtx.Logger.WithName(controllerNameShort), } r := ServiceAccountReconciler{ ControllerContext: controllerContext, remoteClusterCacheTracker: tracker, } - clusterToInfraFn := clusterToSupervisorInfrastructureMapFunc(ctx) + clusterToInfraFn := clusterToSupervisorInfrastructureMapFunc(controllerCtx) return ctrl.NewControllerManagedBy(mgr).For(controlledType). WithOptions(options). @@ -103,7 +103,7 @@ func AddServiceAccountProviderControllerToManager(ctx *context.ControllerManager // Watches clusters and reconciles the vSphereCluster Watches( &clusterv1.Cluster{}, - handler.EnqueueRequestsFromMapFunc(func(ctx goctx.Context, o client.Object) []reconcile.Request { + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, o client.Object) []reconcile.Request { requests := clusterToInfraFn(ctx, o) if requests == nil { return nil @@ -122,22 +122,22 @@ func AddServiceAccountProviderControllerToManager(ctx *context.ControllerManager return requests }), ). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), ctx.WatchFilterValue)). + WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(controllerCtx), controllerCtx.WatchFilterValue)). 
Complete(r) } -func clusterToSupervisorInfrastructureMapFunc(managerContext *context.ControllerManagerContext) handler.MapFunc { +func clusterToSupervisorInfrastructureMapFunc(controllerCtx *capvcontext.ControllerManagerContext) handler.MapFunc { gvk := vmwarev1.GroupVersion.WithKind(reflect.TypeOf(&vmwarev1.VSphereCluster{}).Elem().Name()) - return clusterutilv1.ClusterToInfrastructureMapFunc(managerContext, gvk, managerContext.Client, &vmwarev1.VSphereCluster{}) + return clusterutilv1.ClusterToInfrastructureMapFunc(controllerCtx, gvk, controllerCtx.Client, &vmwarev1.VSphereCluster{}) } type ServiceAccountReconciler struct { - *context.ControllerContext + *capvcontext.ControllerContext remoteClusterCacheTracker *remote.ClusterCacheTracker } -func (r ServiceAccountReconciler) Reconcile(_ goctx.Context, req reconcile.Request) (_ reconcile.Result, reterr error) { +func (r ServiceAccountReconciler) Reconcile(_ context.Context, req reconcile.Request) (_ reconcile.Result, reterr error) { r.ControllerContext.Logger.V(4).Info("Starting Reconcile") // Get the vSphereCluster for this request. @@ -219,18 +219,18 @@ func (r ServiceAccountReconciler) Reconcile(_ goctx.Context, req reconcile.Reque }) } -func (r ServiceAccountReconciler) ReconcileDelete(ctx *vmwarecontext.ClusterContext) (reconcile.Result, error) { - ctx.Logger.V(4).Info("Reconciling deleting Provider ServiceAccounts", "cluster", ctx.VSphereCluster.Name) +func (r ServiceAccountReconciler) ReconcileDelete(clusterCtx *vmwarecontext.ClusterContext) (reconcile.Result, error) { + clusterCtx.Logger.V(4).Info("Reconciling deleting Provider ServiceAccounts", "cluster", clusterCtx.VSphereCluster.Name) - pSvcAccounts, err := getProviderServiceAccounts(ctx) + pSvcAccounts, err := getProviderServiceAccounts(clusterCtx) if err != nil { - ctx.Logger.Error(err, "Error fetching provider serviceaccounts") + clusterCtx.Logger.Error(err, "Error fetching provider serviceaccounts") return reconcile.Result{}, err } for _, pSvcAccount := range pSvcAccounts { // Delete entries for configmap with serviceaccount - if err := r.deleteServiceAccountConfigMap(ctx, pSvcAccount); err != nil { + if err := r.deleteServiceAccountConfigMap(clusterCtx, pSvcAccount); err != nil { return reconcile.Result{}, errors.Wrapf(err, "unable to delete configmap entry for provider serviceaccount %s", pSvcAccount.Name) } } @@ -238,25 +238,25 @@ func (r ServiceAccountReconciler) ReconcileDelete(ctx *vmwarecontext.ClusterCont return reconcile.Result{}, nil } -func (r ServiceAccountReconciler) ReconcileNormal(ctx *vmwarecontext.GuestClusterContext) (_ reconcile.Result, reterr error) { - ctx.Logger.V(4).Info("Reconciling Provider ServiceAccount", "cluster", ctx.VSphereCluster.Name) +func (r ServiceAccountReconciler) ReconcileNormal(guestClusterCtx *vmwarecontext.GuestClusterContext) (_ reconcile.Result, reterr error) { + guestClusterCtx.Logger.V(4).Info("Reconciling Provider ServiceAccount", "cluster", guestClusterCtx.VSphereCluster.Name) defer func() { if reterr != nil { - conditions.MarkFalse(ctx.VSphereCluster, vmwarev1.ProviderServiceAccountsReadyCondition, vmwarev1.ProviderServiceAccountsReconciliationFailedReason, + conditions.MarkFalse(guestClusterCtx.VSphereCluster, vmwarev1.ProviderServiceAccountsReadyCondition, vmwarev1.ProviderServiceAccountsReconciliationFailedReason, clusterv1.ConditionSeverityWarning, reterr.Error()) } else { - conditions.MarkTrue(ctx.VSphereCluster, vmwarev1.ProviderServiceAccountsReadyCondition) + conditions.MarkTrue(guestClusterCtx.VSphereCluster, 
vmwarev1.ProviderServiceAccountsReadyCondition) } }() - pSvcAccounts, err := getProviderServiceAccounts(ctx.ClusterContext) + pSvcAccounts, err := getProviderServiceAccounts(guestClusterCtx.ClusterContext) if err != nil { - ctx.Logger.Error(err, "Error fetching provider serviceaccounts") + guestClusterCtx.Logger.Error(err, "Error fetching provider serviceaccounts") return reconcile.Result{}, err } - err = r.ensureProviderServiceAccounts(ctx, pSvcAccounts) + err = r.ensureProviderServiceAccounts(guestClusterCtx, pSvcAccounts) if err != nil { - ctx.Logger.Error(err, "Error ensuring provider serviceaccounts") + guestClusterCtx.Logger.Error(err, "Error ensuring provider serviceaccounts") return reconcile.Result{}, err } @@ -264,59 +264,59 @@ func (r ServiceAccountReconciler) ReconcileNormal(ctx *vmwarecontext.GuestCluste } // Ensure service accounts from provider spec is created. -func (r ServiceAccountReconciler) ensureProviderServiceAccounts(ctx *vmwarecontext.GuestClusterContext, pSvcAccounts []vmwarev1.ProviderServiceAccount) error { +func (r ServiceAccountReconciler) ensureProviderServiceAccounts(guestClusterCtx *vmwarecontext.GuestClusterContext, pSvcAccounts []vmwarev1.ProviderServiceAccount) error { for i, pSvcAccount := range pSvcAccounts { - if ctx.Cluster != nil && annotations.IsPaused(ctx.Cluster, &(pSvcAccounts[i])) { + if guestClusterCtx.Cluster != nil && annotations.IsPaused(guestClusterCtx.Cluster, &(pSvcAccounts[i])) { r.Logger.V(4).Info("ProviderServiceAccount %s/%s linked to a cluster that is paused or has pause annotation", pSvcAccount.Namespace, pSvcAccount.Name) continue } // 1. Create service accounts by the name specified in Provider Spec - if err := r.ensureServiceAccount(ctx.ClusterContext, pSvcAccount); err != nil { + if err := r.ensureServiceAccount(guestClusterCtx.ClusterContext, pSvcAccount); err != nil { return errors.Wrapf(err, "unable to create provider serviceaccount %s", pSvcAccount.Name) } // 2. Update configmap with serviceaccount - if err := r.ensureServiceAccountConfigMap(ctx.ClusterContext, pSvcAccount); err != nil { + if err := r.ensureServiceAccountConfigMap(guestClusterCtx.ClusterContext, pSvcAccount); err != nil { return errors.Wrapf(err, "unable to sync configmap for provider serviceaccount %s", pSvcAccount.Name) } // 3. Create secret of Service account token type for the service account - if err := r.ensureServiceAccountSecret(ctx.ClusterContext, pSvcAccount); err != nil { + if err := r.ensureServiceAccountSecret(guestClusterCtx.ClusterContext, pSvcAccount); err != nil { return errors.Wrapf(err, "unable to create provider serviceaccount secret %s", getServiceAccountSecretName(pSvcAccount)) } // 4. Create the associated role for the service account - if err := r.ensureRole(ctx.ClusterContext, pSvcAccount); err != nil { + if err := r.ensureRole(guestClusterCtx.ClusterContext, pSvcAccount); err != nil { return errors.Wrapf(err, "unable to create role for provider serviceaccount %s", pSvcAccount.Name) } // 5. Create the associated roleBinding for the service account - if err := r.ensureRoleBinding(ctx.ClusterContext, pSvcAccount); err != nil { + if err := r.ensureRoleBinding(guestClusterCtx.ClusterContext, pSvcAccount); err != nil { return errors.Wrapf(err, "unable to create rolebinding for provider serviceaccount %s", pSvcAccount.Name) } // 6. 
Sync the service account with the target - if err := r.syncServiceAccountSecret(ctx, pSvcAccount); err != nil { + if err := r.syncServiceAccountSecret(guestClusterCtx, pSvcAccount); err != nil { return errors.Wrapf(err, "unable to sync secret for provider serviceaccount %s", pSvcAccount.Name) } } return nil } -func (r ServiceAccountReconciler) ensureServiceAccount(ctx *vmwarecontext.ClusterContext, pSvcAccount vmwarev1.ProviderServiceAccount) error { +func (r ServiceAccountReconciler) ensureServiceAccount(clusterCtx *vmwarecontext.ClusterContext, pSvcAccount vmwarev1.ProviderServiceAccount) error { svcAccount := corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: getServiceAccountName(pSvcAccount), Namespace: pSvcAccount.Namespace, }, } - logger := ctx.Logger.WithValues("providerserviceaccount", pSvcAccount.Name, "serviceaccount", svcAccount.Name) - err := util.SetControllerReferenceWithOverride(&pSvcAccount, &svcAccount, ctx.Scheme) + logger := clusterCtx.Logger.WithValues("providerserviceaccount", pSvcAccount.Name, "serviceaccount", svcAccount.Name) + err := util.SetControllerReferenceWithOverride(&pSvcAccount, &svcAccount, clusterCtx.Scheme) if err != nil { return err } logger.V(4).Info("Creating service account") - err = ctx.Client.Create(ctx, &svcAccount) + err = clusterCtx.Client.Create(clusterCtx, &svcAccount) if err != nil && !apierrors.IsAlreadyExists(err) { // Note: We skip updating the service account because the token controller updates the service account with a // secret and we don't want to overwrite it with an empty secret. @@ -325,7 +325,7 @@ func (r ServiceAccountReconciler) ensureServiceAccount(ctx *vmwarecontext.Cluste return nil } -func (r ServiceAccountReconciler) ensureServiceAccountSecret(ctx *vmwarecontext.ClusterContext, pSvcAccount vmwarev1.ProviderServiceAccount) error { +func (r ServiceAccountReconciler) ensureServiceAccountSecret(clusterCtx *vmwarecontext.ClusterContext, pSvcAccount vmwarev1.ProviderServiceAccount) error { secret := corev1.Secret{ Type: corev1.SecretTypeServiceAccountToken, ObjectMeta: metav1.ObjectMeta{ @@ -338,13 +338,13 @@ func (r ServiceAccountReconciler) ensureServiceAccountSecret(ctx *vmwarecontext. }, } - logger := ctx.Logger.WithValues("providerserviceaccount", pSvcAccount.Name, "secret", secret.Name) - err := util.SetControllerReferenceWithOverride(&pSvcAccount, &secret, ctx.Scheme) + logger := clusterCtx.Logger.WithValues("providerserviceaccount", pSvcAccount.Name, "secret", secret.Name) + err := util.SetControllerReferenceWithOverride(&pSvcAccount, &secret, clusterCtx.Scheme) if err != nil { return err } logger.V(4).Info("Creating service account secret") - err = ctx.Client.Create(ctx, &secret) + err = clusterCtx.Client.Create(clusterCtx, &secret) if err != nil && !apierrors.IsAlreadyExists(err) { // Note: We skip updating the service account because the token controller updates the service account with a // secret and we don't want to overwrite it with an empty secret. @@ -353,17 +353,17 @@ func (r ServiceAccountReconciler) ensureServiceAccountSecret(ctx *vmwarecontext. 
return nil } -func (r ServiceAccountReconciler) ensureRole(ctx *vmwarecontext.ClusterContext, pSvcAccount vmwarev1.ProviderServiceAccount) error { +func (r ServiceAccountReconciler) ensureRole(clusterCtx *vmwarecontext.ClusterContext, pSvcAccount vmwarev1.ProviderServiceAccount) error { role := rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ Name: getRoleName(pSvcAccount), Namespace: pSvcAccount.Namespace, }, } - logger := ctx.Logger.WithValues("providerserviceaccount", pSvcAccount.Name, "role", role.Name) + logger := clusterCtx.Logger.WithValues("providerserviceaccount", pSvcAccount.Name, "role", role.Name) logger.V(4).Info("Creating or updating role") - _, err := controllerutil.CreateOrPatch(ctx, ctx.Client, &role, func() error { - if err := util.SetControllerReferenceWithOverride(&pSvcAccount, &role, ctx.Scheme); err != nil { + _, err := controllerutil.CreateOrPatch(clusterCtx, clusterCtx.Client, &role, func() error { + if err := util.SetControllerReferenceWithOverride(&pSvcAccount, &role, clusterCtx.Scheme); err != nil { return err } role.Rules = pSvcAccount.Spec.Rules @@ -372,7 +372,7 @@ func (r ServiceAccountReconciler) ensureRole(ctx *vmwarecontext.ClusterContext, return err } -func (r ServiceAccountReconciler) ensureRoleBinding(ctx *vmwarecontext.ClusterContext, pSvcAccount vmwarev1.ProviderServiceAccount) error { +func (r ServiceAccountReconciler) ensureRoleBinding(clusterCtx *vmwarecontext.ClusterContext, pSvcAccount vmwarev1.ProviderServiceAccount) error { roleName := getRoleName(pSvcAccount) svcAccountName := getServiceAccountName(pSvcAccount) roleBinding := rbacv1.RoleBinding{ @@ -381,10 +381,10 @@ func (r ServiceAccountReconciler) ensureRoleBinding(ctx *vmwarecontext.ClusterCo Namespace: pSvcAccount.Namespace, }, } - logger := ctx.Logger.WithValues("providerserviceaccount", pSvcAccount.Name, "rolebinding", roleBinding.Name) + logger := clusterCtx.Logger.WithValues("providerserviceaccount", pSvcAccount.Name, "rolebinding", roleBinding.Name) logger.V(4).Info("Creating or updating rolebinding") - err := ctx.Client.Get(ctx, types.NamespacedName{Name: getRoleBindingName(pSvcAccount), Namespace: pSvcAccount.Namespace}, &roleBinding) + err := clusterCtx.Client.Get(clusterCtx, types.NamespacedName{Name: getRoleBindingName(pSvcAccount), Namespace: pSvcAccount.Namespace}, &roleBinding) if err != nil && !apierrors.IsNotFound(err) { return err } @@ -392,14 +392,14 @@ func (r ServiceAccountReconciler) ensureRoleBinding(ctx *vmwarecontext.ClusterCo if err == nil { // If the roleRef needs changing, we have to delete the rolebinding and recreate it. 
if roleBinding.RoleRef.Name != roleName || roleBinding.RoleRef.Kind != "Role" || roleBinding.RoleRef.APIGroup != rbacv1.GroupName { - if err := ctx.Client.Delete(ctx, &roleBinding); err != nil { + if err := clusterCtx.Client.Delete(clusterCtx, &roleBinding); err != nil { return err } } } - _, err = controllerutil.CreateOrPatch(ctx, ctx.Client, &roleBinding, func() error { - if err := util.SetControllerReferenceWithOverride(&pSvcAccount, &roleBinding, ctx.Scheme); err != nil { + _, err = controllerutil.CreateOrPatch(clusterCtx, clusterCtx.Client, &roleBinding, func() error { + if err := util.SetControllerReferenceWithOverride(&pSvcAccount, &roleBinding, clusterCtx.Scheme); err != nil { return err } roleBinding.RoleRef = rbacv1.RoleRef{ @@ -420,14 +420,14 @@ func (r ServiceAccountReconciler) ensureRoleBinding(ctx *vmwarecontext.ClusterCo return err } -func (r ServiceAccountReconciler) syncServiceAccountSecret(ctx *vmwarecontext.GuestClusterContext, pSvcAccount vmwarev1.ProviderServiceAccount) error { - logger := ctx.Logger.WithValues("providerserviceaccount", pSvcAccount.Name) +func (r ServiceAccountReconciler) syncServiceAccountSecret(guestClusterCtx *vmwarecontext.GuestClusterContext, pSvcAccount vmwarev1.ProviderServiceAccount) error { + logger := guestClusterCtx.Logger.WithValues("providerserviceaccount", pSvcAccount.Name) logger.V(4).Info("Attempting to sync token secret for provider service account") secretName := getServiceAccountSecretName(pSvcAccount) logger.V(4).Info("Fetching secret for service account token details", "secret", secretName) var svcAccountTokenSecret corev1.Secret - err := ctx.Client.Get(ctx, types.NamespacedName{Name: secretName, Namespace: pSvcAccount.Namespace}, &svcAccountTokenSecret) + err := guestClusterCtx.Client.Get(guestClusterCtx, types.NamespacedName{Name: secretName, Namespace: pSvcAccount.Namespace}, &svcAccountTokenSecret) if err != nil { return err } @@ -446,9 +446,9 @@ func (r ServiceAccountReconciler) syncServiceAccountSecret(ctx *vmwarecontext.Gu }, } - if err = ctx.GuestClient.Get(ctx, client.ObjectKey{Name: pSvcAccount.Spec.TargetNamespace}, targetNamespace); err != nil { + if err = guestClusterCtx.GuestClient.Get(guestClusterCtx, client.ObjectKey{Name: pSvcAccount.Spec.TargetNamespace}, targetNamespace); err != nil { if apierrors.IsNotFound(err) { - err = ctx.GuestClient.Create(ctx, targetNamespace) + err = guestClusterCtx.GuestClient.Create(guestClusterCtx, targetNamespace) if err != nil { return err } @@ -464,17 +464,17 @@ func (r ServiceAccountReconciler) syncServiceAccountSecret(ctx *vmwarecontext.Gu }, } logger.V(4).Info("Creating or updating secret in cluster", "namespace", targetSecret.Namespace, "name", targetSecret.Name) - _, err = controllerutil.CreateOrPatch(ctx, ctx.GuestClient, targetSecret, func() error { + _, err = controllerutil.CreateOrPatch(guestClusterCtx, guestClusterCtx.GuestClient, targetSecret, func() error { targetSecret.Data = svcAccountTokenSecret.Data return nil }) return err } -func (r ServiceAccountReconciler) getConfigMapAndBuffer(ctx *vmwarecontext.ClusterContext) (*corev1.ConfigMap, *corev1.ConfigMap, error) { +func (r ServiceAccountReconciler) getConfigMapAndBuffer(clusterCtx *vmwarecontext.ClusterContext) (*corev1.ConfigMap, *corev1.ConfigMap, error) { configMap := &corev1.ConfigMap{} - if err := ctx.Client.Get(ctx, GetCMNamespaceName(), configMap); err != nil { + if err := clusterCtx.Client.Get(clusterCtx, GetCMNamespaceName(), configMap); err != nil { return nil, nil, err } @@ -484,11 +484,11 @@ func (r 
ServiceAccountReconciler) getConfigMapAndBuffer(ctx *vmwarecontext.Clust return configMapBuffer, configMap, nil } -func (r ServiceAccountReconciler) deleteServiceAccountConfigMap(ctx *vmwarecontext.ClusterContext, svcAccount vmwarev1.ProviderServiceAccount) error { - logger := ctx.Logger.WithValues("providerserviceaccount", svcAccount.Name) +func (r ServiceAccountReconciler) deleteServiceAccountConfigMap(clusterCtx *vmwarecontext.ClusterContext, svcAccount vmwarev1.ProviderServiceAccount) error { + logger := clusterCtx.Logger.WithValues("providerserviceaccount", svcAccount.Name) svcAccountName := getSystemServiceAccountFullName(svcAccount) - configMapBuffer, configMap, err := r.getConfigMapAndBuffer(ctx) + configMapBuffer, configMap, err := r.getConfigMapAndBuffer(clusterCtx) if err != nil { return err } @@ -497,7 +497,7 @@ func (r ServiceAccountReconciler) deleteServiceAccountConfigMap(ctx *vmwareconte return nil } logger.Info("Deleting config map entry for provider service account") - _, err = controllerutil.CreateOrPatch(ctx, ctx.Client, configMapBuffer, func() error { + _, err = controllerutil.CreateOrPatch(clusterCtx, clusterCtx.Client, configMapBuffer, func() error { configMapBuffer.Data = configMap.Data delete(configMapBuffer.Data, svcAccountName) return nil @@ -505,11 +505,11 @@ func (r ServiceAccountReconciler) deleteServiceAccountConfigMap(ctx *vmwareconte return err } -func (r ServiceAccountReconciler) ensureServiceAccountConfigMap(ctx *vmwarecontext.ClusterContext, svcAccount vmwarev1.ProviderServiceAccount) error { - logger := ctx.Logger.WithValues("providerserviceaccount", svcAccount.Name) +func (r ServiceAccountReconciler) ensureServiceAccountConfigMap(clusterCtx *vmwarecontext.ClusterContext, svcAccount vmwarev1.ProviderServiceAccount) error { + logger := clusterCtx.Logger.WithValues("providerserviceaccount", svcAccount.Name) svcAccountName := getSystemServiceAccountFullName(svcAccount) - configMapBuffer, configMap, err := r.getConfigMapAndBuffer(ctx) + configMapBuffer, configMap, err := r.getConfigMapAndBuffer(clusterCtx) if err != nil { return err } @@ -518,7 +518,7 @@ func (r ServiceAccountReconciler) ensureServiceAccountConfigMap(ctx *vmwareconte return nil } logger.Info("Updating config map for provider service account") - _, err = controllerutil.CreateOrPatch(ctx, ctx.Client, configMapBuffer, func() error { + _, err = controllerutil.CreateOrPatch(clusterCtx, clusterCtx.Client, configMapBuffer, func() error { configMapBuffer.Data = configMap.Data configMapBuffer.Data[svcAccountName] = "true" return nil @@ -526,11 +526,11 @@ func (r ServiceAccountReconciler) ensureServiceAccountConfigMap(ctx *vmwareconte return err } -func getProviderServiceAccounts(ctx *vmwarecontext.ClusterContext) ([]vmwarev1.ProviderServiceAccount, error) { +func getProviderServiceAccounts(clusterCtx *vmwarecontext.ClusterContext) ([]vmwarev1.ProviderServiceAccount, error) { var pSvcAccounts []vmwarev1.ProviderServiceAccount pSvcAccountList := vmwarev1.ProviderServiceAccountList{} - if err := ctx.Client.List(ctx, &pSvcAccountList, client.InNamespace(ctx.VSphereCluster.Namespace)); err != nil { + if err := clusterCtx.Client.List(clusterCtx, &pSvcAccountList, client.InNamespace(clusterCtx.VSphereCluster.Namespace)); err != nil { return nil, err } @@ -542,7 +542,7 @@ func getProviderServiceAccounts(ctx *vmwarecontext.ClusterContext) ([]vmwarev1.P continue } ref := pSvcAccount.Spec.Ref - if ref != nil && ref.Name == ctx.VSphereCluster.Name { + if ref != nil && ref.Name == clusterCtx.VSphereCluster.Name 
{ pSvcAccounts = append(pSvcAccounts, pSvcAccount) } } @@ -584,7 +584,7 @@ func GetCMNamespaceName() types.NamespacedName { // secretToVSphereCluster is a mapper function used to enqueue reconcile.Request objects. // It accepts a Secret object owned by the controller and fetches the service account // that contains the token and creates a reconcile.Request for the vmwarev1.VSphereCluster object. -func (r ServiceAccountReconciler) secretToVSphereCluster(ctx goctx.Context, o client.Object) []reconcile.Request { +func (r ServiceAccountReconciler) secretToVSphereCluster(ctx context.Context, o client.Object) []reconcile.Request { secret, ok := o.(*corev1.Secret) if !ok { return nil @@ -627,7 +627,7 @@ func (r ServiceAccountReconciler) serviceAccountToVSphereCluster(o client.Object } // providerServiceAccountToVSphereCluster is a mapper function used to enqueue reconcile.Request objects. -func (r ServiceAccountReconciler) providerServiceAccountToVSphereCluster(_ goctx.Context, o client.Object) []reconcile.Request { +func (r ServiceAccountReconciler) providerServiceAccountToVSphereCluster(_ context.Context, o client.Object) []reconcile.Request { providerServiceAccount, ok := o.(*vmwarev1.ProviderServiceAccount) if !ok { return nil diff --git a/controllers/serviceaccount_controller_suite_test.go b/controllers/serviceaccount_controller_suite_test.go index 994782bff6..358745cbdf 100644 --- a/controllers/serviceaccount_controller_suite_test.go +++ b/controllers/serviceaccount_controller_suite_test.go @@ -17,7 +17,7 @@ limitations under the License. package controllers import ( - goctx "context" + "context" "fmt" "strings" "time" @@ -46,27 +46,27 @@ const ( var truePointer = true -func createTestResource(ctx goctx.Context, ctrlClient client.Client, obj client.Object) { +func createTestResource(ctx context.Context, ctrlClient client.Client, obj client.Object) { Expect(ctrlClient.Create(ctx, obj)).To(Succeed()) } -func deleteTestResource(ctx goctx.Context, ctrlClient client.Client, obj client.Object) { +func deleteTestResource(ctx context.Context, ctrlClient client.Client, obj client.Object) { Expect(ctrlClient.Delete(ctx, obj)).To(Succeed()) } -func createTargetSecretWithInvalidToken(ctx goctx.Context, guestClient client.Client, namespace string) { +func createTargetSecretWithInvalidToken(ctx context.Context, guestClient client.Client, namespace string) { secret := getTestTargetSecretWithInvalidToken(namespace) Expect(guestClient.Create(ctx, secret)).To(Succeed()) } -func assertEventuallyExistsInNamespace(ctx goctx.Context, c client.Client, namespace, name string, obj client.Object) { +func assertEventuallyExistsInNamespace(ctx context.Context, c client.Client, namespace, name string, obj client.Object) { EventuallyWithOffset(2, func() error { key := client.ObjectKey{Namespace: namespace, Name: name} return c.Get(ctx, key, obj) }).Should(Succeed()) } -func assertNoEntities(ctx goctx.Context, ctrlClient client.Client, namespace string) { +func assertNoEntities(ctx context.Context, ctrlClient client.Client, namespace string) { Consistently(func() int { var serviceAccountList corev1.ServiceAccountList err := ctrlClient.List(ctx, &serviceAccountList, client.InNamespace(namespace)) @@ -89,7 +89,7 @@ func assertNoEntities(ctx goctx.Context, ctrlClient client.Client, namespace str }, time.Second*3).Should(Equal(0)) } -func assertServiceAccountAndUpdateSecret(ctx goctx.Context, ctrlClient client.Client, namespace, name string) { +func assertServiceAccountAndUpdateSecret(ctx context.Context, ctrlClient 
client.Client, namespace, name string) { svcAccount := &corev1.ServiceAccount{} assertEventuallyExistsInNamespace(ctx, ctrlClient, namespace, name, svcAccount) secret := &corev1.Secret{} @@ -102,7 +102,7 @@ func assertServiceAccountAndUpdateSecret(ctx goctx.Context, ctrlClient client.Cl Expect(ctrlClient.Update(ctx, secret)).To(Succeed()) } -func assertTargetSecret(ctx goctx.Context, guestClient client.Client, namespace, name string) { +func assertTargetSecret(ctx context.Context, guestClient client.Client, namespace, name string) { secret := &corev1.Secret{} assertEventuallyExistsInNamespace(ctx, guestClient, namespace, name, secret) EventuallyWithOffset(2, func() []byte { @@ -145,7 +145,7 @@ func assertRoleBinding(_ *helpers.UnitTestContextForController, ctrlClient clien opts := &client.ListOptions{ Namespace: namespace, } - err := ctrlClient.List(goctx.TODO(), &roleBindingList, opts) + err := ctrlClient.List(context.TODO(), &roleBindingList, opts) Expect(err).ShouldNot(HaveOccurred()) Expect(roleBindingList.Items).To(HaveLen(1)) Expect(roleBindingList.Items[0].Name).To(Equal(name)) diff --git a/controllers/serviceaccount_controller_unit_test.go b/controllers/serviceaccount_controller_unit_test.go index bbea913f71..85793e933e 100644 --- a/controllers/serviceaccount_controller_unit_test.go +++ b/controllers/serviceaccount_controller_unit_test.go @@ -35,7 +35,7 @@ var _ = Describe("ServiceAccountReconciler ReconcileNormal", unitTestsReconcileN func unitTestsReconcileNormal() { var ( - ctx *helpers.UnitTestContextForController + controllerCtx *helpers.UnitTestContextForController vsphereCluster *vmwarev1.VSphereCluster initObjects []client.Object namespace string @@ -46,25 +46,25 @@ func unitTestsReconcileNormal() { // Note: The service account provider requires a reference to the vSphereCluster hence the need to create // a fake vSphereCluster in the test and pass it to during context setup. reconciler = ServiceAccountReconciler{} - ctx = helpers.NewUnitTestContextForController(namespace, vsphereCluster, false, initObjects, nil) - _, err := reconciler.ReconcileNormal(ctx.GuestClusterContext) + controllerCtx = helpers.NewUnitTestContextForController(namespace, vsphereCluster, false, initObjects, nil) + _, err := reconciler.ReconcileNormal(controllerCtx.GuestClusterContext) Expect(err).NotTo(HaveOccurred()) // Update the VSphereCluster and its status in the fake client. 
- Expect(ctx.Client.Update(ctx, ctx.VSphereCluster)).To(Succeed()) - Expect(ctx.Client.Status().Update(ctx, ctx.VSphereCluster)).To(Succeed()) + Expect(controllerCtx.Client.Update(controllerCtx, controllerCtx.VSphereCluster)).To(Succeed()) + Expect(controllerCtx.Client.Status().Update(controllerCtx, controllerCtx.VSphereCluster)).To(Succeed()) }) AfterEach(func() { - ctx = nil + controllerCtx = nil }) Context("When no provider service account is available", func() { namespace = capiutil.RandomString(6) It("Should reconcile", func() { By("Not creating any entities") - assertNoEntities(ctx, ctx.Client, namespace) - assertProviderServiceAccountsCondition(ctx.VSphereCluster, corev1.ConditionTrue, "", "", "") + assertNoEntities(controllerCtx, controllerCtx.Client, namespace) + assertProviderServiceAccountsCondition(controllerCtx.VSphereCluster, corev1.ConditionTrue, "", "", "") }) }) @@ -81,33 +81,33 @@ func unitTestsReconcileNormal() { } }) It("should create a service account and a secret", func() { - _, err := reconciler.ReconcileNormal(ctx.GuestClusterContext) + _, err := reconciler.ReconcileNormal(controllerCtx.GuestClusterContext) Expect(err).NotTo(HaveOccurred()) svcAccount := &corev1.ServiceAccount{} - assertEventuallyExistsInNamespace(ctx, ctx.Client, namespace, vsphereCluster.GetName(), svcAccount) + assertEventuallyExistsInNamespace(controllerCtx, controllerCtx.Client, namespace, vsphereCluster.GetName(), svcAccount) secret := &corev1.Secret{} - assertEventuallyExistsInNamespace(ctx, ctx.Client, namespace, fmt.Sprintf("%s-secret", vsphereCluster.GetName()), secret) + assertEventuallyExistsInNamespace(controllerCtx, controllerCtx.Client, namespace, fmt.Sprintf("%s-secret", vsphereCluster.GetName()), secret) }) Context("When serviceaccount secret is created", func() { It("Should reconcile", func() { - assertTargetNamespace(ctx, ctx.GuestClient, testTargetNS, false) - updateServiceAccountSecretAndReconcileNormal(ctx, reconciler, vsphereCluster) - assertTargetNamespace(ctx, ctx.GuestClient, testTargetNS, true) + assertTargetNamespace(controllerCtx, controllerCtx.GuestClient, testTargetNS, false) + updateServiceAccountSecretAndReconcileNormal(controllerCtx, reconciler, vsphereCluster) + assertTargetNamespace(controllerCtx, controllerCtx.GuestClient, testTargetNS, true) By("Creating the target secret in the target namespace") - assertTargetSecret(ctx, ctx.GuestClient, testTargetNS, testTargetSecret) - assertProviderServiceAccountsCondition(ctx.VSphereCluster, corev1.ConditionTrue, "", "", "") + assertTargetSecret(controllerCtx, controllerCtx.GuestClient, testTargetNS, testTargetSecret) + assertProviderServiceAccountsCondition(controllerCtx.VSphereCluster, corev1.ConditionTrue, "", "", "") }) }) Context("When serviceaccount secret is modified", func() { It("Should reconcile", func() { // This is to simulate an outdated token that will be replaced when the serviceaccount secret is created. 
- createTargetSecretWithInvalidToken(ctx, ctx.GuestClient, testTargetNS) - updateServiceAccountSecretAndReconcileNormal(ctx, reconciler, vsphereCluster) + createTargetSecretWithInvalidToken(controllerCtx, controllerCtx.GuestClient, testTargetNS) + updateServiceAccountSecretAndReconcileNormal(controllerCtx, reconciler, vsphereCluster) By("Updating the target secret in the target namespace") - assertTargetSecret(ctx, ctx.GuestClient, testTargetNS, testTargetSecret) - assertProviderServiceAccountsCondition(ctx.VSphereCluster, corev1.ConditionTrue, "", "", "") + assertTargetSecret(controllerCtx, controllerCtx.GuestClient, testTargetNS, testTargetSecret) + assertProviderServiceAccountsCondition(controllerCtx.VSphereCluster, corev1.ConditionTrue, "", "", "") }) }) Context("When invalid role exists", func() { @@ -115,8 +115,8 @@ func unitTestsReconcileNormal() { initObjects = append(initObjects, getTestRoleWithGetPod(namespace, vsphereCluster.GetName())) }) It("Should update role", func() { - assertRoleWithGetPVC(ctx, ctx.Client, namespace, vsphereCluster.GetName()) - assertProviderServiceAccountsCondition(ctx.VSphereCluster, corev1.ConditionTrue, "", "", "") + assertRoleWithGetPVC(controllerCtx, controllerCtx.Client, namespace, vsphereCluster.GetName()) + assertProviderServiceAccountsCondition(controllerCtx.VSphereCluster, corev1.ConditionTrue, "", "", "") }) }) Context("When invalid rolebinding exists", func() { @@ -124,8 +124,8 @@ func unitTestsReconcileNormal() { initObjects = append(initObjects, getTestRoleBindingWithInvalidRoleRef(namespace, vsphereCluster.GetName())) }) It("Should update rolebinding", func() { - assertRoleBinding(ctx, ctx.Client, namespace, vsphereCluster.GetName()) - assertProviderServiceAccountsCondition(ctx.VSphereCluster, corev1.ConditionTrue, "", "", "") + assertRoleBinding(controllerCtx, controllerCtx.Client, namespace, vsphereCluster.GetName()) + assertProviderServiceAccountsCondition(controllerCtx.VSphereCluster, corev1.ConditionTrue, "", "", "") }) }) }) @@ -133,8 +133,8 @@ func unitTestsReconcileNormal() { // Updates the service account secret similar to how a token controller would act upon a service account // and then re-invokes reconcileNormal. -func updateServiceAccountSecretAndReconcileNormal(ctx *helpers.UnitTestContextForController, reconciler ServiceAccountReconciler, object client.Object) { - assertServiceAccountAndUpdateSecret(ctx, ctx.Client, object.GetNamespace(), object.GetName()) - _, err := reconciler.ReconcileNormal(ctx.GuestClusterContext) +func updateServiceAccountSecretAndReconcileNormal(controllerCtx *helpers.UnitTestContextForController, reconciler ServiceAccountReconciler, object client.Object) { + assertServiceAccountAndUpdateSecret(controllerCtx, controllerCtx.Client, object.GetNamespace(), object.GetName()) + _, err := reconciler.ReconcileNormal(controllerCtx.GuestClusterContext) Expect(err).NotTo(HaveOccurred()) } diff --git a/controllers/servicediscovery_controller.go b/controllers/servicediscovery_controller.go index 6dd88e0e56..2b8872cc22 100644 --- a/controllers/servicediscovery_controller.go +++ b/controllers/servicediscovery_controller.go @@ -17,7 +17,7 @@ limitations under the License. 
package controllers import ( - goctx "context" + "context" "fmt" "net" "net/url" @@ -49,7 +49,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" vmwarecontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context/vmware" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/record" ) @@ -72,16 +72,16 @@ const ( // +kubebuilder:rbac:groups="",resources=configmaps/status,verbs=get // AddServiceDiscoveryControllerToManager adds the ServiceDiscovery controller to the provided manager. -func AddServiceDiscoveryControllerToManager(ctx *context.ControllerManagerContext, mgr manager.Manager, tracker *remote.ClusterCacheTracker, options controller.Options) error { +func AddServiceDiscoveryControllerToManager(controllerCtx *capvcontext.ControllerManagerContext, mgr manager.Manager, tracker *remote.ClusterCacheTracker, options controller.Options) error { var ( controllerNameShort = ServiceDiscoveryControllerName - controllerNameLong = fmt.Sprintf("%s/%s/%s", ctx.Namespace, ctx.Name, ServiceDiscoveryControllerName) + controllerNameLong = fmt.Sprintf("%s/%s/%s", controllerCtx.Namespace, controllerCtx.Name, ServiceDiscoveryControllerName) ) - controllerContext := &context.ControllerContext{ - ControllerManagerContext: ctx, + controllerContext := &capvcontext.ControllerContext{ + ControllerManagerContext: controllerCtx, Name: controllerNameShort, Recorder: record.New(mgr.GetEventRecorderFor(controllerNameLong)), - Logger: ctx.Logger.WithName(controllerNameShort), + Logger: controllerCtx.Logger.WithName(controllerNameShort), } r := serviceDiscoveryReconciler{ ControllerContext: controllerContext, @@ -121,17 +121,17 @@ func AddServiceDiscoveryControllerToManager(ctx *context.ControllerManagerContex &vmwarev1.VSphereCluster{}, handler.OnlyControllerOwner(), )). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), ctx.WatchFilterValue)). + WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(controllerCtx), controllerCtx.WatchFilterValue)). 
Complete(r) } type serviceDiscoveryReconciler struct { - *context.ControllerContext + *capvcontext.ControllerContext remoteClusterCacheTracker *remote.ClusterCacheTracker } -func (r serviceDiscoveryReconciler) Reconcile(_ goctx.Context, req reconcile.Request) (_ reconcile.Result, reterr error) { +func (r serviceDiscoveryReconciler) Reconcile(_ context.Context, req reconcile.Request) (_ reconcile.Result, reterr error) { logger := r.Logger.WithName(req.Namespace).WithName(req.Name) logger.V(4).Info("Starting Reconcile") @@ -206,12 +206,12 @@ func (r serviceDiscoveryReconciler) Reconcile(_ goctx.Context, req reconcile.Req }) } -func (r serviceDiscoveryReconciler) ReconcileNormal(ctx *vmwarecontext.GuestClusterContext) (reconcile.Result, error) { - ctx.Logger.V(4).Info("Reconciling Service Discovery", "cluster", ctx.VSphereCluster.Name) - if err := r.reconcileSupervisorHeadlessService(ctx); err != nil { - conditions.MarkFalse(ctx.VSphereCluster, vmwarev1.ServiceDiscoveryReadyCondition, vmwarev1.SupervisorHeadlessServiceSetupFailedReason, +func (r serviceDiscoveryReconciler) ReconcileNormal(guestClusterCtx *vmwarecontext.GuestClusterContext) (reconcile.Result, error) { + guestClusterCtx.Logger.V(4).Info("Reconciling Service Discovery", "cluster", guestClusterCtx.VSphereCluster.Name) + if err := r.reconcileSupervisorHeadlessService(guestClusterCtx); err != nil { + conditions.MarkFalse(guestClusterCtx.VSphereCluster, vmwarev1.ServiceDiscoveryReadyCondition, vmwarev1.SupervisorHeadlessServiceSetupFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) - return reconcile.Result{}, errors.Wrapf(err, "failed to configure supervisor headless service for %v", ctx.VSphereCluster) + return reconcile.Result{}, errors.Wrapf(err, "failed to configure supervisor headless service for %v", guestClusterCtx.VSphereCluster) } return reconcile.Result{}, nil @@ -219,24 +219,24 @@ func (r serviceDiscoveryReconciler) ReconcileNormal(ctx *vmwarecontext.GuestClus // Setup a local k8s service in the target cluster that proxies to the Supervisor Cluster API Server. The add-ons are // dependent on this local service to connect to the Supervisor Cluster. -func (r serviceDiscoveryReconciler) reconcileSupervisorHeadlessService(ctx *vmwarecontext.GuestClusterContext) error { +func (r serviceDiscoveryReconciler) reconcileSupervisorHeadlessService(guestClusterCtx *vmwarecontext.GuestClusterContext) error { // Create the headless service to the supervisor api server on the target cluster. supervisorPort := vmwarev1.SupervisorAPIServerPort svc := NewSupervisorHeadlessService(vmwarev1.SupervisorHeadlessSvcPort, supervisorPort) - if err := ctx.GuestClient.Create(ctx, svc); err != nil && !apierrors.IsAlreadyExists(err) { + if err := guestClusterCtx.GuestClient.Create(guestClusterCtx, svc); err != nil && !apierrors.IsAlreadyExists(err) { return errors.Wrapf(err, "cannot create k8s service %s/%s in ", svc.Namespace, svc.Name) } - supervisorHost, err := GetSupervisorAPIServerAddress(ctx.ClusterContext) + supervisorHost, err := GetSupervisorAPIServerAddress(guestClusterCtx.ClusterContext) if err != nil { // Note: We have watches on the LB Svc (VIP) & the cluster-info configmap (FIP). There is no need to return an error to keep // re-trying. 
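// Note (sketch, not part of this change): the watches mentioned above are registered on the
// controller builder in AddServiceDiscoveryControllerToManager, using the serviceToClusters and
// configMapToClusters mappers defined later in this file. Assuming the controller-runtime builder
// API used elsewhere in this diff, the wiring looks roughly like:
//
//	ctrl.NewControllerManagedBy(mgr).
//		For(&vmwarev1.VSphereCluster{}).
//		Watches(&corev1.Service{}, handler.EnqueueRequestsFromMapFunc(r.serviceToClusters)).
//		Watches(&corev1.ConfigMap{}, handler.EnqueueRequestsFromMapFunc(r.configMapToClusters)).
//		Complete(r)
//
// so a failed discovery only marks the condition false here and waits for the next Service or
// ConfigMap event to re-queue the cluster.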
- conditions.MarkFalse(ctx.VSphereCluster, vmwarev1.ServiceDiscoveryReadyCondition, vmwarev1.SupervisorHeadlessServiceSetupFailedReason, + conditions.MarkFalse(guestClusterCtx.VSphereCluster, vmwarev1.ServiceDiscoveryReadyCondition, vmwarev1.SupervisorHeadlessServiceSetupFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) return nil } - ctx.Logger.Info("Discovered supervisor apiserver address", "host", supervisorHost, "port", supervisorPort) + guestClusterCtx.Logger.Info("Discovered supervisor apiserver address", "host", supervisorHost, "port", supervisorPort) // CreateOrPatch the newEndpoints with the discovered supervisor api server address newEndpoints := NewSupervisorHeadlessServiceEndpoints( supervisorHost, @@ -253,8 +253,8 @@ func (r serviceDiscoveryReconciler) reconcileSupervisorHeadlessService(ctx *vmwa }, } result, err := controllerutil.CreateOrPatch( - ctx, - ctx.GuestClient, + guestClusterCtx, + guestClusterCtx.GuestClient, endpoints, func() error { endpoints.Subsets = newEndpoints.Subsets @@ -272,7 +272,7 @@ func (r serviceDiscoveryReconciler) reconcileSupervisorHeadlessService(ctx *vmwa switch result { case controllerutil.OperationResultNone: - ctx.Logger.Info( + guestClusterCtx.Logger.Info( "no update required for k8s service endpoints", "endpointsKey", endpointsKey, @@ -280,7 +280,7 @@ func (r serviceDiscoveryReconciler) reconcileSupervisorHeadlessService(ctx *vmwa endpointsSubsetsStr, ) case controllerutil.OperationResultCreated: - ctx.Logger.Info( + guestClusterCtx.Logger.Info( "created k8s service endpoints", "endpointsKey", endpointsKey, @@ -288,7 +288,7 @@ func (r serviceDiscoveryReconciler) reconcileSupervisorHeadlessService(ctx *vmwa endpointsSubsetsStr, ) case controllerutil.OperationResultUpdated: - ctx.Logger.Info( + guestClusterCtx.Logger.Info( "updated k8s service endpoints", "endpointsKey", endpointsKey, @@ -296,7 +296,7 @@ func (r serviceDiscoveryReconciler) reconcileSupervisorHeadlessService(ctx *vmwa endpointsSubsetsStr, ) default: - ctx.Logger.Error( + guestClusterCtx.Logger.Error( fmt.Errorf( "unexpected result during createOrPatch k8s service endpoints", ), @@ -309,22 +309,22 @@ func (r serviceDiscoveryReconciler) reconcileSupervisorHeadlessService(ctx *vmwa ) } - conditions.MarkTrue(ctx.VSphereCluster, vmwarev1.ServiceDiscoveryReadyCondition) + conditions.MarkTrue(guestClusterCtx.VSphereCluster, vmwarev1.ServiceDiscoveryReadyCondition) return nil } -func GetSupervisorAPIServerAddress(ctx *vmwarecontext.ClusterContext) (string, error) { +func GetSupervisorAPIServerAddress(clusterCtx *vmwarecontext.ClusterContext) (string, error) { // Discover the supervisor api server address // 1. Check if a k8s service "kube-system/kube-apiserver-lb-svc" is available, if so, fetch the loadbalancer IP. // 2. If not, get the Supervisor Cluster Management Network Floating IP (FIP) from the cluster-info configmap. This is // to support non-NSX-T development usecases only. If we are unable to find the cluster-info configmap for some reason, // we log the error. 
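// Note (sketch, not part of this change): tryParseClusterInfoFromConfigMap is not shown in this
// diff; with the standard client-go clientcmd helpers, resolving the FIP from the cluster-info
// ConfigMap amounts to roughly the following (getClusterFromKubeConfig is declared further down
// in this file):
//
//	kubeconfig, err := clientcmd.Load([]byte(cm.Data[bootstrapapi.KubeConfigKey]))
//	if err != nil {
//		return "", err
//	}
//	cluster := getClusterFromKubeConfig(kubeconfig) // first cluster entry in the kubeconfig
//	u, err := url.Parse(cluster.Server)
//	if err != nil {
//		return "", err
//	}
//	return u.Hostname(), nil
//
// which is also why this file imports "net/url".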
- supervisorHost, err := GetSupervisorAPIServerVIP(ctx.Client) + supervisorHost, err := GetSupervisorAPIServerVIP(clusterCtx.Client) if err != nil { - ctx.Logger.Info("Unable to discover supervisor apiserver virtual ip, fallback to floating ip", "reason", err.Error()) - supervisorHost, err = GetSupervisorAPIServerFIP(ctx.Client) + clusterCtx.Logger.Info("Unable to discover supervisor apiserver virtual ip, fallback to floating ip", "reason", err.Error()) + supervisorHost, err = GetSupervisorAPIServerFIP(clusterCtx.Client) if err != nil { - ctx.Logger.Error(err, "Unable to discover supervisor apiserver address") + clusterCtx.Logger.Error(err, "Unable to discover supervisor apiserver address") return "", errors.Wrapf(err, "Unable to discover supervisor apiserver address") } } @@ -380,7 +380,7 @@ func NewSupervisorHeadlessServiceEndpoints(targetHost string, targetPort int) *c func GetSupervisorAPIServerVIP(client client.Client) (string, error) { svc := &corev1.Service{} svcKey := types.NamespacedName{Name: vmwarev1.SupervisorLoadBalancerSvcName, Namespace: vmwarev1.SupervisorLoadBalancerSvcNamespace} - if err := client.Get(goctx.Background(), svcKey, svc); err != nil { + if err := client.Get(context.Background(), svcKey, svc); err != nil { return "", errors.Wrapf(err, "unable to get supervisor loadbalancer svc %s", svcKey) } if len(svc.Status.LoadBalancer.Ingress) > 0 { @@ -412,7 +412,7 @@ func GetSupervisorAPIServerFIP(client client.Client) (string, error) { func getSupervisorAPIServerURLWithFIP(client client.Client) (string, error) { cm := &corev1.ConfigMap{} cmKey := types.NamespacedName{Name: bootstrapapi.ConfigMapClusterInfo, Namespace: metav1.NamespacePublic} - if err := client.Get(goctx.Background(), cmKey, cm); err != nil { + if err := client.Get(context.Background(), cmKey, cm); err != nil { return "", err } kubeconfig, err := tryParseClusterInfoFromConfigMap(cm) @@ -453,7 +453,7 @@ func getClusterFromKubeConfig(config *clientcmdapi.Config) *clientcmdapi.Cluster // serviceToClusters is a mapper function used to enqueue reconcile.Requests // It watches for Service objects of type LoadBalancer for the supervisor api-server. -func (r serviceDiscoveryReconciler) serviceToClusters(ctx goctx.Context, o client.Object) []reconcile.Request { +func (r serviceDiscoveryReconciler) serviceToClusters(ctx context.Context, o client.Object) []reconcile.Request { if o.GetNamespace() != vmwarev1.SupervisorLoadBalancerSvcNamespace || o.GetName() != vmwarev1.SupervisorLoadBalancerSvcName { return nil } @@ -462,14 +462,14 @@ func (r serviceDiscoveryReconciler) serviceToClusters(ctx goctx.Context, o clien // configMapToClusters is a mapper function used to enqueue reconcile.Requests // It watches for cluster-info configmaps for the supervisor api-server. 
-func (r serviceDiscoveryReconciler) configMapToClusters(ctx goctx.Context, o client.Object) []reconcile.Request { +func (r serviceDiscoveryReconciler) configMapToClusters(ctx context.Context, o client.Object) []reconcile.Request { if o.GetNamespace() != metav1.NamespacePublic || o.GetName() != bootstrapapi.ConfigMapClusterInfo { return nil } return allClustersRequests(ctx, r.Client) } -func allClustersRequests(ctx goctx.Context, c client.Client) []reconcile.Request { +func allClustersRequests(ctx context.Context, c client.Client) []reconcile.Request { vsphereClusterList := &vmwarev1.VSphereClusterList{} if err := c.List(ctx, vsphereClusterList, &client.ListOptions{}); err != nil { return nil diff --git a/controllers/svcdiscovery_controller_unit_test.go b/controllers/svcdiscovery_controller_unit_test.go index 58dc8b3806..bb68c2f714 100644 --- a/controllers/svcdiscovery_controller_unit_test.go +++ b/controllers/svcdiscovery_controller_unit_test.go @@ -34,7 +34,7 @@ var _ = Describe("ServiceDiscoveryReconciler ReconcileNormal", serviceDiscoveryU func serviceDiscoveryUnitTestsReconcileNormal() { var ( - ctx *helpers.UnitTestContextForController + controllerCtx *helpers.UnitTestContextForController vsphereCluster vmwarev1.VSphereCluster initObjects []client.Object reconciler serviceDiscoveryReconciler @@ -42,22 +42,22 @@ func serviceDiscoveryUnitTestsReconcileNormal() { namespace := capiutil.RandomString(6) JustBeforeEach(func() { vsphereCluster = fake.NewVSphereCluster(namespace) - ctx = helpers.NewUnitTestContextForController(namespace, &vsphereCluster, false, initObjects, nil) - _, err := reconciler.ReconcileNormal(ctx.GuestClusterContext) + controllerCtx = helpers.NewUnitTestContextForController(namespace, &vsphereCluster, false, initObjects, nil) + _, err := reconciler.ReconcileNormal(controllerCtx.GuestClusterContext) Expect(err).NotTo(HaveOccurred()) // Update the VSphereCluster and its status in the fake client. 
- Expect(ctx.Client.Update(ctx, ctx.VSphereCluster)).To(Succeed()) - Expect(ctx.Client.Status().Update(ctx, ctx.VSphereCluster)).To(Succeed()) + Expect(controllerCtx.Client.Update(controllerCtx, controllerCtx.VSphereCluster)).To(Succeed()) + Expect(controllerCtx.Client.Status().Update(controllerCtx, controllerCtx.VSphereCluster)).To(Succeed()) }) JustAfterEach(func() { - ctx = nil + controllerCtx = nil }) Context("When no VIP or FIP is available ", func() { It("Should reconcile headless svc", func() { By("creating a service and no endpoint in the guest cluster") - assertHeadlessSvcWithNoEndpoints(ctx, ctx.GuestClient, supervisorHeadlessSvcNamespace, supervisorHeadlessSvcName) - assertServiceDiscoveryCondition(ctx.VSphereCluster, corev1.ConditionFalse, "Unable to discover supervisor apiserver address", + assertHeadlessSvcWithNoEndpoints(ctx, controllerCtx.GuestClient, supervisorHeadlessSvcNamespace, supervisorHeadlessSvcName) + assertServiceDiscoveryCondition(controllerCtx.VSphereCluster, corev1.ConditionFalse, "Unable to discover supervisor apiserver address", vmwarev1.SupervisorHeadlessServiceSetupFailedReason, clusterv1.ConditionSeverityWarning) }) }) @@ -70,11 +70,11 @@ func serviceDiscoveryUnitTestsReconcileNormal() { }) It("Should reconcile headless svc", func() { By("creating a service and endpoints using the VIP in the guest cluster") - assertHeadlessSvcWithVIPEndpoints(ctx, ctx.GuestClient, vmwarev1.SupervisorHeadlessSvcNamespace, vmwarev1.SupervisorHeadlessSvcName) - assertServiceDiscoveryCondition(ctx.VSphereCluster, corev1.ConditionTrue, "", "", "") + assertHeadlessSvcWithVIPEndpoints(ctx, controllerCtx.GuestClient, vmwarev1.SupervisorHeadlessSvcNamespace, vmwarev1.SupervisorHeadlessSvcName) + assertServiceDiscoveryCondition(controllerCtx.VSphereCluster, corev1.ConditionTrue, "", "", "") }) It("Should get supervisor master endpoint IP", func() { - supervisorEndpointIP, err := GetSupervisorAPIServerAddress(ctx.ClusterContext) + supervisorEndpointIP, err := GetSupervisorAPIServerAddress(controllerCtx.ClusterContext) Expect(err).ShouldNot(HaveOccurred()) Expect(supervisorEndpointIP).To(Equal(testSupervisorAPIServerVIP)) }) @@ -86,8 +86,8 @@ func serviceDiscoveryUnitTestsReconcileNormal() { }) It("Should reconcile headless svc", func() { By("creating a service and endpoints using the FIP in the guest cluster") - assertHeadlessSvcWithFIPEndpoints(ctx, ctx.GuestClient, supervisorHeadlessSvcNamespace, supervisorHeadlessSvcName) - assertServiceDiscoveryCondition(ctx.VSphereCluster, corev1.ConditionTrue, "", "", "") + assertHeadlessSvcWithFIPEndpoints(ctx, controllerCtx.GuestClient, supervisorHeadlessSvcNamespace, supervisorHeadlessSvcName) + assertServiceDiscoveryCondition(controllerCtx.VSphereCluster, corev1.ConditionTrue, "", "", "") }) }) Context("When VIP and FIP are available", func() { @@ -99,8 +99,8 @@ func serviceDiscoveryUnitTestsReconcileNormal() { }) It("Should reconcile headless svc", func() { By("creating a service and endpoints using the VIP in the guest cluster") - assertHeadlessSvcWithVIPEndpoints(ctx, ctx.GuestClient, supervisorHeadlessSvcNamespace, supervisorHeadlessSvcName) - assertServiceDiscoveryCondition(ctx.VSphereCluster, corev1.ConditionTrue, "", "", "") + assertHeadlessSvcWithVIPEndpoints(ctx, controllerCtx.GuestClient, supervisorHeadlessSvcNamespace, supervisorHeadlessSvcName) + assertServiceDiscoveryCondition(controllerCtx.VSphereCluster, corev1.ConditionTrue, "", "", "") }) }) Context("When VIP is an hostname", func() { @@ -110,8 +110,8 @@ func 
serviceDiscoveryUnitTestsReconcileNormal() { }) It("Should reconcile headless svc", func() { By("creating a service and endpoints using the VIP in the guest cluster") - assertHeadlessSvcWithVIPHostnameEndpoints(ctx, ctx.GuestClient, supervisorHeadlessSvcNamespace, supervisorHeadlessSvcName) - assertServiceDiscoveryCondition(ctx.VSphereCluster, corev1.ConditionTrue, "", "", "") + assertHeadlessSvcWithVIPHostnameEndpoints(ctx, controllerCtx.GuestClient, supervisorHeadlessSvcNamespace, supervisorHeadlessSvcName) + assertServiceDiscoveryCondition(controllerCtx.VSphereCluster, corev1.ConditionTrue, "", "", "") }) }) Context("When FIP is an hostname", func() { @@ -122,8 +122,8 @@ func serviceDiscoveryUnitTestsReconcileNormal() { }) It("Should reconcile headless svc", func() { By("creating a service and endpoints using the FIP in the guest cluster") - assertHeadlessSvcWithFIPHostNameEndpoints(ctx, ctx.GuestClient, supervisorHeadlessSvcNamespace, supervisorHeadlessSvcName) - assertServiceDiscoveryCondition(ctx.VSphereCluster, corev1.ConditionTrue, "", "", "") + assertHeadlessSvcWithFIPHostNameEndpoints(ctx, controllerCtx.GuestClient, supervisorHeadlessSvcNamespace, supervisorHeadlessSvcName) + assertServiceDiscoveryCondition(controllerCtx.VSphereCluster, corev1.ConditionTrue, "", "", "") }) }) Context("When FIP is an empty hostname", func() { @@ -134,8 +134,8 @@ func serviceDiscoveryUnitTestsReconcileNormal() { }) It("Should reconcile headless svc", func() { By("creating a service and no endpoint in the guest cluster") - assertHeadlessSvcWithNoEndpoints(ctx, ctx.GuestClient, supervisorHeadlessSvcNamespace, supervisorHeadlessSvcName) - assertServiceDiscoveryCondition(ctx.VSphereCluster, corev1.ConditionFalse, "Unable to discover supervisor apiserver address", + assertHeadlessSvcWithNoEndpoints(ctx, controllerCtx.GuestClient, supervisorHeadlessSvcNamespace, supervisorHeadlessSvcName) + assertServiceDiscoveryCondition(controllerCtx.VSphereCluster, corev1.ConditionFalse, "Unable to discover supervisor apiserver address", vmwarev1.SupervisorHeadlessServiceSetupFailedReason, clusterv1.ConditionSeverityWarning) }) }) @@ -147,8 +147,8 @@ func serviceDiscoveryUnitTestsReconcileNormal() { }) It("Should reconcile headless svc", func() { By("creating a service and no endpoint in the guest cluster") - assertHeadlessSvcWithNoEndpoints(ctx, ctx.GuestClient, supervisorHeadlessSvcNamespace, supervisorHeadlessSvcName) - assertServiceDiscoveryCondition(ctx.VSphereCluster, corev1.ConditionFalse, "Unable to discover supervisor apiserver address", + assertHeadlessSvcWithNoEndpoints(ctx, controllerCtx.GuestClient, supervisorHeadlessSvcNamespace, supervisorHeadlessSvcName) + assertServiceDiscoveryCondition(controllerCtx.VSphereCluster, corev1.ConditionFalse, "Unable to discover supervisor apiserver address", vmwarev1.SupervisorHeadlessServiceSetupFailedReason, clusterv1.ConditionSeverityWarning) }) }) @@ -163,8 +163,8 @@ func serviceDiscoveryUnitTestsReconcileNormal() { }) It("Should reconcile headless svc", func() { By("creating a service and no endpoint in the guest cluster") - assertHeadlessSvcWithNoEndpoints(ctx, ctx.GuestClient, supervisorHeadlessSvcNamespace, supervisorHeadlessSvcName) - assertServiceDiscoveryCondition(ctx.VSphereCluster, corev1.ConditionFalse, "Unable to discover supervisor apiserver address", + assertHeadlessSvcWithNoEndpoints(ctx, controllerCtx.GuestClient, supervisorHeadlessSvcNamespace, supervisorHeadlessSvcName) + assertServiceDiscoveryCondition(controllerCtx.VSphereCluster, 
corev1.ConditionFalse, "Unable to discover supervisor apiserver address", vmwarev1.SupervisorHeadlessServiceSetupFailedReason, clusterv1.ConditionSeverityWarning) }) }) @@ -179,8 +179,8 @@ func serviceDiscoveryUnitTestsReconcileNormal() { }) It("Should reconcile headless svc", func() { By("creating a service and no endpoint in the guest cluster") - assertHeadlessSvcWithNoEndpoints(ctx, ctx.GuestClient, supervisorHeadlessSvcNamespace, supervisorHeadlessSvcName) - assertServiceDiscoveryCondition(ctx.VSphereCluster, corev1.ConditionFalse, "Unable to discover supervisor apiserver address", + assertHeadlessSvcWithNoEndpoints(ctx, controllerCtx.GuestClient, supervisorHeadlessSvcNamespace, supervisorHeadlessSvcName) + assertServiceDiscoveryCondition(controllerCtx.VSphereCluster, corev1.ConditionFalse, "Unable to discover supervisor apiserver address", vmwarev1.SupervisorHeadlessServiceSetupFailedReason, clusterv1.ConditionSeverityWarning) }) }) diff --git a/controllers/vmware/test/controllers_suite_test.go b/controllers/vmware/test/controllers_suite_test.go index 8bbc4e161b..c9aa829411 100644 --- a/controllers/vmware/test/controllers_suite_test.go +++ b/controllers/vmware/test/controllers_suite_test.go @@ -17,7 +17,7 @@ limitations under the License. package test import ( - goctx "context" + "context" "encoding/json" "os/exec" "path" @@ -43,7 +43,7 @@ import ( var ( testEnv *envtest.Environment restConfig *rest.Config - ctx, cancel = goctx.WithCancel(goctx.Background()) + ctx, cancel = context.WithCancel(context.Background()) clusterAPIDir = findModuleDir("sigs.k8s.io/cluster-api") ) diff --git a/controllers/vmware/test/controllers_test.go b/controllers/vmware/test/controllers_test.go index 0407a696e0..10c69e9235 100644 --- a/controllers/vmware/test/controllers_test.go +++ b/controllers/vmware/test/controllers_test.go @@ -17,7 +17,7 @@ limitations under the License. 
package test import ( - goctx "context" + "context" "fmt" "os" "reflect" @@ -39,7 +39,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" "sigs.k8s.io/cluster-api-provider-vsphere/controllers" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/manager" ) @@ -235,12 +235,12 @@ func getManager(cfg *rest.Config, networkProvider string) manager.Manager { controllerOpts := controller.Options{MaxConcurrentReconciles: 10} - opts.AddToManager = func(ctx *context.ControllerManagerContext, mgr ctrlmgr.Manager) error { - if err := controllers.AddClusterControllerToManager(ctx, mgr, &vmwarev1.VSphereCluster{}, controllerOpts); err != nil { + opts.AddToManager = func(controllerCtx *capvcontext.ControllerManagerContext, mgr ctrlmgr.Manager) error { + if err := controllers.AddClusterControllerToManager(controllerCtx, mgr, &vmwarev1.VSphereCluster{}, controllerOpts); err != nil { return err } - return controllers.AddMachineControllerToManager(ctx, mgr, &vmwarev1.VSphereMachine{}, controllerOpts) + return controllers.AddMachineControllerToManager(controllerCtx, mgr, &vmwarev1.VSphereMachine{}, controllerOpts) } mgr, err := manager.New(opts) @@ -248,13 +248,13 @@ func getManager(cfg *rest.Config, networkProvider string) manager.Manager { return mgr } -func initManagerAndBuildClient(networkProvider string) (client.Client, goctx.CancelFunc) { +func initManagerAndBuildClient(networkProvider string) (client.Client, context.CancelFunc) { By("setting up a new manager") mgr := getManager(restConfig, networkProvider) k8sClient := mgr.GetClient() By("starting the manager") - managerCtx, managerCancel := goctx.WithCancel(ctx) + managerCtx, managerCancel := context.WithCancel(ctx) go func() { managerRuntimeError := mgr.Start(managerCtx) @@ -266,7 +266,7 @@ func initManagerAndBuildClient(networkProvider string) (client.Client, goctx.Can return k8sClient, managerCancel } -func prepareClient(isLoadBalanced bool) (cli client.Client, cancelation goctx.CancelFunc) { +func prepareClient(isLoadBalanced bool) (cli client.Client, cancelation context.CancelFunc) { networkProvider := "" if isLoadBalanced { networkProvider = manager.DummyLBNetworkProvider @@ -285,7 +285,7 @@ var ( var _ = Describe("Conformance tests", func() { var ( k8sClient client.Client - managerCancel goctx.CancelFunc + managerCancel context.CancelFunc key *client.ObjectKey obj *client.Object ) @@ -336,7 +336,7 @@ var _ = Describe("Conformance tests", func() { var _ = Describe("Reconciliation tests", func() { var ( k8sClient client.Client - managerCancel goctx.CancelFunc + managerCancel context.CancelFunc ) // assertEventuallyFinalizers is used to assert an object eventually has one diff --git a/controllers/vmware/vspherecluster_reconciler.go b/controllers/vmware/vspherecluster_reconciler.go index a2ee1c5cc6..8645e266ef 100644 --- a/controllers/vmware/vspherecluster_reconciler.go +++ b/controllers/vmware/vspherecluster_reconciler.go @@ -17,7 +17,7 @@ limitations under the License. 
package vmware import ( - goctx "context" + "context" "fmt" "os" @@ -36,7 +36,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context/vmware" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/services" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/util" @@ -47,7 +47,7 @@ const ( ) type ClusterReconciler struct { - *context.ControllerContext + *capvcontext.ControllerContext NetworkProvider services.NetworkProvider ControlPlaneService services.ControlPlaneEndpointService ResourcePolicyService services.ResourcePolicyService @@ -63,7 +63,7 @@ type ClusterReconciler struct { // +kubebuilder:rbac:groups="",resources=persistentvolumeclaims,verbs=get;list;watch;update;create;delete // +kubebuilder:rbac:groups="",resources=persistentvolumeclaims/status,verbs=get;update;patch -func (r *ClusterReconciler) Reconcile(_ goctx.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { +func (r *ClusterReconciler) Reconcile(_ context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { logger := r.Logger.WithName(req.Namespace).WithName(req.Name) logger.V(3).Info("Starting Reconcile vsphereCluster") @@ -132,8 +132,8 @@ func (r *ClusterReconciler) Reconcile(_ goctx.Context, req ctrl.Request) (_ ctrl return ctrl.Result{}, r.reconcileNormal(clusterContext) } -func (r *ClusterReconciler) reconcileDelete(ctx *vmware.ClusterContext) { - ctx.Logger.Info("Reconciling vsphereCluster delete") +func (r *ClusterReconciler) reconcileDelete(clusterCtx *vmware.ClusterContext) { + clusterCtx.Logger.Info("Reconciling vsphereCluster delete") deletingConditionTypes := []clusterv1.ConditionType{ vmwarev1.ResourcePolicyReadyCondition, @@ -142,134 +142,134 @@ func (r *ClusterReconciler) reconcileDelete(ctx *vmware.ClusterContext) { } for _, t := range deletingConditionTypes { - if c := conditions.Get(ctx.VSphereCluster, t); c != nil { - conditions.MarkFalse(ctx.VSphereCluster, t, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + if c := conditions.Get(clusterCtx.VSphereCluster, t); c != nil { + conditions.MarkFalse(clusterCtx.VSphereCluster, t, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") } } // Cluster is deleted so remove the finalizer. - controllerutil.RemoveFinalizer(ctx.VSphereCluster, vmwarev1.ClusterFinalizer) + controllerutil.RemoveFinalizer(clusterCtx.VSphereCluster, vmwarev1.ClusterFinalizer) } -func (r *ClusterReconciler) reconcileNormal(ctx *vmware.ClusterContext) error { - ctx.Logger.Info("Reconciling vsphereCluster") +func (r *ClusterReconciler) reconcileNormal(clusterCtx *vmware.ClusterContext) error { + clusterCtx.Logger.Info("Reconciling vsphereCluster") // Get any failure domains to report back to the CAPI core controller. - failureDomains, err := r.getFailureDomains(ctx) + failureDomains, err := r.getFailureDomains(clusterCtx) if err != nil { return errors.Wrapf( err, - "unexpected error while discovering failure domains for %s", ctx.VSphereCluster.Name) + "unexpected error while discovering failure domains for %s", clusterCtx.VSphereCluster.Name) } - ctx.VSphereCluster.Status.FailureDomains = failureDomains + clusterCtx.VSphereCluster.Status.FailureDomains = failureDomains // Reconcile ResourcePolicy before we create the machines. 
If the ResourcePolicy is not reconciled before we create the Node VMs, // it will be handled by vm operator by relocating the VMs to the ResourcePool and Folder specified by the ResourcePolicy. // Reconciling the ResourcePolicy early potentially saves us the extra relocate operation. - resourcePolicyName, err := r.ResourcePolicyService.ReconcileResourcePolicy(ctx) + resourcePolicyName, err := r.ResourcePolicyService.ReconcileResourcePolicy(clusterCtx) if err != nil { - conditions.MarkFalse(ctx.VSphereCluster, vmwarev1.ResourcePolicyReadyCondition, vmwarev1.ResourcePolicyCreationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + conditions.MarkFalse(clusterCtx.VSphereCluster, vmwarev1.ResourcePolicyReadyCondition, vmwarev1.ResourcePolicyCreationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) return errors.Wrapf(err, "failed to configure resource policy for vsphereCluster %s/%s", - ctx.VSphereCluster.Namespace, ctx.VSphereCluster.Name) + clusterCtx.VSphereCluster.Namespace, clusterCtx.VSphereCluster.Name) } - conditions.MarkTrue(ctx.VSphereCluster, vmwarev1.ResourcePolicyReadyCondition) - ctx.VSphereCluster.Status.ResourcePolicyName = resourcePolicyName + conditions.MarkTrue(clusterCtx.VSphereCluster, vmwarev1.ResourcePolicyReadyCondition) + clusterCtx.VSphereCluster.Status.ResourcePolicyName = resourcePolicyName // Configure the cluster for the cluster network - err = r.NetworkProvider.ProvisionClusterNetwork(ctx) + err = r.NetworkProvider.ProvisionClusterNetwork(clusterCtx) if err != nil { return errors.Wrapf(err, "failed to configure cluster network for vsphereCluster %s/%s", - ctx.VSphereCluster.Namespace, ctx.VSphereCluster.Name) + clusterCtx.VSphereCluster.Namespace, clusterCtx.VSphereCluster.Name) } - if ok, err := r.reconcileControlPlaneEndpoint(ctx); !ok { + if ok, err := r.reconcileControlPlaneEndpoint(clusterCtx); !ok { if err != nil { - return errors.Wrapf(err, "unexpected error while reconciling control plane endpoint for %s", ctx.VSphereCluster.Name) + return errors.Wrapf(err, "unexpected error while reconciling control plane endpoint for %s", clusterCtx.VSphereCluster.Name) } } - ctx.VSphereCluster.Status.Ready = true - ctx.Logger.V(2).Info("Reconcile completed, vsphereCluster is infrastructure-ready") + clusterCtx.VSphereCluster.Status.Ready = true + clusterCtx.Logger.V(2).Info("Reconcile completed, vsphereCluster is infrastructure-ready") return nil } -func (r *ClusterReconciler) reconcileControlPlaneEndpoint(ctx *vmware.ClusterContext) (bool, error) { - if !ctx.Cluster.Spec.ControlPlaneEndpoint.IsZero() { - ctx.VSphereCluster.Spec.ControlPlaneEndpoint.Host = ctx.Cluster.Spec.ControlPlaneEndpoint.Host - ctx.VSphereCluster.Spec.ControlPlaneEndpoint.Port = ctx.Cluster.Spec.ControlPlaneEndpoint.Port +func (r *ClusterReconciler) reconcileControlPlaneEndpoint(clusterCtx *vmware.ClusterContext) (bool, error) { + if !clusterCtx.Cluster.Spec.ControlPlaneEndpoint.IsZero() { + clusterCtx.VSphereCluster.Spec.ControlPlaneEndpoint.Host = clusterCtx.Cluster.Spec.ControlPlaneEndpoint.Host + clusterCtx.VSphereCluster.Spec.ControlPlaneEndpoint.Port = clusterCtx.Cluster.Spec.ControlPlaneEndpoint.Port if r.NetworkProvider.HasLoadBalancer() { - conditions.MarkTrue(ctx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition) + conditions.MarkTrue(clusterCtx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition) } - ctx.Logger.Info("skipping control plane endpoint reconciliation", + clusterCtx.Logger.Info("skipping control plane endpoint reconciliation", "reason", 
"ControlPlaneEndpoint already set on Cluster", - "controlPlaneEndpoint", ctx.Cluster.Spec.ControlPlaneEndpoint.String()) + "controlPlaneEndpoint", clusterCtx.Cluster.Spec.ControlPlaneEndpoint.String()) return true, nil } - if !ctx.VSphereCluster.Spec.ControlPlaneEndpoint.IsZero() { + if !clusterCtx.VSphereCluster.Spec.ControlPlaneEndpoint.IsZero() { if r.NetworkProvider.HasLoadBalancer() { - conditions.MarkTrue(ctx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition) + conditions.MarkTrue(clusterCtx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition) } - ctx.Logger.Info("skipping control plane endpoint reconciliation", + clusterCtx.Logger.Info("skipping control plane endpoint reconciliation", "reason", "ControlPlaneEndpoint already set on vsphereCluster", - "controlPlaneEndpoint", ctx.VSphereCluster.Spec.ControlPlaneEndpoint.String()) + "controlPlaneEndpoint", clusterCtx.VSphereCluster.Spec.ControlPlaneEndpoint.String()) return true, nil } if r.NetworkProvider.HasLoadBalancer() { - if err := r.reconcileLoadBalancedEndpoint(ctx); err != nil { + if err := r.reconcileLoadBalancedEndpoint(clusterCtx); err != nil { return false, errors.Wrapf(err, "failed to reconcile loadbalanced endpoint for vsphereCluster %s/%s", - ctx.VSphereCluster.Namespace, ctx.VSphereCluster.Name) + clusterCtx.VSphereCluster.Namespace, clusterCtx.VSphereCluster.Name) } return true, nil } - if err := r.reconcileAPIEndpoints(ctx); err != nil { + if err := r.reconcileAPIEndpoints(clusterCtx); err != nil { return false, errors.Wrapf(err, "failed to reconcile API endpoints for vsphereCluster %s/%s", - ctx.VSphereCluster.Namespace, ctx.VSphereCluster.Name) + clusterCtx.VSphereCluster.Namespace, clusterCtx.VSphereCluster.Name) } return true, nil } -func (r *ClusterReconciler) reconcileLoadBalancedEndpoint(ctx *vmware.ClusterContext) error { - ctx.Logger.Info("Reconciling load-balanced control plane endpoint") +func (r *ClusterReconciler) reconcileLoadBalancedEndpoint(clusterCtx *vmware.ClusterContext) error { + clusterCtx.Logger.Info("Reconciling load-balanced control plane endpoint") // Will create a VirtualMachineService for a NetworkProvider that supports load balancing - cpEndpoint, err := r.ControlPlaneService.ReconcileControlPlaneEndpointService(ctx, r.NetworkProvider) + cpEndpoint, err := r.ControlPlaneService.ReconcileControlPlaneEndpointService(clusterCtx, r.NetworkProvider) if err != nil { // Likely the endpoint is not ready. Keep retrying. return errors.Wrapf(err, "failed to get control plane endpoint for Cluster %s/%s", - ctx.VSphereCluster.Namespace, ctx.VSphereCluster.Name) + clusterCtx.VSphereCluster.Namespace, clusterCtx.VSphereCluster.Name) } if cpEndpoint == nil { return fmt.Errorf("control plane endpoint not available for Cluster %s/%s", - ctx.VSphereCluster.Namespace, ctx.VSphereCluster.Name) + clusterCtx.VSphereCluster.Namespace, clusterCtx.VSphereCluster.Name) } // If we've got here and we have a cpEndpoint, we're done. 
- ctx.VSphereCluster.Spec.ControlPlaneEndpoint = *cpEndpoint - ctx.Logger.V(3).Info( + clusterCtx.VSphereCluster.Spec.ControlPlaneEndpoint = *cpEndpoint + clusterCtx.Logger.V(3).Info( "found API endpoint via virtual machine service", "host", cpEndpoint.Host, "port", cpEndpoint.Port) return nil } -func (r *ClusterReconciler) reconcileAPIEndpoints(ctx *vmware.ClusterContext) error { - ctx.Logger.Info("Reconciling control plane endpoint") - machines, err := collections.GetFilteredMachinesForCluster(ctx, r.Client, ctx.Cluster, collections.ControlPlaneMachines(ctx.Cluster.Name)) +func (r *ClusterReconciler) reconcileAPIEndpoints(clusterCtx *vmware.ClusterContext) error { + clusterCtx.Logger.Info("Reconciling control plane endpoint") + machines, err := collections.GetFilteredMachinesForCluster(clusterCtx, r.Client, clusterCtx.Cluster, collections.ControlPlaneMachines(clusterCtx.Cluster.Name)) if err != nil { return errors.Wrapf(err, "failed to get Machines for Cluster %s/%s", - ctx.Cluster.Namespace, ctx.Cluster.Name) + clusterCtx.Cluster.Namespace, clusterCtx.Cluster.Name) } // Define a variable to assign the API endpoints of control plane @@ -280,7 +280,7 @@ func (r *ClusterReconciler) reconcileAPIEndpoints(ctx *vmware.ClusterContext) er for _, machine := range machines { // Only machines with bootstrap data will have an IP address. if machine.Spec.Bootstrap.DataSecretName == nil { - ctx.Logger.V(5).Info( + clusterCtx.Logger.V(5).Info( "skipping machine while looking for IP address", "reason", "bootstrap.DataSecretName is nil", "machine-name", machine.Name) @@ -288,16 +288,16 @@ func (r *ClusterReconciler) reconcileAPIEndpoints(ctx *vmware.ClusterContext) er } // Get the vsphereMachine for the CAPI Machine resource. - vsphereMachine, err := util.GetVSphereMachine(ctx, ctx.Client, machine.Namespace, machine.Name) + vsphereMachine, err := util.GetVSphereMachine(clusterCtx, clusterCtx.Client, machine.Namespace, machine.Name) if err != nil { return errors.Wrapf(err, "failed to get vsphereMachine for Machine %s/%s/%s", - ctx.VSphereCluster.Namespace, ctx.VSphereCluster.Name, machine.Name) + clusterCtx.VSphereCluster.Namespace, clusterCtx.VSphereCluster.Name, machine.Name) } // If the machine has no IP address then skip it. if vsphereMachine.Status.IPAddr == "" { - ctx.Logger.V(5).Info("skipping machine without IP address") + clusterCtx.Logger.V(5).Info("skipping machine without IP address") continue } @@ -309,7 +309,7 @@ func (r *ClusterReconciler) reconcileAPIEndpoints(ctx *vmware.ClusterContext) er Port: apiEndpointPort, } apiEndpointList = append(apiEndpointList, apiEndpoint) - ctx.Logger.V(3).Info( + clusterCtx.Logger.V(3).Info( "found API endpoint via control plane machine", "host", apiEndpoint.Host, "port", apiEndpoint.Port) @@ -321,16 +321,16 @@ func (r *ClusterReconciler) reconcileAPIEndpoints(ctx *vmware.ClusterContext) er if len(apiEndpointList) == 0 { return errors.Wrapf(err, "failed to reconcile API endpoints for %s/%s", - ctx.VSphereCluster.Namespace, ctx.VSphereCluster.Name) + clusterCtx.VSphereCluster.Namespace, clusterCtx.VSphereCluster.Name) } // Update the vsphereCluster's list of APIEndpoints. 
- ctx.VSphereCluster.Spec.ControlPlaneEndpoint = apiEndpointList[0] + clusterCtx.VSphereCluster.Spec.ControlPlaneEndpoint = apiEndpointList[0] return nil } -func (r *ClusterReconciler) VSphereMachineToCluster(ctx goctx.Context, o client.Object) []reconcile.Request { +func (r *ClusterReconciler) VSphereMachineToCluster(ctx context.Context, o client.Object) []reconcile.Request { vsphereMachine, ok := o.(*vmwarev1.VSphereMachine) if !ok { r.Logger.Error(errors.New("did not get vspheremachine"), "got", fmt.Sprintf("%T", o)) @@ -368,13 +368,13 @@ var isFaultDomainsFSSEnabled = func() bool { // Returns the failure domain information discovered on the cluster // hosting this controller. -func (r *ClusterReconciler) getFailureDomains(ctx *vmware.ClusterContext) (clusterv1.FailureDomains, error) { +func (r *ClusterReconciler) getFailureDomains(clusterCtx *vmware.ClusterContext) (clusterv1.FailureDomains, error) { if !isFaultDomainsFSSEnabled() { return nil, nil } availabilityZoneList := &topologyv1.AvailabilityZoneList{} - if err := ctx.Client.List(ctx, availabilityZoneList); err != nil { + if err := clusterCtx.Client.List(clusterCtx, availabilityZoneList); err != nil { return nil, err } diff --git a/controllers/vspherecluster_controller.go b/controllers/vspherecluster_controller.go index 6408ed8887..23629486f2 100644 --- a/controllers/vspherecluster_controller.go +++ b/controllers/vspherecluster_controller.go @@ -17,7 +17,7 @@ limitations under the License. package controllers import ( - goctx "context" + "context" "fmt" "reflect" "strings" @@ -39,7 +39,7 @@ import ( vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" "sigs.k8s.io/cluster-api-provider-vsphere/controllers/vmware" "sigs.k8s.io/cluster-api-provider-vsphere/feature" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" inframanager "sigs.k8s.io/cluster-api-provider-vsphere/pkg/manager" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/record" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/services/vmoperator" @@ -57,7 +57,7 @@ import ( // AddClusterControllerToManager adds the cluster controller to the provided // manager. 
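// Note (sketch, not part of this change): a typical caller wires this up when building the
// manager, mirroring the test setup earlier in this diff; the controlled type and options are
// illustrative:
//
//	opts.AddToManager = func(controllerCtx *capvcontext.ControllerManagerContext, mgr ctrlmgr.Manager) error {
//		return AddClusterControllerToManager(controllerCtx, mgr, &infrav1.VSphereCluster{}, controller.Options{MaxConcurrentReconciles: 10})
//	}
//
// The same entry point serves both the govmomi-based and the supervisor-based cluster controller;
// supervisorBased below selects between them from the controlled type.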
-func AddClusterControllerToManager(ctx *context.ControllerManagerContext, mgr manager.Manager, clusterControlledType client.Object, options controller.Options) error { +func AddClusterControllerToManager(controllerCtx *capvcontext.ControllerManagerContext, mgr manager.Manager, clusterControlledType client.Object, options controller.Options) error { supervisorBased, err := util.IsSupervisorType(clusterControlledType) if err != nil { return err @@ -67,24 +67,24 @@ func AddClusterControllerToManager(ctx *context.ControllerManagerContext, mgr ma clusterControlledTypeName = reflect.TypeOf(clusterControlledType).Elem().Name() clusterControlledTypeGVK = infrav1.GroupVersion.WithKind(clusterControlledTypeName) controllerNameShort = fmt.Sprintf("%s-controller", strings.ToLower(clusterControlledTypeName)) - controllerNameLong = fmt.Sprintf("%s/%s/%s", ctx.Namespace, ctx.Name, controllerNameShort) + controllerNameLong = fmt.Sprintf("%s/%s/%s", controllerCtx.Namespace, controllerCtx.Name, controllerNameShort) ) if supervisorBased { clusterControlledTypeGVK = vmwarev1.GroupVersion.WithKind(clusterControlledTypeName) controllerNameShort = fmt.Sprintf("%s-supervisor-controller", strings.ToLower(clusterControlledTypeName)) - controllerNameLong = fmt.Sprintf("%s/%s/%s", ctx.Namespace, ctx.Name, controllerNameShort) + controllerNameLong = fmt.Sprintf("%s/%s/%s", controllerCtx.Namespace, controllerCtx.Name, controllerNameShort) } // Build the controller context. - controllerContext := &context.ControllerContext{ - ControllerManagerContext: ctx, + controllerContext := &capvcontext.ControllerContext{ + ControllerManagerContext: controllerCtx, Name: controllerNameShort, Recorder: record.New(mgr.GetEventRecorderFor(controllerNameLong)), - Logger: ctx.Logger.WithName(controllerNameShort), + Logger: controllerCtx.Logger.WithName(controllerNameShort), } if supervisorBased { - networkProvider, err := inframanager.GetNetworkProvider(ctx) + networkProvider, err := inframanager.GetNetworkProvider(controllerCtx) if err != nil { return errors.Wrap(err, "failed to create a network provider") } @@ -102,7 +102,7 @@ func AddClusterControllerToManager(ctx *context.ControllerManagerContext, mgr ma &vmwarev1.VSphereMachine{}, handler.EnqueueRequestsFromMapFunc(reconciler.VSphereMachineToCluster), ). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), ctx.WatchFilterValue)). + WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(controllerCtx), controllerCtx.WatchFilterValue)). Complete(reconciler) } @@ -110,7 +110,7 @@ func AddClusterControllerToManager(ctx *context.ControllerManagerContext, mgr ma ControllerContext: controllerContext, clusterModuleReconciler: NewReconciler(controllerContext), } - clusterToInfraFn := clusterToInfrastructureMapFunc(ctx) + clusterToInfraFn := clusterToInfrastructureMapFunc(controllerCtx) c, err := ctrl.NewControllerManagedBy(mgr). // Watch the controlled, infrastructure resource. For(clusterControlledType). @@ -118,7 +118,7 @@ func AddClusterControllerToManager(ctx *context.ControllerManagerContext, mgr ma // Watch the CAPI resource that owns this infrastructure resource. 
Watches( &clusterv1.Cluster{}, - handler.EnqueueRequestsFromMapFunc(func(ctx goctx.Context, o client.Object) []reconcile.Request { + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, o client.Object) []reconcile.Request { requests := clusterToInfraFn(ctx, o) if requests == nil { return nil @@ -157,10 +157,10 @@ func AddClusterControllerToManager(ctx *context.ControllerManagerContext, mgr ma // should cause a resource to be synchronized, such as a goroutine // waiting on some asynchronous, external task to complete. WatchesRawSource( - &source.Channel{Source: ctx.GetGenericEventChannelFor(clusterControlledTypeGVK)}, + &source.Channel{Source: controllerCtx.GetGenericEventChannelFor(clusterControlledTypeGVK)}, &handler.EnqueueRequestForObject{}, ). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), ctx.WatchFilterValue)). + WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(controllerCtx), controllerCtx.WatchFilterValue)). WithEventFilter(predicates.ResourceIsNotExternallyManaged(reconciler.Logger)). Build(reconciler) if err != nil { @@ -173,7 +173,7 @@ func AddClusterControllerToManager(ctx *context.ControllerManagerContext, mgr ma return nil } -func clusterToInfrastructureMapFunc(managerContext *context.ControllerManagerContext) handler.MapFunc { +func clusterToInfrastructureMapFunc(controllerCtx *capvcontext.ControllerManagerContext) handler.MapFunc { gvk := infrav1.GroupVersion.WithKind(reflect.TypeOf(&infrav1.VSphereCluster{}).Elem().Name()) - return clusterutilv1.ClusterToInfrastructureMapFunc(managerContext, gvk, managerContext.Client, &infrav1.VSphereCluster{}) + return clusterutilv1.ClusterToInfrastructureMapFunc(controllerCtx, gvk, controllerCtx.Client, &infrav1.VSphereCluster{}) } diff --git a/controllers/vspherecluster_reconciler.go b/controllers/vspherecluster_reconciler.go index b17588bdb1..65aac0a8b8 100644 --- a/controllers/vspherecluster_reconciler.go +++ b/controllers/vspherecluster_reconciler.go @@ -18,7 +18,7 @@ limitations under the License. package controllers import ( - goctx "context" + "context" "fmt" "sync" "time" @@ -44,7 +44,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" "sigs.k8s.io/cluster-api-provider-vsphere/feature" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/identity" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/session" infrautilv1 "sigs.k8s.io/cluster-api-provider-vsphere/pkg/util" @@ -55,13 +55,13 @@ import ( const legacyIdentityFinalizer string = "identity/infrastructure.cluster.x-k8s.io" type clusterReconciler struct { - *context.ControllerContext + *capvcontext.ControllerContext clusterModuleReconciler Reconciler } // Reconcile ensures the back-end state reflects the Kubernetes resource state intent. -func (r clusterReconciler) Reconcile(_ goctx.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { +func (r clusterReconciler) Reconcile(_ context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { // Get the VSphereCluster resource for this request. vsphereCluster := &infrav1.VSphereCluster{} if err := r.Client.Get(r, req.NamespacedName, vsphereCluster); err != nil { @@ -99,7 +99,7 @@ func (r clusterReconciler) Reconcile(_ goctx.Context, req ctrl.Request) (_ ctrl. } // Create the cluster context for this request. 
- clusterContext := &context.ClusterContext{ + clusterContext := &capvcontext.ClusterContext{ ControllerContext: r.ControllerContext, Cluster: cluster, VSphereCluster: vsphereCluster, @@ -138,13 +138,13 @@ func (r clusterReconciler) Reconcile(_ goctx.Context, req ctrl.Request) (_ ctrl. return r.reconcileNormal(clusterContext) } -func (r clusterReconciler) reconcileDelete(ctx *context.ClusterContext) (reconcile.Result, error) { - ctx.Logger.Info("Reconciling VSphereCluster delete") +func (r clusterReconciler) reconcileDelete(clusterCtx *capvcontext.ClusterContext) (reconcile.Result, error) { + clusterCtx.Logger.Info("Reconciling VSphereCluster delete") - vsphereMachines, err := infrautilv1.GetVSphereMachinesInCluster(ctx, ctx.Client, ctx.Cluster.Namespace, ctx.Cluster.Name) + vsphereMachines, err := infrautilv1.GetVSphereMachinesInCluster(clusterCtx, clusterCtx.Client, clusterCtx.Cluster.Namespace, clusterCtx.Cluster.Name) if err != nil { return reconcile.Result{}, errors.Wrapf(err, - "unable to list VSphereMachines part of VSphereCluster %s/%s", ctx.VSphereCluster.Namespace, ctx.VSphereCluster.Name) + "unable to list VSphereMachines part of VSphereCluster %s/%s", clusterCtx.VSphereCluster.Namespace, clusterCtx.VSphereCluster.Name) } machineDeletionCount := 0 @@ -153,16 +153,16 @@ func (r clusterReconciler) reconcileDelete(ctx *context.ClusterContext) (reconci // If the VSphereMachine is not owned by the CAPI Machine object because the machine object was deleted // before setting the owner references, then proceed with the deletion of the VSphereMachine object. // This is required until CAPI has a solution for https://github.com/kubernetes-sigs/cluster-api/issues/5483 - if clusterutilv1.IsOwnedByObject(vsphereMachine, ctx.VSphereCluster) && len(vsphereMachine.OwnerReferences) == 1 { + if clusterutilv1.IsOwnedByObject(vsphereMachine, clusterCtx.VSphereCluster) && len(vsphereMachine.OwnerReferences) == 1 { machineDeletionCount++ // Remove the finalizer since VM creation wouldn't proceed r.Logger.Info("Removing finalizer from VSphereMachine", "namespace", vsphereMachine.Namespace, "name", vsphereMachine.Name) ctrlutil.RemoveFinalizer(vsphereMachine, infrav1.MachineFinalizer) - if err := r.Client.Update(ctx, vsphereMachine); err != nil { + if err := r.Client.Update(clusterCtx, vsphereMachine); err != nil { return reconcile.Result{}, err } - if err := r.Client.Delete(ctx, vsphereMachine); err != nil && !apierrors.IsNotFound(err) { - ctx.Logger.Error(err, "Failed to delete for VSphereMachine", "namespace", vsphereMachine.Namespace, "name", vsphereMachine.Name) + if err := r.Client.Delete(clusterCtx, vsphereMachine); err != nil && !apierrors.IsNotFound(err) { + clusterCtx.Logger.Error(err, "Failed to delete for VSphereMachine", "namespace", vsphereMachine.Namespace, "name", vsphereMachine.Name) deletionErrors = append(deletionErrors, err) } } @@ -172,29 +172,29 @@ func (r clusterReconciler) reconcileDelete(ctx *context.ClusterContext) (reconci } if len(vsphereMachines)-machineDeletionCount > 0 { - ctx.Logger.Info("Waiting for VSphereMachines to be deleted", "count", len(vsphereMachines)-machineDeletionCount) + clusterCtx.Logger.Info("Waiting for VSphereMachines to be deleted", "count", len(vsphereMachines)-machineDeletionCount) return reconcile.Result{RequeueAfter: 10 * time.Second}, nil } // The cluster module info needs to be reconciled before the secret deletion // since it needs access to the vCenter instance to be able to perform LCM operations // on the cluster modules. 
- affinityReconcileResult, err := r.reconcileClusterModules(ctx) + affinityReconcileResult, err := r.reconcileClusterModules(clusterCtx) if err != nil { return affinityReconcileResult, err } // Remove finalizer on Identity Secret - if identity.IsSecretIdentity(ctx.VSphereCluster) { + if identity.IsSecretIdentity(clusterCtx.VSphereCluster) { secret := &corev1.Secret{} secretKey := client.ObjectKey{ - Namespace: ctx.VSphereCluster.Namespace, - Name: ctx.VSphereCluster.Spec.IdentityRef.Name, + Namespace: clusterCtx.VSphereCluster.Namespace, + Name: clusterCtx.VSphereCluster.Spec.IdentityRef.Name, } - err := ctx.Client.Get(ctx, secretKey, secret) + err := clusterCtx.Client.Get(clusterCtx, secretKey, secret) if err != nil { if apierrors.IsNotFound(err) { - ctrlutil.RemoveFinalizer(ctx.VSphereCluster, infrav1.ClusterFinalizer) + ctrlutil.RemoveFinalizer(clusterCtx.VSphereCluster, infrav1.ClusterFinalizer) return reconcile.Result{}, nil } return reconcile.Result{}, err @@ -207,90 +207,90 @@ func (r clusterReconciler) reconcileDelete(ctx *context.ClusterContext) (reconci if ctrlutil.ContainsFinalizer(secret, legacyIdentityFinalizer) { ctrlutil.RemoveFinalizer(secret, legacyIdentityFinalizer) } - if err := ctx.Client.Update(ctx, secret); err != nil { + if err := clusterCtx.Client.Update(clusterCtx, secret); err != nil { return reconcile.Result{}, err } - if err := ctx.Client.Delete(ctx, secret); err != nil { + if err := clusterCtx.Client.Delete(clusterCtx, secret); err != nil { return reconcile.Result{}, err } } // Cluster is deleted so remove the finalizer. - ctrlutil.RemoveFinalizer(ctx.VSphereCluster, infrav1.ClusterFinalizer) + ctrlutil.RemoveFinalizer(clusterCtx.VSphereCluster, infrav1.ClusterFinalizer) return reconcile.Result{}, nil } -func (r clusterReconciler) reconcileNormal(ctx *context.ClusterContext) (reconcile.Result, error) { - ctx.Logger.Info("Reconciling VSphereCluster") +func (r clusterReconciler) reconcileNormal(clusterCtx *capvcontext.ClusterContext) (reconcile.Result, error) { + clusterCtx.Logger.Info("Reconciling VSphereCluster") - ok, err := r.reconcileDeploymentZones(ctx) + ok, err := r.reconcileDeploymentZones(clusterCtx) if err != nil { return reconcile.Result{}, err } if !ok { - ctx.Logger.Info("waiting for failure domains to be reconciled") + clusterCtx.Logger.Info("waiting for failure domains to be reconciled") return reconcile.Result{RequeueAfter: 10 * time.Second}, nil } - if err := r.reconcileIdentitySecret(ctx); err != nil { - conditions.MarkFalse(ctx.VSphereCluster, infrav1.VCenterAvailableCondition, infrav1.VCenterUnreachableReason, clusterv1.ConditionSeverityError, err.Error()) + if err := r.reconcileIdentitySecret(clusterCtx); err != nil { + conditions.MarkFalse(clusterCtx.VSphereCluster, infrav1.VCenterAvailableCondition, infrav1.VCenterUnreachableReason, clusterv1.ConditionSeverityError, err.Error()) return reconcile.Result{}, err } - vcenterSession, err := r.reconcileVCenterConnectivity(ctx) + vcenterSession, err := r.reconcileVCenterConnectivity(clusterCtx) if err != nil { - conditions.MarkFalse(ctx.VSphereCluster, infrav1.VCenterAvailableCondition, infrav1.VCenterUnreachableReason, clusterv1.ConditionSeverityError, err.Error()) + conditions.MarkFalse(clusterCtx.VSphereCluster, infrav1.VCenterAvailableCondition, infrav1.VCenterUnreachableReason, clusterv1.ConditionSeverityError, err.Error()) return reconcile.Result{}, errors.Wrapf(err, - "unexpected error while probing vcenter for %s", ctx) + "unexpected error while probing vcenter for %s", clusterCtx) } - 
conditions.MarkTrue(ctx.VSphereCluster, infrav1.VCenterAvailableCondition) + conditions.MarkTrue(clusterCtx.VSphereCluster, infrav1.VCenterAvailableCondition) - err = r.reconcileVCenterVersion(ctx, vcenterSession) - if err != nil || ctx.VSphereCluster.Status.VCenterVersion == "" { - conditions.MarkFalse(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition, infrav1.MissingVCenterVersionReason, clusterv1.ConditionSeverityWarning, "vCenter API version not set") - ctx.Logger.Error(err, "could not reconcile vCenter version") + err = r.reconcileVCenterVersion(clusterCtx, vcenterSession) + if err != nil || clusterCtx.VSphereCluster.Status.VCenterVersion == "" { + conditions.MarkFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition, infrav1.MissingVCenterVersionReason, clusterv1.ConditionSeverityWarning, "vCenter API version not set") + clusterCtx.Logger.Error(err, "could not reconcile vCenter version") } - affinityReconcileResult, err := r.reconcileClusterModules(ctx) + affinityReconcileResult, err := r.reconcileClusterModules(clusterCtx) if err != nil { - conditions.MarkFalse(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition, infrav1.ClusterModuleSetupFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) + conditions.MarkFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition, infrav1.ClusterModuleSetupFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) return affinityReconcileResult, err } - ctx.VSphereCluster.Status.Ready = true + clusterCtx.VSphereCluster.Status.Ready = true // Ensure the VSphereCluster is reconciled when the API server first comes online. // A reconcile event will only be triggered if the Cluster is not marked as // ControlPlaneInitialized. - r.reconcileVSphereClusterWhenAPIServerIsOnline(ctx) - if ctx.VSphereCluster.Spec.ControlPlaneEndpoint.IsZero() { - ctx.Logger.Info("control plane endpoint is not reconciled") + r.reconcileVSphereClusterWhenAPIServerIsOnline(clusterCtx) + if clusterCtx.VSphereCluster.Spec.ControlPlaneEndpoint.IsZero() { + clusterCtx.Logger.Info("control plane endpoint is not reconciled") return reconcile.Result{}, nil } // If the cluster is deleted, that's mean that the workload cluster is being deleted and so the CCM/CSI instances - if !ctx.Cluster.DeletionTimestamp.IsZero() { + if !clusterCtx.Cluster.DeletionTimestamp.IsZero() { return reconcile.Result{}, nil } // Wait until the API server is online and accessible. 
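Each step of reconcileNormal follows the same condition bookkeeping shown above: mark the step's condition False with a reason and severity while it fails, and True once it succeeds. A hedged sketch of that pattern, reusing the condition and reason constants already present in this diff (the probe closure is illustrative):

package example

import (
    "github.com/pkg/errors"
    clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    "sigs.k8s.io/cluster-api/util/conditions"

    infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1"
)

// markVCenterAvailability records the outcome of a vCenter connectivity probe
// on the VSphereCluster's conditions.
func markVCenterAvailability(vsphereCluster *infrav1.VSphereCluster, probe func() error) error {
    if err := probe(); err != nil {
        conditions.MarkFalse(vsphereCluster, infrav1.VCenterAvailableCondition,
            infrav1.VCenterUnreachableReason, clusterv1.ConditionSeverityError, err.Error())
        return errors.Wrap(err, "unable to reach vCenter")
    }
    conditions.MarkTrue(vsphereCluster, infrav1.VCenterAvailableCondition)
    return nil
}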
- if !r.isAPIServerOnline(ctx) { + if !r.isAPIServerOnline(clusterCtx) { return reconcile.Result{}, nil } return reconcile.Result{}, nil } -func (r clusterReconciler) reconcileIdentitySecret(ctx *context.ClusterContext) error { - vsphereCluster := ctx.VSphereCluster +func (r clusterReconciler) reconcileIdentitySecret(clusterCtx *capvcontext.ClusterContext) error { + vsphereCluster := clusterCtx.VSphereCluster if identity.IsSecretIdentity(vsphereCluster) { secret := &corev1.Secret{} secretKey := client.ObjectKey{ Namespace: vsphereCluster.Namespace, Name: vsphereCluster.Spec.IdentityRef.Name, } - err := ctx.Client.Get(ctx, secretKey, secret) + err := clusterCtx.Client.Get(clusterCtx, secretKey, secret) if err != nil { return err } @@ -312,7 +312,7 @@ func (r clusterReconciler) reconcileIdentitySecret(ctx *context.ClusterContext) if !ctrlutil.ContainsFinalizer(secret, infrav1.SecretIdentitySetFinalizer) { ctrlutil.AddFinalizer(secret, infrav1.SecretIdentitySetFinalizer) } - err = r.Client.Update(ctx, secret) + err = r.Client.Update(clusterCtx, secret) if err != nil { return err } @@ -321,53 +321,53 @@ func (r clusterReconciler) reconcileIdentitySecret(ctx *context.ClusterContext) return nil } -func (r clusterReconciler) reconcileVCenterConnectivity(ctx *context.ClusterContext) (*session.Session, error) { +func (r clusterReconciler) reconcileVCenterConnectivity(clusterCtx *capvcontext.ClusterContext) (*session.Session, error) { params := session.NewParams(). - WithServer(ctx.VSphereCluster.Spec.Server). - WithThumbprint(ctx.VSphereCluster.Spec.Thumbprint). + WithServer(clusterCtx.VSphereCluster.Spec.Server). + WithThumbprint(clusterCtx.VSphereCluster.Spec.Thumbprint). WithFeatures(session.Feature{ EnableKeepAlive: r.EnableKeepAlive, KeepAliveDuration: r.KeepAliveDuration, }) - if ctx.VSphereCluster.Spec.IdentityRef != nil { - creds, err := identity.GetCredentials(ctx, r.Client, ctx.VSphereCluster, r.Namespace) + if clusterCtx.VSphereCluster.Spec.IdentityRef != nil { + creds, err := identity.GetCredentials(clusterCtx, r.Client, clusterCtx.VSphereCluster, r.Namespace) if err != nil { return nil, err } params = params.WithUserInfo(creds.Username, creds.Password) - return session.GetOrCreate(ctx, params) + return session.GetOrCreate(clusterCtx, params) } - params = params.WithUserInfo(ctx.Username, ctx.Password) - return session.GetOrCreate(ctx, params) + params = params.WithUserInfo(clusterCtx.Username, clusterCtx.Password) + return session.GetOrCreate(clusterCtx, params) } -func (r clusterReconciler) reconcileVCenterVersion(ctx *context.ClusterContext, s *session.Session) error { +func (r clusterReconciler) reconcileVCenterVersion(clusterCtx *capvcontext.ClusterContext, s *session.Session) error { version, err := s.GetVersion() if err != nil { return err } - ctx.VSphereCluster.Status.VCenterVersion = version + clusterCtx.VSphereCluster.Status.VCenterVersion = version return nil } -func (r clusterReconciler) reconcileDeploymentZones(ctx *context.ClusterContext) (bool, error) { +func (r clusterReconciler) reconcileDeploymentZones(clusterCtx *capvcontext.ClusterContext) (bool, error) { // If there is no failure domain selector, we should simply skip it - if ctx.VSphereCluster.Spec.FailureDomainSelector == nil { + if clusterCtx.VSphereCluster.Spec.FailureDomainSelector == nil { return true, nil } var opts client.ListOptions var err error - opts.LabelSelector, err = metav1.LabelSelectorAsSelector(ctx.VSphereCluster.Spec.FailureDomainSelector) + opts.LabelSelector, err = 
metav1.LabelSelectorAsSelector(clusterCtx.VSphereCluster.Spec.FailureDomainSelector) if err != nil { return false, errors.Wrapf(err, "zone label selector is misconfigured") } var deploymentZoneList infrav1.VSphereDeploymentZoneList - err = r.Client.List(ctx, &deploymentZoneList, &opts) + err = r.Client.List(clusterCtx, &deploymentZoneList, &opts) if err != nil { return false, errors.Wrap(err, "unable to list deployment zones") } @@ -375,7 +375,7 @@ func (r clusterReconciler) reconcileDeploymentZones(ctx *context.ClusterContext) readyNotReported, notReady := 0, 0 failureDomains := clusterv1.FailureDomains{} for _, zone := range deploymentZoneList.Items { - if zone.Spec.Server != ctx.VSphereCluster.Spec.Server { + if zone.Spec.Server != clusterCtx.VSphereCluster.Spec.Server { continue } @@ -396,21 +396,21 @@ func (r clusterReconciler) reconcileDeploymentZones(ctx *context.ClusterContext) notReady++ } - ctx.VSphereCluster.Status.FailureDomains = failureDomains + clusterCtx.VSphereCluster.Status.FailureDomains = failureDomains if readyNotReported > 0 { - conditions.MarkFalse(ctx.VSphereCluster, infrav1.FailureDomainsAvailableCondition, infrav1.WaitingForFailureDomainStatusReason, clusterv1.ConditionSeverityInfo, "waiting for failure domains to report ready status") + conditions.MarkFalse(clusterCtx.VSphereCluster, infrav1.FailureDomainsAvailableCondition, infrav1.WaitingForFailureDomainStatusReason, clusterv1.ConditionSeverityInfo, "waiting for failure domains to report ready status") return false, nil } if len(failureDomains) > 0 { if notReady > 0 { - conditions.MarkFalse(ctx.VSphereCluster, infrav1.FailureDomainsAvailableCondition, infrav1.FailureDomainsSkippedReason, clusterv1.ConditionSeverityInfo, "one or more failure domains are not ready") + conditions.MarkFalse(clusterCtx.VSphereCluster, infrav1.FailureDomainsAvailableCondition, infrav1.FailureDomainsSkippedReason, clusterv1.ConditionSeverityInfo, "one or more failure domains are not ready") } else { - conditions.MarkTrue(ctx.VSphereCluster, infrav1.FailureDomainsAvailableCondition) + conditions.MarkTrue(clusterCtx.VSphereCluster, infrav1.FailureDomainsAvailableCondition) } } else { // Remove the condition if failure domains do not exist - conditions.Delete(ctx.VSphereCluster, infrav1.FailureDomainsAvailableCondition) + conditions.Delete(clusterCtx.VSphereCluster, infrav1.FailureDomainsAvailableCondition) } return true, nil } @@ -422,46 +422,46 @@ var ( apiServerTriggersMu sync.Mutex ) -func (r clusterReconciler) reconcileVSphereClusterWhenAPIServerIsOnline(ctx *context.ClusterContext) { - if conditions.IsTrue(ctx.Cluster, clusterv1.ControlPlaneInitializedCondition) { - ctx.Logger.Info("skipping reconcile when API server is online", +func (r clusterReconciler) reconcileVSphereClusterWhenAPIServerIsOnline(clusterCtx *capvcontext.ClusterContext) { + if conditions.IsTrue(clusterCtx.Cluster, clusterv1.ControlPlaneInitializedCondition) { + clusterCtx.Logger.Info("skipping reconcile when API server is online", "reason", "controlPlaneInitialized") return } apiServerTriggersMu.Lock() defer apiServerTriggersMu.Unlock() - if _, ok := apiServerTriggers[ctx.Cluster.UID]; ok { - ctx.Logger.Info("skipping reconcile when API server is online", + if _, ok := apiServerTriggers[clusterCtx.Cluster.UID]; ok { + clusterCtx.Logger.Info("skipping reconcile when API server is online", "reason", "alreadyPolling") return } - apiServerTriggers[ctx.Cluster.UID] = struct{}{} + apiServerTriggers[clusterCtx.Cluster.UID] = struct{}{} go func() { // Block until the 
target API server is online. - ctx.Logger.Info("start polling API server for online check") - wait.PollUntilContextCancel(goctx.Background(), time.Second*1, true, func(goctx.Context) (bool, error) { return r.isAPIServerOnline(ctx), nil }) //nolint:errcheck - ctx.Logger.Info("stop polling API server for online check") - ctx.Logger.Info("triggering GenericEvent", "reason", "api-server-online") - eventChannel := ctx.GetGenericEventChannelFor(ctx.VSphereCluster.GetObjectKind().GroupVersionKind()) + clusterCtx.Logger.Info("start polling API server for online check") + wait.PollUntilContextCancel(context.Background(), time.Second*1, true, func(context.Context) (bool, error) { return r.isAPIServerOnline(clusterCtx), nil }) //nolint:errcheck + clusterCtx.Logger.Info("stop polling API server for online check") + clusterCtx.Logger.Info("triggering GenericEvent", "reason", "api-server-online") + eventChannel := clusterCtx.GetGenericEventChannelFor(clusterCtx.VSphereCluster.GetObjectKind().GroupVersionKind()) eventChannel <- event.GenericEvent{ - Object: ctx.VSphereCluster, + Object: clusterCtx.VSphereCluster, } // Once the control plane has been marked as initialized it is safe to // remove the key from the map that prevents multiple goroutines from // polling the API server to see if it is online. - ctx.Logger.Info("start polling for control plane initialized") - wait.PollUntilContextCancel(goctx.Background(), time.Second*1, true, func(goctx.Context) (bool, error) { return r.isControlPlaneInitialized(ctx), nil }) //nolint:errcheck - ctx.Logger.Info("stop polling for control plane initialized") + clusterCtx.Logger.Info("start polling for control plane initialized") + wait.PollUntilContextCancel(context.Background(), time.Second*1, true, func(context.Context) (bool, error) { return r.isControlPlaneInitialized(clusterCtx), nil }) //nolint:errcheck + clusterCtx.Logger.Info("stop polling for control plane initialized") apiServerTriggersMu.Lock() - delete(apiServerTriggers, ctx.Cluster.UID) + delete(apiServerTriggers, clusterCtx.Cluster.UID) apiServerTriggersMu.Unlock() }() } -func (r clusterReconciler) isAPIServerOnline(ctx *context.ClusterContext) bool { - if kubeClient, err := infrautilv1.NewKubeClient(ctx, ctx.Client, ctx.Cluster); err == nil { - if _, err := kubeClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{}); err == nil { +func (r clusterReconciler) isAPIServerOnline(clusterCtx *capvcontext.ClusterContext) bool { + if kubeClient, err := infrautilv1.NewKubeClient(clusterCtx, clusterCtx.Client, clusterCtx.Cluster); err == nil { + if _, err := kubeClient.CoreV1().Nodes().List(clusterCtx, metav1.ListOptions{}); err == nil { // The target cluster is online. To make sure the correct control // plane endpoint information is logged, it is necessary to fetch // an up-to-date Cluster resource. If this fails, then set the @@ -469,14 +469,14 @@ func (r clusterReconciler) isAPIServerOnline(ctx *context.ClusterContext) bool { // VSphereCluster resource, as it must have the correct information // if the API server is online. 
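The API-server trigger above combines three pieces: a mutex-guarded set keyed by cluster UID so only one goroutine polls per cluster, wait.PollUntilContextCancel to block until the workload API server answers, and a GenericEvent pushed onto the controller's channel source to re-enqueue the VSphereCluster. A compact sketch of that shape (the online closure and channel wiring are assumptions, not the CAPV implementation):

package example

import (
    "context"
    "sync"
    "time"

    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/wait"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/event"
)

var (
    pollers   = map[types.UID]struct{}{}
    pollersMu sync.Mutex
)

// pollOnce starts at most one background poller per cluster UID. When the
// online check succeeds, it pushes a GenericEvent so the controller's channel
// source re-enqueues the object, then releases the UID for future polls.
func pollOnce(uid types.UID, obj client.Object, online func(context.Context) (bool, error), events chan<- event.GenericEvent) {
    pollersMu.Lock()
    if _, ok := pollers[uid]; ok {
        pollersMu.Unlock()
        return // another goroutine is already polling this cluster
    }
    pollers[uid] = struct{}{}
    pollersMu.Unlock()

    go func() {
        // Block until the API server answers, then notify the controller.
        _ = wait.PollUntilContextCancel(context.Background(), time.Second, true, online)
        events <- event.GenericEvent{Object: obj}

        pollersMu.Lock()
        delete(pollers, uid)
        pollersMu.Unlock()
    }()
}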
cluster := &clusterv1.Cluster{} - clusterKey := client.ObjectKey{Namespace: ctx.Cluster.Namespace, Name: ctx.Cluster.Name} - if err := ctx.Client.Get(ctx, clusterKey, cluster); err != nil { - cluster = ctx.Cluster.DeepCopy() - cluster.Spec.ControlPlaneEndpoint.Host = ctx.VSphereCluster.Spec.ControlPlaneEndpoint.Host - cluster.Spec.ControlPlaneEndpoint.Port = ctx.VSphereCluster.Spec.ControlPlaneEndpoint.Port - ctx.Logger.Error(err, "failed to get updated cluster object while checking if API server is online") + clusterKey := client.ObjectKey{Namespace: clusterCtx.Cluster.Namespace, Name: clusterCtx.Cluster.Name} + if err := clusterCtx.Client.Get(clusterCtx, clusterKey, cluster); err != nil { + cluster = clusterCtx.Cluster.DeepCopy() + cluster.Spec.ControlPlaneEndpoint.Host = clusterCtx.VSphereCluster.Spec.ControlPlaneEndpoint.Host + cluster.Spec.ControlPlaneEndpoint.Port = clusterCtx.VSphereCluster.Spec.ControlPlaneEndpoint.Port + clusterCtx.Logger.Error(err, "failed to get updated cluster object while checking if API server is online") } - ctx.Logger.Info( + clusterCtx.Logger.Info( "API server is online", "controlPlaneEndpoint", cluster.Spec.ControlPlaneEndpoint.String()) return true @@ -485,30 +485,30 @@ func (r clusterReconciler) isAPIServerOnline(ctx *context.ClusterContext) bool { return false } -func (r clusterReconciler) isControlPlaneInitialized(ctx *context.ClusterContext) bool { +func (r clusterReconciler) isControlPlaneInitialized(clusterCtx *capvcontext.ClusterContext) bool { cluster := &clusterv1.Cluster{} - clusterKey := client.ObjectKey{Namespace: ctx.Cluster.Namespace, Name: ctx.Cluster.Name} - if err := ctx.Client.Get(ctx, clusterKey, cluster); err != nil { + clusterKey := client.ObjectKey{Namespace: clusterCtx.Cluster.Namespace, Name: clusterCtx.Cluster.Name} + if err := clusterCtx.Client.Get(clusterCtx, clusterKey, cluster); err != nil { if !apierrors.IsNotFound(err) { - ctx.Logger.Error(err, "failed to get updated cluster object while checking if control plane is initialized") + clusterCtx.Logger.Error(err, "failed to get updated cluster object while checking if control plane is initialized") return false } - ctx.Logger.Info("exiting early because cluster no longer exists") + clusterCtx.Logger.Info("exiting early because cluster no longer exists") return true } - return conditions.IsTrue(ctx.Cluster, clusterv1.ControlPlaneInitializedCondition) + return conditions.IsTrue(clusterCtx.Cluster, clusterv1.ControlPlaneInitializedCondition) } -func setOwnerRefsOnVsphereMachines(ctx *context.ClusterContext) error { - vsphereMachines, err := infrautilv1.GetVSphereMachinesInCluster(ctx, ctx.Client, ctx.Cluster.Namespace, ctx.Cluster.Name) +func setOwnerRefsOnVsphereMachines(clusterCtx *capvcontext.ClusterContext) error { + vsphereMachines, err := infrautilv1.GetVSphereMachinesInCluster(clusterCtx, clusterCtx.Client, clusterCtx.Cluster.Namespace, clusterCtx.Cluster.Name) if err != nil { return errors.Wrapf(err, - "unable to list VSphereMachines part of VSphereCluster %s/%s", ctx.VSphereCluster.Namespace, ctx.VSphereCluster.Name) + "unable to list VSphereMachines part of VSphereCluster %s/%s", clusterCtx.VSphereCluster.Namespace, clusterCtx.VSphereCluster.Name) } var patchErrors []error for _, vsphereMachine := range vsphereMachines { - patchHelper, err := patch.NewHelper(vsphereMachine, ctx.Client) + patchHelper, err := patch.NewHelper(vsphereMachine, clusterCtx.Client) if err != nil { patchErrors = append(patchErrors, err) continue @@ -517,22 +517,22 @@ func 
setOwnerRefsOnVsphereMachines(ctx *context.ClusterContext) error { vsphereMachine.SetOwnerReferences(clusterutilv1.EnsureOwnerRef( vsphereMachine.OwnerReferences, metav1.OwnerReference{ - APIVersion: ctx.VSphereCluster.APIVersion, - Kind: ctx.VSphereCluster.Kind, - Name: ctx.VSphereCluster.Name, - UID: ctx.VSphereCluster.UID, + APIVersion: clusterCtx.VSphereCluster.APIVersion, + Kind: clusterCtx.VSphereCluster.Kind, + Name: clusterCtx.VSphereCluster.Name, + UID: clusterCtx.VSphereCluster.UID, })) - if err := patchHelper.Patch(ctx, vsphereMachine); err != nil { + if err := patchHelper.Patch(clusterCtx, vsphereMachine); err != nil { patchErrors = append(patchErrors, err) } } return kerrors.NewAggregate(patchErrors) } -func (r clusterReconciler) reconcileClusterModules(ctx *context.ClusterContext) (reconcile.Result, error) { +func (r clusterReconciler) reconcileClusterModules(clusterCtx *capvcontext.ClusterContext) (reconcile.Result, error) { if feature.Gates.Enabled(feature.NodeAntiAffinity) { - return r.clusterModuleReconciler.Reconcile(ctx) + return r.clusterModuleReconciler.Reconcile(clusterCtx) } return reconcile.Result{}, nil } @@ -540,7 +540,7 @@ func (r clusterReconciler) reconcileClusterModules(ctx *context.ClusterContext) // controlPlaneMachineToCluster is a handler.ToRequestsFunc to be used // to enqueue requests for reconciliation for VSphereCluster to update // its status.apiEndpoints field. -func (r clusterReconciler) controlPlaneMachineToCluster(ctx goctx.Context, o client.Object) []ctrl.Request { +func (r clusterReconciler) controlPlaneMachineToCluster(ctx context.Context, o client.Object) []ctrl.Request { vsphereMachine, ok := o.(*infrav1.VSphereMachine) if !ok { r.Logger.Error(nil, fmt.Sprintf("expected a VSphereMachine but got a %T", o)) @@ -602,7 +602,7 @@ func (r clusterReconciler) controlPlaneMachineToCluster(ctx goctx.Context, o cli }} } -func (r clusterReconciler) deploymentZoneToCluster(ctx goctx.Context, o client.Object) []ctrl.Request { +func (r clusterReconciler) deploymentZoneToCluster(ctx context.Context, o client.Object) []ctrl.Request { var requests []ctrl.Request obj, ok := o.(*infrav1.VSphereDeploymentZone) if !ok { diff --git a/controllers/vsphereclusteridentity_controller.go b/controllers/vsphereclusteridentity_controller.go index 136224296b..8a2563780b 100644 --- a/controllers/vsphereclusteridentity_controller.go +++ b/controllers/vsphereclusteridentity_controller.go @@ -17,7 +17,7 @@ limitations under the License. 
package controllers import ( - _context "context" + "context" "fmt" "reflect" "strings" @@ -39,7 +39,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" pkgidentity "sigs.k8s.io/cluster-api-provider-vsphere/pkg/identity" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/record" ) @@ -53,18 +53,18 @@ var ( // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=vsphereclusteridentities/status,verbs=get;update;patch // +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;create;patch;update;delete -func AddVsphereClusterIdentityControllerToManager(ctx *context.ControllerManagerContext, mgr manager.Manager, options controller.Options) error { +func AddVsphereClusterIdentityControllerToManager(controllerCtx *capvcontext.ControllerManagerContext, mgr manager.Manager, options controller.Options) error { var ( controllerNameShort = fmt.Sprintf("%s-controller", strings.ToLower(identityControlledTypeName)) - controllerNameLong = fmt.Sprintf("%s/%s/%s", ctx.Namespace, ctx.Name, controllerNameShort) + controllerNameLong = fmt.Sprintf("%s/%s/%s", controllerCtx.Namespace, controllerCtx.Name, controllerNameShort) ) // Build the controller context. - controllerContext := &context.ControllerContext{ - ControllerManagerContext: ctx, + controllerContext := &capvcontext.ControllerContext{ + ControllerManagerContext: controllerCtx, Name: controllerNameShort, Recorder: record.New(mgr.GetEventRecorderFor(controllerNameLong)), - Logger: ctx.Logger.WithName(controllerNameShort), + Logger: controllerCtx.Logger.WithName(controllerNameShort), } reconciler := clusterIdentityReconciler{ControllerContext: controllerContext} @@ -72,15 +72,15 @@ func AddVsphereClusterIdentityControllerToManager(ctx *context.ControllerManager return ctrl.NewControllerManagedBy(mgr). For(identityControlledType). WithOptions(options). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), ctx.WatchFilterValue)). + WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(controllerCtx), controllerCtx.WatchFilterValue)). 
Complete(reconciler) } type clusterIdentityReconciler struct { - *context.ControllerContext + *capvcontext.ControllerContext } -func (r clusterIdentityReconciler) Reconcile(ctx _context.Context, req reconcile.Request) (_ reconcile.Result, reterr error) { +func (r clusterIdentityReconciler) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, reterr error) { // TODO(gab-satchi) consider creating a context for the clusterIdentity // Get VSphereClusterIdentity identity := &infrav1.VSphereClusterIdentity{} @@ -161,7 +161,7 @@ func (r clusterIdentityReconciler) Reconcile(ctx _context.Context, req reconcile return reconcile.Result{}, nil } -func (r clusterIdentityReconciler) reconcileDelete(ctx _context.Context, identity *infrav1.VSphereClusterIdentity) error { +func (r clusterIdentityReconciler) reconcileDelete(ctx context.Context, identity *infrav1.VSphereClusterIdentity) error { r.Logger.Info("Reconciling VSphereClusterIdentity delete") secret := &corev1.Secret{} secretKey := client.ObjectKey{ diff --git a/controllers/vsphereclusteridentity_controller_test.go b/controllers/vsphereclusteridentity_controller_test.go index 55e67207cc..043559eaa1 100644 --- a/controllers/vsphereclusteridentity_controller_test.go +++ b/controllers/vsphereclusteridentity_controller_test.go @@ -17,7 +17,7 @@ limitations under the License. package controllers import ( - goctx "context" + "context" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -31,7 +31,7 @@ import ( ) var _ = Describe("VSphereClusterIdentity Reconciler", func() { - ctx := goctx.Background() + ctx := context.Background() controllerNamespace := testEnv.Manager.GetContext().Namespace Context("Reconcile Normal", func() { diff --git a/controllers/vspheredeploymentzone_controller.go b/controllers/vspheredeploymentzone_controller.go index 9c54c30ea8..14be19e5e7 100644 --- a/controllers/vspheredeploymentzone_controller.go +++ b/controllers/vspheredeploymentzone_controller.go @@ -17,7 +17,7 @@ limitations under the License. package controllers import ( - goctx "context" + "context" "fmt" "reflect" "strings" @@ -43,7 +43,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/identity" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/record" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/session" @@ -55,22 +55,22 @@ import ( // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=vspherefailuredomains,verbs=get;list;watch;create;update;patch;delete // AddVSphereDeploymentZoneControllerToManager adds the VSphereDeploymentZone controller to the provided manager. 
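The cluster identity controller above, and the deployment zone and machine controllers below, all register through the same builder chain: reconcile one typed resource, honour the caller-supplied controller.Options, and filter out paused objects or objects missing the watch-filter label. A sketch of that wiring, with the reconciler and filter value assumed to come from the caller:

package example

import (
    "context"

    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/controller"
    "sigs.k8s.io/controller-runtime/pkg/manager"
    "sigs.k8s.io/controller-runtime/pkg/reconcile"

    clusterpredicates "sigs.k8s.io/cluster-api/util/predicates"

    infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1"
)

// addController wires a reconciler for a single CRD type and skips paused
// objects or objects without the configured watch-filter label.
func addController(ctx context.Context, mgr manager.Manager, r reconcile.Reconciler, options controller.Options, watchFilterValue string) error {
    return ctrl.NewControllerManagedBy(mgr).
        For(&infrav1.VSphereClusterIdentity{}).
        WithOptions(options).
        WithEventFilter(clusterpredicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), watchFilterValue)).
        Complete(r)
}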
-func AddVSphereDeploymentZoneControllerToManager(ctx *context.ControllerManagerContext, mgr manager.Manager, options controller.Options) error { +func AddVSphereDeploymentZoneControllerToManager(controllerCtx *capvcontext.ControllerManagerContext, mgr manager.Manager, options controller.Options) error { var ( controlledType = &infrav1.VSphereDeploymentZone{} controlledTypeName = reflect.TypeOf(controlledType).Elem().Name() controlledTypeGVK = infrav1.GroupVersion.WithKind(controlledTypeName) controllerNameShort = fmt.Sprintf("%s-controller", strings.ToLower(controlledTypeName)) - controllerNameLong = fmt.Sprintf("%s/%s/%s", ctx.Namespace, ctx.Name, controllerNameShort) + controllerNameLong = fmt.Sprintf("%s/%s/%s", controllerCtx.Namespace, controllerCtx.Name, controllerNameShort) ) // Build the controller context. - controllerContext := &context.ControllerContext{ - ControllerManagerContext: ctx, + controllerContext := &capvcontext.ControllerContext{ + ControllerManagerContext: controllerCtx, Name: controllerNameShort, Recorder: record.New(mgr.GetEventRecorderFor(controllerNameLong)), - Logger: ctx.Logger.WithName(controllerNameShort), + Logger: controllerCtx.Logger.WithName(controllerNameShort), } reconciler := vsphereDeploymentZoneReconciler{ControllerContext: controllerContext} @@ -86,18 +86,18 @@ func AddVSphereDeploymentZoneControllerToManager(ctx *context.ControllerManagerC // should cause a resource to be synchronized, such as a goroutine // waiting on some asynchronous, external task to complete. WatchesRawSource( - &source.Channel{Source: ctx.GetGenericEventChannelFor(controlledTypeGVK)}, + &source.Channel{Source: controllerCtx.GetGenericEventChannelFor(controlledTypeGVK)}, &handler.EnqueueRequestForObject{}, ). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), ctx.WatchFilterValue)). + WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(controllerCtx), controllerCtx.WatchFilterValue)). Complete(reconciler) } type vsphereDeploymentZoneReconciler struct { - *context.ControllerContext + *capvcontext.ControllerContext } -func (r vsphereDeploymentZoneReconciler) Reconcile(ctx goctx.Context, request reconcile.Request) (_ reconcile.Result, reterr error) { +func (r vsphereDeploymentZoneReconciler) Reconcile(ctx context.Context, request reconcile.Request) (_ reconcile.Result, reterr error) { logr := r.Logger.WithValues("vspheredeploymentzone", request.Name) // Fetch the VSphereDeploymentZone for this request. 
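The WatchesRawSource call above is what lets out-of-band goroutines (such as the API-server poller earlier in this diff) re-trigger reconciliation: the controller drains a channel of GenericEvents for the controlled GVK. A sketch of attaching such a channel source, assuming the channel is owned by the caller:

package example

import (
    "sigs.k8s.io/controller-runtime/pkg/builder"
    "sigs.k8s.io/controller-runtime/pkg/event"
    "sigs.k8s.io/controller-runtime/pkg/handler"
    "sigs.k8s.io/controller-runtime/pkg/source"
)

// watchGenericEvents adds a raw channel source to a controller builder so
// events pushed onto ch enqueue the event's object for reconciliation.
func watchGenericEvents(b *builder.Builder, ch chan event.GenericEvent) *builder.Builder {
    return b.WatchesRawSource(
        &source.Channel{Source: ch},
        &handler.EnqueueRequestForObject{},
    )
}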
vsphereDeploymentZone := &infrav1.VSphereDeploymentZone{} @@ -129,7 +129,7 @@ func (r vsphereDeploymentZoneReconciler) Reconcile(ctx goctx.Context, request re vsphereDeploymentZone.Name) } - vsphereDeploymentZoneContext := &context.VSphereDeploymentZoneContext{ + vsphereDeploymentZoneContext := &capvcontext.VSphereDeploymentZoneContext{ ControllerContext: r.ControllerContext, VSphereDeploymentZone: vsphereDeploymentZone, VSphereFailureDomain: failureDomain, @@ -159,78 +159,78 @@ func (r vsphereDeploymentZoneReconciler) Reconcile(ctx goctx.Context, request re return ctrl.Result{}, r.reconcileNormal(vsphereDeploymentZoneContext) } -func (r vsphereDeploymentZoneReconciler) reconcileNormal(ctx *context.VSphereDeploymentZoneContext) error { - authSession, err := r.getVCenterSession(ctx) +func (r vsphereDeploymentZoneReconciler) reconcileNormal(deploymentZoneCtx *capvcontext.VSphereDeploymentZoneContext) error { + authSession, err := r.getVCenterSession(deploymentZoneCtx) if err != nil { - ctx.Logger.V(4).Error(err, "unable to create session") - conditions.MarkFalse(ctx.VSphereDeploymentZone, infrav1.VCenterAvailableCondition, infrav1.VCenterUnreachableReason, clusterv1.ConditionSeverityError, err.Error()) - ctx.VSphereDeploymentZone.Status.Ready = pointer.Bool(false) + deploymentZoneCtx.Logger.V(4).Error(err, "unable to create session") + conditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VCenterAvailableCondition, infrav1.VCenterUnreachableReason, clusterv1.ConditionSeverityError, err.Error()) + deploymentZoneCtx.VSphereDeploymentZone.Status.Ready = pointer.Bool(false) return errors.Wrapf(err, "unable to create auth session") } - ctx.AuthSession = authSession - conditions.MarkTrue(ctx.VSphereDeploymentZone, infrav1.VCenterAvailableCondition) + deploymentZoneCtx.AuthSession = authSession + conditions.MarkTrue(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VCenterAvailableCondition) - if err := r.reconcilePlacementConstraint(ctx); err != nil { - ctx.VSphereDeploymentZone.Status.Ready = pointer.Bool(false) + if err := r.reconcilePlacementConstraint(deploymentZoneCtx); err != nil { + deploymentZoneCtx.VSphereDeploymentZone.Status.Ready = pointer.Bool(false) return errors.Wrap(err, "placement constraint is misconfigured") } - conditions.MarkTrue(ctx.VSphereDeploymentZone, infrav1.PlacementConstraintMetCondition) + conditions.MarkTrue(deploymentZoneCtx.VSphereDeploymentZone, infrav1.PlacementConstraintMetCondition) // reconcile the failure domain - if err := r.reconcileFailureDomain(ctx); err != nil { - ctx.Logger.V(4).Error(err, "failed to reconcile failure domain", "failureDomain", ctx.VSphereDeploymentZone.Spec.FailureDomain) - ctx.VSphereDeploymentZone.Status.Ready = pointer.Bool(false) + if err := r.reconcileFailureDomain(deploymentZoneCtx); err != nil { + deploymentZoneCtx.Logger.V(4).Error(err, "failed to reconcile failure domain", "failureDomain", deploymentZoneCtx.VSphereDeploymentZone.Spec.FailureDomain) + deploymentZoneCtx.VSphereDeploymentZone.Status.Ready = pointer.Bool(false) return errors.Wrapf(err, "failed to reconcile failure domain") } - conditions.MarkTrue(ctx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition) + conditions.MarkTrue(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition) // Ensure the VSphereDeploymentZone is marked as an owner of the VSphereFailureDomain. 
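The ownership check that follows relies on the cluster-api owner-reference helpers; a small sketch of that ensure-once pattern on a generic object (the concrete types here stand in for VSphereFailureDomain and VSphereDeploymentZone):

package example

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    clusterutilv1 "sigs.k8s.io/cluster-api/util"
    "sigs.k8s.io/controller-runtime/pkg/client"
)

// ensureOwner appends owner as an owner reference of obj unless an equivalent
// reference is already present, and reports whether anything changed.
func ensureOwner(obj client.Object, owner metav1.OwnerReference) bool {
    if clusterutilv1.HasOwnerRef(obj.GetOwnerReferences(), owner) {
        return false
    }
    obj.SetOwnerReferences(clusterutilv1.EnsureOwnerRef(obj.GetOwnerReferences(), owner))
    return true
}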
- if !clusterutilv1.HasOwnerRef(ctx.VSphereFailureDomain.GetOwnerReferences(), metav1.OwnerReference{ + if !clusterutilv1.HasOwnerRef(deploymentZoneCtx.VSphereFailureDomain.GetOwnerReferences(), metav1.OwnerReference{ APIVersion: infrav1.GroupVersion.String(), Kind: "VSphereDeploymentZone", - Name: ctx.VSphereDeploymentZone.Name, + Name: deploymentZoneCtx.VSphereDeploymentZone.Name, }) { - if err := updateOwnerReferences(ctx, ctx.VSphereFailureDomain, r.Client, func() []metav1.OwnerReference { - return append(ctx.VSphereFailureDomain.OwnerReferences, metav1.OwnerReference{ + if err := updateOwnerReferences(deploymentZoneCtx, deploymentZoneCtx.VSphereFailureDomain, r.Client, func() []metav1.OwnerReference { + return append(deploymentZoneCtx.VSphereFailureDomain.OwnerReferences, metav1.OwnerReference{ APIVersion: infrav1.GroupVersion.String(), - Kind: ctx.VSphereDeploymentZone.Kind, - Name: ctx.VSphereDeploymentZone.Name, - UID: ctx.VSphereDeploymentZone.UID, + Kind: deploymentZoneCtx.VSphereDeploymentZone.Kind, + Name: deploymentZoneCtx.VSphereDeploymentZone.Name, + UID: deploymentZoneCtx.VSphereDeploymentZone.UID, }) }); err != nil { return err } } - ctx.VSphereDeploymentZone.Status.Ready = pointer.Bool(true) + deploymentZoneCtx.VSphereDeploymentZone.Status.Ready = pointer.Bool(true) return nil } -func (r vsphereDeploymentZoneReconciler) reconcilePlacementConstraint(ctx *context.VSphereDeploymentZoneContext) error { - placementConstraint := ctx.VSphereDeploymentZone.Spec.PlacementConstraint +func (r vsphereDeploymentZoneReconciler) reconcilePlacementConstraint(deploymentZoneCtx *capvcontext.VSphereDeploymentZoneContext) error { + placementConstraint := deploymentZoneCtx.VSphereDeploymentZone.Spec.PlacementConstraint if resourcePool := placementConstraint.ResourcePool; resourcePool != "" { - if _, err := ctx.AuthSession.Finder.ResourcePool(ctx, resourcePool); err != nil { - ctx.Logger.V(4).Error(err, "unable to find resource pool", "name", resourcePool) - conditions.MarkFalse(ctx.VSphereDeploymentZone, infrav1.PlacementConstraintMetCondition, infrav1.ResourcePoolNotFoundReason, clusterv1.ConditionSeverityError, "resource pool %s is misconfigured", resourcePool) + if _, err := deploymentZoneCtx.AuthSession.Finder.ResourcePool(deploymentZoneCtx, resourcePool); err != nil { + deploymentZoneCtx.Logger.V(4).Error(err, "unable to find resource pool", "name", resourcePool) + conditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.PlacementConstraintMetCondition, infrav1.ResourcePoolNotFoundReason, clusterv1.ConditionSeverityError, "resource pool %s is misconfigured", resourcePool) return errors.Wrapf(err, "unable to find resource pool %s", resourcePool) } } if folder := placementConstraint.Folder; folder != "" { - if _, err := ctx.AuthSession.Finder.Folder(ctx, placementConstraint.Folder); err != nil { - ctx.Logger.V(4).Error(err, "unable to find folder", "name", folder) - conditions.MarkFalse(ctx.VSphereDeploymentZone, infrav1.PlacementConstraintMetCondition, infrav1.FolderNotFoundReason, clusterv1.ConditionSeverityError, "datastore %s is misconfigured", folder) + if _, err := deploymentZoneCtx.AuthSession.Finder.Folder(deploymentZoneCtx, placementConstraint.Folder); err != nil { + deploymentZoneCtx.Logger.V(4).Error(err, "unable to find folder", "name", folder) + conditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.PlacementConstraintMetCondition, infrav1.FolderNotFoundReason, clusterv1.ConditionSeverityError, "datastore %s is misconfigured", folder) return 
errors.Wrapf(err, "unable to find folder %s", folder) } } return nil } -func (r vsphereDeploymentZoneReconciler) getVCenterSession(ctx *context.VSphereDeploymentZoneContext) (*session.Session, error) { +func (r vsphereDeploymentZoneReconciler) getVCenterSession(deploymentZoneCtx *capvcontext.VSphereDeploymentZoneContext) (*session.Session, error) { params := session.NewParams(). - WithServer(ctx.VSphereDeploymentZone.Spec.Server). - WithDatacenter(ctx.VSphereFailureDomain.Spec.Topology.Datacenter). + WithServer(deploymentZoneCtx.VSphereDeploymentZone.Spec.Server). + WithDatacenter(deploymentZoneCtx.VSphereFailureDomain.Spec.Topology.Datacenter). WithUserInfo(r.ControllerContext.Username, r.ControllerContext.Password). WithFeatures(session.Feature{ EnableKeepAlive: r.EnableKeepAlive, @@ -238,16 +238,16 @@ func (r vsphereDeploymentZoneReconciler) getVCenterSession(ctx *context.VSphereD }) clusterList := &infrav1.VSphereClusterList{} - if err := r.Client.List(ctx, clusterList); err != nil { + if err := r.Client.List(deploymentZoneCtx, clusterList); err != nil { return nil, err } for _, vsphereCluster := range clusterList.Items { - if ctx.VSphereDeploymentZone.Spec.Server == vsphereCluster.Spec.Server && vsphereCluster.Spec.IdentityRef != nil { - logger := ctx.Logger.WithValues("cluster", vsphereCluster.Name) + if deploymentZoneCtx.VSphereDeploymentZone.Spec.Server == vsphereCluster.Spec.Server && vsphereCluster.Spec.IdentityRef != nil { + logger := deploymentZoneCtx.Logger.WithValues("cluster", vsphereCluster.Name) params = params.WithThumbprint(vsphereCluster.Spec.Thumbprint) clust := vsphereCluster - creds, err := identity.GetCredentials(ctx, r.Client, &clust, r.Namespace) + creds, err := identity.GetCredentials(deploymentZoneCtx, r.Client, &clust, r.Namespace) if err != nil { logger.Error(err, "error retrieving credentials from IdentityRef") continue @@ -264,53 +264,53 @@ func (r vsphereDeploymentZoneReconciler) getVCenterSession(ctx *context.VSphereD params) } -func (r vsphereDeploymentZoneReconciler) reconcileDelete(ctx *context.VSphereDeploymentZoneContext) error { +func (r vsphereDeploymentZoneReconciler) reconcileDelete(deploymentZoneCtx *capvcontext.VSphereDeploymentZoneContext) error { r.Logger.Info("Deleting VSphereDeploymentZone") machines := &clusterv1.MachineList{} - if err := r.Client.List(ctx, machines); err != nil { + if err := r.Client.List(deploymentZoneCtx, machines); err != nil { r.Logger.Error(err, "unable to list machines") return errors.Wrapf(err, "unable to list machines") } machinesUsingDeploymentZone := collections.FromMachineList(machines).Filter(collections.ActiveMachines, func(machine *clusterv1.Machine) bool { if machine.Spec.FailureDomain != nil { - return *machine.Spec.FailureDomain == ctx.VSphereDeploymentZone.Name + return *machine.Spec.FailureDomain == deploymentZoneCtx.VSphereDeploymentZone.Name } return false }) if len(machinesUsingDeploymentZone) > 0 { machineNamesStr := util.MachinesAsString(machinesUsingDeploymentZone.SortedByCreationTimestamp()) - err := errors.Errorf("%s is currently in use by machines: %s", ctx.VSphereDeploymentZone.Name, machineNamesStr) - r.Logger.Error(err, "Error deleting VSphereDeploymentZone", "name", ctx.VSphereDeploymentZone.Name) + err := errors.Errorf("%s is currently in use by machines: %s", deploymentZoneCtx.VSphereDeploymentZone.Name, machineNamesStr) + r.Logger.Error(err, "Error deleting VSphereDeploymentZone", "name", deploymentZoneCtx.VSphereDeploymentZone.Name) return err } - if err := updateOwnerReferences(ctx, 
ctx.VSphereFailureDomain, r.Client, func() []metav1.OwnerReference { - return clusterutilv1.RemoveOwnerRef(ctx.VSphereFailureDomain.OwnerReferences, metav1.OwnerReference{ + if err := updateOwnerReferences(deploymentZoneCtx, deploymentZoneCtx.VSphereFailureDomain, r.Client, func() []metav1.OwnerReference { + return clusterutilv1.RemoveOwnerRef(deploymentZoneCtx.VSphereFailureDomain.OwnerReferences, metav1.OwnerReference{ APIVersion: infrav1.GroupVersion.String(), - Kind: ctx.VSphereDeploymentZone.Kind, - Name: ctx.VSphereDeploymentZone.Name, + Kind: deploymentZoneCtx.VSphereDeploymentZone.Kind, + Name: deploymentZoneCtx.VSphereDeploymentZone.Name, }) }); err != nil { return err } - if len(ctx.VSphereFailureDomain.OwnerReferences) == 0 { - ctx.Logger.Info("deleting vsphereFailureDomain", "name", ctx.VSphereFailureDomain.Name) - if err := r.Client.Delete(ctx, ctx.VSphereFailureDomain); err != nil && !apierrors.IsNotFound(err) { - ctx.Logger.Error(err, "failed to delete related %s %s", ctx.VSphereFailureDomain.GroupVersionKind(), ctx.VSphereFailureDomain.Name) + if len(deploymentZoneCtx.VSphereFailureDomain.OwnerReferences) == 0 { + deploymentZoneCtx.Logger.Info("deleting vsphereFailureDomain", "name", deploymentZoneCtx.VSphereFailureDomain.Name) + if err := r.Client.Delete(deploymentZoneCtx, deploymentZoneCtx.VSphereFailureDomain); err != nil && !apierrors.IsNotFound(err) { + deploymentZoneCtx.Logger.Error(err, "failed to delete related %s %s", deploymentZoneCtx.VSphereFailureDomain.GroupVersionKind(), deploymentZoneCtx.VSphereFailureDomain.Name) } } - ctrlutil.RemoveFinalizer(ctx.VSphereDeploymentZone, infrav1.DeploymentZoneFinalizer) + ctrlutil.RemoveFinalizer(deploymentZoneCtx.VSphereDeploymentZone, infrav1.DeploymentZoneFinalizer) return nil } // updateOwnerReferences uses the ownerRef function to calculate the owner references // to be set on the object and patches the object. 
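updateOwnerReferences, defined next, wraps the usual cluster-api patch-helper flow: snapshot the object, mutate it, then patch only the resulting difference back to the API server. A sketch of that flow with an arbitrary mutate closure (an assumption for illustration):

package example

import (
    "context"

    "github.com/pkg/errors"
    "sigs.k8s.io/cluster-api/util/patch"
    "sigs.k8s.io/controller-runtime/pkg/client"
)

// patchObject records the object's current state, applies mutate, and sends
// only the resulting difference to the API server.
func patchObject(ctx context.Context, c client.Client, obj client.Object, mutate func()) error {
    helper, err := patch.NewHelper(obj, c)
    if err != nil {
        return errors.Wrap(err, "failed to init patch helper")
    }
    mutate()
    return helper.Patch(ctx, obj)
}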
-func updateOwnerReferences(ctx goctx.Context, obj client.Object, client client.Client, ownerRefFunc func() []metav1.OwnerReference) error { +func updateOwnerReferences(ctx context.Context, obj client.Object, client client.Client, ownerRefFunc func() []metav1.OwnerReference) error { patchHelper, err := patch.NewHelper(obj, client) if err != nil { return errors.Wrapf(err, "failed to init patch helper for %s %s", @@ -327,7 +327,7 @@ func updateOwnerReferences(ctx goctx.Context, obj client.Object, client client.C return nil } -func (r vsphereDeploymentZoneReconciler) failureDomainsToDeploymentZones(ctx goctx.Context, a client.Object) []reconcile.Request { +func (r vsphereDeploymentZoneReconciler) failureDomainsToDeploymentZones(ctx context.Context, a client.Object) []reconcile.Request { failureDomain, ok := a.(*infrav1.VSphereFailureDomain) if !ok { r.Logger.Error(nil, fmt.Sprintf("expected a VSphereFailureDomain but got a %T", a)) diff --git a/controllers/vspheredeploymentzone_controller_domain.go b/controllers/vspheredeploymentzone_controller_domain.go index 663ef32701..455bbe51f2 100644 --- a/controllers/vspheredeploymentzone_controller_domain.go +++ b/controllers/vspheredeploymentzone_controller_domain.go @@ -24,107 +24,107 @@ import ( ctrl "sigs.k8s.io/controller-runtime" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/services/govmomi/cluster" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/services/govmomi/metadata" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/taggable" ) -func (r vsphereDeploymentZoneReconciler) reconcileFailureDomain(ctx *context.VSphereDeploymentZoneContext) error { - logger := ctrl.LoggerFrom(ctx).WithValues("failure domain", ctx.VSphereFailureDomain.Name) +func (r vsphereDeploymentZoneReconciler) reconcileFailureDomain(deploymentZoneCtx *capvcontext.VSphereDeploymentZoneContext) error { + logger := ctrl.LoggerFrom(deploymentZoneCtx).WithValues("failure domain", deploymentZoneCtx.VSphereFailureDomain.Name) // verify the failure domain for the region - if err := r.reconcileInfraFailureDomain(ctx, ctx.VSphereFailureDomain.Spec.Region); err != nil { - conditions.MarkFalse(ctx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.RegionMisconfiguredReason, clusterv1.ConditionSeverityError, err.Error()) + if err := r.reconcileInfraFailureDomain(deploymentZoneCtx, deploymentZoneCtx.VSphereFailureDomain.Spec.Region); err != nil { + conditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.RegionMisconfiguredReason, clusterv1.ConditionSeverityError, err.Error()) logger.Error(err, "region is not configured correctly") return errors.Wrapf(err, "region is not configured correctly") } // verify the failure domain for the zone - if err := r.reconcileInfraFailureDomain(ctx, ctx.VSphereFailureDomain.Spec.Zone); err != nil { - conditions.MarkFalse(ctx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.ZoneMisconfiguredReason, clusterv1.ConditionSeverityError, err.Error()) + if err := r.reconcileInfraFailureDomain(deploymentZoneCtx, deploymentZoneCtx.VSphereFailureDomain.Spec.Zone); err != nil { + conditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.ZoneMisconfiguredReason, clusterv1.ConditionSeverityError, err.Error()) 
logger.Error(err, "zone is not configured correctly") return errors.Wrapf(err, "zone is not configured correctly") } - if computeCluster := ctx.VSphereFailureDomain.Spec.Topology.ComputeCluster; computeCluster != nil { - if err := r.reconcileComputeCluster(ctx); err != nil { + if computeCluster := deploymentZoneCtx.VSphereFailureDomain.Spec.Topology.ComputeCluster; computeCluster != nil { + if err := r.reconcileComputeCluster(deploymentZoneCtx); err != nil { logger.Error(err, "compute cluster is not configured correctly", "name", *computeCluster) return errors.Wrap(err, "compute cluster is not configured correctly") } } - if err := r.reconcileTopology(ctx); err != nil { + if err := r.reconcileTopology(deploymentZoneCtx); err != nil { logger.Error(err, "topology is not configured correctly") return errors.Wrap(err, "topology is not configured correctly") } return nil } -func (r vsphereDeploymentZoneReconciler) reconcileInfraFailureDomain(ctx *context.VSphereDeploymentZoneContext, failureDomain infrav1.FailureDomain) error { +func (r vsphereDeploymentZoneReconciler) reconcileInfraFailureDomain(deploymentZoneCtx *capvcontext.VSphereDeploymentZoneContext, failureDomain infrav1.FailureDomain) error { if *failureDomain.AutoConfigure { - return r.createAndAttachMetadata(ctx, failureDomain) + return r.createAndAttachMetadata(deploymentZoneCtx, failureDomain) } - return r.verifyFailureDomain(ctx, failureDomain) + return r.verifyFailureDomain(deploymentZoneCtx, failureDomain) } -func (r vsphereDeploymentZoneReconciler) reconcileTopology(ctx *context.VSphereDeploymentZoneContext) error { - topology := ctx.VSphereFailureDomain.Spec.Topology +func (r vsphereDeploymentZoneReconciler) reconcileTopology(deploymentZoneCtx *capvcontext.VSphereDeploymentZoneContext) error { + topology := deploymentZoneCtx.VSphereFailureDomain.Spec.Topology if datastore := topology.Datastore; datastore != "" { - if _, err := ctx.AuthSession.Finder.Datastore(ctx, datastore); err != nil { - conditions.MarkFalse(ctx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.DatastoreNotFoundReason, clusterv1.ConditionSeverityError, "datastore %s is misconfigured", datastore) + if _, err := deploymentZoneCtx.AuthSession.Finder.Datastore(deploymentZoneCtx, datastore); err != nil { + conditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.DatastoreNotFoundReason, clusterv1.ConditionSeverityError, "datastore %s is misconfigured", datastore) return errors.Wrapf(err, "unable to find datastore %s", datastore) } } for _, network := range topology.Networks { - if _, err := ctx.AuthSession.Finder.Network(ctx, network); err != nil { - conditions.MarkFalse(ctx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.NetworkNotFoundReason, clusterv1.ConditionSeverityError, "network %s is misconfigured", network) + if _, err := deploymentZoneCtx.AuthSession.Finder.Network(deploymentZoneCtx, network); err != nil { + conditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.NetworkNotFoundReason, clusterv1.ConditionSeverityError, "network %s is misconfigured", network) return errors.Wrapf(err, "unable to find network %s", network) } } if hostPlacementInfo := topology.Hosts; hostPlacementInfo != nil { - rule, err := cluster.VerifyAffinityRule(ctx, *topology.ComputeCluster, hostPlacementInfo.HostGroupName, hostPlacementInfo.VMGroupName) + rule, err := 
cluster.VerifyAffinityRule(deploymentZoneCtx, *topology.ComputeCluster, hostPlacementInfo.HostGroupName, hostPlacementInfo.VMGroupName) switch { case err != nil: - conditions.MarkFalse(ctx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.HostsMisconfiguredReason, clusterv1.ConditionSeverityError, "vm host affinity does not exist") + conditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.HostsMisconfiguredReason, clusterv1.ConditionSeverityError, "vm host affinity does not exist") return err case rule.Disabled(): - ctrl.LoggerFrom(ctx).V(4).Info("warning: vm-host rule for the failure domain is disabled", "hostgroup", hostPlacementInfo.HostGroupName, "vmGroup", hostPlacementInfo.VMGroupName) - conditions.MarkFalse(ctx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.HostsAffinityMisconfiguredReason, clusterv1.ConditionSeverityWarning, "vm host affinity is disabled") + ctrl.LoggerFrom(deploymentZoneCtx).V(4).Info("warning: vm-host rule for the failure domain is disabled", "hostgroup", hostPlacementInfo.HostGroupName, "vmGroup", hostPlacementInfo.VMGroupName) + conditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.HostsAffinityMisconfiguredReason, clusterv1.ConditionSeverityWarning, "vm host affinity is disabled") default: - conditions.MarkTrue(ctx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition) + conditions.MarkTrue(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition) } } return nil } -func (r vsphereDeploymentZoneReconciler) reconcileComputeCluster(ctx *context.VSphereDeploymentZoneContext) error { - computeCluster := ctx.VSphereFailureDomain.Spec.Topology.ComputeCluster +func (r vsphereDeploymentZoneReconciler) reconcileComputeCluster(deploymentZoneCtx *capvcontext.VSphereDeploymentZoneContext) error { + computeCluster := deploymentZoneCtx.VSphereFailureDomain.Spec.Topology.ComputeCluster if computeCluster == nil { return nil } - ccr, err := ctx.AuthSession.Finder.ClusterComputeResource(ctx, *computeCluster) + ccr, err := deploymentZoneCtx.AuthSession.Finder.ClusterComputeResource(deploymentZoneCtx, *computeCluster) if err != nil { - conditions.MarkFalse(ctx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.ComputeClusterNotFoundReason, clusterv1.ConditionSeverityError, "compute cluster %s not found", *computeCluster) + conditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.ComputeClusterNotFoundReason, clusterv1.ConditionSeverityError, "compute cluster %s not found", *computeCluster) return errors.Wrap(err, "compute cluster not found") } - if resourcePool := ctx.VSphereDeploymentZone.Spec.PlacementConstraint.ResourcePool; resourcePool != "" { - rp, err := ctx.AuthSession.Finder.ResourcePool(ctx, resourcePool) + if resourcePool := deploymentZoneCtx.VSphereDeploymentZone.Spec.PlacementConstraint.ResourcePool; resourcePool != "" { + rp, err := deploymentZoneCtx.AuthSession.Finder.ResourcePool(deploymentZoneCtx, resourcePool) if err != nil { return errors.Wrapf(err, "unable to find resource pool") } - ref, err := rp.Owner(ctx) + ref, err := rp.Owner(deploymentZoneCtx) if err != nil { - conditions.MarkFalse(ctx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.ComputeClusterNotFoundReason, clusterv1.ConditionSeverityError, "resource 
pool owner not found") + conditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.ComputeClusterNotFoundReason, clusterv1.ConditionSeverityError, "resource pool owner not found") return errors.Wrap(err, "unable to find owner compute resource") } if ref.Reference() != ccr.Reference() { - conditions.MarkFalse(ctx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.ResourcePoolNotFoundReason, clusterv1.ConditionSeverityError, "resource pool is not owned by compute cluster") + conditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.ResourcePoolNotFoundReason, clusterv1.ConditionSeverityError, "resource pool is not owned by compute cluster") return errors.Errorf("compute cluster %s does not own resource pool %s", *computeCluster, resourcePool) } } @@ -133,19 +133,19 @@ func (r vsphereDeploymentZoneReconciler) reconcileComputeCluster(ctx *context.VS // verifyFailureDomain verifies the Failure Domain. It verifies the existence of tag and category specified and // checks whether the specified tags exist on the DataCenter or Compute Cluster or Hosts (in a HostGroup). -func (r vsphereDeploymentZoneReconciler) verifyFailureDomain(ctx *context.VSphereDeploymentZoneContext, failureDomain infrav1.FailureDomain) error { - if _, err := ctx.AuthSession.TagManager.GetTagForCategory(ctx, failureDomain.Name, failureDomain.TagCategory); err != nil { +func (r vsphereDeploymentZoneReconciler) verifyFailureDomain(deploymentZoneCtx *capvcontext.VSphereDeploymentZoneContext, failureDomain infrav1.FailureDomain) error { + if _, err := deploymentZoneCtx.AuthSession.TagManager.GetTagForCategory(deploymentZoneCtx, failureDomain.Name, failureDomain.TagCategory); err != nil { return errors.Wrapf(err, "failed to verify tag %s and category %s", failureDomain.Name, failureDomain.TagCategory) } - objects, err := taggable.GetObjects(ctx, failureDomain.Type) + objects, err := taggable.GetObjects(deploymentZoneCtx, failureDomain.Type) if err != nil { return errors.Wrapf(err, "failed to find object") } // All the objects should be associated to the tag for _, obj := range objects { - hasTag, err := obj.HasTag(ctx, failureDomain.Name) + hasTag, err := obj.HasTag(deploymentZoneCtx, failureDomain.Name) if err != nil { return errors.Wrapf(err, "failed to verify tag association") } @@ -156,21 +156,21 @@ func (r vsphereDeploymentZoneReconciler) verifyFailureDomain(ctx *context.VSpher return nil } -func (r vsphereDeploymentZoneReconciler) createAndAttachMetadata(ctx *context.VSphereDeploymentZoneContext, failureDomain infrav1.FailureDomain) error { - logger := ctrl.LoggerFrom(ctx, "tag", failureDomain.Name, "category", failureDomain.TagCategory) - categoryID, err := metadata.CreateCategory(ctx, failureDomain.TagCategory, failureDomain.Type) +func (r vsphereDeploymentZoneReconciler) createAndAttachMetadata(deploymentZoneCtx *capvcontext.VSphereDeploymentZoneContext, failureDomain infrav1.FailureDomain) error { + logger := ctrl.LoggerFrom(deploymentZoneCtx, "tag", failureDomain.Name, "category", failureDomain.TagCategory) + categoryID, err := metadata.CreateCategory(deploymentZoneCtx, failureDomain.TagCategory, failureDomain.Type) if err != nil { logger.V(4).Error(err, "category creation failed") return errors.Wrapf(err, "failed to create category %s", failureDomain.TagCategory) } - err = metadata.CreateTag(ctx, failureDomain.Name, categoryID) + err = metadata.CreateTag(deploymentZoneCtx, 
failureDomain.Name, categoryID) if err != nil { logger.V(4).Error(err, "tag creation failed") return errors.Wrapf(err, "failed to create tag %s", failureDomain.Name) } logger = logger.WithValues("type", failureDomain.Type) - objects, err := taggable.GetObjects(ctx, failureDomain.Type) + objects, err := taggable.GetObjects(deploymentZoneCtx, failureDomain.Type) if err != nil { logger.V(4).Error(err, "failed to find object") return err @@ -179,7 +179,7 @@ func (r vsphereDeploymentZoneReconciler) createAndAttachMetadata(ctx *context.VS var errList []error for _, obj := range objects { logger.V(4).Info("attaching tag to object") - err := obj.AttachTag(ctx, failureDomain.Name) + err := obj.AttachTag(deploymentZoneCtx, failureDomain.Name) if err != nil { logger.V(4).Error(err, "failed to find object") errList = append(errList, errors.Wrapf(err, "failed to attach tag")) diff --git a/controllers/vspheredeploymentzone_controller_domain_test.go b/controllers/vspheredeploymentzone_controller_domain_test.go index 4fc1dd3401..a46c095468 100644 --- a/controllers/vspheredeploymentzone_controller_domain_test.go +++ b/controllers/vspheredeploymentzone_controller_domain_test.go @@ -26,7 +26,7 @@ import ( "k8s.io/utils/pointer" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context/fake" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/session" "sigs.k8s.io/cluster-api-provider-vsphere/test/helpers/vcsim" @@ -92,7 +92,7 @@ func ForComputeClusterZone(t *testing.T) { }, } - deploymentZoneCtx := &context.VSphereDeploymentZoneContext{ + deploymentZoneCtx := &capvcontext.VSphereDeploymentZoneContext{ ControllerContext: controllerCtx, VSphereFailureDomain: vsphereFailureDomain, Logger: logr.Discard(), @@ -176,7 +176,7 @@ func ForHostGroupZone(t *testing.T) { }, } - deploymentZoneCtx := &context.VSphereDeploymentZoneContext{ + deploymentZoneCtx := &capvcontext.VSphereDeploymentZoneContext{ ControllerContext: controllerCtx, VSphereFailureDomain: vsphereFailureDomain, Logger: logr.Discard(), @@ -287,7 +287,7 @@ func TestVsphereDeploymentZoneReconciler_Reconcile_CreateAndAttachMetadata(t *te Spec: tests[0].vsphereFailureDomainSpec, } - deploymentZoneCtx := &context.VSphereDeploymentZoneContext{ + deploymentZoneCtx := &capvcontext.VSphereDeploymentZoneContext{ ControllerContext: controllerCtx, VSphereFailureDomain: vsphereFailureDomain, Logger: logr.Discard(), @@ -308,7 +308,7 @@ func TestVsphereDeploymentZoneReconciler_Reconcile_CreateAndAttachMetadata(t *te Spec: tests[1].vsphereFailureDomainSpec, } - deploymentZoneCtx := &context.VSphereDeploymentZoneContext{ + deploymentZoneCtx := &capvcontext.VSphereDeploymentZoneContext{ ControllerContext: controllerCtx, VSphereFailureDomain: vsphereFailureDomain, Logger: logr.Discard(), @@ -329,7 +329,7 @@ func TestVsphereDeploymentZoneReconciler_Reconcile_CreateAndAttachMetadata(t *te Spec: tests[2].vsphereFailureDomainSpec, } - deploymentZoneCtx := &context.VSphereDeploymentZoneContext{ + deploymentZoneCtx := &capvcontext.VSphereDeploymentZoneContext{ ControllerContext: controllerCtx, VSphereFailureDomain: vsphereFailureDomain, Logger: logr.Discard(), diff --git a/controllers/vspheredeploymentzone_controller_test.go b/controllers/vspheredeploymentzone_controller_test.go index e0d74011e6..ed664cc6ab 100644 --- a/controllers/vspheredeploymentzone_controller_test.go +++ 
b/controllers/vspheredeploymentzone_controller_test.go @@ -17,7 +17,7 @@ limitations under the License. package controllers import ( - goctx "context" + "context" "testing" "github.com/go-logr/logr" @@ -35,7 +35,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context/fake" "sigs.k8s.io/cluster-api-provider-vsphere/test/helpers/vcsim" ) @@ -43,7 +43,7 @@ import ( var _ = Describe("VSphereDeploymentZoneReconciler", func() { var ( simr *vcsim.Simulator - ctx goctx.Context + ctx context.Context failureDomainKey, deploymentZoneKey client.ObjectKey @@ -74,7 +74,7 @@ var _ = Describe("VSphereDeploymentZoneReconciler", func() { Expect(simr.Run(op, gbytes.NewBuffer(), gbytes.NewBuffer())).To(Succeed()) } - ctx = goctx.Background() + ctx = context.Background() }) BeforeEach(func() { @@ -555,7 +555,7 @@ func TestVsphereDeploymentZone_Failed_ReconcilePlacementConstraint(t *testing.T) controllerCtx := fake.NewControllerContext(mgmtContext) - deploymentZoneCtx := &context.VSphereDeploymentZoneContext{ + deploymentZoneCtx := &capvcontext.VSphereDeploymentZoneContext{ ControllerContext: controllerCtx, VSphereDeploymentZone: &infrav1.VSphereDeploymentZone{Spec: infrav1.VSphereDeploymentZoneSpec{ Server: simr.ServerURL().Host, @@ -615,7 +615,7 @@ func TestVSphereDeploymentZoneReconciler_ReconcileDelete(t *testing.T) { t.Run("should block deletion", func(t *testing.T) { mgmtContext := fake.NewControllerManagerContext(machineUsingDeplZone, vsphereFailureDomain) controllerCtx := fake.NewControllerContext(mgmtContext) - deploymentZoneCtx := &context.VSphereDeploymentZoneContext{ + deploymentZoneCtx := &capvcontext.VSphereDeploymentZoneContext{ ControllerContext: controllerCtx, VSphereDeploymentZone: vsphereDeploymentZone, VSphereFailureDomain: vsphereFailureDomain, @@ -637,7 +637,7 @@ func TestVSphereDeploymentZoneReconciler_ReconcileDelete(t *testing.T) { mgmtContext := fake.NewControllerManagerContext(machineUsingDeplZone, vsphereFailureDomain) controllerCtx := fake.NewControllerContext(mgmtContext) - deploymentZoneCtx := &context.VSphereDeploymentZoneContext{ + deploymentZoneCtx := &capvcontext.VSphereDeploymentZoneContext{ ControllerContext: controllerCtx, VSphereDeploymentZone: vsphereDeploymentZone, VSphereFailureDomain: vsphereFailureDomain, @@ -656,7 +656,7 @@ func TestVSphereDeploymentZoneReconciler_ReconcileDelete(t *testing.T) { machineNotUsingDeplZone := createMachine("machine-1", "cluster-1", "ns", false) mgmtContext := fake.NewControllerManagerContext(machineNotUsingDeplZone, vsphereFailureDomain) controllerCtx := fake.NewControllerContext(mgmtContext) - deploymentZoneCtx := &context.VSphereDeploymentZoneContext{ + deploymentZoneCtx := &capvcontext.VSphereDeploymentZoneContext{ ControllerContext: controllerCtx, VSphereDeploymentZone: vsphereDeploymentZone, VSphereFailureDomain: vsphereFailureDomain, @@ -673,7 +673,7 @@ func TestVSphereDeploymentZoneReconciler_ReconcileDelete(t *testing.T) { t.Run("when no machines are present", func(t *testing.T) { mgmtContext := fake.NewControllerManagerContext(vsphereFailureDomain) controllerCtx := fake.NewControllerContext(mgmtContext) - deploymentZoneCtx := &context.VSphereDeploymentZoneContext{ + deploymentZoneCtx := &capvcontext.VSphereDeploymentZoneContext{ ControllerContext: controllerCtx, VSphereDeploymentZone: 
vsphereDeploymentZone, VSphereFailureDomain: vsphereFailureDomain, @@ -697,7 +697,7 @@ func TestVSphereDeploymentZoneReconciler_ReconcileDelete(t *testing.T) { t.Run("not used by other deployment zones", func(t *testing.T) { mgmtContext := fake.NewControllerManagerContext(vsphereFailureDomain) controllerCtx := fake.NewControllerContext(mgmtContext) - deploymentZoneCtx := &context.VSphereDeploymentZoneContext{ + deploymentZoneCtx := &capvcontext.VSphereDeploymentZoneContext{ ControllerContext: controllerCtx, VSphereDeploymentZone: vsphereDeploymentZone, VSphereFailureDomain: vsphereFailureDomain, @@ -719,7 +719,7 @@ func TestVSphereDeploymentZoneReconciler_ReconcileDelete(t *testing.T) { mgmtContext := fake.NewControllerManagerContext(vsphereFailureDomain) controllerCtx := fake.NewControllerContext(mgmtContext) - deploymentZoneCtx := &context.VSphereDeploymentZoneContext{ + deploymentZoneCtx := &capvcontext.VSphereDeploymentZoneContext{ ControllerContext: controllerCtx, VSphereDeploymentZone: vsphereDeploymentZone, VSphereFailureDomain: vsphereFailureDomain, @@ -732,7 +732,7 @@ func TestVSphereDeploymentZoneReconciler_ReconcileDelete(t *testing.T) { g.Expect(err).NotTo(HaveOccurred()) fetchedFailureDomain := &infrav1.VSphereFailureDomain{} - g.Expect(mgmtContext.Client.Get(goctx.Background(), client.ObjectKey{Name: vsphereFailureDomain.Name}, fetchedFailureDomain)).To(Succeed()) + g.Expect(mgmtContext.Client.Get(context.Background(), client.ObjectKey{Name: vsphereFailureDomain.Name}, fetchedFailureDomain)).To(Succeed()) g.Expect(fetchedFailureDomain.OwnerReferences).To(HaveLen(1)) }) }) diff --git a/controllers/vspheremachine_controller.go b/controllers/vspheremachine_controller.go index 3541ad7cc4..7d15151f60 100644 --- a/controllers/vspheremachine_controller.go +++ b/controllers/vspheremachine_controller.go @@ -17,7 +17,7 @@ limitations under the License. package controllers import ( - goctx "context" + "context" "fmt" "reflect" "strings" @@ -51,7 +51,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/constants" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context/vmware" inframanager "sigs.k8s.io/cluster-api-provider-vsphere/pkg/manager" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/record" @@ -78,7 +78,7 @@ const hostInfoErrStr = "host info cannot be used as a label value" // AddMachineControllerToManager adds the machine controller to the provided // manager. 
-func AddMachineControllerToManager(ctx *context.ControllerManagerContext, mgr manager.Manager, controlledType client.Object, options controller.Options) error { +func AddMachineControllerToManager(controllerCtx *capvcontext.ControllerManagerContext, mgr manager.Manager, controlledType client.Object, options controller.Options) error { supervisorBased, err := util.IsSupervisorType(controlledType) if err != nil { return err @@ -88,21 +88,21 @@ func AddMachineControllerToManager(ctx *context.ControllerManagerContext, mgr ma controlledTypeName = reflect.TypeOf(controlledType).Elem().Name() controlledTypeGVK = infrav1.GroupVersion.WithKind(controlledTypeName) controllerNameShort = fmt.Sprintf("%s-controller", strings.ToLower(controlledTypeName)) - controllerNameLong = fmt.Sprintf("%s/%s/%s", ctx.Namespace, ctx.Name, controllerNameShort) + controllerNameLong = fmt.Sprintf("%s/%s/%s", controllerCtx.Namespace, controllerCtx.Name, controllerNameShort) ) if supervisorBased { controlledTypeGVK = vmwarev1.GroupVersion.WithKind(controlledTypeName) controllerNameShort = fmt.Sprintf("%s-supervisor-controller", strings.ToLower(controlledTypeName)) - controllerNameLong = fmt.Sprintf("%s/%s/%s", ctx.Namespace, ctx.Name, controllerNameShort) + controllerNameLong = fmt.Sprintf("%s/%s/%s", controllerCtx.Namespace, controllerCtx.Name, controllerNameShort) } // Build the controller context. - controllerContext := &context.ControllerContext{ - ControllerManagerContext: ctx, + controllerContext := &capvcontext.ControllerContext{ + ControllerManagerContext: controllerCtx, Name: controllerNameShort, Recorder: record.New(mgr.GetEventRecorderFor(controllerNameLong)), - Logger: ctx.Logger.WithName(controllerNameShort), + Logger: controllerCtx.Logger.WithName(controllerNameShort), } builder := ctrl.NewControllerManagedBy(mgr). @@ -120,10 +120,10 @@ func AddMachineControllerToManager(ctx *context.ControllerManagerContext, mgr ma // should cause a resource to be synchronized, such as a goroutine // waiting on some asynchronous, external task to complete. WatchesRawSource( - &source.Channel{Source: ctx.GetGenericEventChannelFor(controlledTypeGVK)}, + &source.Channel{Source: controllerCtx.GetGenericEventChannelFor(controlledTypeGVK)}, &handler.EnqueueRequestForObject{}, ). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), ctx.WatchFilterValue)) + WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(controllerCtx), controllerCtx.WatchFilterValue)) r := &machineReconciler{ ControllerContext: controllerContext, @@ -135,7 +135,7 @@ func AddMachineControllerToManager(ctx *context.ControllerManagerContext, mgr ma // Watch any VirtualMachine resources owned by this VSphereMachine builder.Owns(&vmoprv1.VirtualMachine{}) r.VMService = &vmoperator.VmopMachineService{} - networkProvider, err := inframanager.GetNetworkProvider(ctx) + networkProvider, err := inframanager.GetNetworkProvider(controllerCtx) if err != nil { return errors.Wrap(err, "failed to create a network provider") } @@ -169,15 +169,15 @@ func AddMachineControllerToManager(ctx *context.ControllerManagerContext, mgr ma } type machineReconciler struct { - *context.ControllerContext + *capvcontext.ControllerContext VMService services.VSphereMachineService networkProvider services.NetworkProvider supervisorBased bool } // Reconcile ensures the back-end state reflects the Kubernetes resource state intent. 
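For context on the signature change in the Reconcile diff that follows: controller-runtime's reconcile.Reconciler interface is what fixes the first parameter as the standard library context.Context, so dropping the goctx alias keeps the method aligned with the interface it implements. A minimal, self-contained sketch of a conforming reconciler (the type name here is illustrative, not part of this diff):

    package example

    import (
        "context"

        ctrl "sigs.k8s.io/controller-runtime"
    )

    // exampleReconciler exists only to show the signature controller-runtime expects:
    // the first argument is the stdlib context.Context, while provider-specific state
    // continues to travel in receiver fields or the capvcontext structs.
    type exampleReconciler struct{}

    func (r exampleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
        _ = ctx // cancellation and deadlines flow through ctx
        _ = req // the namespaced name of the object to reconcile
        return ctrl.Result{}, nil
    }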
-func (r *machineReconciler) Reconcile(_ goctx.Context, req ctrl.Request) (_ ctrl.Result, reterr error) {
-	var machineContext context.MachineContext
+func (r *machineReconciler) Reconcile(_ context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) {
+	var machineContext capvcontext.MachineContext
 	logger := r.Logger.WithName(req.Namespace).WithName(req.Name)
 	logger.V(3).Info("Starting Reconcile VSphereMachine")
@@ -211,7 +211,7 @@ func (r *machineReconciler) Reconcile(_ goctx.Context, req ctrl.Request) (_ ctrl
 			machineContext.GetObjectMeta().Namespace, machineContext.GetObjectMeta().Name)
 	}
-	machineContext.SetBaseMachineContext(&context.BaseMachineContext{
+	machineContext.SetBaseMachineContext(&capvcontext.BaseMachineContext{
 		ControllerContext: r.ControllerContext,
 		Cluster:           cluster,
 		Machine:           machine,
@@ -262,17 +262,17 @@ func (r *machineReconciler) Reconcile(_ goctx.Context, req ctrl.Request) (_ ctrl
 	return r.reconcileNormal(machineContext)
 }

-func (r *machineReconciler) reconcileDelete(ctx context.MachineContext) (reconcile.Result, error) {
-	ctx.GetLogger().Info("Handling deleted VSphereMachine")
-	conditions.MarkFalse(ctx.GetVSphereMachine(), infrav1.VMProvisionedCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "")
+func (r *machineReconciler) reconcileDelete(machineCtx capvcontext.MachineContext) (reconcile.Result, error) {
+	machineCtx.GetLogger().Info("Handling deleted VSphereMachine")
+	conditions.MarkFalse(machineCtx.GetVSphereMachine(), infrav1.VMProvisionedCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "")

-	if err := r.VMService.ReconcileDelete(ctx); err != nil {
+	if err := r.VMService.ReconcileDelete(machineCtx); err != nil {
 		if apierrors.IsNotFound(err) {
 			// The VM is deleted so remove the finalizer.
-			ctrlutil.RemoveFinalizer(ctx.GetVSphereMachine(), infrav1.MachineFinalizer)
+			ctrlutil.RemoveFinalizer(machineCtx.GetVSphereMachine(), infrav1.MachineFinalizer)
 			return reconcile.Result{}, nil
 		}
-		conditions.MarkFalse(ctx.GetVSphereMachine(), infrav1.VMProvisionedCondition, clusterv1.DeletionFailedReason, clusterv1.ConditionSeverityWarning, "")
+		conditions.MarkFalse(machineCtx.GetVSphereMachine(), infrav1.VMProvisionedCondition, clusterv1.DeletionFailedReason, clusterv1.ConditionSeverityWarning, "")
 		return reconcile.Result{}, err
 	}

@@ -280,46 +280,46 @@ func (r *machineReconciler) reconcileDelete(ctx context.MachineContext) (reconci
 	return reconcile.Result{RequeueAfter: 10 * time.Second}, nil
 }

-func (r *machineReconciler) reconcileNormal(ctx context.MachineContext) (reconcile.Result, error) {
-	machineFailed, err := r.VMService.SyncFailureReason(ctx)
+func (r *machineReconciler) reconcileNormal(machineCtx capvcontext.MachineContext) (reconcile.Result, error) {
+	machineFailed, err := r.VMService.SyncFailureReason(machineCtx)
 	if err != nil && !apierrors.IsNotFound(err) {
 		return reconcile.Result{}, err
 	}

 	// If the VSphereMachine is in an error state, return early.
 	if machineFailed {
-		ctx.GetLogger().Info("Error state detected, skipping reconciliation")
+		machineCtx.GetLogger().Info("Error state detected, skipping reconciliation")
 		return reconcile.Result{}, nil
 	}

 	//nolint:gocritic
 	if r.supervisorBased {
-		err := r.setVMModifiers(ctx)
+		err := r.setVMModifiers(machineCtx)
 		if err != nil {
 			return reconcile.Result{}, err
 		}
 	} else {
 		// vmwarev1.VSphereCluster doesn't set Cluster.Status.Ready until the API endpoint is available.
- if !ctx.GetCluster().Status.InfrastructureReady { - ctx.GetLogger().Info("Cluster infrastructure is not ready yet") - conditions.MarkFalse(ctx.GetVSphereMachine(), infrav1.VMProvisionedCondition, infrav1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "") + if !machineCtx.GetCluster().Status.InfrastructureReady { + machineCtx.GetLogger().Info("Cluster infrastructure is not ready yet") + conditions.MarkFalse(machineCtx.GetVSphereMachine(), infrav1.VMProvisionedCondition, infrav1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "") return reconcile.Result{}, nil } } // Make sure bootstrap data is available and populated. - if ctx.GetMachine().Spec.Bootstrap.DataSecretName == nil { - if !util.IsControlPlaneMachine(ctx.GetVSphereMachine()) && !conditions.IsTrue(ctx.GetCluster(), clusterv1.ControlPlaneInitializedCondition) { - ctx.GetLogger().Info("Waiting for the control plane to be initialized") - conditions.MarkFalse(ctx.GetVSphereMachine(), infrav1.VMProvisionedCondition, clusterv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "") + if machineCtx.GetMachine().Spec.Bootstrap.DataSecretName == nil { + if !util.IsControlPlaneMachine(machineCtx.GetVSphereMachine()) && !conditions.IsTrue(machineCtx.GetCluster(), clusterv1.ControlPlaneInitializedCondition) { + machineCtx.GetLogger().Info("Waiting for the control plane to be initialized") + conditions.MarkFalse(machineCtx.GetVSphereMachine(), infrav1.VMProvisionedCondition, clusterv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "") return ctrl.Result{}, nil } - ctx.GetLogger().Info("Waiting for bootstrap data to be available") - conditions.MarkFalse(ctx.GetVSphereMachine(), infrav1.VMProvisionedCondition, infrav1.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "") + machineCtx.GetLogger().Info("Waiting for bootstrap data to be available") + conditions.MarkFalse(machineCtx.GetVSphereMachine(), infrav1.VMProvisionedCondition, infrav1.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "") return reconcile.Result{}, nil } - requeue, err := r.VMService.ReconcileNormal(ctx) + requeue, err := r.VMService.ReconcileNormal(machineCtx) if err != nil { return reconcile.Result{}, err } else if requeue { @@ -329,21 +329,21 @@ func (r *machineReconciler) reconcileNormal(ctx context.MachineContext) (reconci // The machine is patched at the last stage before marking the VM as provisioned // This makes sure that the VSphereMachine exists and is in a Running state // before attempting to patch. - err = r.patchMachineLabelsWithHostInfo(ctx) + err = r.patchMachineLabelsWithHostInfo(machineCtx) if err != nil { - r.Logger.Error(err, "failed to patch machine with host info label", "machine ", ctx.GetMachine().Name) + r.Logger.Error(err, "failed to patch machine with host info label", "machine ", machineCtx.GetMachine().Name) return reconcile.Result{}, err } - conditions.MarkTrue(ctx.GetVSphereMachine(), infrav1.VMProvisionedCondition) + conditions.MarkTrue(machineCtx.GetVSphereMachine(), infrav1.VMProvisionedCondition) return reconcile.Result{}, nil } // patchMachineLabelsWithHostInfo adds the ESXi host information as a label to the Machine object. // The ESXi host information is added with the CAPI node label prefix // which would be added onto the node by the CAPI controllers. 
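A note on the host-info labeling handled by the function below: an ESXi host name can only be attached to the Machine as a label value if it satisfies Kubernetes label-value syntax, which is presumably what the hostInfoErrStr error defined earlier in this file guards against. A minimal, hypothetical sketch of such a check (the helper name is assumed, not taken from this diff):

    package example

    import (
        "errors"

        "k8s.io/apimachinery/pkg/util/validation"
    )

    // validateHostInfoLabelValue illustrates the kind of validation implied by
    // hostInfoErrStr: label values must be at most 63 characters and use only
    // the allowed character set.
    func validateHostInfoLabelValue(hostInfo string) error {
        if errs := validation.IsValidLabelValue(hostInfo); len(errs) > 0 {
            return errors.New("host info cannot be used as a label value")
        }
        return nil
    }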
-func (r *machineReconciler) patchMachineLabelsWithHostInfo(ctx context.MachineContext) error { - hostInfo, err := r.VMService.GetHostInfo(ctx) +func (r *machineReconciler) patchMachineLabelsWithHostInfo(machineCtx capvcontext.MachineContext) error { + hostInfo, err := r.VMService.GetHostInfo(machineCtx) if err != nil { return err } @@ -356,7 +356,7 @@ func (r *machineReconciler) patchMachineLabelsWithHostInfo(ctx context.MachineCo return err } - machine := ctx.GetMachine() + machine := machineCtx.GetMachine() patchHelper, err := patch.NewHelper(machine, r.Client) if err != nil { return err @@ -369,7 +369,7 @@ func (r *machineReconciler) patchMachineLabelsWithHostInfo(ctx context.MachineCo return patchHelper.Patch(r, machine) } -func (r *machineReconciler) clusterToVSphereMachines(ctx goctx.Context, a client.Object) []reconcile.Request { +func (r *machineReconciler) clusterToVSphereMachines(ctx context.Context, a client.Object) []reconcile.Request { requests := []reconcile.Request{} machines, err := util.GetVSphereMachinesInCluster(ctx, r.Client, a.GetNamespace(), a.GetName()) if err != nil { @@ -402,8 +402,8 @@ func (r *machineReconciler) fetchCAPICluster(machine *clusterv1.Machine, vsphere } // Return hooks that will be invoked when a VirtualMachine is created. -func (r *machineReconciler) setVMModifiers(c context.MachineContext) error { - ctx, ok := c.(*vmware.SupervisorMachineContext) +func (r *machineReconciler) setVMModifiers(machineCtx capvcontext.MachineContext) error { + supervisorMachineCtx, ok := machineCtx.(*vmware.SupervisorMachineContext) if !ok { return errors.New("received unexpected MachineContext. expecting SupervisorMachineContext type") } @@ -411,13 +411,13 @@ func (r *machineReconciler) setVMModifiers(c context.MachineContext) error { networkModifier := func(obj runtime.Object) (runtime.Object, error) { // No need to check the type. We know this will be a VirtualMachine vm, _ := obj.(*vmoprv1.VirtualMachine) - ctx.Logger.V(3).Info("Applying network config to VM", "vm-name", vm.Name) - err := r.networkProvider.ConfigureVirtualMachine(ctx.GetClusterContext(), vm) + supervisorMachineCtx.Logger.V(3).Info("Applying network config to VM", "vm-name", vm.Name) + err := r.networkProvider.ConfigureVirtualMachine(supervisorMachineCtx.GetClusterContext(), vm) if err != nil { return nil, errors.Errorf("failed to configure machine network: %+v", err) } return vm, nil } - ctx.VMModifiers = []vmware.VMModifier{networkModifier} + supervisorMachineCtx.VMModifiers = []vmware.VMModifier{networkModifier} return nil } diff --git a/controllers/vspherevm_controller.go b/controllers/vspherevm_controller.go index 9d3b0af907..be33963225 100644 --- a/controllers/vspherevm_controller.go +++ b/controllers/vspherevm_controller.go @@ -17,7 +17,7 @@ limitations under the License. 
package controllers import ( - goctx "context" + "context" "fmt" "reflect" "strings" @@ -53,7 +53,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" "sigs.k8s.io/cluster-api-provider-vsphere/feature" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/clustermodule" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/identity" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/record" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/services" @@ -70,22 +70,22 @@ import ( // +kubebuilder:rbac:groups=core,resources=nodes,verbs=get;list;delete // AddVMControllerToManager adds the VM controller to the provided manager. -func AddVMControllerToManager(ctx *context.ControllerManagerContext, mgr manager.Manager, tracker *remote.ClusterCacheTracker, options controller.Options) error { +func AddVMControllerToManager(controllerCtx *capvcontext.ControllerManagerContext, mgr manager.Manager, tracker *remote.ClusterCacheTracker, options controller.Options) error { var ( controlledType = &infrav1.VSphereVM{} controlledTypeName = reflect.TypeOf(controlledType).Elem().Name() controlledTypeGVK = infrav1.GroupVersion.WithKind(controlledTypeName) controllerNameShort = fmt.Sprintf("%s-controller", strings.ToLower(controlledTypeName)) - controllerNameLong = fmt.Sprintf("%s/%s/%s", ctx.Namespace, ctx.Name, controllerNameShort) + controllerNameLong = fmt.Sprintf("%s/%s/%s", controllerCtx.Namespace, controllerCtx.Name, controllerNameShort) ) // Build the controller context. - controllerContext := &context.ControllerContext{ - ControllerManagerContext: ctx, + controllerContext := &capvcontext.ControllerContext{ + ControllerManagerContext: controllerCtx, Name: controllerNameShort, Recorder: record.New(mgr.GetEventRecorderFor(controllerNameLong)), - Logger: ctx.Logger.WithName(controllerNameShort), + Logger: controllerCtx.Logger.WithName(controllerNameShort), } r := vmReconciler{ ControllerContext: controllerContext, @@ -103,10 +103,10 @@ func AddVMControllerToManager(ctx *context.ControllerManagerContext, mgr manager // should cause a resource to be synchronized, such as a goroutine // waiting on some asynchronous, external task to complete. WatchesRawSource( - &source.Channel{Source: ctx.GetGenericEventChannelFor(controlledTypeGVK)}, + &source.Channel{Source: controllerCtx.GetGenericEventChannelFor(controlledTypeGVK)}, &handler.EnqueueRequestForObject{}, ). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), ctx.WatchFilterValue)). + WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(controllerCtx), controllerCtx.WatchFilterValue)). Watches( &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(r.clusterToVSphereVMs), @@ -147,14 +147,14 @@ func AddVMControllerToManager(ctx *context.ControllerManagerContext, mgr manager } type vmReconciler struct { - *context.ControllerContext + *capvcontext.ControllerContext VMService services.VirtualMachineService remoteClusterCacheTracker *remote.ClusterCacheTracker } // Reconcile ensures the back-end state reflects the Kubernetes resource state intent. -func (r vmReconciler) Reconcile(ctx goctx.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { +func (r vmReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { // Get the VSphereVM resource for this request. 
vsphereVM := &infrav1.VSphereVM{} if err := r.Client.Get(r, req.NamespacedName, vsphereVM); err != nil { @@ -224,7 +224,7 @@ func (r vmReconciler) Reconcile(ctx goctx.Context, req ctrl.Request) (_ ctrl.Res } // Create the VM context for this request. - vmContext := &context.VMContext{ + vmContext := &capvcontext.VMContext{ ControllerContext: r.ControllerContext, VSphereVM: vsphereVM, VSphereFailureDomain: vsphereFailureDomain, @@ -293,34 +293,34 @@ func (r vmReconciler) Reconcile(ctx goctx.Context, req ctrl.Request) (_ ctrl.Res // // This logic was moved to a smaller function outside of the main Reconcile() loop // for the ease of testing. -func (r vmReconciler) reconcile(ctx *context.VMContext, input fetchClusterModuleInput) (reconcile.Result, error) { +func (r vmReconciler) reconcile(vmCtx *capvcontext.VMContext, input fetchClusterModuleInput) (reconcile.Result, error) { if feature.Gates.Enabled(feature.NodeAntiAffinity) { clusterModuleInfo, err := r.fetchClusterModuleInfo(input) // If cluster module information cannot be fetched for a VM being deleted, // we should not block VM deletion since the cluster module is updated // once the VM gets removed. - if err != nil && ctx.VSphereVM.ObjectMeta.DeletionTimestamp.IsZero() { + if err != nil && vmCtx.VSphereVM.ObjectMeta.DeletionTimestamp.IsZero() { return reconcile.Result{}, err } - ctx.ClusterModuleInfo = clusterModuleInfo + vmCtx.ClusterModuleInfo = clusterModuleInfo } // Handle deleted machines - if !ctx.VSphereVM.ObjectMeta.DeletionTimestamp.IsZero() { - return r.reconcileDelete(ctx) + if !vmCtx.VSphereVM.ObjectMeta.DeletionTimestamp.IsZero() { + return r.reconcileDelete(vmCtx) } // Handle non-deleted machines - return r.reconcileNormal(ctx) + return r.reconcileNormal(vmCtx) } -func (r vmReconciler) reconcileDelete(ctx *context.VMContext) (reconcile.Result, error) { - ctx.Logger.Info("Handling deleted VSphereVM") +func (r vmReconciler) reconcileDelete(vmCtx *capvcontext.VMContext) (reconcile.Result, error) { + vmCtx.Logger.Info("Handling deleted VSphereVM") - conditions.MarkFalse(ctx.VSphereVM, infrav1.VMProvisionedCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") - result, vm, err := r.VMService.DestroyVM(ctx) + conditions.MarkFalse(vmCtx.VSphereVM, infrav1.VMProvisionedCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + result, vm, err := r.VMService.DestroyVM(vmCtx) if err != nil { - conditions.MarkFalse(ctx.VSphereVM, infrav1.VMProvisionedCondition, "DeletionFailed", clusterv1.ConditionSeverityWarning, err.Error()) + conditions.MarkFalse(vmCtx.VSphereVM, infrav1.VMProvisionedCondition, "DeletionFailed", clusterv1.ConditionSeverityWarning, err.Error()) return reconcile.Result{}, errors.Wrapf(err, "failed to destroy VM") } @@ -331,12 +331,12 @@ func (r vmReconciler) reconcileDelete(ctx *context.VMContext) (reconcile.Result, // Requeue the operation until the VM is "notfound". 
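For readers tracing the "requeue until the VM is notfound" comment above: the check that follows logs the current state and returns an empty reconcile.Result with a nil error, so the next pass is expected to be driven by later events rather than an explicit requeue. By contrast, an explicit requeue would be expressed through the Result fields, as in this hedged sketch (the delay is illustrative only, not taken from this diff):

    package example

    import (
        "time"

        "sigs.k8s.io/controller-runtime/pkg/reconcile"
    )

    // requeueSoon shows the two explicit ways a reconciler can ask
    // controller-runtime to call it again: immediately or after a delay.
    func requeueSoon(delayed bool) (reconcile.Result, error) {
        if delayed {
            return reconcile.Result{RequeueAfter: 10 * time.Second}, nil
        }
        return reconcile.Result{Requeue: true}, nil
    }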
if vm.State != infrav1.VirtualMachineStateNotFound { - ctx.Logger.Info("vm state is not reconciled", "expected-vm-state", infrav1.VirtualMachineStateNotFound, "actual-vm-state", vm.State) + vmCtx.Logger.Info("vm state is not reconciled", "expected-vm-state", infrav1.VirtualMachineStateNotFound, "actual-vm-state", vm.State) return reconcile.Result{}, nil } // Attempt to delete the node corresponding to the vsphere VM - result, err = r.deleteNode(ctx, vm.Name) + result, err = r.deleteNode(vmCtx, vm.Name) if err != nil { r.Logger.V(6).Info("unable to delete node", "err", err) } @@ -345,12 +345,12 @@ func (r vmReconciler) reconcileDelete(ctx *context.VMContext) (reconcile.Result, return result, nil } - if err := r.deleteIPAddressClaims(ctx); err != nil { + if err := r.deleteIPAddressClaims(vmCtx); err != nil { return reconcile.Result{}, err } // The VM is deleted so remove the finalizer. - ctrlutil.RemoveFinalizer(ctx.VSphereVM, infrav1.VMFinalizer) + ctrlutil.RemoveFinalizer(vmCtx.VSphereVM, infrav1.VMFinalizer) return reconcile.Result{}, nil } @@ -359,13 +359,13 @@ func (r vmReconciler) reconcileDelete(ctx *context.VMContext) (reconcile.Result, // This is necessary since CAPI does not the nodeRef field on the owner Machine object // until the node moves to Ready state. Hence, on Machine deletion it is unable to delete // the kubernetes node corresponding to the VM. -func (r vmReconciler) deleteNode(ctx *context.VMContext, name string) (reconcile.Result, error) { +func (r vmReconciler) deleteNode(vmCtx *capvcontext.VMContext, name string) (reconcile.Result, error) { // Fetching the cluster object from the VSphereVM object to create a remote client to the cluster - cluster, err := clusterutilv1.GetClusterFromMetadata(r.ControllerContext, r.Client, ctx.VSphereVM.ObjectMeta) + cluster, err := clusterutilv1.GetClusterFromMetadata(r.ControllerContext, r.Client, vmCtx.VSphereVM.ObjectMeta) if err != nil { return ctrl.Result{}, err } - clusterClient, err := r.remoteClusterCacheTracker.GetClient(ctx, ctrlclient.ObjectKeyFromObject(cluster)) + clusterClient, err := r.remoteClusterCacheTracker.GetClient(vmCtx, ctrlclient.ObjectKeyFromObject(cluster)) if err != nil { if errors.Is(err, remote.ErrClusterLocked) { r.Logger.V(5).Info("Requeuing because another worker has the lock on the ClusterCacheTracker") @@ -380,35 +380,35 @@ func (r vmReconciler) deleteNode(ctx *context.VMContext, name string) (reconcile Name: name, }, } - return ctrl.Result{}, clusterClient.Delete(ctx, node) + return ctrl.Result{}, clusterClient.Delete(vmCtx, node) } -func (r vmReconciler) reconcileNormal(ctx *context.VMContext) (reconcile.Result, error) { - if ctx.VSphereVM.Status.FailureReason != nil || ctx.VSphereVM.Status.FailureMessage != nil { - r.Logger.Info("VM is failed, won't reconcile", "namespace", ctx.VSphereVM.Namespace, "name", ctx.VSphereVM.Name) +func (r vmReconciler) reconcileNormal(vmCtx *capvcontext.VMContext) (reconcile.Result, error) { + if vmCtx.VSphereVM.Status.FailureReason != nil || vmCtx.VSphereVM.Status.FailureMessage != nil { + r.Logger.Info("VM is failed, won't reconcile", "namespace", vmCtx.VSphereVM.Namespace, "name", vmCtx.VSphereVM.Name) return reconcile.Result{}, nil } - if r.isWaitingForStaticIPAllocation(ctx) { - conditions.MarkFalse(ctx.VSphereVM, infrav1.VMProvisionedCondition, infrav1.WaitingForStaticIPAllocationReason, clusterv1.ConditionSeverityInfo, "") - ctx.Logger.Info("vm is waiting for static ip to be available") + if r.isWaitingForStaticIPAllocation(vmCtx) { + 
conditions.MarkFalse(vmCtx.VSphereVM, infrav1.VMProvisionedCondition, infrav1.WaitingForStaticIPAllocationReason, clusterv1.ConditionSeverityInfo, "") + vmCtx.Logger.Info("vm is waiting for static ip to be available") return reconcile.Result{}, nil } - if err := r.reconcileIPAddressClaims(ctx); err != nil { + if err := r.reconcileIPAddressClaims(vmCtx); err != nil { return reconcile.Result{}, err } // Get or create the VM. - vm, err := r.VMService.ReconcileVM(ctx) + vm, err := r.VMService.ReconcileVM(vmCtx) if err != nil { - ctx.Logger.Error(err, "error reconciling VM") + vmCtx.Logger.Error(err, "error reconciling VM") return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile VM") } // Do not proceed until the backend VM is marked ready. if vm.State != infrav1.VirtualMachineStateReady { - ctx.Logger.Info( + vmCtx.Logger.Info( "VM state is not reconciled", "expected-vm-state", infrav1.VirtualMachineStateReady, "actual-vm-state", vm.State) @@ -416,33 +416,33 @@ func (r vmReconciler) reconcileNormal(ctx *context.VMContext) (reconcile.Result, } // Update the VSphereVM's BIOS UUID. - ctx.Logger.Info("vm bios-uuid", "biosuuid", vm.BiosUUID) + vmCtx.Logger.Info("vm bios-uuid", "biosuuid", vm.BiosUUID) // defensive check to ensure we are not removing the biosUUID if vm.BiosUUID != "" { - ctx.VSphereVM.Spec.BiosUUID = vm.BiosUUID + vmCtx.VSphereVM.Spec.BiosUUID = vm.BiosUUID } else { return reconcile.Result{}, errors.Errorf("bios uuid is empty while VM is ready") } // VMRef should be set just once. It is not supposed to change! - if vm.VMRef != "" && ctx.VSphereVM.Status.VMRef == "" { - ctx.VSphereVM.Status.VMRef = vm.VMRef + if vm.VMRef != "" && vmCtx.VSphereVM.Status.VMRef == "" { + vmCtx.VSphereVM.Status.VMRef = vm.VMRef } // Update the VSphereVM's network status. - r.reconcileNetwork(ctx, vm) + r.reconcileNetwork(vmCtx, vm) // we didn't get any addresses, requeue - if len(ctx.VSphereVM.Status.Addresses) == 0 { - conditions.MarkFalse(ctx.VSphereVM, infrav1.VMProvisionedCondition, infrav1.WaitingForIPAllocationReason, clusterv1.ConditionSeverityInfo, "") + if len(vmCtx.VSphereVM.Status.Addresses) == 0 { + conditions.MarkFalse(vmCtx.VSphereVM, infrav1.VMProvisionedCondition, infrav1.WaitingForIPAllocationReason, clusterv1.ConditionSeverityInfo, "") return reconcile.Result{RequeueAfter: 10 * time.Second}, nil } // Once the network is online the VM is considered ready. - ctx.VSphereVM.Status.Ready = true - conditions.MarkTrue(ctx.VSphereVM, infrav1.VMProvisionedCondition) - ctx.Logger.Info("VSphereVM is ready") + vmCtx.VSphereVM.Status.Ready = true + conditions.MarkTrue(vmCtx.VSphereVM, infrav1.VMProvisionedCondition) + vmCtx.Logger.Info("VSphereVM is ready") return reconcile.Result{}, nil } @@ -450,8 +450,8 @@ func (r vmReconciler) reconcileNormal(ctx *context.VMContext) (reconcile.Result, // to be allocated. // It checks the state of both DHCP4 and DHCP6 for all the network devices and if // any static IP addresses or IPAM Pools are specified. 
-func (r vmReconciler) isWaitingForStaticIPAllocation(ctx *context.VMContext) bool { - devices := ctx.VSphereVM.Spec.Network.Devices +func (r vmReconciler) isWaitingForStaticIPAllocation(vmCtx *capvcontext.VMContext) bool { + devices := vmCtx.VSphereVM.Spec.Network.Devices for _, dev := range devices { if !dev.DHCP4 && !dev.DHCP6 && len(dev.IPAddrs) == 0 && len(dev.AddressesFromPools) == 0 { // Static IP is not available yet @@ -462,16 +462,16 @@ func (r vmReconciler) isWaitingForStaticIPAllocation(ctx *context.VMContext) boo return false } -func (r vmReconciler) reconcileNetwork(ctx *context.VMContext, vm infrav1.VirtualMachine) { - ctx.VSphereVM.Status.Network = vm.Network +func (r vmReconciler) reconcileNetwork(vmCtx *capvcontext.VMContext, vm infrav1.VirtualMachine) { + vmCtx.VSphereVM.Status.Network = vm.Network ipAddrs := make([]string, 0, len(vm.Network)) - for _, netStatus := range ctx.VSphereVM.Status.Network { + for _, netStatus := range vmCtx.VSphereVM.Status.Network { ipAddrs = append(ipAddrs, netStatus.IPAddrs...) } - ctx.VSphereVM.Status.Addresses = ipAddrs + vmCtx.VSphereVM.Status.Addresses = ipAddrs } -func (r vmReconciler) clusterToVSphereVMs(ctx goctx.Context, a ctrlclient.Object) []reconcile.Request { +func (r vmReconciler) clusterToVSphereVMs(ctx context.Context, a ctrlclient.Object) []reconcile.Request { requests := []reconcile.Request{} vms := &infrav1.VSphereVMList{} err := r.Client.List(ctx, vms, ctrlclient.MatchingLabels( @@ -494,7 +494,7 @@ func (r vmReconciler) clusterToVSphereVMs(ctx goctx.Context, a ctrlclient.Object return requests } -func (r vmReconciler) vsphereClusterToVSphereVMs(ctx goctx.Context, a ctrlclient.Object) []reconcile.Request { +func (r vmReconciler) vsphereClusterToVSphereVMs(ctx context.Context, a ctrlclient.Object) []reconcile.Request { vsphereCluster, ok := a.(*infrav1.VSphereCluster) if !ok { return nil @@ -526,7 +526,7 @@ func (r vmReconciler) vsphereClusterToVSphereVMs(ctx goctx.Context, a ctrlclient return requests } -func (r vmReconciler) ipAddressClaimToVSphereVM(_ goctx.Context, a ctrlclient.Object) []reconcile.Request { +func (r vmReconciler) ipAddressClaimToVSphereVM(_ context.Context, a ctrlclient.Object) []reconcile.Request { ipAddressClaim, ok := a.(*ipamv1.IPAddressClaim) if !ok { return nil @@ -549,7 +549,7 @@ func (r vmReconciler) ipAddressClaimToVSphereVM(_ goctx.Context, a ctrlclient.Ob return requests } -func (r vmReconciler) retrieveVcenterSession(ctx goctx.Context, vsphereVM *infrav1.VSphereVM) (*session.Session, error) { +func (r vmReconciler) retrieveVcenterSession(ctx context.Context, vsphereVM *infrav1.VSphereVM) (*session.Session, error) { // Get cluster object and then get VSphereCluster object params := session.NewParams(). diff --git a/controllers/vspherevm_controller_test.go b/controllers/vspherevm_controller_test.go index 3dbcfae453..94c71eaa70 100644 --- a/controllers/vspherevm_controller_test.go +++ b/controllers/vspherevm_controller_test.go @@ -17,7 +17,7 @@ limitations under the License. 
package controllers import ( - goctx "context" + "context" "fmt" "testing" "time" @@ -42,7 +42,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" "sigs.k8s.io/cluster-api-provider-vsphere/feature" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context/fake" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/identity" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/record" @@ -203,7 +203,7 @@ func TestReconcileNormal_WaitingForIPAddrAllocation(t *testing.T) { controllerMgrContext.Password = password controllerMgrContext.Username = simr.ServerURL().User.Username() - controllerContext := &context.ControllerContext{ + controllerContext := &capvcontext.ControllerContext{ ControllerManagerContext: controllerMgrContext, Recorder: record.New(apirecord.NewFakeRecorder(100)), Logger: log.Log, @@ -230,13 +230,13 @@ func TestReconcileNormal_WaitingForIPAddrAllocation(t *testing.T) { Network: nil, }, nil) r := setupReconciler(fakeVMSvc) - _, err = r.Reconcile(goctx.Background(), ctrl.Request{NamespacedName: util.ObjectKey(vsphereVM)}) + _, err = r.Reconcile(context.Background(), ctrl.Request{NamespacedName: util.ObjectKey(vsphereVM)}) g := NewWithT(t) g.Expect(err).NotTo(HaveOccurred()) vm := &infrav1.VSphereVM{} vmKey := util.ObjectKey(vsphereVM) - g.Expect(r.Client.Get(goctx.Background(), vmKey, vm)).NotTo(HaveOccurred()) + g.Expect(r.Client.Get(context.Background(), vmKey, vm)).NotTo(HaveOccurred()) g.Expect(conditions.Has(vm, infrav1.VMProvisionedCondition)).To(BeTrue()) vmProvisionCondition := conditions.Get(vm, infrav1.VMProvisionedCondition) @@ -263,13 +263,13 @@ func TestReconcileNormal_WaitingForIPAddrAllocation(t *testing.T) { }}, }, nil) r := setupReconciler(fakeVMSvc) - _, err = r.Reconcile(goctx.Background(), ctrl.Request{NamespacedName: util.ObjectKey(vsphereVM)}) + _, err = r.Reconcile(context.Background(), ctrl.Request{NamespacedName: util.ObjectKey(vsphereVM)}) g := NewWithT(t) g.Expect(err).NotTo(HaveOccurred()) vm := &infrav1.VSphereVM{} vmKey := util.ObjectKey(vsphereVM) - g.Expect(r.Client.Get(goctx.Background(), vmKey, vm)).NotTo(HaveOccurred()) + g.Expect(r.Client.Get(context.Background(), vmKey, vm)).NotTo(HaveOccurred()) g.Expect(conditions.Has(vm, infrav1.VMProvisionedCondition)).To(BeTrue()) vmProvisionCondition := conditions.Get(vm, infrav1.VMProvisionedCondition) @@ -311,16 +311,16 @@ func TestReconcileNormal_WaitingForIPAddrAllocation(t *testing.T) { g := NewWithT(t) - _, err := r.Reconcile(goctx.Background(), ctrl.Request{NamespacedName: util.ObjectKey(vsphereVM)}) + _, err := r.Reconcile(context.Background(), ctrl.Request{NamespacedName: util.ObjectKey(vsphereVM)}) g.Expect(err).To(HaveOccurred()) vm := &infrav1.VSphereVM{} vmKey := util.ObjectKey(vsphereVM) - g.Expect(apierrors.IsNotFound(r.Client.Get(goctx.Background(), vmKey, vm))).To(BeTrue()) + g.Expect(apierrors.IsNotFound(r.Client.Get(context.Background(), vmKey, vm))).To(BeTrue()) claim := &ipamv1.IPAddressClaim{} ipacKey := util.ObjectKey(ipAddressClaim) - g.Expect(r.Client.Get(goctx.Background(), ipacKey, claim)).NotTo(HaveOccurred()) + g.Expect(r.Client.Get(context.Background(), ipacKey, claim)).NotTo(HaveOccurred()) g.Expect(claim.ObjectMeta.Finalizers).NotTo(ContainElement(infrav1.IPAddressClaimFinalizer)) }) } @@ -483,20 +483,20 @@ func TestRetrievingVCenterCredentialsFromCluster(t *testing.T) { initObjs = append(initObjs, secret, vsphereVM, vsphereMachine, 
machine, cluster, vsphereCluster) controllerMgrContext := fake.NewControllerManagerContext(initObjs...) - controllerContext := &context.ControllerContext{ + controllerContext := &capvcontext.ControllerContext{ ControllerManagerContext: controllerMgrContext, Recorder: record.New(apirecord.NewFakeRecorder(100)), Logger: log.Log, } r := vmReconciler{ControllerContext: controllerContext} - _, err = r.Reconcile(goctx.Background(), ctrl.Request{NamespacedName: util.ObjectKey(vsphereVM)}) + _, err = r.Reconcile(context.Background(), ctrl.Request{NamespacedName: util.ObjectKey(vsphereVM)}) g := NewWithT(t) g.Expect(err).NotTo(HaveOccurred()) vm := &infrav1.VSphereVM{} vmKey := util.ObjectKey(vsphereVM) - g.Expect(r.Client.Get(goctx.Background(), vmKey, vm)).NotTo(HaveOccurred()) + g.Expect(r.Client.Get(context.Background(), vmKey, vm)).NotTo(HaveOccurred()) g.Expect(conditions.Has(vm, infrav1.VCenterAvailableCondition)).To(BeTrue()) vCenterCondition := conditions.Get(vm, infrav1.VCenterAvailableCondition) g.Expect(vCenterCondition.Status).To(Equal(corev1.ConditionTrue)) @@ -519,14 +519,14 @@ func TestRetrievingVCenterCredentialsFromCluster(t *testing.T) { initObjs = append(initObjs, secret, vsphereVM, vsphereMachine, machine, cluster, vsphereCluster) controllerMgrContext := fake.NewControllerManagerContext(initObjs...) - controllerContext := &context.ControllerContext{ + controllerContext := &capvcontext.ControllerContext{ ControllerManagerContext: controllerMgrContext, Recorder: record.New(apirecord.NewFakeRecorder(100)), Logger: log.Log, } r := vmReconciler{ControllerContext: controllerContext} - _, err = r.Reconcile(goctx.Background(), ctrl.Request{NamespacedName: util.ObjectKey(vsphereVM)}) + _, err = r.Reconcile(context.Background(), ctrl.Request{NamespacedName: util.ObjectKey(vsphereVM)}) g := NewWithT(t) g.Expect(err).To(HaveOccurred()) }, @@ -570,7 +570,7 @@ func Test_reconcile(t *testing.T) { setupReconciler := func(vmService services.VirtualMachineService, initObjs ...client.Object) vmReconciler { return vmReconciler{ - ControllerContext: &context.ControllerContext{ + ControllerContext: &capvcontext.ControllerContext{ ControllerManagerContext: fake.NewControllerManagerContext(initObjs...), Recorder: record.New(apirecord.NewFakeRecorder(100)), Logger: log.Log, @@ -590,7 +590,7 @@ func Test_reconcile(t *testing.T) { State: infrav1.VirtualMachineStateReady, }, nil) r := setupReconciler(fakeVMSvc, initObjs...) - _, err := r.reconcile(&context.VMContext{ + _, err := r.reconcile(&capvcontext.VMContext{ ControllerContext: r.ControllerContext, VSphereVM: vsphereVM, Logger: r.Logger, @@ -606,7 +606,7 @@ func Test_reconcile(t *testing.T) { t.Run("when anti affinity feature gate is turned on", func(t *testing.T) { _ = feature.MutableGates.Set("NodeAntiAffinity=true") r := setupReconciler(new(fake_svc.VMService), initObjs...) - _, err := r.reconcile(&context.VMContext{ + _, err := r.reconcile(&capvcontext.VMContext{ ControllerContext: r.ControllerContext, VSphereVM: vsphereVM, Logger: r.Logger, @@ -632,7 +632,7 @@ func Test_reconcile(t *testing.T) { }, nil) r := setupReconciler(fakeVMSvc, objsWithHierarchy...) - _, err := r.reconcile(&context.VMContext{ + _, err := r.reconcile(&capvcontext.VMContext{ ControllerContext: r.ControllerContext, VSphereVM: vsphereVM, Logger: r.Logger, @@ -665,7 +665,7 @@ func Test_reconcile(t *testing.T) { objsWithHierarchy = append(objsWithHierarchy, createMachineOwnerHierarchy(machine)...) r := setupReconciler(fakeVMSvc, objsWithHierarchy...) 
- _, err := r.reconcile(&context.VMContext{ + _, err := r.reconcile(&capvcontext.VMContext{ ControllerContext: r.ControllerContext, VSphereVM: deletedVM, Logger: r.Logger, @@ -680,7 +680,7 @@ func Test_reconcile(t *testing.T) { t.Run("when info cannot be fetched", func(t *testing.T) { r := setupReconciler(fakeVMSvc, initObjs...) - _, err := r.reconcile(&context.VMContext{ + _, err := r.reconcile(&capvcontext.VMContext{ ControllerContext: r.ControllerContext, VSphereVM: deletedVM, Logger: r.Logger, diff --git a/controllers/vspherevm_ipaddress_reconciler.go b/controllers/vspherevm_ipaddress_reconciler.go index 1bcd07885a..cc87e58384 100644 --- a/controllers/vspherevm_ipaddress_reconciler.go +++ b/controllers/vspherevm_ipaddress_reconciler.go @@ -33,7 +33,7 @@ import ( ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/util" ) @@ -42,7 +42,7 @@ import ( // reconcileIPAddressClaims ensures that VSphereVMs that are configured with .spec.network.devices.addressFromPools // have corresponding IPAddressClaims. -func (r vmReconciler) reconcileIPAddressClaims(ctx *context.VMContext) error { +func (r vmReconciler) reconcileIPAddressClaims(vmCtx *capvcontext.VMContext) error { totalClaims, claimsCreated := 0, 0 claimsFulfilled := 0 @@ -51,23 +51,23 @@ func (r vmReconciler) reconcileIPAddressClaims(ctx *context.VMContext) error { errList []error ) - for devIdx, device := range ctx.VSphereVM.Spec.Network.Devices { + for devIdx, device := range vmCtx.VSphereVM.Spec.Network.Devices { for poolRefIdx, poolRef := range device.AddressesFromPools { totalClaims++ - ipAddrClaimName := util.IPAddressClaimName(ctx.VSphereVM.Name, devIdx, poolRefIdx) + ipAddrClaimName := util.IPAddressClaimName(vmCtx.VSphereVM.Name, devIdx, poolRefIdx) ipAddrClaim := &ipamv1.IPAddressClaim{} ipAddrClaimKey := client.ObjectKey{ - Namespace: ctx.VSphereVM.Namespace, + Namespace: vmCtx.VSphereVM.Namespace, Name: ipAddrClaimName, } - err := ctx.Client.Get(ctx, ipAddrClaimKey, ipAddrClaim) + err := vmCtx.Client.Get(vmCtx, ipAddrClaimKey, ipAddrClaim) if err != nil && !apierrors.IsNotFound(err) { - ctx.Logger.Error(err, "fetching IPAddressClaim failed", "name", ipAddrClaimName) + vmCtx.Logger.Error(err, "fetching IPAddressClaim failed", "name", ipAddrClaimName) return err } - ipAddrClaim, created, err := createOrPatchIPAddressClaim(ctx, ipAddrClaimName, poolRef) + ipAddrClaim, created, err := createOrPatchIPAddressClaim(vmCtx, ipAddrClaimName, poolRef) if err != nil { - ctx.Logger.Error(err, "createOrPatchIPAddressClaim failed", "name", ipAddrClaimName) + vmCtx.Logger.Error(err, "createOrPatchIPAddressClaim failed", "name", ipAddrClaimName) errList = append(errList, err) continue } @@ -88,7 +88,7 @@ func (r vmReconciler) reconcileIPAddressClaims(ctx *context.VMContext) error { if len(errList) > 0 { aggregatedErr := kerrors.NewAggregate(errList) - conditions.MarkFalse(ctx.VSphereVM, + conditions.MarkFalse(vmCtx.VSphereVM, infrav1.IPAddressClaimedCondition, infrav1.IPAddressClaimNotFoundReason, clusterv1.ConditionSeverityError, @@ -101,7 +101,7 @@ func (r vmReconciler) reconcileIPAddressClaims(ctx *context.VMContext) error { // To correctly calculate the status of the condition, we would want all the IPAddressClaim objects // to report the Ready Condition. 
if len(claims) == totalClaims { - conditions.SetAggregate(ctx.VSphereVM, + conditions.SetAggregate(vmCtx.VSphereVM, infrav1.IPAddressClaimedCondition, claims, conditions.AddSourceRef(), @@ -112,13 +112,13 @@ func (r vmReconciler) reconcileIPAddressClaims(ctx *context.VMContext) error { // Fallback logic to calculate the state of the IPAddressClaimed condition switch { case totalClaims == claimsFulfilled: - conditions.MarkTrue(ctx.VSphereVM, infrav1.IPAddressClaimedCondition) + conditions.MarkTrue(vmCtx.VSphereVM, infrav1.IPAddressClaimedCondition) case claimsFulfilled < totalClaims && claimsCreated > 0: - conditions.MarkFalse(ctx.VSphereVM, infrav1.IPAddressClaimedCondition, + conditions.MarkFalse(vmCtx.VSphereVM, infrav1.IPAddressClaimedCondition, infrav1.IPAddressClaimsBeingCreatedReason, clusterv1.ConditionSeverityInfo, "%d/%d claims being created", claimsCreated, totalClaims) case claimsFulfilled < totalClaims && claimsCreated == 0: - conditions.MarkFalse(ctx.VSphereVM, infrav1.IPAddressClaimedCondition, + conditions.MarkFalse(vmCtx.VSphereVM, infrav1.IPAddressClaimedCondition, infrav1.WaitingForIPAddressReason, clusterv1.ConditionSeverityInfo, "%d/%d claims being processed", totalClaims-claimsFulfilled, totalClaims) } @@ -129,21 +129,21 @@ func (r vmReconciler) reconcileIPAddressClaims(ctx *context.VMContext) error { // from an externally managed IPPool. Ensures that the claim has a reference to the cluster of the VM to // support pausing reconciliation. // The responsibility of the IP address resolution is handled by an external IPAM provider. -func createOrPatchIPAddressClaim(ctx *context.VMContext, name string, poolRef corev1.TypedLocalObjectReference) (*ipamv1.IPAddressClaim, bool, error) { +func createOrPatchIPAddressClaim(vmCtx *capvcontext.VMContext, name string, poolRef corev1.TypedLocalObjectReference) (*ipamv1.IPAddressClaim, bool, error) { claim := &ipamv1.IPAddressClaim{ ObjectMeta: metav1.ObjectMeta{ Name: name, - Namespace: ctx.VSphereVM.Namespace, + Namespace: vmCtx.VSphereVM.Namespace, }, } mutateFn := func() (err error) { claim.SetOwnerReferences(clusterutilv1.EnsureOwnerRef( claim.OwnerReferences, metav1.OwnerReference{ - APIVersion: ctx.VSphereVM.APIVersion, - Kind: ctx.VSphereVM.Kind, - Name: ctx.VSphereVM.Name, - UID: ctx.VSphereVM.UID, + APIVersion: vmCtx.VSphereVM.APIVersion, + Kind: vmCtx.VSphereVM.Kind, + Name: vmCtx.VSphereVM.Name, + UID: vmCtx.VSphereVM.UID, })) ctrlutil.AddFinalizer(claim, infrav1.IPAddressClaimFinalizer) @@ -151,7 +151,7 @@ func createOrPatchIPAddressClaim(ctx *context.VMContext, name string, poolRef co if claim.Labels == nil { claim.Labels = make(map[string]string) } - claim.Labels[clusterv1.ClusterNameLabel] = ctx.VSphereVM.Labels[clusterv1.ClusterNameLabel] + claim.Labels[clusterv1.ClusterNameLabel] = vmCtx.VSphereVM.Labels[clusterv1.ClusterNameLabel] claim.Spec.PoolRef.APIGroup = poolRef.APIGroup claim.Spec.PoolRef.Kind = poolRef.Kind @@ -159,9 +159,9 @@ func createOrPatchIPAddressClaim(ctx *context.VMContext, name string, poolRef co return nil } - result, err := ctrlutil.CreateOrPatch(ctx, ctx.Client, claim, mutateFn) + result, err := ctrlutil.CreateOrPatch(vmCtx, vmCtx.Client, claim, mutateFn) if err != nil { - ctx.Logger.Error( + vmCtx.Logger.Error( err, "failed to CreateOrPatch IPAddressClaim", "namespace", @@ -177,20 +177,20 @@ func createOrPatchIPAddressClaim(ctx *context.VMContext, name string, poolRef co } switch result { case ctrlutil.OperationResultCreated: - ctx.Logger.Info( + vmCtx.Logger.Info( "created claim", "claim", 
key, ) return claim, true, nil case ctrlutil.OperationResultUpdated: - ctx.Logger.Info( + vmCtx.Logger.Info( "updated claim", "claim", key, ) case ctrlutil.OperationResultNone, ctrlutil.OperationResultUpdatedStatus, ctrlutil.OperationResultUpdatedStatusOnly: - ctx.Logger.V(5).Info( + vmCtx.Logger.V(5).Info( "no change required for claim", "claim", key, "operation", result, @@ -201,25 +201,25 @@ func createOrPatchIPAddressClaim(ctx *context.VMContext, name string, poolRef co // deleteIPAddressClaims removes the finalizers from the IPAddressClaim objects // thus freeing them up for garbage collection. -func (r vmReconciler) deleteIPAddressClaims(ctx *context.VMContext) error { - for devIdx, device := range ctx.VSphereVM.Spec.Network.Devices { +func (r vmReconciler) deleteIPAddressClaims(vmCtx *capvcontext.VMContext) error { + for devIdx, device := range vmCtx.VSphereVM.Spec.Network.Devices { for poolRefIdx := range device.AddressesFromPools { // check if claim exists ipAddrClaim := &ipamv1.IPAddressClaim{} - ipAddrClaimName := util.IPAddressClaimName(ctx.VSphereVM.Name, devIdx, poolRefIdx) - ctx.Logger.Info("removing finalizer", "IPAddressClaim", ipAddrClaimName) + ipAddrClaimName := util.IPAddressClaimName(vmCtx.VSphereVM.Name, devIdx, poolRefIdx) + vmCtx.Logger.Info("removing finalizer", "IPAddressClaim", ipAddrClaimName) ipAddrClaimKey := client.ObjectKey{ - Namespace: ctx.VSphereVM.Namespace, + Namespace: vmCtx.VSphereVM.Namespace, Name: ipAddrClaimName, } - if err := ctx.Client.Get(ctx, ipAddrClaimKey, ipAddrClaim); err != nil { + if err := vmCtx.Client.Get(vmCtx, ipAddrClaimKey, ipAddrClaim); err != nil { if apierrors.IsNotFound(err) { continue } return errors.Wrapf(err, fmt.Sprintf("failed to find IPAddressClaim %q to remove the finalizer", ipAddrClaimName)) } if ctrlutil.RemoveFinalizer(ipAddrClaim, infrav1.IPAddressClaimFinalizer) { - if err := ctx.Client.Update(ctx, ipAddrClaim); err != nil { + if err := vmCtx.Client.Update(vmCtx, ipAddrClaim); err != nil { return errors.Wrapf(err, fmt.Sprintf("failed to update IPAddressClaim %q", ipAddrClaimName)) } } diff --git a/controllers/vspherevm_ipaddress_reconciler_test.go b/controllers/vspherevm_ipaddress_reconciler_test.go index 1c5223fb9d..f20951281b 100644 --- a/controllers/vspherevm_ipaddress_reconciler_test.go +++ b/controllers/vspherevm_ipaddress_reconciler_test.go @@ -17,7 +17,7 @@ limitations under the License. 
package controllers import ( - goctx "context" + "context" "testing" "github.com/go-logr/logr" @@ -32,16 +32,16 @@ import ( ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context/fake" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/util" ) func Test_vmReconciler_reconcileIPAddressClaims(t *testing.T) { name, namespace := "test-vm", "my-namespace" - setup := func(vsphereVM *infrav1.VSphereVM, initObjects ...client.Object) *context.VMContext { + setup := func(vsphereVM *infrav1.VSphereVM, initObjects ...client.Object) *capvcontext.VMContext { ctx := fake.NewControllerContext(fake.NewControllerManagerContext(initObjects...)) - return &context.VMContext{ + return &capvcontext.VMContext{ ControllerContext: ctx, VSphereVM: vsphereVM, Logger: logr.Discard(), @@ -84,7 +84,7 @@ func Test_vmReconciler_reconcileIPAddressClaims(t *testing.T) { g.Expect(err).ToNot(gomega.HaveOccurred()) ipAddrClaimList := &ipamv1.IPAddressClaimList{} - g.Expect(testCtx.Client.List(goctx.TODO(), ipAddrClaimList)).To(gomega.Succeed()) + g.Expect(testCtx.Client.List(context.TODO(), ipAddrClaimList)).To(gomega.Succeed()) g.Expect(ipAddrClaimList.Items).To(gomega.HaveLen(3)) for idx := range ipAddrClaimList.Items { @@ -133,7 +133,7 @@ func Test_vmReconciler_reconcileIPAddressClaims(t *testing.T) { g.Expect(claimedCondition.Message).To(gomega.Equal("3/3 claims being processed")) ipAddrClaimList := &ipamv1.IPAddressClaimList{} - g.Expect(testCtx.Client.List(goctx.TODO(), ipAddrClaimList)).To(gomega.Succeed()) + g.Expect(testCtx.Client.List(context.TODO(), ipAddrClaimList)).To(gomega.Succeed()) for idx := range ipAddrClaimList.Items { claim := ipAddrClaimList.Items[idx] @@ -167,7 +167,7 @@ func Test_vmReconciler_reconcileIPAddressClaims(t *testing.T) { g.Expect(claimedCondition.Status).To(gomega.Equal(corev1.ConditionTrue)) ipAddrClaimList := &ipamv1.IPAddressClaimList{} - g.Expect(testCtx.Client.List(goctx.TODO(), ipAddrClaimList)).To(gomega.Succeed()) + g.Expect(testCtx.Client.List(context.TODO(), ipAddrClaimList)).To(gomega.Succeed()) for idx := range ipAddrClaimList.Items { claim := ipAddrClaimList.Items[idx] diff --git a/main.go b/main.go index d7cd33875d..660653d4ae 100644 --- a/main.go +++ b/main.go @@ -52,7 +52,7 @@ import ( "sigs.k8s.io/cluster-api-provider-vsphere/feature" "sigs.k8s.io/cluster-api-provider-vsphere/internal/webhooks" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/constants" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/manager" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/session" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/version" @@ -256,8 +256,8 @@ func main() { managerOpts.RetryPeriod = &leaderElectionRetryPeriod // Create a function that adds all the controllers and webhooks to the manager. 
- addToManager := func(ctx *context.ControllerManagerContext, mgr ctrlmgr.Manager) error { - tracker, err := setupRemoteClusterCacheTracker(ctx, mgr) + addToManager := func(controllerCtx *capvcontext.ControllerManagerContext, mgr ctrlmgr.Manager) error { + tracker, err := setupRemoteClusterCacheTracker(controllerCtx, mgr) if err != nil { return perrors.Wrapf(err, "unable to create remote cluster cache tracker") } @@ -269,7 +269,7 @@ func main() { return err } if isLoaded { - if err := setupVAPIControllers(ctx, mgr, tracker); err != nil { + if err := setupVAPIControllers(controllerCtx, mgr, tracker); err != nil { return fmt.Errorf("setupVAPIControllers: %w", err) } } else { @@ -283,7 +283,7 @@ func main() { return err } if isLoaded { - if err := setupSupervisorControllers(ctx, mgr, tracker); err != nil { + if err := setupSupervisorControllers(controllerCtx, mgr, tracker); err != nil { return fmt.Errorf("setupSupervisorControllers: %w", err) } } else { @@ -330,7 +330,7 @@ func main() { defer session.Clear() } -func setupVAPIControllers(ctx *context.ControllerManagerContext, mgr ctrlmgr.Manager, tracker *remote.ClusterCacheTracker) error { +func setupVAPIControllers(controllerCtx *capvcontext.ControllerManagerContext, mgr ctrlmgr.Manager, tracker *remote.ClusterCacheTracker) error { if err := (&webhooks.VSphereClusterTemplateWebhook{}).SetupWebhookWithManager(mgr); err != nil { return err } @@ -355,36 +355,36 @@ func setupVAPIControllers(ctx *context.ControllerManagerContext, mgr ctrlmgr.Man return err } - if err := controllers.AddClusterControllerToManager(ctx, mgr, &infrav1.VSphereCluster{}, concurrency(vSphereClusterConcurrency)); err != nil { + if err := controllers.AddClusterControllerToManager(controllerCtx, mgr, &infrav1.VSphereCluster{}, concurrency(vSphereClusterConcurrency)); err != nil { return err } - if err := controllers.AddMachineControllerToManager(ctx, mgr, &infrav1.VSphereMachine{}, concurrency(vSphereMachineConcurrency)); err != nil { + if err := controllers.AddMachineControllerToManager(controllerCtx, mgr, &infrav1.VSphereMachine{}, concurrency(vSphereMachineConcurrency)); err != nil { return err } - if err := controllers.AddVMControllerToManager(ctx, mgr, tracker, concurrency(vSphereVMConcurrency)); err != nil { + if err := controllers.AddVMControllerToManager(controllerCtx, mgr, tracker, concurrency(vSphereVMConcurrency)); err != nil { return err } - if err := controllers.AddVsphereClusterIdentityControllerToManager(ctx, mgr, concurrency(vSphereClusterIdentityConcurrency)); err != nil { + if err := controllers.AddVsphereClusterIdentityControllerToManager(controllerCtx, mgr, concurrency(vSphereClusterIdentityConcurrency)); err != nil { return err } - return controllers.AddVSphereDeploymentZoneControllerToManager(ctx, mgr, concurrency(vSphereDeploymentZoneConcurrency)) + return controllers.AddVSphereDeploymentZoneControllerToManager(controllerCtx, mgr, concurrency(vSphereDeploymentZoneConcurrency)) } -func setupSupervisorControllers(ctx *context.ControllerManagerContext, mgr ctrlmgr.Manager, tracker *remote.ClusterCacheTracker) error { - if err := controllers.AddClusterControllerToManager(ctx, mgr, &vmwarev1.VSphereCluster{}, concurrency(vSphereClusterConcurrency)); err != nil { +func setupSupervisorControllers(controllerCtx *capvcontext.ControllerManagerContext, mgr ctrlmgr.Manager, tracker *remote.ClusterCacheTracker) error { + if err := controllers.AddClusterControllerToManager(controllerCtx, mgr, &vmwarev1.VSphereCluster{}, concurrency(vSphereClusterConcurrency)); err 
!= nil { return err } - if err := controllers.AddMachineControllerToManager(ctx, mgr, &vmwarev1.VSphereMachine{}, concurrency(vSphereMachineConcurrency)); err != nil { + if err := controllers.AddMachineControllerToManager(controllerCtx, mgr, &vmwarev1.VSphereMachine{}, concurrency(vSphereMachineConcurrency)); err != nil { return err } - if err := controllers.AddServiceAccountProviderControllerToManager(ctx, mgr, tracker, concurrency(providerServiceAccountConcurrency)); err != nil { + if err := controllers.AddServiceAccountProviderControllerToManager(controllerCtx, mgr, tracker, concurrency(providerServiceAccountConcurrency)); err != nil { return err } - return controllers.AddServiceDiscoveryControllerToManager(ctx, mgr, tracker, concurrency(serviceDiscoveryConcurrency)) + return controllers.AddServiceDiscoveryControllerToManager(controllerCtx, mgr, tracker, concurrency(serviceDiscoveryConcurrency)) } func setupChecks(mgr ctrlmgr.Manager) { @@ -422,7 +422,7 @@ func concurrency(c int) controller.Options { return controller.Options{MaxConcurrentReconciles: c} } -func setupRemoteClusterCacheTracker(ctx *context.ControllerManagerContext, mgr ctrlmgr.Manager) (*remote.ClusterCacheTracker, error) { +func setupRemoteClusterCacheTracker(controllerCtx *capvcontext.ControllerManagerContext, mgr ctrlmgr.Manager) (*remote.ClusterCacheTracker, error) { secretCachingClient, err := client.New(mgr.GetConfig(), client.Options{ HTTPClient: mgr.GetHTTPClient(), Cache: &client.CacheOptions{ @@ -452,7 +452,7 @@ func setupRemoteClusterCacheTracker(ctx *context.ControllerManagerContext, mgr c Client: mgr.GetClient(), Tracker: tracker, WatchFilterValue: managerOpts.WatchFilterValue, - }).SetupWithManager(ctx, mgr, concurrency(clusterCacheTrackerConcurrency)); err != nil { + }).SetupWithManager(controllerCtx, mgr, concurrency(clusterCacheTrackerConcurrency)); err != nil { return nil, perrors.Wrapf(err, "unable to create ClusterCacheReconciler controller") } diff --git a/pkg/clustermodule/service.go b/pkg/clustermodule/service.go index e8ad30e8f0..42db4fd127 100644 --- a/pkg/clustermodule/service.go +++ b/pkg/clustermodule/service.go @@ -17,13 +17,13 @@ limitations under the License. 
package clustermodule import ( - goctx "context" + "context" "github.com/pkg/errors" "github.com/vmware/govmomi/find" "github.com/vmware/govmomi/vim25/types" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/services/govmomi/clustermodules" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/session" ) @@ -36,10 +36,10 @@ func NewService() Service { return service{} } -func (s service) Create(ctx *context.ClusterContext, wrapper Wrapper) (string, error) { - logger := ctx.Logger.WithValues("object", wrapper.GetName(), "namespace", wrapper.GetNamespace()) +func (s service) Create(clusterCtx *capvcontext.ClusterContext, wrapper Wrapper) (string, error) { + logger := clusterCtx.Logger.WithValues("object", wrapper.GetName(), "namespace", wrapper.GetNamespace()) - templateRef, err := fetchTemplateRef(ctx, ctx.Client, wrapper) + templateRef, err := fetchTemplateRef(clusterCtx, clusterCtx.Client, wrapper) if err != nil { logger.V(4).Error(err, "error fetching template for object") return "", errors.Wrapf(err, "error fetching machine template for object %s/%s", wrapper.GetNamespace(), wrapper.GetName()) @@ -50,17 +50,17 @@ func (s service) Create(ctx *context.ClusterContext, wrapper Wrapper) (string, e return "", nil } - template, err := fetchMachineTemplate(ctx, wrapper, templateRef.Name) + template, err := fetchMachineTemplate(clusterCtx, wrapper, templateRef.Name) if err != nil { logger.V(4).Error(err, "error fetching template") return "", err } - if server := template.Spec.Template.Spec.Server; server != ctx.VSphereCluster.Spec.Server { + if server := template.Spec.Template.Spec.Server; server != clusterCtx.VSphereCluster.Spec.Server { logger.V(4).Info("skipping module creation for object since template uses a different server", "server", server) return "", nil } - vCenterSession, err := fetchSessionForObject(ctx, template) + vCenterSession, err := fetchSessionForObject(clusterCtx, template) if err != nil { logger.V(4).Error(err, "error fetching session") return "", err @@ -68,14 +68,14 @@ func (s service) Create(ctx *context.ClusterContext, wrapper Wrapper) (string, e // Fetch the compute cluster resource by tracing the owner of the resource pool in use. 
// TODO (srm09): How do we support Multi AZ scenarios here - computeClusterRef, err := getComputeClusterResource(ctx, vCenterSession, template.Spec.Template.Spec.ResourcePool) + computeClusterRef, err := getComputeClusterResource(clusterCtx, vCenterSession, template.Spec.Template.Spec.ResourcePool) if err != nil { logger.V(4).Error(err, "error fetching compute cluster resource") return "", err } provider := clustermodules.NewProvider(vCenterSession.TagManager.Client) - moduleUUID, err := provider.CreateModule(ctx, computeClusterRef) + moduleUUID, err := provider.CreateModule(clusterCtx, computeClusterRef) if err != nil { logger.V(4).Error(err, "error creating cluster module") return "", err @@ -84,22 +84,22 @@ func (s service) Create(ctx *context.ClusterContext, wrapper Wrapper) (string, e return moduleUUID, nil } -func (s service) DoesExist(ctx *context.ClusterContext, wrapper Wrapper, moduleUUID string) (bool, error) { - logger := ctx.Logger.WithValues("object", wrapper.GetName()) +func (s service) DoesExist(clusterCtx *capvcontext.ClusterContext, wrapper Wrapper, moduleUUID string) (bool, error) { + logger := clusterCtx.Logger.WithValues("object", wrapper.GetName()) - templateRef, err := fetchTemplateRef(ctx, ctx.Client, wrapper) + templateRef, err := fetchTemplateRef(clusterCtx, clusterCtx.Client, wrapper) if err != nil { logger.V(4).Error(err, "error fetching template for object") return false, errors.Wrapf(err, "error fetching infrastructure machine template for object %s/%s", wrapper.GetNamespace(), wrapper.GetName()) } - template, err := fetchMachineTemplate(ctx, wrapper, templateRef.Name) + template, err := fetchMachineTemplate(clusterCtx, wrapper, templateRef.Name) if err != nil { logger.V(4).Error(err, "error fetching template") return false, err } - vCenterSession, err := fetchSessionForObject(ctx, template) + vCenterSession, err := fetchSessionForObject(clusterCtx, template) if err != nil { logger.V(4).Error(err, "error fetching session") return false, err @@ -107,28 +107,28 @@ func (s service) DoesExist(ctx *context.ClusterContext, wrapper Wrapper, moduleU // Fetch the compute cluster resource by tracing the owner of the resource pool in use. 
// TODO (srm09): How do we support Multi AZ scenarios here - computeClusterRef, err := getComputeClusterResource(ctx, vCenterSession, template.Spec.Template.Spec.ResourcePool) + computeClusterRef, err := getComputeClusterResource(clusterCtx, vCenterSession, template.Spec.Template.Spec.ResourcePool) if err != nil { logger.V(4).Error(err, "error fetching compute cluster resource") return false, err } provider := clustermodules.NewProvider(vCenterSession.TagManager.Client) - return provider.DoesModuleExist(ctx, moduleUUID, computeClusterRef) + return provider.DoesModuleExist(clusterCtx, moduleUUID, computeClusterRef) } -func (s service) Remove(ctx *context.ClusterContext, moduleUUID string) error { - params := newParams(*ctx) - vcenterSession, err := fetchSession(ctx, params) +func (s service) Remove(clusterCtx *capvcontext.ClusterContext, moduleUUID string) error { + params := newParams(*clusterCtx) + vcenterSession, err := fetchSession(clusterCtx, params) if err != nil { return err } provider := clustermodules.NewProvider(vcenterSession.TagManager.Client) - return provider.DeleteModule(ctx, moduleUUID) + return provider.DeleteModule(clusterCtx, moduleUUID) } -func getComputeClusterResource(ctx goctx.Context, s *session.Session, resourcePool string) (types.ManagedObjectReference, error) { +func getComputeClusterResource(ctx context.Context, s *session.Session, resourcePool string) (types.ManagedObjectReference, error) { rp, err := s.Finder.ResourcePoolOrDefault(ctx, resourcePool) if err != nil { return types.ManagedObjectReference{}, err diff --git a/pkg/clustermodule/session.go b/pkg/clustermodule/session.go index 9aa3480231..f7e2798f78 100644 --- a/pkg/clustermodule/session.go +++ b/pkg/clustermodule/session.go @@ -17,7 +17,7 @@ limitations under the License. package clustermodule import ( - goctx "context" + "context" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" @@ -26,45 +26,45 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/identity" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/session" ) -func fetchSessionForObject(ctx *context.ClusterContext, template *infrav1.VSphereMachineTemplate) (*session.Session, error) { - params := newParams(*ctx) +func fetchSessionForObject(clusterCtx *capvcontext.ClusterContext, template *infrav1.VSphereMachineTemplate) (*session.Session, error) { + params := newParams(*clusterCtx) // Datacenter is necessary since we use the finder. params = params.WithDatacenter(template.Spec.Template.Spec.Datacenter) - return fetchSession(ctx, params) + return fetchSession(clusterCtx, params) } -func newParams(ctx context.ClusterContext) *session.Params { +func newParams(clusterCtx capvcontext.ClusterContext) *session.Params { return session.NewParams(). - WithServer(ctx.VSphereCluster.Spec.Server). - WithThumbprint(ctx.VSphereCluster.Spec.Thumbprint). + WithServer(clusterCtx.VSphereCluster.Spec.Server). + WithThumbprint(clusterCtx.VSphereCluster.Spec.Thumbprint). 
WithFeatures(session.Feature{ - EnableKeepAlive: ctx.EnableKeepAlive, - KeepAliveDuration: ctx.KeepAliveDuration, + EnableKeepAlive: clusterCtx.EnableKeepAlive, + KeepAliveDuration: clusterCtx.KeepAliveDuration, }) } -func fetchSession(ctx *context.ClusterContext, params *session.Params) (*session.Session, error) { - if ctx.VSphereCluster.Spec.IdentityRef != nil { - creds, err := identity.GetCredentials(ctx, ctx.Client, ctx.VSphereCluster, ctx.Namespace) +func fetchSession(clusterCtx *capvcontext.ClusterContext, params *session.Params) (*session.Session, error) { + if clusterCtx.VSphereCluster.Spec.IdentityRef != nil { + creds, err := identity.GetCredentials(clusterCtx, clusterCtx.Client, clusterCtx.VSphereCluster, clusterCtx.Namespace) if err != nil { return nil, err } params = params.WithUserInfo(creds.Username, creds.Password) - return session.GetOrCreate(ctx, params) + return session.GetOrCreate(clusterCtx, params) } - params = params.WithUserInfo(ctx.Username, ctx.Password) - return session.GetOrCreate(ctx, params) + params = params.WithUserInfo(clusterCtx.Username, clusterCtx.Password) + return session.GetOrCreate(clusterCtx, params) } -func fetchTemplateRef(ctx goctx.Context, c client.Client, input Wrapper) (*corev1.ObjectReference, error) { +func fetchTemplateRef(ctx context.Context, c client.Client, input Wrapper) (*corev1.ObjectReference, error) { obj := new(unstructured.Unstructured) obj.SetAPIVersion(input.GetObjectKind().GroupVersionKind().GroupVersion().String()) obj.SetKind(input.GetObjectKind().GroupVersionKind().Kind) @@ -81,9 +81,9 @@ func fetchTemplateRef(ctx goctx.Context, c client.Client, input Wrapper) (*corev return &objRef, nil } -func fetchMachineTemplate(ctx *context.ClusterContext, input Wrapper, templateName string) (*infrav1.VSphereMachineTemplate, error) { +func fetchMachineTemplate(clusterCtx *capvcontext.ClusterContext, input Wrapper, templateName string) (*infrav1.VSphereMachineTemplate, error) { template := &infrav1.VSphereMachineTemplate{} - if err := ctx.Client.Get(ctx, client.ObjectKey{ + if err := clusterCtx.Client.Get(clusterCtx, client.ObjectKey{ Name: templateName, Namespace: input.GetNamespace(), }, template); err != nil { diff --git a/pkg/context/fake/fake_controller_manager_context.go b/pkg/context/fake/fake_controller_manager_context.go index 798ddd501a..b9787880de 100644 --- a/pkg/context/fake/fake_controller_manager_context.go +++ b/pkg/context/fake/fake_controller_manager_context.go @@ -17,7 +17,7 @@ limitations under the License. package fake import ( - goctx "context" + "context" vmoprv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha1" "k8s.io/apimachinery/pkg/runtime" @@ -32,7 +32,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/record" ) @@ -40,7 +40,7 @@ import ( // testing reconcilers and webhooks with a fake client. You can choose to // initialize it with a slice of runtime.Object. 
-func NewControllerManagerContext(initObjects ...client.Object) *context.ControllerManagerContext { +func NewControllerManagerContext(initObjects ...client.Object) *capvcontext.ControllerManagerContext { scheme := runtime.NewScheme() _ = clientgoscheme.AddToScheme(scheme) _ = clusterv1.AddToScheme(scheme) @@ -55,8 +55,8 @@ func NewControllerManagerContext(initObjects ...client.Object) *context.Controll &vmwarev1.VSphereCluster{}, ).WithObjects(initObjects...).Build() - return &context.ControllerManagerContext{ - Context: goctx.Background(), + return &capvcontext.ControllerManagerContext{ + Context: context.Background(), Client: clientWithObjects, Logger: ctrllog.Log.WithName(ControllerManagerName), Scheme: scheme, diff --git a/pkg/identity/identity_suite_test.go b/pkg/identity/identity_suite_test.go index 6bcc26cf08..fae6e52d46 100644 --- a/pkg/identity/identity_suite_test.go +++ b/pkg/identity/identity_suite_test.go @@ -17,7 +17,7 @@ limitations under the License. package identity import ( - goctx "context" + "context" "fmt" "path" "path/filepath" @@ -44,7 +44,7 @@ var ( scheme = runtime.NewScheme() env *envtest.Environment k8sclient client.Client - ctx goctx.Context + ctx context.Context ) func getTestEnv() *envtest.Environment { @@ -75,7 +75,7 @@ func TestIdentity(t *testing.T) { var _ = SynchronizedBeforeSuite(func() []byte { By("Creating new test environment") - ctx = goctx.Background() + ctx = context.Background() env = getTestEnv() cfg, err := env.Start() Expect(err).NotTo(HaveOccurred()) diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 89f3214fdd..f449b1f18b 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -17,7 +17,7 @@ limitations under the License. package manager import ( - goctx "context" + "context" "fmt" "os" @@ -38,7 +38,7 @@ import ( infrav1alpha4 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1alpha4" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/record" ) @@ -47,7 +47,7 @@ type Manager interface { ctrl.Manager // GetContext returns the controller manager's context. - GetContext() *context.ControllerManagerContext + GetContext() *capvcontext.ControllerManagerContext } // New returns a new CAPV controller manager. @@ -81,8 +81,8 @@ func New(opts Options) (Manager, error) { } // Build the controller manager context. 
- controllerManagerContext := &context.ControllerManagerContext{ - Context: goctx.Background(), + controllerManagerContext := &capvcontext.ControllerManagerContext{ + Context: context.Background(), WatchNamespaces: opts.Cache.Namespaces, Namespace: opts.PodNamespace, Name: opts.PodName, @@ -106,18 +106,18 @@ func New(opts Options) (Manager, error) { } return &manager{ - Manager: mgr, - ctx: controllerManagerContext, + Manager: mgr, + controllerCtx: controllerManagerContext, }, nil } type manager struct { ctrl.Manager - ctx *context.ControllerManagerContext + controllerCtx *capvcontext.ControllerManagerContext } -func (m *manager) GetContext() *context.ControllerManagerContext { - return m.ctx +func (m *manager) GetContext() *capvcontext.ControllerManagerContext { + return m.controllerCtx } func UpdateCredentials(opts *Options) { @@ -126,7 +126,7 @@ func UpdateCredentials(opts *Options) { // InitializeWatch adds a filesystem watcher for the capv credentials file // In case of any update to the credentials file, the new credentials are passed to the capv manager context. -func InitializeWatch(ctx *context.ControllerManagerContext, managerOpts *Options) (watch *fsnotify.Watcher, err error) { +func InitializeWatch(controllerCtx *capvcontext.ControllerManagerContext, managerOpts *Options) (watch *fsnotify.Watcher, err error) { capvCredentialsFile := managerOpts.CredentialsFile updateEventCh := make(chan bool) watch, err = fsnotify.NewWatcher() @@ -140,9 +140,9 @@ func InitializeWatch(ctx *context.ControllerManagerContext, managerOpts *Options for { select { case err := <-watch.Errors: - ctx.Logger.Error(err, "received error on CAPV credential watcher") + controllerCtx.Logger.Error(err, "received error on CAPV credential watcher") case event := <-watch.Events: - ctx.Logger.Info(fmt.Sprintf("received event %v on the credential file %s", event, capvCredentialsFile)) + controllerCtx.Logger.Info(fmt.Sprintf("received event %v on the credential file %s", event, capvCredentialsFile)) updateEventCh <- true } } diff --git a/pkg/services/fake/vmservice.go b/pkg/services/fake/vmservice.go index 58d39d8a6b..175d7bd55c 100644 --- a/pkg/services/fake/vmservice.go +++ b/pkg/services/fake/vmservice.go @@ -28,12 +28,12 @@ type VMService struct { mock.Mock } -func (v *VMService) ReconcileVM(ctx *context.VMContext) (infrav1.VirtualMachine, error) { - args := v.Called(ctx) +func (v *VMService) ReconcileVM(vmCtx *context.VMContext) (infrav1.VirtualMachine, error) { + args := v.Called(vmCtx) return args.Get(0).(infrav1.VirtualMachine), args.Error(1) } -func (v *VMService) DestroyVM(ctx *context.VMContext) (reconcile.Result, infrav1.VirtualMachine, error) { - args := v.Called(ctx) +func (v *VMService) DestroyVM(vmCtx *context.VMContext) (reconcile.Result, infrav1.VirtualMachine, error) { + args := v.Called(vmCtx) return args.Get(0).(reconcile.Result), args.Get(1).(infrav1.VirtualMachine), args.Error(2) } diff --git a/pkg/services/govmomi/power_test.go b/pkg/services/govmomi/power_test.go index 8bcf8fac9b..fcf055d5d4 100644 --- a/pkg/services/govmomi/power_test.go +++ b/pkg/services/govmomi/power_test.go @@ -17,7 +17,7 @@ limitations under the License. 
package govmomi import ( - goctx "context" + "context" "testing" "time" @@ -147,7 +147,7 @@ func TestTriggerSoftPowerOff(t *testing.T) { g = NewWithT(t) before() - simulator.Run(func(ctx goctx.Context, c *vim25.Client) error { + simulator.Run(func(ctx context.Context, c *vim25.Client) error { finder := find.NewFinder(c) vm, err := finder.VirtualMachine(ctx, "DC0_H0_VM0") g.Expect(err).NotTo(HaveOccurred()) @@ -176,7 +176,7 @@ func TestTriggerSoftPowerOff(t *testing.T) { g = NewWithT(t) before() - simulator.Run(func(ctx goctx.Context, c *vim25.Client) error { + simulator.Run(func(ctx context.Context, c *vim25.Client) error { finder := find.NewFinder(c) vm, err := finder.VirtualMachine(ctx, "DC0_H0_VM0") g.Expect(err).NotTo(HaveOccurred()) @@ -213,7 +213,7 @@ func TestTriggerSoftPowerOff(t *testing.T) { g = NewWithT(t) before() - simulator.Run(func(ctx goctx.Context, c *vim25.Client) error { + simulator.Run(func(ctx context.Context, c *vim25.Client) error { finder := find.NewFinder(c) vm, err := finder.VirtualMachine(ctx, "DC0_H0_VM0") g.Expect(err).NotTo(HaveOccurred()) @@ -250,7 +250,7 @@ func TestTriggerSoftPowerOff(t *testing.T) { g = NewWithT(t) before() - simulator.Run(func(ctx goctx.Context, c *vim25.Client) error { + simulator.Run(func(ctx context.Context, c *vim25.Client) error { finder := find.NewFinder(c) vm, err := finder.VirtualMachine(ctx, "DC0_H0_VM0") g.Expect(err).NotTo(HaveOccurred()) @@ -287,7 +287,7 @@ func TestTriggerSoftPowerOff(t *testing.T) { g = NewWithT(t) before() - simulator.Run(func(ctx goctx.Context, c *vim25.Client) error { + simulator.Run(func(ctx context.Context, c *vim25.Client) error { finder := find.NewFinder(c) vm, err := finder.VirtualMachine(ctx, "DC0_H0_VM0") g.Expect(err).NotTo(HaveOccurred()) @@ -315,7 +315,7 @@ func TestTriggerSoftPowerOff(t *testing.T) { g = NewWithT(t) before() - simulator.Run(func(ctx goctx.Context, c *vim25.Client) error { + simulator.Run(func(ctx context.Context, c *vim25.Client) error { finder := find.NewFinder(c) vm, err := finder.VirtualMachine(ctx, "DC0_H0_VM0") g.Expect(err).NotTo(HaveOccurred()) diff --git a/pkg/services/govmomi/service_test.go b/pkg/services/govmomi/service_test.go index b6115cb69d..5cf6a8135d 100644 --- a/pkg/services/govmomi/service_test.go +++ b/pkg/services/govmomi/service_test.go @@ -17,7 +17,7 @@ limitations under the License. 
package govmomi import ( - goctx "context" + "context" "testing" "github.com/go-logr/logr" @@ -31,16 +31,16 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" ) func emptyVirtualMachineContext() *virtualMachineContext { return &virtualMachineContext{ - VMContext: context.VMContext{ + VMContext: capvcontext.VMContext{ Logger: logr.Discard(), - ControllerContext: &context.ControllerContext{ - ControllerManagerContext: &context.ControllerManagerContext{ - Context: goctx.TODO(), + ControllerContext: &capvcontext.ControllerContext{ + ControllerManagerContext: &capvcontext.ControllerManagerContext{ + Context: context.TODO(), }, }, }, @@ -63,7 +63,7 @@ func Test_reconcilePCIDevices(t *testing.T) { g = NewWithT(t) before() - simulator.Run(func(ctx goctx.Context, c *vim25.Client) error { + simulator.Run(func(ctx context.Context, c *vim25.Client) error { finder := find.NewFinder(c) vm, err := finder.VirtualMachine(ctx, "DC0_H0_VM0") g.Expect(err).ToNot(HaveOccurred()) diff --git a/pkg/services/vimmachine.go b/pkg/services/vimmachine.go index 91122086c6..26711aee7c 100644 --- a/pkg/services/vimmachine.go +++ b/pkg/services/vimmachine.go @@ -17,7 +17,7 @@ limitations under the License. package services import ( - goctx "context" + "context" "encoding/json" "strings" @@ -37,21 +37,21 @@ import ( ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" infrautilv1 "sigs.k8s.io/cluster-api-provider-vsphere/pkg/util" ) type VimMachineService struct{} -func (v *VimMachineService) FetchVSphereMachine(c client.Client, name types.NamespacedName) (context.MachineContext, error) { +func (v *VimMachineService) FetchVSphereMachine(c client.Client, name types.NamespacedName) (capvcontext.MachineContext, error) { vsphereMachine := &infrav1.VSphereMachine{} - err := c.Get(goctx.Background(), name, vsphereMachine) + err := c.Get(context.Background(), name, vsphereMachine) - return &context.VIMMachineContext{VSphereMachine: vsphereMachine}, err + return &capvcontext.VIMMachineContext{VSphereMachine: vsphereMachine}, err } -func (v *VimMachineService) FetchVSphereCluster(c client.Client, cluster *clusterv1.Cluster, machineContext context.MachineContext) (context.MachineContext, error) { - ctx, ok := machineContext.(*context.VIMMachineContext) +func (v *VimMachineService) FetchVSphereCluster(c client.Client, cluster *clusterv1.Cluster, machineContext capvcontext.MachineContext) (capvcontext.MachineContext, error) { + vimMachineCtx, ok := machineContext.(*capvcontext.VIMMachineContext) if !ok { return nil, errors.New("received unexpected VIMMachineContext type") } @@ -60,19 +60,19 @@ func (v *VimMachineService) FetchVSphereCluster(c client.Client, cluster *cluste Namespace: machineContext.GetObjectMeta().Namespace, Name: cluster.Spec.InfrastructureRef.Name, } - err := c.Get(goctx.Background(), vsphereClusterName, vsphereCluster) + err := c.Get(context.Background(), vsphereClusterName, vsphereCluster) - ctx.VSphereCluster = vsphereCluster - return ctx, err + vimMachineCtx.VSphereCluster = vsphereCluster + return vimMachineCtx, err } -func (v *VimMachineService) ReconcileDelete(c context.MachineContext) error { - ctx, ok := 
c.(*context.VIMMachineContext) +func (v *VimMachineService) ReconcileDelete(machineCtx capvcontext.MachineContext) error { + vimMachineCtx, ok := machineCtx.(*capvcontext.VIMMachineContext) if !ok { return errors.New("received unexpected VIMMachineContext type") } - vm, err := v.findVSphereVM(ctx) + vm, err := v.findVSphereVM(vimMachineCtx) // Attempt to find the associated VSphereVM resource. if err != nil { return err @@ -81,49 +81,49 @@ func (v *VimMachineService) ReconcileDelete(c context.MachineContext) error { if vm != nil && vm.GetDeletionTimestamp().IsZero() { // If the VSphereVM was found and it's not already enqueued for // deletion, go ahead and attempt to delete it. - if err := ctx.Client.Delete(ctx, vm); err != nil { + if err := vimMachineCtx.Client.Delete(vimMachineCtx, vm); err != nil { return err } } // VSphereMachine wraps a VMSphereVM, so we are mirroring status from the underlying VMSphereVM // in order to provide evidences about machine deletion. - conditions.SetMirror(ctx.VSphereMachine, infrav1.VMProvisionedCondition, vm) + conditions.SetMirror(vimMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, vm) return nil } -func (v *VimMachineService) SyncFailureReason(c context.MachineContext) (bool, error) { - ctx, ok := c.(*context.VIMMachineContext) +func (v *VimMachineService) SyncFailureReason(machineCtx capvcontext.MachineContext) (bool, error) { + vimMachineCtx, ok := machineCtx.(*capvcontext.VIMMachineContext) if !ok { return false, errors.New("received unexpected VIMMachineContext type") } - vsphereVM, err := v.findVSphereVM(ctx) + vsphereVM, err := v.findVSphereVM(vimMachineCtx) if err != nil { return false, err } if vsphereVM != nil { // Reconcile VSphereMachine's failures - ctx.VSphereMachine.Status.FailureReason = vsphereVM.Status.FailureReason - ctx.VSphereMachine.Status.FailureMessage = vsphereVM.Status.FailureMessage + vimMachineCtx.VSphereMachine.Status.FailureReason = vsphereVM.Status.FailureReason + vimMachineCtx.VSphereMachine.Status.FailureMessage = vsphereVM.Status.FailureMessage } - return ctx.VSphereMachine.Status.FailureReason != nil || ctx.VSphereMachine.Status.FailureMessage != nil, err + return vimMachineCtx.VSphereMachine.Status.FailureReason != nil || vimMachineCtx.VSphereMachine.Status.FailureMessage != nil, err } -func (v *VimMachineService) ReconcileNormal(c context.MachineContext) (bool, error) { - ctx, ok := c.(*context.VIMMachineContext) +func (v *VimMachineService) ReconcileNormal(machineCtx capvcontext.MachineContext) (bool, error) { + vimMachineCtx, ok := machineCtx.(*capvcontext.VIMMachineContext) if !ok { return false, errors.New("received unexpected VIMMachineContext type") } - vsphereVM, err := v.findVSphereVM(ctx) + vsphereVM, err := v.findVSphereVM(vimMachineCtx) if err != nil && !apierrors.IsNotFound(err) { return false, err } - vm, err := v.createOrPatchVSphereVM(ctx, vsphereVM) + vm, err := v.createOrPatchVSphereVM(vimMachineCtx, vsphereVM) if err != nil { - ctx.Logger.Error(err, "error creating or patching VM", "vsphereVM", vsphereVM) + vimMachineCtx.Logger.Error(err, "error creating or patching VM", "vsphereVM", vsphereVM) return false, err } @@ -140,50 +140,50 @@ func (v *VimMachineService) ReconcileNormal(c context.MachineContext) (bool, err vmObj.SetKind(vm.GetObjectKind().GroupVersionKind().Kind) // Waits the VM's ready state. 
- if ok, err := v.waitReadyState(ctx, vmObj); !ok { + if ok, err := v.waitReadyState(vimMachineCtx, vmObj); !ok { if err != nil { - return false, errors.Wrapf(err, "unexpected error while reconciling ready state for %s", ctx) + return false, errors.Wrapf(err, "unexpected error while reconciling ready state for %s", vimMachineCtx) } - ctx.Logger.Info("waiting for ready state") + vimMachineCtx.Logger.Info("waiting for ready state") // VSphereMachine wraps a VMSphereVM, so we are mirroring status from the underlying VMSphereVM // in order to provide evidences about machine provisioning while provisioning is actually happening. - conditions.SetMirror(ctx.VSphereMachine, infrav1.VMProvisionedCondition, conditions.UnstructuredGetter(vmObj)) + conditions.SetMirror(vimMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, conditions.UnstructuredGetter(vmObj)) return true, nil } // Reconcile the VSphereMachine's provider ID using the VM's BIOS UUID. - if ok, err := v.reconcileProviderID(ctx, vmObj); !ok { + if ok, err := v.reconcileProviderID(vimMachineCtx, vmObj); !ok { if err != nil { - return false, errors.Wrapf(err, "unexpected error while reconciling provider ID for %s", ctx) + return false, errors.Wrapf(err, "unexpected error while reconciling provider ID for %s", vimMachineCtx) } - ctx.Logger.Info("provider ID is not reconciled") + vimMachineCtx.Logger.Info("provider ID is not reconciled") return true, nil } // Reconcile the VSphereMachine's node addresses from the VM's IP addresses. - if ok, err := v.reconcileNetwork(ctx, vmObj); !ok { + if ok, err := v.reconcileNetwork(vimMachineCtx, vmObj); !ok { if err != nil { - return false, errors.Wrapf(err, "unexpected error while reconciling network for %s", ctx) + return false, errors.Wrapf(err, "unexpected error while reconciling network for %s", vimMachineCtx) } - ctx.Logger.Info("network is not reconciled") - conditions.MarkFalse(ctx.VSphereMachine, infrav1.VMProvisionedCondition, infrav1.WaitingForNetworkAddressesReason, clusterv1.ConditionSeverityInfo, "") + vimMachineCtx.Logger.Info("network is not reconciled") + conditions.MarkFalse(vimMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, infrav1.WaitingForNetworkAddressesReason, clusterv1.ConditionSeverityInfo, "") return true, nil } - ctx.VSphereMachine.Status.Ready = true + vimMachineCtx.VSphereMachine.Status.Ready = true return false, nil } -func (v *VimMachineService) GetHostInfo(c context.MachineContext) (string, error) { - ctx, ok := c.(*context.VIMMachineContext) +func (v *VimMachineService) GetHostInfo(c capvcontext.MachineContext) (string, error) { + vimMachineCtx, ok := c.(*capvcontext.VIMMachineContext) if !ok { return "", errors.New("received unexpected VIMMachineContext type") } vsphereVM := &infrav1.VSphereVM{} - if err := ctx.Client.Get(ctx, client.ObjectKey{ - Namespace: ctx.VSphereMachine.Namespace, - Name: generateVMObjectName(ctx, ctx.Machine.Name), + if err := vimMachineCtx.Client.Get(vimMachineCtx, client.ObjectKey{ + Namespace: vimMachineCtx.VSphereMachine.Namespace, + Name: generateVMObjectName(vimMachineCtx, vimMachineCtx.Machine.Name), }, vsphereVM); err != nil { return "", err } @@ -191,25 +191,25 @@ func (v *VimMachineService) GetHostInfo(c context.MachineContext) (string, error if conditions.IsTrue(vsphereVM, infrav1.VMProvisionedCondition) { return vsphereVM.Status.Host, nil } - ctx.Logger.V(4).Info("VMProvisionedCondition is set to false", "vsphereVM", vsphereVM.Name) + vimMachineCtx.Logger.V(4).Info("VMProvisionedCondition is set to false", 
"vsphereVM", vsphereVM.Name) return "", nil } -func (v *VimMachineService) findVSphereVM(ctx *context.VIMMachineContext) (*infrav1.VSphereVM, error) { +func (v *VimMachineService) findVSphereVM(vimMachineCtx *capvcontext.VIMMachineContext) (*infrav1.VSphereVM, error) { // Get ready to find the associated VSphereVM resource. vm := &infrav1.VSphereVM{} vmKey := types.NamespacedName{ - Namespace: ctx.VSphereMachine.Namespace, - Name: generateVMObjectName(ctx, ctx.Machine.Name), + Namespace: vimMachineCtx.VSphereMachine.Namespace, + Name: generateVMObjectName(vimMachineCtx, vimMachineCtx.Machine.Name), } // Attempt to find the associated VSphereVM resource. - if err := ctx.Client.Get(ctx, vmKey, vm); err != nil { + if err := vimMachineCtx.Client.Get(vimMachineCtx, vmKey, vm); err != nil { return nil, err } return vm, nil } -func (v *VimMachineService) waitReadyState(ctx *context.VIMMachineContext, vm *unstructured.Unstructured) (bool, error) { +func (v *VimMachineService) waitReadyState(vimMachineCtx *capvcontext.VIMMachineContext, vm *unstructured.Unstructured) (bool, error) { ready, ok, err := unstructured.NestedBool(vm.Object, "status", "ready") if !ok { if err != nil { @@ -218,16 +218,16 @@ func (v *VimMachineService) waitReadyState(ctx *context.VIMMachineContext, vm *u vm.GroupVersionKind(), vm.GetNamespace(), vm.GetName(), - ctx) + vimMachineCtx) } - ctx.Logger.Info("status.ready not found", + vimMachineCtx.Logger.Info("status.ready not found", "vmGVK", vm.GroupVersionKind().String(), "vmNamespace", vm.GetNamespace(), "vmName", vm.GetName()) return false, nil } if !ready { - ctx.Logger.Info("status.ready is false", + vimMachineCtx.Logger.Info("status.ready is false", "vmGVK", vm.GroupVersionKind().String(), "vmNamespace", vm.GetNamespace(), "vmName", vm.GetName()) @@ -237,7 +237,7 @@ func (v *VimMachineService) waitReadyState(ctx *context.VIMMachineContext, vm *u return true, nil } -func (v *VimMachineService) reconcileProviderID(ctx *context.VIMMachineContext, vm *unstructured.Unstructured) (bool, error) { +func (v *VimMachineService) reconcileProviderID(vimMachineCtx *capvcontext.VIMMachineContext, vm *unstructured.Unstructured) (bool, error) { biosUUID, ok, err := unstructured.NestedString(vm.Object, "spec", "biosUUID") if !ok { if err != nil { @@ -246,16 +246,16 @@ func (v *VimMachineService) reconcileProviderID(ctx *context.VIMMachineContext, vm.GroupVersionKind(), vm.GetNamespace(), vm.GetName(), - ctx) + vimMachineCtx) } - ctx.Logger.Info("spec.biosUUID not found", + vimMachineCtx.Logger.Info("spec.biosUUID not found", "vmGVK", vm.GroupVersionKind().String(), "vmNamespace", vm.GetNamespace(), "vmName", vm.GetName()) return false, nil } if biosUUID == "" { - ctx.Logger.Info("spec.biosUUID is empty", + vimMachineCtx.Logger.Info("spec.biosUUID is empty", "vmGVK", vm.GroupVersionKind().String(), "vmNamespace", vm.GetNamespace(), "vmName", vm.GetName()) @@ -269,24 +269,24 @@ func (v *VimMachineService) reconcileProviderID(ctx *context.VIMMachineContext, vm.GroupVersionKind(), vm.GetNamespace(), vm.GetName(), - ctx) + vimMachineCtx) } - if ctx.VSphereMachine.Spec.ProviderID == nil || *ctx.VSphereMachine.Spec.ProviderID != providerID { - ctx.VSphereMachine.Spec.ProviderID = &providerID - ctx.Logger.Info("updated provider ID", "provider-id", providerID) + if vimMachineCtx.VSphereMachine.Spec.ProviderID == nil || *vimMachineCtx.VSphereMachine.Spec.ProviderID != providerID { + vimMachineCtx.VSphereMachine.Spec.ProviderID = &providerID + vimMachineCtx.Logger.Info("updated provider ID", 
"provider-id", providerID) } return true, nil } //nolint:nestif -func (v *VimMachineService) reconcileNetwork(ctx *context.VIMMachineContext, vm *unstructured.Unstructured) (bool, error) { +func (v *VimMachineService) reconcileNetwork(vimMachineCtx *capvcontext.VIMMachineContext, vm *unstructured.Unstructured) (bool, error) { var errs []error if networkStatusListOfIfaces, ok, _ := unstructured.NestedSlice(vm.Object, "status", "network"); ok { var networkStatusList []infrav1.NetworkStatus for i, networkStatusListMemberIface := range networkStatusListOfIfaces { if buf, err := json.Marshal(networkStatusListMemberIface); err != nil { - ctx.Logger.Error(err, + vimMachineCtx.Logger.Error(err, "unsupported data for member of status.network list", "index", i) errs = append(errs, err) @@ -298,7 +298,7 @@ func (v *VimMachineService) reconcileNetwork(ctx *context.VIMMachineContext, vm errs = append(errs, err) } if err != nil { - ctx.Logger.Error(err, + vimMachineCtx.Logger.Error(err, "unsupported data for member of status.network list", "index", i, "data", string(buf)) errs = append(errs, err) @@ -307,7 +307,7 @@ func (v *VimMachineService) reconcileNetwork(ctx *context.VIMMachineContext, vm } } } - ctx.VSphereMachine.Status.Network = networkStatusList + vimMachineCtx.VSphereMachine.Status.Network = networkStatusList } if addresses, ok, _ := unstructured.NestedStringSlice(vm.Object, "status", "addresses"); ok { @@ -318,23 +318,23 @@ func (v *VimMachineService) reconcileNetwork(ctx *context.VIMMachineContext, vm Address: addr, }) } - ctx.VSphereMachine.Status.Addresses = machineAddresses + vimMachineCtx.VSphereMachine.Status.Addresses = machineAddresses } - if len(ctx.VSphereMachine.Status.Addresses) == 0 { - ctx.Logger.Info("waiting on IP addresses") + if len(vimMachineCtx.VSphereMachine.Status.Addresses) == 0 { + vimMachineCtx.Logger.Info("waiting on IP addresses") return false, kerrors.NewAggregate(errs) } return true, nil } -func (v *VimMachineService) createOrPatchVSphereVM(ctx *context.VIMMachineContext, vsphereVM *infrav1.VSphereVM) (runtime.Object, error) { +func (v *VimMachineService) createOrPatchVSphereVM(vimMachineCtx *capvcontext.VIMMachineContext, vsphereVM *infrav1.VSphereVM) (runtime.Object, error) { // Create or update the VSphereVM resource. vm := &infrav1.VSphereVM{ ObjectMeta: metav1.ObjectMeta{ - Namespace: ctx.VSphereMachine.Namespace, - Name: generateVMObjectName(ctx, ctx.Machine.Name), + Namespace: vimMachineCtx.VSphereMachine.Namespace, + Name: generateVMObjectName(vimMachineCtx, vimMachineCtx.Machine.Name), }, } mutateFn := func() (err error) { @@ -342,10 +342,10 @@ func (v *VimMachineService) createOrPatchVSphereVM(ctx *context.VIMMachineContex vm.SetOwnerReferences(clusterutilv1.EnsureOwnerRef( vm.OwnerReferences, metav1.OwnerReference{ - APIVersion: ctx.VSphereMachine.APIVersion, - Kind: ctx.VSphereMachine.Kind, - Name: ctx.VSphereMachine.Name, - UID: ctx.VSphereMachine.UID, + APIVersion: vimMachineCtx.VSphereMachine.APIVersion, + Kind: vimMachineCtx.VSphereMachine.Kind, + Name: vimMachineCtx.VSphereMachine.Name, + UID: vimMachineCtx.VSphereMachine.UID, })) // Instruct the VSphereVM to use the CAPI bootstrap data resource. 
@@ -353,8 +353,8 @@ func (v *VimMachineService) createOrPatchVSphereVM(ctx *context.VIMMachineContex vm.Spec.BootstrapRef = &corev1.ObjectReference{ APIVersion: "v1", Kind: "Secret", - Name: *ctx.Machine.Spec.Bootstrap.DataSecretName, - Namespace: ctx.Machine.ObjectMeta.Namespace, + Name: *vimMachineCtx.Machine.Spec.Bootstrap.DataSecretName, + Namespace: vimMachineCtx.Machine.ObjectMeta.Namespace, } // Initialize the VSphereVM's labels map if it is nil. @@ -364,20 +364,20 @@ func (v *VimMachineService) createOrPatchVSphereVM(ctx *context.VIMMachineContex // Ensure the VSphereVM has a label that can be used when searching for // resources associated with the target cluster. - vm.Labels[clusterv1.ClusterNameLabel] = ctx.Machine.Labels[clusterv1.ClusterNameLabel] + vm.Labels[clusterv1.ClusterNameLabel] = vimMachineCtx.Machine.Labels[clusterv1.ClusterNameLabel] // For convenience, add a label that makes it easy to figure out if the // VSphereVM resource is part of some control plane. - if val, ok := ctx.Machine.Labels[clusterv1.MachineControlPlaneLabel]; ok { + if val, ok := vimMachineCtx.Machine.Labels[clusterv1.MachineControlPlaneLabel]; ok { vm.Labels[clusterv1.MachineControlPlaneLabel] = val } // Copy the VSphereMachine's VM clone spec into the VSphereVM's // clone spec. - ctx.VSphereMachine.Spec.VirtualMachineCloneSpec.DeepCopyInto(&vm.Spec.VirtualMachineCloneSpec) + vimMachineCtx.VSphereMachine.Spec.VirtualMachineCloneSpec.DeepCopyInto(&vm.Spec.VirtualMachineCloneSpec) // If Failure Domain is present on CAPI machine, use that to override the vm clone spec. - if overrideFunc, ok := v.generateOverrideFunc(ctx); ok { + if overrideFunc, ok := v.generateOverrideFunc(vimMachineCtx); ok { overrideFunc(vm) } @@ -388,16 +388,16 @@ func (v *VimMachineService) createOrPatchVSphereVM(ctx *context.VIMMachineContex // 2. From the VSphereMachine.Spec (the DeepCopyInto above) // 3. 
From the VSphereCluster.Spec if vm.Spec.Server == "" { - vm.Spec.Server = ctx.VSphereCluster.Spec.Server + vm.Spec.Server = vimMachineCtx.VSphereCluster.Spec.Server } if vm.Spec.Thumbprint == "" { - vm.Spec.Thumbprint = ctx.VSphereCluster.Spec.Thumbprint + vm.Spec.Thumbprint = vimMachineCtx.VSphereCluster.Spec.Thumbprint } if vsphereVM != nil { vm.Spec.BiosUUID = vsphereVM.Spec.BiosUUID } - vm.Spec.PowerOffMode = ctx.VSphereMachine.Spec.PowerOffMode - vm.Spec.GuestSoftPowerOffTimeout = ctx.VSphereMachine.Spec.GuestSoftPowerOffTimeout + vm.Spec.PowerOffMode = vimMachineCtx.VSphereMachine.Spec.PowerOffMode + vm.Spec.GuestSoftPowerOffTimeout = vimMachineCtx.VSphereMachine.Spec.GuestSoftPowerOffTimeout return nil } @@ -405,9 +405,9 @@ func (v *VimMachineService) createOrPatchVSphereVM(ctx *context.VIMMachineContex Namespace: vm.Namespace, Name: vm.Name, } - result, err := ctrlutil.CreateOrPatch(ctx, ctx.Client, vm, mutateFn) + result, err := ctrlutil.CreateOrPatch(vimMachineCtx, vimMachineCtx.Client, vm, mutateFn) if err != nil { - ctx.Logger.Error( + vimMachineCtx.Logger.Error( err, "failed to CreateOrPatch VSphereVM", "namespace", @@ -419,31 +419,31 @@ func (v *VimMachineService) createOrPatchVSphereVM(ctx *context.VIMMachineContex } switch result { case ctrlutil.OperationResultNone: - ctx.Logger.Info( + vimMachineCtx.Logger.Info( "no update required for vm", "vm", vmKey, ) case ctrlutil.OperationResultCreated: - ctx.Logger.Info( + vimMachineCtx.Logger.Info( "created vm", "vm", vmKey, ) case ctrlutil.OperationResultUpdated: - ctx.Logger.Info( + vimMachineCtx.Logger.Info( "updated vm", "vm", vmKey, ) case ctrlutil.OperationResultUpdatedStatus: - ctx.Logger.Info( + vimMachineCtx.Logger.Info( "updated vm and vm status", "vm", vmKey, ) case ctrlutil.OperationResultUpdatedStatusOnly: - ctx.Logger.Info( + vimMachineCtx.Logger.Info( "updated vm status", "vm", vmKey, @@ -455,9 +455,9 @@ func (v *VimMachineService) createOrPatchVSphereVM(ctx *context.VIMMachineContex // generateVMObjectName returns a new VM object name in specific cases, otherwise return the same // passed in the parameter. -func generateVMObjectName(ctx *context.VIMMachineContext, machineName string) string { +func generateVMObjectName(vimMachineCtx *capvcontext.VIMMachineContext, machineName string) string { // Windows VM names must have 15 characters length at max. - if ctx.VSphereMachine.Spec.OS == infrav1.Windows && len(machineName) > 15 { + if vimMachineCtx.VSphereMachine.Spec.OS == infrav1.Windows && len(machineName) > 15 { return strings.TrimSuffix(machineName[0:9], "-") + "-" + machineName[len(machineName)-5:] } return machineName @@ -467,22 +467,22 @@ func generateVMObjectName(ctx *context.VIMMachineContext, machineName string) st // with the values from the FailureDomain (if any) set on the owner CAPI machine. 
// //nolint:nestif -func (v *VimMachineService) generateOverrideFunc(ctx *context.VIMMachineContext) (func(vm *infrav1.VSphereVM), bool) { - failureDomainName := ctx.Machine.Spec.FailureDomain +func (v *VimMachineService) generateOverrideFunc(vimMachineCtx *capvcontext.VIMMachineContext) (func(vm *infrav1.VSphereVM), bool) { + failureDomainName := vimMachineCtx.Machine.Spec.FailureDomain if failureDomainName == nil { return nil, false } // Use the failureDomain name to fetch the vSphereDeploymentZone object var vsphereDeploymentZone infrav1.VSphereDeploymentZone - if err := ctx.Client.Get(ctx, client.ObjectKey{Name: *failureDomainName}, &vsphereDeploymentZone); err != nil { - ctx.Logger.Error(err, "unable to fetch vsphere deployment zone", "name", *failureDomainName) + if err := vimMachineCtx.Client.Get(vimMachineCtx, client.ObjectKey{Name: *failureDomainName}, &vsphereDeploymentZone); err != nil { + vimMachineCtx.Logger.Error(err, "unable to fetch vsphere deployment zone", "name", *failureDomainName) return nil, false } var vsphereFailureDomain infrav1.VSphereFailureDomain - if err := ctx.Client.Get(ctx, client.ObjectKey{Name: vsphereDeploymentZone.Spec.FailureDomain}, &vsphereFailureDomain); err != nil { - ctx.Logger.Error(err, "unable to fetch failure domain", "name", vsphereDeploymentZone.Spec.FailureDomain) + if err := vimMachineCtx.Client.Get(vimMachineCtx, client.ObjectKey{Name: vsphereDeploymentZone.Spec.FailureDomain}, &vsphereFailureDomain); err != nil { + vimMachineCtx.Logger.Error(err, "unable to fetch failure domain", "name", vsphereDeploymentZone.Spec.FailureDomain) return nil, false } diff --git a/pkg/services/vmoperator/vmopmachine.go b/pkg/services/vmoperator/vmopmachine.go index bdaac021d7..77ab227562 100644 --- a/pkg/services/vmoperator/vmopmachine.go +++ b/pkg/services/vmoperator/vmopmachine.go @@ -17,7 +17,7 @@ limitations under the License. 
package vmoperator import ( - goctx "context" + "context" "encoding/json" "fmt" @@ -35,7 +35,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context/vmware" infrautilv1 "sigs.k8s.io/cluster-api-provider-vsphere/pkg/util" vmwareutil "sigs.k8s.io/cluster-api-provider-vsphere/pkg/util/vmware" @@ -43,13 +43,13 @@ import ( type VmopMachineService struct{} -func (v *VmopMachineService) FetchVSphereMachine(client client.Client, name types.NamespacedName) (context.MachineContext, error) { +func (v *VmopMachineService) FetchVSphereMachine(client client.Client, name types.NamespacedName) (capvcontext.MachineContext, error) { vsphereMachine := &vmwarev1.VSphereMachine{} - err := client.Get(goctx.Background(), name, vsphereMachine) + err := client.Get(context.Background(), name, vsphereMachine) return &vmware.SupervisorMachineContext{VSphereMachine: vsphereMachine}, err } -func (v *VmopMachineService) FetchVSphereCluster(c client.Client, cluster *clusterv1.Cluster, machineContext context.MachineContext) (context.MachineContext, error) { +func (v *VmopMachineService) FetchVSphereCluster(c client.Client, cluster *clusterv1.Cluster, machineContext capvcontext.MachineContext) (capvcontext.MachineContext, error) { ctx, ok := machineContext.(*vmware.SupervisorMachineContext) if !ok { return nil, errors.New("received unexpected SupervisorMachineContext type") @@ -60,105 +60,105 @@ func (v *VmopMachineService) FetchVSphereCluster(c client.Client, cluster *clust Namespace: machineContext.GetObjectMeta().Namespace, Name: cluster.Spec.InfrastructureRef.Name, } - err := c.Get(goctx.Background(), key, vsphereCluster) + err := c.Get(context.Background(), key, vsphereCluster) ctx.VSphereCluster = vsphereCluster return ctx, err } -func (v *VmopMachineService) ReconcileDelete(c context.MachineContext) error { - ctx, ok := c.(*vmware.SupervisorMachineContext) +func (v *VmopMachineService) ReconcileDelete(machineCtx capvcontext.MachineContext) error { + supervisorMachineCtx, ok := machineCtx.(*vmware.SupervisorMachineContext) if !ok { return errors.New("received unexpected SupervisorMachineContext type") } - ctx.Logger.V(2).Info("Destroying VM") + supervisorMachineCtx.Logger.V(2).Info("Destroying VM") // If debug logging is enabled, report the number of vms in the cluster before and after the reconcile - if ctx.Logger.V(5).Enabled() { - vms, err := getVirtualMachinesInCluster(ctx) - ctx.Logger.Info("Trace Destroy PRE: VirtualMachines", "vmcount", len(vms), "error", err) + if supervisorMachineCtx.Logger.V(5).Enabled() { + vms, err := getVirtualMachinesInCluster(supervisorMachineCtx) + supervisorMachineCtx.Logger.Info("Trace Destroy PRE: VirtualMachines", "vmcount", len(vms), "error", err) defer func() { - vms, err := getVirtualMachinesInCluster(ctx) - ctx.Logger.Info("Trace Destroy POST: VirtualMachines", "vmcount", len(vms), "error", err) + vms, err := getVirtualMachinesInCluster(supervisorMachineCtx) + supervisorMachineCtx.Logger.Info("Trace Destroy POST: VirtualMachines", "vmcount", len(vms), "error", err) }() } // First, check to see if it's already deleted vmopVM := vmoprv1.VirtualMachine{} - if err := ctx.Client.Get(ctx, types.NamespacedName{Namespace: ctx.Machine.Namespace, Name: ctx.Machine.Name}, &vmopVM); err != nil { + if err := 
supervisorMachineCtx.Client.Get(supervisorMachineCtx, types.NamespacedName{Namespace: supervisorMachineCtx.Machine.Namespace, Name: supervisorMachineCtx.Machine.Name}, &vmopVM); err != nil { if apierrors.IsNotFound(err) { - ctx.VSphereMachine.Status.VMStatus = vmwarev1.VirtualMachineStateNotFound + supervisorMachineCtx.VSphereMachine.Status.VMStatus = vmwarev1.VirtualMachineStateNotFound return err } - ctx.VSphereMachine.Status.VMStatus = vmwarev1.VirtualMachineStateError + supervisorMachineCtx.VSphereMachine.Status.VMStatus = vmwarev1.VirtualMachineStateError return err } // Next, check to see if it's in the process of being deleted if vmopVM.GetDeletionTimestamp() != nil { - ctx.VSphereMachine.Status.VMStatus = vmwarev1.VirtualMachineStateDeleting + supervisorMachineCtx.VSphereMachine.Status.VMStatus = vmwarev1.VirtualMachineStateDeleting return nil } // If none of the above are true, Delete the VM - if err := ctx.Client.Delete(ctx, &vmopVM); err != nil { + if err := supervisorMachineCtx.Client.Delete(supervisorMachineCtx, &vmopVM); err != nil { if apierrors.IsNotFound(err) { - ctx.VSphereMachine.Status.VMStatus = vmwarev1.VirtualMachineStateNotFound + supervisorMachineCtx.VSphereMachine.Status.VMStatus = vmwarev1.VirtualMachineStateNotFound return err } - ctx.VSphereMachine.Status.VMStatus = vmwarev1.VirtualMachineStateError + supervisorMachineCtx.VSphereMachine.Status.VMStatus = vmwarev1.VirtualMachineStateError return err } - ctx.VSphereMachine.Status.VMStatus = vmwarev1.VirtualMachineStateDeleting + supervisorMachineCtx.VSphereMachine.Status.VMStatus = vmwarev1.VirtualMachineStateDeleting return nil } -func (v *VmopMachineService) SyncFailureReason(c context.MachineContext) (bool, error) { - ctx, ok := c.(*vmware.SupervisorMachineContext) +func (v *VmopMachineService) SyncFailureReason(machineCtx capvcontext.MachineContext) (bool, error) { + supervisorMachineCtx, ok := machineCtx.(*vmware.SupervisorMachineContext) if !ok { return false, errors.New("received unexpected SupervisorMachineContext type") } - return ctx.VSphereMachine.Status.FailureReason != nil || ctx.VSphereMachine.Status.FailureMessage != nil, nil + return supervisorMachineCtx.VSphereMachine.Status.FailureReason != nil || supervisorMachineCtx.VSphereMachine.Status.FailureMessage != nil, nil } -func (v *VmopMachineService) ReconcileNormal(c context.MachineContext) (bool, error) { - ctx, ok := c.(*vmware.SupervisorMachineContext) +func (v *VmopMachineService) ReconcileNormal(machineCtx capvcontext.MachineContext) (bool, error) { + supervisorMachineCtx, ok := machineCtx.(*vmware.SupervisorMachineContext) if !ok { return false, errors.New("received unexpected SupervisorMachineContext type") } - ctx.VSphereMachine.Spec.FailureDomain = ctx.Machine.Spec.FailureDomain + supervisorMachineCtx.VSphereMachine.Spec.FailureDomain = supervisorMachineCtx.Machine.Spec.FailureDomain - ctx.Logger.V(2).Info("Reconciling VM") + supervisorMachineCtx.Logger.V(2).Info("Reconciling VM") // If debug logging is enabled, report the number of vms in the cluster before and after the reconcile - if ctx.Logger.V(5).Enabled() { - vms, err := getVirtualMachinesInCluster(ctx) - ctx.Logger.Info("Trace ReconcileVM PRE: VirtualMachines", "vmcount", len(vms), "error", err) + if supervisorMachineCtx.Logger.V(5).Enabled() { + vms, err := getVirtualMachinesInCluster(supervisorMachineCtx) + supervisorMachineCtx.Logger.Info("Trace ReconcileVM PRE: VirtualMachines", "vmcount", len(vms), "error", err) defer func() { - vms, err := getVirtualMachinesInCluster(ctx) - 
ctx.Logger.Info("Trace ReconcileVM POST: VirtualMachines", "vmcount", len(vms), "error", err) + vms, err := getVirtualMachinesInCluster(supervisorMachineCtx) + supervisorMachineCtx.Logger.Info("Trace ReconcileVM POST: VirtualMachines", "vmcount", len(vms), "error", err) }() } // Set the VM state. Will get reset throughout the reconcile - ctx.VSphereMachine.Status.VMStatus = vmwarev1.VirtualMachineStatePending + supervisorMachineCtx.VSphereMachine.Status.VMStatus = vmwarev1.VirtualMachineStatePending // Define the VM Operator VirtualMachine resource to reconcile. - vmOperatorVM := v.newVMOperatorVM(ctx) + vmOperatorVM := v.newVMOperatorVM(supervisorMachineCtx) // Reconcile the VM Operator VirtualMachine. - if err := v.reconcileVMOperatorVM(ctx, vmOperatorVM); err != nil { - conditions.MarkFalse(ctx.VSphereMachine, infrav1.VMProvisionedCondition, vmwarev1.VMCreationFailedReason, clusterv1.ConditionSeverityWarning, + if err := v.reconcileVMOperatorVM(supervisorMachineCtx, vmOperatorVM); err != nil { + conditions.MarkFalse(supervisorMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, vmwarev1.VMCreationFailedReason, clusterv1.ConditionSeverityWarning, fmt.Sprintf("failed to create or update VirtualMachine: %v", err)) // TODO: what to do if AlreadyExists error return false, err } // Update the VM's state to Pending - ctx.VSphereMachine.Status.VMStatus = vmwarev1.VirtualMachineStatePending + supervisorMachineCtx.VSphereMachine.Status.VMStatus = vmwarev1.VirtualMachineStatePending // Since vm operator only has one condition for now, we can't set vspheremachine's condition fully based on virtualmachine's // condition. Once vm operator surfaces enough conditions in virtualmachine, we could simply mirror the conditions in vspheremachine. @@ -166,8 +166,8 @@ func (v *VmopMachineService) ReconcileNormal(c context.MachineContext) (bool, er // TODO: vm-operator does not use the cluster-api condition type. 
so can't use cluster-api util functions to fetch the condition for _, cond := range vmOperatorVM.Status.Conditions { if cond.Type == vmoprv1.VirtualMachinePrereqReadyCondition && cond.Severity == vmoprv1.ConditionSeverityError { - conditions.MarkFalse(ctx.VSphereMachine, infrav1.VMProvisionedCondition, cond.Reason, clusterv1.ConditionSeverityError, cond.Message) - return false, errors.Errorf("vm prerequisites check fails: %s", ctx) + conditions.MarkFalse(supervisorMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, cond.Reason, clusterv1.ConditionSeverityError, cond.Message) + return false, errors.Errorf("vm prerequisites check fails: %s", supervisorMachineCtx) } } @@ -177,59 +177,59 @@ func (v *VmopMachineService) ReconcileNormal(c context.MachineContext) (bool, er // * An IP address // * A BIOS UUID if vmOperatorVM.Status.Phase != vmoprv1.Created { - conditions.MarkFalse(ctx.VSphereMachine, infrav1.VMProvisionedCondition, vmwarev1.VMProvisionStartedReason, clusterv1.ConditionSeverityInfo, "") - ctx.Logger.Info(fmt.Sprintf("vm is not yet created: %s", ctx)) + conditions.MarkFalse(supervisorMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, vmwarev1.VMProvisionStartedReason, clusterv1.ConditionSeverityInfo, "") + supervisorMachineCtx.Logger.Info(fmt.Sprintf("vm is not yet created: %s", supervisorMachineCtx)) return true, nil } // Mark the VM as created - ctx.VSphereMachine.Status.VMStatus = vmwarev1.VirtualMachineStateCreated + supervisorMachineCtx.VSphereMachine.Status.VMStatus = vmwarev1.VirtualMachineStateCreated if vmOperatorVM.Status.PowerState != vmoprv1.VirtualMachinePoweredOn { - conditions.MarkFalse(ctx.VSphereMachine, infrav1.VMProvisionedCondition, vmwarev1.PoweringOnReason, clusterv1.ConditionSeverityInfo, "") - ctx.Logger.Info(fmt.Sprintf("vm is not yet powered on: %s", ctx)) + conditions.MarkFalse(supervisorMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, vmwarev1.PoweringOnReason, clusterv1.ConditionSeverityInfo, "") + supervisorMachineCtx.Logger.Info(fmt.Sprintf("vm is not yet powered on: %s", supervisorMachineCtx)) return true, nil } // Mark the VM as poweredOn - ctx.VSphereMachine.Status.VMStatus = vmwarev1.VirtualMachineStatePoweredOn + supervisorMachineCtx.VSphereMachine.Status.VMStatus = vmwarev1.VirtualMachineStatePoweredOn if vmOperatorVM.Status.VmIp == "" { - conditions.MarkFalse(ctx.VSphereMachine, infrav1.VMProvisionedCondition, vmwarev1.WaitingForNetworkAddressReason, clusterv1.ConditionSeverityInfo, "") - ctx.Logger.Info(fmt.Sprintf("vm does not have an IP address: %s", ctx)) + conditions.MarkFalse(supervisorMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, vmwarev1.WaitingForNetworkAddressReason, clusterv1.ConditionSeverityInfo, "") + supervisorMachineCtx.Logger.Info(fmt.Sprintf("vm does not have an IP address: %s", supervisorMachineCtx)) return true, nil } if vmOperatorVM.Status.BiosUUID == "" { - conditions.MarkFalse(ctx.VSphereMachine, infrav1.VMProvisionedCondition, vmwarev1.WaitingForBIOSUUIDReason, clusterv1.ConditionSeverityInfo, "") - ctx.Logger.Info(fmt.Sprintf("vm does not have a BIOS UUID: %s", ctx)) + conditions.MarkFalse(supervisorMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, vmwarev1.WaitingForBIOSUUIDReason, clusterv1.ConditionSeverityInfo, "") + supervisorMachineCtx.Logger.Info(fmt.Sprintf("vm does not have a BIOS UUID: %s", supervisorMachineCtx)) return true, nil } // Mark the VM as ready - ctx.VSphereMachine.Status.VMStatus = vmwarev1.VirtualMachineStateReady + 
supervisorMachineCtx.VSphereMachine.Status.VMStatus = vmwarev1.VirtualMachineStateReady - if ok := v.reconcileNetwork(ctx, vmOperatorVM); !ok { - ctx.Logger.Info("ip not yet assigned") + if ok := v.reconcileNetwork(supervisorMachineCtx, vmOperatorVM); !ok { + supervisorMachineCtx.Logger.Info("ip not yet assigned") return true, nil } - v.reconcileProviderID(ctx, vmOperatorVM) + v.reconcileProviderID(supervisorMachineCtx, vmOperatorVM) // Mark the VSphereMachine as Ready - ctx.VSphereMachine.Status.Ready = true - conditions.MarkTrue(ctx.VSphereMachine, infrav1.VMProvisionedCondition) + supervisorMachineCtx.VSphereMachine.Status.Ready = true + conditions.MarkTrue(supervisorMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition) return false, nil } -func (v VmopMachineService) GetHostInfo(c context.MachineContext) (string, error) { - ctx, ok := c.(*vmware.SupervisorMachineContext) +func (v VmopMachineService) GetHostInfo(machineCtx capvcontext.MachineContext) (string, error) { + supervisorMachineCtx, ok := machineCtx.(*vmware.SupervisorMachineContext) if !ok { return "", errors.New("received unexpected SupervisorMachineContext type") } vmOperatorVM := &vmoprv1.VirtualMachine{} - if err := ctx.Client.Get(ctx, client.ObjectKey{ - Name: ctx.Machine.Name, - Namespace: ctx.Machine.Namespace, + if err := supervisorMachineCtx.Client.Get(supervisorMachineCtx, client.ObjectKey{ + Name: supervisorMachineCtx.Machine.Name, + Namespace: supervisorMachineCtx.Machine.Namespace, }, vmOperatorVM); err != nil { return "", err } @@ -237,11 +237,11 @@ func (v VmopMachineService) GetHostInfo(c context.MachineContext) (string, error return vmOperatorVM.Status.Host, nil } -func (v VmopMachineService) newVMOperatorVM(ctx *vmware.SupervisorMachineContext) *vmoprv1.VirtualMachine { +func (v VmopMachineService) newVMOperatorVM(supervisorMachineCtx *vmware.SupervisorMachineContext) *vmoprv1.VirtualMachine { return &vmoprv1.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ - Name: ctx.Machine.Name, - Namespace: ctx.Machine.Namespace, + Name: supervisorMachineCtx.Machine.Name, + Namespace: supervisorMachineCtx.Machine.Namespace, }, TypeMeta: metav1.TypeMeta{ APIVersion: vmoprv1.SchemeGroupVersion.String(), @@ -250,35 +250,35 @@ func (v VmopMachineService) newVMOperatorVM(ctx *vmware.SupervisorMachineContext } } -func (v VmopMachineService) reconcileVMOperatorVM(ctx *vmware.SupervisorMachineContext, vmOperatorVM *vmoprv1.VirtualMachine) error { +func (v VmopMachineService) reconcileVMOperatorVM(supervisorMachineCtx *vmware.SupervisorMachineContext, vmOperatorVM *vmoprv1.VirtualMachine) error { // All Machine resources should define the version of Kubernetes to use. 
- if ctx.Machine.Spec.Version == nil || *ctx.Machine.Spec.Version == "" { + if supervisorMachineCtx.Machine.Spec.Version == nil || *supervisorMachineCtx.Machine.Spec.Version == "" { return errors.Errorf( "missing kubernetes version for %s %s/%s", - ctx.Machine.GroupVersionKind(), - ctx.Machine.Namespace, - ctx.Machine.Name) + supervisorMachineCtx.Machine.GroupVersionKind(), + supervisorMachineCtx.Machine.Namespace, + supervisorMachineCtx.Machine.Name) } var dataSecretName string - if dsn := ctx.Machine.Spec.Bootstrap.DataSecretName; dsn != nil { + if dsn := supervisorMachineCtx.Machine.Spec.Bootstrap.DataSecretName; dsn != nil { dataSecretName = *dsn } - _, err := ctrlutil.CreateOrPatch(ctx, ctx.Client, vmOperatorVM, func() error { + _, err := ctrlutil.CreateOrPatch(supervisorMachineCtx, supervisorMachineCtx.Client, vmOperatorVM, func() error { // Define a new VM Operator virtual machine. // NOTE: Set field-by-field in order to preserve changes made directly // to the VirtualMachine spec by other sources (e.g. the cloud provider) - vmOperatorVM.Spec.ImageName = ctx.VSphereMachine.Spec.ImageName - vmOperatorVM.Spec.ClassName = ctx.VSphereMachine.Spec.ClassName - vmOperatorVM.Spec.StorageClass = ctx.VSphereMachine.Spec.StorageClass + vmOperatorVM.Spec.ImageName = supervisorMachineCtx.VSphereMachine.Spec.ImageName + vmOperatorVM.Spec.ClassName = supervisorMachineCtx.VSphereMachine.Spec.ClassName + vmOperatorVM.Spec.StorageClass = supervisorMachineCtx.VSphereMachine.Spec.StorageClass vmOperatorVM.Spec.PowerState = vmoprv1.VirtualMachinePoweredOn - vmOperatorVM.Spec.ResourcePolicyName = ctx.VSphereCluster.Status.ResourcePolicyName + vmOperatorVM.Spec.ResourcePolicyName = supervisorMachineCtx.VSphereCluster.Status.ResourcePolicyName vmOperatorVM.Spec.VmMetadata = &vmoprv1.VirtualMachineMetadata{ SecretName: dataSecretName, Transport: vmoprv1.VirtualMachineMetadataCloudInitTransport, } - vmOperatorVM.Spec.PowerOffMode = vmoprv1.VirtualMachinePowerOpMode(ctx.VSphereMachine.Spec.PowerOffMode) + vmOperatorVM.Spec.PowerOffMode = vmoprv1.VirtualMachinePowerOpMode(supervisorMachineCtx.VSphereMachine.Spec.PowerOffMode) // VMOperator supports readiness probe and will add/remove endpoints to a // VirtualMachineService based on the outcome of the readiness check. @@ -288,7 +288,7 @@ func (v VmopMachineService) reconcileVMOperatorVM(ctx *vmware.SupervisorMachineC // Once the initial control plane node is ready, we can re-add the probe so // that subsequent machines do not attempt to speak to a kube-apiserver // that is not yet ready. - if infrautilv1.IsControlPlaneMachine(ctx.Machine) && ctx.Cluster.Status.ControlPlaneReady { + if infrautilv1.IsControlPlaneMachine(supervisorMachineCtx.Machine) && supervisorMachineCtx.Cluster.Status.ControlPlaneReady { vmOperatorVM.Spec.ReadinessProbe = &vmoprv1.Probe{ TCPSocket: &vmoprv1.TCPSocketAction{ Port: intstr.FromInt(defaultAPIBindPort), @@ -297,17 +297,17 @@ func (v VmopMachineService) reconcileVMOperatorVM(ctx *vmware.SupervisorMachineC } // Assign the VM's labels. 
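reconcileVMOperatorVM drives the desired state through controller-runtime's CreateOrPatch, and the mutate function assigns fields one by one so values written by other sources survive the patch. A small sketch of the same idiom against a plain ConfigMap (the object, namespace, and key names here are illustrative, not part of the patch):

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// reconcileSettings creates the ConfigMap if it is missing, or patches only
// the keys this controller owns, leaving keys added by other actors intact.
func reconcileSettings(ctx context.Context, c client.Client, namespace, name, imageName string) error {
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
	}
	_, err := ctrlutil.CreateOrPatch(ctx, c, cm, func() error {
		// Set field-by-field instead of overwriting the whole Data map so
		// that values written by other sources survive the patch.
		if cm.Data == nil {
			cm.Data = map[string]string{}
		}
		cm.Data["imageName"] = imageName
		return nil
	})
	return err
}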
- vmOperatorVM.Labels = getVMLabels(ctx, vmOperatorVM.Labels) + vmOperatorVM.Labels = getVMLabels(supervisorMachineCtx, vmOperatorVM.Labels) - addResourcePolicyAnnotations(ctx, vmOperatorVM) + addResourcePolicyAnnotations(supervisorMachineCtx, vmOperatorVM) - if err := addVolumes(ctx, vmOperatorVM); err != nil { + if err := addVolumes(supervisorMachineCtx, vmOperatorVM); err != nil { return err } // Apply hooks to modify the VM spec // The hooks are loosely typed so as to allow for different VirtualMachine backends - for _, vmModifier := range ctx.VMModifiers { + for _, vmModifier := range supervisorMachineCtx.VMModifiers { modified, err := vmModifier(vmOperatorVM) if err != nil { return err @@ -320,11 +320,11 @@ func (v VmopMachineService) reconcileVMOperatorVM(ctx *vmware.SupervisorMachineC } // Make sure the VSphereMachine owns the VM Operator VirtualMachine. - if err := ctrlutil.SetControllerReference(ctx.VSphereMachine, vmOperatorVM, ctx.Scheme); err != nil { + if err := ctrlutil.SetControllerReference(supervisorMachineCtx.VSphereMachine, vmOperatorVM, supervisorMachineCtx.Scheme); err != nil { return errors.Wrapf(err, "failed to mark %s %s/%s as owner of %s %s/%s", - ctx.VSphereMachine.GroupVersionKind(), - ctx.VSphereMachine.Namespace, - ctx.VSphereMachine.Name, + supervisorMachineCtx.VSphereMachine.GroupVersionKind(), + supervisorMachineCtx.VSphereMachine.Namespace, + supervisorMachineCtx.VSphereMachine.Name, vmOperatorVM.GroupVersionKind(), vmOperatorVM.Namespace, vmOperatorVM.Name) @@ -335,55 +335,55 @@ func (v VmopMachineService) reconcileVMOperatorVM(ctx *vmware.SupervisorMachineC return err } -func (v *VmopMachineService) reconcileNetwork(ctx *vmware.SupervisorMachineContext, vm *vmoprv1.VirtualMachine) bool { +func (v *VmopMachineService) reconcileNetwork(supervisorMachineCtx *vmware.SupervisorMachineContext, vm *vmoprv1.VirtualMachine) bool { if vm.Status.VmIp == "" { return false } - ctx.VSphereMachine.Status.IPAddr = vm.Status.VmIp + supervisorMachineCtx.VSphereMachine.Status.IPAddr = vm.Status.VmIp return true } -func (v *VmopMachineService) reconcileProviderID(ctx *vmware.SupervisorMachineContext, vm *vmoprv1.VirtualMachine) { +func (v *VmopMachineService) reconcileProviderID(supervisorMachineCtx *vmware.SupervisorMachineContext, vm *vmoprv1.VirtualMachine) { providerID := fmt.Sprintf("vsphere://%s", vm.Status.BiosUUID) - if ctx.VSphereMachine.Spec.ProviderID == nil || *ctx.VSphereMachine.Spec.ProviderID != providerID { - ctx.VSphereMachine.Spec.ProviderID = &providerID - ctx.Logger.Info("Updated provider ID for machine", "machine", ctx.VSphereMachine.Name, "provider-id", providerID) + if supervisorMachineCtx.VSphereMachine.Spec.ProviderID == nil || *supervisorMachineCtx.VSphereMachine.Spec.ProviderID != providerID { + supervisorMachineCtx.VSphereMachine.Spec.ProviderID = &providerID + supervisorMachineCtx.Logger.Info("Updated provider ID for machine", "machine", supervisorMachineCtx.VSphereMachine.Name, "provider-id", providerID) } - if ctx.VSphereMachine.Status.ID == nil || *ctx.VSphereMachine.Status.ID != vm.Status.BiosUUID { - ctx.VSphereMachine.Status.ID = &vm.Status.BiosUUID - ctx.Logger.Info("Updated VM ID for machine", "machine", ctx.VSphereMachine.Name, "vm-id", vm.Status.BiosUUID) + if supervisorMachineCtx.VSphereMachine.Status.ID == nil || *supervisorMachineCtx.VSphereMachine.Status.ID != vm.Status.BiosUUID { + supervisorMachineCtx.VSphereMachine.Status.ID = &vm.Status.BiosUUID + supervisorMachineCtx.Logger.Info("Updated VM ID for machine", "machine", 
supervisorMachineCtx.VSphereMachine.Name, "vm-id", vm.Status.BiosUUID) } } // getVirtualMachinesInCluster returns all VMOperator VirtualMachine objects in the current cluster. // First filter by clusterSelectorKey. If the result is empty, fall back to legacyClusterSelectorKey. -func getVirtualMachinesInCluster(ctx *vmware.SupervisorMachineContext) ([]*vmoprv1.VirtualMachine, error) { - labels := map[string]string{clusterSelectorKey: ctx.Cluster.Name} +func getVirtualMachinesInCluster(supervisorMachineCtx *vmware.SupervisorMachineContext) ([]*vmoprv1.VirtualMachine, error) { + labels := map[string]string{clusterSelectorKey: supervisorMachineCtx.Cluster.Name} vmList := &vmoprv1.VirtualMachineList{} - if err := ctx.Client.List( - ctx, vmList, - client.InNamespace(ctx.Cluster.Namespace), + if err := supervisorMachineCtx.Client.List( + supervisorMachineCtx, vmList, + client.InNamespace(supervisorMachineCtx.Cluster.Namespace), client.MatchingLabels(labels)); err != nil { return nil, errors.Wrapf( err, "error getting virtualmachines in cluster %s/%s", - ctx.Cluster.Namespace, ctx.Cluster.Name) + supervisorMachineCtx.Cluster.Namespace, supervisorMachineCtx.Cluster.Name) } // If the list is empty, fall back to using legacy labels for filtering if len(vmList.Items) == 0 { - legacyLabels := map[string]string{legacyClusterSelectorKey: ctx.Cluster.Name} - if err := ctx.Client.List( - ctx, vmList, - client.InNamespace(ctx.Cluster.Namespace), + legacyLabels := map[string]string{legacyClusterSelectorKey: supervisorMachineCtx.Cluster.Name} + if err := supervisorMachineCtx.Client.List( + supervisorMachineCtx, vmList, + client.InNamespace(supervisorMachineCtx.Cluster.Namespace), client.MatchingLabels(legacyLabels)); err != nil { return nil, errors.Wrapf( err, "error getting virtualmachines in cluster %s/%s using legacy labels", - ctx.Cluster.Namespace, ctx.Cluster.Name) + supervisorMachineCtx.Cluster.Namespace, supervisorMachineCtx.Cluster.Name) } } @@ -397,18 +397,18 @@ func getVirtualMachinesInCluster(ctx *vmware.SupervisorMachineContext) ([]*vmopr // Helper function to add annotations to indicate which tag vm-operator should add as well as which clusterModule VM // should be associated.
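getVirtualMachinesInCluster above lists by the current cluster-name label and, only when nothing matches, retries with the legacy label so VMs created by older releases are still discovered. A sketch of that fallback pattern against a generic object list, with hypothetical label keys:

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// Hypothetical selector keys; the provider uses its own cluster-name label
// and a legacy variant of it.
const (
	selectorKey       = "example.com/cluster-name"
	legacySelectorKey = "example.com/legacy-cluster-name"
)

// listWithLegacyFallback lists Pods by the current label key and, only when
// that returns nothing, retries with the legacy key so objects created by
// older releases are still found.
func listWithLegacyFallback(ctx context.Context, c client.Client, namespace, clusterName string) (*corev1.PodList, error) {
	pods := &corev1.PodList{}
	if err := c.List(ctx, pods,
		client.InNamespace(namespace),
		client.MatchingLabels(map[string]string{selectorKey: clusterName})); err != nil {
		return nil, err
	}
	if len(pods.Items) == 0 {
		if err := c.List(ctx, pods,
			client.InNamespace(namespace),
			client.MatchingLabels(map[string]string{legacySelectorKey: clusterName})); err != nil {
			return nil, err
		}
	}
	return pods, nil
}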
-func addResourcePolicyAnnotations(ctx *vmware.SupervisorMachineContext, vm *vmoprv1.VirtualMachine) { +func addResourcePolicyAnnotations(supervisorMachineCtx *vmware.SupervisorMachineContext, vm *vmoprv1.VirtualMachine) { annotations := vm.ObjectMeta.GetAnnotations() if annotations == nil { annotations = make(map[string]string) } - if infrautilv1.IsControlPlaneMachine(ctx.Machine) { + if infrautilv1.IsControlPlaneMachine(supervisorMachineCtx.Machine) { annotations[ProviderTagsAnnotationKey] = ControlPlaneVMVMAntiAffinityTagValue annotations[ClusterModuleNameAnnotationKey] = ControlPlaneVMClusterModuleGroupName } else { annotations[ProviderTagsAnnotationKey] = WorkerVMVMAntiAffinityTagValue - annotations[ClusterModuleNameAnnotationKey] = vmwareutil.GetMachineDeploymentNameForCluster(ctx.Cluster) + annotations[ClusterModuleNameAnnotationKey] = vmwareutil.GetMachineDeploymentNameForCluster(supervisorMachineCtx.Cluster) } vm.ObjectMeta.SetAnnotations(annotations) @@ -438,22 +438,22 @@ func addVolume(vm *vmoprv1.VirtualMachine, name string) { }) } -func addVolumes(ctx *vmware.SupervisorMachineContext, vm *vmoprv1.VirtualMachine) error { - nvolumes := len(ctx.VSphereMachine.Spec.Volumes) +func addVolumes(supervisorMachineCtx *vmware.SupervisorMachineContext, vm *vmoprv1.VirtualMachine) error { + nvolumes := len(supervisorMachineCtx.VSphereMachine.Spec.Volumes) if nvolumes == 0 { return nil } - for _, volume := range ctx.VSphereMachine.Spec.Volumes { + for _, volume := range supervisorMachineCtx.VSphereMachine.Spec.Volumes { storageClassName := volume.StorageClass if volume.StorageClass == "" { - storageClassName = ctx.VSphereMachine.Spec.StorageClass + storageClassName = supervisorMachineCtx.VSphereMachine.Spec.StorageClass } pvc := &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ - Name: volumeName(ctx.VSphereMachine, volume), - Namespace: ctx.VSphereMachine.Namespace, + Name: volumeName(supervisorMachineCtx.VSphereMachine, volume), + Namespace: supervisorMachineCtx.VSphereMachine.Namespace, }, Spec: corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{ @@ -470,9 +470,9 @@ func addVolumes(ctx *vmware.SupervisorMachineContext, vm *vmoprv1.VirtualMachine // which is required when the cluster has multiple (3) zones. // Single zone clusters (legacy/default) do not support zonal storage and must not // have the zone annotation set. 
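addVolumes above builds one PersistentVolumeClaim per declared volume and defaults the volume's storage class to the machine-wide class when the volume does not set its own. A trimmed-down sketch of that defaulting and naming step, using hypothetical stand-in types; the provider's real volumeName helper and spec fields are not reproduced here:

package example

// machineSpec and volumeSpec are hypothetical stand-ins for the fields used
// when building PVCs for a machine.
type machineSpec struct {
	Name         string
	StorageClass string
	Volumes      []volumeSpec
}

type volumeSpec struct {
	Name         string
	StorageClass string
}

// pvcRequest captures the name and storage class a PVC would be created with.
type pvcRequest struct {
	Name         string
	StorageClass string
}

// pvcRequests derives one PVC request per volume, falling back to the
// machine-wide storage class when the volume does not override it.
func pvcRequests(m machineSpec) []pvcRequest {
	out := make([]pvcRequest, 0, len(m.Volumes))
	for _, v := range m.Volumes {
		sc := v.StorageClass
		if sc == "" {
			sc = m.StorageClass
		}
		// Hypothetical naming scheme "<machine>-<volume>"; the provider has
		// its own volumeName helper.
		out = append(out, pvcRequest{Name: m.Name + "-" + v.Name, StorageClass: sc})
	}
	return out
}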
- zonal := len(ctx.VSphereCluster.Status.FailureDomains) > 1 + zonal := len(supervisorMachineCtx.VSphereCluster.Status.FailureDomains) > 1 - if zone := ctx.VSphereMachine.Spec.FailureDomain; zonal && zone != nil { + if zone := supervisorMachineCtx.VSphereMachine.Spec.FailureDomain; zonal && zone != nil { topology := []map[string]string{ {kubeTopologyZoneLabelKey: *zone}, } @@ -485,17 +485,17 @@ func addVolumes(ctx *vmware.SupervisorMachineContext, vm *vmoprv1.VirtualMachine } } - if _, err := ctrlutil.CreateOrPatch(ctx, ctx.Client, pvc, func() error { + if _, err := ctrlutil.CreateOrPatch(supervisorMachineCtx, supervisorMachineCtx.Client, pvc, func() error { if err := ctrlutil.SetOwnerReference( - ctx.VSphereMachine, + supervisorMachineCtx.VSphereMachine, pvc, - ctx.Scheme, + supervisorMachineCtx.Scheme, ); err != nil { return errors.Wrapf( err, "error setting %s/%s as owner of %s/%s", - ctx.VSphereMachine.Namespace, - ctx.VSphereMachine.Name, + supervisorMachineCtx.VSphereMachine.Namespace, + supervisorMachineCtx.VSphereMachine.Name, pvc.Namespace, pvc.Name, ) @@ -516,21 +516,21 @@ func addVolumes(ctx *vmware.SupervisorMachineContext, vm *vmoprv1.VirtualMachine } // getVMLabels returns the labels applied to a VirtualMachine. -func getVMLabels(ctx *vmware.SupervisorMachineContext, vmLabels map[string]string) map[string]string { +func getVMLabels(supervisorMachineCtx *vmware.SupervisorMachineContext, vmLabels map[string]string) map[string]string { if vmLabels == nil { vmLabels = map[string]string{} } // Get the labels for the VM that differ based on the cluster role of // the Kubernetes node hosted on this VM. - clusterRoleLabels := clusterRoleVMLabels(ctx.GetClusterContext(), infrautilv1.IsControlPlaneMachine(ctx.Machine)) + clusterRoleLabels := clusterRoleVMLabels(supervisorMachineCtx.GetClusterContext(), infrautilv1.IsControlPlaneMachine(supervisorMachineCtx.Machine)) for k, v := range clusterRoleLabels { vmLabels[k] = v } // Get the labels that determine the VM's placement inside of a stretched // cluster. - topologyLabels := getTopologyLabels(ctx) + topologyLabels := getTopologyLabels(supervisorMachineCtx) for k, v := range topologyLabels { vmLabels[k] = v } @@ -544,8 +544,8 @@ func getVMLabels(ctx *vmware.SupervisorMachineContext, vmLabels map[string]strin // // and thus the code is optimized as such. However, in the future // this function may return a more diverse topology. -func getTopologyLabels(ctx *vmware.SupervisorMachineContext) map[string]string { - if fd := ctx.VSphereMachine.Spec.FailureDomain; fd != nil && *fd != "" { +func getTopologyLabels(supervisorMachineCtx *vmware.SupervisorMachineContext) map[string]string { + if fd := supervisorMachineCtx.VSphereMachine.Spec.FailureDomain; fd != nil && *fd != "" { return map[string]string{ kubeTopologyZoneLabelKey: *fd, } diff --git a/pkg/util/testutil.go b/pkg/util/testutil.go index fd9d4b42a4..5a3e11568f 100644 --- a/pkg/util/testutil.go +++ b/pkg/util/testutil.go @@ -17,7 +17,7 @@ limitations under the License. 
package util import ( - goctx "context" + "context" netopv1 "github.com/vmware-tanzu/net-operator-api/api/v1alpha1" vmoprv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha1" @@ -33,7 +33,7 @@ import ( testclient "sigs.k8s.io/controller-runtime/pkg/client/fake" vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context/vmware" ) @@ -151,8 +151,8 @@ func createScheme() *runtime.Scheme { func CreateClusterContext(cluster *clusterv1.Cluster, vsphereCluster *vmwarev1.VSphereCluster) *vmware.ClusterContext { scheme := createScheme() - controllerManagerContext := &context.ControllerManagerContext{ - Context: goctx.Background(), + controllerManagerContext := &capvcontext.ControllerManagerContext{ + Context: context.Background(), Logger: klog.Background().WithName("controller-manager-logger"), Scheme: scheme, Client: testclient.NewClientBuilder().WithScheme(scheme).WithStatusSubresource( @@ -162,7 +162,7 @@ func CreateClusterContext(cluster *clusterv1.Cluster, vsphereCluster *vmwarev1.V } // Build the controller context. - controllerContext := &context.ControllerContext{ + controllerContext := &capvcontext.ControllerContext{ ControllerManagerContext: controllerManagerContext, Logger: controllerManagerContext.Logger.WithName("controller-logger"), } @@ -180,7 +180,7 @@ func CreateMachineContext(clusterContext *vmware.ClusterContext, machine *cluste vsphereMachine *vmwarev1.VSphereMachine) *vmware.SupervisorMachineContext { // Build the machine context. return &vmware.SupervisorMachineContext{ - BaseMachineContext: &context.BaseMachineContext{ + BaseMachineContext: &capvcontext.BaseMachineContext{ Logger: clusterContext.Logger.WithName(vsphereMachine.Name), Machine: machine, Cluster: clusterContext.Cluster, diff --git a/test/helpers/envtest.go b/test/helpers/envtest.go index 0d3b208474..52e30db003 100644 --- a/test/helpers/envtest.go +++ b/test/helpers/envtest.go @@ -18,7 +18,7 @@ limitations under the License. 
package helpers import ( - goctx "context" + "context" "fmt" "path" "path/filepath" @@ -49,7 +49,7 @@ import ( infrav1alpha4 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1alpha4" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" "sigs.k8s.io/cluster-api-provider-vsphere/internal/webhooks" - "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" + capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/manager" "sigs.k8s.io/cluster-api-provider-vsphere/test/helpers/vcsim" ) @@ -107,7 +107,7 @@ type ( Config *rest.Config Simulator *vcsim.Simulator - cancel goctx.CancelFunc + cancel context.CancelFunc } ) @@ -141,7 +141,7 @@ func NewTestEnvironment() *TestEnvironment { Username: simr.Username(), Password: simr.Password(), } - managerOpts.AddToManager = func(ctx *context.ControllerManagerContext, mgr ctrlmgr.Manager) error { + managerOpts.AddToManager = func(controllerCtx *capvcontext.ControllerManagerContext, mgr ctrlmgr.Manager) error { if err := (&webhooks.VSphereClusterTemplateWebhook{}).SetupWebhookWithManager(mgr); err != nil { return err } @@ -178,8 +178,8 @@ func NewTestEnvironment() *TestEnvironment { } } -func (t *TestEnvironment) StartManager(ctx goctx.Context) error { - ctx, cancel := goctx.WithCancel(ctx) +func (t *TestEnvironment) StartManager(ctx context.Context) error { + ctx, cancel := context.WithCancel(ctx) t.cancel = cancel return t.Manager.Start(ctx) } @@ -190,7 +190,7 @@ func (t *TestEnvironment) Stop() error { return env.Stop() } -func (t *TestEnvironment) Cleanup(ctx goctx.Context, objs ...client.Object) error { +func (t *TestEnvironment) Cleanup(ctx context.Context, objs ...client.Object) error { errs := make([]error, 0, len(objs)) for _, o := range objs { err := t.Client.Delete(ctx, o) @@ -205,7 +205,7 @@ func (t *TestEnvironment) Cleanup(ctx goctx.Context, objs ...client.Object) erro return kerrors.NewAggregate(errs) } -func (t *TestEnvironment) CreateNamespace(ctx goctx.Context, generateName string) (*corev1.Namespace, error) { +func (t *TestEnvironment) CreateNamespace(ctx context.Context, generateName string) (*corev1.Namespace, error) { ns := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ GenerateName: fmt.Sprintf("%s-", generateName), @@ -221,7 +221,7 @@ func (t *TestEnvironment) CreateNamespace(ctx goctx.Context, generateName string return ns, nil } -func (t *TestEnvironment) CreateKubeconfigSecret(ctx goctx.Context, cluster *clusterv1.Cluster) error { +func (t *TestEnvironment) CreateKubeconfigSecret(ctx context.Context, cluster *clusterv1.Cluster) error { return t.Create(ctx, kubeconfig.GenerateSecret(cluster, kubeconfig.FromEnvTestConfig(t.Config, cluster))) } diff --git a/test/helpers/framework.go b/test/helpers/framework.go index 77959e5296..60eab18382 100644 --- a/test/helpers/framework.go +++ b/test/helpers/framework.go @@ -17,7 +17,7 @@ limitations under the License. package helpers import ( - goctx "context" + "context" "errors" "fmt" "os" @@ -33,7 +33,7 @@ import ( // Util functions to interact with the clusterctl e2e framework. 
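The test environment above keeps the context.CancelFunc returned inside StartManager so that Stop can later shut the manager down. A stripped-down sketch of that start/stop pairing, using only the standard library and a hypothetical runner type:

package example

import (
	"context"
	"sync"
)

// runner mimics the test environment's lifecycle: Start wraps the incoming
// context with a cancel function and blocks running the workload; Stop
// cancels that context to unwind Start.
type runner struct {
	mu     sync.Mutex
	cancel context.CancelFunc
}

func (r *runner) Start(ctx context.Context, run func(context.Context) error) error {
	ctx, cancel := context.WithCancel(ctx)
	r.mu.Lock()
	r.cancel = cancel
	r.mu.Unlock()
	return run(ctx)
}

func (r *runner) Stop() {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.cancel != nil {
		r.cancel()
	}
}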
func LoadE2EConfig(configPath string) (*clusterctl.E2EConfig, error) { - config := clusterctl.LoadE2EConfig(goctx.TODO(), clusterctl.LoadE2EConfigInput{ConfigPath: configPath}) + config := clusterctl.LoadE2EConfig(context.TODO(), clusterctl.LoadE2EConfigInput{ConfigPath: configPath}) if config == nil { return nil, fmt.Errorf("cannot load E2E config found at %s", configPath) } @@ -58,7 +58,7 @@ func CreateClusterctlLocalRepository(config *clusterctl.E2EConfig, repositoryFol createRepositoryInput.RegisterClusterResourceSetConfigMapTransformation(cniPath, capi_e2e.CNIResources) } - clusterctlConfig := clusterctl.CreateRepository(goctx.TODO(), createRepositoryInput) + clusterctlConfig := clusterctl.CreateRepository(context.TODO(), createRepositoryInput) if _, err := os.Stat(clusterctlConfig); err != nil { return "", fmt.Errorf("the clusterctl config file does not exist in the local repository %s", repositoryFolder) } @@ -69,7 +69,7 @@ func SetupBootstrapCluster(config *clusterctl.E2EConfig, scheme *runtime.Scheme, var clusterProvider bootstrap.ClusterProvider kubeconfigPath := "" if !useExistingCluster { - clusterProvider = bootstrap.CreateKindBootstrapClusterAndLoadImages(goctx.TODO(), bootstrap.CreateKindBootstrapClusterAndLoadImagesInput{ + clusterProvider = bootstrap.CreateKindBootstrapClusterAndLoadImages(context.TODO(), bootstrap.CreateKindBootstrapClusterAndLoadImagesInput{ Name: config.ManagementClusterName, RequiresDockerSock: config.HasDockerProvider(), Images: config.Images, @@ -87,7 +87,7 @@ func SetupBootstrapCluster(config *clusterctl.E2EConfig, scheme *runtime.Scheme, } func InitBootstrapCluster(bootstrapClusterProxy framework.ClusterProxy, config *clusterctl.E2EConfig, clusterctlConfig, artifactFolder string) { - clusterctl.InitManagementClusterAndWatchControllerLogs(goctx.TODO(), clusterctl.InitManagementClusterAndWatchControllerLogsInput{ + clusterctl.InitManagementClusterAndWatchControllerLogs(context.TODO(), clusterctl.InitManagementClusterAndWatchControllerLogsInput{ ClusterProxy: bootstrapClusterProxy, ClusterctlConfigPath: clusterctlConfig, InfrastructureProviders: config.InfrastructureProviders(), @@ -97,9 +97,9 @@ func InitBootstrapCluster(bootstrapClusterProxy framework.ClusterProxy, config * func TearDown(bootstrapClusterProvider bootstrap.ClusterProvider, bootstrapClusterProxy framework.ClusterProxy) { if bootstrapClusterProxy != nil { - bootstrapClusterProxy.Dispose(goctx.TODO()) + bootstrapClusterProxy.Dispose(context.TODO()) } if bootstrapClusterProvider != nil { - bootstrapClusterProvider.Dispose(goctx.TODO()) + bootstrapClusterProvider.Dispose(context.TODO()) } } diff --git a/test/helpers/vmware/intg_test_context.go b/test/helpers/vmware/intg_test_context.go index 73faea7c20..3314a6011b 100644 --- a/test/helpers/vmware/intg_test_context.go +++ b/test/helpers/vmware/intg_test_context.go @@ -83,9 +83,9 @@ func (ctx *IntegrationTestContext) AfterEach() { // // The resources created by this function may be cleaned up by calling AfterEach // with the IntegrationTestContext returned by this function.
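TearDown above disposes the bootstrap proxy and provider only when they are non-nil, so a suite that failed part-way through setup can still clean up. A generic sketch of the same guard, assuming a hypothetical disposer interface (with the usual caveat that a typed nil wrapped in the interface would not be skipped):

package example

import "context"

// disposer is a hypothetical interface for anything the suite must clean up.
type disposer interface {
	Dispose(context.Context)
}

// tearDown calls Dispose on each non-nil resource; nil entries are skipped so
// that setup failures part-way through do not panic the cleanup path.
func tearDown(ctx context.Context, resources ...disposer) {
	for _, r := range resources {
		if r != nil {
			r.Dispose(ctx)
		}
	}
}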
-func NewIntegrationTestContextWithClusters(goctx context.Context, integrationTestClient client.Client) *IntegrationTestContext { - ctx := &IntegrationTestContext{ - Context: goctx, +func NewIntegrationTestContextWithClusters(ctx context.Context, integrationTestClient client.Client) *IntegrationTestContext { + testCtx := &IntegrationTestContext{ + Context: ctx, Client: integrationTestClient, } @@ -95,17 +95,17 @@ func NewIntegrationTestContextWithClusters(goctx context.Context, integrationTes Name: uuid.New().String(), }, } - Expect(ctx.Client.Create(goctx, namespace)).To(Succeed()) + Expect(testCtx.Client.Create(ctx, namespace)).To(Succeed()) - ctx.Namespace = namespace.Name + testCtx.Namespace = namespace.Name }) vsphereClusterName := capiutil.RandomString(6) - ctx.Cluster = createCluster(goctx, integrationTestClient, ctx.Namespace, vsphereClusterName) + testCtx.Cluster = createCluster(ctx, integrationTestClient, testCtx.Namespace, vsphereClusterName) By("Create a vsphere cluster and wait for it to exist", func() { - ctx.VSphereCluster = createVSphereCluster(goctx, integrationTestClient, ctx.Namespace, vsphereClusterName, ctx.Cluster.GetName()) - ctx.VSphereClusterKey = client.ObjectKeyFromObject(ctx.VSphereCluster) + testCtx.VSphereCluster = createVSphereCluster(ctx, integrationTestClient, testCtx.Namespace, vsphereClusterName, testCtx.Cluster.GetName()) + testCtx.VSphereClusterKey = client.ObjectKeyFromObject(testCtx.VSphereCluster) }) var config *rest.Config @@ -125,25 +125,25 @@ func NewIntegrationTestContextWithClusters(goctx context.Context, integrationTes Expect(err).ShouldNot(HaveOccurred()) Expect(config).ShouldNot(BeNil()) - ctx.GuestClient, err = client.New(config, client.Options{}) + testCtx.GuestClient, err = client.New(config, client.Options{}) Expect(err).ShouldNot(HaveOccurred()) - Expect(ctx.GuestClient).ShouldNot(BeNil()) + Expect(testCtx.GuestClient).ShouldNot(BeNil()) - ctx.envTest = envTest + testCtx.envTest = envTest }) By("Create the kubeconfig secret for the cluster", func() { buf, err := writeKubeConfig(config) Expect(err).ToNot(HaveOccurred()) secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Namespace: ctx.Namespace, - Name: fmt.Sprintf("%s-kubeconfig", ctx.Cluster.Name), + Namespace: testCtx.Namespace, + Name: fmt.Sprintf("%s-kubeconfig", testCtx.Cluster.Name), OwnerReferences: []metav1.OwnerReference{ { - APIVersion: ctx.Cluster.APIVersion, - Kind: ctx.Cluster.Kind, - Name: ctx.Cluster.Name, - UID: ctx.Cluster.UID, + APIVersion: testCtx.Cluster.APIVersion, + Kind: testCtx.Cluster.Kind, + Name: testCtx.Cluster.Name, + UID: testCtx.Cluster.UID, }, }, }, @@ -151,13 +151,13 @@ func NewIntegrationTestContextWithClusters(goctx context.Context, integrationTes "value": buf, }, } - Expect(integrationTestClient.Create(goctx, secret)).To(Succeed()) + Expect(integrationTestClient.Create(ctx, secret)).To(Succeed()) Eventually(func() error { - return integrationTestClient.Get(goctx, client.ObjectKeyFromObject(secret), secret) + return integrationTestClient.Get(ctx, client.ObjectKeyFromObject(secret), secret) }).Should(Succeed()) }) - return ctx + return testCtx } func createCluster(ctx context.Context, integrationTestClient client.Client, namespace, name string) *clusterv1.Cluster { diff --git a/test/integration/integration_suite_test.go b/test/integration/integration_suite_test.go index cae3fac264..00abb4a72e 100644 --- a/test/integration/integration_suite_test.go +++ b/test/integration/integration_suite_test.go @@ -17,7 +17,7 @@ limitations under the License. 
package integration import ( - goctx "context" + "context" "encoding/json" "flag" "fmt" @@ -69,7 +69,7 @@ const ( var ( testClusterName string dummyKubernetesVersion = "1.15.0+vmware.1" - ctx goctx.Context + ctx context.Context k8sClient dynamic.Interface ) @@ -246,7 +246,7 @@ var _ = SynchronizedBeforeSuite(func() []byte { By("Initializing the bootstrap cluster") helpers.InitBootstrapCluster(bootstrapClusterProxy, e2eConfig, clusterctlConfigPath, artifactFolder) - ctx = goctx.Background() + ctx = context.Background() return []byte( strings.Join([]string{ artifactFolder,