diff --git a/controllers/controllers_suite_test.go b/controllers/controllers_suite_test.go
index 8883837797..8554b5dcad 100644
--- a/controllers/controllers_suite_test.go
+++ b/controllers/controllers_suite_test.go
@@ -61,7 +61,7 @@ func setup() {
 	utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme))
 	utilruntime.Must(vmwarev1.AddToScheme(scheme.Scheme))
-	testEnv = helpers.NewTestEnvironment()
+	testEnv = helpers.NewTestEnvironment(ctx)
 	secretCachingClient, err := client.New(testEnv.Manager.GetConfig(), client.Options{
 		HTTPClient: testEnv.Manager.GetHTTPClient(),
@@ -93,7 +93,7 @@ func setup() {
 		panic(fmt.Sprintf("unable to create ClusterCacheReconciler controller: %v", err))
 	}
-	if err := AddClusterControllerToManager(testEnv.GetContext(), testEnv.Manager, &infrav1.VSphereCluster{}, controllerOpts); err != nil {
+	if err := AddClusterControllerToManager(ctx, testEnv.GetContext(), testEnv.Manager, &infrav1.VSphereCluster{}, controllerOpts); err != nil {
 		panic(fmt.Sprintf("unable to setup VsphereCluster controller: %v", err))
 	}
 	if err := AddMachineControllerToManager(testEnv.GetContext(), testEnv.Manager, &infrav1.VSphereMachine{}, controllerOpts); err != nil {
diff --git a/controllers/vmware/test/controllers_test.go b/controllers/vmware/test/controllers_test.go
index 8d3b13bc75..dbb59ecd13 100644
--- a/controllers/vmware/test/controllers_test.go
+++ b/controllers/vmware/test/controllers_test.go
@@ -223,15 +223,15 @@ func getManager(cfg *rest.Config, networkProvider string) manager.Manager {
 	controllerOpts := controller.Options{MaxConcurrentReconciles: 10}
-	opts.AddToManager = func(controllerCtx *capvcontext.ControllerManagerContext, mgr ctrlmgr.Manager) error {
-		if err := controllers.AddClusterControllerToManager(controllerCtx, mgr, &vmwarev1.VSphereCluster{}, controllerOpts); err != nil {
+	opts.AddToManager = func(ctx context.Context, controllerCtx *capvcontext.ControllerManagerContext, mgr ctrlmgr.Manager) error {
+		if err := controllers.AddClusterControllerToManager(ctx, controllerCtx, mgr, &vmwarev1.VSphereCluster{}, controllerOpts); err != nil {
 			return err
 		}
 		return controllers.AddMachineControllerToManager(controllerCtx, mgr, &vmwarev1.VSphereMachine{}, controllerOpts)
 	}
-	mgr, err := manager.New(opts)
+	mgr, err := manager.New(ctx, opts)
 	Expect(err).NotTo(HaveOccurred())
 	return mgr
 }
diff --git a/controllers/vspherecluster_controller.go b/controllers/vspherecluster_controller.go
index 23629486f2..e4af2ab405 100644
--- a/controllers/vspherecluster_controller.go
+++ b/controllers/vspherecluster_controller.go
@@ -57,7 +57,7 @@ import (
 // AddClusterControllerToManager adds the cluster controller to the provided
 // manager.
-func AddClusterControllerToManager(controllerCtx *capvcontext.ControllerManagerContext, mgr manager.Manager, clusterControlledType client.Object, options controller.Options) error {
+func AddClusterControllerToManager(ctx context.Context, controllerCtx *capvcontext.ControllerManagerContext, mgr manager.Manager, clusterControlledType client.Object, options controller.Options) error {
 	supervisorBased, err := util.IsSupervisorType(clusterControlledType)
 	if err != nil {
 		return err
 	}
@@ -110,7 +110,7 @@ func AddClusterControllerToManager(controllerCtx *capvcontext.ControllerManagerC
 		ControllerContext:       controllerContext,
 		clusterModuleReconciler: NewReconciler(controllerContext),
 	}
-	clusterToInfraFn := clusterToInfrastructureMapFunc(controllerCtx)
+	clusterToInfraFn := clusterToInfrastructureMapFunc(ctx, controllerCtx)
 	c, err := ctrl.NewControllerManagedBy(mgr).
 		// Watch the controlled, infrastructure resource.
 		For(clusterControlledType).
@@ -173,7 +173,7 @@ func AddClusterControllerToManager(controllerCtx *capvcontext.ControllerManagerC
 	return nil
 }
-func clusterToInfrastructureMapFunc(controllerCtx *capvcontext.ControllerManagerContext) handler.MapFunc {
+func clusterToInfrastructureMapFunc(ctx context.Context, controllerCtx *capvcontext.ControllerManagerContext) handler.MapFunc {
 	gvk := infrav1.GroupVersion.WithKind(reflect.TypeOf(&infrav1.VSphereCluster{}).Elem().Name())
-	return clusterutilv1.ClusterToInfrastructureMapFunc(controllerCtx, gvk, controllerCtx.Client, &infrav1.VSphereCluster{})
+	return clusterutilv1.ClusterToInfrastructureMapFunc(ctx, gvk, controllerCtx.Client, &infrav1.VSphereCluster{})
 }
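Taken together, the three files above show the shape of the change: setup functions such as AddClusterControllerToManager now take a context.Context first and hand it on to helpers like clusterToInfrastructureMapFunc, so the CAPI ClusterToInfrastructureMapFunc helper receives a real context rather than the ControllerManagerContext. A minimal sketch of that ctx-first setup pattern with plain controller-runtime types (the Example names are illustrative, not code from this PR):

```go
package example

import (
	"context"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// AddExampleControllerToManager mirrors the ctx-first setup signature: the
// caller's context is forwarded to helpers such as the MapFunc builder instead
// of a provider-specific context type standing in for it.
func AddExampleControllerToManager(ctx context.Context, mgr manager.Manager, controlled, watched client.Object) error {
	mapFn := exampleMapFunc(ctx, mgr.GetClient())
	return ctrl.NewControllerManagedBy(mgr).
		For(controlled).
		Watches(watched, handler.EnqueueRequestsFromMapFunc(mapFn)).
		Complete(reconcile.Func(func(ctx context.Context, _ reconcile.Request) (reconcile.Result, error) {
			return reconcile.Result{}, nil
		}))
}

// exampleMapFunc needs the context only while it is being built;
// controller-runtime supplies a per-event context when the returned
// function is eventually called.
func exampleMapFunc(_ context.Context, c client.Reader) handler.MapFunc {
	return func(ctx context.Context, obj client.Object) []reconcile.Request {
		_ = ctx
		_ = c
		_ = obj
		return nil
	}
}
```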
diff --git a/controllers/vspherecluster_reconciler.go b/controllers/vspherecluster_reconciler.go
index 7ea6d4559b..771e34ac51 100644
--- a/controllers/vspherecluster_reconciler.go
+++ b/controllers/vspherecluster_reconciler.go
@@ -61,10 +61,10 @@ type clusterReconciler struct {
 }
 // Reconcile ensures the back-end state reflects the Kubernetes resource state intent.
-func (r clusterReconciler) Reconcile(_ context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) {
+func (r clusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) {
 	// Get the VSphereCluster resource for this request.
 	vsphereCluster := &infrav1.VSphereCluster{}
-	if err := r.Client.Get(r, req.NamespacedName, vsphereCluster); err != nil {
+	if err := r.Client.Get(ctx, req.NamespacedName, vsphereCluster); err != nil {
 		if apierrors.IsNotFound(err) {
 			r.Logger.V(4).Info("VSphereCluster not found, won't reconcile", "key", req.NamespacedName)
 			return reconcile.Result{}, nil
@@ -73,7 +73,7 @@ func (r clusterReconciler) Reconcile(_ context.Context, req ctrl.Request) (_ ctr
 	}
 	// Fetch the CAPI Cluster.
-	cluster, err := clusterutilv1.GetOwnerCluster(r, r.Client, vsphereCluster.ObjectMeta)
+	cluster, err := clusterutilv1.GetOwnerCluster(ctx, r.Client, vsphereCluster.ObjectMeta)
 	if err != nil {
 		return reconcile.Result{}, err
 	}
@@ -118,13 +118,13 @@ func (r clusterReconciler) Reconcile(_ context.Context, req ctrl.Request) (_ ctr
 		}
 	}()
-	if err := setOwnerRefsOnVsphereMachines(clusterContext); err != nil {
+	if err := setOwnerRefsOnVsphereMachines(ctx, clusterContext); err != nil {
 		return reconcile.Result{}, errors.Wrapf(err, "failed to set owner refs on VSphereMachine objects")
 	}
 	// Handle deleted clusters
 	if !vsphereCluster.DeletionTimestamp.IsZero() {
-		return r.reconcileDelete(clusterContext)
+		return r.reconcileDelete(ctx, clusterContext)
 	}
 	// If the VSphereCluster doesn't have our finalizer, add it.
@@ -135,13 +135,13 @@ func (r clusterReconciler) Reconcile(_ context.Context, req ctrl.Request) (_ ctr
 	}
 	// Handle non-deleted clusters
-	return r.reconcileNormal(clusterContext)
+	return r.reconcileNormal(ctx, clusterContext)
 }
-func (r clusterReconciler) reconcileDelete(clusterCtx *capvcontext.ClusterContext) (reconcile.Result, error) {
+func (r clusterReconciler) reconcileDelete(ctx context.Context, clusterCtx *capvcontext.ClusterContext) (reconcile.Result, error) {
 	clusterCtx.Logger.Info("Reconciling VSphereCluster delete")
-	vsphereMachines, err := infrautilv1.GetVSphereMachinesInCluster(clusterCtx, clusterCtx.Client, clusterCtx.Cluster.Namespace, clusterCtx.Cluster.Name)
+	vsphereMachines, err := infrautilv1.GetVSphereMachinesInCluster(ctx, clusterCtx.Client, clusterCtx.Cluster.Namespace, clusterCtx.Cluster.Name)
 	if err != nil {
 		return reconcile.Result{}, errors.Wrapf(err,
 			"unable to list VSphereMachines part of VSphereCluster %s/%s", clusterCtx.VSphereCluster.Namespace, clusterCtx.VSphereCluster.Name)
@@ -160,10 +160,10 @@ func (r clusterReconciler) reconcileDelete(clusterCtx *capvcontext.ClusterContex
 			// Remove the finalizer since VM creation wouldn't proceed
 			r.Logger.Info("Removing finalizer from VSphereMachine", "namespace", vsphereMachine.Namespace, "name", vsphereMachine.Name)
 			ctrlutil.RemoveFinalizer(vsphereMachine, infrav1.MachineFinalizer)
-			if err := r.Client.Update(clusterCtx, vsphereMachine); err != nil {
+			if err := r.Client.Update(ctx, vsphereMachine); err != nil {
 				return reconcile.Result{}, err
 			}
-			if err := r.Client.Delete(clusterCtx, vsphereMachine); err != nil && !apierrors.IsNotFound(err) {
+			if err := r.Client.Delete(ctx, vsphereMachine); err != nil && !apierrors.IsNotFound(err) {
 				clusterCtx.Logger.Error(err, "Failed to delete for VSphereMachine", "namespace", vsphereMachine.Namespace, "name", vsphereMachine.Name)
 				deletionErrors = append(deletionErrors, err)
 			}
@@ -192,7 +192,7 @@ func (r clusterReconciler) reconcileDelete(clusterCtx *capvcontext.ClusterContex
 			Namespace: clusterCtx.VSphereCluster.Namespace,
 			Name:      clusterCtx.VSphereCluster.Spec.IdentityRef.Name,
 		}
-		err := clusterCtx.Client.Get(clusterCtx, secretKey, secret)
+		err := clusterCtx.Client.Get(ctx, secretKey, secret)
 		if err != nil {
 			if apierrors.IsNotFound(err) {
 				ctrlutil.RemoveFinalizer(clusterCtx.VSphereCluster, infrav1.ClusterFinalizer)
@@ -208,10 +208,10 @@ func (r clusterReconciler) reconcileDelete(clusterCtx *capvcontext.ClusterContex
 		if ctrlutil.ContainsFinalizer(secret, legacyIdentityFinalizer) {
 			ctrlutil.RemoveFinalizer(secret, legacyIdentityFinalizer)
 		}
-		if err := clusterCtx.Client.Update(clusterCtx, secret); err != nil {
+		if err := clusterCtx.Client.Update(ctx, secret); err != nil {
 			return reconcile.Result{}, err
 		}
-		if err := clusterCtx.Client.Delete(clusterCtx, secret); err != nil {
+		if err := clusterCtx.Client.Delete(ctx, secret); err != nil {
 			return reconcile.Result{}, err
 		}
 	}
@@ -222,10 +222,10 @@ func (r clusterReconciler) reconcileDelete(clusterCtx *capvcontext.ClusterContex
 	return reconcile.Result{}, nil
 }
-func (r clusterReconciler) reconcileNormal(clusterCtx *capvcontext.ClusterContext) (reconcile.Result, error) {
+func (r clusterReconciler) reconcileNormal(ctx context.Context, clusterCtx *capvcontext.ClusterContext) (reconcile.Result, error) {
 	clusterCtx.Logger.Info("Reconciling VSphereCluster")
-	ok, err := r.reconcileDeploymentZones(clusterCtx)
+	ok, err := r.reconcileDeploymentZones(ctx, clusterCtx)
 	if err != nil {
 		return reconcile.Result{}, err
 	}
@@ -234,12 +234,12 @@ func (r clusterReconciler) reconcileNormal(clusterCtx *capvcontext.ClusterContex
 		return reconcile.Result{RequeueAfter: 10 * time.Second}, nil
 	}
-	if err := r.reconcileIdentitySecret(clusterCtx); err != nil {
+	if err := r.reconcileIdentitySecret(ctx, clusterCtx); err != nil {
 		conditions.MarkFalse(clusterCtx.VSphereCluster, infrav1.VCenterAvailableCondition, infrav1.VCenterUnreachableReason, clusterv1.ConditionSeverityError, err.Error())
 		return reconcile.Result{}, err
 	}
-	vcenterSession, err := r.reconcileVCenterConnectivity(clusterCtx)
+	vcenterSession, err := r.reconcileVCenterConnectivity(ctx, clusterCtx)
 	if err != nil {
 		conditions.MarkFalse(clusterCtx.VSphereCluster, infrav1.VCenterAvailableCondition, infrav1.VCenterUnreachableReason, clusterv1.ConditionSeverityError, err.Error())
 		return reconcile.Result{}, errors.Wrapf(err,
@@ -276,14 +276,14 @@ func (r clusterReconciler) reconcileNormal(clusterCtx *capvcontext.ClusterContex
 	}
 	// Wait until the API server is online and accessible.
-	if !r.isAPIServerOnline(clusterCtx) {
+	if !r.isAPIServerOnline(ctx, clusterCtx) {
 		return reconcile.Result{}, nil
 	}
 	return reconcile.Result{}, nil
 }
-func (r clusterReconciler) reconcileIdentitySecret(clusterCtx *capvcontext.ClusterContext) error {
+func (r clusterReconciler) reconcileIdentitySecret(ctx context.Context, clusterCtx *capvcontext.ClusterContext) error {
 	vsphereCluster := clusterCtx.VSphereCluster
 	if identity.IsSecretIdentity(vsphereCluster) {
 		secret := &corev1.Secret{}
@@ -291,7 +291,7 @@ func (r clusterReconciler) reconcileIdentitySecret(clusterCtx *capvcontext.Clust
 			Namespace: vsphereCluster.Namespace,
 			Name:      vsphereCluster.Spec.IdentityRef.Name,
 		}
-		err := clusterCtx.Client.Get(clusterCtx, secretKey, secret)
+		err := clusterCtx.Client.Get(ctx, secretKey, secret)
 		if err != nil {
 			return err
 		}
@@ -313,7 +313,7 @@ func (r clusterReconciler) reconcileIdentitySecret(clusterCtx *capvcontext.Clust
 		if !ctrlutil.ContainsFinalizer(secret, infrav1.SecretIdentitySetFinalizer) {
 			ctrlutil.AddFinalizer(secret, infrav1.SecretIdentitySetFinalizer)
 		}
-		err = r.Client.Update(clusterCtx, secret)
+		err = r.Client.Update(ctx, secret)
 		if err != nil {
 			return err
 		}
@@ -322,7 +322,7 @@ func (r clusterReconciler) reconcileIdentitySecret(clusterCtx *capvcontext.Clust
 	return nil
 }
-func (r clusterReconciler) reconcileVCenterConnectivity(clusterCtx *capvcontext.ClusterContext) (*session.Session, error) {
+func (r clusterReconciler) reconcileVCenterConnectivity(ctx context.Context, clusterCtx *capvcontext.ClusterContext) (*session.Session, error) {
 	params := session.NewParams().
 		WithServer(clusterCtx.VSphereCluster.Spec.Server).
 		WithThumbprint(clusterCtx.VSphereCluster.Spec.Thumbprint).
@@ -332,13 +332,13 @@ func (r clusterReconciler) reconcileVCenterConnectivity(clusterCtx *capvcontext.
 		})
 	if clusterCtx.VSphereCluster.Spec.IdentityRef != nil {
-		creds, err := identity.GetCredentials(clusterCtx, r.Client, clusterCtx.VSphereCluster, r.Namespace)
+		creds, err := identity.GetCredentials(ctx, r.Client, clusterCtx.VSphereCluster, r.Namespace)
 		if err != nil {
 			return nil, err
 		}
 		params = params.WithUserInfo(creds.Username, creds.Password)
-		return session.GetOrCreate(clusterCtx, params)
+		return session.GetOrCreate(ctx, params)
 	}
 	params = params.WithUserInfo(clusterCtx.Username, clusterCtx.Password)
@@ -354,7 +354,7 @@ func (r clusterReconciler) reconcileVCenterVersion(clusterCtx *capvcontext.Clust
 	return nil
 }
-func (r clusterReconciler) reconcileDeploymentZones(clusterCtx *capvcontext.ClusterContext) (bool, error) {
+func (r clusterReconciler) reconcileDeploymentZones(ctx context.Context, clusterCtx *capvcontext.ClusterContext) (bool, error) {
 	// If there is no failure domain selector, we should simply skip it
 	if clusterCtx.VSphereCluster.Spec.FailureDomainSelector == nil {
 		return true, nil
@@ -368,7 +368,7 @@ func (r clusterReconciler) reconcileDeploymentZones(clusterCtx *capvcontext.Clus
 	}
 	var deploymentZoneList infrav1.VSphereDeploymentZoneList
-	err = r.Client.List(clusterCtx, &deploymentZoneList, &opts)
+	err = r.Client.List(ctx, &deploymentZoneList, &opts)
 	if err != nil {
 		return false, errors.Wrap(err, "unable to list deployment zones")
 	}
@@ -438,9 +438,10 @@ func (r clusterReconciler) reconcileVSphereClusterWhenAPIServerIsOnline(clusterC
 	}
 	apiServerTriggers[clusterCtx.Cluster.UID] = struct{}{}
 	go func() {
+		ctx := context.Background()
 		// Block until the target API server is online.
 		clusterCtx.Logger.Info("start polling API server for online check")
-		wait.PollUntilContextCancel(context.Background(), time.Second*1, true, func(context.Context) (bool, error) { return r.isAPIServerOnline(clusterCtx), nil }) //nolint:errcheck
+		wait.PollUntilContextCancel(ctx, time.Second*1, true, func(context.Context) (bool, error) { return r.isAPIServerOnline(ctx, clusterCtx), nil }) //nolint:errcheck
 		clusterCtx.Logger.Info("stop polling API server for online check")
 		clusterCtx.Logger.Info("triggering GenericEvent", "reason", "api-server-online")
 		eventChannel := clusterCtx.GetGenericEventChannelFor(clusterCtx.VSphereCluster.GetObjectKind().GroupVersionKind())
@@ -452,7 +453,7 @@ func (r clusterReconciler) reconcileVSphereClusterWhenAPIServerIsOnline(clusterC
 		// remove the key from the map that prevents multiple goroutines from
 		// polling the API server to see if it is online.
 		clusterCtx.Logger.Info("start polling for control plane initialized")
-		wait.PollUntilContextCancel(context.Background(), time.Second*1, true, func(context.Context) (bool, error) { return r.isControlPlaneInitialized(clusterCtx), nil }) //nolint:errcheck
+		wait.PollUntilContextCancel(ctx, time.Second*1, true, func(context.Context) (bool, error) { return r.isControlPlaneInitialized(ctx, clusterCtx), nil }) //nolint:errcheck
 		clusterCtx.Logger.Info("stop polling for control plane initialized")
 		apiServerTriggersMu.Lock()
 		delete(apiServerTriggers, clusterCtx.Cluster.UID)
@@ -460,9 +461,9 @@ func (r clusterReconciler) reconcileVSphereClusterWhenAPIServerIsOnline(clusterC
 	}()
 }
-func (r clusterReconciler) isAPIServerOnline(clusterCtx *capvcontext.ClusterContext) bool {
-	if kubeClient, err := infrautilv1.NewKubeClient(clusterCtx, clusterCtx.Client, clusterCtx.Cluster); err == nil {
-		if _, err := kubeClient.CoreV1().Nodes().List(clusterCtx, metav1.ListOptions{}); err == nil {
+func (r clusterReconciler) isAPIServerOnline(ctx context.Context, clusterCtx *capvcontext.ClusterContext) bool {
+	if kubeClient, err := infrautilv1.NewKubeClient(ctx, clusterCtx.Client, clusterCtx.Cluster); err == nil {
+		if _, err := kubeClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{}); err == nil {
 			// The target cluster is online. To make sure the correct control
 			// plane endpoint information is logged, it is necessary to fetch
 			// an up-to-date Cluster resource. If this fails, then set the
@@ -471,7 +472,7 @@ func (r clusterReconciler) isAPIServerOnline(clusterCtx *capvcontext.ClusterCont
 			// if the API server is online.
 			cluster := &clusterv1.Cluster{}
 			clusterKey := client.ObjectKey{Namespace: clusterCtx.Cluster.Namespace, Name: clusterCtx.Cluster.Name}
-			if err := clusterCtx.Client.Get(clusterCtx, clusterKey, cluster); err != nil {
+			if err := clusterCtx.Client.Get(ctx, clusterKey, cluster); err != nil {
 				cluster = clusterCtx.Cluster.DeepCopy()
 				cluster.Spec.ControlPlaneEndpoint.Host = clusterCtx.VSphereCluster.Spec.ControlPlaneEndpoint.Host
 				cluster.Spec.ControlPlaneEndpoint.Port = clusterCtx.VSphereCluster.Spec.ControlPlaneEndpoint.Port
@@ -486,10 +487,10 @@ func (r clusterReconciler) isAPIServerOnline(clusterCtx *capvcontext.ClusterCont
 	return false
 }
-func (r clusterReconciler) isControlPlaneInitialized(clusterCtx *capvcontext.ClusterContext) bool {
+func (r clusterReconciler) isControlPlaneInitialized(ctx context.Context, clusterCtx *capvcontext.ClusterContext) bool {
 	cluster := &clusterv1.Cluster{}
 	clusterKey := client.ObjectKey{Namespace: clusterCtx.Cluster.Namespace, Name: clusterCtx.Cluster.Name}
-	if err := clusterCtx.Client.Get(clusterCtx, clusterKey, cluster); err != nil {
+	if err := clusterCtx.Client.Get(ctx, clusterKey, cluster); err != nil {
 		if !apierrors.IsNotFound(err) {
 			clusterCtx.Logger.Error(err, "failed to get updated cluster object while checking if control plane is initialized")
 			return false
@@ -500,8 +501,8 @@ func (r clusterReconciler) isControlPlaneInitialized(clusterCtx *capvcontext.Clu
 	return conditions.IsTrue(clusterCtx.Cluster, clusterv1.ControlPlaneInitializedCondition)
 }
-func setOwnerRefsOnVsphereMachines(clusterCtx *capvcontext.ClusterContext) error {
-	vsphereMachines, err := infrautilv1.GetVSphereMachinesInCluster(clusterCtx, clusterCtx.Client, clusterCtx.Cluster.Namespace, clusterCtx.Cluster.Name)
+func setOwnerRefsOnVsphereMachines(ctx context.Context, clusterCtx *capvcontext.ClusterContext) error {
+	vsphereMachines, err := infrautilv1.GetVSphereMachinesInCluster(ctx, clusterCtx.Client, clusterCtx.Cluster.Namespace, clusterCtx.Cluster.Name)
 	if err != nil {
 		return errors.Wrapf(err,
 			"unable to list VSphereMachines part of VSphereCluster %s/%s", clusterCtx.VSphereCluster.Namespace, clusterCtx.VSphereCluster.Name)
@@ -524,13 +525,14 @@ func setOwnerRefsOnVsphereMachines(clusterCtx *capvcontext.ClusterContext) error
 			UID:        clusterCtx.VSphereCluster.UID,
 		}))
-		if err := patchHelper.Patch(clusterCtx, vsphereMachine); err != nil {
+		if err := patchHelper.Patch(ctx, vsphereMachine); err != nil {
 			patchErrors = append(patchErrors, err)
 		}
 	}
 	return kerrors.NewAggregate(patchErrors)
 }
+// TODO: zhanggbj: Add golang context after refactoring ClusterModule controller and reconciler.
 func (r clusterReconciler) reconcileClusterModules(clusterCtx *capvcontext.ClusterContext) (reconcile.Result, error) {
 	if feature.Gates.Enabled(feature.NodeAntiAffinity) {
 		return r.clusterModuleReconciler.Reconcile(clusterCtx)
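Within the reconciler the rule is the same everywhere: the ctx passed to Reconcile, or a single explicitly created background context in the polling goroutine, is what flows into client calls and helpers, rather than the ClusterContext doubling as a context.Context. A small self-contained sketch of both pieces, using stand-in types rather than the CAPV ones:

```go
package example

import (
	"context"
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

type exampleReconciler struct {
	Client client.Client
}

// Reconcile uses the request-scoped ctx for every API call instead of reading
// a context off the receiver or a custom context struct.
func (r exampleReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	obj := &corev1.ConfigMap{}
	if err := r.Client.Get(ctx, req.NamespacedName, obj); err != nil {
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}
	return ctrl.Result{}, nil
}

// pollInBackground mirrors the goroutine change above: one explicit context is
// created for the goroutine and shared by the poll loop and its condition.
func (r exampleReconciler) pollInBackground(key client.ObjectKey) {
	go func() {
		ctx := context.Background()
		_ = wait.PollUntilContextCancel(ctx, time.Second, true, func(_ context.Context) (bool, error) {
			return r.isReady(ctx, key), nil
		})
	}()
}

func (r exampleReconciler) isReady(ctx context.Context, key client.ObjectKey) bool {
	return r.Client.Get(ctx, key, &corev1.ConfigMap{}) == nil
}
```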
diff --git a/controllers/vspherecluster_reconciler_test.go b/controllers/vspherecluster_reconciler_test.go
index 30cda5f543..f1e050320e 100644
--- a/controllers/vspherecluster_reconciler_test.go
+++ b/controllers/vspherecluster_reconciler_test.go
@@ -700,14 +700,14 @@ func TestClusterReconciler_ReconcileDeploymentZones(t *testing.T) {
 			t.Run(tt.name, func(t *testing.T) {
 				g := NewWithT(t)
 				controllerCtx := fake.NewControllerContext(fake.NewControllerManagerContext(tt.initObjs...))
-				ctx := fake.NewClusterContext(controllerCtx)
-				ctx.VSphereCluster.Spec.Server = server
+				clusterCtx := fake.NewClusterContext(controllerCtx)
+				clusterCtx.VSphereCluster.Spec.Server = server
 				r := clusterReconciler{ControllerContext: controllerCtx}
-				reconciled, err := r.reconcileDeploymentZones(ctx)
+				reconciled, err := r.reconcileDeploymentZones(ctx, clusterCtx)
 				g.Expect(err).NotTo(HaveOccurred())
 				g.Expect(reconciled).To(Equal(tt.reconciled))
-				tt.assert(ctx.VSphereCluster)
+				tt.assert(clusterCtx.VSphereCluster)
 			})
 		}
 	})
@@ -769,15 +769,15 @@ func TestClusterReconciler_ReconcileDeploymentZones(t *testing.T) {
 			t.Run(tt.name, func(t *testing.T) {
 				g := NewWithT(t)
 				controllerCtx := fake.NewControllerContext(fake.NewControllerManagerContext(tt.initObjs...))
-				ctx := fake.NewClusterContext(controllerCtx)
-				ctx.VSphereCluster.Spec.Server = server
-				ctx.VSphereCluster.Spec.FailureDomainSelector = &metav1.LabelSelector{MatchLabels: map[string]string{}}
+				clusterCtx := fake.NewClusterContext(controllerCtx)
+				clusterCtx.VSphereCluster.Spec.Server = server
+				clusterCtx.VSphereCluster.Spec.FailureDomainSelector = &metav1.LabelSelector{MatchLabels: map[string]string{}}
 				r := clusterReconciler{ControllerContext: controllerCtx}
-				reconciled, err := r.reconcileDeploymentZones(ctx)
+				reconciled, err := r.reconcileDeploymentZones(ctx, clusterCtx)
 				g.Expect(err).NotTo(HaveOccurred())
 				g.Expect(reconciled).To(Equal(tt.reconciled))
-				tt.assert(ctx.VSphereCluster)
+				tt.assert(clusterCtx.VSphereCluster)
 			})
 		}
 	})
@@ -802,14 +802,14 @@ func TestClusterReconciler_ReconcileDeploymentZones(t *testing.T) {
 	assertNumberOfZones := func(selector *metav1.LabelSelector, selectedZones int) {
 		controllerCtx := fake.NewControllerContext(fake.NewControllerManagerContext(zoneOne, zoneTwo, zoneThree))
-		ctx := fake.NewClusterContext(controllerCtx)
-		ctx.VSphereCluster.Spec.Server = server
-		ctx.VSphereCluster.Spec.FailureDomainSelector = selector
+		clusterCtx := fake.NewClusterContext(controllerCtx)
+		clusterCtx.VSphereCluster.Spec.Server = server
+		clusterCtx.VSphereCluster.Spec.FailureDomainSelector = selector
 		r := clusterReconciler{ControllerContext: controllerCtx}
-		_, err := r.reconcileDeploymentZones(ctx)
+		_, err := r.reconcileDeploymentZones(ctx, clusterCtx)
 		g.Expect(err).NotTo(HaveOccurred())
-		g.Expect(ctx.VSphereCluster.Status.FailureDomains).To(HaveLen(selectedZones))
+		g.Expect(clusterCtx.VSphereCluster.Status.FailureDomains).To(HaveLen(selectedZones))
 	}
 	t.Run("with no zones matching labels", func(_ *testing.T) {
diff --git a/main.go b/main.go
index 660653d4ae..91fe11cb52 100644
--- a/main.go
+++ b/main.go
@@ -17,6 +17,7 @@ limitations under the License.
 package main
 import (
+	"context"
 	"errors"
 	"flag"
 	"fmt"
@@ -43,7 +44,6 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
 	ctrlmgr "sigs.k8s.io/controller-runtime/pkg/manager"
-	ctrlsig "sigs.k8s.io/controller-runtime/pkg/manager/signals"
 	"sigs.k8s.io/controller-runtime/pkg/webhook"
 	infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1"
@@ -256,7 +256,7 @@ func main() {
 	managerOpts.RetryPeriod = &leaderElectionRetryPeriod
 	// Create a function that adds all the controllers and webhooks to the manager.
-	addToManager := func(controllerCtx *capvcontext.ControllerManagerContext, mgr ctrlmgr.Manager) error {
+	addToManager := func(ctx context.Context, controllerCtx *capvcontext.ControllerManagerContext, mgr ctrlmgr.Manager) error {
 		tracker, err := setupRemoteClusterCacheTracker(controllerCtx, mgr)
 		if err != nil {
 			return perrors.Wrapf(err, "unable to create remote cluster cache tracker")
@@ -269,7 +269,7 @@ func main() {
 			return err
 		}
 		if isLoaded {
-			if err := setupVAPIControllers(controllerCtx, mgr, tracker); err != nil {
+			if err := setupVAPIControllers(ctx, controllerCtx, mgr, tracker); err != nil {
 				return fmt.Errorf("setupVAPIControllers: %w", err)
 			}
 		} else {
@@ -283,7 +283,7 @@ func main() {
 			return err
 		}
 		if isLoaded {
-			if err := setupSupervisorControllers(controllerCtx, mgr, tracker); err != nil {
+			if err := setupSupervisorControllers(ctx, controllerCtx, mgr, tracker); err != nil {
 				return fmt.Errorf("setupSupervisorControllers: %w", err)
 			}
 		} else {
@@ -303,7 +303,11 @@ func main() {
 	setupLog.Info("creating controller manager", "version", version.Get().String())
 	managerOpts.AddToManager = addToManager
-	mgr, err := manager.New(managerOpts)
+
+	// Set up the context that's going to be used in controllers and for the manager.
+	ctx := ctrl.SetupSignalHandler()
+
+	mgr, err := manager.New(ctx, managerOpts)
 	if err != nil {
 		setupLog.Error(err, "problem creating controller manager")
 		os.Exit(1)
@@ -311,9 +315,8 @@ func main() {
 	setupChecks(mgr)
-	sigHandler := ctrlsig.SetupSignalHandler()
 	setupLog.Info("starting controller manager")
-	if err := mgr.Start(sigHandler); err != nil {
+	if err := mgr.Start(ctx); err != nil {
 		setupLog.Error(err, "problem running controller manager")
 		os.Exit(1)
 	}
@@ -330,7 +333,7 @@ func main() {
 		defer session.Clear()
 	}
-func setupVAPIControllers(controllerCtx *capvcontext.ControllerManagerContext, mgr ctrlmgr.Manager, tracker *remote.ClusterCacheTracker) error {
+func setupVAPIControllers(ctx context.Context, controllerCtx *capvcontext.ControllerManagerContext, mgr ctrlmgr.Manager, tracker *remote.ClusterCacheTracker) error {
 	if err := (&webhooks.VSphereClusterTemplateWebhook{}).SetupWebhookWithManager(mgr); err != nil {
 		return err
 	}
@@ -355,7 +358,7 @@ func setupVAPIControllers(controllerCtx *capvcontext.ControllerManagerContext, m
 		return err
 	}
-	if err := controllers.AddClusterControllerToManager(controllerCtx, mgr, &infrav1.VSphereCluster{}, concurrency(vSphereClusterConcurrency)); err != nil {
+	if err := controllers.AddClusterControllerToManager(ctx, controllerCtx, mgr, &infrav1.VSphereCluster{}, concurrency(vSphereClusterConcurrency)); err != nil {
 		return err
 	}
 	if err := controllers.AddMachineControllerToManager(controllerCtx, mgr, &infrav1.VSphereMachine{}, concurrency(vSphereMachineConcurrency)); err != nil {
@@ -371,8 +374,8 @@ func setupVAPIControllers(controllerCtx *capvcontext.ControllerManagerContext, m
 	return controllers.AddVSphereDeploymentZoneControllerToManager(controllerCtx, mgr, concurrency(vSphereDeploymentZoneConcurrency))
 }
-func setupSupervisorControllers(controllerCtx *capvcontext.ControllerManagerContext, mgr ctrlmgr.Manager, tracker *remote.ClusterCacheTracker) error {
-	if err := controllers.AddClusterControllerToManager(controllerCtx, mgr, &vmwarev1.VSphereCluster{}, concurrency(vSphereClusterConcurrency)); err != nil {
+func setupSupervisorControllers(ctx context.Context, controllerCtx *capvcontext.ControllerManagerContext, mgr ctrlmgr.Manager, tracker *remote.ClusterCacheTracker) error {
+	if err := controllers.AddClusterControllerToManager(ctx, controllerCtx, mgr, &vmwarev1.VSphereCluster{}, concurrency(vSphereClusterConcurrency)); err != nil {
 		return err
 	}
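In main.go the separate ctrlsig.SetupSignalHandler() call disappears: one signal-handler context is created up front, passed to manager.New (which forwards it to the AddToManager callback), and reused to stop the manager. A stripped-down sketch of that wiring against plain controller-runtime, leaving out the CAPV-specific options and context:

```go
package main

import (
	"os"

	ctrl "sigs.k8s.io/controller-runtime"
	ctrlmgr "sigs.k8s.io/controller-runtime/pkg/manager"
)

func main() {
	// One context, cancelled on SIGINT/SIGTERM, drives both setup and shutdown.
	ctx := ctrl.SetupSignalHandler()

	mgr, err := ctrlmgr.New(ctrl.GetConfigOrDie(), ctrlmgr.Options{})
	if err != nil {
		os.Exit(1)
	}

	// In the PR the same ctx is first handed to the CAPV manager.New(ctx, managerOpts),
	// which passes it to the AddToManager callback; here only Start is shown.
	if err := mgr.Start(ctx); err != nil {
		os.Exit(1)
	}
}
```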
diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go
index f449b1f18b..0a674a4185 100644
--- a/pkg/manager/manager.go
+++ b/pkg/manager/manager.go
@@ -51,7 +51,7 @@ type Manager interface {
 }
 // New returns a new CAPV controller manager.
-func New(opts Options) (Manager, error) {
+func New(ctx context.Context, opts Options) (Manager, error) {
 	// Ensure the default options are set.
 	opts.defaults()
@@ -101,7 +101,7 @@ func New(opts Options) (Manager, error) {
 	}
 	// Add the requested items to the manager.
-	if err := opts.AddToManager(controllerManagerContext, mgr); err != nil {
+	if err := opts.AddToManager(ctx, controllerManagerContext, mgr); err != nil {
 		return nil, errors.Wrap(err, "failed to add resources to the manager")
 	}
@@ -124,7 +124,7 @@ func UpdateCredentials(opts *Options) {
 	opts.readAndSetCredentials()
 }
-// InitializeWatch adds a filesystem watcher for the capv credentials file
+// InitializeWatch adds a filesystem watcher for the capv credentials file.
 // In case of any update to the credentials file, the new credentials are passed to the capv manager context.
 func InitializeWatch(controllerCtx *capvcontext.ControllerManagerContext, managerOpts *Options) (watch *fsnotify.Watcher, err error) {
 	capvCredentialsFile := managerOpts.CredentialsFile
diff --git a/pkg/manager/network.go b/pkg/manager/network.go
index 99f817cbd0..829e2627af 100644
--- a/pkg/manager/network.go
+++ b/pkg/manager/network.go
@@ -30,20 +30,20 @@ const (
 // GetNetworkProvider will return a network provider instance based on the environment
 // the cfg is used to initialize a client that talks directly to api-server without using the cache.
-func GetNetworkProvider(ctx *context.ControllerManagerContext) (services.NetworkProvider, error) {
-	switch ctx.NetworkProvider {
+func GetNetworkProvider(controllerCtx *context.ControllerManagerContext) (services.NetworkProvider, error) {
+	switch controllerCtx.NetworkProvider {
 	case NSXNetworkProvider:
 		// TODO: disableFirewall not configurable
-		ctx.Logger.Info("Pick NSX-T network provider")
-		return network.NsxtNetworkProvider(ctx.Client, "false"), nil
+		controllerCtx.Logger.Info("Pick NSX-T network provider")
+		return network.NsxtNetworkProvider(controllerCtx.Client, "false"), nil
 	case VDSNetworkProvider:
-		ctx.Logger.Info("Pick NetOp (VDS) network provider")
-		return network.NetOpNetworkProvider(ctx.Client), nil
+		controllerCtx.Logger.Info("Pick NetOp (VDS) network provider")
+		return network.NetOpNetworkProvider(controllerCtx.Client), nil
 	case DummyLBNetworkProvider:
-		ctx.Logger.Info("Pick Dummy network provider")
+		controllerCtx.Logger.Info("Pick Dummy network provider")
 		return network.DummyLBNetworkProvider(), nil
 	default:
-		ctx.Logger.Info("NetworkProvider not set. Pick Dummy network provider")
+		controllerCtx.Logger.Info("NetworkProvider not set. Pick Dummy network provider")
 		return network.DummyNetworkProvider(), nil
 	}
 }
diff --git a/pkg/manager/options.go b/pkg/manager/options.go
index 498b18224e..f44abdcf2b 100644
--- a/pkg/manager/options.go
+++ b/pkg/manager/options.go
@@ -17,6 +17,7 @@ limitations under the License.
 package manager
 import (
+	"context"
 	"os"
 	"strings"
 	"time"
@@ -28,13 +29,13 @@ import (
 	ctrlmgr "sigs.k8s.io/controller-runtime/pkg/manager"
 	"sigs.k8s.io/yaml"
-	"sigs.k8s.io/cluster-api-provider-vsphere/pkg/context"
+	capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context"
 )
 // AddToManagerFunc is a function that can be optionally specified with
 // the manager's Options in order to explicitly decide what controllers and
 // webhooks to add to the manager.
-type AddToManagerFunc func(*context.ControllerManagerContext, ctrlmgr.Manager) error
+type AddToManagerFunc func(context.Context, *capvcontext.ControllerManagerContext, ctrlmgr.Manager) error
 // Options describes the options used to create a new CAPV manager.
 type Options struct {
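The pkg/manager changes adjust the plumbing so the context can reach that callback: New accepts a context.Context, AddToManagerFunc gains it as its first parameter, and the pkg/context import is aliased to capvcontext so it no longer collides with the standard context package. A hedged sketch of the callback shape, simplified to omit the ControllerManagerContext parameter the real type also carries:

```go
package example

import (
	"context"

	ctrlmgr "sigs.k8s.io/controller-runtime/pkg/manager"
)

// addToManagerFunc mimics the updated AddToManagerFunc shape: the context comes
// first, followed by the manager (the CAPV type also receives a
// *capvcontext.ControllerManagerContext between the two).
type addToManagerFunc func(context.Context, ctrlmgr.Manager) error

// newAddToManager returns a callback; the ctx it receives is the one created
// in main and should be forwarded to every controller-setup helper.
func newAddToManager() addToManagerFunc {
	return func(ctx context.Context, mgr ctrlmgr.Manager) error {
		_ = ctx // forward to Add*ControllerToManager-style helpers here
		_ = mgr
		return nil
	}
}
```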
diff --git a/test/helpers/envtest.go b/test/helpers/envtest.go
index 52e30db003..a65c4617bf 100644
--- a/test/helpers/envtest.go
+++ b/test/helpers/envtest.go
@@ -112,7 +112,7 @@ type (
 )
 // NewTestEnvironment creates a new environment spinning up a local api-server.
-func NewTestEnvironment() *TestEnvironment {
+func NewTestEnvironment(ctx context.Context) *TestEnvironment {
 	// initialize webhook here to be able to test the envtest install via webhookOptions
 	initializeWebhookInEnvironment()
@@ -141,7 +141,7 @@ func NewTestEnvironment() *TestEnvironment {
 		Username:  simr.Username(),
 		Password:  simr.Password(),
 	}
-	managerOpts.AddToManager = func(controllerCtx *capvcontext.ControllerManagerContext, mgr ctrlmgr.Manager) error {
+	managerOpts.AddToManager = func(ctx context.Context, controllerCtx *capvcontext.ControllerManagerContext, mgr ctrlmgr.Manager) error {
 		if err := (&webhooks.VSphereClusterTemplateWebhook{}).SetupWebhookWithManager(mgr); err != nil {
 			return err
 		}
@@ -165,7 +165,7 @@ func NewTestEnvironment() *TestEnvironment {
 		return (&webhooks.VSphereFailureDomainWebhook{}).SetupWebhookWithManager(mgr)
 	}
-	mgr, err := manager.New(managerOpts)
+	mgr, err := manager.New(ctx, managerOpts)
 	if err != nil {
 		klog.Fatalf("failed to create the CAPV controller manager: %v", err)
 	}
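Finally, NewTestEnvironment requires the context up front so the manager it builds, and the AddToManager callback it installs, are constructed against it. A sketch of how a test suite might call the updated helper, assuming a package-level ctx and omitting the manager start/stop and webhook configuration a real suite performs:

```go
package controllers_test

import (
	"context"
	"os"
	"testing"

	"sigs.k8s.io/cluster-api-provider-vsphere/test/helpers"
)

var (
	// ctx is shared by the test environment and the controllers added to it.
	ctx     = context.Background()
	testEnv *helpers.TestEnvironment
)

func TestMain(m *testing.M) {
	// The helper now receives the ctx it should use when constructing the manager.
	testEnv = helpers.NewTestEnvironment(ctx)
	code := m.Run()
	os.Exit(code)
}
```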