From 0a51d7c63c32fef2345306c8d96b59ee9120390b Mon Sep 17 00:00:00 2001
From: Christian Schlotter
Date: Tue, 8 Aug 2023 15:14:00 +0200
Subject: [PATCH] clustermodules: prevent creation of new modules if DoesExist
 returns an error

---
 controllers/clustermodule_reconciler.go      |  14 +-
 controllers/clustermodule_reconciler_test.go | 148 +++++++++++++++++++
 2 files changed, 161 insertions(+), 1 deletion(-)

diff --git a/controllers/clustermodule_reconciler.go b/controllers/clustermodule_reconciler.go
index afe6d5d013..f623224fbf 100644
--- a/controllers/clustermodule_reconciler.go
+++ b/controllers/clustermodule_reconciler.go
@@ -74,6 +74,8 @@ func (r Reconciler) Reconcile(ctx *context.ClusterContext) (reconcile.Result, er
 		return reconcile.Result{}, err
 	}
 
+	modErrs := []clusterModError{}
+
 	clusterModuleSpecs := []infrav1.ClusterModule{}
 	for _, mod := range ctx.VSphereCluster.Spec.ClusterModules {
 		curr := mod.TargetObjectName
@@ -92,9 +94,20 @@ func (r Reconciler) Reconcile(ctx *context.ClusterContext) (reconcile.Result, er
 			// verify the cluster module
 			exists, err := r.ClusterModuleService.DoesExist(ctx, obj, mod.ModuleUUID)
 			if err != nil {
+				// Add the error to modErrs so it gets handled below.
+				modErrs = append(modErrs, clusterModError{obj.GetName(), errors.Wrapf(err, "failed to verify cluster module %q", mod.ModuleUUID)})
 				ctx.Logger.Error(err, "failed to verify cluster module for object",
 					"name", mod.TargetObjectName, "moduleUUID", mod.ModuleUUID)
+				// Append the module and remove it from objectMap to not create new ones instead.
+				clusterModuleSpecs = append(clusterModuleSpecs, infrav1.ClusterModule{
+					ControlPlane:     obj.IsControlPlane(),
+					TargetObjectName: obj.GetName(),
+					ModuleUUID:       mod.ModuleUUID,
+				})
+				delete(objectMap, curr)
+				continue
 			}
+
 			// append the module and object info to the VSphereCluster object
 			// and remove it from the object map since no new cluster module
 			// needs to be created.
@@ -113,7 +126,6 @@ func (r Reconciler) Reconcile(ctx *context.ClusterContext) (reconcile.Result, er
 		}
 	}
 
-	modErrs := []clusterModError{}
 	for _, obj := range objectMap {
 		moduleUUID, err := r.ClusterModuleService.Create(ctx, obj)
 		if err != nil {
diff --git a/controllers/clustermodule_reconciler_test.go b/controllers/clustermodule_reconciler_test.go
index e77c39bfa0..b83b5f62eb 100644
--- a/controllers/clustermodule_reconciler_test.go
+++ b/controllers/clustermodule_reconciler_test.go
@@ -41,6 +41,7 @@ func TestReconciler_Reconcile(t *testing.T) {
 	kcpUUID, mdUUID := uuid.New().String(), uuid.New().String()
 	kcp := controlPlane("kcp", metav1.NamespaceDefault, fake.Clusterv1a2Name)
 	md := machineDeployment("md", metav1.NamespaceDefault, fake.Clusterv1a2Name)
+	vCenter500err := errors.New("500 Internal Server Error")
 
 	tests := []struct {
 		name           string
@@ -92,6 +93,153 @@ func TestReconciler_Reconcile(t *testing.T) {
 				g.Expect(moduleUUIDs).To(gomega.ConsistOf(kcpUUID, mdUUID))
 			},
 		},
+		{
+			name: "when one cluster module exists",
+			clusterModules: []infrav1.ClusterModule{
+				{
+					ControlPlane:     true,
+					TargetObjectName: "kcp",
+					ModuleUUID:       kcpUUID,
+				},
+			},
+			setupMocks: func(svc *cmodfake.CMService) {
+				svc.On("DoesExist", mock.Anything, mock.Anything, kcpUUID).Return(true, nil)
+				svc.On("Create", mock.Anything, clustermodule.NewWrapper(md)).Return(mdUUID, nil)
+			},
+			customAssert: func(g *gomega.WithT, ctx *context.ClusterContext) {
+				g.Expect(ctx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(2))
+				var (
+					names, moduleUUIDs []string
+				)
+				for _, mod := range ctx.VSphereCluster.Spec.ClusterModules {
+					names = append(names, mod.TargetObjectName)
+					moduleUUIDs = append(moduleUUIDs, mod.ModuleUUID)
+				}
+				g.Expect(names).To(gomega.ConsistOf("kcp", "md"))
+				g.Expect(moduleUUIDs).To(gomega.ConsistOf(kcpUUID, mdUUID))
+			},
+		},
+		{
+			name: "when cluster modules do not exist anymore",
+			clusterModules: []infrav1.ClusterModule{
+				{
+					ControlPlane:     true,
+					TargetObjectName: "kcp",
+					ModuleUUID:       kcpUUID,
+				},
+				{
+					ControlPlane:     false,
+					TargetObjectName: "md",
+					ModuleUUID:       mdUUID,
+				},
+			},
+			setupMocks: func(svc *cmodfake.CMService) {
+				svc.On("DoesExist", mock.Anything, mock.Anything, kcpUUID).Return(false, nil)
+				svc.On("DoesExist", mock.Anything, mock.Anything, mdUUID).Return(false, nil)
+				svc.On("Create", mock.Anything, clustermodule.NewWrapper(kcp)).Return(kcpUUID+"a", nil)
+				svc.On("Create", mock.Anything, clustermodule.NewWrapper(md)).Return(mdUUID+"a", nil)
+			},
+			customAssert: func(g *gomega.WithT, ctx *context.ClusterContext) {
+				g.Expect(ctx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(2))
+				// Ensure the new modules exist.
+				g.Expect(ctx.VSphereCluster.Spec.ClusterModules[0].ModuleUUID).To(gomega.BeElementOf(kcpUUID+"a", mdUUID+"a"))
+				g.Expect(ctx.VSphereCluster.Spec.ClusterModules[1].ModuleUUID).To(gomega.BeElementOf(kcpUUID+"a", mdUUID+"a"))
+				// Check that the condition got set.
+				g.Expect(conditions.Has(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue())
+				g.Expect(conditions.IsTrue(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue())
+			},
+		},
+		{
+			name: "when cluster modules already exist but vCenter returns an error",
+			clusterModules: []infrav1.ClusterModule{
+				{
+					ControlPlane:     true,
+					TargetObjectName: "kcp",
+					ModuleUUID:       kcpUUID,
+				},
+				{
+					ControlPlane:     false,
+					TargetObjectName: "md",
+					ModuleUUID:       mdUUID,
+				},
+			},
+			haveError: true,
+			setupMocks: func(svc *cmodfake.CMService) {
+				svc.On("DoesExist", mock.Anything, mock.Anything, kcpUUID).Return(false, vCenter500err)
+				svc.On("DoesExist", mock.Anything, mock.Anything, mdUUID).Return(false, vCenter500err)
+			},
+			customAssert: func(g *gomega.WithT, ctx *context.ClusterContext) {
+				g.Expect(ctx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(2))
+				// Ensure the old modules still exist.
+				g.Expect(ctx.VSphereCluster.Spec.ClusterModules[0].ModuleUUID).To(gomega.BeElementOf(kcpUUID, mdUUID))
+				g.Expect(ctx.VSphereCluster.Spec.ClusterModules[1].ModuleUUID).To(gomega.BeElementOf(kcpUUID, mdUUID))
+				// Check that the condition got set.
+				g.Expect(conditions.Has(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue())
+				g.Expect(conditions.IsFalse(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue())
+				g.Expect(conditions.Get(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition).Message).To(gomega.ContainSubstring(vCenter500err.Error()))
+			},
+		},
+		{
+			name: "when one cluster module exists and for the other we get an error",
+			clusterModules: []infrav1.ClusterModule{
+				{
+					ControlPlane:     true,
+					TargetObjectName: "kcp",
+					ModuleUUID:       kcpUUID,
+				},
+				{
+					ControlPlane:     false,
+					TargetObjectName: "md",
+					ModuleUUID:       mdUUID,
+				},
+			},
+			haveError: true,
+			setupMocks: func(svc *cmodfake.CMService) {
+				svc.On("DoesExist", mock.Anything, mock.Anything, kcpUUID).Return(true, nil)
+				svc.On("DoesExist", mock.Anything, mock.Anything, mdUUID).Return(false, vCenter500err)
+			},
+			customAssert: func(g *gomega.WithT, ctx *context.ClusterContext) {
+				g.Expect(ctx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(2))
+				// Ensure the old modules still exist.
+				g.Expect(ctx.VSphereCluster.Spec.ClusterModules[0].ModuleUUID).To(gomega.BeElementOf(kcpUUID, mdUUID))
+				g.Expect(ctx.VSphereCluster.Spec.ClusterModules[1].ModuleUUID).To(gomega.BeElementOf(kcpUUID, mdUUID))
+				// Check that the condition got set.
+				g.Expect(conditions.Has(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue())
+				g.Expect(conditions.IsFalse(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue())
+				g.Expect(conditions.Get(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition).Message).To(gomega.ContainSubstring(vCenter500err.Error()))
+			},
+		},
+		{
+			name: "when one cluster module does not exist and for the other we get an error",
+			clusterModules: []infrav1.ClusterModule{
+				{
+					ControlPlane:     true,
+					TargetObjectName: "kcp",
+					ModuleUUID:       kcpUUID,
+				},
+				{
+					ControlPlane:     false,
+					TargetObjectName: "md",
+					ModuleUUID:       mdUUID,
+				},
+			},
+			haveError: true,
+			setupMocks: func(svc *cmodfake.CMService) {
+				svc.On("DoesExist", mock.Anything, mock.Anything, kcpUUID).Return(false, nil)
+				svc.On("DoesExist", mock.Anything, mock.Anything, mdUUID).Return(false, vCenter500err)
+				svc.On("Create", mock.Anything, clustermodule.NewWrapper(kcp)).Return(kcpUUID+"a", nil)
+			},
+			customAssert: func(g *gomega.WithT, ctx *context.ClusterContext) {
+				g.Expect(ctx.VSphereCluster.Spec.ClusterModules).To(gomega.HaveLen(2))
+				// Ensure the errored module and the new module exist.
+				g.Expect(ctx.VSphereCluster.Spec.ClusterModules[0].ModuleUUID).To(gomega.BeElementOf(kcpUUID+"a", mdUUID))
+				g.Expect(ctx.VSphereCluster.Spec.ClusterModules[1].ModuleUUID).To(gomega.BeElementOf(kcpUUID+"a", mdUUID))
+				// Check that the condition got set.
+				g.Expect(conditions.Has(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue())
+				g.Expect(conditions.IsFalse(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue())
+				g.Expect(conditions.Get(ctx.VSphereCluster, infrav1.ClusterModulesAvailableCondition).Message).To(gomega.ContainSubstring(vCenter500err.Error()))
+			},
+		},
 		{
 			name:           "when cluster module creation is called for a resource pool owned by non compute cluster resource",
 			clusterModules: []infrav1.ClusterModule{},