Improve logging for VSphereCluster controllers
Signed-off-by: Stefan Büringer [email protected]
sbueringer committed Sep 12, 2023
1 parent 62877a6 commit 928f5a5
Showing 24 changed files with 248 additions and 245 deletions.
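
The change repeated across these files is the move from loggers stored on the context structs (clusterCtx.Logger, r.Logger) to the logger carried in the request context via controller-runtime's ctrl.LoggerFrom, with related objects attached as structured values through klog.KObj and klog.KRef. The sketch below only illustrates that pattern and is not code from the commit; the helper function, its messages, and its control flow are hypothetical.

package main

import (
	"context"

	"k8s.io/klog/v2"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// reconcileAntiAffinity is a hypothetical helper showing the logging style
// adopted in this commit: the logger is taken from the request context
// instead of from a Logger field on the reconciler or cluster context.
func reconcileAntiAffinity(ctx context.Context, cluster *clusterv1.Cluster) (reconcile.Result, error) {
	// Logger injected by controller-runtime for this reconcile request.
	log := ctrl.LoggerFrom(ctx)

	// Attach the owning Cluster as one structured key/value pair instead of
	// separate "namespace"/"name" keys.
	log = log.WithValues("Cluster", klog.KObj(cluster))

	log.Info("Reconcile anti affinity setup")

	if cluster.Spec.InfrastructureRef == nil {
		log.Info("Cluster infrastructureRef not set, skipping")
		return reconcile.Result{}, nil
	}

	// Reference a related object by namespace/name without fetching it.
	log.Info("Found infrastructure cluster",
		"VSphereCluster", klog.KRef(cluster.Namespace, cluster.Spec.InfrastructureRef.Name))

	return reconcile.Result{}, nil
}

func main() {
	// Example invocation with a minimal in-memory Cluster object.
	c := &clusterv1.Cluster{}
	c.Name = "example"
	c.Namespace = "default"
	_, _ = reconcileAntiAffinity(context.Background(), c)
}

Compared with a logger field on the context struct, this keeps the logger flowing with the request, so values added once (for example the Cluster reference) appear on every log line emitted further down the call chain.
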
40 changes: 24 additions & 16 deletions controllers/clustermodule_reconciler.go
@@ -23,10 +23,12 @@ import (

"github.com/pkg/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/klog/v2"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/conditions"
+ ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/event"
@@ -47,24 +49,26 @@ import (
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=vspheremachinetemplates,verbs=get;list;watch

type Reconciler struct {
- *capvcontext.ControllerContext
+ *capvcontext.ControllerManagerContext

ClusterModuleService clustermodule.Service
}

- func NewReconciler(controllerCtx *capvcontext.ControllerContext) Reconciler {
+ func NewReconciler(controllerCtx *capvcontext.ControllerManagerContext) Reconciler {
return Reconciler{
- ControllerContext: controllerCtx,
- ClusterModuleService: clustermodule.NewService(),
+ ControllerManagerContext: controllerCtx,
+ ClusterModuleService: clustermodule.NewService(),
}
}

- func (r Reconciler) Reconcile(clusterCtx *capvcontext.ClusterContext) (reconcile.Result, error) {
- clusterCtx.Logger.Info("reconcile anti affinity setup")
+ func (r Reconciler) Reconcile(ctx context.Context, clusterCtx *capvcontext.ClusterContext) (reconcile.Result, error) {
+ log := ctrl.LoggerFrom(ctx)

+ log.Info("Reconcile anti affinity setup")
if !clustermodule.IsClusterCompatible(clusterCtx) {
conditions.MarkFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition, infrav1.VCenterVersionIncompatibleReason, clusterv1.ConditionSeverityInfo,
"vCenter API version %s is not compatible with cluster modules", clusterCtx.VSphereCluster.Status.VCenterVersion)
- clusterCtx.Logger.Info("cluster is not compatible for anti affinity",
+ log.Info("Cluster is not compatible for anti affinity",
"api version", clusterCtx.VSphereCluster.Status.VCenterVersion)
return reconcile.Result{}, nil
}
@@ -86,7 +90,7 @@ func (r Reconciler) Reconcile(clusterCtx *capvcontext.ClusterContext) (reconcile
// delete the cluster module as the object is marked for deletion
// or already deleted.
if err := r.ClusterModuleService.Remove(clusterCtx, mod.ModuleUUID); err != nil {
- clusterCtx.Logger.Error(err, "failed to delete cluster module for object",
+ log.Error(err, "failed to delete cluster module for object",
"name", mod.TargetObjectName, "moduleUUID", mod.ModuleUUID)
}
delete(objectMap, curr)
@@ -96,7 +100,7 @@ func (r Reconciler) Reconcile(clusterCtx *capvcontext.ClusterContext) (reconcile
if err != nil {
// Add the error to modErrs so it gets handled below.
modErrs = append(modErrs, clusterModError{obj.GetName(), errors.Wrapf(err, "failed to verify cluster module %q", mod.ModuleUUID)})
- clusterCtx.Logger.Error(err, "failed to verify cluster module for object",
+ log.Error(err, "failed to verify cluster module for object",
"name", mod.TargetObjectName, "moduleUUID", mod.ModuleUUID)
// Append the module and remove it from objectMap to not create new ones instead.
clusterModuleSpecs = append(clusterModuleSpecs, infrav1.ClusterModule{
@@ -119,7 +123,7 @@ func (r Reconciler) Reconcile(clusterCtx *capvcontext.ClusterContext) (reconcile
})
delete(objectMap, curr)
} else {
- clusterCtx.Logger.Info("module for object not found",
+ log.Info("module for object not found",
"moduleUUID", mod.ModuleUUID,
"object", mod.TargetObjectName)
}
@@ -129,7 +133,7 @@ func (r Reconciler) Reconcile(clusterCtx *capvcontext.ClusterContext) (reconcile
for _, obj := range objectMap {
moduleUUID, err := r.ClusterModuleService.Create(clusterCtx, obj)
if err != nil {
- clusterCtx.Logger.Error(err, "failed to create cluster module for target object", "name", obj.GetName())
+ log.Error(err, "failed to create cluster module for target object", "name", obj.GetName())
modErrs = append(modErrs, clusterModError{obj.GetName(), err})
continue
}
@@ -166,28 +170,32 @@ func (r Reconciler) Reconcile(clusterCtx *capvcontext.ClusterContext) (reconcile
}

func (r Reconciler) toAffinityInput(ctx context.Context, obj client.Object) []reconcile.Request {
+ log := ctrl.LoggerFrom(ctx)

cluster, err := util.GetClusterFromMetadata(ctx, r.Client, metav1.ObjectMeta{
Namespace: obj.GetNamespace(),
Labels: obj.GetLabels(),
OwnerReferences: obj.GetOwnerReferences(),
})
if err != nil {
- r.Logger.Error(err, "failed to get owner cluster")
+ log.Error(err, "failed to get owner cluster")
return nil
}
+ log.WithValues("Cluster", klog.KObj(cluster))

if cluster.Spec.InfrastructureRef == nil {
- r.Logger.Error(err, "cluster infrastructureRef not set. Requeing",
- "namespace", cluster.Namespace, "cluster_name", cluster.Name)
+ log.Error(err, "cluster infrastructureRef not set. Requeing")
return nil
}

+ log.WithValues("VSphereCluster", klog.KRef(cluster.Namespace, cluster.Spec.InfrastructureRef.Name))

vsphereCluster := &infrav1.VSphereCluster{}
if err := r.Client.Get(ctx, client.ObjectKey{
Name: cluster.Spec.InfrastructureRef.Name,
Namespace: cluster.Namespace,
}, vsphereCluster); err != nil {
- r.Logger.Error(err, "failed to get vSphereCluster object",
- "namespace", cluster.Namespace, "name", cluster.Spec.InfrastructureRef.Name)
+ log.Error(err, "failed to get vSphereCluster object")
return nil
}

26 changes: 13 additions & 13 deletions controllers/clustermodule_reconciler_test.go
@@ -409,27 +409,27 @@ func TestReconciler_Reconcile(t *testing.T) {
if tt.beforeFn != nil {
tt.beforeFn(md)
}
- controllerCtx := fake.NewControllerContext(fake.NewControllerManagerContext(kcp, md))
- ctx := fake.NewClusterContext(controllerCtx)
- ctx.VSphereCluster.Spec.ClusterModules = tt.clusterModules
- ctx.VSphereCluster.Status = infrav1.VSphereClusterStatus{VCenterVersion: infrav1.NewVCenterVersion("7.0.0")}
+ controllerCtx := fake.NewControllerManagerContext(kcp, md)
+ clusterCtx := fake.NewClusterContext(controllerCtx)
+ clusterCtx.VSphereCluster.Spec.ClusterModules = tt.clusterModules
+ clusterCtx.VSphereCluster.Status = infrav1.VSphereClusterStatus{VCenterVersion: infrav1.NewVCenterVersion("7.0.0")}

svc := new(cmodfake.CMService)
if tt.setupMocks != nil {
tt.setupMocks(svc)
}

r := Reconciler{
- ControllerContext: controllerCtx,
- ClusterModuleService: svc,
+ ControllerManagerContext: controllerCtx,
+ ClusterModuleService: svc,
}
- _, err := r.Reconcile(ctx)
+ _, err := r.Reconcile(ctx, clusterCtx)
if tt.haveError {
g.Expect(err).To(gomega.HaveOccurred())
} else {
g.Expect(err).ToNot(gomega.HaveOccurred())
}
- tt.customAssert(g, ctx)
+ tt.customAssert(g, clusterCtx)

svc.AssertExpectations(t)
})
Expand Down Expand Up @@ -485,9 +485,9 @@ func TestReconciler_fetchMachineOwnerObjects(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := gomega.NewWithT(t)
- controllerCtx := fake.NewControllerContext(fake.NewControllerManagerContext(tt.initObjs...))
+ controllerCtx := fake.NewControllerManagerContext(tt.initObjs...)
ctx := fake.NewClusterContext(controllerCtx)
- r := Reconciler{ControllerContext: controllerCtx}
+ r := Reconciler{ControllerManagerContext: controllerCtx}
objMap, err := r.fetchMachineOwnerObjects(ctx)
if tt.hasError {
g.Expect(err).To(gomega.HaveOccurred())
@@ -506,13 +506,13 @@ func TestReconciler_fetchMachineOwnerObjects(t *testing.T) {
mdToBeDeleted := machineDeployment("foo-1", metav1.NamespaceDefault, fake.Clusterv1a2Name)
mdToBeDeleted.DeletionTimestamp = &currTime
mdToBeDeleted.ObjectMeta.Finalizers = append(mdToBeDeleted.ObjectMeta.Finalizers, "keep-this-for-the-test")
- controllerCtx := fake.NewControllerContext(fake.NewControllerManagerContext(
+ controllerCtx := fake.NewControllerManagerContext(
controlPlane("foo", metav1.NamespaceDefault, fake.Clusterv1a2Name),
machineDeployment("foo", metav1.NamespaceDefault, fake.Clusterv1a2Name),
mdToBeDeleted,
- ))
+ )
ctx := fake.NewClusterContext(controllerCtx)
- objMap, err := Reconciler{ControllerContext: controllerCtx}.fetchMachineOwnerObjects(ctx)
+ objMap, err := Reconciler{ControllerManagerContext: controllerCtx}.fetchMachineOwnerObjects(ctx)
g.Expect(err).NotTo(gomega.HaveOccurred())
g.Expect(objMap).To(gomega.HaveLen(2))
})
7 changes: 3 additions & 4 deletions controllers/serviceaccount_controller.go
@@ -164,10 +164,9 @@ func (r ServiceAccountReconciler) Reconcile(_ context.Context, req reconcile.Req

// Create the cluster context for this request.
clusterContext := &vmwarecontext.ClusterContext{
- ControllerContext: r.ControllerContext,
- VSphereCluster: vsphereCluster,
- Logger: r.Logger.WithName(req.Namespace).WithName(req.Name),
- PatchHelper: patchHelper,
+ ControllerManagerContext: r.ControllerManagerContext,
+ VSphereCluster: vsphereCluster,
+ PatchHelper: patchHelper,
}

// Always issue a patch when exiting this function so changes to the
7 changes: 3 additions & 4 deletions controllers/servicediscovery_controller.go
@@ -158,10 +158,9 @@ func (r serviceDiscoveryReconciler) Reconcile(_ context.Context, req reconcile.R

// Create the cluster context for this request.
clusterContext := &vmwarecontext.ClusterContext{
- ControllerContext: r.ControllerContext,
- VSphereCluster: vsphereCluster,
- Logger: logger,
- PatchHelper: patchHelper,
+ ControllerManagerContext: r.ControllerManagerContext,
+ VSphereCluster: vsphereCluster,
+ PatchHelper: patchHelper,
}

// Always issue a patch when exiting this function so changes to the