diff --git a/api/v1beta1/machinedeployment_types.go b/api/v1beta1/machinedeployment_types.go
index 1e4b1c6b0ded..bda8e7d92e15 100644
--- a/api/v1beta1/machinedeployment_types.go
+++ b/api/v1beta1/machinedeployment_types.go
@@ -25,6 +25,10 @@ const (
 	// MachineDeploymentTopologyFinalizer is the finalizer used by the topology MachineDeployment controller to
 	// clean up referenced template resources if necessary when a MachineDeployment is being deleted.
 	MachineDeploymentTopologyFinalizer = "machinedeployment.topology.cluster.x-k8s.io"
+
+	// MachineDeploymentFinalizer is the finalizer used by the MachineDeployment controller to
+	// ensure ordered cleanup of corresponding MachineSets when a MachineDeployment is being deleted.
+	MachineDeploymentFinalizer = "cluster.x-k8s.io/machinedeployment"
 )
 
 // MachineDeploymentStrategyType defines the type of MachineDeployment rollout strategies.
diff --git a/api/v1beta1/machineset_types.go b/api/v1beta1/machineset_types.go
index f10e44f4f28d..f0899d48e73d 100644
--- a/api/v1beta1/machineset_types.go
+++ b/api/v1beta1/machineset_types.go
@@ -29,6 +29,10 @@ const (
 	// MachineSetTopologyFinalizer is the finalizer used by the topology MachineDeployment controller to
 	// clean up referenced template resources if necessary when a MachineSet is being deleted.
 	MachineSetTopologyFinalizer = "machineset.topology.cluster.x-k8s.io"
+
+	// MachineSetFinalizer is the finalizer used by the MachineSet controller to
+	// ensure ordered cleanup of corresponding Machines when a MachineSet is being deleted.
+	MachineSetFinalizer = "cluster.x-k8s.io/machineset"
 )
 
 // ANCHOR: MachineSetSpec
diff --git a/internal/controllers/machinedeployment/machinedeployment_controller.go b/internal/controllers/machinedeployment/machinedeployment_controller.go
index f9b55db02c4c..fe9e663b442f 100644
--- a/internal/controllers/machinedeployment/machinedeployment_controller.go
+++ b/internal/controllers/machinedeployment/machinedeployment_controller.go
@@ -33,6 +33,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/builder"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 	"sigs.k8s.io/controller-runtime/pkg/handler"
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -42,6 +43,7 @@ import (
 	"sigs.k8s.io/cluster-api/util/annotations"
 	"sigs.k8s.io/cluster-api/util/conditions"
 	utilconversion "sigs.k8s.io/cluster-api/util/conversion"
+	clog "sigs.k8s.io/cluster-api/util/log"
 	"sigs.k8s.io/cluster-api/util/patch"
 	"sigs.k8s.io/cluster-api/util/predicates"
 )
@@ -158,9 +160,15 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re
 		}
 	}()
 
-	// Ignore deleted MachineDeployments, this can happen when foregroundDeletion
-	// is enabled
+	// Handle deletion reconciliation loop.
 	if !deployment.DeletionTimestamp.IsZero() {
+		return ctrl.Result{}, r.reconcileDelete(ctx, deployment)
+	}
+
+	// Add finalizer first if not set to avoid the race condition between init and delete.
+	// Note: Finalizers in general can only be added when the deletionTimestamp is not set.
+ if !controllerutil.ContainsFinalizer(deployment, clusterv1.MachineDeploymentFinalizer) { + controllerutil.AddFinalizer(deployment, clusterv1.MachineDeploymentFinalizer) return ctrl.Result{}, nil } @@ -225,7 +233,7 @@ func (r *Reconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster, } } - msList, err := r.getMachineSetsForDeployment(ctx, md) + msList, err := r.getAndAdoptMachineSetsForDeployment(ctx, md) if err != nil { return err } @@ -286,8 +294,36 @@ func (r *Reconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster, return errors.Errorf("unexpected deployment strategy type: %s", md.Spec.Strategy.Type) } -// getMachineSetsForDeployment returns a list of MachineSets associated with a MachineDeployment. -func (r *Reconciler) getMachineSetsForDeployment(ctx context.Context, md *clusterv1.MachineDeployment) ([]*clusterv1.MachineSet, error) { +func (r *Reconciler) reconcileDelete(ctx context.Context, md *clusterv1.MachineDeployment) error { + log := ctrl.LoggerFrom(ctx) + msList, err := r.getAndAdoptMachineSetsForDeployment(ctx, md) + if err != nil { + return err + } + + // If all the descendant machinesets are deleted, then remove the machinedeployment's finalizer. + if len(msList) == 0 { + controllerutil.RemoveFinalizer(md, clusterv1.MachineDeploymentFinalizer) + return nil + } + + log.Info("Waiting for MachineSets to be deleted", "MachineSets", clog.ObjNamesString(msList)) + + // else delete owned machinesets. + for _, ms := range msList { + if ms.DeletionTimestamp.IsZero() { + log.Info("Deleting MachineSet", "MachineSet", klog.KObj(ms)) + if err := r.Client.Delete(ctx, ms); err != nil && !apierrors.IsNotFound(err) { + return errors.Wrapf(err, "failed to delete MachineSet %s", klog.KObj(ms)) + } + } + } + + return nil +} + +// getAndAdoptMachineSetsForDeployment returns a list of MachineSets associated with a MachineDeployment. +func (r *Reconciler) getAndAdoptMachineSetsForDeployment(ctx context.Context, md *clusterv1.MachineDeployment) ([]*clusterv1.MachineSet, error) { log := ctrl.LoggerFrom(ctx) // List all MachineSets to find those we own but that no longer match our selector. 
@@ -299,7 +335,8 @@ func (r *Reconciler) getMachineSetsForDeployment(ctx context.Context, md *cluste filtered := make([]*clusterv1.MachineSet, 0, len(machineSets.Items)) for idx := range machineSets.Items { ms := &machineSets.Items[idx] - log.WithValues("MachineSet", klog.KObj(ms)) + log := log.WithValues("MachineSet", klog.KObj(ms)) + ctx := ctrl.LoggerInto(ctx, log) selector, err := metav1.LabelSelectorAsSelector(&md.Spec.Selector) if err != nil { log.Error(err, "Skipping MachineSet, failed to get label selector from spec selector") diff --git a/internal/controllers/machinedeployment/machinedeployment_controller_test.go b/internal/controllers/machinedeployment/machinedeployment_controller_test.go index a82768f2abee..51e14f39d82c 100644 --- a/internal/controllers/machinedeployment/machinedeployment_controller_test.go +++ b/internal/controllers/machinedeployment/machinedeployment_controller_test.go @@ -34,6 +34,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/controllers/external" + "sigs.k8s.io/cluster-api/internal/test/builder" "sigs.k8s.io/cluster-api/internal/util/ssa" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" @@ -962,7 +963,7 @@ func TestGetMachineSetsForDeployment(t *testing.T) { recorder: record.NewFakeRecorder(32), } - got, err := r.getMachineSetsForDeployment(ctx, &tc.machineDeployment) + got, err := r.getAndAdoptMachineSetsForDeployment(ctx, &tc.machineDeployment) g.Expect(err).ToNot(HaveOccurred()) g.Expect(got).To(HaveLen(len(tc.expected))) @@ -993,3 +994,90 @@ func updateMachineDeployment(ctx context.Context, c client.Client, md *clusterv1 return patchHelper.Patch(ctx, md) }) } + +func TestReconciler_reconcileDelete(t *testing.T) { + labels := map[string]string{ + "some": "labelselector", + } + md := builder.MachineDeployment("default", "md0").WithClusterName("test").Build() + md.Finalizers = []string{ + clusterv1.MachineDeploymentFinalizer, + } + md.DeletionTimestamp = ptr.To(metav1.Now()) + md.Spec.Selector = metav1.LabelSelector{ + MatchLabels: labels, + } + mdWithoutFinalizer := md.DeepCopy() + mdWithoutFinalizer.Finalizers = []string{} + tests := []struct { + name string + machineDeployment *clusterv1.MachineDeployment + want *clusterv1.MachineDeployment + objs []client.Object + wantMachineSets []clusterv1.MachineSet + expectError bool + }{ + { + name: "Should do nothing when no descendant MachineSets exist and finalizer is already gone", + machineDeployment: mdWithoutFinalizer.DeepCopy(), + want: mdWithoutFinalizer.DeepCopy(), + objs: nil, + wantMachineSets: nil, + expectError: false, + }, + { + name: "Should remove finalizer when no descendant MachineSets exist", + machineDeployment: md.DeepCopy(), + want: mdWithoutFinalizer.DeepCopy(), + objs: nil, + wantMachineSets: nil, + expectError: false, + }, + { + name: "Should keep finalizer when descendant MachineSets exist and trigger deletion only for descendant MachineSets", + machineDeployment: md.DeepCopy(), + want: md.DeepCopy(), + objs: []client.Object{ + builder.MachineSet("default", "ms0").WithClusterName("test").WithLabels(labels).Build(), + builder.MachineSet("default", "ms1").WithClusterName("test").WithLabels(labels).Build(), + builder.MachineSet("default", "ms2-not-part-of-md").WithClusterName("test").Build(), + builder.MachineSet("default", "ms3-not-part-of-md").WithClusterName("test").Build(), + }, + wantMachineSets: []clusterv1.MachineSet{ + *builder.MachineSet("default", "ms2-not-part-of-md").WithClusterName("test").Build(), + 
*builder.MachineSet("default", "ms3-not-part-of-md").WithClusterName("test").Build(), + }, + expectError: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + c := fake.NewClientBuilder().WithObjects(tt.objs...).Build() + r := &Reconciler{ + Client: c, + recorder: record.NewFakeRecorder(32), + } + + err := r.reconcileDelete(ctx, tt.machineDeployment) + if tt.expectError { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + + g.Expect(tt.machineDeployment).To(BeComparableTo(tt.want)) + + machineSetList := &clusterv1.MachineSetList{} + g.Expect(c.List(ctx, machineSetList, client.InNamespace("default"))).ToNot(HaveOccurred()) + + // Remove ResourceVersion so we can actually compare. + for i := range machineSetList.Items { + machineSetList.Items[i].ResourceVersion = "" + } + + g.Expect(machineSetList.Items).To(ConsistOf(tt.wantMachineSets)) + }) + } +} diff --git a/internal/controllers/machineset/machineset_controller.go b/internal/controllers/machineset/machineset_controller.go index 360616e58412..27b9682adf9e 100644 --- a/internal/controllers/machineset/machineset_controller.go +++ b/internal/controllers/machineset/machineset_controller.go @@ -41,6 +41,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -182,9 +183,15 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re } }() - // Ignore deleted MachineSets, this can happen when foregroundDeletion - // is enabled + // Handle deletion reconciliation loop. if !machineSet.DeletionTimestamp.IsZero() { + return ctrl.Result{}, r.reconcileDelete(ctx, machineSet) + } + + // Add finalizer first if not set to avoid the race condition between init and delete. + // Note: Finalizers in general can only be added when the deletionTimestamp is not set. + if !controllerutil.ContainsFinalizer(machineSet, clusterv1.MachineSetFinalizer) { + controllerutil.AddFinalizer(machineSet, clusterv1.MachineSetFinalizer) return ctrl.Result{}, nil } @@ -224,8 +231,6 @@ func patchMachineSet(ctx context.Context, patchHelper *patch.Helper, machineSet } func (r *Reconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster, machineSet *clusterv1.MachineSet) (ctrl.Result, error) { - log := ctrl.LoggerFrom(ctx) - // Reconcile and retrieve the Cluster object. if machineSet.Labels == nil { machineSet.Labels = make(map[string]string) @@ -266,63 +271,28 @@ func (r *Reconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster, machineSet.Spec.Selector.MatchLabels[clusterv1.ClusterNameLabel] = machineSet.Spec.ClusterName machineSet.Spec.Template.Labels[clusterv1.ClusterNameLabel] = machineSet.Spec.ClusterName - selectorMap, err := metav1.LabelSelectorAsMap(&machineSet.Spec.Selector) - if err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to convert MachineSet %q label selector to a map", machineSet.Name) - } - - // Get all Machines linked to this MachineSet. 
- allMachines := &clusterv1.MachineList{} - err = r.Client.List(ctx, - allMachines, - client.InNamespace(machineSet.Namespace), - client.MatchingLabels(selectorMap), - ) + machines, err := r.getAndAdoptMachinesForMachineSet(ctx, machineSet) if err != nil { - return ctrl.Result{}, errors.Wrap(err, "failed to list machines") - } - - // Filter out irrelevant machines (i.e. IsControlledBy something else) and claim orphaned machines. - // Machines in deleted state are deliberately not excluded https://github.com/kubernetes-sigs/cluster-api/pull/3434. - filteredMachines := make([]*clusterv1.Machine, 0, len(allMachines.Items)) - for idx := range allMachines.Items { - machine := &allMachines.Items[idx] - log := log.WithValues("Machine", klog.KObj(machine)) - if shouldExcludeMachine(machineSet, machine) { - continue - } - - // Attempt to adopt machine if it meets previous conditions and it has no controller references. - if metav1.GetControllerOf(machine) == nil { - if err := r.adoptOrphan(ctx, machineSet, machine); err != nil { - log.Error(err, "Failed to adopt Machine") - r.recorder.Eventf(machineSet, corev1.EventTypeWarning, "FailedAdopt", "Failed to adopt Machine %q: %v", machine.Name, err) - continue - } - log.Info("Adopted Machine") - r.recorder.Eventf(machineSet, corev1.EventTypeNormal, "SuccessfulAdopt", "Adopted Machine %q", machine.Name) - } - - filteredMachines = append(filteredMachines, machine) + return ctrl.Result{}, errors.Wrap(err, "failed to list Machines") } result := ctrl.Result{} - reconcileUnhealthyMachinesResult, err := r.reconcileUnhealthyMachines(ctx, cluster, machineSet, filteredMachines) + reconcileUnhealthyMachinesResult, err := r.reconcileUnhealthyMachines(ctx, cluster, machineSet, machines) if err != nil { return ctrl.Result{}, errors.Wrap(err, "failed to reconcile unhealthy machines") } result = util.LowestNonZeroResult(result, reconcileUnhealthyMachinesResult) - if err := r.syncMachines(ctx, machineSet, filteredMachines); err != nil { + if err := r.syncMachines(ctx, machineSet, machines); err != nil { return ctrl.Result{}, errors.Wrap(err, "failed to update Machines") } - syncReplicasResult, syncErr := r.syncReplicas(ctx, cluster, machineSet, filteredMachines) + syncReplicasResult, syncErr := r.syncReplicas(ctx, cluster, machineSet, machines) result = util.LowestNonZeroResult(result, syncReplicasResult) // Always updates status as machines come up or die. - if err := r.updateStatus(ctx, cluster, machineSet, filteredMachines); err != nil { + if err := r.updateStatus(ctx, cluster, machineSet, machines); err != nil { return ctrl.Result{}, errors.Wrapf(kerrors.NewAggregate([]error{err, syncErr}), "failed to update MachineSet's Status") } @@ -359,6 +329,80 @@ func (r *Reconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster, return result, nil } +func (r *Reconciler) reconcileDelete(ctx context.Context, machineSet *clusterv1.MachineSet) error { + log := ctrl.LoggerFrom(ctx) + machineList, err := r.getAndAdoptMachinesForMachineSet(ctx, machineSet) + if err != nil { + return err + } + + // If all the descendant machines are deleted, then remove the machineset's finalizer. + if len(machineList) == 0 { + controllerutil.RemoveFinalizer(machineSet, clusterv1.MachineSetFinalizer) + return nil + } + + log.Info("Waiting for Machines to be deleted", "Machines", clog.ObjNamesString(machineList)) + + // else delete owned machines. 
+ for _, machine := range machineList { + if machine.DeletionTimestamp.IsZero() { + log.Info("Deleting Machine", "Machine", klog.KObj(machine)) + if err := r.Client.Delete(ctx, machine); err != nil && !apierrors.IsNotFound(err) { + return errors.Wrapf(err, "failed to delete Machine %s", klog.KObj(machine)) + } + } + } + + return nil +} + +func (r *Reconciler) getAndAdoptMachinesForMachineSet(ctx context.Context, machineSet *clusterv1.MachineSet) ([]*clusterv1.Machine, error) { + log := ctrl.LoggerFrom(ctx) + selectorMap, err := metav1.LabelSelectorAsMap(&machineSet.Spec.Selector) + if err != nil { + return nil, errors.Wrapf(err, "failed to convert MachineSet %q label selector to a map", machineSet.Name) + } + + // Get all Machines linked to this MachineSet. + allMachines := &clusterv1.MachineList{} + err = r.Client.List(ctx, + allMachines, + client.InNamespace(machineSet.Namespace), + client.MatchingLabels(selectorMap), + ) + if err != nil { + return nil, errors.Wrap(err, "failed to list machines") + } + + // Filter out irrelevant machines (i.e. IsControlledBy something else) and claim orphaned machines. + // Machines in deleted state are deliberately not excluded https://github.com/kubernetes-sigs/cluster-api/pull/3434. + filteredMachines := make([]*clusterv1.Machine, 0, len(allMachines.Items)) + for idx := range allMachines.Items { + machine := &allMachines.Items[idx] + log := log.WithValues("Machine", klog.KObj(machine)) + ctx := ctrl.LoggerInto(ctx, log) + if shouldExcludeMachine(machineSet, machine) { + continue + } + + // Attempt to adopt machine if it meets previous conditions and it has no controller references. + if metav1.GetControllerOf(machine) == nil { + if err := r.adoptOrphan(ctx, machineSet, machine); err != nil { + log.Error(err, "Failed to adopt Machine") + r.recorder.Eventf(machineSet, corev1.EventTypeWarning, "FailedAdopt", "Failed to adopt Machine %q: %v", machine.Name, err) + continue + } + log.Info("Adopted Machine") + r.recorder.Eventf(machineSet, corev1.EventTypeNormal, "SuccessfulAdopt", "Adopted Machine %q", machine.Name) + } + + filteredMachines = append(filteredMachines, machine) + } + + return filteredMachines, nil +} + // syncMachines updates Machines, InfrastructureMachine and BootstrapConfig to propagate in-place mutable fields // from the MachineSet. 
// Note: It also cleans up managed fields of all Machines so that Machines that were diff --git a/internal/controllers/machineset/machineset_controller_test.go b/internal/controllers/machineset/machineset_controller_test.go index 4656d208f368..757f8447995f 100644 --- a/internal/controllers/machineset/machineset_controller_test.go +++ b/internal/controllers/machineset/machineset_controller_test.go @@ -890,6 +890,9 @@ func newMachineSet(name, cluster string, replicas int32) *clusterv1.MachineSet { ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: metav1.NamespaceDefault, + Finalizers: []string{ + clusterv1.MachineSetFinalizer, + }, Labels: map[string]string{ clusterv1.ClusterNameLabel: cluster, }, @@ -931,6 +934,9 @@ func TestMachineSetReconcile_MachinesCreatedConditionFalseOnBadInfraRef(t *testi Labels: map[string]string{ clusterv1.ClusterNameLabel: cluster.Name, }, + Finalizers: []string{ + clusterv1.MachineSetFinalizer, + }, }, Spec: clusterv1.MachineSetSpec{ ClusterName: cluster.ObjectMeta.Name, @@ -2123,3 +2129,90 @@ func assertMachine(g *WithT, actualMachine *clusterv1.Machine, expectedMachine * g.Expect(actualMachine.Finalizers).Should(Equal(expectedMachine.Finalizers)) } } + +func TestReconciler_reconcileDelete(t *testing.T) { + labels := map[string]string{ + "some": "labelselector", + } + ms := builder.MachineSet("default", "ms0").WithClusterName("test").Build() + ms.Finalizers = []string{ + clusterv1.MachineSetFinalizer, + } + ms.DeletionTimestamp = ptr.To(metav1.Now()) + ms.Spec.Selector = metav1.LabelSelector{ + MatchLabels: labels, + } + msWithoutFinalizer := ms.DeepCopy() + msWithoutFinalizer.Finalizers = []string{} + tests := []struct { + name string + machineSet *clusterv1.MachineSet + want *clusterv1.MachineSet + objs []client.Object + wantMachines []clusterv1.Machine + expectError bool + }{ + { + name: "Should do nothing when no descendant Machines exist and finalizer is already gone", + machineSet: msWithoutFinalizer.DeepCopy(), + want: msWithoutFinalizer.DeepCopy(), + objs: nil, + wantMachines: nil, + expectError: false, + }, + { + name: "Should remove finalizer when no descendant Machines exist", + machineSet: ms.DeepCopy(), + want: msWithoutFinalizer.DeepCopy(), + objs: nil, + wantMachines: nil, + expectError: false, + }, + { + name: "Should keep finalizer when descendant Machines exist and trigger deletion only for descendant Machines", + machineSet: ms.DeepCopy(), + want: ms.DeepCopy(), + objs: []client.Object{ + builder.Machine("default", "m0").WithClusterName("test").WithLabels(labels).Build(), + builder.Machine("default", "m1").WithClusterName("test").WithLabels(labels).Build(), + builder.Machine("default", "m2-not-part-of-ms").WithClusterName("test").Build(), + builder.Machine("default", "m3-not-part-of-ms").WithClusterName("test").Build(), + }, + wantMachines: []clusterv1.Machine{ + *builder.Machine("default", "m2-not-part-of-ms").WithClusterName("test").Build(), + *builder.Machine("default", "m3-not-part-of-ms").WithClusterName("test").Build(), + }, + expectError: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + c := fake.NewClientBuilder().WithObjects(tt.objs...).Build() + r := &Reconciler{ + Client: c, + recorder: record.NewFakeRecorder(32), + } + + err := r.reconcileDelete(ctx, tt.machineSet) + if tt.expectError { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + + g.Expect(tt.machineSet).To(BeComparableTo(tt.want)) + + machineList := &clusterv1.MachineList{} 
+ g.Expect(c.List(ctx, machineList, client.InNamespace("default"))).ToNot(HaveOccurred()) + + // Remove ResourceVersion so we can actually compare. + for i := range machineList.Items { + machineList.Items[i].ResourceVersion = "" + } + + g.Expect(machineList.Items).To(ConsistOf(tt.wantMachines)) + }) + } +} diff --git a/test/e2e/cluster_deletion.go b/test/e2e/cluster_deletion.go new file mode 100644 index 000000000000..2cec799ad25b --- /dev/null +++ b/test/e2e/cluster_deletion.go @@ -0,0 +1,418 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog/v2" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterctlcluster "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster" + "sigs.k8s.io/cluster-api/test/e2e/internal/log" + "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/cluster-api/test/framework/clusterctl" + "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/patch" +) + +// ClusterDeletionSpecInput is the input for ClusterDeletionSpec. +type ClusterDeletionSpecInput struct { + E2EConfig *clusterctl.E2EConfig + ClusterctlConfigPath string + BootstrapClusterProxy framework.ClusterProxy + ArtifactFolder string + SkipCleanup bool + + // Cluster name allows to specify a deterministic clusterName. + // If not set, a random one will be generated. + ClusterName *string + + // InfrastructureProvider allows to specify the infrastructure provider to be used when looking for + // cluster templates. + // If not set, clusterctl will look at the infrastructure provider installed in the management cluster; + // if only one infrastructure provider exists, it will be used, otherwise the operation will fail if more than one exists. + InfrastructureProvider *string + + // Flavor, if specified is the template flavor used to create the cluster for testing. + // If not specified, the default flavor for the selected infrastructure provider is used. + Flavor *string + + // ControlPlaneMachineCount defines the number of control plane machines to be added to the workload cluster. + // If not specified, 1 will be used. + ControlPlaneMachineCount *int64 + + // WorkerMachineCount defines number of worker machines to be added to the workload cluster. + // If not specified, 1 will be used. 
+	WorkerMachineCount *int64
+
+	// Allows to inject functions to be run while waiting for the control plane to be initialized,
+	// which unblocks CNI installation, and for the control plane machines to be ready (after CNI installation).
+	ControlPlaneWaiters clusterctl.ControlPlaneWaiters
+
+	// Allows to inject a function to be run after test namespace is created.
+	// If not specified, this is a no-op.
+	PostNamespaceCreated func(managementClusterProxy framework.ClusterProxy, workloadClusterNamespace string)
+
+	// Allows to inject a function to be run after machines are provisioned.
+	// If not specified, this is a no-op.
+	PostMachinesProvisioned func(managementClusterProxy framework.ClusterProxy, workloadClusterNamespace, workloadClusterName string)
+
+	// ClusterDeletionPhases defines the kinds which are expected to get deleted in phases.
+	ClusterDeletionPhases []ClusterDeletionPhase
+}
+
+type ClusterDeletionPhase struct {
+	// ObjectFilter identifies if an object should be considered to get deleted during this phase.
+	// During the test the identified objects are expected to only have a deletionTimestamp when this phase is tested
+	// and be gone after unblocking this phase's blocking objects.
+	ObjectFilter func(objectReference corev1.ObjectReference, objectOwnerReferences []metav1.OwnerReference) bool
+	// BlockingObjectFilter is a filter on top of the ObjectFilter to identify which objects should block the deletion in
+	// this phase. The identified objects will get a finalizer added before the deletion of the cluster starts.
+	// After a successful verification that all objects are in the correct state the finalizer gets removed to unblock
+	// the deletion and go to the next phase.
+	BlockingObjectFilter func(objectReference corev1.ObjectReference, objectOwnerReferences []metav1.OwnerReference) bool
+}
+
+// ClusterDeletionSpec goes through the following steps:
+// * Create a cluster.
+// * Add a finalizer to the Cluster object.
+// * Add a finalizer to objects identified by input.ClusterDeletionPhases.BlockingObjectFilter.
+// * Trigger deletion for the cluster.
+// * For each phase:
+//   - Verify objects of the previous phase are gone.
+//   - Verify objects to be deleted in this phase have a deletionTimestamp set but still exist.
+//   - Verify objects expected to be deleted in a later phase don't have a deletionTimestamp set.
+//   - Remove finalizers for this phase's BlockingObjects to unblock their deletion.
+//
+// * Do a final verification:
+//   - Verify objects expected to get deleted in the last phase are gone.
+//   - Verify the Cluster object has a deletionTimestamp set but still exists.
+//   - Remove finalizer for the Cluster object.
+//
+// * Verify the Cluster object is gone.
+func ClusterDeletionSpec(ctx context.Context, inputGetter func() ClusterDeletionSpecInput) {
+	var (
+		specName         = "cluster-deletion"
+		input            ClusterDeletionSpecInput
+		namespace        *corev1.Namespace
+		cancelWatches    context.CancelFunc
+		clusterResources *clusterctl.ApplyClusterTemplateAndWaitResult
+		finalizer        = "test.cluster.x-k8s.io/cluster-deletion"
+		blockingObjects  []client.Object
+	)
+
+	BeforeEach(func() {
+		Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName)
+		input = inputGetter()
+		Expect(input.E2EConfig).ToNot(BeNil(), "Invalid argument. input.E2EConfig can't be nil when calling %s spec", specName)
+		Expect(input.ClusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument.
input.ClusterctlConfigPath must be an existing file when calling %s spec", specName) + Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName) + Expect(os.MkdirAll(input.ArtifactFolder, 0750)).To(Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName) + + Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersion)) + + // Setup a Namespace where to host objects for this spec and create a watcher for the namespace events. + namespace, cancelWatches = framework.SetupSpecNamespace(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.PostNamespaceCreated) + clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult) + }) + + It("Should successfully create and delete a Cluster", func() { + infrastructureProvider := clusterctl.DefaultInfrastructureProvider + if input.InfrastructureProvider != nil { + infrastructureProvider = *input.InfrastructureProvider + } + + flavor := clusterctl.DefaultFlavor + if input.Flavor != nil { + flavor = *input.Flavor + } + + controlPlaneMachineCount := ptr.To[int64](1) + if input.ControlPlaneMachineCount != nil { + controlPlaneMachineCount = input.ControlPlaneMachineCount + } + + workerMachineCount := ptr.To[int64](1) + if input.WorkerMachineCount != nil { + workerMachineCount = input.WorkerMachineCount + } + + clusterName := fmt.Sprintf("%s-%s", specName, util.RandomString(6)) + if input.ClusterName != nil { + clusterName = *input.ClusterName + } + clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{ + ClusterProxy: input.BootstrapClusterProxy, + ConfigCluster: clusterctl.ConfigClusterInput{ + LogFolder: filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()), + ClusterctlConfigPath: input.ClusterctlConfigPath, + KubeconfigPath: input.BootstrapClusterProxy.GetKubeconfigPath(), + InfrastructureProvider: infrastructureProvider, + Flavor: flavor, + Namespace: namespace.Name, + ClusterName: clusterName, + KubernetesVersion: input.E2EConfig.GetVariable(KubernetesVersion), + ControlPlaneMachineCount: controlPlaneMachineCount, + WorkerMachineCount: workerMachineCount, + }, + ControlPlaneWaiters: input.ControlPlaneWaiters, + WaitForClusterIntervals: input.E2EConfig.GetIntervals(specName, "wait-cluster"), + WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"), + WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"), + PostMachinesProvisioned: func() { + if input.PostMachinesProvisioned != nil { + input.PostMachinesProvisioned(input.BootstrapClusterProxy, namespace.Name, clusterName) + } + }, + }, clusterResources) + + // Get all objects per deletion phase and the list of blocking objects. + var objectsPerPhase [][]client.Object + objectsPerPhase, blockingObjects = getDeletionPhaseObjects(ctx, input.BootstrapClusterProxy, clusterResources.Cluster, input.ClusterDeletionPhases) + + // Add a finalizer to all objects in blockingObjects to control when these objects are gone. + By(fmt.Sprintf("Adding finalizer %s on objects to block during cluster deletion", finalizer)) + addFinalizer(ctx, input.BootstrapClusterProxy.GetClient(), finalizer, blockingObjects...) + + // Trigger the deletion of the Cluster. 
+ framework.DeleteCluster(ctx, framework.DeleteClusterInput{ + Cluster: clusterResources.Cluster, + Deleter: input.BootstrapClusterProxy.GetClient(), + }) + + var expectedObjectsDeleted, expectedObjectsInDeletion []client.Object + + // Verify deletion for each phase. + for i, phaseObjects := range objectsPerPhase { + By(fmt.Sprintf("Verify deletion phase %d/%d", i+1, len(objectsPerPhase))) + // Expect the objects of the previous phase to be deleted. + expectedObjectsDeleted = expectedObjectsInDeletion + // Expect the objects of this phase to have a deletionTimestamp set, but still exist due to the blocking objects having the finalizer. + expectedObjectsInDeletion = phaseObjects + // Expect the objects of upcoming phases to not yet have a deletionTimestamp set. + expectedObjectsNotInDeletion := []client.Object{} + for j := i + 1; j < len(objectsPerPhase); j++ { + expectedObjectsNotInDeletion = append(expectedObjectsNotInDeletion, objectsPerPhase[j]...) + } + + assertDeletionPhase(ctx, input.BootstrapClusterProxy.GetClient(), finalizer, + expectedObjectsDeleted, + expectedObjectsInDeletion, + expectedObjectsNotInDeletion, + ) + + // Remove the test's finalizer from all objects which had been expected to be in deletion to unblock the next phase. + // Note: this only removes the finalizer from objects which have it set. + By(fmt.Sprintf("Removing finalizers for phase %d/%d", i+1, len(input.ClusterDeletionPhases))) + removeFinalizer(ctx, input.BootstrapClusterProxy.GetClient(), finalizer, expectedObjectsInDeletion...) + } + + By("Final deletion verification") + // Verify that all objects of the last phase are gone and the cluster does still exist. + expectedObjectsDeleted = expectedObjectsInDeletion + assertDeletionPhase(ctx, input.BootstrapClusterProxy.GetClient(), finalizer, + expectedObjectsDeleted, + []client.Object{clusterResources.Cluster}, + nil, + ) + + // Remove the test's finalizer from the cluster object. + By("Removing finalizers for Cluster") + removeFinalizer(ctx, input.BootstrapClusterProxy.GetClient(), finalizer, clusterResources.Cluster) + + log.Logf("Waiting for the Cluster %s to be deleted", klog.KObj(clusterResources.Cluster)) + framework.WaitForClusterDeleted(ctx, framework.WaitForClusterDeletedInput{ + Client: input.BootstrapClusterProxy.GetClient(), + Cluster: clusterResources.Cluster, + ArtifactFolder: input.ArtifactFolder, + }, input.E2EConfig.GetIntervals(specName, "wait-delete-cluster")...) + + By("PASSED!") + }) + + AfterEach(func() { + // Dump all the resources in the spec namespace and the workload cluster. + framework.DumpAllResourcesAndLogs(ctx, input.BootstrapClusterProxy, input.ArtifactFolder, namespace, clusterResources.Cluster) + + if !input.SkipCleanup { + // Remove finalizers we added to block normal deletion. + removeFinalizer(ctx, input.BootstrapClusterProxy.GetClient(), finalizer, blockingObjects...) + + By(fmt.Sprintf("Deleting cluster %s", klog.KObj(clusterResources.Cluster))) + // While https://github.com/kubernetes-sigs/cluster-api/issues/2955 is addressed in future iterations, there is a chance + // that cluster variable is not set even if the cluster exists, so we are calling DeleteAllClustersAndWait + // instead of DeleteClusterAndWait + framework.DeleteAllClustersAndWait(ctx, framework.DeleteAllClustersAndWaitInput{ + Client: input.BootstrapClusterProxy.GetClient(), + Namespace: namespace.Name, + ArtifactFolder: input.ArtifactFolder, + }, input.E2EConfig.GetIntervals(specName, "wait-delete-cluster")...) 
+
+			By(fmt.Sprintf("Deleting namespace used for hosting the %q test spec", specName))
+			framework.DeleteNamespace(ctx, framework.DeleteNamespaceInput{
+				Deleter: input.BootstrapClusterProxy.GetClient(),
+				Name:    namespace.Name,
+			})
+		}
+		cancelWatches()
+	})
+}
+
+// getDeletionPhaseObjects gets the OwnerGraph for the cluster and returns all objects per phase and an additional array
+// with all objects which should block during the cluster deletion.
+func getDeletionPhaseObjects(ctx context.Context, bootstrapClusterProxy framework.ClusterProxy, cluster *clusterv1.Cluster, phases []ClusterDeletionPhase) (objectsPerPhase [][]client.Object, blockingObjects []client.Object) {
+	// Add the Cluster object to blockingObjects to control when it actually gets removed during the test
+	// and to be able to clean up the Cluster during Teardown on failures.
+	blockingObjects = append(blockingObjects, cluster)
+
+	// Get all objects relevant to the test by filtering for the kinds of the phases.
+	graph, err := clusterctlcluster.GetOwnerGraph(ctx, cluster.GetNamespace(), bootstrapClusterProxy.GetKubeconfigPath(), clusterctlcluster.FilterClusterObjectsWithNameFilter(cluster.GetName()))
+	Expect(err).ToNot(HaveOccurred())
+
+	for _, phase := range phases {
+		objects := []client.Object{}
+		// Iterate over the graph and find all objects relevant for the phase and which of them should be blocking.
+		for _, node := range graph {
+			// Check if the node should be part of the phase.
+			if !phase.ObjectFilter(node.Object, node.Owners) {
+				continue
+			}
+
+			obj := new(unstructured.Unstructured)
+			obj.SetAPIVersion(node.Object.APIVersion)
+			obj.SetKind(node.Object.Kind)
+			obj.SetName(node.Object.Name)
+			obj.SetNamespace(cluster.GetNamespace())
+
+			objects = append(objects, obj)
+
+			// Add the object to blockingObjects if it should block the deletion in this phase.
+			if phase.BlockingObjectFilter(node.Object, node.Owners) {
+				blockingObjects = append(blockingObjects, obj)
+			}
+		}
+
+		objectsPerPhase = append(objectsPerPhase, objects)
+	}
+	return objectsPerPhase, blockingObjects
+}
+
+func addFinalizer(ctx context.Context, c client.Client, finalizer string, objs ...client.Object) {
+	for _, obj := range objs {
+		log.Logf("Adding finalizer for %s %s", obj.GetObjectKind().GroupVersionKind().Kind, klog.KObj(obj))
+		Expect(c.Get(ctx, client.ObjectKeyFromObject(obj), obj)).To(Succeed(), fmt.Sprintf("Failed to get %s %s", obj.GetObjectKind().GroupVersionKind().Kind, klog.KObj(obj)))
+
+		helper, err := patch.NewHelper(obj, c)
+		Expect(err).ToNot(HaveOccurred())
+
+		if updated := controllerutil.AddFinalizer(obj, finalizer); !updated {
+			continue
+		}
+
+		Expect(helper.Patch(ctx, obj)).ToNot(HaveOccurred(), fmt.Sprintf("Failed to add finalizer to %s %s", obj.GetObjectKind().GroupVersionKind().Kind, klog.KObj(obj)))
+	}
+}
+
+func removeFinalizer(ctx context.Context, c client.Client, finalizer string, objs ...client.Object) {
+	for _, obj := range objs {
+		log.Logf("Removing finalizer for %s %s", obj.GetObjectKind().GroupVersionKind().Kind, klog.KObj(obj))
+		err := c.Get(ctx, client.ObjectKeyFromObject(obj), obj)
+		if apierrors.IsNotFound(err) {
+			continue
+		}
+		Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("Failed to get %s %s", obj.GetObjectKind().GroupVersionKind().Kind, klog.KObj(obj)))
+
+		helper, err := patch.NewHelper(obj, c)
+		Expect(err).ToNot(HaveOccurred())
+
+		if updated := controllerutil.RemoveFinalizer(obj, finalizer); !updated {
+			continue
+		}
+
+		Expect(helper.Patch(ctx, obj)).ToNot(HaveOccurred(), fmt.Sprintf("Failed to remove finalizer from %s %s", obj.GetObjectKind().GroupVersionKind().Kind, klog.KObj(obj)))
+	}
+}
+
+func assertDeletionPhase(ctx context.Context, c client.Client, finalizer string, expectedObjectsDeleted, expectedObjectsInDeletion, expectedObjectsNotInDeletion []client.Object) {
+	Eventually(func() error {
+		var errs []error
+		// Ensure deleted objects are gone.
+		for _, obj := range expectedObjectsDeleted {
+			err := c.Get(ctx, client.ObjectKeyFromObject(obj), obj)
+			if err != nil {
+				if apierrors.IsNotFound(err) {
+					continue
+				}
+				errs = append(errs, errors.Wrapf(err, "expected %s %s to be deleted", obj.GetObjectKind().GroupVersionKind().Kind, klog.KObj(obj)))
+				continue
+			}
+			errs = append(errs, errors.Errorf("expected %s %s to be deleted but it still exists", obj.GetObjectKind().GroupVersionKind().Kind, klog.KObj(obj)))
+		}
+
+		// Ensure expected objects are in deletion.
+		for _, obj := range expectedObjectsInDeletion {
+			if err := c.Get(ctx, client.ObjectKeyFromObject(obj), obj); err != nil {
+				errs = append(errs, errors.Wrapf(err, "checking %s %s is in deletion", obj.GetObjectKind().GroupVersionKind().Kind, klog.KObj(obj)))
+				continue
+			}
+			if obj.GetDeletionTimestamp().IsZero() {
+				errs = append(errs, errors.Errorf("expected %s %s to be in deletion but deletionTimestamp is not set", obj.GetObjectKind().GroupVersionKind().Kind, klog.KObj(obj)))
+				continue
+			}
+
+			// Blocking objects have our finalizer set. When expected to be in deletion,
+			// blocking objects should only have our finalizer, otherwise they are being blocked by something else.
+ if sets.New[string](obj.GetFinalizers()...).Has(finalizer) && len(obj.GetFinalizers()) > 1 { + errs = append(errs, errors.Errorf("expected blocking %s %s to only have %s as finalizer, got [%s]", obj.GetObjectKind().GroupVersionKind().Kind, klog.KObj(obj), finalizer, strings.Join(obj.GetFinalizers(), ", "))) + continue + } + } + + // Ensure other objects are not in deletion. + for _, obj := range expectedObjectsNotInDeletion { + if err := c.Get(ctx, client.ObjectKeyFromObject(obj), obj); err != nil { + errs = append(errs, errors.Wrapf(err, "checking %s %s is not in deletion", obj.GetObjectKind().GroupVersionKind().Kind, klog.KObj(obj))) + continue + } + + if !obj.GetDeletionTimestamp().IsZero() { + errs = append(errs, errors.Errorf("expected %s %s to not be in deletion but deletionTimestamp is set", obj.GetObjectKind().GroupVersionKind().Kind, klog.KObj(obj))) + continue + } + } + + return kerrors.NewAggregate(errs) + }).WithTimeout(5 * time.Minute).WithPolling(10 * time.Second).Should(Succeed()) +} diff --git a/test/e2e/cluster_deletion_test.go b/test/e2e/cluster_deletion_test.go new file mode 100644 index 000000000000..801a03cf9897 --- /dev/null +++ b/test/e2e/cluster_deletion_test.go @@ -0,0 +1,92 @@ +//go:build e2e +// +build e2e + +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + . "github.com/onsi/ginkgo/v2" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/utils/ptr" +) + +var _ = Describe("When performing cluster deletion with ClusterClass [ClusterClass]", func() { + ClusterDeletionSpec(ctx, func() ClusterDeletionSpecInput { + return ClusterDeletionSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + ControlPlaneMachineCount: ptr.To[int64](3), + Flavor: ptr.To("topology"), + InfrastructureProvider: ptr.To("docker"), + + ClusterDeletionPhases: []ClusterDeletionPhase{ + { + // All Machines owned by MachineDeployments or MachineSets, MachineDeployments and MachineSets should be deleted in this phase. + ObjectFilter: func(objectReference corev1.ObjectReference, objectOwnerReferences []metav1.OwnerReference) bool { + if objectReference.Kind == "Machine" && hasOwner(objectOwnerReferences, "MachineDeployment", "MachineSet") { + return true + } + return objectReference.Kind == "MachineDeployment" || objectReference.Kind == "MachineSet" + }, + // Of the above, only Machines should be considered to be blocking to test foreground deletion. + BlockingObjectFilter: func(objectReference corev1.ObjectReference, _ []metav1.OwnerReference) bool { + return objectReference.Kind == "Machine" + }, + }, + { + // All Machines owned by KubeadmControlPlane and KubeadmControlPlane should be deleted in this phase. 
+ ObjectFilter: func(objectReference corev1.ObjectReference, objectOwnerReferences []metav1.OwnerReference) bool { + if objectReference.Kind == "Machine" && hasOwner(objectOwnerReferences, "KubeadmControlPlane") { + return true + } + return objectReference.Kind == "KubeadmControlPlane" + }, + // Of the above, only Machines should be considered to be blocking to test foreground deletion. + BlockingObjectFilter: func(objectReference corev1.ObjectReference, _ []metav1.OwnerReference) bool { + return objectReference.Kind == "Machine" + }, + }, + { + // The DockerCluster should be deleted in this phase. + ObjectFilter: func(objectReference corev1.ObjectReference, _ []metav1.OwnerReference) bool { + return objectReference.Kind == "DockerCluster" + }, + // Of the above, the DockerCluster should be considered to be blocking. + BlockingObjectFilter: func(objectReference corev1.ObjectReference, _ []metav1.OwnerReference) bool { + return objectReference.Kind == "DockerCluster" + }, + }, + }, + } + }) +}) + +func hasOwner(ownerReferences []metav1.OwnerReference, owners ...string) bool { + ownersSet := sets.New[string](owners...) + for _, owner := range ownerReferences { + if ownersSet.Has(owner.Kind) { + return true + } + } + return false +} diff --git a/test/framework/finalizers_helpers.go b/test/framework/finalizers_helpers.go index 946ec36ea3b3..0cfd61c29f86 100644 --- a/test/framework/finalizers_helpers.go +++ b/test/framework/finalizers_helpers.go @@ -42,8 +42,10 @@ import ( // CoreFinalizersAssertionWithLegacyClusters maps Cluster API core types to their expected finalizers for legacy Clusters. var CoreFinalizersAssertionWithLegacyClusters = map[string]func(types.NamespacedName) []string{ - clusterKind: func(_ types.NamespacedName) []string { return []string{clusterv1.ClusterFinalizer} }, - machineKind: func(_ types.NamespacedName) []string { return []string{clusterv1.MachineFinalizer} }, + clusterKind: func(_ types.NamespacedName) []string { return []string{clusterv1.ClusterFinalizer} }, + machineKind: func(_ types.NamespacedName) []string { return []string{clusterv1.MachineFinalizer} }, + machineSetKind: func(_ types.NamespacedName) []string { return []string{clusterv1.MachineSetFinalizer} }, + machineDeploymentKind: func(_ types.NamespacedName) []string { return []string{clusterv1.MachineDeploymentFinalizer} }, } // CoreFinalizersAssertionWithClassyClusters maps Cluster API core types to their expected finalizers for classy Clusters. 
@@ -52,8 +54,12 @@ var CoreFinalizersAssertionWithClassyClusters = func() map[string]func(types.Nam
 	for k, v := range CoreFinalizersAssertionWithLegacyClusters {
 		r[k] = v
 	}
-	r[machineSetKind] = func(_ types.NamespacedName) []string { return []string{clusterv1.MachineSetTopologyFinalizer} }
-	r[machineDeploymentKind] = func(_ types.NamespacedName) []string { return []string{clusterv1.MachineDeploymentTopologyFinalizer} }
+	r[machineSetKind] = func(_ types.NamespacedName) []string {
+		return []string{clusterv1.MachineSetTopologyFinalizer, clusterv1.MachineSetFinalizer}
+	}
+	r[machineDeploymentKind] = func(_ types.NamespacedName) []string {
+		return []string{clusterv1.MachineDeploymentTopologyFinalizer, clusterv1.MachineDeploymentFinalizer}
+	}
 	return r
 }()
 
diff --git a/util/log/log.go b/util/log/log.go
index 8a8e0354ff60..11a41c460361 100644
--- a/util/log/log.go
+++ b/util/log/log.go
@@ -19,6 +19,8 @@ package log
 
 import (
 	"context"
+	"fmt"
+	"strings"
 
 	"github.com/go-logr/logr"
 	"github.com/pkg/errors"
@@ -104,3 +106,25 @@ func getOwners(ctx context.Context, c client.Client, obj metav1.Object) ([]owner
 
 	return owners, nil
 }
+
+// ObjNamesString returns a comma-separated list of the object names, limited to
+// five objects. For more than five objects, it outputs the first five names and
+// adds information about how many more are in the given list.
+func ObjNamesString[T client.Object](objs []T) string {
+	shortenedBy := 0
+	if len(objs) > 5 {
+		shortenedBy = len(objs) - 5
+		objs = objs[:5]
+	}
+
+	stringList := []string{}
+	for _, obj := range objs {
+		stringList = append(stringList, obj.GetName())
+	}
+
+	if shortenedBy > 0 {
+		stringList = append(stringList, fmt.Sprintf("... (%d more)", shortenedBy))
+	}
+
+	return strings.Join(stringList, ", ")
+}
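
Editor's note (not part of the diff): below is a minimal usage sketch of the new clog.ObjNamesString helper, which the reconcileDelete functions above use for their "Waiting for ... to be deleted" log lines. The main package and the Machine fixtures are illustrative assumptions; the expected output follows directly from the implementation shown in the util/log/log.go hunk.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	clog "sigs.k8s.io/cluster-api/util/log"
)

func main() {
	// Build seven Machines named m0..m6 as sample objects.
	machines := make([]*clusterv1.Machine, 0, 7)
	for i := 0; i < 7; i++ {
		machines = append(machines, &clusterv1.Machine{
			ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("m%d", i)},
		})
	}

	// Only the first five names are listed; the remainder is summarized.
	// Prints: m0, m1, m2, m3, m4, ... (2 more)
	fmt.Println(clog.ObjNamesString(machines))
}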