Remove handling for orphaned VSphereMachines
Signed-off-by: killianmuldoon <[email protected]>
killianmuldoon committed Nov 9, 2023
1 parent a1db12e commit c82f792
Showing 5 changed files with 167 additions and 196 deletions.
68 changes: 2 additions & 66 deletions controllers/vspherecluster_reconciler.go
@@ -121,10 +121,6 @@ func (r *clusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_
}
}()

if err := r.setOwnerRefsOnVsphereMachines(ctx, clusterContext); err != nil {
return reconcile.Result{}, errors.Wrapf(err, "failed to set owner refs on VSphereMachine objects")
}

// Handle deleted clusters
if !vsphereCluster.DeletionTimestamp.IsZero() {
return r.reconcileDelete(ctx, clusterContext)
@@ -151,37 +147,8 @@ func (r *clusterReconciler) reconcileDelete(ctx context.Context, clusterCtx *cap
"unable to list VSphereMachines part of VSphereCluster %s/%s", clusterCtx.VSphereCluster.Namespace, clusterCtx.VSphereCluster.Name)
}

machineDeletionCount := 0
var deletionErrors []error
for _, vsphereMachine := range vsphereMachines {
// Note: We have to use := here to not overwrite log & ctx outside the for loop.
log := log.WithValues("VSphereMachine", klog.KObj(vsphereMachine))
ctx := ctrl.LoggerInto(ctx, log)

// If the VSphereMachine is not owned by the CAPI Machine object because the machine object was deleted
// before setting the owner references, then proceed with the deletion of the VSphereMachine object.
// This is required until CAPI has a solution for https://github.com/kubernetes-sigs/cluster-api/issues/5483
if !clusterutilv1.IsOwnedByObject(vsphereMachine, clusterCtx.VSphereCluster) || len(vsphereMachine.OwnerReferences) != 1 {
continue
}
machineDeletionCount++
// Remove the finalizer since VM creation wouldn't proceed
log.Info("Removing finalizer from VSphereMachine")
ctrlutil.RemoveFinalizer(vsphereMachine, infrav1.MachineFinalizer)
if err := r.Client.Update(ctx, vsphereMachine); err != nil {
return reconcile.Result{}, err
}
if err := r.Client.Delete(ctx, vsphereMachine); err != nil && !apierrors.IsNotFound(err) {
log.Error(err, "Failed to delete for VSphereMachine")
deletionErrors = append(deletionErrors, err)
}
}
if len(deletionErrors) > 0 {
return reconcile.Result{}, kerrors.NewAggregate(deletionErrors)
}

if len(vsphereMachines)-machineDeletionCount > 0 {
log.Info("Waiting for VSphereMachines to be deleted", "count", len(vsphereMachines)-machineDeletionCount)
if len(vsphereMachines) > 0 {
log.Info("Waiting for VSphereMachines to be deleted", "count", len(vsphereMachines))
return reconcile.Result{RequeueAfter: 10 * time.Second}, nil
}

@@ -528,37 +495,6 @@ func (r *clusterReconciler) isControlPlaneInitialized(ctx context.Context, clust
return conditions.IsTrue(clusterCtx.Cluster, clusterv1.ControlPlaneInitializedCondition)
}

func (r *clusterReconciler) setOwnerRefsOnVsphereMachines(ctx context.Context, clusterCtx *capvcontext.ClusterContext) error {
vsphereMachines, err := infrautilv1.GetVSphereMachinesInCluster(ctx, r.Client, clusterCtx.Cluster.Namespace, clusterCtx.Cluster.Name)
if err != nil {
return errors.Wrapf(err,
"unable to list VSphereMachines part of VSphereCluster %s/%s", clusterCtx.VSphereCluster.Namespace, clusterCtx.VSphereCluster.Name)
}

var patchErrors []error
for _, vsphereMachine := range vsphereMachines {
patchHelper, err := patch.NewHelper(vsphereMachine, r.Client)
if err != nil {
patchErrors = append(patchErrors, err)
continue
}

vsphereMachine.SetOwnerReferences(clusterutilv1.EnsureOwnerRef(
vsphereMachine.OwnerReferences,
metav1.OwnerReference{
APIVersion: clusterCtx.VSphereCluster.APIVersion,
Kind: clusterCtx.VSphereCluster.Kind,
Name: clusterCtx.VSphereCluster.Name,
UID: clusterCtx.VSphereCluster.UID,
}))

if err := patchHelper.Patch(ctx, vsphereMachine); err != nil {
patchErrors = append(patchErrors, err)
}
}
return kerrors.NewAggregate(patchErrors)
}

func (r *clusterReconciler) reconcileClusterModules(ctx context.Context, clusterCtx *capvcontext.ClusterContext) (reconcile.Result, error) {
if feature.Gates.Enabled(feature.NodeAntiAffinity) {
return r.clusterModuleReconciler.Reconcile(ctx, clusterCtx)
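
Note (illustration, not part of the diff): after this change, the remaining deletion flow in reconcileDelete simply lists the cluster's VSphereMachines and requeues until they have been removed through the normal CAPI deletion flow, instead of force-deleting orphans. A rough sketch of the resulting code, with surrounding logic elided:

    vsphereMachines, err := infrautilv1.GetVSphereMachinesInCluster(ctx, r.Client, clusterCtx.Cluster.Namespace, clusterCtx.Cluster.Name)
    if err != nil {
        return reconcile.Result{}, errors.Wrapf(err,
            "unable to list VSphereMachines part of VSphereCluster %s/%s", clusterCtx.VSphereCluster.Namespace, clusterCtx.VSphereCluster.Name)
    }
    // No per-machine orphan handling any more: just wait for the machines to go away.
    if len(vsphereMachines) > 0 {
        log.Info("Waiting for VSphereMachines to be deleted", "count", len(vsphereMachines))
        return reconcile.Result{RequeueAfter: 10 * time.Second}, nil
    }
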
125 changes: 0 additions & 125 deletions controllers/vspherecluster_reconciler_test.go
@@ -30,15 +30,13 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
clusterutil1v1 "sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/controller-runtime/pkg/client"

infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/context/fake"
"sigs.k8s.io/cluster-api-provider-vsphere/pkg/identity"
"sigs.k8s.io/cluster-api-provider-vsphere/test/helpers"
"sigs.k8s.io/cluster-api-provider-vsphere/test/helpers/vcsim"
)

@@ -538,131 +536,8 @@ var _ = Describe("VIM based VSphere ClusterReconciler", func() {
})
})
})

Context("For VSphereMachines belonging to the cluster", func() {
var (
namespace string
testNs *corev1.Namespace
)

BeforeEach(func() {
var err error
testNs, err = testEnv.CreateNamespace(ctx, "vsm-owner-ref")
Expect(err).NotTo(HaveOccurred())
namespace = testNs.Name
})

AfterEach(func() {
Expect(testEnv.Delete(ctx, testNs)).To(Succeed())
})

It("sets owner references to those machines", func() {
// Create the VSphereCluster object
instance := &infrav1.VSphereCluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "test1-",
Namespace: namespace,
},
Spec: infrav1.VSphereClusterSpec{
Server: testEnv.Simulator.ServerURL().Host,
},
}
Expect(testEnv.Create(ctx, instance)).To(Succeed())

capiCluster := &clusterv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "capi-test1-",
Namespace: namespace,
},
Spec: clusterv1.ClusterSpec{
InfrastructureRef: &corev1.ObjectReference{
APIVersion: infrav1.GroupVersion.String(),
Kind: "VsphereCluster",
Name: instance.Name,
},
},
}
Expect(testEnv.Create(ctx, capiCluster)).To(Succeed())

// Make sure the VSphereCluster exists.
key := client.ObjectKey{Namespace: namespace, Name: instance.Name}
Eventually(func() error {
return testEnv.Get(ctx, key, instance)
}, timeout).Should(BeNil())

machineCount := 3
for i := 0; i < machineCount; i++ {
Expect(createVsphereMachine(ctx, testEnv, namespace, capiCluster.Name)).To(Succeed())
}

Eventually(func() bool {
ph, err := patch.NewHelper(instance, testEnv)
Expect(err).ShouldNot(HaveOccurred())
instance.OwnerReferences = append(instance.OwnerReferences, metav1.OwnerReference{
Kind: "Cluster",
APIVersion: clusterv1.GroupVersion.String(),
Name: capiCluster.Name,
UID: "blah",
})
Expect(ph.Patch(ctx, instance, patch.WithStatusObservedGeneration{})).ShouldNot(HaveOccurred())
return true
}, timeout).Should(BeTrue())

By("checking for presence of VSphereMachine objects")
Eventually(func() int {
machines := &infrav1.VSphereMachineList{}
if err := testEnv.List(ctx, machines, client.InNamespace(namespace),
client.MatchingLabels(map[string]string{clusterv1.ClusterNameLabel: capiCluster.Name})); err != nil {
return -1
}
return len(machines.Items)
}, timeout).Should(Equal(machineCount))

By("checking VSphereMachine owner refs")
Eventually(func() int {
machines := &infrav1.VSphereMachineList{}
if err := testEnv.List(ctx, machines, client.InNamespace(namespace),
client.MatchingLabels(map[string]string{clusterv1.ClusterNameLabel: capiCluster.Name})); err != nil {
return 0
}
ownerRefSet := 0
for _, m := range machines.Items {
if len(m.OwnerReferences) >= 1 && clusterutil1v1.HasOwnerRef(m.OwnerReferences, metav1.OwnerReference{
APIVersion: infrav1.GroupVersion.String(),
Kind: instance.Kind,
Name: instance.Name,
UID: instance.UID,
}) {
ownerRefSet++
}
}
return ownerRefSet
}, timeout).Should(Equal(machineCount))
})
})
})

func createVsphereMachine(ctx context.Context, env *helpers.TestEnvironment, namespace, clusterName string) error {
vsphereMachine := &infrav1.VSphereMachine{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "test-vsp",
Namespace: namespace,
Labels: map[string]string{clusterv1.ClusterNameLabel: clusterName},
},
Spec: infrav1.VSphereMachineSpec{
VirtualMachineCloneSpec: infrav1.VirtualMachineCloneSpec{
Template: "ubuntu-k9s-1.19",
Network: infrav1.NetworkSpec{
Devices: []infrav1.NetworkDeviceSpec{
{NetworkName: "network-1", DHCP4: true},
},
},
},
},
}
return env.Create(ctx, vsphereMachine)
}

func TestClusterReconciler_ReconcileDeploymentZones(t *testing.T) {
server := "vcenter123.foo.com"

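
Hypothetical follow-up (not part of this commit): a test for the new behaviour in vspheremachine_controller.go could assert that a stale VSphereCluster ownerReference is dropped from a VSphereMachine again. A sketch reusing the kind of helpers seen in the removed test above (testEnv, timeout, instance and vsphereMachine are assumed names from that test context):

    By("checking the stale VSphereCluster owner ref is removed")
    Eventually(func() bool {
        machine := &infrav1.VSphereMachine{}
        if err := testEnv.Get(ctx, client.ObjectKeyFromObject(vsphereMachine), machine); err != nil {
            return true
        }
        return clusterutil1v1.HasOwnerRef(machine.OwnerReferences, metav1.OwnerReference{
            APIVersion: infrav1.GroupVersion.String(),
            Kind:       "VSphereCluster",
            Name:       instance.Name,
        })
    }, timeout).Should(BeFalse())
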
18 changes: 18 additions & 0 deletions controllers/vspheremachine_controller.go
@@ -242,6 +242,23 @@ func (r *machineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_
return reconcile.Result{}, nil
}

// If the VSphereCluster has been previously set as an ownerReference remove it. This may have been set in an older
// version of CAPV to prevent VSphereMachines from being orphaned, but is no longer needed.
// For more info see: https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/issues/2054
// TODO: This should be removed in a future release of CAPV.
if cluster.Spec.InfrastructureRef != nil {
machineContext.GetVSphereMachine().SetOwnerReferences(
clusterutilv1.RemoveOwnerRef(
machineContext.GetVSphereMachine().GetOwnerReferences(),
metav1.OwnerReference{
Name: cluster.Spec.InfrastructureRef.Name,
APIVersion: cluster.Spec.InfrastructureRef.APIVersion,
Kind: cluster.Spec.InfrastructureRef.Kind,
},
),
)
}

// Fetch the VSphereCluster and update the machine context
machineContext, err = r.VMService.FetchVSphereCluster(ctx, cluster, machineContext)
if err != nil {
@@ -255,6 +272,7 @@ func (r *machineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_
ctrlutil.AddFinalizer(machineContext.GetVSphereMachine(), infrav1.MachineFinalizer)
return reconcile.Result{}, nil
}

// Handle non-deleted machines
return r.reconcileNormal(ctx, machineContext)
}
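
Illustration (not part of this commit): clusterutilv1.RemoveOwnerRef from sigs.k8s.io/cluster-api/util returns the slice without any reference that matches the given owner; matching is by API group, Kind and Name, which is why the snippet above does not need to set a UID. A minimal sketch with hypothetical names ("worker-0", "my-cluster"):

    refs := []metav1.OwnerReference{
        {APIVersion: clusterv1.GroupVersion.String(), Kind: "Machine", Name: "worker-0"},
        {APIVersion: infrav1.GroupVersion.String(), Kind: "VSphereCluster", Name: "my-cluster"},
    }
    // Drop the stale VSphereCluster reference; other owners such as the Machine are kept.
    refs = clusterutilv1.RemoveOwnerRef(refs, metav1.OwnerReference{
        APIVersion: infrav1.GroupVersion.String(),
        Kind:       "VSphereCluster",
        Name:       "my-cluster",
    })
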