add more details to the v1beta2 deleting condition
chrischdi committed Oct 18, 2024
1 parent 5bc99d6 commit 79741e6
Showing 2 changed files with 117 additions and 12 deletions.
32 changes: 31 additions & 1 deletion internal/controllers/machine/machine_controller.go
@@ -935,7 +935,7 @@ func (r *Reconciler) shouldWaitForNodeVolumes(ctx context.Context, s *scope) (ct
r.reconcileDeleteCache.Add(cache.NewReconcileEntry(machine, time.Now().Add(waitForVolumeDetachRetryInterval)))

s.deletingReason = clusterv1.MachineDeletingWaitingForVolumeDetachV1Beta2Reason
- s.deletingMessage = fmt.Sprintf("Waiting for Node volumes to be detached (started at %s)", machine.Status.Deletion.WaitForNodeVolumeDetachStartTime.Format(time.RFC3339))
+ s.deletingMessage = attachedVolumeInformation.conditionMessage(machine)

log.Info("Waiting for Node volumes to be detached",
attachedVolumeInformation.logKeys()...,
@@ -1240,6 +1240,36 @@ func (a *attachedVolumeInformation) logKeys() []any {
return logKeys
}

func (a *attachedVolumeInformation) conditionMessage(machine *clusterv1.Machine) string {
if a.isEmpty() {
return ""
}

conditionMessage := fmt.Sprintf("Waiting for Node volumes to be detached (started at %s)", machine.Status.Deletion.WaitForNodeVolumeDetachStartTime.Format(time.RFC3339))

if len(a.persistentVolumeClaims) > 0 {
slices.Sort(a.persistentVolumeClaims)
conditionMessage = fmt.Sprintf("%s\n* PersistentVolumeClaims: %s", conditionMessage, clog.StringListToString(a.persistentVolumeClaims))
}

if len(a.persistentVolumesWithoutPVCClaimRef) > 0 {
slices.Sort(a.persistentVolumesWithoutPVCClaimRef)
conditionMessage = fmt.Sprintf("%s\n* PersistentVolumes without a .spec.claimRef to a PersistentVolumeClaim: %s", conditionMessage, clog.StringListToString(a.persistentVolumesWithoutPVCClaimRef))
}

if len(a.nodeStatusVolumeNamesWithoutPV) > 0 {
slices.Sort(a.nodeStatusVolumeNamesWithoutPV)
conditionMessage = fmt.Sprintf("%s\n* Node.status.volumesAttached entries not matching a PersistentVolume: %s", conditionMessage, clog.StringListToString(a.nodeStatusVolumeNamesWithoutPV))
}

if len(a.persistentVolumeNamesWithoutPV) > 0 {
slices.Sort(a.persistentVolumeNamesWithoutPV)
conditionMessage = fmt.Sprintf("%s\n* VolumeAttachment.spec.source.persistentVolumeName not matching a PersistentVolume: %s", conditionMessage, clog.StringListToString(a.persistentVolumeNamesWithoutPV))
}

return conditionMessage
}
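For readers who want to see the resulting text without running the controller, below is a minimal standalone sketch (not the controller code) of how conditionMessage assembles the Deleting condition message. It assumes the attachedVolumeInformation fields are plain string slices, substitutes strings.Join for clog.StringListToString, and omits the early return the real method takes when no blocking volumes are tracked (isEmpty).

package main

import (
	"fmt"
	"slices"
	"strings"
	"time"
)

// attachedVolumeInfo mirrors the string-slice fields used by the real
// attachedVolumeInformation type (field names assumed from the diff above).
type attachedVolumeInfo struct {
	persistentVolumeClaims              []string
	persistentVolumesWithoutPVCClaimRef []string
	nodeStatusVolumeNamesWithoutPV      []string
	persistentVolumeNamesWithoutPV      []string
}

// conditionMessage builds the header line plus one sorted bullet per
// non-empty category, in the format asserted by the test expectations below.
func (a attachedVolumeInfo) conditionMessage(startTime time.Time) string {
	msg := fmt.Sprintf("Waiting for Node volumes to be detached (started at %s)", startTime.Format(time.RFC3339))

	appendGroup := func(header string, names []string) {
		if len(names) == 0 {
			return
		}
		slices.Sort(names)
		msg = fmt.Sprintf("%s\n* %s: %s", msg, header, strings.Join(names, ", "))
	}

	appendGroup("PersistentVolumeClaims", a.persistentVolumeClaims)
	appendGroup("PersistentVolumes without a .spec.claimRef to a PersistentVolumeClaim", a.persistentVolumesWithoutPVCClaimRef)
	appendGroup("Node.status.volumesAttached entries not matching a PersistentVolume", a.nodeStatusVolumeNamesWithoutPV)
	appendGroup("VolumeAttachment.spec.source.persistentVolumeName not matching a PersistentVolume", a.persistentVolumeNamesWithoutPV)

	return msg
}

func main() {
	info := attachedVolumeInfo{
		persistentVolumeClaims:         []string{"default/test-pvc"},
		nodeStatusVolumeNamesWithoutPV: []string{"kubernetes.io/csi/dummy^foo"},
	}
	start, _ := time.Parse(time.RFC3339, "2024-10-09T16:13:59Z")
	// Prints the header followed by one bullet per populated category.
	fmt.Println(info.conditionMessage(start))
}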

// getPersistentVolumesWaitingForDetach returns a list of all persistentVolumes which either have
// the calculated AttachedVolume name in attachedVolumeNames or their name in attachedPVNames.
// PersistentVolumes which refer a PersistentVolumeClaim contained in pvcsToIgnore are filtered out.
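The doc comment above belongs to a pre-existing helper whose body is collapsed in this view. As a rough, hypothetical sketch of the documented contract only (the pv struct, its field names, and the volumeHandle matching below are illustrative stand-ins, not the upstream implementation or types):

package main

import "fmt"

// pv is an illustrative stand-in for corev1.PersistentVolume, reduced to the
// fields the documented filter cares about.
type pv struct {
	name         string
	claimRefKind string // "PersistentVolumeClaim" when the PV is bound via a PVC
	claimRef     string // namespace/name of the referenced claim, if any
	volumeHandle string // name the volume appears under in Node.status.volumesAttached
}

// persistentVolumesWaitingForDetach keeps every PV whose calculated attached-volume
// name is in attachedVolumeNames or whose object name is in attachedPVNames, and
// drops PVs whose claimRef points to a PVC listed in pvcsToIgnore (e.g. volumes
// belonging to DaemonSet pods, which the controller ignores).
func persistentVolumesWaitingForDetach(pvs []pv, attachedVolumeNames, attachedPVNames, pvcsToIgnore map[string]bool) []pv {
	var waiting []pv
	for _, p := range pvs {
		if !attachedVolumeNames[p.volumeHandle] && !attachedPVNames[p.name] {
			continue // not attached to the Node at all
		}
		if p.claimRefKind == "PersistentVolumeClaim" && pvcsToIgnore[p.claimRef] {
			continue // claim is explicitly ignored
		}
		waiting = append(waiting, p)
	}
	return waiting
}

func main() {
	pvs := []pv{{
		name:         "test-pv",
		claimRefKind: "PersistentVolumeClaim",
		claimRef:     "default/test-pvc",
		volumeHandle: "kubernetes.io/csi/dummy^foo",
	}}
	attached := map[string]bool{"kubernetes.io/csi/dummy^foo": true}
	fmt.Println(persistentVolumesWaitingForDetach(pvs, attached, nil, nil))
}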
97 changes: 86 additions & 11 deletions internal/controllers/machine/machine_controller_test.go
@@ -1931,6 +1931,9 @@ func TestShouldWaitForNodeVolumes(t *testing.T) {
},
}

persistentVolumeWithoutClaim := persistentVolume.DeepCopy()
persistentVolumeWithoutClaim.Spec.ClaimRef.Kind = "NotAPVC"

volumeAttachment := &storagev1.VolumeAttachment{
ObjectMeta: metav1.ObjectMeta{
Name: "test-va",
@@ -1991,9 +1994,56 @@ func TestShouldWaitForNodeVolumes(t *testing.T) {
objs: []client.Object{
persistentVolume,
},
- expected: true,
- expectedDeletingReason: clusterv1.MachineDeletingWaitingForVolumeDetachV1Beta2Reason,
- expectedDeletingMessage: "Waiting for Node volumes to be detached (started at 2024-10-09T16:13:59Z)"
+ expected: true,
+ expectedDeletingReason: clusterv1.MachineDeletingWaitingForVolumeDetachV1Beta2Reason,
+ expectedDeletingMessage: `Waiting for Node volumes to be detached (started at 2024-10-09T16:13:59Z)
+ * PersistentVolumeClaims: default/test-pvc`,
},
{
name: "Node has volumes attached according to node status but the pv does not reference a PersistentVolumeClaim",
node: &corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: nodeName,
},
Status: corev1.NodeStatus{
Conditions: []corev1.NodeCondition{
{
Type: corev1.NodeReady,
Status: corev1.ConditionTrue,
},
},
VolumesAttached: attachedVolumes,
},
},
objs: []client.Object{
persistentVolumeWithoutClaim,
},
expected: true,
expectedDeletingReason: clusterv1.MachineDeletingWaitingForVolumeDetachV1Beta2Reason,
expectedDeletingMessage: `Waiting for Node volumes to be detached (started at 2024-10-09T16:13:59Z)
* PersistentVolumes without a .spec.claimRef to a PersistentVolumeClaim: test-pv`,
},
{
name: "Node has volumes attached according to node status but without a pv",
node: &corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: nodeName,
},
Status: corev1.NodeStatus{
Conditions: []corev1.NodeCondition{
{
Type: corev1.NodeReady,
Status: corev1.ConditionTrue,
},
},
VolumesAttached: attachedVolumes,
},
},
objs: []client.Object{},
expected: true,
expectedDeletingReason: clusterv1.MachineDeletingWaitingForVolumeDetachV1Beta2Reason,
expectedDeletingMessage: `Waiting for Node volumes to be detached (started at 2024-10-09T16:13:59Z)
* Node.status.volumesAttached entries not matching a PersistentVolume: kubernetes.io/csi/dummy^foo`,
},
{
name: "Node has volumes attached according to node status but its from a daemonset pod which gets ignored",
@@ -2067,9 +2117,33 @@ func TestShouldWaitForNodeVolumes(t *testing.T) {
volumeAttachment,
persistentVolume,
},
- expected: true,
- expectedDeletingReason: clusterv1.MachineDeletingWaitingForVolumeDetachV1Beta2Reason,
- expectedDeletingMessage: "Waiting for Node volumes to be detached (started at 2024-10-09T16:13:59Z)"
+ expected: true,
+ expectedDeletingReason: clusterv1.MachineDeletingWaitingForVolumeDetachV1Beta2Reason,
+ expectedDeletingMessage: `Waiting for Node volumes to be detached (started at 2024-10-09T16:13:59Z)
+ * PersistentVolumeClaims: default/test-pvc`,
},
{
name: "Node has volumes attached according to volumeattachments but without a pv",
node: &corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: nodeName,
},
Status: corev1.NodeStatus{
Conditions: []corev1.NodeCondition{
{
Type: corev1.NodeReady,
Status: corev1.ConditionTrue,
},
},
},
},
objs: []client.Object{
volumeAttachment,
},
expected: true,
expectedDeletingReason: clusterv1.MachineDeletingWaitingForVolumeDetachV1Beta2Reason,
expectedDeletingMessage: `Waiting for Node volumes to be detached (started at 2024-10-09T16:13:59Z)
* VolumeAttachment.spec.source.persistentVolumeName not matching a PersistentVolume: test-pv`,
},
{
name: "Node has volumes attached according to volumeattachments but its from a daemonset pod which gets ignored",
@@ -2167,9 +2241,10 @@ func TestShouldWaitForNodeVolumes(t *testing.T) {
},
persistentVolume,
},
- expected: true,
- expectedDeletingReason: clusterv1.MachineDeletingWaitingForVolumeDetachV1Beta2Reason,
- expectedDeletingMessage: "Waiting for Node volumes to be detached (started at 2024-10-09T16:13:59Z)"
+ expected: true,
+ expectedDeletingReason: clusterv1.MachineDeletingWaitingForVolumeDetachV1Beta2Reason,
+ expectedDeletingMessage: `Waiting for Node volumes to be detached (started at 2024-10-09T16:13:59Z)
+ * PersistentVolumeClaims: default/test-pvc`,
},
{
name: "Node has no volumes attached",
@@ -2252,8 +2327,8 @@ func TestShouldWaitForNodeVolumes(t *testing.T) {
got, err := r.shouldWaitForNodeVolumes(ctx, s)
g.Expect(err).ToNot(HaveOccurred())
g.Expect(!got.IsZero()).To(Equal(tt.expected))
- g.Expect(s.deletingReason).To(Equal(tt.expectedDeletingReason))
- g.Expect(s.deletingMessage).To(Equal(tt.expectedDeletingMessage))
+ g.Expect(s.deletingReason).To(BeEquivalentTo(tt.expectedDeletingReason))
+ g.Expect(s.deletingMessage).To(BeEquivalentTo(tt.expectedDeletingMessage))
})
}
}
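These are standard table-driven cases, so they can be exercised in isolation with the usual Go tooling, for example go test ./internal/controllers/machine/... -run TestShouldWaitForNodeVolumes from the repository root, assuming whatever test environment the package's TestMain sets up (e.g. envtest binaries) is available locally.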