From 42eac4c8d18115f7ae3969d88374ae7bbc05ce37 Mon Sep 17 00:00:00 2001 From: Christian Schlotter Date: Tue, 10 Sep 2024 16:42:22 +0200 Subject: [PATCH 1/7] machine: Introduce Deletion status field and add timestamps for drain and volumeDetach instead of using the condition --- api/v1beta1/machine_types.go | 12 +++++++++ internal/apis/core/v1alpha3/conversion.go | 1 + internal/apis/core/v1alpha4/conversion.go | 1 + .../controllers/machine/machine_controller.go | 27 ++++++++++++------- 4 files changed, 31 insertions(+), 10 deletions(-) diff --git a/api/v1beta1/machine_types.go b/api/v1beta1/machine_types.go index ebe3030d6eb1..085f76a29458 100644 --- a/api/v1beta1/machine_types.go +++ b/api/v1beta1/machine_types.go @@ -230,10 +230,22 @@ type MachineStatus struct { // Conditions defines current service state of the Machine. // +optional Conditions Conditions `json:"conditions,omitempty"` + + // Deletion is the deletion state of the Machine. + // +optional + Deletion MachineStatusDeletion `json:"deletion,omitempty"` } // ANCHOR_END: MachineStatus +// MachineStatusDeletion is the deletion state of the Machine. +type MachineStatusDeletion struct { + // NodeDrainStartTime is the time when the drain of the node started. + NodeDrainStartTime *metav1.Time `json:"nodeDrainStartTime,omitempty"` + // WaitForNodeVolumeDetachStartTime is the time when waiting for volume detachment started. + WaitForNodeVolumeDetachStartTime *metav1.Time `json:"nodeVolumeDetachStartTime,omitempty"` +} + // SetTypedPhase sets the Phase field to the string representation of MachinePhase. func (m *MachineStatus) SetTypedPhase(p MachinePhase) { m.Phase = string(p) diff --git a/internal/apis/core/v1alpha3/conversion.go b/internal/apis/core/v1alpha3/conversion.go index 405cf123f813..c325b48cee21 100644 --- a/internal/apis/core/v1alpha3/conversion.go +++ b/internal/apis/core/v1alpha3/conversion.go @@ -101,6 +101,7 @@ func (src *Machine) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.NodeVolumeDetachTimeout = restored.Spec.NodeVolumeDetachTimeout dst.Status.NodeInfo = restored.Status.NodeInfo dst.Status.CertificatesExpiryDate = restored.Status.CertificatesExpiryDate + dst.Status.Deletion = restored.Status.Deletion return nil } diff --git a/internal/apis/core/v1alpha4/conversion.go b/internal/apis/core/v1alpha4/conversion.go index ba4469e52445..8043349a9678 100644 --- a/internal/apis/core/v1alpha4/conversion.go +++ b/internal/apis/core/v1alpha4/conversion.go @@ -190,6 +190,7 @@ func (src *Machine) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.NodeDeletionTimeout = restored.Spec.NodeDeletionTimeout dst.Status.CertificatesExpiryDate = restored.Status.CertificatesExpiryDate dst.Spec.NodeVolumeDetachTimeout = restored.Spec.NodeVolumeDetachTimeout + dst.Status.Deletion = restored.Status.Deletion return nil } diff --git a/internal/controllers/machine/machine_controller.go b/internal/controllers/machine/machine_controller.go index c4daa59d1ae2..4e8f13282bc8 100644 --- a/internal/controllers/machine/machine_controller.go +++ b/internal/controllers/machine/machine_controller.go @@ -31,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -415,6 +416,10 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu conditions.MarkFalse(m, clusterv1.VolumeDetachSucceededCondition, 
clusterv1.WaitingForVolumeDetachReason, clusterv1.ConditionSeverityInfo, "Waiting for node volumes to be detached") } + if m.Status.Deletion.WaitForNodeVolumeDetachStartTime == nil { + m.Status.Deletion.WaitForNodeVolumeDetachStartTime = ptr.To(metav1.Now()) + } + if ok, err := r.shouldWaitForNodeVolumes(ctx, cluster, m.Status.NodeRef.Name); ok || err != nil { if err != nil { r.recorder.Eventf(m, corev1.EventTypeWarning, "FailedWaitForVolumeDetach", "error waiting for node volumes detaching, Machine's node %q: %v", m.Status.NodeRef.Name, err) @@ -527,34 +532,32 @@ func (r *Reconciler) nodeDrainTimeoutExceeded(machine *clusterv1.Machine) bool { return false } - // if the draining succeeded condition does not exist - if conditions.Get(machine, clusterv1.DrainingSucceededCondition) == nil { + // if the NodeDrainStartTime does not exist + if machine.Status.Deletion.NodeDrainStartTime == nil { return false } now := time.Now() - firstTimeDrain := conditions.GetLastTransitionTime(machine, clusterv1.DrainingSucceededCondition) - diff := now.Sub(firstTimeDrain.Time) + diff := now.Sub(machine.Status.Deletion.NodeDrainStartTime.Time) return diff.Seconds() >= machine.Spec.NodeDrainTimeout.Seconds() } // nodeVolumeDetachTimeoutExceeded returns False if either NodeVolumeDetachTimeout is set to nil or <=0 OR -// VolumeDetachSucceededCondition is not set on the Machine. Otherwise returns true if the timeout is expired -// since the last transition time of VolumeDetachSucceededCondition. +// WaitForNodeVolumeDetachStartTime is not set on the Machine. Otherwise returns true if the timeout is expired +// since the WaitForNodeVolumeDetachStartTime. func (r *Reconciler) nodeVolumeDetachTimeoutExceeded(machine *clusterv1.Machine) bool { // if the NodeVolumeDetachTimeout type is not set by user if machine.Spec.NodeVolumeDetachTimeout == nil || machine.Spec.NodeVolumeDetachTimeout.Seconds() <= 0 { return false } - // if the volume detaching succeeded condition does not exist - if conditions.Get(machine, clusterv1.VolumeDetachSucceededCondition) == nil { + // if the NodeVolumeDetachStartTime does not exist + if machine.Status.Deletion.WaitForNodeVolumeDetachStartTime == nil { return false } now := time.Now() - firstTimeDetach := conditions.GetLastTransitionTime(machine, clusterv1.VolumeDetachSucceededCondition) - diff := now.Sub(firstTimeDetach.Time) + diff := now.Sub(machine.Status.Deletion.WaitForNodeVolumeDetachStartTime.Time) return diff.Seconds() >= machine.Spec.NodeVolumeDetachTimeout.Seconds() } @@ -697,6 +700,10 @@ func (r *Reconciler) drainNode(ctx context.Context, cluster *clusterv1.Cluster, log.V(3).Info("Node is unreachable, draining will use 1s GracePeriodSeconds and will ignore all Pods that have a deletionTimestamp > 1s old") } + if machine.Status.Deletion.NodeDrainStartTime == nil { + machine.Status.Deletion.NodeDrainStartTime = ptr.To(metav1.Now()) + } + if err := drainer.CordonNode(ctx, node); err != nil { // Machine will be re-reconciled after a cordon failure. 
return ctrl.Result{}, errors.Wrapf(err, "failed to cordon Node %s", node.Name) From 6a0422e9d29ae3b5ccb69440bc51106a34a42c3f Mon Sep 17 00:00:00 2001 From: Christian Schlotter Date: Tue, 10 Sep 2024 17:42:12 +0200 Subject: [PATCH 2/7] fix tests --- .../machine/machine_controller_test.go | 48 +++++-------------- 1 file changed, 12 insertions(+), 36 deletions(-) diff --git a/internal/controllers/machine/machine_controller_test.go b/internal/controllers/machine/machine_controller_test.go index ec72dee7cc50..e5abb53e347d 100644 --- a/internal/controllers/machine/machine_controller_test.go +++ b/internal/controllers/machine/machine_controller_test.go @@ -1386,12 +1386,8 @@ func TestIsNodeDrainedAllowed(t *testing.T) { }, Status: clusterv1.MachineStatus{ - Conditions: clusterv1.Conditions{ - { - Type: clusterv1.DrainingSucceededCondition, - Status: corev1.ConditionFalse, - LastTransitionTime: metav1.Time{Time: time.Now().Add(-(time.Second * 70)).UTC()}, - }, + Deletion: clusterv1.MachineStatusDeletion{ + NodeDrainStartTime: &metav1.Time{Time: time.Now().Add(-(time.Second * 70)).UTC()}, }, }, }, @@ -1412,12 +1408,8 @@ func TestIsNodeDrainedAllowed(t *testing.T) { NodeDrainTimeout: &metav1.Duration{Duration: time.Second * 60}, }, Status: clusterv1.MachineStatus{ - Conditions: clusterv1.Conditions{ - { - Type: clusterv1.DrainingSucceededCondition, - Status: corev1.ConditionFalse, - LastTransitionTime: metav1.Time{Time: time.Now().Add(-(time.Second * 30)).UTC()}, - }, + Deletion: clusterv1.MachineStatusDeletion{ + NodeDrainStartTime: &metav1.Time{Time: time.Now().Add(-(time.Second * 30)).UTC()}, }, }, }, @@ -1437,12 +1429,8 @@ func TestIsNodeDrainedAllowed(t *testing.T) { Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, }, Status: clusterv1.MachineStatus{ - Conditions: clusterv1.Conditions{ - { - Type: clusterv1.DrainingSucceededCondition, - Status: corev1.ConditionFalse, - LastTransitionTime: metav1.Time{Time: time.Now().Add(-(time.Second * 1000)).UTC()}, - }, + Deletion: clusterv1.MachineStatusDeletion{ + NodeDrainStartTime: &metav1.Time{Time: time.Now().Add(-(time.Second * 1000)).UTC()}, }, }, }, @@ -1896,12 +1884,8 @@ func TestIsNodeVolumeDetachingAllowed(t *testing.T) { }, Status: clusterv1.MachineStatus{ - Conditions: clusterv1.Conditions{ - { - Type: clusterv1.VolumeDetachSucceededCondition, - Status: corev1.ConditionFalse, - LastTransitionTime: metav1.Time{Time: time.Now().Add(-(time.Second * 60)).UTC()}, - }, + Deletion: clusterv1.MachineStatusDeletion{ + WaitForNodeVolumeDetachStartTime: &metav1.Time{Time: time.Now().Add(-(time.Second * 60)).UTC()}, }, }, }, @@ -1922,12 +1906,8 @@ func TestIsNodeVolumeDetachingAllowed(t *testing.T) { NodeVolumeDetachTimeout: &metav1.Duration{Duration: time.Second * 60}, }, Status: clusterv1.MachineStatus{ - Conditions: clusterv1.Conditions{ - { - Type: clusterv1.VolumeDetachSucceededCondition, - Status: corev1.ConditionFalse, - LastTransitionTime: metav1.Time{Time: time.Now().Add(-(time.Second * 30)).UTC()}, - }, + Deletion: clusterv1.MachineStatusDeletion{ + WaitForNodeVolumeDetachStartTime: &metav1.Time{Time: time.Now().Add(-(time.Second * 30)).UTC()}, }, }, }, @@ -1947,12 +1927,8 @@ func TestIsNodeVolumeDetachingAllowed(t *testing.T) { Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, }, Status: clusterv1.MachineStatus{ - Conditions: clusterv1.Conditions{ - { - Type: clusterv1.VolumeDetachSucceededCondition, - Status: corev1.ConditionFalse, - LastTransitionTime: metav1.Time{Time: time.Now().Add(-(time.Second * 1000)).UTC()}, 
- }, + Deletion: clusterv1.MachineStatusDeletion{ + WaitForNodeVolumeDetachStartTime: &metav1.Time{Time: time.Now().Add(-(time.Second * 1000)).UTC()}, }, }, }, From b2ddb801442320e23fec24fbe1978e937318df32 Mon Sep 17 00:00:00 2001 From: Christian Schlotter Date: Tue, 10 Sep 2024 16:20:05 +0200 Subject: [PATCH 3/7] make generate --- api/v1beta1/zz_generated.deepcopy.go | 24 ++++++++++++ api/v1beta1/zz_generated.openapi.go | 37 ++++++++++++++++++- .../crd/bases/cluster.x-k8s.io_machines.yaml | 14 +++++++ .../core/v1alpha3/zz_generated.conversion.go | 1 + .../core/v1alpha4/zz_generated.conversion.go | 1 + 5 files changed, 76 insertions(+), 1 deletion(-) diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index bb0ecf062264..597d59ab52b2 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -1912,6 +1912,7 @@ func (in *MachineStatus) DeepCopyInto(out *MachineStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + in.Deletion.DeepCopyInto(&out.Deletion) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineStatus. @@ -1924,6 +1925,29 @@ func (in *MachineStatus) DeepCopy() *MachineStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineStatusDeletion) DeepCopyInto(out *MachineStatusDeletion) { + *out = *in + if in.NodeDrainStartTime != nil { + in, out := &in.NodeDrainStartTime, &out.NodeDrainStartTime + *out = (*in).DeepCopy() + } + if in.WaitForNodeVolumeDetachStartTime != nil { + in, out := &in.WaitForNodeVolumeDetachStartTime, &out.WaitForNodeVolumeDetachStartTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineStatusDeletion. +func (in *MachineStatusDeletion) DeepCopy() *MachineStatusDeletion { + if in == nil { + return nil + } + out := new(MachineStatusDeletion) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *MachineTemplateSpec) DeepCopyInto(out *MachineTemplateSpec) { *out = *in diff --git a/api/v1beta1/zz_generated.openapi.go b/api/v1beta1/zz_generated.openapi.go index 4e1017dec6d6..464bb7cca6d3 100644 --- a/api/v1beta1/zz_generated.openapi.go +++ b/api/v1beta1/zz_generated.openapi.go @@ -87,6 +87,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "sigs.k8s.io/cluster-api/api/v1beta1.MachineSetStatus": schema_sigsk8sio_cluster_api_api_v1beta1_MachineSetStatus(ref), "sigs.k8s.io/cluster-api/api/v1beta1.MachineSpec": schema_sigsk8sio_cluster_api_api_v1beta1_MachineSpec(ref), "sigs.k8s.io/cluster-api/api/v1beta1.MachineStatus": schema_sigsk8sio_cluster_api_api_v1beta1_MachineStatus(ref), + "sigs.k8s.io/cluster-api/api/v1beta1.MachineStatusDeletion": schema_sigsk8sio_cluster_api_api_v1beta1_MachineStatusDeletion(ref), "sigs.k8s.io/cluster-api/api/v1beta1.MachineTemplateSpec": schema_sigsk8sio_cluster_api_api_v1beta1_MachineTemplateSpec(ref), "sigs.k8s.io/cluster-api/api/v1beta1.NetworkRanges": schema_sigsk8sio_cluster_api_api_v1beta1_NetworkRanges(ref), "sigs.k8s.io/cluster-api/api/v1beta1.ObjectMeta": schema_sigsk8sio_cluster_api_api_v1beta1_ObjectMeta(ref), @@ -3353,11 +3354,45 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_MachineStatus(ref common.Reference }, }, }, + "deletion": { + SchemaProps: spec.SchemaProps{ + Description: "Deletion is the deletion state of the Machine.", + Default: map[string]interface{}{}, + Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.MachineStatusDeletion"), + }, + }, }, }, }, Dependencies: []string{ - "k8s.io/api/core/v1.NodeSystemInfo", "k8s.io/api/core/v1.ObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.Time", "sigs.k8s.io/cluster-api/api/v1beta1.Condition", "sigs.k8s.io/cluster-api/api/v1beta1.MachineAddress"}, + "k8s.io/api/core/v1.NodeSystemInfo", "k8s.io/api/core/v1.ObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.Time", "sigs.k8s.io/cluster-api/api/v1beta1.Condition", "sigs.k8s.io/cluster-api/api/v1beta1.MachineAddress", "sigs.k8s.io/cluster-api/api/v1beta1.MachineStatusDeletion"}, + } +} + +func schema_sigsk8sio_cluster_api_api_v1beta1_MachineStatusDeletion(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "MachineStatusDeletion is the deletion state of the Machine.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "nodeDrainStartTime": { + SchemaProps: spec.SchemaProps{ + Description: "NodeDrainStartTime is the time when the drain of the node started.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "nodeVolumeDetachStartTime": { + SchemaProps: spec.SchemaProps{ + Description: "WaitForNodeVolumeDetachStartTime is the time when waiting for volume detachment started.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, } } diff --git a/config/crd/bases/cluster.x-k8s.io_machines.yaml b/config/crd/bases/cluster.x-k8s.io_machines.yaml index 493d6aca6646..1d0b45633bb9 100644 --- a/config/crd/bases/cluster.x-k8s.io_machines.yaml +++ b/config/crd/bases/cluster.x-k8s.io_machines.yaml @@ -1113,6 +1113,20 @@ spec: - type type: object type: array + deletion: + description: Deletion is the deletion state of the Machine. + properties: + nodeDrainStartTime: + description: NodeDrainStartTime is the time when the drain of + the node started. 
+ format: date-time + type: string + nodeVolumeDetachStartTime: + description: WaitForNodeVolumeDetachStartTime is the time when + waiting for volume detachment started. + format: date-time + type: string + type: object failureMessage: description: |- FailureMessage will be set in the event that there is a terminal problem diff --git a/internal/apis/core/v1alpha3/zz_generated.conversion.go b/internal/apis/core/v1alpha3/zz_generated.conversion.go index 1d8c7451e31c..ec286c3c42fb 100644 --- a/internal/apis/core/v1alpha3/zz_generated.conversion.go +++ b/internal/apis/core/v1alpha3/zz_generated.conversion.go @@ -1241,6 +1241,7 @@ func autoConvert_v1beta1_MachineStatus_To_v1alpha3_MachineStatus(in *v1beta1.Mac out.InfrastructureReady = in.InfrastructureReady out.ObservedGeneration = in.ObservedGeneration out.Conditions = *(*Conditions)(unsafe.Pointer(&in.Conditions)) + // WARNING: in.Deletion requires manual conversion: does not exist in peer-type return nil } diff --git a/internal/apis/core/v1alpha4/zz_generated.conversion.go b/internal/apis/core/v1alpha4/zz_generated.conversion.go index e8f640274d79..4b504ce878c8 100644 --- a/internal/apis/core/v1alpha4/zz_generated.conversion.go +++ b/internal/apis/core/v1alpha4/zz_generated.conversion.go @@ -1653,6 +1653,7 @@ func autoConvert_v1beta1_MachineStatus_To_v1alpha4_MachineStatus(in *v1beta1.Mac out.InfrastructureReady = in.InfrastructureReady out.ObservedGeneration = in.ObservedGeneration out.Conditions = *(*Conditions)(unsafe.Pointer(&in.Conditions)) + // WARNING: in.Deletion requires manual conversion: does not exist in peer-type return nil } From 95b28b570f92861d1d2e3c46b85fc7ee3c2bd914 Mon Sep 17 00:00:00 2001 From: Christian Schlotter Date: Thu, 19 Sep 2024 10:56:34 +0200 Subject: [PATCH 4/7] review fixes --- api/v1beta1/machine_types.go | 12 +++++++++++- .../crd/bases/cluster.x-k8s.io_machines.yaml | 18 +++++++++++++----- 2 files changed, 24 insertions(+), 6 deletions(-) diff --git a/api/v1beta1/machine_types.go b/api/v1beta1/machine_types.go index 085f76a29458..d7b331921a93 100644 --- a/api/v1beta1/machine_types.go +++ b/api/v1beta1/machine_types.go @@ -231,7 +231,8 @@ type MachineStatus struct { // +optional Conditions Conditions `json:"conditions,omitempty"` - // Deletion is the deletion state of the Machine. + // Deletion contains information relating to removal of the Machine. + // Only present when the Machine has a deletionTimestamp and is being removed from the cluster. // +optional Deletion MachineStatusDeletion `json:"deletion,omitempty"` } @@ -241,8 +242,17 @@ type MachineStatus struct { // MachineStatusDeletion is the deletion state of the Machine. type MachineStatusDeletion struct { // NodeDrainStartTime is the time when the drain of the node started. + // Only present when the Machine has a deletionTimestamp, is being removed from the cluster + // and draining the node had been started. + // +optional NodeDrainStartTime *metav1.Time `json:"nodeDrainStartTime,omitempty"` + // WaitForNodeVolumeDetachStartTime is the time when waiting for volume detachment started. + // Detaching volumes from nodes is usually done by CSI implementations and the current state + // is observed from the node's `.Status.VolumesAttached` field. + // Only present when the Machine has a deletionTimestamp, is being removed from the cluster + // and waiting for volume detachments had been started. 
+ // +optional WaitForNodeVolumeDetachStartTime *metav1.Time `json:"nodeVolumeDetachStartTime,omitempty"` } diff --git a/config/crd/bases/cluster.x-k8s.io_machines.yaml b/config/crd/bases/cluster.x-k8s.io_machines.yaml index 1d0b45633bb9..2dedb9c4d7fa 100644 --- a/config/crd/bases/cluster.x-k8s.io_machines.yaml +++ b/config/crd/bases/cluster.x-k8s.io_machines.yaml @@ -1114,16 +1114,24 @@ spec: type: object type: array deletion: - description: Deletion is the deletion state of the Machine. + description: |- + Deletion contains information relating to removal of the Machine. + Only present when the Machine has a deletionTimestamp and is being removed from the cluster. properties: nodeDrainStartTime: - description: NodeDrainStartTime is the time when the drain of - the node started. + description: |- + NodeDrainStartTime is the time when the drain of the node started. + Only present when the Machine has a deletionTimestamp, is being removed from the cluster + and draining the node had been started. format: date-time type: string nodeVolumeDetachStartTime: - description: WaitForNodeVolumeDetachStartTime is the time when - waiting for volume detachment started. + description: |- + WaitForNodeVolumeDetachStartTime is the time when waiting for volume detachment started. + Detaching volumes from nodes is usually done by CSI implementations and the current state + is observed from the node's `.Status.VolumesAttached` field. + Only present when the Machine has a deletionTimestamp, is being removed from the cluster + and waiting for volume detachments had been started. format: date-time type: string type: object From bcf9d6ab5f031552b00e8e040f256f76327ffa0b Mon Sep 17 00:00:00 2001 From: Christian Schlotter Date: Thu, 19 Sep 2024 15:59:26 +0200 Subject: [PATCH 5/7] fix openapi gen --- api/v1beta1/zz_generated.openapi.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/api/v1beta1/zz_generated.openapi.go b/api/v1beta1/zz_generated.openapi.go index 464bb7cca6d3..6e5ea8181081 100644 --- a/api/v1beta1/zz_generated.openapi.go +++ b/api/v1beta1/zz_generated.openapi.go @@ -3356,7 +3356,7 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_MachineStatus(ref common.Reference }, "deletion": { SchemaProps: spec.SchemaProps{ - Description: "Deletion is the deletion state of the Machine.", + Description: "Deletion contains information relating to removal of the Machine. Only present when the Machine has a deletionTimestamp and is being removed from the cluster.", Default: map[string]interface{}{}, Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.MachineStatusDeletion"), }, @@ -3378,13 +3378,13 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_MachineStatusDeletion(ref common.R Properties: map[string]spec.Schema{ "nodeDrainStartTime": { SchemaProps: spec.SchemaProps{ - Description: "NodeDrainStartTime is the time when the drain of the node started.", + Description: "NodeDrainStartTime is the time when the drain of the node started. Only present when the Machine has a deletionTimestamp, is being removed from the cluster and draining the node had been started.", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, "nodeVolumeDetachStartTime": { SchemaProps: spec.SchemaProps{ - Description: "WaitForNodeVolumeDetachStartTime is the time when waiting for volume detachment started.", + Description: "WaitForNodeVolumeDetachStartTime is the time when waiting for volume detachment started. 
Detaching volumes from nodes is usually done by CSI implementations and the current state is observed from the node's `.Status.VolumesAttached` field. Only present when the Machine has a deletionTimestamp, is being removed from the cluster and waiting for volume detachments had been started.", Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), }, }, From 914975929d7f2faad46088401e21dfb166540092 Mon Sep 17 00:00:00 2001 From: Christian Schlotter Date: Tue, 24 Sep 2024 18:21:23 +0200 Subject: [PATCH 6/7] review fixes --- api/v1beta1/machine_types.go | 24 +++---- api/v1beta1/zz_generated.deepcopy.go | 52 ++++++++------- api/v1beta1/zz_generated.openapi.go | 63 +++++++++---------- .../crd/bases/cluster.x-k8s.io_machines.yaml | 18 +++--- .../controllers/machine/machine_controller.go | 26 ++++---- .../machine/machine_controller_test.go | 12 ++-- 6 files changed, 101 insertions(+), 94 deletions(-) diff --git a/api/v1beta1/machine_types.go b/api/v1beta1/machine_types.go index d7b331921a93..53dfabfb4cc9 100644 --- a/api/v1beta1/machine_types.go +++ b/api/v1beta1/machine_types.go @@ -231,29 +231,29 @@ type MachineStatus struct { // +optional Conditions Conditions `json:"conditions,omitempty"` - // Deletion contains information relating to removal of the Machine. - // Only present when the Machine has a deletionTimestamp and is being removed from the cluster. + // deletion contains information relating to removal of the Machine. + // Only present when the Machine has a deletionTimestamp and drain or wait for volume detach started. // +optional - Deletion MachineStatusDeletion `json:"deletion,omitempty"` + Deletion *MachineDeletionStatus `json:"deletion,omitempty"` } // ANCHOR_END: MachineStatus -// MachineStatusDeletion is the deletion state of the Machine. -type MachineStatusDeletion struct { - // NodeDrainStartTime is the time when the drain of the node started. - // Only present when the Machine has a deletionTimestamp, is being removed from the cluster - // and draining the node had been started. +// MachineDeletionStatus is the deletion state of the Machine. +type MachineDeletionStatus struct { + // nodeDrainStartTime is the time when the drain of the node started and is used to determine + // if the NodeDrainTimeout is exceeded. + // Only present when the Machine has a deletionTimestamp and draining the node had been started. // +optional NodeDrainStartTime *metav1.Time `json:"nodeDrainStartTime,omitempty"` - // WaitForNodeVolumeDetachStartTime is the time when waiting for volume detachment started. + // waitForNodeVolumeDetachStartTime is the time when waiting for volume detachment started + // and is used to determine if the NodeVolumeDetachTimeout is exceeded. // Detaching volumes from nodes is usually done by CSI implementations and the current state // is observed from the node's `.Status.VolumesAttached` field. - // Only present when the Machine has a deletionTimestamp, is being removed from the cluster - // and waiting for volume detachments had been started. + // Only present when the Machine has a deletionTimestamp and waiting for volume detachments had been started. // +optional - WaitForNodeVolumeDetachStartTime *metav1.Time `json:"nodeVolumeDetachStartTime,omitempty"` + WaitForNodeVolumeDetachStartTime *metav1.Time `json:"waitForNodeVolumeDetachStartTime,omitempty"` } // SetTypedPhase sets the Phase field to the string representation of MachinePhase. 
diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index 597d59ab52b2..4337861cbf45 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -973,6 +973,29 @@ func (in MachineAddresses) DeepCopy() MachineAddresses { return *out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineDeletionStatus) DeepCopyInto(out *MachineDeletionStatus) { + *out = *in + if in.NodeDrainStartTime != nil { + in, out := &in.NodeDrainStartTime, &out.NodeDrainStartTime + *out = (*in).DeepCopy() + } + if in.WaitForNodeVolumeDetachStartTime != nil { + in, out := &in.WaitForNodeVolumeDetachStartTime, &out.WaitForNodeVolumeDetachStartTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeletionStatus. +func (in *MachineDeletionStatus) DeepCopy() *MachineDeletionStatus { + if in == nil { + return nil + } + out := new(MachineDeletionStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MachineDeployment) DeepCopyInto(out *MachineDeployment) { *out = *in @@ -1912,7 +1935,11 @@ func (in *MachineStatus) DeepCopyInto(out *MachineStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - in.Deletion.DeepCopyInto(&out.Deletion) + if in.Deletion != nil { + in, out := &in.Deletion, &out.Deletion + *out = new(MachineDeletionStatus) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineStatus. @@ -1925,29 +1952,6 @@ func (in *MachineStatus) DeepCopy() *MachineStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MachineStatusDeletion) DeepCopyInto(out *MachineStatusDeletion) { - *out = *in - if in.NodeDrainStartTime != nil { - in, out := &in.NodeDrainStartTime, &out.NodeDrainStartTime - *out = (*in).DeepCopy() - } - if in.WaitForNodeVolumeDetachStartTime != nil { - in, out := &in.WaitForNodeVolumeDetachStartTime, &out.WaitForNodeVolumeDetachStartTime - *out = (*in).DeepCopy() - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineStatusDeletion. -func (in *MachineStatusDeletion) DeepCopy() *MachineStatusDeletion { - if in == nil { - return nil - } - out := new(MachineStatusDeletion) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *MachineTemplateSpec) DeepCopyInto(out *MachineTemplateSpec) { *out = *in diff --git a/api/v1beta1/zz_generated.openapi.go b/api/v1beta1/zz_generated.openapi.go index 6e5ea8181081..7a309c8370e1 100644 --- a/api/v1beta1/zz_generated.openapi.go +++ b/api/v1beta1/zz_generated.openapi.go @@ -58,6 +58,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "sigs.k8s.io/cluster-api/api/v1beta1.LocalObjectTemplate": schema_sigsk8sio_cluster_api_api_v1beta1_LocalObjectTemplate(ref), "sigs.k8s.io/cluster-api/api/v1beta1.Machine": schema_sigsk8sio_cluster_api_api_v1beta1_Machine(ref), "sigs.k8s.io/cluster-api/api/v1beta1.MachineAddress": schema_sigsk8sio_cluster_api_api_v1beta1_MachineAddress(ref), + "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeletionStatus": schema_sigsk8sio_cluster_api_api_v1beta1_MachineDeletionStatus(ref), "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeployment": schema_sigsk8sio_cluster_api_api_v1beta1_MachineDeployment(ref), "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentClass": schema_sigsk8sio_cluster_api_api_v1beta1_MachineDeploymentClass(ref), "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeploymentClassNamingStrategy": schema_sigsk8sio_cluster_api_api_v1beta1_MachineDeploymentClassNamingStrategy(ref), @@ -87,7 +88,6 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "sigs.k8s.io/cluster-api/api/v1beta1.MachineSetStatus": schema_sigsk8sio_cluster_api_api_v1beta1_MachineSetStatus(ref), "sigs.k8s.io/cluster-api/api/v1beta1.MachineSpec": schema_sigsk8sio_cluster_api_api_v1beta1_MachineSpec(ref), "sigs.k8s.io/cluster-api/api/v1beta1.MachineStatus": schema_sigsk8sio_cluster_api_api_v1beta1_MachineStatus(ref), - "sigs.k8s.io/cluster-api/api/v1beta1.MachineStatusDeletion": schema_sigsk8sio_cluster_api_api_v1beta1_MachineStatusDeletion(ref), "sigs.k8s.io/cluster-api/api/v1beta1.MachineTemplateSpec": schema_sigsk8sio_cluster_api_api_v1beta1_MachineTemplateSpec(ref), "sigs.k8s.io/cluster-api/api/v1beta1.NetworkRanges": schema_sigsk8sio_cluster_api_api_v1beta1_NetworkRanges(ref), "sigs.k8s.io/cluster-api/api/v1beta1.ObjectMeta": schema_sigsk8sio_cluster_api_api_v1beta1_ObjectMeta(ref), @@ -1651,6 +1651,33 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_MachineAddress(ref common.Referenc } } +func schema_sigsk8sio_cluster_api_api_v1beta1_MachineDeletionStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "MachineDeletionStatus is the deletion state of the Machine.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "nodeDrainStartTime": { + SchemaProps: spec.SchemaProps{ + Description: "nodeDrainStartTime is the time when the drain of the node started and is used to determine if the NodeDrainTimeout is exceeded. Only present when the Machine has a deletionTimestamp and draining the node had been started.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "waitForNodeVolumeDetachStartTime": { + SchemaProps: spec.SchemaProps{ + Description: "waitForNodeVolumeDetachStartTime is the time when waiting for volume detachment started and is used to determine if the NodeVolumeDetachTimeout is exceeded. Detaching volumes from nodes is usually done by CSI implementations and the current state is observed from the node's `.Status.VolumesAttached` field. 
Only present when the Machine has a deletionTimestamp and waiting for volume detachments had been started.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + } +} + func schema_sigsk8sio_cluster_api_api_v1beta1_MachineDeployment(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -3356,43 +3383,15 @@ func schema_sigsk8sio_cluster_api_api_v1beta1_MachineStatus(ref common.Reference }, "deletion": { SchemaProps: spec.SchemaProps{ - Description: "Deletion contains information relating to removal of the Machine. Only present when the Machine has a deletionTimestamp and is being removed from the cluster.", - Default: map[string]interface{}{}, - Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.MachineStatusDeletion"), + Description: "deletion contains information relating to removal of the Machine. Only present when the Machine has a deletionTimestamp and drain or wait for volume detach started.", + Ref: ref("sigs.k8s.io/cluster-api/api/v1beta1.MachineDeletionStatus"), }, }, }, }, }, Dependencies: []string{ - "k8s.io/api/core/v1.NodeSystemInfo", "k8s.io/api/core/v1.ObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.Time", "sigs.k8s.io/cluster-api/api/v1beta1.Condition", "sigs.k8s.io/cluster-api/api/v1beta1.MachineAddress", "sigs.k8s.io/cluster-api/api/v1beta1.MachineStatusDeletion"}, - } -} - -func schema_sigsk8sio_cluster_api_api_v1beta1_MachineStatusDeletion(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "MachineStatusDeletion is the deletion state of the Machine.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "nodeDrainStartTime": { - SchemaProps: spec.SchemaProps{ - Description: "NodeDrainStartTime is the time when the drain of the node started. Only present when the Machine has a deletionTimestamp, is being removed from the cluster and draining the node had been started.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - "nodeVolumeDetachStartTime": { - SchemaProps: spec.SchemaProps{ - Description: "WaitForNodeVolumeDetachStartTime is the time when waiting for volume detachment started. Detaching volumes from nodes is usually done by CSI implementations and the current state is observed from the node's `.Status.VolumesAttached` field. Only present when the Machine has a deletionTimestamp, is being removed from the cluster and waiting for volume detachments had been started.", - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + "k8s.io/api/core/v1.NodeSystemInfo", "k8s.io/api/core/v1.ObjectReference", "k8s.io/apimachinery/pkg/apis/meta/v1.Time", "sigs.k8s.io/cluster-api/api/v1beta1.Condition", "sigs.k8s.io/cluster-api/api/v1beta1.MachineAddress", "sigs.k8s.io/cluster-api/api/v1beta1.MachineDeletionStatus"}, } } diff --git a/config/crd/bases/cluster.x-k8s.io_machines.yaml b/config/crd/bases/cluster.x-k8s.io_machines.yaml index 2dedb9c4d7fa..cc5aab3633ad 100644 --- a/config/crd/bases/cluster.x-k8s.io_machines.yaml +++ b/config/crd/bases/cluster.x-k8s.io_machines.yaml @@ -1115,23 +1115,23 @@ spec: type: array deletion: description: |- - Deletion contains information relating to removal of the Machine. 
- Only present when the Machine has a deletionTimestamp and is being removed from the cluster. + deletion contains information relating to removal of the Machine. + Only present when the Machine has a deletionTimestamp and drain or wait for volume detach started. properties: nodeDrainStartTime: description: |- - NodeDrainStartTime is the time when the drain of the node started. - Only present when the Machine has a deletionTimestamp, is being removed from the cluster - and draining the node had been started. + nodeDrainStartTime is the time when the drain of the node started and is used to determine + if the NodeDrainTimeout is exceeded. + Only present when the Machine has a deletionTimestamp and draining the node had been started. format: date-time type: string - nodeVolumeDetachStartTime: + waitForNodeVolumeDetachStartTime: description: |- - WaitForNodeVolumeDetachStartTime is the time when waiting for volume detachment started. + waitForNodeVolumeDetachStartTime is the time when waiting for volume detachment started + and is used to determine if the NodeVolumeDetachTimeout is exceeded. Detaching volumes from nodes is usually done by CSI implementations and the current state is observed from the node's `.Status.VolumesAttached` field. - Only present when the Machine has a deletionTimestamp, is being removed from the cluster - and waiting for volume detachments had been started. + Only present when the Machine has a deletionTimestamp and waiting for volume detachments had been started. format: date-time type: string type: object diff --git a/internal/controllers/machine/machine_controller.go b/internal/controllers/machine/machine_controller.go index 4e8f13282bc8..392e907615e0 100644 --- a/internal/controllers/machine/machine_controller.go +++ b/internal/controllers/machine/machine_controller.go @@ -379,13 +379,19 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu return ctrl.Result{}, err } - // The DrainingSucceededCondition never exists before the node is drained for the first time, - // so its transition time can be used to record the first time draining. + // The DrainingSucceededCondition never exists before the node is drained for the first time. // This `if` condition prevents the transition time to be changed more than once. if conditions.Get(m, clusterv1.DrainingSucceededCondition) == nil { conditions.MarkFalse(m, clusterv1.DrainingSucceededCondition, clusterv1.DrainingReason, clusterv1.ConditionSeverityInfo, "Draining the node before deletion") } + if m.Status.Deletion == nil { + m.Status.Deletion = &clusterv1.MachineDeletionStatus{} + } + if m.Status.Deletion.NodeDrainStartTime == nil { + m.Status.Deletion.NodeDrainStartTime = ptr.To(metav1.Now()) + } + if err := patchMachine(ctx, patchHelper, m); err != nil { return ctrl.Result{}, errors.Wrap(err, "failed to patch Machine") } @@ -409,13 +415,15 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu // volumes are detached before proceeding to delete the Node. // In case the node is unreachable, the detachment is skipped. if r.isNodeVolumeDetachingAllowed(m) { - // The VolumeDetachSucceededCondition never exists before we wait for volume detachment for the first time, - // so its transition time can be used to record the first time we wait for volume detachment. + // The VolumeDetachSucceededCondition never exists before we wait for volume detachment for the first time. // This `if` condition prevents the transition time to be changed more than once. 
if conditions.Get(m, clusterv1.VolumeDetachSucceededCondition) == nil { conditions.MarkFalse(m, clusterv1.VolumeDetachSucceededCondition, clusterv1.WaitingForVolumeDetachReason, clusterv1.ConditionSeverityInfo, "Waiting for node volumes to be detached") } + if m.Status.Deletion == nil { + m.Status.Deletion = &clusterv1.MachineDeletionStatus{} + } if m.Status.Deletion.WaitForNodeVolumeDetachStartTime == nil { m.Status.Deletion.WaitForNodeVolumeDetachStartTime = ptr.To(metav1.Now()) } @@ -528,7 +536,7 @@ func (r *Reconciler) isNodeVolumeDetachingAllowed(m *clusterv1.Machine) bool { func (r *Reconciler) nodeDrainTimeoutExceeded(machine *clusterv1.Machine) bool { // if the NodeDrainTimeout type is not set by user - if machine.Spec.NodeDrainTimeout == nil || machine.Spec.NodeDrainTimeout.Seconds() <= 0 { + if machine.Status.Deletion == nil || machine.Spec.NodeDrainTimeout == nil || machine.Spec.NodeDrainTimeout.Seconds() <= 0 { return false } @@ -547,11 +555,11 @@ func (r *Reconciler) nodeDrainTimeoutExceeded(machine *clusterv1.Machine) bool { // since the WaitForNodeVolumeDetachStartTime. func (r *Reconciler) nodeVolumeDetachTimeoutExceeded(machine *clusterv1.Machine) bool { // if the NodeVolumeDetachTimeout type is not set by user - if machine.Spec.NodeVolumeDetachTimeout == nil || machine.Spec.NodeVolumeDetachTimeout.Seconds() <= 0 { + if machine.Status.Deletion == nil || machine.Spec.NodeVolumeDetachTimeout == nil || machine.Spec.NodeVolumeDetachTimeout.Seconds() <= 0 { return false } - // if the NodeVolumeDetachStartTime does not exist + // if the WaitForNodeVolumeDetachStartTime does not exist if machine.Status.Deletion.WaitForNodeVolumeDetachStartTime == nil { return false } @@ -700,10 +708,6 @@ func (r *Reconciler) drainNode(ctx context.Context, cluster *clusterv1.Cluster, log.V(3).Info("Node is unreachable, draining will use 1s GracePeriodSeconds and will ignore all Pods that have a deletionTimestamp > 1s old") } - if machine.Status.Deletion.NodeDrainStartTime == nil { - machine.Status.Deletion.NodeDrainStartTime = ptr.To(metav1.Now()) - } - if err := drainer.CordonNode(ctx, node); err != nil { // Machine will be re-reconciled after a cordon failure. 
return ctrl.Result{}, errors.Wrapf(err, "failed to cordon Node %s", node.Name) diff --git a/internal/controllers/machine/machine_controller_test.go b/internal/controllers/machine/machine_controller_test.go index e5abb53e347d..7ab8096215fa 100644 --- a/internal/controllers/machine/machine_controller_test.go +++ b/internal/controllers/machine/machine_controller_test.go @@ -1386,7 +1386,7 @@ func TestIsNodeDrainedAllowed(t *testing.T) { }, Status: clusterv1.MachineStatus{ - Deletion: clusterv1.MachineStatusDeletion{ + Deletion: &clusterv1.MachineDeletionStatus{ NodeDrainStartTime: &metav1.Time{Time: time.Now().Add(-(time.Second * 70)).UTC()}, }, }, @@ -1408,7 +1408,7 @@ func TestIsNodeDrainedAllowed(t *testing.T) { NodeDrainTimeout: &metav1.Duration{Duration: time.Second * 60}, }, Status: clusterv1.MachineStatus{ - Deletion: clusterv1.MachineStatusDeletion{ + Deletion: &clusterv1.MachineDeletionStatus{ NodeDrainStartTime: &metav1.Time{Time: time.Now().Add(-(time.Second * 30)).UTC()}, }, }, @@ -1429,7 +1429,7 @@ func TestIsNodeDrainedAllowed(t *testing.T) { Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, }, Status: clusterv1.MachineStatus{ - Deletion: clusterv1.MachineStatusDeletion{ + Deletion: &clusterv1.MachineDeletionStatus{ NodeDrainStartTime: &metav1.Time{Time: time.Now().Add(-(time.Second * 1000)).UTC()}, }, }, @@ -1884,7 +1884,7 @@ func TestIsNodeVolumeDetachingAllowed(t *testing.T) { }, Status: clusterv1.MachineStatus{ - Deletion: clusterv1.MachineStatusDeletion{ + Deletion: &clusterv1.MachineDeletionStatus{ WaitForNodeVolumeDetachStartTime: &metav1.Time{Time: time.Now().Add(-(time.Second * 60)).UTC()}, }, }, @@ -1906,7 +1906,7 @@ func TestIsNodeVolumeDetachingAllowed(t *testing.T) { NodeVolumeDetachTimeout: &metav1.Duration{Duration: time.Second * 60}, }, Status: clusterv1.MachineStatus{ - Deletion: clusterv1.MachineStatusDeletion{ + Deletion: &clusterv1.MachineDeletionStatus{ WaitForNodeVolumeDetachStartTime: &metav1.Time{Time: time.Now().Add(-(time.Second * 30)).UTC()}, }, }, @@ -1927,7 +1927,7 @@ func TestIsNodeVolumeDetachingAllowed(t *testing.T) { Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, }, Status: clusterv1.MachineStatus{ - Deletion: clusterv1.MachineStatusDeletion{ + Deletion: &clusterv1.MachineDeletionStatus{ WaitForNodeVolumeDetachStartTime: &metav1.Time{Time: time.Now().Add(-(time.Second * 1000)).UTC()}, }, }, From b4f0943697d9954bc5f90fcb427f779376220f15 Mon Sep 17 00:00:00 2001 From: Christian Schlotter Date: Wed, 25 Sep 2024 14:00:40 +0200 Subject: [PATCH 7/7] fix --- internal/controllers/machine/machine_controller.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/internal/controllers/machine/machine_controller.go b/internal/controllers/machine/machine_controller.go index 392e907615e0..66010cfe28d4 100644 --- a/internal/controllers/machine/machine_controller.go +++ b/internal/controllers/machine/machine_controller.go @@ -380,7 +380,6 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu } // The DrainingSucceededCondition never exists before the node is drained for the first time. - // This `if` condition prevents the transition time to be changed more than once. 
if conditions.Get(m, clusterv1.DrainingSucceededCondition) == nil { conditions.MarkFalse(m, clusterv1.DrainingSucceededCondition, clusterv1.DrainingReason, clusterv1.ConditionSeverityInfo, "Draining the node before deletion") } @@ -416,7 +415,6 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu // In case the node is unreachable, the detachment is skipped. if r.isNodeVolumeDetachingAllowed(m) { // The VolumeDetachSucceededCondition never exists before we wait for volume detachment for the first time. - // This `if` condition prevents the transition time to be changed more than once. if conditions.Get(m, clusterv1.VolumeDetachSucceededCondition) == nil { conditions.MarkFalse(m, clusterv1.VolumeDetachSucceededCondition, clusterv1.WaitingForVolumeDetachReason, clusterv1.ConditionSeverityInfo, "Waiting for node volumes to be detached") }
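
For illustration only, and not part of the patches above: a minimal sketch of what the new status.deletion block can look like on a Machine that is being deleted, assuming both the node drain and the wait for volume detach have already started. The field names (deletion.nodeDrainStartTime, deletion.waitForNodeVolumeDetachStartTime) come from the CRD changes in this series; the object name, timestamps, and phase value are hypothetical.

---
# Hypothetical Machine observed mid-deletion; all concrete values are illustrative.
apiVersion: cluster.x-k8s.io/v1beta1
kind: Machine
metadata:
  name: example-machine                        # hypothetical name
  deletionTimestamp: "2024-09-25T12:00:00Z"    # deletion has been requested
status:
  phase: Deleting                              # shown for context
  deletion:
    # recorded once when draining starts; used against Spec.NodeDrainTimeout
    nodeDrainStartTime: "2024-09-25T12:00:05Z"
    # recorded once when waiting for volume detach starts; used against Spec.NodeVolumeDetachTimeout
    waitForNodeVolumeDetachStartTime: "2024-09-25T12:01:10Z"

With Spec.NodeDrainTimeout and Spec.NodeVolumeDetachTimeout set, nodeDrainTimeoutExceeded and nodeVolumeDetachTimeoutExceeded now compare the current time against these start times instead of the last transition time of the DrainingSucceeded and VolumeDetachSucceeded conditions. Note that, as of patch 6, status.deletion is a pointer that is only populated once drain or wait for volume detach has started, so consumers should nil-check it before reading either timestamp.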