Address comments
fabriziopandini committed Nov 12, 2024
1 parent 889583c commit 2795a59
Showing 17 changed files with 115 additions and 79 deletions.
12 changes: 8 additions & 4 deletions controlplane/kubeadm/internal/controllers/controller.go
@@ -639,7 +639,11 @@ func (r *KubeadmControlPlaneReconciler) reconcileDelete(ctx context.Context, con
conditions.MarkFalse(controlPlane.KCP, controlplanev1.ResizedCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "Waiting for worker nodes to be deleted first")

controlPlane.DeletingReason = controlplanev1.KubeadmControlPlaneDeletingWaitingForWorkersDeletionV1Beta2Reason
- controlPlane.DeletingMessage = fmt.Sprintf("KCP deletion blocked because %s still exist", objectsPendingDeleteNames(allMachines, allMachinePools, controlPlane.Cluster))
+ names := objectsPendingDeleteNames(allMachines, allMachinePools, controlPlane.Cluster)
+ for i := range names {
+ names[i] = "* " + names[i]
+ }
+ controlPlane.DeletingMessage = fmt.Sprintf("KCP deletion blocked because following objects still exist: \n%s", strings.Join(names, "\n"))
return ctrl.Result{RequeueAfter: deleteRequeueAfter}, nil
}

@@ -703,7 +707,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileDelete(ctx context.Context, con
}

// objectsPendingDeleteNames return the names of worker Machines and MachinePools pending delete.
- func objectsPendingDeleteNames(allMachines collections.Machines, allMachinePools *expv1.MachinePoolList, cluster *clusterv1.Cluster) string {
+ func objectsPendingDeleteNames(allMachines collections.Machines, allMachinePools *expv1.MachinePoolList, cluster *clusterv1.Cluster) []string {
controlPlaneMachines := allMachines.Filter(collections.ControlPlaneMachines(cluster.Name))
workerMachines := allMachines.Difference(controlPlaneMachines)

@@ -725,9 +729,9 @@ func objectsPendingDeleteNames(allMachines collections.Machines, allMachinePools
}
if len(workerMachineNames) > 0 {
sort.Strings(workerMachineNames)
- descendants = append(descendants, "worker Machines: "+clog.StringListToString(workerMachineNames))
+ descendants = append(descendants, "Machines: "+clog.StringListToString(workerMachineNames))
}
- return strings.Join(descendants, "; ")
+ return descendants
}

func (r *KubeadmControlPlaneReconciler) removePreTerminateHookAnnotationFromMachine(ctx context.Context, machine *clusterv1.Machine) error {
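The net effect of this hunk: objectsPendingDeleteNames now returns one entry per pending object kind, and the caller renders them as a newline-separated bullet list instead of a single "; "-joined sentence. A minimal standalone sketch of the new formatting (the sample names are hypothetical, not from the commit):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical output of objectsPendingDeleteNames after this change:
	// one entry per pending object kind.
	names := []string{"MachinePools: mp1", "Machines: w1, w2, w3, w4, w5, ... (3 more)"}

	// Prefix each entry with "* " so the message renders as a bullet list.
	for i := range names {
		names[i] = "* " + names[i]
	}

	fmt.Printf("KCP deletion blocked because following objects still exist: \n%s\n", strings.Join(names, "\n"))
	// Prints:
	// KCP deletion blocked because following objects still exist:
	// * MachinePools: mp1
	// * Machines: w1, w2, w3, w4, w5, ... (3 more)
}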
6 changes: 3 additions & 3 deletions controlplane/kubeadm/internal/controllers/controller_test.go
@@ -3354,7 +3354,7 @@ func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) {
g.Expect(err).ToNot(HaveOccurred())
g.Expect(kcp.Finalizers).To(ContainElement(controlplanev1.KubeadmControlPlaneFinalizer))
g.Expect(controlPlane.DeletingReason).To(Equal(controlplanev1.KubeadmControlPlaneDeletingWaitingForWorkersDeletionV1Beta2Reason))
- g.Expect(controlPlane.DeletingMessage).To(Equal("KCP deletion blocked because worker Machines: worker-0, worker-1, worker-2, worker-3, worker-4, ... (5 more) still exist"))
+ g.Expect(controlPlane.DeletingMessage).To(Equal("KCP deletion blocked because following objects still exist: \n* Machines: worker-0, worker-1, worker-2, worker-3, worker-4, ... (5 more)"))

controlPlaneMachines := clusterv1.MachineList{}
labels := map[string]string{
@@ -3415,7 +3415,7 @@ func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) {
g.Expect(err).ToNot(HaveOccurred())
g.Expect(kcp.Finalizers).To(ContainElement(controlplanev1.KubeadmControlPlaneFinalizer))
g.Expect(controlPlane.DeletingReason).To(Equal(controlplanev1.KubeadmControlPlaneDeletingWaitingForWorkersDeletionV1Beta2Reason))
- g.Expect(controlPlane.DeletingMessage).To(Equal("KCP deletion blocked because MachinePools: mp-0, mp-1, mp-2, mp-3, mp-4, ... (5 more) still exist"))
+ g.Expect(controlPlane.DeletingMessage).To(Equal("KCP deletion blocked because following objects still exist: \n* MachinePools: mp-0, mp-1, mp-2, mp-3, mp-4, ... (5 more)"))

controlPlaneMachines := clusterv1.MachineList{}
labels := map[string]string{
@@ -3499,7 +3499,7 @@ func TestObjectsPendingDelete(t *testing.T) {

g := NewWithT(t)

- g.Expect(objectsPendingDeleteNames(allMachines, machinePools, c)).To(Equal("MachinePools: mp1; worker Machines: w1, w2, w3, w4, w5, ... (3 more)"))
+ g.Expect(objectsPendingDeleteNames(allMachines, machinePools, c)).To(Equal([]string{"MachinePools: mp1", "Machines: w1, w2, w3, w4, w5, ... (3 more)"}))
}

// test utils.
3 changes: 0 additions & 3 deletions controlplane/kubeadm/internal/workload_cluster_conditions.go
@@ -965,9 +965,6 @@ func aggregateV1Beta2ConditionsFromMachinesToKCP(input aggregateV1Beta2Condition

if len(machineMessages) > 0 {
message := strings.Join(machineMessages, "\n")
- if _, ok := messageMap[message]; !ok {
- messageMap[message] = []string{}
- }
messageMap[message] = append(messageMap[message], machine.Name)
}
}
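The three deleted lines were a redundant guard: indexing a Go map with a missing key yields the value type's zero value, and append on a nil slice allocates a new backing array, so appending directly is equivalent. A tiny self-contained illustration of that idiom:

package main

import "fmt"

func main() {
	messageMap := map[string][]string{}

	// No need to initialize messageMap[key] first: a missing key yields a
	// nil slice, and append handles nil by allocating a new backing array.
	key := "some aggregated condition message"
	messageMap[key] = append(messageMap[key], "m1")
	messageMap[key] = append(messageMap[key], "m2")

	fmt.Println(messageMap[key]) // [m1 m2]
}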
controlplane/kubeadm/internal/workload_cluster_conditions_test.go
@@ -1476,7 +1476,7 @@ func withMachineReadyV1beta2Condition(status metav1.ConditionStatus) fakeMachine
machine.Status.V1Beta2 = &clusterv1.MachineV1Beta2Status{}
}
machine.Status.V1Beta2.Conditions = append(machine.Status.V1Beta2.Conditions, metav1.Condition{
- Type: clusterv1.MachinesReadyV1Beta2Condition,
+ Type: clusterv1.MachineReadyV1Beta2Condition,
Status: status,
Reason: "SomeReason",
Message: fmt.Sprintf("ready condition is %s", status),
@@ -1627,9 +1627,9 @@ func TestAggregateV1Beta2ConditionsFromMachinesToKCP(t *testing.T) {
Status: metav1.ConditionFalse,
Reason: falseReason,
Message: "* Machines m1, m2:\n" +
" * MachinesReady: ready condition is False\n" +
" * Ready: ready condition is False\n" +
"* Machine m4:\n" +
" * MachinesReady: ready condition is Unknown",
" * Ready: ready condition is Unknown",
},
},
{
@@ -1657,7 +1657,7 @@ func TestAggregateV1Beta2ConditionsFromMachinesToKCP(t *testing.T) {
Status: metav1.ConditionUnknown,
Reason: unknownReason,
Message: "* Machines m1, m3:\n" +
" * MachinesReady: ready condition is Unknown",
" * Ready: ready condition is Unknown",
},
},
{
@@ -1694,7 +1694,7 @@
KCP: &controlplanev1.KubeadmControlPlane{},
Machines: collections.FromMachines(tt.machines...),
},
- machineConditions: []string{clusterv1.MachinesReadyV1Beta2Condition},
+ machineConditions: []string{clusterv1.MachineReadyV1Beta2Condition},
kcpErrors: tt.kcpErrors,
condition: conditionType,
trueReason: trueReason,
10 changes: 5 additions & 5 deletions internal/controllers/cluster/cluster_controller_status_test.go
@@ -248,7 +248,7 @@ func TestSetInfrastructureReadyCondition(t *testing.T) {
Type: clusterv1.ClusterInfrastructureReadyV1Beta2Condition,
Status: metav1.ConditionFalse,
Reason: clusterv1.ClusterInfrastructureReadyNoReasonReportedV1Beta2Reason,
- Message: "some message (from FakeInfraCluster)",
+ Message: "some message",
},
},
{
@@ -402,7 +402,7 @@ func TestSetControlPlaneAvailableCondition(t *testing.T) {
Type: clusterv1.ClusterControlPlaneAvailableV1Beta2Condition,
Status: metav1.ConditionFalse,
Reason: clusterv1.ClusterControlPlaneAvailableNoReasonReportedV1Beta2Reason,
- Message: "some message (from FakeControlPlane)",
+ Message: "some message",
},
},
{
@@ -1650,15 +1650,15 @@ func TestDeletingCondition(t *testing.T) {
{
name: "deletionTimestamp set (some reason/message reported)",
cluster: fakeCluster("c", deleted(true)),
- deletingReason: clusterv1.ClusterDeletingWaitingForBeforeDeleteHookV1Beta2Reason,
+ deletingReason: clusterv1.ClusterDeletingWaitingForWorkersDeletionV1Beta2Reason,
deletingMessage: "* Control plane Machines: cp1, cp2, cp3\n" +
"* MachineDeployments: md1, md2\n" +
"* MachineSets: ms1, ms2\n" +
"* Worker Machines: w1, w2, w3, w4, w5, ... (3 more)",
expectCondition: metav1.Condition{
Type: clusterv1.ClusterDeletingV1Beta2Condition,
Status: metav1.ConditionTrue,
- Reason: clusterv1.ClusterDeletingWaitingForBeforeDeleteHookV1Beta2Reason,
+ Reason: clusterv1.ClusterDeletingWaitingForWorkersDeletionV1Beta2Reason,
Message: "* Control plane Machines: cp1, cp2, cp3\n" +
"* MachineDeployments: md1, md2\n" +
"* MachineSets: ms1, ms2\n" +
@@ -1815,7 +1815,7 @@ func TestSetAvailableCondition(t *testing.T) {
Type: clusterv1.ClusterAvailableV1Beta2Condition,
Status: metav1.ConditionFalse,
Reason: clusterv1.ClusterDeletingWaitingForWorkersDeletionV1Beta2Reason,
- Message: "* Deleting: \n" +
+ Message: "* Deleting:\n" +
" * Control plane Machines: cp1, cp2, cp3\n" +
" * MachineDeployments: md1, md2\n" +
" * MachineSets: ms1, ms2\n" +
20 changes: 16 additions & 4 deletions internal/controllers/machine/drain/drain.go
@@ -249,7 +249,7 @@ func filterPods(ctx context.Context, allPods []*corev1.Pod, filters []PodFilter)
// Add the pod to PodDeleteList no matter what PodDeleteStatus is,
// those pods whose PodDeleteStatus is false like DaemonSet will
// be caught by list.errors()
- pod.Kind = "Pod"
+ pod.Kind = "Pod" //nolint:goconst
pod.APIVersion = "v1"
pods = append(pods, PodDelete{
Pod: pod,
@@ -448,8 +448,12 @@ func (r EvictionResult) ConditionMessage(nodeDrainStartTime *metav1.Time) string

conditionMessage := fmt.Sprintf("Drain not completed yet (started at %s):", nodeDrainStartTime.Format(time.RFC3339))
if len(r.PodsDeletionTimestampSet) > 0 {
- conditionMessage = fmt.Sprintf("%s\n* Pod %s with deletionTimestamp and still not removed from the node",
- conditionMessage, PodListToString(r.PodsDeletionTimestampSet, 3))
+ kind := "Pod"
+ if len(r.PodsDeletionTimestampSet) > 1 {
+ kind = "Pods"
+ }
+ conditionMessage = fmt.Sprintf("%s\n* %s %s: deletionTimestamp set, but still not removed from the Node",
+ conditionMessage, kind, PodListToString(r.PodsDeletionTimestampSet, 3))
}
if len(r.PodsFailedEviction) > 0 {
sortedFailureMessages := maps.Keys(r.PodsFailedEviction)
@@ -462,7 +466,15 @@ func (r EvictionResult) ConditionMessage(nodeDrainStartTime *metav1.Time) string
}
for _, failureMessage := range sortedFailureMessages {
pods := r.PodsFailedEviction[failureMessage]
- conditionMessage = fmt.Sprintf("%s\n* Pod %s: %s", conditionMessage, PodListToString(pods, 3), failureMessage)
+ kind := "Pod"
+ if len(pods) > 1 {
+ kind = "Pods"
+ }
+ failureMessage = strings.Replace(failureMessage, "Cannot evict pod as it would violate the pod's disruption budget.", "cannot evict pod as it would violate the pod's disruption budget.", -1)
+ if !strings.HasPrefix(failureMessage, "cannot evict pod as it would violate the pod's disruption budget.") {
+ failureMessage = "failed to evict Pod, " + failureMessage
+ }
+ conditionMessage = fmt.Sprintf("%s\n* %s %s: %s", conditionMessage, kind, PodListToString(pods, 3), failureMessage)
}
if len(skippedFailureMessages) > 0 {
podCount := 0
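Both hunks apply the same pattern: pick a singular or plural kind word based on how many Pods share the message entry, then append the normalized failure text. A rough standalone sketch of that pluralization step (the helper name and sample pods are illustrative, not from the commit):

package main

import (
	"fmt"
	"strings"
)

// podKind returns "Pod" or "Pods" depending on how many pods share an entry;
// an illustrative stand-in for the inline kind selection in ConditionMessage.
func podKind(count int) string {
	if count > 1 {
		return "Pods"
	}
	return "Pod"
}

func main() {
	pods := []string{"pod-a", "pod-b"}
	fmt.Printf("* %s %s: deletionTimestamp set, but still not removed from the Node\n",
		podKind(len(pods)), strings.Join(pods, ", "))
	// Prints:
	// * Pods pod-a, pod-b: deletionTimestamp set, but still not removed from the Node
}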
28 changes: 14 additions & 14 deletions internal/controllers/machine/drain/drain_test.go
@@ -1496,9 +1496,9 @@ func TestEvictionResult_ConditionMessage(t *testing.T) {
},
},
wantConditionMessage: `Drain not completed yet (started at 2024-10-09T16:13:59Z):
- * Pod pod-2-deletionTimestamp-set-1, pod-3-to-trigger-eviction-successfully-1 with deletionTimestamp and still not removed from the node
- * Pod pod-5-to-trigger-eviction-pdb-violated-1: Cannot evict pod as it would violate the pod's disruption budget. The disruption budget pod-5-pdb needs 20 healthy pods and has 20 currently
- * Pod pod-6-to-trigger-eviction-some-other-error: some other error 1
+ * Pods pod-2-deletionTimestamp-set-1, pod-3-to-trigger-eviction-successfully-1: deletionTimestamp set, but still not removed from the Node
+ * Pod pod-5-to-trigger-eviction-pdb-violated-1: cannot evict pod as it would violate the pod's disruption budget. The disruption budget pod-5-pdb needs 20 healthy pods and has 20 currently
+ * Pod pod-6-to-trigger-eviction-some-other-error: failed to evict Pod, some other error 1
After above Pods have been removed from the Node, the following Pods will be evicted: pod-7-eviction-later, pod-8-eviction-later`,
},
{
@@ -1653,12 +1653,12 @@ After above Pods have been removed from the Node, the following Pods will be evi
},
},
wantConditionMessage: `Drain not completed yet (started at 2024-10-09T16:13:59Z):
- * Pod pod-2-deletionTimestamp-set-1, pod-2-deletionTimestamp-set-2, pod-2-deletionTimestamp-set-3, ... (4 more) with deletionTimestamp and still not removed from the node
- * Pod pod-5-to-trigger-eviction-pdb-violated-1, pod-5-to-trigger-eviction-pdb-violated-2, pod-5-to-trigger-eviction-pdb-violated-3, ... (3 more): Cannot evict pod as it would violate the pod's disruption budget. The disruption budget pod-5-pdb needs 20 healthy pods and has 20 currently
- * Pod pod-6-to-trigger-eviction-some-other-error: some other error 1
- * Pod pod-7-to-trigger-eviction-some-other-error: some other error 2
- * Pod pod-8-to-trigger-eviction-some-other-error: some other error 3
- * Pod pod-9-to-trigger-eviction-some-other-error: some other error 4
+ * Pods pod-2-deletionTimestamp-set-1, pod-2-deletionTimestamp-set-2, pod-2-deletionTimestamp-set-3, ... (4 more): deletionTimestamp set, but still not removed from the Node
+ * Pods pod-5-to-trigger-eviction-pdb-violated-1, pod-5-to-trigger-eviction-pdb-violated-2, pod-5-to-trigger-eviction-pdb-violated-3, ... (3 more): cannot evict pod as it would violate the pod's disruption budget. The disruption budget pod-5-pdb needs 20 healthy pods and has 20 currently
+ * Pod pod-6-to-trigger-eviction-some-other-error: failed to evict Pod, some other error 1
+ * Pod pod-7-to-trigger-eviction-some-other-error: failed to evict Pod, some other error 2
+ * Pod pod-8-to-trigger-eviction-some-other-error: failed to evict Pod, some other error 3
+ * Pod pod-9-to-trigger-eviction-some-other-error: failed to evict Pod, some other error 4
* 1 Pod with other issues
After above Pods have been removed from the Node, the following Pods will be evicted: pod-11-eviction-later, pod-12-eviction-later, pod-13-eviction-later, ... (2 more)`,
},
@@ -1728,11 +1728,11 @@ After above Pods have been removed from the Node, the following Pods will be evi
},
},
wantConditionMessage: `Drain not completed yet (started at 2024-10-09T16:13:59Z):
- * Pod pod-1-to-trigger-eviction-some-other-error: some other error 1
- * Pod pod-2-to-trigger-eviction-some-other-error: some other error 2
- * Pod pod-3-to-trigger-eviction-some-other-error: some other error 3
- * Pod pod-4-to-trigger-eviction-some-other-error: some other error 4
- * Pod pod-5-to-trigger-eviction-some-other-error: some other error 5
+ * Pod pod-1-to-trigger-eviction-some-other-error: failed to evict Pod, some other error 1
+ * Pod pod-2-to-trigger-eviction-some-other-error: failed to evict Pod, some other error 2
+ * Pod pod-3-to-trigger-eviction-some-other-error: failed to evict Pod, some other error 3
+ * Pod pod-4-to-trigger-eviction-some-other-error: failed to evict Pod, some other error 4
+ * Pod pod-5-to-trigger-eviction-some-other-error: failed to evict Pod, some other error 5
* 4 Pods with other issues`,
},
}
8 changes: 4 additions & 4 deletions internal/controllers/machine/machine_controller_status.go
@@ -285,7 +285,7 @@ func setNodeHealthyAndReadyConditions(ctx context.Context, cluster *clusterv1.Cl

message := ""
if condition.Message != "" {
- message = fmt.Sprintf("%s (from Node)", condition.Message)
+ message = fmt.Sprintf("* Node.Ready: %s", condition.Message)
}
reason := condition.Reason
if reason == "" {
@@ -405,7 +405,7 @@ func summarizeNodeV1Beta2Conditions(_ context.Context, node *corev1.Node) (metav
}
}
if condition == nil {
- messages = append(messages, fmt.Sprintf("* Node.%s: condition not yet reported", conditionType))
+ messages = append(messages, fmt.Sprintf("* Node.%s: Condition not yet reported", conditionType))
if unknownStatus == 0 {
unknownReason = clusterv1.MachineNodeConditionNotYetReportedV1Beta2Reason
} else {
@@ -420,7 +420,7 @@
if condition.Status != corev1.ConditionFalse {
m := condition.Message
if m == "" {
- m = fmt.Sprintf("condition is %s", condition.Status)
+ m = fmt.Sprintf("Condition is %s", condition.Status)
}
messages = append(messages, fmt.Sprintf("* Node.%s: %s", condition.Type, m))
if condition.Status == corev1.ConditionUnknown {
@@ -444,7 +444,7 @@
if condition.Status != corev1.ConditionTrue {
m := condition.Message
if m == "" {
- m = fmt.Sprintf("condition is %s", condition.Status)
+ m = fmt.Sprintf("Condition is %s", condition.Status)
}
messages = append(messages, fmt.Sprintf("* Node.%s: %s", condition.Type, m))
if condition.Status == corev1.ConditionUnknown {
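The message changes in this file standardize Node-derived condition messages on a "* Node.&lt;Type&gt;: &lt;message&gt;" bullet format, with a capitalized fallback when the Node reports no message. A minimal sketch of the fallback path (simplified stand-ins, no Kubernetes types):

package main

import "fmt"

func main() {
	// Simplified stand-ins for a Node condition's type, status, and message.
	condType, status, message := "Ready", "Unknown", ""

	// After this change the fallback text is capitalized ("Condition is ...")
	// and every entry carries the "* Node.<Type>:" prefix.
	if message == "" {
		message = fmt.Sprintf("Condition is %s", status)
	}
	fmt.Printf("* Node.%s: %s\n", condType, message) // * Node.Ready: Condition is Unknown
}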
18 changes: 9 additions & 9 deletions internal/controllers/machine/machine_controller_status_test.go
@@ -110,7 +110,7 @@ func TestSetBootstrapReadyCondition(t *testing.T) {
Type: clusterv1.MachineBootstrapConfigReadyV1Beta2Condition,
Status: metav1.ConditionFalse,
Reason: clusterv1.MachineBootstrapConfigReadyNoReasonReportedV1Beta2Reason,
- Message: "some message (from GenericBootstrapConfig)",
+ Message: "some message",
},
},
{
@@ -304,7 +304,7 @@ func TestSetInfrastructureReadyCondition(t *testing.T) {
Type: clusterv1.MachineInfrastructureReadyV1Beta2Condition,
Status: metav1.ConditionFalse,
Reason: clusterv1.MachineInfrastructureReadyNoReasonReportedV1Beta2Reason,
- Message: "some message (from GenericInfrastructureMachine)",
+ Message: "some message",
},
},
{
@@ -553,7 +553,7 @@ func TestSummarizeNodeV1Beta2Conditions(t *testing.T) {
},
expectedStatus: metav1.ConditionUnknown,
expectedReason: clusterv1.MachineNodeConditionNotYetReportedV1Beta2Reason,
- expectedMessage: "* Node.Ready: condition not yet reported",
+ expectedMessage: "* Node.Ready: Condition not yet reported",
},
}
for _, test := range testCases {
@@ -687,7 +687,7 @@ func TestSetNodeHealthyAndReadyConditions(t *testing.T) {
Type: clusterv1.MachineNodeReadyV1Beta2Condition,
Status: metav1.ConditionFalse,
Reason: "SomeReason",
- Message: "kubelet is NOT ready (from Node)",
+ Message: "* Node.Ready: kubelet is NOT ready",
},
},
},
@@ -716,7 +716,7 @@ func TestSetNodeHealthyAndReadyConditions(t *testing.T) {
Type: clusterv1.MachineNodeReadyV1Beta2Condition,
Status: metav1.ConditionTrue,
Reason: "KubeletReady",
- Message: "kubelet is posting ready status (from Node)",
+ Message: "* Node.Ready: kubelet is posting ready status",
},
},
},
@@ -739,7 +739,7 @@
Type: clusterv1.MachineNodeHealthyV1Beta2Condition,
Status: metav1.ConditionUnknown,
Reason: clusterv1.MachineNodeConditionNotYetReportedV1Beta2Reason,
- Message: "* Node.Ready: condition not yet reported",
+ Message: "* Node.Ready: Condition not yet reported",
},
{
Type: clusterv1.MachineNodeReadyV1Beta2Condition,
@@ -896,7 +896,7 @@ func TestSetNodeHealthyAndReadyConditions(t *testing.T) {
Type: clusterv1.MachineNodeReadyV1Beta2Condition,
Status: metav1.ConditionTrue,
Reason: "KubeletReady",
- Message: "kubelet is posting ready status (from Node)",
+ Message: "kubelet is posting ready status",
})
return m
}(),
@@ -916,7 +916,7 @@
Type: clusterv1.MachineNodeReadyV1Beta2Condition,
Status: metav1.ConditionTrue,
Reason: "KubeletReady",
- Message: "kubelet is posting ready status (from Node)",
+ Message: "kubelet is posting ready status",
},
},
},
@@ -975,7 +975,7 @@ func TestSetNodeHealthyAndReadyConditions(t *testing.T) {
Type: clusterv1.MachineNodeReadyV1Beta2Condition,
Status: metav1.ConditionTrue,
Reason: "KubeletReady",
- Message: "kubelet is posting ready status (from Node)",
+ Message: "kubelet is posting ready status",
})
return m
}(),