From 18d0cdcadc84b94cac37a6a26d64aed5954736fb Mon Sep 17 00:00:00 2001 From: fabriziopandini Date: Wed, 13 Nov 2024 15:19:38 +0100 Subject: [PATCH] Refine v1beta2 condition reasons --- api/v1beta1/cluster_types.go | 53 +++++++- api/v1beta1/machine_types.go | 34 ++++- api/v1beta1/machinedeployment_types.go | 20 +++ api/v1beta1/machineset_types.go | 20 +++ api/v1beta1/v1beta2_condition_consts.go | 21 +++ .../api/v1beta1/v1beta2_condition_consts.go | 20 +++ .../kubeadm/internal/controllers/status.go | 22 ++++ .../internal/controllers/status_test.go | 12 +- .../cluster/cluster_controller_status.go | 116 +++++++++++++---- .../cluster/cluster_controller_status_test.go | 64 ++++----- .../machine/machine_controller_status.go | 107 +++++++-------- .../machine/machine_controller_status_test.go | 59 ++++----- .../machinedeployment_status.go | 22 ++++ .../machinedeployment_status_test.go | 18 +-- .../machineset_controller_status.go | 22 ++++ .../machineset_controller_status_test.go | 31 +++-- util/conditions/v1beta2/aggregate.go | 6 +- util/conditions/v1beta2/aggregate_test.go | 89 +++++++++---- util/conditions/v1beta2/merge_strategies.go | 122 ++++++++++++------ .../v1beta2/merge_strategies_test.go | 9 +- util/conditions/v1beta2/options.go | 25 ++++ util/conditions/v1beta2/summary.go | 2 +- util/conditions/v1beta2/summary_test.go | 45 ++++--- 23 files changed, 660 insertions(+), 279 deletions(-) diff --git a/api/v1beta1/cluster_types.go b/api/v1beta1/cluster_types.go index 3abe40d7e975..8e94aa136da4 100644 --- a/api/v1beta1/cluster_types.go +++ b/api/v1beta1/cluster_types.go @@ -47,6 +47,16 @@ const ( // If conditions are defined in spec.availabilityGates, those conditions must be true as well. ClusterAvailableV1Beta2Condition = AvailableV1Beta2Condition + // ClusterAvailableV1Beta2Reason surfaces when the cluster availability criteria is met. 
+ ClusterAvailableV1Beta2Reason = AvailableV1Beta2Reason + + // ClusterNotAvailableV1Beta2Reason surfaces when the cluster availability criteria is not met (and thus the machine is not available). + ClusterNotAvailableV1Beta2Reason = NotAvailableV1Beta2Reason + + // ClusterAvailableUnknownV1Beta2Reason surfaces when at least one cluster availability criteria is unknown + // and no availability criteria is not met. + ClusterAvailableUnknownV1Beta2Reason = AvailableUnknownV1Beta2Reason + // ClusterAvailableInternalErrorV1Beta2Reason surfaces unexpected error when computing the Available condition. ClusterAvailableInternalErrorV1Beta2Reason = InternalErrorV1Beta2Reason ) @@ -186,6 +196,17 @@ const ( // Note: Stand-alone MachineSets and stand-alone Machines are not included in this condition. ClusterWorkersAvailableV1Beta2Condition = "WorkersAvailable" + // ClusterWorkersAvailableV1Beta2Reason surfaces when all MachineDeployment and MachinePool's Available conditions are true. + ClusterWorkersAvailableV1Beta2Reason = AvailableV1Beta2Reason + + // ClusterWorkersNotAvailableV1Beta2Reason surfaces when at least one of the MachineDeployment and MachinePool's Available + // conditions is false. + ClusterWorkersNotAvailableV1Beta2Reason = NotAvailableV1Beta2Reason + + // ClusterWorkersAvailableUnknownV1Beta2Reason surfaces when at least one of the MachineDeployment and MachinePool's Available + // conditions is unknown and none of those Available conditions is false. + ClusterWorkersAvailableUnknownV1Beta2Reason = AvailableUnknownV1Beta2Reason + // ClusterWorkersAvailableNoWorkersV1Beta2Reason surfaces when no MachineDeployment and MachinePool exist for the Cluster. ClusterWorkersAvailableNoWorkersV1Beta2Reason = "NoWorkers" @@ -199,6 +220,16 @@ const ( // ClusterMachinesReadyV1Beta2Condition surfaces detail of issues on the controlled machines, if any. 
ClusterMachinesReadyV1Beta2Condition = MachinesReadyV1Beta2Condition + // ClusterMachinesReadyV1Beta2Reason surfaces when all the controlled machine's Ready conditions are true. + ClusterMachinesReadyV1Beta2Reason = ReadyV1Beta2Reason + + // ClusterMachinesNotReadyV1Beta2Reason surfaces when at least one of the controlled machine's Ready conditions is false. + ClusterMachinesNotReadyV1Beta2Reason = NotReadyV1Beta2Reason + + // ClusterMachinesReadyUnknownV1Beta2Reason surfaces when at least one of the controlled machine's Ready conditions is unknown + // and none of the controlled machine's Ready conditions is false. + ClusterMachinesReadyUnknownV1Beta2Reason = ReadyUnknownV1Beta2Reason + // ClusterMachinesReadyNoReplicasV1Beta2Reason surfaces when no machines exist for the Cluster. ClusterMachinesReadyNoReplicasV1Beta2Reason = NoReplicasV1Beta2Reason @@ -212,6 +243,16 @@ const ( // ClusterMachinesUpToDateV1Beta2Condition surfaces details of Cluster's machines not up to date, if any. ClusterMachinesUpToDateV1Beta2Condition = MachinesUpToDateV1Beta2Condition + // ClusterMachinesUpToDateV1Beta2Reason surfaces when all the controlled machine's UpToDate conditions are true. + ClusterMachinesUpToDateV1Beta2Reason = UpToDateV1Beta2Reason + + // ClusterMachinesNotUpToDateV1Beta2Reason surfaces when at least one of the controlled machine's UpToDate conditions is false. + ClusterMachinesNotUpToDateV1Beta2Reason = NotUpToDateV1Beta2Reason + + // ClusterMachinesUpToDateUnknownV1Beta2Reason surfaces when at least one of the controlled machine's UpToDate conditions is unknown + // and none of the controlled machine's UpToDate conditions is false. + ClusterMachinesUpToDateUnknownV1Beta2Reason = UpToDateUnknownV1Beta2Reason + // ClusterMachinesUpToDateNoReplicasV1Beta2Reason surfaces when no machines exist for the Cluster. 
ClusterMachinesUpToDateNoReplicasV1Beta2Reason = NoReplicasV1Beta2Reason @@ -244,10 +285,14 @@ const ( // MachinePools and stand-alone MachineSets are scaling up. ClusterScalingUpV1Beta2Reason = ScalingUpV1Beta2Reason - // ClusterNotScalingUpV1Beta2Reason surfaces when no one of the Cluster's control plane, MachineDeployments, + // ClusterNotScalingUpV1Beta2Reason surfaces when none of the Cluster's control plane, MachineDeployments, // MachinePools and stand-alone MachineSets are scaling up. ClusterNotScalingUpV1Beta2Reason = NotScalingUpV1Beta2Reason + // ClusterScalingUpUnknownV1Beta2Reason surfaces when one of the Cluster's control plane, MachineDeployments, + // MachinePools and stand-alone MachineSets scaling up condition is unknown, and none true. + ClusterScalingUpUnknownV1Beta2Reason = "ScalingUpUnknown" + // ClusterScalingUpInternalErrorV1Beta2Reason surfaces unexpected failures when listing machines // or computing the ScalingUp condition. ClusterScalingUpInternalErrorV1Beta2Reason = InternalErrorV1Beta2Reason @@ -263,10 +308,14 @@ const ( // MachinePools and stand-alone MachineSets are scaling down. ClusterScalingDownV1Beta2Reason = ScalingDownV1Beta2Reason - // ClusterNotScalingDownV1Beta2Reason surfaces when no one of the Cluster's control plane, MachineDeployments, + // ClusterNotScalingDownV1Beta2Reason surfaces when none of the Cluster's control plane, MachineDeployments, // MachinePools and stand-alone MachineSets are scaling down. ClusterNotScalingDownV1Beta2Reason = NotScalingUpV1Beta2Reason + // ClusterScalingDownUnknownV1Beta2Reason surfaces when one of the Cluster's control plane, MachineDeployments, + // MachinePools and stand-alone MachineSets scaling down condition is unknown, and none true. + ClusterScalingDownUnknownV1Beta2Reason = "ScalingDownUnknown" + // ClusterScalingDownInternalErrorV1Beta2Reason surfaces unexpected failures when listing machines // or computing the ScalingDown condition. 
ClusterScalingDownInternalErrorV1Beta2Reason = InternalErrorV1Beta2Reason diff --git a/api/v1beta1/machine_types.go b/api/v1beta1/machine_types.go index 3a3038370e5e..ce225a88cc63 100644 --- a/api/v1beta1/machine_types.go +++ b/api/v1beta1/machine_types.go @@ -92,9 +92,6 @@ const ( // Note: MinReadySeconds is assumed 0 until it will be implemented in v1beta2 API. MachineAvailableV1Beta2Condition = AvailableV1Beta2Condition - // MachineNotReadyV1Beta2Reason surfaces when a machine is not ready (and thus not available). - MachineNotReadyV1Beta2Reason = "NotReady" - // MachineWaitingForMinReadySecondsV1Beta2Reason surfaces when a machine is ready for less than MinReadySeconds (and thus not yet available). MachineWaitingForMinReadySecondsV1Beta2Reason = "WaitingForMinReadySeconds" @@ -114,6 +111,17 @@ const ( // these conditions must be true as well. MachineReadyV1Beta2Condition = ReadyV1Beta2Condition + // MachineReadyV1Beta2Reason surfaces when the machine readiness criteria is met. + MachineReadyV1Beta2Reason = ReadyV1Beta2Reason + + // MachineNotReadyV1Beta2Reason surfaces when the machine readiness criteria is not met. + // Note: when a machine is not ready, it is also not available. + MachineNotReadyV1Beta2Reason = NotReadyV1Beta2Reason + + // MachineReadyUnknownV1Beta2Reason surfaces when at least one machine readiness criteria is unknown + // and no machine readiness criteria is not met. + MachineReadyUnknownV1Beta2Reason = ReadyUnknownV1Beta2Reason + // MachineReadyInternalErrorV1Beta2Reason surfaces unexpected error when computing the Ready condition. MachineReadyInternalErrorV1Beta2Reason = InternalErrorV1Beta2Reason ) @@ -199,8 +207,24 @@ const ( // MachineNodeReadyV1Beta2Condition is true if the Machine's Node is ready. MachineNodeReadyV1Beta2Condition = "NodeReady" - // MachineNodeConditionNotYetReportedV1Beta2Reason surfaces when a Machine's Node doesn't have a condition reported yet. 
- MachineNodeConditionNotYetReportedV1Beta2Reason = "NodeConditionNotYetReported" + // MachineNodeReadyV1Beta2Reason surfaces when Machine's Node Ready condition is true. + MachineNodeReadyV1Beta2Reason = "NodeReady" + + // MachineNodeNotReadyV1Beta2Reason surfaces when Machine's Node Ready condition is false. + MachineNodeNotReadyV1Beta2Reason = "NodeNotReady" + + // MachineNodeReadyUnknownV1Beta2Reason surfaces when Machine's Node Ready condition is unknown. + MachineNodeReadyUnknownV1Beta2Reason = "NodeReadyUnknown" + + // MachineNodeHealthyV1Beta2Reason surfaces when all the node conditions report healthy state. + MachineNodeHealthyV1Beta2Reason = "NodeHealthy" + + // MachineNodeNotHealthyV1Beta2Reason surfaces when at least one node conditions report not healthy state. + MachineNodeNotHealthyV1Beta2Reason = "NodeNotHealthy" + + // MachineNodeHealthUnknownV1Beta2Reason surfaces when at least one node conditions report healthy state unknown + // and no node conditions report not healthy state. + MachineNodeHealthUnknownV1Beta2Reason = "NodeHealthyUnknown" // MachineNodeInternalErrorV1Beta2Reason surfaces unexpected failures when reading a Node object. MachineNodeInternalErrorV1Beta2Reason = InternalErrorV1Beta2Reason diff --git a/api/v1beta1/machinedeployment_types.go b/api/v1beta1/machinedeployment_types.go index 96e0bd9c6232..de068d1bce2c 100644 --- a/api/v1beta1/machinedeployment_types.go +++ b/api/v1beta1/machinedeployment_types.go @@ -107,6 +107,16 @@ const ( // MachineDeploymentMachinesReadyV1Beta2Condition surfaces detail of issues on the controlled machines, if any. MachineDeploymentMachinesReadyV1Beta2Condition = MachinesReadyV1Beta2Condition + // MachineDeploymentMachinesReadyV1Beta2Reason surfaces when all the controlled machine's Ready conditions are true. 
+ MachineDeploymentMachinesReadyV1Beta2Reason = ReadyV1Beta2Reason + + // MachineDeploymentMachinesNotReadyV1Beta2Reason surfaces when at least one of the controlled machine's Ready conditions is false. + MachineDeploymentMachinesNotReadyV1Beta2Reason = NotReadyV1Beta2Reason + + // MachineDeploymentMachinesReadyUnknownV1Beta2Reason surfaces when at least one of the controlled machine's Ready conditions is unknown + // and none of the controlled machine's Ready conditions is false. + MachineDeploymentMachinesReadyUnknownV1Beta2Reason = ReadyUnknownV1Beta2Reason + // MachineDeploymentMachinesReadyNoReplicasV1Beta2Reason surfaces when no machines exist for the MachineDeployment. MachineDeploymentMachinesReadyNoReplicasV1Beta2Reason = NoReplicasV1Beta2Reason @@ -120,6 +130,16 @@ const ( // MachineDeploymentMachinesUpToDateV1Beta2Condition surfaces details of controlled machines not up to date, if any. MachineDeploymentMachinesUpToDateV1Beta2Condition = MachinesUpToDateV1Beta2Condition + // MachineDeploymentMachinesUpToDateV1Beta2Reason surfaces when all the controlled machine's UpToDate conditions are true. + MachineDeploymentMachinesUpToDateV1Beta2Reason = UpToDateV1Beta2Reason + + // MachineDeploymentMachinesNotUpToDateV1Beta2Reason surfaces when at least one of the controlled machine's UpToDate conditions is false. + MachineDeploymentMachinesNotUpToDateV1Beta2Reason = NotUpToDateV1Beta2Reason + + // MachineDeploymentMachinesUpToDateUnknownV1Beta2Reason surfaces when at least one of the controlled machine's UpToDate conditions is unknown + // and none of the controlled machine's UpToDate conditions is false. + MachineDeploymentMachinesUpToDateUnknownV1Beta2Reason = UpToDateUnknownV1Beta2Reason + // MachineDeploymentMachinesUpToDateNoReplicasV1Beta2Reason surfaces when no machines exist for the MachineDeployment. 
MachineDeploymentMachinesUpToDateNoReplicasV1Beta2Reason = NoReplicasV1Beta2Reason diff --git a/api/v1beta1/machineset_types.go b/api/v1beta1/machineset_types.go index 03c8edd2aa09..78a8f8e30698 100644 --- a/api/v1beta1/machineset_types.go +++ b/api/v1beta1/machineset_types.go @@ -131,6 +131,16 @@ const ( // MachineSetMachinesReadyV1Beta2Condition surfaces detail of issues on the controlled machines, if any. MachineSetMachinesReadyV1Beta2Condition = MachinesReadyV1Beta2Condition + // MachineSetMachinesReadyV1Beta2Reason surfaces when all the controlled machine's Ready conditions are true. + MachineSetMachinesReadyV1Beta2Reason = ReadyV1Beta2Reason + + // MachineSetMachinesNotReadyV1Beta2Reason surfaces when at least one of the controlled machine's Ready conditions is false. + MachineSetMachinesNotReadyV1Beta2Reason = NotReadyV1Beta2Reason + + // MachineSetMachinesReadyUnknownV1Beta2Reason surfaces when at least one of the controlled machine's Ready conditions is unknown + // and none of the controlled machine's Ready conditions is false. + MachineSetMachinesReadyUnknownV1Beta2Reason = ReadyUnknownV1Beta2Reason + // MachineSetMachinesReadyNoReplicasV1Beta2Reason surfaces when no machines exist for the MachineSet. MachineSetMachinesReadyNoReplicasV1Beta2Reason = NoReplicasV1Beta2Reason @@ -145,6 +155,16 @@ const ( // MachineSetMachinesUpToDateV1Beta2Condition surfaces details of controlled machines not up to date, if any. MachineSetMachinesUpToDateV1Beta2Condition = MachinesUpToDateV1Beta2Condition + // MachineSetMachinesUpToDateV1Beta2Reason surfaces when all the controlled machine's UpToDate conditions are true. + MachineSetMachinesUpToDateV1Beta2Reason = UpToDateV1Beta2Reason + + // MachineSetMachinesNotUpToDateV1Beta2Reason surfaces when at least one of the controlled machine's UpToDate conditions is false. 
+	MachineSetMachinesNotUpToDateV1Beta2Reason = NotUpToDateV1Beta2Reason + + // MachineSetMachinesUpToDateUnknownV1Beta2Reason surfaces when at least one of the controlled machine's UpToDate conditions is unknown + // and none of the controlled machine's UpToDate conditions is false. + MachineSetMachinesUpToDateUnknownV1Beta2Reason = UpToDateUnknownV1Beta2Reason + // MachineSetMachinesUpToDateNoReplicasV1Beta2Reason surfaces when no machines exist for the MachineSet. MachineSetMachinesUpToDateNoReplicasV1Beta2Reason = NoReplicasV1Beta2Reason diff --git a/api/v1beta1/v1beta2_condition_consts.go b/api/v1beta1/v1beta2_condition_consts.go index 3dd93361177c..47aad7f48b18 100644 --- a/api/v1beta1/v1beta2_condition_consts.go +++ b/api/v1beta1/v1beta2_condition_consts.go @@ -93,6 +93,27 @@ const ( // NotAvailableV1Beta2Reason applies to a condition surfacing object not satisfying availability criteria. NotAvailableV1Beta2Reason = "NotAvailable" + // AvailableUnknownV1Beta2Reason applies to a condition surfacing object availability unknown. + AvailableUnknownV1Beta2Reason = "AvailableUnknown" + + // ReadyV1Beta2Reason applies to a condition surfacing object readiness. + ReadyV1Beta2Reason = "Ready" + + // NotReadyV1Beta2Reason applies to a condition surfacing object not satisfying readiness criteria. + NotReadyV1Beta2Reason = "NotReady" + + // ReadyUnknownV1Beta2Reason applies to a condition surfacing object readiness unknown. + ReadyUnknownV1Beta2Reason = "ReadyUnknown" + + // UpToDateV1Beta2Reason applies to a condition surfacing object up-to-date. + UpToDateV1Beta2Reason = "UpToDate" + + // NotUpToDateV1Beta2Reason applies to a condition surfacing object not up-to-date. + NotUpToDateV1Beta2Reason = "NotUpToDate" + + // UpToDateUnknownV1Beta2Reason applies to a condition surfacing object up-to-date unknown. + UpToDateUnknownV1Beta2Reason = "UpToDateUnknown" + // ScalingUpV1Beta2Reason surfaces when an object is scaling up. 
ScalingUpV1Beta2Reason = "ScalingUp" diff --git a/controlplane/kubeadm/api/v1beta1/v1beta2_condition_consts.go b/controlplane/kubeadm/api/v1beta1/v1beta2_condition_consts.go index 3565e31b3dde..b486eea57e77 100644 --- a/controlplane/kubeadm/api/v1beta1/v1beta2_condition_consts.go +++ b/controlplane/kubeadm/api/v1beta1/v1beta2_condition_consts.go @@ -137,6 +137,16 @@ const ( // If not using an external etcd also EtcdPodHealthy, EtcdMemberHealthy conditions are included. KubeadmControlPlaneMachinesReadyV1Beta2Condition = clusterv1.MachinesReadyV1Beta2Condition + // KubeadmControlPlaneMachinesReadyV1Beta2Reason surfaces when all the controlled machine's Ready conditions are true. + KubeadmControlPlaneMachinesReadyV1Beta2Reason = clusterv1.ReadyV1Beta2Reason + + // KubeadmControlPlaneMachinesNotReadyV1Beta2Reason surfaces when at least one of the controlled machine's Ready conditions is false. + KubeadmControlPlaneMachinesNotReadyV1Beta2Reason = clusterv1.NotReadyV1Beta2Reason + + // KubeadmControlPlaneMachinesReadyUnknownV1Beta2Reason surfaces when at least one of the controlled machine's Ready conditions is unknown + // and no one of the controlled machine's Ready conditions is false. + KubeadmControlPlaneMachinesReadyUnknownV1Beta2Reason = clusterv1.ReadyUnknownV1Beta2Reason + // KubeadmControlPlaneMachinesReadyNoReplicasV1Beta2Reason surfaces when no machines exist for the KubeadmControlPlane. KubeadmControlPlaneMachinesReadyNoReplicasV1Beta2Reason = clusterv1.NoReplicasV1Beta2Reason @@ -149,6 +159,16 @@ const ( // KubeadmControlPlaneMachinesUpToDateV1Beta2Condition surfaces details of controlled machines not up to date, if any. KubeadmControlPlaneMachinesUpToDateV1Beta2Condition = clusterv1.MachinesUpToDateV1Beta2Condition + // KubeadmControlPlaneMachinesUpToDateV1Beta2Reason surfaces when all the controlled machine's UpToDate conditions are true. 
+ KubeadmControlPlaneMachinesUpToDateV1Beta2Reason = clusterv1.UpToDateV1Beta2Reason + + // KubeadmControlPlaneMachinesNotUpToDateV1Beta2Reason surfaces when at least one of the controlled machine's UpToDate conditions is false. + KubeadmControlPlaneMachinesNotUpToDateV1Beta2Reason = clusterv1.NotUpToDateV1Beta2Reason + + // KubeadmControlPlaneMachinesUpToDateUnknownV1Beta2Reason surfaces when at least one of the controlled machine's UpToDate conditions is unknown + // and no one of the controlled machine's UpToDate conditions is false. + KubeadmControlPlaneMachinesUpToDateUnknownV1Beta2Reason = clusterv1.UpToDateUnknownV1Beta2Reason + // KubeadmControlPlaneMachinesUpToDateNoReplicasV1Beta2Reason surfaces when no machines exist for the KubeadmControlPlane. KubeadmControlPlaneMachinesUpToDateNoReplicasV1Beta2Reason = clusterv1.NoReplicasV1Beta2Reason diff --git a/controlplane/kubeadm/internal/controllers/status.go b/controlplane/kubeadm/internal/controllers/status.go index 485351b7e5e4..3ee2813993de 100644 --- a/controlplane/kubeadm/internal/controllers/status.go +++ b/controlplane/kubeadm/internal/controllers/status.go @@ -330,6 +330,16 @@ func setMachinesReadyCondition(ctx context.Context, kcp *controlplanev1.KubeadmC readyCondition, err := v1beta2conditions.NewAggregateCondition( machines.UnsortedList(), clusterv1.MachineReadyV1Beta2Condition, v1beta2conditions.TargetConditionType(controlplanev1.KubeadmControlPlaneMachinesReadyV1Beta2Condition), + // Using a custom merge strategy to override reasons applied during merge. 
+ v1beta2conditions.CustomMergeStrategy{ + MergeStrategy: v1beta2conditions.DefaultMergeStrategy( + v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + controlplanev1.KubeadmControlPlaneMachinesNotReadyV1Beta2Reason, + controlplanev1.KubeadmControlPlaneMachinesReadyUnknownV1Beta2Reason, + controlplanev1.KubeadmControlPlaneMachinesReadyV1Beta2Reason, + )), + ), + }, ) if err != nil { v1beta2conditions.Set(kcp, metav1.Condition{ @@ -360,6 +370,16 @@ func setMachinesUpToDateCondition(ctx context.Context, kcp *controlplanev1.Kubea upToDateCondition, err := v1beta2conditions.NewAggregateCondition( machines.UnsortedList(), clusterv1.MachineUpToDateV1Beta2Condition, v1beta2conditions.TargetConditionType(controlplanev1.KubeadmControlPlaneMachinesUpToDateV1Beta2Condition), + // Using a custom merge strategy to override reasons applied during merge. + v1beta2conditions.CustomMergeStrategy{ + MergeStrategy: v1beta2conditions.DefaultMergeStrategy( + v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + controlplanev1.KubeadmControlPlaneMachinesNotUpToDateV1Beta2Reason, + controlplanev1.KubeadmControlPlaneMachinesUpToDateUnknownV1Beta2Reason, + controlplanev1.KubeadmControlPlaneMachinesUpToDateV1Beta2Reason, + )), + ), + }, ) if err != nil { v1beta2conditions.Set(kcp, metav1.Condition{ @@ -399,6 +419,8 @@ func setRemediatingCondition(ctx context.Context, kcp *controlplanev1.KubeadmCon remediatingCondition, err := v1beta2conditions.NewAggregateCondition( machinesToBeRemediated.UnsortedList(), clusterv1.MachineOwnerRemediatedV1Beta2Condition, v1beta2conditions.TargetConditionType(controlplanev1.KubeadmControlPlaneRemediatingV1Beta2Condition), + // Note: in case of the remediating conditions it is not required to use a CustomMergeStrategy/ComputeReasonFunc + // because we are considering only machinesToBeRemediated (and we can pin the reason when we set the condition). 
) if err != nil { v1beta2conditions.Set(kcp, metav1.Condition{ diff --git a/controlplane/kubeadm/internal/controllers/status_test.go b/controlplane/kubeadm/internal/controllers/status_test.go index 33f14d3f2874..78c73059d22b 100644 --- a/controlplane/kubeadm/internal/controllers/status_test.go +++ b/controlplane/kubeadm/internal/controllers/status_test.go @@ -439,11 +439,11 @@ func Test_setScalingDownCondition(t *testing.T) { } func Test_setMachinesReadyAndMachinesUpToDateConditions(t *testing.T) { - readyTrue := metav1.Condition{Type: clusterv1.MachineReadyV1Beta2Condition, Status: metav1.ConditionTrue} - readyFalse := metav1.Condition{Type: clusterv1.MachineReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: "SomeReason", Message: "NotReady"} + readyTrue := metav1.Condition{Type: clusterv1.MachineReadyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineReadyV1Beta2Reason} + readyFalse := metav1.Condition{Type: clusterv1.MachineReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineNotReadyV1Beta2Reason, Message: "NotReady"} - upToDateTrue := metav1.Condition{Type: clusterv1.MachineUpToDateV1Beta2Condition, Status: metav1.ConditionTrue} - upToDateFalse := metav1.Condition{Type: clusterv1.MachineUpToDateV1Beta2Condition, Status: metav1.ConditionFalse, Reason: "SomeReason", Message: "NotUpToDate"} + upToDateTrue := metav1.Condition{Type: clusterv1.MachineUpToDateV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineUpToDateV1Beta2Reason} + upToDateFalse := metav1.Condition{Type: clusterv1.MachineUpToDateV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineNotUpToDateV1Beta2Reason, Message: "NotUpToDate"} tests := []struct { name string @@ -481,13 +481,13 @@ func Test_setMachinesReadyAndMachinesUpToDateConditions(t *testing.T) { expectMachinesReadyCondition: metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneMachinesReadyV1Beta2Condition, Status: 
metav1.ConditionFalse, - Reason: "SomeReason", // There is only one machine reporting issues, using the reason from that machine. + Reason: controlplanev1.KubeadmControlPlaneMachinesNotReadyV1Beta2Reason, Message: "* Machine m3: NotReady", }, expectMachinesUpToDateCondition: metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneMachinesUpToDateV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: v1beta2conditions.MultipleIssuesReportedReason, // There are many machines reporting issues, using a generic reason. + Reason: controlplanev1.KubeadmControlPlaneMachinesNotUpToDateV1Beta2Reason, Message: "* Machines m2, m3: NotUpToDate", }, }, diff --git a/internal/controllers/cluster/cluster_controller_status.go b/internal/controllers/cluster/cluster_controller_status.go index ff7d50c9f872..a24adf769185 100644 --- a/internal/controllers/cluster/cluster_controller_status.go +++ b/internal/controllers/cluster/cluster_controller_status.go @@ -25,7 +25,6 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" @@ -546,6 +545,16 @@ func setWorkersAvailableCondition(ctx context.Context, cluster *clusterv1.Cluste workersAvailableCondition, err := v1beta2conditions.NewAggregateCondition( ws, clusterv1.AvailableV1Beta2Condition, v1beta2conditions.TargetConditionType(clusterv1.ClusterWorkersAvailableV1Beta2Condition), + // Using a custom merge strategy to override reasons applied during merge + v1beta2conditions.CustomMergeStrategy{ + MergeStrategy: v1beta2conditions.DefaultMergeStrategy( + v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + clusterv1.ClusterWorkersNotAvailableV1Beta2Reason, + clusterv1.ClusterWorkersAvailableUnknownV1Beta2Reason, + clusterv1.ClusterWorkersAvailableV1Beta2Reason, + )), + ), + }, ) if err != nil { log.Error(err, "Failed to 
aggregate MachinePool and MachineDeployment's Available conditions") @@ -587,6 +596,16 @@ func setMachinesReadyCondition(ctx context.Context, cluster *clusterv1.Cluster, readyCondition, err := v1beta2conditions.NewAggregateCondition( machines.UnsortedList(), clusterv1.MachineReadyV1Beta2Condition, v1beta2conditions.TargetConditionType(clusterv1.ClusterMachinesReadyV1Beta2Condition), + // Using a custom merge strategy to override reasons applied during merge + v1beta2conditions.CustomMergeStrategy{ + MergeStrategy: v1beta2conditions.DefaultMergeStrategy( + v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + clusterv1.ClusterMachinesNotReadyV1Beta2Reason, + clusterv1.ClusterMachinesReadyUnknownV1Beta2Reason, + clusterv1.ClusterMachinesReadyV1Beta2Reason, + )), + ), + }, ) if err != nil { log.Error(err, "Failed to aggregate Machine's Ready conditions") @@ -627,6 +646,16 @@ func setMachinesUpToDateCondition(ctx context.Context, cluster *clusterv1.Cluste upToDateCondition, err := v1beta2conditions.NewAggregateCondition( machines.UnsortedList(), clusterv1.MachineUpToDateV1Beta2Condition, v1beta2conditions.TargetConditionType(clusterv1.ClusterMachinesUpToDateV1Beta2Condition), + // Using a custom merge strategy to override reasons applied during merge + v1beta2conditions.CustomMergeStrategy{ + MergeStrategy: v1beta2conditions.DefaultMergeStrategy( + v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + clusterv1.ClusterMachinesNotUpToDateV1Beta2Reason, + clusterv1.ClusterMachinesUpToDateUnknownV1Beta2Reason, + clusterv1.ClusterMachinesUpToDateV1Beta2Reason, + )), + ), + }, ) if err != nil { log.Error(err, "Failed to aggregate Machine's UpToDate conditions") @@ -667,6 +696,8 @@ func setRemediatingCondition(ctx context.Context, cluster *clusterv1.Cluster, ma remediatingCondition, err := v1beta2conditions.NewAggregateCondition( machinesToBeRemediated.UnsortedList(), 
clusterv1.MachineOwnerRemediatedV1Beta2Condition, v1beta2conditions.TargetConditionType(clusterv1.ClusterRemediatingV1Beta2Condition), + // Note: in case of the remediating conditions it is not required to use a CustomMergeStrategy/ComputeReasonFunc + // because we are considering only machinesToBeRemediated (and we can pin the reason when we set the condition). ) if err != nil { v1beta2conditions.Set(cluster, metav1.Condition{ @@ -736,7 +767,21 @@ func setScalingUpCondition(ctx context.Context, cluster *clusterv1.Cluster, cont scalingUpCondition, err := v1beta2conditions.NewAggregateCondition( ws, clusterv1.ScalingUpV1Beta2Condition, v1beta2conditions.TargetConditionType(clusterv1.ClusterScalingUpV1Beta2Condition), - v1beta2conditions.NegativePolarityConditionTypes{clusterv1.ClusterScalingUpV1Beta2Condition}, + // Instruct aggregate to consider ScalingUp condition with negative polarity. + v1beta2conditions.NegativePolarityConditionTypes{clusterv1.ScalingUpV1Beta2Condition}, + // Using a custom merge strategy to override reasons applied during merge and to ensure merge + // takes into account the fact the ScalingUp has negative polarity. 
+ v1beta2conditions.CustomMergeStrategy{ + MergeStrategy: v1beta2conditions.DefaultMergeStrategy( + v1beta2conditions.TargetConditionHasPositivePolarity(false), + v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + clusterv1.ClusterScalingUpV1Beta2Reason, + clusterv1.ClusterScalingUpUnknownV1Beta2Reason, + clusterv1.ClusterNotScalingUpV1Beta2Reason, + )), + v1beta2conditions.GetPriorityFunc(v1beta2conditions.GetDefaultMergePriorityFunc(clusterv1.ScalingUpV1Beta2Condition)), + ), + }, ) if err != nil { log.Error(err, "Failed to aggregate ControlPlane, MachinePool, MachineDeployment, MachineSet's ScalingUp conditions") @@ -799,7 +844,21 @@ func setScalingDownCondition(ctx context.Context, cluster *clusterv1.Cluster, co scalingDownCondition, err := v1beta2conditions.NewAggregateCondition( ws, clusterv1.ScalingDownV1Beta2Condition, v1beta2conditions.TargetConditionType(clusterv1.ClusterScalingDownV1Beta2Condition), - v1beta2conditions.NegativePolarityConditionTypes{clusterv1.ClusterScalingDownV1Beta2Condition}, + // Instruct aggregate to consider ScalingDown condition with negative polarity. + v1beta2conditions.NegativePolarityConditionTypes{clusterv1.ScalingDownV1Beta2Condition}, + // Using a custom merge strategy to override reasons applied during merge and to ensure merge + // takes into account the fact the ScalingDown has negative polarity. 
+ v1beta2conditions.CustomMergeStrategy{ + MergeStrategy: v1beta2conditions.DefaultMergeStrategy( + v1beta2conditions.TargetConditionHasPositivePolarity(false), + v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + clusterv1.ClusterScalingDownV1Beta2Reason, + clusterv1.ClusterScalingDownUnknownV1Beta2Reason, + clusterv1.ClusterNotScalingDownV1Beta2Reason, + )), + v1beta2conditions.GetPriorityFunc(v1beta2conditions.GetDefaultMergePriorityFunc(clusterv1.ScalingDownV1Beta2Condition)), + ), + }, ) if err != nil { log.Error(err, "Failed to aggregate ControlPlane, MachinePool, MachineDeployment, MachineSet's ScalingDown conditions") @@ -835,28 +894,35 @@ func setDeletingCondition(_ context.Context, cluster *clusterv1.Cluster, deletin type clusterConditionCustomMergeStrategy struct { cluster *clusterv1.Cluster - negativePolarityConditionTypes sets.Set[string] + negativePolarityConditionTypes []string } func (c clusterConditionCustomMergeStrategy) Merge(conditions []v1beta2conditions.ConditionWithOwnerInfo, conditionTypes []string) (status metav1.ConditionStatus, reason, message string, err error) { - return v1beta2conditions.DefaultMergeStrategyWithCustomPriority(func(condition metav1.Condition) v1beta2conditions.MergePriority { - // While cluster is deleting, treat unknown conditions from external objects as info (it is ok that those objects have been deleted at this stage). 
- if !c.cluster.DeletionTimestamp.IsZero() { - if condition.Type == clusterv1.ClusterInfrastructureReadyV1Beta2Condition && condition.Status == metav1.ConditionUnknown && (condition.Reason == clusterv1.ClusterInfrastructureDeletedV1Beta2Reason || condition.Reason == clusterv1.ClusterInfrastructureDoesNotExistV1Beta2Reason) { - return v1beta2conditions.InfoMergePriority + return v1beta2conditions.DefaultMergeStrategy(v1beta2conditions.GetPriorityFunc( + func(condition metav1.Condition) v1beta2conditions.MergePriority { + // While cluster is deleting, treat unknown conditions from external objects as info (it is ok that those objects have been deleted at this stage). + if !c.cluster.DeletionTimestamp.IsZero() { + if condition.Type == clusterv1.ClusterInfrastructureReadyV1Beta2Condition && condition.Status == metav1.ConditionUnknown && (condition.Reason == clusterv1.ClusterInfrastructureDeletedV1Beta2Reason || condition.Reason == clusterv1.ClusterInfrastructureDoesNotExistV1Beta2Reason) { + return v1beta2conditions.InfoMergePriority + } + if condition.Type == clusterv1.ClusterControlPlaneAvailableV1Beta2Condition && condition.Status == metav1.ConditionUnknown && (condition.Reason == clusterv1.ClusterControlPlaneDeletedV1Beta2Reason || condition.Reason == clusterv1.ClusterControlPlaneDoesNotExistV1Beta2Reason) { + return v1beta2conditions.InfoMergePriority + } } - if condition.Type == clusterv1.ClusterControlPlaneAvailableV1Beta2Condition && condition.Status == metav1.ConditionUnknown && (condition.Reason == clusterv1.ClusterControlPlaneDeletedV1Beta2Reason || condition.Reason == clusterv1.ClusterControlPlaneDoesNotExistV1Beta2Reason) { + + // Treat all reasons except TopologyReconcileFailed and ClusterClassNotReconciled of TopologyReconciled condition as info. 
+ if condition.Type == clusterv1.ClusterTopologyReconciledV1Beta2Condition && condition.Status == metav1.ConditionFalse && + condition.Reason != clusterv1.ClusterTopologyReconciledFailedV1Beta2Reason && condition.Reason != clusterv1.ClusterTopologyReconciledClusterClassNotReconciledV1Beta2Reason { return v1beta2conditions.InfoMergePriority } - } - - // Treat all reasons except TopologyReconcileFailed and ClusterClassNotReconciled of TopologyReconciled condition as info. - if condition.Type == clusterv1.ClusterTopologyReconciledV1Beta2Condition && condition.Status == metav1.ConditionFalse && - condition.Reason != clusterv1.ClusterTopologyReconciledFailedV1Beta2Reason && condition.Reason != clusterv1.ClusterTopologyReconciledClusterClassNotReconciledV1Beta2Reason { - return v1beta2conditions.InfoMergePriority - } - return v1beta2conditions.GetDefaultMergePriorityFunc(c.negativePolarityConditionTypes)(condition) - }).Merge(conditions, conditionTypes) + return v1beta2conditions.GetDefaultMergePriorityFunc(c.negativePolarityConditionTypes...)(condition) + }), + v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + clusterv1.ClusterNotAvailableV1Beta2Reason, + clusterv1.ClusterAvailableUnknownV1Beta2Reason, + clusterv1.ClusterAvailableV1Beta2Reason, + )), + ).Merge(conditions, conditionTypes) } func setAvailableCondition(ctx context.Context, cluster *clusterv1.Cluster) { @@ -876,13 +942,15 @@ func setAvailableCondition(ctx context.Context, cluster *clusterv1.Cluster) { summaryOpts := []v1beta2conditions.SummaryOption{ forConditionTypes, - v1beta2conditions.NegativePolarityConditionTypes{ - clusterv1.ClusterDeletingV1Beta2Condition, - }, + // Instruct summary to consider Deleting condition with negative polarity. 
+ v1beta2conditions.NegativePolarityConditionTypes{clusterv1.ClusterDeletingV1Beta2Condition}, + // Using a custom merge strategy to override reasons applied during merge and to ignore some + // info messages so the available condition is less noisy. v1beta2conditions.CustomMergeStrategy{ MergeStrategy: clusterConditionCustomMergeStrategy{ - cluster: cluster, - negativePolarityConditionTypes: sets.Set[string]{}.Insert(clusterv1.ClusterDeletingV1Beta2Condition), + cluster: cluster, + // Instruct merge to consider Deleting condition with negative polarity. + negativePolarityConditionTypes: []string{clusterv1.ClusterDeletingV1Beta2Condition}, }, }, } diff --git a/internal/controllers/cluster/cluster_controller_status_test.go b/internal/controllers/cluster/cluster_controller_status_test.go index 91a6a58f9e05..729ee9aea0ac 100644 --- a/internal/controllers/cluster/cluster_controller_status_test.go +++ b/internal/controllers/cluster/cluster_controller_status_test.go @@ -696,7 +696,7 @@ func TestSetWorkersAvailableCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.ClusterWorkersAvailableV1Beta2Condition, Status: metav1.ConditionUnknown, - Reason: v1beta2conditions.MultipleUnknownReportedReason, + Reason: clusterv1.ClusterWorkersAvailableUnknownV1Beta2Reason, Message: "* MachineDeployment md1: Condition Available not yet reported\n" + "* MachinePool mp1: Condition Available not yet reported", }, @@ -724,7 +724,7 @@ func TestSetWorkersAvailableCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.ClusterWorkersAvailableV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: v1beta2conditions.MultipleIssuesReportedReason, + Reason: clusterv1.ClusterWorkersNotAvailableV1Beta2Reason, Message: "* MachineDeployment md1: 3 available replicas, at least 4 required (spec.strategy.rollout.maxUnavailable is 1, spec.replicas is 5)\n" + "* MachinePool mp1: 2 available replicas, at least 3 required (spec.strategy.rollout.maxUnavailable
is 1, spec.replicas is 4)", }, @@ -747,7 +747,7 @@ func TestSetMachinesReadyCondition(t *testing.T) { readyCondition := metav1.Condition{ Type: clusterv1.MachineReadyV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: v1beta2conditions.MultipleInfoReportedReason, + Reason: clusterv1.MachineReadyV1Beta2Reason, } tests := []struct { @@ -791,7 +791,7 @@ func TestSetMachinesReadyCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.ClusterMachinesReadyV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: v1beta2conditions.MultipleInfoReportedReason, + Reason: clusterv1.ClusterMachinesReadyV1Beta2Reason, }, }, { @@ -805,7 +805,7 @@ func TestSetMachinesReadyCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.ClusterMachinesReadyV1Beta2Condition, Status: metav1.ConditionUnknown, - Reason: v1beta2conditions.NotYetReportedReason, + Reason: clusterv1.ClusterMachinesReadyUnknownV1Beta2Reason, Message: "* Machine machine-2: Condition Ready not yet reported", }, }, @@ -837,7 +837,7 @@ func TestSetMachinesReadyCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.ClusterMachinesReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: v1beta2conditions.MultipleIssuesReportedReason, + Reason: clusterv1.ClusterMachinesNotReadyV1Beta2Reason, Message: "* Machine machine-2: HealthCheckSucceeded: Some message\n" + "* Machine machine-4: Deleting: Machine deletion in progress, stage: DrainingNode\n" + "* Machine machine-3: Some unknown message", @@ -907,7 +907,7 @@ func TestSetMachinesUpToDateCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.ClusterMachinesUpToDateV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: "some-reason-1", + Reason: clusterv1.ClusterMachinesUpToDateV1Beta2Reason, Message: "", }, }, @@ -926,7 +926,7 @@ func TestSetMachinesUpToDateCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: 
clusterv1.ClusterMachinesUpToDateV1Beta2Condition, Status: metav1.ConditionUnknown, - Reason: "some-unknown-reason-1", + Reason: clusterv1.ClusterMachinesUpToDateUnknownV1Beta2Reason, Message: "* Machine unknown-1: some unknown message", }, }, @@ -945,7 +945,7 @@ func TestSetMachinesUpToDateCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.ClusterMachinesUpToDateV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: "some-not-up-to-date-reason", + Reason: clusterv1.ClusterMachinesNotUpToDateV1Beta2Reason, Message: "* Machine not-up-to-date-machine-1: some not up-to-date message", }, }, @@ -959,7 +959,7 @@ func TestSetMachinesUpToDateCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.ClusterMachinesUpToDateV1Beta2Condition, Status: metav1.ConditionUnknown, - Reason: v1beta2conditions.NotYetReportedReason, + Reason: clusterv1.ClusterMachinesUpToDateUnknownV1Beta2Reason, Message: "* Machine no-condition-machine-1: Condition UpToDate not yet reported", }, }, @@ -996,7 +996,7 @@ func TestSetMachinesUpToDateCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.ClusterMachinesUpToDateV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: v1beta2conditions.MultipleIssuesReportedReason, + Reason: clusterv1.ClusterMachinesNotUpToDateV1Beta2Reason, Message: "* Machines not-up-to-date-machine-1, not-up-to-date-machine-2: This is not up-to-date message\n" + "* Machines no-condition-machine-1, no-condition-machine-2: Condition UpToDate not yet reported", }, @@ -1091,7 +1091,7 @@ func TestSetScalingUpCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.ClusterScalingUpV1Beta2Condition, Status: metav1.ConditionUnknown, - Reason: v1beta2conditions.MultipleUnknownReportedReason, + Reason: clusterv1.ClusterScalingUpUnknownV1Beta2Reason, Message: "* MachineDeployment md1: Condition ScalingUp not yet reported\n" + "* MachinePool mp1: Condition ScalingUp not yet reported\n" + "* 
MachineSet ms1: Condition ScalingUp not yet reported", @@ -1140,7 +1140,7 @@ func TestSetScalingUpCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.ClusterScalingUpV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: v1beta2conditions.MultipleIssuesReportedReason, + Reason: clusterv1.ClusterScalingUpV1Beta2Reason, Message: "* FakeControlPlane cp1: Scaling up from 0 to 3 replicas\n" + "* MachineDeployment md1: Scaling up from 1 to 5 replicas\n" + "* MachinePool mp1: Scaling up from 0 to 3 replicas\n" + @@ -1185,7 +1185,7 @@ func TestSetScalingUpCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.ClusterScalingUpV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: v1beta2conditions.MultipleIssuesReportedReason, + Reason: clusterv1.ClusterScalingUpV1Beta2Reason, Message: "* MachineDeployment md1: Scaling up from 1 to 5 replicas\n" + "* MachinePool mp1: Scaling up from 0 to 3 replicas\n" + "* MachineSet ms1: Scaling up from 2 to 7 replicas", @@ -1250,7 +1250,7 @@ func TestSetScalingUpCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.ClusterScalingUpV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: v1beta2conditions.MultipleIssuesReportedReason, + Reason: clusterv1.ClusterScalingUpV1Beta2Reason, Message: "* MachineDeployment md1: Scaling up from 1 to 5 replicas\n" + "* MachinePool mp1: Scaling up from 0 to 3 replicas\n" + "* MachineSet ms1: Scaling up from 2 to 7 replicas", @@ -1342,7 +1342,7 @@ func TestSetScalingDownCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.ClusterScalingDownV1Beta2Condition, Status: metav1.ConditionUnknown, - Reason: v1beta2conditions.MultipleUnknownReportedReason, + Reason: clusterv1.ClusterScalingDownUnknownV1Beta2Reason, Message: "* MachineDeployment md1: Condition ScalingDown not yet reported\n" + "* MachinePool mp1: Condition ScalingDown not yet reported\n" + "* MachineSet ms1: Condition ScalingDown not yet 
reported", @@ -1391,7 +1391,7 @@ func TestSetScalingDownCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.ClusterScalingDownV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: v1beta2conditions.MultipleIssuesReportedReason, + Reason: clusterv1.ClusterScalingDownV1Beta2Reason, Message: "* FakeControlPlane cp1: Scaling down from 0 to 3 replicas\n" + "* MachineDeployment md1: Scaling down from 1 to 5 replicas\n" + "* MachinePool mp1: Scaling down from 0 to 3 replicas\n" + @@ -1436,7 +1436,7 @@ func TestSetScalingDownCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.ClusterScalingDownV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: v1beta2conditions.MultipleIssuesReportedReason, + Reason: clusterv1.ClusterScalingDownV1Beta2Reason, Message: "* MachineDeployment md1: Scaling down from 1 to 5 replicas\n" + "* MachinePool mp1: Scaling down from 0 to 3 replicas\n" + "* MachineSet ms1: Scaling down from 2 to 7 replicas", @@ -1501,7 +1501,7 @@ func TestSetScalingDownCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.ClusterScalingDownV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: v1beta2conditions.MultipleIssuesReportedReason, + Reason: clusterv1.ClusterScalingDownV1Beta2Reason, Message: "* MachineDeployment md1: Scaling down from 1 to 5 replicas\n" + "* MachinePool mp1: Scaling down from 0 to 3 replicas\n" + "* MachineSet ms1: Scaling down from 2 to 7 replicas", @@ -1707,7 +1707,7 @@ func TestSetAvailableCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.ClusterAvailableV1Beta2Condition, Status: metav1.ConditionUnknown, - Reason: v1beta2conditions.MultipleUnknownReportedReason, + Reason: clusterv1.ClusterAvailableUnknownV1Beta2Reason, Message: "* InfrastructureReady: Condition not yet reported\n" + "* ControlPlaneAvailable: Condition not yet reported\n" + "* WorkersAvailable: Condition not yet reported\n" + @@ -1761,7 +1761,7 @@ func 
TestSetAvailableCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.ClusterAvailableV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: v1beta2conditions.MultipleInfoReportedReason, + Reason: clusterv1.ClusterAvailableV1Beta2Reason, Message: "", }, }, @@ -1814,7 +1814,7 @@ func TestSetAvailableCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.ClusterAvailableV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: clusterv1.ClusterDeletingWaitingForWorkersDeletionV1Beta2Reason, + Reason: clusterv1.ClusterNotAvailableV1Beta2Reason, Message: "* Deleting:\n" + " * Control plane Machines: cp1, cp2, cp3\n" + " * MachineDeployments: md1, md2\n" + @@ -1868,7 +1868,7 @@ func TestSetAvailableCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.ClusterAvailableV1Beta2Condition, Status: metav1.ConditionUnknown, - Reason: v1beta2conditions.NotYetReportedReason, + Reason: clusterv1.ClusterAvailableUnknownV1Beta2Reason, Message: "* TopologyReconciled: Condition not yet reported", }, }, @@ -1927,7 +1927,7 @@ func TestSetAvailableCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.ClusterAvailableV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: "SomeReason", + Reason: clusterv1.ClusterNotAvailableV1Beta2Reason, Message: "* MyAvailabilityGate: Some message", }, }, @@ -1975,7 +1975,7 @@ func TestSetAvailableCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.ClusterAvailableV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: clusterv1.ClusterDeletingWaitingForBeforeDeleteHookV1Beta2Reason, + Reason: clusterv1.ClusterNotAvailableV1Beta2Reason, Message: "* Deleting: Some message", }, }, @@ -2036,7 +2036,7 @@ func TestSetAvailableCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.ClusterAvailableV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: v1beta2conditions.MultipleInfoReportedReason, + Reason: 
clusterv1.ClusterAvailableV1Beta2Reason, }, }, { @@ -2090,7 +2090,7 @@ func TestSetAvailableCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.ClusterAvailableV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: v1beta2conditions.MultipleInfoReportedReason, + Reason: clusterv1.ClusterAvailableV1Beta2Reason, Message: "* TopologyReconciled: Control plane rollout and upgrade to version v1.29.0 on hold.", }, }, @@ -2120,7 +2120,7 @@ func TestSetAvailableCondition(t *testing.T) { { Type: clusterv1.ClusterWorkersAvailableV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: v1beta2conditions.MultipleIssuesReportedReason, + Reason: clusterv1.ClusterWorkersNotAvailableV1Beta2Reason, Message: "3 available replicas, at least 4 required (spec.strategy.rollout.maxUnavailable is 1, spec.replicas is 5) from MachineDeployment md1; 2 available replicas, at least 3 required (spec.strategy.rollout.maxUnavailable is 1, spec.replicas is 4) from MachinePool mp1", }, { @@ -2146,7 +2146,7 @@ func TestSetAvailableCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.ClusterAvailableV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: v1beta2conditions.MultipleIssuesReportedReason, // Note: There is only one condition that is an issue, but it has the MultipleIssuesReported reason. 
+ Reason: clusterv1.ClusterNotAvailableV1Beta2Reason, Message: "* WorkersAvailable: 3 available replicas, at least 4 required (spec.strategy.rollout.maxUnavailable is 1, spec.replicas is 5) from MachineDeployment md1; 2 available replicas, at least 3 required (spec.strategy.rollout.maxUnavailable is 1, spec.replicas is 4) from MachinePool mp1", }, }, @@ -2202,7 +2202,7 @@ func TestSetAvailableCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.ClusterAvailableV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: clusterv1.ClusterTopologyReconciledClusterClassNotReconciledV1Beta2Reason, + Reason: clusterv1.ClusterNotAvailableV1Beta2Reason, Message: "* TopologyReconciled: ClusterClass not reconciled. If this condition persists please check ClusterClass status. A ClusterClass is reconciled if" + ".status.observedGeneration == .metadata.generation is true. If this is not the case either ClusterClass reconciliation failed or the ClusterClass is paused", }, @@ -2233,7 +2233,7 @@ func TestSetAvailableCondition(t *testing.T) { { Type: clusterv1.ClusterWorkersAvailableV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: v1beta2conditions.MultipleIssuesReportedReason, + Reason: clusterv1.ClusterWorkersNotAvailableV1Beta2Reason, Message: "3 available replicas, at least 4 required (spec.strategy.rollout.maxUnavailable is 1, spec.replicas is 5) from MachineDeployment md1; 2 available replicas, at least 3 required (spec.strategy.rollout.maxUnavailable is 1, spec.replicas is 4) from MachinePool mp1", }, { @@ -2260,7 +2260,7 @@ func TestSetAvailableCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.ClusterAvailableV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: v1beta2conditions.MultipleIssuesReportedReason, + Reason: clusterv1.ClusterNotAvailableV1Beta2Reason, Message: "* WorkersAvailable: 3 available replicas, at least 4 required (spec.strategy.rollout.maxUnavailable is 1, spec.replicas is 5) from 
MachineDeployment md1; 2 available replicas, at least 3 required (spec.strategy.rollout.maxUnavailable is 1, spec.replicas is 4) from MachinePool mp1\n" + "* TopologyReconciled: ClusterClass not reconciled. If this condition persists please check ClusterClass status. A ClusterClass is reconciled if.status.observedGeneration == .metadata.generation is true. If this is not the case either ClusterClass reconciliation failed or the ClusterClass is paused", }, diff --git a/internal/controllers/machine/machine_controller_status.go b/internal/controllers/machine/machine_controller_status.go index 83a365559957..b3141fd01f53 100644 --- a/internal/controllers/machine/machine_controller_status.go +++ b/internal/controllers/machine/machine_controller_status.go @@ -26,7 +26,6 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -287,9 +286,15 @@ func setNodeHealthyAndReadyConditions(ctx context.Context, cluster *clusterv1.Cl if condition.Message != "" { message = fmt.Sprintf("* Node.Ready: %s", condition.Message) } - reason := condition.Reason - if reason == "" { - reason = clusterv1.NoReasonReportedV1Beta2Reason + + reason := "" + switch condition.Status { + case corev1.ConditionFalse: + reason = clusterv1.MachineNodeNotReadyV1Beta2Reason + case corev1.ConditionUnknown: + reason = clusterv1.MachineNodeReadyUnknownV1Beta2Reason + case corev1.ConditionTrue: + reason = clusterv1.MachineNodeReadyV1Beta2Reason } nodeReady = &metav1.Condition{ Type: clusterv1.MachineNodeReadyV1Beta2Condition, @@ -302,9 +307,10 @@ func setNodeHealthyAndReadyConditions(ctx context.Context, cluster *clusterv1.Cl if nodeReady == nil { nodeReady = &metav1.Condition{ - Type: clusterv1.MachineNodeReadyV1Beta2Condition, - Status: metav1.ConditionUnknown, - Reason: 
clusterv1.MachineNodeConditionNotYetReportedV1Beta2Reason, + Type: clusterv1.MachineNodeReadyV1Beta2Condition, + Status: metav1.ConditionUnknown, + Reason: clusterv1.MachineNodeReadyUnknownV1Beta2Reason, + Message: "* Node.Ready: Condition not yet reported", } } v1beta2conditions.Set(machine, *nodeReady) @@ -395,8 +401,6 @@ func summarizeNodeV1Beta2Conditions(_ context.Context, node *corev1.Node) (metav unknownStatus := 0 messages := []string{} - issueReason := "" - unknownReason := "" for _, conditionType := range []corev1.NodeConditionType{corev1.NodeReady, corev1.NodeMemoryPressure, corev1.NodeDiskPressure, corev1.NodePIDPressure} { var condition *corev1.NodeCondition for _, c := range node.Status.Conditions { @@ -406,11 +410,6 @@ func summarizeNodeV1Beta2Conditions(_ context.Context, node *corev1.Node) (metav } if condition == nil { messages = append(messages, fmt.Sprintf("* Node.%s: Condition not yet reported", conditionType)) - if unknownStatus == 0 { - unknownReason = clusterv1.MachineNodeConditionNotYetReportedV1Beta2Reason - } else { - unknownReason = v1beta2conditions.MultipleUnknownReportedReason - } unknownStatus++ continue } @@ -424,19 +423,9 @@ func summarizeNodeV1Beta2Conditions(_ context.Context, node *corev1.Node) (metav } messages = append(messages, fmt.Sprintf("* Node.%s: %s", condition.Type, m)) if condition.Status == corev1.ConditionUnknown { - if unknownStatus == 0 { - unknownReason = condition.Reason - } else { - unknownReason = v1beta2conditions.MultipleUnknownReportedReason - } unknownStatus++ continue } - if semanticallyFalseStatus == 0 { - issueReason = condition.Reason - } else { - issueReason = v1beta2conditions.MultipleIssuesReportedReason - } semanticallyFalseStatus++ continue } @@ -448,19 +437,9 @@ func summarizeNodeV1Beta2Conditions(_ context.Context, node *corev1.Node) (metav } messages = append(messages, fmt.Sprintf("* Node.%s: %s", condition.Type, m)) if condition.Status == corev1.ConditionUnknown { - if unknownStatus == 0 { - 
unknownReason = condition.Reason - } else { - unknownReason = v1beta2conditions.MultipleUnknownReportedReason - } unknownStatus++ continue } - if semanticallyFalseStatus == 0 { - issueReason = condition.Reason - } else { - issueReason = v1beta2conditions.MultipleIssuesReportedReason - } semanticallyFalseStatus++ } } @@ -468,42 +447,43 @@ func summarizeNodeV1Beta2Conditions(_ context.Context, node *corev1.Node) (metav message := strings.Join(messages, "\n") if semanticallyFalseStatus > 0 { - if issueReason == "" { - issueReason = v1beta2conditions.NoReasonReported - } - return metav1.ConditionFalse, issueReason, message + return metav1.ConditionFalse, clusterv1.MachineNodeNotHealthyV1Beta2Reason, message } if unknownStatus > 0 { - if unknownReason == "" { - unknownReason = v1beta2conditions.NoReasonReported - } - return metav1.ConditionUnknown, unknownReason, message + return metav1.ConditionUnknown, clusterv1.MachineNodeHealthUnknownV1Beta2Reason, message } - return metav1.ConditionTrue, v1beta2conditions.MultipleInfoReportedReason, "" + return metav1.ConditionTrue, clusterv1.MachineNodeHealthyV1Beta2Reason, "" } type machineConditionCustomMergeStrategy struct { machine *clusterv1.Machine - negativePolarityConditionTypes sets.Set[string] + negativePolarityConditionTypes []string } func (c machineConditionCustomMergeStrategy) Merge(conditions []v1beta2conditions.ConditionWithOwnerInfo, conditionTypes []string) (status metav1.ConditionStatus, reason, message string, err error) { - return v1beta2conditions.DefaultMergeStrategyWithCustomPriority(func(condition metav1.Condition) v1beta2conditions.MergePriority { - // While machine is deleting, treat unknown conditions from external objects as info (it is ok that those objects have been deleted at this stage). 
- if !c.machine.DeletionTimestamp.IsZero() { - if condition.Type == clusterv1.MachineBootstrapConfigReadyV1Beta2Condition && condition.Status == metav1.ConditionUnknown && (condition.Reason == clusterv1.MachineBootstrapConfigDeletedV1Beta2Reason || condition.Reason == clusterv1.MachineBootstrapConfigDoesNotExistV1Beta2Reason) { - return v1beta2conditions.InfoMergePriority - } - if condition.Type == clusterv1.MachineInfrastructureReadyV1Beta2Condition && condition.Status == metav1.ConditionUnknown && (condition.Reason == clusterv1.MachineInfrastructureDeletedV1Beta2Reason || condition.Reason == clusterv1.MachineInfrastructureDoesNotExistV1Beta2Reason) { - return v1beta2conditions.InfoMergePriority - } - if condition.Type == clusterv1.MachineNodeHealthyV1Beta2Condition && condition.Status == metav1.ConditionUnknown && (condition.Reason == clusterv1.MachineNodeDeletedV1Beta2Reason || condition.Reason == clusterv1.MachineNodeDoesNotExistV1Beta2Reason) { - return v1beta2conditions.InfoMergePriority + return v1beta2conditions.DefaultMergeStrategy( + v1beta2conditions.GetPriorityFunc(func(condition metav1.Condition) v1beta2conditions.MergePriority { + // While machine is deleting, treat unknown conditions from external objects as info (it is ok that those objects have been deleted at this stage). 
+ if !c.machine.DeletionTimestamp.IsZero() { + if condition.Type == clusterv1.MachineBootstrapConfigReadyV1Beta2Condition && condition.Status == metav1.ConditionUnknown && (condition.Reason == clusterv1.MachineBootstrapConfigDeletedV1Beta2Reason || condition.Reason == clusterv1.MachineBootstrapConfigDoesNotExistV1Beta2Reason) { + return v1beta2conditions.InfoMergePriority + } + if condition.Type == clusterv1.MachineInfrastructureReadyV1Beta2Condition && condition.Status == metav1.ConditionUnknown && (condition.Reason == clusterv1.MachineInfrastructureDeletedV1Beta2Reason || condition.Reason == clusterv1.MachineInfrastructureDoesNotExistV1Beta2Reason) { + return v1beta2conditions.InfoMergePriority + } + if condition.Type == clusterv1.MachineNodeHealthyV1Beta2Condition && condition.Status == metav1.ConditionUnknown && (condition.Reason == clusterv1.MachineNodeDeletedV1Beta2Reason || condition.Reason == clusterv1.MachineNodeDoesNotExistV1Beta2Reason) { + return v1beta2conditions.InfoMergePriority + } + // Note: MachineNodeReadyV1Beta2Condition is not relevant for the summary. } - // Note: MachineNodeReadyV1Beta2Condition is not relevant for the summary. 
- } - return v1beta2conditions.GetDefaultMergePriorityFunc(c.negativePolarityConditionTypes)(condition) - }).Merge(conditions, conditionTypes) + return v1beta2conditions.GetDefaultMergePriorityFunc(c.negativePolarityConditionTypes...)(condition) + }), + v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + clusterv1.MachineNotReadyV1Beta2Reason, + clusterv1.MachineReadyUnknownV1Beta2Reason, + clusterv1.MachineReadyV1Beta2Reason, + )), + ).Merge(conditions, conditionTypes) } func setDeletingCondition(_ context.Context, machine *clusterv1.Machine, reconcileDeleteExecuted bool, deletingReason, deletingMessage string) { @@ -546,15 +526,20 @@ func setReadyCondition(ctx context.Context, machine *clusterv1.Machine) { summaryOpts := []v1beta2conditions.SummaryOption{ forConditionTypes, + // Tolerate HealthCheckSucceeded to not exist. v1beta2conditions.IgnoreTypesIfMissing{ clusterv1.MachineHealthCheckSucceededV1Beta2Condition, }, + // Using a custom merge strategy to override reasons applied during merge and to ignore some + // info messages so the ready condition aggregation in other resources is less noisy. v1beta2conditions.CustomMergeStrategy{ MergeStrategy: machineConditionCustomMergeStrategy{ - machine: machine, - negativePolarityConditionTypes: sets.Set[string]{}.Insert(clusterv1.MachineDeletingV1Beta2Condition), + machine: machine, + // Instruct merge to consider Deleting condition with negative polarity. + negativePolarityConditionTypes: []string{clusterv1.MachineDeletingV1Beta2Condition}, }, }, + // Instruct summary to consider Deleting condition with negative polarity.
v1beta2conditions.NegativePolarityConditionTypes{ clusterv1.MachineDeletingV1Beta2Condition, }, diff --git a/internal/controllers/machine/machine_controller_status_test.go b/internal/controllers/machine/machine_controller_status_test.go index 5c3617502b22..0c18884471b2 100644 --- a/internal/controllers/machine/machine_controller_status_test.go +++ b/internal/controllers/machine/machine_controller_status_test.go @@ -488,7 +488,7 @@ func TestSummarizeNodeV1Beta2Conditions(t *testing.T) { {Type: corev1.NodePIDPressure, Status: corev1.ConditionFalse}, }, expectedStatus: metav1.ConditionTrue, - expectedReason: v1beta2conditions.MultipleInfoReportedReason, + expectedReason: clusterv1.MachineNodeHealthyV1Beta2Reason, }, { name: "all conditions are unknown", @@ -499,7 +499,7 @@ func TestSummarizeNodeV1Beta2Conditions(t *testing.T) { {Type: corev1.NodePIDPressure, Status: corev1.ConditionUnknown, Message: "Node is not reporting status"}, }, expectedStatus: metav1.ConditionUnknown, - expectedReason: v1beta2conditions.MultipleUnknownReportedReason, + expectedReason: clusterv1.MachineNodeHealthUnknownV1Beta2Reason, expectedMessage: "* Node.Ready: Node is not reporting status\n" + "* Node.MemoryPressure: Node is not reporting status\n" + "* Node.DiskPressure: Node is not reporting status\n" + @@ -514,7 +514,7 @@ func TestSummarizeNodeV1Beta2Conditions(t *testing.T) { {Type: corev1.NodePIDPressure, Status: corev1.ConditionTrue, Message: "kubelet has NOT sufficient PID available"}, }, expectedStatus: metav1.ConditionFalse, - expectedReason: v1beta2conditions.MultipleIssuesReportedReason, + expectedReason: clusterv1.MachineNodeNotHealthyV1Beta2Reason, expectedMessage: "* Node.Ready: Node is not reporting status\n" + "* Node.MemoryPressure: kubelet has NOT sufficient memory available\n" + "* Node.DiskPressure: kubelet has disk pressure\n" + @@ -529,7 +529,7 @@ func TestSummarizeNodeV1Beta2Conditions(t *testing.T) { {Type: corev1.NodePIDPressure, Status: corev1.ConditionFalse, 
Message: "kubelet has sufficient PID available"}, }, expectedStatus: metav1.ConditionFalse, - expectedReason: "SomeReason", + expectedReason: clusterv1.MachineNodeNotHealthyV1Beta2Reason, expectedMessage: "* Node.Ready: kubelet is NOT ready", }, { @@ -541,7 +541,7 @@ func TestSummarizeNodeV1Beta2Conditions(t *testing.T) { {Type: corev1.NodePIDPressure, Status: corev1.ConditionFalse, Message: "kubelet has sufficient PID available"}, }, expectedStatus: metav1.ConditionUnknown, - expectedReason: "SomeReason", + expectedReason: clusterv1.MachineNodeHealthUnknownV1Beta2Reason, expectedMessage: "* Node.Ready: Node is not reporting status", }, { @@ -552,7 +552,7 @@ func TestSummarizeNodeV1Beta2Conditions(t *testing.T) { {Type: corev1.NodePIDPressure, Status: corev1.ConditionFalse, Message: "kubelet has sufficient PID available"}, }, expectedStatus: metav1.ConditionUnknown, - expectedReason: clusterv1.MachineNodeConditionNotYetReportedV1Beta2Reason, + expectedReason: clusterv1.MachineNodeHealthUnknownV1Beta2Reason, expectedMessage: "* Node.Ready: Condition not yet reported", }, } @@ -680,13 +680,13 @@ func TestSetNodeHealthyAndReadyConditions(t *testing.T) { { Type: clusterv1.MachineNodeHealthyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: "SomeReason", + Reason: clusterv1.MachineNodeNotHealthyV1Beta2Reason, Message: "* Node.Ready: kubelet is NOT ready", }, { Type: clusterv1.MachineNodeReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: "SomeReason", + Reason: clusterv1.MachineNodeNotReadyV1Beta2Reason, Message: "* Node.Ready: kubelet is NOT ready", }, }, @@ -710,12 +710,12 @@ func TestSetNodeHealthyAndReadyConditions(t *testing.T) { { Type: clusterv1.MachineNodeHealthyV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: v1beta2conditions.MultipleInfoReportedReason, + Reason: clusterv1.MachineNodeHealthyV1Beta2Reason, }, { Type: clusterv1.MachineNodeReadyV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: "KubeletReady", + Reason: 
clusterv1.MachineNodeReadyV1Beta2Reason, Message: "* Node.Ready: kubelet is posting ready status", }, }, @@ -738,13 +738,14 @@ func TestSetNodeHealthyAndReadyConditions(t *testing.T) { { Type: clusterv1.MachineNodeHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, - Reason: clusterv1.MachineNodeConditionNotYetReportedV1Beta2Reason, + Reason: clusterv1.MachineNodeHealthUnknownV1Beta2Reason, Message: "* Node.Ready: Condition not yet reported", }, { - Type: clusterv1.MachineNodeReadyV1Beta2Condition, - Status: metav1.ConditionUnknown, - Reason: clusterv1.MachineNodeConditionNotYetReportedV1Beta2Reason, + Type: clusterv1.MachineNodeReadyV1Beta2Condition, + Status: metav1.ConditionUnknown, + Reason: clusterv1.MachineNodeReadyUnknownV1Beta2Reason, + Message: "* Node.Ready: Condition not yet reported", }, }, }, @@ -890,12 +891,12 @@ func TestSetNodeHealthyAndReadyConditions(t *testing.T) { v1beta2conditions.Set(m, metav1.Condition{ Type: clusterv1.MachineNodeHealthyV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: v1beta2conditions.MultipleInfoReportedReason, + Reason: clusterv1.MachineNodeHealthyV1Beta2Reason, }) v1beta2conditions.Set(m, metav1.Condition{ Type: clusterv1.MachineNodeReadyV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: "KubeletReady", + Reason: clusterv1.MachineNodeReadyV1Beta2Reason, Message: "kubelet is posting ready status", }) return m @@ -910,12 +911,12 @@ func TestSetNodeHealthyAndReadyConditions(t *testing.T) { { Type: clusterv1.MachineNodeHealthyV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: v1beta2conditions.MultipleInfoReportedReason, + Reason: clusterv1.MachineNodeHealthyV1Beta2Reason, }, { Type: clusterv1.MachineNodeReadyV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: "KubeletReady", + Reason: clusterv1.MachineNodeReadyV1Beta2Reason, Message: "kubelet is posting ready status", }, }, @@ -969,13 +970,13 @@ func TestSetNodeHealthyAndReadyConditions(t *testing.T) { v1beta2conditions.Set(m, 
metav1.Condition{ Type: clusterv1.MachineNodeHealthyV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: v1beta2conditions.MultipleInfoReportedReason, + Reason: clusterv1.MachineNodeHealthyV1Beta2Reason, }) v1beta2conditions.Set(m, metav1.Condition{ Type: clusterv1.MachineNodeReadyV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: "KubeletReady", - Message: "kubelet is posting ready status)", + Reason: clusterv1.MachineNodeReadyV1Beta2Reason, + Message: "kubelet is posting ready status", }) return m }(), @@ -1203,7 +1204,7 @@ func TestSetReadyCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.MachineReadyV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: v1beta2conditions.MultipleInfoReportedReason, + Reason: clusterv1.MachineReadyV1Beta2Reason, }, }, { @@ -1245,7 +1246,7 @@ func TestSetReadyCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.MachineReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: clusterv1.MachineDeletingV1Beta2Reason, + Reason: clusterv1.MachineNotReadyV1Beta2Reason, Message: "* Deleting: Machine deletion in progress, stage: WaitingForPreDrainHook", }, }, @@ -1300,7 +1301,7 @@ func TestSetReadyCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.MachineReadyV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: v1beta2conditions.MultipleInfoReportedReason, + Reason: clusterv1.MachineReadyV1Beta2Reason, }, }, { @@ -1350,7 +1351,7 @@ func TestSetReadyCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.MachineReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: clusterv1.MachineDeletingV1Beta2Reason, + Reason: clusterv1.MachineNotReadyV1Beta2Reason, Message: "* Deleting: Machine deletion in progress, stage: DrainingNode", }, }, @@ -1397,7 +1398,7 @@ func TestSetReadyCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.MachineReadyV1Beta2Condition, Status: metav1.ConditionFalse, - 
Reason: "SomeReason", + Reason: clusterv1.MachineNotReadyV1Beta2Reason, Message: "* HealthCheckSucceeded: Some message", }, }, @@ -1456,7 +1457,7 @@ func TestSetReadyCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.MachineReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: "SomeReason", + Reason: clusterv1.MachineNotReadyV1Beta2Reason, Message: "* MyReadinessGate: Some message", }, }, @@ -1675,7 +1676,7 @@ func TestAvailableCondition(t *testing.T) { { Type: clusterv1.MachineReadyV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: v1beta2conditions.MultipleInfoReportedReason, + Reason: clusterv1.MachineReadyV1Beta2Reason, LastTransitionTime: metav1.Time{Time: time.Now().Add(10 * time.Second)}, }, }, @@ -1701,7 +1702,7 @@ func TestAvailableCondition(t *testing.T) { { Type: clusterv1.MachineReadyV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: v1beta2conditions.MultipleInfoReportedReason, + Reason: clusterv1.MachineReadyV1Beta2Reason, LastTransitionTime: metav1.Time{Time: time.Now().Add(-10 * time.Second)}, }, }, diff --git a/internal/controllers/machinedeployment/machinedeployment_status.go b/internal/controllers/machinedeployment/machinedeployment_status.go index c614bdd0a193..f576bbdcc396 100644 --- a/internal/controllers/machinedeployment/machinedeployment_status.go +++ b/internal/controllers/machinedeployment/machinedeployment_status.go @@ -281,6 +281,16 @@ func setMachinesReadyCondition(ctx context.Context, machineDeployment *clusterv1 readyCondition, err := v1beta2conditions.NewAggregateCondition( machines.UnsortedList(), clusterv1.MachineReadyV1Beta2Condition, v1beta2conditions.TargetConditionType(clusterv1.MachineDeploymentMachinesReadyV1Beta2Condition), + // Using a custom merge strategy to override reasons applied during merge. 
+ v1beta2conditions.CustomMergeStrategy{ + MergeStrategy: v1beta2conditions.DefaultMergeStrategy( + v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + clusterv1.MachineDeploymentMachinesNotReadyV1Beta2Reason, + clusterv1.MachineDeploymentMachinesReadyUnknownV1Beta2Reason, + clusterv1.MachineDeploymentMachinesReadyV1Beta2Reason, + )), + ), + }, ) if err != nil { log.Error(err, "Failed to aggregate Machine's Ready conditions") @@ -321,6 +331,16 @@ func setMachinesUpToDateCondition(ctx context.Context, machineDeployment *cluste upToDateCondition, err := v1beta2conditions.NewAggregateCondition( machines.UnsortedList(), clusterv1.MachineUpToDateV1Beta2Condition, v1beta2conditions.TargetConditionType(clusterv1.MachineDeploymentMachinesUpToDateV1Beta2Condition), + // Using a custom merge strategy to override reasons applied during merge. + v1beta2conditions.CustomMergeStrategy{ + MergeStrategy: v1beta2conditions.DefaultMergeStrategy( + v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + clusterv1.MachineDeploymentMachinesNotUpToDateV1Beta2Reason, + clusterv1.MachineDeploymentMachinesUpToDateUnknownV1Beta2Reason, + clusterv1.MachineDeploymentMachinesUpToDateV1Beta2Reason, + )), + ), + }, ) if err != nil { log.Error(err, "Failed to aggregate Machine's UpToDate conditions") @@ -361,6 +381,8 @@ func setRemediatingCondition(ctx context.Context, machineDeployment *clusterv1.M remediatingCondition, err := v1beta2conditions.NewAggregateCondition( machinesToBeRemediated.UnsortedList(), clusterv1.MachineOwnerRemediatedV1Beta2Condition, v1beta2conditions.TargetConditionType(clusterv1.MachineDeploymentRemediatingV1Beta2Condition), + // Note: in case of the remediating conditions it is not required to use a CustomMergeStrategy/ComputeReasonFunc + // because we are considering only machinesToBeRemediated (and we can pin the reason when we set the condition). 
) if err != nil { v1beta2conditions.Set(machineDeployment, metav1.Condition{ diff --git a/internal/controllers/machinedeployment/machinedeployment_status_test.go b/internal/controllers/machinedeployment/machinedeployment_status_test.go index 1bc48bc29bda..1128dc342023 100644 --- a/internal/controllers/machinedeployment/machinedeployment_status_test.go +++ b/internal/controllers/machinedeployment/machinedeployment_status_test.go @@ -611,7 +611,7 @@ func Test_setMachinesReadyCondition(t *testing.T) { readyCondition := metav1.Condition{ Type: clusterv1.MachineReadyV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: v1beta2conditions.MultipleInfoReportedReason, + Reason: clusterv1.MachineReadyV1Beta2Reason, } tests := []struct { @@ -655,7 +655,7 @@ func Test_setMachinesReadyCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.MachineDeploymentMachinesReadyV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: v1beta2conditions.MultipleInfoReportedReason, + Reason: clusterv1.MachineDeploymentMachinesReadyV1Beta2Reason, }, }, { @@ -669,7 +669,7 @@ func Test_setMachinesReadyCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.MachineDeploymentMachinesReadyV1Beta2Condition, Status: metav1.ConditionUnknown, - Reason: v1beta2conditions.NotYetReportedReason, + Reason: clusterv1.MachineDeploymentMachinesReadyUnknownV1Beta2Reason, Message: "* Machine machine-2: Condition Ready not yet reported", }, }, @@ -701,7 +701,7 @@ func Test_setMachinesReadyCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.MachineDeploymentMachinesReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: v1beta2conditions.MultipleIssuesReportedReason, + Reason: clusterv1.MachineDeploymentMachinesNotReadyV1Beta2Reason, Message: "* Machine machine-2: HealthCheckSucceeded: Some message\n" + "* Machine machine-4: Deleting: Machine deletion in progress, stage: DrainingNode\n" + "* Machine machine-3: Some unknown message", 
@@ -771,7 +771,7 @@ func Test_setMachinesUpToDateCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.MachineDeploymentMachinesUpToDateV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: "some-reason-1", + Reason: clusterv1.MachineDeploymentMachinesUpToDateV1Beta2Reason, Message: "", }, }, @@ -790,7 +790,7 @@ func Test_setMachinesUpToDateCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.MachineDeploymentMachinesUpToDateV1Beta2Condition, Status: metav1.ConditionUnknown, - Reason: "some-unknown-reason-1", + Reason: clusterv1.MachineDeploymentMachinesUpToDateUnknownV1Beta2Reason, Message: "* Machine unknown-1: some unknown message", }, }, @@ -809,7 +809,7 @@ func Test_setMachinesUpToDateCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.MachineDeploymentMachinesUpToDateV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: "some-not-up-to-date-reason", + Reason: clusterv1.MachineDeploymentMachinesNotUpToDateV1Beta2Reason, Message: "* Machine not-up-to-date-machine-1: some not up-to-date message", }, }, @@ -823,7 +823,7 @@ func Test_setMachinesUpToDateCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.MachineDeploymentMachinesUpToDateV1Beta2Condition, Status: metav1.ConditionUnknown, - Reason: v1beta2conditions.NotYetReportedReason, + Reason: clusterv1.MachineDeploymentMachinesUpToDateUnknownV1Beta2Reason, Message: "* Machine no-condition-machine-1: Condition UpToDate not yet reported", }, }, @@ -860,7 +860,7 @@ func Test_setMachinesUpToDateCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.MachineDeploymentMachinesUpToDateV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: v1beta2conditions.MultipleIssuesReportedReason, + Reason: clusterv1.MachineDeploymentMachinesNotUpToDateV1Beta2Reason, Message: "* Machines not-up-to-date-machine-1, not-up-to-date-machine-2: This is not up-to-date message\n" + "* Machines 
no-condition-machine-1, no-condition-machine-2: Condition UpToDate not yet reported", }, diff --git a/internal/controllers/machineset/machineset_controller_status.go b/internal/controllers/machineset/machineset_controller_status.go index de6036aae082..60633856b603 100644 --- a/internal/controllers/machineset/machineset_controller_status.go +++ b/internal/controllers/machineset/machineset_controller_status.go @@ -230,6 +230,16 @@ func setMachinesReadyCondition(ctx context.Context, machineSet *clusterv1.Machin readyCondition, err := v1beta2conditions.NewAggregateCondition( machines, clusterv1.MachineReadyV1Beta2Condition, v1beta2conditions.TargetConditionType(clusterv1.MachineSetMachinesReadyV1Beta2Condition), + // Using a custom merge strategy to override reasons applied during merge. + v1beta2conditions.CustomMergeStrategy{ + MergeStrategy: v1beta2conditions.DefaultMergeStrategy( + v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + clusterv1.MachineSetMachinesNotReadyV1Beta2Reason, + clusterv1.MachineSetMachinesReadyUnknownV1Beta2Reason, + clusterv1.MachineSetMachinesReadyV1Beta2Reason, + )), + ), + }, ) if err != nil { log.Error(err, "Failed to aggregate Machine's Ready conditions") @@ -270,6 +280,16 @@ func setMachinesUpToDateCondition(ctx context.Context, machineSet *clusterv1.Mac upToDateCondition, err := v1beta2conditions.NewAggregateCondition( machines, clusterv1.MachineUpToDateV1Beta2Condition, v1beta2conditions.TargetConditionType(clusterv1.MachineSetMachinesUpToDateV1Beta2Condition), + // Using a custom merge strategy to override reasons applied during merge. 
+ v1beta2conditions.CustomMergeStrategy{ + MergeStrategy: v1beta2conditions.DefaultMergeStrategy( + v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + clusterv1.MachineSetMachinesNotUpToDateV1Beta2Reason, + clusterv1.MachineSetMachinesUpToDateUnknownV1Beta2Reason, + clusterv1.MachineSetMachinesUpToDateV1Beta2Reason, + )), + ), + }, ) if err != nil { log.Error(err, "Failed to aggregate Machine's UpToDate conditions") @@ -310,6 +330,8 @@ func setRemediatingCondition(ctx context.Context, machineSet *clusterv1.MachineS remediatingCondition, err := v1beta2conditions.NewAggregateCondition( machinesToBeRemediated.UnsortedList(), clusterv1.MachineOwnerRemediatedV1Beta2Condition, v1beta2conditions.TargetConditionType(clusterv1.MachineSetRemediatingV1Beta2Condition), + // Note: in case of the remediating conditions it is not required to use a CustomMergeStrategy/ComputeReasonFunc + // because we are considering only machinesToBeRemediated (and we can pin the reason when we set the condition). 
) if err != nil { v1beta2conditions.Set(machineSet, metav1.Condition{ diff --git a/internal/controllers/machineset/machineset_controller_status_test.go b/internal/controllers/machineset/machineset_controller_status_test.go index 509046ce7c7b..e6f9fbed735d 100644 --- a/internal/controllers/machineset/machineset_controller_status_test.go +++ b/internal/controllers/machineset/machineset_controller_status_test.go @@ -513,7 +513,7 @@ func Test_setMachinesReadyCondition(t *testing.T) { readyCondition := metav1.Condition{ Type: clusterv1.MachineReadyV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: v1beta2conditions.MultipleInfoReportedReason, + Reason: clusterv1.MachineReadyV1Beta2Reason, } tests := []struct { @@ -546,6 +546,19 @@ func Test_setMachinesReadyCondition(t *testing.T) { Reason: clusterv1.MachineSetMachinesReadyNoReplicasV1Beta2Reason, }, }, + { + name: "one machine is ready", + machineSet: machineSet, + machines: []*clusterv1.Machine{ + newMachine("machine-1", readyCondition), + }, + getAndAdoptMachinesForMachineSetSucceeded: true, + expectCondition: metav1.Condition{ + Type: clusterv1.MachineSetMachinesReadyV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: clusterv1.MachineSetMachinesReadyV1Beta2Reason, + }, + }, { name: "all machines are ready", machineSet: machineSet, @@ -557,7 +570,7 @@ func Test_setMachinesReadyCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.MachineSetMachinesReadyV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: v1beta2conditions.MultipleInfoReportedReason, + Reason: clusterv1.MachineSetMachinesReadyV1Beta2Reason, }, }, { @@ -571,7 +584,7 @@ func Test_setMachinesReadyCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.MachineSetMachinesReadyV1Beta2Condition, Status: metav1.ConditionUnknown, - Reason: v1beta2conditions.NotYetReportedReason, + Reason: clusterv1.MachineSetMachinesReadyUnknownV1Beta2Reason, Message: "* Machine machine-2: Condition Ready not yet 
reported", }, }, @@ -603,7 +616,7 @@ func Test_setMachinesReadyCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.MachineSetMachinesReadyV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: v1beta2conditions.MultipleIssuesReportedReason, + Reason: clusterv1.MachineSetMachinesNotReadyV1Beta2Reason, Message: "* Machine machine-2: HealthCheckSucceeded: Some message\n" + "* Machine machine-4: Deleting: Machine deletion in progress, stage: DrainingNode\n" + "* Machine machine-3: Some unknown message", @@ -671,7 +684,7 @@ func Test_setMachinesUpToDateCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.MachineSetMachinesUpToDateV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: "some-reason-1", + Reason: clusterv1.MachineSetMachinesUpToDateV1Beta2Reason, Message: "", }, }, @@ -690,7 +703,7 @@ func Test_setMachinesUpToDateCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.MachineSetMachinesUpToDateV1Beta2Condition, Status: metav1.ConditionUnknown, - Reason: "some-unknown-reason-1", + Reason: clusterv1.MachineSetMachinesUpToDateUnknownV1Beta2Reason, Message: "* Machine unknown-1: some unknown message", }, }, @@ -709,7 +722,7 @@ func Test_setMachinesUpToDateCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.MachineSetMachinesUpToDateV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: "some-not-up-to-date-reason", + Reason: clusterv1.MachineSetMachinesNotUpToDateV1Beta2Reason, Message: "* Machine not-up-to-date-machine-1: some not up-to-date message", }, }, @@ -723,7 +736,7 @@ func Test_setMachinesUpToDateCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.MachineSetMachinesUpToDateV1Beta2Condition, Status: metav1.ConditionUnknown, - Reason: v1beta2conditions.NotYetReportedReason, + Reason: clusterv1.MachineSetMachinesUpToDateUnknownV1Beta2Reason, Message: "* Machine no-condition-machine-1: Condition UpToDate not yet reported", }, 
}, @@ -760,7 +773,7 @@ func Test_setMachinesUpToDateCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.MachineSetMachinesUpToDateV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: v1beta2conditions.MultipleIssuesReportedReason, + Reason: clusterv1.MachineSetMachinesNotUpToDateV1Beta2Reason, Message: "* Machines not-up-to-date-machine-1, not-up-to-date-machine-2: This is not up-to-date message\n" + "* Machines no-condition-machine-1, no-condition-machine-2: Condition UpToDate not yet reported", }, diff --git a/util/conditions/v1beta2/aggregate.go b/util/conditions/v1beta2/aggregate.go index bbfa9994be71..3a3615a28966 100644 --- a/util/conditions/v1beta2/aggregate.go +++ b/util/conditions/v1beta2/aggregate.go @@ -72,9 +72,9 @@ func NewAggregateCondition[T Getter](sourceObjs []T, sourceConditionType string, } if aggregateOpt.mergeStrategy == nil { - negativePolarityConditionTypes := sets.New[string](aggregateOpt.negativePolarityConditionTypes...) - targetConditionHasPositivePolarity := !negativePolarityConditionTypes.Has(sourceConditionType) - aggregateOpt.mergeStrategy = newDefaultMergeStrategy(targetConditionHasPositivePolarity, negativePolarityConditionTypes) + // Note: If mergeStrategy is not explicitly set, target condition has negative polarity if source condition has negative polarity + targetConditionHasPositivePolarity := !sets.New[string](aggregateOpt.negativePolarityConditionTypes...).Has(sourceConditionType) + aggregateOpt.mergeStrategy = DefaultMergeStrategy(TargetConditionHasPositivePolarity(targetConditionHasPositivePolarity), GetPriorityFunc(GetDefaultMergePriorityFunc(aggregateOpt.negativePolarityConditionTypes...))) } conditionsInScope := make([]ConditionWithOwnerInfo, 0, len(sourceObjs)) diff --git a/util/conditions/v1beta2/aggregate_test.go b/util/conditions/v1beta2/aggregate_test.go index a5edd3ee86b3..4958f2b9a6f7 100644 --- a/util/conditions/v1beta2/aggregate_test.go +++ 
b/util/conditions/v1beta2/aggregate_test.go @@ -22,7 +22,6 @@ import ( . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/test/builder" @@ -48,7 +47,7 @@ func TestAggregate(t *testing.T) { want: &metav1.Condition{ Type: clusterv1.AvailableV1Beta2Condition, Status: metav1.ConditionFalse, // False because there is one issue - Reason: "Reason-1", // Picking the reason from the only existing issue + Reason: issuesReportedReason, // Using a generic reason Message: "* Phase3Obj obj0: Message-1", // messages from all the issues & unknown conditions (info dropped) }, wantErr: false, @@ -64,7 +63,7 @@ func TestAggregate(t *testing.T) { want: &metav1.Condition{ Type: clusterv1.ScalingUpV1Beta2Condition, Status: metav1.ConditionTrue, // True because there is one issue, and the target condition has negative polarity - Reason: "Reason-1", // Picking the reason from the only existing issue + Reason: issuesReportedReason, // Using a generic reason Message: "* Phase3Obj obj0: Message-1", // messages from all the issues & unknown conditions (info dropped) }, wantErr: false, @@ -84,11 +83,47 @@ func TestAggregate(t *testing.T) { {{Type: clusterv1.ScalingUpV1Beta2Condition, Status: metav1.ConditionFalse, Reason: "Reason-99", Message: "Message-99"}}, // obj1 }, conditionType: clusterv1.ScalingUpV1Beta2Condition, - options: []AggregateOption{NegativePolarityConditionTypes{clusterv1.ScalingUpV1Beta2Condition}, CustomMergeStrategy{newDefaultMergeStrategy(true, sets.New(clusterv1.ScalingUpV1Beta2Condition))}}, + options: []AggregateOption{NegativePolarityConditionTypes{clusterv1.ScalingUpV1Beta2Condition}, CustomMergeStrategy{ + MergeStrategy: DefaultMergeStrategy( + TargetConditionHasPositivePolarity(true), + GetPriorityFunc(GetDefaultMergePriorityFunc(clusterv1.ScalingUpV1Beta2Condition)), + ComputeReasonFunc(GetDefaultComputeMergeReasonFunc( + "bad", 
+ "unknown", + "good", + )), + ), + }}, want: &metav1.Condition{ Type: clusterv1.ScalingUpV1Beta2Condition, Status: metav1.ConditionFalse, // False because there is one issue, and the custom merge strategy doesn't set the flag that defines that the target condition has negative polarity - Reason: "Reason-1", // Picking the reason from the only existing issue + Reason: "bad", // Using reason from the ComputeReasonFunc + Message: "* Phase3Obj obj0: Message-1", // messages from all the issues & unknown conditions (info dropped) + }, + wantErr: false, + }, + { + name: "One issue with custom merge strategy (negative polarity)", + conditions: [][]metav1.Condition{ + {{Type: clusterv1.ScalingUpV1Beta2Condition, Status: metav1.ConditionTrue, Reason: "Reason-1", Message: "Message-1"}}, // obj0 + {{Type: clusterv1.ScalingUpV1Beta2Condition, Status: metav1.ConditionFalse, Reason: "Reason-99", Message: "Message-99"}}, // obj1 + }, + conditionType: clusterv1.ScalingUpV1Beta2Condition, + options: []AggregateOption{NegativePolarityConditionTypes{clusterv1.ScalingUpV1Beta2Condition}, CustomMergeStrategy{ + MergeStrategy: DefaultMergeStrategy( + TargetConditionHasPositivePolarity(false), + GetPriorityFunc(GetDefaultMergePriorityFunc(clusterv1.ScalingUpV1Beta2Condition)), + ComputeReasonFunc(GetDefaultComputeMergeReasonFunc( + "good", // Note: with negative polarity, false is good + "unknown", + "bad", + )), + ), + }}, + want: &metav1.Condition{ + Type: clusterv1.ScalingUpV1Beta2Condition, + Status: metav1.ConditionTrue, // True because there is one issue, and the custom merge strategy sets the flag that defines that the target condition has negative polarity + Reason: "good", // Using reason from the ComputeReasonFunc Message: "* Phase3Obj obj0: Message-1", // messages from all the issues & unknown conditions (info dropped) }, wantErr: false, @@ -104,7 +139,7 @@ func TestAggregate(t *testing.T) { want: &metav1.Condition{ Type: "SomethingAvailable", Status: metav1.ConditionFalse, // 
False because there is one issue - Reason: "Reason-1", // Picking the reason from the only existing issue + Reason: issuesReportedReason, // Using a generic reason Message: "* Phase3Obj obj0: Message-1", // messages from all the issues & unknown conditions (info dropped) }, wantErr: false, @@ -120,7 +155,7 @@ func TestAggregate(t *testing.T) { want: &metav1.Condition{ Type: "SomethingAvailable", Status: metav1.ConditionTrue, // True because there is one issue, and the target condition has negative polarity - Reason: "Reason-1", // Picking the reason from the only existing issue + Reason: issuesReportedReason, // Using a generic reason Message: "* Phase3Obj obj0: Message-1", // messages from all the issues & unknown conditions (info dropped) }, wantErr: false, @@ -138,7 +173,7 @@ func TestAggregate(t *testing.T) { want: &metav1.Condition{ Type: clusterv1.AvailableV1Beta2Condition, Status: metav1.ConditionFalse, // False because there is one issue - Reason: MultipleIssuesReportedReason, // Using a generic reason + Reason: issuesReportedReason, // Using a generic reason Message: "* Phase3Objs obj0, obj1, obj2: Message-1", // messages from all the issues & unknown conditions (info dropped) }, wantErr: false, @@ -158,7 +193,7 @@ func TestAggregate(t *testing.T) { want: &metav1.Condition{ Type: clusterv1.AvailableV1Beta2Condition, Status: metav1.ConditionFalse, // False because there is one issue - Reason: MultipleIssuesReportedReason, // Using a generic reason + Reason: issuesReportedReason, // Using a generic reason Message: "* Phase3Objs obj0, obj1, obj2, ... 
(2 more): Message-1", // messages from all the issues & unknown conditions (info dropped) }, wantErr: false, @@ -178,8 +213,8 @@ func TestAggregate(t *testing.T) { options: []AggregateOption{}, want: &metav1.Condition{ Type: clusterv1.AvailableV1Beta2Condition, - Status: metav1.ConditionFalse, // False because there is one issue - Reason: MultipleIssuesReportedReason, // Using a generic reason + Status: metav1.ConditionFalse, // False because there is one issue + Reason: issuesReportedReason, // Using a generic reason Message: "* Phase3Objs obj0, obj3, obj4: Message-1\n" + "* Phase3Objs obj1, obj2: Message-2\n" + "* Phase3Obj obj5: Message-3", // messages from all the issues & unknown conditions (info dropped) @@ -201,8 +236,8 @@ func TestAggregate(t *testing.T) { options: []AggregateOption{}, want: &metav1.Condition{ Type: clusterv1.AvailableV1Beta2Condition, - Status: metav1.ConditionFalse, // False because there is one issue - Reason: MultipleIssuesReportedReason, // Using a generic reason + Status: metav1.ConditionFalse, // False because there is one issue + Reason: issuesReportedReason, // Using a generic reason Message: "* Phase3Objs obj0, obj3, obj4:\n" + " * Message-1\n" + "* Phase3Objs obj1, obj2:\n" + @@ -228,8 +263,8 @@ func TestAggregate(t *testing.T) { options: []AggregateOption{}, want: &metav1.Condition{ Type: clusterv1.AvailableV1Beta2Condition, - Status: metav1.ConditionFalse, // False because there is one issue - Reason: MultipleIssuesReportedReason, // Using a generic reason + Status: metav1.ConditionFalse, // False because there is one issue + Reason: issuesReportedReason, // Using a generic reason Message: "* Phase3Objs obj0, obj4: Message-1\n" + "* Phase3Obj obj1: Message-2\n" + "* Phase3Obj obj2: Message-4\n" + @@ -249,8 +284,8 @@ func TestAggregate(t *testing.T) { options: []AggregateOption{}, want: &metav1.Condition{ Type: clusterv1.AvailableV1Beta2Condition, - Status: metav1.ConditionFalse, // False because there is one issue - Reason: 
MultipleIssuesReportedReason, // Using a generic reason + Status: metav1.ConditionFalse, // False because there is one issue + Reason: issuesReportedReason, // Using a generic reason Message: "* Phase3Obj obj0: Message-1\n" + "* Phase3Obj obj1: Message-2\n" + "* Phase3Obj obj2: Message-3", // messages from all the issues & unknown conditions (info dropped) @@ -270,8 +305,8 @@ func TestAggregate(t *testing.T) { options: []AggregateOption{}, want: &metav1.Condition{ Type: clusterv1.AvailableV1Beta2Condition, - Status: metav1.ConditionFalse, // False because there is one issue - Reason: MultipleIssuesReportedReason, // Using a generic reason + Status: metav1.ConditionFalse, // False because there is one issue + Reason: issuesReportedReason, // Using a generic reason Message: "* Phase3Obj obj0: Message-1\n" + "* Phase3Obj obj1: Message-2\n" + "* Phase3Obj obj3: Message-4\n" + @@ -294,8 +329,8 @@ func TestAggregate(t *testing.T) { options: []AggregateOption{}, want: &metav1.Condition{ Type: clusterv1.AvailableV1Beta2Condition, - Status: metav1.ConditionFalse, // False because there is one issue - Reason: MultipleIssuesReportedReason, // Using a generic reason + Status: metav1.ConditionFalse, // False because there is one issue + Reason: issuesReportedReason, // Using a generic reason Message: "* Phase3Obj obj0: Message-1\n" + "* Phase3Obj obj1: Message-2\n" + "* Phase3Obj obj3: Message-4\n" + @@ -319,8 +354,8 @@ func TestAggregate(t *testing.T) { options: []AggregateOption{}, want: &metav1.Condition{ Type: clusterv1.AvailableV1Beta2Condition, - Status: metav1.ConditionUnknown, // Unknown because there is at least an unknown and no issue - Reason: MultipleUnknownReportedReason, // Using a generic reason + Status: metav1.ConditionUnknown, // Unknown because there is at least an unknown and no issue + Reason: unknownReportedReason, // Using a generic reason Message: "* Phase3Objs obj0, obj4: Message-1\n" + "* Phase3Obj obj1: Message-2\n" + "* Phase3Obj obj2: Message-4\n" + 
@@ -342,8 +377,8 @@ func TestAggregate(t *testing.T) { options: []AggregateOption{}, want: &metav1.Condition{ Type: clusterv1.AvailableV1Beta2Condition, - Status: metav1.ConditionTrue, // True because there are no issue and unknown - Reason: MultipleInfoReportedReason, // Using a generic reason + Status: metav1.ConditionTrue, // True because there are no issue and unknown + Reason: infoReportedReason, // Using a generic reason Message: "* Phase3Objs obj0, obj4: Message-1\n" + "* Phase3Obj obj1: Message-2\n" + "* Phase3Obj obj2: Message-4\n" + @@ -362,7 +397,7 @@ func TestAggregate(t *testing.T) { want: &metav1.Condition{ Type: clusterv1.AvailableV1Beta2Condition, Status: metav1.ConditionFalse, // False because there is one issue - Reason: "Reason-1", // Picking the reason from the only existing issue + Reason: issuesReportedReason, // Using a generic reason Message: "* Phase3Obj obj0: Message-1\n" + "* Phase3Obj obj1: Condition Available not yet reported", // messages from all the issues & unknown conditions (info dropped) }, @@ -379,7 +414,7 @@ func TestAggregate(t *testing.T) { want: &metav1.Condition{ Type: "SomethingAvailable", Status: metav1.ConditionFalse, // False because there is one issue - Reason: "Reason-1", // Picking the reason from the only existing issue + Reason: issuesReportedReason, // Using a generic reason Message: "* Phase3Obj obj0: Message-1\n" + "* Phase3Obj obj1: Condition Available not yet reported", // messages from all the issues & unknown conditions (info dropped) }, diff --git a/util/conditions/v1beta2/merge_strategies.go b/util/conditions/v1beta2/merge_strategies.go index a8e0fb109da0..0fef8b8cd682 100644 --- a/util/conditions/v1beta2/merge_strategies.go +++ b/util/conditions/v1beta2/merge_strategies.go @@ -29,21 +29,6 @@ import ( "k8s.io/apimachinery/pkg/util/sets" ) -// TODO: Move to the API package. 
-const ( - // MultipleIssuesReportedReason is set on conditions generated during aggregate or summary operations when multiple conditions/objects are reporting issues. - // NOTE: If a custom merge strategy is used for the aggregate or summary operations, this might not be true anymore. - MultipleIssuesReportedReason = "MultipleIssuesReported" - - // MultipleUnknownReportedReason is set on conditions generated during aggregate or summary operations when multiple conditions/objects are reporting unknown. - // NOTE: If a custom merge strategy is used for the aggregate or summary operations, this might not be true anymore. - MultipleUnknownReportedReason = "MultipleUnknownReported" - - // MultipleInfoReportedReason is set on conditions generated during aggregate or summary operations when multiple conditions/objects are reporting info. - // NOTE: If a custom merge strategy is used for the aggregate or summary operations, this might not be true anymore. - MultipleInfoReportedReason = "MultipleInfoReported" -) - // ConditionWithOwnerInfo is a wrapper around metav1.Condition with additional ConditionOwnerInfo. // These infos can be used when generating the message resulting from the merge operation. type ConditionWithOwnerInfo struct { @@ -73,18 +58,52 @@ type MergeStrategy interface { Merge(conditions []ConditionWithOwnerInfo, conditionTypes []string) (status metav1.ConditionStatus, reason, message string, err error) } -// DefaultMergeStrategyWithCustomPriority is the default merge strategy with a customized getPriority function. -func DefaultMergeStrategyWithCustomPriority(getPriorityFunc func(condition metav1.Condition) MergePriority) MergeStrategy { - return &defaultMergeStrategy{ - targetConditionHasPositivePolarity: true, - getPriorityFunc: getPriorityFunc, +// DefaultMergeStrategyOption is some configuration that modifies the DefaultMergeStrategy behaviour. 
+type DefaultMergeStrategyOption interface { + // ApplyToDefaultMergeStrategy applies this configuration to the given DefaultMergeStrategy options. + ApplyToDefaultMergeStrategy(option *DefaultMergeStrategyOptions) +} + +// DefaultMergeStrategyOptions allows to set options for the DefaultMergeStrategy behaviour. +type DefaultMergeStrategyOptions struct { + getPriorityFunc func(condition metav1.Condition) MergePriority + computeReasonFunc func(issueConditions []ConditionWithOwnerInfo, unknownConditions []ConditionWithOwnerInfo, infoConditions []ConditionWithOwnerInfo) string + targetConditionHasPositivePolarity bool +} + +// ApplyOptions applies the given list options on these options, +// and then returns itself (for convenient chaining). +func (o *DefaultMergeStrategyOptions) ApplyOptions(opts []DefaultMergeStrategyOption) *DefaultMergeStrategyOptions { + for _, opt := range opts { + opt.ApplyToDefaultMergeStrategy(o) } + return o } -func newDefaultMergeStrategy(targetConditionHasPositivePolarity bool, negativePolarityConditionTypes sets.Set[string]) MergeStrategy { +// DefaultMergeStrategy returns the default merge strategy. +// +// Use the GetPriorityFunc option to customize how the MergePriority for a given condition is computed. +// If not specified, conditions are considered issues or not if not to their normal state given the polarity +// (e.g. a positive polarity condition is considered to be reporting an issue when status is false, +// otherwise the condition is considered to be reporting an info unless status is unknown). +// +// Use the TargetConditionHasPositivePolarity to define the polarity of the condition returned by the DefaultMergeStrategy. +// If not specified, the generate condition will have positive polarity (status true = good). +// +// Use the ComputeReasonFunc to customize how the reason for the resulting condition will be computed. +// If not specified, generic reasons will be used. 
+func DefaultMergeStrategy(opts ...DefaultMergeStrategyOption) MergeStrategy { + strategyOpt := &DefaultMergeStrategyOptions{ + targetConditionHasPositivePolarity: true, + computeReasonFunc: GetDefaultComputeMergeReasonFunc(issuesReportedReason, unknownReportedReason, infoReportedReason), // NOTE: when no specific reason are provided, generic ones are used. + getPriorityFunc: GetDefaultMergePriorityFunc(), + } + strategyOpt.ApplyOptions(opts) + return &defaultMergeStrategy{ - targetConditionHasPositivePolarity: targetConditionHasPositivePolarity, - getPriorityFunc: GetDefaultMergePriorityFunc(negativePolarityConditionTypes), + getPriorityFunc: strategyOpt.getPriorityFunc, + computeReasonFunc: strategyOpt.computeReasonFunc, + targetConditionHasPositivePolarity: strategyOpt.targetConditionHasPositivePolarity, } } @@ -93,16 +112,17 @@ func newDefaultMergeStrategy(targetConditionHasPositivePolarity bool, negativePo // - issues: conditions with positive polarity (normal True) and status False or conditions with negative polarity (normal False) and status True. // - unknown: conditions with status unknown. // - info: conditions with positive polarity (normal True) and status True or conditions with negative polarity (normal False) and status False. -func GetDefaultMergePriorityFunc(negativePolarityConditionTypes sets.Set[string]) func(condition metav1.Condition) MergePriority { +func GetDefaultMergePriorityFunc(negativePolarityConditionTypes ...string) func(condition metav1.Condition) MergePriority { + negativePolarityConditionTypesSet := sets.New[string](negativePolarityConditionTypes...) 
return func(condition metav1.Condition) MergePriority { switch condition.Status { case metav1.ConditionTrue: - if negativePolarityConditionTypes.Has(condition.Type) { + if negativePolarityConditionTypesSet.Has(condition.Type) { return IssueMergePriority } return InfoMergePriority case metav1.ConditionFalse: - if negativePolarityConditionTypes.Has(condition.Type) { + if negativePolarityConditionTypesSet.Has(condition.Type) { return InfoMergePriority } return IssueMergePriority @@ -129,9 +149,41 @@ const ( InfoMergePriority ) +// GetDefaultComputeMergeReasonFunc return a function picking one of the three reasons in input depending on +// the status of the conditions being merged. +func GetDefaultComputeMergeReasonFunc(issueReason, unknownReason, infoReason string) func(issueConditions []ConditionWithOwnerInfo, unknownConditions []ConditionWithOwnerInfo, infoConditions []ConditionWithOwnerInfo) string { + return func(issueConditions []ConditionWithOwnerInfo, unknownConditions []ConditionWithOwnerInfo, _ []ConditionWithOwnerInfo) string { + switch { + case len(issueConditions) > 0: + return issueReason + case len(unknownConditions) > 0: + return unknownReason + default: + // Note: This func can assume that there is at least one condition, so this branch is equivalent to len(infoReason) > 0, + // and it makes the linter happy. + return infoReason + } + } +} + +const ( + // issuesReportedReason is set on conditions generated during aggregate or summary operations when at least one conditions/objects are reporting issues. + // NOTE: This const is used by GetDefaultComputeMergeReasonFunc if no specific reasons are provided. + issuesReportedReason = "IssuesReported" + + // unknownReportedReason is set on conditions generated during aggregate or summary operations when at least one conditions/objects are reporting unknown. + // NOTE: This const is used by GetDefaultComputeMergeReasonFunc if no specific reasons are provided. 
+ unknownReportedReason = "UnknownReported" + + // infoReportedReason is set on conditions generated during aggregate or summary operations when at least one conditions/objects are reporting info. + // NOTE: This const is used by GetDefaultComputeMergeReasonFunc if no specific reasons are provided. + infoReportedReason = "InfoReported" +) + // defaultMergeStrategy defines the default merge strategy for Cluster API conditions. type defaultMergeStrategy struct { getPriorityFunc func(condition metav1.Condition) MergePriority + computeReasonFunc func(issueConditions []ConditionWithOwnerInfo, unknownConditions []ConditionWithOwnerInfo, infoConditions []ConditionWithOwnerInfo) string targetConditionHasPositivePolarity bool } @@ -190,23 +242,7 @@ func (d *defaultMergeStrategy) Merge(conditions []ConditionWithOwnerInfo, condit // Compute the reason for the target condition: // - In case there is only one condition in the top group, use the reason from this condition // - In case there are more than one condition in the top group, use a generic reason (for the target group) - switch { - case len(issueConditions) == 1: - reason = issueConditions[0].Reason - case len(issueConditions) > 1: - reason = MultipleIssuesReportedReason - case len(unknownConditions) == 1: - reason = unknownConditions[0].Reason - case len(unknownConditions) > 1: - reason = MultipleUnknownReportedReason - case len(infoConditions) == 1: - reason = infoConditions[0].Reason - case len(infoConditions) > 1: - reason = MultipleInfoReportedReason - default: - // NOTE: this is already handled above, but repeating also here for better readability. - return "", "", "", errors.New("can't merge an empty list of conditions") - } + reason = d.computeReasonFunc(issueConditions, unknownConditions, infoConditions) // Compute the message for the target condition, which is optimized for the operation being performed. 
diff --git a/util/conditions/v1beta2/merge_strategies_test.go b/util/conditions/v1beta2/merge_strategies_test.go index 7847973d3879..b24eefd9b655 100644 --- a/util/conditions/v1beta2/merge_strategies_test.go +++ b/util/conditions/v1beta2/merge_strategies_test.go @@ -22,7 +22,6 @@ import ( . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" ) func TestAggregateMessages(t *testing.T) { @@ -112,7 +111,7 @@ func TestSplitConditionsByPriority(t *testing.T) { {OwnerResource: ConditionOwnerInfo{Name: "baz"}, Condition: metav1.Condition{Type: "!C", Status: metav1.ConditionFalse}}, // info } - issueConditions, unknownConditions, infoConditions := splitConditionsByPriority(conditions, GetDefaultMergePriorityFunc(sets.New[string]("!C"))) + issueConditions, unknownConditions, infoConditions := splitConditionsByPriority(conditions, GetDefaultMergePriorityFunc("!C")) // Check condition are grouped as expected and order is preserved. @@ -196,11 +195,11 @@ func TestDefaultMergePriority(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - negativePolarityConditionTypes := sets.New[string]() + negativePolarityConditionTypes := []string{} if tt.negativePolarity { - negativePolarityConditionTypes.Insert(tt.condition.Type) + negativePolarityConditionTypes = append(negativePolarityConditionTypes, tt.condition.Type) } - gotPriority := GetDefaultMergePriorityFunc(negativePolarityConditionTypes)(tt.condition) + gotPriority := GetDefaultMergePriorityFunc(negativePolarityConditionTypes...)(tt.condition) g.Expect(gotPriority).To(Equal(tt.wantPriority)) }) diff --git a/util/conditions/v1beta2/options.go b/util/conditions/v1beta2/options.go index 168f09d7d553..c0dcb1a5804d 100644 --- a/util/conditions/v1beta2/options.go +++ b/util/conditions/v1beta2/options.go @@ -129,3 +129,28 @@ type ForceOverwrite bool func (f ForceOverwrite) ApplyToPatchApply(opts *PatchApplyOptions) { opts.forceOverwrite = bool(f) } + +// 
GetPriorityFunc defines priority of a given condition when processed by the DefaultMergeStrategy. +// Note: The return value must be one of IssueMergePriority, UnknownMergePriority, InfoMergePriority. +type GetPriorityFunc func(condition metav1.Condition) MergePriority + +// ApplyToDefaultMergeStrategy applies this configuration to the given DefaultMergeStrategy options. +func (f GetPriorityFunc) ApplyToDefaultMergeStrategy(opts *DefaultMergeStrategyOptions) { + opts.getPriorityFunc = f +} + +// TargetConditionHasPositivePolarity defines the polarity of the condition returned by the DefaultMergeStrategy. +type TargetConditionHasPositivePolarity bool + +// ApplyToDefaultMergeStrategy applies this configuration to the given DefaultMergeStrategy options. +func (t TargetConditionHasPositivePolarity) ApplyToDefaultMergeStrategy(opts *DefaultMergeStrategyOptions) { + opts.targetConditionHasPositivePolarity = bool(t) +} + +// ComputeReasonFunc defines a function to be used when computing the reason of the condition returned by the DefaultMergeStrategy. +type ComputeReasonFunc func(issueConditions []ConditionWithOwnerInfo, unknownConditions []ConditionWithOwnerInfo, infoConditions []ConditionWithOwnerInfo) string + +// ApplyToDefaultMergeStrategy applies this configuration to the given DefaultMergeStrategy options. +func (f ComputeReasonFunc) ApplyToDefaultMergeStrategy(opts *DefaultMergeStrategyOptions) { + opts.computeReasonFunc = f +} diff --git a/util/conditions/v1beta2/summary.go b/util/conditions/v1beta2/summary.go index 6ca07a5e89f1..d7a08463d66a 100644 --- a/util/conditions/v1beta2/summary.go +++ b/util/conditions/v1beta2/summary.go @@ -60,7 +60,7 @@ func NewSummaryCondition(sourceObj Getter, targetConditionType string, opts ...S summarizeOpt.ApplyOptions(opts) if summarizeOpt.mergeStrategy == nil { // Note. Summary always assume the target condition type has positive polarity. 
- summarizeOpt.mergeStrategy = newDefaultMergeStrategy(true, sets.New[string](summarizeOpt.negativePolarityConditionTypes...)) + summarizeOpt.mergeStrategy = DefaultMergeStrategy(GetPriorityFunc(GetDefaultMergePriorityFunc(summarizeOpt.negativePolarityConditionTypes...))) } if len(summarizeOpt.conditionTypes) == 0 { diff --git a/util/conditions/v1beta2/summary_test.go b/util/conditions/v1beta2/summary_test.go index 256dd5e00993..1a3b1ee41b21 100644 --- a/util/conditions/v1beta2/summary_test.go +++ b/util/conditions/v1beta2/summary_test.go @@ -21,7 +21,6 @@ import ( . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/test/builder" @@ -48,7 +47,7 @@ func TestSummary(t *testing.T) { want: &metav1.Condition{ Type: clusterv1.AvailableV1Beta2Condition, Status: metav1.ConditionFalse, // False because there is one issue - Reason: "Reason-!C", // Picking the reason from the only existing issue + Reason: issuesReportedReason, // Using a generic reason Message: "* !C: Message-!C", // messages from all the issues & unknown conditions (info dropped) }, }, @@ -64,7 +63,7 @@ func TestSummary(t *testing.T) { want: &metav1.Condition{ Type: clusterv1.AvailableV1Beta2Condition, Status: metav1.ConditionFalse, // False because there is one issue - Reason: "Reason-!C", // Picking the reason from the only existing issue + Reason: issuesReportedReason, // Using a generic reason Message: "* !C: No additional info provided", // messages from all the issues & unknown conditions (info dropped); since message is empty, a default one is added }, }, @@ -79,8 +78,8 @@ func TestSummary(t *testing.T) { options: []SummaryOption{ForConditionTypes{"A", "B", "!C"}, NegativePolarityConditionTypes{"!C"}}, want: &metav1.Condition{ Type: clusterv1.AvailableV1Beta2Condition, - Status: metav1.ConditionFalse, // False because there are many issues - Reason: 
MultipleIssuesReportedReason, // Using a generic reason + Status: metav1.ConditionFalse, // False because there are many issues + Reason: issuesReportedReason, // Using a generic reason Message: "* B: Message-B\n" + "* !C: Message-!C", // messages from all the issues & unknown conditions (info dropped) }, @@ -96,8 +95,8 @@ func TestSummary(t *testing.T) { options: []SummaryOption{ForConditionTypes{"A", "B", "!C"}, NegativePolarityConditionTypes{"!C"}}, want: &metav1.Condition{ Type: clusterv1.AvailableV1Beta2Condition, - Status: metav1.ConditionFalse, // False because there are many issues - Reason: MultipleIssuesReportedReason, // Using a generic reason + Status: metav1.ConditionFalse, // False because there are many issues + Reason: issuesReportedReason, // Using a generic reason Message: "* B: Message-B\n" + "* !C:\n" + " * Message-!C1\n" + @@ -115,8 +114,8 @@ func TestSummary(t *testing.T) { options: []SummaryOption{ForConditionTypes{"A", "B", "!C"}, NegativePolarityConditionTypes{"!C"}}, want: &metav1.Condition{ Type: clusterv1.AvailableV1Beta2Condition, - Status: metav1.ConditionFalse, // False because there are many issues - Reason: MultipleIssuesReportedReason, // Using a generic reason + Status: metav1.ConditionFalse, // False because there are many issues + Reason: issuesReportedReason, // Using a generic reason Message: "* B: Message-B\n" + "* !C: Message-!C\n" + "* A: Message-A", // messages from all the issues & unknown conditions (info dropped) @@ -134,7 +133,7 @@ func TestSummary(t *testing.T) { want: &metav1.Condition{ Type: clusterv1.AvailableV1Beta2Condition, Status: metav1.ConditionUnknown, // Unknown because there is one unknown - Reason: "Reason-!C", // Picking the reason from the only existing unknown + Reason: unknownReportedReason, // Using a generic reason Message: "* !C: Message-!C", // messages from all the issues & unknown conditions (info dropped) }, }, @@ -149,8 +148,8 @@ func TestSummary(t *testing.T) { options: 
[]SummaryOption{ForConditionTypes{"A", "B", "!C"}, NegativePolarityConditionTypes{"!C"}}, want: &metav1.Condition{ Type: clusterv1.AvailableV1Beta2Condition, - Status: metav1.ConditionUnknown, // Unknown because there are many unknown - Reason: MultipleUnknownReportedReason, // Using a generic reason + Status: metav1.ConditionUnknown, // Unknown because there are many unknown + Reason: unknownReportedReason, // Using a generic reason Message: "* B: Message-B\n" + "* !C: Message-!C", // messages from all the issues & unknown conditions (info dropped) }, @@ -164,11 +163,11 @@ func TestSummary(t *testing.T) { {Type: "!C", Status: metav1.ConditionFalse, Reason: "Reason-!C", Message: "Message-!C"}, // info }, conditionType: clusterv1.AvailableV1Beta2Condition, - options: []SummaryOption{ForConditionTypes{"A", "B", "!C"}, NegativePolarityConditionTypes{"!C"}, CustomMergeStrategy{newDefaultMergeStrategy(true, sets.New("!C"))}}, + options: []SummaryOption{ForConditionTypes{"A", "B", "!C"}, NegativePolarityConditionTypes{"!C"}, CustomMergeStrategy{DefaultMergeStrategy(TargetConditionHasPositivePolarity(true), GetPriorityFunc(GetDefaultMergePriorityFunc("!C")))}}, want: &metav1.Condition{ Type: clusterv1.AvailableV1Beta2Condition, - Status: metav1.ConditionTrue, // True because there are many info - Reason: MultipleInfoReportedReason, // Using a generic reason + Status: metav1.ConditionTrue, // True because there are many info + Reason: infoReportedReason, // Using a generic reason Message: "* B: Message-B\n" + "* !C: Message-!C", // messages from all the info conditions (empty messages are dropped) }, @@ -183,8 +182,8 @@ func TestSummary(t *testing.T) { options: []SummaryOption{ForConditionTypes{"A", "B", "!C"}, NegativePolarityConditionTypes{"!C"}}, // B and !C are required! 
want: &metav1.Condition{ Type: clusterv1.AvailableV1Beta2Condition, - Status: metav1.ConditionUnknown, // Unknown because there more than one unknown - Reason: MultipleUnknownReportedReason, // Using a generic reason + Status: metav1.ConditionUnknown, // Unknown because there more than one unknown + Reason: unknownReportedReason, // Using a generic reason Message: "* B: Condition not yet reported\n" + "* !C: Condition not yet reported", // messages from all the issues & unknown conditions (info dropped) }, @@ -200,7 +199,7 @@ func TestSummary(t *testing.T) { want: &metav1.Condition{ Type: clusterv1.AvailableV1Beta2Condition, Status: metav1.ConditionUnknown, // Unknown because there more than one unknown - Reason: NotYetReportedReason, // Picking the reason from the only existing issue, which is a default missing condition added for !C + Reason: unknownReportedReason, // Using a generic reason Message: "* !C: Condition not yet reported", // messages from all the issues & unknown conditions (info dropped) }, }, @@ -215,7 +214,7 @@ func TestSummary(t *testing.T) { want: &metav1.Condition{ Type: clusterv1.AvailableV1Beta2Condition, Status: metav1.ConditionTrue, // True because B and !C are ignored - Reason: "Reason-A", // Picking the reason from A, the only existing info + Reason: infoReportedReason, // Using a generic reason Message: "* A: Message-A", // messages from A, the only existing info }, }, @@ -230,9 +229,9 @@ func TestSummary(t *testing.T) { options: []SummaryOption{ForConditionTypes{"A", "B"}}, // C not in scope want: &metav1.Condition{ Type: clusterv1.AvailableV1Beta2Condition, - Status: metav1.ConditionTrue, // True because there are many info - Reason: MultipleInfoReportedReason, // Using a generic reason - Message: "* B: Message-B", // messages from all the info conditions (empty messages are dropped) + Status: metav1.ConditionTrue, // True because there are many info + Reason: infoReportedReason, // Using a generic reason + Message: "* B: Message-B", 
// messages from all the info conditions (empty messages are dropped) }, }, { @@ -261,7 +260,7 @@ func TestSummary(t *testing.T) { want: &metav1.Condition{ Type: clusterv1.AvailableV1Beta2Condition, Status: metav1.ConditionFalse, // False because !C is an issue - Reason: "Reason-C-additional", // Picking the reason from the additional condition + Reason: issuesReportedReason, // Using a generic reason Message: "* !C: Message-C-additional", // Picking the message from the additional condition (info dropped) }, },