diff --git a/api/v1beta1/cluster_types.go b/api/v1beta1/cluster_types.go
index 11af512fc561..b9cbe1b98170 100644
--- a/api/v1beta1/cluster_types.go
+++ b/api/v1beta1/cluster_types.go
@@ -68,11 +68,11 @@ const (
 	ClusterTopologyReconciledV1Beta2Condition = "TopologyReconciled"
 
 	// ClusterTopologyReconcileSucceededV1Beta2Reason documents the reconciliation of a Cluster topology succeeded.
-	ClusterTopologyReconcileSucceededV1Beta2Reason = "TopologyReconcileSucceeded"
+	ClusterTopologyReconcileSucceededV1Beta2Reason = "ReconcileSucceeded"
 
 	// ClusterTopologyReconciledFailedV1Beta2Reason documents the reconciliation of a Cluster topology
 	// failing due to an error.
-	ClusterTopologyReconciledFailedV1Beta2Reason = "TopologyReconcileFailed"
+	ClusterTopologyReconciledFailedV1Beta2Reason = "ReconcileFailed"
 
 	// ClusterTopologyReconciledControlPlaneUpgradePendingV1Beta2Reason documents reconciliation of a Cluster topology
 	// not yet completed because Control Plane is not yet updated to match the desired topology spec.
@@ -113,9 +113,9 @@ const (
 	// with the ClusterClass surfaced in the ClusterClass status or controller logs.
 	ClusterTopologyReconciledClusterClassNotReconciledV1Beta2Reason = "ClusterClassNotReconciled"
 
-	// ClusterTopologyReconciledDeletionTimestampSetV1Beta2Reason surfaces when the Cluster is deleting because the
+	// ClusterTopologyReconciledDeletingV1Beta2Reason surfaces when the Cluster is deleting because the
 	// DeletionTimestamp is set.
-	ClusterTopologyReconciledDeletionTimestampSetV1Beta2Reason = DeletionTimestampSetV1Beta2Reason
+	ClusterTopologyReconciledDeletingV1Beta2Reason = DeletingV1Beta2Reason
 
 	// ClusterTopologyReconcilePausedV1Beta2Reason surfaces when the Cluster is paused.
 	ClusterTopologyReconcilePausedV1Beta2Reason = PausedV1Beta2Reason
@@ -316,7 +316,7 @@ const (
 
 	// ClusterNotScalingDownV1Beta2Reason surfaces when none of the Cluster's control plane, MachineDeployments,
 	// MachinePools and stand-alone MachineSets are scaling down.
-	ClusterNotScalingDownV1Beta2Reason = NotScalingUpV1Beta2Reason
+	ClusterNotScalingDownV1Beta2Reason = NotScalingDownV1Beta2Reason
 
 	// ClusterScalingDownUnknownV1Beta2Reason surfaces when one of the Cluster's control plane, MachineDeployments,
 	// MachinePools and stand-alone MachineSets scaling down condition is unknown, and none true.
@@ -349,9 +349,9 @@ const (
 	// ClusterDeletingV1Beta2Condition surfaces details about ongoing deletion of the cluster.
 	ClusterDeletingV1Beta2Condition = DeletingV1Beta2Condition
 
-	// ClusterDeletingDeletionTimestampNotSetV1Beta2Reason surfaces when the Cluster is not deleting because the
+	// ClusterNotDeletingV1Beta2Reason surfaces when the Cluster is not deleting because the
 	// DeletionTimestamp is not set.
-	ClusterDeletingDeletionTimestampNotSetV1Beta2Reason = DeletionTimestampNotSetV1Beta2Reason
+	ClusterNotDeletingV1Beta2Reason = NotDeletingV1Beta2Reason
 
 	// ClusterDeletingWaitingForBeforeDeleteHookV1Beta2Reason surfaces when the Cluster deletion
 	// waits for the ClusterDelete hooks to allow deletion to complete.
diff --git a/api/v1beta1/machine_types.go b/api/v1beta1/machine_types.go
index 501a7ea8c7b0..2af03602e4be 100644
--- a/api/v1beta1/machine_types.go
+++ b/api/v1beta1/machine_types.go
@@ -237,12 +237,12 @@ const (
 	// MachineNodeDoesNotExistV1Beta2Reason surfaces when the node hosted on the machine does not exist.
 	// Note: this could happen when creating the machine. However, this state should be treated as an error if it lasts indefinitely.
-	MachineNodeDoesNotExistV1Beta2Reason = ObjectDoesNotExistV1Beta2Reason
+	MachineNodeDoesNotExistV1Beta2Reason = "NodeDoesNotExist"
 
 	// MachineNodeDeletedV1Beta2Reason surfaces when the node hosted on the machine has been deleted.
 	// Note: controllers can't identify if the Node was deleted by the controller itself, e.g.
 	// during the deletion workflow, or by a user.
-	MachineNodeDeletedV1Beta2Reason = ObjectDeletedV1Beta2Reason
+	MachineNodeDeletedV1Beta2Reason = "NodeDeleted"
 
 	// MachineNodeInspectionFailedV1Beta2Reason documents a failure when inspecting the status of a Node.
 	MachineNodeInspectionFailedV1Beta2Reason = InspectionFailedV1Beta2Reason
@@ -317,18 +317,13 @@ const (
 	// MachineDeletingV1Beta2Condition surfaces details about progress in the machine deletion workflow.
 	MachineDeletingV1Beta2Condition = DeletingV1Beta2Condition
 
-	// MachineDeletingV1Beta2Reason surfaces when the Machine is deleting.
-	// This reason is only used for the MachineDeletingV1Beta2Condition when calculating the
-	// Ready condition when the deletionTimestamp on a Machine is set.
-	MachineDeletingV1Beta2Reason = "Deleting"
-
-	// MachineDeletingDeletionTimestampNotSetV1Beta2Reason surfaces when the Machine is not deleting because the
+	// MachineNotDeletingV1Beta2Reason surfaces when the Machine is not deleting because the
 	// DeletionTimestamp is not set.
-	MachineDeletingDeletionTimestampNotSetV1Beta2Reason = DeletionTimestampNotSetV1Beta2Reason
+	MachineNotDeletingV1Beta2Reason = NotDeletingV1Beta2Reason
 
-	// MachineDeletingDeletionTimestampSetV1Beta2Reason surfaces when the Machine is deleting because the
+	// MachineDeletingV1Beta2Reason surfaces when the Machine is deleting because the
 	// DeletionTimestamp is set. This reason is used if none of the more specific reasons apply.
-	MachineDeletingDeletionTimestampSetV1Beta2Reason = DeletionTimestampSetV1Beta2Reason
+	MachineDeletingV1Beta2Reason = DeletingV1Beta2Reason
 
 	// MachineDeletingInternalErrorV1Beta2Reason surfaces unexpected failures when deleting a Machine.
 	MachineDeletingInternalErrorV1Beta2Reason = InternalErrorV1Beta2Reason
diff --git a/api/v1beta1/machinedeployment_types.go b/api/v1beta1/machinedeployment_types.go
index de068d1bce2c..9f0a5856f6a2 100644
--- a/api/v1beta1/machinedeployment_types.go
+++ b/api/v1beta1/machinedeployment_types.go
@@ -208,13 +208,13 @@ const (
 	// MachineDeploymentDeletingV1Beta2Condition surfaces details about ongoing deletion of the controlled machines.
 	MachineDeploymentDeletingV1Beta2Condition = DeletingV1Beta2Condition
 
-	// MachineDeploymentDeletingDeletionTimestampNotSetV1Beta2Reason surfaces when the MachineDeployment is not deleting because the
+	// MachineDeploymentNotDeletingV1Beta2Reason surfaces when the MachineDeployment is not deleting because the
 	// DeletionTimestamp is not set.
-	MachineDeploymentDeletingDeletionTimestampNotSetV1Beta2Reason = DeletionTimestampNotSetV1Beta2Reason
+	MachineDeploymentNotDeletingV1Beta2Reason = NotDeletingV1Beta2Reason
 
-	// MachineDeploymentDeletingDeletionTimestampSetV1Beta2Reason surfaces when the MachineDeployment is deleting because the
+	// MachineDeploymentDeletingV1Beta2Reason surfaces when the MachineDeployment is deleting because the
 	// DeletionTimestamp is set.
-	MachineDeploymentDeletingDeletionTimestampSetV1Beta2Reason = DeletionTimestampSetV1Beta2Reason
+	MachineDeploymentDeletingV1Beta2Reason = DeletingV1Beta2Reason
 
 	// MachineDeploymentDeletingInternalErrorV1Beta2Reason surfaces unexpected failures when deleting a MachineDeployment.
 	MachineDeploymentDeletingInternalErrorV1Beta2Reason = InternalErrorV1Beta2Reason
diff --git a/api/v1beta1/machineset_types.go b/api/v1beta1/machineset_types.go
index 78a8f8e30698..b18313b3f2bc 100644
--- a/api/v1beta1/machineset_types.go
+++ b/api/v1beta1/machineset_types.go
@@ -199,11 +199,11 @@ const (
 	// MachineSetMachineRemediationDeferredV1Beta2Reason surfaces when remediation of a MachineSet machine must be deferred.
 	MachineSetMachineRemediationDeferredV1Beta2Reason = "RemediationDeferred"
 
-	// MachineSetMachineRemediationMachineDeletedV1Beta2Reason surfaces when remediation of a MachineSet machine
+	// MachineSetMachineRemediationMachineDeletingV1Beta2Reason surfaces when remediation of a MachineSet machine
 	// has been completed by deleting the unhealthy machine.
 	// Note: After an unhealthy machine is deleted, a new one is created by the MachineSet as part of the
 	// regular reconcile loop that ensures the correct number of replicas exist.
-	MachineSetMachineRemediationMachineDeletedV1Beta2Reason = "MachineDeleted"
+	MachineSetMachineRemediationMachineDeletingV1Beta2Reason = "MachineDeleting"
 )
 
 // MachineSet's Deleting condition and corresponding reasons that will be used in v1Beta2 API version.
@@ -211,13 +211,13 @@ const (
 	// MachineSetDeletingV1Beta2Condition surfaces details about ongoing deletion of the controlled machines.
 	MachineSetDeletingV1Beta2Condition = DeletingV1Beta2Condition
 
-	// MachineSetDeletingDeletionTimestampNotSetV1Beta2Reason surfaces when the MachineSet is not deleting because the
+	// MachineSetNotDeletingV1Beta2Reason surfaces when the MachineSet is not deleting because the
 	// DeletionTimestamp is not set.
-	MachineSetDeletingDeletionTimestampNotSetV1Beta2Reason = DeletionTimestampNotSetV1Beta2Reason
+	MachineSetNotDeletingV1Beta2Reason = NotDeletingV1Beta2Reason
 
-	// MachineSetDeletingDeletionTimestampSetV1Beta2Reason surfaces when the MachineSet is deleting because the
+	// MachineSetDeletingV1Beta2Reason surfaces when the MachineSet is deleting because the
 	// DeletionTimestamp is set.
-	MachineSetDeletingDeletionTimestampSetV1Beta2Reason = DeletionTimestampSetV1Beta2Reason
+	MachineSetDeletingV1Beta2Reason = DeletingV1Beta2Reason
 
 	// MachineSetDeletingInternalErrorV1Beta2Reason surfaces unexpected failures when deleting a MachineSet.
 	MachineSetDeletingInternalErrorV1Beta2Reason = InternalErrorV1Beta2Reason
diff --git a/api/v1beta1/v1beta2_condition_consts.go b/api/v1beta1/v1beta2_condition_consts.go
index 47aad7f48b18..f30b00dac6c3 100644
--- a/api/v1beta1/v1beta2_condition_consts.go
+++ b/api/v1beta1/v1beta2_condition_consts.go
@@ -169,13 +169,13 @@ const (
 	// ConnectionDownV1Beta2Reason surfaces that the connection to the workload cluster is down.
 	ConnectionDownV1Beta2Reason = "ConnectionDown"
 
-	// DeletionTimestampNotSetV1Beta2Reason surfaces when an object is not deleting because the
+	// NotDeletingV1Beta2Reason surfaces when an object is not deleting because the
 	// DeletionTimestamp is not set.
-	DeletionTimestampNotSetV1Beta2Reason = "DeletionTimestampNotSet"
+	NotDeletingV1Beta2Reason = "NotDeleting"
 
-	// DeletionTimestampSetV1Beta2Reason surfaces when an object is deleting because the
+	// DeletingV1Beta2Reason surfaces when an object is deleting because the
 	// DeletionTimestamp is set. This reason is used if none of the more specific reasons apply.
-	DeletionTimestampSetV1Beta2Reason = "DeletionTimestampSet"
+	DeletingV1Beta2Reason = "Deleting"
 
 	// DeletionCompletedV1Beta2Reason surfaces when the deletion process has been completed.
 	// This reason is set right after the corresponding finalizer is removed.
diff --git a/controlplane/kubeadm/api/v1beta1/v1beta2_condition_consts.go b/controlplane/kubeadm/api/v1beta1/v1beta2_condition_consts.go
index b486eea57e77..84bd7adacedb 100644
--- a/controlplane/kubeadm/api/v1beta1/v1beta2_condition_consts.go
+++ b/controlplane/kubeadm/api/v1beta1/v1beta2_condition_consts.go
@@ -238,13 +238,13 @@ const (
 	// KubeadmControlPlaneMachineRemediationDeferredV1Beta2Reason surfaces when remediation of a control plane machine must be deferred.
 	KubeadmControlPlaneMachineRemediationDeferredV1Beta2Reason = "RemediationDeferred"
 
-	// KubeadmControlPlaneMachineRemediationMachineDeletedV1Beta2Reason surfaces when remediation of a control plane machine
+	// KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason surfaces when remediation of a control plane machine
 	// has been completed by deleting the unhealthy machine.
 	// Note: After an unhealthy machine is deleted, a new one is created by the KubeadmControlPlaneMachine as part of the
 	// regular reconcile loop that ensures the correct number of replicas exist; KubeadmControlPlane machine waits for
 	// the new machine to exist before removing the controlplane.cluster.x-k8s.io/remediation-in-progress annotation.
 	// This is part of a series of safeguards to ensure that operations are performed sequentially on control plane machines.
-	KubeadmControlPlaneMachineRemediationMachineDeletedV1Beta2Reason = "MachineDeleted"
+	KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason = "MachineDeleting"
 )
 
 // KubeadmControlPlane's Deleting condition and corresponding reasons that will be used in v1Beta2 API version.
@@ -252,9 +252,9 @@ const (
 	// KubeadmControlPlaneDeletingV1Beta2Condition surfaces details about ongoing deletion of the controlled machines.
 	KubeadmControlPlaneDeletingV1Beta2Condition = clusterv1.DeletingV1Beta2Condition
 
-	// KubeadmControlPlaneDeletingDeletionTimestampNotSetV1Beta2Reason surfaces when the KCP is not deleting because the
+	// KubeadmControlPlaneNotDeletingV1Beta2Reason surfaces when the KCP is not deleting because the
 	// DeletionTimestamp is not set.
-	KubeadmControlPlaneDeletingDeletionTimestampNotSetV1Beta2Reason = clusterv1.DeletionTimestampNotSetV1Beta2Reason
+	KubeadmControlPlaneNotDeletingV1Beta2Reason = clusterv1.NotDeletingV1Beta2Reason
 
 	// KubeadmControlPlaneDeletingWaitingForWorkersDeletionV1Beta2Reason surfaces when the KCP deletion
 	// waits for the workers to be deleted.
diff --git a/controlplane/kubeadm/internal/controllers/controller.go b/controlplane/kubeadm/internal/controllers/controller.go index 61705ba9aee2..0dc9bfe90d95 100644 --- a/controlplane/kubeadm/internal/controllers/controller.go +++ b/controlplane/kubeadm/internal/controllers/controller.go @@ -597,7 +597,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileDelete(ctx context.Context, con // If no control plane machines remain, remove the finalizer if len(controlPlane.Machines) == 0 { controlPlane.DeletingReason = controlplanev1.KubeadmControlPlaneDeletingDeletionCompletedV1Beta2Reason - controlPlane.DeletingMessage = "" + controlPlane.DeletingMessage = "Deletion completed" controllerutil.RemoveFinalizer(controlPlane.KCP, controlplanev1.KubeadmControlPlaneFinalizer) return ctrl.Result{}, nil diff --git a/controlplane/kubeadm/internal/controllers/controller_test.go b/controlplane/kubeadm/internal/controllers/controller_test.go index 38739dabaecd..c45509ce7886 100644 --- a/controlplane/kubeadm/internal/controllers/controller_test.go +++ b/controlplane/kubeadm/internal/controllers/controller_test.go @@ -2012,17 +2012,17 @@ func TestKubeadmControlPlaneReconciler_reconcileControlPlaneAndMachinesCondition Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneEtcdClusterHealthUnknownV1Beta2Reason, Message: "* Machine machine1-test:\n" + - " * EtcdMemberHealthy: Node does not exist", + " * EtcdMemberHealthy: Waiting for GenericInfrastructureMachine to report spec.providerID", }, { Type: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthUnknownV1Beta2Reason, Message: "* Machine machine1-test:\n" + - " * APIServerPodHealthy: Node does not exist\n" + - " * ControllerManagerPodHealthy: Node does not exist\n" + - " * SchedulerPodHealthy: Node does not exist\n" + - " * EtcdPodHealthy: Node does not exist", + " * APIServerPodHealthy: Waiting for GenericInfrastructureMachine to report spec.providerID\n" + + " * ControllerManagerPodHealthy: Waiting for GenericInfrastructureMachine to report spec.providerID\n" + + " * SchedulerPodHealthy: Waiting for GenericInfrastructureMachine to report spec.providerID\n" + + " * EtcdPodHealthy: Waiting for GenericInfrastructureMachine to report spec.providerID", }, }, expectMachineConditions: []metav1.Condition{ @@ -2030,31 +2030,31 @@ func TestKubeadmControlPlaneReconciler_reconcileControlPlaneAndMachinesCondition Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, - Message: "Node does not exist", + Message: "Waiting for GenericInfrastructureMachine to report spec.providerID", }, { Type: controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, - Message: "Node does not exist", + Message: "Waiting for GenericInfrastructureMachine to report spec.providerID", }, { Type: controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, - Message: "Node does not exist", + Message: "Waiting for GenericInfrastructureMachine to report spec.providerID", }, { Type: 
controlplanev1.KubeadmControlPlaneMachineEtcdPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, - Message: "Node does not exist", + Message: "Waiting for GenericInfrastructureMachine to report spec.providerID", }, { Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachineEtcdMemberInspectionFailedV1Beta2Reason, - Message: "Node does not exist", + Message: "Waiting for GenericInfrastructureMachine to report spec.providerID", }, { Type: clusterv1.MachineUpToDateV1Beta2Condition, @@ -2092,17 +2092,17 @@ func TestKubeadmControlPlaneReconciler_reconcileControlPlaneAndMachinesCondition Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneEtcdClusterHealthUnknownV1Beta2Reason, Message: "* Machine machine1-test:\n" + - " * EtcdMemberHealthy: Node does not exist", + " * EtcdMemberHealthy: Waiting for GenericInfrastructureMachine to report spec.providerID", }, { Type: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthUnknownV1Beta2Reason, Message: "* Machine machine1-test:\n" + - " * APIServerPodHealthy: Node does not exist\n" + - " * ControllerManagerPodHealthy: Node does not exist\n" + - " * SchedulerPodHealthy: Node does not exist\n" + - " * EtcdPodHealthy: Node does not exist", + " * APIServerPodHealthy: Waiting for GenericInfrastructureMachine to report spec.providerID\n" + + " * ControllerManagerPodHealthy: Waiting for GenericInfrastructureMachine to report spec.providerID\n" + + " * SchedulerPodHealthy: Waiting for GenericInfrastructureMachine to report spec.providerID\n" + + " * EtcdPodHealthy: Waiting for GenericInfrastructureMachine to report spec.providerID", }, }, expectMachineConditions: []metav1.Condition{ @@ -2110,31 +2110,31 @@ func TestKubeadmControlPlaneReconciler_reconcileControlPlaneAndMachinesCondition Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, - Message: "Node does not exist", + Message: "Waiting for GenericInfrastructureMachine to report spec.providerID", }, { Type: controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, - Message: "Node does not exist", + Message: "Waiting for GenericInfrastructureMachine to report spec.providerID", }, { Type: controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, - Message: "Node does not exist", + Message: "Waiting for GenericInfrastructureMachine to report spec.providerID", }, { Type: controlplanev1.KubeadmControlPlaneMachineEtcdPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, - Message: "Node does not exist", + Message: "Waiting for GenericInfrastructureMachine to report spec.providerID", }, { Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: 
controlplanev1.KubeadmControlPlaneMachineEtcdMemberInspectionFailedV1Beta2Reason, - Message: "Node does not exist", + Message: "Waiting for GenericInfrastructureMachine to report spec.providerID", }, { Type: clusterv1.MachineUpToDateV1Beta2Condition, @@ -3301,7 +3301,7 @@ func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) g.Expect(kcp.Finalizers).To(BeEmpty()) g.Expect(controlPlane.DeletingReason).To(Equal(controlplanev1.KubeadmControlPlaneDeletingDeletionCompletedV1Beta2Reason)) - g.Expect(controlPlane.DeletingMessage).To(BeEmpty()) + g.Expect(controlPlane.DeletingMessage).To(Equal("Deletion completed")) }) t.Run("does not remove any control plane Machines if other Machines exist", func(t *testing.T) { @@ -3453,7 +3453,7 @@ func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) g.Expect(kcp.Finalizers).To(BeEmpty()) g.Expect(controlPlane.DeletingReason).To(Equal(controlplanev1.KubeadmControlPlaneDeletingDeletionCompletedV1Beta2Reason)) - g.Expect(controlPlane.DeletingMessage).To(BeEmpty()) + g.Expect(controlPlane.DeletingMessage).To(Equal("Deletion completed")) }) } diff --git a/controlplane/kubeadm/internal/controllers/remediation.go b/controlplane/kubeadm/internal/controllers/remediation.go index 48253222768d..e2c24d93d8c7 100644 --- a/controlplane/kubeadm/internal/controllers/remediation.go +++ b/controlplane/kubeadm/internal/controllers/remediation.go @@ -183,13 +183,13 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C // The cluster MUST have more than one replica, because this is the smallest cluster size that allows any etcd failure tolerance. if controlPlane.Machines.Len() <= 1 { log.Info("A control plane machine needs remediation, but the number of current replicas is less or equal to 1. Skipping remediation", "replicas", controlPlane.Machines.Len()) - conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP can't remediate if current replicas are less or equal to 1") + conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate if current replicas are less or equal to 1") v1beta2conditions.Set(machineToBeRemediated, metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachineCannotBeRemediatedV1Beta2Reason, - Message: "KCP can't remediate if current replicas are less or equal to 1", + Message: "KubeadmControlPlane can't remediate if current replicas are less or equal to 1", }) return ctrl.Result{}, nil } @@ -197,13 +197,13 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C // The cluster MUST NOT have healthy machines still being provisioned. This rule prevents KCP taking actions while the cluster is in a transitional state. if controlPlane.HasHealthyMachineStillProvisioning() { log.Info("A control plane machine needs remediation, but there are other control-plane machines being provisioned. 
Skipping remediation") - conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP waiting for control plane machine provisioning to complete before triggering remediation") + conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane waiting for control plane machine provisioning to complete before triggering remediation") v1beta2conditions.Set(machineToBeRemediated, metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachineRemediationDeferredV1Beta2Reason, - Message: "KCP waiting for control plane Machine provisioning to complete before triggering remediation", + Message: "KubeadmControlPlane waiting for control plane Machine provisioning to complete before triggering remediation", }) return ctrl.Result{}, nil } @@ -211,13 +211,13 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C // The cluster MUST have no machines with a deletion timestamp. This rule prevents KCP taking actions while the cluster is in a transitional state. if controlPlane.HasDeletingMachine() { log.Info("A control plane machine needs remediation, but there are other control-plane machines being deleted. Skipping remediation") - conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP waiting for control plane machine deletion to complete before triggering remediation") + conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane waiting for control plane machine deletion to complete before triggering remediation") v1beta2conditions.Set(machineToBeRemediated, metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachineRemediationDeferredV1Beta2Reason, - Message: "KCP waiting for control plane Machine deletion to complete before triggering remediation", + Message: "KubeadmControlPlane waiting for control plane Machine deletion to complete before triggering remediation", }) return ctrl.Result{}, nil } @@ -239,13 +239,13 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C } if !canSafelyRemediate { log.Info("A control plane machine needs remediation, but removing this machine could result in etcd quorum loss. 
Skipping remediation") - conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP can't remediate this machine because this could result in etcd loosing quorum") + conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate this machine because this could result in etcd loosing quorum") v1beta2conditions.Set(machineToBeRemediated, metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachineCannotBeRemediatedV1Beta2Reason, - Message: "KCP can't remediate this Machine because this could result in etcd loosing quorum", + Message: "KubeadmControlPlane can't remediate this Machine because this could result in etcd loosing quorum", }) return ctrl.Result{}, nil } @@ -276,7 +276,7 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachineCannotBeRemediatedV1Beta2Reason, - Message: "KCP can't remediate this Machine because there is no healthy Machine to forward etcd leadership to", + Message: "KubeadmControlPlane can't remediate this Machine because there is no healthy Machine to forward etcd leadership to", }) return ctrl.Result{}, nil } @@ -317,8 +317,8 @@ func (r *KubeadmControlPlaneReconciler) reconcileUnhealthyMachines(ctx context.C v1beta2conditions.Set(machineToBeRemediated, metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletedV1Beta2Reason, - Message: "Machine deletionTimestamp set", + Reason: controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, + Message: "Machine is deleting", }) // Prepare the info for tracking the remediation progress into the RemediationInProgressAnnotation. @@ -415,13 +415,13 @@ func (r *KubeadmControlPlaneReconciler) checkRetryLimits(log logr.Logger, machin // Check if remediation can happen because retryPeriod is passed. if lastRemediationTime.Add(retryPeriod).After(reconciliationTime) { log.Info(fmt.Sprintf("A control plane machine needs remediation, but the operation already failed in the latest %s. 
Skipping remediation", retryPeriod)) - conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP can't remediate this machine because the operation already failed in the latest %s (RetryPeriod)", retryPeriod) + conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate this machine because the operation already failed in the latest %s (RetryPeriod)", retryPeriod) v1beta2conditions.Set(machineToBeRemediated, metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachineRemediationDeferredV1Beta2Reason, - Message: fmt.Sprintf("KCP can't remediate this machine because the operation already failed in the latest %s (RetryPeriod)", retryPeriod), + Message: fmt.Sprintf("KubeadmControlPlane can't remediate this machine because the operation already failed in the latest %s (RetryPeriod)", retryPeriod), }) return remediationInProgressData, false, nil } @@ -431,13 +431,13 @@ func (r *KubeadmControlPlaneReconciler) checkRetryLimits(log logr.Logger, machin maxRetry := int(*controlPlane.KCP.Spec.RemediationStrategy.MaxRetry) if remediationInProgressData.RetryCount >= maxRetry { log.Info(fmt.Sprintf("A control plane machine needs remediation, but the operation already failed %d times (MaxRetry %d). Skipping remediation", remediationInProgressData.RetryCount, maxRetry)) - conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP can't remediate this machine because the operation already failed %d times (MaxRetry)", maxRetry) + conditions.MarkFalse(machineToBeRemediated, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate this machine because the operation already failed %d times (MaxRetry)", maxRetry) v1beta2conditions.Set(machineToBeRemediated, metav1.Condition{ Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachineCannotBeRemediatedV1Beta2Reason, - Message: fmt.Sprintf("KCP can't remediate this machine because the operation already failed %d times (MaxRetry)", maxRetry), + Message: fmt.Sprintf("KubeadmControlPlane can't remediate this machine because the operation already failed %d times (MaxRetry)", maxRetry), }) return remediationInProgressData, false, nil } diff --git a/controlplane/kubeadm/internal/controllers/remediation_test.go b/controlplane/kubeadm/internal/controllers/remediation_test.go index 89203037b759..48a2ebd1b12c 100644 --- a/controlplane/kubeadm/internal/controllers/remediation_test.go +++ b/controlplane/kubeadm/internal/controllers/remediation_test.go @@ -198,7 +198,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(0)) assertMachineCondition(ctx, g, m, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, m, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletedV1Beta2Reason, "Machine 
deletionTimestamp set") + assertMachineV1beta2Condition(ctx, g, m, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m.Namespace, Name: m.Name}, m) g.Expect(err).ToNot(HaveOccurred()) @@ -265,8 +265,8 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(controlPlane.KCP.Annotations).ToNot(HaveKey(controlplanev1.RemediationInProgressAnnotation)) - assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP can't remediate this machine because the operation already failed 3 times (MaxRetry)") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineCannotBeRemediatedV1Beta2Reason, "KCP can't remediate this machine because the operation already failed 3 times (MaxRetry)") + assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate this machine because the operation already failed 3 times (MaxRetry)") + assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineCannotBeRemediatedV1Beta2Reason, "KubeadmControlPlane can't remediate this machine because the operation already failed 3 times (MaxRetry)") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) g.Expect(err).ToNot(HaveOccurred()) @@ -322,7 +322,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(0)) assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletedV1Beta2Reason, "Machine deletionTimestamp set") + assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) g.Expect(err).ToNot(HaveOccurred()) @@ -381,7 +381,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(0)) assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletedV1Beta2Reason, "Machine deletionTimestamp set") + assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) g.Expect(err).ToNot(HaveOccurred()) @@ -433,8 +433,8 @@ 
func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(controlPlane.KCP.Annotations).ToNot(HaveKey(controlplanev1.RemediationInProgressAnnotation)) - assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP can't remediate this machine because the operation already failed in the latest 1h0m0s (RetryPeriod)") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationDeferredV1Beta2Reason, "KCP can't remediate this machine because the operation already failed in the latest 1h0m0s (RetryPeriod)") + assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate this machine because the operation already failed in the latest 1h0m0s (RetryPeriod)") + assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationDeferredV1Beta2Reason, "KubeadmControlPlane can't remediate this machine because the operation already failed in the latest 1h0m0s (RetryPeriod)") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) g.Expect(err).ToNot(HaveOccurred()) @@ -479,8 +479,8 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(controlPlane.KCP.Annotations).ToNot(HaveKey(controlplanev1.RemediationInProgressAnnotation)) - assertMachineCondition(ctx, g, m, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP can't remediate if current replicas are less or equal to 1") - assertMachineV1beta2Condition(ctx, g, m, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineCannotBeRemediatedV1Beta2Reason, "KCP can't remediate if current replicas are less or equal to 1") + assertMachineCondition(ctx, g, m, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate if current replicas are less or equal to 1") + assertMachineV1beta2Condition(ctx, g, m, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineCannotBeRemediatedV1Beta2Reason, "KubeadmControlPlane can't remediate if current replicas are less or equal to 1") g.Expect(env.Cleanup(ctx, m)).To(Succeed()) }) @@ -509,8 +509,8 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(controlPlane.KCP.Annotations).ToNot(HaveKey(controlplanev1.RemediationInProgressAnnotation)) - assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP waiting for control plane machine deletion to complete before triggering remediation") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationDeferredV1Beta2Reason, "KCP waiting for control plane Machine deletion to complete before triggering remediation") + assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, 
corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane waiting for control plane machine deletion to complete before triggering remediation") + assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationDeferredV1Beta2Reason, "KubeadmControlPlane waiting for control plane Machine deletion to complete before triggering remediation") g.Expect(env.Cleanup(ctx, m1, m2)).To(Succeed()) }) @@ -539,8 +539,8 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(controlPlane.KCP.Annotations).ToNot(HaveKey(controlplanev1.RemediationInProgressAnnotation)) - assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP waiting for control plane machine provisioning to complete before triggering remediation") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationDeferredV1Beta2Reason, "KCP waiting for control plane Machine provisioning to complete before triggering remediation") + assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane waiting for control plane machine provisioning to complete before triggering remediation") + assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationDeferredV1Beta2Reason, "KubeadmControlPlane waiting for control plane Machine provisioning to complete before triggering remediation") g.Expect(env.Cleanup(ctx, m1, m2)).To(Succeed()) }) @@ -570,8 +570,8 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(controlPlane.KCP.Annotations).ToNot(HaveKey(controlplanev1.RemediationInProgressAnnotation)) - assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP waiting for control plane machine provisioning to complete before triggering remediation") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationDeferredV1Beta2Reason, "KCP waiting for control plane Machine provisioning to complete before triggering remediation") + assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane waiting for control plane machine provisioning to complete before triggering remediation") + assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationDeferredV1Beta2Reason, "KubeadmControlPlane waiting for control plane Machine provisioning to complete before triggering remediation") g.Expect(env.Cleanup(ctx, m1, m2)).To(Succeed()) }) @@ -613,8 +613,8 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(controlPlane.KCP.Annotations).ToNot(HaveKey(controlplanev1.RemediationInProgressAnnotation)) - assertMachineCondition(ctx, g, m1, 
clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP can't remediate this machine because this could result in etcd loosing quorum") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineCannotBeRemediatedV1Beta2Reason, "KCP can't remediate this Machine because this could result in etcd loosing quorum") + assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate this machine because this could result in etcd loosing quorum") + assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineCannotBeRemediatedV1Beta2Reason, "KubeadmControlPlane can't remediate this Machine because this could result in etcd loosing quorum") g.Expect(env.Cleanup(ctx, m1, m2, m3)).To(Succeed()) }) @@ -658,8 +658,8 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(controlPlane.KCP.Annotations).ToNot(HaveKey(controlplanev1.RemediationInProgressAnnotation)) - assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KCP can't remediate this machine because this could result in etcd loosing quorum") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineCannotBeRemediatedV1Beta2Reason, "KCP can't remediate this Machine because this could result in etcd loosing quorum") + assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "KubeadmControlPlane can't remediate this machine because this could result in etcd loosing quorum") + assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineCannotBeRemediatedV1Beta2Reason, "KubeadmControlPlane can't remediate this Machine because this could result in etcd loosing quorum") g.Expect(env.Cleanup(ctx, m1, m2, m3, m4, m5)).To(Succeed()) }) @@ -707,7 +707,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(0)) assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletedV1Beta2Reason, "Machine deletionTimestamp set") + assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) g.Expect(err).ToNot(HaveOccurred()) @@ -758,7 +758,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(0)) assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, 
corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletedV1Beta2Reason, "Machine deletionTimestamp set") + assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) g.Expect(err).ToNot(HaveOccurred()) @@ -794,7 +794,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(i - 1)) assertMachineCondition(ctx, g, mi, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, mi, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletedV1Beta2Reason, "Machine deletionTimestamp set") + assertMachineV1beta2Condition(ctx, g, mi, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: mi.Namespace, Name: mi.Name}, mi) g.Expect(err).ToNot(HaveOccurred()) @@ -850,7 +850,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(0)) assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletedV1Beta2Reason, "Machine deletionTimestamp set") + assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) g.Expect(err).ToNot(HaveOccurred()) @@ -903,7 +903,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(0)) assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletedV1Beta2Reason, "Machine deletionTimestamp set") + assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) g.Expect(err).ToNot(HaveOccurred()) @@ -956,7 +956,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(0)) assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, 
clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletedV1Beta2Reason, "Machine deletionTimestamp set") + assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) g.Expect(err).ToNot(HaveOccurred()) @@ -1010,7 +1010,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(0)) assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletedV1Beta2Reason, "Machine deletionTimestamp set") + assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) g.Expect(err).ToNot(HaveOccurred()) @@ -1064,7 +1064,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(0)) assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletedV1Beta2Reason, "Machine deletionTimestamp set") + assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) g.Expect(err).ToNot(HaveOccurred()) @@ -1114,7 +1114,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationFailedReason, clusterv1.ConditionSeverityWarning, "A control plane machine needs remediation, but there is no healthy machine to forward etcd leadership to. 
Skipping remediation") assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineCannotBeRemediatedV1Beta2Reason, - "KCP can't remediate this Machine because there is no healthy Machine to forward etcd leadership to") + "KubeadmControlPlane can't remediate this Machine because there is no healthy Machine to forward etcd leadership to") removeFinalizer(g, m1) g.Expect(env.Cleanup(ctx, m1, m2, m3, m4)).To(Succeed()) @@ -1163,7 +1163,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(0)) assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletedV1Beta2Reason, "Machine deletionTimestamp set") + assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) g.Expect(err).ToNot(HaveOccurred()) @@ -1199,7 +1199,7 @@ func TestReconcileUnhealthyMachines(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(i - 4)) assertMachineCondition(ctx, g, mi, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, mi, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletedV1Beta2Reason, "Machine deletionTimestamp set") + assertMachineV1beta2Condition(ctx, g, mi, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: mi.Namespace, Name: mi.Name}, mi) g.Expect(err).ToNot(HaveOccurred()) @@ -1270,7 +1270,7 @@ func TestReconcileUnhealthyMachinesSequences(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(0)) assertMachineCondition(ctx, g, m1, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletedV1Beta2Reason, "Machine deletionTimestamp set") + assertMachineV1beta2Condition(ctx, g, m1, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m1.Namespace, Name: m1.Name}, m1) g.Expect(err).ToNot(HaveOccurred()) @@ -1306,7 +1306,7 @@ func TestReconcileUnhealthyMachinesSequences(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(1)) assertMachineCondition(ctx, g, m2, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, m2, 
clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletedV1Beta2Reason, "Machine deletionTimestamp set") + assertMachineV1beta2Condition(ctx, g, m2, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m2.Namespace, Name: m2.Name}, m1) g.Expect(err).ToNot(HaveOccurred()) @@ -1382,7 +1382,7 @@ func TestReconcileUnhealthyMachinesSequences(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(0)) assertMachineCondition(ctx, g, m2, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, m2, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletedV1Beta2Reason, "Machine deletionTimestamp set") + assertMachineV1beta2Condition(ctx, g, m2, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m2.Namespace, Name: m2.Name}, m2) g.Expect(err).ToNot(HaveOccurred()) @@ -1419,7 +1419,7 @@ func TestReconcileUnhealthyMachinesSequences(t *testing.T) { g.Expect(remediationData.RetryCount).To(Equal(1)) assertMachineCondition(ctx, g, m3, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, m3, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletedV1Beta2Reason, "Machine deletionTimestamp set") + assertMachineV1beta2Condition(ctx, g, m3, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") err = env.Get(ctx, client.ObjectKey{Namespace: m3.Namespace, Name: m3.Name}, m3) g.Expect(err).ToNot(HaveOccurred()) @@ -1498,7 +1498,7 @@ func TestReconcileUnhealthyMachinesSequences(t *testing.T) { assertMachineCondition(ctx, g, m2, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.RemediationInProgressReason, clusterv1.ConditionSeverityWarning, "") assertMachineCondition(ctx, g, m3, clusterv1.MachineOwnerRemediatedCondition, corev1.ConditionFalse, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "") - assertMachineV1beta2Condition(ctx, g, m2, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletedV1Beta2Reason, "Machine deletionTimestamp set") + assertMachineV1beta2Condition(ctx, g, m2, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, "Machine is deleting") assertMachineV1beta2Condition(ctx, g, m3, clusterv1.MachineOwnerRemediatedV1Beta2Condition, metav1.ConditionFalse, clusterv1.MachineOwnerRemediatedWaitingForRemediationV1Beta2Reason, "Waiting for remediation") err = env.Get(ctx, client.ObjectKey{Namespace: m2.Namespace, Name: m2.Name}, m2) diff --git a/controlplane/kubeadm/internal/controllers/status.go 
b/controlplane/kubeadm/internal/controllers/status.go index 3ee2813993de..3d4fdc08f53a 100644 --- a/controlplane/kubeadm/internal/controllers/status.go +++ b/controlplane/kubeadm/internal/controllers/status.go @@ -448,7 +448,7 @@ func setDeletingCondition(_ context.Context, kcp *controlplanev1.KubeadmControlP v1beta2conditions.Set(kcp, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneDeletingV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: controlplanev1.KubeadmControlPlaneDeletingDeletionTimestampNotSetV1Beta2Reason, + Reason: controlplanev1.KubeadmControlPlaneNotDeletingV1Beta2Reason, }) return } diff --git a/controlplane/kubeadm/internal/controllers/status_test.go b/controlplane/kubeadm/internal/controllers/status_test.go index 78c73059d22b..c110544d757c 100644 --- a/controlplane/kubeadm/internal/controllers/status_test.go +++ b/controlplane/kubeadm/internal/controllers/status_test.go @@ -514,7 +514,7 @@ func Test_setRemediatingCondition(t *testing.T) { healthCheckSucceeded := clusterv1.Condition{Type: clusterv1.MachineHealthCheckSucceededV1Beta2Condition, Status: corev1.ConditionTrue} healthCheckNotSucceeded := clusterv1.Condition{Type: clusterv1.MachineHealthCheckSucceededV1Beta2Condition, Status: corev1.ConditionFalse} ownerRemediated := clusterv1.Condition{Type: clusterv1.MachineOwnerRemediatedCondition, Status: corev1.ConditionFalse} - ownerRemediatedV1Beta2 := metav1.Condition{Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletedV1Beta2Reason, Message: "Machine deletionTimestamp set"} + ownerRemediatedV1Beta2 := metav1.Condition{Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: controlplanev1.KubeadmControlPlaneMachineRemediationMachineDeletingV1Beta2Reason, Message: "Machine is deleting"} tests := []struct { name string @@ -550,7 +550,7 @@ func Test_setRemediatingCondition(t *testing.T) { Type: controlplanev1.KubeadmControlPlaneRemediatingV1Beta2Condition, Status: metav1.ConditionTrue, Reason: controlplanev1.KubeadmControlPlaneRemediatingV1Beta2Reason, - Message: "* Machine m3: Machine deletionTimestamp set", + Message: "* Machine m3: Machine is deleting", }, }, { @@ -622,7 +622,7 @@ func TestDeletingCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneDeletingV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: controlplanev1.KubeadmControlPlaneDeletingDeletionTimestampNotSetV1Beta2Reason, + Reason: controlplanev1.KubeadmControlPlaneNotDeletingV1Beta2Reason, }, }, { diff --git a/controlplane/kubeadm/internal/workload_cluster_conditions.go b/controlplane/kubeadm/internal/workload_cluster_conditions.go index 7c650a8198cb..f2e417f23b47 100644 --- a/controlplane/kubeadm/internal/workload_cluster_conditions.go +++ b/controlplane/kubeadm/internal/workload_cluster_conditions.go @@ -29,6 +29,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" @@ -100,11 +101,20 @@ func (w *Workload) updateManagedEtcdConditions(ctx context.Context, controlPlane provisioningMachines := controlPlane.Machines.Filter(collections.Not(collections.HasNode())) for _, machine := range provisioningMachines { + var msg string + if ptr.Deref(machine.Spec.ProviderID, "") != "" { + // If the machine is at the end of the 
provisioning phase, with ProviderID set, but still waiting + // for a matching Node to exist, surface this. + msg = fmt.Sprintf("Waiting for a Node with spec.providerID %s to exist", *machine.Spec.ProviderID) + } else { + // If the machine is at the beginning of the provisioning phase, with ProviderID not yet set, surface this. + msg = fmt.Sprintf("Waiting for %s to report spec.providerID", machine.Spec.InfrastructureRef.Kind) + } v1beta2conditions.Set(machine, metav1.Condition{ Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachineEtcdMemberInspectionFailedV1Beta2Reason, - Message: "Node does not exist", + Message: msg, }) } @@ -132,9 +142,10 @@ func (w *Workload) updateManagedEtcdConditions(ctx context.Context, controlPlane conditions.MarkFalse(machine, controlplanev1.MachineEtcdMemberHealthyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") v1beta2conditions.Set(machine, metav1.Condition{ - Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, - Status: metav1.ConditionFalse, - Reason: controlplanev1.KubeadmControlPlaneMachineEtcdMemberDeletingV1Beta2Reason, + Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: controlplanev1.KubeadmControlPlaneMachineEtcdMemberDeletingV1Beta2Reason, + Message: "Machine is deleting", }) continue } @@ -463,11 +474,20 @@ func (w *Workload) UpdateStaticPodConditions(ctx context.Context, controlPlane * provisioningMachines := controlPlane.Machines.Filter(collections.Not(collections.HasNode())) for _, machine := range provisioningMachines { for _, condition := range allMachinePodV1beta2Conditions { + var msg string + if ptr.Deref(machine.Spec.ProviderID, "") != "" { + // If the machine is at the end of the provisioning phase, with ProviderID set, but still waiting + // for a matching Node to exist, surface this. + msg = fmt.Sprintf("Waiting for a Node with spec.providerID %s to exist", *machine.Spec.ProviderID) + } else { + // If the machine is at the beginning of the provisioning phase, with ProviderID not yet set, surface this.
+ msg = fmt.Sprintf("Waiting for %s to report spec.providerID", machine.Spec.InfrastructureRef.Kind) + } v1beta2conditions.Set(machine, metav1.Condition{ Type: condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, - Message: "Node does not exist", + Message: msg, }) } } @@ -501,9 +521,10 @@ func (w *Workload) UpdateStaticPodConditions(ctx context.Context, controlPlane * for _, condition := range allMachinePodV1beta2Conditions { v1beta2conditions.Set(machine, metav1.Condition{ - Type: condition, - Status: metav1.ConditionFalse, - Reason: controlplanev1.KubeadmControlPlaneMachinePodDeletingV1Beta2Reason, + Type: condition, + Status: metav1.ConditionFalse, + Reason: controlplanev1.KubeadmControlPlaneMachinePodDeletingV1Beta2Reason, + Message: "Machine is deleting", }) } continue diff --git a/controlplane/kubeadm/internal/workload_cluster_conditions_test.go b/controlplane/kubeadm/internal/workload_cluster_conditions_test.go index c8c520218493..1e4a70ab2b71 100644 --- a/controlplane/kubeadm/internal/workload_cluster_conditions_test.go +++ b/controlplane/kubeadm/internal/workload_cluster_conditions_test.go @@ -29,6 +29,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -87,7 +88,7 @@ func TestUpdateEtcdConditions(t *testing.T) { expectedEtcdMembersAndMachinesAreMatching: false, // without reading nodes, we can not make assumptions. }, { - name: "If there are provisioning machines, a node without machine should be ignored in v1beta1, reported in v1beta2", + name: "If there are provisioning machines, a node without machine should be ignored in v1beta1, reported in v1beta2 (without providerID)", machines: []*clusterv1.Machine{ fakeMachine("m1"), // without NodeRef (provisioning) }, @@ -105,11 +106,41 @@ func TestUpdateEtcdConditions(t *testing.T) { Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneEtcdClusterHealthUnknownV1Beta2Reason, Message: "* Machine m1:\n" + - " * EtcdMemberHealthy: Node does not exist", + " * EtcdMemberHealthy: Waiting for GenericInfraMachine to report spec.providerID", }, expectedMachineV1Beta2Conditions: map[string][]metav1.Condition{ "m1": { - {Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachineEtcdMemberInspectionFailedV1Beta2Reason, Message: "Node does not exist"}, + {Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachineEtcdMemberInspectionFailedV1Beta2Reason, Message: "Waiting for GenericInfraMachine to report spec.providerID"}, + }, + }, + expectedEtcdMembersAgreeOnMemberList: false, // without reading members, we can not make assumptions. + expectedEtcdMembersAgreeOnClusterID: false, // without reading members, we can not make assumptions. + expectedEtcdMembersAndMachinesAreMatching: false, // without reading members, we can not make assumptions. 
+ }, + { + name: "If there are provisioning machines, a node without machine should be ignored in v1beta1, reported in v1beta2 (with providerID)", + machines: []*clusterv1.Machine{ + fakeMachine("m1", withProviderID("dummy-provider-id")), // without NodeRef (provisioning) + }, + injectClient: &fakeClient{ + list: &corev1.NodeList{ + Items: []corev1.Node{*fakeNode("n1")}, + }, + }, + expectedKCPCondition: nil, + expectedMachineConditions: map[string]clusterv1.Conditions{ + "m1": {}, + }, + expectedKCPV1Beta2Condition: &metav1.Condition{ + Type: controlplanev1.KubeadmControlPlaneEtcdClusterHealthyV1Beta2Condition, + Status: metav1.ConditionUnknown, + Reason: controlplanev1.KubeadmControlPlaneEtcdClusterHealthUnknownV1Beta2Reason, + Message: "* Machine m1:\n" + + " * EtcdMemberHealthy: Waiting for a Node with spec.providerID dummy-provider-id to exist", + }, + expectedMachineV1Beta2Conditions: map[string][]metav1.Condition{ + "m1": { + {Type: controlplanev1.KubeadmControlPlaneMachineEtcdMemberHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachineEtcdMemberInspectionFailedV1Beta2Reason, Message: "Waiting for a Node with spec.providerID dummy-provider-id to exist"}, }, }, expectedEtcdMembersAgreeOnMemberList: false, // without reading members, we can not make assumptions. @@ -755,7 +786,7 @@ func TestUpdateStaticPodConditions(t *testing.T) { }, }, { - name: "If there are provisioning machines, a node without machine should be ignored in v1beta1, reported in v1beta2", + name: "If there are provisioning machines, a node without machine should be ignored in v1beta1, reported in v1beta2 (without providerID)", machines: []*clusterv1.Machine{ fakeMachine("m1"), // without NodeRef (provisioning) }, @@ -773,17 +804,50 @@ func TestUpdateStaticPodConditions(t *testing.T) { Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthUnknownV1Beta2Reason, Message: "* Machine m1:\n" + - " * APIServerPodHealthy: Node does not exist\n" + - " * ControllerManagerPodHealthy: Node does not exist\n" + - " * SchedulerPodHealthy: Node does not exist\n" + - " * EtcdPodHealthy: Node does not exist", + " * APIServerPodHealthy: Waiting for GenericInfraMachine to report spec.providerID\n" + + " * ControllerManagerPodHealthy: Waiting for GenericInfraMachine to report spec.providerID\n" + + " * SchedulerPodHealthy: Waiting for GenericInfraMachine to report spec.providerID\n" + + " * EtcdPodHealthy: Waiting for GenericInfraMachine to report spec.providerID", }, expectedMachineV1Beta2Conditions: map[string][]metav1.Condition{ "m1": { - {Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, Message: "Node does not exist"}, - {Type: controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, Message: "Node does not exist"}, - {Type: controlplanev1.KubeadmControlPlaneMachineEtcdPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, Message: "Node does not exist"}, - {Type: controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, 
Message: "Node does not exist"}, + {Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, Message: "Waiting for GenericInfraMachine to report spec.providerID"}, + {Type: controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, Message: "Waiting for GenericInfraMachine to report spec.providerID"}, + {Type: controlplanev1.KubeadmControlPlaneMachineEtcdPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, Message: "Waiting for GenericInfraMachine to report spec.providerID"}, + {Type: controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, Message: "Waiting for GenericInfraMachine to report spec.providerID"}, + }, + }, + }, + { + name: "If there are provisioning machines, a node without machine should be ignored in v1beta1, reported in v1beta2 (with providerID)", + machines: []*clusterv1.Machine{ + fakeMachine("m1", withProviderID("dummy-provider-id")), // without NodeRef (provisioning) + }, + injectClient: &fakeClient{ + list: &corev1.NodeList{ + Items: []corev1.Node{*fakeNode("n1")}, + }, + }, + expectedKCPCondition: nil, + expectedMachineConditions: map[string]clusterv1.Conditions{ + "m1": {}, + }, + expectedKCPV1Beta2Condition: metav1.Condition{ + Type: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthyV1Beta2Condition, + Status: metav1.ConditionUnknown, + Reason: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthUnknownV1Beta2Reason, + Message: "* Machine m1:\n" + + " * APIServerPodHealthy: Waiting for a Node with spec.providerID dummy-provider-id to exist\n" + + " * ControllerManagerPodHealthy: Waiting for a Node with spec.providerID dummy-provider-id to exist\n" + + " * SchedulerPodHealthy: Waiting for a Node with spec.providerID dummy-provider-id to exist\n" + + " * EtcdPodHealthy: Waiting for a Node with spec.providerID dummy-provider-id to exist", + }, + expectedMachineV1Beta2Conditions: map[string][]metav1.Condition{ + "m1": { + {Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, Message: "Waiting for a Node with spec.providerID dummy-provider-id to exist"}, + {Type: controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, Message: "Waiting for a Node with spec.providerID dummy-provider-id to exist"}, + {Type: controlplanev1.KubeadmControlPlaneMachineEtcdPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, Message: "Waiting for a Node with spec.providerID dummy-provider-id to exist"}, + {Type: controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, Message: "Waiting for a Node with spec.providerID dummy-provider-id to exist"}, }, }, }, 
@@ -858,17 +922,17 @@ func TestUpdateStaticPodConditions(t *testing.T) { Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneControlPlaneComponentsHealthUnknownV1Beta2Reason, Message: "* Machine m1:\n" + - " * APIServerPodHealthy: Node does not exist\n" + - " * ControllerManagerPodHealthy: Node does not exist\n" + - " * SchedulerPodHealthy: Node does not exist\n" + - " * EtcdPodHealthy: Node does not exist", + " * APIServerPodHealthy: Waiting for GenericInfraMachine to report spec.providerID\n" + + " * ControllerManagerPodHealthy: Waiting for GenericInfraMachine to report spec.providerID\n" + + " * SchedulerPodHealthy: Waiting for GenericInfraMachine to report spec.providerID\n" + + " * EtcdPodHealthy: Waiting for GenericInfraMachine to report spec.providerID", }, expectedMachineV1Beta2Conditions: map[string][]metav1.Condition{ "m1": { - {Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, Message: "Node does not exist"}, - {Type: controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, Message: "Node does not exist"}, - {Type: controlplanev1.KubeadmControlPlaneMachineEtcdPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, Message: "Node does not exist"}, - {Type: controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, Message: "Node does not exist"}, + {Type: controlplanev1.KubeadmControlPlaneMachineAPIServerPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, Message: "Waiting for GenericInfraMachine to report spec.providerID"}, + {Type: controlplanev1.KubeadmControlPlaneMachineControllerManagerPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, Message: "Waiting for GenericInfraMachine to report spec.providerID"}, + {Type: controlplanev1.KubeadmControlPlaneMachineEtcdPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, Message: "Waiting for GenericInfraMachine to report spec.providerID"}, + {Type: controlplanev1.KubeadmControlPlaneMachineSchedulerPodHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, Reason: controlplanev1.KubeadmControlPlaneMachinePodInspectionFailedV1Beta2Reason, Message: "Waiting for GenericInfraMachine to report spec.providerID"}, }, }, }, @@ -1444,6 +1508,12 @@ func fakeMachine(name string, options ...fakeMachineOption) *clusterv1.Machine { ObjectMeta: metav1.ObjectMeta{ Name: name, }, + Spec: clusterv1.MachineSpec{ + InfrastructureRef: corev1.ObjectReference{ + Kind: "GenericInfraMachine", + Name: fmt.Sprintf("infra-%s", name), + }, + }, } for _, opt := range options { opt(p) @@ -1460,6 +1530,12 @@ func withNodeRef(ref string) fakeMachineOption { } } +func withProviderID(providerID string) fakeMachineOption { + return func(machine *clusterv1.Machine) { + machine.Spec.ProviderID = ptr.To(providerID) + } +} + func withMachineReadyCondition(status 
corev1.ConditionStatus, severity clusterv1.ConditionSeverity) fakeMachineOption { return func(machine *clusterv1.Machine) { machine.Status.Conditions = append(machine.Status.Conditions, clusterv1.Condition{ diff --git a/internal/controllers/cluster/cluster_controller.go b/internal/controllers/cluster/cluster_controller.go index 16693da639ba..62c19e4d738c 100644 --- a/internal/controllers/cluster/cluster_controller.go +++ b/internal/controllers/cluster/cluster_controller.go @@ -342,7 +342,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, s *scope) (reconcile.R if feature.Gates.Enabled(feature.RuntimeSDK) && feature.Gates.Enabled(feature.ClusterTopology) { if cluster.Spec.Topology != nil && !hooks.IsOkToDelete(cluster) { s.deletingReason = clusterv1.ClusterDeletingWaitingForBeforeDeleteHookV1Beta2Reason - s.deletingMessage = "" + s.deletingMessage = "Waiting for BeforeClusterDelete hook" return ctrl.Result{}, nil } } @@ -441,7 +441,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, s *scope) (reconcile.R // Return here so we don't remove the finalizer yet. s.deletingReason = clusterv1.ClusterDeletingWaitingForControlPlaneDeletionV1Beta2Reason - s.deletingMessage = "" + s.deletingMessage = fmt.Sprintf("Waiting for %s to be deleted", cluster.Spec.ControlPlaneRef.Kind) log.Info("Cluster still has descendants - need to requeue", "controlPlaneRef", cluster.Spec.ControlPlaneRef.Name) return ctrl.Result{}, nil @@ -479,7 +479,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, s *scope) (reconcile.R // Return here so we don't remove the finalizer yet. s.deletingReason = clusterv1.ClusterDeletingWaitingForInfrastructureDeletionV1Beta2Reason - s.deletingMessage = "" + s.deletingMessage = fmt.Sprintf("Waiting for %s to be deleted", cluster.Spec.InfrastructureRef.Kind) log.Info("Cluster still has descendants - need to requeue", "infrastructureRef", cluster.Spec.InfrastructureRef.Name) return ctrl.Result{}, nil @@ -487,7 +487,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, s *scope) (reconcile.R } s.deletingReason = clusterv1.ClusterDeletingDeletionCompletedV1Beta2Reason - s.deletingMessage = "" + s.deletingMessage = "Deletion completed" controllerutil.RemoveFinalizer(cluster, clusterv1.ClusterFinalizer) r.recorder.Eventf(cluster, corev1.EventTypeNormal, "Deleted", "Cluster %s has been deleted", cluster.Name) diff --git a/internal/controllers/cluster/cluster_controller_status.go b/internal/controllers/cluster/cluster_controller_status.go index dc71ff1d4b2b..cc1d029166f7 100644 --- a/internal/controllers/cluster/cluster_controller_status.go +++ b/internal/controllers/cluster/cluster_controller_status.go @@ -246,7 +246,7 @@ func setInfrastructureReadyCondition(_ context.Context, cluster *clusterv1.Clust } v1beta2conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterInfrastructureReadyV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.ClusterInfrastructureDoesNotExistV1Beta2Reason, Message: message, }) @@ -299,7 +299,7 @@ func setInfrastructureReadyCondition(_ context.Context, cluster *clusterv1.Clust if cluster.Status.InfrastructureReady { v1beta2conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterInfrastructureReadyV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.ClusterInfrastructureDeletedV1Beta2Reason, Message: fmt.Sprintf("%s has been deleted", cluster.Spec.InfrastructureRef.Kind), }) @@ -308,7 +308,7 @@ func 
setInfrastructureReadyCondition(_ context.Context, cluster *clusterv1.Clust v1beta2conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterInfrastructureReadyV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.ClusterInfrastructureDoesNotExistV1Beta2Reason, Message: fmt.Sprintf("%s does not exist", cluster.Spec.InfrastructureRef.Kind), }) @@ -319,7 +319,7 @@ func setInfrastructureReadyCondition(_ context.Context, cluster *clusterv1.Clust if cluster.Status.InfrastructureReady { v1beta2conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterInfrastructureReadyV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.ClusterInfrastructureDeletedV1Beta2Reason, Message: fmt.Sprintf("%s has been deleted while the cluster still exists", cluster.Spec.InfrastructureRef.Kind), }) @@ -331,7 +331,7 @@ func setInfrastructureReadyCondition(_ context.Context, cluster *clusterv1.Clust // - when applying the yaml file with the cluster and all the objects referenced by it (provisioning yet to start/started, but status.InfrastructureReady not yet set). v1beta2conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterInfrastructureReadyV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.ClusterInfrastructureDoesNotExistV1Beta2Reason, Message: fmt.Sprintf("%s does not exist", cluster.Spec.InfrastructureRef.Kind), }) @@ -346,7 +346,7 @@ func setControlPlaneAvailableCondition(_ context.Context, cluster *clusterv1.Clu } v1beta2conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterControlPlaneAvailableV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.ClusterControlPlaneDoesNotExistV1Beta2Reason, Message: message, }) @@ -399,7 +399,7 @@ func setControlPlaneAvailableCondition(_ context.Context, cluster *clusterv1.Clu if cluster.Status.ControlPlaneReady { v1beta2conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterControlPlaneAvailableV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.ClusterControlPlaneDeletedV1Beta2Reason, Message: fmt.Sprintf("%s has been deleted", cluster.Spec.ControlPlaneRef.Kind), }) @@ -408,7 +408,7 @@ func setControlPlaneAvailableCondition(_ context.Context, cluster *clusterv1.Clu v1beta2conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterControlPlaneAvailableV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.ClusterControlPlaneDoesNotExistV1Beta2Reason, Message: fmt.Sprintf("%s does not exist", cluster.Spec.ControlPlaneRef.Kind), }) @@ -419,7 +419,7 @@ func setControlPlaneAvailableCondition(_ context.Context, cluster *clusterv1.Clu if cluster.Status.ControlPlaneReady { v1beta2conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterControlPlaneAvailableV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.ClusterControlPlaneDeletedV1Beta2Reason, Message: fmt.Sprintf("%s has been deleted while the cluster still exists", cluster.Spec.ControlPlaneRef.Kind), }) @@ -431,7 +431,7 @@ func setControlPlaneAvailableCondition(_ context.Context, cluster *clusterv1.Clu // - when applying the yaml file with the cluster and all the objects referenced by it (provisioning yet to start/started, but status.ControlPlaneReady not yet set). 
v1beta2conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterControlPlaneAvailableV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.ClusterControlPlaneDoesNotExistV1Beta2Reason, Message: fmt.Sprintf("%s does not exist", cluster.Spec.ControlPlaneRef.Kind), }) @@ -910,7 +910,7 @@ func setDeletingCondition(_ context.Context, cluster *clusterv1.Cluster, deletin v1beta2conditions.Set(cluster, metav1.Condition{ Type: clusterv1.ClusterDeletingV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: clusterv1.ClusterDeletingDeletionTimestampNotSetV1Beta2Reason, + Reason: clusterv1.ClusterNotDeletingV1Beta2Reason, }) return } @@ -933,10 +933,10 @@ func (c clusterConditionCustomMergeStrategy) Merge(conditions []v1beta2condition func(condition metav1.Condition) v1beta2conditions.MergePriority { // While cluster is deleting, treat unknown conditions from external objects as info (it is ok that those objects have been deleted at this stage). if !c.cluster.DeletionTimestamp.IsZero() { - if condition.Type == clusterv1.ClusterInfrastructureReadyV1Beta2Condition && condition.Status == metav1.ConditionUnknown && (condition.Reason == clusterv1.ClusterInfrastructureDeletedV1Beta2Reason || condition.Reason == clusterv1.ClusterInfrastructureDoesNotExistV1Beta2Reason) { + if condition.Type == clusterv1.ClusterInfrastructureReadyV1Beta2Condition && (condition.Reason == clusterv1.ClusterInfrastructureDeletedV1Beta2Reason || condition.Reason == clusterv1.ClusterInfrastructureDoesNotExistV1Beta2Reason) { return v1beta2conditions.InfoMergePriority } - if condition.Type == clusterv1.ClusterControlPlaneAvailableV1Beta2Condition && condition.Status == metav1.ConditionUnknown && (condition.Reason == clusterv1.ClusterControlPlaneDeletedV1Beta2Reason || condition.Reason == clusterv1.ClusterControlPlaneDoesNotExistV1Beta2Reason) { + if condition.Type == clusterv1.ClusterControlPlaneAvailableV1Beta2Condition && (condition.Reason == clusterv1.ClusterControlPlaneDeletedV1Beta2Reason || condition.Reason == clusterv1.ClusterControlPlaneDoesNotExistV1Beta2Reason) { return v1beta2conditions.InfoMergePriority } } diff --git a/internal/controllers/cluster/cluster_controller_status_test.go b/internal/controllers/cluster/cluster_controller_status_test.go index 09270cd84258..76b2bc2d8ebf 100644 --- a/internal/controllers/cluster/cluster_controller_status_test.go +++ b/internal/controllers/cluster/cluster_controller_status_test.go @@ -223,7 +223,7 @@ func TestSetInfrastructureReadyCondition(t *testing.T) { infraClusterIsNotFound: false, expectCondition: metav1.Condition{ Type: clusterv1.ClusterInfrastructureReadyV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.ClusterInfrastructureDoesNotExistV1Beta2Reason, Message: "Waiting for cluster topology to be reconciled", }, @@ -235,7 +235,7 @@ func TestSetInfrastructureReadyCondition(t *testing.T) { infraClusterIsNotFound: false, expectCondition: metav1.Condition{ Type: clusterv1.ClusterInfrastructureReadyV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.ClusterInfrastructureDoesNotExistV1Beta2Reason, }, }, @@ -317,7 +317,7 @@ func TestSetInfrastructureReadyCondition(t *testing.T) { infraClusterIsNotFound: true, expectCondition: metav1.Condition{ Type: clusterv1.ClusterInfrastructureReadyV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: 
clusterv1.ClusterInfrastructureDeletedV1Beta2Reason, Message: "FakeInfraCluster has been deleted", }, @@ -329,7 +329,7 @@ func TestSetInfrastructureReadyCondition(t *testing.T) { infraClusterIsNotFound: true, expectCondition: metav1.Condition{ Type: clusterv1.ClusterInfrastructureReadyV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.ClusterInfrastructureDoesNotExistV1Beta2Reason, Message: "FakeInfraCluster does not exist", }, @@ -341,7 +341,7 @@ func TestSetInfrastructureReadyCondition(t *testing.T) { infraClusterIsNotFound: true, expectCondition: metav1.Condition{ Type: clusterv1.ClusterInfrastructureReadyV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.ClusterInfrastructureDeletedV1Beta2Reason, Message: "FakeInfraCluster has been deleted while the cluster still exists", }, @@ -353,7 +353,7 @@ func TestSetInfrastructureReadyCondition(t *testing.T) { infraClusterIsNotFound: true, expectCondition: metav1.Condition{ Type: clusterv1.ClusterInfrastructureReadyV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.ClusterInfrastructureDoesNotExistV1Beta2Reason, Message: "FakeInfraCluster does not exist", }, @@ -388,7 +388,7 @@ func TestSetControlPlaneAvailableCondition(t *testing.T) { controlPlaneIsNotFound: false, expectCondition: metav1.Condition{ Type: clusterv1.ClusterControlPlaneAvailableV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.ClusterControlPlaneDoesNotExistV1Beta2Reason, Message: "Waiting for cluster topology to be reconciled", }, @@ -400,7 +400,7 @@ func TestSetControlPlaneAvailableCondition(t *testing.T) { controlPlaneIsNotFound: false, expectCondition: metav1.Condition{ Type: clusterv1.ClusterControlPlaneAvailableV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.ClusterControlPlaneDoesNotExistV1Beta2Reason, }, }, @@ -482,7 +482,7 @@ func TestSetControlPlaneAvailableCondition(t *testing.T) { controlPlaneIsNotFound: true, expectCondition: metav1.Condition{ Type: clusterv1.ClusterControlPlaneAvailableV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.ClusterControlPlaneDeletedV1Beta2Reason, Message: "FakeControlPlane has been deleted", }, @@ -494,7 +494,7 @@ func TestSetControlPlaneAvailableCondition(t *testing.T) { controlPlaneIsNotFound: true, expectCondition: metav1.Condition{ Type: clusterv1.ClusterControlPlaneAvailableV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.ClusterControlPlaneDoesNotExistV1Beta2Reason, Message: "FakeControlPlane does not exist", }, @@ -506,7 +506,7 @@ func TestSetControlPlaneAvailableCondition(t *testing.T) { controlPlaneIsNotFound: true, expectCondition: metav1.Condition{ Type: clusterv1.ClusterControlPlaneAvailableV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.ClusterControlPlaneDeletedV1Beta2Reason, Message: "FakeControlPlane has been deleted while the cluster still exists", }, @@ -518,7 +518,7 @@ func TestSetControlPlaneAvailableCondition(t *testing.T) { controlPlaneIsNotFound: true, expectCondition: metav1.Condition{ Type: clusterv1.ClusterControlPlaneAvailableV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.ClusterControlPlaneDoesNotExistV1Beta2Reason, Message: 
"FakeControlPlane does not exist", }, @@ -1547,7 +1547,7 @@ func TestSetRemediatingCondition(t *testing.T) { healthCheckSucceeded := clusterv1.Condition{Type: clusterv1.MachineHealthCheckSucceededV1Beta2Condition, Status: corev1.ConditionTrue} healthCheckNotSucceeded := clusterv1.Condition{Type: clusterv1.MachineHealthCheckSucceededV1Beta2Condition, Status: corev1.ConditionFalse} ownerRemediated := clusterv1.Condition{Type: clusterv1.MachineOwnerRemediatedCondition, Status: corev1.ConditionFalse} - ownerRemediatedV1Beta2 := metav1.Condition{Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineSetMachineRemediationMachineDeletedV1Beta2Reason, Message: "Machine deletionTimestamp set"} + ownerRemediatedV1Beta2 := metav1.Condition{Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineSetMachineRemediationMachineDeletingV1Beta2Reason, Message: "Machine is deleting"} tests := []struct { name string @@ -1595,7 +1595,7 @@ func TestSetRemediatingCondition(t *testing.T) { Type: clusterv1.ClusterRemediatingV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.ClusterRemediatingV1Beta2Reason, - Message: "* Machine m3: Machine deletionTimestamp set", + Message: "* Machine m3: Machine is deleting", }, }, { @@ -1666,7 +1666,7 @@ func TestDeletingCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.ClusterDeletingV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: clusterv1.ClusterDeletingDeletionTimestampNotSetV1Beta2Reason, + Reason: clusterv1.ClusterNotDeletingV1Beta2Reason, }, }, { @@ -1966,12 +1966,12 @@ func TestSetAvailableCondition(t *testing.T) { Conditions: []metav1.Condition{ { Type: clusterv1.ClusterInfrastructureReadyV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.ClusterInfrastructureDeletedV1Beta2Reason, }, { Type: clusterv1.ClusterControlPlaneAvailableV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.ClusterControlPlaneDeletedV1Beta2Reason, }, { diff --git a/internal/controllers/machine/machine_controller.go b/internal/controllers/machine/machine_controller.go index 4bb7b69076ad..00b419a26c0a 100644 --- a/internal/controllers/machine/machine_controller.go +++ b/internal/controllers/machine/machine_controller.go @@ -434,8 +434,8 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, s *scope) (ctrl.Result r.reconcileDeleteCache.Add(cache.NewReconcileEntry(s.machine, time.Now().Add(1*time.Second))) // Set "fallback" reason and message. This is used if we don't set a more specific reason and message below. 
- s.deletingReason = clusterv1.MachineDeletingDeletionTimestampSetV1Beta2Reason - s.deletingMessage = "" + s.deletingReason = clusterv1.MachineDeletingV1Beta2Reason + s.deletingMessage = "Deletion started" err := r.isDeleteNodeAllowed(ctx, cluster, m) isDeleteNodeAllowed := err == nil @@ -624,7 +624,7 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, s *scope) (ctrl.Result } s.deletingReason = clusterv1.MachineDeletingDeletionCompletedV1Beta2Reason - s.deletingMessage = "" + s.deletingMessage = "Deletion completed" controllerutil.RemoveFinalizer(m, clusterv1.MachineFinalizer) return ctrl.Result{}, nil diff --git a/internal/controllers/machine/machine_controller_status.go b/internal/controllers/machine/machine_controller_status.go index 870490b4d21b..4c5a1101ddbc 100644 --- a/internal/controllers/machine/machine_controller_status.go +++ b/internal/controllers/machine/machine_controller_status.go @@ -68,7 +68,6 @@ func (r *Reconciler) updateStatus(ctx context.Context, s *scope) { setDeletingCondition(ctx, s.machine, s.reconcileDeleteExecuted, s.deletingReason, s.deletingMessage) setReadyCondition(ctx, s.machine) setAvailableCondition(ctx, s.machine) - // TODO: Set the uptodate condition for standalone pods setMachinePhaseAndLastUpdated(ctx, s.machine) } @@ -128,7 +127,7 @@ func setBootstrapReadyCondition(_ context.Context, machine *clusterv1.Machine, b if !machine.DeletionTimestamp.IsZero() && machine.Status.BootstrapReady { v1beta2conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineBootstrapConfigReadyV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.MachineBootstrapConfigDeletedV1Beta2Reason, Message: fmt.Sprintf("%s has been deleted", machine.Spec.Bootstrap.ConfigRef.Kind), }) @@ -141,7 +140,7 @@ func setBootstrapReadyCondition(_ context.Context, machine *clusterv1.Machine, b // - when the machine has been provisioned v1beta2conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineBootstrapConfigReadyV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.MachineBootstrapConfigDoesNotExistV1Beta2Reason, Message: fmt.Sprintf("%s does not exist", machine.Spec.Bootstrap.ConfigRef.Kind), }) @@ -210,7 +209,7 @@ func setInfrastructureReadyCondition(_ context.Context, machine *clusterv1.Machi if machine.Status.InfrastructureReady { v1beta2conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineInfrastructureReadyV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.MachineInfrastructureDeletedV1Beta2Reason, Message: fmt.Sprintf("%s has been deleted", machine.Spec.InfrastructureRef.Kind), }) @@ -219,7 +218,7 @@ func setInfrastructureReadyCondition(_ context.Context, machine *clusterv1.Machi v1beta2conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineInfrastructureReadyV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.MachineInfrastructureDoesNotExistV1Beta2Reason, Message: fmt.Sprintf("%s does not exist", machine.Spec.InfrastructureRef.Kind), }) @@ -230,9 +229,9 @@ func setInfrastructureReadyCondition(_ context.Context, machine *clusterv1.Machi if machine.Status.InfrastructureReady { v1beta2conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineInfrastructureReadyV1Beta2Condition, - Status: metav1.ConditionFalse, // setting to false to give more relevance in the ready condition summary. 
+ Status: metav1.ConditionFalse, Reason: clusterv1.MachineInfrastructureDeletedV1Beta2Reason, - Message: fmt.Sprintf("%s has been deleted while the machine still exists", machine.Spec.InfrastructureRef.Kind), + Message: fmt.Sprintf("%s has been deleted while the Machine still exists", machine.Spec.InfrastructureRef.Kind), }) return } @@ -242,7 +241,7 @@ func setInfrastructureReadyCondition(_ context.Context, machine *clusterv1.Machi // - when applying the yaml file with the machine and all the objects referenced by it (provisioning yet to start/started, but status.InfrastructureReady not yet set). v1beta2conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineInfrastructureReadyV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.MachineInfrastructureDoesNotExistV1Beta2Reason, Message: fmt.Sprintf("%s does not exist", machine.Spec.InfrastructureRef.Kind), }) @@ -310,7 +309,7 @@ func setNodeHealthyAndReadyConditions(ctx context.Context, cluster *clusterv1.Cl } message := "" - if condition.Message != "" { + if condition.Status != corev1.ConditionTrue && condition.Message != "" { message = fmt.Sprintf("* Node.Ready: %s", condition.Message) } @@ -358,7 +357,7 @@ func setNodeHealthyAndReadyConditions(ctx context.Context, cluster *clusterv1.Cl // will be considered unreachable Machine deletion will complete. if !machine.DeletionTimestamp.IsZero() { if machine.Status.NodeRef != nil { - setNodeConditions(machine, metav1.ConditionUnknown, + setNodeConditions(machine, metav1.ConditionFalse, clusterv1.MachineNodeDeletedV1Beta2Reason, fmt.Sprintf("Node %s has been deleted", machine.Status.NodeRef.Name)) return @@ -376,7 +375,7 @@ func setNodeHealthyAndReadyConditions(ctx context.Context, cluster *clusterv1.Cl // Setting MachineNodeReadyV1Beta2Condition to False to keep it consistent with MachineNodeHealthyV1Beta2Condition. setNodeConditions(machine, metav1.ConditionFalse, clusterv1.MachineNodeDeletedV1Beta2Reason, - fmt.Sprintf("Node %s has been deleted while the machine still exists", machine.Status.NodeRef.Name)) + fmt.Sprintf("Node %s has been deleted while the Machine still exists", machine.Status.NodeRef.Name)) return } @@ -384,14 +383,14 @@ func setNodeHealthyAndReadyConditions(ctx context.Context, cluster *clusterv1.Cl // for a matching Node to exists, surface this. if ptr.Deref(machine.Spec.ProviderID, "") != "" { setNodeConditions(machine, metav1.ConditionUnknown, - clusterv1.MachineNodeDoesNotExistV1Beta2Reason, + clusterv1.MachineNodeInspectionFailedV1Beta2Reason, fmt.Sprintf("Waiting for a Node with spec.providerID %s to exist", *machine.Spec.ProviderID)) return } // If the machine is at the beginning of the provisioning phase, with ProviderID not yet set, surface this. setNodeConditions(machine, metav1.ConditionUnknown, - clusterv1.MachineNodeDoesNotExistV1Beta2Reason, + clusterv1.MachineNodeInspectionFailedV1Beta2Reason, fmt.Sprintf("Waiting for %s to report spec.providerID", machine.Spec.InfrastructureRef.Kind)) } @@ -492,13 +491,13 @@ func (c machineConditionCustomMergeStrategy) Merge(conditions []v1beta2condition v1beta2conditions.GetPriorityFunc(func(condition metav1.Condition) v1beta2conditions.MergePriority { // While machine is deleting, treat unknown conditions from external objects as info (it is ok that those objects have been deleted at this stage). 
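// Note (editorial, not part of the original patch): the condition.Status == metav1.ConditionUnknown
// guard is dropped from the checks below because this patch now sets the Deleted/DoesNotExist
// reasons with Status=False instead of Unknown (see the ConditionUnknown -> ConditionFalse hunks
// above); a guard pinned to Unknown would no longer match, so type and reason alone are checked.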
if !c.machine.DeletionTimestamp.IsZero() { - if condition.Type == clusterv1.MachineBootstrapConfigReadyV1Beta2Condition && condition.Status == metav1.ConditionUnknown && (condition.Reason == clusterv1.MachineBootstrapConfigDeletedV1Beta2Reason || condition.Reason == clusterv1.MachineBootstrapConfigDoesNotExistV1Beta2Reason) { + if condition.Type == clusterv1.MachineBootstrapConfigReadyV1Beta2Condition && (condition.Reason == clusterv1.MachineBootstrapConfigDeletedV1Beta2Reason || condition.Reason == clusterv1.MachineBootstrapConfigDoesNotExistV1Beta2Reason) { return v1beta2conditions.InfoMergePriority } - if condition.Type == clusterv1.MachineInfrastructureReadyV1Beta2Condition && condition.Status == metav1.ConditionUnknown && (condition.Reason == clusterv1.MachineInfrastructureDeletedV1Beta2Reason || condition.Reason == clusterv1.MachineInfrastructureDoesNotExistV1Beta2Reason) { + if condition.Type == clusterv1.MachineInfrastructureReadyV1Beta2Condition && (condition.Reason == clusterv1.MachineInfrastructureDeletedV1Beta2Reason || condition.Reason == clusterv1.MachineInfrastructureDoesNotExistV1Beta2Reason) { return v1beta2conditions.InfoMergePriority } - if condition.Type == clusterv1.MachineNodeHealthyV1Beta2Condition && condition.Status == metav1.ConditionUnknown && (condition.Reason == clusterv1.MachineNodeDeletedV1Beta2Reason || condition.Reason == clusterv1.MachineNodeDoesNotExistV1Beta2Reason) { + if condition.Type == clusterv1.MachineNodeHealthyV1Beta2Condition && (condition.Reason == clusterv1.MachineNodeDeletedV1Beta2Reason || condition.Reason == clusterv1.MachineNodeDoesNotExistV1Beta2Reason) { return v1beta2conditions.InfoMergePriority } // Note: MachineNodeReadyV1Beta2Condition is not relevant for the summary. @@ -518,7 +517,7 @@ func setDeletingCondition(_ context.Context, machine *clusterv1.Machine, reconci v1beta2conditions.Set(machine, metav1.Condition{ Type: clusterv1.MachineDeletingV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: clusterv1.MachineDeletingDeletionTimestampNotSetV1Beta2Reason, + Reason: clusterv1.MachineNotDeletingV1Beta2Reason, }) return } diff --git a/internal/controllers/machine/machine_controller_status_test.go b/internal/controllers/machine/machine_controller_status_test.go index 4ffcf237b502..439077b016cf 100644 --- a/internal/controllers/machine/machine_controller_status_test.go +++ b/internal/controllers/machine/machine_controller_status_test.go @@ -238,7 +238,7 @@ func TestSetBootstrapReadyCondition(t *testing.T) { bootstrapConfigIsNotFound: true, expectCondition: metav1.Condition{ Type: clusterv1.MachineBootstrapConfigReadyV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.MachineBootstrapConfigDeletedV1Beta2Reason, Message: "GenericBootstrapConfig has been deleted", }, @@ -254,7 +254,7 @@ func TestSetBootstrapReadyCondition(t *testing.T) { bootstrapConfigIsNotFound: true, expectCondition: metav1.Condition{ Type: clusterv1.MachineBootstrapConfigReadyV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.MachineBootstrapConfigDoesNotExistV1Beta2Reason, Message: "GenericBootstrapConfig does not exist", }, @@ -266,7 +266,7 @@ func TestSetBootstrapReadyCondition(t *testing.T) { bootstrapConfigIsNotFound: true, expectCondition: metav1.Condition{ Type: clusterv1.MachineBootstrapConfigReadyV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.MachineBootstrapConfigDoesNotExistV1Beta2Reason, 
Message: "GenericBootstrapConfig does not exist", }, @@ -465,7 +465,7 @@ func TestSetInfrastructureReadyCondition(t *testing.T) { infraMachineIsNotFound: true, expectCondition: metav1.Condition{ Type: clusterv1.MachineInfrastructureReadyV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.MachineInfrastructureDeletedV1Beta2Reason, Message: "GenericInfrastructureMachine has been deleted", }, @@ -481,7 +481,7 @@ func TestSetInfrastructureReadyCondition(t *testing.T) { infraMachineIsNotFound: true, expectCondition: metav1.Condition{ Type: clusterv1.MachineInfrastructureReadyV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.MachineInfrastructureDoesNotExistV1Beta2Reason, Message: "GenericInfrastructureMachine does not exist", }, @@ -499,7 +499,7 @@ func TestSetInfrastructureReadyCondition(t *testing.T) { Type: clusterv1.MachineInfrastructureReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineInfrastructureDeletedV1Beta2Reason, - Message: "GenericInfrastructureMachine has been deleted while the machine still exists", + Message: "GenericInfrastructureMachine has been deleted while the Machine still exists", }, }, { @@ -509,7 +509,7 @@ func TestSetInfrastructureReadyCondition(t *testing.T) { infraMachineIsNotFound: true, expectCondition: metav1.Condition{ Type: clusterv1.MachineInfrastructureReadyV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.MachineInfrastructureDoesNotExistV1Beta2Reason, Message: "GenericInfrastructureMachine does not exist", }, @@ -771,10 +771,9 @@ func TestSetNodeHealthyAndReadyConditions(t *testing.T) { Reason: clusterv1.MachineNodeHealthyV1Beta2Condition, }, { - Type: clusterv1.MachineNodeReadyV1Beta2Condition, - Status: metav1.ConditionTrue, - Reason: clusterv1.MachineNodeReadyV1Beta2Reason, - Message: "* Node.Ready: kubelet is posting ready status", + Type: clusterv1.MachineNodeReadyV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: clusterv1.MachineNodeReadyV1Beta2Reason, }, }, }, @@ -823,13 +822,13 @@ func TestSetNodeHealthyAndReadyConditions(t *testing.T) { expectConditions: []metav1.Condition{ { Type: clusterv1.MachineNodeHealthyV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.MachineNodeDeletedV1Beta2Reason, Message: "Node test-node-1 has been deleted", }, { Type: clusterv1.MachineNodeReadyV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.MachineNodeDeletedV1Beta2Reason, Message: "Node test-node-1 has been deleted", }, @@ -877,13 +876,13 @@ func TestSetNodeHealthyAndReadyConditions(t *testing.T) { Type: clusterv1.MachineNodeHealthyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineNodeDeletedV1Beta2Reason, - Message: "Node test-node-1 has been deleted while the machine still exists", + Message: "Node test-node-1 has been deleted while the Machine still exists", }, { Type: clusterv1.MachineNodeReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineNodeDeletedV1Beta2Reason, - Message: "Node test-node-1 has been deleted while the machine still exists", + Message: "Node test-node-1 has been deleted while the Machine still exists", }, }, }, @@ -901,13 +900,13 @@ func TestSetNodeHealthyAndReadyConditions(t *testing.T) { { Type: clusterv1.MachineNodeHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, - Reason: 
clusterv1.MachineNodeDoesNotExistV1Beta2Reason, + Reason: clusterv1.MachineNodeInspectionFailedV1Beta2Reason, Message: "Waiting for a Node with spec.providerID foo://test-node-1 to exist", }, { Type: clusterv1.MachineNodeReadyV1Beta2Condition, Status: metav1.ConditionUnknown, - Reason: clusterv1.MachineNodeDoesNotExistV1Beta2Reason, + Reason: clusterv1.MachineNodeInspectionFailedV1Beta2Reason, Message: "Waiting for a Node with spec.providerID foo://test-node-1 to exist", }, }, @@ -922,13 +921,13 @@ func TestSetNodeHealthyAndReadyConditions(t *testing.T) { { Type: clusterv1.MachineNodeHealthyV1Beta2Condition, Status: metav1.ConditionUnknown, - Reason: clusterv1.MachineNodeDoesNotExistV1Beta2Reason, + Reason: clusterv1.MachineNodeInspectionFailedV1Beta2Reason, Message: "Waiting for GenericInfrastructureMachine to report spec.providerID", }, { Type: clusterv1.MachineNodeReadyV1Beta2Condition, Status: metav1.ConditionUnknown, - Reason: clusterv1.MachineNodeDoesNotExistV1Beta2Reason, + Reason: clusterv1.MachineNodeInspectionFailedV1Beta2Reason, Message: "Waiting for GenericInfrastructureMachine to report spec.providerID", }, }, @@ -1115,7 +1114,7 @@ func TestDeletingCondition(t *testing.T) { expectCondition: metav1.Condition{ Type: clusterv1.MachineDeletingV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: clusterv1.MachineDeletingDeletionTimestampNotSetV1Beta2Reason, + Reason: clusterv1.MachineNotDeletingV1Beta2Reason, }, }, { @@ -1253,7 +1252,7 @@ func TestSetReadyCondition(t *testing.T) { { Type: clusterv1.MachineDeletingV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: clusterv1.MachineDeletingDeletionTimestampNotSetV1Beta2Reason, + Reason: clusterv1.MachineNotDeletingV1Beta2Reason, }, }, }, @@ -1278,17 +1277,17 @@ func TestSetReadyCondition(t *testing.T) { Conditions: []metav1.Condition{ { Type: clusterv1.MachineBootstrapConfigReadyV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.MachineBootstrapConfigDoesNotExistV1Beta2Reason, }, { Type: clusterv1.InfrastructureReadyV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.MachineInfrastructureDeletedV1Beta2Reason, }, { Type: clusterv1.MachineNodeHealthyV1Beta2Condition, - Status: metav1.ConditionUnknown, + Status: metav1.ConditionFalse, Reason: clusterv1.MachineNodeDeletedV1Beta2Reason, }, { @@ -1393,7 +1392,7 @@ func TestSetReadyCondition(t *testing.T) { { Type: clusterv1.MachineDeletingV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: clusterv1.MachineDeletingDeletionTimestampNotSetV1Beta2Reason, + Reason: clusterv1.MachineNotDeletingV1Beta2Reason, }, }, }, @@ -1452,7 +1451,7 @@ func TestSetReadyCondition(t *testing.T) { { Type: clusterv1.MachineDeletingV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: clusterv1.MachineDeletingDeletionTimestampNotSetV1Beta2Reason, + Reason: clusterv1.MachineNotDeletingV1Beta2Reason, }, }, }, diff --git a/internal/controllers/machinedeployment/machinedeployment_status.go b/internal/controllers/machinedeployment/machinedeployment_status.go index f576bbdcc396..6bff6bf0aee3 100644 --- a/internal/controllers/machinedeployment/machinedeployment_status.go +++ b/internal/controllers/machinedeployment/machinedeployment_status.go @@ -177,7 +177,8 @@ func setScalingUpCondition(_ context.Context, machineDeployment *clusterv1.Machi if currentReplicas >= desiredReplicas { var message string - if missingReferencesMessage != "" { + // Only surface this message if the MachineDeployment 
is not deleting. + if machineDeployment.DeletionTimestamp.IsZero() && missingReferencesMessage != "" { message = fmt.Sprintf("Scaling up would be blocked %s", missingReferencesMessage) } v1beta2conditions.Set(machineDeployment, metav1.Condition{ @@ -421,7 +422,7 @@ func setDeletingCondition(_ context.Context, machineDeployment *clusterv1.Machin v1beta2conditions.Set(machineDeployment, metav1.Condition{ Type: clusterv1.MachineDeploymentDeletingV1Beta2Condition, Status: metav1.ConditionFalse, - Reason: clusterv1.MachineDeploymentDeletingDeletionTimestampNotSetV1Beta2Reason, + Reason: clusterv1.MachineDeploymentNotDeletingV1Beta2Reason, }) return } @@ -442,10 +443,13 @@ func setDeletingCondition(_ context.Context, machineDeployment *clusterv1.Machin // Note: this should not happen or happen for a very short time while the finalizer is removed. message = fmt.Sprintf("Deleting %d MachineSets", len(machineSets)) } + if message == "" { + message = "Deletion completed" + } v1beta2conditions.Set(machineDeployment, metav1.Condition{ Type: clusterv1.MachineDeploymentDeletingV1Beta2Condition, Status: metav1.ConditionTrue, - Reason: clusterv1.MachineDeploymentDeletingDeletionTimestampSetV1Beta2Reason, + Reason: clusterv1.MachineDeploymentDeletingV1Beta2Reason, Message: message, }) } diff --git a/internal/controllers/machinedeployment/machinedeployment_status_test.go b/internal/controllers/machinedeployment/machinedeployment_status_test.go index 1128dc342023..721a6f2fc642 100644 --- a/internal/controllers/machinedeployment/machinedeployment_status_test.go +++ b/internal/controllers/machinedeployment/machinedeployment_status_test.go @@ -395,11 +395,11 @@ func Test_setScalingUpCondition(t *testing.T) { }, }, { - name: "deleting", + name: "deleting, don't show block message when templates are not found", machineDeployment: deletingMachineDeploymentWith3Replicas, machineSets: []*clusterv1.MachineSet{{}, {}, {}}, - bootstrapTemplateNotFound: false, - infrastructureTemplateNotFound: false, + bootstrapTemplateNotFound: true, + infrastructureTemplateNotFound: true, getAndAdoptMachineSetsForDeploymentSucceeded: true, expectCondition: metav1.Condition{ Type: clusterv1.MachineDeploymentScalingUpV1Beta2Condition, @@ -887,7 +887,7 @@ func Test_setRemediatingCondition(t *testing.T) { healthCheckSucceeded := clusterv1.Condition{Type: clusterv1.MachineHealthCheckSucceededV1Beta2Condition, Status: corev1.ConditionTrue} healthCheckNotSucceeded := clusterv1.Condition{Type: clusterv1.MachineHealthCheckSucceededV1Beta2Condition, Status: corev1.ConditionFalse} ownerRemediated := clusterv1.Condition{Type: clusterv1.MachineOwnerRemediatedCondition, Status: corev1.ConditionFalse} - ownerRemediatedV1Beta2 := metav1.Condition{Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineSetMachineRemediationMachineDeletedV1Beta2Reason, Message: "Machine deletionTimestamp set"} + ownerRemediatedV1Beta2 := metav1.Condition{Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineSetMachineRemediationMachineDeletingV1Beta2Reason, Message: "Machine is deleting"} tests := []struct { name string @@ -935,7 +935,7 @@ func Test_setRemediatingCondition(t *testing.T) { Type: clusterv1.MachineDeploymentRemediatingV1Beta2Condition, Status: metav1.ConditionTrue, Reason: clusterv1.MachineDeploymentRemediatingV1Beta2Reason, - Message: "* Machine m3: Machine deletionTimestamp set", + Message: "* Machine m3: Machine is deleting", }, }, { @@ 
diff --git a/internal/controllers/machinedeployment/machinedeployment_status_test.go b/internal/controllers/machinedeployment/machinedeployment_status_test.go
index 1128dc342023..721a6f2fc642 100644
--- a/internal/controllers/machinedeployment/machinedeployment_status_test.go
+++ b/internal/controllers/machinedeployment/machinedeployment_status_test.go
@@ -395,11 +395,11 @@ func Test_setScalingUpCondition(t *testing.T) {
            },
        },
        {
-           name:              "deleting",
+           name:              "deleting, don't show block message when templates are not found",
            machineDeployment: deletingMachineDeploymentWith3Replicas,
            machineSets:       []*clusterv1.MachineSet{{}, {}, {}},
-           bootstrapTemplateNotFound:      false,
-           infrastructureTemplateNotFound: false,
+           bootstrapTemplateNotFound:      true,
+           infrastructureTemplateNotFound: true,
            getAndAdoptMachineSetsForDeploymentSucceeded: true,
            expectCondition: metav1.Condition{
                Type:   clusterv1.MachineDeploymentScalingUpV1Beta2Condition,
@@ -887,7 +887,7 @@ func Test_setRemediatingCondition(t *testing.T) {
    healthCheckSucceeded := clusterv1.Condition{Type: clusterv1.MachineHealthCheckSucceededV1Beta2Condition, Status: corev1.ConditionTrue}
    healthCheckNotSucceeded := clusterv1.Condition{Type: clusterv1.MachineHealthCheckSucceededV1Beta2Condition, Status: corev1.ConditionFalse}
    ownerRemediated := clusterv1.Condition{Type: clusterv1.MachineOwnerRemediatedCondition, Status: corev1.ConditionFalse}
-   ownerRemediatedV1Beta2 := metav1.Condition{Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineSetMachineRemediationMachineDeletedV1Beta2Reason, Message: "Machine deletionTimestamp set"}
+   ownerRemediatedV1Beta2 := metav1.Condition{Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineSetMachineRemediationMachineDeletingV1Beta2Reason, Message: "Machine is deleting"}
 
    tests := []struct {
        name string
@@ -935,7 +935,7 @@ func Test_setRemediatingCondition(t *testing.T) {
                Type:    clusterv1.MachineDeploymentRemediatingV1Beta2Condition,
                Status:  metav1.ConditionTrue,
                Reason:  clusterv1.MachineDeploymentRemediatingV1Beta2Reason,
-               Message: "* Machine m3: Machine deletionTimestamp set",
+               Message: "* Machine m3: Machine is deleting",
            },
        },
        {
@@ -1038,7 +1038,7 @@ func Test_setDeletingCondition(t *testing.T) {
            expectCondition: metav1.Condition{
                Type:   clusterv1.MachineDeploymentDeletingV1Beta2Condition,
                Status: metav1.ConditionFalse,
-               Reason: clusterv1.MachineDeploymentDeletingDeletionTimestampNotSetV1Beta2Reason,
+               Reason: clusterv1.MachineDeploymentNotDeletingV1Beta2Reason,
            },
        },
        {
@@ -1055,7 +1055,7 @@ func Test_setDeletingCondition(t *testing.T) {
            expectCondition: metav1.Condition{
                Type:    clusterv1.MachineDeploymentDeletingV1Beta2Condition,
                Status:  metav1.ConditionTrue,
-               Reason:  clusterv1.MachineDeploymentDeletingDeletionTimestampSetV1Beta2Reason,
+               Reason:  clusterv1.MachineDeploymentDeletingV1Beta2Reason,
                Message: "Deleting 1 Machine",
            },
        },
@@ -1073,7 +1073,7 @@ func Test_setDeletingCondition(t *testing.T) {
            expectCondition: metav1.Condition{
                Type:    clusterv1.MachineDeploymentDeletingV1Beta2Condition,
                Status:  metav1.ConditionTrue,
-               Reason:  clusterv1.MachineDeploymentDeletingDeletionTimestampSetV1Beta2Reason,
+               Reason:  clusterv1.MachineDeploymentDeletingV1Beta2Reason,
                Message: "Deleting 1 Machine and Machine m1 is in deletion since more than 30m",
            },
        },
@@ -1089,7 +1089,7 @@ func Test_setDeletingCondition(t *testing.T) {
            expectCondition: metav1.Condition{
                Type:    clusterv1.MachineDeploymentDeletingV1Beta2Condition,
                Status:  metav1.ConditionTrue,
-               Reason:  clusterv1.MachineDeploymentDeletingDeletionTimestampSetV1Beta2Reason,
+               Reason:  clusterv1.MachineDeploymentDeletingV1Beta2Reason,
                Message: "Deleting 1 MachineSets",
            },
        },
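The updated expectations above are table-driven; a compact, self-contained sketch of how the renamed reason pair can be asserted in that style, using local stand-ins rather than the CAPI test helpers:

// Sketch only: deletingReasonFor condenses the behavior under test.
package main

import (
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// deletingReasonFor returns one reason per state, mirroring the rename.
func deletingReasonFor(deletionTimestamp *metav1.Time) (metav1.ConditionStatus, string) {
	if deletionTimestamp.IsZero() {
		return metav1.ConditionFalse, "NotDeleting"
	}
	return metav1.ConditionTrue, "Deleting"
}

func TestDeletingReasonFor(t *testing.T) {
	now := metav1.Now()
	tests := []struct {
		name       string
		ts         *metav1.Time
		wantStatus metav1.ConditionStatus
		wantReason string
	}{
		{name: "no deletionTimestamp", ts: nil, wantStatus: metav1.ConditionFalse, wantReason: "NotDeleting"},
		{name: "deletionTimestamp set", ts: &now, wantStatus: metav1.ConditionTrue, wantReason: "Deleting"},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			gotStatus, gotReason := deletingReasonFor(tt.ts)
			if gotStatus != tt.wantStatus || gotReason != tt.wantReason {
				t.Errorf("got (%s, %s), want (%s, %s)", gotStatus, gotReason, tt.wantStatus, tt.wantReason)
			}
		})
	}
}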
diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go b/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go
index 4da988900559..4d67be5fa6cf 100644
--- a/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go
+++ b/internal/controllers/machinehealthcheck/machinehealthcheck_targets.go
@@ -103,7 +103,7 @@ func (t *healthCheckTarget) needsRemediation(logger logr.Logger, timeoutForMachi
            Type:    clusterv1.MachineHealthCheckSucceededV1Beta2Condition,
            Status:  metav1.ConditionFalse,
            Reason:  clusterv1.MachineHealthCheckHasRemediateAnnotationV1Beta2Reason,
-           Message: "Marked for remediation via cluster.x-k8s.io/remediate-machine annotation",
+           Message: "Health check failed: marked for remediation via cluster.x-k8s.io/remediate-machine annotation",
        })
        return true, time.Duration(0)
    }
@@ -129,7 +129,7 @@ func (t *healthCheckTarget) needsRemediation(logger logr.Logger, timeoutForMachi
            Type:    clusterv1.MachineHealthCheckSucceededV1Beta2Condition,
            Status:  metav1.ConditionFalse,
            Reason:  clusterv1.MachineHealthCheckNodeDeletedV1Beta2Reason,
-           Message: fmt.Sprintf("Node %s has been deleted", t.Machine.Status.NodeRef.Name),
+           Message: fmt.Sprintf("Health check failed: Node %s has been deleted", t.Machine.Status.NodeRef.Name),
        })
        return true, time.Duration(0)
    }
@@ -190,7 +190,7 @@ func (t *healthCheckTarget) needsRemediation(logger logr.Logger, timeoutForMachi
            Type:    clusterv1.MachineHealthCheckSucceededV1Beta2Condition,
            Status:  metav1.ConditionFalse,
            Reason:  clusterv1.MachineHealthCheckNodeStartupTimeoutV1Beta2Reason,
-           Message: fmt.Sprintf("Node failed to report startup in %s", timeoutDuration),
+           Message: fmt.Sprintf("Health check failed: Node failed to report startup in %s", timeoutDuration),
        })
        return true, time.Duration(0)
    }
@@ -221,7 +221,7 @@ func (t *healthCheckTarget) needsRemediation(logger logr.Logger, timeoutForMachi
            Type:    clusterv1.MachineHealthCheckSucceededV1Beta2Condition,
            Status:  metav1.ConditionFalse,
            Reason:  clusterv1.MachineHealthCheckUnhealthyNodeV1Beta2Reason,
-           Message: fmt.Sprintf("Condition %s on Node is reporting status %s for more than %s", c.Type, c.Status, c.Timeout.Duration.String()),
+           Message: fmt.Sprintf("Health check failed: Condition %s on Node is reporting status %s for more than %s", c.Type, c.Status, c.Timeout.Duration.String()),
        })
        return true, time.Duration(0)
    }
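All four MachineHealthCheck failure messages gain a "Health check failed:" prefix, so the root cause stays legible when the message is copied onto aggregated conditions. A sketch of the convention, assuming a hypothetical constructor loosely modeled on the test helper newFailedHealthCheckV1Beta2Condition (the real code formats each message inline):

// Sketch only: failedHealthCheckCondition is an illustrative helper.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const healthCheckSucceededCondition = "HealthCheckSucceeded"

// failedHealthCheckCondition builds a False HealthCheckSucceeded condition
// whose message always starts with the shared "Health check failed: " prefix.
func failedHealthCheckCondition(reason, messageFormat string, args ...any) metav1.Condition {
	return metav1.Condition{
		Type:    healthCheckSucceededCondition,
		Status:  metav1.ConditionFalse,
		Reason:  reason,
		Message: "Health check failed: " + fmt.Sprintf(messageFormat, args...),
	}
}

func main() {
	c := failedHealthCheckCondition("NodeDeleted", "Node %s has been deleted", "node-1")
	fmt.Println(c.Message) // Health check failed: Node node-1 has been deleted
}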
diff --git a/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go b/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go
index a318a605fdac..ecb525d229ac 100644
--- a/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go
+++ b/internal/controllers/machinehealthcheck/machinehealthcheck_targets_test.go
@@ -270,7 +270,7 @@ func TestHealthCheckTargets(t *testing.T) {
        Node: nil,
    }
    nodeNotYetStartedTarget1200sCondition := newFailedHealthCheckCondition(clusterv1.NodeStartupTimeoutReason, "Node failed to report startup in %s", timeoutForMachineToHaveNode)
-   nodeNotYetStartedTarget1200sV1Beta2Condition := newFailedHealthCheckV1Beta2Condition(clusterv1.MachineHealthCheckNodeStartupTimeoutV1Beta2Reason, "Node failed to report startup in %s", timeoutForMachineToHaveNode)
+   nodeNotYetStartedTarget1200sV1Beta2Condition := newFailedHealthCheckV1Beta2Condition(clusterv1.MachineHealthCheckNodeStartupTimeoutV1Beta2Reason, "Health check failed: Node failed to report startup in %s", timeoutForMachineToHaveNode)
 
    testMachineCreated400s := testMachine.DeepCopy()
    nowMinus400s := metav1.NewTime(time.Now().Add(-400 * time.Second))
@@ -292,7 +292,7 @@ func TestHealthCheckTargets(t *testing.T) {
        nodeMissing: true,
    }
    nodeGoneAwayCondition := newFailedHealthCheckCondition(clusterv1.NodeNotFoundReason, "")
-   nodeGoneAwayV1Beta2Condition := newFailedHealthCheckV1Beta2Condition(clusterv1.MachineHealthCheckNodeDeletedV1Beta2Reason, "Node %s has been deleted", testMachine.Status.NodeRef.Name)
+   nodeGoneAwayV1Beta2Condition := newFailedHealthCheckV1Beta2Condition(clusterv1.MachineHealthCheckNodeDeletedV1Beta2Reason, "Health check failed: Node %s has been deleted", testMachine.Status.NodeRef.Name)
 
    // Create a test MHC without conditions
    testMHCEmptyConditions := &clusterv1.MachineHealthCheck{
@@ -372,7 +372,7 @@ func TestHealthCheckTargets(t *testing.T) {
        nodeMissing: false,
    }
    nodeUnknown400Condition := newFailedHealthCheckCondition(clusterv1.UnhealthyNodeConditionReason, "Condition Ready on node is reporting status Unknown for more than %s", timeoutForUnhealthyConditions)
-   nodeUnknown400V1Beta2Condition := newFailedHealthCheckV1Beta2Condition(clusterv1.MachineHealthCheckUnhealthyNodeV1Beta2Reason, "Condition Ready on Node is reporting status Unknown for more than %s", timeoutForUnhealthyConditions)
+   nodeUnknown400V1Beta2Condition := newFailedHealthCheckV1Beta2Condition(clusterv1.MachineHealthCheckUnhealthyNodeV1Beta2Reason, "Health check failed: Condition Ready on Node is reporting status Unknown for more than %s", timeoutForUnhealthyConditions)
 
    // Target for when a node is healthy
    testNodeHealthy := newTestNode("node1")
@@ -411,7 +411,7 @@ func TestHealthCheckTargets(t *testing.T) {
    // Target for when the machine has the remediate machine annotation
    const annotationRemediationMsg = "Marked for remediation via remediate-machine annotation"
-   const annotationRemediationV1Beta2Msg = "Marked for remediation via cluster.x-k8s.io/remediate-machine annotation"
+   const annotationRemediationV1Beta2Msg = "Health check failed: marked for remediation via cluster.x-k8s.io/remediate-machine annotation"
    testMachineAnnotationRemediation := testMachine.DeepCopy()
    testMachineAnnotationRemediation.Annotations = map[string]string{clusterv1.RemediateMachineAnnotation: ""}
    machineAnnotationRemediation := healthCheckTarget{
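The 400s/1200s fixtures above exercise the startup-timeout path: a machine created 400 seconds ago with a 1200-second timeout must not be remediated yet. A simplified sketch of that check; the real needsRemediation also inspects node conditions and, per the `return true, time.Duration(0)` lines above, returns how long to wait before re-checking:

// Sketch only: nodeStartupTimedOut is an illustrative stand-in.
package main

import (
	"fmt"
	"time"
)

// nodeStartupTimedOut reports whether a machine without a node has exceeded
// its startup timeout and, if not, how long remains until it would.
func nodeStartupTimedOut(machineCreated time.Time, timeout time.Duration, now time.Time) (bool, time.Duration) {
	elapsed := now.Sub(machineCreated)
	if elapsed >= timeout {
		return true, 0 // remediate now
	}
	return false, timeout - elapsed // re-check once the timeout could elapse
}

func main() {
	now := time.Now()
	timedOut, retryAfter := nodeStartupTimedOut(now.Add(-400*time.Second), 1200*time.Second, now)
	fmt.Printf("timedOut=%v retryAfter=%s\n", timedOut, retryAfter) // timedOut=false retryAfter=13m20s
}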
diff --git a/internal/controllers/machineset/machineset_controller.go b/internal/controllers/machineset/machineset_controller.go
index 26457fd30b61..05a2b63544a6 100644
--- a/internal/controllers/machineset/machineset_controller.go
+++ b/internal/controllers/machineset/machineset_controller.go
@@ -1367,7 +1367,7 @@ func (r *Reconciler) reconcileUnhealthyMachines(ctx context.Context, s *scope) (
    // reports that remediation has been completed and the Machine has been deleted.
    for _, m := range machines {
        if !m.DeletionTimestamp.IsZero() {
-           // TODO: Check for Status: False and Reason: MachineSetMachineRemediationMachineDeletedV1Beta2Reason
+           // TODO: Check for Status: False and Reason: MachineSetMachineRemediationMachineDeletingV1Beta2Reason
            // instead when starting to use v1beta2 conditions for control flow.
            if conditions.IsTrue(m, clusterv1.MachineOwnerRemediatedCondition) {
                // Remediation for this Machine has been triggered by this controller but it is still in flight,
@@ -1455,8 +1455,8 @@ func (r *Reconciler) reconcileUnhealthyMachines(ctx context.Context, s *scope) (
        if err := patchMachineConditions(ctx, r.Client, machinesToRemediate, metav1.Condition{
            Type:    clusterv1.MachineOwnerRemediatedV1Beta2Condition,
            Status:  metav1.ConditionFalse,
-           Reason:  clusterv1.MachineSetMachineRemediationMachineDeletedV1Beta2Reason,
-           Message: "Machine deletionTimestamp set",
+           Reason:  clusterv1.MachineSetMachineRemediationMachineDeletingV1Beta2Reason,
+           Message: "Machine is deleting",
        }, &clusterv1.Condition{
            Type:   clusterv1.MachineOwnerRemediatedCondition,
            Status: corev1.ConditionTrue,
diff --git a/internal/controllers/machineset/machineset_controller_status.go b/internal/controllers/machineset/machineset_controller_status.go
index 60633856b603..bc383f5d1320 100644
--- a/internal/controllers/machineset/machineset_controller_status.go
+++ b/internal/controllers/machineset/machineset_controller_status.go
@@ -125,7 +125,8 @@ func setScalingUpCondition(_ context.Context, ms *clusterv1.MachineSet, machines
    if currentReplicas >= desiredReplicas {
        var message string
-       if missingReferencesMessage != "" {
+       // Only surface this message if the MachineSet is not deleting.
+       if ms.DeletionTimestamp.IsZero() && missingReferencesMessage != "" {
            message = fmt.Sprintf("Scaling up would be blocked because %s", missingReferencesMessage)
        }
        v1beta2conditions.Set(ms, metav1.Condition{
@@ -370,7 +371,7 @@ func setDeletingCondition(_ context.Context, machineSet *clusterv1.MachineSet, m
        v1beta2conditions.Set(machineSet, metav1.Condition{
            Type:   clusterv1.MachineSetDeletingV1Beta2Condition,
            Status: metav1.ConditionFalse,
-           Reason: clusterv1.MachineSetDeletingDeletionTimestampNotSetV1Beta2Reason,
+           Reason: clusterv1.MachineSetNotDeletingV1Beta2Reason,
        })
        return
    }
@@ -387,10 +388,13 @@ func setDeletingCondition(_ context.Context, machineSet *clusterv1.MachineSet, m
            message += fmt.Sprintf(" and %s", staleMessage)
        }
    }
+   if message == "" {
+       message = "Deletion completed"
+   }
    v1beta2conditions.Set(machineSet, metav1.Condition{
        Type:    clusterv1.MachineSetDeletingV1Beta2Condition,
        Status:  metav1.ConditionTrue,
-       Reason:  clusterv1.MachineSetDeletingDeletionTimestampSetV1Beta2Reason,
+       Reason:  clusterv1.MachineSetDeletingV1Beta2Reason,
        Message: message,
    })
}
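In reconcileUnhealthyMachines above, the MachineSet marks each machine it deletes with the renamed MachineDeleting remediation reason and skips machines whose deletion is already in flight. A hypothetical, heavily simplified sketch of that bookkeeping with local types and no client calls (the real skip is driven by the v1beta1 MachineOwnerRemediated condition, as the TODO notes):

// Sketch only: local stand-ins for Machine and the remediation loop.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type machine struct {
	name              string
	deletionTimestamp *metav1.Time
	conditions        []metav1.Condition
}

// remediate deletes unhealthy machines and records why on each one.
func remediate(machines []*machine) {
	for _, m := range machines {
		if !m.deletionTimestamp.IsZero() {
			// Remediation already triggered; wait for deletion to complete.
			continue
		}
		now := metav1.Now()
		m.deletionTimestamp = &now // stand-in for client.Delete
		m.conditions = append(m.conditions, metav1.Condition{
			Type:    "OwnerRemediated",
			Status:  metav1.ConditionFalse,
			Reason:  "MachineDeleting", // was "MachineDeleted" before the rename
			Message: "Machine is deleting",
		})
	}
}

func main() {
	m := &machine{name: "m3"}
	remediate([]*machine{m})
	fmt.Printf("%s: %s - %s\n", m.name, m.conditions[0].Reason, m.conditions[0].Message)
}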
diff --git a/internal/controllers/machineset/machineset_controller_status_test.go b/internal/controllers/machineset/machineset_controller_status_test.go
index e6f9fbed735d..043a7c57a05b 100644
--- a/internal/controllers/machineset/machineset_controller_status_test.go
+++ b/internal/controllers/machineset/machineset_controller_status_test.go
@@ -322,11 +322,11 @@ func Test_setScalingUpCondition(t *testing.T) {
            },
        },
        {
-           name:     "deleting",
+           name:     "deleting, don't show block message when templates are not found",
            ms:       deletingMachineSetWith3Replicas,
            machines: []*clusterv1.Machine{{}, {}, {}},
-           bootstrapObjectNotFound:      false,
-           infrastructureObjectNotFound: false,
+           bootstrapObjectNotFound:      true,
+           infrastructureObjectNotFound: true,
            getAndAdoptMachinesForMachineSetSucceeded: true,
            expectCondition: metav1.Condition{
                Type:   clusterv1.MachineSetScalingUpV1Beta2Condition,
@@ -796,7 +796,7 @@ func Test_setRemediatingCondition(t *testing.T) {
    healthCheckSucceeded := clusterv1.Condition{Type: clusterv1.MachineHealthCheckSucceededV1Beta2Condition, Status: corev1.ConditionTrue}
    healthCheckNotSucceeded := clusterv1.Condition{Type: clusterv1.MachineHealthCheckSucceededV1Beta2Condition, Status: corev1.ConditionFalse}
    ownerRemediated := clusterv1.Condition{Type: clusterv1.MachineOwnerRemediatedCondition, Status: corev1.ConditionFalse}
-   ownerRemediatedV1Beta2 := metav1.Condition{Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineSetMachineRemediationMachineDeletedV1Beta2Reason, Message: "Machine deletionTimestamp set"}
+   ownerRemediatedV1Beta2 := metav1.Condition{Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineSetMachineRemediationMachineDeletingV1Beta2Reason, Message: "Machine is deleting"}
    ownerRemediatedWaitingForRemediationV1Beta2 := metav1.Condition{Type: clusterv1.MachineOwnerRemediatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: clusterv1.MachineOwnerRemediatedWaitingForRemediationV1Beta2Reason, Message: "KubeadmControlPlane ns1/cp1 is upgrading (\"ControlPlaneIsStable\" preflight check failed)"}
 
    tests := []struct {
@@ -845,7 +845,7 @@ func Test_setRemediatingCondition(t *testing.T) {
                Type:    clusterv1.MachineSetRemediatingV1Beta2Condition,
                Status:  metav1.ConditionTrue,
                Reason:  clusterv1.MachineSetRemediatingV1Beta2Reason,
-               Message: "* Machine m3: Machine deletionTimestamp set",
+               Message: "* Machine m3: Machine is deleting",
            },
        },
        {
@@ -863,7 +863,7 @@ func Test_setRemediatingCondition(t *testing.T) {
                Type:    clusterv1.MachineSetRemediatingV1Beta2Condition,
                Status:  metav1.ConditionTrue,
                Reason:  clusterv1.MachineSetRemediatingV1Beta2Reason,
-               Message: "* Machine m3: Machine deletionTimestamp set\n" +
+               Message: "* Machine m3: Machine is deleting\n" +
                    "* Machine m4: KubeadmControlPlane ns1/cp1 is upgrading (\"ControlPlaneIsStable\" preflight check failed)",
            },
        },
@@ -946,7 +946,7 @@ func Test_setDeletingCondition(t *testing.T) {
            expectCondition: metav1.Condition{
                Type:   clusterv1.MachineSetDeletingV1Beta2Condition,
                Status: metav1.ConditionFalse,
-               Reason: clusterv1.MachineSetDeletingDeletionTimestampNotSetV1Beta2Reason,
+               Reason: clusterv1.MachineSetNotDeletingV1Beta2Reason,
            },
        },
        {
@@ -959,7 +959,7 @@ func Test_setDeletingCondition(t *testing.T) {
            expectCondition: metav1.Condition{
                Type:    clusterv1.MachineSetDeletingV1Beta2Condition,
                Status:  metav1.ConditionTrue,
-               Reason:  clusterv1.MachineSetDeletingDeletionTimestampSetV1Beta2Reason,
+               Reason:  clusterv1.MachineSetDeletingV1Beta2Reason,
                Message: "Deleting 1 Machine",
            },
        },
@@ -975,7 +975,7 @@ func Test_setDeletingCondition(t *testing.T) {
            expectCondition: metav1.Condition{
                Type:    clusterv1.MachineSetDeletingV1Beta2Condition,
                Status:  metav1.ConditionTrue,
-               Reason:  clusterv1.MachineSetDeletingDeletionTimestampSetV1Beta2Reason,
+               Reason:  clusterv1.MachineSetDeletingV1Beta2Reason,
                Message: "Deleting 3 Machines and Machines m1, m2 are in deletion since more than 30m",
            },
        },
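The expected "* Machine m3: ..." strings above come from aggregating each machine's OwnerRemediated message into a bulleted list on the owner's Remediating condition. An illustrative stand-in for that aggregation, not the CAPI implementation:

// Sketch only: aggregateRemediationMessages mimics the bullet-list format.
package main

import (
	"fmt"
	"strings"
)

// aggregateRemediationMessages renders one "* Machine <name>: <message>"
// bullet per machine, in the given order, joined by newlines.
func aggregateRemediationMessages(perMachine map[string]string, order []string) string {
	lines := make([]string, 0, len(order))
	for _, name := range order {
		lines = append(lines, fmt.Sprintf("* Machine %s: %s", name, perMachine[name]))
	}
	return strings.Join(lines, "\n")
}

func main() {
	msg := aggregateRemediationMessages(map[string]string{
		"m3": "Machine is deleting",
		"m4": `KubeadmControlPlane ns1/cp1 is upgrading ("ControlPlaneIsStable" preflight check failed)`,
	}, []string{"m3", "m4"})
	fmt.Println(msg)
}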
diff --git a/internal/controllers/machineset/machineset_controller_test.go b/internal/controllers/machineset/machineset_controller_test.go
index 22ea76b7697d..1e937ac0459e 100644
--- a/internal/controllers/machineset/machineset_controller_test.go
+++ b/internal/controllers/machineset/machineset_controller_test.go
@@ -1596,8 +1596,8 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) {
        g.Expect(*c).To(v1beta2conditions.MatchCondition(metav1.Condition{
            Type:    clusterv1.MachineOwnerRemediatedV1Beta2Condition,
            Status:  metav1.ConditionFalse,
-           Reason:  clusterv1.MachineSetMachineRemediationMachineDeletedV1Beta2Reason,
-           Message: "Machine deletionTimestamp set",
+           Reason:  clusterv1.MachineSetMachineRemediationMachineDeletingV1Beta2Reason,
+           Message: "Machine is deleting",
        }, v1beta2conditions.IgnoreLastTransitionTime(true)))
 
        // Verify the healthy machine is not deleted and does not have the OwnerRemediated condition.
@@ -1939,8 +1939,8 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) {
        g.Expect(*c).To(v1beta2conditions.MatchCondition(metav1.Condition{
            Type:    clusterv1.MachineOwnerRemediatedV1Beta2Condition,
            Status:  metav1.ConditionFalse,
-           Reason:  clusterv1.MachineSetMachineRemediationMachineDeletedV1Beta2Reason,
-           Message: "Machine deletionTimestamp set",
+           Reason:  clusterv1.MachineSetMachineRemediationMachineDeletingV1Beta2Reason,
+           Message: "Machine is deleting",
        }, v1beta2conditions.IgnoreLastTransitionTime(true)))
 
        // Verify (again) the healthy machine is not deleted and does not have the OwnerRemediated condition.
@@ -2208,8 +2208,8 @@ func TestMachineSetReconciler_reconcileUnhealthyMachines(t *testing.T) {
        g.Expect(*c).To(v1beta2conditions.MatchCondition(metav1.Condition{
            Type:    clusterv1.MachineOwnerRemediatedV1Beta2Condition,
            Status:  metav1.ConditionFalse,
-           Reason:  clusterv1.MachineSetMachineRemediationMachineDeletedV1Beta2Reason,
-           Message: "Machine deletionTimestamp set",
+           Reason:  clusterv1.MachineSetMachineRemediationMachineDeletingV1Beta2Reason,
+           Message: "Machine is deleting",
        }, v1beta2conditions.IgnoreLastTransitionTime(true)))
 
        g.Expect(m.DeletionTimestamp).ToNot(BeZero())
diff --git a/internal/controllers/topology/cluster/conditions.go b/internal/controllers/topology/cluster/conditions.go
index 1670b5d9bbdf..586606f27640 100644
--- a/internal/controllers/topology/cluster/conditions.go
+++ b/internal/controllers/topology/cluster/conditions.go
@@ -86,7 +86,7 @@ func (r *Reconciler) reconcileTopologyReconciledCondition(s *scope.Scope, cluste
        v1beta2conditions.Set(cluster, metav1.Condition{
            Type:   clusterv1.ClusterTopologyReconciledV1Beta2Condition,
            Status: metav1.ConditionFalse,
-           Reason: clusterv1.ClusterTopologyReconciledDeletionTimestampSetV1Beta2Reason,
+           Reason: clusterv1.ClusterTopologyReconciledDeletingV1Beta2Reason,
        })
        return nil
    }
diff --git a/internal/controllers/topology/cluster/conditions_test.go b/internal/controllers/topology/cluster/conditions_test.go
index f215de684c64..f2a507a95f8b 100644
--- a/internal/controllers/topology/cluster/conditions_test.go
+++ b/internal/controllers/topology/cluster/conditions_test.go
@@ -1018,7 +1018,7 @@ func TestReconcileTopologyReconciledCondition(t *testing.T) {
            wantConditionReason:  clusterv1.DeletedReason,
            wantConditionMessage: "",
            wantV1Beta2ConditionStatus:  metav1.ConditionFalse,
-           wantV1Beta2ConditionReason:  clusterv1.ClusterTopologyReconciledDeletionTimestampSetV1Beta2Reason,
+           wantV1Beta2ConditionReason:  clusterv1.ClusterTopologyReconciledDeletingV1Beta2Reason,
            wantV1Beta2ConditionMessage: "",
        },
    }
diff --git a/test/e2e/node_drain.go b/test/e2e/node_drain.go
index 75729da31f06..232bcfeb9367 100644
--- a/test/e2e/node_drain.go
+++ b/test/e2e/node_drain.go
@@ -559,7 +559,7 @@ func verifyNodeDrainsBlockedAndUnblock(ctx context.Context, input verifyNodeDrai
            for _, messageSubstring := range input.CPConditionMessageSubstrings {
                var re = regexp.MustCompile(messageSubstring)
                match := re.MatchString(condition.Message)
-               g.Expect(match).To(BeTrue(), fmt.Sprintf("message substring '%s' does not match %s", condition.Message, messageSubstring))
+               g.Expect(match).To(BeTrue(), fmt.Sprintf("message '%s' does not match regexp %s", condition.Message, messageSubstring))
            }
 
            // Verify evictable Pod was evicted and terminated (i.e. phase is succeeded)
@@ -587,7 +587,7 @@ func verifyNodeDrainsBlockedAndUnblock(ctx context.Context, input verifyNodeDrai
            for _, messageSubstring := range input.MDConditionMessageSubstrings[md.Name] {
                var re = regexp.MustCompile(messageSubstring)
                match := re.MatchString(condition.Message)
-               g.Expect(match).To(BeTrue(), fmt.Sprintf("message substring '%s' does not match %s", condition.Message, messageSubstring))
+               g.Expect(match).To(BeTrue(), fmt.Sprintf("message '%s' does not match regexp %s", condition.Message, messageSubstring))
            }
 
            // Verify evictable Pod was evicted and terminated (i.e. phase is succeeded)
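The e2e change above only corrects the failure text: the arguments were already (message, regexp), but the old template read as if the first argument were the substring. A Gomega-free sketch of the corrected assertion, with hypothetical sample data for illustration:

// Sketch only: verifyMessageSubstrings mirrors the fixed assertion wording.
package main

import (
	"fmt"
	"regexp"
)

// verifyMessageSubstrings checks the condition message against every expected
// regexp and reports the message and the regexp the right way around.
func verifyMessageSubstrings(message string, substrings []string) error {
	for _, messageSubstring := range substrings {
		re := regexp.MustCompile(messageSubstring)
		if !re.MatchString(message) {
			return fmt.Errorf("message %q does not match regexp %s", message, messageSubstring)
		}
	}
	return nil
}

func main() {
	err := verifyMessageSubstrings(
		"Drain not completed yet: pod evictable-pod is blocking", // hypothetical condition message
		[]string{`Drain not completed yet`, `pod .* is blocking`}, // hypothetical expected regexps
	)
	fmt.Println(err) // <nil>
}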