diff --git a/internal/controllers/machine/machine_controller_status.go b/internal/controllers/machine/machine_controller_status.go index a35786c0d427..d063402584bd 100644 --- a/internal/controllers/machine/machine_controller_status.go +++ b/internal/controllers/machine/machine_controller_status.go @@ -94,7 +94,7 @@ func setBootstrapReadyCondition(_ context.Context, machine *clusterv1.Machine, b v1beta2conditions.FallbackCondition{ Status: v1beta2conditions.BoolToStatus(machine.Status.BootstrapReady), Reason: clusterv1.MachineBootstrapConfigReadyNoReasonReportedV1Beta2Reason, - Message: fmt.Sprintf("%s status.ready is %t", machine.Spec.Bootstrap.ConfigRef.Kind, machine.Status.BootstrapReady), + Message: bootstrapConfigReadyFallBackMessage(machine.Spec.Bootstrap.ConfigRef.Kind, machine.Status.BootstrapReady), }, ); err != nil { v1beta2conditions.Set(machine, metav1.Condition{ @@ -142,6 +142,10 @@ func setBootstrapReadyCondition(_ context.Context, machine *clusterv1.Machine, b }) } +func bootstrapConfigReadyFallBackMessage(kind string, ready bool) string { + return fmt.Sprintf("%s status.ready is %t", kind, ready) +} + func setInfrastructureReadyCondition(_ context.Context, machine *clusterv1.Machine, infraMachine *unstructured.Unstructured, infraMachineIsNotFound bool) { if infraMachine != nil { if err := v1beta2conditions.SetMirrorConditionFromUnstructured( @@ -150,7 +154,7 @@ func setInfrastructureReadyCondition(_ context.Context, machine *clusterv1.Machi v1beta2conditions.FallbackCondition{ Status: v1beta2conditions.BoolToStatus(machine.Status.InfrastructureReady), Reason: clusterv1.MachineInfrastructureReadyNoReasonReportedV1Beta2Reason, - Message: fmt.Sprintf("%s status.ready is %t", machine.Spec.InfrastructureRef.Kind, machine.Status.InfrastructureReady), + Message: infrastructureReadyFallBackMessage(machine.Spec.InfrastructureRef.Kind, machine.Status.InfrastructureReady), }, ); err != nil { v1beta2conditions.Set(machine, metav1.Condition{ @@ -220,6 +224,10 @@ 
func setInfrastructureReadyCondition(_ context.Context, machine *clusterv1.Machi }) } +func infrastructureReadyFallBackMessage(kind string, ready bool) string { + return fmt.Sprintf("%s status.ready is %t", kind, ready) +} + func setNodeHealthyAndReadyConditions(ctx context.Context, machine *clusterv1.Machine, node *corev1.Node, nodeGetErr error, lastProbeSuccessTime time.Time, remoteConditionsGracePeriod time.Duration) { if time.Since(lastProbeSuccessTime) > remoteConditionsGracePeriod { var msg string @@ -591,11 +599,27 @@ func setReadyCondition(ctx context.Context, machine *clusterv1.Machine) { }, } + // Add overrides for conditions we don't want to surface in the Ready condition with slightly different messages, + // mostly to improve when we will aggregate the Ready condition from many machines on MS, MD etc. + var overrideConditions v1beta2conditions.OverrideConditions if !machine.DeletionTimestamp.IsZero() { - summaryOpts = append(summaryOpts, v1beta2conditions.OverrideConditions{calculateDeletingConditionForSummary(machine)}) + overrideConditions = append(overrideConditions, calculateDeletingConditionForSummary(machine)) + } + + if infrastructureReadyCondition := calculateInfrastructureReadyForSummary(machine); infrastructureReadyCondition != nil { + overrideConditions = append(overrideConditions, *infrastructureReadyCondition) + } + + if bootstrapReadyCondition := calculateBootstrapConfigReadyForSummary(machine); bootstrapReadyCondition != nil { + overrideConditions = append(overrideConditions, *bootstrapReadyCondition) + } + + if len(overrideConditions) > 0 { + summaryOpts = append(summaryOpts, overrideConditions) } readyCondition, err := v1beta2conditions.NewSummaryCondition(machine, clusterv1.MachineReadyV1Beta2Condition, summaryOpts...) + if err != nil { // Note, this could only happen if we hit edge cases in computing the summary, which should not happen due to the fact // that we are passing a non empty list of ForConditionTypes. 
@@ -654,6 +678,57 @@ func calculateDeletingConditionForSummary(machine *clusterv1.Machine) v1beta2con } } +func calculateInfrastructureReadyForSummary(machine *clusterv1.Machine) *v1beta2conditions.ConditionWithOwnerInfo { + infrastructureReadyCondition := v1beta2conditions.Get(machine, clusterv1.MachineInfrastructureReadyV1Beta2Condition) + + if infrastructureReadyCondition == nil { + return nil + } + + message := infrastructureReadyCondition.Message + if infrastructureReadyCondition.Status == metav1.ConditionTrue && infrastructureReadyCondition.Message == infrastructureReadyFallBackMessage(machine.Spec.InfrastructureRef.Kind, machine.Status.InfrastructureReady) { + message = "" + } + + return &v1beta2conditions.ConditionWithOwnerInfo{ + OwnerResource: v1beta2conditions.ConditionOwnerInfo{ + Kind: "Machine", + Name: machine.Name, + }, + Condition: metav1.Condition{ + Type: infrastructureReadyCondition.Type, + Status: infrastructureReadyCondition.Status, + Reason: infrastructureReadyCondition.Reason, + Message: message, + }, + } +} + +func calculateBootstrapConfigReadyForSummary(machine *clusterv1.Machine) *v1beta2conditions.ConditionWithOwnerInfo { + bootstrapConfigReadyCondition := v1beta2conditions.Get(machine, clusterv1.MachineBootstrapConfigReadyV1Beta2Condition) + if bootstrapConfigReadyCondition == nil { + return nil + } + + message := bootstrapConfigReadyCondition.Message + if bootstrapConfigReadyCondition.Status == metav1.ConditionTrue && machine.Spec.Bootstrap.ConfigRef != nil && bootstrapConfigReadyCondition.Message == bootstrapConfigReadyFallBackMessage(machine.Spec.Bootstrap.ConfigRef.Kind, machine.Status.BootstrapReady) { + message = "" + } + + return &v1beta2conditions.ConditionWithOwnerInfo{ + OwnerResource: v1beta2conditions.ConditionOwnerInfo{ + Kind: "Machine", + Name: machine.Name, + }, + Condition: metav1.Condition{ + Type: bootstrapConfigReadyCondition.Type, + Status: bootstrapConfigReadyCondition.Status, + Reason: 
bootstrapConfigReadyCondition.Reason, + Message: message, + }, + } +} + func setAvailableCondition(ctx context.Context, machine *clusterv1.Machine) { log := ctrl.LoggerFrom(ctx) readyCondition := v1beta2conditions.Get(machine, clusterv1.MachineReadyV1Beta2Condition) diff --git a/internal/controllers/machine/machine_controller_status_test.go b/internal/controllers/machine/machine_controller_status_test.go index b1774d091e55..a8cfa271d94e 100644 --- a/internal/controllers/machine/machine_controller_status_test.go +++ b/internal/controllers/machine/machine_controller_status_test.go @@ -1124,6 +1124,60 @@ func TestSetReadyCondition(t *testing.T) { Message: "Deleting: Machine deletion in progress, stage: WaitingForPreDrainHook", }, }, + { + name: "Drops messages from BootstrapConfigReady and Infrastructure ready when using fallback to fields", + machine: &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "machine-test", + Namespace: metav1.NamespaceDefault, + }, + Spec: clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: &corev1.ObjectReference{ + Kind: "KubeadmConfig", + }, + }, + InfrastructureRef: corev1.ObjectReference{ + Kind: "AWSMachine", + }, + }, + Status: clusterv1.MachineStatus{ + InfrastructureReady: true, + BootstrapReady: true, + V1Beta2: &clusterv1.MachineV1Beta2Status{ + Conditions: []metav1.Condition{ + { + Type: clusterv1.MachineBootstrapConfigReadyV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: "Foo", + Message: bootstrapConfigReadyFallBackMessage("KubeadmConfig", true), + }, + { + Type: clusterv1.InfrastructureReadyV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: "Bar", + Message: infrastructureReadyFallBackMessage("AWSMachine", true), + }, + { + Type: clusterv1.MachineNodeHealthyV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: "AllGood", + }, + { + Type: clusterv1.MachineDeletingV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: 
clusterv1.MachineDeletingDeletionTimestampNotSetV1Beta2Reason, + }, + }, + }, + }, + }, + expectCondition: metav1.Condition{ + Type: clusterv1.MachineReadyV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: v1beta2conditions.MultipleInfoReportedReason, + }, + }, { name: "Aggregates Ready condition correctly while the machine is deleting (waiting for Node drain)", machine: &clusterv1.Machine{