Implement metadata propagation on the workload cluster nodes
Signed-off-by: Danil Grigorev <[email protected]>
Danil-Grigorev committed Oct 13, 2023
1 parent 8ae2ee5 commit 8f9ac3b
Showing 11 changed files with 214 additions and 124 deletions.
8 changes: 8 additions & 0 deletions bootstrap/api/v1alpha1/rke2config_types.go
@@ -68,6 +68,14 @@ type RKE2AgentConfig struct {
//+optional
NodeLabels []string `json:"nodeLabels,omitempty"`

// NodeAnnotations are annotations to apply to created nodes after the bootstrap phase.
//
// Unfortunately it is not possible to apply annotations via kubelet
// using current bootstrap configurations.
// Issue: https://github.com/kubernetes/kubernetes/issues/108046
//+optional
NodeAnnotations map[string]string `json:"nodeAnnotations,omitempty"`

// NodeTaints Registering kubelet with set of taints.
//+optional
NodeTaints []string `json:"nodeTaints,omitempty"`
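Because kubelet can register labels and taints at startup but not annotations (kubernetes/kubernetes#108046), the new NodeAnnotations field is only stored in the bootstrap configuration and the Node objects are patched afterwards by the provider. A minimal usage sketch, not part of this diff — the import path and values are illustrative assumptions:

package example

import (
	bootstrapv1 "github.com/rancher-sandbox/cluster-api-provider-rke2/bootstrap/api/v1alpha1"
)

// A hypothetical agent config combining kubelet-propagated metadata (labels,
// taints) with annotations that must be patched onto the Node post bootstrap.
var agentConfig = bootstrapv1.RKE2AgentConfig{
	NodeLabels: []string{"node-role.kubernetes.io/worker=true"},
	NodeTaints: []string{"dedicated=gpu:NoSchedule"},
	// Annotations cannot be set via kubelet flags, so the control plane
	// controller applies them to the Node objects after bootstrap.
	NodeAnnotations: map[string]string{"example.com/team": "platform"},
}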
7 changes: 7 additions & 0 deletions bootstrap/api/v1alpha1/zz_generated.deepcopy.go

Some generated files are not rendered by default.

@@ -190,6 +190,14 @@ spec:
an additional port 1 less than this port will also be used for
the apiserver client load-balancer (default: 6444).'
type: integer
nodeAnnotations:
additionalProperties:
type: string
description: "NodeAnnotations Aplying annotations on created
nodes post bootstrap phase \n Unfortunately it is not possible
to apply annotations via kubelet using current bootstrap configurations.
Issue: https://github.com/kubernetes/kubernetes/issues/108046"
type: object
nodeLabels:
description: NodeLabels Registering and starting kubelet with
set of labels.
@@ -214,6 +214,14 @@ spec:
port will also be used for the apiserver client load-balancer
(default: 6444).'
type: integer
nodeAnnotations:
additionalProperties:
type: string
description: "NodeAnnotations Aplying annotations on
created nodes post bootstrap phase \n Unfortunately
it is not possible to apply annotations via kubelet
using current bootstrap configurations. Issue: https://github.com/kubernetes/kubernetes/issues/108046"
type: object
nodeLabels:
description: NodeLabels Registering and starting kubelet
with set of labels.
2 changes: 1 addition & 1 deletion bootstrap/config/default/manager_image_patch.yaml
@@ -7,5 +7,5 @@ spec:
template:
spec:
containers:
- image: ghcr.io/rancher-sandbox/cluster-api-provider-rke2-bootstrap:dev
- image: ghcr.io/rancher-sandbox/cluster-api-provider-rke2-bootstrap-amd64:dev
name: manager
7 changes: 7 additions & 0 deletions controlplane/api/v1alpha1/condition_consts.go
@@ -54,9 +54,16 @@ const (
// is up to date. When this condition is false, the RKE2ControlPlane is executing a rolling upgrade.
MachinesSpecUpToDateCondition clusterv1.ConditionType = "MachinesSpecUpToDate"

// NodeMetadataUpToDate documents that the metadata of the nodes controlled by the RKE2 machines
// is up to date. When this condition is false, the node metadata is not propagated.
NodeMetadataUpToDate clusterv1.ConditionType = "NodeMetadataUpToDate"

// MachineAgentHealthyCondition reports a machine's rke2 agent's operational status.
MachineAgentHealthyCondition clusterv1.ConditionType = "AgentHealthy"

// NodePatchFailedReason (Severity=Error) documents the reason why the Node object could not be patched.
NodePatchFailedReason = "NodePatchFailed"

// PodInspectionFailedReason documents a failure in inspecting the pod status.
PodInspectionFailedReason = "PodInspectionFailed"

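The new condition and reason are meant to be recorded on each control plane Machine when node metadata is propagated (or when the Node patch fails); the actual call sites are not in this hunk. A hedged sketch using cluster-api's conditions helpers, with import paths assumed:

package rke2

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/conditions"

	controlplanev1 "github.com/rancher-sandbox/cluster-api-provider-rke2/controlplane/api/v1alpha1"
)

// markNodeMetadata is illustrative only: it records the outcome of a Node patch
// on the owning Machine so a later PatchMachines call can persist it.
func markNodeMetadata(machine *clusterv1.Machine, patchErr error) {
	if patchErr != nil {
		conditions.MarkFalse(machine, controlplanev1.NodeMetadataUpToDate,
			controlplanev1.NodePatchFailedReason, clusterv1.ConditionSeverityError,
			"failed to patch node: %v", patchErr)

		return
	}

	conditions.MarkTrue(machine, controlplanev1.NodeMetadataUpToDate)
}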
@@ -190,6 +190,14 @@ spec:
an additional port 1 less than this port will also be used for
the apiserver client load-balancer (default: 6444).'
type: integer
nodeAnnotations:
additionalProperties:
type: string
description: "NodeAnnotations Aplying annotations on created
nodes post bootstrap phase \n Unfortunately it is not possible
to apply annotations via kubelet using current bootstrap configurations.
Issue: https://github.com/kubernetes/kubernetes/issues/108046"
type: object
nodeLabels:
description: NodeLabels Registering and starting kubelet with
set of labels.
31 changes: 23 additions & 8 deletions controlplane/internal/controllers/rke2controlplane_controller.go
@@ -649,7 +649,7 @@ func (r *RKE2ControlPlaneReconciler) reconcileKubeconfig(

// reconcileControlPlaneConditions is responsible for reconciling conditions reporting the status of static pods and
// the status of the etcd cluster.
func (r *RKE2ControlPlaneReconciler) reconcileControlPlaneConditions(ctx context.Context, controlPlane *rke2.ControlPlane) (ctrl.Result, error) {
func (r *RKE2ControlPlaneReconciler) reconcileControlPlaneConditions(ctx context.Context, controlPlane *rke2.ControlPlane) (res ctrl.Result, retErr error) {

logger := log.FromContext(ctx)

readyCPMachines := controlPlane.Machines.Filter(collections.IsReady())
@@ -679,17 +679,32 @@ func (r *RKE2ControlPlaneReconciler) reconcileControlPlaneConditions(ctx context

workloadCluster, err := r.managementCluster.GetWorkloadCluster(ctx, util.ObjectKey(controlPlane.Cluster))
if err != nil {
logger.Info("Unable to get Workload cluster")
logger.Error(err, "Unable to get Workload cluster")

return ctrl.Result{}, errors.Wrap(err, "cannot get remote client to workload cluster")
}

defer func() {
// Always attempt to Patch the Machine conditions after each reconcile.
if err := controlPlane.PatchMachines(ctx); err != nil {
retErr = kerrors.NewAggregate([]error{retErr, err})
}
}()

if err := workloadCluster.InitWorkload(ctx, controlPlane); err != nil {
logger.Error(err, "Unable to initialize workload cluster")

return ctrl.Result{}, err
}

// Update conditions status
workloadCluster.UpdateAgentConditions(ctx, controlPlane)
workloadCluster.UpdateEtcdConditions(ctx, controlPlane)
workloadCluster.UpdateAgentConditions(controlPlane)
workloadCluster.UpdateEtcdConditions(controlPlane)

// Patch nodes metadata
if err := workloadCluster.UpdateNodeMetadata(ctx, controlPlane); err != nil {
logger.Error(err, "Unable to update node metadata")

// Patch machines with the updated conditions.
if err := controlPlane.PatchMachines(ctx); err != nil {
return ctrl.Result{}, err
}

@@ -721,11 +736,11 @@ func (r *RKE2ControlPlaneReconciler) upgradeControlPlane(
return ctrl.Result{}, err
}

status, err := workloadCluster.ClusterStatus(ctx)
if err != nil {
if err := workloadCluster.InitWorkload(ctx, controlPlane); err != nil {
return ctrl.Result{}, err
}

status := workloadCluster.ClusterStatus()
if status.Nodes <= *rcp.Spec.Replicas {
// scaleUp ensures that we don't continue scaling up while waiting for Machines to have NodeRefs
return r.scaleUpControlPlane(ctx, cluster, rcp, controlPlane)
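UpdateNodeMetadata itself is not shown in this excerpt. Conceptually it resolves each Machine's Node in the workload cluster, merges the metadata from the bootstrap configuration onto it, and records the outcome in the NodeMetadataUpToDate condition that the deferred PatchMachines call persists. A rough sketch under those assumptions — the method name, signature, and annotation source here are hypothetical, not the commit's actual implementation:

package rke2

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/conditions"
	ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"

	controlplanev1 "github.com/rancher-sandbox/cluster-api-provider-rke2/controlplane/api/v1alpha1"
)

// updateNodeMetadataSketch is a hypothetical stand-in for UpdateNodeMetadata:
// it patches the desired annotations onto every Node backing a control plane
// Machine and flags the Machine when the patch fails.
func (w *Workload) updateNodeMetadataSketch(ctx context.Context, cp *ControlPlane, annotations map[string]string) error {
	for _, machine := range cp.Machines {
		if machine.Status.NodeRef == nil {
			// The node has not registered yet; it is retried on the next reconcile.
			continue
		}

		node := &corev1.Node{}
		if err := w.Client.Get(ctx, ctrlclient.ObjectKey{Name: machine.Status.NodeRef.Name}, node); err != nil {
			conditions.MarkFalse(machine, controlplanev1.NodeMetadataUpToDate,
				controlplanev1.NodePatchFailedReason, clusterv1.ConditionSeverityError, "%v", err)

			return err
		}

		patchBase := ctrlclient.MergeFrom(node.DeepCopy())

		if node.Annotations == nil {
			node.Annotations = map[string]string{}
		}

		for k, v := range annotations {
			node.Annotations[k] = v
		}

		if err := w.Client.Patch(ctx, node, patchBase); err != nil {
			conditions.MarkFalse(machine, controlplanev1.NodeMetadataUpToDate,
				controlplanev1.NodePatchFailedReason, clusterv1.ConditionSeverityError, "%v", err)

			return err
		}

		conditions.MarkTrue(machine, controlplanev1.NodeMetadataUpToDate)
	}

	return nil
}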
1 change: 1 addition & 0 deletions pkg/rke2/control_plane.go
@@ -352,6 +352,7 @@ func (c *ControlPlane) PatchMachines(ctx context.Context) error {
if err := helper.Patch(ctx, machine, patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{
controlplanev1.MachineAgentHealthyCondition,
controlplanev1.MachineEtcdMemberHealthyCondition,
controlplanev1.NodeMetadataUpToDate,
}}); err != nil {
errList = append(errList, errors.Wrapf(err, "failed to patch machine %s", machine.Name))
}
4 changes: 1 addition & 3 deletions pkg/rke2/management_cluster.go
@@ -113,7 +113,5 @@ func (m *Management) GetWorkloadCluster(ctx context.Context, clusterKey ctrlclie
return nil, &RemoteClusterConnectionError{Name: clusterKey.String(), Err: err}
}

return &Workload{
Client: c,
}, nil
return NewWorkload(c), nil
}
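The constructor replaces the inline struct literal so the Workload helper is always built in one place, which the new InitWorkload / UpdateNodeMetadata flow can rely on. Its body is not shown in this excerpt; under the assumption that it simply mirrors the removed literal, it would look roughly like:

// Sketch (assumption, not from this diff): NewWorkload wraps the remote
// cluster client so any later initialization stays centralized.
func NewWorkload(c ctrlclient.Client) *Workload {
	return &Workload{Client: c}
}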
