resolve merge conflicts
Signed-off-by: Rahul Ganesh <[email protected]>
Rahul Ganesh committed Jan 23, 2024
1 parent 6a09a2e · commit a518b9d
Showing 4 changed files with 71 additions and 106 deletions.
22 changes: 22 additions & 0 deletions controllers/controlplaneupgrade_controller.go
@@ -26,6 +26,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kerrors "k8s.io/apimachinery/pkg/util/errors"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/util/patch"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -214,6 +215,19 @@ func (r *ControlPlaneUpgradeReconciler) updateStatus(ctx context.Context, log lo
return fmt.Errorf("getting node upgrader for machine %s: %v", machineRef.Name, err)
}
if nodeUpgrade.Status.Completed {
machine, err := getCapiMachine(ctx, r.client, nodeUpgrade)
if err != nil {
return err
}
machinePatchHelper, err := patch.NewHelper(machine, r.client)
if err != nil {
return err
}
log.Info("Updating K8s version in machine", "Machine", machine.Name)
machine.Spec.Version = &nodeUpgrade.Spec.KubernetesVersion
if err := machinePatchHelper.Patch(ctx, machine); err != nil {
return fmt.Errorf("updating status for machine %s: %v", machine.Name, err)
}
nodesUpgradeCompleted++
nodesUpgradeRequired--
}
@@ -224,3 +238,11 @@ func (r *ControlPlaneUpgradeReconciler) updateStatus(ctx context.Context, log lo
cpUpgrade.Status.Ready = nodesUpgradeRequired == 0
return nil
}

func getCapiMachine(ctx context.Context, client client.Client, nodeUpgrade *anywherev1.NodeUpgrade) (*clusterv1.Machine, error) {
machine := &clusterv1.Machine{}
if err := client.Get(ctx, GetNamespacedNameType(nodeUpgrade.Spec.Machine.Name, nodeUpgrade.Spec.Machine.Namespace), machine); err != nil {
return nil, fmt.Errorf("getting machine %s: %v", nodeUpgrade.Spec.Machine.Name, err)
}
return machine, nil
}
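
For context, the block added to updateStatus above follows the standard cluster-api patch-helper pattern: take a snapshot of the object when the helper is created, mutate the in-memory copy, then let Patch compute and apply the diff. A minimal sketch of that pattern, using only the upstream cluster-api and controller-runtime packages already present in the imports (an illustration, not code from this commit):

package example

import (
	"context"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/patch"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// bumpMachineVersion sketches the snapshot -> mutate -> patch flow used in updateStatus above.
func bumpMachineVersion(ctx context.Context, c client.Client, machine *clusterv1.Machine, version string) error {
	// NewHelper records the object's current state so Patch can diff against it later.
	helper, err := patch.NewHelper(machine, c)
	if err != nil {
		return err
	}
	// Mutate only the in-memory object; nothing is sent to the API server yet.
	machine.Spec.Version = &version
	// Patch computes the delta from the snapshot and issues the corresponding patch calls.
	return helper.Patch(ctx, machine)
}

Because the helper diffs against the snapshot, only Spec.Version is sent in the patch, which minimizes conflicts with other controllers reconciling the same Machine.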
28 changes: 28 additions & 0 deletions controllers/controlplaneupgrade_controller_test.go
@@ -3,6 +3,7 @@ package controllers_test
import (
"context"
"fmt"
"strings"
"testing"

. "github.com/onsi/gomega"
@@ -191,6 +192,33 @@ func TestCPUpgradeObjectDoesNotExist(t *testing.T) {
g.Expect(err).To(MatchError("controlplaneupgrades.anywhere.eks.amazonaws.com \"cp-upgrade-request\" not found"))
}

func TestCPUpgradeReconcileUpdateCapiMachineVersion(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()

cluster, machines, nodes, cpUpgrade, nodeUpgrades := getObjectsForCPUpgradeTest()
for i := range nodeUpgrades {
nodeUpgrades[i].Name = fmt.Sprintf("%s-node-upgrader", machines[i].Name)
nodeUpgrades[i].Status = anywherev1.NodeUpgradeStatus{
Completed: true,
}
}
objs := []runtime.Object{cluster, machines[0], machines[1], nodes[0], nodes[1], cpUpgrade, nodeUpgrades[0], nodeUpgrades[1]}
nodeUpgrades[0].Status.Completed = true
client := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build()
r := controllers.NewControlPlaneUpgradeReconciler(client)

req := cpUpgradeRequest(cpUpgrade)
_, err := r.Reconcile(ctx, req)
g.Expect(err).ToNot(HaveOccurred())
machine := &clusterv1.Machine{}
err = client.Get(ctx, types.NamespacedName{Name: nodeUpgrades[0].Spec.Machine.Name, Namespace: "eksa-system"}, machine)
g.Expect(err).ToNot(HaveOccurred())
if !strings.Contains(*machine.Spec.Version, "v1.28.3-eks-1-28-9") {
t.Fatalf("unexpected k8s version in capi machine: %s", *machine.Spec.Version)
}
}
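
Since the test already uses Gomega via NewWithT, the manual strings.Contains check with t.Fatalf above could equivalently be written as an assertion, for example:

g.Expect(*machine.Spec.Version).To(ContainSubstring("v1.28.3-eks-1-28-9"))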

func getObjectsForCPUpgradeTest() (*clusterv1.Cluster, []*clusterv1.Machine, []*corev1.Node, *anywherev1.ControlPlaneUpgrade, []*anywherev1.NodeUpgrade) {
cluster := generateCluster()
node1 := generateNode()
22 changes: 5 additions & 17 deletions controllers/nodeupgrade_controller.go
@@ -100,18 +100,13 @@ func (r *NodeUpgradeReconciler) Reconcile(ctx context.Context, req ctrl.Request)
}

// Initialize the patch helper
nodeUpgradePatchHelper, err := patch.NewHelper(nodeUpgrade, r.client)
if err != nil {
return ctrl.Result{}, err
}

capiMachinePatchHelper, err := patch.NewHelper(machineToBeUpgraded, r.client)
patchHelper, err := patch.NewHelper(nodeUpgrade, r.client)
if err != nil {
return ctrl.Result{}, err
}

defer func() {
err := r.updateStatus(ctx, log, rClient, nodeUpgrade, machineToBeUpgraded)
err := r.updateStatus(ctx, log, rClient, nodeUpgrade, machineToBeUpgraded.Status.NodeRef.Name)
if err != nil {
reterr = kerrors.NewAggregate([]error{reterr, err})
}
@@ -127,13 +122,10 @@ func (r *NodeUpgradeReconciler) Reconcile(ctx context.Context, req ctrl.Request)
if reterr == nil {
patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{})
}
if err := patchNodeUpgrade(ctx, nodeUpgradePatchHelper, *nodeUpgrade, patchOpts...); err != nil {
if err := patchNodeUpgrade(ctx, patchHelper, *nodeUpgrade, patchOpts...); err != nil {
reterr = kerrors.NewAggregate([]error{reterr, err})
}

if err := capiMachinePatchHelper.Patch(ctx, machineToBeUpgraded); err != nil {
reterr = kerrors.NewAggregate([]error{reterr, err})
}
// Only requeue if we are not already re-queueing and the NodeUpgrade ready condition is false.
// We do this to be able to update the status continuously until the NodeUpgrade becomes ready,
// since there might be changes in state of the world that don't trigger reconciliation requests
@@ -250,7 +242,7 @@ func (r *NodeUpgradeReconciler) reconcileDelete(ctx context.Context, log logr.Lo
return ctrl.Result{}, nil
}

func (r *NodeUpgradeReconciler) updateStatus(ctx context.Context, log logr.Logger, remoteClient client.Client, nodeUpgrade *anywherev1.NodeUpgrade, machine *clusterv1.Machine) error {
func (r *NodeUpgradeReconciler) updateStatus(ctx context.Context, log logr.Logger, remoteClient client.Client, nodeUpgrade *anywherev1.NodeUpgrade, nodeName string) error {
// When NodeUpgrade is fully deleted, we do not need to update the status. Without this check
// the subsequent patch operations would fail if the status is updated after it is fully deleted.
if !nodeUpgrade.DeletionTimestamp.IsZero() && len(nodeUpgrade.GetFinalizers()) == 0 {
@@ -260,7 +252,7 @@ func (r *NodeUpgradeReconciler) updateStatus(ctx context.Context, log logr.Logge

log.Info("Updating NodeUpgrade status")

pod, err := getUpgraderPod(ctx, remoteClient, machine.Status.NodeRef.Name)
pod, err := getUpgraderPod(ctx, remoteClient, nodeName)
if err != nil {
if apierrors.IsNotFound(err) {
markAllConditionsFalse(nodeUpgrade, podDNEMessage, clusterv1.ConditionSeverityInfo)
@@ -273,10 +265,6 @@ func (r *NodeUpgradeReconciler) updateStatus(ctx context.Context, log logr.Logge
conditions.MarkTrue(nodeUpgrade, anywherev1.UpgraderPodCreated)
updateComponentsConditions(pod, nodeUpgrade)

if nodeUpgrade.Status.Completed {
log.Info("Updating machine status", "Machine", machine.Name)
machine.Spec.Version = &nodeUpgrade.Spec.KubernetesVersion
}
// Always update the readyCondition by summarizing the state of other conditions.
conditions.SetSummary(nodeUpgrade,
conditions.WithConditions(
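
With this change the NodeUpgrade reconciler no longer patches the CAPI Machine at all: it only reports upgrade progress, and the Kubernetes version bump is now performed by the ControlPlaneUpgrade reconciler once NodeUpgrade.Status.Completed is true. That is why updateStatus now takes just the node name: it only needs it to locate the upgrader pod. The repository's getUpgraderPod is not shown in this diff; a hypothetical sketch of that lookup, following the pod-naming and namespace conventions visible in the tests below:

// Hypothetical sketch only; assumes the same imports as nodeupgrade_controller.go
// (context, corev1, apimachinery types, controller-runtime client).
func getUpgraderPodSketch(ctx context.Context, remoteClient client.Client, nodeName string) (*corev1.Pod, error) {
	pod := &corev1.Pod{}
	// The tests reference pods named "<node>-node-upgrader" in the "eksa-system" namespace.
	key := types.NamespacedName{Name: nodeName + "-node-upgrader", Namespace: "eksa-system"}
	if err := remoteClient.Get(ctx, key, pod); err != nil {
		return nil, err // updateStatus treats IsNotFound as "pod not created yet"
	}
	return pod, nil
}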
105 changes: 16 additions & 89 deletions controllers/nodeupgrade_controller_test.go
@@ -2,7 +2,6 @@ package controllers_test

import (
"context"
"strings"
"testing"

"github.com/golang/mock/gomock"
@@ -27,13 +26,15 @@ func TestNodeUpgradeReconcilerReconcileFirstControlPlane(t *testing.T) {
ctrl := gomock.NewController(t)
clientRegistry := mocks.NewMockRemoteClientRegistry(ctrl)

cluster, machine, node, nodeUpgrade, configMap := getObjectsForNodeUpgradeTest()
cluster, machine, node, nodeUpgrade, configMap := getObjectsForNodeUpgradeTest()
nodeUpgrade.Spec.FirstNodeToBeUpgraded = true
nodeUpgrade.Spec.EtcdVersion = ptr.String("v3.5.9-eks-1-28-9")
node.Labels = map[string]string{
"node-role.kubernetes.io/control-plane": "true",
}
client := fake.NewClientBuilder().WithRuntimeObjects(cluster, machine, node, nodeUpgrade, configMap).Build()
client := fake.NewClientBuilder().WithRuntimeObjects(cluster, machine, node, nodeUpgrade, configMap).Build()

clientRegistry.EXPECT().GetClient(ctx, types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}).Return(client, nil)

Expand All @@ -53,11 +54,13 @@ func TestNodeUpgradeReconcilerReconcileNextControlPlane(t *testing.T) {
ctrl := gomock.NewController(t)
clientRegistry := mocks.NewMockRemoteClientRegistry(ctrl)

cluster, machine, node, nodeUpgrade, configMap := getObjectsForNodeUpgradeTest()
cluster, machine, node, nodeUpgrade, configMap := getObjectsForNodeUpgradeTest()
node.Labels = map[string]string{
"node-role.kubernetes.io/control-plane": "true",
}
client := fake.NewClientBuilder().WithRuntimeObjects(cluster, machine, node, nodeUpgrade, configMap).Build()
client := fake.NewClientBuilder().WithRuntimeObjects(cluster, machine, node, nodeUpgrade, configMap).Build()

clientRegistry.EXPECT().GetClient(ctx, types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}).Return(client, nil)

Expand All @@ -71,99 +74,14 @@ func TestNodeUpgradeReconcilerReconcileNextControlPlane(t *testing.T) {
g.Expect(err).ToNot(HaveOccurred())
}

func TestNodeUpgradeReconcilerReconcileMachineStatus(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
ctrl := gomock.NewController(t)
clientRegistry := mocks.NewMockRemoteClientRegistry(ctrl)

cluster, machine, node, nodeUpgrade := getObjectsForNodeUpgradeTest()
node.Labels = map[string]string{
"node-role.kubernetes.io/control-plane": "true",
}
client := fake.NewClientBuilder().WithRuntimeObjects(cluster, machine, node, nodeUpgrade).Build()

clientRegistry.EXPECT().GetClient(ctx, types.NamespacedName{Name: cluster.Name, Namespace: cluster.Namespace}).Return(client, nil).Times(2)

r := controllers.NewNodeUpgradeReconciler(client, clientRegistry)
// nodeUpgrade.Status.Completed = true
req := nodeUpgradeRequest(nodeUpgrade)
_, err := r.Reconcile(ctx, req)
g.Expect(err).ToNot(HaveOccurred())

pod := &corev1.Pod{}
g.Expect(client.Get(ctx, types.NamespacedName{Name: upgrader.PodName(node.Name), Namespace: "eksa-system"}, pod)).To(Succeed())

statuses := []corev1.ContainerStatus{
{
Name: upgrader.CopierContainerName,
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 0,
},
},
},
{
Name: upgrader.ContainerdUpgraderContainerName,
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 0,
},
},
},
{
Name: upgrader.CNIPluginsUpgraderContainerName,
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 0,
},
},
},
{
Name: upgrader.KubeadmUpgraderContainerName,
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 0,
},
},
},
{
Name: upgrader.KubeletUpgradeContainerName,
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 0,
},
},
},
{
Name: upgrader.PostUpgradeContainerName,
State: corev1.ContainerState{
Terminated: &corev1.ContainerStateTerminated{
ExitCode: 0,
},
},
},
}

pod.Status.InitContainerStatuses = append(pod.Status.InitContainerStatuses, statuses...)
g.Expect(client.Update(ctx, pod)).To(Succeed())

_, err = r.Reconcile(ctx, req)
g.Expect(err).ToNot(HaveOccurred())
machine = &clusterv1.Machine{}
err = client.Get(ctx, types.NamespacedName{Name: nodeUpgrade.Spec.Machine.Name, Namespace: "eksa-system"}, machine)
g.Expect(err).ToNot(HaveOccurred())
if !strings.Contains(*machine.Spec.Version, "v1.28.3-eks-1-28-9") {
t.Fatalf("unexpected k8s version in capi machine: %s", *machine.Spec.Version)
}
}

func TestNodeUpgradeReconcilerReconcileWorker(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
ctrl := gomock.NewController(t)
clientRegistry := mocks.NewMockRemoteClientRegistry(ctrl)

cluster, machine, node, nodeUpgrade, configMap := getObjectsForNodeUpgradeTest()
client := fake.NewClientBuilder().WithRuntimeObjects(cluster, machine, node, nodeUpgrade, configMap).Build()
cluster, machine, node, nodeUpgrade, configMap := getObjectsForNodeUpgradeTest()
client := fake.NewClientBuilder().WithRuntimeObjects(cluster, machine, node, nodeUpgrade, configMap).Build()

@@ -185,6 +103,8 @@ func TestNodeUpgradeReconcilerReconcileCreateUpgraderPodState(t *testing.T) {
ctrl := gomock.NewController(t)
clientRegistry := mocks.NewMockRemoteClientRegistry(ctrl)

cluster, machine, node, nodeUpgrade, configMap := getObjectsForNodeUpgradeTest()
client := fake.NewClientBuilder().WithRuntimeObjects(cluster, machine, node, nodeUpgrade, configMap).Build()
cluster, machine, node, nodeUpgrade, configMap := getObjectsForNodeUpgradeTest()
client := fake.NewClientBuilder().WithRuntimeObjects(cluster, machine, node, nodeUpgrade, configMap).Build()

@@ -246,6 +166,8 @@ func TestNodeUpgradeReconcilerReconcileDelete(t *testing.T) {
ctrl := gomock.NewController(t)
clientRegistry := mocks.NewMockRemoteClientRegistry(ctrl)

cluster, machine, node, nodeUpgrade, configMap := getObjectsForNodeUpgradeTest()
client := fake.NewClientBuilder().WithRuntimeObjects(cluster, machine, node, nodeUpgrade, configMap).Build()
cluster, machine, node, nodeUpgrade, configMap := getObjectsForNodeUpgradeTest()
client := fake.NewClientBuilder().WithRuntimeObjects(cluster, machine, node, nodeUpgrade, configMap).Build()

@@ -277,6 +199,8 @@ func TestNodeUpgradeReconcilerReconcileDeleteUpgraderPodAlreadyDeleted(t *testin
ctrl := gomock.NewController(t)
clientRegistry := mocks.NewMockRemoteClientRegistry(ctrl)

cluster, machine, node, nodeUpgrade, configMap := getObjectsForNodeUpgradeTest()
client := fake.NewClientBuilder().WithRuntimeObjects(cluster, machine, node, nodeUpgrade, configMap).Build()
cluster, machine, node, nodeUpgrade, configMap := getObjectsForNodeUpgradeTest()
client := fake.NewClientBuilder().WithRuntimeObjects(cluster, machine, node, nodeUpgrade, configMap).Build()

@@ -305,13 +229,16 @@ func TestNodeUpgradeReconcilerReconcileDeleteUpgraderPodAlreadyDeleted(t *testin
g.Expect(err).To(MatchError("pods \"node01-node-upgrader\" not found"))
}

func getObjectsForNodeUpgradeTest() (*clusterv1.Cluster, *clusterv1.Machine, *corev1.Node, *anywherev1.NodeUpgrade, *corev1.ConfigMap) {
func getObjectsForNodeUpgradeTest() (*clusterv1.Cluster, *clusterv1.Machine, *corev1.Node, *anywherev1.NodeUpgrade, *corev1.ConfigMap) {

[Check failure on line 233 in controllers/nodeupgrade_controller_test.go (GitHub Actions / build): expected '(', found getObjectsForNodeUpgradeTest]
cluster := generateCluster()
node := generateNode()
machine := generateMachine(cluster, node)
nodeUpgrade := generateNodeUpgrade(machine)
configMap := generateConfigMap()
return cluster, machine, node, nodeUpgrade, configMap
configMap := generateConfigMap()
return cluster, machine, node, nodeUpgrade, configMap
}
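
The build annotation above points at the duplicated declaration left behind by the conflict resolution. Once de-duplicated, the helper would read roughly as follows, sketched only from the generate* helpers already referenced in this file:

func getObjectsForNodeUpgradeTest() (*clusterv1.Cluster, *clusterv1.Machine, *corev1.Node, *anywherev1.NodeUpgrade, *corev1.ConfigMap) {
	// Build a minimal fixture set: cluster, node, the CAPI machine backing the node,
	// the NodeUpgrade request for that machine, and the in-place-upgrade ConfigMap.
	cluster := generateCluster()
	node := generateNode()
	machine := generateMachine(cluster, node)
	nodeUpgrade := generateNodeUpgrade(machine)
	configMap := generateConfigMap()
	return cluster, machine, node, nodeUpgrade, configMap
}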

func nodeUpgradeRequest(nodeUpgrade *anywherev1.NodeUpgrade) reconcile.Request {
@@ -380,6 +307,6 @@ func generateConfigMap() *corev1.ConfigMap {
Name: "in-place-upgrade",
Namespace: "eksa-system",
},
Data: map[string]string{"v1.28.1": "test"},
Data: map[string]string{"v1.28.3-eks-1-28-9": "test"},
}
}
