Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[release-0.18] Remove experimentalSelfManagedUpgrade flag for management cluster controller #6889

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
22 changes: 1 addition & 21 deletions controllers/cluster_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -50,13 +50,6 @@ type ClusterReconciler struct {
clusterValidator ClusterValidator
packagesClient PackagesClient
machineHealthCheck MachineHealthCheckReconciler

// experimentalSelfManagedUpgrade enables management cluster full upgrades.
// The default behavior for management cluster only reconciles the worker nodes.
// When this is enabled, the controller will handle management clusters in the same
// way as workload clusters: it will reconcile CP, etcd and workers.
// Only intended for internal testing.
experimentalSelfManagedUpgrade bool
}

// PackagesClient handles curated packages operations from within the cluster
Expand Down Expand Up @@ -109,14 +102,6 @@ func NewClusterReconciler(client client.Client, registry ProviderClusterReconcil
return c
}

// WithExperimentalSelfManagedClusterUpgrades allows to enable experimental upgrades for self
// managed clusters.
func WithExperimentalSelfManagedClusterUpgrades(exp bool) ClusterReconcilerOption {
return func(c *ClusterReconciler) {
c.experimentalSelfManagedUpgrade = exp
}
}

// SetupWithManager sets up the controller with the Manager.
func (r *ClusterReconciler) SetupWithManager(mgr ctrl.Manager, log logr.Logger) error {
childObjectHandler := handlers.ChildObjectToClusters(log)
Expand Down Expand Up @@ -322,12 +307,7 @@ func (r *ClusterReconciler) reconcile(ctx context.Context, log logr.Logger, clus
return reconcileResult.ToCtrlResult(), nil
}

if cluster.IsSelfManaged() && !r.experimentalSelfManagedUpgrade {
// self-managed clusters should only reconcile worker nodes to avoid control plane instability
reconcileResult, err = clusterProviderReconciler.ReconcileWorkerNodes(ctx, log, cluster)
} else {
reconcileResult, err = clusterProviderReconciler.Reconcile(ctx, log, cluster)
}
reconcileResult, err = clusterProviderReconciler.Reconcile(ctx, log, cluster)

if err != nil {
return ctrl.Result{}, err
Expand Down
55 changes: 4 additions & 51 deletions controllers/cluster_controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -182,7 +182,7 @@ func TestClusterReconcilerReconcileSelfManagedCluster(t *testing.T) {
registry := newRegistryMock(providerReconciler)
c := fake.NewClientBuilder().WithRuntimeObjects(selfManagedCluster, kcp).Build()
mockPkgs := mocks.NewMockPackagesClient(controller)
providerReconciler.EXPECT().ReconcileWorkerNodes(ctx, gomock.AssignableToTypeOf(logr.Logger{}), sameName(selfManagedCluster))
providerReconciler.EXPECT().Reconcile(ctx, gomock.AssignableToTypeOf(logr.Logger{}), sameName(selfManagedCluster))
mhcReconciler.EXPECT().Reconcile(ctx, gomock.AssignableToTypeOf(logr.Logger{}), sameName(selfManagedCluster)).Return(nil)

r := controllers.NewClusterReconciler(c, registry, iam, clusterValidator, mockPkgs, mhcReconciler)
Expand Down Expand Up @@ -622,7 +622,7 @@ func TestClusterReconcilerReconcileSelfManagedClusterConditions(t *testing.T) {
iam.EXPECT().EnsureCASecret(logCtx, gomock.AssignableToTypeOf(logr.Logger{}), sameName(config.Cluster)).Return(controller.Result{}, nil)
iam.EXPECT().Reconcile(logCtx, gomock.AssignableToTypeOf(logr.Logger{}), sameName(config.Cluster)).Return(controller.Result{}, nil)

providerReconciler.EXPECT().ReconcileWorkerNodes(gomock.Any(), gomock.Any(), gomock.Any()).Times(1)
providerReconciler.EXPECT().Reconcile(gomock.Any(), gomock.Any(), gomock.Any()).Times(1)
mhcReconciler.EXPECT().Reconcile(logCtx, gomock.AssignableToTypeOf(logr.Logger{}), sameName(config.Cluster)).Return(nil)

r := controllers.NewClusterReconciler(testClient, registry, iam, clusterValidator, mockPkgs, mhcReconciler)
Expand Down Expand Up @@ -774,11 +774,11 @@ func TestClusterReconcilerReconcileGenerations(t *testing.T) {
if tt.wantReconciliation {
iam.EXPECT().EnsureCASecret(ctx, gomock.AssignableToTypeOf(logr.Logger{}), gomock.AssignableToTypeOf(config.Cluster)).Return(controller.Result{}, nil)
iam.EXPECT().Reconcile(ctx, gomock.AssignableToTypeOf(logr.Logger{}), gomock.AssignableToTypeOf(config.Cluster)).Return(controller.Result{}, nil)
providerReconciler.EXPECT().ReconcileWorkerNodes(ctx, gomock.AssignableToTypeOf(logr.Logger{}), sameName(config.Cluster)).Times(1)
providerReconciler.EXPECT().Reconcile(ctx, gomock.AssignableToTypeOf(logr.Logger{}), sameName(config.Cluster)).Times(1)
mhcReconciler.EXPECT().Reconcile(ctx, gomock.AssignableToTypeOf(logr.Logger{}), sameName(config.Cluster)).Return(nil)

} else {
providerReconciler.EXPECT().ReconcileWorkerNodes(gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
providerReconciler.EXPECT().Reconcile(gomock.Any(), gomock.Any(), gomock.Any()).Times(0)
}

r := controllers.NewClusterReconciler(client, registry, iam, clusterValidator, mockPkgs, mhcReconciler)
Expand All @@ -802,53 +802,6 @@ func TestClusterReconcilerReconcileGenerations(t *testing.T) {
}
}

// TestClusterReconcilerReconcileSelfManagedClusterWithExperimentalUpgrades verifies that,
// when the experimental self-managed-upgrades option is enabled, reconciling a self-managed
// (management) cluster invokes the provider's full Reconcile rather than the worker-only path.
func TestClusterReconcilerReconcileSelfManagedClusterWithExperimentalUpgrades(t *testing.T) {
	gm := NewWithT(t)
	ctx := context.Background()
	devVersion := test.DevEksaVersion()

	// Management-cluster fixture: bundles ref, dev EKS-A version, Cilium CNI,
	// and a status marking generation 1 as already reconciled.
	managementCluster := &anywherev1.Cluster{
		ObjectMeta: metav1.ObjectMeta{
			Name: "my-management-cluster",
		},
		Spec: anywherev1.ClusterSpec{
			BundlesRef: &anywherev1.BundlesRef{
				Name: "my-bundles-ref",
			},
			EksaVersion: &devVersion,
			ClusterNetwork: anywherev1.ClusterNetwork{
				CNIConfig: &anywherev1.CNIConfig{
					Cilium: &anywherev1.CiliumConfig{},
				},
			},
		},
		Status: anywherev1.ClusterStatus{
			ReconciledGeneration: 1,
		},
	}

	controlPlane := testKubeadmControlPlaneFromCluster(managementCluster)

	mockCtrl := gomock.NewController(t)
	provider := mocks.NewMockProviderClusterReconciler(mockCtrl)
	iamReconciler := mocks.NewMockAWSIamConfigReconciler(mockCtrl)
	validator := mocks.NewMockClusterValidator(mockCtrl)
	reconcilerRegistry := newRegistryMock(provider)
	fakeClient := fake.NewClientBuilder().WithRuntimeObjects(managementCluster, controlPlane).Build()
	packagesClient := mocks.NewMockPackagesClient(mockCtrl)
	mhc := mocks.NewMockMachineHealthCheckReconciler(mockCtrl)

	// With the experimental flag on, the provider's full Reconcile must be
	// called (instead of ReconcileWorkerNodes) for this self-managed cluster.
	provider.EXPECT().Reconcile(ctx, gomock.AssignableToTypeOf(logr.Logger{}), sameName(managementCluster))
	mhc.EXPECT().Reconcile(ctx, gomock.AssignableToTypeOf(logr.Logger{}), sameName(managementCluster)).Return(nil)

	reconciler := controllers.NewClusterReconciler(fakeClient, reconcilerRegistry, iamReconciler, validator, packagesClient, mhc,
		controllers.WithExperimentalSelfManagedClusterUpgrades(true),
	)
	result, err := reconciler.Reconcile(ctx, clusterRequest(managementCluster))
	gm.Expect(err).ToNot(HaveOccurred())
	gm.Expect(result).To(Equal(ctrl.Result{}))
}

func TestClusterReconcilerReconcilePausedCluster(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
Expand Down