Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[release-0.17] Waiting for control plane to be fully upgraded #7143

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
36 changes: 29 additions & 7 deletions pkg/awsiamauth/reconciler/reconciler_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
logf "sigs.k8s.io/controller-runtime/pkg/log"
Expand Down Expand Up @@ -164,7 +165,7 @@ func TestReconcileBuildClusterSpecError(t *testing.T) {
g.Expect(result).To(Equal(controller.Result{}))
}

func TestReconcileCAPIClusterNotFound(t *testing.T) {
func TestReconcileKCPObjectNotFound(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
ctrl := gomock.NewController(t)
Expand All @@ -181,6 +182,7 @@ func TestReconcileCAPIClusterNotFound(t *testing.T) {
_ = releasev1.AddToScheme(scheme)
_ = eksdv1.AddToScheme(scheme)
_ = clusterv1.AddToScheme(scheme)
_ = controlplanev1.AddToScheme(scheme)
cl := cb.WithScheme(scheme).WithRuntimeObjects(objs...).Build()
version := test.DevEksaVersion()

Expand Down Expand Up @@ -233,22 +235,32 @@ func TestReconcileRemoteGetClientError(t *testing.T) {
EksaVersion: &version,
},
}
capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) {
c.Name = cluster.Name
kcp := test.KubeadmControlPlane(func(kcp *controlplanev1.KubeadmControlPlane) {
kcp.Name = cluster.Name
kcp.Status = controlplanev1.KubeadmControlPlaneStatus{
Conditions: clusterv1.Conditions{
{
Type: clusterapi.ReadyCondition,
Status: corev1.ConditionTrue,
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
}
})
sec := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: awsiamauth.CASecretName(cluster.Name),
Namespace: constants.EksaSystemNamespace,
},
}
objs := []runtime.Object{bundle, eksdRelease, capiCluster, sec, eksaRelease}
objs := []runtime.Object{bundle, eksdRelease, kcp, sec, eksaRelease}
cb := fake.NewClientBuilder()
scheme := runtime.NewScheme()
_ = releasev1.AddToScheme(scheme)
_ = eksdv1.AddToScheme(scheme)
_ = clusterv1.AddToScheme(scheme)
_ = corev1.AddToScheme(scheme)
_ = controlplanev1.AddToScheme(scheme)
cl := cb.WithScheme(scheme).WithRuntimeObjects(objs...).Build()

remoteClientRegistry.EXPECT().GetClient(context.Background(), gomock.AssignableToTypeOf(client.ObjectKey{})).Return(nil, errors.New("client error"))
Expand Down Expand Up @@ -297,8 +309,17 @@ func TestReconcileConfigMapNotFoundApplyError(t *testing.T) {
EksaVersion: &version,
},
}
capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) {
c.Name = cluster.Name
kcp := test.KubeadmControlPlane(func(kcp *controlplanev1.KubeadmControlPlane) {
kcp.Name = cluster.Name
kcp.Status = controlplanev1.KubeadmControlPlaneStatus{
Conditions: clusterv1.Conditions{
{
Type: clusterapi.ReadyCondition,
Status: corev1.ConditionTrue,
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
}
})
sec := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Expand All @@ -321,14 +342,15 @@ func TestReconcileConfigMapNotFoundApplyError(t *testing.T) {
Namespace: "eksa-system",
},
}
objs := []runtime.Object{bundle, eksdRelease, capiCluster, sec, awsiamconfig, caSec, eksaRelease}
objs := []runtime.Object{bundle, eksdRelease, kcp, sec, awsiamconfig, caSec, eksaRelease}
cb := fake.NewClientBuilder()
scheme := runtime.NewScheme()
_ = anywherev1.AddToScheme(scheme)
_ = releasev1.AddToScheme(scheme)
_ = eksdv1.AddToScheme(scheme)
_ = clusterv1.AddToScheme(scheme)
_ = corev1.AddToScheme(scheme)
_ = controlplanev1.AddToScheme(scheme)
cl := cb.WithScheme(scheme).WithRuntimeObjects(objs...).Build()

rCb := fake.NewClientBuilder()
Expand Down
1 change: 1 addition & 0 deletions pkg/clusterapi/constants.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,4 +4,5 @@ import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"

const (
	// ControlPlaneReadyCondition is the CAPI condition type reporting that a
	// cluster's control plane is ready.
	ControlPlaneReadyCondition clusterv1.ConditionType = "ControlPlaneReady"
	// ReadyCondition is the top-level "Ready" condition type; used here to
	// check readiness of the KubeadmControlPlane object.
	ReadyCondition clusterv1.ConditionType = "Ready"
)
23 changes: 14 additions & 9 deletions pkg/controller/clusters/clusterapi.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,27 +13,32 @@ import (
"github.com/aws/eks-anywhere/pkg/controller"
)

// CheckControlPlaneReady is a controller helper to check whether a CAPI cluster CP for
// an eks-a cluster is ready or not. This is intended to be used from cluster reconcilers
// CheckControlPlaneReady is a controller helper to check whether KCP object for
// the cluster is ready or not. This is intended to be used from cluster reconcilers
// due its signature and that it returns controller results with appropriate wait times whenever
// the cluster is not ready.
func CheckControlPlaneReady(ctx context.Context, client client.Client, log logr.Logger, cluster *anywherev1.Cluster) (controller.Result, error) {
capiCluster, err := controller.GetCAPICluster(ctx, client, cluster)
kcp, err := controller.GetKubeadmControlPlane(ctx, client, cluster)
if err != nil {
return controller.Result{}, err
}

if capiCluster == nil {
log.Info("CAPI cluster does not exist yet, requeuing")
if kcp == nil {
log.Info("KCP does not exist yet, requeuing")
return controller.ResultWithRequeue(5 * time.Second), nil
}

if !conditions.IsTrue(capiCluster, clusterapi.ControlPlaneReadyCondition) {
log.Info("CAPI control plane is not ready yet, requeuing")
// TODO: eventually this can be implemented with controller watches
// We make sure to check that the status is up to date before using it
if kcp.Status.ObservedGeneration != kcp.ObjectMeta.Generation {
log.Info("KCP information is outdated, requeing")
return controller.ResultWithRequeue(5 * time.Second), nil
}

if !conditions.IsTrue(kcp, clusterapi.ReadyCondition) {
log.Info("KCP is not ready yet, requeing")
return controller.ResultWithRequeue(30 * time.Second), nil
}

log.Info("CAPI control plane is ready")
log.Info("KCP is ready")
return controller.Result{}, nil
}
62 changes: 38 additions & 24 deletions pkg/controller/clusters/clusterapi_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,15 +8,16 @@ import (
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client/fake"

"github.com/aws/eks-anywhere/internal/test"
_ "github.com/aws/eks-anywhere/internal/test/envtest"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clusterapi"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/controller"
"github.com/aws/eks-anywhere/pkg/controller/clusters"
)
Expand All @@ -25,27 +26,26 @@ func TestCheckControlPlaneReadyItIsReady(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
eksaCluster := eksaCluster()
capiCluster := capiCluster(func(c *clusterv1.Cluster) {
c.Status.Conditions = clusterv1.Conditions{
kcp := kcpObject(func(k *v1beta1.KubeadmControlPlane) {
k.Status.Conditions = clusterv1.Conditions{
{
Type: clusterapi.ControlPlaneReadyCondition,
Type: clusterapi.ReadyCondition,
Status: corev1.ConditionTrue,
},
}
})

client := fake.NewClientBuilder().WithObjects(eksaCluster, capiCluster).Build()
client := fake.NewClientBuilder().WithObjects(eksaCluster, kcp).Build()

result, err := clusters.CheckControlPlaneReady(ctx, client, test.NewNullLogger(), eksaCluster)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(result).To(Equal(controller.Result{}))
}

func TestCheckControlPlaneReadyNoCluster(t *testing.T) {
func TestCheckControlPlaneReadyNoKcp(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
eksaCluster := eksaCluster()

client := fake.NewClientBuilder().WithObjects(eksaCluster).Build()

result, err := clusters.CheckControlPlaneReady(ctx, client, test.NewNullLogger(), eksaCluster)
Expand All @@ -55,31 +55,45 @@ func TestCheckControlPlaneReadyNoCluster(t *testing.T) {
)
}

func TestCheckControlPlaneReadyNotReady(t *testing.T) {
func TestCheckControlPlaneNotReady(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
eksaCluster := eksaCluster()
capiCluster := capiCluster()
kcp := kcpObject(func(k *v1beta1.KubeadmControlPlane) {
k.Status = v1beta1.KubeadmControlPlaneStatus{
ObservedGeneration: 2,
}
})

client := fake.NewClientBuilder().WithObjects(eksaCluster, capiCluster).Build()
client := fake.NewClientBuilder().WithObjects(eksaCluster, kcp).Build()

result, err := clusters.CheckControlPlaneReady(ctx, client, test.NewNullLogger(), eksaCluster)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(result).To(Equal(
controller.Result{Result: &controllerruntime.Result{RequeueAfter: 30 * time.Second}}),
controller.Result{Result: &controllerruntime.Result{RequeueAfter: 5 * time.Second}}),
)
}

func TestCheckControlPlaneReadyErrorReading(t *testing.T) {
func TestCheckControlPlaneStatusNotReady(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
eksaCluster := eksaCluster()
kcp := kcpObject(func(k *v1beta1.KubeadmControlPlane) {
k.Status.Conditions = clusterv1.Conditions{
{
Type: clusterapi.ReadyCondition,
Status: corev1.ConditionFalse,
},
}
})

// This should make the client fail because CRDs are not registered
client := fake.NewClientBuilder().WithScheme(runtime.NewScheme()).Build()
client := fake.NewClientBuilder().WithObjects(eksaCluster, kcp).Build()

_, err := clusters.CheckControlPlaneReady(ctx, client, test.NewNullLogger(), eksaCluster)
g.Expect(err).To(MatchError(ContainSubstring("no kind is registered for the type")))
result, err := clusters.CheckControlPlaneReady(ctx, client, test.NewNullLogger(), eksaCluster)
g.Expect(err).NotTo(HaveOccurred())
g.Expect(result).To(Equal(
controller.Result{Result: &controllerruntime.Result{RequeueAfter: 30 * time.Second}}),
)
}

func eksaCluster() *anywherev1.Cluster {
Expand All @@ -94,23 +108,23 @@ func eksaCluster() *anywherev1.Cluster {
}
}

type capiClusterOpt func(*clusterv1.Cluster)
type kcpObjectOpt func(*v1beta1.KubeadmControlPlane)

func capiCluster(opts ...capiClusterOpt) *clusterv1.Cluster {
c := &clusterv1.Cluster{
func kcpObject(opts ...kcpObjectOpt) *v1beta1.KubeadmControlPlane {
k := &v1beta1.KubeadmControlPlane{
TypeMeta: metav1.TypeMeta{
Kind: "Cluster",
APIVersion: clusterv1.GroupVersion.String(),
Kind: "KubeadmControlPlane",
APIVersion: v1beta1.GroupVersion.String(),
},
ObjectMeta: metav1.ObjectMeta{
Name: "my-cluster",
Namespace: "eksa-system",
Namespace: constants.EksaSystemNamespace,
},
}

for _, opt := range opts {
opt(c)
opt(k)
}

return c
return k
}
Loading