diff --git a/pkg/awsiamauth/reconciler/reconciler_test.go b/pkg/awsiamauth/reconciler/reconciler_test.go
index 9586334784db..2fa1aa1e9d65 100644
--- a/pkg/awsiamauth/reconciler/reconciler_test.go
+++ b/pkg/awsiamauth/reconciler/reconciler_test.go
@@ -15,6 +15,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 	logf "sigs.k8s.io/controller-runtime/pkg/log"
@@ -164,7 +165,7 @@ func TestReconcileBuildClusterSpecError(t *testing.T) {
 	g.Expect(result).To(Equal(controller.Result{}))
 }
 
-func TestReconcileCAPIClusterNotFound(t *testing.T) {
+func TestReconcileKCPObjectNotFound(t *testing.T) {
 	g := NewWithT(t)
 	ctx := context.Background()
 	ctrl := gomock.NewController(t)
@@ -181,6 +182,7 @@ func TestReconcileCAPIClusterNotFound(t *testing.T) {
 	_ = releasev1.AddToScheme(scheme)
 	_ = eksdv1.AddToScheme(scheme)
 	_ = clusterv1.AddToScheme(scheme)
+	_ = controlplanev1.AddToScheme(scheme)
 	cl := cb.WithScheme(scheme).WithRuntimeObjects(objs...).Build()
 	version := test.DevEksaVersion()
 
@@ -233,8 +235,17 @@ func TestReconcileRemoteGetClientError(t *testing.T) {
 			EksaVersion: &version,
 		},
 	}
-	capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) {
-		c.Name = cluster.Name
+	kcp := test.KubeadmControlPlane(func(kcp *controlplanev1.KubeadmControlPlane) {
+		kcp.Name = cluster.Name
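+		// Report the control plane as ready so Reconcile proceeds to the remote client lookup.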
+		kcp.Status = controlplanev1.KubeadmControlPlaneStatus{
+			Conditions: clusterv1.Conditions{
+				{
+					Type:               clusterapi.ReadyCondition,
+					Status:             corev1.ConditionTrue,
+					LastTransitionTime: metav1.NewTime(time.Now()),
+				},
+			},
+		}
 	})
 	sec := &corev1.Secret{
 		ObjectMeta: metav1.ObjectMeta{
@@ -242,13 +253,14 @@ func TestReconcileRemoteGetClientError(t *testing.T) {
 			Namespace: constants.EksaSystemNamespace,
 		},
 	}
-	objs := []runtime.Object{bundle, eksdRelease, capiCluster, sec, eksaRelease}
+	objs := []runtime.Object{bundle, eksdRelease, kcp, sec, eksaRelease}
 	cb := fake.NewClientBuilder()
 	scheme := runtime.NewScheme()
 	_ = releasev1.AddToScheme(scheme)
 	_ = eksdv1.AddToScheme(scheme)
 	_ = clusterv1.AddToScheme(scheme)
 	_ = corev1.AddToScheme(scheme)
+	_ = controlplanev1.AddToScheme(scheme)
 	cl := cb.WithScheme(scheme).WithRuntimeObjects(objs...).Build()
 
 	remoteClientRegistry.EXPECT().GetClient(context.Background(), gomock.AssignableToTypeOf(client.ObjectKey{})).Return(nil, errors.New("client error"))
@@ -297,8 +309,17 @@ func TestReconcileConfigMapNotFoundApplyError(t *testing.T) {
 			EksaVersion: &version,
 		},
 	}
-	capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) {
-		c.Name = cluster.Name
+	kcp := test.KubeadmControlPlane(func(kcp *controlplanev1.KubeadmControlPlane) {
+		kcp.Name = cluster.Name
+		kcp.Status = controlplanev1.KubeadmControlPlaneStatus{
+			Conditions: clusterv1.Conditions{
+				{
+					Type:               clusterapi.ReadyCondition,
+					Status:             corev1.ConditionTrue,
+					LastTransitionTime: metav1.NewTime(time.Now()),
+				},
+			},
+		}
 	})
 	sec := &corev1.Secret{
 		ObjectMeta: metav1.ObjectMeta{
@@ -321,7 +342,7 @@ func TestReconcileConfigMapNotFoundApplyError(t *testing.T) {
 			Namespace: "eksa-system",
 		},
 	}
-	objs := []runtime.Object{bundle, eksdRelease, capiCluster, sec, awsiamconfig, caSec, eksaRelease}
+	objs := []runtime.Object{bundle, eksdRelease, kcp, sec, awsiamconfig, caSec, eksaRelease}
 	cb := fake.NewClientBuilder()
 	scheme := runtime.NewScheme()
 	_ = anywherev1.AddToScheme(scheme)
@@ -329,6 +350,7 @@ func TestReconcileConfigMapNotFoundApplyError(t *testing.T) {
 	_ = eksdv1.AddToScheme(scheme)
 	_ = clusterv1.AddToScheme(scheme)
 	_ = corev1.AddToScheme(scheme)
+	_ = controlplanev1.AddToScheme(scheme)
 	cl := cb.WithScheme(scheme).WithRuntimeObjects(objs...).Build()
 
 	rCb := fake.NewClientBuilder()
diff --git a/pkg/clusterapi/constants.go b/pkg/clusterapi/constants.go
index 95929199c4f1..ab3d4257a6e3 100644
--- a/pkg/clusterapi/constants.go
+++ b/pkg/clusterapi/constants.go
@@ -4,4 +4,5 @@ import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 
 const (
 	ControlPlaneReadyCondition clusterv1.ConditionType = "ControlPlaneReady"
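+	// ReadyCondition is the condition type reporting the overall readiness of the KubeadmControlPlane.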
+	ReadyCondition             clusterv1.ConditionType = "Ready"
 )
diff --git a/pkg/controller/clusters/clusterapi.go b/pkg/controller/clusters/clusterapi.go
index ceddd76aeb2c..f72afdaa5be0 100644
--- a/pkg/controller/clusters/clusterapi.go
+++ b/pkg/controller/clusters/clusterapi.go
@@ -13,27 +13,32 @@ import (
 	"github.com/aws/eks-anywhere/pkg/controller"
 )
 
-// CheckControlPlaneReady is a controller helper to check whether a CAPI cluster CP for
-// an eks-a cluster is ready or not. This is intended to be used from cluster reconcilers
-// due its signature and that it returns controller results with appropriate wait times whenever
-// the cluster is not ready.
+// CheckControlPlaneReady is a controller helper to check whether the KCP object for
+// the cluster is ready or not. It is intended to be used from cluster reconcilers
+// due to its signature and because it returns controller results with appropriate wait
+// times whenever the control plane is not ready.
 func CheckControlPlaneReady(ctx context.Context, client client.Client, log logr.Logger, cluster *anywherev1.Cluster) (controller.Result, error) {
-	capiCluster, err := controller.GetCAPICluster(ctx, client, cluster)
+	kcp, err := controller.GetKubeadmControlPlane(ctx, client, cluster)
 	if err != nil {
 		return controller.Result{}, err
 	}
 
-	if capiCluster == nil {
-		log.Info("CAPI cluster does not exist yet, requeuing")
+	if kcp == nil {
+		log.Info("KCP does not exist yet, requeuing")
 		return controller.ResultWithRequeue(5 * time.Second), nil
 	}
 
-	if !conditions.IsTrue(capiCluster, clusterapi.ControlPlaneReadyCondition) {
-		log.Info("CAPI control plane is not ready yet, requeuing")
-		// TODO: eventually this can be implemented with controller watches
+	// Make sure the status is up to date before using it: if the KCP controller
+	// has not observed the latest spec yet, its conditions may be stale.
+	if kcp.Status.ObservedGeneration != kcp.ObjectMeta.Generation {
+		log.Info("KCP status is outdated, requeuing")
+		return controller.ResultWithRequeue(5 * time.Second), nil
+	}
+
+	if !conditions.IsTrue(kcp, clusterapi.ReadyCondition) {
+		log.Info("KCP is not ready yet, requeing")
 		return controller.ResultWithRequeue(30 * time.Second), nil
 	}
 
-	log.Info("CAPI control plane is ready")
+	log.Info("KCP is ready")
 	return controller.Result{}, nil
 }
diff --git a/pkg/controller/clusters/clusterapi_test.go b/pkg/controller/clusters/clusterapi_test.go
index 6fc0ecb9ed51..ae5461dce9c2 100644
--- a/pkg/controller/clusters/clusterapi_test.go
+++ b/pkg/controller/clusters/clusterapi_test.go
@@ -8,8 +8,8 @@ import (
 	. "github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	"sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
 	controllerruntime "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 
@@ -17,6 +17,7 @@ import (
 	_ "github.com/aws/eks-anywhere/internal/test/envtest"
 	anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
 	"github.com/aws/eks-anywhere/pkg/clusterapi"
+	"github.com/aws/eks-anywhere/pkg/constants"
 	"github.com/aws/eks-anywhere/pkg/controller"
 	"github.com/aws/eks-anywhere/pkg/controller/clusters"
 )
@@ -25,27 +26,26 @@ func TestCheckControlPlaneReadyItIsReady(t *testing.T) {
 	g := NewWithT(t)
 	ctx := context.Background()
 	eksaCluster := eksaCluster()
-	capiCluster := capiCluster(func(c *clusterv1.Cluster) {
-		c.Status.Conditions = clusterv1.Conditions{
+	kcp := kcpObject(func(k *v1beta1.KubeadmControlPlane) {
+		k.Status.Conditions = clusterv1.Conditions{
 			{
-				Type:   clusterapi.ControlPlaneReadyCondition,
+				Type:   clusterapi.ReadyCondition,
 				Status: corev1.ConditionTrue,
 			},
 		}
 	})
 
-	client := fake.NewClientBuilder().WithObjects(eksaCluster, capiCluster).Build()
+	client := fake.NewClientBuilder().WithObjects(eksaCluster, kcp).Build()
 
 	result, err := clusters.CheckControlPlaneReady(ctx, client, test.NewNullLogger(), eksaCluster)
 	g.Expect(err).NotTo(HaveOccurred())
 	g.Expect(result).To(Equal(controller.Result{}))
 }
 
-func TestCheckControlPlaneReadyNoCluster(t *testing.T) {
+func TestCheckControlPlaneReadyNoKCP(t *testing.T) {
 	g := NewWithT(t)
 	ctx := context.Background()
 	eksaCluster := eksaCluster()
-
 	client := fake.NewClientBuilder().WithObjects(eksaCluster).Build()
 
 	result, err := clusters.CheckControlPlaneReady(ctx, client, test.NewNullLogger(), eksaCluster)
@@ -55,31 +55,45 @@ func TestCheckControlPlaneReadyNoCluster(t *testing.T) {
 	)
 }
 
-func TestCheckControlPlaneReadyNotReady(t *testing.T) {
+func TestCheckControlPlaneStatusOutdated(t *testing.T) {
 	g := NewWithT(t)
 	ctx := context.Background()
 	eksaCluster := eksaCluster()
-	capiCluster := capiCluster()
+	kcp := kcpObject(func(k *v1beta1.KubeadmControlPlane) {
+		k.Status = v1beta1.KubeadmControlPlaneStatus{
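+			// Differs from the object's Generation (unset here), so the status is treated as outdated.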
+			ObservedGeneration: 2,
+		}
+	})
 
-	client := fake.NewClientBuilder().WithObjects(eksaCluster, capiCluster).Build()
+	client := fake.NewClientBuilder().WithObjects(eksaCluster, kcp).Build()
 
 	result, err := clusters.CheckControlPlaneReady(ctx, client, test.NewNullLogger(), eksaCluster)
 	g.Expect(err).NotTo(HaveOccurred())
 	g.Expect(result).To(Equal(
-		controller.Result{Result: &controllerruntime.Result{RequeueAfter: 30 * time.Second}}),
+		controller.Result{Result: &controllerruntime.Result{RequeueAfter: 5 * time.Second}}),
 	)
 }
 
-func TestCheckControlPlaneReadyErrorReading(t *testing.T) {
+func TestCheckControlPlaneStatusNotReady(t *testing.T) {
 	g := NewWithT(t)
 	ctx := context.Background()
 	eksaCluster := eksaCluster()
+	kcp := kcpObject(func(k *v1beta1.KubeadmControlPlane) {
+		k.Status.Conditions = clusterv1.Conditions{
+			{
+				Type:   clusterapi.ReadyCondition,
+				Status: corev1.ConditionFalse,
+			},
+		}
+	})
 
-	// This should make the client fail because CRDs are not registered
-	client := fake.NewClientBuilder().WithScheme(runtime.NewScheme()).Build()
+	client := fake.NewClientBuilder().WithObjects(eksaCluster, kcp).Build()
 
-	_, err := clusters.CheckControlPlaneReady(ctx, client, test.NewNullLogger(), eksaCluster)
-	g.Expect(err).To(MatchError(ContainSubstring("no kind is registered for the type")))
+	result, err := clusters.CheckControlPlaneReady(ctx, client, test.NewNullLogger(), eksaCluster)
+	g.Expect(err).NotTo(HaveOccurred())
+	g.Expect(result).To(Equal(
+		controller.Result{Result: &controllerruntime.Result{RequeueAfter: 30 * time.Second}}),
+	)
 }
 
 func eksaCluster() *anywherev1.Cluster {
@@ -94,23 +108,23 @@ func eksaCluster() *anywherev1.Cluster {
 	}
 }
 
-type capiClusterOpt func(*clusterv1.Cluster)
+type kcpObjectOpt func(*v1beta1.KubeadmControlPlane)
 
-func capiCluster(opts ...capiClusterOpt) *clusterv1.Cluster {
-	c := &clusterv1.Cluster{
+func kcpObject(opts ...kcpObjectOpt) *v1beta1.KubeadmControlPlane {
+	k := &v1beta1.KubeadmControlPlane{
 		TypeMeta: metav1.TypeMeta{
-			Kind:       "Cluster",
-			APIVersion: clusterv1.GroupVersion.String(),
+			Kind:       "KubeadmControlPlane",
+			APIVersion: v1beta1.GroupVersion.String(),
 		},
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      "my-cluster",
-			Namespace: "eksa-system",
+			Namespace: constants.EksaSystemNamespace,
 		},
 	}
 
 	for _, opt := range opts {
-		opt(c)
+		opt(k)
 	}
 
-	return c
+	return k
 }
diff --git a/pkg/providers/cloudstack/reconciler/reconciler_test.go b/pkg/providers/cloudstack/reconciler/reconciler_test.go
index 9862ebd44c2f..0fc25bdff85a 100644
--- a/pkg/providers/cloudstack/reconciler/reconciler_test.go
+++ b/pkg/providers/cloudstack/reconciler/reconciler_test.go
@@ -2,7 +2,9 @@ package reconciler_test
 
 import (
 	"context"
+	"fmt"
 	"math"
+	"strings"
 	"testing"
 	"time"
 
@@ -44,10 +46,7 @@ func TestReconcilerReconcileSuccess(t *testing.T) {
 	// We want to check that the cluster status is cleaned up if validations are passed
 	tt.cluster.SetFailure(anywherev1.FailureReasonType("InvalidCluster"), "invalid cluster")
 
-	capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) {
-		c.Name = tt.cluster.Name
-	})
-	tt.eksaSupportObjs = append(tt.eksaSupportObjs, capiCluster, tt.secret)
+	tt.eksaSupportObjs = append(tt.eksaSupportObjs, tt.kcp, tt.secret)
 	tt.createAllObjs()
 
 	logger := test.NewNullLogger()
@@ -56,7 +55,7 @@ func TestReconcilerReconcileSuccess(t *testing.T) {
 	spec := tt.buildSpec()
 	tt.ipValidator.EXPECT().ValidateControlPlaneIP(tt.ctx, logger, spec).Return(controller.Result{}, nil)
 	tt.remoteClientRegistry.EXPECT().GetClient(
-		tt.ctx, client.ObjectKey{Name: "workload-cluster", Namespace: constants.EksaSystemNamespace},
+		tt.ctx, client.ObjectKey{Name: tt.cluster.Name, Namespace: constants.EksaSystemNamespace},
 	).Return(remoteClient, nil).Times(1)
 	tt.cniReconciler.EXPECT().Reconcile(tt.ctx, logger, remoteClient, tt.buildSpec())
 	ctrl := gomock.NewController(t)
@@ -181,17 +180,17 @@ func TestReconcilerValidateMachineConfigFail(t *testing.T) {
 func TestReconcilerControlPlaneIsNotReady(t *testing.T) {
 	tt := newReconcilerTest(t)
 
-	capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) {
-		c.Name = tt.cluster.Name
-	})
-	capiCluster.Status.Conditions = clusterv1.Conditions{
-		{
-			Type:               clusterapi.ControlPlaneReadyCondition,
-			Status:             corev1.ConditionFalse,
-			LastTransitionTime: metav1.NewTime(time.Now()),
+	tt.kcp.Status = controlplanev1.KubeadmControlPlaneStatus{
+		Conditions: clusterv1.Conditions{
+			{
+				Type:               clusterapi.ReadyCondition,
+				Status:             corev1.ConditionFalse,
+				LastTransitionTime: metav1.NewTime(time.Now()),
+			},
 		},
+		ObservedGeneration: 2,
 	}
-	tt.eksaSupportObjs = append(tt.eksaSupportObjs, capiCluster, tt.secret)
+	tt.eksaSupportObjs = append(tt.eksaSupportObjs, tt.kcp, tt.secret)
 	tt.createAllObjs()
 
 	logger := test.NewNullLogger()
@@ -341,7 +340,7 @@ func TestReconcileCNISuccess(t *testing.T) {
 	spec := tt.buildSpec()
 
 	tt.remoteClientRegistry.EXPECT().GetClient(
-		tt.ctx, client.ObjectKey{Name: "workload-cluster", Namespace: "eksa-system"},
+		tt.ctx, client.ObjectKey{Name: tt.cluster.Name, Namespace: "eksa-system"},
 	).Return(remoteClient, nil)
 	tt.cniReconciler.EXPECT().Reconcile(tt.ctx, logger, remoteClient, spec)
 
@@ -362,7 +361,7 @@ func TestReconcileCNIErrorClientRegistry(t *testing.T) {
 	spec := tt.buildSpec()
 
 	tt.remoteClientRegistry.EXPECT().GetClient(
-		tt.ctx, client.ObjectKey{Name: "workload-cluster", Namespace: "eksa-system"},
+		tt.ctx, client.ObjectKey{Name: tt.cluster.Name, Namespace: "eksa-system"},
 	).Return(nil, errors.New("building client"))
 
 	result, err := tt.reconciler().ReconcileCNI(tt.ctx, logger, spec)
@@ -548,6 +547,7 @@ type reconcilerTest struct {
 	validatorRegistry         *cloudstack.MockValidatorRegistry
 	execConfig                *decoder.CloudStackExecConfig
 	secret                    *corev1.Secret
+	kcp                       *controlplanev1.KubeadmControlPlane
 }
 
 func newReconcilerTest(t testing.TB) *reconcilerTest {
@@ -612,7 +612,7 @@ func newReconcilerTest(t testing.TB) *reconcilerTest {
 	})
 
 	cluster := cloudstackCluster(func(c *anywherev1.Cluster) {
-		c.Name = "workload-cluster"
+		c.Name = strings.ToLower(t.Name())
 		c.Spec.ManagementCluster = anywherev1.ManagementCluster{
 			Name: managementCluster.Name,
 		}
@@ -650,6 +650,28 @@ func newReconcilerTest(t testing.TB) *reconcilerTest {
 
 		c.Spec.EksaVersion = &version
 	})
+
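+	// kcp models a control plane that has already converged, so reconciles get past the readiness check.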
+	kcp := test.KubeadmControlPlane(func(kcp *controlplanev1.KubeadmControlPlane) {
+		kcp.Name = cluster.Name
+		kcp.Spec = controlplanev1.KubeadmControlPlaneSpec{
+			MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
+				InfrastructureRef: corev1.ObjectReference{
+					Name: fmt.Sprintf("%s-control-plane-1", cluster.Name),
+				},
+			},
+		}
+		kcp.Status = controlplanev1.KubeadmControlPlaneStatus{
+			Conditions: clusterv1.Conditions{
+				{
+					Type:               clusterapi.ReadyCondition,
+					Status:             corev1.ConditionTrue,
+					LastTransitionTime: metav1.NewTime(time.Now()),
+				},
+			},
+			ObservedGeneration: 2,
+		}
+	})
+
 	secret := &corev1.Secret{
 		TypeMeta: metav1.TypeMeta{
 			APIVersion: "v1",
@@ -691,6 +713,7 @@ func newReconcilerTest(t testing.TB) *reconcilerTest {
 		validatorRegistry:         validatorRegistry,
 		execConfig:                execConfig,
 		secret:                    secret,
+		kcp:                       kcp,
 	}
 
 	t.Cleanup(tt.cleanup)
diff --git a/pkg/providers/docker/reconciler/reconciler_test.go b/pkg/providers/docker/reconciler/reconciler_test.go
index 9850c2c9a1fd..a34ccc8e25b9 100644
--- a/pkg/providers/docker/reconciler/reconciler_test.go
+++ b/pkg/providers/docker/reconciler/reconciler_test.go
@@ -3,12 +3,15 @@ package reconciler_test
 import (
 	"context"
 	"errors"
+	"fmt"
 	"strings"
 	"testing"
+	"time"
 
 	etcdv1 "github.com/aws/etcdadm-controller/api/v1beta1"
 	"github.com/golang/mock/gomock"
 	. "github.com/onsi/gomega"
+	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -22,6 +25,7 @@ import (
 	"github.com/aws/eks-anywhere/internal/test/envtest"
 	anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
 	clusterspec "github.com/aws/eks-anywhere/pkg/cluster"
+	"github.com/aws/eks-anywhere/pkg/clusterapi"
 	"github.com/aws/eks-anywhere/pkg/constants"
 	"github.com/aws/eks-anywhere/pkg/controller"
 	"github.com/aws/eks-anywhere/pkg/controller/clientutil"
@@ -37,10 +41,8 @@ const (
 func TestReconcilerReconcileSuccess(t *testing.T) {
 	tt := newReconcilerTest(t)
 	logger := test.NewNullLogger()
-	capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) {
-		c.Name = tt.cluster.Name
-	})
-	tt.eksaSupportObjs = append(tt.eksaSupportObjs, capiCluster)
+
+	tt.eksaSupportObjs = append(tt.eksaSupportObjs, tt.kcp)
 	tt.createAllObjs()
 
 	remoteClient := fake.NewClientBuilder().Build()
@@ -53,7 +55,7 @@ func TestReconcilerReconcileSuccess(t *testing.T) {
 	tt.Expect(tt.cluster.Status.FailureMessage).To(BeZero())
 	tt.Expect(tt.cluster.Status.FailureReason).To(BeZero())
 
-	tt.ShouldEventuallyExist(tt.ctx, capiCluster)
+	tt.ShouldEventuallyExist(tt.ctx, tt.kcp)
 	tt.ShouldEventuallyExist(tt.ctx,
 		&controlplanev1.KubeadmControlPlane{
 			ObjectMeta: metav1.ObjectMeta{
@@ -356,11 +358,7 @@ func TestReconcileControlPlaneUnstackedEtcdSuccess(t *testing.T) {
 	tt.Expect(tt.cluster.Status.FailureReason).To(BeZero())
 	tt.Expect(result).To(Equal(controller.Result{}))
 
-	capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) {
-		c.Name = tt.cluster.Name
-	})
-
-	tt.ShouldEventuallyExist(tt.ctx, capiCluster)
+	tt.ShouldEventuallyExist(tt.ctx, tt.kcp)
 	tt.ShouldEventuallyExist(tt.ctx,
 		&controlplanev1.KubeadmControlPlane{
 			ObjectMeta: metav1.ObjectMeta{
@@ -424,6 +422,7 @@ type reconcilerTest struct {
 	env                  *envtest.Environment
 	eksaSupportObjs      []client.Object
 	datacenterConfig     *anywherev1.DockerDatacenterConfig
+	kcp                  *controlplanev1.KubeadmControlPlane
 }
 
 func newReconcilerTest(t testing.TB) *reconcilerTest {
@@ -476,6 +475,27 @@ func newReconcilerTest(t testing.TB) *reconcilerTest {
 		c.Spec.EksaVersion = &version
 	})
 
+	kcp := test.KubeadmControlPlane(func(kcp *controlplanev1.KubeadmControlPlane) {
+		kcp.Name = cluster.Name
+		kcp.Spec = controlplanev1.KubeadmControlPlaneSpec{
+			MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
+				InfrastructureRef: corev1.ObjectReference{
+					Name: fmt.Sprintf("%s-control-plane-1", cluster.Name),
+				},
+			},
+		}
+		kcp.Status = controlplanev1.KubeadmControlPlaneStatus{
+			Conditions: clusterv1.Conditions{
+				{
+					Type:               clusterapi.ReadyCondition,
+					Status:             corev1.ConditionTrue,
+					LastTransitionTime: metav1.NewTime(time.Now()),
+				},
+			},
+			ObservedGeneration: 2,
+		}
+	})
+
 	tt := &reconcilerTest{
 		t:                    t,
 		WithT:                NewWithT(t),
@@ -496,6 +516,7 @@ func newReconcilerTest(t testing.TB) *reconcilerTest {
 			test.EKSARelease(),
 		},
 		datacenterConfig: workloadClusterDatacenter,
+		kcp:              kcp,
 	}
 
 	t.Cleanup(tt.cleanup)
diff --git a/pkg/providers/snow/reconciler/reconciler_test.go b/pkg/providers/snow/reconciler/reconciler_test.go
index 082cfe60b858..83e90145c6da 100644
--- a/pkg/providers/snow/reconciler/reconciler_test.go
+++ b/pkg/providers/snow/reconciler/reconciler_test.go
@@ -2,7 +2,10 @@ package reconciler_test
 
 import (
 	"context"
+	"fmt"
+	"strings"
 	"testing"
+	"time"
 
 	"github.com/golang/mock/gomock"
 	. "github.com/onsi/gomega"
@@ -20,6 +23,7 @@ import (
 	"github.com/aws/eks-anywhere/internal/test/envtest"
 	anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
 	clusterspec "github.com/aws/eks-anywhere/pkg/cluster"
+	"github.com/aws/eks-anywhere/pkg/clusterapi"
 	"github.com/aws/eks-anywhere/pkg/constants"
 	"github.com/aws/eks-anywhere/pkg/controller"
 	"github.com/aws/eks-anywhere/pkg/controller/clientutil"
@@ -37,10 +41,8 @@ func TestReconcilerReconcileSuccess(t *testing.T) {
 	tt := newReconcilerTest(t)
 	// We want to check that the cluster status is cleaned up if validations are passed
 	tt.cluster.SetFailure(anywherev1.FailureReasonType("InvalidCluster"), "invalid cluster")
-	capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) {
-		c.Name = tt.cluster.Name
-	})
-	tt.eksaSupportObjs = append(tt.eksaSupportObjs, capiCluster)
+
+	tt.eksaSupportObjs = append(tt.eksaSupportObjs, tt.kcp)
 	tt.createAllObjs()
 
 	logger := test.NewNullLogger()
@@ -49,7 +51,7 @@ func TestReconcilerReconcileSuccess(t *testing.T) {
 	tt.ipValidator.EXPECT().ValidateControlPlaneIP(tt.ctx, logger, tt.buildSpec()).Return(controller.Result{}, nil)
 
 	tt.remoteClientRegistry.EXPECT().GetClient(
-		tt.ctx, client.ObjectKey{Name: "workload-cluster", Namespace: "eksa-system"},
+		tt.ctx, client.ObjectKey{Name: tt.cluster.Name, Namespace: "eksa-system"},
 	).Return(remoteClient, nil)
 	tt.cniReconciler.EXPECT().Reconcile(tt.ctx, logger, remoteClient, tt.buildSpec())
 
@@ -198,7 +200,7 @@ func TestReconcilerReconcileControlPlane(t *testing.T) {
 	tt.ShouldEventuallyExist(tt.ctx,
 		&controlplanev1.KubeadmControlPlane{
 			ObjectMeta: metav1.ObjectMeta{
-				Name:      "workload-cluster",
+				Name:      tt.cluster.Name,
 				Namespace: "eksa-system",
 			},
 		},
@@ -207,26 +209,32 @@ func TestReconcilerReconcileControlPlane(t *testing.T) {
 	tt.ShouldEventuallyExist(tt.ctx,
 		&snowv1.AWSSnowMachineTemplate{
 			ObjectMeta: metav1.ObjectMeta{
-				Name:      "workload-cluster-control-plane-1",
+				Name:      tt.cluster.Name + "-control-plane-1",
 				Namespace: "eksa-system",
 			},
 		},
 	)
 
 	capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) {
-		c.Name = "workload-cluster"
+		c.Name = tt.cluster.Name
 	})
 	tt.ShouldEventuallyExist(tt.ctx, capiCluster)
 
-	tt.ShouldEventuallyExist(tt.ctx, &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "workload-cluster-snow-credentials", Namespace: "eksa-system"}})
+	tt.ShouldEventuallyExist(tt.ctx, &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: tt.cluster.Name + "-snow-credentials", Namespace: "eksa-system"}})
 }
 
 func TestReconcilerCheckControlPlaneReadyItIsReady(t *testing.T) {
 	tt := newReconcilerTest(t)
-	capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) {
-		c.Name = tt.cluster.Name
-	})
-	tt.eksaSupportObjs = append(tt.eksaSupportObjs, capiCluster)
+	tt.kcp.Status = controlplanev1.KubeadmControlPlaneStatus{
+		Conditions: clusterv1.Conditions{
+			{
+				Type:               clusterapi.ReadyCondition,
+				Status:             corev1.ConditionTrue,
+				LastTransitionTime: metav1.NewTime(time.Now()),
+			},
+		},
+	}
+	tt.eksaSupportObjs = append(tt.eksaSupportObjs, tt.kcp)
 	tt.withFakeClient()
 
 	result, err := tt.reconciler().CheckControlPlaneReady(tt.ctx, test.NewNullLogger(), tt.buildSpec())
@@ -246,7 +254,7 @@ func TestReconcilerReconcileCNISuccess(t *testing.T) {
 	spec := tt.buildSpec()
 
 	tt.remoteClientRegistry.EXPECT().GetClient(
-		tt.ctx, client.ObjectKey{Name: "workload-cluster", Namespace: "eksa-system"},
+		tt.ctx, client.ObjectKey{Name: tt.cluster.Name, Namespace: "eksa-system"},
 	).Return(remoteClient, nil)
 	tt.cniReconciler.EXPECT().Reconcile(tt.ctx, logger, remoteClient, spec)
 
@@ -266,7 +274,7 @@ func TestReconcilerReconcileCNIErrorClientRegistry(t *testing.T) {
 	spec := tt.buildSpec()
 
 	tt.remoteClientRegistry.EXPECT().GetClient(
-		tt.ctx, client.ObjectKey{Name: "workload-cluster", Namespace: "eksa-system"},
+		tt.ctx, client.ObjectKey{Name: tt.cluster.Name, Namespace: "eksa-system"},
 	).Return(nil, errors.New("building client"))
 
 	result, err := tt.reconciler().ReconcileCNI(tt.ctx, logger, spec)
@@ -291,6 +299,7 @@ type reconcilerTest struct {
 	eksaSupportObjs           []client.Object
 	machineConfigControlPlane *anywherev1.SnowMachineConfig
 	machineConfigWorker       *anywherev1.SnowMachineConfig
+	kcp                       *controlplanev1.KubeadmControlPlane
 }
 
 func newReconcilerTest(t testing.TB) *reconcilerTest {
@@ -341,7 +350,7 @@ func newReconcilerTest(t testing.TB) *reconcilerTest {
 	})
 
 	cluster := snowCluster(func(c *anywherev1.Cluster) {
-		c.Name = "workload-cluster"
+		c.Name = strings.ToLower(t.Name())
 		c.Spec.ManagementCluster = anywherev1.ManagementCluster{
 			Name: managementCluster.Name,
 		}
@@ -380,6 +389,27 @@ func newReconcilerTest(t testing.TB) *reconcilerTest {
 		c.Spec.EksaVersion = &version
 	})
 
+	kcp := test.KubeadmControlPlane(func(kcp *controlplanev1.KubeadmControlPlane) {
+		kcp.Name = cluster.Name
+		kcp.Spec = controlplanev1.KubeadmControlPlaneSpec{
+			MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
+				InfrastructureRef: corev1.ObjectReference{
+					Name: fmt.Sprintf("%s-control-plane-1", cluster.Name),
+				},
+			},
+		}
+		kcp.Status = controlplanev1.KubeadmControlPlaneStatus{
+			Conditions: clusterv1.Conditions{
+				{
+					Type:               clusterapi.ReadyCondition,
+					Status:             corev1.ConditionTrue,
+					LastTransitionTime: metav1.NewTime(time.Now()),
+				},
+			},
+			ObservedGeneration: 2,
+		}
+	})
+
 	tt := &reconcilerTest{
 		t:                    t,
 		WithT:                NewWithT(t),
@@ -404,6 +434,7 @@ func newReconcilerTest(t testing.TB) *reconcilerTest {
 		cluster:                   cluster,
 		machineConfigControlPlane: machineConfigCP,
 		machineConfigWorker:       machineConfigWN,
+		kcp:                       kcp,
 	}
 
 	t.Cleanup(tt.cleanup)
diff --git a/pkg/providers/tinkerbell/reconciler/reconciler_test.go b/pkg/providers/tinkerbell/reconciler/reconciler_test.go
index 7d408f1f4f86..d7f42c31e978 100644
--- a/pkg/providers/tinkerbell/reconciler/reconciler_test.go
+++ b/pkg/providers/tinkerbell/reconciler/reconciler_test.go
@@ -3,7 +3,9 @@ package reconciler_test
 import (
 	"context"
 	"errors"
+	"fmt"
 	"testing"
+	"time"
 
 	"github.com/golang/mock/gomock"
 	. "github.com/onsi/gomega"
@@ -56,10 +58,7 @@ func TestReconcilerGenerateSpec(t *testing.T) {
 func TestReconcilerReconcileSuccess(t *testing.T) {
 	tt := newReconcilerTest(t)
 
-	capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) {
-		c.Name = tt.cluster.Name
-	})
-	tt.eksaSupportObjs = append(tt.eksaSupportObjs, capiCluster)
+	tt.eksaSupportObjs = append(tt.eksaSupportObjs, tt.kcp)
 	tt.eksaSupportObjs = append(tt.eksaSupportObjs, tinkHardware("hw1", "cp"))
 	tt.eksaSupportObjs = append(tt.eksaSupportObjs, tinkHardware("hw2", "worker"))
 	tt.createAllObjs()
@@ -801,6 +800,7 @@ type reconcilerTest struct {
 	ipValidator               *tinkerbellreconcilermocks.MockIPValidator
 	cniReconciler             *tinkerbellreconcilermocks.MockCNIReconciler
 	remoteClientRegistry      *tinkerbellreconcilermocks.MockRemoteClientRegistry
+	kcp                       *controlplanev1.KubeadmControlPlane
 }
 
 func newReconcilerTest(t testing.TB) *reconcilerTest {
@@ -886,6 +886,29 @@ func newReconcilerTest(t testing.TB) *reconcilerTest {
 		c.Spec.EksaVersion = &version
 	})
 
+	kcp := test.KubeadmControlPlane(func(kcp *controlplanev1.KubeadmControlPlane) {
+		kcp.Name = cluster.Name
+		kcp.Spec = controlplanev1.KubeadmControlPlaneSpec{
+			MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
+				InfrastructureRef: corev1.ObjectReference{
+					Name: fmt.Sprintf("%s-control-plane-1", cluster.Name),
+				},
+			},
+			Version:  "v1.19.8",
+			Replicas: ptr.Int32(1),
+		}
+		kcp.Status = controlplanev1.KubeadmControlPlaneStatus{
+			Conditions: clusterv1.Conditions{
+				{
+					Type:               clusterapi.ReadyCondition,
+					Status:             corev1.ConditionTrue,
+					LastTransitionTime: metav1.NewTime(time.Now()),
+				},
+			},
+			ObservedGeneration: 2,
+		}
+	})
+
 	tt := &reconcilerTest{
 		t:                    t,
 		WithT:                NewWithT(t),
@@ -909,6 +932,7 @@ func newReconcilerTest(t testing.TB) *reconcilerTest {
 		datacenterConfig:          workloadClusterDatacenter,
 		machineConfigControlPlane: machineConfigCP,
 		machineConfigWorker:       machineConfigWN,
+		kcp:                       kcp,
 	}
 
 	t.Cleanup(tt.cleanup)
diff --git a/pkg/providers/vsphere/reconciler/reconciler_test.go b/pkg/providers/vsphere/reconciler/reconciler_test.go
index 75bc156e8bfd..6e2b0746b504 100644
--- a/pkg/providers/vsphere/reconciler/reconciler_test.go
+++ b/pkg/providers/vsphere/reconciler/reconciler_test.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"os"
+	"strings"
 	"testing"
 	"time"
 
@@ -49,10 +50,7 @@ func TestReconcilerReconcileSuccess(t *testing.T) {
 	// We want to check that the cluster status is cleaned up if validations are passed
 	tt.cluster.SetFailure(anywherev1.FailureReasonType("InvalidCluster"), "invalid cluster")
 
-	capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) {
-		c.Name = tt.cluster.Name
-	})
-	tt.eksaSupportObjs = append(tt.eksaSupportObjs, capiCluster)
+	tt.eksaSupportObjs = append(tt.eksaSupportObjs, tt.kcp)
 	tt.createAllObjs()
 
 	logger := test.NewNullLogger()
@@ -66,7 +64,7 @@ func TestReconcilerReconcileSuccess(t *testing.T) {
 	tt.govcClient.EXPECT().ListTags(tt.ctx).Return([]executables.Tag{}, nil)
 
 	tt.remoteClientRegistry.EXPECT().GetClient(
-		tt.ctx, client.ObjectKey{Name: "workload-cluster", Namespace: "eksa-system"},
+		tt.ctx, client.ObjectKey{Name: tt.cluster.Name, Namespace: "eksa-system"},
 	).Return(remoteClient, nil).Times(1)
 	tt.cniReconciler.EXPECT().Reconcile(tt.ctx, logger, remoteClient, tt.buildSpec())
 
@@ -167,17 +165,18 @@ func TestSetupEnvVars(t *testing.T) {
 
 func TestReconcilerControlPlaneIsNotReady(t *testing.T) {
 	tt := newReconcilerTest(t)
-	capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) {
-		c.Name = tt.cluster.Name
-	})
-	capiCluster.Status.Conditions = clusterv1.Conditions{
-		{
-			Type:               clusterapi.ControlPlaneReadyCondition,
-			Status:             corev1.ConditionFalse,
-			LastTransitionTime: metav1.NewTime(time.Now()),
+	tt.kcp.Status = controlplanev1.KubeadmControlPlaneStatus{
+		Conditions: clusterv1.Conditions{
+			{
+				Type:               clusterapi.ReadyCondition,
+				Status:             corev1.ConditionFalse,
+				LastTransitionTime: metav1.NewTime(time.Now()),
+			},
 		},
+		ObservedGeneration: 2,
 	}
-	tt.eksaSupportObjs = append(tt.eksaSupportObjs, capiCluster)
+
+	tt.eksaSupportObjs = append(tt.eksaSupportObjs, tt.kcp)
 	tt.createAllObjs()
 
 	logger := test.NewNullLogger()
@@ -252,7 +251,7 @@ func TestReconcileCNISuccess(t *testing.T) {
 	spec := tt.buildSpec()
 
 	tt.remoteClientRegistry.EXPECT().GetClient(
-		tt.ctx, client.ObjectKey{Name: "workload-cluster", Namespace: "eksa-system"},
+		tt.ctx, client.ObjectKey{Name: tt.cluster.Name, Namespace: "eksa-system"},
 	).Return(remoteClient, nil)
 	tt.cniReconciler.EXPECT().Reconcile(tt.ctx, logger, remoteClient, spec)
 
@@ -272,7 +271,7 @@ func TestReconcileCNIErrorClientRegistry(t *testing.T) {
 	spec := tt.buildSpec()
 
 	tt.remoteClientRegistry.EXPECT().GetClient(
-		tt.ctx, client.ObjectKey{Name: "workload-cluster", Namespace: "eksa-system"},
+		tt.ctx, client.ObjectKey{Name: tt.cluster.Name, Namespace: "eksa-system"},
 	).Return(nil, errors.New("building client"))
 
 	result, err := tt.reconciler().ReconcileCNI(tt.ctx, logger, spec)
@@ -297,7 +296,7 @@ func TestReconcilerReconcileControlPlaneSuccess(t *testing.T) {
 	tt.ShouldEventuallyExist(tt.ctx,
 		&addonsv1.ClusterResourceSet{
 			ObjectMeta: metav1.ObjectMeta{
-				Name:      "workload-cluster-cpi",
+				Name:      tt.cluster.Name + "-cpi",
 				Namespace: "eksa-system",
 			},
 		},
@@ -306,7 +305,7 @@ func TestReconcilerReconcileControlPlaneSuccess(t *testing.T) {
 	tt.ShouldEventuallyExist(tt.ctx,
 		&controlplanev1.KubeadmControlPlane{
 			ObjectMeta: metav1.ObjectMeta{
-				Name:      "workload-cluster",
+				Name:      tt.cluster.Name,
 				Namespace: "eksa-system",
 			},
 		},
@@ -315,20 +314,20 @@ func TestReconcilerReconcileControlPlaneSuccess(t *testing.T) {
 	tt.ShouldEventuallyExist(tt.ctx,
 		&vspherev1.VSphereMachineTemplate{
 			ObjectMeta: metav1.ObjectMeta{
-				Name:      "workload-cluster-control-plane-1",
+				Name:      tt.cluster.Name + "-control-plane-1",
 				Namespace: "eksa-system",
 			},
 		},
 	)
 
 	capiCluster := test.CAPICluster(func(c *clusterv1.Cluster) {
-		c.Name = "workload-cluster"
+		c.Name = tt.cluster.Name
 	})
 	tt.ShouldEventuallyExist(tt.ctx, capiCluster)
 
-	tt.ShouldEventuallyExist(tt.ctx, &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "workload-cluster-cloud-controller-manager", Namespace: "eksa-system"}})
-	tt.ShouldEventuallyExist(tt.ctx, &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "workload-cluster-cloud-provider-vsphere-credentials", Namespace: "eksa-system"}})
-	tt.ShouldEventuallyExist(tt.ctx, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "workload-cluster-cpi-manifests", Namespace: "eksa-system"}})
+	tt.ShouldEventuallyExist(tt.ctx, &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: tt.cluster.Name + "-cloud-controller-manager", Namespace: "eksa-system"}})
+	tt.ShouldEventuallyExist(tt.ctx, &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: tt.cluster.Name + "-cloud-provider-vsphere-credentials", Namespace: "eksa-system"}})
+	tt.ShouldEventuallyExist(tt.ctx, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: tt.cluster.Name + "-cpi-manifests", Namespace: "eksa-system"}})
 }
 
 type reconcilerTest struct {
@@ -350,6 +349,7 @@ type reconcilerTest struct {
 	machineConfigControlPlane *anywherev1.VSphereMachineConfig
 	machineConfigWorker       *anywherev1.VSphereMachineConfig
 	ipValidator               *vspherereconcilermocks.MockIPValidator
+	kcp                       *controlplanev1.KubeadmControlPlane
 }
 
 func newReconcilerTest(t testing.TB) *reconcilerTest {
@@ -393,7 +393,7 @@ func newReconcilerTest(t testing.TB) *reconcilerTest {
 	})
 
 	cluster := vsphereCluster(func(c *anywherev1.Cluster) {
-		c.Name = "workload-cluster"
+		c.Name = strings.ToLower(t.Name())
 		c.Spec.ManagementCluster = anywherev1.ManagementCluster{
 			Name: managementCluster.Name,
 		}
@@ -432,6 +432,27 @@ func newReconcilerTest(t testing.TB) *reconcilerTest {
 		c.Spec.EksaVersion = &version
 	})
 
+	kcp := test.KubeadmControlPlane(func(kcp *controlplanev1.KubeadmControlPlane) {
+		kcp.Name = cluster.Name
+		kcp.Spec = controlplanev1.KubeadmControlPlaneSpec{
+			MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
+				InfrastructureRef: corev1.ObjectReference{
+					Name: fmt.Sprintf("%s-control-plane-1", cluster.Name),
+				},
+			},
+		}
+		kcp.Status = controlplanev1.KubeadmControlPlaneStatus{
+			Conditions: clusterv1.Conditions{
+				{
+					Type:               clusterapi.ReadyCondition,
+					Status:             corev1.ConditionTrue,
+					LastTransitionTime: metav1.NewTime(time.Now()),
+				},
+			},
+			ObservedGeneration: 2,
+		}
+	})
+
 	tt := &reconcilerTest{
 		t:                    t,
 		WithT:                NewWithT(t),
@@ -460,6 +481,7 @@ func newReconcilerTest(t testing.TB) *reconcilerTest {
 		datacenterConfig:          workloadClusterDatacenter,
 		machineConfigControlPlane: machineConfigCP,
 		machineConfigWorker:       machineConfigWN,
+		kcp:                       kcp,
 	}
 
 	t.Cleanup(tt.cleanup)