diff --git a/cmd/eksctl-anywhere/cmd/createcluster.go b/cmd/eksctl-anywhere/cmd/createcluster.go
index f67779f68dac..e34f65df423f 100644
--- a/cmd/eksctl-anywhere/cmd/createcluster.go
+++ b/cmd/eksctl-anywhere/cmd/createcluster.go
@@ -213,6 +213,8 @@ func (cc *createClusterOptions) createCluster(cmd *cobra.Command, _ []string) er
         deps.PackageInstaller,
     )

+    mgmt := getManagementCluster(clusterSpec)
+
     validationOpts := &validations.Opts{
         Kubectl: deps.UnAuthKubectlClient,
         Spec:    clusterSpec,
@@ -220,10 +222,11 @@ func (cc *createClusterOptions) createCluster(cmd *cobra.Command, _ []string) er
             Name:           clusterSpec.Cluster.Name,
             KubeconfigFile: kubeconfig.FromClusterName(clusterSpec.Cluster.Name),
         },
-        ManagementCluster:  getManagementCluster(clusterSpec),
+        ManagementCluster:  mgmt,
         Provider:           deps.Provider,
         CliConfig:          cliConfig,
        SkippedValidations: skippedValidations,
+        KubeClient:         deps.UnAuthKubeClient.KubeconfigClient(mgmt.KubeconfigFile),
     }

     createValidations := createvalidations.New(validationOpts)
diff --git a/cmd/eksctl-anywhere/cmd/upgradecluster.go b/cmd/eksctl-anywhere/cmd/upgradecluster.go
index bb20c88efbb5..34ce201beeaa 100644
--- a/cmd/eksctl-anywhere/cmd/upgradecluster.go
+++ b/cmd/eksctl-anywhere/cmd/upgradecluster.go
@@ -196,6 +196,7 @@ func (uc *upgradeClusterOptions) upgradeCluster(cmd *cobra.Command, args []strin
         Provider:           deps.Provider,
         CliConfig:          cliConfig,
         SkippedValidations: skippedValidations,
+        KubeClient:         deps.UnAuthKubeClient.KubeconfigClient(managementCluster.KubeconfigFile),
     }

     upgradeValidations := upgradevalidations.New(validationOpts)
diff --git a/controllers/cluster_controller.go b/controllers/cluster_controller.go
index b0d91aab2f74..f52516d29806 100644
--- a/controllers/cluster_controller.go
+++ b/controllers/cluster_controller.go
@@ -375,6 +375,10 @@ func (r *ClusterReconciler) preClusterProviderReconcile(ctx context.Context, log
         }
     }

+    if err := validateEksaRelease(ctx, r.client, cluster); err != nil {
+        return controller.Result{}, err
+    }
+
     if cluster.RegistryAuth() {
         rUsername, rPassword, err := config.ReadCredentialsFromSecret(ctx, r.client)
         if err != nil {
@@ -611,3 +615,20 @@ func (r *ClusterReconciler) setDefaultBundlesRefOrEksaVersion(ctx context.Contex
     clus.Status.FailureMessage = ptr.String("Management cluster must have either EksaVersion or BundlesRef")
     return fmt.Errorf("could not set default values")
 }
+
+func validateEksaRelease(ctx context.Context, client client.Client, cluster *anywherev1.Cluster) error {
+    if cluster.Spec.EksaVersion == nil {
+        return nil
+    }
+    err := validations.ValidateEksaReleaseExistOnManagement(ctx, clientutil.NewKubeClient(client), cluster)
+    if apierrors.IsNotFound(err) {
+        errMsg := fmt.Sprintf("eksarelease %v could not be found on the management cluster", *cluster.Spec.EksaVersion)
+        reason := anywherev1.EksaVersionInvalidReason
+        cluster.Status.FailureMessage = ptr.String(errMsg)
+        cluster.Status.FailureReason = &reason
+        return err
+    } else if err != nil {
+        return err
+    }
+    return nil
+}
diff --git a/controllers/cluster_controller_test.go b/controllers/cluster_controller_test.go
index 7ff79d37a26e..b96109edf347 100644
--- a/controllers/cluster_controller_test.go
+++ b/controllers/cluster_controller_test.go
@@ -180,7 +180,7 @@ func TestClusterReconcilerReconcileSelfManagedCluster(t *testing.T) {
     clusterValidator := mocks.NewMockClusterValidator(controller)
     registry := newRegistryMock(providerReconciler)
-    c := fake.NewClientBuilder().WithRuntimeObjects(selfManagedCluster, kcp).Build()
+    c := fake.NewClientBuilder().WithRuntimeObjects(selfManagedCluster, kcp, test.EKSARelease()).Build()
     mockPkgs := mocks.NewMockPackagesClient(controller)

     providerReconciler.EXPECT().Reconcile(ctx, gomock.AssignableToTypeOf(logr.Logger{}), sameName(selfManagedCluster))
     mhcReconciler.EXPECT().Reconcile(ctx, gomock.AssignableToTypeOf(logr.Logger{}), sameName(selfManagedCluster)).Return(nil)
@@ -432,7 +432,7 @@ func TestClusterReconcilerReconcileConditions(t *testing.T) {
                 md.Status = tt.machineDeploymentStatus
             })

-            objs = append(objs, config.Cluster, bundles, kcp, md1, mgmt.Cluster)
+            objs = append(objs, config.Cluster, bundles, kcp, md1, mgmt.Cluster, test.EKSARelease())

             for _, o := range config.ChildObjects() {
                 objs = append(objs, o)
@@ -707,7 +707,7 @@ func TestClusterReconcilerReconcileSelfManagedClusterConditions(t *testing.T) {
                 md.Status = tt.machineDeploymentStatus
             })

-            objs = append(objs, config.Cluster, bundles, kcp, md1)
+            objs = append(objs, config.Cluster, bundles, kcp, md1, test.EKSARelease())
             for _, o := range config.ChildObjects() {
                 objs = append(objs, o)
             }
@@ -873,7 +873,7 @@ func TestClusterReconcilerReconcileGenerations(t *testing.T) {
     ctx := context.Background()

     objs := make([]runtime.Object, 0, 7+len(machineDeployments))
-    objs = append(objs, config.Cluster, bundles)
+    objs = append(objs, config.Cluster, bundles, test.EKSARelease())
     for _, o := range config.ChildObjects() {
         objs = append(objs, o)
     }
@@ -1039,7 +1039,7 @@ func TestClusterReconcilerReconcileSelfManagedClusterRegAuthFailNoSecret(t *test
     mhcReconciler := mocks.NewMockMachineHealthCheckReconciler(controller)
     registry := newRegistryMock(providerReconciler)
-    c := fake.NewClientBuilder().WithRuntimeObjects(selfManagedCluster).Build()
+    c := fake.NewClientBuilder().WithRuntimeObjects(selfManagedCluster, test.EKSARelease()).Build()

     r := controllers.NewClusterReconciler(c, registry, iam, clusterValidator, nil, mhcReconciler)
     _, err := r.Reconcile(ctx, clusterRequest(selfManagedCluster))

@@ -1255,7 +1255,7 @@ func TestClusterReconcilerSkipDontInstallPackagesOnSelfManaged(t *testing.T) {
             ReconciledGeneration: 1,
         },
     }
-    objs := []runtime.Object{cluster}
+    objs := []runtime.Object{cluster, test.EKSARelease()}
     cb := fake.NewClientBuilder()
     mockClient := cb.WithRuntimeObjects(objs...).Build()
     nullRegistry := newRegistryForDummyProviderReconciler()
@@ -1429,7 +1429,7 @@ func TestClusterReconcilerPackagesInstall(s *testing.T) {
     }
     mgmt := cluster.DeepCopy()
     mgmt.Name = "my-management-cluster"
-    objs := []runtime.Object{cluster, bundles, secret, mgmt}
+    objs := []runtime.Object{cluster, bundles, secret, mgmt, test.EKSARelease()}
     fakeClient := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build()
     nullRegistry := newRegistryForDummyProviderReconciler()
     mockIAM := mocks.NewMockAWSIamConfigReconciler(ctrl)
@@ -1498,6 +1498,59 @@ func TestClusterReconcilerValidateManagementEksaVersionFail(t *testing.T) {
     g.Expect(err).To(HaveOccurred())
 }

+func TestClusterReconcilerNotAvailableEksaVersion(t *testing.T) {
+    version := test.DevEksaVersion()
+    config, _ := baseTestVsphereCluster()
+    config.Cluster.Name = "test-cluster"
+    config.Cluster.Spec.ManagementCluster = anywherev1.ManagementCluster{Name: "management-cluster"}
+    config.Cluster.Spec.BundlesRef = nil
+    config.Cluster.Spec.EksaVersion = &version
+
+    mgmt := config.DeepCopy()
+    mgmt.Cluster.Name = "management-cluster"
+    mgmt.Cluster.Spec.BundlesRef = nil
+    mgmt.Cluster.Spec.EksaVersion = &version
+
+    g := NewWithT(t)
+
+    objs := make([]runtime.Object, 0, 4+len(config.ChildObjects()))
+    objs = append(objs, config.Cluster, mgmt.Cluster)
+
+    for _, o := range config.ChildObjects() {
+        objs = append(objs, o)
+    }
+
+    testClient := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build()
+
+    mockCtrl := gomock.NewController(t)
+    providerReconciler := mocks.NewMockProviderClusterReconciler(mockCtrl)
+    iam := mocks.NewMockAWSIamConfigReconciler(mockCtrl)
+    clusterValidator := mocks.NewMockClusterValidator(mockCtrl)
+    registry := newRegistryMock(providerReconciler)
+    mockPkgs := mocks.NewMockPackagesClient(mockCtrl)
+
+    ctx := context.Background()
+    log := testr.New(t)
+    logCtx := ctrl.LoggerInto(ctx, log)
+
+    iam.EXPECT().EnsureCASecret(logCtx, gomock.AssignableToTypeOf(logr.Logger{}), sameName(config.Cluster)).Return(controller.Result{}, nil)
+    clusterValidator.EXPECT().ValidateManagementClusterName(logCtx, gomock.AssignableToTypeOf(logr.Logger{}), sameName(config.Cluster)).Return(nil)
+
+    r := controllers.NewClusterReconciler(testClient, registry, iam, clusterValidator, mockPkgs, nil)
+
+    req := clusterRequest(config.Cluster)
+    _, err := r.Reconcile(logCtx, req)
+
+    g.Expect(err).To(HaveOccurred())
+    eksaCluster := &anywherev1.Cluster{}
+
+    expectedError := fmt.Sprintf("eksarelease %v could not be found on the management cluster", *config.Cluster.Spec.EksaVersion)
+    g.Expect(testClient.Get(ctx, req.NamespacedName, eksaCluster)).To(Succeed())
+
+    g.Expect(eksaCluster.Status.FailureReason).To(HaveValue(Equal(anywherev1.EksaVersionInvalidReason)))
+    g.Expect(eksaCluster.Status.FailureMessage).To(HaveValue(Equal(expectedError)))
+}
+
 func vsphereWorkerMachineConfig() *anywherev1.VSphereMachineConfig {
     return &anywherev1.VSphereMachineConfig{
         TypeMeta: metav1.TypeMeta{
diff --git a/controllers/cluster_controller_test_test.go b/controllers/cluster_controller_test_test.go
index 88eafcdecd95..405060271bad 100644
--- a/controllers/cluster_controller_test_test.go
+++ b/controllers/cluster_controller_test_test.go
@@ -33,6 +33,11 @@ import (
 func TestClusterReconcilerEnsureOwnerReferences(t *testing.T) {
     g := NewWithT(t)
     ctx := context.Background()
+    version := test.DevEksaVersion()
+    bundlesRef := &anywherev1.BundlesRef{
+        Name:      "my-bundles-ref",
+        Namespace: "my-namespace",
+    }

     managementCluster := &anywherev1.Cluster{
         ObjectMeta: metav1.ObjectMeta{
@@ -40,9 +45,7 @@ func TestClusterReconcilerEnsureOwnerReferences(t *testing.T) {
             Namespace: "my-namespace",
         },
         Spec: anywherev1.ClusterSpec{
-            BundlesRef: &anywherev1.BundlesRef{
-                Name: "my-bundles-ref",
-            },
+            EksaVersion: &version,
         },
         Status: anywherev1.ClusterStatus{
             ReconciledGeneration: 1,
@@ -56,10 +59,7 @@ func TestClusterReconcilerEnsureOwnerReferences(t *testing.T) {
         },
         Spec: anywherev1.ClusterSpec{
             KubernetesVersion: "v1.25",
-            BundlesRef: &anywherev1.BundlesRef{
-                Name:      "my-bundles-ref",
-                Namespace: "my-namespace",
-            },
+            EksaVersion: &version,
         },
         Status: anywherev1.ClusterStatus{
             ReconciledGeneration: 1,
@@ -118,7 +118,7 @@ func TestClusterReconcilerEnsureOwnerReferences(t *testing.T) {
             Namespace: constants.EksaSystemNamespace,
         },
     }
-    objs := []runtime.Object{cluster, managementCluster, oidc, awsIAM, bundles, secret}
+    objs := []runtime.Object{cluster, managementCluster, oidc, awsIAM, bundles, secret, test.EKSARelease()}
     cb := fake.NewClientBuilder()
     cl := cb.WithRuntimeObjects(objs...).Build()

@@ -138,7 +138,7 @@ func TestClusterReconcilerEnsureOwnerReferences(t *testing.T) {
     r := controllers.NewClusterReconciler(cl, newRegistryForDummyProviderReconciler(), iam, validator, pcc, mhc)
     _, err := r.Reconcile(ctx, clusterRequest(cluster))

-    g.Expect(cl.Get(ctx, client.ObjectKey{Namespace: cluster.Spec.BundlesRef.Namespace, Name: cluster.Spec.BundlesRef.Name}, bundles)).To(Succeed())
+    g.Expect(cl.Get(ctx, client.ObjectKey{Namespace: bundlesRef.Namespace, Name: bundlesRef.Name}, bundles)).To(Succeed())
     g.Expect(cl.Get(ctx, client.ObjectKey{Namespace: constants.EksaSystemNamespace, Name: cluster.Name + "-kubeconfig"}, secret)).To(Succeed())

     g.Expect(err).NotTo(HaveOccurred())
@@ -359,7 +359,7 @@ func TestClusterReconcilerSetDefaultEksaVersion(t *testing.T) {
     }
     cluster.SetManagedBy("my-management-cluster")

-    objs := []runtime.Object{cluster, managementCluster}
+    objs := []runtime.Object{cluster, managementCluster, test.EKSARelease()}
     cb := fake.NewClientBuilder()
     cl := cb.WithRuntimeObjects(objs...).Build()

diff --git a/pkg/validations/cluster.go b/pkg/validations/cluster.go
index 593843c08495..875369cb43cf 100644
--- a/pkg/validations/cluster.go
+++ b/pkg/validations/cluster.go
@@ -6,14 +6,17 @@ import (
     "fmt"

     "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
+    "github.com/aws/eks-anywhere/pkg/clients/kubernetes"
     "github.com/aws/eks-anywhere/pkg/cluster"
     "github.com/aws/eks-anywhere/pkg/config"
+    "github.com/aws/eks-anywhere/pkg/constants"
     "github.com/aws/eks-anywhere/pkg/features"
     "github.com/aws/eks-anywhere/pkg/logger"
     "github.com/aws/eks-anywhere/pkg/providers"
     "github.com/aws/eks-anywhere/pkg/semver"
     "github.com/aws/eks-anywhere/pkg/types"
     "github.com/aws/eks-anywhere/pkg/utils/ptr"
+    releasev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
 )

 // ValidateOSForRegistryMirror checks if the OS is valid for the provided registry mirror configuration.
@@ -113,7 +116,7 @@ func ValidateEksaVersion(ctx context.Context, cliVersion string, workload *clust
     }

     if !parsedVersion.SamePatch(parsedCLIVersion) {
-        return fmt.Errorf("cluster's eksaVersion does not match EKS-A CLI's version")
+        return fmt.Errorf("cluster's eksaVersion does not match EKS-Anywhere CLI's version")
     }

     return nil
@@ -215,3 +218,13 @@ func ValidateK8s129Support(clusterSpec *cluster.Spec) error {
     }
     return nil
 }
+
+// ValidateEksaReleaseExistOnManagement checks if there is a corresponding eksareleases CR for workload's eksaVersion on the mgmt cluster.
+func ValidateEksaReleaseExistOnManagement(ctx context.Context, k kubernetes.Client, workload *v1alpha1.Cluster) error {
+    v := workload.Spec.EksaVersion
+    err := k.Get(ctx, releasev1alpha1.GenerateEKSAReleaseName(string(*v)), constants.EksaSystemNamespace, &releasev1alpha1.EKSARelease{})
+    if err != nil {
+        return err
+    }
+    return nil
+}
diff --git a/pkg/validations/cluster_test.go b/pkg/validations/cluster_test.go
index f6ef7d2a74d2..6da0fb1b58cc 100644
--- a/pkg/validations/cluster_test.go
+++ b/pkg/validations/cluster_test.go
@@ -10,6 +10,7 @@ import (
     "github.com/golang/mock/gomock"
     . "github.com/onsi/gomega"
     v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "sigs.k8s.io/controller-runtime/pkg/client"

     "github.com/aws/eks-anywhere/internal/test"
     anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
@@ -320,6 +321,7 @@ func TestValidateEksaVersion(t *testing.T) {
         wantErr    error
         version    *anywherev1.EksaVersion
         cliVersion string
+        workload   bool
     }{
         {
             name:    "Success",
@@ -339,6 +341,12 @@ func TestValidateEksaVersion(t *testing.T) {
             version:    &v,
             cliVersion: "badvalue",
         },
+        {
+            name:       "Mismatch",
+            wantErr:    fmt.Errorf("cluster's eksaVersion does not match EKS-Anywhere CLI's version"),
+            version:    &v,
+            cliVersion: "v0.0.1",
+        },
     }

     for _, tc := range tests {
@@ -348,8 +356,12 @@ func TestValidateEksaVersion(t *testing.T) {
             tt.clusterSpec.Cluster.Spec.EksaVersion = tc.version
             ctx := context.Background()

+            if tc.workload {
+                tt.clusterSpec.Cluster.SetManagedBy("other")
+            }
+
             err := validations.ValidateEksaVersion(ctx, tc.cliVersion, tt.clusterSpec)
-            if err != nil {
+            if tc.wantErr != nil {
                 tt.Expect(err).To(MatchError(tc.wantErr))
             }
         })
@@ -524,3 +536,34 @@ func TestValidateK8s129SupportActive(t *testing.T) {
     os.Setenv(features.K8s129SupportEnvVar, "true")
     tt.Expect(validations.ValidateK8s129Support(tt.clusterSpec)).To(Succeed())
 }
+
+func TestValidateEksaReleaseExistOnManagement(t *testing.T) {
+    tests := []struct {
+        name    string
+        wantErr error
+    }{
+        {
+            name:    "success",
+            wantErr: nil,
+        },
+        {
+            name:    "not present",
+            wantErr: fmt.Errorf("not found"),
+        },
+    }
+    for _, tc := range tests {
+        t.Run(tc.name, func(t *testing.T) {
+            tt := newTest(t)
+            ctx := context.Background()
+            objs := []client.Object{}
+            if tc.wantErr == nil {
+                objs = append(objs, test.EKSARelease())
+            }
+            fakeClient := test.NewFakeKubeClient(objs...)
+            err := validations.ValidateEksaReleaseExistOnManagement(ctx, fakeClient, tt.clusterSpec.Cluster)
+            if err != nil {
+                tt.Expect(err.Error()).To(ContainSubstring(tc.wantErr.Error()))
+            }
+        })
+    }
+}
diff --git a/pkg/validations/createvalidations/preflightvalidations.go b/pkg/validations/createvalidations/preflightvalidations.go
index 7ea7b28feb81..916f823998db 100644
--- a/pkg/validations/createvalidations/preflightvalidations.go
+++ b/pkg/validations/createvalidations/preflightvalidations.go
@@ -6,6 +6,7 @@ import (

     anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
     "github.com/aws/eks-anywhere/pkg/config"
+    "github.com/aws/eks-anywhere/pkg/constants"
     "github.com/aws/eks-anywhere/pkg/features"
     "github.com/aws/eks-anywhere/pkg/types"
     "github.com/aws/eks-anywhere/pkg/validations"
@@ -105,6 +106,13 @@ func (v *CreateValidations) PreflightValidations(ctx context.Context) []validati
                 Err:         validations.ValidateManagementClusterEksaVersion(ctx, k, v.Opts.ManagementCluster, v.Opts.Spec),
             }
         },
+        func() *validations.ValidationResult {
+            return &validations.ValidationResult{
+                Name:        "validate eksa release components exist on management cluster",
+                Remediation: fmt.Sprintf("ensure eksaVersion is in the correct format (vMajor.Minor.Patch) and matches one of the available releases on the management cluster: kubectl get eksareleases -n %s --kubeconfig %s", constants.EksaSystemNamespace, v.Opts.ManagementCluster.KubeconfigFile),
+                Err:         validations.ValidateEksaReleaseExistOnManagement(ctx, v.Opts.KubeClient, v.Opts.Spec.Cluster),
+            }
+        },
     )
 }

diff --git a/pkg/validations/createvalidations/preflightvalidations_test.go b/pkg/validations/createvalidations/preflightvalidations_test.go
index 4996d8861bc1..8caa5ce21a43 100644
--- a/pkg/validations/createvalidations/preflightvalidations_test.go
+++ b/pkg/validations/createvalidations/preflightvalidations_test.go
@@ -7,6 +7,7 @@ import (
     "github.com/golang/mock/gomock"
     . "github.com/onsi/gomega"
     v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "sigs.k8s.io/controller-runtime/pkg/client"

     "github.com/aws/eks-anywhere/internal/test"
     "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
@@ -38,12 +39,14 @@ func newPreflightValidationsTest(t *testing.T) *preflightValidationsTest {
         }
     })
     version := "v0.0.0-dev"
+    objects := []client.Object{test.EKSARelease()}
     opts := &validations.Opts{
         Kubectl:           k,
         Spec:              clusterSpec,
         WorkloadCluster:   c,
         ManagementCluster: c,
         CliVersion:        version,
+        KubeClient:        test.NewFakeKubeClient(objects...),
     }
     return &preflightValidationsTest{
         WithT: NewWithT(t),
diff --git a/pkg/validations/upgradevalidations/preflightvalidations.go b/pkg/validations/upgradevalidations/preflightvalidations.go
index d361d4457b66..905de709db07 100644
--- a/pkg/validations/upgradevalidations/preflightvalidations.go
+++ b/pkg/validations/upgradevalidations/preflightvalidations.go
@@ -8,6 +8,7 @@ import (

     anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
     "github.com/aws/eks-anywhere/pkg/config"
+    "github.com/aws/eks-anywhere/pkg/constants"
     "github.com/aws/eks-anywhere/pkg/features"
     "github.com/aws/eks-anywhere/pkg/providers"
     "github.com/aws/eks-anywhere/pkg/types"
@@ -109,8 +110,8 @@ func (u *UpgradeValidations) PreflightValidations(ctx context.Context) []validat
         },
         func() *validations.ValidationResult {
             return &validations.ValidationResult{
-                Name:        "validate cluster's eksaVersion matches EKS-A Version",
-                Remediation: "ensure EksaVersion matches the EKS-A release or omit the value from the cluster config",
+                Name:        "validate cluster's eksaVersion matches EKS-Anywhere Version",
+                Remediation: "ensure eksaVersion matches the EKS-Anywhere release or omit the value from the cluster config",
                 Err:         validations.ValidateEksaVersion(ctx, u.Opts.CliVersion, u.Opts.Spec),
             }
         },
@@ -134,6 +135,13 @@ func (u *UpgradeValidations) PreflightValidations(ctx context.Context) []validat
                 Err:         validations.ValidateManagementClusterEksaVersion(ctx, k, u.Opts.ManagementCluster, u.Opts.Spec),
             }
         },
+        func() *validations.ValidationResult {
+            return &validations.ValidationResult{
+                Name:        "validate eksa release components exist on management cluster",
+                Remediation: fmt.Sprintf("ensure eksaVersion is in the correct format (vMajor.Minor.Patch) and matches one of the available releases on the management cluster: kubectl get eksareleases -n %s --kubeconfig %s", constants.EksaSystemNamespace, u.Opts.ManagementCluster.KubeconfigFile),
+                Err:         validations.ValidateEksaReleaseExistOnManagement(ctx, u.Opts.KubeClient, u.Opts.Spec.Cluster),
+            }
+        },
     )
 }

diff --git a/pkg/validations/upgradevalidations/preflightvalidations_test.go b/pkg/validations/upgradevalidations/preflightvalidations_test.go
index d8300f183872..5830b72dc7fb 100644
--- a/pkg/validations/upgradevalidations/preflightvalidations_test.go
+++ b/pkg/validations/upgradevalidations/preflightvalidations_test.go
@@ -8,6 +8,7 @@ import (
     "testing"

     "github.com/golang/mock/gomock"
+    "sigs.k8s.io/controller-runtime/pkg/client"

     "github.com/aws/eks-anywhere/internal/test"
     anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
@@ -1118,6 +1119,7 @@ func TestPreflightValidationsVsphere(t *testing.T) {
             if tc.modifyDefaultSpecFunc != nil {
                 tc.modifyDefaultSpecFunc(clusterSpec)
             }
+            objects := []client.Object{test.EKSARelease()}
             opts := &validations.Opts{
                 Kubectl: k,
                 Spec:    clusterSpec,
@@ -1126,6 +1128,7 @@ func TestPreflightValidationsVsphere(t *testing.T) {
                 Provider:     provider,
                 TLSValidator: tlsValidator,
                 CliVersion:   "v0.0.0-dev",
+                KubeClient:   test.NewFakeKubeClient(objects...),
             }

             clusterSpec.Cluster.Spec.KubernetesVersion = anywherev1.KubernetesVersion(tc.upgradeVersion)
diff --git a/pkg/validations/validation_options.go b/pkg/validations/validation_options.go
index 14829c2a2d05..70f9c298adf8 100644
--- a/pkg/validations/validation_options.go
+++ b/pkg/validations/validation_options.go
@@ -1,6 +1,7 @@
 package validations

 import (
+    "github.com/aws/eks-anywhere/pkg/clients/kubernetes"
     "github.com/aws/eks-anywhere/pkg/cluster"
     "github.com/aws/eks-anywhere/pkg/config"
     "github.com/aws/eks-anywhere/pkg/crypto"
@@ -19,6 +20,7 @@ type Opts struct {
     CliConfig          *config.CliConfig
     SkippedValidations map[string]bool
     CliVersion         string
+    KubeClient         kubernetes.Client
 }

 func (o *Opts) SetDefaults() {
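
For reviewers who want to exercise the new check in isolation, below is a minimal sketch (not part of this change) that drives `ValidateEksaReleaseExistOnManagement` against the fake-client helpers used by the tests in this diff. It assumes `test.EKSARelease()` is seeded in the eksa-system namespace under the name generated for `test.DevEksaVersion()`, which is what the unit tests above rely on; the test name and package are hypothetical.

```go
package validations_test

import (
	"context"
	"testing"

	"github.com/aws/eks-anywhere/internal/test"
	anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
	"github.com/aws/eks-anywhere/pkg/validations"
)

// TestEksaReleaseLookupSketch seeds a fake management-cluster client with the
// EKSARelease object used elsewhere in this change and expects the lookup to succeed.
func TestEksaReleaseLookupSketch(t *testing.T) {
	version := test.DevEksaVersion() // assumed to match the version test.EKSARelease() is keyed to
	workload := &anywherev1.Cluster{
		Spec: anywherev1.ClusterSpec{EksaVersion: &version},
	}

	// Without test.EKSARelease() in the fake client, the Get inside the
	// validation returns a NotFound error and the preflight check fails.
	fakeClient := test.NewFakeKubeClient(test.EKSARelease())

	if err := validations.ValidateEksaReleaseExistOnManagement(context.Background(), fakeClient, workload); err != nil {
		t.Fatalf("expected eksarelease for %s to exist on the management cluster: %v", version, err)
	}
}
```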