Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Verify eksarelease exists on mgmt cluster #7231

Merged
merged 3 commits into from
Jan 2, 2024
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 4 additions & 1 deletion cmd/eksctl-anywhere/cmd/createcluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -213,17 +213,20 @@
deps.PackageInstaller,
)

mgmt := getManagementCluster(clusterSpec)

Check warning on line 217 in cmd/eksctl-anywhere/cmd/createcluster.go

View check run for this annotation

Codecov / codecov/patch

cmd/eksctl-anywhere/cmd/createcluster.go#L216-L217

Added lines #L216 - L217 were not covered by tests
validationOpts := &validations.Opts{
Kubectl: deps.UnAuthKubectlClient,
Spec: clusterSpec,
WorkloadCluster: &types.Cluster{
Name: clusterSpec.Cluster.Name,
KubeconfigFile: kubeconfig.FromClusterName(clusterSpec.Cluster.Name),
},
ManagementCluster: getManagementCluster(clusterSpec),
ManagementCluster: mgmt,

Check warning on line 225 in cmd/eksctl-anywhere/cmd/createcluster.go

View check run for this annotation

Codecov / codecov/patch

cmd/eksctl-anywhere/cmd/createcluster.go#L225

Added line #L225 was not covered by tests
Provider: deps.Provider,
CliConfig: cliConfig,
SkippedValidations: skippedValidations,
KubeClient: deps.UnAuthKubeClient.KubeconfigClient(mgmt.KubeconfigFile),

Check warning on line 229 in cmd/eksctl-anywhere/cmd/createcluster.go

View check run for this annotation

Codecov / codecov/patch

cmd/eksctl-anywhere/cmd/createcluster.go#L229

Added line #L229 was not covered by tests
}
createValidations := createvalidations.New(validationOpts)

Expand Down
1 change: 1 addition & 0 deletions cmd/eksctl-anywhere/cmd/upgradecluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -194,6 +194,7 @@
Provider: deps.Provider,
CliConfig: cliConfig,
SkippedValidations: skippedValidations,
KubeClient: deps.UnAuthKubeClient.KubeconfigClient(managementCluster.KubeconfigFile),

Check warning on line 197 in cmd/eksctl-anywhere/cmd/upgradecluster.go

View check run for this annotation

Codecov / codecov/patch

cmd/eksctl-anywhere/cmd/upgradecluster.go#L197

Added line #L197 was not covered by tests
}

upgradeValidations := upgradevalidations.New(validationOpts)
Expand Down
21 changes: 21 additions & 0 deletions controllers/cluster_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -375,6 +375,10 @@
}
}

if err := validateEksaRelease(ctx, r.client, cluster); err != nil {
return controller.Result{}, err
}

if cluster.RegistryAuth() {
rUsername, rPassword, err := config.ReadCredentialsFromSecret(ctx, r.client)
if err != nil {
Expand Down Expand Up @@ -611,3 +615,20 @@
clus.Status.FailureMessage = ptr.String("Management cluster must have either EksaVersion or BundlesRef")
return fmt.Errorf("could not set default values")
}

func validateEksaRelease(ctx context.Context, client client.Client, cluster *anywherev1.Cluster) error {
if cluster.Spec.EksaVersion == nil {
return nil
}
err := validations.ValidateEksaReleaseExistOnManagement(ctx, clientutil.NewKubeClient(client), cluster)
if apierrors.IsNotFound(err) {
errMsg := fmt.Sprintf("eksarelease %v could not be found on the management cluster", *cluster.Spec.EksaVersion)
reason := anywherev1.EksaVersionInvalidReason
cluster.Status.FailureMessage = ptr.String(errMsg)
cluster.Status.FailureReason = &reason
return err
} else if err != nil {
return err
}

Check warning on line 632 in controllers/cluster_controller.go

View check run for this annotation

Codecov / codecov/patch

controllers/cluster_controller.go#L631-L632

Added lines #L631 - L632 were not covered by tests
return nil
}
68 changes: 61 additions & 7 deletions controllers/cluster_controller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -180,7 +180,7 @@ func TestClusterReconcilerReconcileSelfManagedCluster(t *testing.T) {

clusterValidator := mocks.NewMockClusterValidator(controller)
registry := newRegistryMock(providerReconciler)
c := fake.NewClientBuilder().WithRuntimeObjects(selfManagedCluster, kcp).Build()
c := fake.NewClientBuilder().WithRuntimeObjects(selfManagedCluster, kcp, test.EKSARelease()).Build()
mockPkgs := mocks.NewMockPackagesClient(controller)
providerReconciler.EXPECT().Reconcile(ctx, gomock.AssignableToTypeOf(logr.Logger{}), sameName(selfManagedCluster))
mhcReconciler.EXPECT().Reconcile(ctx, gomock.AssignableToTypeOf(logr.Logger{}), sameName(selfManagedCluster)).Return(nil)
Expand Down Expand Up @@ -432,7 +432,7 @@ func TestClusterReconcilerReconcileConditions(t *testing.T) {
md.Status = tt.machineDeploymentStatus
})

objs = append(objs, config.Cluster, bundles, kcp, md1, mgmt.Cluster)
objs = append(objs, config.Cluster, bundles, kcp, md1, mgmt.Cluster, test.EKSARelease())

for _, o := range config.ChildObjects() {
objs = append(objs, o)
Expand Down Expand Up @@ -707,7 +707,7 @@ func TestClusterReconcilerReconcileSelfManagedClusterConditions(t *testing.T) {
md.Status = tt.machineDeploymentStatus
})

objs = append(objs, config.Cluster, bundles, kcp, md1)
objs = append(objs, config.Cluster, bundles, kcp, md1, test.EKSARelease())
for _, o := range config.ChildObjects() {
objs = append(objs, o)
}
Expand Down Expand Up @@ -873,7 +873,7 @@ func TestClusterReconcilerReconcileGenerations(t *testing.T) {
ctx := context.Background()

objs := make([]runtime.Object, 0, 7+len(machineDeployments))
objs = append(objs, config.Cluster, bundles)
objs = append(objs, config.Cluster, bundles, test.EKSARelease())
for _, o := range config.ChildObjects() {
objs = append(objs, o)
}
Expand Down Expand Up @@ -1039,7 +1039,7 @@ func TestClusterReconcilerReconcileSelfManagedClusterRegAuthFailNoSecret(t *test
mhcReconciler := mocks.NewMockMachineHealthCheckReconciler(controller)

registry := newRegistryMock(providerReconciler)
c := fake.NewClientBuilder().WithRuntimeObjects(selfManagedCluster).Build()
c := fake.NewClientBuilder().WithRuntimeObjects(selfManagedCluster, test.EKSARelease()).Build()

r := controllers.NewClusterReconciler(c, registry, iam, clusterValidator, nil, mhcReconciler)
_, err := r.Reconcile(ctx, clusterRequest(selfManagedCluster))
Expand Down Expand Up @@ -1255,7 +1255,7 @@ func TestClusterReconcilerSkipDontInstallPackagesOnSelfManaged(t *testing.T) {
ReconciledGeneration: 1,
},
}
objs := []runtime.Object{cluster}
objs := []runtime.Object{cluster, test.EKSARelease()}
cb := fake.NewClientBuilder()
mockClient := cb.WithRuntimeObjects(objs...).Build()
nullRegistry := newRegistryForDummyProviderReconciler()
Expand Down Expand Up @@ -1429,7 +1429,7 @@ func TestClusterReconcilerPackagesInstall(s *testing.T) {
}
mgmt := cluster.DeepCopy()
mgmt.Name = "my-management-cluster"
objs := []runtime.Object{cluster, bundles, secret, mgmt}
objs := []runtime.Object{cluster, bundles, secret, mgmt, test.EKSARelease()}
fakeClient := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build()
nullRegistry := newRegistryForDummyProviderReconciler()
mockIAM := mocks.NewMockAWSIamConfigReconciler(ctrl)
Expand Down Expand Up @@ -1498,6 +1498,60 @@ func TestClusterReconcilerValidateManagementEksaVersionFail(t *testing.T) {
g.Expect(err).To(HaveOccurred())
}

func TestClusterReconcilerNotAvailableEksaVersion(t *testing.T) {
version := test.DevEksaVersion()
config, _ := baseTestVsphereCluster()
config.Cluster.Name = "test-cluster"
config.Cluster.Spec.ManagementCluster = anywherev1.ManagementCluster{Name: "management-cluster"}
config.Cluster.Spec.BundlesRef = nil
config.Cluster.Spec.EksaVersion = &version

mgmt := config.DeepCopy()
mgmt.Cluster.Name = "management-cluster"
mgmt.Cluster.Spec.BundlesRef = nil
mgmt.Cluster.Spec.EksaVersion = &version

g := NewWithT(t)

objs := make([]runtime.Object, 0, 4+len(config.ChildObjects()))
objs = append(objs, config.Cluster, mgmt.Cluster)

for _, o := range config.ChildObjects() {
objs = append(objs, o)
}

testClient := fake.NewClientBuilder().WithRuntimeObjects(objs...).Build()

mockCtrl := gomock.NewController(t)
providerReconciler := mocks.NewMockProviderClusterReconciler(mockCtrl)
iam := mocks.NewMockAWSIamConfigReconciler(mockCtrl)
clusterValidator := mocks.NewMockClusterValidator(mockCtrl)
registry := newRegistryMock(providerReconciler)
mockPkgs := mocks.NewMockPackagesClient(mockCtrl)

ctx := context.Background()
log := testr.New(t)
logCtx := ctrl.LoggerInto(ctx, log)

iam.EXPECT().EnsureCASecret(logCtx, gomock.AssignableToTypeOf(logr.Logger{}), sameName(config.Cluster)).Return(controller.Result{}, nil)
clusterValidator.EXPECT().ValidateManagementClusterName(logCtx, gomock.AssignableToTypeOf(logr.Logger{}), sameName(config.Cluster)).Return(nil)

r := controllers.NewClusterReconciler(testClient, registry, iam, clusterValidator, mockPkgs, nil)

req := clusterRequest(config.Cluster)
_, err := r.Reconcile(logCtx, req)

g.Expect(err).To(HaveOccurred())
tatlat marked this conversation as resolved.
Show resolved Hide resolved
eksaCluster := &anywherev1.Cluster{}

err = testClient.Get(context.TODO(), req.NamespacedName, eksaCluster)
if apierrors.IsNotFound(err) {
t.Fatal("expected to find cluster")
}
tatlat marked this conversation as resolved.
Show resolved Hide resolved
g.Expect(eksaCluster.Status.FailureReason).ToNot(BeNil())
g.Expect(string(*eksaCluster.Status.FailureReason)).To(ContainSubstring(string(anywherev1.EksaVersionInvalidReason)))
tatlat marked this conversation as resolved.
Show resolved Hide resolved
}

func vsphereWorkerMachineConfig() *anywherev1.VSphereMachineConfig {
return &anywherev1.VSphereMachineConfig{
TypeMeta: metav1.TypeMeta{
Expand Down
20 changes: 10 additions & 10 deletions controllers/cluster_controller_test_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,16 +33,19 @@ import (
func TestClusterReconcilerEnsureOwnerReferences(t *testing.T) {
g := NewWithT(t)
ctx := context.Background()
version := test.DevEksaVersion()
bundlesRef := &anywherev1.BundlesRef{
Name: "my-bundles-ref",
Namespace: "my-namespace",
}

managementCluster := &anywherev1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "my-management-cluster",
Namespace: "my-namespace",
},
Spec: anywherev1.ClusterSpec{
BundlesRef: &anywherev1.BundlesRef{
Name: "my-bundles-ref",
},
EksaVersion: &version,
},
Status: anywherev1.ClusterStatus{
ReconciledGeneration: 1,
Expand All @@ -56,10 +59,7 @@ func TestClusterReconcilerEnsureOwnerReferences(t *testing.T) {
},
Spec: anywherev1.ClusterSpec{
KubernetesVersion: "v1.25",
BundlesRef: &anywherev1.BundlesRef{
Name: "my-bundles-ref",
Namespace: "my-namespace",
},
EksaVersion: &version,
},
Status: anywherev1.ClusterStatus{
ReconciledGeneration: 1,
Expand Down Expand Up @@ -118,7 +118,7 @@ func TestClusterReconcilerEnsureOwnerReferences(t *testing.T) {
Namespace: constants.EksaSystemNamespace,
},
}
objs := []runtime.Object{cluster, managementCluster, oidc, awsIAM, bundles, secret}
objs := []runtime.Object{cluster, managementCluster, oidc, awsIAM, bundles, secret, test.EKSARelease()}
cb := fake.NewClientBuilder()
cl := cb.WithRuntimeObjects(objs...).Build()

Expand All @@ -138,7 +138,7 @@ func TestClusterReconcilerEnsureOwnerReferences(t *testing.T) {
r := controllers.NewClusterReconciler(cl, newRegistryForDummyProviderReconciler(), iam, validator, pcc, mhc)
_, err := r.Reconcile(ctx, clusterRequest(cluster))

g.Expect(cl.Get(ctx, client.ObjectKey{Namespace: cluster.Spec.BundlesRef.Namespace, Name: cluster.Spec.BundlesRef.Name}, bundles)).To(Succeed())
g.Expect(cl.Get(ctx, client.ObjectKey{Namespace: bundlesRef.Namespace, Name: bundlesRef.Name}, bundles)).To(Succeed())
g.Expect(cl.Get(ctx, client.ObjectKey{Namespace: constants.EksaSystemNamespace, Name: cluster.Name + "-kubeconfig"}, secret)).To(Succeed())

g.Expect(err).NotTo(HaveOccurred())
Expand Down Expand Up @@ -359,7 +359,7 @@ func TestClusterReconcilerSetDefaultEksaVersion(t *testing.T) {
}
cluster.SetManagedBy("my-management-cluster")

objs := []runtime.Object{cluster, managementCluster}
objs := []runtime.Object{cluster, managementCluster, test.EKSARelease()}
cb := fake.NewClientBuilder()
cl := cb.WithRuntimeObjects(objs...).Build()

Expand Down
15 changes: 14 additions & 1 deletion pkg/validations/cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,14 +6,17 @@ import (
"fmt"

"github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/clients/kubernetes"
"github.com/aws/eks-anywhere/pkg/cluster"
"github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/features"
"github.com/aws/eks-anywhere/pkg/logger"
"github.com/aws/eks-anywhere/pkg/providers"
"github.com/aws/eks-anywhere/pkg/semver"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/utils/ptr"
releasev1alpha1 "github.com/aws/eks-anywhere/release/api/v1alpha1"
)

// ValidateOSForRegistryMirror checks if the OS is valid for the provided registry mirror configuration.
Expand Down Expand Up @@ -113,7 +116,7 @@ func ValidateEksaVersion(ctx context.Context, cliVersion string, workload *clust
}

if !parsedVersion.SamePatch(parsedCLIVersion) {
return fmt.Errorf("cluster's eksaVersion does not match EKS-A CLI's version")
return fmt.Errorf("cluster's eksaVersion does not match EKS-Anywhere CLI's version")
}

return nil
Expand Down Expand Up @@ -215,3 +218,13 @@ func ValidateK8s129Support(clusterSpec *cluster.Spec) error {
}
return nil
}

// ValidateEksaReleaseExistOnManagement checks if there is a corresponding
// eksareleases CR for the workload cluster's eksaVersion on the management
// cluster. It returns nil when the release exists (or when no EksaVersion is
// set), and the underlying Get error otherwise — callers use
// apierrors.IsNotFound to distinguish a missing release.
func ValidateEksaReleaseExistOnManagement(ctx context.Context, k kubernetes.Client, workload *v1alpha1.Cluster) error {
	v := workload.Spec.EksaVersion
	if v == nil {
		// Nothing to verify without a version; BundlesRef-based clusters
		// are validated elsewhere.
		return nil
	}
	name := releasev1alpha1.GenerateEKSAReleaseName(string(*v))
	return k.Get(ctx, name, constants.EksaSystemNamespace, &releasev1alpha1.EKSARelease{})
}
45 changes: 44 additions & 1 deletion pkg/validations/cluster_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ import (
"github.com/golang/mock/gomock"
. "github.com/onsi/gomega"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"

"github.com/aws/eks-anywhere/internal/test"
anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
Expand Down Expand Up @@ -320,6 +321,7 @@ func TestValidateEksaVersion(t *testing.T) {
wantErr error
version *anywherev1.EksaVersion
cliVersion string
workload bool
}{
{
name: "Success",
Expand All @@ -339,6 +341,12 @@ func TestValidateEksaVersion(t *testing.T) {
version: &v,
cliVersion: "badvalue",
},
{
name: "Mismatch",
wantErr: fmt.Errorf("cluster's eksaVersion does not match EKS-Anywhere CLI's version"),
version: &v,
cliVersion: "v0.0.1",
},
}

for _, tc := range tests {
Expand All @@ -348,8 +356,12 @@ func TestValidateEksaVersion(t *testing.T) {
tt.clusterSpec.Cluster.Spec.EksaVersion = tc.version
ctx := context.Background()

if tc.workload {
tt.clusterSpec.Cluster.SetManagedBy("other")
}

err := validations.ValidateEksaVersion(ctx, tc.cliVersion, tt.clusterSpec)
if err != nil {
if tc.wantErr != nil {
tt.Expect(err).To(MatchError(tc.wantErr))
}
})
Expand Down Expand Up @@ -524,3 +536,34 @@ func TestValidateK8s129SupportActive(t *testing.T) {
os.Setenv(features.K8s129SupportEnvVar, "true")
tt.Expect(validations.ValidateK8s129Support(tt.clusterSpec)).To(Succeed())
}

// TestValidateEksaReleaseExistOnManagement covers both outcomes of the
// eksareleases lookup: the release object present (no error) and absent
// (NotFound error surfaced to the caller).
func TestValidateEksaReleaseExistOnManagement(t *testing.T) {
	tests := []struct {
		name    string
		wantErr error
	}{
		{
			name:    "success",
			wantErr: nil,
		},
		{
			name:    "not present",
			wantErr: fmt.Errorf("not found"),
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			tt := newTest(t)
			ctx := context.Background()
			// Seed the EKSARelease object only for the success case.
			objs := []client.Object{}
			if tc.wantErr == nil {
				objs = append(objs, test.EKSARelease())
			}
			fakeClient := test.NewFakeKubeClient(objs...)
			err := validations.ValidateEksaReleaseExistOnManagement(ctx, fakeClient, tt.clusterSpec.Cluster)
			// Assert both directions: the original only inspected err when it
			// was non-nil, silently passing if an expected error never came.
			if tc.wantErr == nil {
				tt.Expect(err).NotTo(HaveOccurred())
			} else {
				tt.Expect(err).To(HaveOccurred())
				tt.Expect(err.Error()).To(ContainSubstring(tc.wantErr.Error()))
			}
		})
	}
}
8 changes: 8 additions & 0 deletions pkg/validations/createvalidations/preflightvalidations.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ import (

anywherev1 "github.com/aws/eks-anywhere/pkg/api/v1alpha1"
"github.com/aws/eks-anywhere/pkg/config"
"github.com/aws/eks-anywhere/pkg/constants"
"github.com/aws/eks-anywhere/pkg/features"
"github.com/aws/eks-anywhere/pkg/types"
"github.com/aws/eks-anywhere/pkg/validations"
Expand Down Expand Up @@ -105,6 +106,13 @@ func (v *CreateValidations) PreflightValidations(ctx context.Context) []validati
Err: validations.ValidateManagementClusterEksaVersion(ctx, k, v.Opts.ManagementCluster, v.Opts.Spec),
}
},
func() *validations.ValidationResult {
return &validations.ValidationResult{
Name: "validate eksa release components exist on management cluster",
Remediation: fmt.Sprintf("ensure eksaVersion is in the correct format (vMajor.Minor.Patch) and matches one of the available releases on the management cluster: kubectl get eksareleases -n %s --kubeconfig %s", constants.EksaSystemNamespace, v.Opts.ManagementCluster.KubeconfigFile),
Err: validations.ValidateEksaReleaseExistOnManagement(ctx, v.Opts.KubeClient, v.Opts.Spec.Cluster),
}
},
)
}

Expand Down
Loading