diff --git a/api/v1/infinispan_types.go b/api/v1/infinispan_types.go
index 69905f41d..78256abc8 100644
--- a/api/v1/infinispan_types.go
+++ b/api/v1/infinispan_types.go
@@ -623,6 +623,9 @@ const (
 )
 
 type OperandStatus struct {
+	// Whether the Operand installed/pending is using a custom image
+	// +optional
+	CustomImage bool `json:"customImage,omitempty"`
 	// Whether the Operand has been deprecated and is subject for removal in a subsequent release
 	// +optional
 	Deprecated bool `json:"deprecated,omitempty"`
diff --git a/config/crd/bases/infinispan.org_infinispans.yaml b/config/crd/bases/infinispan.org_infinispans.yaml
index 4db43ccac..d72ca10e9 100644
--- a/config/crd/bases/infinispan.org_infinispans.yaml
+++ b/config/crd/bases/infinispan.org_infinispans.yaml
@@ -2347,6 +2347,10 @@ spec:
             operand:
               description: The Operand status
               properties:
+                customImage:
+                  description: Whether the Operand installed/pending is using a
+                    custom image
+                  type: boolean
                 deprecated:
                   description: Whether the Operand has been deprecated and is subject
                     for removal in a subsequent release
diff --git a/pkg/reconcile/pipeline/infinispan/handler/manage/conditions.go b/pkg/reconcile/pipeline/infinispan/handler/manage/conditions.go
index fb01a1d95..8ee90e7c0 100644
--- a/pkg/reconcile/pipeline/infinispan/handler/manage/conditions.go
+++ b/pkg/reconcile/pipeline/infinispan/handler/manage/conditions.go
@@ -250,17 +250,19 @@ func getCrossSiteViewCondition(ctx pipeline.Context, podList *corev1.PodList, si
 }
 
 func OperandStatus(i *ispnv1.Infinispan, phase ispnv1.OperandPhase, operand version.Operand) ispnv1.OperandStatus {
+	customImg := i.Spec.Image != nil
 	var img string
-	if i.Spec.Image != nil {
+	if customImg {
 		img = *i.Spec.Image
 	} else {
 		img = operand.Image
 	}
 	return ispnv1.OperandStatus{
-		Deprecated: operand.Deprecated,
-		Image:      img,
-		Phase:      phase,
-		Version:    operand.Ref(),
+		CustomImage: customImg,
+		Deprecated:  operand.Deprecated,
+		Image:       img,
+		Phase:       phase,
+		Version:     operand.Ref(),
 	}
 }
 
diff --git a/pkg/reconcile/pipeline/infinispan/handler/manage/upgrades.go b/pkg/reconcile/pipeline/infinispan/handler/manage/upgrades.go
index 6b429fa2f..525c638ab 100644
--- a/pkg/reconcile/pipeline/infinispan/handler/manage/upgrades.go
+++ b/pkg/reconcile/pipeline/infinispan/handler/manage/upgrades.go
@@ -84,12 +84,29 @@ func UpgradeRequired(i *ispnv1.Infinispan, ctx pipeline.Context) bool {
 			return true
 		}
 
+		// Don't schedule an upgrade if one is already in progress
+		if i.Status.Operand.Phase == ispnv1.OperandPhasePending {
+			return false
+		}
+
 		// If the Operand is marked as a CVE base-image release, then we perform the upgrade as a StatefulSet rolling upgrade
 		// as the server components are not changed.
-		if requestedOperand.CVE && installedOperand.UpstreamVersion.EQ(*requestedOperand.UpstreamVersion) {
+		customImage := i.Status.Operand.CustomImage
+		if !customImage && requestedOperand.CVE && installedOperand.UpstreamVersion.EQ(*requestedOperand.UpstreamVersion) {
 			return false
 		}
-		return !requestedOperand.EQ(installedOperand)
+
+		if requestedOperand.EQ(installedOperand) {
+			if i.Spec.Image == nil {
+				// If the currently installed Operand was a custom image, but spec.Image is now nil, then we need to
+				// initiate a new upgrade to ensure that the default image associated with the Operand is installed
+				return customImage
+			}
+			// If operand versions match, but the FQN of the image differ, then we must schedule an upgrade so the user
+			// can transition to a custom/patched version of the Operand without having to recreate the Infinispan CR.
+			// NOTE(review): compare against i.Status.Operand.Image (the image actually installed), not
+			// installedOperand.Image (the version manager's default image for this Operand) — comparing against the
+			// default would report an upgrade as required on every reconcile while a custom image is in use.
+			return *i.Spec.Image != i.Status.Operand.Image
+		}
+		return true
 	}
 }
 
diff --git a/test/e2e/infinispan/upgrade_operand_test.go b/test/e2e/infinispan/upgrade_operand_test.go
index 9e6c2bef7..2706550e3 100644
--- a/test/e2e/infinispan/upgrade_operand_test.go
+++ b/test/e2e/infinispan/upgrade_operand_test.go
@@ -9,6 +9,7 @@ import (
 	"github.com/infinispan/infinispan-operator/pkg/kubernetes"
 	"github.com/infinispan/infinispan-operator/pkg/reconcile/pipeline/infinispan/handler/provision"
 	tutils "github.com/infinispan/infinispan-operator/test/e2e/utils"
+	"k8s.io/utils/pointer"
 
 	"github.com/stretchr/testify/assert"
 	appsv1 "k8s.io/api/apps/v1"
@@ -210,3 +211,83 @@ func TestOperandCVEHotRodRolling(t *testing.T) {
 	}
 	genericTestForContainerUpdated(*spec, modifier, verifier)
 }
+
+func TestSpecImageUpdate(t *testing.T) {
+	defer testKube.CleanNamespaceAndLogOnPanic(t, tutils.Namespace)
+
+	// Create Infinispan Cluster using the penultimate Operand release as this will have a different image name to the latest
+	// release. We can then manually specify spec.image using the FQN of the latest image to simulate a user specifying
+	// custom images
+	replicas := 1
+	versionManager := tutils.VersionManager()
+	operand := versionManager.Operands[len(versionManager.Operands)-2]
+	spec := tutils.DefaultSpec(t, testKube, func(i *ispnv1.Infinispan) {
+		i.Spec.Replicas = int32(replicas)
+		i.Spec.Version = operand.Ref()
+	})
+
+	testKube.CreateInfinispan(spec, tutils.Namespace)
+	testKube.WaitForInfinispanPods(replicas, tutils.SinglePodTimeout, spec.Name, tutils.Namespace)
+	ispn := testKube.WaitForInfinispanCondition(spec.Name, spec.Namespace, ispnv1.ConditionWellFormed)
+
+	customImage := versionManager.Latest().Image
+	tutils.ExpectNoError(
+		testKube.UpdateInfinispan(ispn, func() {
+			// Update the spec to install the custom image
+			ispn.Spec.Image = pointer.String(customImage)
+		}),
+	)
+	testKube.WaitForInfinispanState(spec.Name, spec.Namespace, func(i *ispnv1.Infinispan) bool {
+		return !i.IsConditionTrue(ispnv1.ConditionWellFormed) &&
+			i.Status.Operand.Version == operand.Ref() &&
+			i.Status.Operand.Image == customImage &&
+			i.Status.Operand.Phase == ispnv1.OperandPhasePending
+	})
+
+	testKube.WaitForInfinispanState(spec.Name, spec.Namespace, func(i *ispnv1.Infinispan) bool {
+		return i.IsConditionTrue(ispnv1.ConditionWellFormed) &&
+			i.Status.Operand.Version == operand.Ref() &&
+			i.Status.Operand.Image == customImage &&
+			i.Status.Operand.Phase == ispnv1.OperandPhaseRunning
+	})
+
+	// Ensure that the newly created cluster pods have the correct Operand image
+	podList := &corev1.PodList{}
+	tutils.ExpectNoError(testKube.Kubernetes.ResourcesList(tutils.Namespace, ispn.PodSelectorLabels(), podList, context.TODO()))
+	for _, pod := range podList.Items {
+		container := kubernetes.GetContainer(provision.InfinispanContainer, &pod.Spec)
+		assert.Equal(t, customImage, container.Image)
+	}
+
+	// Ensure that the StatefulSet is on its first generation, i.e. a RollingUpgrade has not been performed
+	ss := appsv1.StatefulSet{}
+	tutils.ExpectNoError(testKube.Kubernetes.Client.Get(context.TODO(), types.NamespacedName{Namespace: ispn.Namespace, Name: ispn.GetStatefulSetName()}, &ss))
+	assert.EqualValues(t, 1, ss.Status.ObservedGeneration)
+
+	latestOperand := versionManager.Latest()
+	tutils.ExpectNoError(
+		testKube.UpdateInfinispan(ispn, func() {
+			// Update the spec to move to the latest Operand version to ensure that a new GracefulShutdown is triggered
+			ispn.Spec.Image = nil
+			ispn.Spec.Version = latestOperand.Ref()
+		}),
+	)
+	testKube.WaitForInfinispanState(spec.Name, spec.Namespace, func(i *ispnv1.Infinispan) bool {
+		return !i.IsConditionTrue(ispnv1.ConditionWellFormed) &&
+			i.Status.Operand.Version == latestOperand.Ref() &&
+			i.Status.Operand.Image == latestOperand.Image &&
+			i.Status.Operand.Phase == ispnv1.OperandPhasePending
+	})
+
+	testKube.WaitForInfinispanState(spec.Name, spec.Namespace, func(i *ispnv1.Infinispan) bool {
+		return i.IsConditionTrue(ispnv1.ConditionWellFormed) &&
+			i.Status.Operand.Version == latestOperand.Ref() &&
+			i.Status.Operand.Image == latestOperand.Image &&
+			i.Status.Operand.Phase == ispnv1.OperandPhaseRunning
+	})
+
+	// Ensure that the StatefulSet is on its first generation, i.e. a RollingUpgrade has not been performed
+	ss = appsv1.StatefulSet{}
+	tutils.ExpectNoError(testKube.Kubernetes.Client.Get(context.TODO(), types.NamespacedName{Namespace: ispn.Namespace, Name: ispn.GetStatefulSetName()}, &ss))
+	assert.EqualValues(t, 1, ss.Status.ObservedGeneration)
+}