diff --git a/.github/workflows/unit-tests.yaml b/.github/workflows/unit-tests.yaml
index cff0d30b93b..b7f470a58d6 100644
--- a/.github/workflows/unit-tests.yaml
+++ b/.github/workflows/unit-tests.yaml
@@ -8,12 +8,21 @@ on:
pull_request:
jobs:
unit-test:
+ name: Run tests and collect coverage
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v4
- - name: Setup Go
- uses: actions/setup-go@v4
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Set up Go
+ uses: actions/setup-go@v5
with:
go-version-file: go.mod
+
- name: Run Unit Tests
run: make unit-test
+
+ - name: Upload results to Codecov
+ uses: codecov/codecov-action@v4
+ with:
+ token: ${{ secrets.CODECOV_TOKEN }}
diff --git a/Makefile b/Makefile
index f99ed38d6b0..9fdf869a51b 100644
--- a/Makefile
+++ b/Makefile
@@ -3,7 +3,7 @@
# To re-generate a bundle for another specific version without changing the standard setup, you can:
# - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2)
# - use environment variables to overwrite this value (e.g export VERSION=0.0.2)
-VERSION ?= 2.18.2
+VERSION ?= 2.19.0
# IMAGE_TAG_BASE defines the opendatahub.io namespace and part of the image name for remote images.
# This variable is used to construct full image tags for bundle and catalog images.
#
@@ -281,6 +281,7 @@ bundle: prepare operator-sdk ## Generate bundle manifests and metadata, then val
$(KUSTOMIZE) build config/manifests | $(OPERATOR_SDK) generate bundle $(BUNDLE_GEN_FLAGS)
$(OPERATOR_SDK) bundle validate ./$(BUNDLE_DIR)
mv bundle.Dockerfile Dockerfiles/
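+	# drop the generated webhook Service manifest from the bundle; OLM creates the webhook Service from the CSV webhookdefinitions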
+ rm -f bundle/manifests/opendatahub-operator-webhook-service_v1_service.yaml
.PHONY: bundle-build
bundle-build: bundle
diff --git a/apis/dscinitialization/v1/dscinitialization_types.go b/apis/dscinitialization/v1/dscinitialization_types.go
index 821df411fe4..50f758a5df8 100644
--- a/apis/dscinitialization/v1/dscinitialization_types.go
+++ b/apis/dscinitialization/v1/dscinitialization_types.go
@@ -34,6 +34,8 @@ type DSCInitializationSpec struct {
// +kubebuilder:default:=opendatahub
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="ApplicationsNamespace is immutable"
// +operator-sdk:csv:customresourcedefinitions:type=spec,order=1
+ // +kubebuilder:validation:Pattern="^([a-z0-9]([-a-z0-9]*[a-z0-9])?)?$"
+ // +kubebuilder:validation:MaxLength=63
ApplicationsNamespace string `json:"applicationsNamespace"`
// Enable monitoring on specified namespace
// +operator-sdk:csv:customresourcedefinitions:type=spec,order=2
@@ -68,8 +70,10 @@ type Monitoring struct {
// or if it is installed, the operator will try to remove it.
// +kubebuilder:validation:Enum=Managed;Removed
ManagementState operatorv1.ManagementState `json:"managementState,omitempty"`
- // +kubebuilder:default=opendatahub
// Namespace for monitoring if it is enabled
+ // +kubebuilder:default=opendatahub
+ // +kubebuilder:validation:Pattern="^([a-z0-9]([-a-z0-9]*[a-z0-9])?)?$"
+ // +kubebuilder:validation:MaxLength=63
Namespace string `json:"namespace,omitempty"`
}
diff --git a/bundle/manifests/dscinitialization.opendatahub.io_dscinitializations.yaml b/bundle/manifests/dscinitialization.opendatahub.io_dscinitializations.yaml
index 218d3e4a697..c91bac9ea2a 100644
--- a/bundle/manifests/dscinitialization.opendatahub.io_dscinitializations.yaml
+++ b/bundle/manifests/dscinitialization.opendatahub.io_dscinitializations.yaml
@@ -56,6 +56,8 @@ spec:
default: opendatahub
description: Namespace for applications to be installed, non-configurable,
default to "opendatahub"
+ maxLength: 63
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?)?$
type: string
x-kubernetes-validations:
- message: ApplicationsNamespace is immutable
@@ -96,6 +98,8 @@ spec:
namespace:
default: opendatahub
description: Namespace for monitoring if it is enabled
+ maxLength: 63
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?)?$
type: string
type: object
serviceMesh:
diff --git a/bundle/manifests/opendatahub-operator-webhook-service_v1_service.yaml b/bundle/manifests/opendatahub-operator-webhook-service_v1_service.yaml
deleted file mode 100644
index a3b1df4b380..00000000000
--- a/bundle/manifests/opendatahub-operator-webhook-service_v1_service.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
- annotations:
- service.beta.openshift.io/inject-cabundle: "true"
- service.beta.openshift.io/serving-cert-secret-name: opendatahub-operator-controller-webhook-cert
- creationTimestamp: null
- labels:
- app.kubernetes.io/created-by: opendatahub-operator
- app.kubernetes.io/managed-by: kustomize
- app.kubernetes.io/part-of: opendatahub-operator
- name: opendatahub-operator-webhook-service
-spec:
- ports:
- - port: 443
- protocol: TCP
- targetPort: 9443
- selector:
- control-plane: controller-manager
-status:
- loadBalancer: {}
diff --git a/bundle/manifests/opendatahub-operator.clusterserviceversion.yaml b/bundle/manifests/opendatahub-operator.clusterserviceversion.yaml
index 81b13c83f14..1519a9ef8ca 100644
--- a/bundle/manifests/opendatahub-operator.clusterserviceversion.yaml
+++ b/bundle/manifests/opendatahub-operator.clusterserviceversion.yaml
@@ -102,13 +102,13 @@ metadata:
capabilities: Full Lifecycle
categories: AI/Machine Learning, Big Data
certified: "False"
- containerImage: quay.io/opendatahub/opendatahub-operator:v2.18.2
- createdAt: "2024-09-24T15:16:50Z"
- olm.skipRange: '>=1.0.0 <2.18.2'
+ containerImage: quay.io/opendatahub/opendatahub-operator:v2.19.0
+ createdAt: "2024-10-09T14:46:54Z"
+ olm.skipRange: '>=1.0.0 <2.19.0'
operators.operatorframework.io/builder: operator-sdk-v1.31.0
operators.operatorframework.io/project_layout: go.kubebuilder.io/v3
repository: https://github.com/opendatahub-io/opendatahub-operator
- name: opendatahub-operator.v2.18.2
+ name: opendatahub-operator.v2.19.0
namespace: placeholder
spec:
apiservicedefinitions: {}
@@ -1091,6 +1091,8 @@ spec:
fieldPath: metadata.namespace
- name: DEFAULT_MANIFESTS_PATH
value: /opt/manifests
+ - name: ODH_PLATFORM_TYPE
+ value: OpenDataHub
image: REPLACE_IMAGE:latest
imagePullPolicy: Always
livenessProbe:
@@ -1176,7 +1178,7 @@ spec:
selector:
matchLabels:
component: opendatahub-operator
- version: 2.18.2
+ version: 2.19.0
webhookdefinitions:
- admissionReviewVersions:
- v1
diff --git a/components/codeflare/codeflare.go b/components/codeflare/codeflare.go
index 758b18d84dd..5e731c28ba4 100644
--- a/components/codeflare/codeflare.go
+++ b/components/codeflare/codeflare.go
@@ -12,6 +12,7 @@ import (
operatorv1 "github.com/openshift/api/operator/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
+ logf "sigs.k8s.io/controller-runtime/pkg/log"
dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1"
"github.com/opendatahub-io/opendatahub-operator/v2/components"
@@ -35,6 +36,20 @@ type CodeFlare struct {
components.Component `json:""`
}
+func (c *CodeFlare) Init(ctx context.Context, _ cluster.Platform) error {
+ log := logf.FromContext(ctx).WithName(ComponentName)
+
+ var imageParamMap = map[string]string{
+ "codeflare-operator-controller-image": "RELATED_IMAGE_ODH_CODEFLARE_OPERATOR_IMAGE", // no need mcad, embedded in cfo
+ }
+
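+ // best effort: log the failure and continue; a stale params.env should not abort operator startup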
+ if err := deploy.ApplyParams(ParamsPath, imageParamMap); err != nil {
+ log.Error(err, "failed to update image", "path", CodeflarePath+"/bases")
+ }
+
+ return nil
+}
+
func (c *CodeFlare) OverrideManifests(ctx context.Context, _ cluster.Platform) error {
// If devflags are set, update default manifests path
if len(c.DevFlags.Manifests) != 0 {
@@ -64,10 +79,6 @@ func (c *CodeFlare) ReconcileComponent(ctx context.Context,
dscispec *dsciv1.DSCInitializationSpec,
platform cluster.Platform,
_ bool) error {
- var imageParamMap = map[string]string{
- "codeflare-operator-controller-image": "RELATED_IMAGE_ODH_CODEFLARE_OPERATOR_IMAGE", // no need mcad, embedded in cfo
- }
-
enabled := c.GetManagementState() == operatorv1.Managed
monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed
@@ -89,11 +100,9 @@ func (c *CodeFlare) ReconcileComponent(ctx context.Context,
dependentOperator, ComponentName)
}
- // Update image parameters only when we do not have customized manifests set
- if (dscispec.DevFlags == nil || dscispec.DevFlags.ManifestsUri == "") && (c.DevFlags == nil || len(c.DevFlags.Manifests) == 0) {
- if err := deploy.ApplyParams(ParamsPath, imageParamMap, map[string]string{"namespace": dscispec.ApplicationsNamespace}); err != nil {
- return fmt.Errorf("failed update image from %s : %w", CodeflarePath+"/bases", err)
- }
+ // Update the namespace only in the stock manifests; overridden manifests are expected to already contain the proper namespace
+ if err := deploy.ApplyParams(ParamsPath, nil, map[string]string{"namespace": dscispec.ApplicationsNamespace}); err != nil {
+ return fmt.Errorf("failed update image from %s : %w", CodeflarePath+"/bases", err)
}
}
diff --git a/components/component.go b/components/component.go
index 7fe17d6c8c8..c43cef7ac92 100644
--- a/components/component.go
+++ b/components/component.go
@@ -38,6 +38,10 @@ type Component struct {
DevFlags *DevFlags `json:"devFlags,omitempty"`
}
+func (c *Component) Init(_ context.Context, _ cluster.Platform) error {
+ return nil
+}
+
func (c *Component) GetManagementState() operatorv1.ManagementState {
return c.ManagementState
}
@@ -77,6 +81,7 @@ type ManifestsConfig struct {
}
type ComponentInterface interface {
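+ // Init is run once at operator startup, before any reconciliation, to update the component's default manifests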
+ Init(ctx context.Context, platform cluster.Platform) error
ReconcileComponent(ctx context.Context, cli client.Client, logger logr.Logger,
owner metav1.Object, DSCISpec *dsciv1.DSCInitializationSpec, platform cluster.Platform, currentComponentStatus bool) error
Cleanup(ctx context.Context, cli client.Client, owner metav1.Object, DSCISpec *dsciv1.DSCInitializationSpec) error
diff --git a/components/dashboard/dashboard.go b/components/dashboard/dashboard.go
index 7ab0e73d2d1..d2ed096b3bd 100644
--- a/components/dashboard/dashboard.go
+++ b/components/dashboard/dashboard.go
@@ -15,6 +15,7 @@ import (
k8serr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
+ logf "sigs.k8s.io/controller-runtime/pkg/log"
dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1"
"github.com/opendatahub-io/opendatahub-operator/v2/components"
@@ -31,6 +32,7 @@ var (
PathSelfDownstream = PathDownstream + "/onprem"
PathManagedDownstream = PathDownstream + "/addon"
OverridePath = ""
+ DefaultPath = ""
)
// Verifies that Dashboard implements ComponentInterface.
@@ -42,6 +44,26 @@ type Dashboard struct {
components.Component `json:""`
}
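+// Init resolves the platform-specific default manifest path, stores it in DefaultPath, and updates its image params once at startup.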
+func (d *Dashboard) Init(ctx context.Context, platform cluster.Platform) error {
+ log := logf.FromContext(ctx).WithName(ComponentNameUpstream)
+
+ imageParamMap := map[string]string{
+ "odh-dashboard-image": "RELATED_IMAGE_ODH_DASHBOARD_IMAGE",
+ }
+ DefaultPath = map[cluster.Platform]string{
+ cluster.SelfManagedRhods: PathDownstream + "/onprem",
+ cluster.ManagedRhods: PathDownstream + "/addon",
+ cluster.OpenDataHub: PathUpstream,
+ cluster.Unknown: PathUpstream,
+ }[platform]
+
+ if err := deploy.ApplyParams(DefaultPath, imageParamMap); err != nil {
+ log.Error(err, "failed to update image", "path", DefaultPath)
+ }
+
+ return nil
+}
+
func (d *Dashboard) OverrideManifests(ctx context.Context, platform cluster.Platform) error {
// If devflags are set, update default manifests path
if len(d.DevFlags.Manifests) != 0 {
@@ -68,16 +90,9 @@ func (d *Dashboard) ReconcileComponent(ctx context.Context,
platform cluster.Platform,
currentComponentExist bool,
) error {
- entryPath := map[cluster.Platform]string{
- cluster.SelfManagedRhods: PathDownstream + "/onprem",
- cluster.ManagedRhods: PathDownstream + "/addon",
- cluster.OpenDataHub: PathUpstream,
- cluster.Unknown: PathUpstream,
- }[platform]
-
+ entryPath := DefaultPath
enabled := d.GetManagementState() == operatorv1.Managed
monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed
- imageParamMap := make(map[string]string)
if enabled {
// 1. cleanup OAuth client related secret and CR if dashboard is in 'installed false' status
@@ -92,8 +107,6 @@ func (d *Dashboard) ReconcileComponent(ctx context.Context,
if OverridePath != "" {
entryPath = OverridePath
}
- } else { // Update image parameters if devFlags is not provided
- imageParamMap["odh-dashboard-image"] = "RELATED_IMAGE_ODH_DASHBOARD_IMAGE"
}
// 2. platform specific RBAC
@@ -114,7 +127,7 @@ func (d *Dashboard) ReconcileComponent(ctx context.Context,
}
// 4. update params.env regardless devFlags is provided of not
- if err := deploy.ApplyParams(entryPath, imageParamMap, extraParamsMap); err != nil {
+ if err := deploy.ApplyParams(entryPath, nil, extraParamsMap); err != nil {
return fmt.Errorf("failed to update params.env from %s : %w", entryPath, err)
}
}
diff --git a/components/datasciencepipelines/datasciencepipelines.go b/components/datasciencepipelines/datasciencepipelines.go
index f7e1289b10f..f0066a6c544 100644
--- a/components/datasciencepipelines/datasciencepipelines.go
+++ b/components/datasciencepipelines/datasciencepipelines.go
@@ -16,6 +16,7 @@ import (
k8serr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
+ logf "sigs.k8s.io/controller-runtime/pkg/log"
dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1"
"github.com/opendatahub-io/opendatahub-operator/v2/components"
@@ -41,6 +42,35 @@ type DataSciencePipelines struct {
components.Component `json:""`
}
+func (d *DataSciencePipelines) Init(ctx context.Context, _ cluster.Platform) error {
+ log := logf.FromContext(ctx).WithName(ComponentName)
+
+ var imageParamMap = map[string]string{
+ // v1
+ "IMAGES_APISERVER": "RELATED_IMAGE_ODH_ML_PIPELINES_API_SERVER_IMAGE",
+ "IMAGES_ARTIFACT": "RELATED_IMAGE_ODH_ML_PIPELINES_ARTIFACT_MANAGER_IMAGE",
+ "IMAGES_PERSISTENTAGENT": "RELATED_IMAGE_ODH_ML_PIPELINES_PERSISTENCEAGENT_IMAGE",
+ "IMAGES_SCHEDULEDWORKFLOW": "RELATED_IMAGE_ODH_ML_PIPELINES_SCHEDULEDWORKFLOW_IMAGE",
+ "IMAGES_CACHE": "RELATED_IMAGE_ODH_ML_PIPELINES_CACHE_IMAGE",
+ "IMAGES_DSPO": "RELATED_IMAGE_ODH_DATA_SCIENCE_PIPELINES_OPERATOR_CONTROLLER_IMAGE",
+ // v2
+ "IMAGESV2_ARGO_APISERVER": "RELATED_IMAGE_ODH_ML_PIPELINES_API_SERVER_V2_IMAGE",
+ "IMAGESV2_ARGO_PERSISTENCEAGENT": "RELATED_IMAGE_ODH_ML_PIPELINES_PERSISTENCEAGENT_V2_IMAGE",
+ "IMAGESV2_ARGO_SCHEDULEDWORKFLOW": "RELATED_IMAGE_ODH_ML_PIPELINES_SCHEDULEDWORKFLOW_V2_IMAGE",
+ "IMAGESV2_ARGO_ARGOEXEC": "RELATED_IMAGE_ODH_DATA_SCIENCE_PIPELINES_ARGO_ARGOEXEC_IMAGE",
+ "IMAGESV2_ARGO_WORKFLOWCONTROLLER": "RELATED_IMAGE_ODH_DATA_SCIENCE_PIPELINES_ARGO_WORKFLOWCONTROLLER_IMAGE",
+ "V2_DRIVER_IMAGE": "RELATED_IMAGE_ODH_ML_PIPELINES_DRIVER_IMAGE",
+ "V2_LAUNCHER_IMAGE": "RELATED_IMAGE_ODH_ML_PIPELINES_LAUNCHER_IMAGE",
+ "IMAGESV2_ARGO_MLMDGRPC": "RELATED_IMAGE_ODH_MLMD_GRPC_SERVER_IMAGE",
+ }
+
+ if err := deploy.ApplyParams(Path, imageParamMap); err != nil {
+ log.Error(err, "failed to update image", "path", Path)
+ }
+
+ return nil
+}
+
func (d *DataSciencePipelines) OverrideManifests(ctx context.Context, _ cluster.Platform) error {
// If devflags are set, update default manifests path
if len(d.DevFlags.Manifests) != 0 {
@@ -71,25 +101,6 @@ func (d *DataSciencePipelines) ReconcileComponent(ctx context.Context,
platform cluster.Platform,
_ bool,
) error {
- var imageParamMap = map[string]string{
- // v1
- "IMAGES_APISERVER": "RELATED_IMAGE_ODH_ML_PIPELINES_API_SERVER_IMAGE",
- "IMAGES_ARTIFACT": "RELATED_IMAGE_ODH_ML_PIPELINES_ARTIFACT_MANAGER_IMAGE",
- "IMAGES_PERSISTENTAGENT": "RELATED_IMAGE_ODH_ML_PIPELINES_PERSISTENCEAGENT_IMAGE",
- "IMAGES_SCHEDULEDWORKFLOW": "RELATED_IMAGE_ODH_ML_PIPELINES_SCHEDULEDWORKFLOW_IMAGE",
- "IMAGES_CACHE": "RELATED_IMAGE_ODH_ML_PIPELINES_CACHE_IMAGE",
- "IMAGES_DSPO": "RELATED_IMAGE_ODH_DATA_SCIENCE_PIPELINES_OPERATOR_CONTROLLER_IMAGE",
- // v2
- "IMAGESV2_ARGO_APISERVER": "RELATED_IMAGE_ODH_ML_PIPELINES_API_SERVER_V2_IMAGE",
- "IMAGESV2_ARGO_PERSISTENCEAGENT": "RELATED_IMAGE_ODH_ML_PIPELINES_PERSISTENCEAGENT_V2_IMAGE",
- "IMAGESV2_ARGO_SCHEDULEDWORKFLOW": "RELATED_IMAGE_ODH_ML_PIPELINES_SCHEDULEDWORKFLOW_V2_IMAGE",
- "IMAGESV2_ARGO_ARGOEXEC": "RELATED_IMAGE_ODH_DATA_SCIENCE_PIPELINES_ARGO_ARGOEXEC_IMAGE",
- "IMAGESV2_ARGO_WORKFLOWCONTROLLER": "RELATED_IMAGE_ODH_DATA_SCIENCE_PIPELINES_ARGO_WORKFLOWCONTROLLER_IMAGE",
- "V2_DRIVER_IMAGE": "RELATED_IMAGE_ODH_ML_PIPELINES_DRIVER_IMAGE",
- "V2_LAUNCHER_IMAGE": "RELATED_IMAGE_ODH_ML_PIPELINES_LAUNCHER_IMAGE",
- "IMAGESV2_ARGO_MLMDGRPC": "RELATED_IMAGE_ODH_MLMD_GRPC_SERVER_IMAGE",
- }
-
enabled := d.GetManagementState() == operatorv1.Managed
monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed
@@ -101,12 +112,6 @@ func (d *DataSciencePipelines) ReconcileComponent(ctx context.Context,
}
}
// skip check if the dependent operator has beeninstalled, this is done in dashboard
- // Update image parameters only when we do not have customized manifests set
- if (dscispec.DevFlags == nil || dscispec.DevFlags.ManifestsUri == "") && (d.DevFlags == nil || len(d.DevFlags.Manifests) == 0) {
- if err := deploy.ApplyParams(Path, imageParamMap); err != nil {
- return fmt.Errorf("failed to update image from %s : %w", Path, err)
- }
- }
// Check for existing Argo Workflows
if err := UnmanagedArgoWorkFlowExists(ctx, cli); err != nil {
return err
diff --git a/components/kserve/kserve.go b/components/kserve/kserve.go
index 03dd94a7e5d..85b739285ea 100644
--- a/components/kserve/kserve.go
+++ b/components/kserve/kserve.go
@@ -12,6 +12,7 @@ import (
operatorv1 "github.com/openshift/api/operator/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
+ logf "sigs.k8s.io/controller-runtime/pkg/log"
dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1"
infrav1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/infrastructure/v1"
@@ -56,6 +57,22 @@ type Kserve struct {
DefaultDeploymentMode DefaultDeploymentMode `json:"defaultDeploymentMode,omitempty"`
}
+func (k *Kserve) Init(ctx context.Context, _ cluster.Platform) error {
+ log := logf.FromContext(ctx).WithName(ComponentName)
+
+ // dependentParamMap for odh-model-controller to use.
+ var dependentParamMap = map[string]string{
+ "odh-model-controller": "RELATED_IMAGE_ODH_MODEL_CONTROLLER_IMAGE",
+ }
+
+ // Update image parameters for odh-model-controller
+ if err := deploy.ApplyParams(DependentPath, dependentParamMap); err != nil {
+ log.Error(err, "failed to update image", "path", DependentPath)
+ }
+
+ return nil
+}
+
func (k *Kserve) OverrideManifests(ctx context.Context, _ cluster.Platform) error {
// Download manifests if defined by devflags
// Go through each manifest and set the overlays if defined
@@ -96,11 +113,6 @@ func (k *Kserve) GetComponentName() string {
func (k *Kserve) ReconcileComponent(ctx context.Context, cli client.Client,
l logr.Logger, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool) error {
- // dependentParamMap for odh-model-controller to use.
- var dependentParamMap = map[string]string{
- "odh-model-controller": "RELATED_IMAGE_ODH_MODEL_CONTROLLER_IMAGE",
- }
-
enabled := k.GetManagementState() == operatorv1.Managed
monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed
@@ -140,12 +152,6 @@ func (k *Kserve) ReconcileComponent(ctx context.Context, cli client.Client,
if err := cluster.UpdatePodSecurityRolebinding(ctx, cli, dscispec.ApplicationsNamespace, "odh-model-controller"); err != nil {
return err
}
- // Update image parameters for odh-model-controller
- if (dscispec.DevFlags == nil || dscispec.DevFlags.ManifestsUri == "") && (k.DevFlags == nil || len(k.DevFlags.Manifests) == 0) {
- if err := deploy.ApplyParams(DependentPath, dependentParamMap); err != nil {
- return fmt.Errorf("failed to update image %s: %w", DependentPath, err)
- }
- }
}
if err := deploy.DeployManifestsFromPath(ctx, cli, owner, DependentPath, dscispec.ApplicationsNamespace, ComponentName, enabled); err != nil {
diff --git a/components/kueue/kueue.go b/components/kueue/kueue.go
index 0d15ae2b6d3..ec609317092 100644
--- a/components/kueue/kueue.go
+++ b/components/kueue/kueue.go
@@ -10,6 +10,7 @@ import (
operatorv1 "github.com/openshift/api/operator/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
+ logf "sigs.k8s.io/controller-runtime/pkg/log"
dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1"
"github.com/opendatahub-io/opendatahub-operator/v2/components"
@@ -31,6 +32,20 @@ type Kueue struct {
components.Component `json:""`
}
+func (k *Kueue) Init(ctx context.Context, _ cluster.Platform) error {
+ log := logf.FromContext(ctx).WithName(ComponentName)
+
+ var imageParamMap = map[string]string{
+ "odh-kueue-controller-image": "RELATED_IMAGE_ODH_KUEUE_CONTROLLER_IMAGE", // new kueue image
+ }
+
+ if err := deploy.ApplyParams(Path, imageParamMap); err != nil {
+ log.Error(err, "failed to update image", "path", Path)
+ }
+
+ return nil
+}
+
func (k *Kueue) OverrideManifests(ctx context.Context, _ cluster.Platform) error {
// If devflags are set, update default manifests path
if len(k.DevFlags.Manifests) != 0 {
@@ -55,10 +70,6 @@ func (k *Kueue) GetComponentName() string {
func (k *Kueue) ReconcileComponent(ctx context.Context, cli client.Client, l logr.Logger,
owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool) error {
- var imageParamMap = map[string]string{
- "odh-kueue-controller-image": "RELATED_IMAGE_ODH_KUEUE_CONTROLLER_IMAGE", // new kueue image
- }
-
enabled := k.GetManagementState() == operatorv1.Managed
monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed
if enabled {
@@ -68,11 +79,6 @@ func (k *Kueue) ReconcileComponent(ctx context.Context, cli client.Client, l log
return err
}
}
- if (dscispec.DevFlags == nil || dscispec.DevFlags.ManifestsUri == "") && (k.DevFlags == nil || len(k.DevFlags.Manifests) == 0) {
- if err := deploy.ApplyParams(Path, imageParamMap); err != nil {
- return fmt.Errorf("failed to update image from %s : %w", Path, err)
- }
- }
}
// Deploy Kueue Operator
if err := deploy.DeployManifestsFromPath(ctx, cli, owner, Path, dscispec.ApplicationsNamespace, ComponentName, enabled); err != nil {
diff --git a/components/modelmeshserving/modelmeshserving.go b/components/modelmeshserving/modelmeshserving.go
index 9e343d1a9ce..cb1d07b7838 100644
--- a/components/modelmeshserving/modelmeshserving.go
+++ b/components/modelmeshserving/modelmeshserving.go
@@ -12,6 +12,7 @@ import (
operatorv1 "github.com/openshift/api/operator/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
+ logf "sigs.k8s.io/controller-runtime/pkg/log"
dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1"
"github.com/opendatahub-io/opendatahub-operator/v2/components"
@@ -35,6 +36,35 @@ type ModelMeshServing struct {
components.Component `json:""`
}
+func (m *ModelMeshServing) Init(ctx context.Context, _ cluster.Platform) error {
+ log := logf.FromContext(ctx).WithName(ComponentName)
+
+ var imageParamMap = map[string]string{
+ "odh-mm-rest-proxy": "RELATED_IMAGE_ODH_MM_REST_PROXY_IMAGE",
+ "odh-modelmesh-runtime-adapter": "RELATED_IMAGE_ODH_MODELMESH_RUNTIME_ADAPTER_IMAGE",
+ "odh-modelmesh": "RELATED_IMAGE_ODH_MODELMESH_IMAGE",
+ "odh-modelmesh-controller": "RELATED_IMAGE_ODH_MODELMESH_CONTROLLER_IMAGE",
+ "odh-model-controller": "RELATED_IMAGE_ODH_MODEL_CONTROLLER_IMAGE",
+ }
+
+ // image params for the dependent odh-model-controller manifests
+ var dependentImageParamMap = map[string]string{
+ "odh-model-controller": "RELATED_IMAGE_ODH_MODEL_CONTROLLER_IMAGE",
+ }
+
+ // Update image parameters
+ if err := deploy.ApplyParams(Path, imageParamMap); err != nil {
+ log.Error(err, "failed to update image", "path", Path)
+ }
+
+ // Update image parameters for odh-model-controller
+ if err := deploy.ApplyParams(DependentPath, dependentImageParamMap); err != nil {
+ log.Error(err, "failed to update image", "path", DependentPath)
+ }
+
+ return nil
+}
+
func (m *ModelMeshServing) OverrideManifests(ctx context.Context, _ cluster.Platform) error {
// Go through each manifest and set the overlays if defined
for _, subcomponent := range m.DevFlags.Manifests {
@@ -79,19 +109,6 @@ func (m *ModelMeshServing) ReconcileComponent(ctx context.Context,
platform cluster.Platform,
_ bool,
) error {
- var imageParamMap = map[string]string{
- "odh-mm-rest-proxy": "RELATED_IMAGE_ODH_MM_REST_PROXY_IMAGE",
- "odh-modelmesh-runtime-adapter": "RELATED_IMAGE_ODH_MODELMESH_RUNTIME_ADAPTER_IMAGE",
- "odh-modelmesh": "RELATED_IMAGE_ODH_MODELMESH_IMAGE",
- "odh-modelmesh-controller": "RELATED_IMAGE_ODH_MODELMESH_CONTROLLER_IMAGE",
- "odh-model-controller": "RELATED_IMAGE_ODH_MODEL_CONTROLLER_IMAGE",
- }
-
- // odh-model-controller to use
- var dependentImageParamMap = map[string]string{
- "odh-model-controller": "RELATED_IMAGE_ODH_MODEL_CONTROLLER_IMAGE",
- }
-
enabled := m.GetManagementState() == operatorv1.Managed
monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed
@@ -111,12 +128,6 @@ func (m *ModelMeshServing) ReconcileComponent(ctx context.Context,
"prometheus-custom"); err != nil {
return err
}
- // Update image parameters
- if (dscispec.DevFlags == nil || dscispec.DevFlags.ManifestsUri == "") && (m.DevFlags == nil || len(m.DevFlags.Manifests) == 0) {
- if err := deploy.ApplyParams(Path, imageParamMap); err != nil {
- return fmt.Errorf("failed update image from %s : %w", Path, err)
- }
- }
}
if err := deploy.DeployManifestsFromPath(ctx, cli, owner, Path, dscispec.ApplicationsNamespace, ComponentName, enabled); err != nil {
@@ -129,12 +140,6 @@ func (m *ModelMeshServing) ReconcileComponent(ctx context.Context,
"odh-model-controller"); err != nil {
return err
}
- // Update image parameters for odh-model-controller
- if dscispec.DevFlags == nil || dscispec.DevFlags.ManifestsUri == "" {
- if err := deploy.ApplyParams(DependentPath, dependentImageParamMap); err != nil {
- return err
- }
- }
}
if err := deploy.DeployManifestsFromPath(ctx, cli, owner, DependentPath, dscispec.ApplicationsNamespace, m.GetComponentName(), enabled); err != nil {
// explicitly ignore error if error contains keywords "spec.selector" and "field is immutable" and return all other error.
diff --git a/components/modelregistry/modelregistry.go b/components/modelregistry/modelregistry.go
index c58f6bb15dc..dbf577ec8f8 100644
--- a/components/modelregistry/modelregistry.go
+++ b/components/modelregistry/modelregistry.go
@@ -15,6 +15,7 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
+ logf "sigs.k8s.io/controller-runtime/pkg/log"
dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1"
infrav1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/infrastructure/v1"
@@ -58,6 +59,22 @@ type ModelRegistry struct {
RegistriesNamespace string `json:"registriesNamespace,omitempty"`
}
+func (m *ModelRegistry) Init(ctx context.Context, _ cluster.Platform) error {
+ log := logf.FromContext(ctx).WithName(ComponentName)
+
+ var imageParamMap = map[string]string{
+ "IMAGES_MODELREGISTRY_OPERATOR": "RELATED_IMAGE_ODH_MODEL_REGISTRY_OPERATOR_IMAGE",
+ "IMAGES_GRPC_SERVICE": "RELATED_IMAGE_ODH_MLMD_GRPC_SERVER_IMAGE",
+ "IMAGES_REST_SERVICE": "RELATED_IMAGE_ODH_MODEL_REGISTRY_IMAGE",
+ }
+
+ if err := deploy.ApplyParams(Path, imageParamMap); err != nil {
+ log.Error(err, "failed to update image", "path", Path)
+ }
+
+ return nil
+}
+
func (m *ModelRegistry) OverrideManifests(ctx context.Context, _ cluster.Platform) error {
// If devflags are set, update default manifests path
if len(m.DevFlags.Manifests) != 0 {
@@ -82,11 +99,6 @@ func (m *ModelRegistry) GetComponentName() string {
func (m *ModelRegistry) ReconcileComponent(ctx context.Context, cli client.Client, l logr.Logger,
owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool) error {
- var imageParamMap = map[string]string{
- "IMAGES_MODELREGISTRY_OPERATOR": "RELATED_IMAGE_ODH_MODEL_REGISTRY_OPERATOR_IMAGE",
- "IMAGES_GRPC_SERVICE": "RELATED_IMAGE_ODH_MLMD_GRPC_SERVER_IMAGE",
- "IMAGES_REST_SERVICE": "RELATED_IMAGE_ODH_MODEL_REGISTRY_IMAGE",
- }
enabled := m.GetManagementState() == operatorv1.Managed
monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed
@@ -107,14 +119,11 @@ func (m *ModelRegistry) ReconcileComponent(ctx context.Context, cli client.Clien
}
}
- // Update image parameters only when we do not have customized manifests set
- if (dscispec.DevFlags == nil || dscispec.DevFlags.ManifestsUri == "") && (m.DevFlags == nil || len(m.DevFlags.Manifests) == 0) {
- extraParamsMap := map[string]string{
- "DEFAULT_CERT": DefaultModelRegistryCert,
- }
- if err := deploy.ApplyParams(Path, imageParamMap, extraParamsMap); err != nil {
- return fmt.Errorf("failed to update image from %s : %w", Path, err)
- }
+ extraParamsMap := map[string]string{
+ "DEFAULT_CERT": DefaultModelRegistryCert,
+ }
+ if err := deploy.ApplyParams(Path, nil, extraParamsMap); err != nil {
+ return fmt.Errorf("failed to update image from %s : %w", Path, err)
}
// Create model registries namespace
diff --git a/components/ray/ray.go b/components/ray/ray.go
index dd0dd18ed95..c8fa30edbd4 100644
--- a/components/ray/ray.go
+++ b/components/ray/ray.go
@@ -12,6 +12,7 @@ import (
operatorv1 "github.com/openshift/api/operator/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
+ logf "sigs.k8s.io/controller-runtime/pkg/log"
dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1"
"github.com/opendatahub-io/opendatahub-operator/v2/components"
@@ -33,6 +34,19 @@ type Ray struct {
components.Component `json:""`
}
+func (r *Ray) Init(ctx context.Context, _ cluster.Platform) error {
+ log := logf.FromContext(ctx).WithName(ComponentName)
+
+ var imageParamMap = map[string]string{
+ "odh-kuberay-operator-controller-image": "RELATED_IMAGE_ODH_KUBERAY_OPERATOR_CONTROLLER_IMAGE",
+ }
+ if err := deploy.ApplyParams(RayPath, imageParamMap); err != nil {
+ log.Error(err, "failed to update image", "path", RayPath)
+ }
+
+ return nil
+}
+
func (r *Ray) OverrideManifests(ctx context.Context, _ cluster.Platform) error {
// If devflags are set, update default manifests path
if len(r.DevFlags.Manifests) != 0 {
@@ -57,10 +71,6 @@ func (r *Ray) GetComponentName() string {
func (r *Ray) ReconcileComponent(ctx context.Context, cli client.Client, l logr.Logger,
owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool) error {
- var imageParamMap = map[string]string{
- "odh-kuberay-operator-controller-image": "RELATED_IMAGE_ODH_KUBERAY_OPERATOR_CONTROLLER_IMAGE",
- }
-
enabled := r.GetManagementState() == operatorv1.Managed
monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed
@@ -71,10 +81,8 @@ func (r *Ray) ReconcileComponent(ctx context.Context, cli client.Client, l logr.
return err
}
}
- if (dscispec.DevFlags == nil || dscispec.DevFlags.ManifestsUri == "") && (r.DevFlags == nil || len(r.DevFlags.Manifests) == 0) {
- if err := deploy.ApplyParams(RayPath, imageParamMap, map[string]string{"namespace": dscispec.ApplicationsNamespace}); err != nil {
- return fmt.Errorf("failed to update image from %s : %w", RayPath, err)
- }
+ if err := deploy.ApplyParams(RayPath, nil, map[string]string{"namespace": dscispec.ApplicationsNamespace}); err != nil {
+ return fmt.Errorf("failed to update namespace from %s : %w", RayPath, err)
}
}
// Deploy Ray Operator
diff --git a/components/trainingoperator/trainingoperator.go b/components/trainingoperator/trainingoperator.go
index 1e1de97679c..a6a7c8f87e7 100644
--- a/components/trainingoperator/trainingoperator.go
+++ b/components/trainingoperator/trainingoperator.go
@@ -12,6 +12,7 @@ import (
operatorv1 "github.com/openshift/api/operator/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
+ logf "sigs.k8s.io/controller-runtime/pkg/log"
dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1"
"github.com/opendatahub-io/opendatahub-operator/v2/components"
@@ -33,6 +34,20 @@ type TrainingOperator struct {
components.Component `json:""`
}
+func (r *TrainingOperator) Init(ctx context.Context, _ cluster.Platform) error {
+ log := logf.FromContext(ctx).WithName(ComponentName)
+
+ var imageParamMap = map[string]string{
+ "odh-training-operator-controller-image": "RELATED_IMAGE_ODH_TRAINING_OPERATOR_IMAGE",
+ }
+
+ if err := deploy.ApplyParams(TrainingOperatorPath, imageParamMap); err != nil {
+ log.Error(err, "failed to update image", "path", TrainingOperatorPath)
+ }
+
+ return nil
+}
+
func (r *TrainingOperator) OverrideManifests(ctx context.Context, _ cluster.Platform) error {
// If devflags are set, update default manifests path
if len(r.DevFlags.Manifests) != 0 {
@@ -57,10 +72,6 @@ func (r *TrainingOperator) GetComponentName() string {
func (r *TrainingOperator) ReconcileComponent(ctx context.Context, cli client.Client, l logr.Logger,
owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool) error {
- var imageParamMap = map[string]string{
- "odh-training-operator-controller-image": "RELATED_IMAGE_ODH_TRAINING_OPERATOR_IMAGE",
- }
-
enabled := r.GetManagementState() == operatorv1.Managed
monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed
@@ -71,11 +82,6 @@ func (r *TrainingOperator) ReconcileComponent(ctx context.Context, cli client.Cl
return err
}
}
- if (dscispec.DevFlags == nil || dscispec.DevFlags.ManifestsUri == "") && (r.DevFlags == nil || len(r.DevFlags.Manifests) == 0) {
- if err := deploy.ApplyParams(TrainingOperatorPath, imageParamMap); err != nil {
- return err
- }
- }
}
// Deploy Training Operator
if err := deploy.DeployManifestsFromPath(ctx, cli, owner, TrainingOperatorPath, dscispec.ApplicationsNamespace, ComponentName, enabled); err != nil {
diff --git a/components/trustyai/trustyai.go b/components/trustyai/trustyai.go
index 049b63b1d6c..45a211e79c0 100644
--- a/components/trustyai/trustyai.go
+++ b/components/trustyai/trustyai.go
@@ -11,6 +11,7 @@ import (
operatorv1 "github.com/openshift/api/operator/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
+ logf "sigs.k8s.io/controller-runtime/pkg/log"
dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1"
"github.com/opendatahub-io/opendatahub-operator/v2/components"
@@ -24,6 +25,7 @@ var (
PathUpstream = deploy.DefaultManifestPath + "/" + ComponentPathName + "/overlays/odh"
PathDownstream = deploy.DefaultManifestPath + "/" + ComponentPathName + "/overlays/rhoai"
OverridePath = ""
+ DefaultPath = ""
)
// Verifies that TrustyAI implements ComponentInterface.
@@ -35,6 +37,27 @@ type TrustyAI struct {
components.Component `json:""`
}
+func (t *TrustyAI) Init(ctx context.Context, platform cluster.Platform) error {
+ log := logf.FromContext(ctx).WithName(ComponentName)
+
+ DefaultPath = map[cluster.Platform]string{
+ cluster.SelfManagedRhods: PathDownstream,
+ cluster.ManagedRhods: PathDownstream,
+ cluster.OpenDataHub: PathUpstream,
+ cluster.Unknown: PathUpstream,
+ }[platform]
+ var imageParamMap = map[string]string{
+ "trustyaiServiceImage": "RELATED_IMAGE_ODH_TRUSTYAI_SERVICE_IMAGE",
+ "trustyaiOperatorImage": "RELATED_IMAGE_ODH_TRUSTYAI_SERVICE_OPERATOR_IMAGE",
+ }
+
+ if err := deploy.ApplyParams(DefaultPath, imageParamMap); err != nil {
+ log.Error(err, "failed to update image", "path", DefaultPath)
+ }
+
+ return nil
+}
+
func (t *TrustyAI) OverrideManifests(ctx context.Context, _ cluster.Platform) error {
// If devflags are set, update default manifests path
if len(t.DevFlags.Manifests) != 0 {
@@ -58,19 +81,9 @@ func (t *TrustyAI) GetComponentName() string {
func (t *TrustyAI) ReconcileComponent(ctx context.Context, cli client.Client, l logr.Logger,
owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool) error {
- var imageParamMap = map[string]string{
- "trustyaiServiceImage": "RELATED_IMAGE_ODH_TRUSTYAI_SERVICE_IMAGE",
- "trustyaiOperatorImage": "RELATED_IMAGE_ODH_TRUSTYAI_SERVICE_OPERATOR_IMAGE",
- }
- entryPath := map[cluster.Platform]string{
- cluster.SelfManagedRhods: PathDownstream,
- cluster.ManagedRhods: PathDownstream,
- cluster.OpenDataHub: PathUpstream,
- cluster.Unknown: PathUpstream,
- }[platform]
-
enabled := t.GetManagementState() == operatorv1.Managed
monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed
+ entryPath := DefaultPath
if enabled {
if t.DevFlags != nil {
@@ -82,11 +95,6 @@ func (t *TrustyAI) ReconcileComponent(ctx context.Context, cli client.Client, l
entryPath = OverridePath
}
}
- if (dscispec.DevFlags == nil || dscispec.DevFlags.ManifestsUri == "") && (t.DevFlags == nil || len(t.DevFlags.Manifests) == 0) {
- if err := deploy.ApplyParams(entryPath, imageParamMap); err != nil {
- return fmt.Errorf("failed to update image %s: %w", entryPath, err)
- }
- }
}
// Deploy TrustyAI Operator
if err := deploy.DeployManifestsFromPath(ctx, cli, owner, entryPath, dscispec.ApplicationsNamespace, t.GetComponentName(), enabled); err != nil {
diff --git a/components/workbenches/workbenches.go b/components/workbenches/workbenches.go
index 3570cf096ac..c11f1e24297 100644
--- a/components/workbenches/workbenches.go
+++ b/components/workbenches/workbenches.go
@@ -12,6 +12,7 @@ import (
operatorv1 "github.com/openshift/api/operator/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
+ logf "sigs.k8s.io/controller-runtime/pkg/log"
dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1"
"github.com/opendatahub-io/opendatahub-operator/v2/components"
@@ -40,6 +41,26 @@ type Workbenches struct {
components.Component `json:""`
}
+func (w *Workbenches) Init(ctx context.Context, _ cluster.Platform) error {
+ log := logf.FromContext(ctx).WithName(ComponentName)
+
+ var imageParamMap = map[string]string{
+ "odh-notebook-controller-image": "RELATED_IMAGE_ODH_NOTEBOOK_CONTROLLER_IMAGE",
+ "odh-kf-notebook-controller-image": "RELATED_IMAGE_ODH_KF_NOTEBOOK_CONTROLLER_IMAGE",
+ }
+
+ // for kf-notebook-controller image
+ if err := deploy.ApplyParams(notebookControllerPath, imageParamMap); err != nil {
+ log.Error(err, "failed to update image", "path", notebookControllerPath)
+ }
+ // for odh-notebook-controller image
+ if err := deploy.ApplyParams(kfnotebookControllerPath, imageParamMap); err != nil {
+ log.Error(err, "failed to update image", "path", kfnotebookControllerPath)
+ }
+
+ return nil
+}
+
func (w *Workbenches) OverrideManifests(ctx context.Context, platform cluster.Platform) error {
// Download manifests if defined by devflags
// Go through each manifest and set the overlays if defined
@@ -92,11 +113,6 @@ func (w *Workbenches) GetComponentName() string {
func (w *Workbenches) ReconcileComponent(ctx context.Context, cli client.Client, l logr.Logger,
owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool) error {
- var imageParamMap = map[string]string{
- "odh-notebook-controller-image": "RELATED_IMAGE_ODH_NOTEBOOK_CONTROLLER_IMAGE",
- "odh-kf-notebook-controller-image": "RELATED_IMAGE_ODH_KF_NOTEBOOK_CONTROLLER_IMAGE",
- }
-
// Set default notebooks namespace
// Create rhods-notebooks namespace in managed platforms
enabled := w.GetManagementState() == operatorv1.Managed
@@ -123,19 +139,6 @@ func (w *Workbenches) ReconcileComponent(ctx context.Context, cli client.Client,
}
}
- // Update image parameters for nbc
- if enabled {
- if (dscispec.DevFlags == nil || dscispec.DevFlags.ManifestsUri == "") && (w.DevFlags == nil || len(w.DevFlags.Manifests) == 0) {
- // for kf-notebook-controller image
- if err := deploy.ApplyParams(notebookControllerPath, imageParamMap); err != nil {
- return fmt.Errorf("failed to update image %s: %w", notebookControllerPath, err)
- }
- // for odh-notebook-controller image
- if err := deploy.ApplyParams(kfnotebookControllerPath, imageParamMap); err != nil {
- return fmt.Errorf("failed to update image %s: %w", kfnotebookControllerPath, err)
- }
- }
- }
if err := deploy.DeployManifestsFromPath(ctx, cli, owner,
notebookControllerPath,
dscispec.ApplicationsNamespace,
diff --git a/config/crd/bases/dscinitialization.opendatahub.io_dscinitializations.yaml b/config/crd/bases/dscinitialization.opendatahub.io_dscinitializations.yaml
index 7c191a79027..dd381696bb1 100644
--- a/config/crd/bases/dscinitialization.opendatahub.io_dscinitializations.yaml
+++ b/config/crd/bases/dscinitialization.opendatahub.io_dscinitializations.yaml
@@ -56,6 +56,8 @@ spec:
default: opendatahub
description: Namespace for applications to be installed, non-configurable,
default to "opendatahub"
+ maxLength: 63
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?)?$
type: string
x-kubernetes-validations:
- message: ApplicationsNamespace is immutable
@@ -96,6 +98,8 @@ spec:
namespace:
default: opendatahub
description: Namespace for monitoring if it is enabled
+ maxLength: 63
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?)?$
type: string
type: object
serviceMesh:
diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml
index 0aadbcc2e89..bff26ae5022 100644
--- a/config/manager/manager.yaml
+++ b/config/manager/manager.yaml
@@ -46,6 +46,8 @@ spec:
fieldPath: metadata.namespace
- name: DEFAULT_MANIFESTS_PATH
value: /opt/manifests
+ - name: ODH_PLATFORM_TYPE
+ value: OpenDataHub
args:
- --leader-elect
- --operator-name=opendatahub
diff --git a/config/manifests/bases/opendatahub-operator.clusterserviceversion.yaml b/config/manifests/bases/opendatahub-operator.clusterserviceversion.yaml
index 41afa7a96b2..811770245b5 100644
--- a/config/manifests/bases/opendatahub-operator.clusterserviceversion.yaml
+++ b/config/manifests/bases/opendatahub-operator.clusterserviceversion.yaml
@@ -6,11 +6,11 @@ metadata:
capabilities: Full Lifecycle
categories: AI/Machine Learning, Big Data
certified: "False"
- containerImage: quay.io/opendatahub/opendatahub-operator:v2.18.2
- createdAt: "2024-9-24T00:00:00Z"
- olm.skipRange: '>=1.0.0 <2.18.2'
+ containerImage: quay.io/opendatahub/opendatahub-operator:v2.19.0
+ createdAt: "2024-10-09T00:00:00Z"
+ olm.skipRange: '>=1.0.0 <2.19.0'
repository: https://github.com/opendatahub-io/opendatahub-operator
- name: opendatahub-operator.v2.18.2
+ name: opendatahub-operator.v2.19.0
namespace: placeholder
spec:
apiservicedefinitions: {}
@@ -106,4 +106,4 @@ spec:
selector:
matchLabels:
component: opendatahub-operator
- version: 2.18.2
+ version: 2.19.0
diff --git a/controllers/datasciencecluster/datasciencecluster_controller.go b/controllers/datasciencecluster/datasciencecluster_controller.go
index ba89a3479b9..af00a344e73 100644
--- a/controllers/datasciencecluster/datasciencecluster_controller.go
+++ b/controllers/datasciencecluster/datasciencecluster_controller.go
@@ -88,11 +88,7 @@ func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.R
log.Info("Reconciling DataScienceCluster resources", "Request.Name", req.Name)
// Get information on version and platform
- currentOperatorRelease, err := cluster.GetRelease(ctx, r.Client)
- if err != nil {
- log.Error(err, "failed to get operator release version")
- return ctrl.Result{}, err
- }
+ currentOperatorRelease := cluster.GetRelease()
// Set platform
platform := currentOperatorRelease.Name
@@ -258,6 +254,7 @@ func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.R
status.SetCompleteCondition(&saved.Status.Conditions, status.ReconcileCompletedWithComponentErrors,
fmt.Sprintf("DataScienceCluster resource reconciled with component errors: %v", componentErrors))
saved.Status.Phase = status.PhaseReady
+ saved.Status.Release = currentOperatorRelease
})
if err != nil {
log.Error(err, "failed to update DataScienceCluster conditions with incompleted reconciliation")
@@ -274,6 +271,7 @@ func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.R
instance, err = status.UpdateWithRetry(ctx, r.Client, instance, func(saved *dscv1.DataScienceCluster) {
status.SetCompleteCondition(&saved.Status.Conditions, status.ReconcileCompleted, "DataScienceCluster resource reconciled successfully")
saved.Status.Phase = status.PhaseReady
+ saved.Status.Release = currentOperatorRelease
})
if err != nil {
diff --git a/controllers/dscinitialization/dscinitialization_controller.go b/controllers/dscinitialization/dscinitialization_controller.go
index 5c1a4bc9b20..37991131cb5 100644
--- a/controllers/dscinitialization/dscinitialization_controller.go
+++ b/controllers/dscinitialization/dscinitialization_controller.go
@@ -79,11 +79,7 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re
log := r.Log
log.Info("Reconciling DSCInitialization.", "DSCInitialization Request.Name", req.Name)
- currentOperatorRelease, err := cluster.GetRelease(ctx, r.Client)
- if err != nil {
- log.Error(err, "failed to get operator release version")
- return ctrl.Result{}, err
- }
+ currentOperatorRelease := cluster.GetRelease()
// Set platform
platform := currentOperatorRelease.Name
@@ -142,7 +138,7 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re
if instance.Status.Conditions == nil {
reason := status.ReconcileInit
message := "Initializing DSCInitialization resource"
- instance, err = status.UpdateWithRetry(ctx, r.Client, instance, func(saved *dsciv1.DSCInitialization) {
+ instance, err := status.UpdateWithRetry(ctx, r.Client, instance, func(saved *dsciv1.DSCInitialization) {
status.SetProgressingCondition(&saved.Status.Conditions, reason, message)
saved.Status.Phase = status.PhaseProgressing
saved.Status.Release = currentOperatorRelease
@@ -156,9 +152,23 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re
}
}
+ // upgrade case: refresh the release version recorded in status
+ if !instance.Status.Release.Version.Equals(currentOperatorRelease.Version.Version) {
+ message := "Updating DSCInitialization status"
+ instance, err := status.UpdateWithRetry(ctx, r.Client, instance, func(saved *dsciv1.DSCInitialization) {
+ saved.Status.Release = currentOperatorRelease
+ })
+ if err != nil {
+ log.Error(err, "Failed to update release version for DSCInitialization resource.", "DSCInitialization", req.Namespace, "Request.Name", req.Name)
+ r.Recorder.Eventf(instance, corev1.EventTypeWarning, "DSCInitializationReconcileError",
+ "%s for instance %s", message, instance.Name)
+ return reconcile.Result{}, err
+ }
+ }
+
// Check namespace is not exist, then create
namespace := instance.Spec.ApplicationsNamespace
- err = r.createOdhNamespace(ctx, instance, namespace, platform)
+ err := r.createOdhNamespace(ctx, instance, namespace, platform)
if err != nil {
// no need to log error as it was already logged in createOdhNamespace
return reconcile.Result{}, err
diff --git a/controllers/webhook/webhook.go b/controllers/webhook/webhook.go
index 9455cfd08ba..29739e2af00 100644
--- a/controllers/webhook/webhook.go
+++ b/controllers/webhook/webhook.go
@@ -46,6 +46,15 @@ type OpenDataHubValidatingWebhook struct {
Decoder *admission.Decoder
}
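+// Init registers both the validating and the defaulting admission webhooks with the manager.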
+func Init(mgr ctrl.Manager) {
+ (&OpenDataHubValidatingWebhook{
+ Client: mgr.GetClient(),
+ Decoder: admission.NewDecoder(mgr.GetScheme()),
+ }).SetupWithManager(mgr)
+
+ (&DSCDefaulter{}).SetupWithManager(mgr)
+}
+
func (w *OpenDataHubValidatingWebhook) SetupWithManager(mgr ctrl.Manager) {
hookServer := mgr.GetWebhookServer()
odhWebhook := &webhook.Admission{
diff --git a/docs/api-overview.md b/docs/api-overview.md
index 53959d76a51..3c963f15088 100644
--- a/docs/api-overview.md
+++ b/docs/api-overview.md
@@ -627,7 +627,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `applicationsNamespace` _string_ | Namespace for applications to be installed, non-configurable, default to "opendatahub" | opendatahub | |
+| `applicationsNamespace` _string_ | Namespace for applications to be installed, non-configurable, default to "opendatahub" | opendatahub | MaxLength: 63
Pattern: `^([a-z0-9]([-a-z0-9]*[a-z0-9])?)?$`
|
| `monitoring` _[Monitoring](#monitoring)_ | Enable monitoring on specified namespace | | |
| `serviceMesh` _[ServiceMeshSpec](#servicemeshspec)_ | Configures Service Mesh as networking layer for Data Science Clusters components.
The Service Mesh is a mandatory prerequisite for single model serving (KServe) and
you should review this configuration if you are planning to use KServe.
For other components, it enhances user experience; e.g. it provides unified
authentication giving a Single Sign On experience. | | |
| `trustedCABundle` _[TrustedCABundleSpec](#trustedcabundlespec)_ | When set to `Managed`, adds odh-trusted-ca-bundle Configmap to all namespaces that includes
cluster-wide Trusted CA Bundle in .data["ca-bundle.crt"].
Additionally, this fields allows admins to add custom CA bundles to the configmap using the .CustomCABundle field. | | |
@@ -686,7 +686,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `managementState` _[ManagementState](#managementstate)_ | Set to one of the following values:
- "Managed" : the operator is actively managing the component and trying to keep it active.
It will only upgrade the component if it is safe to do so.
- "Removed" : the operator is actively managing the component and will not install it,
or if it is installed, the operator will try to remove it. | | Enum: [Managed Removed]
|
-| `namespace` _string_ | Namespace for monitoring if it is enabled | opendatahub | |
+| `namespace` _string_ | Namespace for monitoring if it is enabled | opendatahub | MaxLength: 63
Pattern: `^([a-z0-9]([-a-z0-9]*[a-z0-9])?)?$`
|
#### TrustedCABundleSpec
diff --git a/main.go b/main.go
index 9cdd5fac84a..778a829f5ee 100644
--- a/main.go
+++ b/main.go
@@ -21,6 +21,7 @@ import (
"flag"
"os"
+ "github.com/hashicorp/go-multierror"
addonv1alpha1 "github.com/openshift/addon-operator/apis/addons/v1alpha1"
ocappsv1 "github.com/openshift/api/apps/v1" //nolint:importas //reason: conflicts with appsv1 "k8s.io/api/apps/v1"
buildv1 "github.com/openshift/api/build/v1"
@@ -49,10 +50,11 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/healthz"
+ logf "sigs.k8s.io/controller-runtime/pkg/log"
+ "sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/controller-runtime/pkg/manager"
ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics/server"
ctrlwebhook "sigs.k8s.io/controller-runtime/pkg/webhook"
- "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1"
dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1"
@@ -101,6 +103,22 @@ func init() { //nolint:gochecknoinits
utilruntime.Must(operatorv1.Install(scheme))
}
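+// initComponents runs every component's Init hook once at startup so that default manifests (params.env image values) are updated before the manager starts reconciling.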
+func initComponents(ctx context.Context, p cluster.Platform) error {
+ var errs *multierror.Error
+ var dummyDSC = &dscv1.DataScienceCluster{}
+
+ components, err := dummyDSC.GetComponents()
+ if err != nil {
+ return err
+ }
+
+ for _, c := range components {
+ errs = multierror.Append(errs, c.Init(ctx, p))
+ }
+
+ return errs.ErrorOrNil()
+}
+
func main() { //nolint:funlen,maintidx
var metricsAddr string
var enableLeaderElection bool
@@ -122,12 +140,16 @@ func main() { //nolint:funlen,maintidx
flag.StringVar(&operatorName, "operator-name", "opendatahub", "The name of the operator")
flag.StringVar(&logmode, "log-mode", "", "Log mode ('', prod, devel), default to ''")
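+ // expose the standard controller-runtime zap flags so they can fine-tune the defaults chosen by --log-mode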
+ opts := zap.Options{}
+ opts.BindFlags(flag.CommandLine)
+
flag.Parse()
- ctrl.SetLogger(logger.NewLogger(logmode))
+ ctrl.SetLogger(logger.NewLoggerWithOptions(logmode, &opts))
// root context
ctx := ctrl.SetupSignalHandler()
+ ctx = logf.IntoContext(ctx, setupLog)
// Create new uncached client to run initial setup
setupCfg, err := config.GetConfig()
if err != nil {
@@ -143,14 +165,16 @@ func main() { //nolint:funlen,maintidx
setupLog.Error(err, "error getting client for setup")
os.Exit(1)
}
- // Get operator platform
- release, err := cluster.GetRelease(ctx, setupClient)
+
+ err = cluster.Init(ctx, setupClient)
if err != nil {
- setupLog.Error(err, "error getting release")
+ setupLog.Error(err, "unable to initialize cluster config")
os.Exit(1)
}
+
+ // Get operator platform
+ release := cluster.GetRelease()
platform := release.Name
- setupLog.Info("running on", "platform", platform)
secretCache := createSecretCacheConfig(platform)
deploymentCache := createDeploymentCacheConfig(platform)
@@ -210,12 +234,7 @@ func main() { //nolint:funlen,maintidx
os.Exit(1)
}
- (&webhook.OpenDataHubValidatingWebhook{
- Client: mgr.GetClient(),
- Decoder: admission.NewDecoder(mgr.GetScheme()),
- }).SetupWithManager(mgr)
-
- (&webhook.DSCDefaulter{}).SetupWithManager(mgr)
+ webhook.Init(mgr)
if err = (&dscictrl.DSCInitializationReconciler{
Client: mgr.GetClient(),
@@ -319,6 +338,10 @@ func main() { //nolint:funlen,maintidx
setupLog.Error(err, "unable to set up ready check")
os.Exit(1)
}
+ if err := initComponents(ctx, platform); err != nil {
+ setupLog.Error(err, "unable to init components")
+ os.Exit(1)
+ }
setupLog.Info("starting manager")
if err := mgr.Start(ctx); err != nil {
diff --git a/pkg/cluster/cluster_config.go b/pkg/cluster/cluster_config.go
index 402c9f28934..22a3b6b574e 100644
--- a/pkg/cluster/cluster_config.go
+++ b/pkg/cluster/cluster_config.go
@@ -8,6 +8,7 @@ import (
"strings"
"github.com/blang/semver/v4"
+ "github.com/go-logr/logr"
"github.com/operator-framework/api/pkg/lib/version"
ofapiv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
corev1 "k8s.io/api/core/v1"
@@ -15,12 +16,66 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/client"
+ logf "sigs.k8s.io/controller-runtime/pkg/log"
"github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk"
)
// +kubebuilder:rbac:groups="config.openshift.io",resources=ingresses,verbs=get
+type Platform string
+
+// Release includes information on operator version and platform
+// +kubebuilder:object:generate=true
+type Release struct {
+ Name Platform `json:"name,omitempty"`
+ Version version.OperatorVersion `json:"version,omitempty"`
+}
+
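+// clusterConfig caches the operator namespace and release information; it is populated once by Init at startup.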
+var clusterConfig struct {
+ Namespace string
+ Release Release
+}
+
+// Init initializes the cluster configuration variables on startup.
+// A Go init() function is not used here because the returned error needs to be checked by the caller.
+func Init(ctx context.Context, cli client.Client) error {
+ var err error
+ log := logf.FromContext(ctx)
+
+ clusterConfig.Namespace, err = getOperatorNamespace()
+ if err != nil {
+ log.Error(err, "unable to find operator namespace")
+ // not fatal; fall back to ""
+ }
+
+ clusterConfig.Release, err = getRelease(ctx, cli)
+ if err != nil {
+ return err
+ }
+
+ printClusterConfig(log)
+
+ return nil
+}
+
+func printClusterConfig(log logr.Logger) {
+ log.Info("Cluster config",
+ "Namespace", clusterConfig.Namespace,
+ "Release", clusterConfig.Release)
+}
+
+func GetOperatorNamespace() (string, error) {
+ if clusterConfig.Namespace == "" {
+ return "", errors.New("unable to find operator namespace")
+ }
+ return clusterConfig.Namespace, nil
+}
+
+func GetRelease() Release {
+ return clusterConfig.Release
+}
+
func GetDomain(ctx context.Context, c client.Client) (string, error) {
ingress := &unstructured.Unstructured{}
ingress.SetGroupVersionKind(gvk.OpenshiftIngress)
@@ -40,7 +95,7 @@ func GetDomain(ctx context.Context, c client.Client) (string, error) {
return domain, err
}
-func GetOperatorNamespace() (string, error) {
+func getOperatorNamespace() (string, error) {
operatorNS, exist := os.LookupEnv("OPERATOR_NAMESPACE")
if exist && operatorNS != "" {
return operatorNS, nil
@@ -82,8 +137,6 @@ func GetClusterServiceVersion(ctx context.Context, c client.Client, namespace st
gvk.ClusterServiceVersion.Kind)
}
-type Platform string
-
// detectSelfManaged detects if it is Self Managed Rhods or OpenDataHub.
func detectSelfManaged(ctx context.Context, cli client.Client) (Platform, error) {
variants := map[string]Platform{
@@ -115,25 +168,24 @@ func detectManagedRHODS(ctx context.Context, cli client.Client) (Platform, error
}
func getPlatform(ctx context.Context, cli client.Client) (Platform, error) {
- // First check if its addon installation to return 'ManagedRhods, nil'
- if platform, err := detectManagedRHODS(ctx, cli); err != nil {
- return Unknown, err
- } else if platform == ManagedRhods {
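+ // prefer the explicit ODH_PLATFORM_TYPE env var (injected via the deployment/CSV) over cluster detection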
+ switch os.Getenv("ODH_PLATFORM_TYPE") {
+ case "OpenDataHub", "":
+ return OpenDataHub, nil
+ case "ManagedRHOAI":
return ManagedRhods, nil
+ case "SelfManagedRHOAI":
+ return SelfManagedRhods, nil
+ default: // fall back to cluster detection when ODH_PLATFORM_TYPE is set to an unrecognized value
+ if platform, err := detectManagedRHODS(ctx, cli); err != nil {
+ return Unknown, err
+ } else if platform == ManagedRhods {
+ return ManagedRhods, nil
+ }
+ return detectSelfManaged(ctx, cli)
}
-
- // check and return whether ODH or self-managed platform
- return detectSelfManaged(ctx, cli)
-}
-
-// Release includes information on operator version and platform
-// +kubebuilder:object:generate=true
-type Release struct {
- Name Platform `json:"name,omitempty"`
- Version version.OperatorVersion `json:"version,omitempty"`
}
-func GetRelease(ctx context.Context, cli client.Client) (Release, error) {
+func getRelease(ctx context.Context, cli client.Client) (Release, error) {
initRelease := Release{
// dummy version set to name "", version 0.0.0
Version: version.OperatorVersion{
@@ -151,6 +203,7 @@ func GetRelease(ctx context.Context, cli client.Client) (Release, error) {
if os.Getenv("CI") == "true" {
return initRelease, nil
}
+
// Set Version
// Get watchNamespace
operatorNamespace, err := GetOperatorNamespace()
diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go
index 149ed5f1dac..dc8a4cf97f6 100644
--- a/pkg/logger/logger.go
+++ b/pkg/logger/logger.go
@@ -1,6 +1,7 @@
package logger
import (
+ "flag"
"os"
"strings"
@@ -35,9 +36,23 @@ func LogWithLevel(logger logr.Logger, level string) logr.Logger {
return logger.V(verbosityLevel)
}
+func NewLoggerWithOptions(mode string, override *zap.Options) logr.Logger {
+ opts := newOptions(mode)
+ overrideOptions(opts, override)
+ return newLogger(opts)
+}
+
// in DSC component, to use different mode for logging, e.g. development, production
// when not set mode it falls to "default" which is used by startup main.go.
func NewLogger(mode string) logr.Logger {
+ return newLogger(newOptions(mode))
+}
+
+func newLogger(opts *zap.Options) logr.Logger {
+ return zap.New(zap.UseFlagOptions(opts))
+}
+
+func newOptions(mode string) *zap.Options {
var opts zap.Options
switch mode {
case "devel", "development": // the most logging verbosity
@@ -72,5 +87,34 @@ func NewLogger(mode string) logr.Logger {
DestWriter: os.Stdout,
}
}
- return zap.New(zap.UseFlagOptions(&opts))
+ return &opts
+}
+
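+// overrideOptions applies any explicitly-set zap flag values on top of the mode-based defaults.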
+func overrideOptions(orig, override *zap.Options) {
+ // Development is boolean, cannot check for nil, so check if it was set
+ isDevelopmentSet := false
+ flag.Visit(func(f *flag.Flag) {
+ if f.Name == "zap-devel" {
+ isDevelopmentSet = true
+ }
+ })
+ if isDevelopmentSet {
+ orig.Development = override.Development
+ }
+
+ if override.StacktraceLevel != nil {
+ orig.StacktraceLevel = override.StacktraceLevel
+ }
+
+ if override.Level != nil {
+ orig.Level = override.Level
+ }
+
+ if override.DestWriter != nil {
+ orig.DestWriter = override.DestWriter
+ }
+
+ if override.EncoderConfigOptions != nil {
+ orig.EncoderConfigOptions = override.EncoderConfigOptions
+ }
}