From d022865e5ee4ed98b5197cf4bd9fabe1feacfc7d Mon Sep 17 00:00:00 2001 From: Yauheni Kaliuta Date: Tue, 8 Oct 2024 20:21:36 +0300 Subject: [PATCH 01/22] logger: add zap command line switches (#1280) Allow to tune preconfigured by --log-mode zap backend with standard zap command line switches from controller-runtime (zap-devel, zap-encoder, zap-log-level, zap-stacktrace-level, zap-time-encoding)[1]. This brings flexibility in logger setup for development environments first of all. The patch does not change default logger setup and does not change DSCI override functionality. [1] https://sdk.operatorframework.io/docs/building-operators/golang/references/logging Signed-off-by: Yauheni Kaliuta --- main.go | 6 +++++- pkg/logger/logger.go | 46 +++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 50 insertions(+), 2 deletions(-) diff --git a/main.go b/main.go index 62c31edec0b..7d33d59a997 100644 --- a/main.go +++ b/main.go @@ -51,6 +51,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/healthz" logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" "sigs.k8s.io/controller-runtime/pkg/manager" ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics/server" ctrlwebhook "sigs.k8s.io/controller-runtime/pkg/webhook" @@ -140,9 +141,12 @@ func main() { //nolint:funlen,maintidx flag.StringVar(&operatorName, "operator-name", "opendatahub", "The name of the operator") flag.StringVar(&logmode, "log-mode", "", "Log mode ('', prod, devel), default to ''") + opts := zap.Options{} + opts.BindFlags(flag.CommandLine) + flag.Parse() - ctrl.SetLogger(logger.NewLogger(logmode)) + ctrl.SetLogger(logger.NewLoggerWithOptions(logmode, &opts)) // root context ctx := ctrl.SetupSignalHandler() diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index 149ed5f1dac..dc8a4cf97f6 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -1,6 +1,7 @@ package logger import ( + "flag" "os" "strings" 
@@ -35,9 +36,23 @@ func LogWithLevel(logger logr.Logger, level string) logr.Logger { return logger.V(verbosityLevel) } +func NewLoggerWithOptions(mode string, override *zap.Options) logr.Logger { + opts := newOptions(mode) + overrideOptions(opts, override) + return newLogger(opts) +} + // in DSC component, to use different mode for logging, e.g. development, production // when not set mode it falls to "default" which is used by startup main.go. func NewLogger(mode string) logr.Logger { + return newLogger(newOptions(mode)) +} + +func newLogger(opts *zap.Options) logr.Logger { + return zap.New(zap.UseFlagOptions(opts)) +} + +func newOptions(mode string) *zap.Options { var opts zap.Options switch mode { case "devel", "development": // the most logging verbosity @@ -72,5 +87,34 @@ func NewLogger(mode string) logr.Logger { DestWriter: os.Stdout, } } - return zap.New(zap.UseFlagOptions(&opts)) + return &opts +} + +func overrideOptions(orig, override *zap.Options) { + // Development is boolean, cannot check for nil, so check if it was set + isDevelopmentSet := false + flag.Visit(func(f *flag.Flag) { + if f.Name == "zap-devel" { + isDevelopmentSet = true + } + }) + if isDevelopmentSet { + orig.Development = override.Development + } + + if override.StacktraceLevel != nil { + orig.StacktraceLevel = override.StacktraceLevel + } + + if override.Level != nil { + orig.Level = override.Level + } + + if override.DestWriter != nil { + orig.DestWriter = override.DestWriter + } + + if override.EncoderConfigOptions != nil { + orig.EncoderConfigOptions = override.EncoderConfigOptions + } } From 6134496546f38dc1338623fb3f9688cf1efb332d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n=20Sanz=20G=C3=B3miz?= <100594859+asanzgom@users.noreply.github.com> Date: Tue, 8 Oct 2024 20:44:04 +0200 Subject: [PATCH 02/22] Modify Unit Tests GitHub Actions workflow to get code coverage test reports (#1279) * Create codecov.yml * Added to run test coverage also on PRs * Removing trailing ] * Update 
.github/workflows/codecov.yml Co-authored-by: Wen Zhou * Removed go mod install dependency * Consolidated codecov workflow into unit tests workflow --------- Co-authored-by: Wen Zhou --- .github/workflows/unit-tests.yaml | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/.github/workflows/unit-tests.yaml b/.github/workflows/unit-tests.yaml index cff0d30b93b..b7f470a58d6 100644 --- a/.github/workflows/unit-tests.yaml +++ b/.github/workflows/unit-tests.yaml @@ -8,12 +8,21 @@ on: pull_request: jobs: unit-test: + name: Run tests and collect coverage runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 - - name: Setup Go - uses: actions/setup-go@v4 + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up Go + uses: actions/setup-go@v5 with: go-version-file: go.mod + - name: Run Unit Tests run: make unit-test + + - name: Upload results to Codecov + uses: codecov/codecov-action@v4 + with: + token: ${{ secrets.CODECOV_TOKEN }} From e40c770cdb4fdedabe2e0235ac0a2d5e97aed0aa Mon Sep 17 00:00:00 2001 From: Yauheni Kaliuta Date: Wed, 9 Oct 2024 13:33:19 +0300 Subject: [PATCH 03/22] webhook: move initialization inside of the module (#1284) Add webhook.Init() function and hide webhook setup inside of the module. It will make it possible to replace Init with a NOP (no operation) with conditional compilation for no-webhook build. 
Signed-off-by: Yauheni Kaliuta --- controllers/webhook/webhook.go | 9 +++++++++ main.go | 8 +------- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/controllers/webhook/webhook.go b/controllers/webhook/webhook.go index 9455cfd08ba..29739e2af00 100644 --- a/controllers/webhook/webhook.go +++ b/controllers/webhook/webhook.go @@ -46,6 +46,15 @@ type OpenDataHubValidatingWebhook struct { Decoder *admission.Decoder } +func Init(mgr ctrl.Manager) { + (&OpenDataHubValidatingWebhook{ + Client: mgr.GetClient(), + Decoder: admission.NewDecoder(mgr.GetScheme()), + }).SetupWithManager(mgr) + + (&DSCDefaulter{}).SetupWithManager(mgr) +} + func (w *OpenDataHubValidatingWebhook) SetupWithManager(mgr ctrl.Manager) { hookServer := mgr.GetWebhookServer() odhWebhook := &webhook.Admission{ diff --git a/main.go b/main.go index 7d33d59a997..778a829f5ee 100644 --- a/main.go +++ b/main.go @@ -55,7 +55,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics/server" ctrlwebhook "sigs.k8s.io/controller-runtime/pkg/webhook" - "sigs.k8s.io/controller-runtime/pkg/webhook/admission" dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" @@ -235,12 +234,7 @@ func main() { //nolint:funlen,maintidx os.Exit(1) } - (&webhook.OpenDataHubValidatingWebhook{ - Client: mgr.GetClient(), - Decoder: admission.NewDecoder(mgr.GetScheme()), - }).SetupWithManager(mgr) - - (&webhook.DSCDefaulter{}).SetupWithManager(mgr) + webhook.Init(mgr) if err = (&dscictrl.DSCInitializationReconciler{ Client: mgr.GetClient(), From 283d3507ed91e7abe593af3d6a78effc81ca3c2b Mon Sep 17 00:00:00 2001 From: Wen Zhou Date: Wed, 9 Oct 2024 14:25:09 +0200 Subject: [PATCH 04/22] feat: pass platform from env variable or fall back to use old logic (#1252) * feat: pass platform from env variables or fall back to use old logic - introduce new env var 
ODH_PLATFORM_TYPE, value set during build time - if value not match, fall back to detect managed then self-managed - introduce new env var OPERATOR_RELEASE_VERSION, value also set during build time - if value is empty, fall back to use old way from CSV to read version or use 0.0.0 - add support from makefile - use envstubst to replace version Signed-off-by: Wen Zhou * update: remove release version in the change Signed-off-by: Wen Zhou --------- Signed-off-by: Wen Zhou --- ...atahub-operator.clusterserviceversion.yaml | 2 ++ config/manager/manager.yaml | 2 ++ pkg/cluster/cluster_config.go | 21 ++++++++++++------- 3 files changed, 18 insertions(+), 7 deletions(-) diff --git a/bundle/manifests/opendatahub-operator.clusterserviceversion.yaml b/bundle/manifests/opendatahub-operator.clusterserviceversion.yaml index 81b13c83f14..b7bdec6808b 100644 --- a/bundle/manifests/opendatahub-operator.clusterserviceversion.yaml +++ b/bundle/manifests/opendatahub-operator.clusterserviceversion.yaml @@ -1091,6 +1091,8 @@ spec: fieldPath: metadata.namespace - name: DEFAULT_MANIFESTS_PATH value: /opt/manifests + - name: ODH_PLATFORM_TYPE + value: OpenDataHub image: REPLACE_IMAGE:latest imagePullPolicy: Always livenessProbe: diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 0aadbcc2e89..bff26ae5022 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -46,6 +46,8 @@ spec: fieldPath: metadata.namespace - name: DEFAULT_MANIFESTS_PATH value: /opt/manifests + - name: ODH_PLATFORM_TYPE + value: OpenDataHub args: - --leader-elect - --operator-name=opendatahub diff --git a/pkg/cluster/cluster_config.go b/pkg/cluster/cluster_config.go index 8a09e3450cc..22a3b6b574e 100644 --- a/pkg/cluster/cluster_config.go +++ b/pkg/cluster/cluster_config.go @@ -168,15 +168,21 @@ func detectManagedRHODS(ctx context.Context, cli client.Client) (Platform, error } func getPlatform(ctx context.Context, cli client.Client) (Platform, error) { - // First check if its 
addon installation to return 'ManagedRhods, nil' - if platform, err := detectManagedRHODS(ctx, cli); err != nil { - return Unknown, err - } else if platform == ManagedRhods { + switch os.Getenv("ODH_PLATFORM_TYPE") { + case "OpenDataHub", "": + return OpenDataHub, nil + case "ManagedRHOAI": return ManagedRhods, nil + case "SelfManagedRHOAI": + return SelfManagedRhods, nil + default: // fall back to detect platform if ODH_PLATFORM_TYPE env is not provided + if platform, err := detectManagedRHODS(ctx, cli); err != nil { + return Unknown, err + } else if platform == ManagedRhods { + return ManagedRhods, nil + } + return detectSelfManaged(ctx, cli) } - - // check and return whether ODH or self-managed platform - return detectSelfManaged(ctx, cli) } func getRelease(ctx context.Context, cli client.Client) (Release, error) { @@ -197,6 +203,7 @@ func getRelease(ctx context.Context, cli client.Client) (Release, error) { if os.Getenv("CI") == "true" { return initRelease, nil } + // Set Version // Get watchNamespace operatorNamespace, err := GetOperatorNamespace() From 59cade99e5cd1e4d6fd2e1273f71cf2f1597e076 Mon Sep 17 00:00:00 2001 From: Wen Zhou Date: Wed, 9 Oct 2024 17:53:17 +0200 Subject: [PATCH 05/22] fix: update release version in DSCI and DSC .status for upgrade case (#1287) - DSCI: if current version is not matching, update it - DSC: in both reconcile pass and fail case, update it Signed-off-by: Wen Zhou --- .../datasciencecluster_controller.go | 2 ++ .../dscinitialization_controller.go | 14 ++++++++++++++ 2 files changed, 16 insertions(+) diff --git a/controllers/datasciencecluster/datasciencecluster_controller.go b/controllers/datasciencecluster/datasciencecluster_controller.go index 5e522e335a5..af00a344e73 100644 --- a/controllers/datasciencecluster/datasciencecluster_controller.go +++ b/controllers/datasciencecluster/datasciencecluster_controller.go @@ -254,6 +254,7 @@ func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.R 
status.SetCompleteCondition(&saved.Status.Conditions, status.ReconcileCompletedWithComponentErrors, fmt.Sprintf("DataScienceCluster resource reconciled with component errors: %v", componentErrors)) saved.Status.Phase = status.PhaseReady + saved.Status.Release = currentOperatorRelease }) if err != nil { log.Error(err, "failed to update DataScienceCluster conditions with incompleted reconciliation") @@ -270,6 +271,7 @@ func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.R instance, err = status.UpdateWithRetry(ctx, r.Client, instance, func(saved *dscv1.DataScienceCluster) { status.SetCompleteCondition(&saved.Status.Conditions, status.ReconcileCompleted, "DataScienceCluster resource reconciled successfully") saved.Status.Phase = status.PhaseReady + saved.Status.Release = currentOperatorRelease }) if err != nil { diff --git a/controllers/dscinitialization/dscinitialization_controller.go b/controllers/dscinitialization/dscinitialization_controller.go index 56033a9a470..37991131cb5 100644 --- a/controllers/dscinitialization/dscinitialization_controller.go +++ b/controllers/dscinitialization/dscinitialization_controller.go @@ -152,6 +152,20 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re } } + // upgrade case to update release version in status + if !instance.Status.Release.Version.Equals(currentOperatorRelease.Version.Version) { + message := "Updating DSCInitialization status" + instance, err := status.UpdateWithRetry(ctx, r.Client, instance, func(saved *dsciv1.DSCInitialization) { + saved.Status.Release = currentOperatorRelease + }) + if err != nil { + log.Error(err, "Failed to update release version for DSCInitialization resource.", "DSCInitialization", req.Namespace, "Request.Name", req.Name) + r.Recorder.Eventf(instance, corev1.EventTypeWarning, "DSCInitializationReconcileError", + "%s for instance %s", message, instance.Name) + return reconcile.Result{}, err + } + } + // Check namespace is not exist, 
then create namespace := instance.Spec.ApplicationsNamespace err := r.createOdhNamespace(ctx, instance, namespace, platform) From db0fa50316d48658701ca429b062da082d699521 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 9 Oct 2024 17:35:00 +0000 Subject: [PATCH 06/22] Update version to 2.19.0 (#1291) Co-authored-by: VaishnaviHire <17230536+VaishnaviHire@users.noreply.github.com> --- Makefile | 2 +- .../opendatahub-operator.clusterserviceversion.yaml | 10 +++++----- .../opendatahub-operator.clusterserviceversion.yaml | 10 +++++----- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Makefile b/Makefile index 06101fae8c8..9fdf869a51b 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,7 @@ # To re-generate a bundle for another specific version without changing the standard setup, you can: # - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2) # - use environment variables to overwrite this value (e.g export VERSION=0.0.2) -VERSION ?= 2.18.2 +VERSION ?= 2.19.0 # IMAGE_TAG_BASE defines the opendatahub.io namespace and part of the image name for remote images. # This variable is used to construct full image tags for bundle and catalog images. 
# diff --git a/bundle/manifests/opendatahub-operator.clusterserviceversion.yaml b/bundle/manifests/opendatahub-operator.clusterserviceversion.yaml index b7bdec6808b..1519a9ef8ca 100644 --- a/bundle/manifests/opendatahub-operator.clusterserviceversion.yaml +++ b/bundle/manifests/opendatahub-operator.clusterserviceversion.yaml @@ -102,13 +102,13 @@ metadata: capabilities: Full Lifecycle categories: AI/Machine Learning, Big Data certified: "False" - containerImage: quay.io/opendatahub/opendatahub-operator:v2.18.2 - createdAt: "2024-09-24T15:16:50Z" - olm.skipRange: '>=1.0.0 <2.18.2' + containerImage: quay.io/opendatahub/opendatahub-operator:v2.19.0 + createdAt: "2024-10-09T14:46:54Z" + olm.skipRange: '>=1.0.0 <2.19.0' operators.operatorframework.io/builder: operator-sdk-v1.31.0 operators.operatorframework.io/project_layout: go.kubebuilder.io/v3 repository: https://github.com/opendatahub-io/opendatahub-operator - name: opendatahub-operator.v2.18.2 + name: opendatahub-operator.v2.19.0 namespace: placeholder spec: apiservicedefinitions: {} @@ -1178,7 +1178,7 @@ spec: selector: matchLabels: component: opendatahub-operator - version: 2.18.2 + version: 2.19.0 webhookdefinitions: - admissionReviewVersions: - v1 diff --git a/config/manifests/bases/opendatahub-operator.clusterserviceversion.yaml b/config/manifests/bases/opendatahub-operator.clusterserviceversion.yaml index 41afa7a96b2..811770245b5 100644 --- a/config/manifests/bases/opendatahub-operator.clusterserviceversion.yaml +++ b/config/manifests/bases/opendatahub-operator.clusterserviceversion.yaml @@ -6,11 +6,11 @@ metadata: capabilities: Full Lifecycle categories: AI/Machine Learning, Big Data certified: "False" - containerImage: quay.io/opendatahub/opendatahub-operator:v2.18.2 - createdAt: "2024-9-24T00:00:00Z" - olm.skipRange: '>=1.0.0 <2.18.2' + containerImage: quay.io/opendatahub/opendatahub-operator:v2.19.0 + createdAt: "2024-10-09T00:00:00Z" + olm.skipRange: '>=1.0.0 <2.19.0' repository: 
https://github.com/opendatahub-io/opendatahub-operator - name: opendatahub-operator.v2.18.2 + name: opendatahub-operator.v2.19.0 namespace: placeholder spec: apiservicedefinitions: {} @@ -106,4 +106,4 @@ spec: selector: matchLabels: component: opendatahub-operator - version: 2.18.2 + version: 2.19.0 From e8e266f543d7f944ecec92fe7cdf85f616471171 Mon Sep 17 00:00:00 2001 From: Yauheni Kaliuta Date: Thu, 10 Oct 2024 12:03:15 +0300 Subject: [PATCH 07/22] Makefile, webhook: add run-nowebhook (#1286) Make it possible to compile operator without webhook enabled (with -tags nowebhook). Create a stub webhook.Init() function for that. Add run-nowebhook target to run webhook locally. It requires `make install` to be executed on the cluster beforehand. Since it repeats `make run`, move the command to a variable. Also use variable RUN_ARGS for operator arguments. It makes it possible to override them from the command line. In VSCode it is possible to debug it with the following example launch.json configuration: ``` { "name": "Debug Operator No Webhook", "type": "go", "request": "launch", "mode": "debug", "program": "main.go", "buildFlags": [ "-tags", "nowebhook" ], "env": { "OPERATOR_NAMESPACE": "opendatahub-operator-system", "DEFAULT_MANIFESTS_PATH": "./opt/manifests" }, "args": [ "--log-mode=devel" ], "cwd": "${workspaceFolder}", } ``` Signed-off-by: Yauheni Kaliuta --- Makefile | 9 ++++++++- controllers/webhook/nowebhook.go | 7 +++++++ controllers/webhook/webhook.go | 2 ++ 3 files changed, 17 insertions(+), 1 deletion(-) create mode 100644 controllers/webhook/nowebhook.go diff --git a/Makefile b/Makefile index 9fdf869a51b..7ba0b5047ed 100644 --- a/Makefile +++ b/Makefile @@ -180,9 +180,16 @@ api-docs: crd-ref-docs ## Creates API docs using https://github.com/elastic/crd- build: generate fmt vet ## Build manager binary. 
go build -o bin/manager main.go +RUN_ARGS = --log-mode=devel +GO_RUN_MAIN = OPERATOR_NAMESPACE=$(OPERATOR_NAMESPACE) DEFAULT_MANIFESTS_PATH=$(DEFAULT_MANIFESTS_PATH) go run $(GO_RUN_ARGS) ./main.go $(RUN_ARGS) .PHONY: run run: manifests generate fmt vet ## Run a controller from your host. - OPERATOR_NAMESPACE=$(OPERATOR_NAMESPACE) DEFAULT_MANIFESTS_PATH=${DEFAULT_MANIFESTS_PATH} go run ./main.go --log-mode=devel + $(GO_RUN_MAIN) + +.PHONY: run-nowebhook +run-nowebhook: GO_RUN_ARGS += -tags nowebhook +run-nowebhook: manifests generate fmt vet ## Run a controller from your host without webhook enabled + $(GO_RUN_MAIN) .PHONY: image-build image-build: # unit-test ## Build image with the manager. diff --git a/controllers/webhook/nowebhook.go b/controllers/webhook/nowebhook.go new file mode 100644 index 00000000000..dc8eefe7533 --- /dev/null +++ b/controllers/webhook/nowebhook.go @@ -0,0 +1,7 @@ +//go:build nowebhook + +package webhook + +import ctrl "sigs.k8s.io/controller-runtime" + +func Init(mgr ctrl.Manager) {} diff --git a/controllers/webhook/webhook.go b/controllers/webhook/webhook.go index 29739e2af00..5a539667b91 100644 --- a/controllers/webhook/webhook.go +++ b/controllers/webhook/webhook.go @@ -1,3 +1,5 @@ +//go:build !nowebhook + /* Copyright 2023. 
From fa63b4db12ad52166c316c861afbf87ab271adfb Mon Sep 17 00:00:00 2001 From: Wen Zhou Date: Fri, 11 Oct 2024 19:25:30 +0200 Subject: [PATCH 08/22] update: use namespace dynamically from operator env rather than hardcode value (#1298) - these are only needed when it is downstream specific cases Signed-off-by: Wen Zhou --- controllers/dscinitialization/utils.go | 8 +++++++- main.go | 6 +++++- pkg/cluster/cluster_config.go | 6 +++++- 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/controllers/dscinitialization/utils.go b/controllers/dscinitialization/utils.go index b2990c68ef3..e2e0cd10b22 100644 --- a/controllers/dscinitialization/utils.go +++ b/controllers/dscinitialization/utils.go @@ -195,8 +195,14 @@ func (r *DSCInitializationReconciler) createDefaultRoleBinding(ctx context.Conte func (r *DSCInitializationReconciler) reconcileDefaultNetworkPolicy(ctx context.Context, name string, dscInit *dsciv1.DSCInitialization, platform cluster.Platform) error { log := r.Log if platform == cluster.ManagedRhods || platform == cluster.SelfManagedRhods { + // Get operator namepsace + operatorNs, err := cluster.GetOperatorNamespace() + if err != nil { + log.Error(err, "error getting operator namespace for networkplicy creation") + return err + } // Deploy networkpolicy for operator namespace - err := deploy.DeployManifestsFromPath(ctx, r.Client, dscInit, networkpolicyPath+"/operator", "redhat-ods-operator", "networkpolicy", true) + err = deploy.DeployManifestsFromPath(ctx, r.Client, dscInit, networkpolicyPath+"/operator", operatorNs, "networkpolicy", true) if err != nil { log.Error(err, "error to set networkpolicy in operator namespace", "path", networkpolicyPath) return err diff --git a/main.go b/main.go index 778a829f5ee..c7d38cfb1c3 100644 --- a/main.go +++ b/main.go @@ -359,7 +359,11 @@ func createSecretCacheConfig(platform cluster.Platform) map[string]cache.Config case cluster.ManagedRhods: namespaceConfigs["redhat-ods-monitoring"] = cache.Config{} 
namespaceConfigs["redhat-ods-applications"] = cache.Config{} - namespaceConfigs["redhat-ods-operator"] = cache.Config{} + operatorNs, err := cluster.GetOperatorNamespace() + if err != nil { + operatorNs = "redhat-ods-operator" // fall back case + } + namespaceConfigs[operatorNs] = cache.Config{} case cluster.SelfManagedRhods: namespaceConfigs["redhat-ods-applications"] = cache.Config{} default: diff --git a/pkg/cluster/cluster_config.go b/pkg/cluster/cluster_config.go index 22a3b6b574e..442878baf2f 100644 --- a/pkg/cluster/cluster_config.go +++ b/pkg/cluster/cluster_config.go @@ -160,7 +160,11 @@ func detectSelfManaged(ctx context.Context, cli client.Client) (Platform, error) // detectManagedRHODS checks if catsrc CR add-on exists ManagedRhods. func detectManagedRHODS(ctx context.Context, cli client.Client) (Platform, error) { catalogSource := &ofapiv1alpha1.CatalogSource{} - err := cli.Get(ctx, client.ObjectKey{Name: "addon-managed-odh-catalog", Namespace: "redhat-ods-operator"}, catalogSource) + operatorNs, err := GetOperatorNamespace() + if err != nil { + operatorNs = "redhat-ods-operator" + } + err = cli.Get(ctx, client.ObjectKey{Name: "addon-managed-odh-catalog", Namespace: operatorNs}, catalogSource) if err != nil { return Unknown, client.IgnoreNotFound(err) } From 2eed721a7500971c1d498ed32d46cce6ad5f1e88 Mon Sep 17 00:00:00 2001 From: Yauheni Kaliuta Date: Mon, 14 Oct 2024 16:57:07 +0300 Subject: [PATCH 09/22] webhook: switch to contextual logger (#1295) Use k8s contextual logger instead of own. Comparing to other parts of the operator webhook is a bit special since it serves own endpoints and has context independent from controllers. Include more info (operation), just to get more details. Do not add kind since it's clear from "resource" field. Since controller-framework adds "admission" to the name, use own LogConstructor with own base logger for the framework's DefaultLogConstructor. Add name locally to distinguish from framework's messages. 
Add Name field to the structures to avoid copying string literal for both WithName() and WithValues(). The output changes and it looks like ``` {"level":"info","ts":"2024-10-11T05:17:20Z","logger":"ValidatingWebhook","msg":"Validation request","object":{"name":"default-dsci"},"namespace":"","name":"default-dsci","resource":{"group":"dscinitialization.opendatahub.io","version":"v1","resource":"dscinitializations"},"user":"kube:admin","requestID":"e5bf3768-6faa-4e14-9004-e54ee84ad8b7","webhook":"ValidatingWebhook","operation":"CREATE"} ``` or for the defaulter: ``` {"level":"info","ts":"2024-10-11T04:50:48Z","logger":"DefaultingWebhook","msg":"Defaulting DSC","object":{"name":"default-dsc"},"namespace":"","name":"default-dsc","resource":{"group":"datasciencecluster.opendatahub.io","version":"v1","resource":"datascienceclusters"},"user":"kube:admin","requestID":"c9213ff3-80ee-40c0-9f15-12188dece68e","webhook":"DefaultingWebhook"} ``` (the messages are not from the current codebase, was added for demo only) Signed-off-by: Yauheni Kaliuta --- controllers/webhook/webhook.go | 36 +++++++++++++++++++++++++++++----- 1 file changed, 31 insertions(+), 5 deletions(-) diff --git a/controllers/webhook/webhook.go b/controllers/webhook/webhook.go index 5a539667b91..7dbdf81b845 100644 --- a/controllers/webhook/webhook.go +++ b/controllers/webhook/webhook.go @@ -23,6 +23,7 @@ import ( "fmt" "net/http" + "github.com/go-logr/logr" operatorv1 "github.com/openshift/api/operator/v1" admissionv1 "k8s.io/api/admission/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -30,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -38,29 +40,44 @@ import ( "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" ) -var log = 
ctrl.Log.WithName("odh-controller-webhook") - //+kubebuilder:webhook:path=/validate-opendatahub-io-v1,mutating=false,failurePolicy=fail,sideEffects=None,groups=datasciencecluster.opendatahub.io;dscinitialization.opendatahub.io,resources=datascienceclusters;dscinitializations,verbs=create;delete,versions=v1,name=operator.opendatahub.io,admissionReviewVersions=v1 //nolint:lll +// TODO: Get rid of platform in name, rename to ValidatingWebhook. type OpenDataHubValidatingWebhook struct { Client client.Client Decoder *admission.Decoder + Name string } func Init(mgr ctrl.Manager) { (&OpenDataHubValidatingWebhook{ Client: mgr.GetClient(), Decoder: admission.NewDecoder(mgr.GetScheme()), + Name: "ValidatingWebhook", + }).SetupWithManager(mgr) + + (&DSCDefaulter{ + Name: "DefaultingWebhook", }).SetupWithManager(mgr) +} - (&DSCDefaulter{}).SetupWithManager(mgr) +// newLogConstructor creates a new logger constructor for a webhook. +// It is based on the root controller-runtime logger witch is set in main.go +// The purpose of it is to remove "admission" from the log name. 
+func newLogConstructor(name string) func(logr.Logger, *admission.Request) logr.Logger { + return func(_ logr.Logger, req *admission.Request) logr.Logger { + base := ctrl.Log + l := admission.DefaultLogConstructor(base, req) + return l.WithValues("webhook", name) + } } func (w *OpenDataHubValidatingWebhook) SetupWithManager(mgr ctrl.Manager) { hookServer := mgr.GetWebhookServer() odhWebhook := &webhook.Admission{ - Handler: w, + Handler: w, + LogConstructor: newLogConstructor(w.Name), } hookServer.Register("/validate-opendatahub-io-v1", odhWebhook) } @@ -90,6 +107,8 @@ func denyCountGtZero(ctx context.Context, cli client.Client, gvk schema.GroupVer } func (w *OpenDataHubValidatingWebhook) checkDupCreation(ctx context.Context, req admission.Request) admission.Response { + log := logf.FromContext(ctx) + switch req.Kind.Kind { case "DataScienceCluster", "DSCInitialization": default: @@ -119,6 +138,9 @@ func (w *OpenDataHubValidatingWebhook) checkDeletion(ctx context.Context, req ad } func (w *OpenDataHubValidatingWebhook) Handle(ctx context.Context, req admission.Request) admission.Response { + log := logf.FromContext(ctx).WithName(w.Name).WithValues("operation", req.Operation) + ctx = logf.IntoContext(ctx, log) + var resp admission.Response resp.Allowed = true // initialize Allowed to be true in case Operation falls into "default" case @@ -141,19 +163,23 @@ func (w *OpenDataHubValidatingWebhook) Handle(ctx context.Context, req admission //+kubebuilder:webhook:path=/mutate-opendatahub-io-v1,mutating=true,failurePolicy=fail,sideEffects=None,groups=datasciencecluster.opendatahub.io,resources=datascienceclusters,verbs=create;update,versions=v1,name=mutate.operator.opendatahub.io,admissionReviewVersions=v1 //nolint:lll -type DSCDefaulter struct{} +type DSCDefaulter struct { + Name string +} // just assert that DSCDefaulter implements webhook.CustomDefaulter. 
var _ webhook.CustomDefaulter = &DSCDefaulter{} func (m *DSCDefaulter) SetupWithManager(mgr ctrl.Manager) { mutateWebhook := admission.WithCustomDefaulter(mgr.GetScheme(), &dscv1.DataScienceCluster{}, m) + mutateWebhook.LogConstructor = newLogConstructor(m.Name) mgr.GetWebhookServer().Register("/mutate-opendatahub-io-v1", mutateWebhook) } // Implement admission.CustomDefaulter interface. // It currently only sets defaults for modelregiestry in datascienceclusters. func (m *DSCDefaulter) Default(_ context.Context, obj runtime.Object) error { + // TODO: add debug logging, log := logf.FromContext(ctx).WithName(m.Name) dsc, isDSC := obj.(*dscv1.DataScienceCluster) if !isDSC { return fmt.Errorf("expected DataScienceCluster but got a different type: %T", obj) From 2708e540ec42b01f7dffc0a99cbd2958ac67de70 Mon Sep 17 00:00:00 2001 From: Wen Zhou Date: Mon, 14 Oct 2024 17:14:50 +0200 Subject: [PATCH 10/22] chore: use client.OjectKeyFromObject in client.Get() (#1301) Signed-off-by: Wen Zhou --- controllers/dscinitialization/monitoring.go | 2 +- controllers/dscinitialization/utils.go | 22 +++++-------------- pkg/cluster/resources.go | 10 +++------ pkg/cluster/roles.go | 4 ++-- pkg/trustedcabundle/trustedcabundle.go | 13 +++++------ tests/e2e/deletion_test.go | 6 ++--- tests/e2e/helper_test.go | 12 +++++----- .../integration/features/cleanup_int_test.go | 10 ++++----- .../features/servicemesh_feature_test.go | 4 ++-- 9 files changed, 32 insertions(+), 51 deletions(-) diff --git a/controllers/dscinitialization/monitoring.go b/controllers/dscinitialization/monitoring.go index cc5b7112175..96537c76241 100644 --- a/controllers/dscinitialization/monitoring.go +++ b/controllers/dscinitialization/monitoring.go @@ -418,7 +418,7 @@ func createMonitoringProxySecret(ctx context.Context, cli client.Client, name st } foundProxySecret := &corev1.Secret{} - err = cli.Get(ctx, client.ObjectKey{Name: name, Namespace: dsciInit.Spec.Monitoring.Namespace}, foundProxySecret) + err = cli.Get(ctx, 
client.ObjectKeyFromObject(desiredProxySecret), foundProxySecret) if err != nil { if k8serr.IsNotFound(err) { // Set Controller reference diff --git a/controllers/dscinitialization/utils.go b/controllers/dscinitialization/utils.go index e2e0cd10b22..be4d6ff071b 100644 --- a/controllers/dscinitialization/utils.go +++ b/controllers/dscinitialization/utils.go @@ -51,7 +51,7 @@ func (r *DSCInitializationReconciler) createOdhNamespace(ctx context.Context, ds // Create Application Namespace if it doesn't exist foundNamespace := &corev1.Namespace{} - err := r.Get(ctx, client.ObjectKey{Name: name}, foundNamespace) + err := r.Get(ctx, client.ObjectKeyFromObject(desiredNamespace), foundNamespace) if err != nil { if k8serr.IsNotFound(err) { log.Info("Creating namespace", "name", name) @@ -169,10 +169,7 @@ func (r *DSCInitializationReconciler) createDefaultRoleBinding(ctx context.Conte // Create RoleBinding if doesn't exists foundRoleBinding := &rbacv1.RoleBinding{} - err := r.Client.Get(ctx, client.ObjectKey{ - Name: name, - Namespace: name, - }, foundRoleBinding) + err := r.Client.Get(ctx, client.ObjectKeyFromObject(desiredRoleBinding), foundRoleBinding) if err != nil { if k8serr.IsNotFound(err) { // Set Controller reference @@ -292,10 +289,7 @@ func (r *DSCInitializationReconciler) reconcileDefaultNetworkPolicy(ctx context. 
// Create NetworkPolicy if it doesn't exist foundNetworkPolicy := &networkingv1.NetworkPolicy{} justCreated := false - err := r.Client.Get(ctx, client.ObjectKey{ - Name: name, - Namespace: name, - }, foundNetworkPolicy) + err := r.Client.Get(ctx, client.ObjectKeyFromObject(desiredNetworkPolicy), foundNetworkPolicy) if err != nil { if k8serr.IsNotFound(err) { // Set Controller reference @@ -397,10 +391,7 @@ func (r *DSCInitializationReconciler) createOdhCommonConfigMap(ctx context.Conte // Create Configmap if doesn't exists foundConfigMap := &corev1.ConfigMap{} - err := r.Client.Get(ctx, client.ObjectKey{ - Name: name, - Namespace: name, - }, foundConfigMap) + err := r.Client.Get(ctx, client.ObjectKeyFromObject(desiredConfigMap), foundConfigMap) if err != nil { if k8serr.IsNotFound(err) { // Set Controller reference @@ -430,10 +421,7 @@ func (r *DSCInitializationReconciler) createUserGroup(ctx context.Context, dscIn // Otherwise is errors with "error": "Group.user.openshift.io \"odh-admins\" is invalid: users: Invalid value: \"null\": users in body must be of type array: \"null\""} Users: []string{}, } - err := r.Client.Get(ctx, client.ObjectKey{ - Name: userGroup.Name, - Namespace: dscInit.Spec.ApplicationsNamespace, - }, userGroup) + err := r.Client.Get(ctx, client.ObjectKeyFromObject(userGroup), userGroup) if err != nil { if k8serr.IsNotFound(err) { err = r.Client.Create(ctx, userGroup) diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go index 5e41eb25bc2..b1b0f84cdb5 100644 --- a/pkg/cluster/resources.go +++ b/pkg/cluster/resources.go @@ -72,7 +72,7 @@ func CreateSecret(ctx context.Context, cli client.Client, name, namespace string } foundSecret := &corev1.Secret{} - err := cli.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, foundSecret) + err := cli.Get(ctx, client.ObjectKeyFromObject(desiredSecret), foundSecret) if err != nil { if k8serr.IsNotFound(err) { err = cli.Create(ctx, desiredSecret) @@ -100,11 +100,7 @@ func 
CreateOrUpdateConfigMap(ctx context.Context, c client.Client, desiredCfgMap } existingCfgMap := &corev1.ConfigMap{} - err := c.Get(ctx, client.ObjectKey{ - Name: desiredCfgMap.Name, - Namespace: desiredCfgMap.Namespace, - }, existingCfgMap) - + err := c.Get(ctx, client.ObjectKeyFromObject(desiredCfgMap), existingCfgMap) if k8serr.IsNotFound(err) { return c.Create(ctx, desiredCfgMap) } else if err != nil { @@ -144,7 +140,7 @@ func CreateNamespace(ctx context.Context, cli client.Client, namespace string, m } foundNamespace := &corev1.Namespace{} - if getErr := cli.Get(ctx, client.ObjectKey{Name: namespace}, foundNamespace); client.IgnoreNotFound(getErr) != nil { + if getErr := cli.Get(ctx, client.ObjectKeyFromObject(desiredNamespace), foundNamespace); client.IgnoreNotFound(getErr) != nil { return nil, getErr } diff --git a/pkg/cluster/roles.go b/pkg/cluster/roles.go index c989915aefe..96ccbae0eb4 100644 --- a/pkg/cluster/roles.go +++ b/pkg/cluster/roles.go @@ -23,7 +23,7 @@ func CreateOrUpdateClusterRole(ctx context.Context, cli client.Client, name stri } foundClusterRole := &rbacv1.ClusterRole{} - err := cli.Get(ctx, client.ObjectKey{Name: desiredClusterRole.GetName()}, foundClusterRole) + err := cli.Get(ctx, client.ObjectKeyFromObject(desiredClusterRole), foundClusterRole) if k8serr.IsNotFound(err) { return desiredClusterRole, cli.Create(ctx, desiredClusterRole) } @@ -63,7 +63,7 @@ func CreateOrUpdateClusterRoleBinding(ctx context.Context, cli client.Client, na } foundClusterRoleBinding := &rbacv1.ClusterRoleBinding{} - err := cli.Get(ctx, client.ObjectKey{Name: desiredClusterRoleBinding.GetName()}, foundClusterRoleBinding) + err := cli.Get(ctx, client.ObjectKeyFromObject(desiredClusterRoleBinding), foundClusterRoleBinding) if k8serr.IsNotFound(err) { return desiredClusterRoleBinding, cli.Create(ctx, desiredClusterRoleBinding) } diff --git a/pkg/trustedcabundle/trustedcabundle.go b/pkg/trustedcabundle/trustedcabundle.go index 7e05256f34b..5be58c77283 100644 --- 
a/pkg/trustedcabundle/trustedcabundle.go +++ b/pkg/trustedcabundle/trustedcabundle.go @@ -71,10 +71,7 @@ func CreateOdhTrustedCABundleConfigMap(ctx context.Context, cli client.Client, n // Create Configmap if doesn't exist foundConfigMap := &corev1.ConfigMap{} - if err := cli.Get(ctx, client.ObjectKey{ - Name: CAConfigMapName, - Namespace: namespace, - }, foundConfigMap); err != nil { + if err := cli.Get(ctx, client.ObjectKeyFromObject(desiredConfigMap), foundConfigMap); err != nil { if k8serr.IsNotFound(err) { err = cli.Create(ctx, desiredConfigMap) if err != nil && !k8serr.IsAlreadyExists(err) { @@ -105,12 +102,12 @@ func DeleteOdhTrustedCABundleConfigMap(ctx context.Context, cli client.Client, n return cli.Delete(ctx, foundConfigMap) } -// IsTrustedCABundleUpdated check if data in CM "odh-trusted-ca-bundle" from applciation namespace matches DSCI's TrustedCABundle.CustomCABundle +// IsTrustedCABundleUpdated check if data in CM "odh-trusted-ca-bundle" from application namespace matches DSCI's TrustedCABundle.CustomCABundle // return false when these two are matching => skip update // return true when not match => need upate. func IsTrustedCABundleUpdated(ctx context.Context, cli client.Client, dscInit *dsciv1.DSCInitialization) (bool, error) { - userNamespace := &corev1.Namespace{} - if err := cli.Get(ctx, client.ObjectKey{Name: dscInit.Spec.ApplicationsNamespace}, userNamespace); err != nil { + appNamespace := &corev1.Namespace{} + if err := cli.Get(ctx, client.ObjectKey{Name: dscInit.Spec.ApplicationsNamespace}, appNamespace); err != nil { if k8serr.IsNotFound(err) { // if namespace is not found, return true. This is to ensure we reconcile, and check for other namespaces. 
return true, nil @@ -118,7 +115,7 @@ func IsTrustedCABundleUpdated(ctx context.Context, cli client.Client, dscInit *d return false, err } - if !ShouldInjectTrustedBundle(userNamespace) { + if !ShouldInjectTrustedBundle(appNamespace) { return false, nil } diff --git a/tests/e2e/deletion_test.go b/tests/e2e/deletion_test.go index 654ec01d62a..bb7caa7ada2 100644 --- a/tests/e2e/deletion_test.go +++ b/tests/e2e/deletion_test.go @@ -7,7 +7,7 @@ import ( "testing" "github.com/stretchr/testify/require" - "k8s.io/apimachinery/pkg/api/errors" + k8serr "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" @@ -57,7 +57,7 @@ func (tc *testContext) testDeletionExistDSC() error { if dscerr != nil { return fmt.Errorf("error deleting DSC instance %s: %w", expectedDSC.Name, dscerr) } - } else if !errors.IsNotFound(err) { + } else if !k8serr.IsNotFound(err) { if err != nil { return fmt.Errorf("could not find DSC instance to delete: %w", err) } @@ -120,7 +120,7 @@ func (tc *testContext) testDeletionExistDSCI() error { if dscierr != nil { return fmt.Errorf("error deleting DSCI instance %s: %w", expectedDSCI.Name, dscierr) } - } else if !errors.IsNotFound(err) { + } else if !k8serr.IsNotFound(err) { if err != nil { return fmt.Errorf("could not find DSCI instance to delete :%w", err) } diff --git a/tests/e2e/helper_test.go b/tests/e2e/helper_test.go index 3de13ee0b90..62d9e92b24a 100644 --- a/tests/e2e/helper_test.go +++ b/tests/e2e/helper_test.go @@ -14,7 +14,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - "k8s.io/apimachinery/pkg/api/errors" + k8serr "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" @@ -59,7 +59,7 @@ func (tc *testContext) waitForOperatorDeployment(name string, replicas int32) 
er err := wait.PollUntilContextTimeout(tc.ctx, generalRetryInterval, operatorReadyTimeout, false, func(ctx context.Context) (bool, error) { controllerDeployment, err := tc.kubeClient.AppsV1().Deployments(tc.operatorNamespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { - if errors.IsNotFound(err) { + if k8serr.IsNotFound(err) { return false, nil } log.Printf("Failed to get %s controller deployment", name) @@ -207,7 +207,7 @@ func (tc *testContext) validateCRD(crdName string) error { err := wait.PollUntilContextTimeout(tc.ctx, generalRetryInterval, crdReadyTimeout, false, func(ctx context.Context) (bool, error) { err := tc.customClient.Get(ctx, obj, crd) if err != nil { - if errors.IsNotFound(err) { + if k8serr.IsNotFound(err) { return false, nil } log.Printf("Failed to get CRD %s", crdName) @@ -256,7 +256,7 @@ func getCSV(ctx context.Context, cli client.Client, name string, namespace strin } } - return nil, errors.NewNotFound(schema.GroupResource{}, name) + return nil, k8serr.NewNotFound(schema.GroupResource{}, name) } // Use existing or create a new one. 
@@ -279,7 +279,7 @@ func getSubscription(tc *testContext, name string, ns string) (*ofapi.Subscripti } err := tc.customClient.Get(tc.ctx, key, sub) - if errors.IsNotFound(err) { + if k8serr.IsNotFound(err) { return createSubscription(name, ns) } if err != nil { @@ -293,7 +293,7 @@ func waitCSV(tc *testContext, name string, ns string) error { interval := generalRetryInterval isReady := func(ctx context.Context) (bool, error) { csv, err := getCSV(ctx, tc.customClient, name, ns) - if errors.IsNotFound(err) { + if k8serr.IsNotFound(err) { return false, nil } if err != nil { diff --git a/tests/integration/features/cleanup_int_test.go b/tests/integration/features/cleanup_int_test.go index 8d0634cfc0c..070346ccbb3 100644 --- a/tests/integration/features/cleanup_int_test.go +++ b/tests/integration/features/cleanup_int_test.go @@ -4,7 +4,7 @@ import ( "context" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + k8serr "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -81,7 +81,7 @@ var _ = Describe("feature cleanup", func() { WithContext(ctx). WithTimeout(fixtures.Timeout). WithPolling(fixtures.Interval). - Should(WithTransform(errors.IsNotFound, BeTrue())) + Should(WithTransform(k8serr.IsNotFound, BeTrue())) }) }) @@ -154,11 +154,11 @@ var _ = Describe("feature cleanup", func() { WithContext(ctx). WithTimeout(fixtures.Timeout). WithPolling(fixtures.Interval). 
- Should(WithTransform(errors.IsNotFound, BeTrue())) + Should(WithTransform(k8serr.IsNotFound, BeTrue())) Consistently(func() error { _, err := fixtures.GetFeatureTracker(ctx, envTestClient, namespace, featureName) - if errors.IsNotFound(err) { + if k8serr.IsNotFound(err) { return nil } return err @@ -213,7 +213,7 @@ func createdSecretHasOwnerReferenceToOwningFeature(namespace, featureName string func namespaceExists(ctx context.Context, cli client.Client, f *feature.Feature) (bool, error) { namespace, err := fixtures.GetNamespace(ctx, cli, "conditional-ns") - if errors.IsNotFound(err) { + if k8serr.IsNotFound(err) { return false, nil } // ensuring it fails if namespace is still deleting diff --git a/tests/integration/features/servicemesh_feature_test.go b/tests/integration/features/servicemesh_feature_test.go index 16a7b419063..0ced2db9745 100644 --- a/tests/integration/features/servicemesh_feature_test.go +++ b/tests/integration/features/servicemesh_feature_test.go @@ -5,7 +5,7 @@ import ( "path" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - "k8s.io/apimachinery/pkg/api/errors" + k8serr "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/yaml" "sigs.k8s.io/controller-runtime/pkg/client" @@ -251,7 +251,7 @@ var _ = Describe("Service Mesh setup", func() { Expect(found).To(BeTrue()) _, err = fixtures.GetNamespace(ctx, envTestClient, serviceMeshSpec.Auth.Namespace) - Expect(errors.IsNotFound(err)).To(BeTrue()) + Expect(k8serr.IsNotFound(err)).To(BeTrue()) return extensionProviders From f954f0be09621b53dad9b8b3a4a27639ed2d23cc Mon Sep 17 00:00:00 2001 From: Ajay Jaganathan <36824134+AjayJagan@users.noreply.github.com> Date: Tue, 15 Oct 2024 12:08:37 +0530 Subject: [PATCH 11/22] (fix): Change on trigger event to `pull_request_target` in the "Check config and readme updates / Ensure generated files are included (pull_request)" action to fix "Resource not accessible by 
integration" error while running the action (#1296) Signed-off-by: AJAY JAGANATHAN --- .github/workflows/check-file-updates.yaml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/check-file-updates.yaml b/.github/workflows/check-file-updates.yaml index b6b7f0f1fee..bf142941ecb 100644 --- a/.github/workflows/check-file-updates.yaml +++ b/.github/workflows/check-file-updates.yaml @@ -1,6 +1,6 @@ name: Check config and readme updates on: - pull_request: + pull_request_target: jobs: file-updates: permissions: @@ -9,6 +9,9 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + with: + ref: ${{github.event.pull_request.head.ref}} + repository: ${{github.event.pull_request.head.repo.full_name}} - name: Generate files id: generate-files run: | From a5388adf5bd3a6387973edaf815fa81108cef975 Mon Sep 17 00:00:00 2001 From: Wen Zhou Date: Tue, 15 Oct 2024 11:12:42 +0200 Subject: [PATCH 12/22] feat: Operator disable create usergroup if detect user enabled external auth (#1297) * feat: Operator disable create usergroup if detect users enabled external auth - use internal Authentication CR Type "" or IntegratedOAuth to indicate if Operator should create usergroup or not CRD has validation to only allow "IntegratedOAuth", "", "None" or "OIDC" - only grant "get, watch , list" as least permission - remove duplicated rbac for "ingress", has been defined in other places - add object into cache - add CRD into unit-test - add unit-test: since we dont have auth CR, it should not create usergroup Signed-off-by: Wen Zhou * update: review comments Signed-off-by: Wen Zhou * update: review comments use const Signed-off-by: Wen Zhou --------- Signed-off-by: Wen Zhou --- ...atahub-operator.clusterserviceversion.yaml | 1 + .../config.openshift.io_authentications.yaml | 175 ++++++++++++++++++ config/rbac/role.yaml | 1 + .../dscinitialization_controller.go | 29 ++- .../dscinitialization_test.go | 14 +- controllers/dscinitialization/suite_test.go | 
2 + main.go | 5 + pkg/cluster/cluster_config.go | 21 ++- pkg/cluster/const.go | 3 + 9 files changed, 241 insertions(+), 10 deletions(-) create mode 100644 config/crd/external/config.openshift.io_authentications.yaml diff --git a/bundle/manifests/opendatahub-operator.clusterserviceversion.yaml b/bundle/manifests/opendatahub-operator.clusterserviceversion.yaml index 1519a9ef8ca..7ec954e1a03 100644 --- a/bundle/manifests/opendatahub-operator.clusterserviceversion.yaml +++ b/bundle/manifests/opendatahub-operator.clusterserviceversion.yaml @@ -357,6 +357,7 @@ spec: - apiGroups: - config.openshift.io resources: + - authentications - clusterversions verbs: - get diff --git a/config/crd/external/config.openshift.io_authentications.yaml b/config/crd/external/config.openshift.io_authentications.yaml new file mode 100644 index 00000000000..86ad306c7fd --- /dev/null +++ b/config/crd/external/config.openshift.io_authentications.yaml @@ -0,0 +1,175 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: authentications.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Authentication + listKind: AuthenticationList + plural: authentications + singular: authentication + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: |- + Authentication specifies cluster-wide settings for authentication (like OAuth and + webhook token authenticators). The canonical name of an instance is `cluster`. + + Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + oauthMetadata: + description: |- + oauthMetadata contains the discovery endpoint data for OAuth 2.0 + Authorization Server Metadata for an external OAuth server. + This discovery document can be viewed from its served location: + oc get --raw '/.well-known/oauth-authorization-server' + For further details, see the IETF Draft: + https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 + If oauthMetadata.name is non-empty, this value has precedence + over any metadata reference stored in status. + The key "oauthMetadata" is used to locate the data. + If specified and the config map or expected key is not found, no metadata is served. + If the specified metadata is not valid, no metadata is served. + The namespace for this config map is openshift-config. + properties: + name: + description: name is the metadata.name of the referenced config + map + type: string + required: + - name + type: object + serviceAccountIssuer: + description: |- + serviceAccountIssuer is the identifier of the bound service account token + issuer. + The default is https://kubernetes.default.svc + WARNING: Updating this field will not result in immediate invalidation of all bound tokens with the + previous issuer value. Instead, the tokens issued by previous service account issuer will continue to + be trusted for a time period chosen by the platform (currently set to 24h). 
+ This time period is subject to change over time. + This allows internal components to transition to use new service account issuer without service distruption. + type: string + type: + description: |- + type identifies the cluster managed, user facing authentication mode in use. + Specifically, it manages the component that responds to login attempts. + The default is IntegratedOAuth. + type: string + webhookTokenAuthenticator: + description: |- + webhookTokenAuthenticator configures a remote token reviewer. + These remote authentication webhooks can be used to verify bearer tokens + via the tokenreviews.authentication.k8s.io REST API. This is required to + honor bearer tokens that are provisioned by an external authentication service. + properties: + kubeConfig: + description: |- + kubeConfig references a secret that contains kube config file data which + describes how to access the remote webhook service. + The namespace for the referenced secret is openshift-config. + + For further details, see: + + https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication + + The key "kubeConfig" is used to locate the data. + If the secret or expected key is not found, the webhook is not honored. + If the specified kube config data is not valid, the webhook is not honored. + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + required: + - name + type: object + required: + - kubeConfig + type: object + webhookTokenAuthenticators: + description: webhookTokenAuthenticators is DEPRECATED, setting it + has no effect. + items: + description: |- + deprecatedWebhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator. + It's the same as WebhookTokenAuthenticator but it's missing the 'required' validation on KubeConfig field. 
+ properties: + kubeConfig: + description: |- + kubeConfig contains kube config file data which describes how to access the remote webhook service. + For further details, see: + https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication + The key "kubeConfig" is used to locate the data. + If the secret or expected key is not found, the webhook is not honored. + If the specified kube config data is not valid, the webhook is not honored. + The namespace for this secret is determined by the point of use. + properties: + name: + description: name is the metadata.name of the referenced + secret + type: string + required: + - name + type: object + type: object + type: array + type: object + status: + description: status holds observed values from the cluster. They may not + be overridden. + properties: + integratedOAuthMetadata: + description: |- + integratedOAuthMetadata contains the discovery endpoint data for OAuth 2.0 + Authorization Server Metadata for the in-cluster integrated OAuth server. + This discovery document can be viewed from its served location: + oc get --raw '/.well-known/oauth-authorization-server' + For further details, see the IETF Draft: + https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 + This contains the observed value based on cluster state. + An explicitly set value in spec.oauthMetadata has precedence over this field. + This field has no meaning if authentication spec.type is not set to IntegratedOAuth. + The key "oauthMetadata" is used to locate the data. + If the config map or expected key is not found, no metadata is served. + If the specified metadata is not valid, no metadata is served. + The namespace for this config map is openshift-config-managed. 
+ properties: + name: + description: name is the metadata.name of the referenced config + map + type: string + required: + - name + type: object + type: object + required: + - spec + type: object + served: true + storage: true diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index d81b97e1555..01ab8982756 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -171,6 +171,7 @@ rules: - apiGroups: - config.openshift.io resources: + - authentications - clusterversions verbs: - get diff --git a/controllers/dscinitialization/dscinitialization_controller.go b/controllers/dscinitialization/dscinitialization_controller.go index 37991131cb5..845418ac843 100644 --- a/controllers/dscinitialization/dscinitialization_controller.go +++ b/controllers/dscinitialization/dscinitialization_controller.go @@ -29,6 +29,7 @@ import ( corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" rbacv1 "k8s.io/api/rbac/v1" + k8serr "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" @@ -73,6 +74,7 @@ type DSCInitializationReconciler struct { // +kubebuilder:rbac:groups="dscinitialization.opendatahub.io",resources=dscinitializations,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups="features.opendatahub.io",resources=featuretrackers,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups="features.opendatahub.io",resources=featuretrackers/status,verbs=get;update;patch;delete +// +kubebuilder:rbac:groups="config.openshift.io",resources=authentications,verbs=get;watch;list // Reconcile contains controller logic specific to DSCInitialization instance updates. 
func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { //nolint:funlen,gocyclo,maintidx @@ -212,11 +214,21 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re return ctrl.Result{}, nil default: + createUsergroup, err := cluster.IsDefaultAuthMethod(ctx, r.Client) + if err != nil && !k8serr.IsNotFound(err) { // only keep reconcile if real error but not missing CRD or missing CR + return ctrl.Result{}, err + } + switch platform { case cluster.SelfManagedRhods: - err := r.createUserGroup(ctx, instance, "rhods-admins") - if err != nil { - return reconcile.Result{}, err + // Check if user opted for disabling creating user groups + if !createUsergroup { + log.Info("DSCI disabled usergroup creation") + } else { + err := r.createUserGroup(ctx, instance, "rhods-admins") + if err != nil { + return reconcile.Result{}, err + } } if instance.Spec.Monitoring.ManagementState == operatorv1.Managed { log.Info("Monitoring enabled, won't apply changes", "cluster", "Self-Managed RHODS Mode") @@ -246,9 +258,14 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re } } default: - err := r.createUserGroup(ctx, instance, "odh-admins") - if err != nil { - return reconcile.Result{}, err + // Check if user opted for disabling creating user groups + if !createUsergroup { + log.Info("DSCI disabled usergroup creation") + } else { + err := r.createUserGroup(ctx, instance, "odh-admins") + if err != nil { + return reconcile.Result{}, err + } } if instance.Spec.Monitoring.ManagementState == operatorv1.Managed { log.Info("Monitoring enabled, won't apply changes", "cluster", "ODH Mode") diff --git a/controllers/dscinitialization/dscinitialization_test.go b/controllers/dscinitialization/dscinitialization_test.go index d380375ef63..d9afb51deb4 100644 --- a/controllers/dscinitialization/dscinitialization_test.go +++ b/controllers/dscinitialization/dscinitialization_test.go @@ -4,6 +4,7 @@ 
import ( "context" operatorv1 "github.com/openshift/api/operator/v1" + userv1 "github.com/openshift/api/user/v1" corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" rbacv1 "k8s.io/api/rbac/v1" @@ -21,6 +22,7 @@ const ( workingNamespace = "test-operator-ns" applicationName = "default-dsci" applicationNamespace = "test-application-ns" + usergroupName = "odh-admins" configmapName = "odh-common-config" monitoringNamespace = "test-monitoring-ns" readyPhase = "Ready" @@ -109,6 +111,14 @@ var _ = Describe("DataScienceCluster initialization", func() { Expect(foundConfigMap.Data).To(Equal(expectedConfigmapData)) }) + It("Should not create user group when we do not have authentications CR in the cluster", func(ctx context.Context) { + userGroup := &userv1.Group{} + Eventually(objectExists(usergroupName, "", userGroup)). + WithContext(ctx). + WithTimeout(timeout). + WithPolling(interval). + Should(BeFalse()) + }) }) Context("Monitoring Resource", func() { @@ -341,9 +351,9 @@ func namespaceExists(ns string, obj client.Object) func(ctx context.Context) boo } } -func objectExists(ns string, name string, obj client.Object) func(ctx context.Context) bool { //nolint:unparam +func objectExists(name string, namespace string, obj client.Object) func(ctx context.Context) bool { return func(ctx context.Context) bool { - err := k8sClient.Get(ctx, client.ObjectKey{Name: ns, Namespace: name}, obj) + err := k8sClient.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, obj) return err == nil } diff --git a/controllers/dscinitialization/suite_test.go b/controllers/dscinitialization/suite_test.go index f3ef428f878..985618bacf8 100644 --- a/controllers/dscinitialization/suite_test.go +++ b/controllers/dscinitialization/suite_test.go @@ -22,6 +22,7 @@ import ( "testing" "time" + configv1 "github.com/openshift/api/config/v1" routev1 "github.com/openshift/api/route/v1" userv1 "github.com/openshift/api/user/v1" ofapi 
"github.com/operator-framework/api/pkg/operators/v1alpha1" @@ -116,6 +117,7 @@ var _ = BeforeSuite(func() { utilruntime.Must(routev1.Install(testScheme)) utilruntime.Must(userv1.Install(testScheme)) utilruntime.Must(monitoringv1.AddToScheme(testScheme)) + utilruntime.Must(configv1.Install(testScheme)) // +kubebuilder:scaffold:scheme k8sClient, err = client.New(cfg, client.Options{Scheme: testScheme}) diff --git a/main.go b/main.go index c7d38cfb1c3..6eb6cffe420 100644 --- a/main.go +++ b/main.go @@ -25,6 +25,7 @@ import ( addonv1alpha1 "github.com/openshift/addon-operator/apis/addons/v1alpha1" ocappsv1 "github.com/openshift/api/apps/v1" //nolint:importas //reason: conflicts with appsv1 "k8s.io/api/apps/v1" buildv1 "github.com/openshift/api/build/v1" + configv1 "github.com/openshift/api/config/v1" imagev1 "github.com/openshift/api/image/v1" oauthv1 "github.com/openshift/api/oauth/v1" operatorv1 "github.com/openshift/api/operator/v1" @@ -201,6 +202,10 @@ func main() { //nolint:funlen,maintidx &operatorv1.IngressController{}: { Field: fields.Set{"metadata.name": "default"}.AsSelector(), }, + // For authentication CR "cluster" + &configv1.Authentication{}: { + Field: fields.Set{"metadata.name": cluster.ClusterAuthenticationObj}.AsSelector(), + }, // for prometheus and black-box deployment and ones we owns &appsv1.Deployment{}: {Namespaces: deploymentCache}, }, diff --git a/pkg/cluster/cluster_config.go b/pkg/cluster/cluster_config.go index 442878baf2f..e0e31fa199c 100644 --- a/pkg/cluster/cluster_config.go +++ b/pkg/cluster/cluster_config.go @@ -9,10 +9,12 @@ import ( "github.com/blang/semver/v4" "github.com/go-logr/logr" + configv1 "github.com/openshift/api/config/v1" "github.com/operator-framework/api/pkg/lib/version" ofapiv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" corev1 "k8s.io/api/core/v1" k8serr "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 
"k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/client" @@ -21,8 +23,6 @@ import ( "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" ) -// +kubebuilder:rbac:groups="config.openshift.io",resources=ingresses,verbs=get - type Platform string // Release includes information on operator version and platform @@ -226,3 +226,20 @@ func getRelease(ctx context.Context, cli client.Client) (Release, error) { initRelease.Version = csv.Spec.Version return initRelease, nil } + +// IsDefaultAuthMethod returns true if the default authentication method is IntegratedOAuth or empty. +// This will give indication that Operator should create userGroups or not in the cluster. +func IsDefaultAuthMethod(ctx context.Context, cli client.Client) (bool, error) { + authenticationobj := &configv1.Authentication{} + if err := cli.Get(ctx, client.ObjectKey{Name: ClusterAuthenticationObj, Namespace: ""}, authenticationobj); err != nil { + if errors.Is(err, &meta.NoKindMatchError{}) { // when CRD is missing, convert error type + return false, k8serr.NewNotFound(configv1.Resource("authentications"), ClusterAuthenticationObj) + } + return false, err + } + + // for now, HPC support "" "None" "IntegratedOAuth"(default) "OIDC" + // other offering support "" "None" "IntegratedOAuth"(default) + // we only create userGroups for "IntegratedOAuth" or "" and leave other or new supported type value in the future + return authenticationobj.Spec.Type == configv1.AuthenticationTypeIntegratedOAuth || authenticationobj.Spec.Type == "", nil +} diff --git a/pkg/cluster/const.go b/pkg/cluster/const.go index 5e218430db7..de5a27de29d 100644 --- a/pkg/cluster/const.go +++ b/pkg/cluster/const.go @@ -12,4 +12,7 @@ const ( // DefaultNotebooksNamespace defines default namespace for notebooks. DefaultNotebooksNamespace = "rhods-notebooks" + + // Default cluster-scope Authentication CR name.
+ ClusterAuthenticationObj = "cluster" ) From fa862a1a239955e1ed263c459b323ee8047c9513 Mon Sep 17 00:00:00 2001 From: Yauheni Kaliuta Date: Thu, 17 Oct 2024 13:22:20 +0300 Subject: [PATCH 13/22] components, logger: use contextual logging approach (#1253) Switch ReconcileComponent from passing logger explicitly to wrapping it into context[1][2] Makes one parameter less to pass and will allow called utilities to report component context where they are called from. No user or logging format impact until utilities takes contextual logging in use. [1] https://www.kubernetes.dev/blog/2022/05/25/contextual-logging/ [2] https://github.com/kubernetes/enhancements/blob/master/keps/sig-instrumentation/3077-contextual-logging/README.md Credits-To: Bartosz Majsak bartosz.majsak@gmail.com Signed-off-by: Yauheni Kaliuta --- components/codeflare/codeflare.go | 3 +-- components/component.go | 2 +- components/dashboard/dashboard.go | 2 +- components/datasciencepipelines/datasciencepipelines.go | 3 +-- components/kserve/kserve.go | 4 ++-- components/kueue/kueue.go | 4 ++-- components/modelmeshserving/modelmeshserving.go | 3 +-- components/modelregistry/modelregistry.go | 4 ++-- components/ray/ray.go | 4 ++-- components/trainingoperator/trainingoperator.go | 4 ++-- components/trustyai/trustyai.go | 4 ++-- components/workbenches/workbenches.go | 4 ++-- .../datasciencecluster/datasciencecluster_controller.go | 4 +++- 13 files changed, 22 insertions(+), 23 deletions(-) diff --git a/components/codeflare/codeflare.go b/components/codeflare/codeflare.go index 5e731c28ba4..c3c2ec48474 100644 --- a/components/codeflare/codeflare.go +++ b/components/codeflare/codeflare.go @@ -8,7 +8,6 @@ import ( "fmt" "path/filepath" - "github.com/go-logr/logr" operatorv1 "github.com/openshift/api/operator/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -74,11 +73,11 @@ func (c *CodeFlare) GetComponentName() string { func (c *CodeFlare) ReconcileComponent(ctx 
context.Context, cli client.Client, - l logr.Logger, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool) error { + l := logf.FromContext(ctx) enabled := c.GetManagementState() == operatorv1.Managed monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed diff --git a/components/component.go b/components/component.go index c43cef7ac92..8256fbb0542 100644 --- a/components/component.go +++ b/components/component.go @@ -82,7 +82,7 @@ type ManifestsConfig struct { type ComponentInterface interface { Init(ctx context.Context, platform cluster.Platform) error - ReconcileComponent(ctx context.Context, cli client.Client, logger logr.Logger, + ReconcileComponent(ctx context.Context, cli client.Client, owner metav1.Object, DSCISpec *dsciv1.DSCInitializationSpec, platform cluster.Platform, currentComponentStatus bool) error Cleanup(ctx context.Context, cli client.Client, owner metav1.Object, DSCISpec *dsciv1.DSCInitializationSpec) error GetComponentName() string diff --git a/components/dashboard/dashboard.go b/components/dashboard/dashboard.go index d2ed096b3bd..f8b077d4416 100644 --- a/components/dashboard/dashboard.go +++ b/components/dashboard/dashboard.go @@ -84,13 +84,13 @@ func (d *Dashboard) GetComponentName() string { func (d *Dashboard) ReconcileComponent(ctx context.Context, cli client.Client, - l logr.Logger, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, currentComponentExist bool, ) error { entryPath := DefaultPath + l := logf.FromContext(ctx) enabled := d.GetManagementState() == operatorv1.Managed monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed diff --git a/components/datasciencepipelines/datasciencepipelines.go b/components/datasciencepipelines/datasciencepipelines.go index f0066a6c544..ace22681f6b 100644 --- a/components/datasciencepipelines/datasciencepipelines.go +++ b/components/datasciencepipelines/datasciencepipelines.go 
@@ -8,7 +8,6 @@ import ( "fmt" "path/filepath" - "github.com/go-logr/logr" operatorv1 "github.com/openshift/api/operator/v1" conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" corev1 "k8s.io/api/core/v1" @@ -95,12 +94,12 @@ func (d *DataSciencePipelines) GetComponentName() string { func (d *DataSciencePipelines) ReconcileComponent(ctx context.Context, cli client.Client, - l logr.Logger, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool, ) error { + l := logf.FromContext(ctx) enabled := d.GetManagementState() == operatorv1.Managed monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed diff --git a/components/kserve/kserve.go b/components/kserve/kserve.go index 85b739285ea..f7edf65479a 100644 --- a/components/kserve/kserve.go +++ b/components/kserve/kserve.go @@ -8,7 +8,6 @@ import ( "path/filepath" "strings" - "github.com/go-logr/logr" operatorv1 "github.com/openshift/api/operator/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -112,7 +111,8 @@ func (k *Kserve) GetComponentName() string { } func (k *Kserve) ReconcileComponent(ctx context.Context, cli client.Client, - l logr.Logger, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool) error { + owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool) error { + l := logf.FromContext(ctx) enabled := k.GetManagementState() == operatorv1.Managed monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed diff --git a/components/kueue/kueue.go b/components/kueue/kueue.go index ec609317092..73147085783 100644 --- a/components/kueue/kueue.go +++ b/components/kueue/kueue.go @@ -6,7 +6,6 @@ import ( "fmt" "path/filepath" - "github.com/go-logr/logr" operatorv1 "github.com/openshift/api/operator/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ 
-68,8 +67,9 @@ func (k *Kueue) GetComponentName() string { return ComponentName } -func (k *Kueue) ReconcileComponent(ctx context.Context, cli client.Client, l logr.Logger, +func (k *Kueue) ReconcileComponent(ctx context.Context, cli client.Client, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool) error { + l := logf.FromContext(ctx) enabled := k.GetManagementState() == operatorv1.Managed monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed if enabled { diff --git a/components/modelmeshserving/modelmeshserving.go b/components/modelmeshserving/modelmeshserving.go index cb1d07b7838..898d1f8617a 100644 --- a/components/modelmeshserving/modelmeshserving.go +++ b/components/modelmeshserving/modelmeshserving.go @@ -8,7 +8,6 @@ import ( "path/filepath" "strings" - "github.com/go-logr/logr" operatorv1 "github.com/openshift/api/operator/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -103,12 +102,12 @@ func (m *ModelMeshServing) GetComponentName() string { func (m *ModelMeshServing) ReconcileComponent(ctx context.Context, cli client.Client, - l logr.Logger, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool, ) error { + l := logf.FromContext(ctx) enabled := m.GetManagementState() == operatorv1.Managed monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed diff --git a/components/modelregistry/modelregistry.go b/components/modelregistry/modelregistry.go index dbf577ec8f8..72c9da1e381 100644 --- a/components/modelregistry/modelregistry.go +++ b/components/modelregistry/modelregistry.go @@ -10,7 +10,6 @@ import ( "strings" "text/template" - "github.com/go-logr/logr" operatorv1 "github.com/openshift/api/operator/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -97,8 +96,9 @@ func (m *ModelRegistry) GetComponentName() string { return ComponentName } -func (m 
*ModelRegistry) ReconcileComponent(ctx context.Context, cli client.Client, l logr.Logger, +func (m *ModelRegistry) ReconcileComponent(ctx context.Context, cli client.Client, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool) error { + l := logf.FromContext(ctx) enabled := m.GetManagementState() == operatorv1.Managed monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed diff --git a/components/ray/ray.go b/components/ray/ray.go index c8fa30edbd4..f11ad8ec55f 100644 --- a/components/ray/ray.go +++ b/components/ray/ray.go @@ -8,7 +8,6 @@ import ( "fmt" "path/filepath" - "github.com/go-logr/logr" operatorv1 "github.com/openshift/api/operator/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -69,8 +68,9 @@ func (r *Ray) GetComponentName() string { return ComponentName } -func (r *Ray) ReconcileComponent(ctx context.Context, cli client.Client, l logr.Logger, +func (r *Ray) ReconcileComponent(ctx context.Context, cli client.Client, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool) error { + l := logf.FromContext(ctx) enabled := r.GetManagementState() == operatorv1.Managed monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed diff --git a/components/trainingoperator/trainingoperator.go b/components/trainingoperator/trainingoperator.go index a6a7c8f87e7..d422bc9a28a 100644 --- a/components/trainingoperator/trainingoperator.go +++ b/components/trainingoperator/trainingoperator.go @@ -8,7 +8,6 @@ import ( "fmt" "path/filepath" - "github.com/go-logr/logr" operatorv1 "github.com/openshift/api/operator/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -70,8 +69,9 @@ func (r *TrainingOperator) GetComponentName() string { return ComponentName } -func (r *TrainingOperator) ReconcileComponent(ctx context.Context, cli client.Client, l logr.Logger, +func 
(r *TrainingOperator) ReconcileComponent(ctx context.Context, cli client.Client, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool) error { + l := logf.FromContext(ctx) enabled := r.GetManagementState() == operatorv1.Managed monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed diff --git a/components/trustyai/trustyai.go b/components/trustyai/trustyai.go index 45a211e79c0..114d099901b 100644 --- a/components/trustyai/trustyai.go +++ b/components/trustyai/trustyai.go @@ -7,7 +7,6 @@ import ( "fmt" "path/filepath" - "github.com/go-logr/logr" operatorv1 "github.com/openshift/api/operator/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -79,8 +78,9 @@ func (t *TrustyAI) GetComponentName() string { return ComponentName } -func (t *TrustyAI) ReconcileComponent(ctx context.Context, cli client.Client, l logr.Logger, +func (t *TrustyAI) ReconcileComponent(ctx context.Context, cli client.Client, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool) error { + l := logf.FromContext(ctx) enabled := t.GetManagementState() == operatorv1.Managed monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed entryPath := DefaultPath diff --git a/components/workbenches/workbenches.go b/components/workbenches/workbenches.go index c11f1e24297..b744a1660d6 100644 --- a/components/workbenches/workbenches.go +++ b/components/workbenches/workbenches.go @@ -8,7 +8,6 @@ import ( "path/filepath" "strings" - "github.com/go-logr/logr" operatorv1 "github.com/openshift/api/operator/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -111,8 +110,9 @@ func (w *Workbenches) GetComponentName() string { return ComponentName } -func (w *Workbenches) ReconcileComponent(ctx context.Context, cli client.Client, l logr.Logger, +func (w *Workbenches) ReconcileComponent(ctx context.Context, 
cli client.Client, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool) error { + l := logf.FromContext(ctx) // Set default notebooks namespace // Create rhods-notebooks namespace in managed platforms enabled := w.GetManagementState() == operatorv1.Managed diff --git a/controllers/datasciencecluster/datasciencecluster_controller.go b/controllers/datasciencecluster/datasciencecluster_controller.go index af00a344e73..8570f270028 100644 --- a/controllers/datasciencecluster/datasciencecluster_controller.go +++ b/controllers/datasciencecluster/datasciencecluster_controller.go @@ -46,6 +46,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" + logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -313,7 +314,8 @@ func (r *DataScienceClusterReconciler) reconcileSubComponent(ctx context.Context } // Reconcile component componentLogger := newComponentLogger(log, componentName, r.DataScienceCluster.DSCISpec) - err := component.ReconcileComponent(ctx, r.Client, componentLogger, instance, r.DataScienceCluster.DSCISpec, platform, installedComponentValue) + componentCtx := logf.IntoContext(ctx, componentLogger) + err := component.ReconcileComponent(componentCtx, r.Client, instance, r.DataScienceCluster.DSCISpec, platform, installedComponentValue) // TODO: replace this hack with a full refactor of component status in the future From de19e1e29b65f075c963b99d3fcfc205c915e643 Mon Sep 17 00:00:00 2001 From: Yauheni Kaliuta Date: Thu, 17 Oct 2024 15:23:23 +0300 Subject: [PATCH 14/22] logger, controllers: use common logging level, INFO by default (#1289) Remove increasing logging level for controllers (it was also passed to components if not overridden from DSCI) since: - it made logging inconsistent. 
The base controller runtime logger is set up with INFO level for all log modes, so when controllers are configured for Error level, user sees INFO messages from both controller-runtime and other parts of operator which use controller-runtime's logger directly; - since the base logger is configured for INFO, there is no difference in levels between "default" and "devel". Having levels 1 and 2 there is misleading. Update documentation. This patch changes default logging, former filtered Info messages are displayed now. There is no _big_ difference in practice since currently the log is anyway full of info messages from parts which do not use reconciler's logger, like: {"level":"info","ts":"2024-10-09T13:23:11Z","msg":"waiting for 1 deployment to be ready for dashboard"} Signed-off-by: Yauheni Kaliuta --- README.md | 15 +++++---------- main.go | 8 ++++---- pkg/logger/logger.go | 17 ----------------- 3 files changed, 9 insertions(+), 31 deletions(-) diff --git a/README.md b/README.md index cb7b022887f..b1b7971d47f 100644 --- a/README.md +++ b/README.md @@ -224,17 +224,12 @@ This will ensure that the doc for the apis are updated accordingly. #### Controller level -Logger on all controllers can only be changed from CSV with parameters: --log-mode devel -valid value: "" (as default) || prod || production || devel || development +Global logger configuration can be changed with a command line switch `--log-mode ` +for example from CSV. Valid values for ``: "" (as default) || prod || production || devel || development. -This mainly impacts logging for operator pod startup, generating common resource, monitoring deployment. - -| --log-mode value | mapping Log level | Comments | -| ---------------- | ------------------- | -------------- | -| devel | debug / 0 | lowest level | -| "" | info / 1 | default option | -| default | info / 1 | default option | -| prod | error / 2 | highest level | +Verbosity level is INFO. 
+To fine tune zap backend [standard operator sdk zap switches](https://sdk.operatorframework.io/docs/building-operators/golang/references/logging/) +can be used. #### Component level diff --git a/main.go b/main.go index 6eb6cffe420..a50b4c96b40 100644 --- a/main.go +++ b/main.go @@ -244,7 +244,7 @@ func main() { //nolint:funlen,maintidx if err = (&dscictrl.DSCInitializationReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), - Log: logger.LogWithLevel(ctrl.Log.WithName(operatorName).WithName("controllers").WithName("DSCInitialization"), logmode), + Log: ctrl.Log.WithName(operatorName).WithName("controllers").WithName("DSCInitialization"), Recorder: mgr.GetEventRecorderFor("dscinitialization-controller"), ApplicationsNamespace: dscApplicationsNamespace, }).SetupWithManager(ctx, mgr); err != nil { @@ -255,7 +255,7 @@ func main() { //nolint:funlen,maintidx if err = (&dscctrl.DataScienceClusterReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), - Log: logger.LogWithLevel(ctrl.Log.WithName(operatorName).WithName("controllers").WithName("DataScienceCluster"), logmode), + Log: ctrl.Log.WithName(operatorName).WithName("controllers").WithName("DataScienceCluster"), DataScienceCluster: &dscctrl.DataScienceClusterConfig{ DSCISpec: &dsciv1.DSCInitializationSpec{ ApplicationsNamespace: dscApplicationsNamespace, @@ -270,7 +270,7 @@ func main() { //nolint:funlen,maintidx if err = (&secretgenerator.SecretGeneratorReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), - Log: logger.LogWithLevel(ctrl.Log.WithName(operatorName).WithName("controllers").WithName("SecretGenerator"), logmode), + Log: ctrl.Log.WithName(operatorName).WithName("controllers").WithName("SecretGenerator"), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "SecretGenerator") os.Exit(1) @@ -279,7 +279,7 @@ func main() { //nolint:funlen,maintidx if err = (&certconfigmapgenerator.CertConfigmapGeneratorReconciler{ Client: 
mgr.GetClient(), Scheme: mgr.GetScheme(), - Log: logger.LogWithLevel(ctrl.Log.WithName(operatorName).WithName("controllers").WithName("CertConfigmapGenerator"), logmode), + Log: ctrl.Log.WithName(operatorName).WithName("controllers").WithName("CertConfigmapGenerator"), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "CertConfigmapGenerator") os.Exit(1) diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index dc8a4cf97f6..ed569084b28 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -3,19 +3,12 @@ package logger import ( "flag" "os" - "strings" "github.com/go-logr/logr" "go.uber.org/zap/zapcore" "sigs.k8s.io/controller-runtime/pkg/log/zap" ) -var logLevelMapping = map[string]int{ - "devel": 0, - "default": 1, // default one when not set log-mode - "prod": 2, -} - // NewNamedLogger creates a new logger for a component. // If the mode is set (so can be different from the default one), // it will create a new logger with the specified mode's options. @@ -26,16 +19,6 @@ func NewNamedLogger(log logr.Logger, name string, mode string) logr.Logger { return log.WithName(name) } -// in each controller, to use different log level. 
-func LogWithLevel(logger logr.Logger, level string) logr.Logger { - level = strings.TrimSpace(level) - verbosityLevel, ok := logLevelMapping[level] - if !ok { - verbosityLevel = 1 // fallback to info level - } - return logger.V(verbosityLevel) -} - func NewLoggerWithOptions(mode string, override *zap.Options) logr.Logger { opts := newOptions(mode) overrideOptions(opts, override) From 12fd9e564ee25444861ce8ad2f546ef0d8f7fb1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n=20Sanz=20G=C3=B3miz?= <100594859+asanzgom@users.noreply.github.com> Date: Thu, 17 Oct 2024 15:18:21 +0200 Subject: [PATCH 15/22] removed lint and typo fixes (#1302) --- CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d6a99206393..6fd268aa3db 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -20,7 +20,7 @@ Issues are tracked using [Jira](https://issues.redhat.com/secure/RapidBoard.jspa 1. **Fork the Repository:** Create your own fork of the repository to work on your changes. 2. **Create a Branch:** Create your own branch to include changes for the feature or a bug fix off of `incubation` branch. 3. **Work on Your Changes:** Commit often, and ensure your code adheres to these [Code Style Guidelines](#code-style-guidelines) and passes all the [quality gates](#quality-gates) for the operator. -4. **Testing:** Make sure your code passes all the tests, including any new tests you've added. +4. **Testing:** Make sure your code passes all the tests, including any new tests you've added. And that your changes do not decrease the test coverage as shown on report. Every new feature should come with unit tests that cover that new part of the code. 
### Open a Pull Request: From 0c16075169fed92ddb302970fb2e0892fbcf9957 Mon Sep 17 00:00:00 2001 From: Wen Zhou Date: Thu, 17 Oct 2024 17:41:21 +0200 Subject: [PATCH 16/22] fix: to make the unset env variable in CSV work with fallback (#1306) - previous code, will run into opendatahub case if env is not set Signed-off-by: Wen Zhou --- pkg/cluster/cluster_config.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pkg/cluster/cluster_config.go b/pkg/cluster/cluster_config.go index e0e31fa199c..a2b5aaa4cb4 100644 --- a/pkg/cluster/cluster_config.go +++ b/pkg/cluster/cluster_config.go @@ -173,13 +173,14 @@ func detectManagedRHODS(ctx context.Context, cli client.Client) (Platform, error func getPlatform(ctx context.Context, cli client.Client) (Platform, error) { switch os.Getenv("ODH_PLATFORM_TYPE") { - case "OpenDataHub", "": + case "OpenDataHub": return OpenDataHub, nil case "ManagedRHOAI": return ManagedRhods, nil case "SelfManagedRHOAI": return SelfManagedRhods, nil - default: // fall back to detect platform if ODH_PLATFORM_TYPE env is not provided + default: + // fall back to detect platform if ODH_PLATFORM_TYPE env is not provided in CSV or set to "" if platform, err := detectManagedRHODS(ctx, cli); err != nil { return Unknown, err } else if platform == ManagedRhods { From 578d73e6827aa5f82477680a0ba4dde997b52c22 Mon Sep 17 00:00:00 2001 From: Yauheni Kaliuta Date: Mon, 21 Oct 2024 12:36:46 +0300 Subject: [PATCH 17/22] controllers: switch to k8s contextual logger (#1308) Jira: https://issues.redhat.com/browse/RHOAIENG-14096 This is a squashed commit of the following patches: d012b67bc139 ("controllers: switch to k8s contextual logger") 98805300894c ("logger: blindly convert ctrl.Log users to contextual") - controllers: switch to k8s contextual logger Remove own logger from controllers' reconcilers and switch to k8s contextual logger instead [1]. 
Use contextual logger for SecretGeneratorReconciler and CertConfigmapGeneratorReconciler setups as well. Add name to the logger coming from the framework. It will contains "controller" field already, and like in webhook with the name it's easy to distinguish framework and operator messages. - logger: blindly convert ctrl.Log users to contextual All the users should have proper context now. The log level changes will affect it as well. [1] https://www.kubernetes.dev/blog/2022/05/25/contextual-logging/ Signed-off-by: Yauheni Kaliuta --- components/kserve/servicemesh_setup.go | 5 ++- .../certconfigmapgenerator_controller.go | 14 +++---- .../datasciencecluster_controller.go | 20 ++++----- .../dscinitialization_controller.go | 15 ++++--- controllers/dscinitialization/monitoring.go | 13 +++--- .../dscinitialization/servicemesh_setup.go | 5 ++- controllers/dscinitialization/suite_test.go | 1 - controllers/dscinitialization/utils.go | 9 ++-- .../secretgenerator_controller.go | 12 +++--- main.go | 8 +--- pkg/cluster/resources.go | 8 ++-- pkg/upgrade/uninstallation.go | 15 ++++--- pkg/upgrade/upgrade.go | 41 +++++++++++-------- 13 files changed, 85 insertions(+), 81 deletions(-) diff --git a/components/kserve/servicemesh_setup.go b/components/kserve/servicemesh_setup.go index 126e23d88ea..44138ba625e 100644 --- a/components/kserve/servicemesh_setup.go +++ b/components/kserve/servicemesh_setup.go @@ -7,8 +7,8 @@ import ( operatorv1 "github.com/openshift/api/operator/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" @@ -37,6 +37,7 @@ func (k *Kserve) removeServiceMeshConfigurations(ctx context.Context, cli client } func (k *Kserve) defineServiceMeshFeatures(ctx context.Context, cli client.Client, 
dscispec *dsciv1.DSCInitializationSpec) feature.FeaturesProvider { + log := logf.FromContext(ctx) return func(registry feature.FeaturesRegistry) error { authorinoInstalled, err := cluster.SubscriptionExists(ctx, cli, "authorino-operator") if err != nil { @@ -68,7 +69,7 @@ func (k *Kserve) defineServiceMeshFeatures(ctx context.Context, cli client.Clien return kserveExtAuthzErr } } else { - ctrl.Log.Info("WARN: Authorino operator is not installed on the cluster, skipping authorization capability") + log.Info("WARN: Authorino operator is not installed on the cluster, skipping authorization capability") } return nil diff --git a/controllers/certconfigmapgenerator/certconfigmapgenerator_controller.go b/controllers/certconfigmapgenerator/certconfigmapgenerator_controller.go index a3ce257dcb3..0c11a3238c2 100644 --- a/controllers/certconfigmapgenerator/certconfigmapgenerator_controller.go +++ b/controllers/certconfigmapgenerator/certconfigmapgenerator_controller.go @@ -5,7 +5,6 @@ import ( "context" "reflect" - "github.com/go-logr/logr" operatorv1 "github.com/openshift/api/operator/v1" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" @@ -16,6 +15,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" + logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -28,13 +28,11 @@ import ( type CertConfigmapGeneratorReconciler struct { Client client.Client Scheme *runtime.Scheme - Log logr.Logger } // SetupWithManager sets up the controller with the Manager. 
-func (r *CertConfigmapGeneratorReconciler) SetupWithManager(mgr ctrl.Manager) error { - log := r.Log - log.Info("Adding controller for Configmap Generation.") +func (r *CertConfigmapGeneratorReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { + logf.FromContext(ctx).Info("Adding controller for Configmap Generation.") return ctrl.NewControllerManagedBy(mgr). Named("cert-configmap-generator-controller"). Watches(&corev1.ConfigMap{}, handler.EnqueueRequestsFromMapFunc(r.watchTrustedCABundleConfigMapResource), builder.WithPredicates(ConfigMapChangedPredicate)). @@ -45,7 +43,7 @@ func (r *CertConfigmapGeneratorReconciler) SetupWithManager(mgr ctrl.Manager) er // Reconcile will generate new configmap, odh-trusted-ca-bundle, that includes cluster-wide trusted-ca bundle and custom // ca bundle in every new namespace created. func (r *CertConfigmapGeneratorReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - log := r.Log + log := logf.FromContext(ctx).WithName("CertConfigmapGenerator") // Request includes namespace that is newly created or where odh-trusted-ca-bundle configmap is updated. 
log.Info("Reconciling CertConfigMapGenerator.", " Request.Namespace", req.NamespacedName) // Get namespace instance @@ -109,8 +107,8 @@ func (r *CertConfigmapGeneratorReconciler) watchNamespaceResource(_ context.Cont return nil } -func (r *CertConfigmapGeneratorReconciler) watchTrustedCABundleConfigMapResource(_ context.Context, a client.Object) []reconcile.Request { - log := r.Log +func (r *CertConfigmapGeneratorReconciler) watchTrustedCABundleConfigMapResource(ctx context.Context, a client.Object) []reconcile.Request { + log := logf.FromContext(ctx) if a.GetName() == trustedcabundle.CAConfigMapName { log.Info("Cert configmap has been updated, start reconcile") return []reconcile.Request{{NamespacedName: types.NamespacedName{Name: a.GetName(), Namespace: a.GetNamespace()}}} diff --git a/controllers/datasciencecluster/datasciencecluster_controller.go b/controllers/datasciencecluster/datasciencecluster_controller.go index 8570f270028..e8d8750ca99 100644 --- a/controllers/datasciencecluster/datasciencecluster_controller.go +++ b/controllers/datasciencecluster/datasciencecluster_controller.go @@ -67,7 +67,6 @@ import ( type DataScienceClusterReconciler struct { client.Client Scheme *runtime.Scheme - Log logr.Logger // Recorder to generate events Recorder record.EventRecorder DataScienceCluster *DataScienceClusterConfig @@ -85,7 +84,7 @@ const ( // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. 
func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { //nolint:maintidx,gocyclo - log := r.Log + log := logf.FromContext(ctx).WithName("DataScienceCluster") log.Info("Reconciling DataScienceCluster resources", "Request.Name", req.Name) // Get information on version and platform @@ -169,7 +168,7 @@ func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.R saved.Status.Phase = status.PhaseError }) if err != nil { - r.reportError(err, instance, "failed to update DataScienceCluster condition") + r.reportError(ctx, err, instance, "failed to update DataScienceCluster condition") return ctrl.Result{}, err } @@ -233,7 +232,7 @@ func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.R saved.Status.Release = currentOperatorRelease }) if err != nil { - _ = r.reportError(err, instance, fmt.Sprintf("failed to add conditions to status of DataScienceCluster resource name %s", req.Name)) + _ = r.reportError(ctx, err, instance, fmt.Sprintf("failed to add conditions to status of DataScienceCluster resource name %s", req.Name)) return ctrl.Result{}, err } @@ -291,7 +290,7 @@ func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.R func (r *DataScienceClusterReconciler) reconcileSubComponent(ctx context.Context, instance *dscv1.DataScienceCluster, platform cluster.Platform, component components.ComponentInterface, ) (*dscv1.DataScienceCluster, error) { - log := r.Log + log := logf.FromContext(ctx) componentName := component.GetComponentName() enabled := component.GetManagementState() == operatorv1.Managed @@ -308,7 +307,7 @@ func (r *DataScienceClusterReconciler) reconcileSubComponent(ctx context.Context status.SetComponentCondition(&saved.Status.Conditions, componentName, status.ReconcileInit, message, corev1.ConditionUnknown) }) if err != nil { - _ = r.reportError(err, instance, "failed to update DataScienceCluster conditions before first time reconciling 
"+componentName) + _ = r.reportError(ctx, err, instance, "failed to update DataScienceCluster conditions before first time reconciling "+componentName) // try to continue with reconciliation, as further updates can fix the status } } @@ -321,7 +320,7 @@ func (r *DataScienceClusterReconciler) reconcileSubComponent(ctx context.Context if err != nil { // reconciliation failed: log errors, raise event and update status accordingly - instance = r.reportError(err, instance, "failed to reconcile "+componentName+" on DataScienceCluster") + instance = r.reportError(ctx, err, instance, "failed to reconcile "+componentName+" on DataScienceCluster") instance, _ = status.UpdateWithRetry(ctx, r.Client, instance, func(saved *dscv1.DataScienceCluster) { if enabled { if strings.Contains(err.Error(), datasciencepipelines.ArgoWorkflowCRD+" CRD already exists") { @@ -357,7 +356,7 @@ func (r *DataScienceClusterReconciler) reconcileSubComponent(ctx context.Context } }) if err != nil { - instance = r.reportError(err, instance, "failed to update DataScienceCluster status after reconciling "+componentName) + instance = r.reportError(ctx, err, instance, "failed to update DataScienceCluster status after reconciling "+componentName) return instance, err } @@ -374,9 +373,8 @@ func newComponentLogger(logger logr.Logger, componentName string, dscispec *dsci return ctrlogger.NewNamedLogger(logger, "DSC.Components."+componentName, mode) } -func (r *DataScienceClusterReconciler) reportError(err error, instance *dscv1.DataScienceCluster, message string) *dscv1.DataScienceCluster { - log := r.Log - log.Error(err, message, "instance.Name", instance.Name) +func (r *DataScienceClusterReconciler) reportError(ctx context.Context, err error, instance *dscv1.DataScienceCluster, message string) *dscv1.DataScienceCluster { + logf.FromContext(ctx).Error(err, message, "instance.Name", instance.Name) r.Recorder.Eventf(instance, corev1.EventTypeWarning, "DataScienceClusterReconcileError", "%s for instance %s", 
message, instance.Name) return instance diff --git a/controllers/dscinitialization/dscinitialization_controller.go b/controllers/dscinitialization/dscinitialization_controller.go index 845418ac843..023543dd633 100644 --- a/controllers/dscinitialization/dscinitialization_controller.go +++ b/controllers/dscinitialization/dscinitialization_controller.go @@ -22,7 +22,6 @@ import ( "path/filepath" "reflect" - "github.com/go-logr/logr" operatorv1 "github.com/openshift/api/operator/v1" routev1 "github.com/openshift/api/route/v1" appsv1 "k8s.io/api/apps/v1" @@ -40,6 +39,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" + logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -64,7 +64,6 @@ var managementStateChangeTrustedCA = false type DSCInitializationReconciler struct { client.Client Scheme *runtime.Scheme - Log logr.Logger Recorder record.EventRecorder ApplicationsNamespace string } @@ -78,7 +77,7 @@ type DSCInitializationReconciler struct { // Reconcile contains controller logic specific to DSCInitialization instance updates. 
func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { //nolint:funlen,gocyclo,maintidx - log := r.Log + log := logf.FromContext(ctx).WithName("DSCInitialization") log.Info("Reconciling DSCInitialization.", "DSCInitialization Request.Name", req.Name) currentOperatorRelease := cluster.GetRelease() @@ -392,8 +391,8 @@ var dsciPredicateStateChangeTrustedCA = predicate.Funcs{ }, } -func (r *DSCInitializationReconciler) watchMonitoringConfigMapResource(_ context.Context, a client.Object) []reconcile.Request { - log := r.Log +func (r *DSCInitializationReconciler) watchMonitoringConfigMapResource(ctx context.Context, a client.Object) []reconcile.Request { + log := logf.FromContext(ctx) if a.GetName() == "prometheus" && a.GetNamespace() == "redhat-ods-monitoring" { log.Info("Found monitoring configmap has updated, start reconcile") @@ -402,8 +401,8 @@ func (r *DSCInitializationReconciler) watchMonitoringConfigMapResource(_ context return nil } -func (r *DSCInitializationReconciler) watchMonitoringSecretResource(_ context.Context, a client.Object) []reconcile.Request { - log := r.Log +func (r *DSCInitializationReconciler) watchMonitoringSecretResource(ctx context.Context, a client.Object) []reconcile.Request { + log := logf.FromContext(ctx) operatorNs, err := cluster.GetOperatorNamespace() if err != nil { return nil @@ -418,7 +417,7 @@ func (r *DSCInitializationReconciler) watchMonitoringSecretResource(_ context.Co } func (r *DSCInitializationReconciler) watchDSCResource(ctx context.Context) []reconcile.Request { - log := r.Log + log := logf.FromContext(ctx) instanceList := &dscv1.DataScienceClusterList{} if err := r.Client.List(ctx, instanceList); err != nil { // do not handle if cannot get list diff --git a/controllers/dscinitialization/monitoring.go b/controllers/dscinitialization/monitoring.go index 96537c76241..f81fff74939 100644 --- a/controllers/dscinitialization/monitoring.go +++ 
b/controllers/dscinitialization/monitoring.go @@ -15,6 +15,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" @@ -39,7 +40,7 @@ var ( // only when reconcile on DSCI CR, initial set to true // if reconcile from monitoring, initial set to false, skip blackbox and rolebinding. func (r *DSCInitializationReconciler) configureManagedMonitoring(ctx context.Context, dscInit *dsciv1.DSCInitialization, initial string) error { - log := r.Log + log := logf.FromContext(ctx) if initial == "init" { // configure Blackbox exporter if err := configureBlackboxExporter(ctx, dscInit, r); err != nil { @@ -89,7 +90,7 @@ func (r *DSCInitializationReconciler) configureManagedMonitoring(ctx context.Con } func configureAlertManager(ctx context.Context, dsciInit *dsciv1.DSCInitialization, r *DSCInitializationReconciler) error { - log := r.Log + log := logf.FromContext(ctx) // Get Deadmansnitch secret deadmansnitchSecret, err := r.waitForManagedSecret(ctx, "redhat-rhods-deadmanssnitch", dsciInit.Spec.Monitoring.Namespace) if err != nil { @@ -200,7 +201,7 @@ func configureAlertManager(ctx context.Context, dsciInit *dsciv1.DSCInitializati } func configurePrometheus(ctx context.Context, dsciInit *dsciv1.DSCInitialization, r *DSCInitializationReconciler) error { - log := r.Log + log := logf.FromContext(ctx) // Update rolebinding-viewer err := common.ReplaceStringsInFile(filepath.Join(prometheusManifestsPath, "prometheus-rolebinding-viewer.yaml"), map[string]string{ @@ -348,7 +349,7 @@ func configurePrometheus(ctx context.Context, dsciInit *dsciv1.DSCInitialization } func configureBlackboxExporter(ctx context.Context, dsciInit *dsciv1.DSCInitialization, r *DSCInitializationReconciler) error { - log := r.Log + log := 
logf.FromContext(ctx) consoleRoute := &routev1.Route{} err := r.Client.Get(ctx, client.ObjectKey{Name: "console", Namespace: "openshift-console"}, consoleRoute) if err != nil { @@ -438,7 +439,7 @@ func createMonitoringProxySecret(ctx context.Context, cli client.Client, name st } func (r *DSCInitializationReconciler) configureSegmentIO(ctx context.Context, dsciInit *dsciv1.DSCInitialization) error { - log := r.Log + log := logf.FromContext(ctx) // create segment.io only when configmap does not exist in the cluster segmentioConfigMap := &corev1.ConfigMap{} if err := r.Client.Get(ctx, client.ObjectKey{ @@ -467,7 +468,7 @@ func (r *DSCInitializationReconciler) configureSegmentIO(ctx context.Context, ds } func (r *DSCInitializationReconciler) configureCommonMonitoring(ctx context.Context, dsciInit *dsciv1.DSCInitialization) error { - log := r.Log + log := logf.FromContext(ctx) if err := r.configureSegmentIO(ctx, dsciInit); err != nil { return err } diff --git a/controllers/dscinitialization/servicemesh_setup.go b/controllers/dscinitialization/servicemesh_setup.go index aef6daba07b..ed3e5a9424b 100644 --- a/controllers/dscinitialization/servicemesh_setup.go +++ b/controllers/dscinitialization/servicemesh_setup.go @@ -9,6 +9,7 @@ import ( conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" @@ -19,7 +20,7 @@ import ( ) func (r *DSCInitializationReconciler) configureServiceMesh(ctx context.Context, instance *dsciv1.DSCInitialization) error { - log := r.Log + log := logf.FromContext(ctx) serviceMeshManagementState := operatorv1.Removed if instance.Spec.ServiceMesh != nil { serviceMeshManagementState = instance.Spec.ServiceMesh.ManagementState @@ -62,7 +63,7 @@ func (r 
*DSCInitializationReconciler) configureServiceMesh(ctx context.Context, } func (r *DSCInitializationReconciler) removeServiceMesh(ctx context.Context, instance *dsciv1.DSCInitialization) error { - log := r.Log + log := logf.FromContext(ctx) // on condition of Managed, do not handle Removed when set to Removed it trigger DSCI reconcile to clean up if instance.Spec.ServiceMesh == nil { return nil diff --git a/controllers/dscinitialization/suite_test.go b/controllers/dscinitialization/suite_test.go index 985618bacf8..c4fa5ff89a7 100644 --- a/controllers/dscinitialization/suite_test.go +++ b/controllers/dscinitialization/suite_test.go @@ -139,7 +139,6 @@ var _ = BeforeSuite(func() { err = (&dscictrl.DSCInitializationReconciler{ Client: k8sClient, Scheme: testScheme, - Log: ctrl.Log.WithName("controllers").WithName("DSCInitialization"), Recorder: mgr.GetEventRecorderFor("dscinitialization-controller"), }).SetupWithManager(gCtx, mgr) diff --git a/controllers/dscinitialization/utils.go b/controllers/dscinitialization/utils.go index be4d6ff071b..77e43b024cf 100644 --- a/controllers/dscinitialization/utils.go +++ b/controllers/dscinitialization/utils.go @@ -18,6 +18,7 @@ import ( "k8s.io/client-go/util/retry" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" @@ -37,7 +38,7 @@ var ( // - Network Policies 'opendatahub' that allow traffic between the ODH namespaces // - RoleBinding 'opendatahub'. 
func (r *DSCInitializationReconciler) createOdhNamespace(ctx context.Context, dscInit *dsciv1.DSCInitialization, name string, platform cluster.Platform) error { - log := r.Log + log := logf.FromContext(ctx) // Expected application namespace for the given name desiredNamespace := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ @@ -142,7 +143,7 @@ func (r *DSCInitializationReconciler) createOdhNamespace(ctx context.Context, ds } func (r *DSCInitializationReconciler) createDefaultRoleBinding(ctx context.Context, name string, dscInit *dsciv1.DSCInitialization) error { - log := r.Log + log := logf.FromContext(ctx) // Expected namespace for the given name desiredRoleBinding := &rbacv1.RoleBinding{ TypeMeta: metav1.TypeMeta{ @@ -190,7 +191,7 @@ func (r *DSCInitializationReconciler) createDefaultRoleBinding(ctx context.Conte } func (r *DSCInitializationReconciler) reconcileDefaultNetworkPolicy(ctx context.Context, name string, dscInit *dsciv1.DSCInitialization, platform cluster.Platform) error { - log := r.Log + log := logf.FromContext(ctx) if platform == cluster.ManagedRhods || platform == cluster.SelfManagedRhods { // Get operator namepsace operatorNs, err := cluster.GetOperatorNamespace() @@ -375,7 +376,7 @@ func GenerateRandomHex(length int) ([]byte, error) { } func (r *DSCInitializationReconciler) createOdhCommonConfigMap(ctx context.Context, name string, dscInit *dsciv1.DSCInitialization) error { - log := r.Log + log := logf.FromContext(ctx) // Expected configmap for the given namespace desiredConfigMap := &corev1.ConfigMap{ TypeMeta: metav1.TypeMeta{ diff --git a/controllers/secretgenerator/secretgenerator_controller.go b/controllers/secretgenerator/secretgenerator_controller.go index 957e02fe4ae..37519d734ea 100644 --- a/controllers/secretgenerator/secretgenerator_controller.go +++ b/controllers/secretgenerator/secretgenerator_controller.go @@ -23,7 +23,6 @@ import ( "fmt" "time" - "github.com/go-logr/logr" oauthv1 "github.com/openshift/api/oauth/v1" routev1 
"github.com/openshift/api/route/v1" corev1 "k8s.io/api/core/v1" @@ -37,6 +36,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" + logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -52,13 +52,11 @@ const ( type SecretGeneratorReconciler struct { Client client.Client Scheme *runtime.Scheme - Log logr.Logger } // SetupWithManager sets up the controller with the Manager. -func (r *SecretGeneratorReconciler) SetupWithManager(mgr ctrl.Manager) error { - log := r.Log - log.Info("Adding controller for Secret Generation.") +func (r *SecretGeneratorReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { + logf.FromContext(ctx).Info("Adding controller for Secret Generation.") // Watch only new secrets with the corresponding annotation predicates := predicate.Funcs{ @@ -106,7 +104,7 @@ func (r *SecretGeneratorReconciler) SetupWithManager(mgr ctrl.Manager) error { // based on the specified type and complexity. This will avoid possible race // conditions when a deployment mounts the secret before it is reconciled. 
func (r *SecretGeneratorReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) { - log := r.Log + log := logf.FromContext(ctx).WithName("SecretGenerator") foundSecret := &corev1.Secret{} err := r.Client.Get(ctx, request.NamespacedName, foundSecret) if err != nil { @@ -209,7 +207,7 @@ func (r *SecretGeneratorReconciler) getRoute(ctx context.Context, name string, n } func (r *SecretGeneratorReconciler) createOAuthClient(ctx context.Context, name string, secretName string, uri string) error { - log := r.Log + log := logf.FromContext(ctx) // Create OAuthClient resource oauthClient := &oauthv1.OAuthClient{ TypeMeta: metav1.TypeMeta{ diff --git a/main.go b/main.go index a50b4c96b40..a59dee6d4a3 100644 --- a/main.go +++ b/main.go @@ -244,7 +244,6 @@ func main() { //nolint:funlen,maintidx if err = (&dscictrl.DSCInitializationReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), - Log: ctrl.Log.WithName(operatorName).WithName("controllers").WithName("DSCInitialization"), Recorder: mgr.GetEventRecorderFor("dscinitialization-controller"), ApplicationsNamespace: dscApplicationsNamespace, }).SetupWithManager(ctx, mgr); err != nil { @@ -255,7 +254,6 @@ func main() { //nolint:funlen,maintidx if err = (&dscctrl.DataScienceClusterReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), - Log: ctrl.Log.WithName(operatorName).WithName("controllers").WithName("DataScienceCluster"), DataScienceCluster: &dscctrl.DataScienceClusterConfig{ DSCISpec: &dsciv1.DSCInitializationSpec{ ApplicationsNamespace: dscApplicationsNamespace, @@ -270,8 +268,7 @@ func main() { //nolint:funlen,maintidx if err = (&secretgenerator.SecretGeneratorReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), - Log: ctrl.Log.WithName(operatorName).WithName("controllers").WithName("SecretGenerator"), - }).SetupWithManager(mgr); err != nil { + }).SetupWithManager(ctx, mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "SecretGenerator") 
os.Exit(1) } @@ -279,8 +276,7 @@ func main() { //nolint:funlen,maintidx if err = (&certconfigmapgenerator.CertConfigmapGeneratorReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), - Log: ctrl.Log.WithName(operatorName).WithName("controllers").WithName("CertConfigmapGenerator"), - }).SetupWithManager(mgr); err != nil { + }).SetupWithManager(ctx, mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "CertConfigmapGenerator") os.Exit(1) } diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go index b1b0f84cdb5..073366683ea 100644 --- a/pkg/cluster/resources.go +++ b/pkg/cluster/resources.go @@ -13,8 +13,8 @@ import ( k8serr "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" ) @@ -178,6 +178,7 @@ func ExecuteOnAllNamespaces(ctx context.Context, cli client.Client, processFunc // WaitForDeploymentAvailable to check if component deployment from 'namespace' is ready within 'timeout' before apply prometheus rules for the component. 
func WaitForDeploymentAvailable(ctx context.Context, c client.Client, componentName string, namespace string, interval int, timeout int) error { + log := logf.FromContext(ctx) resourceInterval := time.Duration(interval) * time.Second resourceTimeout := time.Duration(timeout) * time.Minute @@ -188,7 +189,7 @@ func WaitForDeploymentAvailable(ctx context.Context, c client.Client, componentN return false, fmt.Errorf("error fetching list of deployments: %w", err) } - ctrl.Log.Info("waiting for " + strconv.Itoa(len(componentDeploymentList.Items)) + " deployment to be ready for " + componentName) + log.Info("waiting for " + strconv.Itoa(len(componentDeploymentList.Items)) + " deployment to be ready for " + componentName) for _, deployment := range componentDeploymentList.Items { if deployment.Status.ReadyReplicas != deployment.Status.Replicas { return false, nil @@ -200,6 +201,7 @@ func WaitForDeploymentAvailable(ctx context.Context, c client.Client, componentN } func CreateWithRetry(ctx context.Context, cli client.Client, obj client.Object, timeoutMin int) error { + log := logf.FromContext(ctx) interval := time.Second * 5 // arbitrary value timeout := time.Duration(timeoutMin) * time.Minute @@ -227,7 +229,7 @@ func CreateWithRetry(ctx context.Context, cli client.Client, obj client.Object, // retry if 500, assume webhook is not available if k8serr.IsInternalError(errCreate) { - ctrl.Log.Info("Error creating object, retrying...", "reason", errCreate) + log.Info("Error creating object, retrying...", "reason", errCreate) return false, nil } diff --git a/pkg/upgrade/uninstallation.go b/pkg/upgrade/uninstallation.go index 315b23c1244..cc5c28e3682 100644 --- a/pkg/upgrade/uninstallation.go +++ b/pkg/upgrade/uninstallation.go @@ -10,6 +10,7 @@ import ( k8serr "k8s.io/apimachinery/pkg/api/errors" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" dsciv1 
"github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" @@ -25,6 +26,7 @@ const ( // OperatorUninstall deletes all the externally generated resources. // This includes DSCI, namespace created by operator (but not workbench or MR's), subscription and CSV. func OperatorUninstall(ctx context.Context, cli client.Client, platform cluster.Platform) error { + log := logf.FromContext(ctx) if err := removeDSCInitialization(ctx, cli); err != nil { return err } @@ -51,7 +53,7 @@ func OperatorUninstall(ctx context.Context, cli client.Client, platform cluster. if err := cli.Delete(ctx, &namespace); err != nil { return fmt.Errorf("error deleting namespace %v: %w", namespace.Name, err) } - ctrl.Log.Info("Namespace " + namespace.Name + " deleted as a part of uninstallation.") + log.Info("Namespace " + namespace.Name + " deleted as a part of uninstallation.") } } @@ -66,7 +68,7 @@ func OperatorUninstall(ctx context.Context, cli client.Client, platform cluster. return err } - ctrl.Log.Info("Removing operator subscription which in turn will remove installplan") + log.Info("Removing operator subscription which in turn will remove installplan") subsName := "opendatahub-operator" if platform == cluster.SelfManagedRhods { subsName = "rhods-operator" @@ -77,10 +79,10 @@ func OperatorUninstall(ctx context.Context, cli client.Client, platform cluster. 
} } - ctrl.Log.Info("Removing the operator CSV in turn remove operator deployment") + log.Info("Removing the operator CSV in turn remove operator deployment") err = removeCSV(ctx, cli) - ctrl.Log.Info("All resources deleted as part of uninstall.") + log.Info("All resources deleted as part of uninstall.") return err } @@ -126,6 +128,7 @@ func HasDeleteConfigMap(ctx context.Context, c client.Client) bool { } func removeCSV(ctx context.Context, c client.Client) error { + log := logf.FromContext(ctx) // Get watchNamespace operatorNamespace, err := cluster.GetOperatorNamespace() if err != nil { @@ -142,7 +145,7 @@ func removeCSV(ctx context.Context, c client.Client) error { return err } - ctrl.Log.Info("Deleting CSV " + operatorCsv.Name) + log.Info("Deleting CSV " + operatorCsv.Name) err = c.Delete(ctx, operatorCsv) if err != nil { if k8serr.IsNotFound(err) { @@ -151,7 +154,7 @@ func removeCSV(ctx context.Context, c client.Client) error { return fmt.Errorf("error deleting clusterserviceversion: %w", err) } - ctrl.Log.Info("Clusterserviceversion " + operatorCsv.Name + " deleted as a part of uninstall") + log.Info("Clusterserviceversion " + operatorCsv.Name + " deleted as a part of uninstall") return nil } diff --git a/pkg/upgrade/upgrade.go b/pkg/upgrade/upgrade.go index eedbb45cca3..6b8f857c465 100644 --- a/pkg/upgrade/upgrade.go +++ b/pkg/upgrade/upgrade.go @@ -22,8 +22,8 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" @@ -115,6 +115,7 @@ func CreateDefaultDSC(ctx context.Context, cli client.Client) error { // If there exists default-dsci instance already, it will not update DSCISpec 
on it. // Note: DSCI CR modifcations are not supported, as it is the initial prereq setting for the components. func CreateDefaultDSCI(ctx context.Context, cli client.Client, _ cluster.Platform, appNamespace, monNamespace string) error { + log := logf.FromContext(ctx) defaultDsciSpec := &dsciv1.DSCInitializationSpec{ ApplicationsNamespace: appNamespace, Monitoring: dsciv1.Monitoring{ @@ -152,14 +153,14 @@ func CreateDefaultDSCI(ctx context.Context, cli client.Client, _ cluster.Platfor switch { case len(instances.Items) > 1: - ctrl.Log.Info("only one instance of DSCInitialization object is allowed. Please delete other instances.") + log.Info("only one instance of DSCInitialization object is allowed. Please delete other instances.") return nil case len(instances.Items) == 1: // Do not patch/update if DSCI already exists. - ctrl.Log.Info("DSCInitialization resource already exists. It will not be updated with default DSCI.") + log.Info("DSCInitialization resource already exists. It will not be updated with default DSCI.") return nil case len(instances.Items) == 0: - ctrl.Log.Info("create default DSCI CR.") + log.Info("create default DSCI CR.") err := cluster.CreateWithRetry(ctx, cli, defaultDsci, 1) // 1 min timeout if err != nil { return err @@ -294,13 +295,14 @@ func deleteResources(ctx context.Context, c client.Client, resources *[]Resource } func deleteOneResource(ctx context.Context, c client.Client, res ResourceSpec) error { + log := logf.FromContext(ctx) list := &unstructured.UnstructuredList{} list.SetGroupVersionKind(res.Gvk) err := c.List(ctx, list, client.InNamespace(res.Namespace)) if err != nil { if errors.Is(err, &meta.NoKindMatchError{}) { - ctrl.Log.Info("CRD not found, will not delete " + res.Gvk.String()) + log.Info("CRD not found, will not delete " + res.Gvk.String()) return nil } return fmt.Errorf("failed to list %s: %w", res.Gvk.Kind, err) @@ -323,7 +325,7 @@ func deleteOneResource(ctx context.Context, c client.Client, res ResourceSpec) e if err != 
nil { return fmt.Errorf("failed to delete %s %s/%s: %w", res.Gvk.Kind, res.Namespace, item.GetName(), err) } - ctrl.Log.Info("Deleted object " + item.GetName() + " " + res.Gvk.String() + "in namespace" + res.Namespace) + log.Info("Deleted object " + item.GetName() + " " + res.Gvk.String() + "in namespace" + res.Namespace) } } } @@ -332,6 +334,7 @@ func deleteOneResource(ctx context.Context, c client.Client, res ResourceSpec) e } func deleteDeprecatedResources(ctx context.Context, cli client.Client, namespace string, resourceList []string, resourceType client.ObjectList) error { + log := logf.FromContext(ctx) var multiErr *multierror.Error listOpts := &client.ListOptions{Namespace: namespace} if err := cli.List(ctx, resourceType, listOpts); err != nil { @@ -342,16 +345,16 @@ func deleteDeprecatedResources(ctx context.Context, cli client.Client, namespace item := items.Index(i).Addr().Interface().(client.Object) //nolint:errcheck,forcetypeassert for _, name := range resourceList { if name == item.GetName() { - ctrl.Log.Info("Attempting to delete " + item.GetName() + " in namespace " + namespace) + log.Info("Attempting to delete " + item.GetName() + " in namespace " + namespace) err := cli.Delete(ctx, item) if err != nil { if k8serr.IsNotFound(err) { - ctrl.Log.Info("Could not find " + item.GetName() + " in namespace " + namespace) + log.Info("Could not find " + item.GetName() + " in namespace " + namespace) } else { multiErr = multierror.Append(multiErr, err) } } - ctrl.Log.Info("Successfully deleted " + item.GetName()) + log.Info("Successfully deleted " + item.GetName()) } } } @@ -360,6 +363,7 @@ func deleteDeprecatedResources(ctx context.Context, cli client.Client, namespace // Need to handle ServiceMonitor deletion separately as the generic function does not work for ServiceMonitors because of how the package is built. 
func deleteDeprecatedServiceMonitors(ctx context.Context, cli client.Client, namespace string, resourceList []string) error { + log := logf.FromContext(ctx) var multiErr *multierror.Error listOpts := &client.ListOptions{Namespace: namespace} servicemonitors := &monitoringv1.ServiceMonitorList{} @@ -371,16 +375,16 @@ func deleteDeprecatedServiceMonitors(ctx context.Context, cli client.Client, nam servicemonitor := servicemonitor for _, name := range resourceList { if name == servicemonitor.Name { - ctrl.Log.Info("Attempting to delete " + servicemonitor.Name + " in namespace " + namespace) + log.Info("Attempting to delete " + servicemonitor.Name + " in namespace " + namespace) err := cli.Delete(ctx, servicemonitor) if err != nil { if k8serr.IsNotFound(err) { - ctrl.Log.Info("Could not find " + servicemonitor.Name + " in namespace " + namespace) + log.Info("Could not find " + servicemonitor.Name + " in namespace " + namespace) } else { multiErr = multierror.Append(multiErr, err) } } - ctrl.Log.Info("Successfully deleted " + servicemonitor.Name) + log.Info("Successfully deleted " + servicemonitor.Name) } } } @@ -451,10 +455,11 @@ func unsetOwnerReference(ctx context.Context, cli client.Client, instanceName st } func updateODCBiasMetrics(ctx context.Context, cli client.Client, instanceName string, oldRelease cluster.Release, odhObject *unstructured.Unstructured) error { + log := logf.FromContext(ctx) // "from version" as oldRelease, if return "0.0.0" meaning running on 2.10- release/dummy CI build // if oldRelease is lower than 2.14.0(e.g 2.13.x-a), flip disableBiasMetrics to false (even the field did not exist) if oldRelease.Version.Minor < 14 { - ctrl.Log.Info("Upgrade force BiasMetrics to false in " + instanceName + " CR due to old release < 2.14.0") + log.Info("Upgrade force BiasMetrics to false in " + instanceName + " CR due to old release < 2.14.0") // flip TrustyAI BiasMetrics to false (.spec.dashboardConfig.disableBiasMetrics) disableBiasMetricsValue := 
[]byte(`{"spec": {"dashboardConfig": {"disableBiasMetrics": false}}}`) if err := cli.Patch(ctx, odhObject, client.RawPatch(types.MergePatchType, disableBiasMetricsValue)); err != nil { @@ -462,22 +467,23 @@ func updateODCBiasMetrics(ctx context.Context, cli client.Client, instanceName s } return nil } - ctrl.Log.Info("Upgrade does not force BiasMetrics to false due to from release >= 2.14.0") + log.Info("Upgrade does not force BiasMetrics to false due to from release >= 2.14.0") return nil } func updateODCModelRegistry(ctx context.Context, cli client.Client, instanceName string, oldRelease cluster.Release, odhObject *unstructured.Unstructured) error { + log := logf.FromContext(ctx) // "from version" as oldRelease, if return "0.0.0" meaning running on 2.10- release/dummy CI build // if oldRelease is lower than 2.14.0(e.g 2.13.x-a), flip disableModelRegistry to false (even the field did not exist) if oldRelease.Version.Minor < 14 { - ctrl.Log.Info("Upgrade force ModelRegistry to false in " + instanceName + " CR due to old release < 2.14.0") + log.Info("Upgrade force ModelRegistry to false in " + instanceName + " CR due to old release < 2.14.0") disableModelRegistryValue := []byte(`{"spec": {"dashboardConfig": {"disableModelRegistry": false}}}`) if err := cli.Patch(ctx, odhObject, client.RawPatch(types.MergePatchType, disableModelRegistryValue)); err != nil { return fmt.Errorf("error enable ModelRegistry in CR %s : %w", instanceName, err) } return nil } - ctrl.Log.Info("Upgrade does not force ModelRegistry to false due to from release >= 2.14.0") + log.Info("Upgrade does not force ModelRegistry to false due to from release >= 2.14.0") return nil } @@ -497,6 +503,7 @@ func RemoveLabel(ctx context.Context, cli client.Client, objectName string, labe } func deleteDeprecatedNamespace(ctx context.Context, cli client.Client, namespace string) error { + log := logf.FromContext(ctx) foundNamespace := &corev1.Namespace{} if err := cli.Get(ctx, client.ObjectKey{Name: namespace}, 
foundNamespace); err != nil { if k8serr.IsNotFound(err) { @@ -525,7 +532,7 @@ func deleteDeprecatedNamespace(ctx context.Context, cli client.Client, namespace return fmt.Errorf("error getting pods from namespace %s: %w", namespace, err) } if len(podList.Items) != 0 { - ctrl.Log.Info("Skip deletion of namespace " + namespace + " due to running Pods in it") + log.Info("Skip deletion of namespace " + namespace + " due to running Pods in it") return nil } From b91bd2981c5fe5b9182fef006dffbab4d731e772 Mon Sep 17 00:00:00 2001 From: Gerard Ryan Date: Mon, 21 Oct 2024 19:12:13 +0100 Subject: [PATCH 18/22] Tidy up run-nowebhook recipe & make clean PHONY (#1310) * chore: Tidy run-nowebhook recipe The suggestion to tidy up the run-nowebhook recipe comes from this conversation: https://github.com/opendatahub-io/opendatahub-operator/pull/1304/files#r1806731373 * chore: Make `clean` a PHONY target I believe `clean` should be a PHONY target, since it doesn't create a file called `clean` --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 7ba0b5047ed..2c309ea1e3c 100644 --- a/Makefile +++ b/Makefile @@ -188,8 +188,7 @@ run: manifests generate fmt vet ## Run a controller from your host. .PHONY: run-nowebhook run-nowebhook: GO_RUN_ARGS += -tags nowebhook -run-nowebhook: manifests generate fmt vet ## Run a controller from your host without webhook enabled - $(GO_RUN_MAIN) +run-nowebhook: run ## Run a controller from your host without webhook enabled .PHONY: image-build image-build: # unit-test ## Build image with the manager. 
@@ -380,6 +379,7 @@ CLEANFILES += cover.out e2e-test: ## Run e2e tests for the controller go test ./tests/e2e/ -run ^TestOdhOperator -v --operator-namespace=${OPERATOR_NAMESPACE} ${E2E_TEST_FLAGS} +.PHONY: clean clean: $(GOLANGCI_LINT) $(GOLANGCI_LINT) cache clean chmod u+w -R $(LOCALBIN) # envtest makes its dir RO From a0860f4163500ef559e0c082e7b2a75455007e6e Mon Sep 17 00:00:00 2001 From: Luca Burgazzoli Date: Tue, 22 Oct 2024 12:39:25 +0200 Subject: [PATCH 19/22] refactor the secret generator controller (#1311) The commit is meant to: - make the code easier to understand, reducing complexity caused by nested if/else and error conditions (align happy path to the left) - remove shadowed error vars to avoid reporting misleading errors - add some basic unit test for the reconcile loop --- .../secretgenerator_controller.go | 115 +++++++------ .../secretgenerator_controller_test.go | 154 ++++++++++++++++++ 2 files changed, 217 insertions(+), 52 deletions(-) create mode 100644 controllers/secretgenerator/secretgenerator_controller_test.go diff --git a/controllers/secretgenerator/secretgenerator_controller.go b/controllers/secretgenerator/secretgenerator_controller.go index 37519d734ea..931e45d42f3 100644 --- a/controllers/secretgenerator/secretgenerator_controller.go +++ b/controllers/secretgenerator/secretgenerator_controller.go @@ -104,7 +104,6 @@ func (r *SecretGeneratorReconciler) SetupWithManager(ctx context.Context, mgr ct // based on the specified type and complexity. This will avoid possible race // conditions when a deployment mounts the secret before it is reconciled. func (r *SecretGeneratorReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) { - log := logf.FromContext(ctx).WithName("SecretGenerator") foundSecret := &corev1.Secret{} err := r.Client.Get(ctx, request.NamespacedName, foundSecret) if err != nil { @@ -116,70 +115,82 @@ func (r *SecretGeneratorReconciler) Reconcile(ctx context.Context, request ctrl. 
return ctrl.Result{}, err } - owner := []metav1.OwnerReference{ - *metav1.NewControllerRef(foundSecret, foundSecret.GroupVersionKind()), - } // Generate the secret if it does not previously exist generatedSecret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: foundSecret.Name + "-generated", - Namespace: foundSecret.Namespace, - Labels: foundSecret.Labels, - OwnerReferences: owner, + Name: foundSecret.Name + "-generated", + Namespace: foundSecret.Namespace, }, } - generatedSecretKey := types.NamespacedName{ - Name: generatedSecret.Name, Namespace: generatedSecret.Namespace, + err = r.Client.Get(ctx, client.ObjectKeyFromObject(generatedSecret), generatedSecret) + if err == nil || !k8serr.IsNotFound(err) { + return ctrl.Result{}, err } - err = r.Client.Get(ctx, generatedSecretKey, generatedSecret) + + err = r.generateSecret(ctx, foundSecret, generatedSecret) if err != nil { - if k8serr.IsNotFound(err) { - // Generate secret random value - log.Info("Generating a random value for a secret in a namespace", - "secret", generatedSecret.Name, "namespace", generatedSecret.Namespace) + return ctrl.Result{}, err + } - secret, err := NewSecretFrom(foundSecret.GetAnnotations()) - if err != nil { - log.Error(err, "error creating secret %s in %s", generatedSecret.Name, generatedSecret.Namespace) - return ctrl.Result{}, err - } + // Don't requeue if secret is created successfully + return ctrl.Result{}, nil +} - generatedSecret.StringData = map[string]string{ - secret.Name: secret.Value, - } +func (r *SecretGeneratorReconciler) generateSecret(ctx context.Context, foundSecret *corev1.Secret, generatedSecret *corev1.Secret) error { + log := logf.FromContext(ctx).WithName("SecretGenerator") - err = r.Client.Create(ctx, generatedSecret) - if err != nil { - return ctrl.Result{}, err - } - log.Info("Done generating secret in namespace", - "secret", generatedSecret.Name, "namespace", generatedSecret.Namespace) - // check if annotation oauth-client-route exists - if 
secret.OAuthClientRoute != "" { - // Get OauthClient Route - oauthClientRoute, err := r.getRoute(ctx, secret.OAuthClientRoute, request.Namespace) - if err != nil { - log.Error(err, "Unable to retrieve route from OAuthClient", "route-name", secret.OAuthClientRoute) - return ctrl.Result{}, err - } - // Generate OAuthClient for the generated secret - log.Info("Generating an OAuthClient CR for route", "route-name", oauthClientRoute.Name) - err = r.createOAuthClient(ctx, foundSecret.Name, secret.Value, oauthClientRoute.Spec.Host) - if err != nil { - log.Error(err, "error creating oauth client resource. Recreate the Secret", "secret-name", - foundSecret.Name) - - return ctrl.Result{}, err - } - } - } else { - return ctrl.Result{}, err - } + // Generate secret random value + log.Info("Generating a random value for a secret in a namespace", + "secret", generatedSecret.Name, "namespace", generatedSecret.Namespace) + + generatedSecret.Labels = foundSecret.Labels + + generatedSecret.OwnerReferences = []metav1.OwnerReference{ + *metav1.NewControllerRef(foundSecret, foundSecret.GroupVersionKind()), } - // Don't requeue if secret is created successfully - return ctrl.Result{}, err + secret, err := NewSecretFrom(foundSecret.GetAnnotations()) + if err != nil { + log.Error(err, "error creating secret %s in %s", generatedSecret.Name, generatedSecret.Namespace) + return err + } + + generatedSecret.StringData = map[string]string{ + secret.Name: secret.Value, + } + + err = r.Client.Create(ctx, generatedSecret) + if err != nil { + return err + } + + log.Info("Done generating secret in namespace", + "secret", generatedSecret.Name, "namespace", generatedSecret.Namespace) + + // check if annotation oauth-client-route exists + if secret.OAuthClientRoute == "" { + return nil + } + + // Get OauthClient Route + oauthClientRoute, err := r.getRoute(ctx, secret.OAuthClientRoute, foundSecret.Namespace) + if err != nil { + log.Error(err, "Unable to retrieve route from OAuthClient", "route-name", 
secret.OAuthClientRoute) + return err + } + + // Generate OAuthClient for the generated secret + log.Info("Generating an OAuthClient CR for route", "route-name", oauthClientRoute.Name) + err = r.createOAuthClient(ctx, foundSecret.Name, secret.Value, oauthClientRoute.Spec.Host) + if err != nil { + log.Error(err, "error creating oauth client resource. Recreate the Secret", "secret-name", + foundSecret.Name) + + return err + } + + return nil } // getRoute returns an OpenShift route object. It waits until the .spec.host value exists to avoid possible race conditions, fails otherwise. diff --git a/controllers/secretgenerator/secretgenerator_controller_test.go b/controllers/secretgenerator/secretgenerator_controller_test.go new file mode 100644 index 00000000000..a13bd8d1828 --- /dev/null +++ b/controllers/secretgenerator/secretgenerator_controller_test.go @@ -0,0 +1,154 @@ +package secretgenerator_test + +import ( + "context" + "testing" + + "github.com/onsi/gomega/gstruct" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/secretgenerator" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/annotations" + + . "github.com/onsi/gomega" +) + +//nolint:ireturn +func newFakeClient(objs ...client.Object) client.Client { + scheme := runtime.NewScheme() + utilruntime.Must(corev1.AddToScheme(scheme)) + utilruntime.Must(appsv1.AddToScheme(scheme)) + + return fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(objs...). 
+ Build() +} + +func TestGenerateSecret(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + + secretName := "foo" + secretNs := "ns" + + // secret expected to be found + existingSecret := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: secretNs, + Labels: map[string]string{ + "foo": "bar", + }, + Annotations: map[string]string{ + annotations.SecretNameAnnotation: "bar", + annotations.SecretTypeAnnotation: "random", + }, + }, + } + + // secret to be generated + generatedSecret := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName + "-generated", + Namespace: secretNs, + }, + } + + cli := newFakeClient(&existingSecret) + + r := secretgenerator.SecretGeneratorReconciler{ + Client: cli, + } + + _, err := r.Reconcile(ctx, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: existingSecret.Name, + Namespace: existingSecret.Namespace, + }, + }) + + g.Expect(err).ShouldNot(HaveOccurred()) + + err = cli.Get(ctx, client.ObjectKeyFromObject(&generatedSecret), &generatedSecret) + g.Expect(err).ShouldNot(HaveOccurred()) + + g.Expect(generatedSecret.OwnerReferences).To(HaveLen(1)) + g.Expect(generatedSecret.OwnerReferences[0]).To( + gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{ + "Name": Equal(existingSecret.Name), + "Kind": Equal(existingSecret.Kind), + "APIVersion": Equal(existingSecret.APIVersion), + }), + ) + + g.Expect(generatedSecret.StringData).To( + HaveKey(existingSecret.Annotations[annotations.SecretNameAnnotation])) + g.Expect(generatedSecret.Labels).To( + gstruct.MatchAllKeys(gstruct.Keys{ + "foo": Equal("bar"), + }), + ) +} + +func TestExistingSecret(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + + secretName := "foo" + secretNs := "ns" + + // secret expected to be found + existingSecret := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: secretNs, + Labels: map[string]string{ + "foo": "bar", + }, + Annotations: 
map[string]string{ + annotations.SecretNameAnnotation: "bar", + annotations.SecretTypeAnnotation: "random", + }, + }, + } + + // secret to be generated + generatedSecret := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName + "-generated", + Namespace: secretNs, + }, + } + + cli := newFakeClient(&existingSecret, &generatedSecret) + + r := secretgenerator.SecretGeneratorReconciler{ + Client: cli, + } + + _, err := r.Reconcile(ctx, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: existingSecret.Name, + Namespace: existingSecret.Namespace, + }, + }) + + g.Expect(err).ShouldNot(HaveOccurred()) + + err = cli.Get(ctx, client.ObjectKeyFromObject(&generatedSecret), &generatedSecret) + g.Expect(err).ShouldNot(HaveOccurred()) + + // assuming an existing secret is left untouched + g.Expect(generatedSecret.OwnerReferences).To(BeEmpty()) + g.Expect(generatedSecret.Labels).To(BeEmpty()) + g.Expect(generatedSecret.StringData).To(BeEmpty()) +} From 511977ebff0c43f2d57f5ce4a8a8bb820dae1c99 Mon Sep 17 00:00:00 2001 From: Wen Zhou Date: Tue, 22 Oct 2024 14:59:41 +0200 Subject: [PATCH 20/22] update: rename variables rhods to rhoai (#1313) Signed-off-by: Wen Zhou Co-authored-by: ajaypratap003 --- components/codeflare/codeflare.go | 2 +- components/dashboard/dashboard.go | 20 +++++++++---------- .../datasciencepipelines.go | 2 +- components/kserve/kserve.go | 2 +- components/kueue/kueue.go | 2 +- .../modelmeshserving/modelmeshserving.go | 2 +- components/modelregistry/modelregistry.go | 2 +- components/ray/ray.go | 2 +- .../trainingoperator/trainingoperator.go | 2 +- components/trustyai/trustyai.go | 6 +++--- components/workbenches/workbenches.go | 4 ++-- .../dscinitialization_controller.go | 10 +++++----- controllers/dscinitialization/utils.go | 2 +- main.go | 12 +++++------ pkg/cluster/cluster_config.go | 20 +++++++++---------- pkg/cluster/const.go | 8 ++++---- pkg/upgrade/uninstallation.go | 4 ++-- pkg/upgrade/upgrade.go | 4 ++-- 18 files changed, 53 
insertions(+), 53 deletions(-) diff --git a/components/codeflare/codeflare.go b/components/codeflare/codeflare.go index c3c2ec48474..7e114389465 100644 --- a/components/codeflare/codeflare.go +++ b/components/codeflare/codeflare.go @@ -121,7 +121,7 @@ func (c *CodeFlare) ReconcileComponent(ctx context.Context, } // CloudServiceMonitoring handling - if platform == cluster.ManagedRhods { + if platform == cluster.ManagedRhoai { // inject prometheus codeflare*.rules in to /opt/manifests/monitoring/prometheus/prometheus-configs.yaml if err := c.UpdatePrometheusConfig(cli, l, enabled && monitoringEnabled, ComponentName); err != nil { return err diff --git a/components/dashboard/dashboard.go b/components/dashboard/dashboard.go index f8b077d4416..47a2ce2ba04 100644 --- a/components/dashboard/dashboard.go +++ b/components/dashboard/dashboard.go @@ -51,8 +51,8 @@ func (d *Dashboard) Init(ctx context.Context, platform cluster.Platform) error { "odh-dashboard-image": "RELATED_IMAGE_ODH_DASHBOARD_IMAGE", } DefaultPath = map[cluster.Platform]string{ - cluster.SelfManagedRhods: PathDownstream + "/onprem", - cluster.ManagedRhods: PathDownstream + "/addon", + cluster.SelfManagedRhoai: PathDownstream + "/onprem", + cluster.ManagedRhoai: PathDownstream + "/addon", cluster.OpenDataHub: PathUpstream, cluster.Unknown: PathUpstream, }[platform] @@ -135,7 +135,7 @@ func (d *Dashboard) ReconcileComponent(ctx context.Context, // common: Deploy odh-dashboard manifests // TODO: check if we can have the same component name odh-dashboard for both, or still keep rhods-dashboard for RHOAI switch platform { - case cluster.SelfManagedRhods, cluster.ManagedRhods: + case cluster.SelfManagedRhoai, cluster.ManagedRhoai: // anaconda if err := cluster.CreateSecret(ctx, cli, "anaconda-ce-access", dscispec.ApplicationsNamespace); err != nil { return fmt.Errorf("failed to create access-secret for anaconda: %w", err) @@ -153,7 +153,7 @@ func (d *Dashboard) ReconcileComponent(ctx context.Context, } // 
CloudService Monitoring handling - if platform == cluster.ManagedRhods { + if platform == cluster.ManagedRhoai { if err := d.UpdatePrometheusConfig(cli, l, enabled && monitoringEnabled, ComponentNameDownstream); err != nil { return err } @@ -185,15 +185,15 @@ func (d *Dashboard) ReconcileComponent(ctx context.Context, func updateKustomizeVariable(ctx context.Context, cli client.Client, platform cluster.Platform, dscispec *dsciv1.DSCInitializationSpec) (map[string]string, error) { adminGroups := map[cluster.Platform]string{ - cluster.SelfManagedRhods: "rhods-admins", - cluster.ManagedRhods: "dedicated-admins", + cluster.SelfManagedRhoai: "rhods-admins", + cluster.ManagedRhoai: "dedicated-admins", cluster.OpenDataHub: "odh-admins", cluster.Unknown: "odh-admins", }[platform] sectionTitle := map[cluster.Platform]string{ - cluster.SelfManagedRhods: "OpenShift Self Managed Services", - cluster.ManagedRhods: "OpenShift Managed Services", + cluster.SelfManagedRhoai: "OpenShift Self Managed Services", + cluster.ManagedRhoai: "OpenShift Managed Services", cluster.OpenDataHub: "OpenShift Open Data Hub", cluster.Unknown: "OpenShift Open Data Hub", }[platform] @@ -203,8 +203,8 @@ func updateKustomizeVariable(ctx context.Context, cli client.Client, platform cl return nil, fmt.Errorf("error getting console route URL %s : %w", consoleLinkDomain, err) } consoleURL := map[cluster.Platform]string{ - cluster.SelfManagedRhods: "https://rhods-dashboard-" + dscispec.ApplicationsNamespace + "." + consoleLinkDomain, - cluster.ManagedRhods: "https://rhods-dashboard-" + dscispec.ApplicationsNamespace + "." + consoleLinkDomain, + cluster.SelfManagedRhoai: "https://rhods-dashboard-" + dscispec.ApplicationsNamespace + "." + consoleLinkDomain, + cluster.ManagedRhoai: "https://rhods-dashboard-" + dscispec.ApplicationsNamespace + "." + consoleLinkDomain, cluster.OpenDataHub: "https://odh-dashboard-" + dscispec.ApplicationsNamespace + "." 
+ consoleLinkDomain, cluster.Unknown: "https://odh-dashboard-" + dscispec.ApplicationsNamespace + "." + consoleLinkDomain, }[platform] diff --git a/components/datasciencepipelines/datasciencepipelines.go b/components/datasciencepipelines/datasciencepipelines.go index ace22681f6b..d11cae6153d 100644 --- a/components/datasciencepipelines/datasciencepipelines.go +++ b/components/datasciencepipelines/datasciencepipelines.go @@ -135,7 +135,7 @@ func (d *DataSciencePipelines) ReconcileComponent(ctx context.Context, } // CloudService Monitoring handling - if platform == cluster.ManagedRhods { + if platform == cluster.ManagedRhoai { if err := d.UpdatePrometheusConfig(cli, l, enabled && monitoringEnabled, ComponentName); err != nil { return err } diff --git a/components/kserve/kserve.go b/components/kserve/kserve.go index f7edf65479a..3734551ba56 100644 --- a/components/kserve/kserve.go +++ b/components/kserve/kserve.go @@ -170,7 +170,7 @@ func (k *Kserve) ReconcileComponent(ctx context.Context, cli client.Client, } // CloudService Monitoring handling - if platform == cluster.ManagedRhods { + if platform == cluster.ManagedRhoai { // kesrve rules if err := k.UpdatePrometheusConfig(cli, l, enabled && monitoringEnabled, ComponentName); err != nil { return err diff --git a/components/kueue/kueue.go b/components/kueue/kueue.go index 73147085783..0e63f707548 100644 --- a/components/kueue/kueue.go +++ b/components/kueue/kueue.go @@ -93,7 +93,7 @@ func (k *Kueue) ReconcileComponent(ctx context.Context, cli client.Client, } // CloudService Monitoring handling - if platform == cluster.ManagedRhods { + if platform == cluster.ManagedRhoai { if err := k.UpdatePrometheusConfig(cli, l, enabled && monitoringEnabled, ComponentName); err != nil { return err } diff --git a/components/modelmeshserving/modelmeshserving.go b/components/modelmeshserving/modelmeshserving.go index 898d1f8617a..40034741cfe 100644 --- a/components/modelmeshserving/modelmeshserving.go +++ 
b/components/modelmeshserving/modelmeshserving.go @@ -156,7 +156,7 @@ func (m *ModelMeshServing) ReconcileComponent(ctx context.Context, } // CloudService Monitoring handling - if platform == cluster.ManagedRhods { + if platform == cluster.ManagedRhoai { // first model-mesh rules if err := m.UpdatePrometheusConfig(cli, l, enabled && monitoringEnabled, ComponentName); err != nil { return err diff --git a/components/modelregistry/modelregistry.go b/components/modelregistry/modelregistry.go index 72c9da1e381..242bea853f1 100644 --- a/components/modelregistry/modelregistry.go +++ b/components/modelregistry/modelregistry.go @@ -165,7 +165,7 @@ func (m *ModelRegistry) ReconcileComponent(ctx context.Context, cli client.Clien } // CloudService Monitoring handling - if platform == cluster.ManagedRhods { + if platform == cluster.ManagedRhoai { if err := m.UpdatePrometheusConfig(cli, l, enabled && monitoringEnabled, ComponentName); err != nil { return err } diff --git a/components/ray/ray.go b/components/ray/ray.go index f11ad8ec55f..8b6e3cc6dfb 100644 --- a/components/ray/ray.go +++ b/components/ray/ray.go @@ -98,7 +98,7 @@ func (r *Ray) ReconcileComponent(ctx context.Context, cli client.Client, } // CloudService Monitoring handling - if platform == cluster.ManagedRhods { + if platform == cluster.ManagedRhoai { if err := r.UpdatePrometheusConfig(cli, l, enabled && monitoringEnabled, ComponentName); err != nil { return err } diff --git a/components/trainingoperator/trainingoperator.go b/components/trainingoperator/trainingoperator.go index d422bc9a28a..9346a54f1b9 100644 --- a/components/trainingoperator/trainingoperator.go +++ b/components/trainingoperator/trainingoperator.go @@ -96,7 +96,7 @@ func (r *TrainingOperator) ReconcileComponent(ctx context.Context, cli client.Cl } // CloudService Monitoring handling - if platform == cluster.ManagedRhods { + if platform == cluster.ManagedRhoai { if err := r.UpdatePrometheusConfig(cli, l, enabled && monitoringEnabled, 
ComponentName); err != nil { return err } diff --git a/components/trustyai/trustyai.go b/components/trustyai/trustyai.go index 114d099901b..b0ad4eb2bed 100644 --- a/components/trustyai/trustyai.go +++ b/components/trustyai/trustyai.go @@ -40,8 +40,8 @@ func (t *TrustyAI) Init(ctx context.Context, platform cluster.Platform) error { log := logf.FromContext(ctx).WithName(ComponentName) DefaultPath = map[cluster.Platform]string{ - cluster.SelfManagedRhods: PathDownstream, - cluster.ManagedRhods: PathDownstream, + cluster.SelfManagedRhoai: PathDownstream, + cluster.ManagedRhoai: PathDownstream, cluster.OpenDataHub: PathUpstream, cluster.Unknown: PathUpstream, }[platform] @@ -110,7 +110,7 @@ func (t *TrustyAI) ReconcileComponent(ctx context.Context, cli client.Client, } // CloudService Monitoring handling - if platform == cluster.ManagedRhods { + if platform == cluster.ManagedRhoai { if err := t.UpdatePrometheusConfig(cli, l, enabled && monitoringEnabled, ComponentName); err != nil { return err } diff --git a/components/workbenches/workbenches.go b/components/workbenches/workbenches.go index b744a1660d6..46671b4b028 100644 --- a/components/workbenches/workbenches.go +++ b/components/workbenches/workbenches.go @@ -124,7 +124,7 @@ func (w *Workbenches) ReconcileComponent(ctx context.Context, cli client.Client, return err } } - if platform == cluster.SelfManagedRhods || platform == cluster.ManagedRhods { + if platform == cluster.SelfManagedRhoai || platform == cluster.ManagedRhoai { // Intentionally leaving the ownership unset for this namespace. // Specifying this label triggers its deletion when the operator is uninstalled. 
_, err := cluster.CreateNamespace(ctx, cli, cluster.DefaultNotebooksNamespace, cluster.WithLabels(labels.ODH.OwnedNamespace, "true")) @@ -171,7 +171,7 @@ func (w *Workbenches) ReconcileComponent(ctx context.Context, cli client.Client, } // CloudService Monitoring handling - if platform == cluster.ManagedRhods { + if platform == cluster.ManagedRhoai { if err := w.UpdatePrometheusConfig(cli, l, enabled && monitoringEnabled, ComponentName); err != nil { return err } diff --git a/controllers/dscinitialization/dscinitialization_controller.go b/controllers/dscinitialization/dscinitialization_controller.go index 023543dd633..47282a484e7 100644 --- a/controllers/dscinitialization/dscinitialization_controller.go +++ b/controllers/dscinitialization/dscinitialization_controller.go @@ -183,7 +183,7 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re switch req.Name { case "prometheus": // prometheus configmap - if instance.Spec.Monitoring.ManagementState == operatorv1.Managed && platform == cluster.ManagedRhods { + if instance.Spec.Monitoring.ManagementState == operatorv1.Managed && platform == cluster.ManagedRhoai { log.Info("Monitoring enabled to restart deployment", "cluster", "Managed Service Mode") err := r.configureManagedMonitoring(ctx, instance, "updates") if err != nil { @@ -193,7 +193,7 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re return ctrl.Result{}, nil case "addon-managed-odh-parameters": - if instance.Spec.Monitoring.ManagementState == operatorv1.Managed && platform == cluster.ManagedRhods { + if instance.Spec.Monitoring.ManagementState == operatorv1.Managed && platform == cluster.ManagedRhoai { log.Info("Monitoring enabled when notification updated", "cluster", "Managed Service Mode") err := r.configureManagedMonitoring(ctx, instance, "updates") if err != nil { @@ -203,7 +203,7 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re return ctrl.Result{}, nil case 
"backup": // revert back to the original prometheus.yml - if instance.Spec.Monitoring.ManagementState == operatorv1.Managed && platform == cluster.ManagedRhods { + if instance.Spec.Monitoring.ManagementState == operatorv1.Managed && platform == cluster.ManagedRhoai { log.Info("Monitoring enabled to restore back", "cluster", "Managed Service Mode") err := r.configureManagedMonitoring(ctx, instance, "revertbackup") if err != nil { @@ -219,7 +219,7 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re } switch platform { - case cluster.SelfManagedRhods: + case cluster.SelfManagedRhoai: // Check if user opted for disabling creating user groups if !createUsergroup { log.Info("DSCI disabled usergroup creation") @@ -236,7 +236,7 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re return reconcile.Result{}, err } } - case cluster.ManagedRhods: + case cluster.ManagedRhoai: osdConfigsPath := filepath.Join(deploy.DefaultManifestPath, "osd-configs") err = deploy.DeployManifestsFromPath(ctx, r.Client, instance, osdConfigsPath, r.ApplicationsNamespace, "osd", true) if err != nil { diff --git a/controllers/dscinitialization/utils.go b/controllers/dscinitialization/utils.go index 77e43b024cf..b434278a1ad 100644 --- a/controllers/dscinitialization/utils.go +++ b/controllers/dscinitialization/utils.go @@ -192,7 +192,7 @@ func (r *DSCInitializationReconciler) createDefaultRoleBinding(ctx context.Conte func (r *DSCInitializationReconciler) reconcileDefaultNetworkPolicy(ctx context.Context, name string, dscInit *dsciv1.DSCInitialization, platform cluster.Platform) error { log := logf.FromContext(ctx) - if platform == cluster.ManagedRhods || platform == cluster.SelfManagedRhods { + if platform == cluster.ManagedRhoai || platform == cluster.SelfManagedRhoai { // Get operator namepsace operatorNs, err := cluster.GetOperatorNamespace() if err != nil { diff --git a/main.go b/main.go index a59dee6d4a3..e55cc3ba61f 100644 --- 
a/main.go +++ b/main.go @@ -303,8 +303,8 @@ func main() { //nolint:funlen,maintidx } } - // Create default DSC CR for managed RHODS - if platform == cluster.ManagedRhods { + // Create default DSC CR for managed RHOAI + if platform == cluster.ManagedRhoai { var createDefaultDSCFunc manager.RunnableFunc = func(ctx context.Context) error { err := upgrade.CreateDefaultDSC(ctx, setupClient) if err != nil { @@ -357,7 +357,7 @@ func createSecretCacheConfig(platform cluster.Platform) map[string]cache.Config "openshift-ingress": {}, } switch platform { - case cluster.ManagedRhods: + case cluster.ManagedRhoai: namespaceConfigs["redhat-ods-monitoring"] = cache.Config{} namespaceConfigs["redhat-ods-applications"] = cache.Config{} operatorNs, err := cluster.GetOperatorNamespace() @@ -365,7 +365,7 @@ func createSecretCacheConfig(platform cluster.Platform) map[string]cache.Config operatorNs = "redhat-ods-operator" // fall back case } namespaceConfigs[operatorNs] = cache.Config{} - case cluster.SelfManagedRhods: + case cluster.SelfManagedRhoai: namespaceConfigs["redhat-ods-applications"] = cache.Config{} default: namespaceConfigs["opendatahub"] = cache.Config{} @@ -376,10 +376,10 @@ func createSecretCacheConfig(platform cluster.Platform) map[string]cache.Config func createDeploymentCacheConfig(platform cluster.Platform) map[string]cache.Config { namespaceConfigs := map[string]cache.Config{} switch platform { - case cluster.ManagedRhods: // no need workbench NS, only SFS no Deployment + case cluster.ManagedRhoai: // no need workbench NS, only SFS no Deployment namespaceConfigs["redhat-ods-monitoring"] = cache.Config{} namespaceConfigs["redhat-ods-applications"] = cache.Config{} - case cluster.SelfManagedRhods: + case cluster.SelfManagedRhoai: namespaceConfigs["redhat-ods-applications"] = cache.Config{} default: namespaceConfigs["opendatahub"] = cache.Config{} diff --git a/pkg/cluster/cluster_config.go b/pkg/cluster/cluster_config.go index a2b5aaa4cb4..80271161b06 100644 --- 
a/pkg/cluster/cluster_config.go +++ b/pkg/cluster/cluster_config.go @@ -137,11 +137,11 @@ func GetClusterServiceVersion(ctx context.Context, c client.Client, namespace st gvk.ClusterServiceVersion.Kind) } -// detectSelfManaged detects if it is Self Managed Rhods or OpenDataHub. +// detectSelfManaged detects if it is Self Managed Rhoai or OpenDataHub. func detectSelfManaged(ctx context.Context, cli client.Client) (Platform, error) { variants := map[string]Platform{ "opendatahub-operator": OpenDataHub, - "rhods-operator": SelfManagedRhods, + "rhods-operator": SelfManagedRhoai, } for k, v := range variants { @@ -157,8 +157,8 @@ func detectSelfManaged(ctx context.Context, cli client.Client) (Platform, error) return Unknown, nil } -// detectManagedRHODS checks if catsrc CR add-on exists ManagedRhods. -func detectManagedRHODS(ctx context.Context, cli client.Client) (Platform, error) { +// detectManagedRhoai checks if catsrc CR add-on exists ManagedRhoai. +func detectManagedRhoai(ctx context.Context, cli client.Client) (Platform, error) { catalogSource := &ofapiv1alpha1.CatalogSource{} operatorNs, err := GetOperatorNamespace() if err != nil { @@ -168,7 +168,7 @@ func detectManagedRHODS(ctx context.Context, cli client.Client) (Platform, error if err != nil { return Unknown, client.IgnoreNotFound(err) } - return ManagedRhods, nil + return ManagedRhoai, nil } func getPlatform(ctx context.Context, cli client.Client) (Platform, error) { @@ -176,15 +176,15 @@ func getPlatform(ctx context.Context, cli client.Client) (Platform, error) { case "OpenDataHub": return OpenDataHub, nil case "ManagedRHOAI": - return ManagedRhods, nil + return SelfManagedRhoai, nil case "SelfManagedRHOAI": - return SelfManagedRhods, nil + return SelfManagedRhoai, nil default: // fall back to detect platform if ODH_PLATFORM_TYPE env is not provided in CSV or set to "" - if platform, err := detectManagedRHODS(ctx, cli); err != nil { + if platform, err := detectManagedRhoai(ctx, cli); err != nil { return 
Unknown, err - } else if platform == ManagedRhods { - return ManagedRhods, nil + } else if platform == SelfManagedRhoai { + return SelfManagedRhoai, nil } return detectSelfManaged(ctx, cli) } diff --git a/pkg/cluster/const.go b/pkg/cluster/const.go index de5a27de29d..6a6562bff07 100644 --- a/pkg/cluster/const.go +++ b/pkg/cluster/const.go @@ -1,10 +1,10 @@ package cluster const ( - // ManagedRhods defines expected addon catalogsource. - ManagedRhods Platform = "OpenShift AI Cloud Service" - // SelfManagedRhods defines display name in csv. - SelfManagedRhods Platform = "OpenShift AI Self-Managed" + // ManagedRhoai defines expected addon catalogsource. + ManagedRhoai Platform = "OpenShift AI Cloud Service" + // SelfManagedRhoai defines display name in csv. + SelfManagedRhoai Platform = "OpenShift AI Self-Managed" // OpenDataHub defines display name in csv. OpenDataHub Platform = "Open Data Hub" // Unknown indicates that operator is not deployed using OLM. diff --git a/pkg/upgrade/uninstallation.go b/pkg/upgrade/uninstallation.go index cc5c28e3682..0618fd7be45 100644 --- a/pkg/upgrade/uninstallation.go +++ b/pkg/upgrade/uninstallation.go @@ -70,10 +70,10 @@ func OperatorUninstall(ctx context.Context, cli client.Client, platform cluster. 
log.Info("Removing operator subscription which in turn will remove installplan") subsName := "opendatahub-operator" - if platform == cluster.SelfManagedRhods { + if platform == cluster.SelfManagedRhoai { subsName = "rhods-operator" } - if platform != cluster.ManagedRhods { + if platform != cluster.ManagedRhoai { if err := cluster.DeleteExistingSubscription(ctx, cli, operatorNs, subsName); err != nil { return err } diff --git a/pkg/upgrade/upgrade.go b/pkg/upgrade/upgrade.go index 6b8f857c465..d0e9d44468d 100644 --- a/pkg/upgrade/upgrade.go +++ b/pkg/upgrade/upgrade.go @@ -217,7 +217,7 @@ func CleanupExistingResource(ctx context.Context, ) error { var multiErr *multierror.Error // Special Handling of cleanup of deprecated model monitoring stack - if platform == cluster.ManagedRhods { + if platform == cluster.ManagedRhoai { deprecatedDeployments := []string{"rhods-prometheus-operator"} multiErr = multierror.Append(multiErr, deleteDeprecatedResources(ctx, cli, dscMonitoringNamespace, deprecatedDeployments, &appsv1.DeploymentList{})) @@ -270,7 +270,7 @@ func CleanupExistingResource(ctx context.Context, }) multiErr = multierror.Append(multiErr, deleteResources(ctx, cli, &odhDocJPH)) // only apply on RHOAI since ODH has a different way to create this CR by dashboard - if platform == cluster.SelfManagedRhods || platform == cluster.ManagedRhods { + if platform == cluster.SelfManagedRhoai || platform == cluster.ManagedRhoai { if err := upgradeODCCR(ctx, cli, "odh-dashboard-config", dscApplicationsNamespace, oldReleaseVersion); err != nil { return err } From 3427b72d83faba7eb448a7c5dd5cafbb7314421d Mon Sep 17 00:00:00 2001 From: Yauheni Kaliuta Date: Tue, 22 Oct 2024 18:25:27 +0300 Subject: [PATCH 21/22] logger: add logLevel to DSCI devFlags (#1307) Jira: https://issues.redhat.com/browse/RHOAIENG-14713 This is a squashed commit of the patchset (original ids): 959f84d33fa9 ("logger: import controller-runtime zap as ctrlzap") b8d9cde99c12 ("logger: add logLevel to DSCI 
devFlags") 2d3efe27c38b ("components, logger: always use controller's logger") 1ef9266fbf23 ("components, logger: use one function for ctx creation") - logger: import controller-runtime zap as ctrlzap To avoid patch polluting with the next changes where uber's zap is imported as `zap`. - logger: add logLevel to DSCI devFlags Allow to override global zap log level from DSCI devFlags. Accepts the same values as the `--zap-log-level` command line switch. Use zap.AtomicLevel type to store loglevel. Locally use atomic to store its value. We must store the structure itself since command line flags parser (ctrlzap.(*levelFlag).Set()) stores the structure there. In its turn ctrlzap.addDefault() stores pointers, newOptions() always sets the level to avoid setting it by addDefaults(). Otherwise SetLevel should handle both pointer and the structure as logLevel atomic.Value. It is ok to store structure of zap.AtomicLevel since it itself contains a pointer to the atomic.Int32 and common level value is changed then. The parsing code is a modified version of the one from controller-runtime/pkg/log/zap/flag.go. Deprecate DSCI.DevFlags.logmode. - components, logger: always use controller's logger Since the log level is overridable with its own field of devFlags, do not use logmode anymore. It was used to create own logger with own zap backend in case devFlags exist. Just add name and value to the existing logger instead. Rename NewLoggerWithOptions back to NewLogger since former NewLogger is removed. Change component logger name. "DSC.Component" is redundant. It was useful when component's logger was created from scratch, but now when it is always based on the reconciler's one, it's clear that it is a part of DSC. - components, logger: use one function for ctx creation Move both logger and component creation to one function. Just a cleanup. 
Signed-off-by: Yauheni Kaliuta --- README.md | 30 +++--- .../v1/dscinitialization_types.go | 4 + ...ion.opendatahub.io_dscinitializations.yaml | 5 + .../datasciencecluster_controller.go | 13 +-- .../dscinitialization_controller.go | 10 ++ docs/api-overview.md | 3 +- main.go | 2 +- pkg/logger/logger.go | 94 +++++++++++++------ 8 files changed, 101 insertions(+), 60 deletions(-) diff --git a/README.md b/README.md index b1b7971d47f..73a43ec3b50 100644 --- a/README.md +++ b/README.md @@ -222,8 +222,6 @@ This will ensure that the doc for the apis are updated accordingly. ### Enabled logging -#### Controller level - Global logger configuration can be changed with a command line switch `--log-mode ` for example from CSV. Valid values for ``: "" (as default) || prod || production || devel || development. @@ -231,12 +229,9 @@ Verbosity level is INFO. To fine tune zap backend [standard operator sdk zap switches](https://sdk.operatorframework.io/docs/building-operators/golang/references/logging/) can be used. -#### Component level - -Logger on components can be changed by DSCI devFlags during runtime. -By default, if not set .spec.devFlags.logmode, it uses INFO level -Modification applies to all components, not only these "Managed" ones. -Update DSCI CR with .spec.devFlags.logmode, see example : +Log level can be changed by DSCI devFlags during runtime by setting +.spec.devFlags.logLevel. It accepts the same values as `--zap-log-level` +command line switch. See example : ```console apiVersion: dscinitialization.opendatahub.io/v1 @@ -245,20 +240,17 @@ metadata: name: default-dsci spec: devFlags: - logmode: development + logLevel: debug ... ``` -Avaiable value for logmode is "devel", "development", "prod", "production". -The first two work the same set to DEBUG level; the later two work the same as using ERROR level. 
- -| .spec.devFlags.logmode | stacktrace level | verbosity | Output | Comments | -| ---------------------- | ---------------- | --------- | -------- | -------------- | -| devel | WARN | INFO | Console | lowest level, using epoch time | -| development | WARN | INFO | Console | same as devel | -| "" | ERROR | INFO | JSON | default option | -| prod | ERROR | INFO | JSON | highest level, using human readable timestamp | -| production | ERROR | INFO | JSON | same as prod | +| logmode | stacktrace level | verbosity | Output | Comments | +|-------------|------------------|-----------|---------|-----------------------------------------------| +| devel | WARN | INFO | Console | lowest level, using epoch time | +| development | WARN | INFO | Console | same as devel | +| "" | ERROR | INFO | JSON | default option | +| prod | ERROR | INFO | JSON | highest level, using human readable timestamp | +| production | ERROR | INFO | JSON | same as prod | ### Example DSCInitialization diff --git a/apis/dscinitialization/v1/dscinitialization_types.go b/apis/dscinitialization/v1/dscinitialization_types.go index 50f758a5df8..7780bb193e5 100644 --- a/apis/dscinitialization/v1/dscinitialization_types.go +++ b/apis/dscinitialization/v1/dscinitialization_types.go @@ -83,9 +83,13 @@ type DevFlags struct { // Custom manifests uri for odh-manifests // +optional ManifestsUri string `json:"manifestsUri,omitempty"` + // ## DEPRECATED ##: Ignored, use LogLevel instead // +kubebuilder:validation:Enum=devel;development;prod;production;default // +kubebuilder:default="production" LogMode string `json:"logmode,omitempty"` + // Override Zap log level. Can be "debug", "info", "error" or a number (more verbose). 
+ // +optional + LogLevel string `json:"logLevel,omitempty"` } type TrustedCABundleSpec struct { diff --git a/config/crd/bases/dscinitialization.opendatahub.io_dscinitializations.yaml b/config/crd/bases/dscinitialization.opendatahub.io_dscinitializations.yaml index dd381696bb1..894772ea9a9 100644 --- a/config/crd/bases/dscinitialization.opendatahub.io_dscinitializations.yaml +++ b/config/crd/bases/dscinitialization.opendatahub.io_dscinitializations.yaml @@ -67,8 +67,13 @@ spec: Internal development useful field to test customizations. This is not recommended to be used in production environment. properties: + logLevel: + description: Override Zap log level. Can be "debug", "info", "error" + or a number (more verbose). + type: string logmode: default: production + description: '## DEPRECATED ##: Ignored, use LogLevel instead' enum: - devel - development diff --git a/controllers/datasciencecluster/datasciencecluster_controller.go b/controllers/datasciencecluster/datasciencecluster_controller.go index e8d8750ca99..22a5944676f 100644 --- a/controllers/datasciencecluster/datasciencecluster_controller.go +++ b/controllers/datasciencecluster/datasciencecluster_controller.go @@ -57,7 +57,6 @@ import ( "github.com/opendatahub-io/opendatahub-operator/v2/components/modelregistry" "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" - ctrlogger "github.com/opendatahub-io/opendatahub-operator/v2/pkg/logger" annotations "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/annotations" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/upgrade" @@ -312,8 +311,7 @@ func (r *DataScienceClusterReconciler) reconcileSubComponent(ctx context.Context } } // Reconcile component - componentLogger := newComponentLogger(log, componentName, r.DataScienceCluster.DSCISpec) - componentCtx := logf.IntoContext(ctx, 
componentLogger) + componentCtx := newComponentContext(ctx, log, componentName) err := component.ReconcileComponent(componentCtx, r.Client, instance, r.DataScienceCluster.DSCISpec, platform, installedComponentValue) // TODO: replace this hack with a full refactor of component status in the future @@ -364,13 +362,8 @@ func (r *DataScienceClusterReconciler) reconcileSubComponent(ctx context.Context return instance, nil } -// newComponentLogger is a wrapper to add DSC name and extract log mode from DSCISpec. -func newComponentLogger(logger logr.Logger, componentName string, dscispec *dsciv1.DSCInitializationSpec) logr.Logger { - mode := "" - if dscispec.DevFlags != nil { - mode = dscispec.DevFlags.LogMode - } - return ctrlogger.NewNamedLogger(logger, "DSC.Components."+componentName, mode) +func newComponentContext(ctx context.Context, log logr.Logger, componentName string) context.Context { + return logf.IntoContext(ctx, log.WithName(componentName).WithValues("component", componentName)) } func (r *DataScienceClusterReconciler) reportError(ctx context.Context, err error, instance *dscv1.DataScienceCluster, message string) *dscv1.DataScienceCluster { diff --git a/controllers/dscinitialization/dscinitialization_controller.go b/controllers/dscinitialization/dscinitialization_controller.go index 47282a484e7..b9156aa7c16 100644 --- a/controllers/dscinitialization/dscinitialization_controller.go +++ b/controllers/dscinitialization/dscinitialization_controller.go @@ -48,6 +48,7 @@ import ( "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/logger" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/trustedcabundle" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/upgrade" ) @@ -100,6 +101,15 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req 
ctrl.Re instance = &instances.Items[0] } + if instance.Spec.DevFlags != nil { + level := instance.Spec.DevFlags.LogLevel + log.V(1).Info("Setting log level", "level", level) + err := logger.SetLevel(level) + if err != nil { + log.Error(err, "Failed to set log level", "level", level) + } + } + if instance.ObjectMeta.DeletionTimestamp.IsZero() { if !controllerutil.ContainsFinalizer(instance, finalizerName) { log.Info("Adding finalizer for DSCInitialization", "name", instance.Name, "finalizer", finalizerName) diff --git a/docs/api-overview.md b/docs/api-overview.md index 3c963f15088..9c2e8971dde 100644 --- a/docs/api-overview.md +++ b/docs/api-overview.md @@ -669,7 +669,8 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | | `manifestsUri` _string_ | Custom manifests uri for odh-manifests | | | -| `logmode` _string_ | | production | Enum: [devel development prod production default]
| +| `logmode` _string_ | ## DEPRECATED ##: Ignored, use LogLevel instead | production | Enum: [devel development prod production default]
| +| `logLevel` _string_ | Override Zap log level. Can be "debug", "info", "error" or a number (more verbose). | | | #### Monitoring diff --git a/main.go b/main.go index e55cc3ba61f..d98dfe8f452 100644 --- a/main.go +++ b/main.go @@ -146,7 +146,7 @@ func main() { //nolint:funlen,maintidx flag.Parse() - ctrl.SetLogger(logger.NewLoggerWithOptions(logmode, &opts)) + ctrl.SetLogger(logger.NewLogger(logmode, &opts)) // root context ctx := ctrl.SetupSignalHandler() diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index ed569084b28..122ce002bff 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -1,57 +1,92 @@ package logger import ( + "errors" "flag" + "fmt" "os" + "strconv" + "strings" + "sync/atomic" "github.com/go-logr/logr" + "go.uber.org/zap" "go.uber.org/zap/zapcore" - "sigs.k8s.io/controller-runtime/pkg/log/zap" + ctrlzap "sigs.k8s.io/controller-runtime/pkg/log/zap" ) -// NewNamedLogger creates a new logger for a component. -// If the mode is set (so can be different from the default one), -// it will create a new logger with the specified mode's options. -func NewNamedLogger(log logr.Logger, name string, mode string) logr.Logger { - if mode != "" { - log = NewLogger(mode) - } - return log.WithName(name) +var logLevel atomic.Value + +// copy from controller-runtime/pkg/log/zap/flag.go. +var levelStrings = map[string]zapcore.Level{ + "debug": zap.DebugLevel, + "info": zap.InfoLevel, + "error": zap.ErrorLevel, } -func NewLoggerWithOptions(mode string, override *zap.Options) logr.Logger { - opts := newOptions(mode) - overrideOptions(opts, override) - return newLogger(opts) +// adjusted copy from controller-runtime/pkg/log/zap/flag.go, keep the same argument name. 
+func stringToLevel(flagValue string) (zapcore.Level, error) { + level, validLevel := levelStrings[strings.ToLower(flagValue)] + if validLevel { + return level, nil + } + logLevel, err := strconv.Atoi(flagValue) + if err != nil { + return 0, fmt.Errorf("invalid log level \"%s\"", flagValue) + } + if logLevel > 0 { + intLevel := -1 * logLevel + return zapcore.Level(int8(intLevel)), nil + } + + return 0, fmt.Errorf("invalid log level \"%s\"", flagValue) } -// in DSC component, to use different mode for logging, e.g. development, production -// when not set mode it falls to "default" which is used by startup main.go. -func NewLogger(mode string) logr.Logger { - return newLogger(newOptions(mode)) +func SetLevel(levelStr string) error { + if levelStr == "" { + return nil + } + levelNum, err := stringToLevel(levelStr) + if err != nil { + return err + } + + // ctrlzap.addDefaults() uses a pointer to the AtomicLevel, + // but ctrlzap.(*levelFlag).Set() the structure itself. + // So use the structure and always set the value in newOptions() for the addDefaults() call + level, ok := logLevel.Load().(zap.AtomicLevel) + if !ok { + return errors.New("stored loglevel is not of type *zap.AtomicLevel") + } + + level.SetLevel(levelNum) + return nil } -func newLogger(opts *zap.Options) logr.Logger { - return zap.New(zap.UseFlagOptions(opts)) +func NewLogger(mode string, override *ctrlzap.Options) logr.Logger { + opts := newOptions(mode) + overrideOptions(opts, override) + logLevel.Store(opts.Level) + return ctrlzap.New(ctrlzap.UseFlagOptions(opts)) } -func newOptions(mode string) *ctrlzap.Options { - var opts zap.Options +func newOptions(mode string) *ctrlzap.Options { + var opts ctrlzap.Options + level := zap.NewAtomicLevelAt(zapcore.InfoLevel) + + switch mode { case "devel", "development": // the most logging verbosity - opts = zap.Options{ + opts = ctrlzap.Options{ Development: true, StacktraceLevel: zapcore.WarnLevel, - Level: zapcore.InfoLevel, DestWriter: os.Stdout, } case "prod", 
"production": // the least logging verbosity - opts = zap.Options{ + opts = ctrlzap.Options{ Development: false, StacktraceLevel: zapcore.ErrorLevel, - Level: zapcore.InfoLevel, DestWriter: os.Stdout, - EncoderConfigOptions: []zap.EncoderConfigOption{func(config *zapcore.EncoderConfig) { + EncoderConfigOptions: []ctrlzap.EncoderConfigOption{func(config *zapcore.EncoderConfig) { config.EncodeTime = zapcore.ISO8601TimeEncoder // human readable not epoch config.EncodeDuration = zapcore.SecondsDurationEncoder config.LevelKey = "LogLevel" @@ -63,17 +98,18 @@ func newOptions(mode string) *zap.Options { }}, } default: - opts = zap.Options{ + opts = ctrlzap.Options{ Development: false, StacktraceLevel: zapcore.ErrorLevel, - Level: zapcore.InfoLevel, DestWriter: os.Stdout, } } + + opts.Level = level return &opts } -func overrideOptions(orig, override *zap.Options) { +func overrideOptions(orig, override *ctrlzap.Options) { // Development is boolean, cannot check for nil, so check if it was set isDevelopmentSet := false flag.Visit(func(f *flag.Flag) { From bde4b4e8478b5d03195e2777b9d550922e3cdcbc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20La=C5=A1=C5=A1=C3=A1k?= <73712209+mlassak@users.noreply.github.com> Date: Thu, 24 Oct 2024 15:51:35 +0200 Subject: [PATCH 22/22] secret-generator controller: avoid reporting 'Secret not found' error in reconcile (#1312) --- .../secretgenerator_controller.go | 30 +++--- .../secretgenerator_controller_test.go | 92 +++++++++++++++++++ 2 files changed, 109 insertions(+), 13 deletions(-) diff --git a/controllers/secretgenerator/secretgenerator_controller.go b/controllers/secretgenerator/secretgenerator_controller.go index 931e45d42f3..409e02e7cc7 100644 --- a/controllers/secretgenerator/secretgenerator_controller.go +++ b/controllers/secretgenerator/secretgenerator_controller.go @@ -107,12 +107,17 @@ func (r *SecretGeneratorReconciler) Reconcile(ctx context.Context, request ctrl. 
foundSecret := &corev1.Secret{} err := r.Client.Get(ctx, request.NamespacedName, foundSecret) if err != nil { - if k8serr.IsNotFound(err) { - // If Secret is deleted, delete OAuthClient if exists - err = r.deleteOAuthClient(ctx, request.Name) + if !k8serr.IsNotFound(err) { + return ctrl.Result{}, err } - return ctrl.Result{}, err + // If Secret is deleted, delete OAuthClient if exists + err = r.deleteOAuthClient(ctx, request.NamespacedName) + if err != nil { + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil } // Generate the secret if it does not previously exist @@ -252,17 +257,16 @@ func (r *SecretGeneratorReconciler) createOAuthClient(ctx context.Context, name return err } -func (r *SecretGeneratorReconciler) deleteOAuthClient(ctx context.Context, secretName string) error { - oauthClient := &oauthv1.OAuthClient{} - - err := r.Client.Get(ctx, client.ObjectKey{ - Name: secretName, - }, oauthClient) - if err != nil { - return client.IgnoreNotFound(err) +func (r *SecretGeneratorReconciler) deleteOAuthClient(ctx context.Context, secretNamespacedName types.NamespacedName) error { + oauthClient := &oauthv1.OAuthClient{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretNamespacedName.Name, + Namespace: secretNamespacedName.Namespace, + }, } - if err = r.Client.Delete(ctx, oauthClient); err != nil { + err := r.Client.Delete(ctx, oauthClient) + if err != nil && !k8serr.IsNotFound(err) { return fmt.Errorf("error deleting OAuthClient %s: %w", oauthClient.Name, err) } diff --git a/controllers/secretgenerator/secretgenerator_controller_test.go b/controllers/secretgenerator/secretgenerator_controller_test.go index a13bd8d1828..4eac042d7f0 100644 --- a/controllers/secretgenerator/secretgenerator_controller_test.go +++ b/controllers/secretgenerator/secretgenerator_controller_test.go @@ -5,8 +5,10 @@ import ( "testing" "github.com/onsi/gomega/gstruct" + oauthv1 "github.com/openshift/api/oauth/v1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + k8serr 
"k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -26,6 +28,7 @@ func newFakeClient(objs ...client.Object) client.Client { scheme := runtime.NewScheme() utilruntime.Must(corev1.AddToScheme(scheme)) utilruntime.Must(appsv1.AddToScheme(scheme)) + utilruntime.Must(oauthv1.AddToScheme(scheme)) return fake.NewClientBuilder(). WithScheme(scheme). @@ -152,3 +155,92 @@ func TestExistingSecret(t *testing.T) { g.Expect(generatedSecret.Labels).To(BeEmpty()) g.Expect(generatedSecret.StringData).To(BeEmpty()) } + +func TestSecretNotFound(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + + secretName := "fooo" + secretNs := "foooNs" + + cli := newFakeClient() + + r := secretgenerator.SecretGeneratorReconciler{ + Client: cli, + } + + _, err := r.Reconcile(ctx, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: secretName, + Namespace: secretNs, + }, + }) + // secret not found, reconcile should end without error + g.Expect(err).ShouldNot(HaveOccurred()) +} + +func TestDeleteOAuthClientIfSecretNotFound(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + + secretName := "fooo" + secretNs := "foooNs" + + // secret expected to be deleted + existingSecret := corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: secretNs, + Labels: map[string]string{ + "fooo": "bar", + }, + Annotations: map[string]string{ + annotations.SecretNameAnnotation: "bar", + annotations.SecretTypeAnnotation: "random", + }, + }, + } + + // future left-over oauth client to be cleaned up during reconcile + existingOauthClient := oauthv1.OAuthClient{ + TypeMeta: metav1.TypeMeta{ + Kind: "OAuthClient", + APIVersion: "oauth.openshift.io/v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: secretNs, + }, + Secret: secretName, + RedirectURIs: []string{"https://foo.bar"}, + GrantMethod: oauthv1.GrantHandlerAuto, + } + + cli 
:= newFakeClient(&existingSecret, &existingOauthClient) + + r := secretgenerator.SecretGeneratorReconciler{ + Client: cli, + } + + // delete secret + err := cli.Delete(ctx, &existingSecret) + g.Expect(err).ShouldNot(HaveOccurred()) + + // ensure the secret is deleted + err = cli.Get(ctx, client.ObjectKeyFromObject(&existingSecret), &existingSecret) + g.Expect(err).To(MatchError(k8serr.IsNotFound, "NotFound")) + + _, err = r.Reconcile(ctx, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: secretName, + Namespace: secretNs, + }, + }) + // secret not found, reconcile should clean-up left-over oauth client and end without error + g.Expect(err).ShouldNot(HaveOccurred()) + + // ensure the leftover OauthClient was deleted during reconcile + foundOauthClient := oauthv1.OAuthClient{} + err = cli.Get(ctx, client.ObjectKeyFromObject(&existingOauthClient), &foundOauthClient) + g.Expect(err).To(MatchError(k8serr.IsNotFound, "NotFound")) +}