From 3da2dd280d58e76feb8cb68fa858ddddd96804d0 Mon Sep 17 00:00:00 2001 From: VaishnaviHire Date: Tue, 14 Jan 2025 09:57:02 +0530 Subject: [PATCH] FEATURE: Improve Operator Reconciliation https://issues.redhat.com/browse/RHOAISTRAT-414 --- .github/workflows/check-file-updates.yaml | 43 +- .github/workflows/comment-on-pr.yaml | 59 + .github/workflows/linter.yaml | 12 +- .github/workflows/unit-tests.yaml | 4 +- .golangci.yml | 23 +- Dockerfiles/Dockerfile | 1 - Makefile | 17 +- PROJECT | 110 +- README.md | 84 +- apis/common/types.go | 84 + .../common}/zz_generated.deepcopy.go | 67 +- apis/components/component.go | 13 + apis/components/v1alpha1/codeflare_types.go | 99 + apis/components/v1alpha1/dashboard_types.go | 107 + .../v1alpha1/datasciencepipelines_types.go | 99 + apis/components/v1alpha1/groupversion_info.go | 36 + apis/components/v1alpha1/kserve_types.go | 137 ++ apis/components/v1alpha1/kueue_types.go | 98 + .../v1alpha1/modelcontroller_types.go | 101 + .../v1alpha1/modelmeshserving_types.go | 98 + .../v1alpha1/modelregistry_types.go | 115 + apis/components/v1alpha1/ray_types.go | 98 + .../v1alpha1/trainingoperator_types.go | 98 + apis/components/v1alpha1/trustyai_types.go | 98 + apis/components/v1alpha1/workbenches_types.go | 104 + .../v1alpha1/zz_generated.deepcopy.go | 1946 +++++++++++++++++ .../components}/zz_generated.deepcopy.go | 13 +- .../v1/datasciencecluster_types.go | 121 +- .../v1/zz_generated.deepcopy.go | 19 +- .../v1/dscinitialization_types.go | 23 +- .../v1/zz_generated.deepcopy.go | 15 - apis/services/service.go | 12 + apis/services/v1alpha1/auth_types.go | 72 + apis/services/v1alpha1/groupversion_info.go | 36 + apis/services/v1alpha1/monitoring_types.go | 99 + .../v1alpha1/zz_generated.deepcopy.go | 248 +++ .../services}/zz_generated.deepcopy.go | 12 +- ...ts.platform.opendatahub.io_codeflares.yaml | 158 ++ ...ts.platform.opendatahub.io_dashboards.yaml | 165 ++ ...m.opendatahub.io_datasciencepipelines.yaml | 161 ++ 
...nents.platform.opendatahub.io_kserves.yaml | 243 ++ ...onents.platform.opendatahub.io_kueues.yaml | 159 ++ ...tform.opendatahub.io_modelcontrollers.yaml | 220 ++ ...form.opendatahub.io_modelmeshservings.yaml | 159 ++ ...atform.opendatahub.io_modelregistries.yaml | 168 ++ ...mponents.platform.opendatahub.io_rays.yaml | 159 ++ ...form.opendatahub.io_trainingoperators.yaml | 159 ++ ...nts.platform.opendatahub.io_trustyais.yaml | 159 ++ ...s.platform.opendatahub.io_workbenches.yaml | 159 ++ ...er.opendatahub.io_datascienceclusters.yaml | 228 +- ...ion.opendatahub.io_dscinitializations.yaml | 21 +- .../rhods-operator.clusterserviceversion.yaml | 233 +- ...ervices.platform.opendatahub.io_auths.yaml | 144 ++ ...s.platform.opendatahub.io_monitorings.yaml | 147 ++ codecov.yml | 8 + components/Component Reconcile Workflow.png | Bin 83755 -> 0 bytes components/README.md | 70 - components/codeflare/codeflare.go | 132 -- components/codeflare/zz_generated.deepcopy.go | 39 - components/component.go | 198 -- components/dashboard/dashboard.go | 239 -- .../datasciencepipelines.go | 165 -- .../zz_generated.deepcopy.go | 39 - components/kserve/kserve.go | 185 -- components/kserve/kserve_config_handler.go | 184 -- ...dictor-authorizationpolicy.patch.tmpl.yaml | 8 - components/kserve/serverless_setup.go | 72 - components/kserve/servicemesh_setup.go | 76 - components/kserve/zz_generated.deepcopy.go | 40 - components/kueue/kueue.go | 105 - .../modelmeshserving/modelmeshserving.go | 171 -- .../modelmeshserving/zz_generated.deepcopy.go | 39 - components/modelregistry/modelregistry.go | 222 -- .../resources/servicemesh-member.tmpl.yaml | 9 - .../modelregistry/zz_generated.deepcopy.go | 39 - components/ray/ray.go | 107 - components/ray/zz_generated.deepcopy.go | 39 - .../trainingoperator/trainingoperator.go | 108 - .../trainingoperator/zz_generated.deepcopy.go | 39 - components/trustyai/trustyai.go | 120 - components/trustyai/zz_generated.deepcopy.go | 39 - 
components/workbenches/workbenches.go | 185 -- .../workbenches/zz_generated.deepcopy.go | 39 - ...ts.platform.opendatahub.io_codeflares.yaml | 152 ++ ...ts.platform.opendatahub.io_dashboards.yaml | 159 ++ ...m.opendatahub.io_datasciencepipelines.yaml | 155 ++ ...nents.platform.opendatahub.io_kserves.yaml | 237 ++ ...onents.platform.opendatahub.io_kueues.yaml | 153 ++ ...tform.opendatahub.io_modelcontrollers.yaml | 214 ++ ...form.opendatahub.io_modelmeshservings.yaml | 153 ++ ...atform.opendatahub.io_modelregistries.yaml | 162 ++ ...mponents.platform.opendatahub.io_rays.yaml | 153 ++ ...form.opendatahub.io_trainingoperators.yaml | 153 ++ ...nts.platform.opendatahub.io_trustyais.yaml | 153 ++ ...s.platform.opendatahub.io_workbenches.yaml | 153 ++ ...er.opendatahub.io_datascienceclusters.yaml | 228 +- ...ion.opendatahub.io_dscinitializations.yaml | 21 +- ...ervices.platform.opendatahub.io_auths.yaml | 138 ++ ...s.platform.opendatahub.io_monitorings.yaml | 141 ++ .../config.openshift.io_authentications.yaml | 2 +- config/crd/kustomization.yaml | 18 + .../cainjection_in_services_auths.yaml | 7 + .../cainjection_in_services_monitorings.yaml | 7 + .../patches/webhook_in_services_auths.yaml | 16 + .../webhook_in_services_monitorings.yaml | 16 + config/manager/kustomization.yaml.in | 11 +- ...atahub-operator.clusterserviceversion.yaml | 0 .../rhods-operator.clusterserviceversion.yaml | 66 + .../prometheus/apps/prometheus-configs.yaml | 148 +- .../components_codeflare_editor_role.yaml | 24 + .../components_codeflare_viewer_role.yaml | 20 + .../components_dashboard_editor_role.yaml | 24 + .../components_dashboard_viewer_role.yaml | 20 + ...ents_datasciencepipelines_editor_role.yaml | 24 + ...ents_datasciencepipelines_viewer_role.yaml | 20 + .../rbac/components_kserve_editor_role.yaml | 24 + .../rbac/components_kserve_viewer_role.yaml | 20 + config/rbac/components_kueue_editor_role.yaml | 24 + config/rbac/components_kueue_viewer_role.yaml | 20 + 
...mponents_modelmeshserving_editor_role.yaml | 24 + ...mponents_modelmeshserving_viewer_role.yaml | 20 + .../components_modelregistry_editor_role.yaml | 24 + .../components_modelregistry_viewer_role.yaml | 20 + config/rbac/components_ray_editor_role.yaml | 24 + config/rbac/components_ray_viewer_role.yaml | 20 + ...mponents_trainingoperator_editor_role.yaml | 24 + ...mponents_trainingoperator_viewer_role.yaml | 20 + .../rbac/components_trustyai_editor_role.yaml | 24 + .../rbac/components_trustyai_viewer_role.yaml | 20 + .../components_workbenches_editor_role.yaml | 24 + .../components_workbenches_viewer_role.yaml | 20 + config/rbac/role.yaml | 153 +- config/rbac/services_auth_editor_role.yaml | 31 + config/rbac/services_auth_viewer_role.yaml | 27 + .../rbac/services_monitoring_editor_role.yaml | 24 + .../rbac/services_monitoring_viewer_role.yaml | 20 + config/samples/components_v1_codeflare.yaml | 6 + config/samples/components_v1_dashboard.yaml | 6 + .../components_v1_datasciencepipelines.yaml | 6 + config/samples/components_v1_kserve.yaml | 6 + config/samples/components_v1_kueue.yaml | 6 + .../components_v1_modelmeshserving.yaml | 6 + .../samples/components_v1_modelregistry.yaml | 6 + config/samples/components_v1_ray.yaml | 6 + .../components_v1_trainingoperator.yaml | 6 + config/samples/components_v1_trustyai.yaml | 6 + config/samples/components_v1_workbenches.yaml | 6 + ...asciencecluster_v1_datasciencecluster.yaml | 5 +- config/samples/kustomization.yaml | 14 + config/samples/services_v1_monitoring.yaml | 6 + config/samples/services_v1alpha1_auth.yaml | 7 + config/webhook/service.yaml | 1 - .../certconfigmapgenerator_controller.go | 28 +- controllers/components/codeflare/codeflare.go | 107 + .../codeflare/codeflare_controller.go | 88 + .../codeflare/codeflare_controller_actions.go | 46 + .../components/codeflare/codeflare_support.go | 39 + controllers/components/dashboard/dashboard.go | 109 + .../dashboard/dashboard_controller.go | 125 ++ 
.../dashboard/dashboard_controller_actions.go | 128 ++ .../components/dashboard/dashboard_support.go | 106 + .../datasciencepipelines.go | 107 + .../datasciencepipelines_controller.go | 85 + ...datasciencepipelines_controller_actions.go | 104 + .../datasciencepipelines_support.go | 67 + controllers/components/kserve/config.go | 26 + .../components}/kserve/feature_resources.go | 0 controllers/components/kserve/kserve.go | 119 + .../components/kserve/kserve_controller.go | 167 ++ .../kserve/kserve_controller_actions.go | 279 +++ .../components/kserve/kserve_support.go | 268 +++ .../activator-envoyfilter.tmpl.yaml | 0 .../envoy-oauth-temp-fix.tmpl.yaml | 0 ...ferencegraph-authorizationpolicy.tmpl.yaml | 23 + ...serve-inferencegraph-envoyfilter.tmpl.yaml | 43 + ...ve-predictor-authorizationpolicy.tmpl.yaml | 2 +- .../routing/istio-ingress-gateway.tmpl.yaml | 0 .../istio-kserve-local-gateway.tmpl.yaml | 0 .../routing/istio-local-gateway.yaml | 0 .../kserve-local-gateway-svc.tmpl.yaml | 0 .../routing/local-gateway-svc.tmpl.yaml | 0 .../serving-install/knative-serving.tmpl.yaml | 0 .../service-mesh-subscription.tmpl.yaml | 0 ...net-istio-secret-filtering.patch.tmpl.yaml | 0 controllers/components/kueue/kueue.go | 107 + .../components/kueue/kueue_controller.go | 88 + .../kueue/kueue_controller_actions.go | 44 + controllers/components/kueue/kueue_support.go | 35 + .../modelcontroller/modelcontroller.go | 121 + .../modelcontroller_actions.go | 101 + .../modelcontroller_controller.go | 91 + .../modelcontroller_support.go | 43 + .../modelmeshserving/modelmeshserving.go | 109 + .../modelmeshserving_actions.go | 69 + .../modelmeshserving_controller.go | 90 + .../modelmeshserving_support.go | 46 + .../components/modelregistry/modelregistry.go | 109 + .../modelregistry/modelregistry_controller.go | 111 + .../modelregistry_controller_actions.go | 163 ++ .../modelregistry/modelregistry_support.go | 60 + .../resources/servicemesh-member.tmpl.yaml | 9 + 
controllers/components/ray/ray.go | 107 + controllers/components/ray/ray_controller.go | 81 + .../components/ray/ray_controller_actions.go | 61 + controllers/components/ray/ray_support.go | 35 + controllers/components/suite_test.go | 80 + .../trainingoperator/trainingoperator.go | 106 + .../trainingoperator_controller.go | 78 + .../trainingoperator_controller_actions.go | 55 + .../trainingoperator_support.go | 35 + controllers/components/trustyai/trustyai.go | 109 + .../trustyai/trustyai_controller.go | 79 + .../trustyai/trustyai_controller_actions.go | 56 + .../components/trustyai/trustyai_support.go | 44 + .../components/workbenches/workbenches.go | 117 + .../workbenches/workbenches_controller.go | 84 + .../workbenches_controller_actions.go | 112 + .../workbenches/workbenches_support.go | 75 + .../datasciencecluster_controller.go | 704 ++---- .../datasciencecluster/kubebuilder_rbac.go | 214 +- controllers/dscinitialization/auth.go | 36 + .../dscinitialization_controller.go | 121 +- .../dscinitialization_test.go | 37 +- .../dscinitialization/kubebuilder_rbac.go | 48 + controllers/dscinitialization/monitoring.go | 111 +- .../dscinitialization/servicemesh_setup.go | 13 +- controllers/dscinitialization/suite_test.go | 13 +- controllers/dscinitialization/utils.go | 69 +- .../secretgenerator_controller.go | 27 +- .../secretgenerator_controller_test.go | 39 +- controllers/services/auth/auth_controller.go | 96 + .../services/auth/auth_controller_actions.go | 204 ++ .../services/auth/auth_controller_support.go | 14 + .../admingroup-clusterrole.tmpl.yaml | 20 + .../auth/resources/admingroup-role.tmpl.yaml | 21 + .../resources/allowedgroup-role.tmpl.yaml | 20 + .../monitoring/monitoring_controller.go | 104 + .../monitoring_controller_actions.go | 48 + controllers/services/suite_test.go | 80 + .../setupcontroller/setup_controller.go | 83 + controllers/status/status.go | 28 + controllers/webhook/webhook.go | 34 +- controllers/webhook/webhook_suite_test.go | 77 +- 
docs/DESIGN.md | 15 + docs/api-overview.md | 1902 ++++++++++++++-- get_all_manifests.sh | 38 +- go.mod | 47 +- go.sum | 82 +- main.go | 161 +- pkg/cluster/cert.go | 27 +- pkg/cluster/cluster_config.go | 69 +- pkg/cluster/const.go | 2 + pkg/cluster/gvk/gvk.go | 158 +- pkg/cluster/resources.go | 27 +- pkg/cluster/roles.go | 4 +- pkg/common/common.go | 21 + pkg/componentsregistry/componentsregistry.go | 56 + pkg/controller/actions/actions.go | 24 + .../deleteresource/action_delete_resources.go | 76 + .../action_delete_resources_test.go | 85 + .../actions/deploy/action_deploy.go | 470 ++++ .../actions/deploy/action_deploy_cache.go | 117 + .../deploy/action_deploy_cache_test.go | 261 +++ .../deploy/action_deploy_merge_deployment.go | 108 + .../action_deploy_merge_deployment_test.go | 145 ++ .../actions/deploy/action_deploy_metrics.go | 29 + ...tion_deploy_remove_deployment_resources.go | 46 + ...deploy_remove_deployment_resources_test.go | 57 + .../actions/deploy/action_deploy_support.go | 73 + .../deploy/action_deploy_support_test.go | 189 ++ .../actions/deploy/action_deploy_test.go | 636 ++++++ pkg/controller/actions/errors/errors.go | 24 + pkg/controller/actions/gc/action_gc.go | 144 ++ .../actions/gc/action_gc_metrics.go | 43 + .../actions/gc/action_gc_support.go | 46 + pkg/controller/actions/gc/action_gc_test.go | 255 +++ .../kustomize/action_render_manifests.go | 157 ++ .../kustomize/action_render_manifests_test.go | 247 +++ .../actions/render/render_metrics.go | 32 + .../actions/render/render_support.go | 7 + .../template/action_render_templates.go | 155 ++ .../template/action_render_templates_test.go | 203 ++ .../template/resources/smm-data.tmpl.yaml | 12 + .../render/template/resources/smm.tmpl.yaml | 11 + pkg/controller/actions/security/actions.go | 26 + .../actions/security/actions_test.go | 78 + .../updatestatus/action_update_status.go | 119 + .../updatestatus/action_update_status_test.go | 305 +++ pkg/controller/client/client.go | 123 ++ 
pkg/controller/handlers/handlers.go | 63 + pkg/controller/manager/manager.go | 42 + .../predicates/clusterrole/clusterrole.go | 41 + .../predicates/component/component.go | 36 + .../predicates/dependent/dependent.go | 109 + .../predicates/generation/generation.go | 32 + pkg/controller/predicates/hash/hash.go | 38 + pkg/controller/predicates/partial/partial.go | 77 + pkg/controller/predicates/predicates.go | 21 + .../predicates/resources/resources.go | 55 + pkg/controller/reconciler/reconciler.go | 220 ++ .../reconciler/reconciler_actions.go | 86 + .../reconciler/reconciler_actions_test.go | 255 +++ .../reconciler/reconciler_metrics.go | 30 + .../reconciler/reconciler_support.go | 280 +++ pkg/controller/types/types.go | 210 ++ pkg/controller/types/types_test.go | 122 ++ pkg/deploy/deploy.go | 183 +- pkg/feature/servicemesh/conditions.go | 6 +- pkg/logger/logger.go | 135 +- pkg/manifests/kustomize/kustomize.go | 28 + pkg/manifests/kustomize/kustomize_engine.go | 88 + pkg/manifests/kustomize/kustomize_filters.go | 28 + pkg/manifests/kustomize/kustomize_opts.go | 21 + .../kustomize/kustomize_render_opts.go | 101 + pkg/manifests/kustomize/kustomize_support.go | 16 + pkg/manifests/kustomize/kustomize_test.go | 68 + pkg/metadata/annotations/annotations.go | 11 + pkg/metadata/labels/types.go | 3 + pkg/plugins/addAnnotationsplugin.go | 20 + pkg/plugins/addLabelsplugin.go | 12 +- pkg/plugins/removerplugin.go | 38 +- pkg/resources/resources.go | 329 +++ pkg/resources/resources_test.go | 138 ++ pkg/resources/resources_types.go | 19 + pkg/services/gc/gc.go | 341 +++ pkg/services/gc/gc_support.go | 78 + pkg/services/gc/gc_test.go | 98 + pkg/services/monitoring/prometheus.go | 126 ++ pkg/trustedcabundle/trustedcabundle.go | 11 +- pkg/upgrade/uninstallation.go | 48 +- pkg/upgrade/upgrade.go | 177 +- pkg/utils/test/fakeclient/fakeclient.go | 53 + pkg/utils/test/matchers/jq/jq_matcher.go | 59 + pkg/utils/test/matchers/jq/jq_matcher_test.go | 89 + 
pkg/utils/test/matchers/jq/jq_support.go | 129 ++ pkg/utils/test/matchers/jq/jq_support_test.go | 54 + pkg/utils/test/matchers/jq/jq_transform.go | 46 + .../test/matchers/jq/jq_transform_test.go | 60 + pkg/utils/test/matchers/matechers.go | 19 + pkg/utils/test/testf/testf.go | 186 ++ pkg/utils/test/testf/testf_assertions.go | 167 ++ pkg/utils/test/testf/testf_support.go | 105 + pkg/utils/test/testf/testf_support_test.go | 132 ++ pkg/utils/test/testf/testf_witht.go | 171 ++ pkg/utils/test/testf/testf_witht_test.go | 237 ++ tests/e2e/authcontroller_test.go | 189 ++ tests/e2e/codeflare_test.go | 29 + tests/e2e/components_test.go | 280 +++ tests/e2e/controller_test.go | 133 +- tests/e2e/creation_test.go | 453 +--- tests/e2e/dashboard_test.go | 85 + tests/e2e/datasciencepipelines_test.go | 29 + tests/e2e/deletion_test.go | 67 +- tests/e2e/helper_test.go | 151 +- tests/e2e/kserve_test.go | 101 + tests/e2e/kueue_test.go | 29 + tests/e2e/modelcontroller_test.go | 129 ++ tests/e2e/modelmeshserving_test.go | 55 + tests/e2e/modelregistry_test.go | 155 ++ tests/e2e/odh_manager_test.go | 67 + tests/e2e/ray_test.go | 29 + tests/e2e/trainingoperator_test.go | 29 + tests/e2e/trustyai_test.go | 29 + tests/e2e/workbenches_test.go | 325 +++ .../integration/features/cleanup_int_test.go | 10 +- .../features/features_suite_int_test.go | 1 - .../features/serverless_feature_test.go | 38 +- .../features/servicemesh_feature_test.go | 4 +- 368 files changed, 30420 insertions(+), 4926 deletions(-) create mode 100644 .github/workflows/comment-on-pr.yaml create mode 100644 apis/common/types.go rename {components => apis/common}/zz_generated.deepcopy.go (55%) create mode 100644 apis/components/component.go create mode 100644 apis/components/v1alpha1/codeflare_types.go create mode 100644 apis/components/v1alpha1/dashboard_types.go create mode 100644 apis/components/v1alpha1/datasciencepipelines_types.go create mode 100644 apis/components/v1alpha1/groupversion_info.go create mode 100644 
apis/components/v1alpha1/kserve_types.go create mode 100644 apis/components/v1alpha1/kueue_types.go create mode 100644 apis/components/v1alpha1/modelcontroller_types.go create mode 100644 apis/components/v1alpha1/modelmeshserving_types.go create mode 100644 apis/components/v1alpha1/modelregistry_types.go create mode 100644 apis/components/v1alpha1/ray_types.go create mode 100644 apis/components/v1alpha1/trainingoperator_types.go create mode 100644 apis/components/v1alpha1/trustyai_types.go create mode 100644 apis/components/v1alpha1/workbenches_types.go create mode 100644 apis/components/v1alpha1/zz_generated.deepcopy.go rename {components/dashboard => apis/components}/zz_generated.deepcopy.go (77%) create mode 100644 apis/services/service.go create mode 100644 apis/services/v1alpha1/auth_types.go create mode 100644 apis/services/v1alpha1/groupversion_info.go create mode 100644 apis/services/v1alpha1/monitoring_types.go create mode 100644 apis/services/v1alpha1/zz_generated.deepcopy.go rename {components/kueue => apis/services}/zz_generated.deepcopy.go (81%) create mode 100644 bundle/manifests/components.platform.opendatahub.io_codeflares.yaml create mode 100644 bundle/manifests/components.platform.opendatahub.io_dashboards.yaml create mode 100644 bundle/manifests/components.platform.opendatahub.io_datasciencepipelines.yaml create mode 100644 bundle/manifests/components.platform.opendatahub.io_kserves.yaml create mode 100644 bundle/manifests/components.platform.opendatahub.io_kueues.yaml create mode 100644 bundle/manifests/components.platform.opendatahub.io_modelcontrollers.yaml create mode 100644 bundle/manifests/components.platform.opendatahub.io_modelmeshservings.yaml create mode 100644 bundle/manifests/components.platform.opendatahub.io_modelregistries.yaml create mode 100644 bundle/manifests/components.platform.opendatahub.io_rays.yaml create mode 100644 bundle/manifests/components.platform.opendatahub.io_trainingoperators.yaml create mode 100644 
bundle/manifests/components.platform.opendatahub.io_trustyais.yaml create mode 100644 bundle/manifests/components.platform.opendatahub.io_workbenches.yaml create mode 100644 bundle/manifests/services.platform.opendatahub.io_auths.yaml create mode 100644 bundle/manifests/services.platform.opendatahub.io_monitorings.yaml create mode 100644 codecov.yml delete mode 100644 components/Component Reconcile Workflow.png delete mode 100644 components/README.md delete mode 100644 components/codeflare/codeflare.go delete mode 100644 components/codeflare/zz_generated.deepcopy.go delete mode 100644 components/component.go delete mode 100644 components/dashboard/dashboard.go delete mode 100644 components/datasciencepipelines/datasciencepipelines.go delete mode 100644 components/datasciencepipelines/zz_generated.deepcopy.go delete mode 100644 components/kserve/kserve.go delete mode 100644 components/kserve/kserve_config_handler.go delete mode 100644 components/kserve/resources/servicemesh/z-migrations/kserve-predictor-authorizationpolicy.patch.tmpl.yaml delete mode 100644 components/kserve/serverless_setup.go delete mode 100644 components/kserve/servicemesh_setup.go delete mode 100644 components/kserve/zz_generated.deepcopy.go delete mode 100644 components/kueue/kueue.go delete mode 100644 components/modelmeshserving/modelmeshserving.go delete mode 100644 components/modelmeshserving/zz_generated.deepcopy.go delete mode 100644 components/modelregistry/modelregistry.go delete mode 100644 components/modelregistry/resources/servicemesh-member.tmpl.yaml delete mode 100644 components/modelregistry/zz_generated.deepcopy.go delete mode 100644 components/ray/ray.go delete mode 100644 components/ray/zz_generated.deepcopy.go delete mode 100644 components/trainingoperator/trainingoperator.go delete mode 100644 components/trainingoperator/zz_generated.deepcopy.go delete mode 100644 components/trustyai/trustyai.go delete mode 100644 components/trustyai/zz_generated.deepcopy.go delete mode 
100644 components/workbenches/workbenches.go delete mode 100644 components/workbenches/zz_generated.deepcopy.go create mode 100644 config/crd/bases/components.platform.opendatahub.io_codeflares.yaml create mode 100644 config/crd/bases/components.platform.opendatahub.io_dashboards.yaml create mode 100644 config/crd/bases/components.platform.opendatahub.io_datasciencepipelines.yaml create mode 100644 config/crd/bases/components.platform.opendatahub.io_kserves.yaml create mode 100644 config/crd/bases/components.platform.opendatahub.io_kueues.yaml create mode 100644 config/crd/bases/components.platform.opendatahub.io_modelcontrollers.yaml create mode 100644 config/crd/bases/components.platform.opendatahub.io_modelmeshservings.yaml create mode 100644 config/crd/bases/components.platform.opendatahub.io_modelregistries.yaml create mode 100644 config/crd/bases/components.platform.opendatahub.io_rays.yaml create mode 100644 config/crd/bases/components.platform.opendatahub.io_trainingoperators.yaml create mode 100644 config/crd/bases/components.platform.opendatahub.io_trustyais.yaml create mode 100644 config/crd/bases/components.platform.opendatahub.io_workbenches.yaml create mode 100644 config/crd/bases/services.platform.opendatahub.io_auths.yaml create mode 100644 config/crd/bases/services.platform.opendatahub.io_monitorings.yaml create mode 100644 config/crd/patches/cainjection_in_services_auths.yaml create mode 100644 config/crd/patches/cainjection_in_services_monitorings.yaml create mode 100644 config/crd/patches/webhook_in_services_auths.yaml create mode 100644 config/crd/patches/webhook_in_services_monitorings.yaml create mode 100644 config/manifests/bases/opendatahub-operator.clusterserviceversion.yaml create mode 100644 config/rbac/components_codeflare_editor_role.yaml create mode 100644 config/rbac/components_codeflare_viewer_role.yaml create mode 100644 config/rbac/components_dashboard_editor_role.yaml create mode 100644 
config/rbac/components_dashboard_viewer_role.yaml create mode 100644 config/rbac/components_datasciencepipelines_editor_role.yaml create mode 100644 config/rbac/components_datasciencepipelines_viewer_role.yaml create mode 100644 config/rbac/components_kserve_editor_role.yaml create mode 100644 config/rbac/components_kserve_viewer_role.yaml create mode 100644 config/rbac/components_kueue_editor_role.yaml create mode 100644 config/rbac/components_kueue_viewer_role.yaml create mode 100644 config/rbac/components_modelmeshserving_editor_role.yaml create mode 100644 config/rbac/components_modelmeshserving_viewer_role.yaml create mode 100644 config/rbac/components_modelregistry_editor_role.yaml create mode 100644 config/rbac/components_modelregistry_viewer_role.yaml create mode 100644 config/rbac/components_ray_editor_role.yaml create mode 100644 config/rbac/components_ray_viewer_role.yaml create mode 100644 config/rbac/components_trainingoperator_editor_role.yaml create mode 100644 config/rbac/components_trainingoperator_viewer_role.yaml create mode 100644 config/rbac/components_trustyai_editor_role.yaml create mode 100644 config/rbac/components_trustyai_viewer_role.yaml create mode 100644 config/rbac/components_workbenches_editor_role.yaml create mode 100644 config/rbac/components_workbenches_viewer_role.yaml create mode 100644 config/rbac/services_auth_editor_role.yaml create mode 100644 config/rbac/services_auth_viewer_role.yaml create mode 100644 config/rbac/services_monitoring_editor_role.yaml create mode 100644 config/rbac/services_monitoring_viewer_role.yaml create mode 100644 config/samples/components_v1_codeflare.yaml create mode 100644 config/samples/components_v1_dashboard.yaml create mode 100644 config/samples/components_v1_datasciencepipelines.yaml create mode 100644 config/samples/components_v1_kserve.yaml create mode 100644 config/samples/components_v1_kueue.yaml create mode 100644 config/samples/components_v1_modelmeshserving.yaml create mode 100644 
config/samples/components_v1_modelregistry.yaml create mode 100644 config/samples/components_v1_ray.yaml create mode 100644 config/samples/components_v1_trainingoperator.yaml create mode 100644 config/samples/components_v1_trustyai.yaml create mode 100644 config/samples/components_v1_workbenches.yaml create mode 100644 config/samples/services_v1_monitoring.yaml create mode 100644 config/samples/services_v1alpha1_auth.yaml create mode 100644 controllers/components/codeflare/codeflare.go create mode 100644 controllers/components/codeflare/codeflare_controller.go create mode 100644 controllers/components/codeflare/codeflare_controller_actions.go create mode 100644 controllers/components/codeflare/codeflare_support.go create mode 100644 controllers/components/dashboard/dashboard.go create mode 100644 controllers/components/dashboard/dashboard_controller.go create mode 100644 controllers/components/dashboard/dashboard_controller_actions.go create mode 100644 controllers/components/dashboard/dashboard_support.go create mode 100644 controllers/components/datasciencepipelines/datasciencepipelines.go create mode 100644 controllers/components/datasciencepipelines/datasciencepipelines_controller.go create mode 100644 controllers/components/datasciencepipelines/datasciencepipelines_controller_actions.go create mode 100644 controllers/components/datasciencepipelines/datasciencepipelines_support.go create mode 100644 controllers/components/kserve/config.go rename {components => controllers/components}/kserve/feature_resources.go (100%) create mode 100644 controllers/components/kserve/kserve.go create mode 100644 controllers/components/kserve/kserve_controller.go create mode 100644 controllers/components/kserve/kserve_controller_actions.go create mode 100644 controllers/components/kserve/kserve_support.go rename {components => controllers/components}/kserve/resources/servicemesh/activator-envoyfilter.tmpl.yaml (100%) rename {components => 
controllers/components}/kserve/resources/servicemesh/envoy-oauth-temp-fix.tmpl.yaml (100%) create mode 100644 controllers/components/kserve/resources/servicemesh/kserve-inferencegraph-authorizationpolicy.tmpl.yaml create mode 100644 controllers/components/kserve/resources/servicemesh/kserve-inferencegraph-envoyfilter.tmpl.yaml rename {components => controllers/components}/kserve/resources/servicemesh/kserve-predictor-authorizationpolicy.tmpl.yaml (91%) rename {components => controllers/components}/kserve/resources/servicemesh/routing/istio-ingress-gateway.tmpl.yaml (100%) rename {components => controllers/components}/kserve/resources/servicemesh/routing/istio-kserve-local-gateway.tmpl.yaml (100%) rename {components => controllers/components}/kserve/resources/servicemesh/routing/istio-local-gateway.yaml (100%) rename {components => controllers/components}/kserve/resources/servicemesh/routing/kserve-local-gateway-svc.tmpl.yaml (100%) rename {components => controllers/components}/kserve/resources/servicemesh/routing/local-gateway-svc.tmpl.yaml (100%) rename {components => controllers/components}/kserve/resources/serving-install/knative-serving.tmpl.yaml (100%) rename {components => controllers/components}/kserve/resources/serving-install/service-mesh-subscription.tmpl.yaml (100%) rename {components => controllers/components}/kserve/resources/serving-net-istio-secret-filtering.patch.tmpl.yaml (100%) create mode 100644 controllers/components/kueue/kueue.go create mode 100644 controllers/components/kueue/kueue_controller.go create mode 100644 controllers/components/kueue/kueue_controller_actions.go create mode 100644 controllers/components/kueue/kueue_support.go create mode 100644 controllers/components/modelcontroller/modelcontroller.go create mode 100644 controllers/components/modelcontroller/modelcontroller_actions.go create mode 100644 controllers/components/modelcontroller/modelcontroller_controller.go create mode 100644 
controllers/components/modelcontroller/modelcontroller_support.go create mode 100644 controllers/components/modelmeshserving/modelmeshserving.go create mode 100644 controllers/components/modelmeshserving/modelmeshserving_actions.go create mode 100644 controllers/components/modelmeshserving/modelmeshserving_controller.go create mode 100644 controllers/components/modelmeshserving/modelmeshserving_support.go create mode 100644 controllers/components/modelregistry/modelregistry.go create mode 100644 controllers/components/modelregistry/modelregistry_controller.go create mode 100644 controllers/components/modelregistry/modelregistry_controller_actions.go create mode 100644 controllers/components/modelregistry/modelregistry_support.go create mode 100644 controllers/components/modelregistry/resources/servicemesh-member.tmpl.yaml create mode 100644 controllers/components/ray/ray.go create mode 100644 controllers/components/ray/ray_controller.go create mode 100644 controllers/components/ray/ray_controller_actions.go create mode 100644 controllers/components/ray/ray_support.go create mode 100644 controllers/components/suite_test.go create mode 100644 controllers/components/trainingoperator/trainingoperator.go create mode 100644 controllers/components/trainingoperator/trainingoperator_controller.go create mode 100644 controllers/components/trainingoperator/trainingoperator_controller_actions.go create mode 100644 controllers/components/trainingoperator/trainingoperator_support.go create mode 100644 controllers/components/trustyai/trustyai.go create mode 100644 controllers/components/trustyai/trustyai_controller.go create mode 100644 controllers/components/trustyai/trustyai_controller_actions.go create mode 100644 controllers/components/trustyai/trustyai_support.go create mode 100644 controllers/components/workbenches/workbenches.go create mode 100644 controllers/components/workbenches/workbenches_controller.go create mode 100644 
controllers/components/workbenches/workbenches_controller_actions.go create mode 100644 controllers/components/workbenches/workbenches_support.go create mode 100644 controllers/dscinitialization/auth.go create mode 100644 controllers/dscinitialization/kubebuilder_rbac.go create mode 100644 controllers/services/auth/auth_controller.go create mode 100644 controllers/services/auth/auth_controller_actions.go create mode 100644 controllers/services/auth/auth_controller_support.go create mode 100644 controllers/services/auth/resources/admingroup-clusterrole.tmpl.yaml create mode 100644 controllers/services/auth/resources/admingroup-role.tmpl.yaml create mode 100644 controllers/services/auth/resources/allowedgroup-role.tmpl.yaml create mode 100644 controllers/services/monitoring/monitoring_controller.go create mode 100644 controllers/services/monitoring/monitoring_controller_actions.go create mode 100644 controllers/services/suite_test.go create mode 100644 controllers/setupcontroller/setup_controller.go create mode 100644 pkg/componentsregistry/componentsregistry.go create mode 100644 pkg/controller/actions/actions.go create mode 100644 pkg/controller/actions/deleteresource/action_delete_resources.go create mode 100644 pkg/controller/actions/deleteresource/action_delete_resources_test.go create mode 100644 pkg/controller/actions/deploy/action_deploy.go create mode 100644 pkg/controller/actions/deploy/action_deploy_cache.go create mode 100644 pkg/controller/actions/deploy/action_deploy_cache_test.go create mode 100644 pkg/controller/actions/deploy/action_deploy_merge_deployment.go create mode 100644 pkg/controller/actions/deploy/action_deploy_merge_deployment_test.go create mode 100644 pkg/controller/actions/deploy/action_deploy_metrics.go create mode 100644 pkg/controller/actions/deploy/action_deploy_remove_deployment_resources.go create mode 100644 pkg/controller/actions/deploy/action_deploy_remove_deployment_resources_test.go create mode 100644 
pkg/controller/actions/deploy/action_deploy_support.go create mode 100644 pkg/controller/actions/deploy/action_deploy_support_test.go create mode 100644 pkg/controller/actions/deploy/action_deploy_test.go create mode 100644 pkg/controller/actions/errors/errors.go create mode 100644 pkg/controller/actions/gc/action_gc.go create mode 100644 pkg/controller/actions/gc/action_gc_metrics.go create mode 100644 pkg/controller/actions/gc/action_gc_support.go create mode 100644 pkg/controller/actions/gc/action_gc_test.go create mode 100644 pkg/controller/actions/render/kustomize/action_render_manifests.go create mode 100644 pkg/controller/actions/render/kustomize/action_render_manifests_test.go create mode 100644 pkg/controller/actions/render/render_metrics.go create mode 100644 pkg/controller/actions/render/render_support.go create mode 100644 pkg/controller/actions/render/template/action_render_templates.go create mode 100644 pkg/controller/actions/render/template/action_render_templates_test.go create mode 100644 pkg/controller/actions/render/template/resources/smm-data.tmpl.yaml create mode 100644 pkg/controller/actions/render/template/resources/smm.tmpl.yaml create mode 100644 pkg/controller/actions/security/actions.go create mode 100644 pkg/controller/actions/security/actions_test.go create mode 100644 pkg/controller/actions/updatestatus/action_update_status.go create mode 100644 pkg/controller/actions/updatestatus/action_update_status_test.go create mode 100644 pkg/controller/client/client.go create mode 100644 pkg/controller/handlers/handlers.go create mode 100644 pkg/controller/manager/manager.go create mode 100644 pkg/controller/predicates/clusterrole/clusterrole.go create mode 100644 pkg/controller/predicates/component/component.go create mode 100644 pkg/controller/predicates/dependent/dependent.go create mode 100644 pkg/controller/predicates/generation/generation.go create mode 100644 pkg/controller/predicates/hash/hash.go create mode 100644 
pkg/controller/predicates/partial/partial.go create mode 100644 pkg/controller/predicates/predicates.go create mode 100644 pkg/controller/predicates/resources/resources.go create mode 100644 pkg/controller/reconciler/reconciler.go create mode 100644 pkg/controller/reconciler/reconciler_actions.go create mode 100644 pkg/controller/reconciler/reconciler_actions_test.go create mode 100644 pkg/controller/reconciler/reconciler_metrics.go create mode 100644 pkg/controller/reconciler/reconciler_support.go create mode 100644 pkg/controller/types/types.go create mode 100644 pkg/controller/types/types_test.go create mode 100644 pkg/manifests/kustomize/kustomize.go create mode 100644 pkg/manifests/kustomize/kustomize_engine.go create mode 100644 pkg/manifests/kustomize/kustomize_filters.go create mode 100644 pkg/manifests/kustomize/kustomize_opts.go create mode 100644 pkg/manifests/kustomize/kustomize_render_opts.go create mode 100644 pkg/manifests/kustomize/kustomize_support.go create mode 100644 pkg/manifests/kustomize/kustomize_test.go create mode 100644 pkg/plugins/addAnnotationsplugin.go create mode 100644 pkg/resources/resources.go create mode 100644 pkg/resources/resources_test.go create mode 100644 pkg/resources/resources_types.go create mode 100644 pkg/services/gc/gc.go create mode 100644 pkg/services/gc/gc_support.go create mode 100644 pkg/services/gc/gc_test.go create mode 100644 pkg/services/monitoring/prometheus.go create mode 100644 pkg/utils/test/fakeclient/fakeclient.go create mode 100644 pkg/utils/test/matchers/jq/jq_matcher.go create mode 100644 pkg/utils/test/matchers/jq/jq_matcher_test.go create mode 100644 pkg/utils/test/matchers/jq/jq_support.go create mode 100644 pkg/utils/test/matchers/jq/jq_support_test.go create mode 100644 pkg/utils/test/matchers/jq/jq_transform.go create mode 100644 pkg/utils/test/matchers/jq/jq_transform_test.go create mode 100644 pkg/utils/test/matchers/matechers.go create mode 100644 pkg/utils/test/testf/testf.go create mode 
100644 pkg/utils/test/testf/testf_assertions.go create mode 100644 pkg/utils/test/testf/testf_support.go create mode 100644 pkg/utils/test/testf/testf_support_test.go create mode 100644 pkg/utils/test/testf/testf_witht.go create mode 100644 pkg/utils/test/testf/testf_witht_test.go create mode 100644 tests/e2e/authcontroller_test.go create mode 100644 tests/e2e/codeflare_test.go create mode 100644 tests/e2e/components_test.go create mode 100644 tests/e2e/dashboard_test.go create mode 100644 tests/e2e/datasciencepipelines_test.go create mode 100644 tests/e2e/kserve_test.go create mode 100644 tests/e2e/kueue_test.go create mode 100644 tests/e2e/modelcontroller_test.go create mode 100644 tests/e2e/modelmeshserving_test.go create mode 100644 tests/e2e/modelregistry_test.go create mode 100644 tests/e2e/ray_test.go create mode 100644 tests/e2e/trainingoperator_test.go create mode 100644 tests/e2e/trustyai_test.go create mode 100644 tests/e2e/workbenches_test.go diff --git a/.github/workflows/check-file-updates.yaml b/.github/workflows/check-file-updates.yaml index bf142941ecb..8c27a6e6c8d 100644 --- a/.github/workflows/check-file-updates.yaml +++ b/.github/workflows/check-file-updates.yaml @@ -1,47 +1,36 @@ name: Check config and readme updates on: - pull_request_target: + pull_request: jobs: file-updates: - permissions: - pull-requests: write name: Ensure generated files are included runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v4.2.2 with: ref: ${{github.event.pull_request.head.ref}} repository: ${{github.event.pull_request.head.repo.full_name}} - - name: Generate files - id: generate-files + - name: Save the PR number for artifact upload run: | - CMD="make generate manifests api-docs" - $CMD - echo "CMD=$CMD" >> $GITHUB_OUTPUT + echo ${{ github.event.number }} > pr_number.txt + - name: Upload the PR number as artifact + id: artifact-upload + uses: actions/upload-artifact@v4 + with: + name: pr_number + path: ./pr_number.txt + 
retention-days: 1 # This will delete the generated artifacts every day. + - name: Generate files + run: make generate manifests api-docs - name: Ensure generated files are up-to-date id: check_generated_files run : | + rm ./pr_number.txt # remove the pr_number.txt before checking "git status", to have correct assessment of the changed files. if [[ -n $(git status -s) ]] then echo "Generated files have been missed in the PR" git diff - echo "missing_generated_files=true" >> $GITHUB_OUTPUT + exit 1 else echo "No new files to commit" - echo "missing_generated_files=false" >> $GITHUB_OUTPUT - fi - - name: Report issue in PR - if: ${{ steps.check_generated_files.outputs.missing_generated_files == 'true' }} - uses: thollander/actions-comment-pull-request@v2 - with: - message: | - ## This PR can't be merged just yet 😢 - - Please run `${{ steps.generate-files.outputs.CMD }}` and commit the changes. - - For more info: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} - - name: Print git status and fail pr - if: ${{ steps.check_generated_files.outputs.missing_generated_files == 'true' }} - run: | - git status - exit 1 + fi \ No newline at end of file diff --git a/.github/workflows/comment-on-pr.yaml b/.github/workflows/comment-on-pr.yaml new file mode 100644 index 00000000000..77272613b05 --- /dev/null +++ b/.github/workflows/comment-on-pr.yaml @@ -0,0 +1,59 @@ +name: Comment on pr +on: + workflow_run: + workflows: ["Check config and readme updates"] + types: + - completed +jobs: + download-artifact-data: + if: ${{ github.event.workflow_run.conclusion == 'failure' }} + runs-on: ubuntu-latest + outputs: + pr_number: ${{ steps.artifact-data.outputs.pr_number }} + steps: + - name: Download artifact + id: artifact-download + uses: actions/github-script@v7 + with: + script: | + let allArtifacts = await github.rest.actions.listWorkflowRunArtifacts({ + owner: context.repo.owner, + repo: context.repo.repo, + run_id: context.payload.workflow_run.id, 
+ }); + + let matchArtifact = allArtifacts.data.artifacts.filter((artifact) => { + return artifact.name == "pr_number" + })[0]; + + let download = await github.rest.actions.downloadArtifact({ + owner: context.repo.owner, + repo: context.repo.repo, + artifact_id: matchArtifact.id, + archive_format: 'zip', + }); + let fs = require('fs'); + fs.writeFileSync(`${process.env.GITHUB_WORKSPACE}/pr_number.zip`, Buffer.from(download.data)); + - name: Unzip artifact + run: unzip pr_number.zip + - name: Extract data + id: artifact-data + run: | + echo "pr_number=$(head -n 1 pr_number.txt)" >> $GITHUB_OUTPUT + comment-on-pr: + needs: + - download-artifact-data + runs-on: ubuntu-latest + permissions: + pull-requests: write + steps: + - name: Report issue in PR + uses: thollander/actions-comment-pull-request@v3.0.1 + with: + message: | + ## This PR can't be merged just yet 😢 + + Please run `make generate manifests api-docs` and commit the changes. + + For more info: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.event.workflow_run.id }} + pr-number: ${{ needs.download-artifact-data.outputs.pr_number }} \ No newline at end of file diff --git a/.github/workflows/linter.yaml b/.github/workflows/linter.yaml index e7c1495dea8..9527d168172 100644 --- a/.github/workflows/linter.yaml +++ b/.github/workflows/linter.yaml @@ -5,6 +5,10 @@ on: - main - incubation pull_request: +permissions: + contents: read + pull-requests: read + checks: write jobs: golangci: name: golangci-lint @@ -15,8 +19,6 @@ jobs: uses: actions/setup-go@v5 with: go-version-file: go.mod - - name: golangci-lint - uses: golangci/golangci-lint-action@v6 - with: - version: v1.60.2 - args: --timeout 5m0s + - name: lint + run: + make lint diff --git a/.github/workflows/unit-tests.yaml b/.github/workflows/unit-tests.yaml index d14e095d9b3..30d8212f5d1 100644 --- a/.github/workflows/unit-tests.yaml +++ b/.github/workflows/unit-tests.yaml @@ -1,5 +1,5 @@ name: Unit Tests -on: +on: push: branches: - 
rhoai @@ -21,6 +21,6 @@ jobs: run: make unit-test - name: Upload results to Codecov - uses: codecov/codecov-action@v4.6.0 + uses: codecov/codecov-action@v5.1.2 with: token: ${{ secrets.CODECOV_TOKEN }} diff --git a/.golangci.yml b/.golangci.yml index af1ffddee58..93d110312de 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -58,6 +58,14 @@ linters-settings: - stdlib # also allow generics - generic + - EventHandler # for ToOwner + - discovery.DiscoveryInterface + - dynamic.Interface + - predicate.Predicate + - client.Object + - common.PlatformObject + - types.AsyncAssertion + - kubernetes.Interface revive: rules: - name: dot-imports @@ -66,6 +74,12 @@ linters-settings: perfsprint: sprintf1: false strconcat: false + # Enable gocritic for detecting bugs, performance, and style issues: https://golangci-lint.run/usage/linters/#gocritic + gocritic: + # https://go-critic.com/overview.html#checkers + enabled-checks: + - deferInLoop + - unnecessaryDefer linters: enable-all: true @@ -75,7 +89,6 @@ linters: - forbidigo - gochecknoglobals # Prevents use of global vars. - gofumpt - - gomnd # Doesnot allow hardcoded numbers - gomoddirectives # Doesnot allow replace in go mod file - mnd - nestif @@ -85,11 +98,12 @@ linters: - varnamelen # doesnot allow shorter names like c,k etc. But golang prefers short named vars. - wsl # [too strict and mostly code is not more readable] whitespace linter forces you to use empty lines - wrapcheck # check if this is required. Prevents direct return of err. + - exportloopref # Since Go1.22 (loopvar) this linter is no longer relevant. Replaced by copyloopvar. 
# Need to check - nlreturn # [too strict and mostly code is not more readable] checks for a new line before return and branch statements to increase code clarity - err113 # [too strict] checks the errors handling expressions - + # To be fixed - gocognit # https://github.com/opendatahub-io/opendatahub-operator/issues/709 - cyclop # https://github.com/opendatahub-io/opendatahub-operator/issues/709 @@ -103,4 +117,7 @@ issues: - path: tests/*/(.+)_test\.go linters: - typecheck - - dupl \ No newline at end of file + - dupl + - path: pkg/utils/test/testf/(.+)\.go + linters: + - containedctx diff --git a/Dockerfiles/Dockerfile b/Dockerfiles/Dockerfile index ce38972f7fc..4ca7003cda6 100644 --- a/Dockerfiles/Dockerfile +++ b/Dockerfiles/Dockerfile @@ -35,7 +35,6 @@ RUN go mod download # Copy the go source COPY apis/ apis/ -COPY components/ components/ COPY controllers/ controllers/ COPY main.go main.go COPY pkg/ pkg/ diff --git a/Makefile b/Makefile index 67226bb0dfa..312238f9df0 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,7 @@ # To re-generate a bundle for another specific version without changing the standard setup, you can: # - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2) # - use environment variables to overwrite this value (e.g export VERSION=0.0.2) -VERSION ?= 2.16.0 +VERSION ?= 2.21.0 # IMAGE_TAG_BASE defines the opendatahub.io namespace and part of the image name for remote images. # This variable is used to construct full image tags for bundle and catalog images. # @@ -69,7 +69,7 @@ YQ ?= $(LOCALBIN)/yq KUSTOMIZE_VERSION ?= v5.0.2 CONTROLLER_GEN_VERSION ?= v0.16.1 OPERATOR_SDK_VERSION ?= v1.31.0 -GOLANGCI_LINT_VERSION ?= v1.61.0 +GOLANGCI_LINT_VERSION ?= v1.63.4 YQ_VERSION ?= v4.12.2 # ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary. 
ENVTEST_K8S_VERSION = 1.31.0 @@ -89,7 +89,8 @@ SHELL = /usr/bin/env bash -o pipefail .SHELLFLAGS = -ec # E2E tests additional flags -E2E_TEST_FLAGS = "--skip-deletion=false" -timeout 25m # See README.md, default go test timeout 10m +# See README.md, default go test timeout 10m +E2E_TEST_FLAGS = -timeout 25m # Default image-build is to not use local odh-manifests folder # set to "true" to use local instead @@ -170,8 +171,13 @@ CLEANFILES += $(GOLANGCI_TMP_FILE) vet: ## Run go vet against code. go vet ./... +GOLANGCI_LINT_TIMEOUT ?= 5m0s .PHONY: lint lint: golangci-lint ## Run golangci-lint against code. + $(GOLANGCI_LINT) run --timeout=$(GOLANGCI_LINT_TIMEOUT) --sort-results + +.PHONY: lint-fix +lint-fix: golangci-lint ## Run golangci-lint against code. $(GOLANGCI_LINT) run --fix --sort-results .PHONY: get-manifests @@ -198,7 +204,10 @@ run: manifests generate fmt vet ## Run a controller from your host. .PHONY: run-nowebhook run-nowebhook: GO_RUN_ARGS += -tags nowebhook -run-nowebhook: run ## Run a controller from your host without webhook enabled + +run-nowebhook: manifests generate fmt vet ## Run a controller from your host without webhook enabled + $(GO_RUN_MAIN) + .PHONY: image-build image-build: # unit-test ## Build image with the manager. 
diff --git a/PROJECT b/PROJECT index c1f485192b9..2a1726df6b0 100644 --- a/PROJECT +++ b/PROJECT @@ -14,7 +14,6 @@ repo: github.com/opendatahub-io/opendatahub-operator resources: - api: crdVersion: v1 - namespaced: false controller: true domain: opendatahub.io group: dscinitialization @@ -26,7 +25,6 @@ resources: webhookVersion: v1 - api: crdVersion: v1 - namespaced: false controller: true domain: opendatahub.io group: datasciencecluster @@ -37,4 +35,112 @@ resources: defaulting: true validation: true webhookVersion: v1 +- api: + crdVersion: v1alpha1 + controller: true + domain: platform.opendatahub.io + group: components + kind: Dashboard + path: github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1alpha1 + controller: true + domain: platform.opendatahub.io + group: components + kind: Workbenches + path: github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1alpha1 + controller: true + domain: platform.opendatahub.io + group: components + kind: ModelMeshServing + path: github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1alpha1 + controller: true + domain: platform.opendatahub.io + group: components + kind: DataSciencePipelines + path: github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1alpha1 + controller: true + domain: platform.opendatahub.io + group: components + kind: Kserve + path: github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1alpha1 + controller: true + domain: platform.opendatahub.io + group: components + kind: Kueue + path: github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1alpha1 + controller: true + domain: platform.opendatahub.io + group: components + 
kind: CodeFlare + path: github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1alpha1 + controller: true + domain: platform.opendatahub.io + group: components + kind: Ray + path: github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1alpha1 + controller: true + domain: platform.opendatahub.io + group: components + kind: TrustyAI + path: github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1alpha1 + controller: true + domain: platform.opendatahub.io + group: components + kind: ModelRegistry + path: github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1alpha1 + controller: true + domain: platform.opendatahub.io + group: components + kind: TrainingOperator + path: github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1alpha1 + controller: true + domain: platform.opendatahub.io + group: services + kind: Monitoring + path: github.com/opendatahub-io/opendatahub-operator/v2/apis/services/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1alpha1 + controller: true + domain: platform.opendatahub.io + group: services + kind: Auth + path: github.com/opendatahub-io/opendatahub-operator/v2/apis/services/v1alpha1 + version: v1alpha1 +- domain: opendatahub.io + group: services + kind: Auth + version: v1alpha1 version: "3" diff --git a/README.md b/README.md index 2baac7573f2..aaec66a6fef 100644 --- a/README.md +++ b/README.md @@ -20,6 +20,7 @@ and configure these applications. 
- [Deployment](#deployment) - [Test with customized manifests](#test-with-customized-manifests) - [Update API docs](#update-api-docs) + - [Enabled logging](#enabled-logging) - [Example DSCInitialization](#example-dscinitialization) - [Example DataScienceCluster](#example-datasciencecluster) - [Run functional Tests](#run-functional-tests) @@ -141,7 +142,7 @@ e.g `make image-build USE_LOCAL=true"` - Custom operator image can be built using your local repository ```commandline - make image -e IMG=quay.io//opendatahub-operator: + make image IMG=quay.io//opendatahub-operator: ``` The default image used is `quay.io/opendatahub/opendatahub-operator:dev-0.0.1` when not supply argument for `make image` @@ -166,7 +167,7 @@ e.g `make image-build USE_LOCAL=true"` - Deploy the created image in your cluster using following command: ```commandline - make deploy -e IMG=quay.io//opendatahub-operator: -e OPERATOR_NAMESPACE= + make deploy IMG=quay.io//opendatahub-operator: OPERATOR_NAMESPACE= ``` - To remove resources created during installation use: @@ -178,7 +179,7 @@ e.g `make image-build USE_LOCAL=true"` **Deploying operator using OLM** - To create a new bundle in defined operator namespace, run following command: - + ```commandline export OPERATOR_NAMESPACE= make bundle @@ -187,13 +188,13 @@ e.g `make image-build USE_LOCAL=true"` **Note** : Skip the above step if you want to run the existing operator bundle. 
- Build Bundle Image: - + ```commandline make bundle-build bundle-push BUNDLE_IMG=quay.io//opendatahub-operator-bundle: ``` - Run the Bundle on a cluster: - + ```commandline operator-sdk run bundle quay.io//opendatahub-operator-bundle: --namespace $OPERATOR_NAMESPACE --decompression-image quay.io/project-codeflare/busybox:1.36 ``` @@ -209,32 +210,24 @@ There are 2 ways to test your changes with modification: Whenever a new api is added or a new field is added to the CRD, please make sure to run the command: ```commandline - make api-docs + make api-docs ``` This will ensure that the doc for the apis are updated accordingly. ### Enabled logging -#### Controller level - -Logger on all controllers can only be changed from CSV with parameters: --log-mode devel -valid value: "" (as default) || prod || production || devel || development - -This mainly impacts logging for operator pod startup, generating common resource, monitoring deployment. - -| --log-mode value | mapping Log level | Comments | -| ---------------- | ------------------- | -------------- | -| devel | debug / 0 | lowest level | -| "" | info / 1 | default option | -| default | info / 1 | default option | -| prod | error / 2 | highest level | +Global logger configuration can be changed with an environment variable `ZAP_LOG_LEVEL` +or a command line switch `--log-mode ` for example from CSV. +Command line switch has higher priority. +Valid values for ``: "" (as default) || prod || production || devel || development. -#### Component level +Verbosity level is INFO. +To fine tune zap backend [standard operator sdk zap switches](https://sdk.operatorframework.io/docs/building-operators/golang/references/logging/) +can be used. -Logger on components can be changed by DSCI devFlags during runtime. -By default, if not set .spec.devFlags.logmode, it uses INFO level -Modification applies to all components, not only these "Managed" ones.
-Update DSCI CR with .spec.devFlags.logmode, see example : +Log level can be changed by DSCI devFlags during runtime by setting +.spec.devFlags.logLevel. It accepts the same values as `--zap-log-level` +command line switch. See example : ```console apiVersion: dscinitialization.opendatahub.io/v1 @@ -243,20 +236,17 @@ metadata: name: default-dsci spec: devFlags: - logmode: development + logLevel: debug ... ``` -Avaiable value for logmode is "devel", "development", "prod", "production". -The first two work the same set to DEBUG level; the later two work the same as using ERROR level. - -| .spec.devFlags.logmode | stacktrace level | verbosity | Output | Comments | -| ---------------------- | ---------------- | --------- | -------- | -------------- | -| devel | WARN | INFO | Console | lowest level, using epoch time | -| development | WARN | INFO | Console | same as devel | -| "" | ERROR | INFO | JSON | default option | -| prod | ERROR | INFO | JSON | highest level, using human readable timestamp | -| production | ERROR | INFO | JSON | same as prod | +| logmode | stacktrace level | verbosity | Output | Comments | +|-------------|------------------|-----------|---------|-----------------------------------------------| +| devel | WARN | INFO | Console | lowest level, using epoch time | +| development | WARN | INFO | Console | same as devel | +| "" | ERROR | INFO | JSON | default option | +| prod | ERROR | INFO | JSON | highest level, using human readable timestamp | +| production | ERROR | INFO | JSON | same as prod | ### Example DSCInitialization @@ -288,7 +278,7 @@ Apply this example with modification for your usage. ### Example DataScienceCluster -When the operator is installed successfully in the cluster, a user can create a `DataScienceCluster` CR to enable ODH +When the operator is installed successfully in the cluster, a user can create a `DataScienceCluster` CR to enable ODH components. 
At a given time, ODH supports only **one** instance of the CR, which can be updated to get custom list of components. 1. Enable all components @@ -308,6 +298,8 @@ spec: managementState: Managed kserve: managementState: Managed + nim: + managementState: Managed serving: ingressGateway: certificate: @@ -386,15 +378,23 @@ make e2e-test Additional flags that can be passed to e2e-tests by setting up `E2E_TEST_FLAGS` variable. Following table lists all the available flags to run the tests: -| Flag | Description | Default value | -|-----------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------| -| --skip-deletion | To skip running of `dsc-deletion` test that includes deleting `DataScienceCluster` resources. Assign this variable to `true` to skip DataScienceCluster deletion. | false | +| Flag | Description | Default value | +|----------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------| +| --skip-deletion | To skip running of `dsc-deletion` test that includes deleting `DataScienceCluster` resources. Assign this variable to `true` to skip DataScienceCluster deletion. | false | +| --test-operator-controller | To configure the execution of tests related to the Operator POD, this is useful to run e2e tests for an operator running out of the cluster i.e. for debugging purposes | true | +| --test-webhook | To configure the execution of tests rellated to the Operator WebHooks, this is useful to run e2e tests for an operator running out of the cluster i.e. 
for debugging purposes | true | +| --test-component | A repeatable flag that control what component should be tested, by default all component specific test are executed | true | + +Example command to run full test suite skipping the test for DataScienceCluster deletion. + +```shell +make e2e-test OPERATOR_NAMESPACE= E2E_TEST_FLAGS="--skip-deletion=true" +``` -Example command to run full test suite skipping the test -for DataScienceCluster deletion. +Example commands to run test suite for the dashboard `component` only, with the operator running out of the cluster. ```shell -make e2e-test -e OPERATOR_NAMESPACE= -e E2E_TEST_FLAGS="--skip-deletion=true" +make run-nowebhook ``` ## Run Prometheus Unit Tests for Alerts diff --git a/apis/common/types.go b/apis/common/types.go new file mode 100644 index 00000000000..44542d15895 --- /dev/null +++ b/apis/common/types.go @@ -0,0 +1,84 @@ +package common + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ManagementSpec struct defines the component's management configuration. +// +kubebuilder:object:generate=true +type ManagementSpec struct { + // Set to one of the following values: + // + // - "Managed" : the operator is actively managing the component and trying to keep it active. + // It will only upgrade the component if it is safe to do so + // + // - "Removed" : the operator is actively managing the component and will not install it, + // or if it is installed, the operator will try to remove it + // + // +kubebuilder:validation:Enum=Managed;Removed + ManagementState operatorv1.ManagementState `json:"managementState,omitempty"` +} + +// DevFlags defines list of fields that can be used by developers to test customizations. This is not recommended +// to be used in production environment. 
+// +kubebuilder:object:generate=true +type DevFlags struct { + // List of custom manifests for the given component + // +optional + Manifests []ManifestsConfig `json:"manifests,omitempty"` +} + +// DevFlagsSpec struct defines the component's dev flags configuration. +// +kubebuilder:object:generate=true +type DevFlagsSpec struct { + // Add developer fields + // +optional + DevFlags *DevFlags `json:"devFlags,omitempty"` +} + +type ManifestsConfig struct { + // uri is the URI point to a git repo with tag/branch. e.g. https://github.com/org/repo/tarball/ + // +optional + // +kubebuilder:default:="" + // +operator-sdk:csv:customresourcedefinitions:type=spec,order=1 + URI string `json:"uri,omitempty"` + + // contextDir is the relative path to the folder containing manifests in a repository, default value "manifests" + // +optional + // +kubebuilder:default:="manifests" + // +operator-sdk:csv:customresourcedefinitions:type=spec,order=2 + ContextDir string `json:"contextDir,omitempty"` + + // sourcePath is the subpath within contextDir where kustomize builds start. Examples include any sub-folder or path: `base`, `overlays/dev`, `default`, `odh` etc. 
+ // +optional + // +kubebuilder:default:="" + // +operator-sdk:csv:customresourcedefinitions:type=spec,order=3 + SourcePath string `json:"sourcePath,omitempty"` +} + +// +kubebuilder:object:generate=true +type Status struct { + Phase string `json:"phase,omitempty"` + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +type WithStatus interface { + GetStatus() *Status +} + +type WithDevFlags interface { + GetDevFlags() *DevFlags +} + +type PlatformObject interface { + client.Object + WithStatus +} diff --git a/components/zz_generated.deepcopy.go b/apis/common/zz_generated.deepcopy.go similarity index 55% rename from components/zz_generated.deepcopy.go rename to apis/common/zz_generated.deepcopy.go index 92a766ebc26..b55b115b354 100644 --- a/components/zz_generated.deepcopy.go +++ b/apis/common/zz_generated.deepcopy.go @@ -18,12 +18,34 @@ limitations under the License. // Code generated by controller-gen. DO NOT EDIT. -package components +package common -import () +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1" +) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Component) DeepCopyInto(out *Component) { +func (in *DevFlags) DeepCopyInto(out *DevFlags) { + *out = *in + if in.Manifests != nil { + in, out := &in.Manifests, &out.Manifests + *out = make([]ManifestsConfig, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevFlags. +func (in *DevFlags) DeepCopy() *DevFlags { + if in == nil { + return nil + } + out := new(DevFlags) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DevFlagsSpec) DeepCopyInto(out *DevFlagsSpec) { *out = *in if in.DevFlags != nil { in, out := &in.DevFlags, &out.DevFlags @@ -32,32 +54,49 @@ func (in *Component) DeepCopyInto(out *Component) { } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Component. -func (in *Component) DeepCopy() *Component { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevFlagsSpec. +func (in *DevFlagsSpec) DeepCopy() *DevFlagsSpec { if in == nil { return nil } - out := new(Component) + out := new(DevFlagsSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DevFlags) DeepCopyInto(out *DevFlags) { +func (in *ManagementSpec) DeepCopyInto(out *ManagementSpec) { *out = *in - if in.Manifests != nil { - in, out := &in.Manifests, &out.Manifests - *out = make([]ManifestsConfig, len(*in)) - copy(*out, *in) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementSpec. +func (in *ManagementSpec) DeepCopy() *ManagementSpec { + if in == nil { + return nil } + out := new(ManagementSpec) + in.DeepCopyInto(out) + return out } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DevFlags. -func (in *DevFlags) DeepCopy() *DevFlags { +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Status) DeepCopyInto(out *Status) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Status. 
+func (in *Status) DeepCopy() *Status { if in == nil { return nil } - out := new(DevFlags) + out := new(Status) in.DeepCopyInto(out) return out } diff --git a/apis/components/component.go b/apis/components/component.go new file mode 100644 index 00000000000..c0ae497309a --- /dev/null +++ b/apis/components/component.go @@ -0,0 +1,13 @@ +// +groupName=datasciencecluster.opendatahub.io +package components + +import ( + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" +) + +// Component struct defines the basis for each OpenDataHub component configuration. +// +kubebuilder:object:generate=true +type Component struct { + common.ManagementSpec `json:",inline"` + common.DevFlagsSpec `json:",inline"` +} diff --git a/apis/components/v1alpha1/codeflare_types.go b/apis/components/v1alpha1/codeflare_types.go new file mode 100644 index 00000000000..9152db29fd0 --- /dev/null +++ b/apis/components/v1alpha1/codeflare_types.go @@ -0,0 +1,99 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + CodeFlareComponentName = "codeflare" + // value should match whats set in the XValidation below + CodeFlareInstanceName = "default-" + CodeFlareComponentName + CodeFlareKind = "CodeFlare" +) + +// CodeFlareCommonStatus defines the shared observed state of CodeFlare +type CodeFlareCommonStatus struct { +} + +// CodeFlareStatus defines the observed state of CodeFlare +type CodeFlareStatus struct { + common.Status `json:",inline"` + CodeFlareCommonStatus `json:",inline"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster +// +kubebuilder:printcolumn:name="Ready",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].status`,description="Ready" +// +kubebuilder:printcolumn:name="Reason",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].reason`,description="Reason" +// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'default-codeflare'",message="CodeFlare name must be default-codeflare" + +// CodeFlare is the Schema for the codeflares API +type CodeFlare struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec CodeFlareSpec `json:"spec,omitempty"` + Status CodeFlareStatus `json:"status,omitempty"` +} + +type CodeFlareSpec struct { + CodeFlareCommonSpec `json:",inline"` +} + +type CodeFlareCommonSpec struct { + common.DevFlagsSpec `json:",inline"` +} + +func (c *CodeFlare) GetDevFlags() *common.DevFlags { + return c.Spec.DevFlags +} + +func (c *CodeFlare) GetStatus() *common.Status { + return &c.Status.Status +} + +func init() { + SchemeBuilder.Register(&CodeFlare{}, &CodeFlareList{}) +} + +// +kubebuilder:object:root=true + +// CodeFlareList contains a list of CodeFlare +type CodeFlareList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta 
`json:"metadata,omitempty"` + Items []CodeFlare `json:"items"` +} + +func init() { + SchemeBuilder.Register(&CodeFlare{}, &CodeFlareList{}) +} + +type DSCCodeFlare struct { + common.ManagementSpec `json:",inline"` + CodeFlareCommonSpec `json:",inline"` +} + +// DSCCodeFlareStatus contains the observed state of the CodeFlare exposed in the DSC instance +type DSCCodeFlareStatus struct { + common.ManagementSpec `json:",inline"` + *CodeFlareCommonStatus `json:",inline"` +} diff --git a/apis/components/v1alpha1/dashboard_types.go b/apis/components/v1alpha1/dashboard_types.go new file mode 100644 index 00000000000..14afd14337b --- /dev/null +++ b/apis/components/v1alpha1/dashboard_types.go @@ -0,0 +1,107 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + DashboardComponentName = "dashboard" + // DashboardInstanceName the name of the Dashboard instance singleton. 
+ // value should match whats set in the XValidation below + DashboardInstanceName = "default-" + DashboardComponentName + DashboardKind = "Dashboard" +) + +// DashboardCommonSpec spec defines the shared desired state of Dashboard +type DashboardCommonSpec struct { + // dashboard spec exposed to DSC api + common.DevFlagsSpec `json:",inline"` + // dashboard spec exposed only to internal api +} + +// DashboardSpec defines the desired state of Dashboard +type DashboardSpec struct { + // dashboard spec exposed to DSC api + DashboardCommonSpec `json:",inline"` + // dashboard spec exposed only to internal api +} + +// DashboardCommonStatus defines the shared observed state of Dashboard +type DashboardCommonStatus struct { + URL string `json:"url,omitempty"` +} + +// DashboardStatus defines the observed state of Dashboard +type DashboardStatus struct { + common.Status `json:",inline"` + DashboardCommonStatus `json:",inline"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster +// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'default-dashboard'",message="Dashboard name must be default-dashboard" +// +kubebuilder:printcolumn:name="Ready",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].status`,description="Ready" +// +kubebuilder:printcolumn:name="Reason",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].reason`,description="Reason" +// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.status.url`,description="URL" + +// Dashboard is the Schema for the dashboards API +type Dashboard struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DashboardSpec `json:"spec,omitempty"` + Status DashboardStatus `json:"status,omitempty"` +} + +func (c *Dashboard) GetDevFlags() *common.DevFlags { + return c.Spec.DevFlags +} + +func (c *Dashboard) GetStatus() *common.Status { + return &c.Status.Status +} + +// 
+kubebuilder:object:root=true + +// DashboardList contains a list of Dashboard +type DashboardList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Dashboard `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Dashboard{}, &DashboardList{}) +} + +// DSCDashboard contains all the configuration exposed in DSC instance for Dashboard component +type DSCDashboard struct { + // configuration fields common across components + common.ManagementSpec `json:",inline"` + // dashboard specific field + DashboardCommonSpec `json:",inline"` +} + +// DSCDashboardStatus contains the observed state of the Dashboard exposed in the DSC instance +type DSCDashboardStatus struct { + common.ManagementSpec `json:",inline"` + *DashboardCommonStatus `json:",inline"` +} diff --git a/apis/components/v1alpha1/datasciencepipelines_types.go b/apis/components/v1alpha1/datasciencepipelines_types.go new file mode 100644 index 00000000000..da10f02cc8c --- /dev/null +++ b/apis/components/v1alpha1/datasciencepipelines_types.go @@ -0,0 +1,99 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + DataSciencePipelinesComponentName = "datasciencepipelines" + // value should match whats set in the XValidation below + DataSciencePipelinesInstanceName = "default-" + DataSciencePipelinesComponentName + DataSciencePipelinesKind = "DataSciencePipelines" +) + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster +// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'default-datasciencepipelines'",message="DataSciencePipelines name must be default-datasciencepipelines" +// +kubebuilder:printcolumn:name="Ready",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].status`,description="Ready" +// +kubebuilder:printcolumn:name="Reason",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].reason`,description="Reason" + +// DataSciencePipelines is the Schema for the datasciencepipelines API +type DataSciencePipelines struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DataSciencePipelinesSpec `json:"spec,omitempty"` + Status DataSciencePipelinesStatus `json:"status,omitempty"` +} + +// DataSciencePipelinesSpec defines the desired state of DataSciencePipelines +type DataSciencePipelinesSpec struct { + DataSciencePipelinesCommonSpec `json:",inline"` +} + +type DataSciencePipelinesCommonSpec struct { + common.DevFlagsSpec `json:",inline"` +} + +// DataSciencePipelinesCommonStatus defines the shared observed state of DataSciencePipelines +type DataSciencePipelinesCommonStatus struct { +} + +// DataSciencePipelinesStatus defines the observed state of DataSciencePipelines +type DataSciencePipelinesStatus struct { + common.Status `json:",inline"` + DataSciencePipelinesCommonStatus `json:",inline"` +} + +func (c *DataSciencePipelines) GetDevFlags() *common.DevFlags { + return 
c.Spec.DevFlags +} + +func (c *DataSciencePipelines) GetStatus() *common.Status { + return &c.Status.Status +} + +// +kubebuilder:object:root=true + +// DataSciencePipelinesList contains a list of DataSciencePipelines +type DataSciencePipelinesList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DataSciencePipelines `json:"items"` +} + +func init() { + SchemeBuilder.Register(&DataSciencePipelines{}, &DataSciencePipelinesList{}) +} + +// DSCDataSciencePipelines contains all the configuration exposed in DSC instance for DataSciencePipelines component +type DSCDataSciencePipelines struct { + // configuration fields common across components + common.ManagementSpec `json:",inline"` + // datasciencepipelines specific field + DataSciencePipelinesCommonSpec `json:",inline"` +} + +// DSCDataSciencePipelinesStatus contains the observed state of the DataSciencePipelines exposed in the DSC instance +type DSCDataSciencePipelinesStatus struct { + common.ManagementSpec `json:",inline"` + *DataSciencePipelinesCommonStatus `json:",inline"` +} diff --git a/apis/components/v1alpha1/groupversion_info.go b/apis/components/v1alpha1/groupversion_info.go new file mode 100644 index 00000000000..3901aec2e35 --- /dev/null +++ b/apis/components/v1alpha1/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+// Package v1alpha1 contains API Schema definitions for the components v1alpha1 API group
+// +kubebuilder:object:generate=true
+// +groupName=components.platform.opendatahub.io
+package v1alpha1
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+var (
+	// GroupVersion is group version used to register these objects
+	GroupVersion = schema.GroupVersion{Group: "components.platform.opendatahub.io", Version: "v1alpha1"}
+
+	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
+	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
+
+	// AddToScheme adds the types in this group-version to the given scheme.
+	AddToScheme = SchemeBuilder.AddToScheme
+)
diff --git a/apis/components/v1alpha1/kserve_types.go b/apis/components/v1alpha1/kserve_types.go
new file mode 100644
index 00000000000..b69d88aed83
--- /dev/null
+++ b/apis/components/v1alpha1/kserve_types.go
@@ -0,0 +1,137 @@
+/*
+Copyright 2023.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package v1alpha1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + infrav1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/infrastructure/v1" +) + +const ( + KserveComponentName = "kserve" + // value should match what's set in the XValidation below + KserveInstanceName = "default-" + KserveComponentName + KserveKind = "Kserve" +) + +// +kubebuilder:validation:Pattern=`^(Serverless|RawDeployment)$` +type DefaultDeploymentMode string + +const ( + // Serverless will be used as the default deployment mode for Kserve. This requires Serverless and ServiceMesh operators configured as dependencies. + Serverless DefaultDeploymentMode = "Serverless" + // RawDeployment will be used as the default deployment mode for Kserve. + RawDeployment DefaultDeploymentMode = "RawDeployment" +) + +// KserveCommonSpec spec defines the shared desired state of Kserve +type KserveCommonSpec struct { + common.DevFlagsSpec `json:",inline"` + // Serving configures the KNative-Serving stack used for model serving. A Service + // Mesh (Istio) is prerequisite, since it is used as networking layer. + Serving infrav1.ServingSpec `json:"serving,omitempty"` + // Configures the default deployment mode for Kserve. This can be set to 'Serverless' or 'RawDeployment'. + // The value specified in this field will be used to set the default deployment mode in the 'inferenceservice-config' configmap for Kserve. + // This field is optional. If no default deployment mode is specified, Kserve will use Serverless mode. 
+ // +kubebuilder:validation:Enum=Serverless;RawDeployment + DefaultDeploymentMode DefaultDeploymentMode `json:"defaultDeploymentMode,omitempty"` + // Configures and enables NVIDIA NIM integration + NIM NimSpec `json:"nim,omitempty"` +} + +// nimSpec enables NVIDIA NIM integration +type NimSpec struct { + // +kubebuilder:validation:Enum=Managed;Removed + // +kubebuilder:default=Managed + ManagementState operatorv1.ManagementState `json:"managementState,omitempty"` +} + +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// KserveSpec defines the desired state of Kserve +type KserveSpec struct { + // kserve spec exposed to DSC api + KserveCommonSpec `json:",inline"` + // kserve spec exposed only to internal api +} + +// KserveCommonStatus defines the shared observed state of Kserve +type KserveCommonStatus struct { + // DefaultDeploymentMode is the value of the defaultDeploymentMode field + // as read from the "deploy" JSON in the inferenceservice-config ConfigMap + DefaultDeploymentMode string `json:"defaultDeploymentMode,omitempty"` +} + +// KserveStatus defines the observed state of Kserve +type KserveStatus struct { + common.Status `json:",inline"` + KserveCommonStatus `json:",inline"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster +// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'default-kserve'",message="Kserve name must be default-kserve" +// +kubebuilder:printcolumn:name="Ready",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].status`,description="Ready" +// +kubebuilder:printcolumn:name="Reason",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].reason`,description="Reason" + +// Kserve is the Schema for the kserves API +type Kserve struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec KserveSpec `json:"spec,omitempty"` + Status KserveStatus 
`json:"status,omitempty"` +} + +func (c *Kserve) GetDevFlags() *common.DevFlags { + return c.Spec.DevFlags +} + +func (c *Kserve) GetStatus() *common.Status { + return &c.Status.Status +} + +// +kubebuilder:object:root=true + +// KserveList contains a list of Kserve +type KserveList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Kserve `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Kserve{}, &KserveList{}) +} + +// DSCKserve contains all the configuration exposed in DSC instance for Kserve component +type DSCKserve struct { + // configuration fields common across components + common.ManagementSpec `json:",inline"` + // Kserve specific fields + KserveCommonSpec `json:",inline"` +} + +// DSCKserveStatus contains the observed state of the Kserve exposed in the DSC instance +type DSCKserveStatus struct { + common.ManagementSpec `json:",inline"` + *KserveCommonStatus `json:",inline"` +} diff --git a/apis/components/v1alpha1/kueue_types.go b/apis/components/v1alpha1/kueue_types.go new file mode 100644 index 00000000000..e0be9ff57ac --- /dev/null +++ b/apis/components/v1alpha1/kueue_types.go @@ -0,0 +1,98 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + KueueComponentName = "kueue" + // value should match whats set in the XValidation below + KueueInstanceName = "default-" + KueueComponentName + KueueKind = "Kueue" +) + +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster +// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'default-kueue'",message="Kueue name must be default-kueue" +// +kubebuilder:printcolumn:name="Ready",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].status`,description="Ready" +// +kubebuilder:printcolumn:name="Reason",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].reason`,description="Reason" + +// Kueue is the Schema for the kueues API +type Kueue struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec KueueSpec `json:"spec,omitempty"` + Status KueueStatus `json:"status,omitempty"` +} + +// KueueSpec defines the desired state of Kueue +type KueueSpec struct { + KueueCommonSpec `json:",inline"` +} + +type KueueCommonSpec struct { + common.DevFlagsSpec `json:",inline"` +} + +// KueueCommonStatus defines the shared observed state of Kueue +type KueueCommonStatus struct { +} + +// KueueStatus defines the observed state of Kueue +type KueueStatus struct { + common.Status `json:",inline"` + KueueCommonStatus `json:",inline"` +} + +// +kubebuilder:object:root=true +// KueueList contains a list of Kueue +type KueueList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Kueue `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Kueue{}, &KueueList{}) +} + +func (c *Kueue) GetDevFlags() *common.DevFlags { + return c.Spec.DevFlags 
+} +func (c *Kueue) GetStatus() *common.Status { + return &c.Status.Status +} + +// DSCKueue contains all the configuration exposed in DSC instance for Kueue component +type DSCKueue struct { + common.ManagementSpec `json:",inline"` + // configuration fields common across components + KueueCommonSpec `json:",inline"` +} + +// DSCKueueStatus contains the observed state of the Kueue exposed in the DSC instance +type DSCKueueStatus struct { + common.ManagementSpec `json:",inline"` + *KueueCommonStatus `json:",inline"` +} diff --git a/apis/components/v1alpha1/modelcontroller_types.go b/apis/components/v1alpha1/modelcontroller_types.go new file mode 100644 index 00000000000..9de70bb81ae --- /dev/null +++ b/apis/components/v1alpha1/modelcontroller_types.go @@ -0,0 +1,101 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + operatorv1 "github.com/openshift/api/operator/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + ModelControllerComponentName = "modelcontroller" + // shared by kserve and modelmeshserving + // value should match whats set in the XValidation below + ModelControllerInstanceName = "default-" + ModelControllerComponentName + ModelControllerKind = "ModelController" +) + +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
+ +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster +// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'default-modelcontroller'",message="ModelController name must be default-modelcontroller" +// +kubebuilder:printcolumn:name="Ready",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].status`,description="Ready" +// +kubebuilder:printcolumn:name="Reason",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].reason`,description="Reason" +// +kubebuilder:printcolumn:name="URI",type=string,JSONPath=`.status.URI`,description="devFlag's URI used to download" + +// ModelController is the Schema for the modelcontroller API, it is a shared component between kserve and modelmeshserving +type ModelController struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ModelControllerSpec `json:"spec,omitempty"` + Status ModelControllerStatus `json:"status,omitempty"` +} + +// ModelControllerSpec defines the desired state of ModelController +type ModelControllerSpec struct { + // ModelMeshServing DSCModelMeshServing `json:"modelMeshServing,omitempty"` + Kserve *ModelControllerKerveSpec `json:"kserve,omitempty"` + ModelMeshServing *ModelControllerMMSpec `json:"modelMeshServing,omitempty"` +} + +// a mini version of the DSCKserve only keep devflags and management spec +type ModelControllerKerveSpec struct { + ManagementState operatorv1.ManagementState `json:"managementState,omitempty"` + NIM NimSpec `json:"nim,omitempty"` + common.DevFlagsSpec `json:",inline"` +} + +func (s *ModelControllerKerveSpec) GetDevFlags() *common.DevFlags { + return s.DevFlags +} + +// a mini version of the DSCModelMeshServing only keep devflags and management spec +type ModelControllerMMSpec struct { + ManagementState operatorv1.ManagementState `json:"managementState,omitempty"` + common.DevFlagsSpec `json:",inline"` +} + +func (s *ModelControllerMMSpec) GetDevFlags() 
*common.DevFlags { + return s.DevFlags +} + +// ModelControllerStatus defines the observed state of ModelController +type ModelControllerStatus struct { + common.Status `json:",inline"` +} + +// +kubebuilder:object:root=true +// ModelControllerList contains a list of ModelController +type ModelControllerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ModelController `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ModelController{}, &ModelControllerList{}) +} + +func (c *ModelController) GetDevFlags() *common.DevFlags { return nil } + +func (c *ModelController) GetStatus() *common.Status { + return &c.Status.Status +} diff --git a/apis/components/v1alpha1/modelmeshserving_types.go b/apis/components/v1alpha1/modelmeshserving_types.go new file mode 100644 index 00000000000..e28649a417c --- /dev/null +++ b/apis/components/v1alpha1/modelmeshserving_types.go @@ -0,0 +1,98 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + ModelMeshServingComponentName = "modelmeshserving" + // value should match whats set in the XValidation below + ModelMeshServingInstanceName = "default-" + ModelMeshServingComponentName + ModelMeshServingKind = "ModelMeshServing" +) + +// NOTE: json tags are required. 
Any new fields you add must have json tags for the fields to be serialized. + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster +// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'default-modelmeshserving'",message="ModelMeshServing name must be default-modelmeshserving" +// +kubebuilder:printcolumn:name="Ready",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].status`,description="Ready" +// +kubebuilder:printcolumn:name="Reason",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].reason`,description="Reason" + +// ModelMeshServing is the Schema for the modelmeshservings API +type ModelMeshServing struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ModelMeshServingSpec `json:"spec,omitempty"` + Status ModelMeshServingStatus `json:"status,omitempty"` +} + +// ModelMeshServingSpec defines the desired state of ModelMeshServing +type ModelMeshServingSpec struct { + ModelMeshServingCommonSpec `json:",inline"` +} + +type ModelMeshServingCommonSpec struct { + common.DevFlagsSpec `json:",inline"` +} + +// ModelMeshServingCommonStatus defines the shared observed state of ModelMeshServing +type ModelMeshServingCommonStatus struct { +} + +// ModelMeshServingStatus defines the observed state of ModelMeshServing +type ModelMeshServingStatus struct { + common.Status `json:",inline"` + ModelMeshServingCommonStatus `json:",inline"` +} + +// +kubebuilder:object:root=true +// ModelMeshServingList contains a list of ModelMeshServing +type ModelMeshServingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ModelMeshServing `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ModelMeshServing{}, &ModelMeshServingList{}) +} + +func (c *ModelMeshServing) GetDevFlags() *common.DevFlags { + return c.Spec.DevFlags +} +func (c *ModelMeshServing) GetStatus() *common.Status { + return 
&c.Status.Status +} + +// DSCModelMeshServing contains all the configuration exposed in DSC instance for ModelMeshServing component +type DSCModelMeshServing struct { + common.ManagementSpec `json:",inline"` + // configuration fields common across components + ModelMeshServingCommonSpec `json:",inline"` +} + +// DSCModelMeshServingStatus contains the observed state of the ModelMeshServing exposed in the DSC instance +type DSCModelMeshServingStatus struct { + common.ManagementSpec `json:",inline"` + *ModelMeshServingCommonStatus `json:",inline"` +} diff --git a/apis/components/v1alpha1/modelregistry_types.go b/apis/components/v1alpha1/modelregistry_types.go new file mode 100644 index 00000000000..729b84da7d9 --- /dev/null +++ b/apis/components/v1alpha1/modelregistry_types.go @@ -0,0 +1,115 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + ModelRegistryComponentName = "modelregistry" + // ModelRegistryInstanceName the name of the ModelRegistry instance singleton. 
+ // value should match what's set in the XValidation below + ModelRegistryInstanceName = "default-" + ModelRegistryComponentName + ModelRegistryKind = "ModelRegistry" +) + +// ModelRegistryCommonSpec spec defines the shared desired state of ModelRegistry +type ModelRegistryCommonSpec struct { + // model registry spec exposed to DSC api + common.DevFlagsSpec `json:",inline"` + + // Namespace for model registries to be installed, configurable only once when model registry is enabled, defaults to "odh-model-registries" + // +kubebuilder:default="rhoai-model-registries" + // +kubebuilder:validation:Pattern="^([a-z0-9]([-a-z0-9]*[a-z0-9])?)?$" + // +kubebuilder:validation:MaxLength=63 + RegistriesNamespace string `json:"registriesNamespace,omitempty"` +} + +// ModelRegistrySpec defines the desired state of ModelRegistry +type ModelRegistrySpec struct { + // model registry spec exposed to DSC api + ModelRegistryCommonSpec `json:",inline"` + // model registry spec exposed only to internal api +} + +// ModelRegistryCommonStatus defines the shared observed state of ModelRegistry +type ModelRegistryCommonStatus struct { + RegistriesNamespace string `json:"registriesNamespace,omitempty"` +} + +// ModelRegistryStatus defines the observed state of ModelRegistry +type ModelRegistryStatus struct { + common.Status `json:",inline"` + ModelRegistryCommonStatus `json:",inline"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster +// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'default-modelregistry'",message="ModelRegistry name must be default-modelregistry" +// +kubebuilder:printcolumn:name="Ready",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].status`,description="Ready" +// +kubebuilder:printcolumn:name="Reason",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].reason`,description="Reason" + +// ModelRegistry is the Schema for the modelregistries API +type ModelRegistry 
struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ModelRegistrySpec `json:"spec,omitempty"` + Status ModelRegistryStatus `json:"status,omitempty"` +} + +func (c *ModelRegistry) GetDevFlags() *common.DevFlags { + return c.Spec.DevFlags +} + +func (c *ModelRegistry) GetStatus() *common.Status { + return &c.Status.Status +} + +// +kubebuilder:object:root=true + +// ModelRegistryList contains a list of ModelRegistry +type ModelRegistryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ModelRegistry `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ModelRegistry{}, &ModelRegistryList{}) +} + +// +kubebuilder:object:generate=true +// +kubebuilder:validation:XValidation:rule="(self.managementState != 'Managed') || (oldSelf.registriesNamespace == '') || (oldSelf.managementState != 'Managed')|| (self.registriesNamespace == oldSelf.registriesNamespace)",message="RegistriesNamespace is immutable when model registry is Managed" +//nolint:lll + +// DSCModelRegistry contains all the configuration exposed in DSC instance for ModelRegistry component +type DSCModelRegistry struct { + // configuration fields common across components + common.ManagementSpec `json:",inline"` + // model registry specific field + ModelRegistryCommonSpec `json:",inline"` +} + +// DSCModelRegistryStatus struct holds the status for the ModelRegistry component exposed in the DSC +type DSCModelRegistryStatus struct { + common.ManagementSpec `json:",inline"` + *ModelRegistryCommonStatus `json:",inline"` +} diff --git a/apis/components/v1alpha1/ray_types.go b/apis/components/v1alpha1/ray_types.go new file mode 100644 index 00000000000..a51c29e3cd4 --- /dev/null +++ b/apis/components/v1alpha1/ray_types.go @@ -0,0 +1,98 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +    http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( +	"github.com/opendatahub-io/opendatahub-operator/v2/apis/common" +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( +	RayComponentName = "ray" +	// value should match what's set in the XValidation below +	RayInstanceName = "default-" + RayComponentName +	RayKind = "Ray" +) + +// NOTE: json tags are required.  Any new fields you add must have json tags for the fields to be serialized. + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster +// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'default-ray'",message="Ray name must be default-ray" +// +kubebuilder:printcolumn:name="Ready",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].status`,description="Ready" +// +kubebuilder:printcolumn:name="Reason",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].reason`,description="Reason" + +// Ray is the Schema for the rays API +type Ray struct { +	metav1.TypeMeta   `json:",inline"` +	metav1.ObjectMeta `json:"metadata,omitempty"` + +	Spec   RaySpec   `json:"spec,omitempty"` +	Status RayStatus `json:"status,omitempty"` +} + +// RaySpec defines the desired state of Ray +type RaySpec struct { +	RayCommonSpec `json:",inline"` +} + +type RayCommonSpec struct { +	common.DevFlagsSpec `json:",inline"` +} + +// RayCommonStatus defines the shared observed state of Ray +type RayCommonStatus struct { +} + +// RayStatus defines the observed state of Ray +type RayStatus struct { +	common.Status   `json:",inline"` +	RayCommonStatus `json:",inline"` +} + +// 
+kubebuilder:object:root=true +// RayList contains a list of Ray +type RayList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Ray `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Ray{}, &RayList{}) +} + +func (c *Ray) GetDevFlags() *common.DevFlags { + return c.Spec.DevFlags +} +func (c *Ray) GetStatus() *common.Status { + return &c.Status.Status +} + +// DSCRay contains all the configuration exposed in DSC instance for Ray component +type DSCRay struct { + common.ManagementSpec `json:",inline"` + // configuration fields common across components + RayCommonSpec `json:",inline"` +} + +// DSCRayStatus struct holds the status for the Ray component exposed in the DSC +type DSCRayStatus struct { + common.ManagementSpec `json:",inline"` + *RayCommonStatus `json:",inline"` +} diff --git a/apis/components/v1alpha1/trainingoperator_types.go b/apis/components/v1alpha1/trainingoperator_types.go new file mode 100644 index 00000000000..2d76ca78e6e --- /dev/null +++ b/apis/components/v1alpha1/trainingoperator_types.go @@ -0,0 +1,98 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( +	"github.com/opendatahub-io/opendatahub-operator/v2/apis/common" +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( +	TrainingOperatorComponentName = "trainingoperator" +	// value should match what's set in the XValidation below +	TrainingOperatorInstanceName = "default-" + TrainingOperatorComponentName +	TrainingOperatorKind = "TrainingOperator" +) + +// NOTE: json tags are required.  Any new fields you add must have json tags for the fields to be serialized. + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster +// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'default-trainingoperator'",message="TrainingOperator name must be default-trainingoperator" +// +kubebuilder:printcolumn:name="Ready",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].status`,description="Ready" +// +kubebuilder:printcolumn:name="Reason",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].reason`,description="Reason" + +// TrainingOperator is the Schema for the trainingoperators API +type TrainingOperator struct { +	metav1.TypeMeta   `json:",inline"` +	metav1.ObjectMeta `json:"metadata,omitempty"` + +	Spec   TrainingOperatorSpec   `json:"spec,omitempty"` +	Status TrainingOperatorStatus `json:"status,omitempty"` +} + +// TrainingOperatorSpec defines the desired state of TrainingOperator +type TrainingOperatorSpec struct { +	TrainingOperatorCommonSpec `json:",inline"` +} + +type TrainingOperatorCommonSpec struct { +	common.DevFlagsSpec `json:",inline"` +} + +// TrainingOperatorCommonStatus defines the shared observed state of TrainingOperator +type TrainingOperatorCommonStatus struct { +} + +// TrainingOperatorStatus defines the observed state of TrainingOperator +type TrainingOperatorStatus struct { +	common.Status   `json:",inline"` +	TrainingOperatorCommonStatus `json:",inline"` +} + +// +kubebuilder:object:root=true +// TrainingOperatorList contains a list
of TrainingOperator +type TrainingOperatorList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []TrainingOperator `json:"items"` +} + +func init() { + SchemeBuilder.Register(&TrainingOperator{}, &TrainingOperatorList{}) +} + +func (c *TrainingOperator) GetDevFlags() *common.DevFlags { + return c.Spec.DevFlags +} +func (c *TrainingOperator) GetStatus() *common.Status { + return &c.Status.Status +} + +// DSCTrainingOperator contains all the configuration exposed in DSC instance for TrainingOperator component +type DSCTrainingOperator struct { + common.ManagementSpec `json:",inline"` + // configuration fields common across components + TrainingOperatorCommonSpec `json:",inline"` +} + +// DSCTrainingOperatorStatus struct holds the status for the TrainingOperator component exposed in the DSC +type DSCTrainingOperatorStatus struct { + common.ManagementSpec `json:",inline"` + *TrainingOperatorCommonStatus `json:",inline"` +} diff --git a/apis/components/v1alpha1/trustyai_types.go b/apis/components/v1alpha1/trustyai_types.go new file mode 100644 index 00000000000..9ac2652fcc1 --- /dev/null +++ b/apis/components/v1alpha1/trustyai_types.go @@ -0,0 +1,98 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( +	"github.com/opendatahub-io/opendatahub-operator/v2/apis/common" +	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( +	TrustyAIComponentName = "trustyai" +	// value should match what's set in the XValidation below +	TrustyAIInstanceName = "default-" + TrustyAIComponentName +	TrustyAIKind = "TrustyAI" +) + +// NOTE: json tags are required.  Any new fields you add must have json tags for the fields to be serialized. + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster +// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'default-trustyai'",message="TrustyAI name must be default-trustyai" +// +kubebuilder:printcolumn:name="Ready",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].status`,description="Ready" +// +kubebuilder:printcolumn:name="Reason",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].reason`,description="Reason" + +// TrustyAI is the Schema for the trustyais API +type TrustyAI struct { +	metav1.TypeMeta   `json:",inline"` +	metav1.ObjectMeta `json:"metadata,omitempty"` + +	Spec   TrustyAISpec   `json:"spec,omitempty"` +	Status TrustyAIStatus `json:"status,omitempty"` +} + +// TrustyAISpec defines the desired state of TrustyAI +type TrustyAISpec struct { +	TrustyAICommonSpec `json:",inline"` +} + +type TrustyAICommonSpec struct { +	common.DevFlagsSpec `json:",inline"` +} + +// TrustyAICommonStatus defines the shared observed state of TrustyAI +type TrustyAICommonStatus struct { +} + +// TrustyAIStatus defines the observed state of TrustyAI +type TrustyAIStatus struct { +	common.Status   `json:",inline"` +	TrustyAICommonStatus `json:",inline"` +} + +// +kubebuilder:object:root=true +// TrustyAIList contains a list of TrustyAI +type TrustyAIList struct { +	metav1.TypeMeta `json:",inline"` +	metav1.ListMeta `json:"metadata,omitempty"` +	Items []TrustyAI `json:"items"` +} + +func init() { +	SchemeBuilder.Register(&TrustyAI{}, 
&TrustyAIList{}) +} + +func (c *TrustyAI) GetDevFlags() *common.DevFlags { + return c.Spec.DevFlags +} +func (c *TrustyAI) GetStatus() *common.Status { + return &c.Status.Status +} + +// DSCTrustyAI contains all the configuration exposed in DSC instance for TrustyAI component +type DSCTrustyAI struct { + common.ManagementSpec `json:",inline"` + // configuration fields common across components + TrustyAICommonSpec `json:",inline"` +} + +// DSCTrustyAIStatus struct holds the status for the TrustyAI component exposed in the DSC +type DSCTrustyAIStatus struct { + common.ManagementSpec `json:",inline"` + *TrustyAICommonStatus `json:",inline"` +} diff --git a/apis/components/v1alpha1/workbenches_types.go b/apis/components/v1alpha1/workbenches_types.go new file mode 100644 index 00000000000..4ea01ce7bbc --- /dev/null +++ b/apis/components/v1alpha1/workbenches_types.go @@ -0,0 +1,104 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + WorkbenchesComponentName = "workbenches" + // WorkbenchesInstanceName the name of the Workbenches instance singleton. + // value should match what is set in the XValidation below. 
+ WorkbenchesInstanceName = "default-" + WorkbenchesComponentName + WorkbenchesKind = "Workbenches" +) + +type WorkbenchesCommonSpec struct { + // workbenches spec exposed to DSC api + common.DevFlagsSpec `json:",inline"` + // workbenches spec exposed only to internal api +} + +// WorkbenchesSpec defines the desired state of Workbenches +type WorkbenchesSpec struct { + // workbenches spec exposed to DSC api + WorkbenchesCommonSpec `json:",inline"` + // workbenches spec exposed only to internal api +} + +// WorkbenchesCommonStatus defines the shared observed state of Workbenches +type WorkbenchesCommonStatus struct { +} + +// WorkbenchesStatus defines the observed state of Workbenches +type WorkbenchesStatus struct { + common.Status `json:",inline"` + WorkbenchesCommonStatus `json:",inline"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster +// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'default-workbenches'",message="Workbenches name must be default-workbenches" +// +kubebuilder:printcolumn:name="Ready",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].status`,description="Ready" +// +kubebuilder:printcolumn:name="Reason",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].reason`,description="Reason" + +// Workbenches is the Schema for the workbenches API +type Workbenches struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec WorkbenchesSpec `json:"spec,omitempty"` + Status WorkbenchesStatus `json:"status,omitempty"` +} + +func (c *Workbenches) GetDevFlags() *common.DevFlags { + return c.Spec.DevFlags +} + +func (c *Workbenches) GetStatus() *common.Status { + return &c.Status.Status +} + +// +kubebuilder:object:root=true + +// WorkbenchesList contains a list of Workbenches +type WorkbenchesList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Workbenches 
`json:"items"` +} + +func init() { + SchemeBuilder.Register(&Workbenches{}, &WorkbenchesList{}) +} + +// DSCWorkbenches contains all the configuration exposed in DSC instance for Workbenches component +type DSCWorkbenches struct { + // configuration fields common across components + common.ManagementSpec `json:",inline"` + // workbenches specific field + WorkbenchesCommonSpec `json:",inline"` +} + +// DSCWorkbenchesStatus struct holds the status for the Workbenches component exposed in the DSC +type DSCWorkbenchesStatus struct { + common.ManagementSpec `json:",inline"` + *WorkbenchesCommonStatus `json:",inline"` +} diff --git a/apis/components/v1alpha1/zz_generated.deepcopy.go b/apis/components/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..9de0b610abc --- /dev/null +++ b/apis/components/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,1946 @@ +//go:build !ignore_autogenerated + +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CodeFlare) DeepCopyInto(out *CodeFlare) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeFlare. +func (in *CodeFlare) DeepCopy() *CodeFlare { + if in == nil { + return nil + } + out := new(CodeFlare) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CodeFlare) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodeFlareCommonSpec) DeepCopyInto(out *CodeFlareCommonSpec) { + *out = *in + in.DevFlagsSpec.DeepCopyInto(&out.DevFlagsSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeFlareCommonSpec. +func (in *CodeFlareCommonSpec) DeepCopy() *CodeFlareCommonSpec { + if in == nil { + return nil + } + out := new(CodeFlareCommonSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodeFlareCommonStatus) DeepCopyInto(out *CodeFlareCommonStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeFlareCommonStatus. +func (in *CodeFlareCommonStatus) DeepCopy() *CodeFlareCommonStatus { + if in == nil { + return nil + } + out := new(CodeFlareCommonStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CodeFlareList) DeepCopyInto(out *CodeFlareList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CodeFlare, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeFlareList. +func (in *CodeFlareList) DeepCopy() *CodeFlareList { + if in == nil { + return nil + } + out := new(CodeFlareList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CodeFlareList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodeFlareSpec) DeepCopyInto(out *CodeFlareSpec) { + *out = *in + in.CodeFlareCommonSpec.DeepCopyInto(&out.CodeFlareCommonSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeFlareSpec. +func (in *CodeFlareSpec) DeepCopy() *CodeFlareSpec { + if in == nil { + return nil + } + out := new(CodeFlareSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CodeFlareStatus) DeepCopyInto(out *CodeFlareStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + out.CodeFlareCommonStatus = in.CodeFlareCommonStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeFlareStatus. +func (in *CodeFlareStatus) DeepCopy() *CodeFlareStatus { + if in == nil { + return nil + } + out := new(CodeFlareStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *DSCCodeFlare) DeepCopyInto(out *DSCCodeFlare) { + *out = *in + out.ManagementSpec = in.ManagementSpec + in.CodeFlareCommonSpec.DeepCopyInto(&out.CodeFlareCommonSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DSCCodeFlare. +func (in *DSCCodeFlare) DeepCopy() *DSCCodeFlare { + if in == nil { + return nil + } + out := new(DSCCodeFlare) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DSCCodeFlareStatus) DeepCopyInto(out *DSCCodeFlareStatus) { + *out = *in + out.ManagementSpec = in.ManagementSpec + if in.CodeFlareCommonStatus != nil { + in, out := &in.CodeFlareCommonStatus, &out.CodeFlareCommonStatus + *out = new(CodeFlareCommonStatus) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DSCCodeFlareStatus. +func (in *DSCCodeFlareStatus) DeepCopy() *DSCCodeFlareStatus { + if in == nil { + return nil + } + out := new(DSCCodeFlareStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DSCDashboard) DeepCopyInto(out *DSCDashboard) { + *out = *in + out.ManagementSpec = in.ManagementSpec + in.DashboardCommonSpec.DeepCopyInto(&out.DashboardCommonSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DSCDashboard. +func (in *DSCDashboard) DeepCopy() *DSCDashboard { + if in == nil { + return nil + } + out := new(DSCDashboard) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DSCDashboardStatus) DeepCopyInto(out *DSCDashboardStatus) { + *out = *in + out.ManagementSpec = in.ManagementSpec + if in.DashboardCommonStatus != nil { + in, out := &in.DashboardCommonStatus, &out.DashboardCommonStatus + *out = new(DashboardCommonStatus) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DSCDashboardStatus. +func (in *DSCDashboardStatus) DeepCopy() *DSCDashboardStatus { + if in == nil { + return nil + } + out := new(DSCDashboardStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DSCDataSciencePipelines) DeepCopyInto(out *DSCDataSciencePipelines) { + *out = *in + out.ManagementSpec = in.ManagementSpec + in.DataSciencePipelinesCommonSpec.DeepCopyInto(&out.DataSciencePipelinesCommonSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DSCDataSciencePipelines. +func (in *DSCDataSciencePipelines) DeepCopy() *DSCDataSciencePipelines { + if in == nil { + return nil + } + out := new(DSCDataSciencePipelines) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DSCDataSciencePipelinesStatus) DeepCopyInto(out *DSCDataSciencePipelinesStatus) { + *out = *in + out.ManagementSpec = in.ManagementSpec + if in.DataSciencePipelinesCommonStatus != nil { + in, out := &in.DataSciencePipelinesCommonStatus, &out.DataSciencePipelinesCommonStatus + *out = new(DataSciencePipelinesCommonStatus) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DSCDataSciencePipelinesStatus. 
+func (in *DSCDataSciencePipelinesStatus) DeepCopy() *DSCDataSciencePipelinesStatus { + if in == nil { + return nil + } + out := new(DSCDataSciencePipelinesStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DSCKserve) DeepCopyInto(out *DSCKserve) { + *out = *in + out.ManagementSpec = in.ManagementSpec + in.KserveCommonSpec.DeepCopyInto(&out.KserveCommonSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DSCKserve. +func (in *DSCKserve) DeepCopy() *DSCKserve { + if in == nil { + return nil + } + out := new(DSCKserve) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DSCKserveStatus) DeepCopyInto(out *DSCKserveStatus) { + *out = *in + out.ManagementSpec = in.ManagementSpec + if in.KserveCommonStatus != nil { + in, out := &in.KserveCommonStatus, &out.KserveCommonStatus + *out = new(KserveCommonStatus) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DSCKserveStatus. +func (in *DSCKserveStatus) DeepCopy() *DSCKserveStatus { + if in == nil { + return nil + } + out := new(DSCKserveStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DSCKueue) DeepCopyInto(out *DSCKueue) { + *out = *in + out.ManagementSpec = in.ManagementSpec + in.KueueCommonSpec.DeepCopyInto(&out.KueueCommonSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DSCKueue. 
+func (in *DSCKueue) DeepCopy() *DSCKueue { + if in == nil { + return nil + } + out := new(DSCKueue) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DSCKueueStatus) DeepCopyInto(out *DSCKueueStatus) { + *out = *in + out.ManagementSpec = in.ManagementSpec + if in.KueueCommonStatus != nil { + in, out := &in.KueueCommonStatus, &out.KueueCommonStatus + *out = new(KueueCommonStatus) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DSCKueueStatus. +func (in *DSCKueueStatus) DeepCopy() *DSCKueueStatus { + if in == nil { + return nil + } + out := new(DSCKueueStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DSCModelMeshServing) DeepCopyInto(out *DSCModelMeshServing) { + *out = *in + out.ManagementSpec = in.ManagementSpec + in.ModelMeshServingCommonSpec.DeepCopyInto(&out.ModelMeshServingCommonSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DSCModelMeshServing. +func (in *DSCModelMeshServing) DeepCopy() *DSCModelMeshServing { + if in == nil { + return nil + } + out := new(DSCModelMeshServing) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DSCModelMeshServingStatus) DeepCopyInto(out *DSCModelMeshServingStatus) { + *out = *in + out.ManagementSpec = in.ManagementSpec + if in.ModelMeshServingCommonStatus != nil { + in, out := &in.ModelMeshServingCommonStatus, &out.ModelMeshServingCommonStatus + *out = new(ModelMeshServingCommonStatus) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DSCModelMeshServingStatus. 
+func (in *DSCModelMeshServingStatus) DeepCopy() *DSCModelMeshServingStatus { + if in == nil { + return nil + } + out := new(DSCModelMeshServingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DSCModelRegistry) DeepCopyInto(out *DSCModelRegistry) { + *out = *in + out.ManagementSpec = in.ManagementSpec + in.ModelRegistryCommonSpec.DeepCopyInto(&out.ModelRegistryCommonSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DSCModelRegistry. +func (in *DSCModelRegistry) DeepCopy() *DSCModelRegistry { + if in == nil { + return nil + } + out := new(DSCModelRegistry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DSCModelRegistryStatus) DeepCopyInto(out *DSCModelRegistryStatus) { + *out = *in + out.ManagementSpec = in.ManagementSpec + if in.ModelRegistryCommonStatus != nil { + in, out := &in.ModelRegistryCommonStatus, &out.ModelRegistryCommonStatus + *out = new(ModelRegistryCommonStatus) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DSCModelRegistryStatus. +func (in *DSCModelRegistryStatus) DeepCopy() *DSCModelRegistryStatus { + if in == nil { + return nil + } + out := new(DSCModelRegistryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DSCRay) DeepCopyInto(out *DSCRay) { + *out = *in + out.ManagementSpec = in.ManagementSpec + in.RayCommonSpec.DeepCopyInto(&out.RayCommonSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DSCRay. 
+func (in *DSCRay) DeepCopy() *DSCRay { + if in == nil { + return nil + } + out := new(DSCRay) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DSCRayStatus) DeepCopyInto(out *DSCRayStatus) { + *out = *in + out.ManagementSpec = in.ManagementSpec + if in.RayCommonStatus != nil { + in, out := &in.RayCommonStatus, &out.RayCommonStatus + *out = new(RayCommonStatus) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DSCRayStatus. +func (in *DSCRayStatus) DeepCopy() *DSCRayStatus { + if in == nil { + return nil + } + out := new(DSCRayStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DSCTrainingOperator) DeepCopyInto(out *DSCTrainingOperator) { + *out = *in + out.ManagementSpec = in.ManagementSpec + in.TrainingOperatorCommonSpec.DeepCopyInto(&out.TrainingOperatorCommonSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DSCTrainingOperator. +func (in *DSCTrainingOperator) DeepCopy() *DSCTrainingOperator { + if in == nil { + return nil + } + out := new(DSCTrainingOperator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DSCTrainingOperatorStatus) DeepCopyInto(out *DSCTrainingOperatorStatus) { + *out = *in + out.ManagementSpec = in.ManagementSpec + if in.TrainingOperatorCommonStatus != nil { + in, out := &in.TrainingOperatorCommonStatus, &out.TrainingOperatorCommonStatus + *out = new(TrainingOperatorCommonStatus) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DSCTrainingOperatorStatus. 
+func (in *DSCTrainingOperatorStatus) DeepCopy() *DSCTrainingOperatorStatus { + if in == nil { + return nil + } + out := new(DSCTrainingOperatorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DSCTrustyAI) DeepCopyInto(out *DSCTrustyAI) { + *out = *in + out.ManagementSpec = in.ManagementSpec + in.TrustyAICommonSpec.DeepCopyInto(&out.TrustyAICommonSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DSCTrustyAI. +func (in *DSCTrustyAI) DeepCopy() *DSCTrustyAI { + if in == nil { + return nil + } + out := new(DSCTrustyAI) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DSCTrustyAIStatus) DeepCopyInto(out *DSCTrustyAIStatus) { + *out = *in + out.ManagementSpec = in.ManagementSpec + if in.TrustyAICommonStatus != nil { + in, out := &in.TrustyAICommonStatus, &out.TrustyAICommonStatus + *out = new(TrustyAICommonStatus) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DSCTrustyAIStatus. +func (in *DSCTrustyAIStatus) DeepCopy() *DSCTrustyAIStatus { + if in == nil { + return nil + } + out := new(DSCTrustyAIStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DSCWorkbenches) DeepCopyInto(out *DSCWorkbenches) { + *out = *in + out.ManagementSpec = in.ManagementSpec + in.WorkbenchesCommonSpec.DeepCopyInto(&out.WorkbenchesCommonSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DSCWorkbenches. 
func (in *DSCWorkbenches) DeepCopy() *DSCWorkbenches {
	if in == nil {
		return nil
	}
	out := new(DSCWorkbenches)
	in.DeepCopyInto(out)
	return out
}

// NOTE(review): autogenerated deepcopy implementations; do not hand-edit —
// re-run the deepcopy generator instead (TODO confirm the make target).

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DSCWorkbenchesStatus) DeepCopyInto(out *DSCWorkbenchesStatus) {
	*out = *in
	out.ManagementSpec = in.ManagementSpec
	if in.WorkbenchesCommonStatus != nil {
		in, out := &in.WorkbenchesCommonStatus, &out.WorkbenchesCommonStatus
		*out = new(WorkbenchesCommonStatus)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DSCWorkbenchesStatus.
func (in *DSCWorkbenchesStatus) DeepCopy() *DSCWorkbenchesStatus {
	if in == nil {
		return nil
	}
	out := new(DSCWorkbenchesStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Dashboard) DeepCopyInto(out *Dashboard) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Dashboard.
func (in *Dashboard) DeepCopy() *Dashboard {
	if in == nil {
		return nil
	}
	out := new(Dashboard)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Dashboard) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DashboardCommonSpec) DeepCopyInto(out *DashboardCommonSpec) {
	*out = *in
	in.DevFlagsSpec.DeepCopyInto(&out.DevFlagsSpec)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardCommonSpec.
func (in *DashboardCommonSpec) DeepCopy() *DashboardCommonSpec {
	if in == nil {
		return nil
	}
	out := new(DashboardCommonSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DashboardCommonStatus) DeepCopyInto(out *DashboardCommonStatus) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardCommonStatus.
func (in *DashboardCommonStatus) DeepCopy() *DashboardCommonStatus {
	if in == nil {
		return nil
	}
	out := new(DashboardCommonStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DashboardList) DeepCopyInto(out *DashboardList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Dashboard, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardList.
func (in *DashboardList) DeepCopy() *DashboardList {
	if in == nil {
		return nil
	}
	out := new(DashboardList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DashboardList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DashboardSpec) DeepCopyInto(out *DashboardSpec) {
	*out = *in
	in.DashboardCommonSpec.DeepCopyInto(&out.DashboardCommonSpec)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardSpec.
func (in *DashboardSpec) DeepCopy() *DashboardSpec {
	if in == nil {
		return nil
	}
	out := new(DashboardSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DashboardStatus) DeepCopyInto(out *DashboardStatus) {
	*out = *in
	in.Status.DeepCopyInto(&out.Status)
	out.DashboardCommonStatus = in.DashboardCommonStatus
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DashboardStatus.
func (in *DashboardStatus) DeepCopy() *DashboardStatus {
	if in == nil {
		return nil
	}
	out := new(DashboardStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataSciencePipelines) DeepCopyInto(out *DataSciencePipelines) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSciencePipelines.
func (in *DataSciencePipelines) DeepCopy() *DataSciencePipelines {
	if in == nil {
		return nil
	}
	out := new(DataSciencePipelines)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DataSciencePipelines) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataSciencePipelinesCommonSpec) DeepCopyInto(out *DataSciencePipelinesCommonSpec) {
	*out = *in
	in.DevFlagsSpec.DeepCopyInto(&out.DevFlagsSpec)
}

// NOTE(review): autogenerated deepcopy implementations; do not hand-edit —
// re-run the deepcopy generator instead (TODO confirm the make target).

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSciencePipelinesCommonSpec.
func (in *DataSciencePipelinesCommonSpec) DeepCopy() *DataSciencePipelinesCommonSpec {
	if in == nil {
		return nil
	}
	out := new(DataSciencePipelinesCommonSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataSciencePipelinesCommonStatus) DeepCopyInto(out *DataSciencePipelinesCommonStatus) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSciencePipelinesCommonStatus.
func (in *DataSciencePipelinesCommonStatus) DeepCopy() *DataSciencePipelinesCommonStatus {
	if in == nil {
		return nil
	}
	out := new(DataSciencePipelinesCommonStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataSciencePipelinesList) DeepCopyInto(out *DataSciencePipelinesList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]DataSciencePipelines, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSciencePipelinesList.
func (in *DataSciencePipelinesList) DeepCopy() *DataSciencePipelinesList {
	if in == nil {
		return nil
	}
	out := new(DataSciencePipelinesList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *DataSciencePipelinesList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataSciencePipelinesSpec) DeepCopyInto(out *DataSciencePipelinesSpec) {
	*out = *in
	in.DataSciencePipelinesCommonSpec.DeepCopyInto(&out.DataSciencePipelinesCommonSpec)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSciencePipelinesSpec.
func (in *DataSciencePipelinesSpec) DeepCopy() *DataSciencePipelinesSpec {
	if in == nil {
		return nil
	}
	out := new(DataSciencePipelinesSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DataSciencePipelinesStatus) DeepCopyInto(out *DataSciencePipelinesStatus) {
	*out = *in
	in.Status.DeepCopyInto(&out.Status)
	out.DataSciencePipelinesCommonStatus = in.DataSciencePipelinesCommonStatus
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSciencePipelinesStatus.
func (in *DataSciencePipelinesStatus) DeepCopy() *DataSciencePipelinesStatus {
	if in == nil {
		return nil
	}
	out := new(DataSciencePipelinesStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Kserve) DeepCopyInto(out *Kserve) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Kserve.
func (in *Kserve) DeepCopy() *Kserve {
	if in == nil {
		return nil
	}
	out := new(Kserve)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Kserve) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KserveCommonSpec) DeepCopyInto(out *KserveCommonSpec) {
	*out = *in
	in.DevFlagsSpec.DeepCopyInto(&out.DevFlagsSpec)
	out.Serving = in.Serving
	out.NIM = in.NIM
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KserveCommonSpec.
func (in *KserveCommonSpec) DeepCopy() *KserveCommonSpec {
	if in == nil {
		return nil
	}
	out := new(KserveCommonSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KserveCommonStatus) DeepCopyInto(out *KserveCommonStatus) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KserveCommonStatus.
func (in *KserveCommonStatus) DeepCopy() *KserveCommonStatus {
	if in == nil {
		return nil
	}
	out := new(KserveCommonStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KserveList) DeepCopyInto(out *KserveList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Kserve, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KserveList.
func (in *KserveList) DeepCopy() *KserveList {
	if in == nil {
		return nil
	}
	out := new(KserveList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *KserveList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KserveSpec) DeepCopyInto(out *KserveSpec) {
	*out = *in
	in.KserveCommonSpec.DeepCopyInto(&out.KserveCommonSpec)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KserveSpec.
func (in *KserveSpec) DeepCopy() *KserveSpec {
	if in == nil {
		return nil
	}
	out := new(KserveSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KserveStatus) DeepCopyInto(out *KserveStatus) {
	*out = *in
	in.Status.DeepCopyInto(&out.Status)
	out.KserveCommonStatus = in.KserveCommonStatus
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KserveStatus.
func (in *KserveStatus) DeepCopy() *KserveStatus {
	if in == nil {
		return nil
	}
	out := new(KserveStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Kueue) DeepCopyInto(out *Kueue) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Kueue.
func (in *Kueue) DeepCopy() *Kueue {
	if in == nil {
		return nil
	}
	out := new(Kueue)
	in.DeepCopyInto(out)
	return out
}

// NOTE(review): autogenerated deepcopy implementations; do not hand-edit —
// re-run the deepcopy generator instead (TODO confirm the make target).

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Kueue) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KueueCommonSpec) DeepCopyInto(out *KueueCommonSpec) {
	*out = *in
	in.DevFlagsSpec.DeepCopyInto(&out.DevFlagsSpec)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KueueCommonSpec.
func (in *KueueCommonSpec) DeepCopy() *KueueCommonSpec {
	if in == nil {
		return nil
	}
	out := new(KueueCommonSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KueueCommonStatus) DeepCopyInto(out *KueueCommonStatus) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KueueCommonStatus.
func (in *KueueCommonStatus) DeepCopy() *KueueCommonStatus {
	if in == nil {
		return nil
	}
	out := new(KueueCommonStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KueueList) DeepCopyInto(out *KueueList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Kueue, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KueueList.
func (in *KueueList) DeepCopy() *KueueList {
	if in == nil {
		return nil
	}
	out := new(KueueList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *KueueList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KueueSpec) DeepCopyInto(out *KueueSpec) {
	*out = *in
	in.KueueCommonSpec.DeepCopyInto(&out.KueueCommonSpec)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KueueSpec.
func (in *KueueSpec) DeepCopy() *KueueSpec {
	if in == nil {
		return nil
	}
	out := new(KueueSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KueueStatus) DeepCopyInto(out *KueueStatus) {
	*out = *in
	in.Status.DeepCopyInto(&out.Status)
	out.KueueCommonStatus = in.KueueCommonStatus
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KueueStatus.
func (in *KueueStatus) DeepCopy() *KueueStatus {
	if in == nil {
		return nil
	}
	out := new(KueueStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ModelController) DeepCopyInto(out *ModelController) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelController.
func (in *ModelController) DeepCopy() *ModelController {
	if in == nil {
		return nil
	}
	out := new(ModelController)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ModelController) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// NOTE(review): "Kerve" in ModelControllerKerveSpec looks like a typo for
// "Kserve". If so, it must be fixed in the hand-written API type
// (modelcontroller_types.go) and this file regenerated — renaming only here
// would break compilation. TODO confirm with the API authors.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ModelControllerKerveSpec) DeepCopyInto(out *ModelControllerKerveSpec) {
	*out = *in
	out.NIM = in.NIM
	in.DevFlagsSpec.DeepCopyInto(&out.DevFlagsSpec)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelControllerKerveSpec.
func (in *ModelControllerKerveSpec) DeepCopy() *ModelControllerKerveSpec {
	if in == nil {
		return nil
	}
	out := new(ModelControllerKerveSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ModelControllerList) DeepCopyInto(out *ModelControllerList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]ModelController, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelControllerList.
func (in *ModelControllerList) DeepCopy() *ModelControllerList {
	if in == nil {
		return nil
	}
	out := new(ModelControllerList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ModelControllerList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ModelControllerMMSpec) DeepCopyInto(out *ModelControllerMMSpec) {
	*out = *in
	in.DevFlagsSpec.DeepCopyInto(&out.DevFlagsSpec)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelControllerMMSpec.
func (in *ModelControllerMMSpec) DeepCopy() *ModelControllerMMSpec {
	if in == nil {
		return nil
	}
	out := new(ModelControllerMMSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ModelControllerSpec) DeepCopyInto(out *ModelControllerSpec) {
	*out = *in
	if in.Kserve != nil {
		in, out := &in.Kserve, &out.Kserve
		*out = new(ModelControllerKerveSpec)
		(*in).DeepCopyInto(*out)
	}
	if in.ModelMeshServing != nil {
		in, out := &in.ModelMeshServing, &out.ModelMeshServing
		*out = new(ModelControllerMMSpec)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelControllerSpec.
func (in *ModelControllerSpec) DeepCopy() *ModelControllerSpec {
	if in == nil {
		return nil
	}
	out := new(ModelControllerSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ModelControllerStatus) DeepCopyInto(out *ModelControllerStatus) {
	*out = *in
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelControllerStatus.
func (in *ModelControllerStatus) DeepCopy() *ModelControllerStatus {
	if in == nil {
		return nil
	}
	out := new(ModelControllerStatus)
	in.DeepCopyInto(out)
	return out
}

// NOTE(review): autogenerated deepcopy implementations; do not hand-edit —
// re-run the deepcopy generator instead (TODO confirm the make target).

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ModelMeshServing) DeepCopyInto(out *ModelMeshServing) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelMeshServing.
func (in *ModelMeshServing) DeepCopy() *ModelMeshServing {
	if in == nil {
		return nil
	}
	out := new(ModelMeshServing)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ModelMeshServing) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ModelMeshServingCommonSpec) DeepCopyInto(out *ModelMeshServingCommonSpec) {
	*out = *in
	in.DevFlagsSpec.DeepCopyInto(&out.DevFlagsSpec)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelMeshServingCommonSpec.
func (in *ModelMeshServingCommonSpec) DeepCopy() *ModelMeshServingCommonSpec {
	if in == nil {
		return nil
	}
	out := new(ModelMeshServingCommonSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ModelMeshServingCommonStatus) DeepCopyInto(out *ModelMeshServingCommonStatus) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelMeshServingCommonStatus.
func (in *ModelMeshServingCommonStatus) DeepCopy() *ModelMeshServingCommonStatus {
	if in == nil {
		return nil
	}
	out := new(ModelMeshServingCommonStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ModelMeshServingList) DeepCopyInto(out *ModelMeshServingList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]ModelMeshServing, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelMeshServingList.
func (in *ModelMeshServingList) DeepCopy() *ModelMeshServingList {
	if in == nil {
		return nil
	}
	out := new(ModelMeshServingList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ModelMeshServingList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ModelMeshServingSpec) DeepCopyInto(out *ModelMeshServingSpec) {
	*out = *in
	in.ModelMeshServingCommonSpec.DeepCopyInto(&out.ModelMeshServingCommonSpec)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelMeshServingSpec.
func (in *ModelMeshServingSpec) DeepCopy() *ModelMeshServingSpec {
	if in == nil {
		return nil
	}
	out := new(ModelMeshServingSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ModelMeshServingStatus) DeepCopyInto(out *ModelMeshServingStatus) {
	*out = *in
	in.Status.DeepCopyInto(&out.Status)
	out.ModelMeshServingCommonStatus = in.ModelMeshServingCommonStatus
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelMeshServingStatus.
func (in *ModelMeshServingStatus) DeepCopy() *ModelMeshServingStatus {
	if in == nil {
		return nil
	}
	out := new(ModelMeshServingStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ModelRegistry) DeepCopyInto(out *ModelRegistry) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelRegistry.
func (in *ModelRegistry) DeepCopy() *ModelRegistry {
	if in == nil {
		return nil
	}
	out := new(ModelRegistry)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ModelRegistry) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ModelRegistryCommonSpec) DeepCopyInto(out *ModelRegistryCommonSpec) {
	*out = *in
	in.DevFlagsSpec.DeepCopyInto(&out.DevFlagsSpec)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelRegistryCommonSpec.
func (in *ModelRegistryCommonSpec) DeepCopy() *ModelRegistryCommonSpec {
	if in == nil {
		return nil
	}
	out := new(ModelRegistryCommonSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ModelRegistryCommonStatus) DeepCopyInto(out *ModelRegistryCommonStatus) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelRegistryCommonStatus.
func (in *ModelRegistryCommonStatus) DeepCopy() *ModelRegistryCommonStatus {
	if in == nil {
		return nil
	}
	out := new(ModelRegistryCommonStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ModelRegistryList) DeepCopyInto(out *ModelRegistryList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]ModelRegistry, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelRegistryList.
func (in *ModelRegistryList) DeepCopy() *ModelRegistryList {
	if in == nil {
		return nil
	}
	out := new(ModelRegistryList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ModelRegistryList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ModelRegistrySpec) DeepCopyInto(out *ModelRegistrySpec) {
	*out = *in
	in.ModelRegistryCommonSpec.DeepCopyInto(&out.ModelRegistryCommonSpec)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelRegistrySpec.
func (in *ModelRegistrySpec) DeepCopy() *ModelRegistrySpec {
	if in == nil {
		return nil
	}
	out := new(ModelRegistrySpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ModelRegistryStatus) DeepCopyInto(out *ModelRegistryStatus) {
	*out = *in
	in.Status.DeepCopyInto(&out.Status)
	out.ModelRegistryCommonStatus = in.ModelRegistryCommonStatus
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelRegistryStatus.
func (in *ModelRegistryStatus) DeepCopy() *ModelRegistryStatus {
	if in == nil {
		return nil
	}
	out := new(ModelRegistryStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NimSpec) DeepCopyInto(out *NimSpec) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NimSpec.
func (in *NimSpec) DeepCopy() *NimSpec {
	if in == nil {
		return nil
	}
	out := new(NimSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Ray) DeepCopyInto(out *Ray) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ray.
func (in *Ray) DeepCopy() *Ray {
	if in == nil {
		return nil
	}
	out := new(Ray)
	in.DeepCopyInto(out)
	return out
}

// NOTE(review): autogenerated deepcopy implementations; do not hand-edit —
// re-run the deepcopy generator instead (TODO confirm the make target).

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Ray) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RayCommonSpec) DeepCopyInto(out *RayCommonSpec) {
	*out = *in
	in.DevFlagsSpec.DeepCopyInto(&out.DevFlagsSpec)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RayCommonSpec.
func (in *RayCommonSpec) DeepCopy() *RayCommonSpec {
	if in == nil {
		return nil
	}
	out := new(RayCommonSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RayCommonStatus) DeepCopyInto(out *RayCommonStatus) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RayCommonStatus.
func (in *RayCommonStatus) DeepCopy() *RayCommonStatus {
	if in == nil {
		return nil
	}
	out := new(RayCommonStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RayList) DeepCopyInto(out *RayList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Ray, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RayList.
func (in *RayList) DeepCopy() *RayList {
	if in == nil {
		return nil
	}
	out := new(RayList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RayList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RaySpec) DeepCopyInto(out *RaySpec) {
	*out = *in
	in.RayCommonSpec.DeepCopyInto(&out.RayCommonSpec)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RaySpec.
func (in *RaySpec) DeepCopy() *RaySpec {
	if in == nil {
		return nil
	}
	out := new(RaySpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RayStatus) DeepCopyInto(out *RayStatus) {
	*out = *in
	in.Status.DeepCopyInto(&out.Status)
	out.RayCommonStatus = in.RayCommonStatus
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RayStatus.
func (in *RayStatus) DeepCopy() *RayStatus {
	if in == nil {
		return nil
	}
	out := new(RayStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TrainingOperator) DeepCopyInto(out *TrainingOperator) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrainingOperator.
func (in *TrainingOperator) DeepCopy() *TrainingOperator {
	if in == nil {
		return nil
	}
	out := new(TrainingOperator)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *TrainingOperator) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TrainingOperatorCommonSpec) DeepCopyInto(out *TrainingOperatorCommonSpec) {
	*out = *in
	in.DevFlagsSpec.DeepCopyInto(&out.DevFlagsSpec)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrainingOperatorCommonSpec.
func (in *TrainingOperatorCommonSpec) DeepCopy() *TrainingOperatorCommonSpec {
	if in == nil {
		return nil
	}
	out := new(TrainingOperatorCommonSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TrainingOperatorCommonStatus) DeepCopyInto(out *TrainingOperatorCommonStatus) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrainingOperatorCommonStatus.
func (in *TrainingOperatorCommonStatus) DeepCopy() *TrainingOperatorCommonStatus {
	if in == nil {
		return nil
	}
	out := new(TrainingOperatorCommonStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TrainingOperatorList) DeepCopyInto(out *TrainingOperatorList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]TrainingOperator, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrainingOperatorList.
func (in *TrainingOperatorList) DeepCopy() *TrainingOperatorList {
	if in == nil {
		return nil
	}
	out := new(TrainingOperatorList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *TrainingOperatorList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TrainingOperatorSpec) DeepCopyInto(out *TrainingOperatorSpec) {
	*out = *in
	in.TrainingOperatorCommonSpec.DeepCopyInto(&out.TrainingOperatorCommonSpec)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrainingOperatorSpec.
func (in *TrainingOperatorSpec) DeepCopy() *TrainingOperatorSpec {
	if in == nil {
		return nil
	}
	out := new(TrainingOperatorSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TrainingOperatorStatus) DeepCopyInto(out *TrainingOperatorStatus) {
	*out = *in
	in.Status.DeepCopyInto(&out.Status)
	out.TrainingOperatorCommonStatus = in.TrainingOperatorCommonStatus
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrainingOperatorStatus.
+func (in *TrainingOperatorStatus) DeepCopy() *TrainingOperatorStatus { + if in == nil { + return nil + } + out := new(TrainingOperatorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrustyAI) DeepCopyInto(out *TrustyAI) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustyAI. +func (in *TrustyAI) DeepCopy() *TrustyAI { + if in == nil { + return nil + } + out := new(TrustyAI) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TrustyAI) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrustyAICommonSpec) DeepCopyInto(out *TrustyAICommonSpec) { + *out = *in + in.DevFlagsSpec.DeepCopyInto(&out.DevFlagsSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustyAICommonSpec. +func (in *TrustyAICommonSpec) DeepCopy() *TrustyAICommonSpec { + if in == nil { + return nil + } + out := new(TrustyAICommonSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrustyAICommonStatus) DeepCopyInto(out *TrustyAICommonStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustyAICommonStatus. 
+func (in *TrustyAICommonStatus) DeepCopy() *TrustyAICommonStatus { + if in == nil { + return nil + } + out := new(TrustyAICommonStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrustyAIList) DeepCopyInto(out *TrustyAIList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TrustyAI, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustyAIList. +func (in *TrustyAIList) DeepCopy() *TrustyAIList { + if in == nil { + return nil + } + out := new(TrustyAIList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TrustyAIList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrustyAISpec) DeepCopyInto(out *TrustyAISpec) { + *out = *in + in.TrustyAICommonSpec.DeepCopyInto(&out.TrustyAICommonSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustyAISpec. +func (in *TrustyAISpec) DeepCopy() *TrustyAISpec { + if in == nil { + return nil + } + out := new(TrustyAISpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TrustyAIStatus) DeepCopyInto(out *TrustyAIStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + out.TrustyAICommonStatus = in.TrustyAICommonStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustyAIStatus. +func (in *TrustyAIStatus) DeepCopy() *TrustyAIStatus { + if in == nil { + return nil + } + out := new(TrustyAIStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Workbenches) DeepCopyInto(out *Workbenches) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workbenches. +func (in *Workbenches) DeepCopy() *Workbenches { + if in == nil { + return nil + } + out := new(Workbenches) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Workbenches) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkbenchesCommonSpec) DeepCopyInto(out *WorkbenchesCommonSpec) { + *out = *in + in.DevFlagsSpec.DeepCopyInto(&out.DevFlagsSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkbenchesCommonSpec. +func (in *WorkbenchesCommonSpec) DeepCopy() *WorkbenchesCommonSpec { + if in == nil { + return nil + } + out := new(WorkbenchesCommonSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkbenchesCommonStatus) DeepCopyInto(out *WorkbenchesCommonStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkbenchesCommonStatus. +func (in *WorkbenchesCommonStatus) DeepCopy() *WorkbenchesCommonStatus { + if in == nil { + return nil + } + out := new(WorkbenchesCommonStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkbenchesList) DeepCopyInto(out *WorkbenchesList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Workbenches, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkbenchesList. +func (in *WorkbenchesList) DeepCopy() *WorkbenchesList { + if in == nil { + return nil + } + out := new(WorkbenchesList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WorkbenchesList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkbenchesSpec) DeepCopyInto(out *WorkbenchesSpec) { + *out = *in + in.WorkbenchesCommonSpec.DeepCopyInto(&out.WorkbenchesCommonSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkbenchesSpec. +func (in *WorkbenchesSpec) DeepCopy() *WorkbenchesSpec { + if in == nil { + return nil + } + out := new(WorkbenchesSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkbenchesStatus) DeepCopyInto(out *WorkbenchesStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + out.WorkbenchesCommonStatus = in.WorkbenchesCommonStatus +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkbenchesStatus. +func (in *WorkbenchesStatus) DeepCopy() *WorkbenchesStatus { + if in == nil { + return nil + } + out := new(WorkbenchesStatus) + in.DeepCopyInto(out) + return out +} diff --git a/components/dashboard/zz_generated.deepcopy.go b/apis/components/zz_generated.deepcopy.go similarity index 77% rename from components/dashboard/zz_generated.deepcopy.go rename to apis/components/zz_generated.deepcopy.go index 9e4b4fc3a9d..c6333b6ad5f 100644 --- a/components/dashboard/zz_generated.deepcopy.go +++ b/apis/components/zz_generated.deepcopy.go @@ -18,22 +18,23 @@ limitations under the License. // Code generated by controller-gen. DO NOT EDIT. -package dashboard +package components import () // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Dashboard) DeepCopyInto(out *Dashboard) { +func (in *Component) DeepCopyInto(out *Component) { *out = *in - in.Component.DeepCopyInto(&out.Component) + out.ManagementSpec = in.ManagementSpec + in.DevFlagsSpec.DeepCopyInto(&out.DevFlagsSpec) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Dashboard. -func (in *Dashboard) DeepCopy() *Dashboard { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Component. 
+func (in *Component) DeepCopy() *Component { if in == nil { return nil } - out := new(Dashboard) + out := new(Component) in.DeepCopyInto(out) return out } diff --git a/apis/datasciencecluster/v1/datasciencecluster_types.go b/apis/datasciencecluster/v1/datasciencecluster_types.go index dce4b6760e7..fd5956758a2 100644 --- a/apis/datasciencecluster/v1/datasciencecluster_types.go +++ b/apis/datasciencecluster/v1/datasciencecluster_types.go @@ -17,26 +17,11 @@ limitations under the License. package v1 import ( - "errors" - "reflect" - conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/components" - "github.com/opendatahub-io/opendatahub-operator/v2/components/codeflare" - "github.com/opendatahub-io/opendatahub-operator/v2/components/dashboard" - "github.com/opendatahub-io/opendatahub-operator/v2/components/datasciencepipelines" - "github.com/opendatahub-io/opendatahub-operator/v2/components/kserve" - "github.com/opendatahub-io/opendatahub-operator/v2/components/kueue" - "github.com/opendatahub-io/opendatahub-operator/v2/components/modelmeshserving" - "github.com/opendatahub-io/opendatahub-operator/v2/components/modelregistry" - "github.com/opendatahub-io/opendatahub-operator/v2/components/ray" - "github.com/opendatahub-io/opendatahub-operator/v2/components/trainingoperator" - "github.com/opendatahub-io/opendatahub-operator/v2/components/trustyai" - "github.com/opendatahub-io/opendatahub-operator/v2/components/workbenches" - "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" ) @@ -49,47 +34,77 @@ type DataScienceClusterSpec struct { type Components struct { // Dashboard component configuration. 
- Dashboard dashboard.Dashboard `json:"dashboard,omitempty"` + Dashboard componentApi.DSCDashboard `json:"dashboard,omitempty"` // Workbenches component configuration. - Workbenches workbenches.Workbenches `json:"workbenches,omitempty"` + Workbenches componentApi.DSCWorkbenches `json:"workbenches,omitempty"` // ModelMeshServing component configuration. - ModelMeshServing modelmeshserving.ModelMeshServing `json:"modelmeshserving,omitempty"` + ModelMeshServing componentApi.DSCModelMeshServing `json:"modelmeshserving,omitempty"` - // DataServicePipeline component configuration. - // Require OpenShift Pipelines Operator to be installed before enable component - DataSciencePipelines datasciencepipelines.DataSciencePipelines `json:"datasciencepipelines,omitempty"` + // DataSciencePipeline component configuration. + // Requires OpenShift Pipelines Operator to be installed before enabling the component + DataSciencePipelines componentApi.DSCDataSciencePipelines `json:"datasciencepipelines,omitempty"` // Kserve component configuration. - // Require OpenShift Serverless and OpenShift Service Mesh Operators to be installed before enable component + // Requires OpenShift Serverless and OpenShift Service Mesh Operators to be installed before enabling the component // Does not support enabled ModelMeshServing at the same time - Kserve kserve.Kserve `json:"kserve,omitempty"` + Kserve componentApi.DSCKserve `json:"kserve,omitempty"` // Kueue component configuration. - Kueue kueue.Kueue `json:"kueue,omitempty"` + Kueue componentApi.DSCKueue `json:"kueue,omitempty"` // CodeFlare component configuration. - // If CodeFlare Operator has been installed in the cluster, it should be uninstalled first before enabled component. - CodeFlare codeflare.CodeFlare `json:"codeflare,omitempty"` + // If CodeFlare Operator has been installed in the cluster, it should be uninstalled first before enabling the component. + CodeFlare componentApi.DSCCodeFlare `json:"codeflare,omitempty"` // Ray component configuration. 
- Ray ray.Ray `json:"ray,omitempty"` + Ray componentApi.DSCRay `json:"ray,omitempty"` // TrustyAI component configuration. - TrustyAI trustyai.TrustyAI `json:"trustyai,omitempty"` - - //Training Operator component configuration. - TrainingOperator trainingoperator.TrainingOperator `json:"trainingoperator,omitempty"` + TrustyAI componentApi.DSCTrustyAI `json:"trustyai,omitempty"` // ModelRegistry component configuration. - ModelRegistry modelregistry.ModelRegistry `json:"modelregistry,omitempty"` + ModelRegistry componentApi.DSCModelRegistry `json:"modelregistry,omitempty"` + + // Training Operator component configuration. + TrainingOperator componentApi.DSCTrainingOperator `json:"trainingoperator,omitempty"` } // ComponentsStatus defines the custom status of DataScienceCluster components. type ComponentsStatus struct { - // ModelRegistry component status - ModelRegistry *status.ModelRegistryStatus `json:"modelregistry,omitempty"` + // Dashboard component status. + Dashboard componentApi.DSCDashboardStatus `json:"dashboard,omitempty"` + + // Workbenches component status. + Workbenches componentApi.DSCWorkbenchesStatus `json:"workbenches,omitempty"` + + // ModelMeshServing component status. + ModelMeshServing componentApi.DSCModelMeshServingStatus `json:"modelmeshserving,omitempty"` + + // DataSciencePipeline component status. + DataSciencePipelines componentApi.DSCDataSciencePipelinesStatus `json:"datasciencepipelines,omitempty"` + + // Kserve component status. + Kserve componentApi.DSCKserveStatus `json:"kserve,omitempty"` + + // Kueue component status. + Kueue componentApi.DSCKueueStatus `json:"kueue,omitempty"` + + // CodeFlare component status. + CodeFlare componentApi.DSCCodeFlareStatus `json:"codeflare,omitempty"` + + // Ray component status. + Ray componentApi.DSCRayStatus `json:"ray,omitempty"` + + // TrustyAI component status. + TrustyAI componentApi.DSCTrustyAIStatus `json:"trustyai,omitempty"` + + // ModelRegistry component status. 
+ ModelRegistry componentApi.DSCModelRegistryStatus `json:"modelregistry,omitempty"` + + // Training Operator component status. + TrainingOperator componentApi.DSCTrainingOperatorStatus `json:"trainingoperator,omitempty"` } // DataScienceClusterStatus defines the observed state of DataScienceCluster. @@ -102,6 +117,9 @@ type DataScienceClusterStatus struct { // +optional Conditions []conditionsv1.Condition `json:"conditions,omitempty"` + // The generation observed by the deployment controller. + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + // RelatedObjects is a list of objects created and maintained by this operator. // Object references will be added to this list after they have been created AND found in the cluster. // +optional @@ -113,16 +131,16 @@ type DataScienceClusterStatus struct { // Expose component's specific status // +optional - Components ComponentsStatus `json:"components,omitempty"` + Components ComponentsStatus `json:"components"` // Version and release type Release cluster.Release `json:"release,omitempty"` } -//+kubebuilder:object:root=true -//+kubebuilder:subresource:status -//+kubebuilder:resource:scope=Cluster,shortName=dsc -//+kubebuilder:storageversion +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster,shortName=dsc +// +kubebuilder:storageversion // DataScienceCluster is the Schema for the datascienceclusters API. type DataScienceCluster struct { @@ -133,7 +151,7 @@ type DataScienceCluster struct { Status DataScienceClusterStatus `json:"status,omitempty"` } -//+kubebuilder:object:root=true +// +kubebuilder:object:root=true // DataScienceClusterList contains a list of DataScienceCluster. 
type DataScienceClusterList struct { @@ -145,24 +163,3 @@ type DataScienceClusterList struct { func init() { SchemeBuilder.Register(&DataScienceCluster{}, &DataScienceClusterList{}) } - -func (d *DataScienceCluster) GetComponents() ([]components.ComponentInterface, error) { - var allComponents []components.ComponentInterface - - c := &d.Spec.Components - - definedComponents := reflect.ValueOf(c).Elem() - for i := 0; i < definedComponents.NumField(); i++ { - c := definedComponents.Field(i) - if c.CanAddr() { - component, ok := c.Addr().Interface().(components.ComponentInterface) - if !ok { - return allComponents, errors.New("this is not a pointer to ComponentInterface") - } - - allComponents = append(allComponents, component) - } - } - - return allComponents, nil -} diff --git a/apis/datasciencecluster/v1/zz_generated.deepcopy.go b/apis/datasciencecluster/v1/zz_generated.deepcopy.go index 035c38f876c..1d797ff3283 100644 --- a/apis/datasciencecluster/v1/zz_generated.deepcopy.go +++ b/apis/datasciencecluster/v1/zz_generated.deepcopy.go @@ -21,7 +21,6 @@ limitations under the License. package v1 import ( - "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" corev1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -39,8 +38,8 @@ func (in *Components) DeepCopyInto(out *Components) { in.CodeFlare.DeepCopyInto(&out.CodeFlare) in.Ray.DeepCopyInto(&out.Ray) in.TrustyAI.DeepCopyInto(&out.TrustyAI) - in.TrainingOperator.DeepCopyInto(&out.TrainingOperator) in.ModelRegistry.DeepCopyInto(&out.ModelRegistry) + in.TrainingOperator.DeepCopyInto(&out.TrainingOperator) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Components. @@ -56,11 +55,17 @@ func (in *Components) DeepCopy() *Components { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ComponentsStatus) DeepCopyInto(out *ComponentsStatus) { *out = *in - if in.ModelRegistry != nil { - in, out := &in.ModelRegistry, &out.ModelRegistry - *out = new(status.ModelRegistryStatus) - **out = **in - } + in.Dashboard.DeepCopyInto(&out.Dashboard) + in.Workbenches.DeepCopyInto(&out.Workbenches) + in.ModelMeshServing.DeepCopyInto(&out.ModelMeshServing) + in.DataSciencePipelines.DeepCopyInto(&out.DataSciencePipelines) + in.Kserve.DeepCopyInto(&out.Kserve) + in.Kueue.DeepCopyInto(&out.Kueue) + in.CodeFlare.DeepCopyInto(&out.CodeFlare) + in.Ray.DeepCopyInto(&out.Ray) + in.TrustyAI.DeepCopyInto(&out.TrustyAI) + in.ModelRegistry.DeepCopyInto(&out.ModelRegistry) + in.TrainingOperator.DeepCopyInto(&out.TrainingOperator) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentsStatus. diff --git a/apis/dscinitialization/v1/dscinitialization_types.go b/apis/dscinitialization/v1/dscinitialization_types.go index a249ccc6006..b4693dae4a2 100644 --- a/apis/dscinitialization/v1/dscinitialization_types.go +++ b/apis/dscinitialization/v1/dscinitialization_types.go @@ -17,6 +17,7 @@ limitations under the License. package v1 import ( + serviceApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/services/v1alpha1" operatorv1 "github.com/openshift/api/operator/v1" conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" corev1 "k8s.io/api/core/v1" @@ -40,7 +41,7 @@ type DSCInitializationSpec struct { // Enable monitoring on specified namespace // +operator-sdk:csv:customresourcedefinitions:type=spec,order=2 // +optional - Monitoring Monitoring `json:"monitoring,omitempty"` + Monitoring serviceApi.DSCMonitoring `json:"monitoring,omitempty"` // Configures Service Mesh as networking layer for Data Science Clusters components. // The Service Mesh is a mandatory prerequisite for single model serving (KServe) and // you should review this configuration if you are planning to use KServe. 
@@ -62,30 +63,20 @@ type DSCInitializationSpec struct { DevFlags *DevFlags `json:"devFlags,omitempty"` } -type Monitoring struct { - // Set to one of the following values: - // - "Managed" : the operator is actively managing the component and trying to keep it active. - // It will only upgrade the component if it is safe to do so. - // - "Removed" : the operator is actively managing the component and will not install it, - // or if it is installed, the operator will try to remove it. - // +kubebuilder:validation:Enum=Managed;Removed - ManagementState operatorv1.ManagementState `json:"managementState,omitempty"` - // Namespace for monitoring if it is enabled - // +kubebuilder:default=redhat-ods-monitoring - // +kubebuilder:validation:Pattern="^([a-z0-9]([-a-z0-9]*[a-z0-9])?)?$" - // +kubebuilder:validation:MaxLength=63 - Namespace string `json:"namespace,omitempty"` -} - // DevFlags defines list of fields that can be used by developers to test customizations. This is not recommended // to be used in production environment. type DevFlags struct { + // ## DEPRECATED ## : ManifestsUri set on DSCI is not maintained. // Custom manifests uri for odh-manifests // +optional ManifestsUri string `json:"manifestsUri,omitempty"` + // ## DEPRECATED ##: Ignored, use LogLevel instead // +kubebuilder:validation:Enum=devel;development;prod;production;default // +kubebuilder:default="production" LogMode string `json:"logmode,omitempty"` + // Override Zap log level. Can be "debug", "info", "error" or a number (more verbose). 
+ // +optional + LogLevel string `json:"logLevel,omitempty"` } type TrustedCABundleSpec struct { diff --git a/apis/dscinitialization/v1/zz_generated.deepcopy.go b/apis/dscinitialization/v1/zz_generated.deepcopy.go index 4f9bb201778..7beab4cc85f 100644 --- a/apis/dscinitialization/v1/zz_generated.deepcopy.go +++ b/apis/dscinitialization/v1/zz_generated.deepcopy.go @@ -160,21 +160,6 @@ func (in *DevFlags) DeepCopy() *DevFlags { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Monitoring) DeepCopyInto(out *Monitoring) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Monitoring. -func (in *Monitoring) DeepCopy() *Monitoring { - if in == nil { - return nil - } - out := new(Monitoring) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TrustedCABundleSpec) DeepCopyInto(out *TrustedCABundleSpec) { *out = *in diff --git a/apis/services/service.go b/apis/services/service.go new file mode 100644 index 00000000000..2703b79ab20 --- /dev/null +++ b/apis/services/service.go @@ -0,0 +1,12 @@ +// +groupName=dscinitialization.opendatahub.io +package services + +import ( + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" +) + +// Service struct defines the basis for each OpenDataHub component configuration. +// +kubebuilder:object:generate=true +type Service struct { + common.ManagementSpec `json:",inline"` +} diff --git a/apis/services/v1alpha1/auth_types.go b/apis/services/v1alpha1/auth_types.go new file mode 100644 index 00000000000..2de3cfef6ae --- /dev/null +++ b/apis/services/v1alpha1/auth_types.go @@ -0,0 +1,72 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + AuthServiceName = "auth" + AuthInstanceName = "auth" + AuthKind = "Auth" +) + +// AuthSpec defines the desired state of Auth +type AuthSpec struct { + AdminGroups []string `json:"adminGroups"` + AllowedGroups []string `json:"allowedGroups"` +} + +// AuthStatus defines the observed state of Auth +type AuthStatus struct { + common.Status `json:",inline"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +// +kubebuilder:resource:scope=Cluster +// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'auth'",message="Auth name must be auth" +// +kubebuilder:printcolumn:name="Ready",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].status`,description="Ready" +// +kubebuilder:printcolumn:name="Reason",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].reason`,description="Reason" + +// Auth is the Schema for the auths API +type Auth struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AuthSpec `json:"spec,omitempty"` + Status AuthStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// AuthList contains a list of Auth +type AuthList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Auth `json:"items"` +} + +func (m *Auth) GetStatus() *common.Status { + return &m.Status.Status +} + +func init() { + SchemeBuilder.Register(&Auth{}, &AuthList{}) +} 
diff --git a/apis/services/v1alpha1/groupversion_info.go b/apis/services/v1alpha1/groupversion_info.go
new file mode 100644
index 00000000000..c40f275a50a
--- /dev/null
+++ b/apis/services/v1alpha1/groupversion_info.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2023.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1alpha1 contains API Schema definitions for the services v1alpha1 API group
+// +kubebuilder:object:generate=true
+// +groupName=services.platform.opendatahub.io
+package v1alpha1
+
+import (
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+var (
+ // GroupVersion is group version used to register these objects
+ GroupVersion = schema.GroupVersion{Group: "services.platform.opendatahub.io", Version: "v1alpha1"}
+
+ // SchemeBuilder is used to add go types to the GroupVersionKind scheme
+ SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
+
+ // AddToScheme adds the types in this group-version to the given scheme.
+ AddToScheme = SchemeBuilder.AddToScheme
+)
diff --git a/apis/services/v1alpha1/monitoring_types.go b/apis/services/v1alpha1/monitoring_types.go
new file mode 100644
index 00000000000..6a0ae354b93
--- /dev/null
+++ b/apis/services/v1alpha1/monitoring_types.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2023.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "github.com/opendatahub-io/opendatahub-operator/v2/apis/common"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+ MonitoringServiceName = "monitoring"
+ // MonitoringInstanceName the name of the Monitoring instance singleton.
+ // value should match what's set in the XValidation below
+ MonitoringInstanceName = "default-monitoring"
+ MonitoringKind = "Monitoring"
+)
+
+// MonitoringSpec defines the desired state of Monitoring
+type MonitoringSpec struct {
+ // monitoring spec exposed to DSCI api
+ MonitoringCommonSpec `json:",inline"`
+ // monitoring spec exposed only to internal api
+}
+
+// MonitoringStatus defines the observed state of Monitoring
+type MonitoringStatus struct {
+ common.Status `json:",inline"`
+
+ URL string `json:"url,omitempty"`
+}
+
+//+kubebuilder:object:root=true
+//+kubebuilder:subresource:status
+// +kubebuilder:resource:scope=Cluster
+// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'default-monitoring'",message="Monitoring name must be default-monitoring"
+// +kubebuilder:printcolumn:name="Ready",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].status`,description="Ready"
+// +kubebuilder:printcolumn:name="Reason",type=string,JSONPath=`.status.conditions[?(@.type=="Ready")].reason`,description="Reason"
+// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.status.url`,description="URL"
+
+// Monitoring is the Schema for the monitorings API
+type Monitoring struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec 
MonitoringSpec `json:"spec,omitempty"`
+ Status MonitoringStatus `json:"status,omitempty"`
+}
+
+// MonitoringCommonSpec spec defines the shared desired state of Monitoring
+type MonitoringCommonSpec struct {
+ // monitoring spec exposed to DSCI api
+ // Namespace for monitoring if it is enabled
+ // +kubebuilder:default=opendatahub
+ // +kubebuilder:validation:Pattern="^([a-z0-9]([-a-z0-9]*[a-z0-9])?)?$"
+ // +kubebuilder:validation:MaxLength=63
+ Namespace string `json:"namespace,omitempty"`
+}
+
+//+kubebuilder:object:root=true
+
+// MonitoringList contains a list of Monitoring
+type MonitoringList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []Monitoring `json:"items"`
+}
+
+func (m *Monitoring) GetDevFlags() *common.DevFlags {
+ return nil
+}
+
+func (m *Monitoring) GetStatus() *common.Status {
+ return &m.Status.Status
+}
+
+func init() {
+ SchemeBuilder.Register(&Monitoring{}, &MonitoringList{})
+}
+
+type DSCMonitoring struct {
+ // configuration fields common across services
+ common.ManagementSpec `json:",inline"`
+ // monitoring specific fields
+ MonitoringCommonSpec `json:",inline"`
+}
diff --git a/apis/services/v1alpha1/zz_generated.deepcopy.go b/apis/services/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 00000000000..30795175fec
--- /dev/null
+++ b/apis/services/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,248 @@
+//go:build !ignore_autogenerated
+
+/*
+Copyright 2023.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Auth) DeepCopyInto(out *Auth) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Auth. +func (in *Auth) DeepCopy() *Auth { + if in == nil { + return nil + } + out := new(Auth) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Auth) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthList) DeepCopyInto(out *AuthList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Auth, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthList. +func (in *AuthList) DeepCopy() *AuthList { + if in == nil { + return nil + } + out := new(AuthList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *AuthList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthSpec) DeepCopyInto(out *AuthSpec) { + *out = *in + if in.AdminGroups != nil { + in, out := &in.AdminGroups, &out.AdminGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AllowedGroups != nil { + in, out := &in.AllowedGroups, &out.AllowedGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthSpec. +func (in *AuthSpec) DeepCopy() *AuthSpec { + if in == nil { + return nil + } + out := new(AuthSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthStatus) DeepCopyInto(out *AuthStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthStatus. +func (in *AuthStatus) DeepCopy() *AuthStatus { + if in == nil { + return nil + } + out := new(AuthStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DSCMonitoring) DeepCopyInto(out *DSCMonitoring) { + *out = *in + out.ManagementSpec = in.ManagementSpec + out.MonitoringCommonSpec = in.MonitoringCommonSpec +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DSCMonitoring. +func (in *DSCMonitoring) DeepCopy() *DSCMonitoring { + if in == nil { + return nil + } + out := new(DSCMonitoring) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Monitoring) DeepCopyInto(out *Monitoring) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Monitoring. +func (in *Monitoring) DeepCopy() *Monitoring { + if in == nil { + return nil + } + out := new(Monitoring) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Monitoring) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitoringCommonSpec) DeepCopyInto(out *MonitoringCommonSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringCommonSpec. +func (in *MonitoringCommonSpec) DeepCopy() *MonitoringCommonSpec { + if in == nil { + return nil + } + out := new(MonitoringCommonSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitoringList) DeepCopyInto(out *MonitoringList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Monitoring, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringList. +func (in *MonitoringList) DeepCopy() *MonitoringList { + if in == nil { + return nil + } + out := new(MonitoringList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *MonitoringList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitoringSpec) DeepCopyInto(out *MonitoringSpec) { + *out = *in + out.MonitoringCommonSpec = in.MonitoringCommonSpec +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringSpec. +func (in *MonitoringSpec) DeepCopy() *MonitoringSpec { + if in == nil { + return nil + } + out := new(MonitoringSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MonitoringStatus) DeepCopyInto(out *MonitoringStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringStatus. +func (in *MonitoringStatus) DeepCopy() *MonitoringStatus { + if in == nil { + return nil + } + out := new(MonitoringStatus) + in.DeepCopyInto(out) + return out +} diff --git a/components/kueue/zz_generated.deepcopy.go b/apis/services/zz_generated.deepcopy.go similarity index 81% rename from components/kueue/zz_generated.deepcopy.go rename to apis/services/zz_generated.deepcopy.go index 9ab2279f9bf..3c3d917ccaf 100644 --- a/components/kueue/zz_generated.deepcopy.go +++ b/apis/services/zz_generated.deepcopy.go @@ -18,22 +18,22 @@ limitations under the License. // Code generated by controller-gen. DO NOT EDIT. -package kueue +package services import () // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Kueue) DeepCopyInto(out *Kueue) { +func (in *Service) DeepCopyInto(out *Service) { *out = *in - in.Component.DeepCopyInto(&out.Component) + out.ManagementSpec = in.ManagementSpec } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Kueue. -func (in *Kueue) DeepCopy() *Kueue { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service. +func (in *Service) DeepCopy() *Service { if in == nil { return nil } - out := new(Kueue) + out := new(Service) in.DeepCopyInto(out) return out } diff --git a/bundle/manifests/components.platform.opendatahub.io_codeflares.yaml b/bundle/manifests/components.platform.opendatahub.io_codeflares.yaml new file mode 100644 index 00000000000..caec1b33965 --- /dev/null +++ b/bundle/manifests/components.platform.opendatahub.io_codeflares.yaml @@ -0,0 +1,158 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + creationTimestamp: null + name: codeflares.components.platform.opendatahub.io +spec: + group: components.platform.opendatahub.io + names: + kind: CodeFlare + listKind: CodeFlareList + plural: codeflares + singular: codeflare + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Ready + jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - description: Reason + jsonPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: CodeFlare is the Schema for the codeflares API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + properties: + devFlags: + description: Add developer fields + properties: + manifests: + description: List of custom manifests for the given component + items: + properties: + contextDir: + default: manifests + description: contextDir is the relative path to the folder + containing manifests in a repository, default value "manifests" + type: string + sourcePath: + default: "" + description: 'sourcePath is the subpath within contextDir + where kustomize builds start. Examples include any sub-folder + or path: `base`, `overlays/dev`, `default`, `odh` etc.' + type: string + uri: + default: "" + description: uri is the URI point to a git repo with tag/branch. + e.g. https://github.com/org/repo/tarball/ + type: string + type: object + type: array + type: object + type: object + status: + description: CodeFlareStatus defines the observed state of CodeFlare + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. 
+ maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + format: int64 + type: integer + phase: + type: string + type: object + type: object + x-kubernetes-validations: + - message: CodeFlare name must be default-codeflare + rule: self.metadata.name == 'default-codeflare' + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/bundle/manifests/components.platform.opendatahub.io_dashboards.yaml b/bundle/manifests/components.platform.opendatahub.io_dashboards.yaml new file mode 100644 index 00000000000..335119bc596 --- /dev/null +++ b/bundle/manifests/components.platform.opendatahub.io_dashboards.yaml @@ -0,0 +1,165 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + creationTimestamp: null + name: dashboards.components.platform.opendatahub.io +spec: + group: components.platform.opendatahub.io + names: + kind: Dashboard + listKind: DashboardList + plural: dashboards + singular: dashboard + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Ready + jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - description: Reason + jsonPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string + - description: URL + jsonPath: .status.url + name: URL + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: Dashboard is the Schema for the dashboards API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DashboardSpec defines the desired state of Dashboard + properties: + devFlags: + description: Add developer fields + properties: + manifests: + description: List of custom manifests for the given component + items: + properties: + contextDir: + default: manifests + description: contextDir is the relative path to the folder + containing manifests in a repository, default value "manifests" + type: string + sourcePath: + default: "" + description: 'sourcePath is the subpath within contextDir + where kustomize builds start. Examples include any sub-folder + or path: `base`, `overlays/dev`, `default`, `odh` etc.' + type: string + uri: + default: "" + description: uri is the URI point to a git repo with tag/branch. + e.g. https://github.com/org/repo/tarball/ + type: string + type: object + type: array + type: object + type: object + status: + description: DashboardStatus defines the observed state of Dashboard + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
+ format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + format: int64 + type: integer + phase: + type: string + url: + type: string + type: object + type: object + x-kubernetes-validations: + - message: Dashboard name must be default-dashboard + rule: self.metadata.name == 'default-dashboard' + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/bundle/manifests/components.platform.opendatahub.io_datasciencepipelines.yaml b/bundle/manifests/components.platform.opendatahub.io_datasciencepipelines.yaml new file mode 100644 index 00000000000..458b38f5e52 --- /dev/null +++ b/bundle/manifests/components.platform.opendatahub.io_datasciencepipelines.yaml @@ -0,0 +1,161 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + creationTimestamp: null + name: datasciencepipelines.components.platform.opendatahub.io +spec: + group: components.platform.opendatahub.io + names: + kind: DataSciencePipelines + listKind: DataSciencePipelinesList + plural: datasciencepipelines + singular: datasciencepipelines + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Ready + jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - description: Reason + jsonPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: DataSciencePipelines is the Schema for the datasciencepipelines + API + properties: + apiVersion: + description: |- + APIVersion defines the 
versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DataSciencePipelinesSpec defines the desired state of DataSciencePipelines + properties: + devFlags: + description: Add developer fields + properties: + manifests: + description: List of custom manifests for the given component + items: + properties: + contextDir: + default: manifests + description: contextDir is the relative path to the folder + containing manifests in a repository, default value "manifests" + type: string + sourcePath: + default: "" + description: 'sourcePath is the subpath within contextDir + where kustomize builds start. Examples include any sub-folder + or path: `base`, `overlays/dev`, `default`, `odh` etc.' + type: string + uri: + default: "" + description: uri is the URI point to a git repo with tag/branch. + e.g. https://github.com/org/repo/tarball/ + type: string + type: object + type: array + type: object + type: object + status: + description: DataSciencePipelinesStatus defines the observed state of + DataSciencePipelines + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. 
If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + format: int64 + type: integer + phase: + type: string + type: object + type: object + x-kubernetes-validations: + - message: DataSciencePipelines name must be default-datasciencepipelines + rule: self.metadata.name == 'default-datasciencepipelines' + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/bundle/manifests/components.platform.opendatahub.io_kserves.yaml b/bundle/manifests/components.platform.opendatahub.io_kserves.yaml new file mode 100644 index 00000000000..92ac4530345 --- /dev/null +++ b/bundle/manifests/components.platform.opendatahub.io_kserves.yaml @@ -0,0 +1,243 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + creationTimestamp: null + name: kserves.components.platform.opendatahub.io +spec: + group: components.platform.opendatahub.io + names: + kind: Kserve + listKind: KserveList + plural: kserves + singular: kserve + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Ready + jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - description: Reason + jsonPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: Kserve is the Schema for the kserves API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: KserveSpec defines the desired state of Kserve + properties: + defaultDeploymentMode: + description: |- + Configures the default deployment mode for Kserve. This can be set to 'Serverless' or 'RawDeployment'. + The value specified in this field will be used to set the default deployment mode in the 'inferenceservice-config' configmap for Kserve. + This field is optional. If no default deployment mode is specified, Kserve will use Serverless mode. + enum: + - Serverless + - RawDeployment + pattern: ^(Serverless|RawDeployment)$ + type: string + devFlags: + description: Add developer fields + properties: + manifests: + description: List of custom manifests for the given component + items: + properties: + contextDir: + default: manifests + description: contextDir is the relative path to the folder + containing manifests in a repository, default value "manifests" + type: string + sourcePath: + default: "" + description: 'sourcePath is the subpath within contextDir + where kustomize builds start. Examples include any sub-folder + or path: `base`, `overlays/dev`, `default`, `odh` etc.' + type: string + uri: + default: "" + description: uri is the URI point to a git repo with tag/branch. + e.g. 
https://github.com/org/repo/tarball/ + type: string + type: object + type: array + type: object + nim: + description: Configures and enables NVIDIA NIM integration + properties: + managementState: + default: Managed + enum: + - Managed + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + type: object + serving: + description: |- + Serving configures the KNative-Serving stack used for model serving. A Service + Mesh (Istio) is prerequisite, since it is used as networking layer. + properties: + ingressGateway: + description: |- + IngressGateway allows to customize some parameters for the Istio Ingress Gateway + that is bound to KNative-Serving. + properties: + certificate: + description: |- + Certificate specifies configuration of the TLS certificate securing communication + for the gateway. + properties: + secretName: + description: |- + SecretName specifies the name of the Kubernetes Secret resource that contains a + TLS certificate secure HTTP communications for the KNative network. + type: string + type: + default: OpenshiftDefaultIngress + description: |- + Type specifies if the TLS certificate should be generated automatically, or if the certificate + is provided by the user. Allowed values are: + * SelfSigned: A certificate is going to be generated using an own private key. + * Provided: Pre-existence of the TLS Secret (see SecretName) with a valid certificate is assumed. + * OpenshiftDefaultIngress: Default ingress certificate configured for OpenShift + enum: + - SelfSigned + - Provided + - OpenshiftDefaultIngress + type: string + type: object + domain: + description: |- + Domain specifies the host name for intercepting incoming requests. + Most likely, you will want to use a wildcard name, like *.example.com. + If not set, the domain of the OpenShift Ingress is used. + If you choose to generate a certificate, this is the domain used for the certificate request. 
+ type: string + type: object + managementState: + default: Managed + enum: + - Managed + - Unmanaged + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + name: + default: knative-serving + description: |- + Name specifies the name of the KNativeServing resource that is going to be + created to instruct the KNative Operator to deploy KNative serving components. + This resource is created in the "knative-serving" namespace. + type: string + type: object + type: object + status: + description: KserveStatus defines the observed state of Kserve + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. 
+ maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + defaultDeploymentMode: + description: |- + DefaultDeploymentMode is the value of the defaultDeploymentMode field + as read from the "deploy" JSON in the inferenceservice-config ConfigMap + type: string + observedGeneration: + format: int64 + type: integer + phase: + type: string + type: object + type: object + x-kubernetes-validations: + - message: Kserve name must be default-kserve + rule: self.metadata.name == 'default-kserve' + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/bundle/manifests/components.platform.opendatahub.io_kueues.yaml b/bundle/manifests/components.platform.opendatahub.io_kueues.yaml new file mode 100644 index 00000000000..87ada19027e --- /dev/null +++ b/bundle/manifests/components.platform.opendatahub.io_kueues.yaml @@ -0,0 +1,159 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + creationTimestamp: null + name: kueues.components.platform.opendatahub.io +spec: + group: components.platform.opendatahub.io + names: + kind: Kueue + listKind: KueueList + plural: kueues + singular: kueue + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Ready + jsonPath: 
.status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - description: Reason + jsonPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: Kueue is the Schema for the kueues API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: KueueSpec defines the desired state of Kueue + properties: + devFlags: + description: Add developer fields + properties: + manifests: + description: List of custom manifests for the given component + items: + properties: + contextDir: + default: manifests + description: contextDir is the relative path to the folder + containing manifests in a repository, default value "manifests" + type: string + sourcePath: + default: "" + description: 'sourcePath is the subpath within contextDir + where kustomize builds start. Examples include any sub-folder + or path: `base`, `overlays/dev`, `default`, `odh` etc.' + type: string + uri: + default: "" + description: uri is the URI point to a git repo with tag/branch. + e.g. 
https://github.com/org/repo/tarball/ + type: string + type: object + type: array + type: object + type: object + status: + description: KueueStatus defines the observed state of Kueue + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + format: int64 + type: integer + phase: + type: string + type: object + type: object + x-kubernetes-validations: + - message: Kueue name must be default-kueue + rule: self.metadata.name == 'default-kueue' + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/bundle/manifests/components.platform.opendatahub.io_modelcontrollers.yaml b/bundle/manifests/components.platform.opendatahub.io_modelcontrollers.yaml new file mode 100644 index 00000000000..80555f29a06 --- /dev/null +++ b/bundle/manifests/components.platform.opendatahub.io_modelcontrollers.yaml @@ -0,0 +1,220 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + creationTimestamp: null + name: modelcontrollers.components.platform.opendatahub.io +spec: + group: components.platform.opendatahub.io + names: + kind: ModelController + listKind: ModelControllerList + plural: modelcontrollers + singular: modelcontroller + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Ready + jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - description: Reason + jsonPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string + - description: devFlag's URI used to download + jsonPath: .status.URI + name: URI + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: ModelController is the Schema for the modelcontroller API, it + is a shared component between kserve and 
modelmeshserving + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ModelControllerSpec defines the desired state of ModelController + properties: + kserve: + description: ModelMeshServing DSCModelMeshServing `json:"modelMeshServing,omitempty"` + properties: + devFlags: + description: Add developer fields + properties: + manifests: + description: List of custom manifests for the given component + items: + properties: + contextDir: + default: manifests + description: contextDir is the relative path to the + folder containing manifests in a repository, default + value "manifests" + type: string + sourcePath: + default: "" + description: 'sourcePath is the subpath within contextDir + where kustomize builds start. Examples include any + sub-folder or path: `base`, `overlays/dev`, `default`, + `odh` etc.' + type: string + uri: + default: "" + description: uri is the URI point to a git repo with + tag/branch. e.g. 
https://github.com/org/repo/tarball/ + type: string + type: object + type: array + type: object + managementState: + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + nim: + description: nimSpec enables NVIDIA NIM integration + properties: + managementState: + default: Managed + enum: + - Managed + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + type: object + type: object + modelMeshServing: + description: a mini version of the DSCModelMeshServing only keep devflags + and management spec + properties: + devFlags: + description: Add developer fields + properties: + manifests: + description: List of custom manifests for the given component + items: + properties: + contextDir: + default: manifests + description: contextDir is the relative path to the + folder containing manifests in a repository, default + value "manifests" + type: string + sourcePath: + default: "" + description: 'sourcePath is the subpath within contextDir + where kustomize builds start. Examples include any + sub-folder or path: `base`, `overlays/dev`, `default`, + `odh` etc.' + type: string + uri: + default: "" + description: uri is the URI point to a git repo with + tag/branch. e.g. https://github.com/org/repo/tarball/ + type: string + type: object + type: array + type: object + managementState: + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + type: object + type: object + status: + description: ModelControllerStatus defines the observed state of ModelController + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
+ format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + format: int64 + type: integer + phase: + type: string + type: object + type: object + x-kubernetes-validations: + - message: ModelController name must be default-modelcontroller + rule: self.metadata.name == 'default-modelcontroller' + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/bundle/manifests/components.platform.opendatahub.io_modelmeshservings.yaml b/bundle/manifests/components.platform.opendatahub.io_modelmeshservings.yaml new file mode 100644 index 00000000000..a9be9f4a080 --- /dev/null +++ b/bundle/manifests/components.platform.opendatahub.io_modelmeshservings.yaml @@ -0,0 +1,159 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + creationTimestamp: null + name: modelmeshservings.components.platform.opendatahub.io +spec: + group: components.platform.opendatahub.io + names: + kind: ModelMeshServing + listKind: ModelMeshServingList + plural: modelmeshservings + singular: modelmeshserving + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Ready + jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - description: Reason + jsonPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: ModelMeshServing is the Schema for the modelmeshservings API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an 
object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ModelMeshServingSpec defines the desired state of ModelMeshServing + properties: + devFlags: + description: Add developer fields + properties: + manifests: + description: List of custom manifests for the given component + items: + properties: + contextDir: + default: manifests + description: contextDir is the relative path to the folder + containing manifests in a repository, default value "manifests" + type: string + sourcePath: + default: "" + description: 'sourcePath is the subpath within contextDir + where kustomize builds start. Examples include any sub-folder + or path: `base`, `overlays/dev`, `default`, `odh` etc.' + type: string + uri: + default: "" + description: uri is the URI point to a git repo with tag/branch. + e.g. https://github.com/org/repo/tarball/ + type: string + type: object + type: array + type: object + type: object + status: + description: ModelMeshServingStatus defines the observed state of ModelMeshServing + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. 
If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + format: int64 + type: integer + phase: + type: string + type: object + type: object + x-kubernetes-validations: + - message: ModelMeshServing name must be default-modelmeshserving + rule: self.metadata.name == 'default-modelmeshserving' + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/bundle/manifests/components.platform.opendatahub.io_modelregistries.yaml b/bundle/manifests/components.platform.opendatahub.io_modelregistries.yaml new file mode 100644 index 00000000000..3853185b0f1 --- /dev/null +++ b/bundle/manifests/components.platform.opendatahub.io_modelregistries.yaml @@ -0,0 +1,168 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + creationTimestamp: null + name: modelregistries.components.platform.opendatahub.io +spec: + group: components.platform.opendatahub.io + names: + kind: ModelRegistry + listKind: ModelRegistryList + plural: modelregistries + singular: modelregistry + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Ready + jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - description: Reason + jsonPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: ModelRegistry is the Schema for the modelregistries API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ModelRegistrySpec defines the desired state of ModelRegistry + properties: + devFlags: + description: Add developer fields + properties: + manifests: + description: List of custom manifests for the given component + items: + properties: + contextDir: + default: manifests + description: contextDir is the relative path to the folder + containing manifests in a repository, default value "manifests" + type: string + sourcePath: + default: "" + description: 'sourcePath is the subpath within contextDir + where kustomize builds start. Examples include any sub-folder + or path: `base`, `overlays/dev`, `default`, `odh` etc.' + type: string + uri: + default: "" + description: uri is the URI point to a git repo with tag/branch. + e.g. https://github.com/org/repo/tarball/ + type: string + type: object + type: array + type: object + registriesNamespace: + default: rhoai-model-registries + description: Namespace for model registries to be installed, configurable + only once when model registry is enabled, defaults to "odh-model-registries" + maxLength: 63 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?)?$ + type: string + type: object + status: + description: ModelRegistryStatus defines the observed state of ModelRegistry + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. 
+ properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + format: int64 + type: integer + phase: + type: string + registriesNamespace: + type: string + type: object + type: object + x-kubernetes-validations: + - message: ModelRegistry name must be default-modelregistry + rule: self.metadata.name == 'default-modelregistry' + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/bundle/manifests/components.platform.opendatahub.io_rays.yaml b/bundle/manifests/components.platform.opendatahub.io_rays.yaml new file mode 100644 index 00000000000..e1073bed74b --- /dev/null +++ b/bundle/manifests/components.platform.opendatahub.io_rays.yaml @@ -0,0 +1,159 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + creationTimestamp: null + name: rays.components.platform.opendatahub.io +spec: + group: components.platform.opendatahub.io + names: + kind: Ray + listKind: RayList + plural: rays + singular: ray + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Ready + jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - description: Reason + jsonPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: Ray is the Schema for the rays API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: RaySpec defines the desired state of Ray + properties: + devFlags: + description: Add developer fields + properties: + manifests: + description: List of custom manifests for the given component + items: + properties: + contextDir: + default: manifests + description: contextDir is the relative path to the folder + containing manifests in a repository, default value "manifests" + type: string + sourcePath: + default: "" + description: 'sourcePath is the subpath within contextDir + where kustomize builds start. Examples include any sub-folder + or path: `base`, `overlays/dev`, `default`, `odh` etc.' + type: string + uri: + default: "" + description: uri is the URI point to a git repo with tag/branch. + e.g. https://github.com/org/repo/tarball/ + type: string + type: object + type: array + type: object + type: object + status: + description: RayStatus defines the observed state of Ray + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
+ format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + format: int64 + type: integer + phase: + type: string + type: object + type: object + x-kubernetes-validations: + - message: Ray name must be default-ray + rule: self.metadata.name == 'default-ray' + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/bundle/manifests/components.platform.opendatahub.io_trainingoperators.yaml b/bundle/manifests/components.platform.opendatahub.io_trainingoperators.yaml new file mode 100644 index 00000000000..7dc395ac2b1 --- /dev/null +++ b/bundle/manifests/components.platform.opendatahub.io_trainingoperators.yaml @@ -0,0 +1,159 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + creationTimestamp: null + name: trainingoperators.components.platform.opendatahub.io +spec: + group: components.platform.opendatahub.io + names: + kind: TrainingOperator + listKind: TrainingOperatorList + plural: trainingoperators + singular: trainingoperator + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Ready + jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - description: Reason + jsonPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: TrainingOperator is the Schema for the trainingoperators API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TrainingOperatorSpec defines the desired state of TrainingOperator + properties: + devFlags: + description: Add developer fields + properties: + manifests: + description: List of custom manifests for the given component + items: + properties: + contextDir: + default: manifests + description: contextDir is the relative path to the folder + containing manifests in a repository, default value "manifests" + type: string + sourcePath: + default: "" + description: 'sourcePath is the subpath within contextDir + where kustomize builds start. Examples include any sub-folder + or path: `base`, `overlays/dev`, `default`, `odh` etc.' + type: string + uri: + default: "" + description: uri is the URI point to a git repo with tag/branch. + e.g. https://github.com/org/repo/tarball/ + type: string + type: object + type: array + type: object + type: object + status: + description: TrainingOperatorStatus defines the observed state of TrainingOperator + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
+ format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + format: int64 + type: integer + phase: + type: string + type: object + type: object + x-kubernetes-validations: + - message: TrainingOperator name must be default-trainingoperator + rule: self.metadata.name == 'default-trainingoperator' + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/bundle/manifests/components.platform.opendatahub.io_trustyais.yaml b/bundle/manifests/components.platform.opendatahub.io_trustyais.yaml new file mode 100644 index 00000000000..522954961fe --- /dev/null +++ b/bundle/manifests/components.platform.opendatahub.io_trustyais.yaml @@ -0,0 +1,159 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + creationTimestamp: null + name: trustyais.components.platform.opendatahub.io +spec: + group: components.platform.opendatahub.io + names: + kind: TrustyAI + listKind: TrustyAIList + plural: trustyais + singular: trustyai + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Ready + jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - description: Reason + jsonPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: TrustyAI is the Schema for the trustyais API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TrustyAISpec defines the desired state of TrustyAI + properties: + devFlags: + description: Add developer fields + properties: + manifests: + description: List of custom manifests for the given component + items: + properties: + contextDir: + default: manifests + description: contextDir is the relative path to the folder + containing manifests in a repository, default value "manifests" + type: string + sourcePath: + default: "" + description: 'sourcePath is the subpath within contextDir + where kustomize builds start. Examples include any sub-folder + or path: `base`, `overlays/dev`, `default`, `odh` etc.' + type: string + uri: + default: "" + description: uri is the URI point to a git repo with tag/branch. + e.g. https://github.com/org/repo/tarball/ + type: string + type: object + type: array + type: object + type: object + status: + description: TrustyAIStatus defines the observed state of TrustyAI + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
+ format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + format: int64 + type: integer + phase: + type: string + type: object + type: object + x-kubernetes-validations: + - message: TrustyAI name must be default-trustyai + rule: self.metadata.name == 'default-trustyai' + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/bundle/manifests/components.platform.opendatahub.io_workbenches.yaml b/bundle/manifests/components.platform.opendatahub.io_workbenches.yaml new file mode 100644 index 00000000000..d75be60e4af --- /dev/null +++ b/bundle/manifests/components.platform.opendatahub.io_workbenches.yaml @@ -0,0 +1,159 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + creationTimestamp: null + name: workbenches.components.platform.opendatahub.io +spec: + group: components.platform.opendatahub.io + names: + kind: Workbenches + listKind: WorkbenchesList + plural: workbenches + singular: workbenches + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Ready + jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - description: Reason + jsonPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: Workbenches is the Schema for the workbenches API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WorkbenchesSpec defines the desired state of Workbenches + properties: + devFlags: + description: Add developer fields + properties: + manifests: + description: List of custom manifests for the given component + items: + properties: + contextDir: + default: manifests + description: contextDir is the relative path to the folder + containing manifests in a repository, default value "manifests" + type: string + sourcePath: + default: "" + description: 'sourcePath is the subpath within contextDir + where kustomize builds start. Examples include any sub-folder + or path: `base`, `overlays/dev`, `default`, `odh` etc.' + type: string + uri: + default: "" + description: uri is the URI point to a git repo with tag/branch. + e.g. https://github.com/org/repo/tarball/ + type: string + type: object + type: array + type: object + type: object + status: + description: WorkbenchesStatus defines the observed state of Workbenches + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
+ format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + format: int64 + type: integer + phase: + type: string + type: object + type: object + x-kubernetes-validations: + - message: Workbenches name must be default-workbenches + rule: self.metadata.name == 'default-workbenches' + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/bundle/manifests/datasciencecluster.opendatahub.io_datascienceclusters.yaml b/bundle/manifests/datasciencecluster.opendatahub.io_datascienceclusters.yaml index a9727177128..0ee9f3163e9 100644 --- a/bundle/manifests/datasciencecluster.opendatahub.io_datascienceclusters.yaml +++ b/bundle/manifests/datasciencecluster.opendatahub.io_datascienceclusters.yaml @@ -48,7 +48,7 @@ spec: codeflare: description: |- CodeFlare component configuration. - If CodeFlare Operator has been installed in the cluster, it should be uninstalled first before enabled component. + If CodeFlare Operator has been installed in the cluster, it should be uninstalled first before enabling component. properties: devFlags: description: Add developer fields @@ -141,8 +141,8 @@ spec: type: object datasciencepipelines: description: |- - DataServicePipeline component configuration. - Require OpenShift Pipelines Operator to be installed before enable component + DataSciencePipeline component configuration. + Requires OpenShift Pipelines Operator to be installed before enable component properties: devFlags: description: Add developer fields @@ -190,7 +190,7 @@ spec: kserve: description: |- Kserve component configuration. 
- Require OpenShift Serverless and OpenShift Service Mesh Operators to be installed before enable component + Requires OpenShift Serverless and OpenShift Service Mesh Operators to be installed before enable component Does not support enabled ModelMeshServing at the same time properties: defaultDeploymentMode: @@ -245,6 +245,17 @@ spec: - Removed pattern: ^(Managed|Unmanaged|Force|Removed)$ type: string + nim: + description: Configures and enables NVIDIA NIM integration + properties: + managementState: + default: Managed + enum: + - Managed + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + type: object serving: description: |- Serving configures the KNative-Serving stack used for model serving. A Service @@ -445,7 +456,7 @@ spec: default: rhoai-model-registries description: Namespace for model registries to be installed, configurable only once when model registry is enabled, defaults - to "rhoai-model-registries" + to "odh-model-registries" maxLength: 63 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?)?$ type: string @@ -648,12 +659,213 @@ spec: components: description: Expose component's specific status properties: + codeflare: + description: CodeFlare component status. + properties: + managementState: + description: |- + Set to one of the following values: + + - "Managed" : the operator is actively managing the component and trying to keep it active. + It will only upgrade the component if it is safe to do so + + - "Removed" : the operator is actively managing the component and will not install it, + or if it is installed, the operator will try to remove it + enum: + - Managed + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + type: object + dashboard: + description: Dashboard component status. + properties: + managementState: + description: |- + Set to one of the following values: + + - "Managed" : the operator is actively managing the component and trying to keep it active. 
+ It will only upgrade the component if it is safe to do so + + - "Removed" : the operator is actively managing the component and will not install it, + or if it is installed, the operator will try to remove it + enum: + - Managed + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + url: + type: string + type: object + datasciencepipelines: + description: DataSciencePipeline component status. + properties: + managementState: + description: |- + Set to one of the following values: + + - "Managed" : the operator is actively managing the component and trying to keep it active. + It will only upgrade the component if it is safe to do so + + - "Removed" : the operator is actively managing the component and will not install it, + or if it is installed, the operator will try to remove it + enum: + - Managed + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + type: object + kserve: + description: Kserve component status. + properties: + defaultDeploymentMode: + description: |- + DefaultDeploymentMode is the value of the defaultDeploymentMode field + as read from the "deploy" JSON in the inferenceservice-config ConfigMap + type: string + managementState: + description: |- + Set to one of the following values: + + - "Managed" : the operator is actively managing the component and trying to keep it active. + It will only upgrade the component if it is safe to do so + + - "Removed" : the operator is actively managing the component and will not install it, + or if it is installed, the operator will try to remove it + enum: + - Managed + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + type: object + kueue: + description: Kueue component status. + properties: + managementState: + description: |- + Set to one of the following values: + + - "Managed" : the operator is actively managing the component and trying to keep it active. 
+ It will only upgrade the component if it is safe to do so + + - "Removed" : the operator is actively managing the component and will not install it, + or if it is installed, the operator will try to remove it + enum: + - Managed + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + type: object + modelmeshserving: + description: ModelMeshServing component status. + properties: + managementState: + description: |- + Set to one of the following values: + + - "Managed" : the operator is actively managing the component and trying to keep it active. + It will only upgrade the component if it is safe to do so + + - "Removed" : the operator is actively managing the component and will not install it, + or if it is installed, the operator will try to remove it + enum: + - Managed + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + type: object modelregistry: - description: ModelRegistry component status + description: ModelRegistry component status. properties: + managementState: + description: |- + Set to one of the following values: + + - "Managed" : the operator is actively managing the component and trying to keep it active. + It will only upgrade the component if it is safe to do so + + - "Removed" : the operator is actively managing the component and will not install it, + or if it is installed, the operator will try to remove it + enum: + - Managed + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string registriesNamespace: type: string type: object + ray: + description: Ray component status. + properties: + managementState: + description: |- + Set to one of the following values: + + - "Managed" : the operator is actively managing the component and trying to keep it active. 
+ It will only upgrade the component if it is safe to do so + + - "Removed" : the operator is actively managing the component and will not install it, + or if it is installed, the operator will try to remove it + enum: + - Managed + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + type: object + trainingoperator: + description: Training Operator component status. + properties: + managementState: + description: |- + Set to one of the following values: + + - "Managed" : the operator is actively managing the component and trying to keep it active. + It will only upgrade the component if it is safe to do so + + - "Removed" : the operator is actively managing the component and will not install it, + or if it is installed, the operator will try to remove it + enum: + - Managed + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + type: object + trustyai: + description: TrustyAI component status. + properties: + managementState: + description: |- + Set to one of the following values: + + - "Managed" : the operator is actively managing the component and trying to keep it active. + It will only upgrade the component if it is safe to do so + + - "Removed" : the operator is actively managing the component and will not install it, + or if it is installed, the operator will try to remove it + enum: + - Managed + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + type: object + workbenches: + description: Workbenches component status. + properties: + managementState: + description: |- + Set to one of the following values: + + - "Managed" : the operator is actively managing the component and trying to keep it active. 
+ It will only upgrade the component if it is safe to do so + + - "Removed" : the operator is actively managing the component and will not install it, + or if it is installed, the operator will try to remove it + enum: + - Managed + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + type: object type: object conditions: description: Conditions describes the state of the DataScienceCluster @@ -691,6 +903,10 @@ spec: type: boolean description: List of components with status if installed or not type: object + observedGeneration: + description: The generation observed by the deployment controller. + format: int64 + type: integer phase: description: |- Phase describes the Phase of DataScienceCluster reconciliation state diff --git a/bundle/manifests/dscinitialization.opendatahub.io_dscinitializations.yaml b/bundle/manifests/dscinitialization.opendatahub.io_dscinitializations.yaml index 787774ca475..13b7e23ef7c 100644 --- a/bundle/manifests/dscinitialization.opendatahub.io_dscinitializations.yaml +++ b/bundle/manifests/dscinitialization.opendatahub.io_dscinitializations.yaml @@ -67,8 +67,13 @@ spec: Internal development useful field to test customizations. This is not recommended to be used in production environment. properties: + logLevel: + description: Override Zap log level. Can be "debug", "info", "error" + or a number (more verbose). + type: string logmode: default: production + description: '## DEPRECATED ##: Ignored, use LogLevel instead' enum: - devel - development @@ -77,7 +82,9 @@ spec: - default type: string manifestsUri: - description: Custom manifests uri for odh-manifests + description: |- + ## DEPRECATED ## : ManifestsUri set on DSCI is not maintained. + Custom manifests uri for odh-manifests type: string type: object monitoring: @@ -86,18 +93,22 @@ spec: managementState: description: |- Set to one of the following values: + - "Managed" : the operator is actively managing the component and trying to keep it active. 
- It will only upgrade the component if it is safe to do so. + It will only upgrade the component if it is safe to do so + - "Removed" : the operator is actively managing the component and will not install it, - or if it is installed, the operator will try to remove it. + or if it is installed, the operator will try to remove it enum: - Managed - Removed pattern: ^(Managed|Unmanaged|Force|Removed)$ type: string namespace: - default: redhat-ods-monitoring - description: Namespace for monitoring if it is enabled + default: opendatahub + description: |- + monitoring spec exposed to DSCI api + Namespace for monitoring if it is enabled maxLength: 63 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?)?$ type: string diff --git a/bundle/manifests/rhods-operator.clusterserviceversion.yaml b/bundle/manifests/rhods-operator.clusterserviceversion.yaml index 2973e8927af..59b204652a7 100644 --- a/bundle/manifests/rhods-operator.clusterserviceversion.yaml +++ b/bundle/manifests/rhods-operator.clusterserviceversion.yaml @@ -30,6 +30,9 @@ metadata: }, "kserve": { "managementState": "Managed", + "nim": { + "managementState": "Managed" + }, "serving": { "ingressGateway": { "certificate": { @@ -54,7 +57,7 @@ metadata: "managementState": "Managed" }, "trainingoperator": { - "managementState": "Removed" + "managementState": "Managed" }, "trustyai": { "managementState": "Managed" @@ -103,7 +106,7 @@ metadata: categories: AI/Machine Learning, Big Data certified: "False" containerImage: REPLACE_IMAGE:latest - createdAt: "2024-11-05T15:13:29Z" + createdAt: "2025-01-14T06:42:57Z" description: Operator for deployment and management of Red Hat OpenShift AI features.operators.openshift.io/cnf: "false" features.operators.openshift.io/cni: "false" @@ -184,12 +187,27 @@ metadata: operators.operatorframework.io/project_layout: go.kubebuilder.io/v3 repository: https://github.com/red-hat-data-services/rhods-operator support: Red Hat OpenShift AI - name: rhods-operator.v2.16.0 + name: rhods-operator.v2.21.0 
namespace: placeholder spec: apiservicedefinitions: {} customresourcedefinitions: owned: + - description: Auth is the Schema for the auths API + displayName: Auth + kind: Auth + name: auths.services.platform.opendatahub.io + version: v1alpha1 + - description: CodeFlare is the Schema for the codeflares API + displayName: Code Flare + kind: CodeFlare + name: codeflares.components.platform.opendatahub.io + version: v1alpha1 + - description: Dashboard is the Schema for the dashboards API + displayName: Dashboard + kind: Dashboard + name: dashboards.components.platform.opendatahub.io + version: v1alpha1 - description: DataScienceCluster is the Schema for the datascienceclusters API. displayName: Data Science Cluster kind: DataScienceCluster @@ -199,6 +217,12 @@ spec: displayName: Components path: components version: v1 + - description: DataSciencePipelines is the Schema for the datasciencepipelines + API + displayName: Data Science Pipelines + kind: DataSciencePipelines + name: datasciencepipelines.components.platform.opendatahub.io + version: v1alpha1 - description: DSCInitialization is the Schema for the dscinitializations API. 
displayName: DSC Initialization kind: DSCInitialization @@ -237,6 +261,54 @@ spec: - kind: FeatureTracker name: featuretrackers.features.opendatahub.io version: v1 + - description: Kserve is the Schema for the kserves API + displayName: Kserve + kind: Kserve + name: kserves.components.platform.opendatahub.io + version: v1alpha1 + - description: Kueue is the Schema for the kueues API + displayName: Kueue + kind: Kueue + name: kueues.components.platform.opendatahub.io + version: v1alpha1 + - kind: ModelController + name: modelcontrollers.components.platform.opendatahub.io + version: v1alpha1 + - description: ModelMeshServing is the Schema for the modelmeshservings API + displayName: Model Mesh Serving + kind: ModelMeshServing + name: modelmeshservings.components.platform.opendatahub.io + version: v1alpha1 + - description: ModelRegistry is the Schema for the modelregistries API + displayName: Model Registry + kind: ModelRegistry + name: modelregistries.components.platform.opendatahub.io + version: v1alpha1 + - description: Monitoring is the Schema for the monitorings API + displayName: Monitoring + kind: Monitoring + name: monitorings.services.platform.opendatahub.io + version: v1alpha1 + - description: Ray is the Schema for the rays API + displayName: Ray + kind: Ray + name: rays.components.platform.opendatahub.io + version: v1alpha1 + - description: TrainingOperator is the Schema for the trainingoperators API + displayName: Training Operator + kind: TrainingOperator + name: trainingoperators.components.platform.opendatahub.io + version: v1alpha1 + - description: TrustyAI is the Schema for the trustyais API + displayName: Trusty AI + kind: TrustyAI + name: trustyais.components.platform.opendatahub.io + version: v1alpha1 + - description: Workbenches is the Schema for the workbenches API + displayName: Workbenches + kind: Workbenches + name: workbenches.components.platform.opendatahub.io + version: v1alpha1 description: |- Red Hat OpenShift AI is a complete platform 
for the entire lifecycle of your AI/ML projects. @@ -268,14 +340,6 @@ spec: spec: clusterPermissions: - rules: - - apiGroups: - - '*' - resources: - - customresourcedefinitions - verbs: - - get - - list - - watch - apiGroups: - '*' resources: @@ -296,12 +360,6 @@ spec: - patch - update - watch - - apiGroups: - - addons.managed.openshift.io - resources: - - addons - verbs: - - get - apiGroups: - admissionregistration.k8s.io resources: @@ -325,6 +383,7 @@ spec: - get - list - patch + - update - watch - apiGroups: - apiregistration.k8s.io @@ -447,32 +506,94 @@ spec: - create - patch - apiGroups: - - config.openshift.io + - components.platform.opendatahub.io resources: - - authentications - - clusterversions + - codeflares + - dashboards + - datasciencepipelines + - kserves + - kueues + - modelcontrollers + - modelmeshservings + - modelregistries + - rays + - trainingoperators + - trustyais + - workbenches verbs: + - create + - delete - get - list + - patch + - update - watch - apiGroups: - - config.openshift.io + - components.platform.opendatahub.io resources: - - ingresses + - codeflares/finalizers + - datasciencepipelines/finalizers + - kserves/finalizers + - kueues/finalizers + - modelcontrollers/finalizers + - modelmeshservings/finalizers + - modelregistries/finalizers + - rays/finalizers + - trainingoperators/finalizers + - trustyais/finalizers + - workbenches/finalizers + verbs: + - update + - apiGroups: + - components.platform.opendatahub.io + resources: + - codeflares/status + - dashboards/status + - datasciencepipelines/status + - kserves/status + - kueues/status + - modelcontrollers/status + - modelmeshservings/status + - modelregistries/status + - rays/status + - trainingoperators/status + - trustyais/status + - workbenches/status verbs: - get + - patch + - update - apiGroups: - - console.openshift.io + - components.platform.opendatahub.io resources: - - consolelinks + - dashboards/finalizers verbs: - create - - delete - get + - list - patch + - update + - 
use + - watch + - apiGroups: + - config.openshift.io + resources: + - authentications + - clusterversions + verbs: + - get + - list + - watch + - apiGroups: + - config.openshift.io + resources: + - ingresses + verbs: + - get - apiGroups: - console.openshift.io resources: + - consolelinks - odhquickstarts verbs: - create @@ -480,6 +601,7 @@ spec: - get - list - patch + - watch - apiGroups: - controller-runtime.sigs.k8s.io resources: @@ -595,6 +717,7 @@ spec: - get - list - patch + - watch - apiGroups: - datasciencecluster.opendatahub.io resources: @@ -602,6 +725,7 @@ spec: verbs: - create - delete + - deletecollection - get - list - patch @@ -650,6 +774,7 @@ spec: verbs: - create - delete + - deletecollection - get - list - patch @@ -753,7 +878,6 @@ spec: resources: - servicemeshcontrolplanes - servicemeshmemberrolls - - servicemeshmembers - servicemeshmembers/finalizers verbs: - create @@ -763,6 +887,19 @@ spec: - update - use - watch + - apiGroups: + - maistra.io + resources: + - servicemeshmembers + verbs: + - create + - delete + - get + - list + - patch + - update + - use + - watch - apiGroups: - modelregistry.opendatahub.io resources: @@ -801,7 +938,6 @@ spec: - prometheuses - prometheuses/finalizers - prometheuses/status - - prometheusrules - thanosrulers - thanosrulers/finalizers - thanosrulers/status @@ -823,6 +959,18 @@ spec: - patch - update - watch + - apiGroups: + - monitoring.coreos.com + resources: + - prometheusrules + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - watch - apiGroups: - monitoring.coreos.com resources: @@ -1032,6 +1180,35 @@ spec: - securitycontextconstraints verbs: - '*' + - apiGroups: + - services.platform.opendatahub.io + resources: + - auths + - monitorings + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - services.platform.opendatahub.io + resources: + - auths/finalizers + - monitorings/finalizers + verbs: + - update + - apiGroups: + - 
services.platform.opendatahub.io + resources: + - auths/status + - monitorings/status + verbs: + - get + - patch + - update - apiGroups: - serving.knative.dev resources: @@ -1266,7 +1443,7 @@ spec: minKubeVersion: 1.25.0 provider: name: Red Hat - version: 2.16.0 + version: 2.21.0 webhookdefinitions: - admissionReviewVersions: - v1 diff --git a/bundle/manifests/services.platform.opendatahub.io_auths.yaml b/bundle/manifests/services.platform.opendatahub.io_auths.yaml new file mode 100644 index 00000000000..f58d81f1fea --- /dev/null +++ b/bundle/manifests/services.platform.opendatahub.io_auths.yaml @@ -0,0 +1,144 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + creationTimestamp: null + name: auths.services.platform.opendatahub.io +spec: + group: services.platform.opendatahub.io + names: + kind: Auth + listKind: AuthList + plural: auths + singular: auth + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Ready + jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - description: Reason + jsonPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: Auth is the Schema for the auths API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AuthSpec defines the desired state of Auth + properties: + adminGroups: + items: + type: string + type: array + allowedGroups: + items: + type: string + type: array + required: + - adminGroups + - allowedGroups + type: object + status: + description: AuthStatus defines the observed state of Auth + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. 
+ maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + format: int64 + type: integer + phase: + type: string + type: object + type: object + x-kubernetes-validations: + - message: Auth name must be auth + rule: self.metadata.name == 'auth' + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/bundle/manifests/services.platform.opendatahub.io_monitorings.yaml b/bundle/manifests/services.platform.opendatahub.io_monitorings.yaml new file mode 100644 index 00000000000..9aac206e770 --- /dev/null +++ b/bundle/manifests/services.platform.opendatahub.io_monitorings.yaml @@ -0,0 +1,147 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + creationTimestamp: null + name: monitorings.services.platform.opendatahub.io +spec: + group: services.platform.opendatahub.io + names: + kind: Monitoring + listKind: MonitoringList + plural: monitorings + singular: monitoring + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Ready + jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - description: Reason + jsonPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string + - description: URL + 
jsonPath: .status.url + name: URL + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: Monitoring is the Schema for the monitorings API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MonitoringSpec defines the desired state of Monitoring + properties: + namespace: + default: opendatahub + description: |- + monitoring spec exposed to DSCI api + Namespace for monitoring if it is enabled + maxLength: 63 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?)?$ + type: string + type: object + status: + description: MonitoringStatus defines the observed state of Monitoring + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. 
+ maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + format: int64 + type: integer + phase: + type: string + url: + type: string + type: object + type: object + x-kubernetes-validations: + - message: Monitoring name must be default-monitoring + rule: self.metadata.name == 'default-monitoring' + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: null + storedVersions: null diff --git a/codecov.yml b/codecov.yml new file mode 100644 index 00000000000..bfdc9877d9a --- /dev/null +++ b/codecov.yml @@ -0,0 +1,8 @@ +coverage: + status: + project: + default: + informational: true + patch: + default: + informational: true diff --git a/components/Component Reconcile Workflow.png b/components/Component Reconcile Workflow.png deleted file mode 100644 index e3b37957fc05a4d2d8484fe7c1165114c3df463b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 83755 zcmeFY2|QKn{y#1xLTwR=WY%EbhGfVb36*4~jcuDZHnvTXF=fb5hD;4888Rf(MiiNq zA~PA2DO;vY|7RIG?m6cUzx%!4d(Zj)@9T81p0(Dqp5ZgTKkw((1&y=Rdk^m=A|j$z zRZ+Y|M6_#~h=>$NP6k?rLPJEruU(i+XXS~qnwZCkh)xeXE9pAhxmj4*B8j-*3Y&j% z2@6`HG0t3YMJ{1sgo6XWxfKHCgs{W#+asMp6S!}OHn*~{LYi+rBP=K^!Y2s*yDTKe z1(y?+06*YD{1U>#`kT)qP)Pf&4%N}_R<^bXE@5R+enHUHF?EEQmAx|>W61@V2iL0h z&PZGE8#IHTnp)t8F8E(a&_qboMEp4TQ_jJ`7O8_YRkH%4Q5J=Z^TWkJ^D#A*%j)O3 zgcZQwwpKPs@artn%mxj;qG0KSwgWB7Lc+rQg3y1UQ43*#aI)Ib1vE3HGXk|aD-*YK z3JQE$N~S_r#1-9??9L+`ls9MNigdzQq3yROFTxMH+`Qq8b3kskqR?oRE%+eRCB_T^ z{@hqiq0JUkw39i~Y3qHc39iH?CdBfzLP-n;;okZq1rTGzc)`H?i;EOM!ose#*2S@JAc{Kvq)e{ zoSkrBW1Wz;2xlwToxKPPBDMq?_47ICWB?IAgW%&fhY1rpMZ 
zI$UH+#9&5mKnO*9P2-$f?KNfcU5r9=|F1$z6rkfzpjG+h!p-JixVK=F(`r2AIDH_L6^u(L8Zhg^re6B1+PjxgPDBannS zK{!z1DO7bghdG2fy3=%1<&axa?M8?Ia&UX_IG3=PEl3RIO~D^1=#Oo4q2dBH zZQKLy^XKi~2=-qlw|@XD@-zF{wj$dZ(+(^0^O>DyWZU{|OUHjcTNU1lwRRZne`kvH zGYis#k|2KZ|GgY(%dq@i10=K+OZ^)y5Tqi1ADr5VFgE~*za8KHqd3LZg6vRmReLjA z7jsC%t?V(*2!IzmLx3w#c(j>|9dPyA*A{3e(5i?w138P}WhBDx&!@6*XwjjR@$adu z#MX0v9F-N?M&|x30O8gO{u?#wZ)?bA&bIjz(w6NEa0_<$brdhM?c=va;7{w*xAUnT zIxlB#4wT@x2<7j4ZbdBxEe9kh5&apx_OEeG__jCxdnRD}x$XV`193&IGvqCF^z01?7CJE3im+l}T(3xvx?W?+uMSZ)-yAUWHHK5dZ}P$IE8h~GiX z|Am00@UOP@r#IMPHGX=Gow2^?Hni{uG8#uzs80`AzfhjOcfo{)K}Cz$sX(pA}$bz>&ECND;>X zW9Da6^^c4dN?v|hG~vGkiy$x{ZUd{TXkq0BnbUuZ?Su<&6}q-uqS&@a++jO~wh^

AINrYO=ZWDLl!{mf0vf=&xBn60o%ESnf!flWy_@e-?yDxE3zYE+RU^rI3Yn5 z)g0I`v^_vieq$AnA^9OAs$dC7bl|tc2BZq40OP!AT|r5AgOCH>1y$*v2IF^3){Z8# zt=|4NaJvQiAkBALq8+{bCgl4=nw;&ZYs=(_ZijC>P0lu4`xho>yYUZXa<+hz9YS;g zP(*<@{QCm5DQz330EiJ_dqJ`UQK&4DpcV&tX?qJRlpVt1Prcc{@-L^h_i*QC|5^OY z7OJ;nvlX0>2xkb6-B$FUC6Y^!5itY&;q5yhzWI3*yrBFuK>Jmc061b$Xtjg*x1Cb` zE7&9utq?FyAaW6ut^H+6x0UDpPU`n>N#nM;LtA#?_tH3_?eJu$UD&oK{}Hp`sDqZf zEP6#z*j-pv2yU*RX^C>&ro;Su1Ch-=`jy`XHszlni~^$i2J*f$TKL_G0@?kic#Qwc zM4@e%c-ymVNtDn}@!1xYKP^tYy+b=}(WXCf-sG%q`V*uuWPT8K4nWoIO)=1~&5hk5 znSZ#_EqvP@Z137nly0ZbfJ`oAId>X@|0oLl*Y!hK@He}(GXsY_|F8FGGiv;+DeaDA z+X-cQ3=SLZ|Jd^y=#5B63$YPfZ;Iwmi~t&s3ewgU36Ui? zzbEGeK?xfp#ejGCFi0mWi{Bs0#`zb}!zMy0EU09Mw*Dt(@mJo1RA+NRv0H>{XhFq* z)7rQLxoy5JPYvAw)mf)a0Q9dxAkgM)Jh**`VEe(1UE6fhP)|Za4#1v)uW#Q6e{GO7 z0GZzepgktPfp9>mvDx`=pp@T3E&eNI;ONvJPs(-(fXJ2r{7b}-ABfL3q36Hi0MzQg zMhCJ}zcxC;f5qs4j)&3)V0*FcQ^FYDRGYoJMVQ}zc zBbeBZIDT>lcSanLcl}R992;jF{>s6+t#c1Mjxg?AhmE`2hbA{~Y`p*-Y~4J6)KGpi zpNNQ+NL5kpvYYWl!tO$CrG@oTZBi0m#q(5jB_t&!G$lDgvL?J#iWhmQCX^>MCiYEC zP07EJy-a#Gq&x1hs`RgC0R8x_%;ki zEH6t=zKgA&ns}WC=kfR?#R*cs8&LBrc2+{0B8-@k^;^>V3XiXGEAl>hO=kU1N zkdaJ+%^L|>B0}3hB8d_wr_c8|Shn>`ie2Fa+`h0HqFsDbj5iWZqg&9ouSUN(8IDyw z_#A->xfXSq7$|>?*IexSGhy7I~i~``^deiV4!i= zm*cL-S5wJdRZ9-wqA$-Jc6~{1>DWxap6n6r8L6y08H5+z<0nf7H4=TNE4k~bMOu<< zbPKQc%#B*ofb>NnD#)v)RIOZN{4+q;)L<*CVH@v9Ro?u(=F&0 z^7QNB1tT~9?=0_PapC&+51JMqpI)?>DjF4=>?&xcf5W^mmF|h5N>fail^x1F&IY#q z!TYLG%xF`TR3C2i2z>6NRu>I^eQntyT!~(&eCn~(`7cUJuW)4U(=t9 z<##>NJPJ3jfx%lZYgKjXM1}M?zCB`5%M~qRRN`u<60AJebz#)Kkd8bMulCS_S{O|( zQe3FeduHXEFQbTMn8bX$+2prgk7?<=FOPz%HG0piEk2I*T3h17?Prl{KQG?nJW|DA zBEn?%B3xo9OrhZxPhyO?+jtZzEhQ?~&Aw#vrAtADZ^+kzhfQ-1$6+vBw0z@X^In*x zv*S&?c;7V)rs|+h%$+l9?fSRx4N+IHxV9rkeD});GT9eClvtj~YkYHywvpbz7GYmB zOb>6CnvL;X{Sn*ex!h>zK9(}_ynIdACNn>(Xzj;%=ofKeK>}s%yf5C#k0g1m2|bB-x?MCBq1NC> zLEF$IIc=*Uvt+IoW|NT<(19*rZO}s{Agk{jMI$>rrb{y?TC4kV%+7W-foYq`EKk5^ z-zn_Z>j>C?)V@i2!SLj`A*K--K+R;)W`Hg&exm;PM`nqcwC-V2#om;Q`DFKbNDmuz zv!PYUE0E 
zu-kF>u(yw=3SxV4Pl_gT>nFHLh@b4Q@wg*BpRGvUfgNvaj0g8dM&i&f;~1-(-pjQw zg3Wgu=R5V;k9LCl^DC15FVn!g@FW3h0a~If+R?}^{{0k zBth7r(iK9zfpFRCd{<`Gc=w2oF)l@7wgKK4ZQ^OgcR2&?xei4sB3nB>9kJf}@^rat&ybrNsEmbl%#fh4Cu@LR0 zmoj^Bq-HtiEeqV{dFjg@zjp%0MCN;$)|D8H%vrov981C>t2LQ_BTLbMATDOG8>#l% zavMrxF>8^UGC6IhnRXd=(ea|1y4VbA;_}?E%F{K=K&7aUoEaUC$0>ZH3&)ya^fXbv zH) zO+vhSnB_%c!tHDa$0DqqqzL-_5Q9|voZ?QEbJcM zF|H5WcijT4sL0tPE_aK@QsXl4H6dzoGDYVDTA}TXyS~sPeixtf{dM)7W$9pi5nj2( z(8nfOHN!#3&PyjxlCjmDohTTmW6E`p=iRQ;iP%=~wJH>G;wAEFm65K#1Q=|TR#0Df8Ki;eymQR%80$Igg#>LeU+v0Qa%pgc=dU%QR(hQA?jpI>i_ZM8sO@2m#(eCf7j_ezAjAIfPrN5S) zP4jB9N&K^+MtaIs?Y!9j;AJ4s1(O*1>AguoGgb`t?)+c8P3~T5UYt49Kfk}X@J@?P zwub(XOl|&Ffqlg6(*s{Famj4N#&{?|n zSqj~SQ@?jp;Hl|4TDJwAGZ%OB{Vn|LgWa_kPJ`%#!C?5;jdjI^wCC>CqwjX%g9#`J6P7iO|-Y~2JNj7ax2}zH>Zw@+sB7I@$7bdE9!5}b>;N?E^_(T zBi<;VvD3Hw0<@cXEd1Q1LHuEN`y>9Y)R?$Hj7q2*4Gi}-=!rfpaX75z!@xcqtxIrs zA3yw%In{C6yZ+fJVd6#kwcnIFHB@HTmZuA5T2$%egTJ##yX!8TPTs2*nIGhuQ^sZy z7~^Fbbj&ij&o8-FSD9S9a~LNao^vb4DmB*juE-^4`{J?G#ugX(6-6|MhJ?^Z;8@yI zgU!R~!W>e-P(D9=wiFC}1d0@2%qLEa(+3W7AW)$fCV-H;bzS2TtmVdOLO?c~p*c_T zSXRlD_2bO&URuIPcYIh%*qCdV9ZH-y6t4;|u0Cv|>oZcs_p-fG{|mna`SJ4-W<2~q zq-Gn;%%09Riob9A;v1sy&aR2<#^R*kg{pz05>Cl_t{q4WhK67B2j4>6mG1XJ_@H!y z5Z<@sLsr070SgGXH+)xEK!LqoHD0q^Nh$CmP4Sj=)3o`l)6kLg%c?)#Y3mrZ!ZbvK z@oa-v`upgJ9Ah>o?0}2 zTWqx=ppY}jGZn*XcS)vOsa%Wtw`;U83AklNWVvRHU`3+K9yLKDp3yGyavt8`XJC=_a2gn?2w!v6o61pPP1{9jGsvkTMdn7maZxet2WIBb*(l0CS; zNdl#}6DzCQ3ugDWV;>Y`hZ0!YzN(Vnr3X4~rTyHS#}9}%+tt<#0EDcA+nF7#qytK_ zLq`M>t<}lNy-$E%MEU$=@!1;^>}_c3_2v0gyRMfIB$Emd7mG?i3XASSM~j9S zp;V9<`kV*ie;W9qPZaSiw8Zickfhn!H>ns+*VdNNY9C?labk82h?_?&G$e-UDtJi! 
zWJ!RIFcE3q*$b2*l*qg%O^avtlZlt8w)Lm11V#`sXV|MVv%O;ldP27A;~>xzKaLx7 zjY|s<_;FcF9obxjbg`)>_pSzC6%T!=53osAQttRU77j z>N`E&_8mq%0~9iJzgj*g7{Ni(>@ttJuMobFnr9E(42%|hEat{#IQZD*v;6?#$uDTi z!2>`?s{(p~3Jlqd=%PBnMPVA<09uHZSX&%yq~M9asvLZXjrfx}4Y3d__`ohjQ8rS) zS3~4i9TmDmvrn#@!~j!p;SA{hN6v>xXfCWU<&W$R>Wpl#{g5SaxO@_{t{f%Kq;a+> zr&M8MCDlJGJJfSp`5EX*R+0440nA5tni6eMHV~f!^h2wNjF&V*?!guLtb9-5OX=Ed z{XD=~o|)rL^OwznJ&g_=zs|4Y8!ZT-jf%aDH?sKsWy4{XcW*FVFH_OA=9OnX0Y3ax zB`p!GP$V0SVWzq@^fD)DosTRnZhiF3`tkZ0HOVGmAn&jLxKBTG-Jb3xBqI`YLW|7BELa{gYS*c*c6QvBRIjuugmaHiX z5BLddASG&i@j=7jU`8Cetiu)nDn+DDKgOPjLs7h*by|vWx8A7*t-b`*_)3(^9|*UIV(-wX5twqEzCSB#?O7Jwn+;g(>>ZX7 z04-*lOIOip#XL!LuR9=Eo^*{+JL0{E1+;puxW!E`yglkA0wAd(yXmKoLvuM#17idM zvU-&Pw#(ng%pe2`J{SzY&k=g6QtHQ-$F3wqGqVL(Hv&ZT^V0d;$zW!QmI#7vUZcgj z1PG-89%5^zac0#S?mD4+@@O2XVV5lny}l(|9zaGrWR{+IEKF%HUk6~H!rOXoWZ(6L z{Gwo<^D$T7?&dG(aW!b1^kx`51(u=#K#AxmBiD`^jQ-FO#uM69tYHiT4MEKJndMmp zplL&>6TC9ssqT2D(s&IM!LF~YlxuAAWmnUIAuvbFL;#n-uPr**#40=ugI#e{8NjyNb7bg)jnk3O?H;`FwSSg1rpDMVX>s>&_m%ZeHv%ZlQdK#g1>~ld(tR zJP8iK%Y$u|YXFa>P7atW1&_u}mn}C%@Y->}cN0nh);f{Ok**sWi05PJKuHL-gHhjY0taJH8mjwaA~ky-lTlz}Mk!^A1{N>1bR7)7n` z9$5y1vbkd9qOJC#$)DOvVeKZ1`%{T84}3F}4s%YguPu~A;Ieft9xR?wUJZLtjDdYo zl|AW+dI-at@AFzuJ!9fnb+GMGK+N&=mDxL`pQ&+XJ!X13Q3bv3^~KXg!J{=%A!PO* zD$LL2^F0}KuX?|m8L%%W)Sb-q*F?|~u}Dr8nteH?Z>>3$F#0L&RPOM-W3wl897Udo zFZ#%u4)vt#2+RVIRb9C^UZEKvv5*5A0anoHge))M4?M5Xze>9>0H!mPfy3BuXsm^iZST3_#c|n;VXQl;dxj zmf#p&PgqX$GUGDAG>Sf4EdzSYZHrIOe0jf=QKN*;@QdxGc1ypI$0_rQy=8d)wYpfW zPeaMIHEZLDWj|SkA?k;G00nQXmVC>M(eb1+&(|2SOZ^qTjfoypRy_nmTLm#O z3fL%s^{go3US=1TFO4vH?DOn#`NWvv(A1ejn1RK+wCO!u9kAceB*5@Ft8`u;^Fdu) zPqPxj@=9UR#_m=|lQ|ATEasWR29621~;!U+Vt{r#%=IR4DZ1FHj z@i#O3E{FkN3)OK9#>*4u0fuehk(XGHn^Iw|<2;BvE0@;oa*ac=Z-{SUqUFn=GH?&} zm&y6FuCFbP26XjTpevU$_U8H?Gf=x*i843jeVs4!;XK^`wqHuq6FuGLdv;E-)Fyw_7>%I1K}9`JtOz_=ft!5%)8zj*yFi@@x4c}E8**L{5Y`E$*bp( zZvtuN$M+p%yPyd5b!PQ@#&Tt#PGU;i z&bQ>ItvbUT-dqQjs;(t{iKA{wXLD+@ zF;_j3WvJ^dqF!_=^{R5jPtW_JD)2#^GijXY2*QPF%EwJO zT}URgFEBAhO%K=8K1~&?xh;b 
z_A~)iN{h+xIyi3}V{)Q74UXrQeOXr){!JD4%fmR@BjKcH3gcf3f1z0@Z^ItkPdC)a zeWd=-e#U8;#8Z{*v&Q|yk620AZnL4qnp%5TqdFgZ4%5W)=L~yzo-6(VQwhPdS!JM9 zJ{^p34YQ*hsI(g~zRI5YS$?pp7ZerZ_ zLybkoG0YPy9hrF*55jH*8}@UmKh(_d9Lm6dr*9J&I%0cA={Rp(2Z%R~lgKz^xCo)- zmn)uWb7Tn;xFe6AJ?9o!DZD^)JsXUhL277HtV*`w8fP<+-n=DkP`poTdCR$zdW7o7 z!!gfR7)lsAlKMl9&>X_76TaQdmQA zye_u&zD<_Z>wA~n5~HzDHF;<FN4dWx^=g%omG*X5`;$+V6t2wRm(wzM@?W;3| z^{PhhEvh_v1^APEa|Wut@vS$JaGS1|c8@+>97;HbFS3;i&VCjyK20%Y6Z+U~p;EZ_ zv;VU>qZH|yxJD5PN}r*|_lKfH)>FS}lP~D&9zEsze159nqwyi!hnM71(H}0$x88!Z zq=Xtv&xp^qkO=AomDSAMjs6D>9T$4AN!RsiC)Hc_+21r%#VA*!b<5)=HQ|jSVoInm zd=YuI5} zvbzg2c0L^mPv=t5l`f{oee%6u-uy!GawxuoQ!D#rQMZ!ut`S~_O0W;%cF)_e_hu&r z2^L2E5r^Lzb_;b_JxQ48B*Nu&WJ2>`N%QV0VQCQlB66H~0jr@#7$Hwq?sZn{RL{^+ ziNOdM%eNRfSxl%fMvJ4Gb>HDXg2*jMI%yCeT*>EDlc-s?AwJqyEXO*jW%Q6HcdXb<=MFrr49?3$2=o7QTa0~)%olrK+}G&aDSyZubnRuAw< zx3lc`N5a+IO2aP=bV{!l@&PD>7ta=8>SQhknNBkLfp>-l&L z6L-1vn(BM~kuvvQ?Mt&)IMAWBr}!(i0AgRDhkM!Wq^356*Fj8ss!h&>lJ(75Ck4C= z(6qBme+jE0gA6tsL0&!}UbQ68jGk16t%5LoMLbK?KCYxPD3~(NAXm4E56SFH#i~P3 z$^U9*KgycUaEhNqPgLSr+6@`Ug&9M&=sNwc1-U)e9SspzyhI5BFN!+$%b6}=as+~W zZu;hF+AqU-3Kd=zojA)5F*xKo(aXV+G0%|rpjjhop9T+24evT^Mill;yYV-p=cx^+ z90{(uDh|(SR1)y%4~OYV4d0!pWrcB~&vwGNy+)6gq2C&WD^;|N)R2tdL3AvRF%VA_c5qm%@8nMZTR_j{mqAslduG)EMFz2$kJc z?320Bj6Nf$h}62m#iH*X>)zGh4X>|ENkK*%wRu+GWQvyD58Hi3B8kLl>CPb{L)bM? 
zIiBou$j31FoMy(|ElO5WRPxaIP?_0)(K6E{^EM2_3M*)(%aO98%VOW^4-+SyWY+v< zWgJzto04sjtgpgrd9q;Mz&*p3pU3H9Sf#pG_ZV}+)ijKksV}vd;CNO^mNfTqj2JGPCwL5{l?3kO00(onBq~xZfsT z?RFMyD$1Z%_p5G?t@7gc?1{7dxvb+JX?_OR3&+wXXpTwaX4Cc-v;`UKOU_n58=g2- ze@V5saB;~v>C^&L$}l+!TMcSnSma*|(xJ9{Y;ggKYFt3&1!X~Uc~XC%oomG%n@(g8^69RdcF7L{plY@$cxZ*zN+P=r|R!~V^!iy7%QpR#hC z-!d@!l2PYQvj{QPxV#cNuY1c!i`OTOhAj+6?9YyFvS%~0XVD>3NxO%yJx@=SePFj& z#vyz8NFhEn*YiHy*$RpSqgrCTiaM-zGF*cer$+})tSda=G9B>Tb#}k4ZY?d5H=xrz zANi=InV&@Ev;vb#RoSZ(s#tSUn>AD@(zTOIOd0o5*M+e zeS{q@QUu+mRKW-G3X?|kU&#r&t2lw=MRu1M8nWB*1fAkk8xmXdSe^S+K)+k@FlV3j z+^u6u9)8AEqT>T%AJAMl9W`}b#)Bab;JPcMl2*reo6?`t@yqAE(45sQ=)>M1Fs*{@ zwLkk6zrVYeXOw7q*R6BM@JW6LQ~gr%hmD3yIba8-$(E>|m15mgPo=UQqHPY#@N@t8 z_MWDSxs1_}WwP+3z{LZ#>aRxPDMN<#ver`AkO)7|tvlg5+3(?aAQ48*udCSI(WpAd zEI=F^DK3!Calsx&YrvRWEnQcB3v+(TgjgAt?aTD=TBl#v-KSUMTXQ^aUc2=+xGNrW zN#$d>xa2qMxg5bSabC1Ww-@WeL?~f$I^DB~GzufU%aaH|UFekw;zb;Xo$Q&Nc?QRe zhGX?rMxP{ED^_twFV*s3@A)(LDROYGm3as})zmdPaP#W>15KxRGx_|2L>oO+3bolE zYI{tp)T#~1MeK9r^`T6l79edxD%Qj`E8~)Uti1Fn9r9>}?=2*vEJ+6T(RX?mvbk5{ zi`fU7n!E3EhH!*Q;PMVZBpBD|=AwfIAw{ae%S@E2tY*NT(#>P8YLUlq4CRVWkVXIN(NnatWlf)ao`tqZH|f?$9T(29#p)JaPp~ zw@+qSQY67`_KtZKEx%Ge6*C`As{Do@#j&d>&$WLI%Vva&!iH+@G4WhNGMVbUnHJbERoS0_?_H6H>?;Yf z#3$Ll(Bm-tygFR7cmIt*$)mHHmpFZ8M}{UNIJ335-;s_Q$Lo~@>P^%YsWQVTq)?{{l+^^n&rG;U#FcV{`e`m zxkfK1XGKr-s9a$M`JGy&gNCkju1m@NZ$c)!Kk}aD6sWjP_K?;)s5wxDOTW>M^$t8EhjuWHA9$xWR#}s5bfnk1+h}Udhg6lio9-mEVfxUVVSfeK| zCU7q^CZ+qO$PUp)@!swp(R+|sE_38zX7Sf&VoF&j6>|qYDUwGAj(!s-k9>T*`<<=R zafFp!(YI;|asF>ny;?QVAtVg-h1aT=n>`i*4X$&vP{71ABzY`U`0rUdC){vd~YXp44to;`e@&n>uFgiiEosfkRlP1 zTf*?W< zoAm=>eQB3{iKvp|V2Pc~IQv~Te}^Xs@w0e&<(FGLG>#&bVud;oioQZ_EAWWPASA|Gp`kAzR)Zg_|3gE9& zB%UVC?q_mh&(2ruHT;fXexdM1c_8J(8KK5o9TPQi4$mQ?BjXuQJoQ4LXwqg*>exB& z54#?ocM7h+QOiV}7A9{06|i$+Kw{n94N^(k0DP8th&=miJQHiM!9%^ z%FtW<#KO!TxnO(bBS+8UmtK3=V z=03%Cn9Z8kC6JjgxU7}oW(t)`2Gq-EsQevLU_y>9Id|ENYiR4^T|wx%UoBUzK!P!l z6~ms9Su}FLEkPsLxhXHFEG>vRzBP}=!G6~Ic7{KGhAM1dxEC#1%c|EUTE(FRy4)&T 
z@#mzmIz_i7C`F!0afI!~l`u5vJ>Cbyt_4@Au@l!YQG#L(xeg~R-9t{!v@5LF z4GB%TDf^-XYY&op9p-JVVG=(TN7DUivNJyqHN(k1){*ywy7{XUt=W*iU>naHvy|bWWt7)Sm(9FBulb*P8jefXLfS}|4}S0`~&) zMVW1XD*S)iNzSCwh)6u-LaX(JsWBkQEwUQH+hlM&E2QhTAP3BGg zx4ex(m`$iv&l5gB902?xu`ofwBNm`J)>~MXt91ZSor1lI8F)czv`x*03~8 zd`JeMc;j^e{e}J2!+B6G!a}SVAo!DmQb8Wq0rfs7DyyWu8eQ12T`Xlr0e&^4oEc@NsZ1nWgdNzo6v>t#P*g<R1gC_PR!LzSmw- zPITxdgwW?6&p~{aH`bLSli}is(`!*0N)V~6LZ7i?S1&0au(`m&djMEu>>)QfLK)zw z2KhPi1Gu!w|1h3`oljmp{pj?;N4xIQdv}M4Cj~gx2G81Npzm$c08z09YWH;`?aLdA zKePw=%G%eIoC+JkaI#Nd`&y-w7Qx=0+&Om;Rm(?O3A0r0^+{_dKYw2&t)1lKmrF{l z_K!kJN-tMjK*_TrN$=N44tv6PJ|Kd2E^#fkn{v%*=KCz-YOtT%omjFb2xg@kpfyic+mh4&%Ioe@2{4@&KT8}LH8A#;_V*Z&60@(3Wv zj2}u10HtJ6c9`-z_Nl|(mtELkJ~wI%v=jg}KoRzo)0zg$E|~s40&ruYBR95eVPwRi z*$~w|E)xv^X$w%MP=9PQ$40t01dR9H*VcYN*?Dr={^VKdvE)dQfut)Opd$sJlA}vQ zkb6sUJO~WHwS}6`(s;LoE&Q6`fVoCH?(yZ_!&Fe1uz2@CF4d&mQ|9XhVM_0Az{Y7R zz0LUmMSeE`4^ngS58x2OhqCJ)A70uSz^y+X`v8ida6~0397wu?%*?(cr`=LzZk;4u z0kq@En~9t$XP`GPQqa$bu_-iwx>rx~LovI~nAiIx8=$h_nx%Tg-d)XLxja$@Z^d~) zHE-R%gwrmF!b?AtE#xpG~k8`-Ris-34&Q z!8-Kx*JFA;R!w4a+WHV#K)aMI5k{QCiO!fFdy>=}d_?jJs)s_ISM(&Q4-gp9qY!0D zBwSn|`dki8e~Ifswy@oR9Kd@)CrL^90VB@FQ6TGCz8k$xflToE+2%cbs>(@VZp+D; zWag~{9Yk*KFA$_6>xTm_P?LLGfF9L^*(canY@&|@-wc0_>qzSqn^ zS{t+koEX_4lqy}q2ZZ(7@MWO`^#)*qz0b15D)Qah0-~+o2HhAj!Mi;Z1r-$jTIP2H zdIyw7on!8MNoWVZApjy*LBX?hbqBu0{Ld+!GwctR7&f?D(2)21cgX+mtK5GOw+`y% zsp@fJE&4~uEoU-FM5GTefgQ_{OAGM3p#)JlpfeqyFlYmKAJ_BV$y2sc4MQbOZ~_Pg z>WTMjnxWGw7Pq*f1S7fCZ?7}}0IYE|$;&GaIZ39GnNCk&F-i;>V zMi@T;vMQvlv`E7WS1V_T&~fzo_i#OMXvaD#CkpImn@`l&>JT6i-GR?ab2$L@Lm?0_ zl}&nvt`ll`;=mb%PZ4GxFYU*90f3Jz*AN_pdhGh)^7NA=PvIkyt|EQcWSfC&?Nho{0Oxl-<75UmkEr zlzeYdYwWanZa0OW)ldr?tPX(#no4NVLa&GV6ODw1%z|EPh)e|^zN^()o&;zj>QUlt z-$B%Dm(&4p%m#owi^r<10vJIM7^u>*PtTL|%tAyP06vduIz7${2Rsaj{*?!MG`MWJ zPxIXYT{dqGt_YzEB-7 zy$yxW>H&auGj!@{1k6_)6wRZHKuf0Kccpc(lVz*RT+xQ6;xx9(Ebd>}@<7o$_VeqT z2EmW$`4TUNUn=Eae-cR|nm*q=YHwfo znQRttFyf4`J+0Tk#zbXg;|1{FtyE= zc$pUm7Gu>SQe8}FG|?kV&pce|{Bz^d25|Vy4AA4=osR}JK0Fs5I@DJDqy&&L04GNZ 
z1B8YtaJBi&x(J|i2Xp|+PXqWk(r5xq@;R4{c?YO%H-ht1_O*=BtNJ#X$YqH7lS$=9 zk{zEOF8-FH^(!d7y;h%;9|~T8PAUx|Nt|ogZtCi+0wzudmF>P)-`~(`itxlc_~<#m zS89Z4BrO!1^m@404ri3-brz|j-jJcUWY}T zXgC$;-X&R;%(EuGxQ_G^ucn2=pBgD;v{Jj7JrmHXeeoi7_4f)d7LMO8FqGWOb zb>{ng_bQO9T!IWB+_arO%g@{CI@u`-I3n$~hjjf|-No3$*3|?`FW*e$la~THryZQRyTc)67&_FXKk$Z7%|~5?IId;v z_IdaC?I%VV`s4z-fG~7boT}oI6>g6}Ku!!EptBY;RSe=Dlljpu3LnU>NM*Sjj#t3Z z=2e8MAK(DfQf9#eF2cU_qsqO2h?5Zu(2h)lB>67GyB^R1K*`m5f$|rmnuy25EN;7h zM=nj-(B%6Ua9lXES|{2PbYI!LcuguG$djd?w!}$Zx;~lzsR3XUHnnlHn6;!?hrL(0 zQC(q>TG_UNV~pHEEYVV^uEI|S?C8>9G72lG0IId|dyenK>vChtcoaJOf z$m>gLdl?X4Zxgpqo~xi^B3U>#);0{Y-r1=fp%5%~_lKCl62PQ_-y89jrO3y^$OLql!Ab5*P2Gr3IYi@$ENWyttGh};%Po>$FHw$(8?1YRu71&J zo!+r~3hZ~z6>%2QaO`zjLrtVF(QGW%|M82Lc3t|-h5*c?o=s9qg{l$LS?`zd z;&a;@U03J~m`dXvOyA@E2XnE5#Xr8LHv&k?IA4Z^^wShn|L1d-+7Av2=QGhY)rI>u z)<`dOn@v*#M7XIl;U0_9oRfjFoT|%!;tlFAAUK@R;c2iUPheHMdBinF`u-(At9{@} z`jvfJzr=T?@M`rwy#8kld~XyQ`I&4H6c_AjrF2;x17oKPiI5Fn7(bvn%EO9 z<@F@{qW8xR^8Tigquo=8l$0M8uD;EKj#tBd8{WEHq1xsG3=hT>_z8!inw<7j8n+zUAD7iKa4_Hlz922QzPP z$z}{?W|+*SlTrGsSvw|T_N36@K0RlW?l?r;V8@?-;EAkw1JmnEnv}b4%GwSYX%2)* zJ^c)LQQ6`RMXYU?DB5f|4fOJ53Trjuv1OAZm}1V`HIfvU$I*G=lu>gF&v-s4K8T_U zw5m%fyX`GIRvUNqq@s9wFtJWr@6}S}+y=pSKhLUmo=DG#XwNu+;~}#%qE=7AiWg(ux(AzoCmMte&~iWgNOB zr`kyP-c{*=%)GR#@uYf)U|VkT<#UT?eV%7aP}IMqk2mJ0m#WZ%_?qLD&)Zm-4pcT0 z)Fm0)3{QV}s-+#IdO5izjpur@FXl?AO1U1JNuuXCH=4IKJHteA_iIAcv(&3XksnDe z`H@nbJl8@^9#($D<>ST7hysrQPIee$+9?oTn#7GS*YYI2xY*rJ8urSU;pS=GN|^A{ zo7cHcyL&$Cz9-eJDCWwh8VZ@CUfMHW;VLyC2mh%5CZ&&Lg-;Qm=;9*%T$bg#$#VRyti9?6&w>j zaR1&(l{(#TZhOLd!i8A;1PRsBq2mdMhO%Hk=<1V#P>7DU%9^eIWD`up`tKS{;v~@j1^#|We&so9Z;#wxP)4b!L_R5w`vR~?5C{2BI;ZX7tX9xLc zF--8G>@A}*;5PLLSy4t*gniW-H}Q_)B_Cr$@p?m!29cJ2@<+RGj0VT&4P?Jzkt4`_ zf2Yt0;wSWA&|ojm%O4<`p$G0JOM>{7{#%k)lb7W25g+0w5D1~Z*ELKwXELOxXhhES zmi2h@5r}n%a)uFzdVR7naT{v7^7Z1{IH?Z^z4B8FT)Xyg29_|S#|7!dpKJyPu*Hsw zjyCA#CwINseKg95_XG}8L8T+j?@S&y`<;@%@KuD^)G}C>M_eZvlWgST&3_D>XA+l+ zI*R%lAH8Q>0^pAC3SnnW3TJcTCC;vc%yokM(G>y+GKv(sz>iVI-rep;^9vbNVzmR< 
zSLqGPafgh0h8l4g-8KPxQ-x&1&PTGT9LmAj(H{zjcIy?Fo4B-UTfA3d$S2j~E77GR zR6l#$iGug@I{jd8tmx=$;XrL(qq>96{-8ukkD4Yj zZ#8Frn*)AjAb9NT?!Q_4w**O^8hZIOh4J;PMEFQyL{{Pr}%do1Vb!}J> zq?MA8PH6;W(MUH4D6lALP#OVgrKP(|r33^dr4f)=!~&(HL1Gb#z@owU#zgly`|NY} zxvuZ~@m}AL_a|#E=9+ViXN>X0ec#VKxsRic0LNpg%?88_vTtgO-R0&}@L+STCMqj5 zwmbt%ncRFf!%e3OJaD4{K8ovnf)t4o8OBQ{SkbywDNN2LizbX8a_{W4?i*!3ir29G zK~YZWm`+~Q*a{r#r+y@FKaqO*Ay!r_mkWk;Jk3_C6?L37@X*RiNrI z2Z9Ee_>JtKFbbm*t}RwLlXpzCj@D8N#^ozh85^NaY|-hpqmh85PTcm@1%fhi$`lmy zA>|JLj&DP3Tx>*eC{-m7*4^b7_U{!_d53+wJs4k-$i_!h!qY}eo(={l_Zr^8Y+~C1 z{^?M8%lBqj1_9r9drl=hKQ&R^;LZdc!u(>s=_VE88dR1;7AQqbeY3MZW*#uE2Vf3s zv5-c*m4Rh>4g<^AWPGVamwqpROsnIG!5QoFjl2+!QguAaY{fk{fju2mNiz`*4GkP6 z)ETsUNsoI-WVBa5T5p8?5TIuS0nxy(R(X#nQU2v>${9p7 zd#|M$WQn0NScWSFouXs$XuJf8uc#-kI~NDO*=NUktiHf11u=h}>TX-c8K?_H6=_(5 z{O@iFoCWf|k(-)l8ow-bn761ihg9@tZuqp^F5r0rC4eiwlfF*aBj{q+;N-t)Vc1_8 zPLQ`-l;Gu1s-DOhz5LhOjh!4r$(}5{hNLTo&l?fvfY<#lTf6I|M$ENOF2O6E&(v{a zu@OHTA2!^oegI$r3wM@(uPCx;J=4MWW&Vx98KAhJ_^FIx1;GoDz}+&>TR-T=SO&Rj zX^kch%uA--I(^qf{M;}51ZBNuAKQu-D-MVOTKch~-{FUp54>L?HMLrJ)N(#apyQI7 zQ{Z0>$r&O)UKs1l)b5FSzl9rFar)GcN{Bp>W{x*DNZZAYr7D-;{j&1?FJA{)A+J4s zJ)?yLj_E^JGpqXK&3IB)m&c?B+q0k9CoITh83uW3W2S-Jg=5ZB-5<FjnPzLr_ zBT6(*)>0;w3ADI70gDT!7;fW3^-;FTz&t6x8y>4_V2a1ZDt-Hz6O7T}JU*rU6Le{oDd}P9=cxt{ zs`bSQs8^eND2y`%ta_)gaf8j2Mm6}#%OqjxOh0*J=uGz5Cc(?S_7V3ow;)khHgpg&xK%8{PQ zMN-!B@w7I&y$T~x1;p3Zgr|ab)lUtK9u={pd*vtE{Ji}7Rv;u(p9~5jqL)!yb7L#U7#pqBl4+osoAb}s8^y~CU_#Qi*XJpQ4F(l-vHd5fEUvmt{Oe4 z>{u+T2jJjv8pcYOC1z%)0S;Gb7?Tl?<@d6xCML5>$M+v5v#Km9G8FQn7`{)1#=ano z@}B5OJr}MEic|TienoNeTHFXcL}B66qRWIMAygiSCgLA!bX)m`*^9ZPBF59h!GBW$ zS()i*33j6A0eI7U$p@ifQIvUvX5T(e8ycw7>iVGCKzL$p#+oLPjil-ny?NM7~{!O6Ds(!T>}H!V8r0qchCazU;0>{ve2@Q~|; zQFa|o>xS1T46R98@R+|cyoahT`nPNF(O$RkXznPxCx}c+)A-HU?Z;_6&Q^Z*LQm)c zDCzImFZ#$gehWWc$Qsp+`UI`H%^ph~i^LCMJ?~HTX(=hw_JBv=%>VH|cj_u~nnlpO z^xl-)is1T{9T2ZMyS%$MC21bEB>rK`8Hs*Us=lIUQ8($&D#u_GR~*}4+bY{dd|g?9 z+@zXs;nFczRYWxp=a5giEiSE!>dqdx^LKDPmi+xN$_$S4qQjGT)G+r>snC-FgUmu(jfz 
za(p)R%5BrY^O1Qu=YcBE3MQbFbBJs$eQpr7zXfu4qonl8_uyRz&%?f2FVWHs(i=cx z4UUT}2=a8X0IR0`9_6YfrVjNm!p>UP;dwU$`62j(qv%xOy(!izrX}hH?)1fWVf0mB z{HlcMLZPR)bGfoBlXf|+uW7F>iMi8U1@Mb+A9_c;gK69oy~YhIeN~y4h|lg>!1B8iJDw12i*u(du@bFCa>N3QN^|c@!y(1?-9E7$NIv< zDiRx7NQT93t*_?XkspP0pQMam2b4R0k1^j|e$Y#IwO`Joabzz`_9&HvwDS@-MT2BE zR>VHgwW(R)8GbSSFqAH=`m`hrAgb00X&e}?VWWdDQ=%f05LZIP;osr&jlaR>LmyPN z_Y7d2U#-ScUcHOybcHCCd1SuC-Q+L{3YB@t9MPbek`~Z!m>FoCTN~Ohc zpxBErLKl#pIVY>MjiAL0hr5bcmEL1i;Y-AJk>2+ze*ns$KZh3Qj+YZex=pABskpB% z7l<~%^f|0!vR}Mo+^`QDv^Np_Q1J~v8`I=Fpu1>p=uqWVP9m}g-H6%qU~@mQw7QHt z?D9?CU8aPI5{o~3eP?y492Bu@t1rE++H?N}z+NkRD@xLN4NKI|{$qm&gmsF=xw`Lv zPTcDcMy8~H|8Q3yGGQg4i1R)f&KP0K9h4}eO+6HK@4b%9+|v+3-ggnw9QmTzA^yUp z{gl0hXJ%9f4hUGZSA5)~bA3t~{XHqYqIHqfOGoopqpFk3xI?}{}mHK6$-{7P@ z=kA~hCL>?+-X_JOc8i=gWmn7S6$^vYN(l~1S<@4R*uY!T>9-HR3TY*^Jp~o7L#y?P z$km$CCy`uAQ|9e&*oQbshE81auaPc=-F+94r{re% zi_x#W9@XO7u6R?9HgPP1OpBR>vLH8*%fC;H0V{`Tz+~6&pdfCYDN|%jhe7xy|qR-J&g- zU(U0Lk4h$Pa$U<0Uf;p&*2c|MTl9x1MHuL&Hjj6}&#lR1e#zuTdPsbWXqw=?^FxV|@|O}F>6=2m(Qy|eE8RP%K*B7S zKY*1zHa0MX%ALdR-hvQ|y0&*{7D@y@~(o8%7oE`7p18q2=? 
z_4oZ0S03%tRQjv53E~^y5126G*ZeujmAyrI8Pv`N4yLv^E~c_cPw|IY*zG=j)djwJ z_IyU1_0QHs1ZjZHp-%HP7yh-el(VtfEj1Z|mBeO<=k~dOLs@cHZIFgiq*PWFZ_7Dt z|96g8@@fc-qjARVgOv|iOU}ucoO=0Q_(Z6w4VkW?$|v<0_i_&t6oBtUmnFf!f~kUI z8(1v>#M_aQUM$|$pUtH{1vSOhKU*{lfJ9;|s4g0Pm_Glc;5u@Suun=0QWGp^LlSxX zLd|Uvi@A-Smd8-7H*PYy!7e*uF+TOmj62A0+*j=qe7vfnzK~;Xv^4n@U8G~>zJKAgRzFk4!n;cENY)t2x zG5Qy|o;wTkM`7iTVGLY2%Zcx>l3Dj^29^yOD8kJo8w z1fOtfiY$0rWzXgixPBkXj(!CHLJq@+kCIDkuDoaRz~BB3YC%W(;@^Fb310w1Ur(;D zQ_DKjbkNwkX$H1|{i68p!oL9|KF{bBqq@m*AxVWk?xb7-mGla;r(Hc7R0cC4kipQc z4}_X`B+$0*ofU4*r9h=}2uXjlf`bAI&a!e{?ok@td2ks+UlkMlzgH3e zr>+_cp@*NVBr$VhnfUe<&cwIOGxG<6B+T4?N8eGQ8AClPeMob4JAEqY9U(5c4+al< zYT%ZLF-^h2>wImRM|lX0h?%DnrcP>q(XU_UDGw zC|d`sl&8k>D(^+2zyYg+b?9&T_l~65#Pq^EKYa^%?kZmQ@hfW-jDR9AgcbLPIY+0M zQ5Of?&0{4naE@GK)`HD=h({Yu62~R7ba7q5#VH8QB%FI%iHq zOg|SmKy^WhWPBea$1m=v?kMLyUUl#_u}1Q`-s*Pg@&MzLM9L24_zFVz+Ij)C_Z3Wq zi!{Jtn&$}il@`E?YP}=~OSNQKGFJFt=}_QZK;}bv_jMSxPGWz{Eww07;!Qzj?J{;O zX0Y$a;ii-o#RpRSI1sp7ro1W0X+7a=2%b7!Z=XKKn4ld;Es7)GgPMdxj|`Ipe6Eb8 zHZiI!L-6!0UX~d6!|o&^LuISx$dD?K;@2 z4TX0)8|#+EzdeOpEKH( z($-ZVDH;TK1=?r8r;-2zl_rg8v^&-(SxR3tSeT&KVgp9nR3GVg%bE9to`W+#V$=rQ zRTEc10n~lKK&dle@Sbd1<=u}Twb4bC&DTJvtv4tsss;-GYboS93jcTb|Gy1CP<3$H zKUs;tIng?E9kU3iNSTVKo(q{LHr#2p*VNst5e_|QAub^T(+mNnq9C&%`4-xe#7A}o zj!eG_KHt3meI&4c>Ac|0Bx@=kw6F<+W&j@RGr&K+jsVMmyi==@dsEONaKe8Re4ehg z_KOfs(Efja;Q!x4l+hv%>B#`;!65*T(p+p0h#gz|_)1>$_phHw029%8|MaKjcReIt z5E>kQeDRfSG^G%JQyA|DPCFWBxD0?| zJdu!5<--yLwcXrm%<%wG$my}e`41GMQ{)Xfm$Bklv2Rr$hJY6G^6?UD8!>_k*jz8Z z_A&EL{xAlj8HkHq$bj6i1R}pf8=nenJSp`yZy4Bvo$_9Y5#2;oAkU5w$)H(4xy$hT zn_4*!i?i&x{DuMA@tJRu!Y|<59No1mN`$X&zb^}!Y$$jBeOOAeiUWS-sRxrP7qD1L zE49X2mt0Ru`4=}7rNIa5^4F5`@xfMUsX|t`6L!rkR{z^~585Bje-tN%<6M$o%@y={)3O0{*W%(* z?ScN3HXqot*G3x*3cwjy#D~WX6qmYimtJ`;7$tgjnH#_uuoNo_)$U_1+*l%bvjHzH24C(`mT}%E^yF#q z@z_;d_*AW{3E&w6VS#g~fBDTQXaKQ{O((*^UJh%$Zwzi|$->y+6MjbIo(&0j!`Aoa z+((?;8Q)OdBk)OX+JYpvf#7h(OQ5+WgG)||VG18fs(<|hJtD~RnneSs2y z@ICo(A&U-_#03d>s$?AEx4gg@r~%xog!~##C#1(jWQ}{FpU$_O@C{0GUj-_a?!cq% 
zW!{7k7PIF-UE1D$B=Hzv++Aks9$B>mF|3IsMWmBmcms_ATaSb{JyQ@=k zKfw3&B@nJay4^I6RkGG3OgpLiM*XzjOF_uI&ABG`a*6jOlPh5%%Y!GR$W zry>drq<09I9qKYJ++UDR637SHOM^=OF-BN_38RETlHSwcWiJGoAaS(| zL3U8|&jJvyxnHrthr{ekz!`=?Lh6ukEfUf*uef`*0eB2e06~s~RGlF;whOe~xUPC8 zq#h0FdnUhHMw~-VF1+dJ7Ev)!GD}muQmDS`4TQozFse7f4wOf-Q$A`T#L2 z!zPI*-yS}b02N+d*#JN~MuB{QV7w$cBGWeO(GaBOYO^G=dbSjD?c0^`GkB{qty`!e zP>XkDSi;tcXSlQA2w;@&=wLFE5OOI42mm2+K$QiFIFA)ss1jv5emux)*Fl$G$@3!? z*4b9`4OoON_#Ky;4zF1!kq>tyYk|^62EcQUKsq$~e0e~Hrws`}cTQU3o2*~)GJtrw z#?C^U)46Pb2~eZIfcinS`AmSg;LS@nc^sK}%OV&eIv_#K-%%i6$|Lkw290?yyUhUS z>mgUz8wX%JHF7ePlQnV+RO(lVDW=o9qTF$I0B9jRB?{@$ zi$d5Cc!7s_6zcSXRVBEQoSK>0vuj&H& zrS|Ig6K<{_fP_|Yr`q#!*PoCaY_V~JGYY6z2|#)p6$Ea4AV^IM1o|jCA;&?BjLNQo zxYPbvgAg1nl6tx8aPgHqj>F;Dm2C1}DvxY405jEGZCj4w&f+s|hG#OVnrx=nJKINi z$BChC0odciEqXcZdnzxG-9T5&^4ZF7H>7;8w+7YCl{TOuRuMO~TUBZ?1|WwbaR0q3 zfUK1WM`n)U&#|h@T35gg{2wA)Nj=17qjGiErk|5dj zh*tN4S|tDhKc~c2CWZ(iO71iYf<_jS@;fS&!QoXR)7E5up9q2jc^FA>2b+*gvL4PN1)L)+fNV3Hr^Z#zh;pFEy%;BUe30W0j=Vzp8( z_p@3g%t3trYSJ+{YmQcNC|4Az~(RjDjm+X$N&i0+GW??_--nt>ec1T z5Oi)J2wMGHVz|=Ic!J}4Qb@{4j2Pu`aTlphgM}Vu$(eTCH1CZbvERAz` z4g5pn0Czvlo7}Th?BFi@N~MLyr_;C1zdS|};aSOso0b&NuVSj+0mTpGOt(>=abQkd z#FVSdz)~O$EjoOBir5@`Y#jnUvIpIXbw6DVDS<87nMsOwV0xIeK!;Kp+gzpR$N(3Y1HL470BodMwC?XI5?WbpU^ zQcM1=)yWkCf0RBu&}h$O$0Ige%iW(XWKk}0)-70EWtaha9%ukma#>P?#6F793v8(G zCfO!g);E8MVBvj2FOEaf1{K2b*n_9SBQM-~sozOI3n>S5!J8j^_ zcQUFhG)6H!F87(iVUT|Nf$myn(=1AMkY;pU@PTv?Vz()b#Nt2AJ_ElF18o*}Z zXxk58B>9+k2+b_Pko;ps&|onLqK3$l)>=I#4ejbHT_SEA!|d8*(I_9j7}-n2lxd<0 zF*H zV-)MPh-jRkW@1w3FOV8q60JUKcht>G1mq4h%pv7Wz9u^{LB4b7rvTZKHQWjc3#&t- zj2^vFv$KZB7HJ}OQ#lMFyvJCLRJ|Ec#h@0PAIj`5Fa<{q*Oq-AaZsjC1S{ul{o$v} zefW-LCw-eCZp}h2(yCVSmy5sWGYJO2K}dW^*Eb~y%3*}Oil`F8(cp;zL%Ur+dzH_* zHgan4(wt*mMy+zG;Ve#KYCd=AwXRrSAR0aMzII9eexoUqJztp{ERIbhz)L>2c~1N7 zlMl@|e605N@F?_IL;b*EU?f){Bl;tEe*;plGx|U*5~gnU0|v?x)3wejc$lx3!uzsi zp5@TETL*^47%vzu_|wF`4isp1sosp=dyG*Dcwpb}3E=JlXQyFGr>g7C{J3*J`UyQj 
zp4}YQ#1~PfA)wK6Y3EYI+A02}>FGw-JY;y!=H;9@uLWJ-O%-4n4!4c4W^azNFQi3xCm;yu6ma#T zE6m<18>lBxyv5=A0$5bUm?Xprvyzx9s(LzVA2$$Dtp!#^S&&=D0NlF4|L#?;=Dvzk zRQc3NssGohjEcY5xZuwF8%xpu`0o?~@Za3&?QJ(I5VPE#s&KHw>4GRfmw5{Cvw`Oe zF=3!zp}}ZE3XQ*=LxDT`DEFxegJl$f=Z zgP$Ye1_hou&NK=qzj9mNL#KI41E&L&vIjVc}gmh-{X82_~M7$ zkS`u!G6hOV%}V!$oUv}3Ei2VYZf|xO`FL#4t6W@~Zi&=k3=%fOFC0pF4hYiJuAY>1 z7z~uKfBm(`*>)vnO~`_fos}-szy2KMq~Q452a_91=MkwO`Tse>9wMZu&qYwQNF{oO zpG^$jpz~*_M(dz9Zs;HzNj~~V(tNUPxCwdvsk4&c75E&b2WU-M%BV#!v-sUujPi~= zEGi_MkDLoHE`6Kt=1Azs;>Xst9grvSc}>{Z932;UF_{1L*ZyO6kKHWx|K;ai6F5+B z8Sp!RQRyD|*O3O3{d3DE_G)`P{FKApBeukL4)j!OvmIDT6+cJoGT{tipm}T!@NRAuK{Gi=Z#hW`g3Dp_5Xfi z!E$(dSfhlqs9gSE=XGkk-zo3a(=}K;cm>GF{%Ks&UQ2=1aS=*iT=EkUz-O&OsRyNG zqO}zkI4@Uz&u3ZsALjMno6!_xM*n_dL45e$*AN&Bi~fI|*KbFl=Z*IJ|IywD$AG=3 zDt}&x9lsc|wrnbFQuuz6%z%au2lGw}hJn{7lHLD4um6h~eFJ91`)?)?zgb=X zZ0~85nII$qNdwL7GV_K5V~=fXgE6nu5X72IyY*|$Llb^z>GMDK<#WUGH@ajxD5@o!VpR@s{&Ib<5wJsxD9r0MKF)K=txS}<{TUGU2jcY?1EtvTcEC5| zb6*O=$pV$l5gyRd2hTymXkQln3p~LPP|LNku}XdnPP{CqhWSaX-}Ds|-|X4M%xY*- zeYNnxR+=malWPH!d1>=^*-2v+8|OZxD+`*3U2})HXTYiJ12CZLoeA^A=%(95Bp$3=Molv8310Vt{B$}a$qzTiFC z85r7JpXKj<3f$i;Q0Dp23YgSa?sfoo*&H_KDi6i;vq)^`ZK%bU3E&vnrWx7_fTAl5 zht4Ab4DvHaWjf&qz_c5^7nT=g%NCDc2_x;XiC{;n6*0m5k{U}~jJF{T2o-0>snL;2~{KcE3yt+Oc5erFvoJkF4%ysUp)wt?$vC%3q|{ znExK>NlDaRyO3Bel*1xmOyda+webaXLgPV7(!!-YRI%7Wva$t9+e*J`_ zs3A%Nu~o12TrVT_oM-odXL}8$Rx~aX6PgORfJT-iI3F zRX9o#0nHdeGY}yIT};sgE1yvNy}$*;?E*4h=X>MENrVo+FGl%1RP;RThuCL8?Fyls zG-BR?QcceEdH!fs^(hFd)wbKiCSLAu-3<7R<$AHH=YYML1(xiHGkqg&_#i{lBEZ&e zH~CWeG_XEPXcZfIO~=>P^xo>Hd(hSCMuLt;2*$iWcK6P-rZSaJ2)AghY-fcQ4ecE$ zy;l7`M|CKDD(rqplPqLDdQAT?p9E^=b^x@@o?E^uYx1!LwC_6w>vH>)hk_aVn4hwS z(J~^r)+8b&c<=`Ewew+#nATH?^>4#YTE-^QD^Enj_T5arJCZjN_ZbR>K6hXs$Z>ae z4wOGZCRftQCqwSQtl?=H5GYiD= zwXUjh?pA!&pPp8^J|Qe!p>qw}_m;%_;RGVfJdNR7)A@*a#aLrnN}np%>1&1^Y+FS_ zFwTR1-ls4M)l{B!bzLjnmVS_?*Sb2ZZ#+3T!=<2k-T%M)h5z1D^O*sj8hT@b@*BP-WU^#xDm2=sh9eFB9`N8B1 zMZaU#v#6{Fev!kOA>wVc#bPv+U) 
zJ^yxQpX}nGart}D=-8mL9i?-bG2wGz#_MzC;jNoEn9L-7O26aVY6H;)h=nI~D!A?5 zu3lbVaQK`dZLmZr*7J^Mmpj=SaVWrtUUqKRnBc*zSm@l>?PKoZ$z~%ScMZHe4c>bW z({^ci_WQYfHa^xrZ2NtdE(W%Dr^b8ftD1gTu@+X^YjXM5rG2c%QdYM+?sa<2`@bDV z{utg`&vh{h=%0!0F8M9#v}I%O(*)3v@i_5-Yxk8IBt&Yx1<@OYYIvHa$FkvFTp@2 znua89k3_06;E`TDk9DZ?+Uya4i_h%5$Ain8whP_N;gPk>-&wClPz&K{owR3P;=_8x zR5AoH-@XEB|K~_U#OOd#D(dy8^aGa|`|vn0(e!MfUgEp^wXv#*T4?ba+0*0=D|lc6 zLpM>T46Ah?vo@maO{Mr1IFy2q?bCdj2zhCw(j;T9`@IA@-47{s9rc)x1^P7@VQ0bL z7~O`y)Y=k+I99hpS?ZQo=KC1ay#%2|B)ZQ#1RJONmlaR$)p#W~5m-ur)cLk3E^dfH zuqJ-zeQ!QB57PR$Ad_+vYn1ra>sM?~vTcV#DSC>K zih|{?<#zc+78bgUB0RfCe4c2_eE`@X|>=$BL zac-x4x~odNdPCK>tM}D}j*P;Bj=iR3l>6k!l`90D_U|khiuz&Nr`? z69*IaTZLh7bpE+yU6QO{{@B9m!#UzM4KAIHQa6#=pHKOgWc zR@vD-UJ%5hQYVihZUs23jcW+m4i~s_TlrY|pz$a}m2Gd~>vK+uGAXZwY3Fv6K{bi( zx2l7Y=M7Rm%6qL%#5mOo*ul$Vw|VAI!s@)4Bl`$|6u8vD zvSL8%Wk_T~iXc8&xjF>K>E$r0-u1pNr-z_epWi>X~2J<(lyPvzom;JV4I)iyZ13 zm3oel0V3BO{ztc^ylzx&QkT^X_CBOeEIEvIHd$mFs1vjvm8TP$HNIrJt7{EPVS`65 zs;kIwW`}pTOh!LBxtpBjEUqlaEpEikp&F-{Y|A}SoovG8N1{GLUVNiElxXbdckzfyn^~6fu@3}B@`~hL!f{o|Mllf>GYW-ghaH-%zN`k z2SvL4MORj?gBVV#i~k?H5S~1$^CFcQjY@j(kS*S1bW8nV#v|!d5caP3Y4ehY%Ft21 z*AQPWp%HA+?V_|b zS!JAXosrN6}$b9@# zK-%kI=`+b2@i?(Ih5KW|n4^@RBf{rFFB1ASkb%3{-6!0JHrevCDoGOB8d`##ghW6M2Tpn=IJ2oKg7*q@cyZWi+|B zyX`lxW_nQM+!!wY09clqzVkj9or~9GL9_c($(juVjAXZ?C03FTaMTg5uGEU*XNQ8i@Ekjn>VR|F?L2SM$pa%)gRSX7 z55L-$)M1K`^*^uxP{CAkEBNo6G{;b**;yS@N~2kCs!n$^E?2yp8A#%pZiEzQ$b5)R z!V`+24(8XnraM&te~<3w4ixJb&&KfVc)xG#$2(LbJo4Tg@E�_q3l>G%E{%*D5ok zk2s#Sl>=*G(f6cw_e1yU<)&zYCYxAE)Z!i`P&oz()$hq7iI+V=tS+1 z4ldEu;Qw;M?v1+T*K4^0amR_G8 zGcbR<73<4T?#NN=Tgh${RjEj|N95S>j{-Evq|81mX3{l5gLAdgv%$+#(!Wxv)vNsE zM6|3N?fE&K%Zg@sY87_Oq1S{Q3o!$)5@)5wpJ@MiwKZqZ4TuYA)#Kct$vr57<(GBK zcv>I}s19n1ao)$l|H~r1{SEZIpbl41v}LjFn;CoPZ=o}@MW>|6}CH2=6$2;BhWLyo!DA+(b7e6#RV!bcdp zrTmZjN_;0kg~C*`>EV50&6yFg$BhUK*HnStANWUs+(mP^`RME9|GAH8G z*qtd#Amvjh&rq-c;&uD?4(ql7pL){U$))(~%?QCRMj7HzroRTei=A9?A7oHJXP71* zL;dKg@>SK6-h~3WP3eAvcT<+pb@z52OPQUGu3>zYeG446Eaj8KKckQ;fP%W(d;^R+ 
zm?*wmm+H8zf*_BJt{3TQC=fGHU>T%jV6##G;(;tfqK{{hOt*<}(DU|ce_6P;y zSaRZbYohjIVL>JU3{eOh%CK2NZ&?!3wz&r#6!1FiC^oc-+<+jU!zR za~X)d($Gz+KBd6ftzkDNLoU))MjDcPNdWZ0%BpWoM&*vvPa``*BHSPibe)wb9-9sI z#1(_tH3+pyxg+2^_IH;)$Y5GCw9V{EqU-htXo{%_+k~sDno)NVDJ2hCZ@iKw=}Q`Q zkh3hr0-fC*hZP ztT3azfs~S?G1O0r2ql*tL&wa{i6?8scvR>{T0x|1dszS^a%R1^ejI%LJks2tdQ+|N z9n_*5iz`9I$;K&OfiuPrAbRR7^rt_P8%9m0Gxt@#8-(CfXaSe6I)67zZr>piPDqSB zoIC@bPig9r$fl1thoJbA%%6}E`bQwuHbXn7x+xq98jxHfEi?f=%F<|en%3#F{a3+W zBSL(Gb*|k!GfC|Ll1P4LSu11m7?J&C`aZEz>SL&&`zB||D&ZK)*C5t3O?8t-DfUK_ zdm~ea!&%kn)aE>3*Sv2`RS{EP&5uobDYGy>GDBZI1FY+JTv_oQkjg6hW@;W#ljnGW9~(mw1FJnYLe2s5mc$Qrn@1qZB|FihXYJ9mv^7P9#wpK5C9i90 zBi`yV--M!fVXl|1U&GvE(7q^wQr<}%faN~$qo}Q zK)e{!-o06r8|jLPIA$c4XI@-#;fNkuuJ(J)vd<%T}qemj10Jn;a;43dl*m>7k)|Atg@g{ z{!e3tCvTC^Cvv~gR9D{D0&TubyNzGLOqm!Pnz&h8J^~+gQARzvr(eRNJt+*VSe)wcM2w2o&VIDJQLC?_TH}5WR$y^ynkpB#%muGq-*(8TWSK#OeAgMlkwpv>*e^sn|QvJqF z%E_5zkw}$?$wav6C=Li())sbmU@Q(buj7~V+I=$~sr>0hbm}Yq%|+3Pa&dt1j3*ua zyZ$FL0KrCg#$!w~{l>>aj~BN)?DS1;vDDj5%9UxOUEFIW9x%GiO_?1IywZzA4i(N-whLw!V*Nl32tO!1?&9eTyX4DvbZy*J|G+ zC=>&`{FW%{fVAICh}k6~P355`|7d!#gUsjkw*(HReOh-ZVj#~{Jg&)HAnwx-AR_kd z)E^+jTCCU7#U=&MlG;;(9%lqZw0=l&Ag(RDI0SKC@d)RJ9uyS^@85O=UKGaNhR4~j z!5KwhOxKt5fVpfMAA|4v-iP!{{`q}9@9PxLk|;PQE`#LGehH4RAvpGw%gdxuflJ;GNfH@%=~m(Wweu&zLxsEwMJz+yBI9_5>PoeBk96Kt z@;?&LZhH(NszcldAN)NW^#X=$!hdP-Z%ztV+n%mE^s}YYuAHOmwikf4q5#t5HUu$y z9#32qhHzmO4{BvXg9{=~BCtZ9Riwx=*5DVidX5cEyV(1tX5uvOe?8y>=T$mnS)gor zagILfjd8fn9jpDi(Fp2Pcsa;5^9?t3Llspdyw~d^+`mr!9LtR9BR-P?UR;;R%wCKk z$UGYjx&a4)WE62rNllx%ghV0i1jYaUM-9{u(urzJzoEzYFuHrTfimFUFR0_UYay1t zp<{JVul7}GfpgFR^byczyg{QHC%OhOn}J^>yrdl@Kz~dpQT=v#J7=AaG_t#rpRQyx z`O>QDpDg6;be>+xh2g^FpWNO;0`d>Vx&CTyN@9#A_F|V1U;DGyM9w4f3-EbF!d~&| z5KnA-;WMlIwW?4~B)QPEwh4Ir>O!`N=jN=$bcy|pE^`$M2U}{ryol#AHx+I|h(@Pw zfr#i(HlW|r-wtU1`dWsl?>BVd%*cj!fWgA%n$Ll_Xt1Dm(h$U za;i%Bt&MJ|+c|CQAb|@ruhI0`auN#RVxW5GJB8>&C8h@=3NpnNmDY32DV+Lyz^Aan zBWmDRz*XHd6^1m7IQ!;Nsw*8)&mF5brtc>ZV-X~)*#5ZFt)en{wBDuqlOEKctm>X0 
z#&Jv1FVc3ckO`Kr#vy2Sx`BeAqiWWMGt{L>ljsZ-A`BZuLN&njH&$~-%%NhaxnD>w zV>vO!aJfyp4K^_E#0gece_?DS)Rzo{aVCgh(=qmNHT6hvgCMvTc5sXWIH4wG!@>Ww z!1-@ndN{wk;iuhMi9}=*VP#)Ir+exD zW8h_>P-no1-vNY}5qeny?vq1Z@jWU*b&TdbyI|axB_*^L5-8I+zMJCW=Z9*~K+g;6 zy_Eud&O~(L<3{KhqNIGfUvy3s%#rMeG>KjwBr$`g-t-7T3MW3QJ61jUv1)z|KET`g z)>|m$<4%7Eln$wT@!}fS9_>K=8*5v|m^?QiU{c`NDzdU2km4Y|4}yINnUCx*Wf%@g2$L<_IjGybeYle1zuEw|9v z&UEUnx|QyZ^7bR9V?`EU&SxW|9+B&E6s$=Sq1spYj?&rwbm>9?B@gJt608A%~^Vn#pIf{VBu5Hd1Ri4T) z9JIok_hiWNQe#!vA&$j<&R~o&JT!r_RPGVYXmHA@KWCo6n+egRl*2p!-mR@gY*(c|#l--?+x+!db|Kxt$peL~edO6$W)|X^)=^f19M~2Ad6_o)M zrItEjfP#px4*I|ybO*GFn@5 z{BM+Z)i;dLycny2%?a&TtV5*5pq(X#HK>;)NIj|OnPNE+{$_NhEC0%ewq_1>9b<9j z*0`rE?n41S+$ovt)OIYcDbnI10tdFha=xq(@CYcxOpitpKcBXvEe-RI?SN@5A1(%O zFE$3v!`jhw8EoT^b-?Grow^NrS^3;Z*)mp+b_=jsw^|*gWGMOEa2Gfp91?Bi^qu^# z9Qqru5-xAaoslNoQqPD&v|uG9H-8-5BKbkuoaV27N$e$GAwLFQZD=XN=>aA})fD0M z9lADYX@ZHs@MH?WQ^{bvt4KMi2C!3MvEbpV*1q~afs$(f`>=sGrI$ULs;97uU;13- zu&GWIhH^sySPsdq)Kz&?%205t;6S1}!%g*V{)pC`zN3)~N?>p5c8Q9b81`&^s+C!rVFn$6;Xtiu8y z|MQe3&bC6YpD;Vea>C|O(bM?PLjxRZXs&$?rGm}mJ}0^0G2)De9k4N4q}{rn=!M4^ z-6|FQt=fUNm%j6@#pluV8~DhTgf-Y(+!OleuPO=TWTM?AFye6rrqKN051${ z&rh`A5k!5mwgw;Bj^ao94al*V_ZP*DV!m=qFtDVz zpUAYe2W)|S!1SigA?{N=Q}uSXKw53e(e%Jj!>;=Z3v7Wo<6+R607J8C{GU^%p$OIj zWS(H`{@)K9JVp=ne!~L#ow-HLTQDkUi^VsCZuQ2&1sBnEaHUQBDlX*Ua%k{c(svqs zULa0b@C$iBhXeG|XA5rCNBCfwnfl4g`0fz-deRbKq;!j+bqReHD6KS#Cim{O6g#sR*j?;$9^bm_~*Ns7Vn<0oAhk-+D&Wxr93QbV=REQg=`9eB? 
z3uaHGjC&u#uu2U;<)EcEUq^|lV2&9V;3>#60w^O%A@5!D3)yf06oNW*Ux0@CS(8&> z^q#C(ax#$Ppr-R(9{P7(l8!EE2(MqFQ^2DmzcJtJDFiiU2O7S@?FP300h%(7>J!7er*6v&}~77oSJ6!)KGB(Z0NDnbpJjUNyi!L#|60uQqNtV z17D&H5ho;uMnStID~Y%SAJgrtvA&mHVZPfxO+y#1g59F{*UGAd+jT}4o|!;D*dqKr zx3Mw)km6)Ru56aiMq`aX;rGU4HedPL2ma4vz5?!@2Bpv02Gt^2GCEb>29p%+M@F>f%4fC9wq~Xl8iFGByf17txL!B5fg*AMmL(7 zIN`zdAbW4174spv)b>6prjfNeX0ye-%G(?4yM$u^SbG^>`DVcB`wg9E+m90eANJle zs;Z@369q&vl9UV*1e6>XIZ6}(QOQ9vDmiBbBuWqvQG%#oB+f`*AAN6+bGyeK`;Ws>%vH0hX3d&a@B4(W(IM^f+Qlkqt_`)2fl&DADPKq@ zMi+c1(73E1RVthz{tGX?zRZJ|_EG7_OHA+M(pmHKKBn^L3qA@=)C)AwtGy#~6+X2} z(vK+q!{E{4F5NlGPL4p-1YCgp7NG3o;H&&+`2HEbe`eo*{mWY;X0i_~oOy^?i6I^( z!uMq*h76ThJA@N+3&Vo$HR76_fCPwsLL}IsnV5&aEOVs#Q@Z7cYvCD6cVV6MV|e+K zV!5UJmbHPYX389pSugL5dFgop`L2T82f-T;P_5t|O%tt%tYbiXa2xbP(*eAB4EQ1{q}t17RqC7@~Hg+4t>Iv=D6CO;>6PzZ?=4J}-1w83BtK0N_k4s4%A2l7EyE z!6~FlZ|S>7h(5QlGNJYSvtir{5a_TVjcv0JnCG~&=M9fxOPLK!fVJjSBnmz9d|#z z-nTyj7#hK9KY&Tbmp($d%x+}rO$>MnrF+!w4w1no?yrp+ACh3OCc5^lH4mJ>1V z!7fC?@JoijCBA9$H;5a%8d~@mILY^dP`itTk|0t6tfc12xXiIPkze$()5_)a0K!Ly zeJ`z(<#6NIMn-T34?d9H1R&jjeFK0IQsN z)@1p z+P@nmC^7zc;G5vRjo{;m^Sc5;1Y4!9azv~B)GpkcfEjsiALM)n0g+`{`Mu{QKAL+~ zRvt^1lxiXE71oPE%zX($aJdKoD0w$W9KQ{s$vWLZZV~f{%{6ohNU~{*AR33qmn#); z;}`O-hvcEX7+_2@#CoLO6qGcR%)_&+{uKOW`9qg4`A&t_B`V#?ZGhRDtQ_~B#4U&1 z(LKJRnr7BaHssbakq88`Dlr5DzS9f3ZQ^N+om)M74w#vMWIwUl0u$O$rF=D76{-1q zwldz5Giy;NJwo&pc4r55uwIBv{cQFZWCn~y!$M0oo@KhqXx9K@I&&9r{LVp?L}{~% zzjyV@JGe8AMP(rQg_W9#w?zzR?dAjSrWk)4r1Qp8x{zT8B)!ji0i-+wz}b(mE=P91 z0Y9C3O@&mz?)q!M2b<(Xk%8E<>j8Y-7H@3k_3!;x43NVgrTh(@Mm%%dJrt52!m8!( zG$vq4*ES#&XlnDbO>w{fRKN^22YJbiqxhXeSqHZmf^l(uM6t`BO~_t}fevWpFktu( z%w=>HcJ1N_nIdB9qzCUIKTPIfXQcg$gazi2Z^w4fmwpW_EGG7CZ=TviHlw60CXtT* z_x1hl<8SRKxr42K0>~M8#DIi!lU6}1nr1@ytfYhwM+MNa3!F=6z zj~KJ0(zrii;Fm>fz%-MJfm%f-Fd;seesuPwA4LQO5t~EG)(XzPY$ok=T@1C~Kz#~1 zbIijs#&n~O-5;ZQ6oWIy+4zHS0>KGb1Kf*ohU4m3UMG=X)`h({iOH+05L8%ud56C= zhCTMt>1bs7mQ$!fgsU9{xaX<}TzxNPUJhdZ{<>Lm>B*b~{`K#X|7Qdej=~E^& 
zv3cv&MDQdeY-o74q%_?9)R-1MD6y`x;Lo3a5WfkIJ!O_L&kJg%Zz6_5btck2e#0E{ zrGL<-&D3$5x{bJN4-)XznG__C6C`og30cX_H&YjFa@t2XNryw{;g}G~kB$j5MP?n; zp2onwSTzxe^SxL?QfkO96T?`2N>vdge~p``x~uYMS(14co*PF~1$ZgMMQX*~mORl? zi*da|V%tLU3y1=yrsKvRDCq^QNbF+b?k}*dw{Pibk87=d^7&}tk{xZhHfxwBDF|lc z1?EJ@DR2ipDo%x`HqnRAKWA&cAxr$e?MBlk?pLW-ZJc37_%@EYH%3{D58pkMe4PID z_+;>VeT@XQajRR79J=FKcV~QJMh-i!os9YG8Nm@oB@G?g6JP0$#3;8Kni#tmSd zp8NS=ed)@ffb$&dN)lI$+xc zlSb>h_*{wGJe#A0L^8S5?o<*MmU$d4TVN6aA$*!9utysa*7Jgk=a3f-%>Y@-%szkS#F^*q6FBEXCI$St*t$5qBb6MDpbfm_P1qxHsq(}Ue| z;wf&a>g}0{05LZV)nqe+LMA&#T>sH=%-Jln3o>9c<0|>d`}XSbYI*vy+8NK|bW`pv z)k0f6wLu!EUA1Oe%Dg42Rj3x7ljp{ma++0Kj~kGYGM2ra)0PeXNFFd%gf*?XtR6ej zt^1DP?%el?cDQ+`cc z;srkp)dKGr$Y5VUf|1p}knY{z=UY=NuPpu3V}%OJ+V2F`Tm_3<5W{CKyV7daD?U6w zE0;KtB4v6&ySOZ8H0uW>Ov?u2mZ*Z;EVR?m5%|w3ju;){m!l3Mnq!r;CT*cCxdO8;GDT7#k7 z3>~}nZbK&X=U~CK%D1___8}OjkR=1dw;2~uG^p&am(sVkeM>-if^R#WI!umeMVV|{ zcCF^kwP$DTgd4QxgWLtrQf=IL$Qx{4O$SYs_=?KDvfqt&H=dJI^2AC;Vz= zZ!_bS|M=Raz$LCda>;wik0Z66x*sSBbSKa9L`cms`Od!~IMmD&OIrP|qo%cb7r!*t z(fqU_zVFnaR3~}%nojuw zo2rWYRxDNjM|Wu&&BLD8flDXQMENlCxpR^cO06>()-HahVjAG}?wjQONE=0-N9;% zL2u&LUojWo%JymQ=*bu#R-vESC#`E5PpK-*G}EnFjV&cf2|AM)N(fq}JW>^@c`*UD zwR>cCYGL=>i*Iw&IMo>t2#meeV(3^T^b4t@Oi0l5MDwV95D3fZgl&fkMN8FYKk=3` z^j5&)=sG{%5X$t(nms@>lgMdOL$&gIx}^RtzJ)z5kA!3G<_Y>FURq&xDF9Bp zj<=tmf1SiOXnUSOpvU93Xo~Vr^Pe|8!)(b zu?5iUc`a}HFqA@r?ya+-*~+g*(`$xQO;uI(Fl@jVrRinX#C7B_vS z*gRg%9vNu8Z>np-ZmEWPxgo>HJrT@)yU>u;5vrA zqDj$}+*`rsopNn*ttIE}YIdqJ>3L{C%&o_a@x=LJXLgi>v}SeA=r$Bav3aPqX^#ZB zV?OcHFby}Q{LtZW>#3LKL+nEV>)UGa5ho_K6eoM+q%k<07{j-ZZiyD@)0reoB>Hc) z$BZqucdL0kF%oRe!Q>%LD%Lr6Ncp0PdNmmdfD|%eJj1r-Me$?pHKcTVUa+50@MsR{ zYI$z>xUD50&Nr9INLiecAJZr2XKtH~Ox`YhOlE3)8zAVB(w<@OxDRi=5o^S*B#OqD z?lvC#wC`0~u^oCD7K}HHm%t&St@D(wgIHO#nyVM)gq=m)R>18G@-m5&?RXpxhgycE zekV|umkc+VQT5iewqVwdPO>~=wd;+#`Bqf$CSSd$2yLal00La)%6+j3M4Rd&``G-n z0-LN?t)y?mj%ogAf9yY`bz!DTdUa~Pm6|Klpw2s0nePR*KUZUP+c&(p8`Py^A2np^(t3nB$BVT+XTE81y^TyJEiSxOjxXLu^O)q|6kM-nClLCYr;YHV!vym%jkM^Z<} 
za+&8a-_RzqvR#JYBGv@|vC;X669?UQU2OS+{=%i*7MdrWT%f0aiaR!{U&v~AiuhidP3Np3(WqsVUcOXCM38?QwxRc_wx!&iN z?!%X!u%_|zaV1(2aMqKkr}pr0Z22C!@`K>3&C|BLWeKjXJniS%Gq){2TyV9Iht#p? zIo-w6u;#!az%;JFR_Q~Qno=N_n&u6N4Lkv3wFo6r7_Tz|Hb*v>vmhZrW>s6nTV3m<`m6Gp+TigW_HX?4BqlF+=bw@~?QV2^ zt+4Di-!NFV?kGDyP2?avkZ+~B6wCqkd`VB;&IvgL7UWr8DEs-F`Ul99VIg#2>Em%n zrU52SCM4dwygv^LluiI0xFa5Ra_9tx0gSPQnf#K$i<~?IM>ymiz58dgAQ89nbGz3K z0fMU1&|wy7w`6kOk{j>_ypy(kBBMm7!X645ffN~!7Zbvf9q5XIxRw)WLfxxcfHSC{ z*i-~fh8B1FTS$cqrH{^qAr)8JA#?!NpX|L!gT%}o_l6tx9wA0v~|!gvgb zKb22KF^#Gi;$6x#Mm9L~>x zy|+Li3J2F2z(9zi_!m<6LNTNFJ3kRLx+5*ze&|j!Gf-$l%E1^lgD2a+xuC3*cOFg& zVl-!kmFMS?`6(jdCyKQ0B=qg5(&*SB_fKu#O;Na;X^xjk&OR))N8oVZbq_?m-XW%| z14OYz?ssr1Jm_f;Y7E7lM==%%jkmVA`7O_s<1I zm*eq!KzY{6;)%FLHwkAGG2|V6AS8g;@nAox6W5P0ovtsJxV;9nc+!D2-0e!R$h3Z# zme(r;(pKFhdxm02_4xT(KzfO{s;LMT<1-4&G+L7C(8XwV$k?apmU>fnqTR<%YC!Eu6v*DNMt|>)5uh{rX{o7p3IhUzXgsg^U0NrmS9g zIV9|5Z1!q<5yU)mxp6mEb%R`7yD zbUM`DwBrL(Sf(FEm2&}JCTwu3Gu_iP<_ zBb%IW06~paZUQ;?)$(Dh+>Q0?zeiUfeu82s<-aa8-o^GGy>QlgFKDBQP@o|RY)!|n zw!KleN7``^%Ddfiv@7dc5`|Z8@nwbXTl?Pm8W&TQLU{X8l^zjw z^YphQlGz0NTLxsr2PP+Vr44X+x6-ykVJZ`s$`1v8q2>0=7ldTC9n76iO8t(iNSW`5 zI^^^lY>Z1(zadjE9df3Gc9&}qkj?;`y@7q$v*u($<`t{BzeyoWi(xf>t$!e33Xy*| z4@U9JNyG5I@+bP^4lye)9&N>DU5N|Oh=^4q6LeN*`vA1Ok5h;6+f?CjI@6)i-(qZeBN!nwHrW>A=MbmLO{7f=Me3Wzz&Z@@F9CbnkkTD@q0<=S&YCgKtti!DVXMv4O2!Swt;usN_lKXc zyN!)#ZQIe@$n;5W5zuocJXA#ICK!XVozA*y%n`*zcn_W)B_~utG&G-tCnbOGA;V7kAkkkef1iA7SbxFXIW}3cH&@^v&HW<1AR3U{DY=HqJL6%vEgd3J^99B`&h(wghvTkEVlmsW}kD2!;M-q0ZoEu|YXlAs#X@6#l znchg$CuhB`G^2Xhfys$wMt7cr*$0te_sH;L%vhQe#=4E&e@>>gkdrd7dbYFgU?d$o z&2w({Sh5?ExbDLy;^WJP($;j1pslD9wtMX?WXe;^2@UqT89cO#+X?2|&gVBog?(K+ zXAIk}&ex@j`7A5%IG?*Gq(NH9KV>-arkuF(>N9c-38es2#LffPN_gfZ!t0OsIqE#F zdj%^sy=ivw(!7g3Cw{y&8C%((zF_NLZfGq#JP%jQ;Q zs+e6*IY46HwqWh|dxo!`4_jp8bd_z(^_{vozs-lifhl1$&)^xQJ=!n%F8&77N8=5P zQ-$+dvd*+M_So+;bj@s5?PO*4{Lb_>yFJ*iMYM?oL->;|F|J;Im4$}AQ;7>U4ENYc z!&|IE%KKw_pXDlSrcH89iXL~pmMBmY-(DZVA2{b}c{TsX0hWVd>CeKl-6_hk-fq() 
zBj;Lf+oZyr^)c@~A#MYvF{qh}2xud8YZ~XamJLGA^=Xy^{io;Vy*EDsMOea@YG2fq z&BoL5W%ZW6Jnz2kSl1s?OUjmk#k3HsXORvmu8*Unz&_f8A|_pv1C zBo0AHG&sgMCHs&)&$vA&z@nLew&PJ}jzu=!gLBXms=tmY8zV;pqIm!^a{@N$Y53l6 zUM5>goSkHN3Vct35_2O6=}F}5kG$jfOtF^~lx7*p|0&oD!mYvI=K@WI90b4fel4*x zIpu*bM!#eroa3!Pm0azm`A+gVotV6#LGdXQLkpF!%n!cgWqb13z6VTTn5W9|ztSnD zcK;p7!iWGXx zXBKFv0%W7bLv93$yt6g4T?Ox+QY$C zpThANA(RKe#X{xbVRXu){b%_88NPpJ-(M)S|H7A-QybJM@$qssZVBd?RGN697|G#3 z0KWe{j3OXKjLel7Ed@Zrb1e7X!HEUI9@|I%z#h@=`hSLELZ+elJ;ywP@R8##)k`jZ z@K8E{Sl&Zx+5u|w55)2$AjoK+TS*A8pW}iy8>B?FQsV(slJfC$X`-;3pgLbv6DnT! zA5nqeAyNIONi=5d)KqUbvCjy%E<%I*;%O^W`IoKi%KGzJC4PGrb1` zt+|O2N<=8cbw+=76&h&4i1#LnTnmYf`=e+l~k_0D~DL44j@ID?W46W8~5 zpDh7xOnxElgGE5;mC$2qC}X>5V@P9s_FwQ%2EY=oYMp^WYxCpF3S?2Q&2d)nz3BpCDKK?73!zA+WlZb&5J)0z9 zi94QDNpw%l>oNy2zvP`t>5i0cnS zKg#m#LGY{08@{zP0osHz0J&V%SFV^m01c!(K{2s*Gnqbi)ToP;P#XekCRhJep2P?d zac%NDyd+^FFe|k=|C31B)zM<|zp()SB~i12ml4a-3QGp^bB3+Y=P*Kt<{}jSI>rBc z>{1-DzZg?5Y7kW@Da94M|0n49C+PUk@cnPaZ2uX)e}?aW%oxz-(&nlshyv!5@98FE z1#f*`SQ4+WYO>$qh==U&X~OdO%Mh|G9b9DEa6CYWRupL-{8~D*i}YSeh3c%3Iz*9N z_UCj6q`#K~EFl8;I{?a10to4t%}qH(QGMO}L0|27(M?N(quJ^d+OWz`LkJV$5Q;iE z_8<_Q9?PB44hj)0D3Bt8zFh~ZC;-E>9sfIIp1phDZ~V#;xecZ5evRYy*D&jcr~;6K z-T_h*Vz&jcw-x|$=)4y)-jMe_*x*av%^~rSvxtUq=&wAOL9)H-xE?6Lm{><1Tj56{ zy<)?5x6^5KrJ7Qt-d;nhs6q|jn+qC#EYD|o^@sqJFgJ|omi&&f7XXRUUp&p|%z_WS z0|ZsY|AsImoc|z&^6=R=b9reY|E+I2`bjtn&~Ml!QcQ^!?u;hIw_t^Y7SpD2vZM(%y@Y zE~*vhg+{gtbEEG&U{L^Yr@E9MjDwlfi!{>EjTlZB;9bPGri321+D??R;-wmJG&AG`e7CVYlzP8HDhOcE>p?%sHP5L3Z>&P1tZ zInc0~hsrArAPFTe29e%jFFXD|NLBTiHc<+_B_>9AR2NsT_H6`X>YeUXfT*%_J<8bZ zADP~Rpf47N<=zm*+Yb61M|zrHrl&yXbOWmCAfw{m<`utzv~W>qw}LPa(_i?} zN(QvUF?EoxFnyy|8Dsa#Z{78Gf>{vg(v_^)p7Cy4g#i&oq7;Ktc5)y#WY#69pJUW8 zUwrKylGBX7eU&(@l=68e$LMmmtili|_4K3|diy5K)k3mv)XS>frE1tg4D47cU1t$a z*EGktzdur|YchJa0kD^20eaI|;oz>n-6zo)9`_fh`Wf&0D z1@G0mD^oAT+y{NC;`FjOj>PiUVvz?`n3;hd8OmH-07*INmzSC0Z{Cry)|FPsBsvjP`}a&yJ1+I?SRNk z(#AMYIiHtfWWiLQ%}WhPkmN<*fvP{-{Fkm}5jdqqwVzn3pCbL{pwAf-wuS1EnvTw7 
z95!wg$BTAQhJqT@`M$oR`U$zx`dxUGtF=J`JZ4%mQ(0NsJ~dw)A&qwNK`lR9Lp?{Na?5{Nx`?m3)>cU#1Nm^y@3?*f^(4kMN1j1G{7BQq z3s@8sE1$I(bbexF&u+1P4Gvmq57gJGfK$cVL{a0kWYE}*m6oW*dg0w$@TkRZKQtf_;e9yXQ_ z$>SSZPM?Nz4c#pd8;U@YgR{>>*{m0D%2i5l`z()E4>?)eL(euoN!lYzmgK(R+me}w zri=X~3{H9Ur_)Rn!-D&zN5rfDp&Q^w2rKpfgFMs)0p49pJ7RT^(NHDIO}fscoxWbS zyR(Z@ADk0X4^@wIQV~xCy(6$bgXs6A!aPaDS_;p#2Mhfh(;lC-O5&SvrEyK|ZKpr3 zY-DOOt~I06sie+vl;aUqe57!`o#qUUNNdZ2jcU}4NrT=qO7FIpSTO~>A){xQgRY1{DiFeN}Voc&)^QC>dX_>H4W5}QP?-=aC* z^sSSU7Sa`V4HJpfVX8Nx3N)LWCbaNX#yoqvUyL>+J&LnqQMJnz^mxN=d*(d1FqpH~ zol-W^+zWt@;HVYfYR9YGP3pcU1_Yuu&ydfX15#m%Al0{xEZ3Jt+ZsIDeVww~Q*WY2 z*Wb<9I*dK$d~$?|-ktLe!*IL*4hS1n4|frfYp?iTYN@u|&8Yr?h0upAlVzR&KCkk) zep+={7FQs)$)RRu%B5b3xbz4MAAeXJ+y+PIqsf0kIeXP;r^DLW zJXFF|l9G(}nggUvDsoheFBB}E)TWoNuiY5Tt}9)PmC$`FM$+HTCdEGw`yp--njsX= z;4D&OXmjrn0{)C`vY*CGJ*Y96Ik3Gkb=4cM>16jInbO@z?5?_;EXV%7Dfy0=BctT$6` z%XUf;l)8ds%t6Z{{bAHT$Ja6N%xae%8(Z_IyQcwYc&2aPk@6hutTciYZuy0(azR?? zfeCPE>KBjR|M8yB?Y3dt3m^(V3x9F%OrS_y(+7=w1^W&jJ(R&jGiY_##XD^e;1Xud z3h)^qBbH-RzQ$$|0|wKNdm|pQg%r5*e*jHY;`rB7Kjv+WF0!d%@M=Dlj+@!z*}iM6 zmAWD=tsV7~!4@PnG-g&OUSH=3@JiHe6?Ppyz2(uB=`oFQ%+4~Qv1K`*cz01MG=V&m z#x?uKF;?Q%j@rN=qoCoJa~Fy`v-5v+MFE0sc*E1w%+>3ed!S4AH;%ZkT|5_pTh$<4 zdP^GA6;@xf3!S%Ad2BusKFKZ!2+^06vUD9KPfJrhqBRcR{Q+Dij3K5J*^6a!-d380 z#Pj8JVlq1U^bQ19A00z`fY`bZF58bceN)5Y3r*`6 z8#Kuv4Tn*T#Mt11W+-P22%WwU$G%|hTf!e@pXB;#r=fp5BEyqJ>06R z^Im8_6|2KL9=s8j4s(81 zy`ge(S%Pt8*Por+OEty6R_>00f8|6@AYahuO#l7|-B|&aMM?y_<^}bdgHQa)S?q!Z z(5C(3b$78IW?BBLn=x1CZZ!|-VTmYdem5v|NvZVb^LrV7T0w7=Z}*o7%IB4lNA==5 zRKEwu6I})1x0wr9m)}H$D_0KlgFdP!g(0D7W!!#n92xbX46NbMM zjAvm#_fZYQY9D{@sSl5v9HDV-@GBj=3bZwD^>K7nAn9KOhzIK)F{`qDS=!5GgTa8j zYra5p?@dfE=b6R$5j^=8RVG-5ug&gGVy6{ODQao~dUU>zc+&h+$EmA@!p^sMxA5rM z;c_mvyS`d=*#AC5;iR9k8K=ZMmbdn?_(8}%5Q0jp+jiyGW&l|+`1H*QJ6@5dE4RJV zH!JSF{gw@miq71E?NK|#7H4?RojE_E8(z0jDE=TzKPG=4_Uj`nma1Ny4pLVzv7N0G zL@yYHD4Cb!V`Ex*oPt&>@9MamxvCeQpTul8+n(&3t$O~D(|kgt4jW^uR#iCwwLUzR zc+!h7H|j~80=(-CUb<(lpVqK0mv2nSpY%Dlex1&u|Ib zW!gReK7vSN6p(<0w&xMuA7Xf 
zeKHUY}3h8zpkYErY2Af4-nO!xhQi!oSOUC*;uAqdzp*gHM8jyRI5YoRN-kS zPU@dDZLSv;)V#lU$eEGe@2I4F+_L?Le14aF*Lv8=1f@Rs*H8ekfj;Qp=v8K(3ScKX zGGLYxmqBEcz9%t_PCSnagmdx?3?Qw_8c9M?!Ua zhY}-F9~;05&Q$bE5ejpD5iDH$0p~OHIbKx)$l8rRAckzKS&gD;+9HOoHRcXUz(gu|QIQ_e?*C@C2d17<9&e(ImO*h*|dgB3V4Fi`FYxPpi?I%lqv1e8At0O**r zqTK1T-q&um;jEPW+)ZS~bKq1xiUA`&>e4=Tx13QmlXo#!qY3dTa6P6#tx=!P7DH%q z#<0F~plW&Wk6?q|U#a)c9HarZ`s%esu}SJNdPF!Ju_?|;IVtp0Nt`FN!!E`s>oO%7 zr?xSrG~?YGFtzcx?Lx*eJF!YCXnAcS8+4`I&1R~u11Fc?gZl|XPbOarmXLEyG-B3bbG3SgtPha_P~Si1@L`z zs4-W!_6l>fxvhgK9mEiu40%r)w%zHum(wWZPr$L2*cFqtz*kt2@|AicsNkBN3>=&7 z2kHs}@4WCZI6{Eb9Mn@YG>3`oEbQ1V&0CCZQ;|r$;EHj4%`K4p-C|HZ*1LRo%5yhX zZfKL#0nRzQ0<`=K<5Ww13Wg#~Q9H9NpD-XZzNB({Al^oVW|&<2cKEC82)4)5Z_l^z zhQPYB^7O`a4e4r52J8E-aCF=`n;o9XAcX(jJ?-qg7)d5M$Ft3EJ9M9c2DAZh?TErI z@htW}&O8;5Tg*LD_lm>5{qtZSRpWTbjaY;I#LcD)jZZl`tFzT4%a;w(sthEVA=xt3 zeqQVpLs&ta^DKskxE;}ldt+;@BKH`lW1dwWt9jfek0!Ee>A_R1xPqY`*Ajhtc+5AOf2JRgh;hV0_8)SG zo$0|0ncVC?sWqkI!Y_~=_OO}qjAqL??LDvaKx;`4rk$bK+K=|Fc~?9pI-;y*2cnNs z8pQr5=y@c^a;4p;lDfi@+d^e5w31Q`ikb5(GxCLD^DwV3_2+B(xu7hfZWljR%=Ns3 zivJi!k2i@>&_^&s&2hryS*SsrlVn-dOmEve8{hY?@O9`}(Grq@tfa9XC# zbgLlHKrUtZ5${2=lNzN60lIHOXVO!_zIGDl=FHUdT3Vu`clJ|f$P`|E{`Bgbu}Jss zPck>+HY$r#CSUXTLKf41a6&fk>)l5Vlf-a9%?_ogGg~s4#Pb!GPp;-<`*3y}C|IL% z>32nJmkbzAek@1I9GyD~w#>k1NOH+R|ufWARl#&Sf1CpvH zWki8{E?gr{2+>s}A4q~KjZjLULTH?gKWH3Ubj5pm_{H`~Irs-eyZ$Z$-1bx~h{_cg z9kftF9RWz1Z==NBvMK5VB@qI7#IQ+_qk2OrFCPK$4WjEFLzVt#O?xhK@P+n=&orQC z4q6LE2xS7+82{EsIM+rV>`gvYqCT)uBIF{a@>NDuFD`o@+JUj!C43EmeAh_!?z%`D zv+eJ~2~xtP8{?#fhfE0v3J!6&QOp(!LenoNgsw$sibW{Nz^|6v&kI0n&nSVCK3@AW z2N9Gh$LQat#It5&+d6VOF+*(=06uYT#`18ZBovrF^YCIJDq||f!&4%n6r;hbJPkLMO}^5Y zjTGkaEI+{m5yn9GuS8O6BEvWq-8?RMK*UgsHjaq}KFknVB9W`qDr{s>iFLbv`AJr| z=_Y+XA@t!q09kd3Q}ZNH*^Ge_4pG=L^r0w`Up(b10zXv+i_5V0orIqzP*-zjfN6eNV#|CAD?CLryL=mW&WYSk@Ti~XGthQfaGANyayPgU=U)c&}u5~0t zIPB+Gpy)$#r22Um^q;g_>OA<2k?;=aPY>F8LBo4+vsq#0Gzvfis{QjS-Jy}ud}i3Q zMDLHlO;=T);s&X}mWtTlhYvrZ5xGS_Ii|wK0+Y^L>O~rKiwkN2VM`w>d?;gk`&^4C 
zOc)xf8g&-7cTob1W}__GNe;R6HX;QBZ86$JsNt?_xv|OliotE zby*qSo>Zvi0^XCW2~hUQ{}?5X%0n3_%I73acszpsBXB%Hmq`4N25Mo)Jk03(hvejW z;Y~Dyez|U5bee)0a>S@ABc1X&8l{ogvxjz&{vplAvLW1|6dJ6}_y4>1g8F|&8ZqJl z%$HS|9NJtkBQ~NML211^^@8pd?reV>F*uo|H{Br21nNl!5VSHN#-E8~R}jx|^!t0r zR`SAl=ABWWp~fRu#v$E9s#JiaC{rbW__u0*Rs zJdT|_$R}n}ajI8Tj{OV7mso1(HHGGbMp{NJ<#Pc%6$mAEQ?_X&E)c}^Sj#)u`%M0nCZC3v~C3i7p?B&)^MBJmb z^tTp+q`3Es-W`gt4#fRD0CgY-Qsl@4DqC{8BWndzMq8@|$A8Jo!GxXaxF|+Kgi9B< z$K+OL&F0pLz(RwUu7^0z>I4z8`aeXbzb_*Kjj|mDHCTKu-jyUhq#GqbuI=wGh(L^S7X;7V1p{iw z0Rr!06_LlrtKBXkCzMPenT=9Up>+tQp1fdtlkvIeRnJ+84;SY_u2q~=ux8S@_uX$u z$T*Zr0OnIY!=MmcGYkQCGM8Uqv-8xkH+vpmx!g2;A&n;a``h*3j#|m&jog{eq67h_OAz7*xt}A7wTmL$s&E)$`rQC|aYfo= zNOA8TLe(HfkrzDJwmXdTcZ%~pp5On0dj6nWe3}6DJo+c1L)%r+J|5F6Fm6^u%A^Ov-g0g=`E}z>ql;d(A9=gJ7i9f zjwI>$OB(U!0Y!!Z=fs)-K&6e2tpU^}7rAMzu1f`715nlq{Zp8CA}3h=GEc7UaP}`h}~Wbqrs2S+75efky^LqBSKKAuLtkW z*VIh)Pn7suf@?_|03y2rRB_)pdcid?C2vD!1P*(v)shhJO_v6+(CS3?_5A=uO*h?Y z6^))Od3egW2Vi(-qi4xC%W*GX8V}yq8Kt0WVsmL70Kt=TP^cOa`@Y!CeD^C@z}XOA zOVQ#k9TcIJU`q4{6|Wh)YsgZq5N~s))9w0FBI#vF*fr6w_7P^|*3l4x2%t2es=7hs zxPUO+>?;eF@b2rl6r%>g)`ys7*vP%g_I&r8DO@SL1xZ+@MX)cxS95|RN`mxM2()|9 z{uiLFGLZVo{wn;h%CU)Z0;Mm7B9yNlH6KDe`{-FL!xkWm+*pHFME1yB-Cav;J5;jZ z-OQ{fiZWw1lDFN3w_~(jR_5-4GbkH(%-iuX!wiE%53~4SE!R2ahTg?fRRz z*3NaKXfP7IyN+d$qjxtCt;9;X)l5Z+xTgDwKY&ztZ!E!la`A24=gA|mLn|s=6yM-x zOLFvRlkce&ju-~9Oj5dqkLL7m?8#nuYLS=C)cJ$CxCoN;;&K!8-9);aZbG&CZjr|f z>Er5Pgk8%4Vl+y62~mc9tQ8?$R<#POgcc;@xZ_62|G-Z<#+kR~Z7X&S5E?I61C~88 z*vOaHf6g3tK(S^fA`H_e-c(uoqzaScnzM{l?gBNFp`_rof?fYdm)aR_9io-&^?H#I z=asbw5q<37{o}{XR{$z$GaYOmvYxHcpwo99E5ZQeii*=j7wN;~;`Uzee-#o#%8s=y zxmTt$<_qq|*=32GHa}|KODR1IkQvn&pR*?Lf#`-Txtm)mxQV_3G^t6ol#6G;3V;}^ z4cL@o=%=rPxZvt_c3qNRp8L$#vp4x`XH({3NvQbBwg-X>wgq}siBi)-{tWXDFMV_nXc+xz zp|r(4-%aI|Xfd~@U8W}c#G%x6 zU8M2*h-}07m&EB+otMs;_~$ioZp+5i6OX;}>m5HbAX`lN&_C!f9}^cYwXjMkpFZb% zdLjEUhLb?f!tyh_c6D+Fah zRDzjR$8c{}7dC4-YO<9OpQ}40d#z&J4}RHTi_O*y#b;FxoyM}O;H^Wt1>!I8uPX>!gbMjbExz8PCQ_8onl* 
zH117Fjl;w>8c@W_W2uIBV)79D^0<-%T@@NCHTord;-MRGD^`!kbmKfqn>z}&J4$dF zH?%VIp?~@eG?7ISpC}Z+;B@G;%YEZMY9|^PJz0f{M0{p7sQ0=1yWykI-)A_i`6)QW z_q5?MLA@%6OK_vT*0k(orA8QALi6^^B@f_1{ld@w1ecwFdx6L%?l&mdv=6H1%DG)INJ3Un z0EL&-{a<@~y-GW0@o2f=X?hiD+BGYKeB@p zmxEv1j%MS(hBd5_3#ar52}w$(q#L(XvO-9p^!0r?qfcQ>hNf%wP@&Kvhi#bHx41(; zNbbCLn`jWY^6u$}5Z9uY$FT`UQs{N4Eq%6d!ioVe)@Z`q$Bdg_{B1LI{?D4kL*>32 z;#EAVrT?s09mYH%#a15LcY)aI>!*g0pxD^hDa+~?cEk5r;E${H20xAp;{zvBaB6A_ z*H(!9s{n3%LP?46gm33K9t^lFTeB+X>KUBrgl6KAA}Y)pU0algukLfLO<$GNrCfVr z_wXv`%RgP|+oC2;TU|K>tIZLLgmPJ2-9F!2B_6+}F!AMHDmpi-X>!gU7tju+sjxpi zC`t$ z;}M*j)lc4>o>|d_T0U_)k0~aS&#YvfyFI-caq^u;7xg5*%W&WKPr`}hucI3W2+q|B zu&^>Z`avzLtkyI89?tZR3lSMd-FQ^f6-?7`crJHEwXR2bIDePNPs6~hT-xZ9dPM`Z zd?RA|cFXpSlq}Et$NsYHonQLP@THW^FyhxF<89xpvzJuh9(B`>j&2>k54F6~7^=C> z%+xw8{fN6|lvw}itu(Gi;*qjPo3E^ynXXZUJt2p8Rz^;|XANo@m2YnSH8UjK(?cwA zYFsx)G^Bzbmrt(`CQb^k0q?Kd6_e#KdyLcaS;)$_hDr`iD18f?W?7_q{0mp7@i@(w zmp-4tj&CBBn1)+(q9NpXd$C#^?fF_yd*GG^l{o<)^;B#^!X2BIn}@}(>m^*Oyn5%T zfaygf#jTeg7+kN|8r7y=7Cr%Q)vu^*mfN8*irkCy*vzih1ST%MY&p-3N#&c>gomgk z`2gU5#{ruWaIS0U$v!<(E5E(Kb+owNkl8yC_M3X+D}Ko4v|2}+K&I8<;TjKH>EQ1r zM|m%57VZM%@-duT%N%=IPhD!LLqcF%fNNdqMAV`?(9qAlrd7_Tz=I?S^o6*GJ7on0 zb)je6(MMQPV4vOvEE+Di@jA~hi~0eex;+ag-!#0#B<*iwX`ZM=l8D;2l1X3tS(BxB z-LlGMl)I+%q)|~8#5h!4RD5w(Rn^ow;6<+Ntq=Nk2B=i_TeBVEWH>8pARty?Sr0l& z;j=d=6q0oB%XGkTqcHu3pvB9}vfr<3YQ}i44C#+%OS)x`c&v^{z`K_~#)LUrjkggH zOU-51a`PB02UL|;1|Dlsv$6_OQBiHdflTY{+4%PbChziRhCbT9$?qxgcn7bSjz%Ye zR$~ZWL-s>QYzEhbXSyGb`E{glNrbS{(mo|P?G39+9!FKko^QICW1=PLrh10drG{W? 
zW@dg^r^+DCXg5vn0W2H3C*YVeP-G#?pm?p$t~c940r0lRHaOI7wof+Quz&YdXQZ{` zHqTeFF`|g5sJxTDYhxy#C%3VglKCtMia&x0`Qcr;04GU(HkQmPktDeBFR=NEo~z)*l?hYliN-mIxn0E)6Jg&)>`%2P6{G_NQY zSr)OX)qnTT>w1amS9Kd|fm^RUCNVmJ#HmvCVI(wx7V0JV-ax{lGIP2$z%Y)3$UBpY zdAe>|)BgA~$3ULaze#dicb>m;`Si-f3eV_0on~HT3A5T>yKWN&%BwkeGd~`FTNAMI zC0_*IhsXlKoKX+NQ?uV>FjJ^Rfz28zHNxT60JOH0ixbTE`7F$6j|lnZ&VR#*1DWG@j4N6q^x{w3l(?J*xuvF^`0@e;y> zTJ@!~?6>*%F@E^wadT%>hj!b7g5{|o$c=k7l>)he+GM#tZ9RYKLZfCmQdOhEtPFA9 zgE#kiVes$$NnrpPmWk9vA3jQlB!!Xh))fzXh{8bk8O@T~>ZoL`va7B%ITsdY)OGK- zfOL4jBeEx(=L^s6>6VN{1=As5V8+ecpI*yg{FV*aR8S+lqhN|`uxi)c*sGpYzXJ94 z#$(weyCEX;;C6l%@7rp0V)4C4YNf<%7eA1eZ|PrAMC>9=m`>mAfe0PO(?JBP+k6>oK`ve^DBK$W zRgT)J!d>SS$G!b~N~e>K%5+ZwI8$^mJnIyc@zV!S*8t0@VSIXg>?GpN*^E=7plNSv z7hkEWTNu+=*2#tfV7AqNl9oYZ?HT|OmV-&9dPkJ%L-VcM4B3m0RS zj00f1FS+RcL^148-H?LfYUbTLn&x{5beK}9kB1`vNbfN!7_!uRpU;sN@Q-(n_0CXE zNaJ5su0|$pFztbO{SJRHysFDeCI=_PD6w8jU1+wqx$fGPS6YBl2;K0Ur0I(n!@1>} zv3h^Mhv8-ZnFppEemz+PurYFIj7TWeI6eLaz-(rmA3>*lXDU<;ggSlMVi_IKD}4Ed zqzPk{2-dk$`dGi^L5s;c8Oy-GokyayCn+v zc(C?|I-3;_To$_0CD8kGI3i-nWjH?r!&Yg~Ur0GpY9`r3;Cj-wat5fpzeZXV&lNtl z9m)%r*31Te-ed93S-NdBSSjaTYUUFfa>#QgIXY9fgD}iLq;n=k-w<-Bt!5RsG$3q# zufi6QKb4PGxHGi)-?m@*k}KYxuPU-EY?+}F;%E5heWz)uAAc%VGRADgNNAQj;W=A6 zKzo1v|IT>LyA2LWWz*^O$6lu6-~(J3A9U|rFFl<0QK@A`e~~TmIDMF`vDO$~XY%Vu z#J77HmaIE}l!HcdWGl=Jw`ym88DS9^(!cole<_6)c0YG&ONadsRQmZ@li2Q!<~GGz z!h92h63Cv*{YW_TKGCVdz9#7<&+noLe)>39e{Jm%sVu=+#1%QNhFj0OS!8F|;`*W% z6CIF9cQ6ckv3K*Q(CUoe3L@>13sCh;*qf&E zX4%N9y4{^|J)^hBvtI|Xehv3cOP*OwHiV|;9T2PkV&J+JN{$dJsI?byHpt0zH7n+S z&{*wu)ZCxwxA;EwGuO}f$19- zRsBc2+ND07(bA7cX%Y{{sSo^O@CHA5B-R{1?991ntfMJM<7BGzYXl9P6#G-I59UcOMOjcpt$GE-n9 z8nB7v$CRu4pUt|W!Ej0$m%$h-=kb7M5b+X!5Lb#%W9c$|+8Rio-J8NO_(SMBDwtIKD=_)-fTCDe z3NG)4O^XMTQ!!Y7tor01wOke6Nw|$KGz;x$(g3PFsynE$5nr+wWd6P`ULM@!g*QD? 
zk35ae6t@yuII|5=cITE{6zd_-$c^z*X3(ho(?(jU0SCD)=a4DUAAD%OfQ-Xkd{(V= z=(bAY4LM7Cp6N1)#fdiOs;3(xb~k4)GxQA17df>`cPHz;gB4WZjkwfBA2fRTD|;d= zer&5c41R<6?NZ!`4I4xW4&+Fnok-jh?Wwfn_G_Z)?-!W-WVfRp_5WH;i#-Nsr#3_31jw&gXhdAThVvx=~QGZME-HI#D@r!=<){ z@))9ygVE3i-?UTxy;SjQD$=T}U)b^NqiqW&_nC37e8LW|wwBz9U7c1)g-uCrZO4w) ziiE9b(E@Z2+WpRsAm2I?l(>I+hv6&a66^F=S?@9P-W0^wGZ{~>5sBBZH$}k&+4oOH zB+p+E^wzE2hBv_kHd_0H#HQc{eon@RR-GGu`fEN11**z?dgQI#(;frX#S}KNSSgjK zFKEBD%nchNwlqgD&i<2)o>;DQ$=A${ZEiQMrm&C+LEVE>!)oL8$7HcLyP2vZTNC8|x2l4? zYU6-#0-iPa(#HSka{HqN6;+L#-Fl@SWyuJanuq*71Yu+$7lJR%yp8mc2&x^}0l*pi^oKUYRIYN@!kykP;dcgR+fc{eZ>uqnruKa#ay0;t;5 zV==-SZ|!1F@?D=YFSpq*o@YkZA)&&v{;QvJn3zhOza8HQAy2yeSX!6aTPf7tbCZ4? zmN{Cn*J@sAZGXRsSSaF*TFdY|e6%bKDEsAg0ybgWEQmO3m@Q!=*ptkLWG)@rREkEE zdxb-%!#YyE@|6b%E!<^i5Y3N1AIkO+knN-uB^XIQmH)vl%G7hL+uIyB1g(E|XT~#b z8)t)XWdP7U3v0l<3U}U%gJ=a4;gcKEQ&zRSov$pc8<;|FO+&5{3#~)8Q zk4WEG`ZibZDt$;-FW6n1F}}!o@n{T(i{EIk8m|@s-za@Xxlun`^fgN(we3GgyBoY8 zLO+%fC~QMQk2kw$6G2?A#HrtrO1<5L4p|;#gC%24mt4N&A*-y=8_JEmUU#pO3!%EH zS%-TY28j=K_@8aas@@6U^J-T?^2XRBEN*g_rz`z78`xMXl4KM__3l``*Xmi2vIVDL zJZEwu%qKAc*jnoyE1#|-O=C9f+@8Mj(!3oAtGMwsgaVucs;REk$oA&;_*tvCv1*pe zRmwA#b{s^ilg?uCRiT}2PVj>ZTM}oaghvB;lTe=KRbrqbR|qfoa8704EqNV#sp))g zTvyTr#$GG}-zraOdki^lx^HKH?~j0vqiS|<_CUppdl7hOUW zDP?*uu@n9$_)=exyR~~$3aF}m7q0w40%oS()f7C=2?08L z0npixlqj}lCxOJGtbEGw`z<^MzT|YSCi3vj>jBz#LNm8$=kH;yq7eavcH%0<`%o7k z_7J!J_%DC{0|6SKya8fQzk?cRa4a{c>aQT^-dq~vYZ9Clc>YL+&oLHE2p!EcNr2`, under `components/` directory to define code specific to the new component. Example -can be found [here](https://github.com/opendatahub-io/opendatahub-operator/tree/main/components/datasciencepipelines) -- Define `Path` and `ComponentName` [variables](https://github.com/opendatahub-io/opendatahub-operator/blob/main/components/datasciencepipelines/datasciencepipelines.go#L11) for the new component. - -### Implement common Interface - -- Define struct that includes a shared struct `Component` with common fields. 
-- Implement [interface](https://github.com/opendatahub-io/opendatahub-operator/blob/main/components/component.go#L15) methods according to your component - - ```go - type ComponentInterface interface { - ReconcileComponent(ctx context.Context, cli client.Client, logger logr.Logger, owner metav1.Object, DSCISpec *dsciv1.DSCInitializationSpec, currentComponentStatus bool) error - Cleanup(cli client.Client, DSCISpec *dsciv1.DSCInitializationSpec) error - GetComponentName() string - GetManagementState() operatorv1.ManagementState - OverrideManifests(platform cluster.Platform) error - UpdatePrometheusConfig(cli client.Client, enable bool, component string) error - ConfigComponentLogger(logger logr.Logger, component string, dscispec *dsciv1.DSCInitializationSpec) logr.Logger - } - ``` - -### Add reconcile and Events - -- Once you set up the new component module, add the component to [Reconcile](https://github.com/opendatahub-io/opendatahub-operator/blob/acaaf31f43e371456363f3fd272aec91ba413482/controllers/datasciencecluster/datasciencecluster_controller.go#L135) - function in order to deploy manifests. -- This will also enable/add status updates of the component in the operator. 
- -### Reconcile Workflow -![Component Reconcile Workflow.png](Component%20Reconcile%20Workflow.png) - -### Add Unit and e2e tests - -- Components should add `unit` tests for any component specific functions added to the codebase -- Components should update [e2e tests](https://github.com/opendatahub-io/opendatahub-operator/tree/main/tests/e2e) to - capture deployments introduced by the new component - -## Integrated Components - -- [Dashboard](https://github.com/opendatahub-io/opendatahub-operator/tree/main/components/dashboard) -- [Codeflare](https://github.com/opendatahub-io/opendatahub-operator/tree/main/components/codeflare) -- [Ray](https://github.com/opendatahub-io/opendatahub-operator/tree/main/components/ray) -- [Data Science Pipelines](https://github.com/opendatahub-io/opendatahub-operator/tree/main/components/datasciencepipelines) -- [KServe](https://github.com/opendatahub-io/opendatahub-operator/tree/main/components/kserve) -- [ModelMesh Serving](https://github.com/opendatahub-io/opendatahub-operator/tree/main/components/modelmeshserving) -- [Workbenches](https://github.com/opendatahub-io/opendatahub-operator/tree/main/components/workbenches) -- [TrustyAI](https://github.com/opendatahub-io/opendatahub-operator/tree/main/components/trustyai) -- [ModelRegistry](https://github.com/opendatahub-io/opendatahub-operator/tree/main/components/modelregistry) -- [Kueue](https://github.com/opendatahub-io/kueue) diff --git a/components/codeflare/codeflare.go b/components/codeflare/codeflare.go deleted file mode 100644 index 3b4ca637b2a..00000000000 --- a/components/codeflare/codeflare.go +++ /dev/null @@ -1,132 +0,0 @@ -// Package codeflare provides utility functions to config CodeFlare as part of the stack -// which makes managing distributed compute infrastructure in the cloud easy and intuitive for Data Scientists -// +groupName=datasciencecluster.opendatahub.io -package codeflare - -import ( - "context" - "fmt" - "path/filepath" - - "github.com/go-logr/logr" - 
operatorv1 "github.com/openshift/api/operator/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/components" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" -) - -var ( - ComponentName = "codeflare" - CodeflarePath = deploy.DefaultManifestPath + "/" + ComponentName + "/default" - CodeflareOperator = "codeflare-operator" - ParamsPath = deploy.DefaultManifestPath + "/" + ComponentName + "/manager" -) - -// Verifies that CodeFlare implements ComponentInterface. -var _ components.ComponentInterface = (*CodeFlare)(nil) - -// CodeFlare struct holds the configuration for the CodeFlare component. -// +kubebuilder:object:generate=true -type CodeFlare struct { - components.Component `json:""` -} - -func (c *CodeFlare) OverrideManifests(ctx context.Context, _ cluster.Platform) error { - // If devflags are set, update default manifests path - if len(c.DevFlags.Manifests) != 0 { - manifestConfig := c.DevFlags.Manifests[0] - if err := deploy.DownloadManifests(ctx, ComponentName, manifestConfig); err != nil { - return err - } - // If overlay is defined, update paths - defaultKustomizePath := "default" - if manifestConfig.SourcePath != "" { - defaultKustomizePath = manifestConfig.SourcePath - } - CodeflarePath = filepath.Join(deploy.DefaultManifestPath, ComponentName, defaultKustomizePath) - } - - return nil -} - -func (c *CodeFlare) GetComponentName() string { - return ComponentName -} - -func (c *CodeFlare) ReconcileComponent(ctx context.Context, - cli client.Client, - logger logr.Logger, - owner metav1.Object, - dscispec *dsciv1.DSCInitializationSpec, - platform cluster.Platform, - _ bool) error { - l := c.ConfigComponentLogger(logger, ComponentName, dscispec) - var imageParamMap = map[string]string{ - 
"codeflare-operator-controller-image": "RELATED_IMAGE_ODH_CODEFLARE_OPERATOR_IMAGE", // no need mcad, embedded in cfo - } - - enabled := c.GetManagementState() == operatorv1.Managed - monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed - - if enabled { - if c.DevFlags != nil { - // Download manifests and update paths - if err := c.OverrideManifests(ctx, platform); err != nil { - return err - } - } - // check if the CodeFlare operator is installed: it should not be installed - // Both ODH and RHOAI should have the same operator name - dependentOperator := CodeflareOperator - - if found, err := cluster.OperatorExists(ctx, cli, dependentOperator); err != nil { - return fmt.Errorf("operator exists throws error %w", err) - } else if found { - return fmt.Errorf("operator %s is found. Please uninstall the operator before enabling %s component", - dependentOperator, ComponentName) - } - - // Update image parameters only when we do not have customized manifests set - if (dscispec.DevFlags == nil || dscispec.DevFlags.ManifestsUri == "") && (c.DevFlags == nil || len(c.DevFlags.Manifests) == 0) { - if err := deploy.ApplyParams(ParamsPath, imageParamMap, map[string]string{"namespace": dscispec.ApplicationsNamespace}); err != nil { - return fmt.Errorf("failed update image from %s : %w", CodeflarePath+"/bases", err) - } - } - } - - // Deploy Codeflare - if err := deploy.DeployManifestsFromPath(ctx, cli, owner, //nolint:revive,nolintlint - CodeflarePath, - dscispec.ApplicationsNamespace, - ComponentName, enabled); err != nil { - return err - } - l.Info("apply manifests done") - - if enabled { - if err := cluster.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { - return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) - } - } - - // CloudServiceMonitoring handling - if platform == cluster.ManagedRhoai { - // inject prometheus codeflare*.rules in to 
/opt/manifests/monitoring/prometheus/prometheus-configs.yaml - if err := c.UpdatePrometheusConfig(cli, l, enabled && monitoringEnabled, ComponentName); err != nil { - return err - } - if err := deploy.DeployManifestsFromPath(ctx, cli, owner, - filepath.Join(deploy.DefaultManifestPath, "monitoring", "prometheus", "apps"), - dscispec.Monitoring.Namespace, - "prometheus", true); err != nil { - return err - } - l.Info("updating SRE monitoring done") - } - - return nil -} diff --git a/components/codeflare/zz_generated.deepcopy.go b/components/codeflare/zz_generated.deepcopy.go deleted file mode 100644 index f761b2dbbd5..00000000000 --- a/components/codeflare/zz_generated.deepcopy.go +++ /dev/null @@ -1,39 +0,0 @@ -//go:build !ignore_autogenerated - -/* -Copyright 2023. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by controller-gen. DO NOT EDIT. - -package codeflare - -import () - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CodeFlare) DeepCopyInto(out *CodeFlare) { - *out = *in - in.Component.DeepCopyInto(&out.Component) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CodeFlare. 
-func (in *CodeFlare) DeepCopy() *CodeFlare { - if in == nil { - return nil - } - out := new(CodeFlare) - in.DeepCopyInto(out) - return out -} diff --git a/components/component.go b/components/component.go deleted file mode 100644 index cd6d44c0972..00000000000 --- a/components/component.go +++ /dev/null @@ -1,198 +0,0 @@ -// +groupName=datasciencecluster.opendatahub.io -package components - -import ( - "context" - "os" - "path/filepath" - "strings" - - "github.com/go-logr/logr" - operatorv1 "github.com/openshift/api/operator/v1" - "gopkg.in/yaml.v2" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" - ctrlogger "github.com/opendatahub-io/opendatahub-operator/v2/pkg/logger" -) - -// Component struct defines the basis for each OpenDataHub component configuration. -// +kubebuilder:object:generate=true -type Component struct { - // Set to one of the following values: - // - // - "Managed" : the operator is actively managing the component and trying to keep it active. 
- // It will only upgrade the component if it is safe to do so - // - // - "Removed" : the operator is actively managing the component and will not install it, - // or if it is installed, the operator will try to remove it - // - // +kubebuilder:validation:Enum=Managed;Removed - ManagementState operatorv1.ManagementState `json:"managementState,omitempty"` - // Add any other common fields across components below - - // Add developer fields - // +optional - // +operator-sdk:csv:customresourcedefinitions:type=spec,order=2 - DevFlags *DevFlags `json:"devFlags,omitempty"` -} - -func (c *Component) GetManagementState() operatorv1.ManagementState { - return c.ManagementState -} - -func (c *Component) Cleanup(_ context.Context, _ client.Client, _ metav1.Object, _ *dsciv1.DSCInitializationSpec) error { - // noop - return nil -} - -// DevFlags defines list of fields that can be used by developers to test customizations. This is not recommended -// to be used in production environment. -// +kubebuilder:object:generate=true -type DevFlags struct { - // List of custom manifests for the given component - // +optional - Manifests []ManifestsConfig `json:"manifests,omitempty"` -} - -type ManifestsConfig struct { - // uri is the URI point to a git repo with tag/branch. e.g. https://github.com/org/repo/tarball/ - // +optional - // +kubebuilder:default:="" - // +operator-sdk:csv:customresourcedefinitions:type=spec,order=1 - URI string `json:"uri,omitempty"` - - // contextDir is the relative path to the folder containing manifests in a repository, default value "manifests" - // +optional - // +kubebuilder:default:="manifests" - // +operator-sdk:csv:customresourcedefinitions:type=spec,order=2 - ContextDir string `json:"contextDir,omitempty"` - - // sourcePath is the subpath within contextDir where kustomize builds start. Examples include any sub-folder or path: `base`, `overlays/dev`, `default`, `odh` etc. 
- // +optional - // +kubebuilder:default:="" - // +operator-sdk:csv:customresourcedefinitions:type=spec,order=3 - SourcePath string `json:"sourcePath,omitempty"` -} - -type ComponentInterface interface { - ReconcileComponent(ctx context.Context, cli client.Client, logger logr.Logger, - owner metav1.Object, DSCISpec *dsciv1.DSCInitializationSpec, platform cluster.Platform, currentComponentStatus bool) error - Cleanup(ctx context.Context, cli client.Client, owner metav1.Object, DSCISpec *dsciv1.DSCInitializationSpec) error - GetComponentName() string - GetManagementState() operatorv1.ManagementState - OverrideManifests(ctx context.Context, platform cluster.Platform) error - UpdatePrometheusConfig(cli client.Client, logger logr.Logger, enable bool, component string) error - ConfigComponentLogger(logger logr.Logger, component string, dscispec *dsciv1.DSCInitializationSpec) logr.Logger -} - -// extend origal ConfigLoggers to include component name. -func (c *Component) ConfigComponentLogger(logger logr.Logger, component string, dscispec *dsciv1.DSCInitializationSpec) logr.Logger { - if dscispec.DevFlags != nil { - return ctrlogger.ConfigLoggers(dscispec.DevFlags.LogMode).WithName("DSC.Components." + component) - } - return logger.WithName("DSC.Components." + component) -} - -// UpdatePrometheusConfig update prometheus-configs.yaml to include/exclude .rules -// parameter enable when set to true to add new rules, when set to false to remove existing rules. 
-func (c *Component) UpdatePrometheusConfig(_ client.Client, logger logr.Logger, enable bool, component string) error { - prometheusconfigPath := filepath.Join("/opt/manifests", "monitoring", "prometheus", "apps", "prometheus-configs.yaml") - - // create a struct to mock poremtheus.yml - type ConfigMap struct { - APIVersion string `yaml:"apiVersion"` - Kind string `yaml:"kind"` - Metadata struct { - Name string `yaml:"name"` - Namespace string `yaml:"namespace"` - } `yaml:"metadata"` - Data struct { - PrometheusYML string `yaml:"prometheus.yml"` - OperatorRules string `yaml:"operator-recording.rules"` - DeadManSnitchRules string `yaml:"deadmanssnitch-alerting.rules"` - DashboardRRules string `yaml:"rhods-dashboard-recording.rules"` - DashboardARules string `yaml:"rhods-dashboard-alerting.rules"` - DSPRRules string `yaml:"data-science-pipelines-operator-recording.rules"` - DSPARules string `yaml:"data-science-pipelines-operator-alerting.rules"` - MMRRules string `yaml:"model-mesh-recording.rules"` - MMARules string `yaml:"model-mesh-alerting.rules"` - OdhModelRRules string `yaml:"odh-model-controller-recording.rules"` - OdhModelARules string `yaml:"odh-model-controller-alerting.rules"` - CFORRules string `yaml:"codeflare-recording.rules"` - CFOARules string `yaml:"codeflare-alerting.rules"` - RayARules string `yaml:"ray-alerting.rules"` - KueueARules string `yaml:"kueue-alerting.rules"` - TrainingOperatorARules string `yaml:"trainingoperator-alerting.rules"` - WorkbenchesRRules string `yaml:"workbenches-recording.rules"` - WorkbenchesARules string `yaml:"workbenches-alerting.rules"` - TrustyAIRRules string `yaml:"trustyai-recording.rules"` - TrustyAIARules string `yaml:"trustyai-alerting.rules"` - KserveRRules string `yaml:"kserve-recording.rules"` - KserveARules string `yaml:"kserve-alerting.rules"` - ModelRegistryRRules string `yaml:"model-registry-operator-recording.rules"` - ModelRegistryARules string `yaml:"model-registry-operator-alerting.rules"` - } 
`yaml:"data"` - } - var configMap ConfigMap - // prometheusContent will represent content of prometheus.yml due to its dynamic struct - var prometheusContent map[interface{}]interface{} - - // read prometheus.yml from local disk /opt/mainfests/monitoring/prometheus/apps/ - yamlData, err := os.ReadFile(prometheusconfigPath) - if err != nil { - return err - } - if err := yaml.Unmarshal(yamlData, &configMap); err != nil { - return err - } - - // get prometheus.yml part from configmap - if err := yaml.Unmarshal([]byte(configMap.Data.PrometheusYML), &prometheusContent); err != nil { - return err - } - - // to add component rules when it is not there yet - if enable { - // Check if the rule not yet exists in rule_files - if !strings.Contains(configMap.Data.PrometheusYML, component+"*.rules") { - // check if have rule_files - if ruleFiles, ok := prometheusContent["rule_files"]; ok { - if ruleList, isList := ruleFiles.([]interface{}); isList { - // add new component rules back to rule_files - ruleList = append(ruleList, component+"*.rules") - prometheusContent["rule_files"] = ruleList - } - } - } - } else { // to remove component rules if it is there - logger.Info("Removing prometheus rule: " + component + "*.rules") - if ruleList, ok := prometheusContent["rule_files"].([]interface{}); ok { - for i, item := range ruleList { - if rule, isStr := item.(string); isStr && rule == component+"*.rules" { - ruleList = append(ruleList[:i], ruleList[i+1:]...) 
- break - } - } - prometheusContent["rule_files"] = ruleList - } - } - - // Marshal back - newDataYAML, err := yaml.Marshal(&prometheusContent) - if err != nil { - return err - } - configMap.Data.PrometheusYML = string(newDataYAML) - - newyamlData, err := yaml.Marshal(&configMap) - if err != nil { - return err - } - - // Write the modified content back to the file - err = os.WriteFile(prometheusconfigPath, newyamlData, 0) - return err -} diff --git a/components/dashboard/dashboard.go b/components/dashboard/dashboard.go deleted file mode 100644 index 429ae26924e..00000000000 --- a/components/dashboard/dashboard.go +++ /dev/null @@ -1,239 +0,0 @@ -// Package dashboard provides utility functions to config Open Data Hub Dashboard: A web dashboard that displays -// installed Open Data Hub components with easy access to component UIs and documentation -// +groupName=datasciencecluster.opendatahub.io -package dashboard - -import ( - "context" - "errors" - "fmt" - "path/filepath" - - "github.com/go-logr/logr" - operatorv1 "github.com/openshift/api/operator/v1" - corev1 "k8s.io/api/core/v1" - k8serr "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/components" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" -) - -var ( - ComponentNameUpstream = "dashboard" - PathUpstream = deploy.DefaultManifestPath + "/" + ComponentNameUpstream + "/odh" - - ComponentNameDownstream = "rhods-dashboard" - PathDownstream = deploy.DefaultManifestPath + "/" + ComponentNameUpstream + "/rhoai" - PathSelfDownstream = PathDownstream + "/onprem" - PathManagedDownstream = PathDownstream + "/addon" - OverridePath = "" -) - -// Verifies that Dashboard implements ComponentInterface. 
-var _ components.ComponentInterface = (*Dashboard)(nil) - -// Dashboard struct holds the configuration for the Dashboard component. -// +kubebuilder:object:generate=true -type Dashboard struct { - components.Component `json:""` -} - -func (d *Dashboard) OverrideManifests(ctx context.Context, platform cluster.Platform) error { - // If devflags are set, update default manifests path - if len(d.DevFlags.Manifests) != 0 { - manifestConfig := d.DevFlags.Manifests[0] - if err := deploy.DownloadManifests(ctx, ComponentNameUpstream, manifestConfig); err != nil { - return err - } - if manifestConfig.SourcePath != "" { - OverridePath = filepath.Join(deploy.DefaultManifestPath, ComponentNameUpstream, manifestConfig.SourcePath) - } - } - return nil -} - -func (d *Dashboard) GetComponentName() string { - return ComponentNameUpstream -} - -func (d *Dashboard) ReconcileComponent(ctx context.Context, - cli client.Client, - logger logr.Logger, - owner metav1.Object, - dscispec *dsciv1.DSCInitializationSpec, - platform cluster.Platform, - currentComponentExist bool, -) error { - var l logr.Logger - - if platform == cluster.SelfManagedRhoai || platform == cluster.ManagedRhoai { - l = d.ConfigComponentLogger(logger, ComponentNameDownstream, dscispec) - } else { - l = d.ConfigComponentLogger(logger, ComponentNameUpstream, dscispec) - } - - entryPath := map[cluster.Platform]string{ - cluster.SelfManagedRhoai: PathDownstream + "/onprem", - cluster.ManagedRhoai: PathDownstream + "/addon", - cluster.OpenDataHub: PathUpstream, - cluster.Unknown: PathUpstream, - }[platform] - - enabled := d.GetManagementState() == operatorv1.Managed - monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed - imageParamMap := make(map[string]string) - - if enabled { - // 1. 
cleanup OAuth client related secret and CR if dashboard is in 'installed false' status - if err := d.cleanOauthClient(ctx, cli, dscispec, currentComponentExist, l); err != nil { - return err - } - if d.DevFlags != nil && len(d.DevFlags.Manifests) != 0 { - // Download manifests and update paths - if err := d.OverrideManifests(ctx, platform); err != nil { - return err - } - if OverridePath != "" { - entryPath = OverridePath - } - } else { // Update image parameters if devFlags is not provided - imageParamMap["odh-dashboard-image"] = "RELATED_IMAGE_ODH_DASHBOARD_IMAGE" - } - - // 2. platform specific RBAC - if platform == cluster.OpenDataHub || platform == "" { - if err := cluster.UpdatePodSecurityRolebinding(ctx, cli, dscispec.ApplicationsNamespace, "odh-dashboard"); err != nil { - return err - } - } else { - if err := cluster.UpdatePodSecurityRolebinding(ctx, cli, dscispec.ApplicationsNamespace, "rhods-dashboard"); err != nil { - return err - } - } - - // 3. Append or Update variable for component to consume - extraParamsMap, err := updateKustomizeVariable(ctx, cli, platform, dscispec) - if err != nil { - return errors.New("failed to set variable for extraParamsMap") - } - - // 4. 
update params.env regardless devFlags is provided of not - if err := deploy.ApplyParams(entryPath, imageParamMap, extraParamsMap); err != nil { - return fmt.Errorf("failed to update params.env from %s : %w", entryPath, err) - } - } - - // common: Deploy odh-dashboard manifests - // TODO: check if we can have the same component name odh-dashboard for both, or still keep rhods-dashboard for RHOAI - switch platform { - case cluster.SelfManagedRhoai, cluster.ManagedRhoai: - // anaconda - if err := cluster.CreateSecret(ctx, cli, "anaconda-ce-access", dscispec.ApplicationsNamespace); err != nil { - return fmt.Errorf("failed to create access-secret for anaconda: %w", err) - } - // Deploy RHOAI manifests - if err := deploy.DeployManifestsFromPath(ctx, cli, owner, entryPath, dscispec.ApplicationsNamespace, ComponentNameDownstream, enabled); err != nil { - return fmt.Errorf("failed to apply manifests from %s: %w", PathDownstream, err) - } - l.Info("apply manifests done") - - if enabled { - if err := cluster.WaitForDeploymentAvailable(ctx, cli, ComponentNameDownstream, dscispec.ApplicationsNamespace, 20, 3); err != nil { - return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentNameDownstream, err) - } - } - - // CloudService Monitoring handling - if platform == cluster.ManagedRhoai { - if err := d.UpdatePrometheusConfig(cli, l, enabled && monitoringEnabled, ComponentNameDownstream); err != nil { - return err - } - if err := deploy.DeployManifestsFromPath(ctx, cli, owner, - filepath.Join(deploy.DefaultManifestPath, "monitoring", "prometheus", "apps"), - dscispec.Monitoring.Namespace, - "prometheus", true); err != nil { - return err - } - l.Info("updating SRE monitoring done") - } - return nil - - default: - // Deploy ODH manifests - if err := deploy.DeployManifestsFromPath(ctx, cli, owner, entryPath, dscispec.ApplicationsNamespace, ComponentNameUpstream, enabled); err != nil { - return err - } - l.Info("apply manifests done") - if enabled { - if err := 
cluster.WaitForDeploymentAvailable(ctx, cli, ComponentNameUpstream, dscispec.ApplicationsNamespace, 20, 3); err != nil { - return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentNameUpstream, err) - } - } - - return nil - } -} - -func updateKustomizeVariable(ctx context.Context, cli client.Client, platform cluster.Platform, dscispec *dsciv1.DSCInitializationSpec) (map[string]string, error) { - adminGroups := map[cluster.Platform]string{ - cluster.SelfManagedRhoai: "rhods-admins", - cluster.ManagedRhoai: "dedicated-admins", - cluster.OpenDataHub: "odh-admins", - cluster.Unknown: "odh-admins", - }[platform] - - sectionTitle := map[cluster.Platform]string{ - cluster.SelfManagedRhoai: "OpenShift Self Managed Services", - cluster.ManagedRhoai: "OpenShift Managed Services", - cluster.OpenDataHub: "OpenShift Open Data Hub", - cluster.Unknown: "OpenShift Open Data Hub", - }[platform] - - consoleLinkDomain, err := cluster.GetDomain(ctx, cli) - if err != nil { - return nil, fmt.Errorf("error getting console route URL %s : %w", consoleLinkDomain, err) - } - consoleURL := map[cluster.Platform]string{ - cluster.SelfManagedRhoai: "https://rhods-dashboard-" + dscispec.ApplicationsNamespace + "." + consoleLinkDomain, - cluster.ManagedRhoai: "https://rhods-dashboard-" + dscispec.ApplicationsNamespace + "." + consoleLinkDomain, - cluster.OpenDataHub: "https://odh-dashboard-" + dscispec.ApplicationsNamespace + "." + consoleLinkDomain, - cluster.Unknown: "https://odh-dashboard-" + dscispec.ApplicationsNamespace + "." 
+ consoleLinkDomain, - }[platform] - - return map[string]string{ - "admin_groups": adminGroups, - "dashboard-url": consoleURL, - "section-title": sectionTitle, - }, nil -} - -func (d *Dashboard) cleanOauthClient(ctx context.Context, cli client.Client, dscispec *dsciv1.DSCInitializationSpec, currentComponentExist bool, l logr.Logger) error { - // Remove previous oauth-client secrets - // Check if component is going from state of `Not Installed --> Installed` - // Assumption: Component is currently set to enabled - name := "dashboard-oauth-client" - if !currentComponentExist { - l.Info("Cleanup any left secret") - // Delete client secrets from previous installation - oauthClientSecret := &corev1.Secret{} - err := cli.Get(ctx, client.ObjectKey{ - Namespace: dscispec.ApplicationsNamespace, - Name: name, - }, oauthClientSecret) - if err != nil { - if !k8serr.IsNotFound(err) { - return fmt.Errorf("error getting secret %s: %w", name, err) - } - } else { - if err := cli.Delete(ctx, oauthClientSecret); err != nil { - return fmt.Errorf("error deleting secret %s: %w", name, err) - } - l.Info("successfully deleted secret", "secret", name) - } - } - return nil -} diff --git a/components/datasciencepipelines/datasciencepipelines.go b/components/datasciencepipelines/datasciencepipelines.go deleted file mode 100644 index e3bb2286203..00000000000 --- a/components/datasciencepipelines/datasciencepipelines.go +++ /dev/null @@ -1,165 +0,0 @@ -// Package datasciencepipelines provides utility functions to config Data Science Pipelines: -// Pipeline solution for end to end MLOps workflows that support the Kubeflow Pipelines SDK and Argo Workflows. 
-// +groupName=datasciencecluster.opendatahub.io -package datasciencepipelines - -import ( - "context" - "fmt" - "path/filepath" - - "github.com/go-logr/logr" - operatorv1 "github.com/openshift/api/operator/v1" - conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" - corev1 "k8s.io/api/core/v1" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - k8serr "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/components" - "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" -) - -var ( - ComponentName = "data-science-pipelines-operator" - Path = deploy.DefaultManifestPath + "/" + ComponentName + "/base" - OverlayPath = deploy.DefaultManifestPath + "/" + ComponentName + "/overlays" - ArgoWorkflowCRD = "workflows.argoproj.io" -) - -// Verifies that Dashboard implements ComponentInterface. -var _ components.ComponentInterface = (*DataSciencePipelines)(nil) - -// DataSciencePipelines struct holds the configuration for the DataSciencePipelines component. 
-// +kubebuilder:object:generate=true -type DataSciencePipelines struct { - components.Component `json:""` -} - -func (d *DataSciencePipelines) OverrideManifests(ctx context.Context, _ cluster.Platform) error { - // If devflags are set, update default manifests path - if len(d.DevFlags.Manifests) != 0 { - manifestConfig := d.DevFlags.Manifests[0] - if err := deploy.DownloadManifests(ctx, ComponentName, manifestConfig); err != nil { - return err - } - // If overlay is defined, update paths - defaultKustomizePath := "base" - if manifestConfig.SourcePath != "" { - defaultKustomizePath = manifestConfig.SourcePath - } - Path = filepath.Join(deploy.DefaultManifestPath, ComponentName, defaultKustomizePath) - } - - return nil -} - -func (d *DataSciencePipelines) GetComponentName() string { - return ComponentName -} - -func (d *DataSciencePipelines) ReconcileComponent(ctx context.Context, - cli client.Client, - logger logr.Logger, - owner metav1.Object, - dscispec *dsciv1.DSCInitializationSpec, - platform cluster.Platform, - _ bool, -) error { - l := d.ConfigComponentLogger(logger, ComponentName, dscispec) - var imageParamMap = map[string]string{ - "IMAGES_DSPO": "RELATED_IMAGE_ODH_DATA_SCIENCE_PIPELINES_OPERATOR_CONTROLLER_IMAGE", - "IMAGES_APISERVER": "RELATED_IMAGE_ODH_ML_PIPELINES_API_SERVER_V2_IMAGE", - "IMAGES_PERSISTENCEAGENT": "RELATED_IMAGE_ODH_ML_PIPELINES_PERSISTENCEAGENT_V2_IMAGE", - "IMAGES_SCHEDULEDWORKFLOW": "RELATED_IMAGE_ODH_ML_PIPELINES_SCHEDULEDWORKFLOW_V2_IMAGE", - "IMAGES_ARGO_EXEC": "RELATED_IMAGE_ODH_DATA_SCIENCE_PIPELINES_ARGO_ARGOEXEC_IMAGE", - "IMAGES_ARGO_WORKFLOWCONTROLLER": "RELATED_IMAGE_ODH_DATA_SCIENCE_PIPELINES_ARGO_WORKFLOWCONTROLLER_IMAGE", - "IMAGES_DRIVER": "RELATED_IMAGE_ODH_ML_PIPELINES_DRIVER_IMAGE", - "IMAGES_LAUNCHER": "RELATED_IMAGE_ODH_ML_PIPELINES_LAUNCHER_IMAGE", - "IMAGES_MLMDGRPC": "RELATED_IMAGE_ODH_MLMD_GRPC_SERVER_IMAGE", - } - - enabled := d.GetManagementState() == operatorv1.Managed - monitoringEnabled := 
dscispec.Monitoring.ManagementState == operatorv1.Managed - - if enabled { - if d.DevFlags != nil { - // Download manifests and update paths - if err := d.OverrideManifests(ctx, platform); err != nil { - return err - } - } - // skip check if the dependent operator has beeninstalled, this is done in dashboard - // Update image parameters only when we do not have customized manifests set - if (dscispec.DevFlags == nil || dscispec.DevFlags.ManifestsUri == "") && (d.DevFlags == nil || len(d.DevFlags.Manifests) == 0) { - if err := deploy.ApplyParams(Path, imageParamMap); err != nil { - return fmt.Errorf("failed to update image from %s : %w", Path, err) - } - } - // Check for existing Argo Workflows - if err := UnmanagedArgoWorkFlowExists(ctx, cli); err != nil { - return err - } - } - - // new overlay - manifestsPath := filepath.Join(OverlayPath, "rhoai") - if platform == cluster.OpenDataHub || platform == "" { - manifestsPath = filepath.Join(OverlayPath, "odh") - } - if err := deploy.DeployManifestsFromPath(ctx, cli, owner, manifestsPath, dscispec.ApplicationsNamespace, ComponentName, enabled); err != nil { - return err - } - l.Info("apply manifests done") - - // Wait for deployment available - if enabled { - if err := cluster.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { - return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) - } - } - - // CloudService Monitoring handling - if platform == cluster.ManagedRhoai { - if err := d.UpdatePrometheusConfig(cli, l, enabled && monitoringEnabled, ComponentName); err != nil { - return err - } - if err := deploy.DeployManifestsFromPath(ctx, cli, owner, - filepath.Join(deploy.DefaultManifestPath, "monitoring", "prometheus", "apps"), - dscispec.Monitoring.Namespace, - "prometheus", true); err != nil { - return err - } - l.Info("updating SRE monitoring done") - } - - return nil -} - -func UnmanagedArgoWorkFlowExists(ctx context.Context, - cli 
client.Client) error { - workflowCRD := &apiextensionsv1.CustomResourceDefinition{} - if err := cli.Get(ctx, client.ObjectKey{Name: ArgoWorkflowCRD}, workflowCRD); err != nil { - if k8serr.IsNotFound(err) { - return nil - } - return fmt.Errorf("failed to get existing Workflow CRD : %w", err) - } - // Verify if existing workflow is deployed by ODH with label - odhLabelValue, odhLabelExists := workflowCRD.Labels[labels.ODH.Component(ComponentName)] - if odhLabelExists && odhLabelValue == "true" { - return nil - } - return fmt.Errorf("%s CRD already exists but not deployed by this operator. "+ - "Remove existing Argo workflows or set `spec.components.datasciencepipelines.managementState` to Removed to proceed ", ArgoWorkflowCRD) -} - -func SetExistingArgoCondition(conditions *[]conditionsv1.Condition, reason, message string) { - status.SetCondition(conditions, string(status.CapabilityDSPv2Argo), reason, message, corev1.ConditionFalse) - status.SetComponentCondition(conditions, ComponentName, status.ReconcileFailed, message, corev1.ConditionFalse) -} diff --git a/components/datasciencepipelines/zz_generated.deepcopy.go b/components/datasciencepipelines/zz_generated.deepcopy.go deleted file mode 100644 index 11c4e758555..00000000000 --- a/components/datasciencepipelines/zz_generated.deepcopy.go +++ /dev/null @@ -1,39 +0,0 @@ -//go:build !ignore_autogenerated - -/* -Copyright 2023. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by controller-gen. DO NOT EDIT. 
- -package datasciencepipelines - -import () - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DataSciencePipelines) DeepCopyInto(out *DataSciencePipelines) { - *out = *in - in.Component.DeepCopyInto(&out.Component) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataSciencePipelines. -func (in *DataSciencePipelines) DeepCopy() *DataSciencePipelines { - if in == nil { - return nil - } - out := new(DataSciencePipelines) - in.DeepCopyInto(out) - return out -} diff --git a/components/kserve/kserve.go b/components/kserve/kserve.go deleted file mode 100644 index 2668810e7ff..00000000000 --- a/components/kserve/kserve.go +++ /dev/null @@ -1,185 +0,0 @@ -// Package kserve provides utility functions to config Kserve as the Controller for serving ML models on arbitrary frameworks -// +groupName=datasciencecluster.opendatahub.io -package kserve - -import ( - "context" - "fmt" - "path/filepath" - "strings" - - "github.com/go-logr/logr" - operatorv1 "github.com/openshift/api/operator/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" - infrav1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/infrastructure/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/components" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" -) - -var ( - ComponentName = "kserve" - Path = deploy.DefaultManifestPath + "/" + ComponentName + "/overlays/odh" - DependentComponentName = "odh-model-controller" - DependentPath = deploy.DefaultManifestPath + "/" + DependentComponentName + "/base" - ServiceMeshOperator = "servicemeshoperator" - ServerlessOperator = "serverless-operator" -) - -// Verifies that Kserve implements ComponentInterface. 
-var _ components.ComponentInterface = (*Kserve)(nil) - -// +kubebuilder:validation:Pattern=`^(Serverless|RawDeployment)$` -type DefaultDeploymentMode string - -var ( - // Serverless will be used as the default deployment mode for Kserve. This requires Serverless and ServiceMesh operators configured as dependencies. - Serverless DefaultDeploymentMode = "Serverless" - // RawDeployment will be used as the default deployment mode for Kserve. - RawDeployment DefaultDeploymentMode = "RawDeployment" -) - -// Kserve struct holds the configuration for the Kserve component. -// +kubebuilder:object:generate=true -type Kserve struct { - components.Component `json:""` - // Serving configures the KNative-Serving stack used for model serving. A Service - // Mesh (Istio) is prerequisite, since it is used as networking layer. - Serving infrav1.ServingSpec `json:"serving,omitempty"` - // Configures the default deployment mode for Kserve. This can be set to 'Serverless' or 'RawDeployment'. - // The value specified in this field will be used to set the default deployment mode in the 'inferenceservice-config' configmap for Kserve. - // This field is optional. If no default deployment mode is specified, Kserve will use Serverless mode. 
- // +kubebuilder:validation:Enum=Serverless;RawDeployment - DefaultDeploymentMode DefaultDeploymentMode `json:"defaultDeploymentMode,omitempty"` -} - -func (k *Kserve) OverrideManifests(ctx context.Context, _ cluster.Platform) error { - // Download manifests if defined by devflags - // Go through each manifest and set the overlays if defined - for _, subcomponent := range k.DevFlags.Manifests { - if strings.Contains(subcomponent.URI, DependentComponentName) { - // Download subcomponent - if err := deploy.DownloadManifests(ctx, DependentComponentName, subcomponent); err != nil { - return err - } - // If overlay is defined, update paths - defaultKustomizePath := "base" - if subcomponent.SourcePath != "" { - defaultKustomizePath = subcomponent.SourcePath - } - DependentPath = filepath.Join(deploy.DefaultManifestPath, DependentComponentName, defaultKustomizePath) - } - - if strings.Contains(subcomponent.URI, ComponentName) { - // Download subcomponent - if err := deploy.DownloadManifests(ctx, ComponentName, subcomponent); err != nil { - return err - } - // If overlay is defined, update paths - defaultKustomizePath := "overlays/odh" - if subcomponent.SourcePath != "" { - defaultKustomizePath = subcomponent.SourcePath - } - Path = filepath.Join(deploy.DefaultManifestPath, ComponentName, defaultKustomizePath) - } - } - return nil -} - -func (k *Kserve) GetComponentName() string { - return ComponentName -} - -func (k *Kserve) ReconcileComponent(ctx context.Context, cli client.Client, - logger logr.Logger, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool) error { - l := k.ConfigComponentLogger(logger, ComponentName, dscispec) - - // dependentParamMap for odh-model-controller to use. 
- var dependentParamMap = map[string]string{ - "odh-model-controller": "RELATED_IMAGE_ODH_MODEL_CONTROLLER_IMAGE", - } - - enabled := k.GetManagementState() == operatorv1.Managed - monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed - - if !enabled { - if err := k.removeServerlessFeatures(ctx, cli, owner, dscispec); err != nil { - return err - } - } else { - // Configure dependencies - if err := k.configureServerless(ctx, cli, l, owner, dscispec); err != nil { - return err - } - if k.DevFlags != nil { - // Download manifests and update paths - if err := k.OverrideManifests(ctx, platform); err != nil { - return err - } - } - } - - if err := k.configureServiceMesh(ctx, cli, owner, dscispec); err != nil { - return fmt.Errorf("failed configuring service mesh while reconciling kserve component. cause: %w", err) - } - - if err := deploy.DeployManifestsFromPath(ctx, cli, owner, Path, dscispec.ApplicationsNamespace, ComponentName, enabled); err != nil { - return fmt.Errorf("failed to apply manifests from %s : %w", Path, err) - } - - l.WithValues("Path", Path).Info("apply manifests done for kserve") - - if enabled { - if err := k.setupKserveConfig(ctx, cli, l, dscispec); err != nil { - return err - } - - // For odh-model-controller - if err := cluster.UpdatePodSecurityRolebinding(ctx, cli, dscispec.ApplicationsNamespace, "odh-model-controller"); err != nil { - return err - } - // Update image parameters for odh-model-controller - if (dscispec.DevFlags == nil || dscispec.DevFlags.ManifestsUri == "") && (k.DevFlags == nil || len(k.DevFlags.Manifests) == 0) { - if err := deploy.ApplyParams(DependentPath, dependentParamMap); err != nil { - return fmt.Errorf("failed to update image %s: %w", DependentPath, err) - } - } - } - - if err := deploy.DeployManifestsFromPath(ctx, cli, owner, DependentPath, dscispec.ApplicationsNamespace, ComponentName, enabled); err != nil { - if !strings.Contains(err.Error(), "spec.selector") || !strings.Contains(err.Error(), 
"field is immutable") { - // explicitly ignore error if error contains keywords "spec.selector" and "field is immutable" and return all other error. - return err - } - } - l.WithValues("Path", Path).Info("apply manifests done for odh-model-controller") - - // Wait for deployment available - if enabled { - if err := cluster.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 20, 3); err != nil { - return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) - } - } - - // CloudService Monitoring handling - if platform == cluster.ManagedRhoai { - // kesrve rules - if err := k.UpdatePrometheusConfig(cli, l, enabled && monitoringEnabled, ComponentName); err != nil { - return err - } - l.Info("updating SRE monitoring done") - } - - return nil -} - -func (k *Kserve) Cleanup(ctx context.Context, cli client.Client, owner metav1.Object, instance *dsciv1.DSCInitializationSpec) error { - if removeServerlessErr := k.removeServerlessFeatures(ctx, cli, owner, instance); removeServerlessErr != nil { - return removeServerlessErr - } - - return k.removeServiceMeshConfigurations(ctx, cli, owner, instance) -} diff --git a/components/kserve/kserve_config_handler.go b/components/kserve/kserve_config_handler.go deleted file mode 100644 index a93f4cb675b..00000000000 --- a/components/kserve/kserve_config_handler.go +++ /dev/null @@ -1,184 +0,0 @@ -package kserve - -import ( - "context" - "encoding/json" - "errors" - "fmt" - - "github.com/go-logr/logr" - "github.com/hashicorp/go-multierror" - operatorv1 "github.com/openshift/api/operator/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" - 
"github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" -) - -const ( - KserveConfigMapName string = "inferenceservice-config" -) - -func (k *Kserve) setupKserveConfig(ctx context.Context, cli client.Client, logger logr.Logger, dscispec *dsciv1.DSCInitializationSpec) error { - // as long as Kserve.Serving is not 'Removed', we will setup the dependencies - - switch k.Serving.ManagementState { - case operatorv1.Managed, operatorv1.Unmanaged: - if k.DefaultDeploymentMode == "" { - // if the default mode is empty in the DSC, assume mode is "Serverless" since k.Serving is Managed - if err := k.setDefaultDeploymentMode(ctx, cli, dscispec, Serverless); err != nil { - return err - } - } else { - // if the default mode is explicitly specified, respect that - if err := k.setDefaultDeploymentMode(ctx, cli, dscispec, k.DefaultDeploymentMode); err != nil { - return err - } - } - case operatorv1.Removed: - if k.DefaultDeploymentMode == Serverless { - return errors.New("setting defaultdeployment mode as Serverless is incompatible with having Serving 'Removed'") - } - if k.DefaultDeploymentMode == "" { - logger.Info("Serving is removed, Kserve will default to rawdeployment") - } - if err := k.setDefaultDeploymentMode(ctx, cli, dscispec, RawDeployment); err != nil { - return err - } - } - return nil -} - -func (k *Kserve) setDefaultDeploymentMode(ctx context.Context, cli client.Client, dscispec *dsciv1.DSCInitializationSpec, defaultmode DefaultDeploymentMode) error { - inferenceServiceConfigMap := &corev1.ConfigMap{} - err := cli.Get(ctx, client.ObjectKey{ - Namespace: dscispec.ApplicationsNamespace, - Name: KserveConfigMapName, - }, inferenceServiceConfigMap) - if err != nil { - return fmt.Errorf("error getting configmap %v: %w", KserveConfigMapName, err) - } - - // set data.deploy.defaultDeploymentMode to the model specified in the Kserve spec - var deployData map[string]interface{} - if err = json.Unmarshal([]byte(inferenceServiceConfigMap.Data["deploy"]), 
&deployData); err != nil { - return fmt.Errorf("error retrieving value for key 'deploy' from configmap %s. %w", KserveConfigMapName, err) - } - modeFound := deployData["defaultDeploymentMode"] - if modeFound != string(defaultmode) { - deployData["defaultDeploymentMode"] = defaultmode - deployDataBytes, err := json.MarshalIndent(deployData, "", " ") - if err != nil { - return fmt.Errorf("could not set values in configmap %s. %w", KserveConfigMapName, err) - } - inferenceServiceConfigMap.Data["deploy"] = string(deployDataBytes) - - var ingressData map[string]interface{} - if err = json.Unmarshal([]byte(inferenceServiceConfigMap.Data["ingress"]), &ingressData); err != nil { - return fmt.Errorf("error retrieving value for key 'ingress' from configmap %s. %w", KserveConfigMapName, err) - } - if defaultmode == RawDeployment { - ingressData["disableIngressCreation"] = true - } else { - ingressData["disableIngressCreation"] = false - } - ingressDataBytes, err := json.MarshalIndent(ingressData, "", " ") - if err != nil { - return fmt.Errorf("could not set values in configmap %s. %w", KserveConfigMapName, err) - } - inferenceServiceConfigMap.Data["ingress"] = string(ingressDataBytes) - - if err = cli.Update(ctx, inferenceServiceConfigMap); err != nil { - return fmt.Errorf("could not set default deployment mode for Kserve. 
%w", err) - } - - // Restart the pod if configmap is updated so that kserve boots with the correct value - podList := &corev1.PodList{} - listOpts := []client.ListOption{ - client.InNamespace(dscispec.ApplicationsNamespace), - client.MatchingLabels{ - labels.ODH.Component(ComponentName): "true", - "control-plane": "kserve-controller-manager", - }, - } - if err := cli.List(ctx, podList, listOpts...); err != nil { - return fmt.Errorf("failed to list pods: %w", err) - } - for _, pod := range podList.Items { - if err := cli.Delete(ctx, &pod); err != nil { - return fmt.Errorf("failed to delete pod %s: %w", pod.Name, err) - } - } - } - - return nil -} - -func (k *Kserve) configureServerless(ctx context.Context, cli client.Client, logger logr.Logger, owner metav1.Object, instance *dsciv1.DSCInitializationSpec) error { - switch k.Serving.ManagementState { - case operatorv1.Unmanaged: // Bring your own CR - logger.Info("Serverless CR is not configured by the operator, we won't do anything") - - case operatorv1.Removed: // we remove serving CR - logger.Info("existing Serverless CR (owned by operator) will be removed") - if err := k.removeServerlessFeatures(ctx, cli, owner, instance); err != nil { - return err - } - - case operatorv1.Managed: // standard workflow to create CR - if instance.ServiceMesh == nil { - return errors.New("ServiceMesh needs to be configured and 'Managed' in DSCI CR, " + - "it is required by KServe serving") - } - - switch instance.ServiceMesh.ManagementState { - case operatorv1.Unmanaged, operatorv1.Removed: - return fmt.Errorf("ServiceMesh is currently set to '%s'. 
It needs to be set to 'Managed' in DSCI CR, "+ - "as it is required by the KServe serving field", instance.ServiceMesh.ManagementState) - } - - // check on dependent operators if all installed in cluster - dependOpsErrors := checkDependentOperators(ctx, cli).ErrorOrNil() - if dependOpsErrors != nil { - return dependOpsErrors - } - - serverlessFeatures := feature.ComponentFeaturesHandler(owner, k.GetComponentName(), instance.ApplicationsNamespace, k.configureServerlessFeatures(instance)) - - if err := serverlessFeatures.Apply(ctx, cli); err != nil { - return err - } - } - return nil -} - -func (k *Kserve) removeServerlessFeatures(ctx context.Context, cli client.Client, owner metav1.Object, instance *dsciv1.DSCInitializationSpec) error { - serverlessFeatures := feature.ComponentFeaturesHandler(owner, k.GetComponentName(), instance.ApplicationsNamespace, k.configureServerlessFeatures(instance)) - - return serverlessFeatures.Delete(ctx, cli) -} - -func checkDependentOperators(ctx context.Context, cli client.Client) *multierror.Error { - var multiErr *multierror.Error - - if found, err := cluster.OperatorExists(ctx, cli, ServiceMeshOperator); err != nil { - multiErr = multierror.Append(multiErr, err) - } else if !found { - err = fmt.Errorf("operator %s not found. Please install the operator before enabling %s component", - ServiceMeshOperator, ComponentName) - multiErr = multierror.Append(multiErr, err) - } - - if found, err := cluster.OperatorExists(ctx, cli, ServerlessOperator); err != nil { - multiErr = multierror.Append(multiErr, err) - } else if !found { - err = fmt.Errorf("operator %s not found. 
Please install the operator before enabling %s component", - ServerlessOperator, ComponentName) - multiErr = multierror.Append(multiErr, err) - } - return multiErr -} diff --git a/components/kserve/resources/servicemesh/z-migrations/kserve-predictor-authorizationpolicy.patch.tmpl.yaml b/components/kserve/resources/servicemesh/z-migrations/kserve-predictor-authorizationpolicy.patch.tmpl.yaml deleted file mode 100644 index 0d141dd8d17..00000000000 --- a/components/kserve/resources/servicemesh/z-migrations/kserve-predictor-authorizationpolicy.patch.tmpl.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: security.istio.io/v1beta1 -kind: AuthorizationPolicy -metadata: - name: kserve-predictor - namespace: {{ .ControlPlane.Namespace }} -spec: - provider: - name: {{ .AuthExtensionName }} diff --git a/components/kserve/serverless_setup.go b/components/kserve/serverless_setup.go deleted file mode 100644 index ee3766ebe23..00000000000 --- a/components/kserve/serverless_setup.go +++ /dev/null @@ -1,72 +0,0 @@ -package kserve - -import ( - "path" - - dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/manifest" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/serverless" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/servicemesh" -) - -func (k *Kserve) configureServerlessFeatures(dsciSpec *dsciv1.DSCInitializationSpec) feature.FeaturesProvider { - return func(registry feature.FeaturesRegistry) error { - servingDeployment := feature.Define("serverless-serving-deployment"). - Manifests( - manifest.Location(Resources.Location). - Include( - path.Join(Resources.InstallDir), - ), - ). 
- WithData( - serverless.FeatureData.IngressDomain.Define(&k.Serving).AsAction(), - serverless.FeatureData.Serving.Define(&k.Serving).AsAction(), - servicemesh.FeatureData.ControlPlane.Define(dsciSpec).AsAction(), - ). - PreConditions( - serverless.EnsureServerlessOperatorInstalled, - serverless.EnsureServerlessAbsent, - servicemesh.EnsureServiceMeshInstalled, - feature.CreateNamespaceIfNotExists(serverless.KnativeServingNamespace), - ). - PostConditions( - feature.WaitForPodsToBeReady(serverless.KnativeServingNamespace), - ) - - istioSecretFiltering := feature.Define("serverless-net-istio-secret-filtering"). - Manifests( - manifest.Location(Resources.Location). - Include( - path.Join(Resources.BaseDir, "serving-net-istio-secret-filtering.patch.tmpl.yaml"), - ), - ). - WithData(serverless.FeatureData.Serving.Define(&k.Serving).AsAction()). - PreConditions(serverless.EnsureServerlessServingDeployed). - PostConditions( - feature.WaitForPodsToBeReady(serverless.KnativeServingNamespace), - ) - - servingGateway := feature.Define("serverless-serving-gateways"). - Manifests( - manifest.Location(Resources.Location). - Include( - path.Join(Resources.GatewaysDir), - ), - ). - WithData( - serverless.FeatureData.IngressDomain.Define(&k.Serving).AsAction(), - serverless.FeatureData.CertificateName.Define(&k.Serving).AsAction(), - serverless.FeatureData.Serving.Define(&k.Serving).AsAction(), - servicemesh.FeatureData.ControlPlane.Define(dsciSpec).AsAction(), - ). - WithResources(serverless.ServingCertificateResource). 
- PreConditions(serverless.EnsureServerlessServingDeployed) - - return registry.Add( - servingDeployment, - istioSecretFiltering, - servingGateway, - ) - } -} diff --git a/components/kserve/servicemesh_setup.go b/components/kserve/servicemesh_setup.go deleted file mode 100644 index 126e23d88ea..00000000000 --- a/components/kserve/servicemesh_setup.go +++ /dev/null @@ -1,76 +0,0 @@ -package kserve - -import ( - "context" - "fmt" - "path" - - operatorv1 "github.com/openshift/api/operator/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - - dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/manifest" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/servicemesh" -) - -func (k *Kserve) configureServiceMesh(ctx context.Context, cli client.Client, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec) error { - if dscispec.ServiceMesh != nil { - if dscispec.ServiceMesh.ManagementState == operatorv1.Managed && k.GetManagementState() == operatorv1.Managed { - serviceMeshInitializer := feature.ComponentFeaturesHandler(owner, k.GetComponentName(), dscispec.ApplicationsNamespace, k.defineServiceMeshFeatures(ctx, cli, dscispec)) - return serviceMeshInitializer.Apply(ctx, cli) - } - if dscispec.ServiceMesh.ManagementState == operatorv1.Unmanaged && k.GetManagementState() == operatorv1.Managed { - return nil - } - } - - return k.removeServiceMeshConfigurations(ctx, cli, owner, dscispec) -} - -func (k *Kserve) removeServiceMeshConfigurations(ctx context.Context, cli client.Client, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec) error { - serviceMeshInitializer := feature.ComponentFeaturesHandler(owner, k.GetComponentName(), 
dscispec.ApplicationsNamespace, k.defineServiceMeshFeatures(ctx, cli, dscispec)) - return serviceMeshInitializer.Delete(ctx, cli) -} - -func (k *Kserve) defineServiceMeshFeatures(ctx context.Context, cli client.Client, dscispec *dsciv1.DSCInitializationSpec) feature.FeaturesProvider { - return func(registry feature.FeaturesRegistry) error { - authorinoInstalled, err := cluster.SubscriptionExists(ctx, cli, "authorino-operator") - if err != nil { - return fmt.Errorf("failed to list subscriptions %w", err) - } - - if authorinoInstalled { - kserveExtAuthzErr := registry.Add(feature.Define("kserve-external-authz"). - Manifests( - manifest.Location(Resources.Location). - Include( - path.Join(Resources.ServiceMeshDir, "activator-envoyfilter.tmpl.yaml"), - path.Join(Resources.ServiceMeshDir, "envoy-oauth-temp-fix.tmpl.yaml"), - path.Join(Resources.ServiceMeshDir, "kserve-predictor-authorizationpolicy.tmpl.yaml"), - path.Join(Resources.ServiceMeshDir, "z-migrations"), - ), - ). - Managed(). - WithData( - feature.Entry("Domain", cluster.GetDomain), - servicemesh.FeatureData.ControlPlane.Define(dscispec).AsAction(), - ). - WithData( - servicemesh.FeatureData.Authorization.All(dscispec)..., - ), - ) - - if kserveExtAuthzErr != nil { - return kserveExtAuthzErr - } - } else { - ctrl.Log.Info("WARN: Authorino operator is not installed on the cluster, skipping authorization capability") - } - - return nil - } -} diff --git a/components/kserve/zz_generated.deepcopy.go b/components/kserve/zz_generated.deepcopy.go deleted file mode 100644 index da6e99960b7..00000000000 --- a/components/kserve/zz_generated.deepcopy.go +++ /dev/null @@ -1,40 +0,0 @@ -//go:build !ignore_autogenerated - -/* -Copyright 2023. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by controller-gen. DO NOT EDIT. - -package kserve - -import () - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Kserve) DeepCopyInto(out *Kserve) { - *out = *in - in.Component.DeepCopyInto(&out.Component) - out.Serving = in.Serving -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Kserve. -func (in *Kserve) DeepCopy() *Kserve { - if in == nil { - return nil - } - out := new(Kserve) - in.DeepCopyInto(out) - return out -} diff --git a/components/kueue/kueue.go b/components/kueue/kueue.go deleted file mode 100644 index 9b59436fa2c..00000000000 --- a/components/kueue/kueue.go +++ /dev/null @@ -1,105 +0,0 @@ -// +groupName=datasciencecluster.opendatahub.io -package kueue - -import ( - "context" - "fmt" - "path/filepath" - - "github.com/go-logr/logr" - operatorv1 "github.com/openshift/api/operator/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/components" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" -) - -var ( - ComponentName = "kueue" - Path = deploy.DefaultManifestPath + "/" + ComponentName + "/rhoai" // same path for both odh and rhoai -) - -// Verifies that Kueue implements ComponentInterface. 
-var _ components.ComponentInterface = (*Kueue)(nil) - -// Kueue struct holds the configuration for the Kueue component. -// +kubebuilder:object:generate=true -type Kueue struct { - components.Component `json:""` -} - -func (k *Kueue) OverrideManifests(ctx context.Context, _ cluster.Platform) error { - // If devflags are set, update default manifests path - if len(k.DevFlags.Manifests) != 0 { - manifestConfig := k.DevFlags.Manifests[0] - if err := deploy.DownloadManifests(ctx, ComponentName, manifestConfig); err != nil { - return err - } - // If overlay is defined, update paths - defaultKustomizePath := "rhoai" - if manifestConfig.SourcePath != "" { - defaultKustomizePath = manifestConfig.SourcePath - } - Path = filepath.Join(deploy.DefaultManifestPath, ComponentName, defaultKustomizePath) - } - - return nil -} - -func (k *Kueue) GetComponentName() string { - return ComponentName -} - -func (k *Kueue) ReconcileComponent(ctx context.Context, cli client.Client, logger logr.Logger, - owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool) error { - l := k.ConfigComponentLogger(logger, ComponentName, dscispec) - var imageParamMap = map[string]string{ - "odh-kueue-controller-image": "RELATED_IMAGE_ODH_KUEUE_CONTROLLER_IMAGE", // new kueue image - } - - enabled := k.GetManagementState() == operatorv1.Managed - monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed - if enabled { - if k.DevFlags != nil { - // Download manifests and update paths - if err := k.OverrideManifests(ctx, platform); err != nil { - return err - } - } - if (dscispec.DevFlags == nil || dscispec.DevFlags.ManifestsUri == "") && (k.DevFlags == nil || len(k.DevFlags.Manifests) == 0) { - if err := deploy.ApplyParams(Path, imageParamMap); err != nil { - return fmt.Errorf("failed to update image from %s : %w", Path, err) - } - } - } - // Deploy Kueue Operator - if err := deploy.DeployManifestsFromPath(ctx, cli, owner, Path, 
dscispec.ApplicationsNamespace, ComponentName, enabled); err != nil { - return fmt.Errorf("failed to apply manifetss %s: %w", Path, err) - } - l.Info("apply manifests done") - - if enabled { - if err := cluster.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { - return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) - } - } - - // CloudService Monitoring handling - if platform == cluster.ManagedRhoai { - if err := k.UpdatePrometheusConfig(cli, l, enabled && monitoringEnabled, ComponentName); err != nil { - return err - } - if err := deploy.DeployManifestsFromPath(ctx, cli, owner, - filepath.Join(deploy.DefaultManifestPath, "monitoring", "prometheus", "apps"), - dscispec.Monitoring.Namespace, - "prometheus", true); err != nil { - return err - } - l.Info("updating SRE monitoring done") - } - - return nil -} diff --git a/components/modelmeshserving/modelmeshserving.go b/components/modelmeshserving/modelmeshserving.go deleted file mode 100644 index 6a95f9115b7..00000000000 --- a/components/modelmeshserving/modelmeshserving.go +++ /dev/null @@ -1,171 +0,0 @@ -// Package modelmeshserving provides utility functions to config MoModelMesh, a general-purpose model serving management/routing layer -// +groupName=datasciencecluster.opendatahub.io -package modelmeshserving - -import ( - "context" - "fmt" - "path/filepath" - "strings" - - "github.com/go-logr/logr" - operatorv1 "github.com/openshift/api/operator/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/components" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" -) - -var ( - ComponentName = "model-mesh" - Path = deploy.DefaultManifestPath + "/" + ComponentName + 
"/overlays/odh" - DependentComponentName = "odh-model-controller" - DependentPath = deploy.DefaultManifestPath + "/" + DependentComponentName + "/base" -) - -// Verifies that Dashboard implements ComponentInterface. -var _ components.ComponentInterface = (*ModelMeshServing)(nil) - -// ModelMeshServing struct holds the configuration for the ModelMeshServing component. -// +kubebuilder:object:generate=true -type ModelMeshServing struct { - components.Component `json:""` -} - -func (m *ModelMeshServing) OverrideManifests(ctx context.Context, _ cluster.Platform) error { - // Go through each manifest and set the overlays if defined - for _, subcomponent := range m.DevFlags.Manifests { - if strings.Contains(subcomponent.URI, DependentComponentName) { - // Download subcomponent - if err := deploy.DownloadManifests(ctx, DependentComponentName, subcomponent); err != nil { - return err - } - // If overlay is defined, update paths - defaultKustomizePath := "base" - if subcomponent.SourcePath != "" { - defaultKustomizePath = subcomponent.SourcePath - } - DependentPath = filepath.Join(deploy.DefaultManifestPath, DependentComponentName, defaultKustomizePath) - } - - if strings.Contains(subcomponent.URI, ComponentName) { - // Download subcomponent - if err := deploy.DownloadManifests(ctx, ComponentName, subcomponent); err != nil { - return err - } - // If overlay is defined, update paths - defaultKustomizePath := "overlays/odh" - if subcomponent.SourcePath != "" { - defaultKustomizePath = subcomponent.SourcePath - } - Path = filepath.Join(deploy.DefaultManifestPath, ComponentName, defaultKustomizePath) - } - } - return nil -} - -func (m *ModelMeshServing) GetComponentName() string { - return ComponentName -} - -func (m *ModelMeshServing) ReconcileComponent(ctx context.Context, - cli client.Client, - logger logr.Logger, - owner metav1.Object, - dscispec *dsciv1.DSCInitializationSpec, - platform cluster.Platform, - _ bool, -) error { - l := m.ConfigComponentLogger(logger, 
ComponentName, dscispec) - var imageParamMap = map[string]string{ - "odh-mm-rest-proxy": "RELATED_IMAGE_ODH_MM_REST_PROXY_IMAGE", - "odh-modelmesh-runtime-adapter": "RELATED_IMAGE_ODH_MODELMESH_RUNTIME_ADAPTER_IMAGE", - "odh-modelmesh": "RELATED_IMAGE_ODH_MODELMESH_IMAGE", - "odh-modelmesh-controller": "RELATED_IMAGE_ODH_MODELMESH_CONTROLLER_IMAGE", - } - - // odh-model-controller to use - var dependentImageParamMap = map[string]string{ - "odh-model-controller": "RELATED_IMAGE_ODH_MODEL_CONTROLLER_IMAGE", - } - - enabled := m.GetManagementState() == operatorv1.Managed - monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed - - // Update Default rolebinding - if enabled { - if m.DevFlags != nil { - // Download manifests and update paths - if err := m.OverrideManifests(ctx, platform); err != nil { - return err - } - } - - if err := cluster.UpdatePodSecurityRolebinding(ctx, cli, dscispec.ApplicationsNamespace, - "modelmesh", - "modelmesh-controller"); err != nil { - return err - } - // Update image parameters - if (dscispec.DevFlags == nil || dscispec.DevFlags.ManifestsUri == "") && (m.DevFlags == nil || len(m.DevFlags.Manifests) == 0) { - if err := deploy.ApplyParams(Path, imageParamMap); err != nil { - return fmt.Errorf("failed update image from %s : %w", Path, err) - } - } - } - - if err := deploy.DeployManifestsFromPath(ctx, cli, owner, Path, dscispec.ApplicationsNamespace, ComponentName, enabled); err != nil { - return fmt.Errorf("failed to apply manifests from %s : %w", Path, err) - } - l.WithValues("Path", Path).Info("apply manifests done for modelmesh") - // For odh-model-controller - if enabled { - if err := cluster.UpdatePodSecurityRolebinding(ctx, cli, dscispec.ApplicationsNamespace, - "odh-model-controller"); err != nil { - return err - } - // Update image parameters for odh-model-controller - if dscispec.DevFlags == nil || dscispec.DevFlags.ManifestsUri == "" { - if err := deploy.ApplyParams(DependentPath, dependentImageParamMap); 
err != nil { - return err - } - } - } - if err := deploy.DeployManifestsFromPath(ctx, cli, owner, DependentPath, dscispec.ApplicationsNamespace, m.GetComponentName(), enabled); err != nil { - // explicitly ignore error if error contains keywords "spec.selector" and "field is immutable" and return all other error. - if !strings.Contains(err.Error(), "spec.selector") || !strings.Contains(err.Error(), "field is immutable") { - return err - } - } - - l.WithValues("Path", DependentPath).Info("apply manifests done for odh-model-controller") - - if enabled { - if err := cluster.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { - return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) - } - } - - // CloudService Monitoring handling - if platform == cluster.ManagedRhoai { - // first model-mesh rules - if err := m.UpdatePrometheusConfig(cli, l, enabled && monitoringEnabled, ComponentName); err != nil { - return err - } - // then odh-model-controller rules - if err := m.UpdatePrometheusConfig(cli, l, enabled && monitoringEnabled, DependentComponentName); err != nil { - return err - } - if err := deploy.DeployManifestsFromPath(ctx, cli, owner, - filepath.Join(deploy.DefaultManifestPath, "monitoring", "prometheus", "apps"), - dscispec.Monitoring.Namespace, - "prometheus", true); err != nil { - return err - } - l.Info("updating SRE monitoring done") - } - return nil -} diff --git a/components/modelmeshserving/zz_generated.deepcopy.go b/components/modelmeshserving/zz_generated.deepcopy.go deleted file mode 100644 index fee91980836..00000000000 --- a/components/modelmeshserving/zz_generated.deepcopy.go +++ /dev/null @@ -1,39 +0,0 @@ -//go:build !ignore_autogenerated - -/* -Copyright 2023. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by controller-gen. DO NOT EDIT. - -package modelmeshserving - -import () - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ModelMeshServing) DeepCopyInto(out *ModelMeshServing) { - *out = *in - in.Component.DeepCopyInto(&out.Component) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelMeshServing. -func (in *ModelMeshServing) DeepCopy() *ModelMeshServing { - if in == nil { - return nil - } - out := new(ModelMeshServing) - in.DeepCopyInto(out) - return out -} diff --git a/components/modelregistry/modelregistry.go b/components/modelregistry/modelregistry.go deleted file mode 100644 index 457bf996be7..00000000000 --- a/components/modelregistry/modelregistry.go +++ /dev/null @@ -1,222 +0,0 @@ -// Package modelregistry provides utility functions to config ModelRegistry, an ML Model metadata repository service -// +groupName=datasciencecluster.opendatahub.io -package modelregistry - -import ( - "context" - "errors" - "fmt" - "path/filepath" - "strings" - "text/template" - - "github.com/go-logr/logr" - operatorv1 "github.com/openshift/api/operator/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" - infrav1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/infrastructure/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/components" - 
"github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/conversion" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" - - _ "embed" -) - -const DefaultModelRegistryCert = "default-modelregistry-cert" - -var ( - ComponentName = "model-registry-operator" - DefaultModelRegistriesNamespace = "rhoai-model-registries" - Path = deploy.DefaultManifestPath + "/" + ComponentName + "/overlays/odh" - // we should not apply this label to the namespace, as it triggered namspace deletion during operator uninstall - // modelRegistryLabels = cluster.WithLabels( - // labels.ODH.OwnedNamespace, "true", - // ). -) - -// Verifies that ModelRegistry implements ComponentInterface. -var _ components.ComponentInterface = (*ModelRegistry)(nil) - -// ModelRegistry struct holds the configuration for the ModelRegistry component. -// The property `registriesNamespace` is immutable when `managementState` is `Managed` - -// +kubebuilder:object:generate=true -// +kubebuilder:validation:XValidation:rule="(self.managementState != 'Managed') || (oldSelf.registriesNamespace == '') || (oldSelf.managementState != 'Managed')|| (self.registriesNamespace == oldSelf.registriesNamespace)",message="RegistriesNamespace is immutable when model registry is Managed" -//nolint:lll - -type ModelRegistry struct { - components.Component `json:""` - - // Namespace for model registries to be installed, configurable only once when model registry is enabled, defaults to "rhoai-model-registries" - // +kubebuilder:default="rhoai-model-registries" - // +kubebuilder:validation:Pattern="^([a-z0-9]([-a-z0-9]*[a-z0-9])?)?$" - // +kubebuilder:validation:MaxLength=63 - RegistriesNamespace string `json:"registriesNamespace,omitempty"` -} - -func (m *ModelRegistry) OverrideManifests(ctx context.Context, _ cluster.Platform) error { - // If devflags are set, update default manifests path - if len(m.DevFlags.Manifests) != 0 { - manifestConfig := 
m.DevFlags.Manifests[0] - if err := deploy.DownloadManifests(ctx, ComponentName, manifestConfig); err != nil { - return err - } - // If overlay is defined, update paths - defaultKustomizePath := "overlays/odh" - if manifestConfig.SourcePath != "" { - defaultKustomizePath = manifestConfig.SourcePath - } - Path = filepath.Join(deploy.DefaultManifestPath, ComponentName, defaultKustomizePath) - } - - return nil -} - -func (m *ModelRegistry) GetComponentName() string { - return ComponentName -} - -func (m *ModelRegistry) ReconcileComponent(ctx context.Context, cli client.Client, logger logr.Logger, - owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool) error { - l := m.ConfigComponentLogger(logger, ComponentName, dscispec) - var imageParamMap = map[string]string{ - "IMAGES_MODELREGISTRY_OPERATOR": "RELATED_IMAGE_ODH_MODEL_REGISTRY_OPERATOR_IMAGE", - "IMAGES_GRPC_SERVICE": "RELATED_IMAGE_ODH_MLMD_GRPC_SERVER_IMAGE", - "IMAGES_REST_SERVICE": "RELATED_IMAGE_ODH_MODEL_REGISTRY_IMAGE", - } - enabled := m.GetManagementState() == operatorv1.Managed - monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed - - if enabled { - // return error if ServiceMesh is not enabled, as it's a required feature - if dscispec.ServiceMesh == nil || dscispec.ServiceMesh.ManagementState != operatorv1.Managed { - return errors.New("ServiceMesh needs to be set to 'Managed' in DSCI CR, it is required by Model Registry") - } - - if err := m.createDependencies(ctx, cli, dscispec); err != nil { - return err - } - - if m.DevFlags != nil { - // Download manifests and update paths - if err := m.OverrideManifests(ctx, platform); err != nil { - return err - } - } - - // Update image parameters only when we do not have customized manifests set - if (dscispec.DevFlags == nil || dscispec.DevFlags.ManifestsUri == "") && (m.DevFlags == nil || len(m.DevFlags.Manifests) == 0) { - extraParamsMap := map[string]string{ - "DEFAULT_CERT": 
DefaultModelRegistryCert, - } - if err := deploy.ApplyParams(Path, imageParamMap, extraParamsMap); err != nil { - return fmt.Errorf("failed to update image from %s : %w", Path, err) - } - } - - // Create model registries namespace - // We do not delete this namespace even when ModelRegistry is Removed or when operator is uninstalled. - ns, err := cluster.CreateNamespace(ctx, cli, m.RegistriesNamespace) - if err != nil { - return err - } - l.Info("created model registry namespace", "namespace", m.RegistriesNamespace) - // create servicemeshmember here, for now until post MVP solution - err = enrollToServiceMesh(ctx, cli, dscispec, ns) - if err != nil { - return err - } - l.Info("created model registry servicemesh member", "namespace", m.RegistriesNamespace) - } else { - err := m.removeDependencies(ctx, cli, dscispec) - if err != nil { - return err - } - } - - // Deploy ModelRegistry Operator - if err := deploy.DeployManifestsFromPath(ctx, cli, owner, Path, dscispec.ApplicationsNamespace, m.GetComponentName(), enabled); err != nil { - return err - } - l.Info("apply manifests done") - - // Create additional model registry resources, componentEnabled=true because these extras are never deleted! 
- if err := deploy.DeployManifestsFromPath(ctx, cli, owner, Path+"/extras", dscispec.ApplicationsNamespace, m.GetComponentName(), true); err != nil { - return err - } - l.Info("apply extra manifests done") - - if enabled { - if err := cluster.WaitForDeploymentAvailable(ctx, cli, m.GetComponentName(), dscispec.ApplicationsNamespace, 10, 1); err != nil { - return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) - } - } - - // CloudService Monitoring handling - if platform == cluster.ManagedRhoai { - if err := m.UpdatePrometheusConfig(cli, l, enabled && monitoringEnabled, ComponentName); err != nil { - return err - } - if err := deploy.DeployManifestsFromPath(ctx, cli, owner, - filepath.Join(deploy.DefaultManifestPath, "monitoring", "prometheus", "apps"), - dscispec.Monitoring.Namespace, - "prometheus", true); err != nil { - return err - } - l.Info("updating SRE monitoring done") - } - return nil -} - -func (m *ModelRegistry) createDependencies(ctx context.Context, cli client.Client, dscispec *dsciv1.DSCInitializationSpec) error { - // create DefaultModelRegistryCert - if err := cluster.PropagateDefaultIngressCertificate(ctx, cli, DefaultModelRegistryCert, dscispec.ServiceMesh.ControlPlane.Namespace); err != nil { - return err - } - return nil -} - -func (m *ModelRegistry) removeDependencies(ctx context.Context, cli client.Client, dscispec *dsciv1.DSCInitializationSpec) error { - // delete DefaultModelRegistryCert - certSecret := corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: DefaultModelRegistryCert, - Namespace: dscispec.ServiceMesh.ControlPlane.Namespace, - }, - } - // ignore error if the secret has already been removed - if err := cli.Delete(ctx, &certSecret); client.IgnoreNotFound(err) != nil { - return err - } - return nil -} - -//go:embed resources/servicemesh-member.tmpl.yaml -var smmTemplate string - -func enrollToServiceMesh(ctx context.Context, cli client.Client, dscispec *dsciv1.DSCInitializationSpec, namespace 
*corev1.Namespace) error { - tmpl, err := template.New("servicemeshmember").Parse(smmTemplate) - if err != nil { - return fmt.Errorf("error parsing servicemeshmember template: %w", err) - } - builder := strings.Builder{} - controlPlaneData := struct { - Namespace string - ControlPlane *infrav1.ControlPlaneSpec - }{Namespace: namespace.Name, ControlPlane: &dscispec.ServiceMesh.ControlPlane} - - if err = tmpl.Execute(&builder, controlPlaneData); err != nil { - return fmt.Errorf("error executing servicemeshmember template: %w", err) - } - - unstrObj, err := conversion.StrToUnstructured(builder.String()) - if err != nil || len(unstrObj) != 1 { - return fmt.Errorf("error converting servicemeshmember template: %w", err) - } - - return client.IgnoreAlreadyExists(cli.Create(ctx, unstrObj[0])) -} diff --git a/components/modelregistry/resources/servicemesh-member.tmpl.yaml b/components/modelregistry/resources/servicemesh-member.tmpl.yaml deleted file mode 100644 index 8665f2ba54f..00000000000 --- a/components/modelregistry/resources/servicemesh-member.tmpl.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: maistra.io/v1 -kind: ServiceMeshMember -metadata: - name: default - namespace: {{.Namespace}} -spec: - controlPlaneRef: - namespace: {{ .ControlPlane.Namespace }} - name: {{ .ControlPlane.Name }} diff --git a/components/modelregistry/zz_generated.deepcopy.go b/components/modelregistry/zz_generated.deepcopy.go deleted file mode 100644 index 86c4a17e14c..00000000000 --- a/components/modelregistry/zz_generated.deepcopy.go +++ /dev/null @@ -1,39 +0,0 @@ -//go:build !ignore_autogenerated - -/* -Copyright 2023. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by controller-gen. DO NOT EDIT. - -package modelregistry - -import () - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ModelRegistry) DeepCopyInto(out *ModelRegistry) { - *out = *in - in.Component.DeepCopyInto(&out.Component) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModelRegistry. -func (in *ModelRegistry) DeepCopy() *ModelRegistry { - if in == nil { - return nil - } - out := new(ModelRegistry) - in.DeepCopyInto(out) - return out -} diff --git a/components/ray/ray.go b/components/ray/ray.go deleted file mode 100644 index a0daa5e134e..00000000000 --- a/components/ray/ray.go +++ /dev/null @@ -1,107 +0,0 @@ -// Package ray provides utility functions to config Ray as part of the stack -// which makes managing distributed compute infrastructure in the cloud easy and intuitive for Data Scientists -// +groupName=datasciencecluster.opendatahub.io -package ray - -import ( - "context" - "fmt" - "path/filepath" - - "github.com/go-logr/logr" - operatorv1 "github.com/openshift/api/operator/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/components" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" -) - -var ( - ComponentName = "ray" - RayPath = 
deploy.DefaultManifestPath + "/" + ComponentName + "/openshift" -) - -// Verifies that Ray implements ComponentInterface. -var _ components.ComponentInterface = (*Ray)(nil) - -// Ray struct holds the configuration for the Ray component. -// +kubebuilder:object:generate=true -type Ray struct { - components.Component `json:""` -} - -func (r *Ray) OverrideManifests(ctx context.Context, _ cluster.Platform) error { - // If devflags are set, update default manifests path - if len(r.DevFlags.Manifests) != 0 { - manifestConfig := r.DevFlags.Manifests[0] - if err := deploy.DownloadManifests(ctx, ComponentName, manifestConfig); err != nil { - return err - } - // If overlay is defined, update paths - defaultKustomizePath := "openshift" - if manifestConfig.SourcePath != "" { - defaultKustomizePath = manifestConfig.SourcePath - } - RayPath = filepath.Join(deploy.DefaultManifestPath, ComponentName, defaultKustomizePath) - } - return nil -} - -func (r *Ray) GetComponentName() string { - return ComponentName -} - -func (r *Ray) ReconcileComponent(ctx context.Context, cli client.Client, logger logr.Logger, - owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool) error { - l := r.ConfigComponentLogger(logger, ComponentName, dscispec) - - var imageParamMap = map[string]string{ - "odh-kuberay-operator-controller-image": "RELATED_IMAGE_ODH_KUBERAY_OPERATOR_CONTROLLER_IMAGE", - } - - enabled := r.GetManagementState() == operatorv1.Managed - monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed - - if enabled { - if r.DevFlags != nil { - // Download manifests and update paths - if err := r.OverrideManifests(ctx, platform); err != nil { - return err - } - } - if (dscispec.DevFlags == nil || dscispec.DevFlags.ManifestsUri == "") && (r.DevFlags == nil || len(r.DevFlags.Manifests) == 0) { - if err := deploy.ApplyParams(RayPath, imageParamMap, map[string]string{"namespace": dscispec.ApplicationsNamespace}); err != nil { - 
return fmt.Errorf("failed to update image from %s : %w", RayPath, err) - } - } - } - // Deploy Ray Operator - if err := deploy.DeployManifestsFromPath(ctx, cli, owner, RayPath, dscispec.ApplicationsNamespace, ComponentName, enabled); err != nil { - return fmt.Errorf("failed to apply manifets from %s : %w", RayPath, err) - } - l.Info("apply manifests done") - - if enabled { - if err := cluster.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { - return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) - } - } - - // CloudService Monitoring handling - if platform == cluster.ManagedRhoai { - if err := r.UpdatePrometheusConfig(cli, l, enabled && monitoringEnabled, ComponentName); err != nil { - return err - } - if err := deploy.DeployManifestsFromPath(ctx, cli, owner, - filepath.Join(deploy.DefaultManifestPath, "monitoring", "prometheus", "apps"), - dscispec.Monitoring.Namespace, - "prometheus", true); err != nil { - return err - } - l.Info("updating SRE monitoring done") - } - return nil -} diff --git a/components/ray/zz_generated.deepcopy.go b/components/ray/zz_generated.deepcopy.go deleted file mode 100644 index f7688cd81a5..00000000000 --- a/components/ray/zz_generated.deepcopy.go +++ /dev/null @@ -1,39 +0,0 @@ -//go:build !ignore_autogenerated - -/* -Copyright 2023. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by controller-gen. DO NOT EDIT. 
- -package ray - -import () - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Ray) DeepCopyInto(out *Ray) { - *out = *in - in.Component.DeepCopyInto(&out.Component) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ray. -func (in *Ray) DeepCopy() *Ray { - if in == nil { - return nil - } - out := new(Ray) - in.DeepCopyInto(out) - return out -} diff --git a/components/trainingoperator/trainingoperator.go b/components/trainingoperator/trainingoperator.go deleted file mode 100644 index 5f989e9d810..00000000000 --- a/components/trainingoperator/trainingoperator.go +++ /dev/null @@ -1,108 +0,0 @@ -// Package trainingoperator provides utility functions to config trainingoperator as part of the stack -// which makes managing distributed compute infrastructure in the cloud easy and intuitive for Data Scientists -// +groupName=datasciencecluster.opendatahub.io -package trainingoperator - -import ( - "context" - "fmt" - "path/filepath" - - "github.com/go-logr/logr" - operatorv1 "github.com/openshift/api/operator/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/components" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" -) - -var ( - ComponentName = "trainingoperator" - TrainingOperatorPath = deploy.DefaultManifestPath + "/" + ComponentName + "/rhoai" -) - -// Verifies that TrainingOperator implements ComponentInterface. -var _ components.ComponentInterface = (*TrainingOperator)(nil) - -// TrainingOperator struct holds the configuration for the TrainingOperator component. 
-// +kubebuilder:object:generate=true -type TrainingOperator struct { - components.Component `json:""` -} - -func (r *TrainingOperator) OverrideManifests(ctx context.Context, _ cluster.Platform) error { - // If devflags are set, update default manifests path - if len(r.DevFlags.Manifests) != 0 { - manifestConfig := r.DevFlags.Manifests[0] - if err := deploy.DownloadManifests(ctx, ComponentName, manifestConfig); err != nil { - return err - } - // If overlay is defined, update paths - defaultKustomizePath := "rhoai" - if manifestConfig.SourcePath != "" { - defaultKustomizePath = manifestConfig.SourcePath - } - TrainingOperatorPath = filepath.Join(deploy.DefaultManifestPath, ComponentName, defaultKustomizePath) - } - - return nil -} - -func (r *TrainingOperator) GetComponentName() string { - return ComponentName -} - -func (r *TrainingOperator) ReconcileComponent(ctx context.Context, cli client.Client, logger logr.Logger, - owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool) error { - l := r.ConfigComponentLogger(logger, ComponentName, dscispec) - var imageParamMap = map[string]string{ - "odh-training-operator-controller-image": "RELATED_IMAGE_ODH_TRAINING_OPERATOR_IMAGE", - } - - enabled := r.GetManagementState() == operatorv1.Managed - monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed - - if enabled { - if r.DevFlags != nil { - // Download manifests and update paths - if err := r.OverrideManifests(ctx, platform); err != nil { - return err - } - } - if (dscispec.DevFlags == nil || dscispec.DevFlags.ManifestsUri == "") && (r.DevFlags == nil || len(r.DevFlags.Manifests) == 0) { - if err := deploy.ApplyParams(TrainingOperatorPath, imageParamMap); err != nil { - return fmt.Errorf("failed to update image from %s : %w", TrainingOperatorPath, err) - } - } - } - // Deploy Training Operator - if err := deploy.DeployManifestsFromPath(ctx, cli, owner, TrainingOperatorPath, dscispec.ApplicationsNamespace, 
ComponentName, enabled); err != nil { - return err - } - l.Info("apply manifests done") - - if enabled { - if err := cluster.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 20, 2); err != nil { - return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) - } - } - - // CloudService Monitoring handling - if platform == cluster.ManagedRhoai { - if err := r.UpdatePrometheusConfig(cli, l, enabled && monitoringEnabled, ComponentName); err != nil { - return err - } - if err := deploy.DeployManifestsFromPath(ctx, cli, owner, - filepath.Join(deploy.DefaultManifestPath, "monitoring", "prometheus", "apps"), - dscispec.Monitoring.Namespace, - "prometheus", true); err != nil { - return err - } - l.Info("updating SRE monitoring done") - } - - return nil -} diff --git a/components/trainingoperator/zz_generated.deepcopy.go b/components/trainingoperator/zz_generated.deepcopy.go deleted file mode 100644 index 57245a95044..00000000000 --- a/components/trainingoperator/zz_generated.deepcopy.go +++ /dev/null @@ -1,39 +0,0 @@ -//go:build !ignore_autogenerated - -/* -Copyright 2023. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by controller-gen. DO NOT EDIT. - -package trainingoperator - -import () - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *TrainingOperator) DeepCopyInto(out *TrainingOperator) { - *out = *in - in.Component.DeepCopyInto(&out.Component) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrainingOperator. -func (in *TrainingOperator) DeepCopy() *TrainingOperator { - if in == nil { - return nil - } - out := new(TrainingOperator) - in.DeepCopyInto(out) - return out -} diff --git a/components/trustyai/trustyai.go b/components/trustyai/trustyai.go deleted file mode 100644 index 98e5ab53fa5..00000000000 --- a/components/trustyai/trustyai.go +++ /dev/null @@ -1,120 +0,0 @@ -// Package trustyai provides utility functions to config TrustyAI, a bias/fairness and explainability toolkit -// +groupName=datasciencecluster.opendatahub.io -package trustyai - -import ( - "context" - "fmt" - "path/filepath" - - "github.com/go-logr/logr" - operatorv1 "github.com/openshift/api/operator/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/components" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" -) - -var ( - ComponentName = "trustyai" - ComponentPathName = "trustyai-service-operator" - PathUpstream = deploy.DefaultManifestPath + "/" + ComponentPathName + "/overlays/odh" - PathDownstream = deploy.DefaultManifestPath + "/" + ComponentPathName + "/overlays/rhoai" - OverridePath = "" -) - -// Verifies that TrustyAI implements ComponentInterface. -var _ components.ComponentInterface = (*TrustyAI)(nil) - -// TrustyAI struct holds the configuration for the TrustyAI component. 
-// +kubebuilder:object:generate=true -type TrustyAI struct { - components.Component `json:""` -} - -func (t *TrustyAI) OverrideManifests(ctx context.Context, _ cluster.Platform) error { - // If devflags are set, update default manifests path - if len(t.DevFlags.Manifests) != 0 { - manifestConfig := t.DevFlags.Manifests[0] - if err := deploy.DownloadManifests(ctx, ComponentPathName, manifestConfig); err != nil { - return err - } - // If overlay is defined, update paths - defaultKustomizePath := "base" - if manifestConfig.SourcePath != "" { - defaultKustomizePath = manifestConfig.SourcePath - } - OverridePath = filepath.Join(deploy.DefaultManifestPath, ComponentPathName, defaultKustomizePath) - } - return nil -} - -func (t *TrustyAI) GetComponentName() string { - return ComponentName -} - -func (t *TrustyAI) ReconcileComponent(ctx context.Context, cli client.Client, logger logr.Logger, - owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool) error { - var imageParamMap = map[string]string{ - "trustyaiServiceImage": "RELATED_IMAGE_ODH_TRUSTYAI_SERVICE_IMAGE", - "trustyaiOperatorImage": "RELATED_IMAGE_ODH_TRUSTYAI_SERVICE_OPERATOR_IMAGE", - } - entryPath := map[cluster.Platform]string{ - cluster.SelfManagedRhoai: PathDownstream, - cluster.ManagedRhoai: PathDownstream, - cluster.OpenDataHub: PathUpstream, - cluster.Unknown: PathUpstream, - }[platform] - - l := t.ConfigComponentLogger(logger, ComponentName, dscispec) - - enabled := t.GetManagementState() == operatorv1.Managed - monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed - - if enabled { - if t.DevFlags != nil { - // Download manifests and update paths - if err := t.OverrideManifests(ctx, platform); err != nil { - return err - } - if OverridePath != "" { - entryPath = OverridePath - } - } - if (dscispec.DevFlags == nil || dscispec.DevFlags.ManifestsUri == "") && (t.DevFlags == nil || len(t.DevFlags.Manifests) == 0) { - if err := 
deploy.ApplyParams(entryPath, imageParamMap); err != nil { - return fmt.Errorf("failed to update image %s: %w", entryPath, err) - } - } - } - // Deploy TrustyAI Operator - if err := deploy.DeployManifestsFromPath(ctx, cli, owner, entryPath, dscispec.ApplicationsNamespace, t.GetComponentName(), enabled); err != nil { - return err - } - l.Info("apply manifests done") - - // Wait for deployment available - if enabled { - if err := cluster.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 10, 2); err != nil { - return fmt.Errorf("deployment for %s is not ready to server: %w", ComponentName, err) - } - } - - // CloudService Monitoring handling - if platform == cluster.ManagedRhoai { - if err := t.UpdatePrometheusConfig(cli, l, enabled && monitoringEnabled, ComponentName); err != nil { - return err - } - if err := deploy.DeployManifestsFromPath(ctx, cli, owner, - filepath.Join(deploy.DefaultManifestPath, "monitoring", "prometheus", "apps"), - dscispec.Monitoring.Namespace, - "prometheus", true); err != nil { - return err - } - l.Info("updating SRE monitoring done") - } - return nil -} diff --git a/components/trustyai/zz_generated.deepcopy.go b/components/trustyai/zz_generated.deepcopy.go deleted file mode 100644 index 98dab130477..00000000000 --- a/components/trustyai/zz_generated.deepcopy.go +++ /dev/null @@ -1,39 +0,0 @@ -//go:build !ignore_autogenerated - -/* -Copyright 2023. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Code generated by controller-gen. DO NOT EDIT. - -package trustyai - -import () - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TrustyAI) DeepCopyInto(out *TrustyAI) { - *out = *in - in.Component.DeepCopyInto(&out.Component) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustyAI. -func (in *TrustyAI) DeepCopy() *TrustyAI { - if in == nil { - return nil - } - out := new(TrustyAI) - in.DeepCopyInto(out) - return out -} diff --git a/components/workbenches/workbenches.go b/components/workbenches/workbenches.go deleted file mode 100644 index 96748b9695e..00000000000 --- a/components/workbenches/workbenches.go +++ /dev/null @@ -1,185 +0,0 @@ -// Package workbenches provides utility functions to config Workbenches to secure Jupyter Notebook in Kubernetes environments with support for OAuth -// +groupName=datasciencecluster.opendatahub.io -package workbenches - -import ( - "context" - "fmt" - "path/filepath" - "strings" - - "github.com/go-logr/logr" - operatorv1 "github.com/openshift/api/operator/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/components" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" -) - -var ( - ComponentName = "workbenches" - DependentComponentName = "notebooks" - // manifests for nbc in ODH and RHOAI + downstream use it for imageparams. - notebookControllerPath = deploy.DefaultManifestPath + "/odh-notebook-controller/odh-notebook-controller/base" - // manifests for ODH nbc + downstream use it for imageparams. 
- kfnotebookControllerPath = deploy.DefaultManifestPath + "/odh-notebook-controller/kf-notebook-controller/overlays/openshift" - // notebook image manifests. - notebookImagesPath = deploy.DefaultManifestPath + "/notebooks/overlays/additional" -) - -// Verifies that Workbench implements ComponentInterface. -var _ components.ComponentInterface = (*Workbenches)(nil) - -// Workbenches struct holds the configuration for the Workbenches component. -// +kubebuilder:object:generate=true -type Workbenches struct { - components.Component `json:""` -} - -func (w *Workbenches) OverrideManifests(ctx context.Context, platform cluster.Platform) error { - // Download manifests if defined by devflags - // Go through each manifest and set the overlays if defined - // first on odh-notebook-controller and kf-notebook-controller last to notebook-images - for _, subcomponent := range w.DevFlags.Manifests { - if strings.Contains(subcomponent.ContextDir, "components/odh-notebook-controller") { - // Download subcomponent - if err := deploy.DownloadManifests(ctx, "odh-notebook-controller/odh-notebook-controller", subcomponent); err != nil { - return err - } - // If overlay is defined, update paths - defaultKustomizePathNbc := "base" - if subcomponent.SourcePath != "" { - defaultKustomizePathNbc = subcomponent.SourcePath - } - notebookControllerPath = filepath.Join(deploy.DefaultManifestPath, "odh-notebook-controller/odh-notebook-controller", defaultKustomizePathNbc) - } - - if strings.Contains(subcomponent.ContextDir, "components/notebook-controller") { - // Download subcomponent - if err := deploy.DownloadManifests(ctx, "odh-notebook-controller/kf-notebook-controller", subcomponent); err != nil { - return err - } - // If overlay is defined, update paths - defaultKustomizePathKfNbc := "overlays/openshift" - if subcomponent.SourcePath != "" { - defaultKustomizePathKfNbc = subcomponent.SourcePath - } - kfnotebookControllerPath = filepath.Join(deploy.DefaultManifestPath, 
"odh-notebook-controller/kf-notebook-controller", defaultKustomizePathKfNbc) - } - if strings.Contains(subcomponent.URI, DependentComponentName) { - // Download subcomponent - if err := deploy.DownloadManifests(ctx, DependentComponentName, subcomponent); err != nil { - return err - } - // If overlay is defined, update paths - defaultKustomizePath := "overlays/additional" - if subcomponent.SourcePath != "" { - defaultKustomizePath = subcomponent.SourcePath - } - notebookImagesPath = filepath.Join(deploy.DefaultManifestPath, DependentComponentName, defaultKustomizePath) - } - } - return nil -} - -func (w *Workbenches) GetComponentName() string { - return ComponentName -} - -func (w *Workbenches) ReconcileComponent(ctx context.Context, cli client.Client, logger logr.Logger, - owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec, platform cluster.Platform, _ bool) error { - l := w.ConfigComponentLogger(logger, ComponentName, dscispec) - var imageParamMap = map[string]string{ - "odh-notebook-controller-image": "RELATED_IMAGE_ODH_NOTEBOOK_CONTROLLER_IMAGE", - "odh-kf-notebook-controller-image": "RELATED_IMAGE_ODH_KF_NOTEBOOK_CONTROLLER_IMAGE", - } - - // Set default notebooks namespace - // Create rhods-notebooks namespace in managed platforms - enabled := w.GetManagementState() == operatorv1.Managed - monitoringEnabled := dscispec.Monitoring.ManagementState == operatorv1.Managed - if enabled { - if w.DevFlags != nil { - // Download manifests and update paths - if err := w.OverrideManifests(ctx, platform); err != nil { - return err - } - } - if platform == cluster.SelfManagedRhoai || platform == cluster.ManagedRhoai { - // Intentionally leaving the ownership unset for this namespace. - // Specifying this label triggers its deletion when the operator is uninstalled. 
- _, err := cluster.CreateNamespace(ctx, cli, cluster.DefaultNotebooksNamespace, cluster.WithLabels(labels.ODH.OwnedNamespace, "true")) - if err != nil { - return err - } - } - // Update Default rolebinding - err := cluster.UpdatePodSecurityRolebinding(ctx, cli, dscispec.ApplicationsNamespace, "notebook-controller-service-account") - if err != nil { - return err - } - } - - // Update image parameters for nbc - if enabled { - if (dscispec.DevFlags == nil || dscispec.DevFlags.ManifestsUri == "") && (w.DevFlags == nil || len(w.DevFlags.Manifests) == 0) { - // for kf-notebook-controller image - if err := deploy.ApplyParams(notebookControllerPath, imageParamMap); err != nil { - return fmt.Errorf("failed to update image %s: %w", notebookControllerPath, err) - } - // for odh-notebook-controller image - if err := deploy.ApplyParams(kfnotebookControllerPath, imageParamMap); err != nil { - return fmt.Errorf("failed to update image %s: %w", kfnotebookControllerPath, err) - } - } - } - if err := deploy.DeployManifestsFromPath(ctx, cli, owner, - notebookControllerPath, - dscispec.ApplicationsNamespace, - ComponentName, enabled); err != nil { - return fmt.Errorf("failed to apply manifetss %s: %w", notebookControllerPath, err) - } - l.WithValues("Path", notebookControllerPath).Info("apply manifests done notebook controller done") - - if err := deploy.DeployManifestsFromPath(ctx, cli, owner, - kfnotebookControllerPath, - dscispec.ApplicationsNamespace, - ComponentName, enabled); err != nil { - return fmt.Errorf("failed to apply manifetss %s: %w", kfnotebookControllerPath, err) - } - l.WithValues("Path", kfnotebookControllerPath).Info("apply manifests done kf-notebook controller done") - - if err := deploy.DeployManifestsFromPath(ctx, cli, owner, - notebookImagesPath, - dscispec.ApplicationsNamespace, - ComponentName, enabled); err != nil { - return err - } - l.WithValues("Path", notebookImagesPath).Info("apply manifests done notebook image done") - - // Wait for deployment 
available - if enabled { - if err := cluster.WaitForDeploymentAvailable(ctx, cli, ComponentName, dscispec.ApplicationsNamespace, 10, 2); err != nil { - return fmt.Errorf("deployments for %s are not ready to server: %w", ComponentName, err) - } - } - - // CloudService Monitoring handling - if platform == cluster.ManagedRhoai { - if err := w.UpdatePrometheusConfig(cli, l, enabled && monitoringEnabled, ComponentName); err != nil { - return err - } - if err := deploy.DeployManifestsFromPath(ctx, cli, owner, - filepath.Join(deploy.DefaultManifestPath, "monitoring", "prometheus", "apps"), - dscispec.Monitoring.Namespace, - "prometheus", true); err != nil { - return err - } - l.Info("updating SRE monitoring done") - } - return nil -} diff --git a/components/workbenches/zz_generated.deepcopy.go b/components/workbenches/zz_generated.deepcopy.go deleted file mode 100644 index 61045d7db98..00000000000 --- a/components/workbenches/zz_generated.deepcopy.go +++ /dev/null @@ -1,39 +0,0 @@ -//go:build !ignore_autogenerated - -/* -Copyright 2023. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by controller-gen. DO NOT EDIT. - -package workbenches - -import () - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Workbenches) DeepCopyInto(out *Workbenches) { - *out = *in - in.Component.DeepCopyInto(&out.Component) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workbenches. -func (in *Workbenches) DeepCopy() *Workbenches { - if in == nil { - return nil - } - out := new(Workbenches) - in.DeepCopyInto(out) - return out -} diff --git a/config/crd/bases/components.platform.opendatahub.io_codeflares.yaml b/config/crd/bases/components.platform.opendatahub.io_codeflares.yaml new file mode 100644 index 00000000000..999e5ccfdd4 --- /dev/null +++ b/config/crd/bases/components.platform.opendatahub.io_codeflares.yaml @@ -0,0 +1,152 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: codeflares.components.platform.opendatahub.io +spec: + group: components.platform.opendatahub.io + names: + kind: CodeFlare + listKind: CodeFlareList + plural: codeflares + singular: codeflare + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Ready + jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - description: Reason + jsonPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: CodeFlare is the Schema for the codeflares API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + properties: + devFlags: + description: Add developer fields + properties: + manifests: + description: List of custom manifests for the given component + items: + properties: + contextDir: + default: manifests + description: contextDir is the relative path to the folder + containing manifests in a repository, default value "manifests" + type: string + sourcePath: + default: "" + description: 'sourcePath is the subpath within contextDir + where kustomize builds start. Examples include any sub-folder + or path: `base`, `overlays/dev`, `default`, `odh` etc.' + type: string + uri: + default: "" + description: uri is the URI point to a git repo with tag/branch. + e.g. https://github.com/org/repo/tarball/ + type: string + type: object + type: array + type: object + type: object + status: + description: CodeFlareStatus defines the observed state of CodeFlare + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + format: int64 + type: integer + phase: + type: string + type: object + type: object + x-kubernetes-validations: + - message: CodeFlare name must be default-codeflare + rule: self.metadata.name == 'default-codeflare' + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/components.platform.opendatahub.io_dashboards.yaml b/config/crd/bases/components.platform.opendatahub.io_dashboards.yaml new file mode 100644 index 00000000000..79da59747d8 --- /dev/null +++ b/config/crd/bases/components.platform.opendatahub.io_dashboards.yaml @@ -0,0 +1,159 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: dashboards.components.platform.opendatahub.io +spec: + group: components.platform.opendatahub.io + names: + kind: Dashboard + listKind: DashboardList + plural: dashboards + 
singular: dashboard + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Ready + jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - description: Reason + jsonPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string + - description: URL + jsonPath: .status.url + name: URL + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: Dashboard is the Schema for the dashboards API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DashboardSpec defines the desired state of Dashboard + properties: + devFlags: + description: Add developer fields + properties: + manifests: + description: List of custom manifests for the given component + items: + properties: + contextDir: + default: manifests + description: contextDir is the relative path to the folder + containing manifests in a repository, default value "manifests" + type: string + sourcePath: + default: "" + description: 'sourcePath is the subpath within contextDir + where kustomize builds start. Examples include any sub-folder + or path: `base`, `overlays/dev`, `default`, `odh` etc.' + type: string + uri: + default: "" + description: uri is the URI point to a git repo with tag/branch. + e.g. 
https://github.com/org/repo/tarball/ + type: string + type: object + type: array + type: object + type: object + status: + description: DashboardStatus defines the observed state of Dashboard + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + format: int64 + type: integer + phase: + type: string + url: + type: string + type: object + type: object + x-kubernetes-validations: + - message: Dashboard name must be default-dashboard + rule: self.metadata.name == 'default-dashboard' + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/components.platform.opendatahub.io_datasciencepipelines.yaml b/config/crd/bases/components.platform.opendatahub.io_datasciencepipelines.yaml new file mode 100644 index 00000000000..495d178bb28 --- /dev/null +++ b/config/crd/bases/components.platform.opendatahub.io_datasciencepipelines.yaml @@ -0,0 +1,155 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: datasciencepipelines.components.platform.opendatahub.io +spec: + group: components.platform.opendatahub.io + names: + kind: DataSciencePipelines + listKind: DataSciencePipelinesList + plural: datasciencepipelines + singular: datasciencepipelines + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Ready + jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - description: Reason + jsonPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: DataSciencePipelines is the Schema for the datasciencepipelines + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: DataSciencePipelinesSpec defines the desired state of DataSciencePipelines + properties: + devFlags: + description: Add developer fields + properties: + manifests: + description: List of custom manifests for the given component + items: + properties: + contextDir: + default: manifests + description: contextDir is the relative path to the folder + containing manifests in a repository, default value "manifests" + type: string + sourcePath: + default: "" + description: 'sourcePath is the subpath within contextDir + where kustomize builds start. Examples include any sub-folder + or path: `base`, `overlays/dev`, `default`, `odh` etc.' + type: string + uri: + default: "" + description: uri is the URI point to a git repo with tag/branch. + e.g. https://github.com/org/repo/tarball/ + type: string + type: object + type: array + type: object + type: object + status: + description: DataSciencePipelinesStatus defines the observed state of + DataSciencePipelines + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. 
If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + format: int64 + type: integer + phase: + type: string + type: object + type: object + x-kubernetes-validations: + - message: DataSciencePipelines name must be default-datasciencepipelines + rule: self.metadata.name == 'default-datasciencepipelines' + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/components.platform.opendatahub.io_kserves.yaml b/config/crd/bases/components.platform.opendatahub.io_kserves.yaml new file mode 100644 index 00000000000..c64e35fe0ee --- /dev/null +++ b/config/crd/bases/components.platform.opendatahub.io_kserves.yaml @@ -0,0 +1,237 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: kserves.components.platform.opendatahub.io +spec: + group: components.platform.opendatahub.io + names: + kind: Kserve + listKind: KserveList + plural: kserves + singular: kserve + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Ready + jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - description: Reason + jsonPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: Kserve is the Schema for the kserves API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: KserveSpec defines the desired state of Kserve + properties: + defaultDeploymentMode: + description: |- + Configures the default deployment mode for Kserve. This can be set to 'Serverless' or 'RawDeployment'. + The value specified in this field will be used to set the default deployment mode in the 'inferenceservice-config' configmap for Kserve. + This field is optional. If no default deployment mode is specified, Kserve will use Serverless mode. + enum: + - Serverless + - RawDeployment + pattern: ^(Serverless|RawDeployment)$ + type: string + devFlags: + description: Add developer fields + properties: + manifests: + description: List of custom manifests for the given component + items: + properties: + contextDir: + default: manifests + description: contextDir is the relative path to the folder + containing manifests in a repository, default value "manifests" + type: string + sourcePath: + default: "" + description: 'sourcePath is the subpath within contextDir + where kustomize builds start. Examples include any sub-folder + or path: `base`, `overlays/dev`, `default`, `odh` etc.' + type: string + uri: + default: "" + description: uri is the URI point to a git repo with tag/branch. + e.g. 
https://github.com/org/repo/tarball/ + type: string + type: object + type: array + type: object + nim: + description: Configures and enables NVIDIA NIM integration + properties: + managementState: + default: Managed + enum: + - Managed + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + type: object + serving: + description: |- + Serving configures the KNative-Serving stack used for model serving. A Service + Mesh (Istio) is prerequisite, since it is used as networking layer. + properties: + ingressGateway: + description: |- + IngressGateway allows to customize some parameters for the Istio Ingress Gateway + that is bound to KNative-Serving. + properties: + certificate: + description: |- + Certificate specifies configuration of the TLS certificate securing communication + for the gateway. + properties: + secretName: + description: |- + SecretName specifies the name of the Kubernetes Secret resource that contains a + TLS certificate secure HTTP communications for the KNative network. + type: string + type: + default: OpenshiftDefaultIngress + description: |- + Type specifies if the TLS certificate should be generated automatically, or if the certificate + is provided by the user. Allowed values are: + * SelfSigned: A certificate is going to be generated using an own private key. + * Provided: Pre-existence of the TLS Secret (see SecretName) with a valid certificate is assumed. + * OpenshiftDefaultIngress: Default ingress certificate configured for OpenShift + enum: + - SelfSigned + - Provided + - OpenshiftDefaultIngress + type: string + type: object + domain: + description: |- + Domain specifies the host name for intercepting incoming requests. + Most likely, you will want to use a wildcard name, like *.example.com. + If not set, the domain of the OpenShift Ingress is used. + If you choose to generate a certificate, this is the domain used for the certificate request. 
+ type: string + type: object + managementState: + default: Managed + enum: + - Managed + - Unmanaged + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + name: + default: knative-serving + description: |- + Name specifies the name of the KNativeServing resource that is going to be + created to instruct the KNative Operator to deploy KNative serving components. + This resource is created in the "knative-serving" namespace. + type: string + type: object + type: object + status: + description: KserveStatus defines the observed state of Kserve + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. 
+ maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + defaultDeploymentMode: + description: |- + DefaultDeploymentMode is the value of the defaultDeploymentMode field + as read from the "deploy" JSON in the inferenceservice-config ConfigMap + type: string + observedGeneration: + format: int64 + type: integer + phase: + type: string + type: object + type: object + x-kubernetes-validations: + - message: Kserve name must be default-kserve + rule: self.metadata.name == 'default-kserve' + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/components.platform.opendatahub.io_kueues.yaml b/config/crd/bases/components.platform.opendatahub.io_kueues.yaml new file mode 100644 index 00000000000..1232c4dcadb --- /dev/null +++ b/config/crd/bases/components.platform.opendatahub.io_kueues.yaml @@ -0,0 +1,153 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: kueues.components.platform.opendatahub.io +spec: + group: components.platform.opendatahub.io + names: + kind: Kueue + listKind: KueueList + plural: kueues + singular: kueue + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Ready + jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - description: Reason + jsonPath: 
.status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: Kueue is the Schema for the kueues API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: KueueSpec defines the desired state of Kueue + properties: + devFlags: + description: Add developer fields + properties: + manifests: + description: List of custom manifests for the given component + items: + properties: + contextDir: + default: manifests + description: contextDir is the relative path to the folder + containing manifests in a repository, default value "manifests" + type: string + sourcePath: + default: "" + description: 'sourcePath is the subpath within contextDir + where kustomize builds start. Examples include any sub-folder + or path: `base`, `overlays/dev`, `default`, `odh` etc.' + type: string + uri: + default: "" + description: uri is the URI point to a git repo with tag/branch. + e.g. https://github.com/org/repo/tarball/ + type: string + type: object + type: array + type: object + type: object + status: + description: KueueStatus defines the observed state of Kueue + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. 
+ properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + format: int64 + type: integer + phase: + type: string + type: object + type: object + x-kubernetes-validations: + - message: Kueue name must be default-kueue + rule: self.metadata.name == 'default-kueue' + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/components.platform.opendatahub.io_modelcontrollers.yaml b/config/crd/bases/components.platform.opendatahub.io_modelcontrollers.yaml new file mode 100644 index 00000000000..500450f1e68 --- /dev/null +++ b/config/crd/bases/components.platform.opendatahub.io_modelcontrollers.yaml @@ -0,0 +1,214 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: modelcontrollers.components.platform.opendatahub.io +spec: + group: components.platform.opendatahub.io + names: + kind: ModelController + listKind: ModelControllerList + plural: modelcontrollers + singular: modelcontroller + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Ready + jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - description: Reason + jsonPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string + - description: devFlag's URI used to download + jsonPath: .status.URI + name: URI + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: ModelController is the Schema for the modelcontroller API, it + is a shared component between kserve and modelmeshserving + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation 
of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ModelControllerSpec defines the desired state of ModelController + properties: + kserve: + description: a mini version of the DSCKserve only keep devflags and management + spec + properties: + devFlags: + description: Add developer fields + properties: + manifests: + description: List of custom manifests for the given component + items: + properties: + contextDir: + default: manifests + description: contextDir is the relative path to the + folder containing manifests in a repository, default + value "manifests" + type: string + sourcePath: + default: "" + description: 'sourcePath is the subpath within contextDir + where kustomize builds start. Examples include any + sub-folder or path: `base`, `overlays/dev`, `default`, + `odh` etc.' + type: string + uri: + default: "" + description: uri is the URI point to a git repo with + tag/branch. e.g. 
https://github.com/org/repo/tarball/ + type: string + type: object + type: array + type: object + managementState: + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + nim: + description: nimSpec enables NVIDIA NIM integration + properties: + managementState: + default: Managed + enum: + - Managed + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + type: object + type: object + modelMeshServing: + description: a mini version of the DSCModelMeshServing only keep devflags + and management spec + properties: + devFlags: + description: Add developer fields + properties: + manifests: + description: List of custom manifests for the given component + items: + properties: + contextDir: + default: manifests + description: contextDir is the relative path to the + folder containing manifests in a repository, default + value "manifests" + type: string + sourcePath: + default: "" + description: 'sourcePath is the subpath within contextDir + where kustomize builds start. Examples include any + sub-folder or path: `base`, `overlays/dev`, `default`, + `odh` etc.' + type: string + uri: + default: "" + description: uri is the URI point to a git repo with + tag/branch. e.g. https://github.com/org/repo/tarball/ + type: string + type: object + type: array + type: object + managementState: + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + type: object + type: object + status: + description: ModelControllerStatus defines the observed state of ModelController + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
+ format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + format: int64 + type: integer + phase: + type: string + type: object + type: object + x-kubernetes-validations: + - message: ModelController name must be default-modelcontroller + rule: self.metadata.name == 'default-modelcontroller' + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/components.platform.opendatahub.io_modelmeshservings.yaml b/config/crd/bases/components.platform.opendatahub.io_modelmeshservings.yaml new file mode 100644 index 00000000000..8a8b82f876f --- /dev/null +++ b/config/crd/bases/components.platform.opendatahub.io_modelmeshservings.yaml @@ -0,0 +1,153 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: modelmeshservings.components.platform.opendatahub.io +spec: + group: components.platform.opendatahub.io + names: + kind: ModelMeshServing + listKind: ModelMeshServingList + plural: modelmeshservings + singular: modelmeshserving + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Ready + jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - description: Reason + jsonPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: ModelMeshServing is the Schema for the modelmeshservings API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ModelMeshServingSpec defines the desired state of ModelMeshServing + properties: + devFlags: + description: Add developer fields + properties: + manifests: + description: List of custom manifests for the given component + items: + properties: + contextDir: + default: manifests + description: contextDir is the relative path to the folder + containing manifests in a repository, default value "manifests" + type: string + sourcePath: + default: "" + description: 'sourcePath is the subpath within contextDir + where kustomize builds start. Examples include any sub-folder + or path: `base`, `overlays/dev`, `default`, `odh` etc.' + type: string + uri: + default: "" + description: uri is the URI point to a git repo with tag/branch. + e.g. https://github.com/org/repo/tarball/ + type: string + type: object + type: array + type: object + type: object + status: + description: ModelMeshServingStatus defines the observed state of ModelMeshServing + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
+ format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + format: int64 + type: integer + phase: + type: string + type: object + type: object + x-kubernetes-validations: + - message: ModelMeshServing name must be default-modelmeshserving + rule: self.metadata.name == 'default-modelmeshserving' + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/components.platform.opendatahub.io_modelregistries.yaml b/config/crd/bases/components.platform.opendatahub.io_modelregistries.yaml new file mode 100644 index 00000000000..86860cf1cf8 --- /dev/null +++ b/config/crd/bases/components.platform.opendatahub.io_modelregistries.yaml @@ -0,0 +1,162 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: modelregistries.components.platform.opendatahub.io +spec: + group: components.platform.opendatahub.io + names: + kind: ModelRegistry + listKind: ModelRegistryList + plural: modelregistries + singular: modelregistry + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Ready + jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - description: Reason + jsonPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: ModelRegistry is the Schema for the modelregistries API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+            More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ModelRegistrySpec defines the desired state of ModelRegistry + properties: + devFlags: + description: Add developer fields + properties: + manifests: + description: List of custom manifests for the given component + items: + properties: + contextDir: + default: manifests + description: contextDir is the relative path to the folder + containing manifests in a repository, default value "manifests" + type: string + sourcePath: + default: "" + description: 'sourcePath is the subpath within contextDir + where kustomize builds start. Examples include any sub-folder + or path: `base`, `overlays/dev`, `default`, `odh` etc.' + type: string + uri: + default: "" + description: uri is the URI point to a git repo with tag/branch. + e.g. https://github.com/org/repo/tarball/ + type: string + type: object + type: array + type: object + registriesNamespace: + default: rhoai-model-registries + description: Namespace for model registries to be installed, configurable + only once when model registry is enabled, defaults to "rhoai-model-registries" + maxLength: 63 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?)?$ + type: string + type: object + status: + description: ModelRegistryStatus defines the observed state of ModelRegistry + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. 
+ properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + format: int64 + type: integer + phase: + type: string + registriesNamespace: + type: string + type: object + type: object + x-kubernetes-validations: + - message: ModelRegistry name must be default-modelregistry + rule: self.metadata.name == 'default-modelregistry' + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/components.platform.opendatahub.io_rays.yaml b/config/crd/bases/components.platform.opendatahub.io_rays.yaml new file mode 100644 index 00000000000..92f3b3c62c3 --- /dev/null +++ b/config/crd/bases/components.platform.opendatahub.io_rays.yaml @@ -0,0 +1,153 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: rays.components.platform.opendatahub.io +spec: + group: components.platform.opendatahub.io + names: + kind: Ray + listKind: RayList + plural: rays + singular: ray + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Ready + jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - description: Reason + jsonPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: Ray is the Schema for the rays API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: RaySpec defines the desired state of Ray + properties: + devFlags: + description: Add developer fields + properties: + manifests: + description: List of custom manifests for the given component + items: + properties: + contextDir: + default: manifests + description: contextDir is the relative path to the folder + containing manifests in a repository, default value "manifests" + type: string + sourcePath: + default: "" + description: 'sourcePath is the subpath within contextDir + where kustomize builds start. Examples include any sub-folder + or path: `base`, `overlays/dev`, `default`, `odh` etc.' + type: string + uri: + default: "" + description: uri is the URI point to a git repo with tag/branch. + e.g. https://github.com/org/repo/tarball/ + type: string + type: object + type: array + type: object + type: object + status: + description: RayStatus defines the observed state of Ray + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. 
+ This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + format: int64 + type: integer + phase: + type: string + type: object + type: object + x-kubernetes-validations: + - message: Ray name must be default-ray + rule: self.metadata.name == 'default-ray' + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/components.platform.opendatahub.io_trainingoperators.yaml b/config/crd/bases/components.platform.opendatahub.io_trainingoperators.yaml new file mode 100644 index 00000000000..82b7262849b --- /dev/null +++ b/config/crd/bases/components.platform.opendatahub.io_trainingoperators.yaml @@ -0,0 +1,153 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: trainingoperators.components.platform.opendatahub.io +spec: + group: components.platform.opendatahub.io + names: + kind: TrainingOperator + listKind: TrainingOperatorList + plural: trainingoperators + singular: trainingoperator + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Ready + jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - description: Reason + jsonPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: TrainingOperator is the Schema for the trainingoperators API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TrainingOperatorSpec defines the desired state of TrainingOperator + properties: + devFlags: + description: Add developer fields + properties: + manifests: + description: List of custom manifests for the given component + items: + properties: + contextDir: + default: manifests + description: contextDir is the relative path to the folder + containing manifests in a repository, default value "manifests" + type: string + sourcePath: + default: "" + description: 'sourcePath is the subpath within contextDir + where kustomize builds start. Examples include any sub-folder + or path: `base`, `overlays/dev`, `default`, `odh` etc.' + type: string + uri: + default: "" + description: uri is the URI point to a git repo with tag/branch. + e.g. https://github.com/org/repo/tarball/ + type: string + type: object + type: array + type: object + type: object + status: + description: TrainingOperatorStatus defines the observed state of TrainingOperator + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
+ format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + format: int64 + type: integer + phase: + type: string + type: object + type: object + x-kubernetes-validations: + - message: TrainingOperator name must be default-trainingoperator + rule: self.metadata.name == 'default-trainingoperator' + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/components.platform.opendatahub.io_trustyais.yaml b/config/crd/bases/components.platform.opendatahub.io_trustyais.yaml new file mode 100644 index 00000000000..10f42dfb24e --- /dev/null +++ b/config/crd/bases/components.platform.opendatahub.io_trustyais.yaml @@ -0,0 +1,153 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: trustyais.components.platform.opendatahub.io +spec: + group: components.platform.opendatahub.io + names: + kind: TrustyAI + listKind: TrustyAIList + plural: trustyais + singular: trustyai + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Ready + jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - description: Reason + jsonPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: TrustyAI is the Schema for the trustyais API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: TrustyAISpec defines the desired state of TrustyAI + properties: + devFlags: + description: Add developer fields + properties: + manifests: + description: List of custom manifests for the given component + items: + properties: + contextDir: + default: manifests + description: contextDir is the relative path to the folder + containing manifests in a repository, default value "manifests" + type: string + sourcePath: + default: "" + description: 'sourcePath is the subpath within contextDir + where kustomize builds start. Examples include any sub-folder + or path: `base`, `overlays/dev`, `default`, `odh` etc.' + type: string + uri: + default: "" + description: uri is the URI point to a git repo with tag/branch. + e.g. https://github.com/org/repo/tarball/ + type: string + type: object + type: array + type: object + type: object + status: + description: TrustyAIStatus defines the observed state of TrustyAI + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. 
+ This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + format: int64 + type: integer + phase: + type: string + type: object + type: object + x-kubernetes-validations: + - message: TrustyAI name must be default-trustyai + rule: self.metadata.name == 'default-trustyai' + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/components.platform.opendatahub.io_workbenches.yaml b/config/crd/bases/components.platform.opendatahub.io_workbenches.yaml new file mode 100644 index 00000000000..e928970082d --- /dev/null +++ b/config/crd/bases/components.platform.opendatahub.io_workbenches.yaml @@ -0,0 +1,153 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: workbenches.components.platform.opendatahub.io +spec: + group: components.platform.opendatahub.io + names: + kind: Workbenches + listKind: WorkbenchesList + plural: workbenches + singular: workbenches + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Ready + jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - description: Reason + jsonPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: Workbenches is the Schema for the workbenches API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WorkbenchesSpec defines the desired state of Workbenches + properties: + devFlags: + description: Add developer fields + properties: + manifests: + description: List of custom manifests for the given component + items: + properties: + contextDir: + default: manifests + description: contextDir is the relative path to the folder + containing manifests in a repository, default value "manifests" + type: string + sourcePath: + default: "" + description: 'sourcePath is the subpath within contextDir + where kustomize builds start. Examples include any sub-folder + or path: `base`, `overlays/dev`, `default`, `odh` etc.' + type: string + uri: + default: "" + description: uri is the URI point to a git repo with tag/branch. + e.g. https://github.com/org/repo/tarball/ + type: string + type: object + type: array + type: object + type: object + status: + description: WorkbenchesStatus defines the observed state of Workbenches + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
+ format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + format: int64 + type: integer + phase: + type: string + type: object + type: object + x-kubernetes-validations: + - message: Workbenches name must be default-workbenches + rule: self.metadata.name == 'default-workbenches' + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/datasciencecluster.opendatahub.io_datascienceclusters.yaml b/config/crd/bases/datasciencecluster.opendatahub.io_datascienceclusters.yaml index 59cfce63c3d..09efb4b9132 100644 --- a/config/crd/bases/datasciencecluster.opendatahub.io_datascienceclusters.yaml +++ b/config/crd/bases/datasciencecluster.opendatahub.io_datascienceclusters.yaml @@ -48,7 +48,7 @@ spec: codeflare: description: |- CodeFlare component configuration. - If CodeFlare Operator has been installed in the cluster, it should be uninstalled first before enabled component. + If CodeFlare Operator has been installed in the cluster, it should be uninstalled first before enabling component. properties: devFlags: description: Add developer fields @@ -141,8 +141,8 @@ spec: type: object datasciencepipelines: description: |- - DataServicePipeline component configuration. - Require OpenShift Pipelines Operator to be installed before enable component + DataSciencePipeline component configuration. + Requires OpenShift Pipelines Operator to be installed before enable component properties: devFlags: description: Add developer fields @@ -190,7 +190,7 @@ spec: kserve: description: |- Kserve component configuration. 
- Require OpenShift Serverless and OpenShift Service Mesh Operators to be installed before enable component + Requires OpenShift Serverless and OpenShift Service Mesh Operators to be installed before enable component Does not support enabled ModelMeshServing at the same time properties: defaultDeploymentMode: @@ -245,6 +245,17 @@ spec: - Removed pattern: ^(Managed|Unmanaged|Force|Removed)$ type: string + nim: + description: Configures and enables NVIDIA NIM integration + properties: + managementState: + default: Managed + enum: + - Managed + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + type: object serving: description: |- Serving configures the KNative-Serving stack used for model serving. A Service @@ -445,7 +456,7 @@ spec: default: rhoai-model-registries description: Namespace for model registries to be installed, configurable only once when model registry is enabled, defaults - to "rhoai-model-registries" + to "odh-model-registries" maxLength: 63 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?)?$ type: string @@ -648,12 +659,213 @@ spec: components: description: Expose component's specific status properties: + codeflare: + description: CodeFlare component status. + properties: + managementState: + description: |- + Set to one of the following values: + + - "Managed" : the operator is actively managing the component and trying to keep it active. + It will only upgrade the component if it is safe to do so + + - "Removed" : the operator is actively managing the component and will not install it, + or if it is installed, the operator will try to remove it + enum: + - Managed + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + type: object + dashboard: + description: Dashboard component status. + properties: + managementState: + description: |- + Set to one of the following values: + + - "Managed" : the operator is actively managing the component and trying to keep it active. 
+ It will only upgrade the component if it is safe to do so + + - "Removed" : the operator is actively managing the component and will not install it, + or if it is installed, the operator will try to remove it + enum: + - Managed + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + url: + type: string + type: object + datasciencepipelines: + description: DataSciencePipeline component status. + properties: + managementState: + description: |- + Set to one of the following values: + + - "Managed" : the operator is actively managing the component and trying to keep it active. + It will only upgrade the component if it is safe to do so + + - "Removed" : the operator is actively managing the component and will not install it, + or if it is installed, the operator will try to remove it + enum: + - Managed + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + type: object + kserve: + description: Kserve component status. + properties: + defaultDeploymentMode: + description: |- + DefaultDeploymentMode is the value of the defaultDeploymentMode field + as read from the "deploy" JSON in the inferenceservice-config ConfigMap + type: string + managementState: + description: |- + Set to one of the following values: + + - "Managed" : the operator is actively managing the component and trying to keep it active. + It will only upgrade the component if it is safe to do so + + - "Removed" : the operator is actively managing the component and will not install it, + or if it is installed, the operator will try to remove it + enum: + - Managed + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + type: object + kueue: + description: Kueue component status. + properties: + managementState: + description: |- + Set to one of the following values: + + - "Managed" : the operator is actively managing the component and trying to keep it active. 
+ It will only upgrade the component if it is safe to do so + + - "Removed" : the operator is actively managing the component and will not install it, + or if it is installed, the operator will try to remove it + enum: + - Managed + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + type: object + modelmeshserving: + description: ModelMeshServing component status. + properties: + managementState: + description: |- + Set to one of the following values: + + - "Managed" : the operator is actively managing the component and trying to keep it active. + It will only upgrade the component if it is safe to do so + + - "Removed" : the operator is actively managing the component and will not install it, + or if it is installed, the operator will try to remove it + enum: + - Managed + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + type: object modelregistry: - description: ModelRegistry component status + description: ModelRegistry component status. properties: + managementState: + description: |- + Set to one of the following values: + + - "Managed" : the operator is actively managing the component and trying to keep it active. + It will only upgrade the component if it is safe to do so + + - "Removed" : the operator is actively managing the component and will not install it, + or if it is installed, the operator will try to remove it + enum: + - Managed + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string registriesNamespace: type: string type: object + ray: + description: Ray component status. + properties: + managementState: + description: |- + Set to one of the following values: + + - "Managed" : the operator is actively managing the component and trying to keep it active. 
+ It will only upgrade the component if it is safe to do so + + - "Removed" : the operator is actively managing the component and will not install it, + or if it is installed, the operator will try to remove it + enum: + - Managed + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + type: object + trainingoperator: + description: Training Operator component status. + properties: + managementState: + description: |- + Set to one of the following values: + + - "Managed" : the operator is actively managing the component and trying to keep it active. + It will only upgrade the component if it is safe to do so + + - "Removed" : the operator is actively managing the component and will not install it, + or if it is installed, the operator will try to remove it + enum: + - Managed + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + type: object + trustyai: + description: TrustyAI component status. + properties: + managementState: + description: |- + Set to one of the following values: + + - "Managed" : the operator is actively managing the component and trying to keep it active. + It will only upgrade the component if it is safe to do so + + - "Removed" : the operator is actively managing the component and will not install it, + or if it is installed, the operator will try to remove it + enum: + - Managed + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + type: object + workbenches: + description: Workbenches component status. + properties: + managementState: + description: |- + Set to one of the following values: + + - "Managed" : the operator is actively managing the component and trying to keep it active. 
+ It will only upgrade the component if it is safe to do so + + - "Removed" : the operator is actively managing the component and will not install it, + or if it is installed, the operator will try to remove it + enum: + - Managed + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + type: object type: object conditions: description: Conditions describes the state of the DataScienceCluster @@ -691,6 +903,10 @@ spec: type: boolean description: List of components with status if installed or not type: object + observedGeneration: + description: The generation observed by the deployment controller. + format: int64 + type: integer phase: description: |- Phase describes the Phase of DataScienceCluster reconciliation state diff --git a/config/crd/bases/dscinitialization.opendatahub.io_dscinitializations.yaml b/config/crd/bases/dscinitialization.opendatahub.io_dscinitializations.yaml index 453b1ac191e..882d7ca7e91 100644 --- a/config/crd/bases/dscinitialization.opendatahub.io_dscinitializations.yaml +++ b/config/crd/bases/dscinitialization.opendatahub.io_dscinitializations.yaml @@ -67,8 +67,13 @@ spec: Internal development useful field to test customizations. This is not recommended to be used in production environment. properties: + logLevel: + description: Override Zap log level. Can be "debug", "info", "error" + or a number (more verbose). + type: string logmode: default: production + description: '## DEPRECATED ##: Ignored, use LogLevel instead' enum: - devel - development @@ -77,7 +82,9 @@ spec: - default type: string manifestsUri: - description: Custom manifests uri for odh-manifests + description: |- + ## DEPRECATED ## : ManifestsUri set on DSCI is not maintained. + Custom manifests uri for odh-manifests type: string type: object monitoring: @@ -86,18 +93,22 @@ spec: managementState: description: |- Set to one of the following values: + - "Managed" : the operator is actively managing the component and trying to keep it active. 
- It will only upgrade the component if it is safe to do so. + It will only upgrade the component if it is safe to do so + - "Removed" : the operator is actively managing the component and will not install it, - or if it is installed, the operator will try to remove it. + or if it is installed, the operator will try to remove it enum: - Managed - Removed pattern: ^(Managed|Unmanaged|Force|Removed)$ type: string namespace: - default: redhat-ods-monitoring - description: Namespace for monitoring if it is enabled + default: opendatahub + description: |- + monitoring spec exposed to DSCI api + Namespace for monitoring if it is enabled maxLength: 63 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?)?$ type: string diff --git a/config/crd/bases/services.platform.opendatahub.io_auths.yaml b/config/crd/bases/services.platform.opendatahub.io_auths.yaml new file mode 100644 index 00000000000..704a6fa5dda --- /dev/null +++ b/config/crd/bases/services.platform.opendatahub.io_auths.yaml @@ -0,0 +1,138 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: auths.services.platform.opendatahub.io +spec: + group: services.platform.opendatahub.io + names: + kind: Auth + listKind: AuthList + plural: auths + singular: auth + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Ready + jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - description: Reason + jsonPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: Auth is the Schema for the auths API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: AuthSpec defines the desired state of Auth + properties: + adminGroups: + items: + type: string + type: array + allowedGroups: + items: + type: string + type: array + required: + - adminGroups + - allowedGroups + type: object + status: + description: AuthStatus defines the observed state of Auth + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. 
+ Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + format: int64 + type: integer + phase: + type: string + type: object + type: object + x-kubernetes-validations: + - message: Auth name must be auth + rule: self.metadata.name == 'auth' + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/services.platform.opendatahub.io_monitorings.yaml b/config/crd/bases/services.platform.opendatahub.io_monitorings.yaml new file mode 100644 index 00000000000..56e4caecb62 --- /dev/null +++ b/config/crd/bases/services.platform.opendatahub.io_monitorings.yaml @@ -0,0 +1,141 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.1 + name: monitorings.services.platform.opendatahub.io +spec: + group: services.platform.opendatahub.io + names: + kind: Monitoring + listKind: MonitoringList + plural: monitorings + singular: monitoring + scope: Cluster + versions: + - additionalPrinterColumns: + - description: Ready + jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - description: 
Reason + jsonPath: .status.conditions[?(@.type=="Ready")].reason + name: Reason + type: string + - description: URL + jsonPath: .status.url + name: URL + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: Monitoring is the Schema for the monitorings API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: MonitoringSpec defines the desired state of Monitoring + properties: + namespace: + default: opendatahub + description: |- + monitoring spec exposed to DSCI api + Namespace for monitoring if it is enabled + maxLength: 63 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?)?$ + type: string + type: object + status: + description: MonitoringStatus defines the observed state of Monitoring + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. 
+ This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + format: int64 + type: integer + phase: + type: string + url: + type: string + type: object + type: object + x-kubernetes-validations: + - message: Monitoring name must be default-monitoring + rule: self.metadata.name == 'default-monitoring' + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/external/config.openshift.io_authentications.yaml b/config/crd/external/config.openshift.io_authentications.yaml index 5755ad090c7..86ad306c7fd 100644 --- a/config/crd/external/config.openshift.io_authentications.yaml +++ b/config/crd/external/config.openshift.io_authentications.yaml @@ -172,4 +172,4 @@ spec: - spec type: object served: true - storage: true \ No newline at end of file + storage: true diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 4ae07b7fe22..b13754a12a2 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -5,6 +5,20 @@ resources: - bases/dscinitialization.opendatahub.io_dscinitializations.yaml - bases/datasciencecluster.opendatahub.io_datascienceclusters.yaml - bases/features.opendatahub.io_featuretrackers.yaml +- bases/components.platform.opendatahub.io_dashboards.yaml +- bases/components.platform.opendatahub.io_workbenches.yaml +- bases/components.platform.opendatahub.io_modelcontrollers.yaml +- bases/components.platform.opendatahub.io_modelmeshservings.yaml +- bases/components.platform.opendatahub.io_datasciencepipelines.yaml +- bases/components.platform.opendatahub.io_kserves.yaml +- bases/components.platform.opendatahub.io_kueues.yaml +- bases/components.platform.opendatahub.io_codeflares.yaml +- 
bases/components.platform.opendatahub.io_rays.yaml
+- bases/components.platform.opendatahub.io_trustyais.yaml
+- bases/components.platform.opendatahub.io_modelregistries.yaml
+- bases/components.platform.opendatahub.io_trainingoperators.yaml
+- bases/services.platform.opendatahub.io_monitorings.yaml
+- bases/services.platform.opendatahub.io_auths.yaml
 #+kubebuilder:scaffold:crdkustomizeresource
 
 # patches:
@@ -13,6 +27,8 @@ resources:
 #- patches/webhook_in_dscinitiatlizations.yaml
 #- patches/webhook_in_dscinitializations.yaml
 #- patches/webhook_in_datascienceclusters.yaml
+#- patches/webhook_in_services_monitorings.yaml
+#- patches/webhook_in_services_auths.yaml
 #+kubebuilder:scaffold:crdkustomizewebhookpatch
 
 # [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix.
@@ -20,6 +36,8 @@ resources:
 #- patches/cainjection_in_dscinitiatlizations.yaml
 #- patches/cainjection_in_dscinitializations.yaml
 #- patches/cainjection_in_datascienceclusters.yaml
+#- patches/cainjection_in_services_monitorings.yaml
+#- patches/cainjection_in_services_auths.yaml
 #+kubebuilder:scaffold:crdkustomizecainjectionpatch
 
 # the following config is for teaching kustomize how to do kustomization for CRDs.
diff --git a/config/crd/patches/cainjection_in_services_auths.yaml b/config/crd/patches/cainjection_in_services_auths.yaml
new file mode 100644
index 00000000000..e000d31baee
--- /dev/null
+++ b/config/crd/patches/cainjection_in_services_auths.yaml
@@ -0,0 +1,7 @@
+# The following patch adds a directive for certmanager to inject CA into the CRD
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
+  name: auths.services.platform.opendatahub.io
diff --git a/config/crd/patches/cainjection_in_services_monitorings.yaml b/config/crd/patches/cainjection_in_services_monitorings.yaml
new file mode 100644
index 00000000000..24df40f09b0
--- /dev/null
+++ b/config/crd/patches/cainjection_in_services_monitorings.yaml
@@ -0,0 +1,7 @@
+# The following patch adds a directive for certmanager to inject CA into the CRD
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
+  name: monitorings.services.platform.opendatahub.io
diff --git a/config/crd/patches/webhook_in_services_auths.yaml b/config/crd/patches/webhook_in_services_auths.yaml
new file mode 100644
index 00000000000..0076c3ff995
--- /dev/null
+++ b/config/crd/patches/webhook_in_services_auths.yaml
@@ -0,0 +1,16 @@
+# The following patch enables a conversion webhook for the CRD
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  name: auths.services.platform.opendatahub.io
+spec:
+  conversion:
+    strategy: Webhook
+    webhook:
+      clientConfig:
+        service:
+          namespace: system
+          name: webhook-service
+          path: /convert
+      conversionReviewVersions:
+      - v1
diff --git a/config/crd/patches/webhook_in_services_monitorings.yaml b/config/crd/patches/webhook_in_services_monitorings.yaml
new file mode 100644
index 00000000000..00aaa6ac03a
--- /dev/null
+++ 
b/config/crd/patches/webhook_in_services_monitorings.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: monitorings.services.platform.opendatahub.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/manager/kustomization.yaml.in b/config/manager/kustomization.yaml.in index 2663ca77593..f3dcf81a77b 100644 --- a/config/manager/kustomization.yaml.in +++ b/config/manager/kustomization.yaml.in @@ -1,12 +1,11 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -images: -- name: controller - newName: REPLACE_IMAGE - resources: - manager.yaml generatorOptions: disableNameSuffixHash: true +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +images: +- name: controller + newName: REPLACE_IMAGE diff --git a/config/manifests/bases/opendatahub-operator.clusterserviceversion.yaml b/config/manifests/bases/opendatahub-operator.clusterserviceversion.yaml new file mode 100644 index 00000000000..e69de29bb2d diff --git a/config/manifests/bases/rhods-operator.clusterserviceversion.yaml b/config/manifests/bases/rhods-operator.clusterserviceversion.yaml index 5223f9e2b3f..06cecf24fee 100644 --- a/config/manifests/bases/rhods-operator.clusterserviceversion.yaml +++ b/config/manifests/bases/rhods-operator.clusterserviceversion.yaml @@ -92,6 +92,21 @@ spec: apiservicedefinitions: {} customresourcedefinitions: owned: + - description: Auth is the Schema for the auths API + displayName: Auth + kind: Auth + name: auths.services.platform.opendatahub.io + version: v1alpha1 + - description: CodeFlare is the Schema for the codeflares API + displayName: Code Flare + kind: CodeFlare + name: codeflares.components.platform.opendatahub.io + version: v1alpha1 + - description: Dashboard is the Schema for the 
dashboards API + displayName: Dashboard + kind: Dashboard + name: dashboards.components.platform.opendatahub.io + version: v1alpha1 - description: DataScienceCluster is the Schema for the datascienceclusters API. displayName: Data Science Cluster kind: DataScienceCluster @@ -101,6 +116,12 @@ spec: displayName: Components path: components version: v1 + - description: DataSciencePipelines is the Schema for the datasciencepipelines + API + displayName: Data Science Pipelines + kind: DataSciencePipelines + name: datasciencepipelines.components.platform.opendatahub.io + version: v1alpha1 - description: DSCInitialization is the Schema for the dscinitializations API. displayName: DSC Initialization kind: DSCInitialization @@ -136,6 +157,51 @@ spec: displayName: Conditions path: conditions version: v1 + - description: Kserve is the Schema for the kserves API + displayName: Kserve + kind: Kserve + name: kserves.components.platform.opendatahub.io + version: v1alpha1 + - description: Kueue is the Schema for the kueues API + displayName: Kueue + kind: Kueue + name: kueues.components.platform.opendatahub.io + version: v1alpha1 + - description: ModelMeshServing is the Schema for the modelmeshservings API + displayName: Model Mesh Serving + kind: ModelMeshServing + name: modelmeshservings.components.platform.opendatahub.io + version: v1alpha1 + - description: ModelRegistry is the Schema for the modelregistries API + displayName: Model Registry + kind: ModelRegistry + name: modelregistries.components.platform.opendatahub.io + version: v1alpha1 + - description: Monitoring is the Schema for the monitorings API + displayName: Monitoring + kind: Monitoring + name: monitorings.services.platform.opendatahub.io + version: v1alpha1 + - description: Ray is the Schema for the rays API + displayName: Ray + kind: Ray + name: rays.components.platform.opendatahub.io + version: v1alpha1 + - description: TrainingOperator is the Schema for the trainingoperators API + displayName: Training Operator 
+ kind: TrainingOperator + name: trainingoperators.components.platform.opendatahub.io + version: v1alpha1 + - description: TrustyAI is the Schema for the trustyais API + displayName: Trusty AI + kind: TrustyAI + name: trustyais.components.platform.opendatahub.io + version: v1alpha1 + - description: Workbenches is the Schema for the workbenches API + displayName: Workbenches + kind: Workbenches + name: workbenches.components.platform.opendatahub.io + version: v1alpha1 description: This will be replaced by Kustomize displayName: Red Hat OpenShift AI icon: diff --git a/config/monitoring/prometheus/apps/prometheus-configs.yaml b/config/monitoring/prometheus/apps/prometheus-configs.yaml index 8693a7797a6..a9d8f089536 100644 --- a/config/monitoring/prometheus/apps/prometheus-configs.yaml +++ b/config/monitoring/prometheus/apps/prometheus-configs.yaml @@ -534,11 +534,11 @@ data: groups: - name: SLOs-probe_success_codeflare rules: - - alert: CodeFlare Operator Probe Success 5m and 1h Burn Rate high + - alert: CodeFlare Operator Probe Success Burn Rate annotations: message: 'High error budget burn for {{ $labels.instance }} (current value: {{ $value }}).' triage: 'https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/Distributed-Workloads/codeflare-operator-availability.md' - summary: CodeFlare Operator Probe Success 5m and 1h Burn Rate high + summary: CodeFlare Operator Probe Success Burn Rate expr: | sum(probe_success:burnrate5m{instance=~"codeflare-operator"}) by (instance) > (14.40 * (1-0.99950)) and @@ -547,11 +547,11 @@ data: labels: severity: info namespace: redhat-ods-applications - - alert: CodeFlare Operator Probe Success 30m and 6h Burn Rate high + - alert: CodeFlare Operator Probe Success Burn Rate annotations: message: 'High error budget burn for {{ $labels.instance }} (current value: {{ $value }}).' 
triage: 'https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/Distributed-Workloads/codeflare-operator-probe-success-burn-rate.md' - summary: CodeFlare Operator Probe Success 30m and 6h Burn Rate high + summary: CodeFlare Operator Probe Success Burn Rate expr: | sum(probe_success:burnrate30m{instance=~"codeflare-operator"}) by (instance) > (6.00 * (1-0.99950)) and @@ -560,11 +560,11 @@ data: labels: severity: info namespace: redhat-ods-applications - - alert: CodeFlare Operator Probe Success 2h and 1d Burn Rate high + - alert: CodeFlare Operator Probe Success Burn Rate annotations: message: 'High error budget burn for {{ $labels.instance }} (current value: {{ $value }}).' triage: 'https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/Distributed-Workloads/codeflare-operator-probe-success-burn-rate.md' - summary: CodeFlare Operator Probe Success 2h and 1d Burn Rate high + summary: CodeFlare Operator Probe Success Burn Rate expr: | sum(probe_success:burnrate2h{instance=~"codeflare-operator"}) by (instance) > (3.00 * (1-0.99950)) and @@ -703,11 +703,11 @@ data: groups: - name: SLOs-haproxy_backend_http_responses_dashboard rules: - - alert: RHODS Dashboard Route Error 5m and 1h Burn Rate high + - alert: RHODS Dashboard Route Error Burn Rate annotations: message: 'High error budget burn for {{ $labels.route }} (current value: {{ $value }}).' 
triage: 'https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/RHODS-Dashboard/rhods-error-burn-rate.md' - summary: RHODS Dashboard Route Error 5m and 1h Burn Rate high + summary: RHODS Dashboard Route Error Burn Rate expr: | sum(haproxy_backend_http_responses_total:burnrate5m{route=~"rhods-dashboard"}) by (route) > (14.40 * (1-0.99950)) and @@ -716,11 +716,11 @@ data: labels: severity: critical namespace: redhat-ods-applications - - alert: RHODS Dashboard Route Error 30m and 6h Burn Rate high + - alert: RHODS Dashboard Route Error Burn Rate annotations: message: 'High error budget burn for {{ $labels.route }} (current value: {{ $value }}).' triage: 'https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/RHODS-Dashboard/rhods-error-burn-rate.md' - summary: RHODS Dashboard Route Error 30m and 6h Burn Rate high + summary: RHODS Dashboard Route Error Burn Rate expr: | sum(haproxy_backend_http_responses_total:burnrate30m{route=~"rhods-dashboard"}) by (route) > (6.00 * (1-0.99950)) and @@ -729,11 +729,11 @@ data: labels: severity: critical namespace: redhat-ods-applications - - alert: RHODS Dashboard Route Error 2h and 1d Burn Rate high + - alert: RHODS Dashboard Route Error Burn Rate annotations: message: 'High error budget burn for {{ $labels.route }} (current value: {{ $value }}).' 
triage: 'https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/RHODS-Dashboard/rhods-error-burn-rate.md' - summary: RHODS Dashboard Route Error 2h and 1d Burn Rate high + summary: RHODS Dashboard Route Error Burn Rate expr: | sum(haproxy_backend_http_responses_total:burnrate2h{route=~"rhods-dashboard"}) by (route) > (3.00 * (1-0.99950)) and @@ -742,11 +742,11 @@ data: labels: severity: warning namespace: redhat-ods-applications - - alert: RHODS Dashboard Route Error 6h and 3d Burn Rate high + - alert: RHODS Dashboard Route Error Burn Rate annotations: message: 'High error budget burn for {{ $labels.route }} (current value: {{ $value }}).' triage: 'https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/RHODS-Dashboard/rhods-error-burn-rate.md' - summary: RHODS Dashboard Route Error 6h and 3d Burn Rate high + summary: RHODS Dashboard Route Error Burn Rate expr: | sum(haproxy_backend_http_responses_total:burnrate6h{route=~"rhods-dashboard"}) by (route) > (1.00 * (1-0.99950)) and @@ -757,11 +757,11 @@ data: namespace: redhat-ods-applications - name: SLOs-probe_success_dashboard rules: - - alert: RHODS Dashboard Probe Success 5m and 1h Burn Rate high + - alert: RHODS Dashboard Probe Success Burn Rate annotations: message: 'High error budget burn for {{ $labels.name }} (current value: {{ $value }}).' 
triage: "https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/RHODS-Dashboard/rhods-dashboard-probe-success-burn-rate.md" - summary: RHODS Dashboard Probe Success 5m and 1h Burn Rate high + summary: RHODS Dashboard Probe Success Burn Rate expr: | sum(probe_success:burnrate5m{name=~"rhods-dashboard"}) by (name) > (14.40 * (1-0.98)) and @@ -770,11 +770,11 @@ data: labels: severity: critical namespace: redhat-ods-applications - - alert: RHODS Dashboard Probe Success 30m and 6h Burn Rate high + - alert: RHODS Dashboard Probe Success Burn Rate annotations: message: 'High error budget burn for {{ $labels.name }} (current value: {{ $value }}).' triage: "https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/RHODS-Dashboard/rhods-dashboard-probe-success-burn-rate.md" - summary: RHODS Dashboard Probe Success 30m and 6h Burn Rate high + summary: RHODS Dashboard Probe Success Burn Rate expr: | sum(probe_success:burnrate30m{name=~"rhods-dashboard"}) by (name) > (6.00 * (1-0.98)) and @@ -783,11 +783,11 @@ data: labels: severity: critical namespace: redhat-ods-applications - - alert: RHODS Dashboard Probe Success 2h and 1d Burn Rate high + - alert: RHODS Dashboard Probe Success Burn Rate annotations: message: 'High error budget burn for {{ $labels.name }} (current value: {{ $value }}).' 
triage: "https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/RHODS-Dashboard/rhods-dashboard-probe-success-burn-rate.md" - summary: RHODS Dashboard Probe Success 2h and 1d Burn Rate high + summary: RHODS Dashboard Probe Success Burn Rate expr: | sum(probe_success:burnrate2h{name=~"rhods-dashboard"}) by (name) > (3.00 * (1-0.98)) and @@ -796,11 +796,11 @@ data: labels: severity: warning namespace: redhat-ods-applications - - alert: RHODS Dashboard Probe Success 6h and 3d Burn Rate high + - alert: RHODS Dashboard Probe Success Burn Rate annotations: message: 'High error budget burn for {{ $labels.name }} (current value: {{ $value }}).' triage: "https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/RHODS-Dashboard/rhods-dashboard-probe-success-burn-rate.md" - summary: RHODS Dashboard Probe Success 6h and 3d Burn Rate high + summary: RHODS Dashboard Probe Success Burn Rate expr: | sum(probe_success:burnrate6h{name=~"rhods-dashboard"}) by (name) > (1.00 * (1-0.98)) and @@ -906,11 +906,11 @@ data: groups: - name: SLOs-haproxy_backend_http_responses_dsp rules: - - alert: Data Science Pipelines Application Route Error 5m and 1h Burn Rate high + - alert: Data Science Pipelines Application Route Error Burn Rate annotations: message: 'High error budget burn for {{ $labels.route }} (current value: {{ $value }}).' 
triage: 'https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/Data-Science-Pipelines/data-science-pipelines-application-error-burn-rate.md' - summary: Data Science Pipelines Application Route Error 5m and 1h Burn Rate high + summary: Data Science Pipelines Application Route Error Burn Rate expr: | sum(haproxy_backend_http_responses_total:burnrate5m{component="dsp"}) by (exported_namespace) > (14.40 * (1-0.99950)) and @@ -919,11 +919,11 @@ data: labels: severity: info namespace: redhat-ods-applications - - alert: Data Science Pipelines Application Route Error 30m and 6h Burn Rate high + - alert: Data Science Pipelines Application Route Error Burn Rate annotations: message: 'High error budget burn for {{ $labels.route }} (current value: {{ $value }}).' triage: 'https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/Data-Science-Pipelines/data-science-pipelines-application-error-burn-rate.md' - summary: Data Science Pipelines Application Route Error 30m and 6h Burn Rate high + summary: Data Science Pipelines Application Route Error Burn Rate expr: | sum(haproxy_backend_http_responses_total:burnrate30m{component="dsp"}) by (exported_namespace) > (6.00 * (1-0.99950)) and @@ -932,11 +932,11 @@ data: labels: severity: info namespace: redhat-ods-applications - - alert: Data Science Pipelines Application Route Error 2h and 1d Burn Rate high + - alert: Data Science Pipelines Application Route Error Burn Rate annotations: message: 'High error budget burn for {{ $labels.route }} (current value: {{ $value }}).' 
triage: 'https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/Data-Science-Pipelines/data-science-pipelines-application-error-burn-rate.md' - summary: Data Science Pipelines Application Route Error 2h and 1d Burn Rate high + summary: Data Science Pipelines Application Route Error Burn Rate expr: | sum(haproxy_backend_http_responses_total:burnrate2h{component="dsp"}) by (exported_namespace) > (3.00 * (1-0.99950)) and @@ -945,11 +945,11 @@ data: labels: severity: info namespace: redhat-ods-applications - - alert: Data Science Pipelines Application Route Error 6h and 3d Burn Rate high + - alert: Data Science Pipelines Application Route Error Burn Rate annotations: message: 'High error budget burn for {{ $labels.route }} (current value: {{ $value }}).' triage: 'https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/Data-Science-Pipelines/data-science-pipelines-application-error-burn-rate.md' - summary: Data Science Pipelines Application Route Error 6h and 3d Burn Rate high + summary: Data Science Pipelines Application Route Error Burn Rate expr: | sum(haproxy_backend_http_responses_total:burnrate6h{component="dsp"}) by (exported_namespace) > (1.00 * (1-0.99950)) and @@ -960,11 +960,11 @@ data: namespace: redhat-ods-applications - name: SLOs-probe_success_dsp rules: - - alert: Data Science Pipelines Operator Probe Success 5m and 1h Burn Rate high + - alert: Data Science Pipelines Operator Probe Success Burn Rate annotations: message: 'High error budget burn for {{ $labels.instance }} (current value: {{ $value }}).' 
triage: "https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/Data-Science-Pipelines/data-science-pipelines-operator-probe-success-burn-rate.md" - summary: Data Science Pipelines Operator Probe Success 5m and 1h Burn Rate high + summary: Data Science Pipelines Operator Probe Success Burn Rate expr: | sum(probe_success:burnrate5m{instance=~"data-science-pipelines-operator"}) by (instance) > (14.40 * (1-0.98000)) and @@ -973,11 +973,11 @@ data: labels: severity: critical namespace: redhat-ods-applications - - alert: Data Science Pipelines Operator Probe Success 30m and 6h Burn Rate high + - alert: Data Science Pipelines Operator Probe Success Burn Rate annotations: message: 'High error budget burn for {{ $labels.instance }} (current value: {{ $value }}).' triage: "https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/Data-Science-Pipelines/data-science-pipelines-operator-probe-success-burn-rate.md" - summary: Data Science Pipelines Operator Probe Success 30m and 6h Burn Rate high + summary: Data Science Pipelines Operator Probe Success Burn Rate expr: | sum(probe_success:burnrate30m{instance=~"data-science-pipelines-operator"}) by (instance) > (6.00 * (1-0.98000)) and @@ -986,11 +986,11 @@ data: labels: severity: critical namespace: redhat-ods-applications - - alert: Data Science Pipelines Operator Probe Success 2h and 1d Burn Rate high + - alert: Data Science Pipelines Operator Probe Success Burn Rate annotations: message: 'High error budget burn for {{ $labels.instance }} (current value: {{ $value }}).' 
triage: "https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/Data-Science-Pipelines/data-science-pipelines-operator-probe-success-burn-rate.md" - summary: Data Science Pipelines Operator Probe Success 2h and 1d Burn Rate high + summary: Data Science Pipelines Operator Probe Success Burn Rate expr: | sum(probe_success:burnrate2h{instance=~"data-science-pipelines-operator"}) by (instance) > (3.00 * (1-0.98000)) and @@ -1091,11 +1091,11 @@ data: groups: - name: SLOs-probe_success_modelmesh rules: - - alert: Modelmesh Controller Probe Success 5m and 1h Burn Rate high + - alert: Modelmesh Controller Probe Success Burn Rate annotations: message: 'High error budget burn for {{ $labels.instance }} (current value: {{ $value }}).' triage: "https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/Model-Serving/rhods-modelmesh-controller-probe-success-burn-rate.md" - summary: Modelmesh Controller Probe Success 5m and 1h Burn Rate high + summary: Modelmesh Controller Probe Success Burn Rate expr: | sum(probe_success:burnrate5m{instance=~"modelmesh-controller"}) by (instance) > (14.40 * (1-0.98000)) and @@ -1104,11 +1104,11 @@ data: labels: severity: critical namespace: redhat-ods-applications - - alert: Modelmesh Controller Probe Success 30m and 6h Burn Rate high + - alert: Modelmesh Controller Probe Success Burn Rate annotations: message: 'High error budget burn for {{ $labels.instance }} (current value: {{ $value }}).' 
triage: "https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/Model-Serving/rhods-modelmesh-controller-probe-success-burn-rate.md" - summary: Modelmesh Controller Probe Success 30m and 6h Burn Rate high + summary: Modelmesh Controller Probe Success Burn Rate expr: | sum(probe_success:burnrate30m{instance=~"modelmesh-controller"}) by (instance) > (6.00 * (1-0.98000)) and @@ -1117,11 +1117,11 @@ data: labels: severity: critical namespace: redhat-ods-applications - - alert: Modelmesh Controller Probe Success 2h and 1d Burn Rate high + - alert: Modelmesh Controller Probe Success Burn Rate annotations: message: 'High error budget burn for {{ $labels.instance }} (current value: {{ $value }}).' triage: "https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/Model-Serving/rhods-modelmesh-controller-probe-success-burn-rate.md" - summary: Modelmesh Controller Probe Success 2h and 1d Burn Rate high + summary: Modelmesh Controller Probe Success Burn Rate expr: | sum(probe_success:burnrate2h{instance=~"modelmesh-controller"}) by (instance) > (3.00 * (1-0.98000)) and @@ -1180,11 +1180,11 @@ data: groups: - name: SLOs-probe_success_model_controller rules: - - alert: ODH Model Controller Probe Success 5m and 1h Burn Rate high + - alert: ODH Model Controller Probe Success Burn Rate annotations: message: 'High error budget burn for {{ $labels.instance }} (current value: {{ $value }}).' 
triage: "https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/Model-Serving/rhods-odh-controller-probe-success-burn-rate.md" - summary: ODH Model Controller Probe Success 5m and 1h Burn Rate high + summary: ODH Model Controller Probe Success Burn Rate expr: | sum(probe_success:burnrate5m{instance=~"odh-model-controller"}) by (instance) > (14.40 * (1-0.98000)) and @@ -1193,11 +1193,11 @@ data: labels: severity: critical namespace: redhat-ods-applications - - alert: ODH Model Controller Probe Success 30m and 6h Burn Rate high + - alert: ODH Model Controller Probe Success Burn Rate annotations: message: 'High error budget burn for {{ $labels.instance }} (current value: {{ $value }}).' triage: "https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/Model-Serving/rhods-odh-controller-probe-success-burn-rate.md" - summary: ODH Model Controller Probe Success 30m and 6h Burn Rate high + summary: ODH Model Controller Probe Success Burn Rate expr: | sum(probe_success:burnrate30m{instance=~"odh-model-controller"}) by (instance) > (6.00 * (1-0.98000)) and @@ -1206,11 +1206,11 @@ data: labels: severity: critical namespace: redhat-ods-applications - - alert: ODH Model Controller Probe Success 2h and 1d Burn Rate high + - alert: ODH Model Controller Probe Success Burn Rate annotations: message: 'High error budget burn for {{ $labels.instance }} (current value: {{ $value }}).' 
triage: "https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/Model-Serving/rhods-odh-controller-probe-success-burn-rate.md" - summary: ODH Model Controller Probe Success 2h and 1d Burn Rate high + summary: ODH Model Controller Probe Success Burn Rate expr: | sum(probe_success:burnrate2h{instance=~"odh-model-controller"}) by (instance) > (3.00 * (1-0.98000)) and @@ -1269,11 +1269,11 @@ data: groups: - name: SLOs-probe_success_kserve rules: - - alert: Kserve Controller Probe Success 5m and 1h Burn Rate high + - alert: Kserve Controller Probe Success Burn Rate annotations: message: 'High error budget burn for {{ $labels.instance }} (current value: {{ $value }}).' triage: "https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/Model-Serving/rhods-kserve-controller-probe-success-burn-rate.md" - summary: Kserve Controller Probe Success 5m and 1h Burn Rate high + summary: Kserve Controller Probe Success Burn Rate expr: | sum(probe_success:burnrate5m{instance=~"kserve-controller-manager"}) by (instance) > (14.40 * (1-0.98000)) and @@ -1281,11 +1281,11 @@ data: for: 2m labels: severity: critical - - alert: Kserve Controller Probe Success 30m and 6h Burn Rate high + - alert: Kserve Controller Probe Success Burn Rate annotations: message: 'High error budget burn for {{ $labels.instance }} (current value: {{ $value }}).' 
triage: "https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/Model-Serving/rhods-kserve-controller-probe-success-burn-rate.md" - summary: Kserve Controller Probe Success 30m and 6h Burn Rate high + summary: Kserve Controller Probe Success Burn Rate expr: | sum(probe_success:burnrate30m{instance=~"kserve-controller-manager"}) by (instance) > (6.00 * (1-0.98000)) and @@ -1293,11 +1293,11 @@ data: for: 15m labels: severity: critical - - alert: Kserve Controller Probe Success 2h and 1d Burn Rate high + - alert: Kserve Controller Probe Success Burn Rate annotations: message: 'High error budget burn for {{ $labels.instance }} (current value: {{ $value }}).' triage: "https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/Model-Serving/rhods-kserve-controller-probe-success-burn-rate.md" - summary: Kserve Controller Probe Success 2h and 1d Burn Rate high + summary: Kserve Controller Probe Success Burn Rate expr: | sum(probe_success:burnrate2h{instance=~"kserve-controller-manager"}) by (instance) > (3.00 * (1-0.98000)) and @@ -1462,11 +1462,11 @@ data: - name: SLOs-probe_success_workbench rules: - - alert: RHODS Jupyter Probe Success 5m and 1h Burn Rate high + - alert: RHODS Jupyter Probe Success Burn Rate annotations: message: 'High error budget burn for {{ $labels.instance }} (current value: {{ $value }}).' 
triage: "https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/Jupyter/rhods-jupyter-probe-success-burn-rate.md" - summary: RHODS Jupyter Probe Success 5m and 1h Burn Rate high + summary: RHODS Jupyter Probe Success Burn Rate expr: | sum(probe_success:burnrate5m{instance=~"notebook-spawner"}) by (instance) > (14.40 * (1-0.98000)) and @@ -1475,11 +1475,11 @@ data: labels: severity: critical instance: notebook-spawner - - alert: RHODS Jupyter Probe Success 30m and 6h Burn Rate high + - alert: RHODS Jupyter Probe Success Burn Rate annotations: message: 'High error budget burn for {{ $labels.instance }} (current value: {{ $value }}).' triage: "https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/Jupyter/rhods-jupyter-probe-success-burn-rate.md" - summary: RHODS Jupyter Probe Success 30m and 6h Burn Rate high + summary: RHODS Jupyter Probe Success Burn Rate expr: | sum(probe_success:burnrate30m{instance=~"notebook-spawner"}) by (instance) > (6.00 * (1-0.98000)) and @@ -1488,11 +1488,11 @@ data: labels: severity: critical instance: notebook-spawner - - alert: RHODS Jupyter Probe Success 2h and 1d Burn Rate high + - alert: RHODS Jupyter Probe Success Burn Rate annotations: message: 'High error budget burn for {{ $labels.instance }} (current value: {{ $value }}).' triage: "https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/Jupyter/rhods-jupyter-probe-success-burn-rate.md" - summary: RHODS Jupyter Probe Success 2h and 1d Burn Rate high + summary: RHODS Jupyter Probe Success Burn Rate expr: | sum(probe_success:burnrate2h{instance=~"notebook-spawner"}) by (instance) > (3.00 * (1-0.98000)) and @@ -1501,11 +1501,11 @@ data: labels: severity: warning instance: notebook-spawner - - alert: RHODS Jupyter Probe Success 6h and 3d Burn Rate high + - alert: RHODS Jupyter Probe Success Burn Rate annotations: message: 'High error budget burn for {{ $labels.instance }} (current value: {{ $value }}).' 
triage: "https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/Jupyter/rhods-jupyter-probe-success-burn-rate.md" - summary: RHODS Jupyter Probe Success 6h and 3d Burn Rate high + summary: RHODS Jupyter Probe Success Burn Rate expr: | sum(probe_success:burnrate6h{instance=~"notebook-spawner"}) by (instance) > (1.00 * (1-0.98000)) and @@ -1563,11 +1563,11 @@ data: groups: - name: SLOs-probe_success_trustyai rules: - - alert: TrustyAI Controller Probe Success 5m and 1h Burn Rate high + - alert: TrustyAI Controller Probe Success Burn Rate annotations: message: 'High error budget burn for {{ $labels.instance }} (current value: {{ $value }}).' triage: "https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/Model-Serving/rhoai-trustyai-controller-probe-success-burn-rate.md" - summary: TrustyAI Controller Probe Success 5m and 1h Burn Rate high + summary: TrustyAI Controller Probe Success Burn Rate expr: | sum(probe_success:burnrate5m{instance=~"trustyai-service-operator-controller-manager"}) by (instance) > (14.40 * (1-0.98000)) and @@ -1576,11 +1576,11 @@ data: labels: severity: critical instance: trustyai-service-operator-controller-manager - - alert: TrustyAI Controller Probe Success 30m and 6h Burn Rate high + - alert: TrustyAI Controller Probe Success Burn Rate annotations: message: 'High error budget burn for {{ $labels.instance }} (current value: {{ $value }}).' 
triage: "https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/Model-Serving/rhoai-trustyai-controller-probe-success-burn-rate.md" - summary: TrustyAI Controller Probe Success 30m and 6h Burn Rate high + summary: TrustyAI Controller Probe Success Burn Rate expr: | sum(probe_success:burnrate30m{instance=~"trustyai-service-operator-controller-manager"}) by (instance) > (6.00 * (1-0.98000)) and @@ -1589,11 +1589,11 @@ data: labels: severity: critical instance: trustyai-service-operator-controller-manager - - alert: TrustyAI Controller Probe Success 2h and 1d Burn Rate high + - alert: TrustyAI Controller Probe Success Burn Rate annotations: message: 'High error budget burn for {{ $labels.instance }} (current value: {{ $value }}).' triage: "https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/Model-Serving/rhoai-trustyai-controller-probe-success-burn-rate.md" - summary: TrustyAI Controller Probe Success 2h and 1d Burn Rate high + summary: TrustyAI Controller Probe Success Burn Rate expr: | sum(probe_success:burnrate2h{instance=~"trustyai-service-operator-controller-manager"}) by (instance) > (3.00 * (1-0.98000)) and @@ -1652,11 +1652,11 @@ data: groups: - name: SLOs-probe_success_model_controller rules: - - alert: Model Registry Operator Probe Success 5m and 1h Burn Rate high + - alert: Model Registry Operator Probe Success Burn Rate annotations: message: 'High error budget burn for {{ $labels.instance }} (current value: {{ $value }}).' 
triage: "https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/Model-Serving/rhoai-model-registry-operator-probe-success-burn-rate.md" - summary: Model Registry Operator Probe Success 5m and 1h Burn Rate high + summary: Model Registry Operator Probe Success Burn Rate expr: | sum(probe_success:burnrate5m{instance=~"model-registry-operator"}) by (instance) > (14.40 * (1-0.98000)) and @@ -1665,11 +1665,11 @@ data: labels: severity: critical namespace: redhat-ods-applications - - alert: Model Registry Operator Probe Success 30m and 6h Burn Rate high + - alert: Model Registry Operator Probe Success Burn Rate annotations: message: 'High error budget burn for {{ $labels.instance }} (current value: {{ $value }}).' triage: "https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/Model-Serving/rhoai-model-registry-operator-probe-success-burn-rate.md" - summary: Model Registry Operator Probe Success 30m and 6h Burn Rate high + summary: Model Registry Operator Probe Success Burn Rate expr: | sum(probe_success:burnrate30m{instance=~"model-registry-operator"}) by (instance) > (6.00 * (1-0.98000)) and @@ -1678,11 +1678,11 @@ data: labels: severity: critical namespace: redhat-ods-applications - - alert: Model Registry Operator Probe Success 2h and 1d Burn Rate high + - alert: Model Registry Operator Probe Success Burn Rate annotations: message: 'High error budget burn for {{ $labels.instance }} (current value: {{ $value }}).' 
triage: "https://gitlab.cee.redhat.com/service/managed-tenants-sops/-/blob/main/RHODS/Model-Serving/rhoai-model-registry-operator-probe-success-burn-rate.md" - summary: Model Registry Operator Probe Success 2h and 1d Burn Rate high + summary: Model Registry Operator Probe Success Burn Rate expr: | sum(probe_success:burnrate2h{instance=~"model-registry-operator"}) by (instance) > (3.00 * (1-0.98000)) and diff --git a/config/rbac/components_codeflare_editor_role.yaml b/config/rbac/components_codeflare_editor_role.yaml new file mode 100644 index 00000000000..ebef0ad6ffe --- /dev/null +++ b/config/rbac/components_codeflare_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit codeflares. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: codeflare-editor-role +rules: +- apiGroups: + - components.platform.opendatahub.io + resources: + - codeflares + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - components.platform.opendatahub.io + resources: + - codeflares/status + verbs: + - get diff --git a/config/rbac/components_codeflare_viewer_role.yaml b/config/rbac/components_codeflare_viewer_role.yaml new file mode 100644 index 00000000000..70bb490e2f5 --- /dev/null +++ b/config/rbac/components_codeflare_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view codeflares. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: codeflare-viewer-role +rules: +- apiGroups: + - components.platform.opendatahub.io + resources: + - codeflares + verbs: + - get + - list + - watch +- apiGroups: + - components.platform.opendatahub.io + resources: + - codeflares/status + verbs: + - get diff --git a/config/rbac/components_dashboard_editor_role.yaml b/config/rbac/components_dashboard_editor_role.yaml new file mode 100644 index 00000000000..a145369cdd5 --- /dev/null +++ b/config/rbac/components_dashboard_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit dashboards. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: dashboard-editor-role +rules: +- apiGroups: + - components.platform.opendatahub.io + resources: + - dashboards + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - components.platform.opendatahub.io + resources: + - dashboards/status + verbs: + - get diff --git a/config/rbac/components_dashboard_viewer_role.yaml b/config/rbac/components_dashboard_viewer_role.yaml new file mode 100644 index 00000000000..5f83b735e08 --- /dev/null +++ b/config/rbac/components_dashboard_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view dashboards. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: dashboard-viewer-role +rules: +- apiGroups: + - components.platform.opendatahub.io + resources: + - dashboards + verbs: + - get + - list + - watch +- apiGroups: + - components.platform.opendatahub.io + resources: + - dashboards/status + verbs: + - get diff --git a/config/rbac/components_datasciencepipelines_editor_role.yaml b/config/rbac/components_datasciencepipelines_editor_role.yaml new file mode 100644 index 00000000000..f012953fe41 --- /dev/null +++ b/config/rbac/components_datasciencepipelines_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit datasciencepipelines. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: datasciencepipelines-editor-role +rules: +- apiGroups: + - components.platform.opendatahub.io + resources: + - datasciencepipelines + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - components.platform.opendatahub.io + resources: + - datasciencepipelines/status + verbs: + - get diff --git a/config/rbac/components_datasciencepipelines_viewer_role.yaml b/config/rbac/components_datasciencepipelines_viewer_role.yaml new file mode 100644 index 00000000000..36a70b6bc13 --- /dev/null +++ b/config/rbac/components_datasciencepipelines_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view datasciencepipelines. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: datasciencepipelines-viewer-role +rules: +- apiGroups: + - components.platform.opendatahub.io + resources: + - datasciencepipelines + verbs: + - get + - list + - watch +- apiGroups: + - components.platform.opendatahub.io + resources: + - datasciencepipelines/status + verbs: + - get diff --git a/config/rbac/components_kserve_editor_role.yaml b/config/rbac/components_kserve_editor_role.yaml new file mode 100644 index 00000000000..046f013a024 --- /dev/null +++ b/config/rbac/components_kserve_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit kserves. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kserve-editor-role +rules: +- apiGroups: + - components.platform.opendatahub.io + resources: + - kserves + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - components.platform.opendatahub.io + resources: + - kserves/status + verbs: + - get diff --git a/config/rbac/components_kserve_viewer_role.yaml b/config/rbac/components_kserve_viewer_role.yaml new file mode 100644 index 00000000000..a6a8f4b6d71 --- /dev/null +++ b/config/rbac/components_kserve_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view kserves. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kserve-viewer-role +rules: +- apiGroups: + - components.platform.opendatahub.io + resources: + - kserves + verbs: + - get + - list + - watch +- apiGroups: + - components.platform.opendatahub.io + resources: + - kserves/status + verbs: + - get diff --git a/config/rbac/components_kueue_editor_role.yaml b/config/rbac/components_kueue_editor_role.yaml new file mode 100644 index 00000000000..621b0843f6c --- /dev/null +++ b/config/rbac/components_kueue_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit kueues. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kueue-editor-role +rules: +- apiGroups: + - components.platform.opendatahub.io + resources: + - kueues + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - components.platform.opendatahub.io + resources: + - kueues/status + verbs: + - get diff --git a/config/rbac/components_kueue_viewer_role.yaml b/config/rbac/components_kueue_viewer_role.yaml new file mode 100644 index 00000000000..251fca65ab2 --- /dev/null +++ b/config/rbac/components_kueue_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view kueues. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: kueue-viewer-role +rules: +- apiGroups: + - components.platform.opendatahub.io + resources: + - kueues + verbs: + - get + - list + - watch +- apiGroups: + - components.platform.opendatahub.io + resources: + - kueues/status + verbs: + - get diff --git a/config/rbac/components_modelmeshserving_editor_role.yaml b/config/rbac/components_modelmeshserving_editor_role.yaml new file mode 100644 index 00000000000..3012db0c602 --- /dev/null +++ b/config/rbac/components_modelmeshserving_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit modelmeshservings. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: modelmeshserving-editor-role +rules: +- apiGroups: + - components.platform.opendatahub.io + resources: + - modelmeshservings + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - components.platform.opendatahub.io + resources: + - modelmeshservings/status + verbs: + - get diff --git a/config/rbac/components_modelmeshserving_viewer_role.yaml b/config/rbac/components_modelmeshserving_viewer_role.yaml new file mode 100644 index 00000000000..34708d12ee2 --- /dev/null +++ b/config/rbac/components_modelmeshserving_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view modelmeshservings. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: modelmeshserving-viewer-role +rules: +- apiGroups: + - components.platform.opendatahub.io + resources: + - modelmeshservings + verbs: + - get + - list + - watch +- apiGroups: + - components.platform.opendatahub.io + resources: + - modelmeshservings/status + verbs: + - get diff --git a/config/rbac/components_modelregistry_editor_role.yaml b/config/rbac/components_modelregistry_editor_role.yaml new file mode 100644 index 00000000000..8cbe9a06f73 --- /dev/null +++ b/config/rbac/components_modelregistry_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit modelregistries. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: modelregistry-editor-role +rules: +- apiGroups: + - components.platform.opendatahub.io + resources: + - modelregistries + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - components.platform.opendatahub.io + resources: + - modelregistries/status + verbs: + - get diff --git a/config/rbac/components_modelregistry_viewer_role.yaml b/config/rbac/components_modelregistry_viewer_role.yaml new file mode 100644 index 00000000000..d9457a107c0 --- /dev/null +++ b/config/rbac/components_modelregistry_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view modelregistries. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: modelregistry-viewer-role +rules: +- apiGroups: + - components.platform.opendatahub.io + resources: + - modelregistries + verbs: + - get + - list + - watch +- apiGroups: + - components.platform.opendatahub.io + resources: + - modelregistries/status + verbs: + - get diff --git a/config/rbac/components_ray_editor_role.yaml b/config/rbac/components_ray_editor_role.yaml new file mode 100644 index 00000000000..6b1c9f98f5b --- /dev/null +++ b/config/rbac/components_ray_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit rays. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ray-editor-role +rules: +- apiGroups: + - components.platform.opendatahub.io + resources: + - rays + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - components.platform.opendatahub.io + resources: + - rays/status + verbs: + - get diff --git a/config/rbac/components_ray_viewer_role.yaml b/config/rbac/components_ray_viewer_role.yaml new file mode 100644 index 00000000000..081ffef5d60 --- /dev/null +++ b/config/rbac/components_ray_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view rays. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ray-viewer-role +rules: +- apiGroups: + - components.platform.opendatahub.io + resources: + - rays + verbs: + - get + - list + - watch +- apiGroups: + - components.platform.opendatahub.io + resources: + - rays/status + verbs: + - get diff --git a/config/rbac/components_trainingoperator_editor_role.yaml b/config/rbac/components_trainingoperator_editor_role.yaml new file mode 100644 index 00000000000..0069f722c93 --- /dev/null +++ b/config/rbac/components_trainingoperator_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit trainingoperators. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: trainingoperator-editor-role +rules: +- apiGroups: + - components.platform.opendatahub.io + resources: + - trainingoperators + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - components.platform.opendatahub.io + resources: + - trainingoperators/status + verbs: + - get diff --git a/config/rbac/components_trainingoperator_viewer_role.yaml b/config/rbac/components_trainingoperator_viewer_role.yaml new file mode 100644 index 00000000000..f7d43ec2126 --- /dev/null +++ b/config/rbac/components_trainingoperator_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view trainingoperators. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: trainingoperator-viewer-role +rules: +- apiGroups: + - components.platform.opendatahub.io + resources: + - trainingoperators + verbs: + - get + - list + - watch +- apiGroups: + - components.platform.opendatahub.io + resources: + - trainingoperators/status + verbs: + - get diff --git a/config/rbac/components_trustyai_editor_role.yaml b/config/rbac/components_trustyai_editor_role.yaml new file mode 100644 index 00000000000..19ebadaf6ec --- /dev/null +++ b/config/rbac/components_trustyai_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit trustyais. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: trustyai-editor-role +rules: +- apiGroups: + - components.platform.opendatahub.io + resources: + - trustyais + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - components.platform.opendatahub.io + resources: + - trustyais/status + verbs: + - get diff --git a/config/rbac/components_trustyai_viewer_role.yaml b/config/rbac/components_trustyai_viewer_role.yaml new file mode 100644 index 00000000000..0cf799214c2 --- /dev/null +++ b/config/rbac/components_trustyai_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view trustyais. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: trustyai-viewer-role +rules: +- apiGroups: + - components.platform.opendatahub.io + resources: + - trustyais + verbs: + - get + - list + - watch +- apiGroups: + - components.platform.opendatahub.io + resources: + - trustyais/status + verbs: + - get diff --git a/config/rbac/components_workbenches_editor_role.yaml b/config/rbac/components_workbenches_editor_role.yaml new file mode 100644 index 00000000000..d40f1eb1cec --- /dev/null +++ b/config/rbac/components_workbenches_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit workbenches. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: workbenches-editor-role +rules: +- apiGroups: + - components.platform.opendatahub.io + resources: + - workbenches + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - components.platform.opendatahub.io + resources: + - workbenches/status + verbs: + - get diff --git a/config/rbac/components_workbenches_viewer_role.yaml b/config/rbac/components_workbenches_viewer_role.yaml new file mode 100644 index 00000000000..5aa60cde271 --- /dev/null +++ b/config/rbac/components_workbenches_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view workbenches. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: workbenches-viewer-role +rules: +- apiGroups: + - components.platform.opendatahub.io + resources: + - workbenches + verbs: + - get + - list + - watch +- apiGroups: + - components.platform.opendatahub.io + resources: + - workbenches/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index a27e6ebdc5c..8e9c9080573 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -4,14 +4,6 @@ kind: ClusterRole metadata: name: rhods-operator-role rules: -- apiGroups: - - '*' - resources: - - customresourcedefinitions - verbs: - - get - - list - - watch - apiGroups: - '*' resources: @@ -32,12 +24,6 @@ rules: - patch - update - watch -- apiGroups: - - addons.managed.openshift.io - resources: - - addons - verbs: - - get - apiGroups: - admissionregistration.k8s.io resources: @@ -61,6 +47,7 @@ rules: - get - list - patch + - update - watch - apiGroups: - apiregistration.k8s.io @@ -183,32 +170,94 @@ rules: - create - patch - apiGroups: - - config.openshift.io + - components.platform.opendatahub.io resources: - - authentications - - clusterversions + - codeflares + - dashboards + - datasciencepipelines + - kserves + - kueues + - modelcontrollers + - modelmeshservings + - modelregistries + - rays + - trainingoperators + - trustyais + - workbenches verbs: + - create + - delete - get - list + - patch + - update - watch - apiGroups: - - config.openshift.io + - components.platform.opendatahub.io resources: - - ingresses + - codeflares/finalizers + - datasciencepipelines/finalizers + - kserves/finalizers + - kueues/finalizers + - modelcontrollers/finalizers + - modelmeshservings/finalizers + - modelregistries/finalizers + - rays/finalizers + - trainingoperators/finalizers + - trustyais/finalizers + - workbenches/finalizers + verbs: + - update +- apiGroups: + - components.platform.opendatahub.io + resources: + - codeflares/status + - dashboards/status + - 
datasciencepipelines/status + - kserves/status + - kueues/status + - modelcontrollers/status + - modelmeshservings/status + - modelregistries/status + - rays/status + - trainingoperators/status + - trustyais/status + - workbenches/status verbs: - get + - patch + - update - apiGroups: - - console.openshift.io + - components.platform.opendatahub.io resources: - - consolelinks + - dashboards/finalizers verbs: - create - - delete - get + - list - patch + - update + - use + - watch +- apiGroups: + - config.openshift.io + resources: + - authentications + - clusterversions + verbs: + - get + - list + - watch +- apiGroups: + - config.openshift.io + resources: + - ingresses + verbs: + - get - apiGroups: - console.openshift.io resources: + - consolelinks - odhquickstarts verbs: - create @@ -216,6 +265,7 @@ rules: - get - list - patch + - watch - apiGroups: - controller-runtime.sigs.k8s.io resources: @@ -331,6 +381,7 @@ rules: - get - list - patch + - watch - apiGroups: - datasciencecluster.opendatahub.io resources: @@ -338,6 +389,7 @@ rules: verbs: - create - delete + - deletecollection - get - list - patch @@ -386,6 +438,7 @@ rules: verbs: - create - delete + - deletecollection - get - list - patch @@ -489,7 +542,6 @@ rules: resources: - servicemeshcontrolplanes - servicemeshmemberrolls - - servicemeshmembers - servicemeshmembers/finalizers verbs: - create @@ -499,6 +551,19 @@ rules: - update - use - watch +- apiGroups: + - maistra.io + resources: + - servicemeshmembers + verbs: + - create + - delete + - get + - list + - patch + - update + - use + - watch - apiGroups: - modelregistry.opendatahub.io resources: @@ -537,7 +602,6 @@ rules: - prometheuses - prometheuses/finalizers - prometheuses/status - - prometheusrules - thanosrulers - thanosrulers/finalizers - thanosrulers/status @@ -559,6 +623,18 @@ rules: - patch - update - watch +- apiGroups: + - monitoring.coreos.com + resources: + - prometheusrules + verbs: + - create + - delete + - deletecollection + - get + - list + - 
patch + - watch - apiGroups: - monitoring.coreos.com resources: @@ -768,6 +844,35 @@ rules: - securitycontextconstraints verbs: - '*' +- apiGroups: + - services.platform.opendatahub.io + resources: + - auths + - monitorings + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - services.platform.opendatahub.io + resources: + - auths/finalizers + - monitorings/finalizers + verbs: + - update +- apiGroups: + - services.platform.opendatahub.io + resources: + - auths/status + - monitorings/status + verbs: + - get + - patch + - update - apiGroups: - serving.knative.dev resources: diff --git a/config/rbac/services_auth_editor_role.yaml b/config/rbac/services_auth_editor_role.yaml new file mode 100644 index 00000000000..6f5449195d6 --- /dev/null +++ b/config/rbac/services_auth_editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit auths. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: auth-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: opendatahub-operator + app.kubernetes.io/part-of: opendatahub-operator + app.kubernetes.io/managed-by: kustomize + name: auth-editor-role +rules: +- apiGroups: + - services.opendatahub.io + resources: + - auths + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - services.opendatahub.io + resources: + - auths/status + verbs: + - get diff --git a/config/rbac/services_auth_viewer_role.yaml b/config/rbac/services_auth_viewer_role.yaml new file mode 100644 index 00000000000..2a932d677e5 --- /dev/null +++ b/config/rbac/services_auth_viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view auths. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: auth-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: opendatahub-operator + app.kubernetes.io/part-of: opendatahub-operator + app.kubernetes.io/managed-by: kustomize + name: auth-viewer-role +rules: +- apiGroups: + - services.opendatahub.io + resources: + - auths + verbs: + - get + - list + - watch +- apiGroups: + - services.opendatahub.io + resources: + - auths/status + verbs: + - get diff --git a/config/rbac/services_monitoring_editor_role.yaml b/config/rbac/services_monitoring_editor_role.yaml new file mode 100644 index 00000000000..9615fa2cce6 --- /dev/null +++ b/config/rbac/services_monitoring_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit monitorings. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: monitoring-editor-role +rules: +- apiGroups: + - services.platform.opendatahub.io + resources: + - monitorings + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - services.platform.opendatahub.io + resources: + - monitorings/status + verbs: + - get diff --git a/config/rbac/services_monitoring_viewer_role.yaml b/config/rbac/services_monitoring_viewer_role.yaml new file mode 100644 index 00000000000..526140304a7 --- /dev/null +++ b/config/rbac/services_monitoring_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view monitorings. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: monitoring-viewer-role +rules: +- apiGroups: + - services.platform.opendatahub.io + resources: + - monitorings + verbs: + - get + - list + - watch +- apiGroups: + - services.platform.opendatahub.io + resources: + - monitorings/status + verbs: + - get diff --git a/config/samples/components_v1_codeflare.yaml b/config/samples/components_v1_codeflare.yaml new file mode 100644 index 00000000000..0a80790af5c --- /dev/null +++ b/config/samples/components_v1_codeflare.yaml @@ -0,0 +1,6 @@ +apiVersion: components.platform.opendatahub.io/v1 +kind: CodeFlare +metadata: + name: codeflare-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/components_v1_dashboard.yaml b/config/samples/components_v1_dashboard.yaml new file mode 100644 index 00000000000..a8cd990c49a --- /dev/null +++ b/config/samples/components_v1_dashboard.yaml @@ -0,0 +1,6 @@ +apiVersion: components.platform.opendatahub.io/v1 +kind: Dashboard +metadata: + name: dashboard-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/components_v1_datasciencepipelines.yaml b/config/samples/components_v1_datasciencepipelines.yaml new file mode 100644 index 00000000000..7a1d88d8e03 --- /dev/null +++ b/config/samples/components_v1_datasciencepipelines.yaml @@ -0,0 +1,6 @@ +apiVersion: components.platform.opendatahub.io/v1 +kind: DataSciencePipelines +metadata: + name: datasciencepipelines-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/components_v1_kserve.yaml b/config/samples/components_v1_kserve.yaml new file mode 100644 index 00000000000..cad5c2698c1 --- /dev/null +++ b/config/samples/components_v1_kserve.yaml @@ -0,0 +1,6 @@ +apiVersion: components.platform.opendatahub.io/v1 +kind: Kserve +metadata: + name: kserve-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/components_v1_kueue.yaml b/config/samples/components_v1_kueue.yaml new file mode 100644 
index 00000000000..add77909808 --- /dev/null +++ b/config/samples/components_v1_kueue.yaml @@ -0,0 +1,6 @@ +apiVersion: components.platform.opendatahub.io/v1 +kind: Kueue +metadata: + name: kueue-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/components_v1_modelmeshserving.yaml b/config/samples/components_v1_modelmeshserving.yaml new file mode 100644 index 00000000000..ac829f52746 --- /dev/null +++ b/config/samples/components_v1_modelmeshserving.yaml @@ -0,0 +1,6 @@ +apiVersion: components.platform.opendatahub.io/v1 +kind: ModelMeshServing +metadata: + name: modelmeshserving-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/components_v1_modelregistry.yaml b/config/samples/components_v1_modelregistry.yaml new file mode 100644 index 00000000000..6465009094f --- /dev/null +++ b/config/samples/components_v1_modelregistry.yaml @@ -0,0 +1,6 @@ +apiVersion: components.platform.opendatahub.io/v1 +kind: ModelRegistry +metadata: + name: modelregistry-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/components_v1_ray.yaml b/config/samples/components_v1_ray.yaml new file mode 100644 index 00000000000..7c36f927e58 --- /dev/null +++ b/config/samples/components_v1_ray.yaml @@ -0,0 +1,6 @@ +apiVersion: components.platform.opendatahub.io/v1 +kind: Ray +metadata: + name: ray-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/components_v1_trainingoperator.yaml b/config/samples/components_v1_trainingoperator.yaml new file mode 100644 index 00000000000..c50f580a9a1 --- /dev/null +++ b/config/samples/components_v1_trainingoperator.yaml @@ -0,0 +1,6 @@ +apiVersion: components.platform.opendatahub.io/v1 +kind: TrainingOperator +metadata: + name: trainingoperator-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/components_v1_trustyai.yaml b/config/samples/components_v1_trustyai.yaml new file mode 100644 index 00000000000..1228a1cda6c --- /dev/null +++ 
b/config/samples/components_v1_trustyai.yaml @@ -0,0 +1,6 @@ +apiVersion: components.platform.opendatahub.io/v1 +kind: TrustyAI +metadata: + name: trustyai-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/components_v1_workbenches.yaml b/config/samples/components_v1_workbenches.yaml new file mode 100644 index 00000000000..1565728cdfa --- /dev/null +++ b/config/samples/components_v1_workbenches.yaml @@ -0,0 +1,6 @@ +apiVersion: components.platform.opendatahub.io/v1 +kind: Workbenches +metadata: + name: workbenches-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/datasciencecluster_v1_datasciencecluster.yaml b/config/samples/datasciencecluster_v1_datasciencecluster.yaml index 86a6b24bbb4..85c05bd811f 100644 --- a/config/samples/datasciencecluster_v1_datasciencecluster.yaml +++ b/config/samples/datasciencecluster_v1_datasciencecluster.yaml @@ -18,6 +18,9 @@ spec: managementState: "Managed" kserve: { managementState: "Managed", + nim: { + managementState: "Managed" + }, serving: { ingressGateway: { certificate: { @@ -33,7 +36,7 @@ spec: kueue: managementState: "Managed" trainingoperator: - managementState: "Removed" + managementState: "Managed" ray: managementState: "Managed" workbenches: diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 51284dc78c5..0e5e1ae1dda 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -5,4 +5,18 @@ kind: Kustomization resources: - datasciencecluster_v1_datasciencecluster.yaml - dscinitialization_v1_dscinitialization.yaml +#- components_v1_dashboard.yaml +#- components_v1_workbenches.yaml +#- components_v1_modelmeshserving.yaml +#- components_v1_datasciencepipelines.yaml +#- components_v1_kserve.yaml +#- components_v1_kueue.yaml +#- components_v1_codeflare.yaml +#- components_v1_ray.yaml +#- components_v1_trustyai.yaml +#- components_v1_modelregistry.yaml +#- components_v1_trainingoperator.yaml +#- 
services_v1_dscmonitoring.yaml +- services_v1_monitoring.yaml +- services_v1alpha1_auth.yaml #+kubebuilder:scaffold:manifestskustomizesamples diff --git a/config/samples/services_v1_monitoring.yaml b/config/samples/services_v1_monitoring.yaml new file mode 100644 index 00000000000..edffaa50e33 --- /dev/null +++ b/config/samples/services_v1_monitoring.yaml @@ -0,0 +1,6 @@ +apiVersion: services.platform.opendatahub.io/v1 +kind: Monitoring +metadata: + name: monitoring-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/services_v1alpha1_auth.yaml b/config/samples/services_v1alpha1_auth.yaml new file mode 100644 index 00000000000..8ae722549e6 --- /dev/null +++ b/config/samples/services_v1alpha1_auth.yaml @@ -0,0 +1,7 @@ +apiVersion: services.platform.opendatahub.io/v1 +kind: Auth +metadata: + labels: + name: auth +spec: + # TODO(user): Add fields here diff --git a/config/webhook/service.yaml b/config/webhook/service.yaml index 4a1ee47be15..380ed824882 100644 --- a/config/webhook/service.yaml +++ b/config/webhook/service.yaml @@ -1,4 +1,3 @@ - apiVersion: v1 kind: Service metadata: diff --git a/controllers/certconfigmapgenerator/certconfigmapgenerator_controller.go b/controllers/certconfigmapgenerator/certconfigmapgenerator_controller.go index 87fb116326b..283bd24db90 100644 --- a/controllers/certconfigmapgenerator/certconfigmapgenerator_controller.go +++ b/controllers/certconfigmapgenerator/certconfigmapgenerator_controller.go @@ -5,7 +5,6 @@ import ( "context" "reflect" - "github.com/go-logr/logr" operatorv1 "github.com/openshift/api/operator/v1" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" @@ -16,24 +15,25 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" + logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" dsciv1 
"github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + odhClient "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/client" annotation "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/annotations" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/trustedcabundle" ) // CertConfigmapGeneratorReconciler holds the controller configuration. type CertConfigmapGeneratorReconciler struct { - Client client.Client + *odhClient.Client Scheme *runtime.Scheme - Log logr.Logger } // SetupWithManager sets up the controller with the Manager. -func (r *CertConfigmapGeneratorReconciler) SetupWithManager(mgr ctrl.Manager) error { - r.Log.Info("Adding controller for Configmap Generation.") +func (r *CertConfigmapGeneratorReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { + logf.FromContext(ctx).Info("Adding controller for Configmap Generation.") return ctrl.NewControllerManagedBy(mgr). Named("cert-configmap-generator-controller"). Watches(&corev1.ConfigMap{}, handler.EnqueueRequestsFromMapFunc(r.watchTrustedCABundleConfigMapResource), builder.WithPredicates(ConfigMapChangedPredicate)). @@ -44,8 +44,9 @@ func (r *CertConfigmapGeneratorReconciler) SetupWithManager(mgr ctrl.Manager) er // Reconcile will generate new configmap, odh-trusted-ca-bundle, that includes cluster-wide trusted-ca bundle and custom // ca bundle in every new namespace created. func (r *CertConfigmapGeneratorReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := logf.FromContext(ctx).WithName("CertConfigmapGenerator") // Request includes namespace that is newly created or where odh-trusted-ca-bundle configmap is updated. 
- r.Log.Info("Reconciling CertConfigMapGenerator.", " Request.Namespace", req.NamespacedName) + log.Info("Reconciling CertConfigMapGenerator.", " Request.Namespace", req.NamespacedName) // Get namespace instance userNamespace := &corev1.Namespace{} if err := r.Client.Get(ctx, client.ObjectKey{Name: req.Namespace}, userNamespace); err != nil { @@ -55,7 +56,7 @@ func (r *CertConfigmapGeneratorReconciler) Reconcile(ctx context.Context, req ct // Get DSCI instance dsciInstances := &dsciv1.DSCInitializationList{} if err := r.Client.List(ctx, dsciInstances); err != nil { - r.Log.Error(err, "Failed to retrieve DSCInitialization resource for CertConfigMapGenerator ", "Request.Name", req.Name) + log.Error(err, "Failed to retrieve DSCInitialization resource for CertConfigMapGenerator ", "Request.Name", req.Name) return ctrl.Result{}, err } @@ -73,10 +74,10 @@ func (r *CertConfigmapGeneratorReconciler) Reconcile(ctx context.Context, req ct // Delete odh-trusted-ca-bundle Configmap if namespace has annotation set to opt-out CA bundle injection if trustedcabundle.HasCABundleAnnotationDisabled(userNamespace) { - r.Log.Info("Namespace has opted-out of CA bundle injection using annotation", "namespace", userNamespace.Name, + log.Info("Namespace has opted-out of CA bundle injection using annotation", "namespace", userNamespace.Name, "annotation", annotation.InjectionOfCABundleAnnotatoion) if err := trustedcabundle.DeleteOdhTrustedCABundleConfigMap(ctx, r.Client, req.Namespace); client.IgnoreNotFound(err) != nil { - r.Log.Error(err, "error deleting existing configmap from namespace", "name", trustedcabundle.CAConfigMapName, "namespace", userNamespace.Name) + log.Error(err, "error deleting existing configmap from namespace", "name", trustedcabundle.CAConfigMapName, "namespace", userNamespace.Name) return reconcile.Result{}, err } @@ -85,11 +86,11 @@ func (r *CertConfigmapGeneratorReconciler) Reconcile(ctx context.Context, req ct // Add odh-trusted-ca-bundle Configmap if 
trustedcabundle.ShouldInjectTrustedBundle(userNamespace) { - r.Log.Info("Adding trusted CA bundle configmap to the new or existing namespace ", "namespace", userNamespace.Name, + log.Info("Adding trusted CA bundle configmap to the new or existing namespace ", "namespace", userNamespace.Name, "configmap", trustedcabundle.CAConfigMapName) trustCAData := dsciInstance.Spec.TrustedCABundle.CustomCABundle if err := trustedcabundle.CreateOdhTrustedCABundleConfigMap(ctx, r.Client, req.Namespace, trustCAData); err != nil { - r.Log.Error(err, "error adding configmap to namespace", "name", trustedcabundle.CAConfigMapName, "namespace", userNamespace.Name) + log.Error(err, "error adding configmap to namespace", "name", trustedcabundle.CAConfigMapName, "namespace", userNamespace.Name) return reconcile.Result{}, err } } @@ -107,9 +108,10 @@ func (r *CertConfigmapGeneratorReconciler) watchNamespaceResource(_ context.Cont return nil } -func (r *CertConfigmapGeneratorReconciler) watchTrustedCABundleConfigMapResource(_ context.Context, a client.Object) []reconcile.Request { +func (r *CertConfigmapGeneratorReconciler) watchTrustedCABundleConfigMapResource(ctx context.Context, a client.Object) []reconcile.Request { + log := logf.FromContext(ctx) if a.GetName() == trustedcabundle.CAConfigMapName { - r.Log.Info("Cert configmap has been updated, start reconcile") + log.Info("Cert configmap has been updated, start reconcile") return []reconcile.Request{{NamespacedName: types.NamespacedName{Name: a.GetName(), Namespace: a.GetNamespace()}}} } return nil diff --git a/controllers/components/codeflare/codeflare.go b/controllers/components/codeflare/codeflare.go new file mode 100644 index 00000000000..0920a8b45b1 --- /dev/null +++ b/controllers/components/codeflare/codeflare.go @@ -0,0 +1,107 @@ +package codeflare + +import ( + "errors" + "fmt" + + operatorv1 "github.com/openshift/api/operator/v1" + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" + corev1 
"k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + cr "github.com/opendatahub-io/opendatahub-operator/v2/pkg/componentsregistry" + odhdeploy "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/annotations" +) + +type componentHandler struct{} + +func init() { //nolint:gochecknoinits + cr.Add(&componentHandler{}) +} + +func (s *componentHandler) GetName() string { + return componentApi.CodeFlareComponentName +} + +func (s *componentHandler) GetManagementState(dsc *dscv1.DataScienceCluster) operatorv1.ManagementState { + if dsc.Spec.Components.CodeFlare.ManagementState == operatorv1.Managed { + return operatorv1.Managed + } + return operatorv1.Removed +} + +func (s *componentHandler) NewCRObject(dsc *dscv1.DataScienceCluster) common.PlatformObject { + return &componentApi.CodeFlare{ + TypeMeta: metav1.TypeMeta{ + Kind: componentApi.CodeFlareKind, + APIVersion: componentApi.GroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: componentApi.CodeFlareInstanceName, + Annotations: map[string]string{ + annotations.ManagementStateAnnotation: string(s.GetManagementState(dsc)), + }, + }, + Spec: componentApi.CodeFlareSpec{ + CodeFlareCommonSpec: dsc.Spec.Components.CodeFlare.CodeFlareCommonSpec, + }, + } +} + +func (s *componentHandler) Init(_ cluster.Platform) error { + if err := odhdeploy.ApplyParams(paramsPath, imageParamMap); err != nil { + return fmt.Errorf("failed to update images on path %s: %w", 
paramsPath, err) + } + + return nil +} + +func (s *componentHandler) UpdateDSCStatus(dsc *dscv1.DataScienceCluster, obj client.Object) error { + c, ok := obj.(*componentApi.CodeFlare) + if !ok { + return errors.New("failed to convert to CodeFlare") + } + + dsc.Status.InstalledComponents[LegacyComponentName] = false + dsc.Status.Components.CodeFlare.ManagementSpec.ManagementState = s.GetManagementState(dsc) + dsc.Status.Components.CodeFlare.CodeFlareCommonStatus = nil + + nc := conditionsv1.Condition{ + Type: ReadyConditionType, + Status: corev1.ConditionFalse, + Reason: "Unknown", + Message: "Not Available", + } + + switch s.GetManagementState(dsc) { + case operatorv1.Managed: + dsc.Status.InstalledComponents[LegacyComponentName] = true + dsc.Status.Components.CodeFlare.CodeFlareCommonStatus = c.Status.CodeFlareCommonStatus.DeepCopy() + + if rc := meta.FindStatusCondition(c.Status.Conditions, status.ConditionTypeReady); rc != nil { + nc.Status = corev1.ConditionStatus(rc.Status) + nc.Reason = rc.Reason + nc.Message = rc.Message + } + + case operatorv1.Removed: + nc.Status = corev1.ConditionFalse + nc.Reason = string(operatorv1.Removed) + nc.Message = "Component ManagementState is set to " + string(operatorv1.Removed) + + default: + return fmt.Errorf("unknown state %s ", s.GetManagementState(dsc)) + } + + conditionsv1.SetStatusCondition(&dsc.Status.Conditions, nc) + + return nil +} diff --git a/controllers/components/codeflare/codeflare_controller.go b/controllers/components/codeflare/codeflare_controller.go new file mode 100644 index 00000000000..f46f5effac6 --- /dev/null +++ b/controllers/components/codeflare/codeflare_controller.go @@ -0,0 +1,88 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package codeflare + +import ( + "context" + + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + ctrl "sigs.k8s.io/controller-runtime" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/gc" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/render/kustomize" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/updatestatus" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/handlers" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/predicates/component" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/predicates/resources" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/reconciler" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" +) + +// CodeFlareReconciler reconciles a CodeFlare object. + +func (s *componentHandler) NewComponentReconciler(ctx context.Context, mgr ctrl.Manager) error { + _, err := reconciler.ReconcilerFor( + mgr, + &componentApi.CodeFlare{}, + ). + // customized Owns() for Component with new predicates + Owns(&corev1.ConfigMap{}). + Owns(&corev1.Secret{}). + Owns(&rbacv1.ClusterRoleBinding{}). + Owns(&rbacv1.ClusterRole{}). 
+ Owns(&rbacv1.Role{}). + Owns(&rbacv1.RoleBinding{}). + Owns(&corev1.ServiceAccount{}). + Owns(&corev1.Service{}). + Owns(&admissionregistrationv1.MutatingWebhookConfiguration{}). + Owns(&admissionregistrationv1.ValidatingWebhookConfiguration{}). + Owns(&appsv1.Deployment{}, reconciler.WithPredicates(resources.NewDeploymentPredicate())). + Watches( + &extv1.CustomResourceDefinition{}, + reconciler.WithEventHandler( + handlers.ToNamed(componentApi.CodeFlareInstanceName)), + reconciler.WithPredicates( + component.ForLabel(labels.ODH.Component(LegacyComponentName), labels.True)), + ). + // Add CodeFlare-specific actions + WithAction(initialize). + WithAction(devFlags). + WithAction(kustomize.NewAction( + kustomize.WithCache(), + kustomize.WithLabel(labels.ODH.Component(LegacyComponentName), labels.True), + kustomize.WithLabel(labels.K8SCommon.PartOf, LegacyComponentName), + )). + WithAction(deploy.NewAction( + deploy.WithCache(), + )). + WithAction(updatestatus.NewAction()). + // must be final action + WithAction(gc.NewAction()). 
+ Build(ctx) + + if err != nil { + return err // no need customize error, it is done in the caller main + } + + return nil +} diff --git a/controllers/components/codeflare/codeflare_controller_actions.go b/controllers/components/codeflare/codeflare_controller_actions.go new file mode 100644 index 00000000000..5c93bf07a7c --- /dev/null +++ b/controllers/components/codeflare/codeflare_controller_actions.go @@ -0,0 +1,46 @@ +package codeflare + +import ( + "context" + "fmt" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + odhtypes "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + odhdeploy "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" +) + +func initialize(_ context.Context, rr *odhtypes.ReconciliationRequest) error { + rr.Manifests = append(rr.Manifests, manifestsPath()) + + if err := odhdeploy.ApplyParams(paramsPath, nil, map[string]string{"namespace": rr.DSCI.Spec.ApplicationsNamespace}); err != nil { + return fmt.Errorf("failed to update params.env from %s : %w", paramsPath, err) + } + + return nil +} + +func devFlags(ctx context.Context, rr *odhtypes.ReconciliationRequest) error { + codeflare, ok := rr.Instance.(*componentApi.CodeFlare) + if !ok { + return fmt.Errorf("resource instance %v is not a componentApi.CodeFlare)", rr.Instance) + } + + if codeflare.Spec.DevFlags == nil { + return nil + } + // Implement devflags support logic + // If dev flags are set, update default manifests path + if len(codeflare.Spec.DevFlags.Manifests) != 0 { + manifestConfig := codeflare.Spec.DevFlags.Manifests[0] + if err := odhdeploy.DownloadManifests(ctx, ComponentName, manifestConfig); err != nil { + return err + } + if manifestConfig.SourcePath != "" { + rr.Manifests[0].Path = odhdeploy.DefaultManifestPath + rr.Manifests[0].ContextDir = ComponentName + rr.Manifests[0].SourcePath = manifestConfig.SourcePath + } + } + + return nil +} diff --git 
a/controllers/components/codeflare/codeflare_support.go b/controllers/components/codeflare/codeflare_support.go new file mode 100644 index 00000000000..213e0d02fb7 --- /dev/null +++ b/controllers/components/codeflare/codeflare_support.go @@ -0,0 +1,39 @@ +package codeflare + +import ( + "path" + + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" + odhtypes "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + odhdeploy "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" +) + +const ( + ComponentName = componentApi.CodeFlareComponentName + + ReadyConditionType = conditionsv1.ConditionType(componentApi.CodeFlareKind + status.ReadySuffix) + + // LegacyComponentName is the name of the component that is assigned to deployments + // via Kustomize. Since a deployment selector is immutable, we can't upgrade existing + // deployment to the new component name, so keep it around till we figure out a solution. 
+ LegacyComponentName = "codeflare" +) + +var ( + paramsPath = path.Join(odhdeploy.DefaultManifestPath, ComponentName, "manager") + + imageParamMap = map[string]string{ + "codeflare-operator-controller-image": "RELATED_IMAGE_ODH_CODEFLARE_OPERATOR_IMAGE", + } +) + +func manifestsPath() odhtypes.ManifestInfo { + return odhtypes.ManifestInfo{ + Path: odhdeploy.DefaultManifestPath, + ContextDir: ComponentName, + SourcePath: "default", + } +} diff --git a/controllers/components/dashboard/dashboard.go b/controllers/components/dashboard/dashboard.go new file mode 100644 index 00000000000..7070c87d489 --- /dev/null +++ b/controllers/components/dashboard/dashboard.go @@ -0,0 +1,109 @@ +package dashboard + +import ( + "errors" + "fmt" + + operatorv1 "github.com/openshift/api/operator/v1" + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + cr "github.com/opendatahub-io/opendatahub-operator/v2/pkg/componentsregistry" + odhdeploy "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/annotations" +) + +type componentHandler struct{} + +func init() { //nolint:gochecknoinits + cr.Add(&componentHandler{}) +} + +func (s *componentHandler) GetName() string { + return componentApi.DashboardComponentName +} + +func (s *componentHandler) GetManagementState(dsc *dscv1.DataScienceCluster) operatorv1.ManagementState { + if 
dsc.Spec.Components.Dashboard.ManagementState == operatorv1.Managed { + return operatorv1.Managed + } + return operatorv1.Removed +} + +func (s *componentHandler) Init(platform cluster.Platform) error { + mi := defaultManifestInfo(platform) + + if err := odhdeploy.ApplyParams(mi.String(), imagesMap); err != nil { + return fmt.Errorf("failed to update images on path %s: %w", mi, err) + } + + return nil +} + +func (s *componentHandler) NewCRObject(dsc *dscv1.DataScienceCluster) common.PlatformObject { + return &componentApi.Dashboard{ + TypeMeta: metav1.TypeMeta{ + Kind: componentApi.DashboardKind, + APIVersion: componentApi.GroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: componentApi.DashboardInstanceName, + Annotations: map[string]string{ + annotations.ManagementStateAnnotation: string(s.GetManagementState(dsc)), + }, + }, + Spec: componentApi.DashboardSpec{ + DashboardCommonSpec: dsc.Spec.Components.Dashboard.DashboardCommonSpec, + }, + } +} + +func (s *componentHandler) UpdateDSCStatus(dsc *dscv1.DataScienceCluster, obj client.Object) error { + c, ok := obj.(*componentApi.Dashboard) + if !ok { + return errors.New("failed to convert to Dashboard") + } + + dsc.Status.InstalledComponents[LegacyComponentNameUpstream] = false + dsc.Status.Components.Dashboard.ManagementSpec.ManagementState = s.GetManagementState(dsc) + dsc.Status.Components.Dashboard.DashboardCommonStatus = nil + + nc := conditionsv1.Condition{ + Type: ReadyConditionType, + Status: corev1.ConditionFalse, + Reason: "Unknown", + Message: "Not Available", + } + + switch s.GetManagementState(dsc) { + case operatorv1.Managed: + dsc.Status.InstalledComponents[LegacyComponentNameUpstream] = true + dsc.Status.Components.Dashboard.DashboardCommonStatus = c.Status.DashboardCommonStatus.DeepCopy() + + if rc := meta.FindStatusCondition(c.Status.Conditions, status.ConditionTypeReady); rc != nil { + nc.Status = corev1.ConditionStatus(rc.Status) + nc.Reason = rc.Reason + nc.Message = rc.Message 
+ } + + case operatorv1.Removed: + nc.Status = corev1.ConditionFalse + nc.Reason = string(operatorv1.Removed) + nc.Message = "Component ManagementState is set to " + string(operatorv1.Removed) + + default: + return fmt.Errorf("unknown state %s ", s.GetManagementState(dsc)) + } + + conditionsv1.SetStatusCondition(&dsc.Status.Conditions, nc) + + return nil +} diff --git a/controllers/components/dashboard/dashboard_controller.go b/controllers/components/dashboard/dashboard_controller.go new file mode 100644 index 00000000000..ab00e315642 --- /dev/null +++ b/controllers/components/dashboard/dashboard_controller.go @@ -0,0 +1,125 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package dashboard + +import ( + "context" + "fmt" + + consolev1 "github.com/openshift/api/console/v1" + routev1 "github.com/openshift/api/route/v1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + ctrl "sigs.k8s.io/controller-runtime" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/gc" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/render/kustomize" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/security" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/updatestatus" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/handlers" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/predicates/component" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/predicates/resources" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/reconciler" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" +) + +// NewComponentReconciler creates a ComponentReconciler for the Dashboard API. +func (s *componentHandler) NewComponentReconciler(ctx context.Context, mgr ctrl.Manager) error { + componentName := computeComponentName() + + _, err := reconciler.ReconcilerFor(mgr, &componentApi.Dashboard{}). + // operands - owned + Owns(&corev1.ConfigMap{}). + Owns(&corev1.Secret{}). + Owns(&rbacv1.ClusterRoleBinding{}). + Owns(&rbacv1.ClusterRole{}). + Owns(&rbacv1.Role{}). + Owns(&rbacv1.RoleBinding{}). + Owns(&corev1.ServiceAccount{}). + Owns(&corev1.Service{}). 
+ // By default, a predicated for changed generation is added by the Owns() + // method, however for deployments, we also need to retrieve status info + // hence we need a dedicated predicate to react to replicas status change + Owns(&appsv1.Deployment{}, reconciler.WithPredicates(resources.NewDeploymentPredicate())). + // operands - openshift + Owns(&routev1.Route{}). + Owns(&consolev1.ConsoleLink{}). + // Those APIs are provided by the component itself hence they should + // be watched dynamically + OwnsGVK(gvk.AcceleratorProfile, reconciler.Dynamic()). + OwnsGVK(gvk.OdhApplication, reconciler.Dynamic()). + OwnsGVK(gvk.OdhDocument, reconciler.Dynamic()). + OwnsGVK(gvk.OdhQuickStart, reconciler.Dynamic()). + // CRDs are not owned by the component and should be left on the cluster, + // so by default, the deploy action won't add all the annotation added to + // other resources. Hence, a custom handling is required in order to minimize + // chattering and avoid noisy neighborhoods + Watches( + &extv1.CustomResourceDefinition{}, + reconciler.WithEventHandler( + handlers.ToNamed(componentApi.DashboardInstanceName)), + reconciler.WithPredicates( + component.ForLabel(labels.ODH.Component(componentName), labels.True)), + ). + // The OdhDashboardConfig resource is expected to be created by the operator + // but then owned by the user so we only re-create it with factory values if + // it gets deleted + WatchesGVK(gvk.OdhDashboardConfig, + reconciler.Dynamic(), + reconciler.WithPredicates(resources.Deleted()), + ). + // actions + WithAction(initialize). + WithAction(devFlags). + WithAction(configureDependencies). + WithAction(security.NewUpdatePodSecurityRoleBindingAction(serviceAccounts)). 
+ WithAction(kustomize.NewAction( + kustomize.WithCache(), + // Those are the default labels added by the legacy deploy method + // and should be preserved as the original plugin were affecting + // deployment selectors that are immutable once created, so it won't + // be possible to actually amend the labels in a non-disruptive + // manner. + // + // Additional labels/annotations MUST be added by the deploy action + // so they would affect only objects metadata without side effects + kustomize.WithLabel(labels.ODH.Component(componentName), labels.True), + kustomize.WithLabel(labels.K8SCommon.PartOf, componentName), + )). + WithAction(customizeResources). + WithAction(deploy.NewAction( + deploy.WithCache(), + )). + WithAction(updatestatus.NewAction()). + WithAction(updateStatus). + // must be the final action + WithAction(gc.NewAction( + gc.WithUnremovables(gvk.OdhDashboardConfig), + )). + Build(ctx) + + if err != nil { + return fmt.Errorf("could not create the dashboard controller: %w", err) + } + + return nil +} diff --git a/controllers/components/dashboard/dashboard_controller_actions.go b/controllers/components/dashboard/dashboard_controller_actions.go new file mode 100644 index 00000000000..9e924ccadfc --- /dev/null +++ b/controllers/components/dashboard/dashboard_controller_actions.go @@ -0,0 +1,128 @@ +package dashboard + +import ( + "context" + "errors" + "fmt" + "strings" + + routev1 "github.com/openshift/api/route/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" + odhtypes "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + odhdeploy "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" + 
"github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/annotations" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" +) + +func initialize(ctx context.Context, rr *odhtypes.ReconciliationRequest) error { + rr.Manifests = []odhtypes.ManifestInfo{defaultManifestInfo(rr.Release.Name)} + + extraParamsMap, err := computeKustomizeVariable(ctx, rr.Client, rr.Release.Name, &rr.DSCI.Spec) + if err != nil { + return errors.New("failed to set variable for extraParamsMap") + } + + if err := odhdeploy.ApplyParams(rr.Manifests[0].String(), nil, extraParamsMap); err != nil { + return fmt.Errorf("failed to update params.env from %s : %w", rr.Manifests[0].String(), err) + } + + return nil +} + +func devFlags(ctx context.Context, rr *odhtypes.ReconciliationRequest) error { + dashboard, ok := rr.Instance.(*componentApi.Dashboard) + if !ok { + return fmt.Errorf("resource instance %v is not a componentApi.Dashboard)", rr.Instance) + } + + if dashboard.Spec.DevFlags == nil { + return nil + } + // Implement devflags support logic + // If dev flags are set, update default manifests path + if len(dashboard.Spec.DevFlags.Manifests) != 0 { + manifestConfig := dashboard.Spec.DevFlags.Manifests[0] + if err := odhdeploy.DownloadManifests(ctx, ComponentName, manifestConfig); err != nil { + return err + } + if manifestConfig.SourcePath != "" { + rr.Manifests[0].Path = odhdeploy.DefaultManifestPath + rr.Manifests[0].ContextDir = ComponentName + rr.Manifests[0].SourcePath = manifestConfig.SourcePath + } + } + + return nil +} + +func customizeResources(_ context.Context, rr *odhtypes.ReconciliationRequest) error { + for i := range rr.Resources { + if rr.Resources[i].GroupVersionKind() == gvk.OdhDashboardConfig { + // mark the resource as not supposed to be managed by the operator + resources.SetAnnotation(&rr.Resources[i], annotations.ManagedByODHOperator, "false") + break + } + } + + return nil +} 
+ +func configureDependencies(_ context.Context, rr *odhtypes.ReconciliationRequest) error { + if rr.Release.Name == cluster.Unknown || rr.Release.Name == cluster.OpenDataHub { + return nil + } + + err := rr.AddResources(&corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + APIVersion: corev1.SchemeGroupVersion.String(), + Kind: "Secret", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "anaconda-ce-access", + Namespace: rr.DSCI.Spec.ApplicationsNamespace, + }, + Type: corev1.SecretTypeOpaque, + }) + + if err != nil { + return fmt.Errorf("failed to create access-secret for anaconda: %w", err) + } + + return nil +} + +func updateStatus(ctx context.Context, rr *odhtypes.ReconciliationRequest) error { + d, ok := rr.Instance.(*componentApi.Dashboard) + if !ok { + return errors.New("instance is not of type *odhTypes.Dashboard") + } + + // url + rl := routev1.RouteList{} + err := rr.Client.List( + ctx, + &rl, + client.InNamespace(rr.DSCI.Spec.ApplicationsNamespace), + client.MatchingLabels(map[string]string{ + labels.PlatformPartOf: strings.ToLower(componentApi.DashboardKind), + }), + ) + + if err != nil { + return fmt.Errorf("failed to list routes: %w", err) + } + + d.Status.URL = "" + if len(rl.Items) == 1 { + d.Status.URL = resources.IngressHost(rl.Items[0]) + } + + return nil +} diff --git a/controllers/components/dashboard/dashboard_support.go b/controllers/components/dashboard/dashboard_support.go new file mode 100644 index 00000000000..4b2068703b3 --- /dev/null +++ b/controllers/components/dashboard/dashboard_support.go @@ -0,0 +1,106 @@ +package dashboard + +import ( + "context" + "fmt" + + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" + 
"github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + odhtypes "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + odhdeploy "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" +) + +const ( + ComponentName = componentApi.DashboardComponentName + + ReadyConditionType = conditionsv1.ConditionType(componentApi.DashboardKind + status.ReadySuffix) + + // Legacy component names are the name of the component that is assigned to deployments + // via Kustomize. Since a deployment selector is immutable, we can't upgrade existing + // deployment to the new component name, so keep it around till we figure out a solution. + + LegacyComponentNameUpstream = "dashboard" + LegacyComponentNameDownstream = "rhods-dashboard" +) + +var ( + adminGroups = map[cluster.Platform]string{ + cluster.SelfManagedRhoai: "rhods-admins", + cluster.ManagedRhoai: "dedicated-admins", + cluster.OpenDataHub: "odh-admins", + cluster.Unknown: "odh-admins", + } + + sectionTitle = map[cluster.Platform]string{ + cluster.SelfManagedRhoai: "OpenShift Self Managed Services", + cluster.ManagedRhoai: "OpenShift Managed Services", + cluster.OpenDataHub: "OpenShift Open Data Hub", + cluster.Unknown: "OpenShift Open Data Hub", + } + + baseConsoleURL = map[cluster.Platform]string{ + cluster.SelfManagedRhoai: "https://rhods-dashboard-", + cluster.ManagedRhoai: "https://rhods-dashboard-", + cluster.OpenDataHub: "https://odh-dashboard-", + cluster.Unknown: "https://odh-dashboard-", + } + + overlaysSourcePaths = map[cluster.Platform]string{ + cluster.SelfManagedRhoai: "/rhoai/onprem", + cluster.ManagedRhoai: "/rhoai/addon", + cluster.OpenDataHub: "/odh", + cluster.Unknown: "/odh", + } + + serviceAccounts = map[cluster.Platform][]string{ + cluster.SelfManagedRhoai: {"rhods-dashboard"}, + cluster.ManagedRhoai: {"rhods-dashboard"}, + cluster.OpenDataHub: {"odh-dashboard"}, + cluster.Unknown: {"odh-dashboard"}, + } + + imagesMap = map[string]string{ + "odh-dashboard-image": 
"RELATED_IMAGE_ODH_DASHBOARD_IMAGE", + } +) + +func defaultManifestInfo(p cluster.Platform) odhtypes.ManifestInfo { + return odhtypes.ManifestInfo{ + Path: odhdeploy.DefaultManifestPath, + ContextDir: ComponentName, + SourcePath: overlaysSourcePaths[p], + } +} + +func computeKustomizeVariable(ctx context.Context, cli client.Client, platform cluster.Platform, dscispec *dsciv1.DSCInitializationSpec) (map[string]string, error) { + consoleLinkDomain, err := cluster.GetDomain(ctx, cli) + if err != nil { + return nil, fmt.Errorf("error getting console route URL %s : %w", consoleLinkDomain, err) + } + + return map[string]string{ + "admin_groups": adminGroups[platform], + "dashboard-url": baseConsoleURL[platform] + dscispec.ApplicationsNamespace + "." + consoleLinkDomain, + "section-title": sectionTitle[platform], + }, nil +} + +func computeComponentName() string { + release := cluster.GetRelease() + + name := LegacyComponentNameUpstream + if release.Name == cluster.SelfManagedRhoai || release.Name == cluster.ManagedRhoai { + name = LegacyComponentNameDownstream + } + + return name +} + +func GetAdminGroup() string { + return adminGroups[cluster.GetRelease().Name] +} diff --git a/controllers/components/datasciencepipelines/datasciencepipelines.go b/controllers/components/datasciencepipelines/datasciencepipelines.go new file mode 100644 index 00000000000..7ee17348a28 --- /dev/null +++ b/controllers/components/datasciencepipelines/datasciencepipelines.go @@ -0,0 +1,107 @@ +package datasciencepipelines + +import ( + "errors" + "fmt" + + operatorv1 "github.com/openshift/api/operator/v1" + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + 
dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + cr "github.com/opendatahub-io/opendatahub-operator/v2/pkg/componentsregistry" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/annotations" +) + +type componentHandler struct{} + +func init() { //nolint:gochecknoinits + cr.Add(&componentHandler{}) +} + +func (s *componentHandler) GetName() string { + return componentApi.DataSciencePipelinesComponentName +} + +func (s *componentHandler) GetManagementState(dsc *dscv1.DataScienceCluster) operatorv1.ManagementState { + if dsc.Spec.Components.DataSciencePipelines.ManagementState == operatorv1.Managed { + return operatorv1.Managed + } + return operatorv1.Removed +} + +func (s *componentHandler) Init(_ cluster.Platform) error { + if err := deploy.ApplyParams(paramsPath().String(), imageParamMap); err != nil { + return fmt.Errorf("failed to update images on path %s: %w", paramsPath(), err) + } + + return nil +} + +func (s *componentHandler) NewCRObject(dsc *dscv1.DataScienceCluster) common.PlatformObject { + return &componentApi.DataSciencePipelines{ + TypeMeta: metav1.TypeMeta{ + Kind: componentApi.DataSciencePipelinesKind, + APIVersion: componentApi.GroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: componentApi.DataSciencePipelinesInstanceName, + Annotations: map[string]string{ + annotations.ManagementStateAnnotation: string(s.GetManagementState(dsc)), + }, + }, + Spec: componentApi.DataSciencePipelinesSpec{ + DataSciencePipelinesCommonSpec: dsc.Spec.Components.DataSciencePipelines.DataSciencePipelinesCommonSpec, + }, + } +} + +func (s *componentHandler) UpdateDSCStatus(dsc *dscv1.DataScienceCluster, obj client.Object) error { + c, ok := obj.(*componentApi.DataSciencePipelines) + if !ok { + return 
errors.New("failed to convert to DataSciencePipelines") + } + + dsc.Status.InstalledComponents[LegacyComponentName] = false + dsc.Status.Components.DataSciencePipelines.ManagementSpec.ManagementState = s.GetManagementState(dsc) + dsc.Status.Components.DataSciencePipelines.DataSciencePipelinesCommonStatus = nil + + nc := conditionsv1.Condition{ + Type: ReadyConditionType, + Status: corev1.ConditionFalse, + Reason: "Unknown", + Message: "Not Available", + } + + switch s.GetManagementState(dsc) { + case operatorv1.Managed: + dsc.Status.InstalledComponents[LegacyComponentName] = true + dsc.Status.Components.DataSciencePipelines.DataSciencePipelinesCommonStatus = c.Status.DataSciencePipelinesCommonStatus.DeepCopy() + + if rc := meta.FindStatusCondition(c.Status.Conditions, status.ConditionTypeReady); rc != nil { + nc.Status = corev1.ConditionStatus(rc.Status) + nc.Reason = rc.Reason + nc.Message = rc.Message + } + + case operatorv1.Removed: + nc.Status = corev1.ConditionFalse + nc.Reason = string(operatorv1.Removed) + nc.Message = "Component ManagementState is set to " + string(operatorv1.Removed) + + default: + return fmt.Errorf("unknown state %s ", s.GetManagementState(dsc)) + } + + conditionsv1.SetStatusCondition(&dsc.Status.Conditions, nc) + + return nil +} diff --git a/controllers/components/datasciencepipelines/datasciencepipelines_controller.go b/controllers/components/datasciencepipelines/datasciencepipelines_controller.go new file mode 100644 index 00000000000..12db851377b --- /dev/null +++ b/controllers/components/datasciencepipelines/datasciencepipelines_controller.go @@ -0,0 +1,85 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package datasciencepipelines + +import ( + "context" + + securityv1 "github.com/openshift/api/security/v1" + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + ctrl "sigs.k8s.io/controller-runtime" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/gc" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/render/kustomize" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/updatestatus" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/handlers" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/predicates/component" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/predicates/resources" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/reconciler" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" +) + +func (s *componentHandler) NewComponentReconciler(ctx context.Context, mgr ctrl.Manager) error { + _, err := reconciler.ReconcilerFor(mgr, &componentApi.DataSciencePipelines{}). + // customized Owns() for Component with new predicates + Owns(&corev1.ConfigMap{}). + Owns(&corev1.Secret{}). + Owns(&rbacv1.ClusterRoleBinding{}). 
+ Owns(&rbacv1.ClusterRole{}). + Owns(&rbacv1.Role{}). + Owns(&rbacv1.RoleBinding{}). + Owns(&corev1.ServiceAccount{}). + Owns(&corev1.Service{}). + Owns(&monitoringv1.ServiceMonitor{}). + Owns(&appsv1.Deployment{}, reconciler.WithPredicates(resources.NewDeploymentPredicate())). + Owns(&securityv1.SecurityContextConstraints{}). + Watches( + &extv1.CustomResourceDefinition{}, + reconciler.WithEventHandler( + handlers.ToNamed(componentApi.DataSciencePipelinesInstanceName)), + reconciler.WithPredicates( + component.ForLabel(labels.ODH.Component(LegacyComponentName), labels.True)), + ). + // Add datasciencepipelines-specific actions + WithAction(checkPreConditions). + WithAction(initialize). + WithAction(devFlags). + WithAction(kustomize.NewAction( + kustomize.WithCache(), + kustomize.WithLabel(labels.ODH.Component(LegacyComponentName), labels.True), + kustomize.WithLabel(labels.K8SCommon.PartOf, LegacyComponentName), + )). + WithAction(deploy.NewAction( + deploy.WithCache(), + )). + WithAction(updatestatus.NewAction()). + // must be the final action + WithAction(gc.NewAction()). + Build(ctx) + + if err != nil { + return err // no need customize error, it is done in the caller main + } + + return nil +} diff --git a/controllers/components/datasciencepipelines/datasciencepipelines_controller_actions.go b/controllers/components/datasciencepipelines/datasciencepipelines_controller_actions.go new file mode 100644 index 00000000000..dfa0a9defc4 --- /dev/null +++ b/controllers/components/datasciencepipelines/datasciencepipelines_controller_actions.go @@ -0,0 +1,104 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package datasciencepipelines + +import ( + "context" + "fmt" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + k8serr "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" + odherrors "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/errors" + odhtypes "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + odhdeploy "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" +) + +func checkPreConditions(ctx context.Context, rr *odhtypes.ReconciliationRequest) error { + dsp, ok := rr.Instance.(*componentApi.DataSciencePipelines) + if !ok { + return fmt.Errorf("resource instance %v is not a componentApi.DataSciencePipelines", rr.Instance) + } + + workflowCRD := &apiextensionsv1.CustomResourceDefinition{} + if err := rr.Client.Get(ctx, client.ObjectKey{Name: ArgoWorkflowCRD}, workflowCRD); err != nil { + if k8serr.IsNotFound(err) { + return nil + } + return odherrors.NewStopError("failed to get existing Workflow CRD : %v", err) + } + + // Verify if existing workflow is deployed by ODH with label + // if not then set Argo capability status condition to false + odhLabelValue, odhLabelExists := 
workflowCRD.Labels[labels.ODH.Component(LegacyComponentName)] + if !odhLabelExists || odhLabelValue != "true" { + s := dsp.GetStatus() + s.Phase = "NotReady" + + meta.SetStatusCondition(&s.Conditions, metav1.Condition{ + Type: status.ConditionTypeReady, + Status: metav1.ConditionFalse, + Reason: status.DataSciencePipelinesDoesntOwnArgoCRDReason, + Message: status.DataSciencePipelinesDoesntOwnArgoCRDMessage, + ObservedGeneration: s.ObservedGeneration, + }) + + return odherrors.NewStopError(status.DataSciencePipelinesDoesntOwnArgoCRDMessage) + } + + return nil +} + +func initialize(_ context.Context, rr *odhtypes.ReconciliationRequest) error { + rr.Manifests = append(rr.Manifests, manifestPath(rr.Release.Name)) + + return nil +} + +func devFlags(ctx context.Context, rr *odhtypes.ReconciliationRequest) error { + dsp, ok := rr.Instance.(*componentApi.DataSciencePipelines) + if !ok { + return fmt.Errorf("resource instance %v is not a componentApi.DataSciencePipelines)", rr.Instance) + } + + if dsp.Spec.DevFlags == nil { + return nil + } + + // Implement devflags support logic + // If dev flags are set, update default manifests path + if len(dsp.Spec.DevFlags.Manifests) != 0 { + manifestConfig := dsp.Spec.DevFlags.Manifests[0] + if err := odhdeploy.DownloadManifests(ctx, ComponentName, manifestConfig); err != nil { + return err + } + + if manifestConfig.SourcePath != "" { + rr.Manifests[0].Path = odhdeploy.DefaultManifestPath + rr.Manifests[0].ContextDir = ComponentName + rr.Manifests[0].SourcePath = manifestConfig.SourcePath + } + } + + return nil +} diff --git a/controllers/components/datasciencepipelines/datasciencepipelines_support.go b/controllers/components/datasciencepipelines/datasciencepipelines_support.go new file mode 100644 index 00000000000..985721eb866 --- /dev/null +++ b/controllers/components/datasciencepipelines/datasciencepipelines_support.go @@ -0,0 +1,67 @@ +package datasciencepipelines + +import ( + conditionsv1 
"github.com/openshift/custom-resource-status/conditions/v1" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + odhdeploy "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" +) + +const ( + ArgoWorkflowCRD = "workflows.argoproj.io" + ComponentName = componentApi.DataSciencePipelinesComponentName + + ReadyConditionType = conditionsv1.ConditionType(componentApi.DataSciencePipelinesKind + status.ReadySuffix) + + // LegacyComponentName is the name of the component that is assigned to deployments + // via Kustomize. Since a deployment selector is immutable, we can't upgrade existing + // deployment to the new component name, so keep it around till we figure out a solution. + LegacyComponentName = "data-science-pipelines-operator" +) + +var ( + imageParamMap = map[string]string{ + // v1 + "IMAGES_APISERVER": "RELATED_IMAGE_ODH_ML_PIPELINES_API_SERVER_IMAGE", + "IMAGES_ARTIFACT": "RELATED_IMAGE_ODH_ML_PIPELINES_ARTIFACT_MANAGER_IMAGE", + "IMAGES_PERSISTENTAGENT": "RELATED_IMAGE_ODH_ML_PIPELINES_PERSISTENCEAGENT_IMAGE", + "IMAGES_SCHEDULEDWORKFLOW": "RELATED_IMAGE_ODH_ML_PIPELINES_SCHEDULEDWORKFLOW_IMAGE", + "IMAGES_CACHE": "RELATED_IMAGE_ODH_ML_PIPELINES_CACHE_IMAGE", + "IMAGES_DSPO": "RELATED_IMAGE_ODH_DATA_SCIENCE_PIPELINES_OPERATOR_CONTROLLER_IMAGE", + // v2 + "IMAGESV2_ARGO_APISERVER": "RELATED_IMAGE_ODH_ML_PIPELINES_API_SERVER_V2_IMAGE", + "IMAGESV2_ARGO_PERSISTENCEAGENT": "RELATED_IMAGE_ODH_ML_PIPELINES_PERSISTENCEAGENT_V2_IMAGE", + "IMAGESV2_ARGO_SCHEDULEDWORKFLOW": "RELATED_IMAGE_ODH_ML_PIPELINES_SCHEDULEDWORKFLOW_V2_IMAGE", + "IMAGESV2_ARGO_ARGOEXEC": "RELATED_IMAGE_ODH_DATA_SCIENCE_PIPELINES_ARGO_ARGOEXEC_IMAGE", + "IMAGESV2_ARGO_WORKFLOWCONTROLLER": 
"RELATED_IMAGE_ODH_DATA_SCIENCE_PIPELINES_ARGO_WORKFLOWCONTROLLER_IMAGE", + "V2_DRIVER_IMAGE": "RELATED_IMAGE_ODH_ML_PIPELINES_DRIVER_IMAGE", + "V2_LAUNCHER_IMAGE": "RELATED_IMAGE_ODH_ML_PIPELINES_LAUNCHER_IMAGE", + "IMAGESV2_ARGO_MLMDGRPC": "RELATED_IMAGE_ODH_MLMD_GRPC_SERVER_IMAGE", + } + + overlaysSourcePaths = map[cluster.Platform]string{ + cluster.SelfManagedRhoai: "overlays/rhoai", + cluster.ManagedRhoai: "overlays/rhoai", + cluster.OpenDataHub: "overlays/odh", + cluster.Unknown: "overlays/odh", + } +) + +func paramsPath() types.ManifestInfo { + return types.ManifestInfo{ + Path: odhdeploy.DefaultManifestPath, + ContextDir: ComponentName, + SourcePath: "base", + } +} + +func manifestPath(p cluster.Platform) types.ManifestInfo { + return types.ManifestInfo{ + Path: odhdeploy.DefaultManifestPath, + ContextDir: ComponentName, + SourcePath: overlaysSourcePaths[p], + } +} diff --git a/controllers/components/kserve/config.go b/controllers/components/kserve/config.go new file mode 100644 index 00000000000..900c72f5a54 --- /dev/null +++ b/controllers/components/kserve/config.go @@ -0,0 +1,26 @@ +package kserve + +import ( + "encoding/json" + "fmt" + + corev1 "k8s.io/api/core/v1" +) + +// ConfigMap Keys. +const ( + DeployConfigName = "deploy" + IngressConfigKeyName = "ingress" +) + +type DeployConfig struct { + DefaultDeploymentMode string `json:"defaultDeploymentMode,omitempty"` +} + +func getDeployConfig(cm *corev1.ConfigMap) (*DeployConfig, error) { + deployConfig := DeployConfig{} + if err := json.Unmarshal([]byte(cm.Data[DeployConfigName]), &deployConfig); err != nil { + return nil, fmt.Errorf("error retrieving value for key '%s' from ConfigMap %s. 
%w", DeployConfigName, cm.Name, err) + } + return &deployConfig, nil +} diff --git a/components/kserve/feature_resources.go b/controllers/components/kserve/feature_resources.go similarity index 100% rename from components/kserve/feature_resources.go rename to controllers/components/kserve/feature_resources.go diff --git a/controllers/components/kserve/kserve.go b/controllers/components/kserve/kserve.go new file mode 100644 index 00000000000..8c1ddb51bc8 --- /dev/null +++ b/controllers/components/kserve/kserve.go @@ -0,0 +1,119 @@ +package kserve + +import ( + "errors" + "fmt" + + operatorv1 "github.com/openshift/api/operator/v1" + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + cr "github.com/opendatahub-io/opendatahub-operator/v2/pkg/componentsregistry" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/annotations" +) + +const ( + componentName = componentApi.KserveComponentName + serviceMeshOperator = "servicemeshoperator" + serverlessOperator = "serverless-operator" + kserveConfigMapName = "inferenceservice-config" + kserveManifestSourcePath = "overlays/odh" + + // LegacyComponentName is the name of the component that is assigned to deployments + // via Kustomize. Since a deployment selector is immutable, we can't upgrade existing + // deployment to the new component name, so keep it around till we figure out a solution. 
+ LegacyComponentName = "kserve" + + ReadyConditionType = conditionsv1.ConditionType(componentApi.KserveKind + status.ReadySuffix) +) + +type componentHandler struct{} + +func init() { //nolint:gochecknoinits + cr.Add(&componentHandler{}) +} + +// Init sets up component images; no image parameters are needed for Kserve. +func (s *componentHandler) Init(platform cluster.Platform) error { + return nil +} + +func (s *componentHandler) GetName() string { + return componentName +} + +func (s *componentHandler) GetManagementState(dsc *dscv1.DataScienceCluster) operatorv1.ManagementState { + if dsc.Spec.Components.Kserve.ManagementState == operatorv1.Managed { + return operatorv1.Managed + } + return operatorv1.Removed +} + +// for DSC to get component Kserve's CR. +func (s *componentHandler) NewCRObject(dsc *dscv1.DataScienceCluster) common.PlatformObject { + return &componentApi.Kserve{ + TypeMeta: metav1.TypeMeta{ + Kind: componentApi.KserveKind, + APIVersion: componentApi.GroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: componentApi.KserveInstanceName, + Annotations: map[string]string{ + annotations.ManagementStateAnnotation: string(s.GetManagementState(dsc)), + }, + }, + Spec: componentApi.KserveSpec{ + KserveCommonSpec: dsc.Spec.Components.Kserve.KserveCommonSpec, + }, + } +} + +func (s *componentHandler) UpdateDSCStatus(dsc *dscv1.DataScienceCluster, obj client.Object) error { + c, ok := obj.(*componentApi.Kserve) + if !ok { + return errors.New("failed to convert to Kserve") + } + + dsc.Status.InstalledComponents[LegacyComponentName] = false + dsc.Status.Components.Kserve.ManagementSpec.ManagementState = s.GetManagementState(dsc) + dsc.Status.Components.Kserve.KserveCommonStatus = nil + + nc := conditionsv1.Condition{ + Type: ReadyConditionType, + Status: corev1.ConditionFalse, + Reason: "Unknown", + Message: "Not Available", + } + + switch s.GetManagementState(dsc) { + case operatorv1.Managed: + dsc.Status.InstalledComponents[LegacyComponentName] = true + 
dsc.Status.Components.Kserve.KserveCommonStatus = c.Status.KserveCommonStatus.DeepCopy() + + if rc := meta.FindStatusCondition(c.Status.Conditions, status.ConditionTypeReady); rc != nil { + nc.Status = corev1.ConditionStatus(rc.Status) + nc.Reason = rc.Reason + nc.Message = rc.Message + } + + case operatorv1.Removed: + nc.Status = corev1.ConditionFalse + nc.Reason = string(operatorv1.Removed) + nc.Message = "Component ManagementState is set to " + string(operatorv1.Removed) + + default: + return fmt.Errorf("unknown state %s ", s.GetManagementState(dsc)) + } + + conditionsv1.SetStatusCondition(&dsc.Status.Conditions, nc) + + return nil +} diff --git a/controllers/components/kserve/kserve_controller.go b/controllers/components/kserve/kserve_controller.go new file mode 100644 index 00000000000..c5a62beed3a --- /dev/null +++ b/controllers/components/kserve/kserve_controller.go @@ -0,0 +1,167 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kserve + +import ( + "context" + + templatev1 "github.com/openshift/api/template/v1" + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + rbacv1 "k8s.io/api/rbac/v1" + extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/handler" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + featuresv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/gc" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/render/kustomize" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/updatestatus" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/handlers" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/predicates" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/predicates/clusterrole" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/predicates/component" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/predicates/hash" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/predicates/resources" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/reconciler" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" +) + +// NewComponentReconciler creates a ComponentReconciler for the Kserve API. 
+func (s *componentHandler) NewComponentReconciler(ctx context.Context, mgr ctrl.Manager) error { + ownedViaFTMapFunc := ownedViaFT(mgr.GetClient()) + + _, err := reconciler.ReconcilerFor(mgr, &componentApi.Kserve{}). + // operands - owned + Owns(&corev1.Secret{}). + Owns(&corev1.Service{}). + Owns(&corev1.ConfigMap{}). + Owns(&corev1.ServiceAccount{}). + Owns(&rbacv1.Role{}). + Owns(&rbacv1.RoleBinding{}). + Owns(&rbacv1.ClusterRole{}, reconciler.WithPredicates(clusterrole.IgnoreIfAggregationRule())). + Owns(&rbacv1.ClusterRoleBinding{}). + // The ovms template gets a new resourceVersion periodically without any other + // changes. The compareHashPredicate ensures that we don't needlessly enqueue + // requests if there are no changes that we don't care about. + Owns(&templatev1.Template{}, reconciler.WithPredicates(hash.Updated())). + // The FeatureTrackers are created slightly differently, and have + // ownerRefs set by controllerutil.SetOwnerReference() rather than + // controllerutil.SetControllerReference(), which means that the default + // eventHandler for Owns won't work, so a slightly modified variant is + // added here + Owns(&featuresv1.FeatureTracker{}, reconciler.WithEventHandler( + handler.EnqueueRequestForOwner( + mgr.GetScheme(), + mgr.GetRESTMapper(), + &componentApi.Kserve{}, + ))). + Owns(&networkingv1.NetworkPolicy{}). + Owns(&monitoringv1.ServiceMonitor{}). + Owns(&admissionregistrationv1.MutatingWebhookConfiguration{}). + Owns(&admissionregistrationv1.ValidatingWebhookConfiguration{}). + Owns(&appsv1.Deployment{}, reconciler.WithPredicates(resources.NewDeploymentPredicate())). 
+ // operands - watched + // + // By default the Watches functions adds: + // - an event handler mapping to a cluster scope resource identified by the + // components.platform.opendatahub.io/managed-by annotation + // - a predicate that check for generation change for Delete/Updates events + // for to objects that have the label components.platform.opendatahub.io/managed-by + // set to the current owner + Watches( + &extv1.CustomResourceDefinition{}, + reconciler.WithEventHandler( + handlers.ToNamed(componentApi.KserveInstanceName)), + reconciler.WithPredicates( + component.ForLabel(labels.ODH.Component(LegacyComponentName), labels.True)), + ). + + // operands - dynamically watched + // + // A watch will be created dynamically for these kinds, if they exist on the cluster + // (they come from ServiceMesh and Serverless operators). + // + // They're owned by FeatureTrackers, which are owned by a Kserve; so there's a + // custom event mapper to enqueue a reconcile request for a Kserve object, if + // applicable. + // + // They also don't have the "partOf" label that Watches expects in the + // implicit predicate, so the simpler "DefaultPredicate" is also added. + WatchesGVK( + gvk.KnativeServing, + reconciler.Dynamic(), + reconciler.WithEventMapper(ownedViaFTMapFunc), + reconciler.WithPredicates(predicates.DefaultPredicate)). + WatchesGVK( + gvk.ServiceMeshMember, + reconciler.Dynamic(), + reconciler.WithEventMapper(ownedViaFTMapFunc), + reconciler.WithPredicates(predicates.DefaultPredicate)). + WatchesGVK( + gvk.EnvoyFilter, + reconciler.Dynamic(), + reconciler.WithEventMapper(ownedViaFTMapFunc), + reconciler.WithPredicates(predicates.DefaultPredicate)). + WatchesGVK( + gvk.AuthorizationPolicy, + reconciler.Dynamic(), + reconciler.WithEventMapper(ownedViaFTMapFunc), + reconciler.WithPredicates(predicates.DefaultPredicate)). 
+ WatchesGVK( + gvk.Gateway, + reconciler.Dynamic(), + reconciler.WithEventMapper(ownedViaFTMapFunc), + reconciler.WithPredicates(predicates.DefaultPredicate)). + + // actions + WithAction(checkPreConditions). + WithAction(initialize). + WithAction(devFlags). + WithAction(configureServerless). + WithAction(configureServiceMesh). + WithAction(kustomize.NewAction( + kustomize.WithCache(), + // These are the default labels added by the legacy deploy method + // and should be preserved as the original plugin were affecting + // deployment selectors that are immutable once created, so it won't + // be possible to actually amend the labels in a non-disruptive + // manner. + // + // Additional labels/annotations MUST be added by the deploy action + // so they would affect only objects metadata without side effects + kustomize.WithLabel(labels.ODH.Component(LegacyComponentName), labels.True), + kustomize.WithLabel(labels.K8SCommon.PartOf, LegacyComponentName), + )). + WithAction(customizeKserveConfigMap). + WithAction(deploy.NewAction( + deploy.WithCache(), + )). + WithAction(setStatusFields). + WithAction(updatestatus.NewAction()). + // must be the final action + WithAction(gc.NewAction()). 
+ Build(ctx) + + return err +} diff --git a/controllers/components/kserve/kserve_controller_actions.go b/controllers/components/kserve/kserve_controller_actions.go new file mode 100644 index 00000000000..b12562a0f87 --- /dev/null +++ b/controllers/components/kserve/kserve_controller_actions.go @@ -0,0 +1,279 @@ +package kserve + +import ( + "context" + "errors" + "fmt" + "strings" + + operatorv1 "github.com/openshift/api/operator/v1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" + odherrors "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/errors" + odhtypes "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" +) + +func checkPreConditions(ctx context.Context, rr *odhtypes.ReconciliationRequest) error { + k, ok := rr.Instance.(*componentApi.Kserve) + if !ok { + return fmt.Errorf("resource instance %v is not a componentApi.Kserve)", rr.Instance) + } + + if k.Spec.Serving.ManagementState != operatorv1.Managed { + return nil + } + + if rr.DSCI.Spec.ServiceMesh == nil || rr.DSCI.Spec.ServiceMesh.ManagementState != operatorv1.Managed { + s := k.GetStatus() + s.Phase = status.PhaseNotReady + + meta.SetStatusCondition(&s.Conditions, metav1.Condition{ + Type: status.ConditionTypeReady, + Status: metav1.ConditionFalse, + Reason: status.ServiceMeshNotConfiguredReason, + Message: 
status.ServiceMeshNotConfiguredMessage, + ObservedGeneration: s.ObservedGeneration, + }) + + return odherrors.NewStopError(status.ServiceMeshNotConfiguredMessage) + } + + if found, err := cluster.OperatorExists(ctx, rr.Client, serviceMeshOperator); err != nil || !found { + s := k.GetStatus() + s.Phase = status.PhaseNotReady + + meta.SetStatusCondition(&s.Conditions, metav1.Condition{ + Type: status.ConditionTypeReady, + Status: metav1.ConditionFalse, + Reason: status.ServiceMeshOperatorNotInstalledReason, + Message: status.ServiceMeshOperatorNotInstalledMessage, + ObservedGeneration: s.ObservedGeneration, + }) + + if err != nil { + return odherrors.NewStopErrorW(err) + } + + return odherrors.NewStopError(status.ServiceMeshOperatorNotInstalledMessage) + } + + if found, err := cluster.OperatorExists(ctx, rr.Client, serverlessOperator); err != nil || !found { + s := k.GetStatus() + s.Phase = status.PhaseNotReady + + meta.SetStatusCondition(&s.Conditions, metav1.Condition{ + Type: status.ConditionTypeReady, + Status: metav1.ConditionFalse, + Reason: status.ServerlessOperatorNotInstalledReason, + Message: status.ServerlessOperatorNotInstalledMessage, + ObservedGeneration: s.ObservedGeneration, + }) + + if err != nil { + return odherrors.NewStopErrorW(err) + } + + return odherrors.NewStopError(status.ServerlessOperatorNotInstalledMessage) + } + + return nil +} + +func initialize(ctx context.Context, rr *odhtypes.ReconciliationRequest) error { + rr.Manifests = []odhtypes.ManifestInfo{ + kserveManifestInfo(kserveManifestSourcePath), + } + + return nil +} + +func devFlags(ctx context.Context, rr *odhtypes.ReconciliationRequest) error { + k, ok := rr.Instance.(*componentApi.Kserve) + if !ok { + return fmt.Errorf("resource instance %v is not a componentApi.Kserve)", rr.Instance) + } + + df := k.GetDevFlags() + if df == nil { + return nil + } + if len(df.Manifests) == 0 { + return nil + } + + kSourcePath := kserveManifestSourcePath + + for _, subcomponent := range df.Manifests 
{ + if !strings.Contains(subcomponent.URI, componentName) && !strings.Contains(subcomponent.URI, LegacyComponentName) { + continue + } + + if err := deploy.DownloadManifests(ctx, componentName, subcomponent); err != nil { + return err + } + + if subcomponent.SourcePath != "" { + kSourcePath = subcomponent.SourcePath + } + + break + } + + rr.Manifests = []odhtypes.ManifestInfo{ + kserveManifestInfo(kSourcePath), + } + + return nil +} + +func configureServerless(ctx context.Context, rr *odhtypes.ReconciliationRequest) error { + k, ok := rr.Instance.(*componentApi.Kserve) + if !ok { + return fmt.Errorf("resource instance %v is not a componentApi.Kserve)", rr.Instance) + } + + logger := logf.FromContext(ctx) + cli := rr.Client + + switch k.Spec.Serving.ManagementState { + case operatorv1.Unmanaged: // Bring your own CR + logger.Info("Serverless CR is not configured by the operator, we won't do anything") + + case operatorv1.Removed: // we remove serving CR + logger.Info("existing Serverless CR (owned by operator) will be removed") + if err := removeServerlessFeatures(ctx, rr.Client, k, &rr.DSCI.Spec); err != nil { + return err + } + + case operatorv1.Managed: // standard workflow to create CR + if rr.DSCI.Spec.ServiceMesh == nil { + return errors.New("ServiceMesh needs to be configured and 'Managed' in DSCI CR, " + + "it is required by KServe serving") + } + + switch rr.DSCI.Spec.ServiceMesh.ManagementState { + case operatorv1.Unmanaged, operatorv1.Removed: + return fmt.Errorf("ServiceMesh is currently set to '%s'. 
It needs to be set to 'Managed' in DSCI CR, "+ + "as it is required by the KServe serving field", rr.DSCI.Spec.ServiceMesh.ManagementState) + } + + serverlessFeatures := feature.ComponentFeaturesHandler(rr.Instance, componentName, rr.DSCI.Spec.ApplicationsNamespace, configureServerlessFeatures(&rr.DSCI.Spec, k)) + + if err := serverlessFeatures.Apply(ctx, cli); err != nil { + return err + } + } + return nil +} + +func configureServiceMesh(ctx context.Context, rr *odhtypes.ReconciliationRequest) error { + k, ok := rr.Instance.(*componentApi.Kserve) + if !ok { + return fmt.Errorf("resource instance %v is not a componentApi.Kserve)", rr.Instance) + } + + cli := rr.Client + + if rr.DSCI.Spec.ServiceMesh != nil { + if rr.DSCI.Spec.ServiceMesh.ManagementState == operatorv1.Managed { + serviceMeshInitializer := feature.ComponentFeaturesHandler(k, componentName, rr.DSCI.Spec.ApplicationsNamespace, defineServiceMeshFeatures(ctx, cli, &rr.DSCI.Spec)) + return serviceMeshInitializer.Apply(ctx, cli) + } + if rr.DSCI.Spec.ServiceMesh.ManagementState == operatorv1.Unmanaged { + return nil + } + } + + return removeServiceMeshConfigurations(ctx, cli, k, &rr.DSCI.Spec) +} + +func customizeKserveConfigMap(ctx context.Context, rr *odhtypes.ReconciliationRequest) error { + k, ok := rr.Instance.(*componentApi.Kserve) + if !ok { + return fmt.Errorf("resource instance %v is not a componentApi.Kserve)", rr.Instance) + } + + logger := logf.FromContext(ctx) + + kserveConfigMap := corev1.ConfigMap{} + cmidx, err := getIndexedResource(rr.Resources, &kserveConfigMap, gvk.ConfigMap, kserveConfigMapName) + if err != nil { + return err + } + + switch k.Spec.Serving.ManagementState { + case operatorv1.Managed, operatorv1.Unmanaged: + if k.Spec.DefaultDeploymentMode == "" { + // if the default mode is empty in the DSC, assume mode is "Serverless" since k.Serving is Managed + if err := setDefaultDeploymentMode(&kserveConfigMap, componentApi.Serverless); err != nil { + return err + } + } else { + // 
if the default mode is explicitly specified, respect that + if err := setDefaultDeploymentMode(&kserveConfigMap, k.Spec.DefaultDeploymentMode); err != nil { + return err + } + } + case operatorv1.Removed: + if k.Spec.DefaultDeploymentMode == componentApi.Serverless { + return errors.New("setting default deployment mode as Serverless is incompatible with having Serving 'Removed'") + } + if k.Spec.DefaultDeploymentMode == "" { + logger.Info("Serving is removed, Kserve will default to RawDeployment") + } + if err := setDefaultDeploymentMode(&kserveConfigMap, componentApi.RawDeployment); err != nil { + return err + } + } + + err = replaceResourceAtIndex(rr.Resources, cmidx, &kserveConfigMap) + if err != nil { + return err + } + + kserveConfigHash, err := hashConfigMap(&kserveConfigMap) + if err != nil { + return err + } + + kserveDeployment := appsv1.Deployment{} + deployidx, err := getIndexedResource(rr.Resources, &kserveDeployment, gvk.Deployment, "kserve-controller-manager") + if err != nil { + return err + } + + kserveDeployment.Spec.Template.Annotations[labels.ODHAppPrefix+"/KserveConfigHash"] = kserveConfigHash + + err = replaceResourceAtIndex(rr.Resources, deployidx, &kserveDeployment) + if err != nil { + return err + } + + return nil +} + +func setStatusFields(ctx context.Context, rr *odhtypes.ReconciliationRequest) error { + k, ok := rr.Instance.(*componentApi.Kserve) + if !ok { + return fmt.Errorf("resource instance %v is not a componentApi.Kserve", rr.Instance) + } + + ddm, err := getDefaultDeploymentMode(ctx, rr.Client, &rr.DSCI.Spec) + if err != nil { + return err + } + + k.Status.DefaultDeploymentMode = ddm + return nil +} diff --git a/controllers/components/kserve/kserve_support.go b/controllers/components/kserve/kserve_support.go new file mode 100644 index 00000000000..2f12314db33 --- /dev/null +++ b/controllers/components/kserve/kserve_support.go @@ -0,0 +1,268 @@ +package kserve + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" +
"path" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + featuresv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + odhtypes "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/manifest" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/serverless" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/servicemesh" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" +) + +func kserveManifestInfo(sourcePath string) odhtypes.ManifestInfo { + return odhtypes.ManifestInfo{ + Path: deploy.DefaultManifestPath, + ContextDir: componentName, + SourcePath: sourcePath, + } +} + +func configureServerlessFeatures(dsciSpec *dsciv1.DSCInitializationSpec, kserve *componentApi.Kserve) feature.FeaturesProvider { + return func(registry feature.FeaturesRegistry) error { + servingDeployment := feature.Define("serverless-serving-deployment"). + Manifests( + manifest.Location(Resources.Location). + Include( + path.Join(Resources.InstallDir), + ), + ). 
+ WithData( + serverless.FeatureData.IngressDomain.Define(&kserve.Spec.Serving).AsAction(), + serverless.FeatureData.Serving.Define(&kserve.Spec.Serving).AsAction(), + servicemesh.FeatureData.ControlPlane.Define(dsciSpec).AsAction(), + ). + PreConditions( + serverless.EnsureServerlessOperatorInstalled, + serverless.EnsureServerlessAbsent, + servicemesh.EnsureServiceMeshInstalled, + feature.CreateNamespaceIfNotExists(serverless.KnativeServingNamespace), + ). + PostConditions( + feature.WaitForPodsToBeReady(serverless.KnativeServingNamespace), + ) + + istioSecretFiltering := feature.Define("serverless-net-istio-secret-filtering"). + Manifests( + manifest.Location(Resources.Location). + Include( + path.Join(Resources.BaseDir, "serving-net-istio-secret-filtering.patch.tmpl.yaml"), + ), + ). + WithData(serverless.FeatureData.Serving.Define(&kserve.Spec.Serving).AsAction()). + PreConditions(serverless.EnsureServerlessServingDeployed). + PostConditions( + feature.WaitForPodsToBeReady(serverless.KnativeServingNamespace), + ) + + servingGateway := feature.Define("serverless-serving-gateways"). + Manifests( + manifest.Location(Resources.Location). + Include( + path.Join(Resources.GatewaysDir), + ), + ). + WithData( + serverless.FeatureData.IngressDomain.Define(&kserve.Spec.Serving).AsAction(), + serverless.FeatureData.CertificateName.Define(&kserve.Spec.Serving).AsAction(), + serverless.FeatureData.Serving.Define(&kserve.Spec.Serving).AsAction(), + servicemesh.FeatureData.ControlPlane.Define(dsciSpec).AsAction(), + ). + WithResources(serverless.ServingCertificateResource). 
+ PreConditions(serverless.EnsureServerlessServingDeployed) + + return registry.Add( + servingDeployment, + istioSecretFiltering, + servingGateway, + ) + } +} + +func defineServiceMeshFeatures(ctx context.Context, cli client.Client, dscispec *dsciv1.DSCInitializationSpec) feature.FeaturesProvider { + return func(registry feature.FeaturesRegistry) error { + authorinoInstalled, err := cluster.SubscriptionExists(ctx, cli, "authorino-operator") + if err != nil { + return fmt.Errorf("failed to list subscriptions: %w", err) + } + + if authorinoInstalled { + kserveExtAuthzErr := registry.Add(feature.Define("kserve-external-authz"). + Manifests( + manifest.Location(Resources.Location). + Include( + path.Join(Resources.ServiceMeshDir, "activator-envoyfilter.tmpl.yaml"), + path.Join(Resources.ServiceMeshDir, "envoy-oauth-temp-fix.tmpl.yaml"), + path.Join(Resources.ServiceMeshDir, "kserve-predictor-authorizationpolicy.tmpl.yaml"), + path.Join(Resources.ServiceMeshDir, "kserve-inferencegraph-envoyfilter.tmpl.yaml"), + path.Join(Resources.ServiceMeshDir, "kserve-inferencegraph-authorizationpolicy.tmpl.yaml"), + ), + ). + Managed(). + WithData( + feature.Entry("Domain", cluster.GetDomain), + servicemesh.FeatureData.ControlPlane.Define(dscispec).AsAction(), + ).
+ WithData( + servicemesh.FeatureData.Authorization.All(dscispec)..., + ), + ) + + if kserveExtAuthzErr != nil { + return kserveExtAuthzErr + } + } else { + ctrl.Log.Info("WARN: Authorino operator is not installed on the cluster, skipping authorization capability") + } + + return nil + } +} + +func removeServiceMeshConfigurations(ctx context.Context, cli client.Client, owner metav1.Object, dscispec *dsciv1.DSCInitializationSpec) error { + serviceMeshInitializer := feature.ComponentFeaturesHandler(owner, componentName, dscispec.ApplicationsNamespace, defineServiceMeshFeatures(ctx, cli, dscispec)) + return serviceMeshInitializer.Delete(ctx, cli) +} + +func removeServerlessFeatures(ctx context.Context, cli client.Client, k *componentApi.Kserve, dscispec *dsciv1.DSCInitializationSpec) error { + serverlessFeatures := feature.ComponentFeaturesHandler(k, componentName, dscispec.ApplicationsNamespace, configureServerlessFeatures(dscispec, k)) + return serverlessFeatures.Delete(ctx, cli) +} + +func getDefaultDeploymentMode(ctx context.Context, cli client.Client, dscispec *dsciv1.DSCInitializationSpec) (string, error) { + kserveConfigMap := corev1.ConfigMap{} + if err := cli.Get(ctx, client.ObjectKey{Name: kserveConfigMapName, Namespace: dscispec.ApplicationsNamespace}, &kserveConfigMap); err != nil { + return "", err + } + + deployConfig, err := getDeployConfig(&kserveConfigMap) + if err != nil { + return "", err + } + + return deployConfig.DefaultDeploymentMode, nil +} + +func setDefaultDeploymentMode(inferenceServiceConfigMap *corev1.ConfigMap, defaultmode componentApi.DefaultDeploymentMode) error { + deployData, err := getDeployConfig(inferenceServiceConfigMap) + if err != nil { + return err + } + + if deployData.DefaultDeploymentMode != string(defaultmode) { + deployData.DefaultDeploymentMode = string(defaultmode) + deployDataBytes, err := json.MarshalIndent(deployData, "", " ") + if err != nil { + return fmt.Errorf("could not set values in configmap %s. 
%w", kserveConfigMapName, err) + } + inferenceServiceConfigMap.Data[DeployConfigName] = string(deployDataBytes) + + var ingressData map[string]interface{} + if err = json.Unmarshal([]byte(inferenceServiceConfigMap.Data[IngressConfigKeyName]), &ingressData); err != nil { + return fmt.Errorf("error retrieving value for key '%s' from configmap %s. %w", IngressConfigKeyName, kserveConfigMapName, err) + } + if defaultmode == componentApi.RawDeployment { + ingressData["disableIngressCreation"] = true + } else { + ingressData["disableIngressCreation"] = false + } + ingressDataBytes, err := json.MarshalIndent(ingressData, "", " ") + if err != nil { + return fmt.Errorf("could not set values in configmap %s. %w", kserveConfigMapName, err) + } + inferenceServiceConfigMap.Data[IngressConfigKeyName] = string(ingressDataBytes) + } + + return nil +} + +func getIndexedResource(rs []unstructured.Unstructured, obj any, g schema.GroupVersionKind, name string) (int, error) { + var idx = -1 + for i, r := range rs { + if r.GroupVersionKind() == g && r.GetName() == name { + idx = i + break + } + } + + if idx == -1 { + return -1, fmt.Errorf("could not find %T with name %v in resources list", obj, name) + } + + err := runtime.DefaultUnstructuredConverter.FromUnstructured(rs[idx].Object, obj) + if err != nil { + return idx, fmt.Errorf("failed converting to %T from Unstructured.Object: %v", obj, rs[idx].Object) + } + + return idx, nil +} + +func replaceResourceAtIndex(rs []unstructured.Unstructured, idx int, obj any) error { + u, err := resources.ToUnstructured(obj) + if err != nil { + return err + } + + rs[idx] = *u + return nil +} + +func hashConfigMap(cm *corev1.ConfigMap) (string, error) { + u, err := resources.ToUnstructured(cm) + if err != nil { + return "", err + } + + h, err := resources.Hash(u) + if err != nil { + return "", err + } + + return base64.RawURLEncoding.EncodeToString(h), nil +} + +func ownedViaFT(cli client.Client) handler.MapFunc { + return func(ctx context.Context, a 
client.Object) []reconcile.Request { + for _, or := range a.GetOwnerReferences() { + if or.Kind == "FeatureTracker" { + ft := featuresv1.FeatureTracker{} + if err := cli.Get(ctx, client.ObjectKey{Name: or.Name}, &ft); err != nil { + return []reconcile.Request{} + } + + for _, ftor := range ft.GetOwnerReferences() { + if ftor.Kind == componentApi.KserveKind && ftor.Name != "" { + return []reconcile.Request{{ + NamespacedName: types.NamespacedName{ + Name: ftor.Name, + }, + }} + } + } + } + } + + return []reconcile.Request{} + } +} diff --git a/components/kserve/resources/servicemesh/activator-envoyfilter.tmpl.yaml b/controllers/components/kserve/resources/servicemesh/activator-envoyfilter.tmpl.yaml similarity index 100% rename from components/kserve/resources/servicemesh/activator-envoyfilter.tmpl.yaml rename to controllers/components/kserve/resources/servicemesh/activator-envoyfilter.tmpl.yaml diff --git a/components/kserve/resources/servicemesh/envoy-oauth-temp-fix.tmpl.yaml b/controllers/components/kserve/resources/servicemesh/envoy-oauth-temp-fix.tmpl.yaml similarity index 100% rename from components/kserve/resources/servicemesh/envoy-oauth-temp-fix.tmpl.yaml rename to controllers/components/kserve/resources/servicemesh/envoy-oauth-temp-fix.tmpl.yaml diff --git a/controllers/components/kserve/resources/servicemesh/kserve-inferencegraph-authorizationpolicy.tmpl.yaml b/controllers/components/kserve/resources/servicemesh/kserve-inferencegraph-authorizationpolicy.tmpl.yaml new file mode 100644 index 00000000000..64de723fd19 --- /dev/null +++ b/controllers/components/kserve/resources/servicemesh/kserve-inferencegraph-authorizationpolicy.tmpl.yaml @@ -0,0 +1,23 @@ +apiVersion: security.istio.io/v1beta1 +kind: AuthorizationPolicy +metadata: + name: kserve-inferencegraph + namespace: {{ .ControlPlane.Namespace }} + labels: + app.opendatahub.io/kserve: "true" + app.kubernetes.io/part-of: kserve +spec: + action: CUSTOM + provider: + name: {{ .AuthExtensionName }} + rules: 
+ - to: + - operation: + notPaths: + - /healthz + - /debug/pprof/ + - /metrics + - /wait-for-drain + selector: + matchLabels: + serving.kserve.io/kind: InferenceGraph diff --git a/controllers/components/kserve/resources/servicemesh/kserve-inferencegraph-envoyfilter.tmpl.yaml b/controllers/components/kserve/resources/servicemesh/kserve-inferencegraph-envoyfilter.tmpl.yaml new file mode 100644 index 00000000000..827057bf312 --- /dev/null +++ b/controllers/components/kserve/resources/servicemesh/kserve-inferencegraph-envoyfilter.tmpl.yaml @@ -0,0 +1,43 @@ +apiVersion: networking.istio.io/v1alpha3 +kind: EnvoyFilter +metadata: + name: kserve-inferencegraph-host-header + namespace: {{ .ControlPlane.Namespace }} + labels: + app.opendatahub.io/kserve: "true" + app.kubernetes.io/part-of: kserve +spec: + priority: 20 + workloadSelector: + labels: + serving.kserve.io/kind: InferenceGraph + configPatches: + - applyTo: HTTP_FILTER + match: + listener: + filterChain: + filter: + name: envoy.filters.network.http_connection_manager + patch: + operation: INSERT_BEFORE + value: + name: envoy.filters.http.lua + typed_config: + '@type': type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua + inlineCode: | + function envoy_on_request(request_handle) + local headers = request_handle:headers() + if not headers then + return + end + + local original_host = headers:get("k-original-host") + if original_host then + + port_separator = string.find(original_host, ":", 7) + if port_separator then + original_host = string.sub(original_host, 0, port_separator-1) + end + headers:replace('host', original_host) + end + end diff --git a/components/kserve/resources/servicemesh/kserve-predictor-authorizationpolicy.tmpl.yaml b/controllers/components/kserve/resources/servicemesh/kserve-predictor-authorizationpolicy.tmpl.yaml similarity index 91% rename from components/kserve/resources/servicemesh/kserve-predictor-authorizationpolicy.tmpl.yaml rename to
controllers/components/kserve/resources/servicemesh/kserve-predictor-authorizationpolicy.tmpl.yaml index a79057f26a9..49002669468 100644 --- a/components/kserve/resources/servicemesh/kserve-predictor-authorizationpolicy.tmpl.yaml +++ b/controllers/components/kserve/resources/servicemesh/kserve-predictor-authorizationpolicy.tmpl.yaml @@ -9,7 +9,7 @@ metadata: spec: action: CUSTOM provider: - name: opendatahub-odh-auth-provider + name: {{ .AuthExtensionName }} rules: - to: - operation: diff --git a/components/kserve/resources/servicemesh/routing/istio-ingress-gateway.tmpl.yaml b/controllers/components/kserve/resources/servicemesh/routing/istio-ingress-gateway.tmpl.yaml similarity index 100% rename from components/kserve/resources/servicemesh/routing/istio-ingress-gateway.tmpl.yaml rename to controllers/components/kserve/resources/servicemesh/routing/istio-ingress-gateway.tmpl.yaml diff --git a/components/kserve/resources/servicemesh/routing/istio-kserve-local-gateway.tmpl.yaml b/controllers/components/kserve/resources/servicemesh/routing/istio-kserve-local-gateway.tmpl.yaml similarity index 100% rename from components/kserve/resources/servicemesh/routing/istio-kserve-local-gateway.tmpl.yaml rename to controllers/components/kserve/resources/servicemesh/routing/istio-kserve-local-gateway.tmpl.yaml diff --git a/components/kserve/resources/servicemesh/routing/istio-local-gateway.yaml b/controllers/components/kserve/resources/servicemesh/routing/istio-local-gateway.yaml similarity index 100% rename from components/kserve/resources/servicemesh/routing/istio-local-gateway.yaml rename to controllers/components/kserve/resources/servicemesh/routing/istio-local-gateway.yaml diff --git a/components/kserve/resources/servicemesh/routing/kserve-local-gateway-svc.tmpl.yaml b/controllers/components/kserve/resources/servicemesh/routing/kserve-local-gateway-svc.tmpl.yaml similarity index 100% rename from components/kserve/resources/servicemesh/routing/kserve-local-gateway-svc.tmpl.yaml 
rename to controllers/components/kserve/resources/servicemesh/routing/kserve-local-gateway-svc.tmpl.yaml diff --git a/components/kserve/resources/servicemesh/routing/local-gateway-svc.tmpl.yaml b/controllers/components/kserve/resources/servicemesh/routing/local-gateway-svc.tmpl.yaml similarity index 100% rename from components/kserve/resources/servicemesh/routing/local-gateway-svc.tmpl.yaml rename to controllers/components/kserve/resources/servicemesh/routing/local-gateway-svc.tmpl.yaml diff --git a/components/kserve/resources/serving-install/knative-serving.tmpl.yaml b/controllers/components/kserve/resources/serving-install/knative-serving.tmpl.yaml similarity index 100% rename from components/kserve/resources/serving-install/knative-serving.tmpl.yaml rename to controllers/components/kserve/resources/serving-install/knative-serving.tmpl.yaml diff --git a/components/kserve/resources/serving-install/service-mesh-subscription.tmpl.yaml b/controllers/components/kserve/resources/serving-install/service-mesh-subscription.tmpl.yaml similarity index 100% rename from components/kserve/resources/serving-install/service-mesh-subscription.tmpl.yaml rename to controllers/components/kserve/resources/serving-install/service-mesh-subscription.tmpl.yaml diff --git a/components/kserve/resources/serving-net-istio-secret-filtering.patch.tmpl.yaml b/controllers/components/kserve/resources/serving-net-istio-secret-filtering.patch.tmpl.yaml similarity index 100% rename from components/kserve/resources/serving-net-istio-secret-filtering.patch.tmpl.yaml rename to controllers/components/kserve/resources/serving-net-istio-secret-filtering.patch.tmpl.yaml diff --git a/controllers/components/kueue/kueue.go b/controllers/components/kueue/kueue.go new file mode 100644 index 00000000000..23287e9898f --- /dev/null +++ b/controllers/components/kueue/kueue.go @@ -0,0 +1,107 @@ +package kueue + +import ( + "errors" + "fmt" + + operatorv1 "github.com/openshift/api/operator/v1" + conditionsv1 
"github.com/openshift/custom-resource-status/conditions/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + cr "github.com/opendatahub-io/opendatahub-operator/v2/pkg/componentsregistry" + odhdeploy "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/annotations" +) + +type componentHandler struct{} + +func init() { //nolint:gochecknoinits + cr.Add(&componentHandler{}) +} + +func (s *componentHandler) GetName() string { + return componentApi.KueueComponentName +} + +func (s *componentHandler) GetManagementState(dsc *dscv1.DataScienceCluster) operatorv1.ManagementState { + if dsc.Spec.Components.Kueue.ManagementState == operatorv1.Managed { + return operatorv1.Managed + } + return operatorv1.Removed +} + +func (s *componentHandler) NewCRObject(dsc *dscv1.DataScienceCluster) common.PlatformObject { + return &componentApi.Kueue{ + TypeMeta: metav1.TypeMeta{ + Kind: componentApi.KueueKind, + APIVersion: componentApi.GroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: componentApi.KueueInstanceName, + Annotations: map[string]string{ + annotations.ManagementStateAnnotation: string(s.GetManagementState(dsc)), + }, + }, + Spec: componentApi.KueueSpec{ + KueueCommonSpec: dsc.Spec.Components.Kueue.KueueCommonSpec, + }, + } +} + +func (s *componentHandler) Init(platform cluster.Platform) error { + if err := odhdeploy.ApplyParams(manifestsPath().String(), imageParamMap); err != nil { + return 
fmt.Errorf("failed to update images on path %s: %w", manifestsPath(), err) + } + + return nil +} + +func (s *componentHandler) UpdateDSCStatus(dsc *dscv1.DataScienceCluster, obj client.Object) error { + c, ok := obj.(*componentApi.Kueue) + if !ok { + return errors.New("failed to convert to Kueue") + } + + dsc.Status.InstalledComponents[LegacyComponentName] = false + dsc.Status.Components.Kueue.ManagementSpec.ManagementState = s.GetManagementState(dsc) + dsc.Status.Components.Kueue.KueueCommonStatus = nil + + nc := conditionsv1.Condition{ + Type: ReadyConditionType, + Status: corev1.ConditionFalse, + Reason: "Unknown", + Message: "Not Available", + } + + switch s.GetManagementState(dsc) { + case operatorv1.Managed: + dsc.Status.InstalledComponents[LegacyComponentName] = true + dsc.Status.Components.Kueue.KueueCommonStatus = c.Status.KueueCommonStatus.DeepCopy() + + if rc := meta.FindStatusCondition(c.Status.Conditions, status.ConditionTypeReady); rc != nil { + nc.Status = corev1.ConditionStatus(rc.Status) + nc.Reason = rc.Reason + nc.Message = rc.Message + } + + case operatorv1.Removed: + nc.Status = corev1.ConditionFalse + nc.Reason = string(operatorv1.Removed) + nc.Message = "Component ManagementState is set to " + string(operatorv1.Removed) + + default: + return fmt.Errorf("unknown state %s ", s.GetManagementState(dsc)) + } + + conditionsv1.SetStatusCondition(&dsc.Status.Conditions, nc) + + return nil +} diff --git a/controllers/components/kueue/kueue_controller.go b/controllers/components/kueue/kueue_controller.go new file mode 100644 index 00000000000..f8cbe32a105 --- /dev/null +++ b/controllers/components/kueue/kueue_controller.go @@ -0,0 +1,88 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kueue + +import ( + "context" + + promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + rbacv1 "k8s.io/api/rbac/v1" + extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + ctrl "sigs.k8s.io/controller-runtime" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/gc" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/render/kustomize" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/updatestatus" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/handlers" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/predicates/component" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/predicates/resources" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/reconciler" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" +) + +func (s *componentHandler) NewComponentReconciler(ctx context.Context, mgr ctrl.Manager) error { + _, err := reconciler.ReconcilerFor(mgr, &componentApi.Kueue{}). + // customized Owns() for Component with new predicates + Owns(&corev1.ConfigMap{}). + Owns(&corev1.Secret{}). 
+ Owns(&rbacv1.ClusterRoleBinding{}). + Owns(&rbacv1.ClusterRole{}). + Owns(&rbacv1.Role{}). + Owns(&rbacv1.RoleBinding{}). + Owns(&corev1.ServiceAccount{}). + Owns(&corev1.Service{}). + Owns(&networkingv1.NetworkPolicy{}). + Owns(&promv1.PodMonitor{}). + Owns(&promv1.PrometheusRule{}). + Owns(&admissionregistrationv1.MutatingWebhookConfiguration{}). + Owns(&admissionregistrationv1.ValidatingWebhookConfiguration{}). + Owns(&appsv1.Deployment{}, reconciler.WithPredicates(resources.NewDeploymentPredicate())). + Watches( + &extv1.CustomResourceDefinition{}, + reconciler.WithEventHandler( + handlers.ToNamed(componentApi.KueueComponentName)), + reconciler.WithPredicates( + component.ForLabel(labels.ODH.Component(LegacyComponentName), labels.True)), + ). + // Add Kueue-specific actions + WithAction(initialize). + WithAction(devFlags). + WithAction(kustomize.NewAction( + kustomize.WithCache(), + kustomize.WithLabel(labels.ODH.Component(LegacyComponentName), labels.True), + kustomize.WithLabel(labels.K8SCommon.PartOf, LegacyComponentName), + )). + WithAction(deploy.NewAction( + deploy.WithCache(), + )). + WithAction(updatestatus.NewAction()). + // must be the final action + WithAction(gc.NewAction()). 
+ Build(ctx) + + if err != nil { + return err // no need customize error, it is done in the caller main + } + + return nil +} diff --git a/controllers/components/kueue/kueue_controller_actions.go b/controllers/components/kueue/kueue_controller_actions.go new file mode 100644 index 00000000000..35ed0e1562b --- /dev/null +++ b/controllers/components/kueue/kueue_controller_actions.go @@ -0,0 +1,44 @@ +package kueue + +import ( + "context" + "fmt" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + odhtypes "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + odhdeploy "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" +) + +func initialize(_ context.Context, rr *odhtypes.ReconciliationRequest) error { + rr.Manifests = append(rr.Manifests, manifestsPath()) + + return nil +} + +func devFlags(ctx context.Context, rr *odhtypes.ReconciliationRequest) error { + kueue, ok := rr.Instance.(*componentApi.Kueue) + if !ok { + return fmt.Errorf("resource instance %v is not a componentApi.Kueue)", rr.Instance) + } + + if kueue.Spec.DevFlags == nil { + return nil + } + + // Implement devflags support logic + // If dev flags are set, update default manifests path + if len(kueue.Spec.DevFlags.Manifests) != 0 { + manifestConfig := kueue.Spec.DevFlags.Manifests[0] + if err := odhdeploy.DownloadManifests(ctx, ComponentName, manifestConfig); err != nil { + return err + } + + if manifestConfig.SourcePath != "" { + rr.Manifests[0].Path = odhdeploy.DefaultManifestPath + rr.Manifests[0].ContextDir = ComponentName + rr.Manifests[0].SourcePath = manifestConfig.SourcePath + } + } + + return nil +} diff --git a/controllers/components/kueue/kueue_support.go b/controllers/components/kueue/kueue_support.go new file mode 100644 index 00000000000..40169e4bd25 --- /dev/null +++ b/controllers/components/kueue/kueue_support.go @@ -0,0 +1,35 @@ +package kueue + +import ( + conditionsv1 
"github.com/openshift/custom-resource-status/conditions/v1" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" + odhtypes "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + odhdeploy "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" +) + +const ( + ComponentName = componentApi.KueueComponentName + + ReadyConditionType = conditionsv1.ConditionType(componentApi.KueueKind + status.ReadySuffix) + + // LegacyComponentName is the name of the component that is assigned to deployments + // via Kustomize. Since a deployment selector is immutable, we can't upgrade existing + // deployment to the new component name, so keep it around till we figure out a solution. + LegacyComponentName = "kueue" +) + +var ( + imageParamMap = map[string]string{ + "odh-kueue-controller-image": "RELATED_IMAGE_ODH_KUEUE_CONTROLLER_IMAGE", + } +) + +func manifestsPath() odhtypes.ManifestInfo { + return odhtypes.ManifestInfo{ + Path: odhdeploy.DefaultManifestPath, + ContextDir: ComponentName, + SourcePath: "rhoai", + } +} diff --git a/controllers/components/modelcontroller/modelcontroller.go b/controllers/components/modelcontroller/modelcontroller.go new file mode 100644 index 00000000000..234281939ef --- /dev/null +++ b/controllers/components/modelcontroller/modelcontroller.go @@ -0,0 +1,121 @@ +package modelcontroller + +import ( + "errors" + "fmt" + + operatorv1 "github.com/openshift/api/operator/v1" + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + dscv1 
"github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/componentsregistry" + odhdeploy "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/annotations" +) + +type componentHandler struct{} + +func init() { //nolint:gochecknoinits + componentsregistry.Add(&componentHandler{}) +} + +func (s *componentHandler) GetName() string { + return componentApi.ModelControllerComponentName +} + +func (s *componentHandler) GetManagementState(dsc *dscv1.DataScienceCluster) operatorv1.ManagementState { + if dsc.Spec.Components.ModelMeshServing.ManagementState == operatorv1.Managed || dsc.Spec.Components.Kserve.ManagementState == operatorv1.Managed { + return operatorv1.Managed + } + return operatorv1.Removed +} + +func (s *componentHandler) NewCRObject(dsc *dscv1.DataScienceCluster) common.PlatformObject { + // extra logic to set the management .spec.component.managementState, to not leave blank {} + kState := operatorv1.Removed + if dsc.Spec.Components.Kserve.ManagementState == operatorv1.Managed { + kState = operatorv1.Managed + } + + mState := operatorv1.Removed + if dsc.Spec.Components.ModelMeshServing.ManagementState == operatorv1.Managed { + mState = operatorv1.Managed + } + + return &componentApi.ModelController{ + TypeMeta: metav1.TypeMeta{ + Kind: componentApi.ModelControllerKind, + APIVersion: componentApi.GroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: componentApi.ModelControllerInstanceName, + Annotations: map[string]string{ + annotations.ManagementStateAnnotation: string(s.GetManagementState(dsc)), + }, + }, + Spec: componentApi.ModelControllerSpec{ + ModelMeshServing: &componentApi.ModelControllerMMSpec{ + ManagementState: mState, + DevFlagsSpec: 
dsc.Spec.Components.ModelMeshServing.DevFlagsSpec, + }, + Kserve: &componentApi.ModelControllerKerveSpec{ + ManagementState: kState, + DevFlagsSpec: dsc.Spec.Components.Kserve.DevFlagsSpec, + NIM: dsc.Spec.Components.Kserve.NIM, + }, + }, + } +} + +// Init for set images. +func (s *componentHandler) Init(_ cluster.Platform) error { + // Update image parameters + if err := odhdeploy.ApplyParams(manifestsPath().String(), imageParamMap); err != nil { + return fmt.Errorf("failed to update images on path %s: %w", manifestsPath(), err) + } + + return nil +} + +func (s *componentHandler) UpdateDSCStatus(dsc *dscv1.DataScienceCluster, obj client.Object) error { + c, ok := obj.(*componentApi.ModelController) + if !ok { + return errors.New("failed to convert to ModelController") + } + + nc := conditionsv1.Condition{ + Type: ReadyConditionType, + Status: corev1.ConditionFalse, + Reason: "Unknown", + Message: "Not Available", + } + + switch s.GetManagementState(dsc) { + case operatorv1.Managed: + if rc := meta.FindStatusCondition(c.Status.Conditions, status.ConditionTypeReady); rc != nil { + nc.Status = corev1.ConditionStatus(rc.Status) + nc.Reason = rc.Reason + nc.Message = rc.Message + } + + case operatorv1.Removed: + nc.Status = corev1.ConditionFalse + nc.Reason = string(operatorv1.Removed) + nc.Message = "Component ManagementState is set to " + string(operatorv1.Removed) + + default: + return fmt.Errorf("unknown state %s ", s.GetManagementState(dsc)) + } + + conditionsv1.SetStatusCondition(&dsc.Status.Conditions, nc) + + return nil +} diff --git a/controllers/components/modelcontroller/modelcontroller_actions.go b/controllers/components/modelcontroller/modelcontroller_actions.go new file mode 100644 index 00000000000..100c79833bd --- /dev/null +++ b/controllers/components/modelcontroller/modelcontroller_actions.go @@ -0,0 +1,101 @@ +/* +Copyright 2023. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package modelcontroller + +import ( + "context" + "fmt" + "strings" + + operatorv1 "github.com/openshift/api/operator/v1" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + odhtypes "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + odhdeploy "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" +) + +func initialize(_ context.Context, rr *odhtypes.ReconciliationRequest) error { + // early exist + mc, ok := rr.Instance.(*componentApi.ModelController) + if !ok { + return fmt.Errorf("resource instance %v is not a componentApi.ModelController)", rr.Instance) + } + rr.Manifests = append(rr.Manifests, manifestsPath()) + + nimState := operatorv1.Removed + if mc.Spec.Kserve.ManagementState == operatorv1.Managed { + nimState = mc.Spec.Kserve.NIM.ManagementState + } + extraParamsMap := map[string]string{ + "nim-state": strings.ToLower(string(nimState)), + } + if err := odhdeploy.ApplyParams(rr.Manifests[0].String(), nil, extraParamsMap); err != nil { + return fmt.Errorf("failed to update images on path %s: %w", rr.Manifests[0].String(), err) + } + + return nil +} + +// download devflag from kserve or modelmeshserving. 
+func devFlags(ctx context.Context, rr *odhtypes.ReconciliationRequest) error { + mc, ok := rr.Instance.(*componentApi.ModelController) + if !ok { + return fmt.Errorf("resource instance %v is not a componentApi.ModelController)", rr.Instance) + } + + l := logf.FromContext(ctx) + + var df *common.DevFlags + + ks := mc.Spec.Kserve + ms := mc.Spec.ModelMeshServing + + switch { + case ks != nil && ks.ManagementState == operatorv1.Managed && resources.HasDevFlags(ks): + l.V(3).Info("Using DevFlags from KServe") + df = ks.GetDevFlags() + case ms != nil && ms.ManagementState == operatorv1.Managed && resources.HasDevFlags(ms): + l.V(3).Info("Using DevFlags from ModelMesh") + df = ms.GetDevFlags() + default: + return nil + } + + for _, subcomponent := range df.Manifests { + if !strings.Contains(subcomponent.URI, ComponentName) && !strings.Contains(subcomponent.URI, LegacyComponentName) { + continue + } + + l.V(3).Info("Downloading manifests", "uri", subcomponent.URI) + + if err := odhdeploy.DownloadManifests(ctx, ComponentName, subcomponent); err != nil { + return err + } + + // If overlay is defined, update paths + if subcomponent.SourcePath != "" { + rr.Manifests[0].SourcePath = subcomponent.SourcePath + } + + break + } + + return nil +} diff --git a/controllers/components/modelcontroller/modelcontroller_controller.go b/controllers/components/modelcontroller/modelcontroller_controller.go new file mode 100644 index 00000000000..a31254708b3 --- /dev/null +++ b/controllers/components/modelcontroller/modelcontroller_controller.go @@ -0,0 +1,91 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
// NewComponentReconciler wires up the ModelController reconciler: owned
// resource types, a CRD watch scoped to this component's legacy label, and
// the ordered action pipeline (init -> devflags -> security -> kustomize ->
// deploy -> status -> gc).
func (s *componentHandler) NewComponentReconciler(ctx context.Context, mgr ctrl.Manager) error {
	_, err := reconciler.ReconcilerFor(
		mgr,
		&componentApi.ModelController{},
	).
		// customized Owns() for Component with new predicates
		Owns(&corev1.ConfigMap{}).
		Owns(&corev1.ServiceAccount{}).
		Owns(&promv1.ServiceMonitor{}).
		Owns(&networkingv1.NetworkPolicy{}).
		Owns(&rbacv1.Role{}).
		Owns(&rbacv1.ClusterRole{}).
		Owns(&rbacv1.RoleBinding{}).
		Owns(&rbacv1.ClusterRoleBinding{}).
		Owns(&corev1.Service{}).
		Owns(&admissionregistrationv1.ValidatingWebhookConfiguration{}).
		Owns(&templatev1.Template{}).
		Owns(&appsv1.Deployment{}, reconciler.WithPredicates(resources.NewDeploymentPredicate())).
		Watches(
			&extv1.CustomResourceDefinition{},
			reconciler.WithEventHandler(
				handlers.ToNamed(componentApi.ModelControllerInstanceName)),
			reconciler.WithPredicates(
				component.ForLabel(labels.ODH.Component(LegacyComponentName), labels.True)),
		).
		// Add ModelController specific actions
		WithAction(initialize).
		WithAction(devFlags). // devFlags triggered by changes in DSC kserve and ModelMeshServing, also update .status.devflagurl
		WithAction(security.NewUpdatePodSecurityRoleBindingAction(serviceAccounts)).
		WithAction(kustomize.NewAction(
			kustomize.WithCache(),
			kustomize.WithLabel(labels.ODH.Component(LegacyComponentName), labels.True),
			kustomize.WithLabel(labels.K8SCommon.PartOf, LegacyComponentName),
		)).
		WithAction(deploy.NewAction(
			deploy.WithCache(),
		)).
		WithAction(updatestatus.NewAction()).
		// must be the final action
		WithAction(gc.NewAction()).
		Build(ctx) // include GenerationChangedPredicate no need set in each Owns() above

	if err != nil {
		return err // no need customize error, it is done in the caller main
	}

	return nil
}
+ LegacyComponentName = "odh-model-controller" +) + +var ( + imageParamMap = map[string]string{ + "odh-model-controller": "RELATED_IMAGE_ODH_MODEL_CONTROLLER_IMAGE", + } + + serviceAccounts = map[cluster.Platform][]string{ + cluster.SelfManagedRhoai: {LegacyComponentName}, + cluster.ManagedRhoai: {LegacyComponentName}, + cluster.OpenDataHub: {LegacyComponentName}, + cluster.Unknown: {LegacyComponentName}, + } +) + +func manifestsPath() types.ManifestInfo { + return types.ManifestInfo{ + Path: odhdeploy.DefaultManifestPath, + ContextDir: ComponentName, + SourcePath: "base", + } +} diff --git a/controllers/components/modelmeshserving/modelmeshserving.go b/controllers/components/modelmeshserving/modelmeshserving.go new file mode 100644 index 00000000000..22bbb6aceea --- /dev/null +++ b/controllers/components/modelmeshserving/modelmeshserving.go @@ -0,0 +1,109 @@ +package modelmeshserving + +import ( + "errors" + "fmt" + + operatorv1 "github.com/openshift/api/operator/v1" + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/componentsregistry" + odhdeploy "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/annotations" +) + +type componentHandler struct{} + +func init() { //nolint:gochecknoinits + componentsregistry.Add(&componentHandler{}) +} + +func (s *componentHandler) 
GetName() string { + return componentApi.ModelMeshServingComponentName +} + +func (s *componentHandler) GetManagementState(dsc *dscv1.DataScienceCluster) operatorv1.ManagementState { + if dsc.Spec.Components.ModelMeshServing.ManagementState == operatorv1.Managed { + return operatorv1.Managed + } + return operatorv1.Removed +} + +func (s *componentHandler) Init(_ cluster.Platform) error { + // Update image parameters + if err := odhdeploy.ApplyParams(manifestsPath().String(), imageParamMap); err != nil { + return fmt.Errorf("failed to update images on path %s: %w", manifestsPath(), err) + } + + return nil +} + +// for DSC to get compoment ModelMeshServing's CR. +func (s *componentHandler) NewCRObject(dsc *dscv1.DataScienceCluster) common.PlatformObject { + return &componentApi.ModelMeshServing{ + TypeMeta: metav1.TypeMeta{ + Kind: componentApi.ModelMeshServingKind, + APIVersion: componentApi.GroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: componentApi.ModelMeshServingInstanceName, + Annotations: map[string]string{ + annotations.ManagementStateAnnotation: string(s.GetManagementState(dsc)), + }, + }, + Spec: componentApi.ModelMeshServingSpec{ + ModelMeshServingCommonSpec: dsc.Spec.Components.ModelMeshServing.ModelMeshServingCommonSpec, + }, + } +} + +func (s *componentHandler) UpdateDSCStatus(dsc *dscv1.DataScienceCluster, obj client.Object) error { + c, ok := obj.(*componentApi.ModelMeshServing) + if !ok { + return errors.New("failed to convert to ModelMeshServing") + } + + dsc.Status.InstalledComponents[LegacyComponentName] = false + dsc.Status.Components.ModelMeshServing.ManagementSpec.ManagementState = s.GetManagementState(dsc) + dsc.Status.Components.ModelMeshServing.ModelMeshServingCommonStatus = nil + + nc := conditionsv1.Condition{ + Type: ReadyConditionType, + Status: corev1.ConditionFalse, + Reason: "Unknown", + Message: "Not Available", + } + + switch s.GetManagementState(dsc) { + case operatorv1.Managed: + 
dsc.Status.InstalledComponents[LegacyComponentName] = true + dsc.Status.Components.ModelMeshServing.ModelMeshServingCommonStatus = c.Status.ModelMeshServingCommonStatus.DeepCopy() + + if rc := meta.FindStatusCondition(c.Status.Conditions, status.ConditionTypeReady); rc != nil { + nc.Status = corev1.ConditionStatus(rc.Status) + nc.Reason = rc.Reason + nc.Message = rc.Message + } + + case operatorv1.Removed: + nc.Status = corev1.ConditionFalse + nc.Reason = string(operatorv1.Removed) + nc.Message = "Component ManagementState is set to " + string(operatorv1.Removed) + + default: + return fmt.Errorf("unknown state %s ", s.GetManagementState(dsc)) + } + + conditionsv1.SetStatusCondition(&dsc.Status.Conditions, nc) + + return nil +} diff --git a/controllers/components/modelmeshserving/modelmeshserving_actions.go b/controllers/components/modelmeshserving/modelmeshserving_actions.go new file mode 100644 index 00000000000..45567294739 --- /dev/null +++ b/controllers/components/modelmeshserving/modelmeshserving_actions.go @@ -0,0 +1,69 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package modelmeshserving + +import ( + "context" + "fmt" + "strings" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + odhtypes "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + odhdeploy "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" +) + +func initialize(_ context.Context, rr *odhtypes.ReconciliationRequest) error { + rr.Manifests = append(rr.Manifests, manifestsPath()) + + return nil +} + +func devFlags(ctx context.Context, rr *odhtypes.ReconciliationRequest) error { + mm, ok := rr.Instance.(*componentApi.ModelMeshServing) + if !ok { + return fmt.Errorf("resource instance %v is not a componentApi.ModelMeshServing)", rr.Instance) + } + + df := mm.GetDevFlags() + if df == nil { + return nil + } + if len(df.Manifests) == 0 { + return nil + } + + // Implement devflags support logic + // If dev flags are set, update default manifests path + for _, subcomponent := range df.Manifests { + if !strings.Contains(subcomponent.URI, ComponentName) && !strings.Contains(subcomponent.URI, LegacyComponentName) { + continue + } + + // Download modelmeshserving + if err := odhdeploy.DownloadManifests(ctx, ComponentName, subcomponent); err != nil { + return err + } + // If overlay is defined, update paths + if subcomponent.SourcePath != "" { + rr.Manifests[0].SourcePath = subcomponent.SourcePath + } + + break + } + + return nil +} diff --git a/controllers/components/modelmeshserving/modelmeshserving_controller.go b/controllers/components/modelmeshserving/modelmeshserving_controller.go new file mode 100644 index 00000000000..662ae1206fb --- /dev/null +++ b/controllers/components/modelmeshserving/modelmeshserving_controller.go @@ -0,0 +1,90 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
// NewComponentReconciler wires up the ModelMeshServing reconciler: owned
// resource types (ClusterRoles with aggregation rules are ignored to avoid
// fighting the aggregator), a CRD watch scoped to the legacy component label,
// and the ordered action pipeline.
func (s *componentHandler) NewComponentReconciler(ctx context.Context, mgr ctrl.Manager) error {
	_, err := reconciler.ReconcilerFor(
		mgr,
		&componentApi.ModelMeshServing{},
	).
		// customized Owns() for Component with new predicates
		Owns(&corev1.ConfigMap{}).
		Owns(&corev1.ServiceAccount{}).
		Owns(&promv1.ServiceMonitor{}).
		Owns(&networkingv1.NetworkPolicy{}).
		Owns(&admissionregistrationv1.ValidatingWebhookConfiguration{}).
		Owns(&corev1.Service{}).
		Owns(&rbacv1.Role{}).
		Owns(&rbacv1.ClusterRole{}, reconciler.WithPredicates(clusterrole.IgnoreIfAggregationRule())).
		Owns(&rbacv1.RoleBinding{}).
		Owns(&rbacv1.ClusterRoleBinding{}).
		Owns(&appsv1.Deployment{}, reconciler.WithPredicates(resources.NewDeploymentPredicate())).
		Watches(
			&extv1.CustomResourceDefinition{},
			reconciler.WithEventHandler(
				handlers.ToNamed(componentApi.ModelMeshServingInstanceName)),
			reconciler.WithPredicates(
				component.ForLabel(labels.ODH.Component(LegacyComponentName), labels.True)),
		).
		// Add ModelMeshServing specific actions
		WithAction(initialize).
		WithAction(devFlags).
		WithAction(security.NewUpdatePodSecurityRoleBindingAction(serviceAccounts)).
		WithAction(kustomize.NewAction(
			kustomize.WithCache(),
			kustomize.WithLabel(labels.ODH.Component(LegacyComponentName), labels.True),
			kustomize.WithLabel(labels.K8SCommon.PartOf, LegacyComponentName),
		)).
		WithAction(deploy.NewAction(
			deploy.WithCache(),
		)).
		WithAction(updatestatus.NewAction()).
		// must be the final action
		WithAction(gc.NewAction()).
		Build(ctx) // include GenerationChangedPredicate no need set in each Owns() above

	if err != nil {
		return err // no need customize error, it is done in the caller main
	}

	return nil
}
+ LegacyComponentName = "model-mesh" +) + +var ( + imageParamMap = map[string]string{ + "odh-mm-rest-proxy": "RELATED_IMAGE_ODH_MM_REST_PROXY_IMAGE", + "odh-modelmesh-runtime-adapter": "RELATED_IMAGE_ODH_MODELMESH_RUNTIME_ADAPTER_IMAGE", + "odh-modelmesh": "RELATED_IMAGE_ODH_MODELMESH_IMAGE", + "odh-modelmesh-controller": "RELATED_IMAGE_ODH_MODELMESH_CONTROLLER_IMAGE", + } + + serviceAccounts = map[cluster.Platform][]string{ + cluster.SelfManagedRhoai: {"modelmesh", "modelmesh-controller"}, + cluster.ManagedRhoai: {"modelmesh", "modelmesh-controller"}, + cluster.OpenDataHub: {"modelmesh", "modelmesh-controller"}, + cluster.Unknown: {"modelmesh", "modelmesh-controller"}, + } +) + +func manifestsPath() odhtypes.ManifestInfo { + return odhtypes.ManifestInfo{ + Path: odhdeploy.DefaultManifestPath, + ContextDir: ComponentName, + SourcePath: "overlays/odh", + } +} diff --git a/controllers/components/modelregistry/modelregistry.go b/controllers/components/modelregistry/modelregistry.go new file mode 100644 index 00000000000..31b13a1ed2c --- /dev/null +++ b/controllers/components/modelregistry/modelregistry.go @@ -0,0 +1,109 @@ +package modelregistry + +import ( + "errors" + "fmt" + + operatorv1 "github.com/openshift/api/operator/v1" + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + cr "github.com/opendatahub-io/opendatahub-operator/v2/pkg/componentsregistry" + odhdeploy 
"github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/annotations" +) + +type componentHandler struct{} + +func init() { //nolint:gochecknoinits + cr.Add(&componentHandler{}) +} + +func (s *componentHandler) GetName() string { + return componentApi.ModelRegistryComponentName +} + +func (s *componentHandler) GetManagementState(dsc *dscv1.DataScienceCluster) operatorv1.ManagementState { + if dsc.Spec.Components.ModelRegistry.ManagementState == operatorv1.Managed { + return operatorv1.Managed + } + return operatorv1.Removed +} + +func (s *componentHandler) Init(_ cluster.Platform) error { + mi := baseManifestInfo(BaseManifestsSourcePath) + + if err := odhdeploy.ApplyParams(mi.String(), imagesMap, extraParamsMap); err != nil { + return fmt.Errorf("failed to update params on path %s: %w", mi, err) + } + + return nil +} + +func (s *componentHandler) NewCRObject(dsc *dscv1.DataScienceCluster) common.PlatformObject { + return &componentApi.ModelRegistry{ + TypeMeta: metav1.TypeMeta{ + Kind: componentApi.ModelRegistryKind, + APIVersion: componentApi.GroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: componentApi.ModelRegistryInstanceName, + Annotations: map[string]string{ + annotations.ManagementStateAnnotation: string(s.GetManagementState(dsc)), + }, + }, + Spec: componentApi.ModelRegistrySpec{ + ModelRegistryCommonSpec: dsc.Spec.Components.ModelRegistry.ModelRegistryCommonSpec, + }, + } +} + +func (s *componentHandler) UpdateDSCStatus(dsc *dscv1.DataScienceCluster, obj client.Object) error { + c, ok := obj.(*componentApi.ModelRegistry) + if !ok { + return errors.New("failed to convert to ModelRegistry") + } + + dsc.Status.InstalledComponents[LegacyComponentName] = false + dsc.Status.Components.ModelRegistry.ManagementSpec.ManagementState = s.GetManagementState(dsc) + dsc.Status.Components.ModelRegistry.ModelRegistryCommonStatus = nil + + nc := conditionsv1.Condition{ + Type: 
ReadyConditionType, + Status: corev1.ConditionFalse, + Reason: "Unknown", + Message: "Not Available", + } + + switch s.GetManagementState(dsc) { + case operatorv1.Managed: + dsc.Status.InstalledComponents[LegacyComponentName] = true + dsc.Status.Components.ModelRegistry.ModelRegistryCommonStatus = c.Status.ModelRegistryCommonStatus.DeepCopy() + + if rc := meta.FindStatusCondition(c.Status.Conditions, status.ConditionTypeReady); rc != nil { + nc.Status = corev1.ConditionStatus(rc.Status) + nc.Reason = rc.Reason + nc.Message = rc.Message + } + + case operatorv1.Removed: + nc.Status = corev1.ConditionFalse + nc.Reason = string(operatorv1.Removed) + nc.Message = "Component ManagementState is set to " + string(operatorv1.Removed) + + default: + return fmt.Errorf("unknown state %s ", s.GetManagementState(dsc)) + } + + conditionsv1.SetStatusCondition(&dsc.Status.Conditions, nc) + + return nil +} diff --git a/controllers/components/modelregistry/modelregistry_controller.go b/controllers/components/modelregistry/modelregistry_controller.go new file mode 100644 index 00000000000..800ef932028 --- /dev/null +++ b/controllers/components/modelregistry/modelregistry_controller.go @@ -0,0 +1,111 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
// NewComponentReconciler wires up the ModelRegistry reconciler. Besides the
// owned resource types it watches DSCInitialization (the SMM configuration
// depends on it), Namespaces, CRDs labeled with the legacy component label,
// extra ClusterRoles shipped in the manifests but not owned by the operator,
// and the ServiceMeshMember GVK dynamically (its CRD may not exist at start).
func (s *componentHandler) NewComponentReconciler(ctx context.Context, mgr ctrl.Manager) error {
	_, err := reconciler.ReconcilerFor(mgr, &componentApi.ModelRegistry{}).
		Owns(&corev1.ConfigMap{}).
		Owns(&corev1.Secret{}).
		Owns(&rbacv1.Role{}).
		Owns(&rbacv1.RoleBinding{}).
		Owns(&rbacv1.ClusterRole{}).
		Owns(&rbacv1.ClusterRoleBinding{}).
		Owns(&corev1.Service{}).
		Owns(&corev1.ServiceAccount{}).
		Owns(&appsv1.Deployment{}, reconciler.WithPredicates(resources.NewDeploymentPredicate())).
		Owns(&admissionregistrationv1.MutatingWebhookConfiguration{}).
		Owns(&admissionregistrationv1.ValidatingWebhookConfiguration{}).
		// MR also depends on DSCInitialization to properly configure the SMM
		// resource
		Watches(
			&dsciv1.DSCInitialization{},
			reconciler.WithEventHandler(handlers.ToNamed(componentApi.ModelRegistryInstanceName)),
			reconciler.WithPredicates(generation.New()),
		).
		Watches(&corev1.Namespace{}).
		Watches(
			&extv1.CustomResourceDefinition{},
			reconciler.WithEventHandler(
				handlers.ToNamed(componentApi.ModelRegistryInstanceName)),
			reconciler.WithPredicates(
				component.ForLabel(labels.ODH.Component(LegacyComponentName), labels.True)),
		).
		// Some ClusterRoles are part of the component deployment, but not owned by
		// the operator (overlays/odh/extras), so in order to properly keep them
		// in sync with the manifests, we should also create an additional watcher
		Watches(&rbacv1.ClusterRole{}).
		// This component adds a ServiceMeshMember resource to the registries
		// namespaces that may not be known when the controller is started, hence
		// it should be watched dynamically
		WatchesGVK(gvk.ServiceMeshMember, reconciler.Dynamic()).
		// actions
		WithAction(checkPreConditions).
		WithAction(initialize).
		WithAction(configureDependencies).
		WithAction(template.NewAction(
			template.WithCache(),
		)).
		WithAction(kustomize.NewAction(
			kustomize.WithCache(),
			kustomize.WithLabel(labels.ODH.Component(LegacyComponentName), labels.True),
			kustomize.WithLabel(labels.K8SCommon.PartOf, LegacyComponentName),
		)).
		WithAction(customizeResources).
		WithAction(deploy.NewAction(
			deploy.WithCache(),
		)).
		WithAction(updatestatus.NewAction()).
		WithAction(updateStatus).
		// must be the final action; SMMs are managed per-registry-namespace and
		// must not be garbage-collected here
		WithAction(gc.NewAction(
			gc.WithUnremovables(gvk.ServiceMeshMember),
		)).
		Build(ctx)

	if err != nil {
		return fmt.Errorf("could not create the model registry controller: %w", err)
	}

	return nil
}
status.ServiceMeshNotConfiguredReason,
+		Message:            status.ServiceMeshNotConfiguredMessage,
+		ObservedGeneration: s.ObservedGeneration,
+	})
+
+	return odherrors.NewStopError(status.ServiceMeshNotConfiguredMessage)
+}
+
+func initialize(ctx context.Context, rr *odhtypes.ReconciliationRequest) error {
+	mr, ok := rr.Instance.(*componentApi.ModelRegistry)
+	if !ok {
+		return fmt.Errorf("resource instance %v is not a componentApi.ModelRegistry", rr.Instance)
+	}
+
+	rr.Manifests = []odhtypes.ManifestInfo{
+		baseManifestInfo(BaseManifestsSourcePath),
+		extraManifestInfo(BaseManifestsSourcePath),
+	}
+
+	rr.Templates = []odhtypes.TemplateInfo{{
+		FS:   resourcesFS,
+		Path: ServiceMeshMemberTemplate,
+	}}
+
+	df := mr.GetDevFlags()
+
+	if df == nil {
+		return nil
+	}
+	if len(df.Manifests) == 0 {
+		return nil
+	}
+	if len(df.Manifests) > 1 {
+		return fmt.Errorf("unexpected number of manifests found: %d, expected 1", len(df.Manifests))
+	}
+
+	if err := odhdeploy.DownloadManifests(ctx, ComponentName, df.Manifests[0]); err != nil {
+		return err
+	}
+
+	if df.Manifests[0].SourcePath != "" {
+		rr.Manifests = []odhtypes.ManifestInfo{
+			baseManifestInfo(df.Manifests[0].SourcePath),
+			extraManifestInfo(df.Manifests[0].SourcePath),
+		}
+	}
+
+	return nil
+}
+
+func configureDependencies(ctx context.Context, rr *odhtypes.ReconciliationRequest) error {
+	mr, ok := rr.Instance.(*componentApi.ModelRegistry)
+	if !ok {
+		return fmt.Errorf("resource instance %v is not a componentApi.ModelRegistry", rr.Instance)
+	}
+
+	// Namespace
+
+	if err := rr.AddResources(
+		&corev1.Namespace{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: mr.Spec.RegistriesNamespace,
+			},
+		},
+	); err != nil {
+		return fmt.Errorf("failed to add namespace %s to manifests: %w", mr.Spec.RegistriesNamespace, err)
+	}
+
+	// Secret
+
+	// TODO: this should be done by a dedicated controller
+	is, err := cluster.FindDefaultIngressSecret(ctx, rr.Client)
+	if err != nil {
+		return fmt.Errorf("failed to find default ingress secret for
model registry: %w", err) + } + + if err := rr.AddResources( + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: DefaultModelRegistryCert, + Namespace: rr.DSCI.Spec.ServiceMesh.ControlPlane.Namespace, + }, + Data: is.Data, + Type: is.Type, + }, + ); err != nil { + return fmt.Errorf("failed to add default ingress secret for model registry: %w", err) + } + + return nil +} + +func customizeResources(_ context.Context, rr *odhtypes.ReconciliationRequest) error { + // Some ClusterRoles are part of the component deployment, but not owned by the + // operator (overlays/odh/extras) and we expect them to be left on the cluster + // even if the component is removed, hence we should mark them a not managed by + // the operator. By doing so the deploy action won't set ownership and won't + // patch them, just recreate if missing + for i := range rr.Resources { + r := rr.Resources[i] + + switch { + case r.GroupVersionKind() == gvk.ClusterRole && r.GetName() == "modelregistry-editor-role": + resources.SetAnnotation(&rr.Resources[i], annotations.ManagedByODHOperator, "false") + case r.GroupVersionKind() == gvk.ClusterRole && r.GetName() == "modelregistry-viewer-role": + resources.SetAnnotation(&rr.Resources[i], annotations.ManagedByODHOperator, "false") + } + } + + return nil +} + +func updateStatus(_ context.Context, rr *odhtypes.ReconciliationRequest) error { + mr, ok := rr.Instance.(*componentApi.ModelRegistry) + if !ok { + return errors.New("instance is not of type *odhTypes.ModelRegistry") + } + + mr.Status.RegistriesNamespace = mr.Spec.RegistriesNamespace + + return nil +} diff --git a/controllers/components/modelregistry/modelregistry_support.go b/controllers/components/modelregistry/modelregistry_support.go new file mode 100644 index 00000000000..8e330973962 --- /dev/null +++ b/controllers/components/modelregistry/modelregistry_support.go @@ -0,0 +1,60 @@ +package modelregistry + +import ( + "embed" + "path" + + conditionsv1 
"github.com/openshift/custom-resource-status/conditions/v1" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" + odhtypes "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" +) + +const ( + ComponentName = componentApi.ModelRegistryComponentName + + ReadyConditionType = conditionsv1.ConditionType(componentApi.ModelRegistryKind + status.ReadySuffix) + + DefaultModelRegistriesNamespace = "rhoai-model-registries" + DefaultModelRegistryCert = "default-modelregistry-cert" + BaseManifestsSourcePath = "overlays/odh" + ServiceMeshMemberTemplate = "resources/servicemesh-member.tmpl.yaml" + + // LegacyComponentName is the name of the component that is assigned to deployments + // via Kustomize. Since a deployment selector is immutable, we can't upgrade existing + // deployment to the new component name, so keep it around till we figure out a solution. 
+ LegacyComponentName = "model-registry-operator" +) + +var ( + imagesMap = map[string]string{ + "IMAGES_MODELREGISTRY_OPERATOR": "RELATED_IMAGE_ODH_MODEL_REGISTRY_OPERATOR_IMAGE", + "IMAGES_GRPC_SERVICE": "RELATED_IMAGE_ODH_MLMD_GRPC_SERVER_IMAGE", + "IMAGES_REST_SERVICE": "RELATED_IMAGE_ODH_MODEL_REGISTRY_IMAGE", + } + + extraParamsMap = map[string]string{ + "DEFAULT_CERT": DefaultModelRegistryCert, + } +) + +//go:embed resources +var resourcesFS embed.FS + +func baseManifestInfo(sourcePath string) odhtypes.ManifestInfo { + return odhtypes.ManifestInfo{ + Path: deploy.DefaultManifestPath, + ContextDir: ComponentName, + SourcePath: sourcePath, + } +} + +func extraManifestInfo(sourcePath string) odhtypes.ManifestInfo { + return odhtypes.ManifestInfo{ + Path: deploy.DefaultManifestPath, + ContextDir: ComponentName, + SourcePath: path.Join(sourcePath, "extras"), + } +} diff --git a/controllers/components/modelregistry/resources/servicemesh-member.tmpl.yaml b/controllers/components/modelregistry/resources/servicemesh-member.tmpl.yaml new file mode 100644 index 00000000000..83a5b1c292a --- /dev/null +++ b/controllers/components/modelregistry/resources/servicemesh-member.tmpl.yaml @@ -0,0 +1,9 @@ +apiVersion: maistra.io/v1 +kind: ServiceMeshMember +metadata: + name: default + namespace: {{.Component.Spec.RegistriesNamespace}} +spec: + controlPlaneRef: + namespace: {{ .DSCI.Spec.ServiceMesh.ControlPlane.Namespace }} + name: {{ .DSCI.Spec.ServiceMesh.ControlPlane.Name }} diff --git a/controllers/components/ray/ray.go b/controllers/components/ray/ray.go new file mode 100644 index 00000000000..551961a84ab --- /dev/null +++ b/controllers/components/ray/ray.go @@ -0,0 +1,107 @@ +package ray + +import ( + "errors" + "fmt" + + operatorv1 "github.com/openshift/api/operator/v1" + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + 
"sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + cr "github.com/opendatahub-io/opendatahub-operator/v2/pkg/componentsregistry" + odhdeploy "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/annotations" +) + +type componentHandler struct{} + +func init() { //nolint:gochecknoinits + cr.Add(&componentHandler{}) +} + +func (s *componentHandler) GetName() string { + return componentApi.RayComponentName +} + +func (s *componentHandler) GetManagementState(dsc *dscv1.DataScienceCluster) operatorv1.ManagementState { + if dsc.Spec.Components.Ray.ManagementState == operatorv1.Managed { + return operatorv1.Managed + } + return operatorv1.Removed +} + +func (s *componentHandler) NewCRObject(dsc *dscv1.DataScienceCluster) common.PlatformObject { + return &componentApi.Ray{ + TypeMeta: metav1.TypeMeta{ + Kind: componentApi.RayKind, + APIVersion: componentApi.GroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: componentApi.RayInstanceName, + Annotations: map[string]string{ + annotations.ManagementStateAnnotation: string(s.GetManagementState(dsc)), + }, + }, + Spec: componentApi.RaySpec{ + RayCommonSpec: dsc.Spec.Components.Ray.RayCommonSpec, + }, + } +} + +func (s *componentHandler) Init(_ cluster.Platform) error { + if err := odhdeploy.ApplyParams(manifestPath().String(), imageParamMap); err != nil { + return fmt.Errorf("failed to update images on path %s: %w", manifestPath(), err) + } + + return nil +} + +func (s *componentHandler) UpdateDSCStatus(dsc *dscv1.DataScienceCluster, obj client.Object) error { + c, 
ok := obj.(*componentApi.Ray) + if !ok { + return errors.New("failed to convert to Ray") + } + + dsc.Status.InstalledComponents[LegacyComponentName] = false + dsc.Status.Components.Ray.ManagementSpec.ManagementState = s.GetManagementState(dsc) + dsc.Status.Components.Ray.RayCommonStatus = nil + + nc := conditionsv1.Condition{ + Type: ReadyConditionType, + Status: corev1.ConditionFalse, + Reason: "Unknown", + Message: "Not Available", + } + + switch s.GetManagementState(dsc) { + case operatorv1.Managed: + dsc.Status.InstalledComponents[LegacyComponentName] = true + dsc.Status.Components.Ray.RayCommonStatus = c.Status.RayCommonStatus.DeepCopy() + + if rc := meta.FindStatusCondition(c.Status.Conditions, status.ConditionTypeReady); rc != nil { + nc.Status = corev1.ConditionStatus(rc.Status) + nc.Reason = rc.Reason + nc.Message = rc.Message + } + + case operatorv1.Removed: + nc.Status = corev1.ConditionFalse + nc.Reason = string(operatorv1.Removed) + nc.Message = "Component ManagementState is set to " + string(operatorv1.Removed) + + default: + return fmt.Errorf("unknown state %s ", s.GetManagementState(dsc)) + } + + conditionsv1.SetStatusCondition(&dsc.Status.Conditions, nc) + + return nil +} diff --git a/controllers/components/ray/ray_controller.go b/controllers/components/ray/ray_controller.go new file mode 100644 index 00000000000..adc4ad65271 --- /dev/null +++ b/controllers/components/ray/ray_controller.go @@ -0,0 +1,81 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ray + +import ( + "context" + + securityv1 "github.com/openshift/api/security/v1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + ctrl "sigs.k8s.io/controller-runtime" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/gc" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/render/kustomize" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/updatestatus" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/handlers" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/predicates/component" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/predicates/resources" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/reconciler" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" +) + +func (s *componentHandler) NewComponentReconciler(ctx context.Context, mgr ctrl.Manager) error { + _, err := reconciler.ReconcilerFor(mgr, &componentApi.Ray{}). + // customized Owns() for Component with new predicates + Owns(&corev1.ConfigMap{}). + Owns(&corev1.Secret{}). + Owns(&rbacv1.ClusterRoleBinding{}). + Owns(&rbacv1.ClusterRole{}). + Owns(&rbacv1.Role{}). + Owns(&rbacv1.RoleBinding{}). + Owns(&corev1.ServiceAccount{}). + Owns(&appsv1.Deployment{}, reconciler.WithPredicates(resources.NewDeploymentPredicate())). + Owns(&securityv1.SecurityContextConstraints{}). 
+ Watches( + &extv1.CustomResourceDefinition{}, + reconciler.WithEventHandler( + handlers.ToNamed(componentApi.RayInstanceName)), + reconciler.WithPredicates( + component.ForLabel(labels.ODH.Component(LegacyComponentName), labels.True)), + ). + // Add Ray-specific actions + WithAction(initialize). + WithAction(devFlags). + WithAction(kustomize.NewAction( + kustomize.WithCache(), + kustomize.WithLabel(labels.ODH.Component(LegacyComponentName), labels.True), + kustomize.WithLabel(labels.K8SCommon.PartOf, LegacyComponentName), + )). + WithAction(deploy.NewAction( + deploy.WithCache(), + )). + WithAction(updatestatus.NewAction()). + // must be the final action + WithAction(gc.NewAction()). + Build(ctx) + + if err != nil { + return err // no need customize error, it is done in the caller main + } + + return nil +} diff --git a/controllers/components/ray/ray_controller_actions.go b/controllers/components/ray/ray_controller_actions.go new file mode 100644 index 00000000000..92137c00bda --- /dev/null +++ b/controllers/components/ray/ray_controller_actions.go @@ -0,0 +1,61 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package ray
+
+import (
+	"context"
+	"fmt"
+
+	componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1"
+	odhtypes "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types"
+	odhdeploy "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy"
+)
+
+func initialize(ctx context.Context, rr *odhtypes.ReconciliationRequest) error {
+	rr.Manifests = append(rr.Manifests, manifestPath())
+
+	if err := odhdeploy.ApplyParams(manifestPath().String(), nil, map[string]string{"namespace": rr.DSCI.Spec.ApplicationsNamespace}); err != nil {
+		return fmt.Errorf("failed to update params.env from %s: %w", manifestPath(), err)
+	}
+	return nil
+}
+
+func devFlags(ctx context.Context, rr *odhtypes.ReconciliationRequest) error {
+	ray, ok := rr.Instance.(*componentApi.Ray)
+	if !ok {
+		return fmt.Errorf("resource instance %v is not a componentApi.Ray", rr.Instance)
+	}
+
+	if ray.Spec.DevFlags == nil {
+		return nil
+	}
+	// Implement devflags support logic
+	// If dev flags are set, update default manifests path
+	if len(ray.Spec.DevFlags.Manifests) != 0 {
+		manifestConfig := ray.Spec.DevFlags.Manifests[0]
+		if err := odhdeploy.DownloadManifests(ctx, ComponentName, manifestConfig); err != nil {
+			return err
+		}
+		if manifestConfig.SourcePath != "" {
+			rr.Manifests[0].Path = odhdeploy.DefaultManifestPath
+			rr.Manifests[0].ContextDir = ComponentName
+			rr.Manifests[0].SourcePath = manifestConfig.SourcePath
+		}
+	}
+	// TODO: Implement devflags logmode logic
+	return nil
+}
diff --git a/controllers/components/ray/ray_support.go b/controllers/components/ray/ray_support.go
new file mode 100644
index 00000000000..34a9f33136f
--- /dev/null
+++ b/controllers/components/ray/ray_support.go
@@ -0,0 +1,35 @@
+package ray
+
+import (
+	conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1"
+
+	componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1"
+
"github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + odhdeploy "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" +) + +const ( + ComponentName = componentApi.RayComponentName + + ReadyConditionType = conditionsv1.ConditionType(componentApi.RayKind + status.ReadySuffix) + + // LegacyComponentName is the name of the component that is assigned to deployments + // via Kustomize. Since a deployment selector is immutable, we can't upgrade existing + // deployment to the new component name, so keep it around till we figure out a solution. + LegacyComponentName = "ray" +) + +var ( + imageParamMap = map[string]string{ + "odh-kuberay-operator-controller-image": "RELATED_IMAGE_ODH_KUBERAY_OPERATOR_CONTROLLER_IMAGE", + } +) + +func manifestPath() types.ManifestInfo { + return types.ManifestInfo{ + Path: odhdeploy.DefaultManifestPath, + ContextDir: ComponentName, + SourcePath: "openshift", + } +} diff --git a/controllers/components/suite_test.go b/controllers/components/suite_test.go new file mode 100644 index 00000000000..0fe54e8efff --- /dev/null +++ b/controllers/components/suite_test.go @@ -0,0 +1,80 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package components_test + +//revive:disable:dot-imports +import ( + "path/filepath" + "testing" + + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +var cfg *rest.Config +var k8sClient client.Client +var testEnv *envtest.Environment + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Controller Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: true, + } + + var err error + // cfg is defined in this file globally. 
+ cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + err = componentApi.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + //+kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + +}, 60) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/controllers/components/trainingoperator/trainingoperator.go b/controllers/components/trainingoperator/trainingoperator.go new file mode 100644 index 00000000000..316d686b0c2 --- /dev/null +++ b/controllers/components/trainingoperator/trainingoperator.go @@ -0,0 +1,106 @@ +package trainingoperator + +import ( + "errors" + "fmt" + + operatorv1 "github.com/openshift/api/operator/v1" + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + cr "github.com/opendatahub-io/opendatahub-operator/v2/pkg/componentsregistry" + odhdeploy "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/annotations" +) + +type componentHandler struct{} + +func init() { //nolint:gochecknoinits + cr.Add(&componentHandler{}) +} + +func (s *componentHandler) GetName() string { + return componentApi.TrainingOperatorComponentName +} + +func (s 
*componentHandler) GetManagementState(dsc *dscv1.DataScienceCluster) operatorv1.ManagementState { + if dsc.Spec.Components.TrainingOperator.ManagementState == operatorv1.Managed { + return operatorv1.Managed + } + return operatorv1.Removed +} +func (s *componentHandler) NewCRObject(dsc *dscv1.DataScienceCluster) common.PlatformObject { + return &componentApi.TrainingOperator{ + TypeMeta: metav1.TypeMeta{ + Kind: componentApi.TrainingOperatorKind, + APIVersion: componentApi.GroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: componentApi.TrainingOperatorInstanceName, + Annotations: map[string]string{ + annotations.ManagementStateAnnotation: string(s.GetManagementState(dsc)), + }, + }, + Spec: componentApi.TrainingOperatorSpec{ + TrainingOperatorCommonSpec: dsc.Spec.Components.TrainingOperator.TrainingOperatorCommonSpec, + }, + } +} + +func (s *componentHandler) Init(platform cluster.Platform) error { + if err := odhdeploy.ApplyParams(manifestPath().String(), imageParamMap); err != nil { + return fmt.Errorf("failed to update images on path %s: %w", manifestPath(), err) + } + + return nil +} + +func (s *componentHandler) UpdateDSCStatus(dsc *dscv1.DataScienceCluster, obj client.Object) error { + c, ok := obj.(*componentApi.TrainingOperator) + if !ok { + return errors.New("failed to convert to TrainingOperator") + } + + dsc.Status.InstalledComponents[LegacyComponentName] = false + dsc.Status.Components.TrainingOperator.ManagementSpec.ManagementState = s.GetManagementState(dsc) + dsc.Status.Components.TrainingOperator.TrainingOperatorCommonStatus = nil + + nc := conditionsv1.Condition{ + Type: ReadyConditionType, + Status: corev1.ConditionFalse, + Reason: "Unknown", + Message: "Not Available", + } + + switch s.GetManagementState(dsc) { + case operatorv1.Managed: + dsc.Status.InstalledComponents[LegacyComponentName] = true + dsc.Status.Components.TrainingOperator.TrainingOperatorCommonStatus = c.Status.TrainingOperatorCommonStatus.DeepCopy() + + if rc := 
meta.FindStatusCondition(c.Status.Conditions, status.ConditionTypeReady); rc != nil { + nc.Status = corev1.ConditionStatus(rc.Status) + nc.Reason = rc.Reason + nc.Message = rc.Message + } + + case operatorv1.Removed: + nc.Status = corev1.ConditionFalse + nc.Reason = string(operatorv1.Removed) + nc.Message = "Component ManagementState is set to " + string(operatorv1.Removed) + + default: + return fmt.Errorf("unknown state %s ", s.GetManagementState(dsc)) + } + + conditionsv1.SetStatusCondition(&dsc.Status.Conditions, nc) + + return nil +} diff --git a/controllers/components/trainingoperator/trainingoperator_controller.go b/controllers/components/trainingoperator/trainingoperator_controller.go new file mode 100644 index 00000000000..096ef8a5198 --- /dev/null +++ b/controllers/components/trainingoperator/trainingoperator_controller.go @@ -0,0 +1,78 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package trainingoperator + +import ( + "context" + + promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + ctrl "sigs.k8s.io/controller-runtime" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/gc" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/render/kustomize" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/updatestatus" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/handlers" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/predicates/component" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/predicates/resources" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/reconciler" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" +) + +func (s *componentHandler) NewComponentReconciler(ctx context.Context, mgr ctrl.Manager) error { + _, err := reconciler.ReconcilerFor(mgr, &componentApi.TrainingOperator{}). + // customized Owns() for Component with new predicates + Owns(&corev1.ConfigMap{}). + Owns(&promv1.PodMonitor{}). + Owns(&rbacv1.ClusterRoleBinding{}). + Owns(&rbacv1.ClusterRole{}). + Owns(&corev1.ServiceAccount{}). + Owns(&appsv1.Deployment{}, reconciler.WithPredicates(resources.NewDeploymentPredicate())). + Watches( + &extv1.CustomResourceDefinition{}, + reconciler.WithEventHandler( + handlers.ToNamed(componentApi.TrainingOperatorInstanceName)), + reconciler.WithPredicates( + component.ForLabel(labels.ODH.Component(LegacyComponentName), labels.True)), + ). 
+ // Add TrainingOperator-specific actions + WithAction(initialize). + WithAction(devFlags). + WithAction(kustomize.NewAction( + kustomize.WithCache(), + kustomize.WithLabel(labels.ODH.Component(LegacyComponentName), labels.True), + kustomize.WithLabel(labels.K8SCommon.PartOf, LegacyComponentName), + )). + WithAction(deploy.NewAction( + deploy.WithCache(), + )). + WithAction(updatestatus.NewAction()). + // must be the final action + WithAction(gc.NewAction()). + Build(ctx) + + if err != nil { + return err // no need customize error, it is done in the caller main + } + + return nil +} diff --git a/controllers/components/trainingoperator/trainingoperator_controller_actions.go b/controllers/components/trainingoperator/trainingoperator_controller_actions.go new file mode 100644 index 00000000000..21b999fed50 --- /dev/null +++ b/controllers/components/trainingoperator/trainingoperator_controller_actions.go @@ -0,0 +1,55 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package trainingoperator
+
+import (
+	"context"
+	"fmt"
+
+	componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1"
+	odhtypes "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types"
+	odhdeploy "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy"
+)
+
+func initialize(ctx context.Context, rr *odhtypes.ReconciliationRequest) error {
+	rr.Manifests = append(rr.Manifests, manifestPath())
+	return nil
+}
+
+func devFlags(ctx context.Context, rr *odhtypes.ReconciliationRequest) error {
+	trainingoperator, ok := rr.Instance.(*componentApi.TrainingOperator)
+	if !ok {
+		return fmt.Errorf("resource instance %v is not a componentApi.TrainingOperator", rr.Instance)
+	}
+
+	if trainingoperator.Spec.DevFlags == nil {
+		return nil
+	}
+	if len(trainingoperator.Spec.DevFlags.Manifests) != 0 {
+		manifestConfig := trainingoperator.Spec.DevFlags.Manifests[0]
+		if err := odhdeploy.DownloadManifests(ctx, ComponentName, manifestConfig); err != nil {
+			return err
+		}
+		if manifestConfig.SourcePath != "" {
+			rr.Manifests[0].Path = odhdeploy.DefaultManifestPath
+			rr.Manifests[0].ContextDir = ComponentName
+			rr.Manifests[0].SourcePath = manifestConfig.SourcePath
+		}
+	}
+	// TODO: Implement devflags logmode logic
+	return nil
+}
diff --git a/controllers/components/trainingoperator/trainingoperator_support.go b/controllers/components/trainingoperator/trainingoperator_support.go
new file mode 100644
index 00000000000..c99b7e9ff22
--- /dev/null
+++ b/controllers/components/trainingoperator/trainingoperator_support.go
@@ -0,0 +1,35 @@
+package trainingoperator
+
+import (
+	conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1"
+
+	componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1"
+	"github.com/opendatahub-io/opendatahub-operator/v2/controllers/status"
+	"github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types"
+	odhdeploy
"github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" +) + +const ( + ComponentName = componentApi.TrainingOperatorComponentName + + ReadyConditionType = conditionsv1.ConditionType(componentApi.TrainingOperatorKind + status.ReadySuffix) + + // LegacyComponentName is the name of the component that is assigned to deployments + // via Kustomize. Since a deployment selector is immutable, we can't upgrade existing + // deployment to the new component name, so keep it around till we figure out a solution. + LegacyComponentName = "trainingoperator" +) + +var ( + imageParamMap = map[string]string{ + "odh-training-operator-controller-image": "RELATED_IMAGE_ODH_TRAINING_OPERATOR_IMAGE", + } +) + +func manifestPath() types.ManifestInfo { + return types.ManifestInfo{ + Path: odhdeploy.DefaultManifestPath, + ContextDir: ComponentName, + SourcePath: "rhoai", + } +} diff --git a/controllers/components/trustyai/trustyai.go b/controllers/components/trustyai/trustyai.go new file mode 100644 index 00000000000..2dd4c138eb1 --- /dev/null +++ b/controllers/components/trustyai/trustyai.go @@ -0,0 +1,109 @@ +package trustyai + +import ( + "errors" + "fmt" + + operatorv1 "github.com/openshift/api/operator/v1" + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + cr "github.com/opendatahub-io/opendatahub-operator/v2/pkg/componentsregistry" + odhdeploy "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" + 
"github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/annotations" +) + +type componentHandler struct{} + +func init() { //nolint:gochecknoinits + cr.Add(&componentHandler{}) +} + +func (s *componentHandler) GetName() string { + return componentApi.TrustyAIComponentName +} + +func (s *componentHandler) GetManagementState(dsc *dscv1.DataScienceCluster) operatorv1.ManagementState { + if dsc.Spec.Components.TrustyAI.ManagementState == operatorv1.Managed { + return operatorv1.Managed + } + return operatorv1.Removed +} + +func (s *componentHandler) NewCRObject(dsc *dscv1.DataScienceCluster) common.PlatformObject { + return &componentApi.TrustyAI{ + TypeMeta: metav1.TypeMeta{ + Kind: componentApi.TrustyAIKind, + APIVersion: componentApi.GroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: componentApi.TrustyAIInstanceName, + Annotations: map[string]string{ + annotations.ManagementStateAnnotation: string(s.GetManagementState(dsc)), + }, + }, + Spec: componentApi.TrustyAISpec{ + TrustyAICommonSpec: dsc.Spec.Components.TrustyAI.TrustyAICommonSpec, + }, + } +} + +func (s *componentHandler) Init(platform cluster.Platform) error { + mp := manifestsPath(platform) + + if err := odhdeploy.ApplyParams(mp.String(), imageParamMap); err != nil { + return fmt.Errorf("failed to update images on path %s: %w", mp, err) + } + + return nil +} + +func (s *componentHandler) UpdateDSCStatus(dsc *dscv1.DataScienceCluster, obj client.Object) error { + c, ok := obj.(*componentApi.TrustyAI) + if !ok { + return errors.New("failed to convert to TrustyAI") + } + + dsc.Status.InstalledComponents[LegacyComponentName] = false + dsc.Status.Components.TrustyAI.ManagementSpec.ManagementState = s.GetManagementState(dsc) + dsc.Status.Components.TrustyAI.TrustyAICommonStatus = nil + + nc := conditionsv1.Condition{ + Type: ReadyConditionType, + Status: corev1.ConditionFalse, + Reason: "Unknown", + Message: "Not Available", + } + + switch s.GetManagementState(dsc) { + case 
operatorv1.Managed: + dsc.Status.InstalledComponents[LegacyComponentName] = true + dsc.Status.Components.TrustyAI.TrustyAICommonStatus = c.Status.TrustyAICommonStatus.DeepCopy() + + if rc := meta.FindStatusCondition(c.Status.Conditions, status.ConditionTypeReady); rc != nil { + nc.Status = corev1.ConditionStatus(rc.Status) + nc.Reason = rc.Reason + nc.Message = rc.Message + } + + case operatorv1.Removed: + nc.Status = corev1.ConditionFalse + nc.Reason = string(operatorv1.Removed) + nc.Message = "Component ManagementState is set to " + string(operatorv1.Removed) + + default: + return fmt.Errorf("unknown state %s ", s.GetManagementState(dsc)) + } + + conditionsv1.SetStatusCondition(&dsc.Status.Conditions, nc) + + return nil +} diff --git a/controllers/components/trustyai/trustyai_controller.go b/controllers/components/trustyai/trustyai_controller.go new file mode 100644 index 00000000000..e77bdd151de --- /dev/null +++ b/controllers/components/trustyai/trustyai_controller.go @@ -0,0 +1,79 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package trustyai + +import ( + "context" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + ctrl "sigs.k8s.io/controller-runtime" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/gc" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/render/kustomize" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/updatestatus" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/handlers" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/predicates/component" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/predicates/resources" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/reconciler" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" +) + +func (s *componentHandler) NewComponentReconciler(ctx context.Context, mgr ctrl.Manager) error { + _, err := reconciler.ReconcilerFor(mgr, &componentApi.TrustyAI{}). + // customized Owns() for Component with new predicates + Owns(&corev1.ConfigMap{}). + Owns(&corev1.ServiceAccount{}). + Owns(&rbacv1.ClusterRoleBinding{}). + Owns(&rbacv1.ClusterRole{}). + Owns(&rbacv1.Role{}). + Owns(&rbacv1.RoleBinding{}). + Owns(&corev1.Service{}). + Owns(&appsv1.Deployment{}, reconciler.WithPredicates(resources.NewDeploymentPredicate())). + Watches( + &extv1.CustomResourceDefinition{}, + reconciler.WithEventHandler( + handlers.ToNamed(componentApi.TrustyAIInstanceName)), + reconciler.WithPredicates( + component.ForLabel(labels.ODH.Component(LegacyComponentName), labels.True)), + ). + // Add TrustyAI-specific actions + WithAction(initialize). + WithAction(devFlags). 
+ WithAction(kustomize.NewAction( + kustomize.WithCache(), + kustomize.WithLabel(labels.ODH.Component(LegacyComponentName), labels.True), + kustomize.WithLabel(labels.K8SCommon.PartOf, LegacyComponentName), + )). + WithAction(deploy.NewAction( + deploy.WithCache(), + )). + WithAction(updatestatus.NewAction()). + // must be the final action + WithAction(gc.NewAction()). + Build(ctx) + + if err != nil { + return err // no need customize error, it is done in the caller main + } + + return nil +} diff --git a/controllers/components/trustyai/trustyai_controller_actions.go b/controllers/components/trustyai/trustyai_controller_actions.go new file mode 100644 index 00000000000..384e9112f58 --- /dev/null +++ b/controllers/components/trustyai/trustyai_controller_actions.go @@ -0,0 +1,56 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package trustyai + +import ( + "context" + "fmt" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + odhtypes "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + odhdeploy "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" +) + +func initialize(_ context.Context, rr *odhtypes.ReconciliationRequest) error { + rr.Manifests = append(rr.Manifests, manifestsPath(rr.Release.Name)) + return nil +} + +func devFlags(ctx context.Context, rr *odhtypes.ReconciliationRequest) error { + trustyai, ok := rr.Instance.(*componentApi.TrustyAI) + if !ok { + return fmt.Errorf("resource instance %v is not a componentApi.TrustyAI)", rr.Instance) + } + + if trustyai.Spec.DevFlags == nil { + return nil + } + + // Implement devflags support logic + // If dev flags are set, update default manifests path + if len(trustyai.Spec.DevFlags.Manifests) != 0 { + manifestConfig := trustyai.Spec.DevFlags.Manifests[0] + if err := odhdeploy.DownloadManifests(ctx, ComponentName, manifestConfig); err != nil { + return err + } + if manifestConfig.SourcePath != "" { + rr.Manifests[0].SourcePath = manifestConfig.SourcePath + } + } + + return nil +} diff --git a/controllers/components/trustyai/trustyai_support.go b/controllers/components/trustyai/trustyai_support.go new file mode 100644 index 00000000000..f758e443c57 --- /dev/null +++ b/controllers/components/trustyai/trustyai_support.go @@ -0,0 +1,44 @@ +package trustyai + +import ( + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + odhdeploy "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" +) + +const ( + ComponentName = 
componentApi.TrustyAIComponentName + + ReadyConditionType = conditionsv1.ConditionType(componentApi.TrustyAIKind + status.ReadySuffix) + + // LegacyComponentName is the name of the component that is assigned to deployments + // via Kustomize. Since a deployment selector is immutable, we can't upgrade existing + // deployment to the new component name, so keep it around till we figure out a solution. + LegacyComponentName = "trustyai" +) + +var ( + imageParamMap = map[string]string{ + "trustyaiServiceImage": "RELATED_IMAGE_ODH_TRUSTYAI_SERVICE_IMAGE", + "trustyaiOperatorImage": "RELATED_IMAGE_ODH_TRUSTYAI_SERVICE_OPERATOR_IMAGE", + } + + overlaysSourcePaths = map[cluster.Platform]string{ + cluster.SelfManagedRhoai: "/overlays/rhoai", + cluster.ManagedRhoai: "/overlays/rhoai", + cluster.OpenDataHub: "/overlays/odh", + cluster.Unknown: "/overlays/odh", + } +) + +func manifestsPath(p cluster.Platform) types.ManifestInfo { + return types.ManifestInfo{ + Path: odhdeploy.DefaultManifestPath, + ContextDir: ComponentName, + SourcePath: overlaysSourcePaths[p], + } +} diff --git a/controllers/components/workbenches/workbenches.go b/controllers/components/workbenches/workbenches.go new file mode 100644 index 00000000000..9e2f95cc8f3 --- /dev/null +++ b/controllers/components/workbenches/workbenches.go @@ -0,0 +1,117 @@ +package workbenches + +import ( + "errors" + "fmt" + + operatorv1 "github.com/openshift/api/operator/v1" + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" + 
"github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + cr "github.com/opendatahub-io/opendatahub-operator/v2/pkg/componentsregistry" + odhdeploy "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/annotations" +) + +type componentHandler struct{} + +func init() { //nolint:gochecknoinits + cr.Add(&componentHandler{}) +} + +func (s *componentHandler) GetName() string { + return componentApi.WorkbenchesComponentName +} + +func (s *componentHandler) GetManagementState(dsc *dscv1.DataScienceCluster) operatorv1.ManagementState { + if dsc.Spec.Components.Workbenches.ManagementState == operatorv1.Managed { + return operatorv1.Managed + } + return operatorv1.Removed +} + +func (s *componentHandler) NewCRObject(dsc *dscv1.DataScienceCluster) common.PlatformObject { + return &componentApi.Workbenches{ + TypeMeta: metav1.TypeMeta{ + Kind: componentApi.WorkbenchesKind, + APIVersion: componentApi.GroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: componentApi.WorkbenchesInstanceName, + Annotations: map[string]string{ + annotations.ManagementStateAnnotation: string(s.GetManagementState(dsc)), + }, + }, + Spec: componentApi.WorkbenchesSpec{ + WorkbenchesCommonSpec: dsc.Spec.Components.Workbenches.WorkbenchesCommonSpec, + }, + } +} + +func (s *componentHandler) Init(platform cluster.Platform) error { + nbcManifestInfo := notebookControllerManifestInfo(notebookControllerManifestSourcePath) + if err := odhdeploy.ApplyParams(nbcManifestInfo.String(), map[string]string{ + "odh-notebook-controller-image": "RELATED_IMAGE_ODH_NOTEBOOK_CONTROLLER_IMAGE", + }); err != nil { + return fmt.Errorf("failed to update params.env from %s : %w", nbcManifestInfo.String(), err) + } + + kfNbcManifestInfo := kfNotebookControllerManifestInfo(kfNotebookControllerManifestSourcePath) + if err := odhdeploy.ApplyParams(kfNbcManifestInfo.String(), map[string]string{ + "odh-kf-notebook-controller-image": 
"RELATED_IMAGE_ODH_KF_NOTEBOOK_CONTROLLER_IMAGE", + }); err != nil { + return fmt.Errorf("failed to update params.env from %s : %w", kfNbcManifestInfo.String(), err) + } + + return nil +} + +func (s *componentHandler) UpdateDSCStatus(dsc *dscv1.DataScienceCluster, obj client.Object) error { + c, ok := obj.(*componentApi.Workbenches) + if !ok { + return errors.New("failed to convert to Workbenches") + } + + dsc.Status.InstalledComponents[LegacyComponentName] = false + dsc.Status.Components.Workbenches.ManagementSpec.ManagementState = s.GetManagementState(dsc) + dsc.Status.Components.Workbenches.WorkbenchesCommonStatus = nil + + nc := conditionsv1.Condition{ + Type: ReadyConditionType, + Status: corev1.ConditionFalse, + Reason: "Unknown", + Message: "Not Available", + } + + switch s.GetManagementState(dsc) { + case operatorv1.Managed: + dsc.Status.InstalledComponents[LegacyComponentName] = true + dsc.Status.Components.Workbenches.WorkbenchesCommonStatus = c.Status.WorkbenchesCommonStatus.DeepCopy() + + if rc := meta.FindStatusCondition(c.Status.Conditions, status.ConditionTypeReady); rc != nil { + nc.Status = corev1.ConditionStatus(rc.Status) + nc.Reason = rc.Reason + nc.Message = rc.Message + } + + case operatorv1.Removed: + nc.Status = corev1.ConditionFalse + nc.Reason = string(operatorv1.Removed) + nc.Message = "Component ManagementState is set to " + string(operatorv1.Removed) + + default: + return fmt.Errorf("unknown state %s ", s.GetManagementState(dsc)) + } + + conditionsv1.SetStatusCondition(&dsc.Status.Conditions, nc) + + return nil +} diff --git a/controllers/components/workbenches/workbenches_controller.go b/controllers/components/workbenches/workbenches_controller.go new file mode 100644 index 00000000000..6eb7a099cec --- /dev/null +++ b/controllers/components/workbenches/workbenches_controller.go @@ -0,0 +1,84 @@ +/* +Copyright 2023. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package workbenches + +import ( + "context" + + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + ctrl "sigs.k8s.io/controller-runtime" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/gc" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/render/kustomize" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/security" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/updatestatus" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/handlers" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/predicates/component" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/predicates/resources" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/reconciler" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" +) + +// NewComponentReconciler creates a ComponentReconciler for the Workbenches API. 
+func (s *componentHandler) NewComponentReconciler(ctx context.Context, mgr ctrl.Manager) error { + _, err := reconciler.ReconcilerFor(mgr, &componentApi.Workbenches{}). + Owns(&corev1.ConfigMap{}). + Owns(&corev1.Secret{}). + Owns(&rbacv1.ClusterRoleBinding{}). + Owns(&rbacv1.ClusterRole{}). + Owns(&rbacv1.Role{}). + Owns(&rbacv1.RoleBinding{}). + Owns(&corev1.ServiceAccount{}). + Owns(&corev1.Service{}). + Owns(&admissionregistrationv1.MutatingWebhookConfiguration{}). + Owns(&appsv1.Deployment{}, reconciler.WithPredicates(resources.NewDeploymentPredicate())). + Watches( + &extv1.CustomResourceDefinition{}, + reconciler.WithEventHandler( + handlers.ToNamed(componentApi.WorkbenchesInstanceName)), + reconciler.WithPredicates( + component.ForLabel(labels.ODH.Component(LegacyComponentName), labels.True)), + ). + WithAction(initialize). + WithAction(devFlags). + WithAction(configureDependencies). + WithAction(security.NewUpdatePodSecurityRoleBindingAction(serviceAccounts)). + WithAction(kustomize.NewAction( + kustomize.WithCache(), + kustomize.WithLabel(labels.ODH.Component(LegacyComponentName), labels.True), + kustomize.WithLabel(labels.K8SCommon.PartOf, LegacyComponentName), + )). + WithAction(deploy.NewAction( + deploy.WithCache(), + )). + WithAction(updatestatus.NewAction()). + // must be the final action + WithAction(gc.NewAction()). 
+ Build(ctx) + + if err != nil { + return err + } + + return nil +} diff --git a/controllers/components/workbenches/workbenches_controller_actions.go b/controllers/components/workbenches/workbenches_controller_actions.go new file mode 100644 index 00000000000..82707874cfb --- /dev/null +++ b/controllers/components/workbenches/workbenches_controller_actions.go @@ -0,0 +1,112 @@ +package workbenches + +import ( + "context" + "fmt" + "strings" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + odhtypes "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + odhdeploy "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" +) + +func initialize(ctx context.Context, rr *odhtypes.ReconciliationRequest) error { + rr.Manifests = []odhtypes.ManifestInfo{ + notebookControllerManifestInfo(notebookControllerManifestSourcePath), + kfNotebookControllerManifestInfo(kfNotebookControllerManifestSourcePath), + notebookImagesManifestInfo(notebookImagesManifestSourcePath), + } + + return nil +} + +func devFlags(ctx context.Context, rr *odhtypes.ReconciliationRequest) error { + workbenches, ok := rr.Instance.(*componentApi.Workbenches) + if !ok { + return fmt.Errorf("resource instance %v is not a componentApi.Workbenches)", rr.Instance) + } + + if workbenches.Spec.DevFlags == nil || len(workbenches.Spec.DevFlags.Manifests) == 0 { + return nil + } + + // Download manifests if defined by devflags + // Go through each manifest and set the overlays if defined + // first on odh-notebook-controller and kf-notebook-controller last to notebook-images + nbcSourcePath := notebookControllerManifestSourcePath + kfNbcSourcePath := kfNotebookControllerManifestSourcePath + nbImgsSourcePath := 
notebookImagesManifestSourcePath + + for _, subcomponent := range workbenches.Spec.DevFlags.Manifests { + if strings.Contains(subcomponent.ContextDir, "components/odh-notebook-controller") { + // Download subcomponent + if err := odhdeploy.DownloadManifests(ctx, notebookControllerContextDir, subcomponent); err != nil { + return err + } + // If overlay is defined, update paths + if subcomponent.SourcePath != "" { + nbcSourcePath = subcomponent.SourcePath + } + } + + if strings.Contains(subcomponent.ContextDir, "components/notebook-controller") { + // Download subcomponent + if err := odhdeploy.DownloadManifests(ctx, kfNotebookControllerContextDir, subcomponent); err != nil { + return err + } + // If overlay is defined, update paths + if subcomponent.SourcePath != "" { + kfNbcSourcePath = subcomponent.SourcePath + } + } + + if strings.Contains(subcomponent.URI, notebooksPath) { + // Download subcomponent + if err := odhdeploy.DownloadManifests(ctx, notebookContextDir, subcomponent); err != nil { + return err + } + // If overlay is defined, update paths + if subcomponent.SourcePath != "" { + nbImgsSourcePath = subcomponent.SourcePath + } + } + } + + rr.Manifests = []odhtypes.ManifestInfo{ + notebookControllerManifestInfo(nbcSourcePath), + kfNotebookControllerManifestInfo(kfNbcSourcePath), + notebookImagesManifestInfo(nbImgsSourcePath), + } + + return nil +} + +func configureDependencies(ctx context.Context, rr *odhtypes.ReconciliationRequest) error { + _, ok := rr.Instance.(*componentApi.Workbenches) + if !ok { + return fmt.Errorf("resource instance %v is not a componentApi.Workbenches)", rr.Instance) + } + + platform := rr.Release.Name + if platform == cluster.SelfManagedRhoai || platform == cluster.ManagedRhoai { + // Intentionally leaving the ownership unset for this namespace. + // Specifying this label triggers its deletion when the operator is uninstalled. 
+ if err := rr.AddResources(&corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: cluster.DefaultNotebooksNamespace, + Labels: map[string]string{ + labels.ODH.OwnedNamespace: "true", + }, + }, + }); err != nil { + return fmt.Errorf("failed to add namespace %s to manifests", cluster.DefaultNotebooksNamespace) + } + } + + return nil +} diff --git a/controllers/components/workbenches/workbenches_support.go b/controllers/components/workbenches/workbenches_support.go new file mode 100644 index 00000000000..7e3a33edc70 --- /dev/null +++ b/controllers/components/workbenches/workbenches_support.go @@ -0,0 +1,75 @@ +package workbenches + +import ( + "path" + + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + odhtypes "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + odhdeploy "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" +) + +const ( + ComponentName = componentApi.WorkbenchesComponentName + + ReadyConditionType = conditionsv1.ConditionType(componentApi.WorkbenchesKind + status.ReadySuffix) + + notebooksPath = "notebooks" + notebookImagesManifestSourcePath = "overlays/additional" + + notebookControllerPath = "odh-notebook-controller" + notebookControllerManifestSourcePath = "base" + + kfNotebookControllerPath = "kf-notebook-controller" + kfNotebookControllerManifestSourcePath = "overlays/openshift" + + nbcServiceAccountName = "notebook-controller-service-account" + + // LegacyComponentName is the name of the component that is assigned to deployments + // via Kustomize. Since a deployment selector is immutable, we can't upgrade existing + // deployment to the new component name, so keep it around till we figure out a solution. 
+ LegacyComponentName = "workbenches" +) + +var ( + notebookControllerContextDir = path.Join(ComponentName, notebookControllerPath) + kfNotebookControllerContextDir = path.Join(ComponentName, kfNotebookControllerPath) + notebookContextDir = path.Join(ComponentName, notebooksPath) + + serviceAccounts = map[cluster.Platform][]string{ + cluster.SelfManagedRhoai: {nbcServiceAccountName}, + cluster.ManagedRhoai: {nbcServiceAccountName}, + cluster.OpenDataHub: {nbcServiceAccountName}, + cluster.Unknown: {nbcServiceAccountName}, + } +) + +// manifests for nbc in ODH and RHOAI + downstream use it for imageparams. +func notebookControllerManifestInfo(sourcePath string) odhtypes.ManifestInfo { + return odhtypes.ManifestInfo{ + Path: odhdeploy.DefaultManifestPath, + ContextDir: notebookControllerContextDir, + SourcePath: sourcePath, + } +} + +// manifests for ODH nbc + downstream use it for imageparams. +func kfNotebookControllerManifestInfo(sourcePath string) odhtypes.ManifestInfo { + return odhtypes.ManifestInfo{ + Path: odhdeploy.DefaultManifestPath, + ContextDir: kfNotebookControllerContextDir, + SourcePath: sourcePath, + } +} + +// notebook image manifests. 
+func notebookImagesManifestInfo(sourcePath string) odhtypes.ManifestInfo { + return odhtypes.ManifestInfo{ + Path: odhdeploy.DefaultManifestPath, + ContextDir: notebookContextDir, + SourcePath: sourcePath, + } +} diff --git a/controllers/datasciencecluster/datasciencecluster_controller.go b/controllers/datasciencecluster/datasciencecluster_controller.go index 1e3bbfb0b67..331deddc526 100644 --- a/controllers/datasciencecluster/datasciencecluster_controller.go +++ b/controllers/datasciencecluster/datasciencecluster_controller.go @@ -19,622 +19,296 @@ package datasciencecluster import ( "context" - "errors" "fmt" + "slices" "strings" - "time" - "github.com/go-logr/logr" - "github.com/hashicorp/go-multierror" - buildv1 "github.com/openshift/api/build/v1" - imagev1 "github.com/openshift/api/image/v1" operatorv1 "github.com/openshift/api/operator/v1" - admissionregistrationv1 "k8s.io/api/admissionregistration/v1" - appsv1 "k8s.io/api/apps/v1" + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" corev1 "k8s.io/api/core/v1" - networkingv1 "k8s.io/api/networking/v1" - rbacv1 "k8s.io/api/rbac/v1" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" k8serr "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" - apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/predicate" + logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + 
componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/components" - "github.com/opendatahub-io/opendatahub-operator/v2/components/datasciencepipelines" - "github.com/opendatahub-io/opendatahub-operator/v2/components/modelregistry" "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" - annotations "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/annotations" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/upgrade" + cr "github.com/opendatahub-io/opendatahub-operator/v2/pkg/componentsregistry" + odhClient "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/client" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/handlers" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/predicates" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/predicates/dependent" ) // DataScienceClusterReconciler reconciles a DataScienceCluster object. type DataScienceClusterReconciler struct { - client.Client + *odhClient.Client Scheme *runtime.Scheme - Log logr.Logger // Recorder to generate events - Recorder record.EventRecorder - DataScienceCluster *DataScienceClusterConfig -} - -// DataScienceClusterConfig passing Spec of DSCI for reconcile DataScienceCluster. 
-type DataScienceClusterConfig struct { - DSCISpec *dsciv1.DSCInitializationSpec + Recorder record.EventRecorder } const ( finalizerName = "datasciencecluster.opendatahub.io/finalizer" + fieldOwner = "datasciencecluster.opendatahub.io" ) +// TODO: all the logic about the deletion configmap should be moved to another controller +// https://issues.redhat.com/browse/RHOAIENG-16674 + // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. -func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { //nolint:maintidx,gocyclo - r.Log.Info("Reconciling DataScienceCluster resources", "Request.Name", req.Name) +func (r *DataScienceClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := logf.FromContext(ctx).WithName("DataScienceCluster") + log.Info("Reconciling DataScienceCluster resources", "Request.Name", req.Name) + instance := &dscv1.DataScienceCluster{} + err := r.Client.Get(ctx, req.NamespacedName, instance) - // Get information on version and platform - currentOperatorRelease, err := cluster.GetRelease(ctx, r.Client) - if err != nil { - r.Log.Error(err, "failed to get operator release version") + switch { + case k8serr.IsNotFound(err): + return ctrl.Result{}, nil + case err != nil: return ctrl.Result{}, err } - // Set platform - platform := currentOperatorRelease.Name - instances := &dscv1.DataScienceClusterList{} + if controllerutil.RemoveFinalizer(instance, finalizerName) { + if err := r.Client.Update(ctx, instance); err != nil { + return ctrl.Result{}, err + } + } - if err := r.Client.List(ctx, instances); err != nil { - return ctrl.Result{}, err + if !instance.ObjectMeta.DeletionTimestamp.IsZero() { + log.Info("Finalization DataScienceCluster start deleting instance", "name", instance.Name) + return ctrl.Result{}, nil } - if len(instances.Items) == 0 { - // Request object not 
found, could have been deleted after reconcile request. - // Owned objects are automatically garbage collected. - // For additional cleanup logic use operatorUninstall function. - // Return and don't requeue - if upgrade.HasDeleteConfigMap(ctx, r.Client) { - if uninstallErr := upgrade.OperatorUninstall(ctx, r.Client, platform); uninstallErr != nil { - return ctrl.Result{}, fmt.Errorf("error while operator uninstall: %w", uninstallErr) - } - } + // validate pre-requisites + if err := r.validate(ctx, instance); err != nil { + log.Info(err.Error()) + status.SetCondition(&instance.Status.Conditions, "Degraded", status.ReconcileFailed, err.Error(), corev1.ConditionTrue) + } - return ctrl.Result{}, nil + // deploy components + if err := r.reconcileComponents(ctx, instance); err != nil { + log.Info(err.Error()) + status.SetCondition(&instance.Status.Conditions, "Degraded", status.ReconcileFailed, err.Error(), corev1.ConditionTrue) } - instance := &instances.Items[0] + // keep conditions sorted + slices.SortFunc(instance.Status.Conditions, func(a, b conditionsv1.Condition) int { + return strings.Compare(string(a.Type), string(b.Type)) + }) - allComponents, err := instance.GetComponents() - if err != nil { + err = r.Client.ApplyStatus(ctx, instance, client.FieldOwner(fieldOwner), client.ForceOwnership) + switch { + case err == nil: + return ctrl.Result{}, nil + case k8serr.IsNotFound(err): + return ctrl.Result{}, nil + default: + r.reportError(ctx, err, instance, "failed to update DataScienceCluster status") return ctrl.Result{}, err } +} - // If DSC CR exist and deletion CM exist - // delete DSC CR and let reconcile requeue - // sometimes with finalizer DSC CR won't get deleted, force to remove finalizer here - if upgrade.HasDeleteConfigMap(ctx, r.Client) { - if controllerutil.ContainsFinalizer(instance, finalizerName) { - if controllerutil.RemoveFinalizer(instance, finalizerName) { - if err := r.Update(ctx, instance); err != nil { - r.Log.Info("Error to remove DSC 
finalizer", "error", err) - return ctrl.Result{}, err - } - r.Log.Info("Removed finalizer for DataScienceCluster", "name", instance.Name, "finalizer", finalizerName) - } - } - if err := r.Client.Delete(ctx, instance, []client.DeleteOption{}...); err != nil { - if !k8serr.IsNotFound(err) { - return reconcile.Result{}, err - } - } - for _, component := range allComponents { - if err := component.Cleanup(ctx, r.Client, instance, r.DataScienceCluster.DSCISpec); err != nil { - return ctrl.Result{}, err - } - } - return reconcile.Result{Requeue: true}, nil +func (r *DataScienceClusterReconciler) validate(ctx context.Context, _ *dscv1.DataScienceCluster) error { + // This case should not happen, since there is a webhook that blocks the creation + // of more than one instance of the DataScienceCluster, however one can create a + // DataScienceCluster instance while the operator is stopped, hence this extra check + + dscInstances := &dscv1.DataScienceClusterList{} + if err := r.Client.List(ctx, dscInstances); err != nil { + return fmt.Errorf("failed to retrieve DataScienceCluster resource: %w", err) + } + + if len(dscInstances.Items) != 1 { + return fmt.Errorf("failed to get a valid DataScienceCluster instance, expected to find 1 instance, found %d", len(dscInstances.Items)) } - // Verify a valid DSCInitialization instance is created dsciInstances := &dsciv1.DSCInitializationList{} - err = r.Client.List(ctx, dsciInstances) + err := r.Client.List(ctx, dsciInstances) if err != nil { - r.Log.Error(err, "Failed to retrieve DSCInitialization resource.", "DSCInitialization Request.Name", req.Name) - r.Recorder.Eventf(instance, corev1.EventTypeWarning, "DSCInitializationReconcileError", "Failed to retrieve DSCInitialization instance") - return ctrl.Result{}, err + return fmt.Errorf("failed to retrieve DSCInitialization resource: %w", err) } - // Update phase to error state if DataScienceCluster is created without valid DSCInitialization - switch len(dsciInstances.Items) { // only 
handle number as 0 or 1, others won't be existed since webhook block creation - case 0: - reason := status.ReconcileFailed - message := "Failed to get a valid DSCInitialization instance, please create a DSCI instance" - r.Log.Info(message) - instance, err = status.UpdateWithRetry(ctx, r.Client, instance, func(saved *dscv1.DataScienceCluster) { - status.SetProgressingCondition(&saved.Status.Conditions, reason, message) - // Patch Degraded with True status - status.SetCondition(&saved.Status.Conditions, "Degraded", reason, message, corev1.ConditionTrue) - saved.Status.Phase = status.PhaseError - }) - if err != nil { - r.reportError(err, instance, "failed to update DataScienceCluster condition") - return ctrl.Result{}, err - } - return ctrl.Result{}, nil - case 1: - dscInitializationSpec := dsciInstances.Items[0].Spec - dscInitializationSpec.DeepCopyInto(r.DataScienceCluster.DSCISpec) + if len(dsciInstances.Items) != 1 { + return fmt.Errorf("failed to get a valid DSCInitialization instance, expected to find 1 instance, found %d", len(dsciInstances.Items)) } - if instance.ObjectMeta.DeletionTimestamp.IsZero() { - if !controllerutil.ContainsFinalizer(instance, finalizerName) { - r.Log.Info("Adding finalizer for DataScienceCluster", "name", instance.Name, "finalizer", finalizerName) - controllerutil.AddFinalizer(instance, finalizerName) - if err := r.Update(ctx, instance); err != nil { - return ctrl.Result{}, err - } - } - } else { - r.Log.Info("Finalization DataScienceCluster start deleting instance", "name", instance.Name, "finalizer", finalizerName) - for _, component := range allComponents { - if err := component.Cleanup(ctx, r.Client, instance, r.DataScienceCluster.DSCISpec); err != nil { - return ctrl.Result{}, err - } - } - if controllerutil.ContainsFinalizer(instance, finalizerName) { - controllerutil.RemoveFinalizer(instance, finalizerName) - if err := r.Update(ctx, instance); err != nil { - return ctrl.Result{}, err - } - } - if upgrade.HasDeleteConfigMap(ctx, 
r.Client) { - // if delete configmap exists, requeue the request to handle operator uninstall - return reconcile.Result{Requeue: true}, nil - } - return ctrl.Result{}, nil - } - // Check preconditions if this is an upgrade - if instance.Status.Phase == status.PhaseReady { - // Check for existence of Argo Workflows if DSP is - if instance.Spec.Components.DataSciencePipelines.ManagementState == operatorv1.Managed { - if err := datasciencepipelines.UnmanagedArgoWorkFlowExists(ctx, r.Client); err != nil { - message := fmt.Sprintf("Failed upgrade: %v ", err.Error()) - - _, err = status.UpdateWithRetry(ctx, r.Client, instance, func(saved *dscv1.DataScienceCluster) { - datasciencepipelines.SetExistingArgoCondition(&saved.Status.Conditions, status.ArgoWorkflowExist, message) - status.SetErrorCondition(&saved.Status.Conditions, status.ArgoWorkflowExist, message) - saved.Status.InstalledComponents[datasciencepipelines.ComponentName] = false - saved.Status.Phase = status.PhaseError - }) - return ctrl.Result{}, err - } - } - } + return nil +} - // Start reconciling - if instance.Status.Conditions == nil { - reason := status.ReconcileInit - message := "Initializing DataScienceCluster resource" - instance, err = status.UpdateWithRetry(ctx, r.Client, instance, func(saved *dscv1.DataScienceCluster) { - status.SetProgressingCondition(&saved.Status.Conditions, reason, message) - saved.Status.Phase = status.PhaseProgressing - saved.Status.Release = currentOperatorRelease - }) +func (r *DataScienceClusterReconciler) reconcileComponents(ctx context.Context, instance *dscv1.DataScienceCluster) error { + log := logf.FromContext(ctx).WithName("DataScienceCluster") + + notReadyComponents := make([]string, 0) + + // all DSC defined components + componentErrors := cr.ForEach(func(component cr.ComponentHandler) error { + ci, err := r.reconcileComponent(ctx, instance, component) if err != nil { - _ = r.reportError(err, instance, fmt.Sprintf("failed to add conditions to status of 
DataScienceCluster resource name %s", req.Name)) - return ctrl.Result{}, err + return err } - } - - // Initialize error list, instead of returning errors after every component is deployed - var componentErrors *multierror.Error - for _, component := range allComponents { - if instance, err = r.reconcileSubComponent(ctx, instance, platform, component); err != nil { - componentErrors = multierror.Append(componentErrors, err) + if !cr.IsManaged(component, instance) { + return nil } - } - // Process errors for components - if componentErrors != nil { - r.Log.Info("DataScienceCluster Deployment Incomplete.") - instance, err = status.UpdateWithRetry(ctx, r.Client, instance, func(saved *dscv1.DataScienceCluster) { - status.SetCompleteCondition(&saved.Status.Conditions, status.ReconcileCompletedWithComponentErrors, - fmt.Sprintf("DataScienceCluster resource reconciled with component errors: %v", componentErrors)) - saved.Status.Phase = status.PhaseReady - saved.Status.Release = currentOperatorRelease - }) - if err != nil { - r.Log.Error(err, "failed to update DataScienceCluster conditions with incompleted reconciliation") - return ctrl.Result{}, err + if !meta.IsStatusConditionTrue(ci.GetStatus().Conditions, status.ConditionTypeReady) { + notReadyComponents = append(notReadyComponents, component.GetName()) } - r.Recorder.Eventf(instance, corev1.EventTypeNormal, "DataScienceClusterComponentFailures", - "DataScienceCluster instance %s created, but have some failures in component %v", instance.Name, componentErrors) - return ctrl.Result{RequeueAfter: time.Second * 30}, componentErrors - } - // finalize reconciliation - instance, err = status.UpdateWithRetry(ctx, r.Client, instance, func(saved *dscv1.DataScienceCluster) { - status.SetCompleteCondition(&saved.Status.Conditions, status.ReconcileCompleted, "DataScienceCluster resource reconciled successfully") - saved.Status.Phase = status.PhaseReady - saved.Status.Release = currentOperatorRelease + return nil }) - if err != nil 
{ - r.Log.Error(err, "failed to update DataScienceCluster conditions after successfully completed reconciliation") - return ctrl.Result{}, err - } - - r.Log.Info("DataScienceCluster Deployment Completed.") - r.Recorder.Eventf(instance, corev1.EventTypeNormal, "DataScienceClusterCreationSuccessful", - "DataScienceCluster instance %s created and deployed successfully", instance.Name) + // Process errors for components + if componentErrors != nil { + log.Info("DataScienceCluster Deployment Incomplete.") - return ctrl.Result{}, nil -} + status.SetCompleteCondition( + &instance.Status.Conditions, + status.ReconcileCompletedWithComponentErrors, + fmt.Sprintf("DataScienceCluster resource reconciled with component errors: %v", componentErrors), + ) -func (r *DataScienceClusterReconciler) reconcileSubComponent(ctx context.Context, instance *dscv1.DataScienceCluster, - platform cluster.Platform, component components.ComponentInterface, -) (*dscv1.DataScienceCluster, error) { - componentName := component.GetComponentName() + r.Recorder.Eventf(instance, corev1.EventTypeNormal, + "DataScienceClusterComponentFailures", + "DataScienceCluster instance %s created, but have some failures in component %v", instance.Name, componentErrors) + } else { + log.Info("DataScienceCluster Deployment Completed.") + + // finalize reconciliation + status.SetCompleteCondition( + &instance.Status.Conditions, + status.ReconcileCompleted, + "DataScienceCluster resource reconciled successfully", + ) + } - enabled := component.GetManagementState() == operatorv1.Managed - installedComponentValue, isExistStatus := instance.Status.InstalledComponents[componentName] + if len(notReadyComponents) != 0 { + instance.Status.Phase = status.PhaseNotReady - // First set conditions to reflect a component is about to be reconciled - // only set to init condition e.g Unknonw for the very first time when component is not in the list - if !isExistStatus { - message := "Component is disabled" - if enabled { - message = 
"Component is enabled" - } - instance, err := status.UpdateWithRetry(ctx, r.Client, instance, func(saved *dscv1.DataScienceCluster) { - status.SetComponentCondition(&saved.Status.Conditions, componentName, status.ReconcileInit, message, corev1.ConditionUnknown) + conditionsv1.SetStatusCondition(&instance.Status.Conditions, conditionsv1.Condition{ + Type: conditionsv1.ConditionType(status.ConditionTypeReady), + Status: corev1.ConditionFalse, + Reason: "NotReady", + Message: fmt.Sprintf("Some components are not ready: %s", strings.Join(notReadyComponents, ",")), }) - if err != nil { - _ = r.reportError(err, instance, "failed to update DataScienceCluster conditions before first time reconciling "+componentName) - // try to continue with reconciliation, as further updates can fix the status - } - } - // Reconcile component - err := component.ReconcileComponent(ctx, r.Client, r.Log, instance, r.DataScienceCluster.DSCISpec, platform, installedComponentValue) - - // TODO: replace this hack with a full refactor of component status in the future + } else { + instance.Status.Phase = status.PhaseReady - if err != nil { - // reconciliation failed: log errors, raise event and update status accordingly - instance = r.reportError(err, instance, "failed to reconcile "+componentName+" on DataScienceCluster") - instance, _ = status.UpdateWithRetry(ctx, r.Client, instance, func(saved *dscv1.DataScienceCluster) { - if enabled { - if strings.Contains(err.Error(), datasciencepipelines.ArgoWorkflowCRD+" CRD already exists") { - datasciencepipelines.SetExistingArgoCondition(&saved.Status.Conditions, status.ArgoWorkflowExist, fmt.Sprintf("Component update failed: %v", err)) - } else { - status.SetComponentCondition(&saved.Status.Conditions, componentName, status.ReconcileFailed, fmt.Sprintf("Component reconciliation failed: %v", err), corev1.ConditionFalse) - } - } else { - status.SetComponentCondition(&saved.Status.Conditions, componentName, status.ReconcileFailed, fmt.Sprintf("Component 
removal failed: %v", err), corev1.ConditionFalse) - } + conditionsv1.SetStatusCondition(&instance.Status.Conditions, conditionsv1.Condition{ + Type: conditionsv1.ConditionType(status.ConditionTypeReady), + Status: corev1.ConditionTrue, + Reason: "Ready", + Message: "Ready", }) - return instance, err } - // reconciliation succeeded: update status accordingly - instance, err = status.UpdateWithRetry(ctx, r.Client, instance, func(saved *dscv1.DataScienceCluster) { - if saved.Status.InstalledComponents == nil { - saved.Status.InstalledComponents = make(map[string]bool) - } - saved.Status.InstalledComponents[componentName] = enabled - switch { - case enabled: - status.SetComponentCondition(&saved.Status.Conditions, componentName, status.ReconcileCompleted, "Component reconciled successfully", corev1.ConditionTrue) - default: - status.RemoveComponentCondition(&saved.Status.Conditions, componentName) - } - // TODO: replace this hack with a full refactor of component status in the future - if mr, isMR := component.(*modelregistry.ModelRegistry); isMR { - if enabled { - saved.Status.Components.ModelRegistry = &status.ModelRegistryStatus{RegistriesNamespace: mr.RegistriesNamespace} - } else { - saved.Status.Components.ModelRegistry = nil - } - } - }) - if err != nil { - instance = r.reportError(err, instance, "failed to update DataScienceCluster status after reconciling "+componentName) + instance.Status.Release = cluster.GetRelease() + instance.Status.ObservedGeneration = instance.Generation - return instance, err + if componentErrors != nil { + return componentErrors } - return instance, nil -} -func (r *DataScienceClusterReconciler) reportError(err error, instance *dscv1.DataScienceCluster, message string) *dscv1.DataScienceCluster { - r.Log.Error(err, message, "instance.Name", instance.Name) - r.Recorder.Eventf(instance, corev1.EventTypeWarning, "DataScienceClusterReconcileError", - "%s for instance %s", message, instance.Name) - return instance + return nil } -var 
configMapPredicates = predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { - // Do not reconcile on prometheus configmap update, since it is handled by DSCI - if e.ObjectNew.GetName() == "prometheus" && e.ObjectNew.GetNamespace() == "redhat-ods-monitoring" { - return false +func (r *DataScienceClusterReconciler) reconcileComponent( + ctx context.Context, + instance *dscv1.DataScienceCluster, + component cr.ComponentHandler, +) (common.PlatformObject, error) { + ms := component.GetManagementState(instance) + componentCR := component.NewCRObject(instance) + + switch ms { + case operatorv1.Managed: + err := ctrl.SetControllerReference(instance, componentCR, r.Scheme) + if err != nil { + return nil, err } - // Do not reconcile on kserver's inferenceservice-config CM updates, for rawdeployment - namespace := e.ObjectNew.GetNamespace() - if e.ObjectNew.GetName() == "inferenceservice-config" && (namespace == "redhat-ods-applications" || namespace == "opendatahub") { //nolint:goconst - return false + err = r.Client.Apply(ctx, componentCR, client.FieldOwner(fieldOwner), client.ForceOwnership) + if err != nil { + return nil, err } - return true - }, -} - -// reduce unnecessary reconcile triggered by odh component's deployment change due to ManagedByODHOperator annotation. 
-var componentDeploymentPredicates = predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { - namespace := e.ObjectNew.GetNamespace() - if namespace == "opendatahub" || namespace == "redhat-ods-applications" { - oldManaged, oldExists := e.ObjectOld.GetAnnotations()[annotations.ManagedByODHOperator] - newManaged := e.ObjectNew.GetAnnotations()[annotations.ManagedByODHOperator] - // only reoncile if annotation from "not exist" to "set to true", or from "non-true" value to "true" - if newManaged == "true" && (!oldExists || oldManaged != "true") { - return true - } - return false + case operatorv1.Removed: + err := r.Client.Delete(ctx, componentCR, client.PropagationPolicy(metav1.DeletePropagationForeground)) + if err != nil && !k8serr.IsNotFound(err) { + return nil, err } - return true - }, -} - -// a workaround for 2.5 due to odh-model-controller serivceaccount keeps updates with label. -var saPredicates = predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { - namespace := e.ObjectNew.GetNamespace() - if e.ObjectNew.GetName() == "odh-model-controller" && (namespace == "redhat-ods-applications" || namespace == "opendatahub") { - return false - } - return true - }, -} - -// a workaround for 2.5 due to modelmesh-servingruntime.serving.kserve.io keeps updates. 
-var modelMeshwebhookPredicates = predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { - return e.ObjectNew.GetName() != "modelmesh-servingruntime.serving.kserve.io" - }, -} + default: + return nil, fmt.Errorf("unsupported management state: %s", ms) + } -var modelMeshRolePredicates = predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { - notAllowedNames := []string{"leader-election-role", "proxy-role", "metrics-reader", "kserve-prometheus-k8s", "odh-model-controller-role"} - for _, notallowedName := range notAllowedNames { - if e.ObjectNew.GetName() == notallowedName { - return false - } - } - return true - }, -} + if instance.Status.InstalledComponents == nil { + instance.Status.InstalledComponents = make(map[string]bool) + } -// a workaround for modelmesh and kserve both create same odh-model-controller NWP. -var networkpolicyPredicates = predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { - return e.ObjectNew.GetName() != "odh-model-controller" - }, -} + err := component.UpdateDSCStatus(instance, componentCR) + if err != nil { + return nil, fmt.Errorf("failed to update status of DataScienceCluster component %s: %w", component.GetName(), err) + } -var modelMeshRBPredicates = predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { - notAllowedNames := []string{"leader-election-rolebinding", "proxy-rolebinding", "odh-model-controller-rolebinding-opendatahub"} - for _, notallowedName := range notAllowedNames { - if e.ObjectNew.GetName() == notallowedName { - return false - } - } - return true - }, + return componentCR, nil } -// ignore label updates if it is from application namespace. 
-var modelMeshGeneralPredicates = predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { - if strings.Contains(e.ObjectNew.GetName(), "odh-model-controller") || strings.Contains(e.ObjectNew.GetName(), "kserve") { - return false - } - return true - }, +func (r *DataScienceClusterReconciler) reportError(ctx context.Context, err error, instance *dscv1.DataScienceCluster, message string) { + logf.FromContext(ctx).Error(err, message, "instance.Name", instance.Name) + r.Recorder.Eventf(instance, corev1.EventTypeWarning, "DataScienceClusterReconcileError", + "%s for instance %s", message, instance.Name) } // SetupWithManager sets up the controller with the Manager. -func (r *DataScienceClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { +func (r *DataScienceClusterReconciler) SetupWithManager(_ context.Context, mgr ctrl.Manager) error { + componentsPredicate := dependent.New(dependent.WithWatchStatus(true)) + return ctrl.NewControllerManagedBy(mgr). - For(&dscv1.DataScienceCluster{}). - Owns(&corev1.Namespace{}). - Owns(&corev1.Secret{}). - Owns( - &corev1.ConfigMap{}, - builder.WithPredicates(configMapPredicates), - ). - Owns( - &networkingv1.NetworkPolicy{}, - builder.WithPredicates(networkpolicyPredicates), - ). - Owns( - &rbacv1.Role{}, - builder.WithPredicates(predicate.Or(predicate.GenerationChangedPredicate{}, modelMeshRolePredicates))). - Owns( - &rbacv1.RoleBinding{}, - builder.WithPredicates(predicate.Or(predicate.GenerationChangedPredicate{}, modelMeshRBPredicates))). - Owns( - &rbacv1.ClusterRole{}, - builder.WithPredicates(predicate.Or(predicate.GenerationChangedPredicate{}, modelMeshRolePredicates))). - Owns( - &rbacv1.ClusterRoleBinding{}, - builder.WithPredicates(predicate.Or(predicate.GenerationChangedPredicate{}, modelMeshRBPredicates))). - Owns( - &appsv1.Deployment{}, - builder.WithPredicates(componentDeploymentPredicates)). - Owns(&corev1.PersistentVolumeClaim{}). 
- Owns( - &corev1.Service{}, - builder.WithPredicates(predicate.Or(predicate.GenerationChangedPredicate{}, modelMeshGeneralPredicates))). - Owns(&appsv1.StatefulSet{}). - Owns(&imagev1.ImageStream{}). - Owns(&buildv1.BuildConfig{}). - Owns(&apiregistrationv1.APIService{}). - Owns(&operatorv1.IngressController{}). - Owns(&admissionregistrationv1.MutatingWebhookConfiguration{}). - Owns( - &admissionregistrationv1.ValidatingWebhookConfiguration{}, - builder.WithPredicates(modelMeshwebhookPredicates), - ). - Owns( - &corev1.ServiceAccount{}, - builder.WithPredicates(saPredicates), - ). + For(&dscv1.DataScienceCluster{}, builder.WithPredicates(predicates.DefaultPredicate)). + // components + Owns(&componentApi.Dashboard{}, builder.WithPredicates(componentsPredicate)). + Owns(&componentApi.Workbenches{}, builder.WithPredicates(componentsPredicate)). + Owns(&componentApi.Ray{}, builder.WithPredicates(componentsPredicate)). + Owns(&componentApi.ModelRegistry{}, builder.WithPredicates(componentsPredicate)). + Owns(&componentApi.TrustyAI{}, builder.WithPredicates(componentsPredicate)). + Owns(&componentApi.Kueue{}, builder.WithPredicates(componentsPredicate)). + Owns(&componentApi.CodeFlare{}, builder.WithPredicates(componentsPredicate)). + Owns(&componentApi.TrainingOperator{}, builder.WithPredicates(componentsPredicate)). + Owns(&componentApi.DataSciencePipelines{}, builder.WithPredicates(componentsPredicate)). + Owns(&componentApi.Kserve{}, builder.WithPredicates(componentsPredicate)). + Owns(&componentApi.ModelMeshServing{}, builder.WithPredicates(componentsPredicate)). + Owns(&componentApi.ModelController{}, builder.WithPredicates(componentsPredicate)). + // others Watches( &dsciv1.DSCInitialization{}, - handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a client.Object) []reconcile.Request { - return r.watchDataScienceClusterForDSCI(ctx, a) - }, - )). 
- Watches( - &corev1.ConfigMap{}, - handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a client.Object) []reconcile.Request { - return r.watchDataScienceClusterResources(ctx, a) - }), - builder.WithPredicates(configMapPredicates), - ). - Watches( - &apiextensionsv1.CustomResourceDefinition{}, - handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a client.Object) []reconcile.Request { - return r.watchDataScienceClusterResources(ctx, a) - }), - builder.WithPredicates(argoWorkflowCRDPredicates), - ). - Watches( - &corev1.Secret{}, - handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a client.Object) []reconcile.Request { - return r.watchDefaultIngressSecret(ctx, a) - }), - builder.WithPredicates(defaultIngressCertSecretPredicates)). - // this predicates prevents meaningless reconciliations from being triggered - WithEventFilter(predicate.Or(predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{})). + handlers.Fn(r.watchDataScienceClusters)). Complete(r) } -func (r *DataScienceClusterReconciler) watchDataScienceClusterForDSCI(ctx context.Context, a client.Object) []reconcile.Request { - requestName, err := r.getRequestName(ctx) - if err != nil { - return nil - } - // When DSCI CR gets created, trigger reconcile function - if a.GetObjectKind().GroupVersionKind().Kind == "DSCInitialization" || a.GetName() == "default-dsci" { - return []reconcile.Request{{ - NamespacedName: types.NamespacedName{Name: requestName}, - }} - } - return nil -} - -func (r *DataScienceClusterReconciler) watchDataScienceClusterResources(ctx context.Context, a client.Object) []reconcile.Request { - requestName, err := r.getRequestName(ctx) - if err != nil { - return nil - } - - if a.GetObjectKind().GroupVersionKind().Kind == "CustomResourceDefinition" || a.GetName() == "ArgoWorkflowCRD" { - return []reconcile.Request{{ - NamespacedName: types.NamespacedName{Name: requestName}, - }} - } - - // Trigger reconcile function when uninstall configmap is 
created - operatorNs, err := cluster.GetOperatorNamespace() - if err != nil { - return nil - } - if a.GetNamespace() == operatorNs { - cmLabels := a.GetLabels() - if val, ok := cmLabels[upgrade.DeleteConfigMapLabel]; ok && val == "true" { - return []reconcile.Request{{ - NamespacedName: types.NamespacedName{Name: requestName}, - }} - } - } - return nil -} - -func (r *DataScienceClusterReconciler) getRequestName(ctx context.Context) (string, error) { +func (r *DataScienceClusterReconciler) watchDataScienceClusters(ctx context.Context, _ client.Object) []reconcile.Request { instanceList := &dscv1.DataScienceClusterList{} err := r.Client.List(ctx, instanceList) - if err != nil { - return "", err - } - - switch { - case len(instanceList.Items) == 1: - return instanceList.Items[0].Name, nil - case len(instanceList.Items) == 0: - return "default-dsc", nil - default: - return "", errors.New("multiple DataScienceCluster instances found") - } -} - -// argoWorkflowCRDPredicates filters the delete events to trigger reconcile when Argo Workflow CRD is deleted. 
-var argoWorkflowCRDPredicates = predicate.Funcs{ - DeleteFunc: func(e event.DeleteEvent) bool { - if e.Object.GetName() == datasciencepipelines.ArgoWorkflowCRD { - labelList := e.Object.GetLabels() - // CRD to be deleted with label "app.opendatahub.io/datasciencepipeline":"true", should not trigger reconcile - if value, exist := labelList[labels.ODH.Component(datasciencepipelines.ComponentName)]; exist && value == "true" { - return false - } - } - // CRD to be deleted either not with label or label value is not "true", should trigger reconcile - return true - }, -} - -func (r *DataScienceClusterReconciler) watchDefaultIngressSecret(ctx context.Context, a client.Object) []reconcile.Request { - requestName, err := r.getRequestName(ctx) - if err != nil { - return nil - } - // When ingress secret gets created/deleted, trigger reconcile function - ingressCtrl, err := cluster.FindAvailableIngressController(ctx, r.Client) if err != nil { return nil } - defaultIngressSecretName := cluster.GetDefaultIngressCertSecretName(ingressCtrl) - if a.GetName() == defaultIngressSecretName && a.GetNamespace() == "openshift-ingress" { - return []reconcile.Request{{ - NamespacedName: types.NamespacedName{Name: requestName}, - }} - } - return nil -} -// defaultIngressCertSecretPredicates filters delete and create events to trigger reconcile when default ingress cert secret is expired -// or created. 
-var defaultIngressCertSecretPredicates = predicate.Funcs{ - CreateFunc: func(createEvent event.CreateEvent) bool { - return true + requests := make([]reconcile.Request, len(instanceList.Items)) + for i := range instanceList.Items { + requests[i] = reconcile.Request{NamespacedName: types.NamespacedName{Name: instanceList.Items[i].Name}} + } - }, - DeleteFunc: func(e event.DeleteEvent) bool { - return true - }, + return requests } diff --git a/controllers/datasciencecluster/kubebuilder_rbac.go b/controllers/datasciencecluster/kubebuilder_rbac.go index ba4f0a20127..c7595fcce82 100644 --- a/controllers/datasciencecluster/kubebuilder_rbac.go +++ b/controllers/datasciencecluster/kubebuilder_rbac.go @@ -1,80 +1,22 @@ package datasciencecluster -//+kubebuilder:rbac:groups="datasciencecluster.opendatahub.io",resources=datascienceclusters/status,verbs=get;update;patch -//+kubebuilder:rbac:groups="datasciencecluster.opendatahub.io",resources=datascienceclusters/finalizers,verbs=update;patch -//+kubebuilder:rbac:groups="datasciencecluster.opendatahub.io",resources=datascienceclusters,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups="datasciencecluster.opendatahub.io",resources=datascienceclusters/status,verbs=get;update;patch +// +kubebuilder:rbac:groups="datasciencecluster.opendatahub.io",resources=datascienceclusters/finalizers,verbs=update;patch +// +kubebuilder:rbac:groups="datasciencecluster.opendatahub.io",resources=datascienceclusters,verbs=get;list;watch;create;update;patch;delete;deletecollection -/* Serverless prerequisite */ -// +kubebuilder:rbac:groups="networking.istio.io",resources=gateways,verbs=* -// +kubebuilder:rbac:groups="operator.knative.dev",resources=knativeservings,verbs=* -// +kubebuilder:rbac:groups="config.openshift.io",resources=ingresses,verbs=get - -/* Service Mesh Integration */ -// +kubebuilder:rbac:groups="maistra.io",resources=servicemeshcontrolplanes,verbs=create;get;list;patch;update;use;watch -// 
+kubebuilder:rbac:groups="maistra.io",resources=servicemeshmemberrolls,verbs=create;get;list;patch;update;use;watch -// +kubebuilder:rbac:groups="maistra.io",resources=servicemeshmembers,verbs=create;get;list;patch;update;use;watch -// +kubebuilder:rbac:groups="maistra.io",resources=servicemeshmembers/finalizers,verbs=create;get;list;patch;update;use;watch -// +kubebuilder:rbac:groups="networking.istio.io",resources=virtualservices/status,verbs=update;patch;delete;get -// +kubebuilder:rbac:groups="networking.istio.io",resources=virtualservices/finalizers,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups="networking.istio.io",resources=virtualservices,verbs=* -// +kubebuilder:rbac:groups="networking.istio.io",resources=gateways,verbs=* -// +kubebuilder:rbac:groups="networking.istio.io",resources=envoyfilters,verbs=* -// +kubebuilder:rbac:groups="security.istio.io",resources=authorizationpolicies,verbs=* -// +kubebuilder:rbac:groups="authorino.kuadrant.io",resources=authconfigs,verbs=* -// +kubebuilder:rbac:groups="operator.authorino.kuadrant.io",resources=authorinos,verbs=* - -/* This is for DSP */ -//+kubebuilder:rbac:groups="datasciencepipelinesapplications.opendatahub.io",resources=datasciencepipelinesapplications/status,verbs=update;patch;get -//+kubebuilder:rbac:groups="datasciencepipelinesapplications.opendatahub.io",resources=datasciencepipelinesapplications/finalizers,verbs=update;patch;get -//+kubebuilder:rbac:groups="datasciencepipelinesapplications.opendatahub.io",resources=datasciencepipelinesapplications,verbs=create;delete;list;update;watch;patch;get -//+kubebuilder:rbac:groups="image.openshift.io",resources=imagestreamtags,verbs=get -//+kubebuilder:rbac:groups="authentication.k8s.io",resources=tokenreviews,verbs=create;get -//+kubebuilder:rbac:groups="authorization.k8s.io",resources=subjectaccessreviews,verbs=create;get - -/* This is for dashboard */ -// 
+kubebuilder:rbac:groups="opendatahub.io",resources=odhdashboardconfigs,verbs=create;get;patch;watch;update;delete;list -// +kubebuilder:rbac:groups="console.openshift.io",resources=odhquickstarts,verbs=create;get;patch;list;delete -// +kubebuilder:rbac:groups="dashboard.opendatahub.io",resources=odhdocuments,verbs=create;get;patch;list;delete -// +kubebuilder:rbac:groups="dashboard.opendatahub.io",resources=odhapplications,verbs=create;get;patch;list;delete -// +kubebuilder:rbac:groups="dashboard.opendatahub.io",resources=acceleratorprofiles,verbs=create;get;patch;list;delete +// +kubebuilder:rbac:groups="authentication.k8s.io",resources=tokenreviews,verbs=create;get +// +kubebuilder:rbac:groups="authorization.k8s.io",resources=subjectaccessreviews,verbs=create;get // +kubebuilder:rbac:groups="operators.coreos.com",resources=clusterserviceversions,verbs=get;list;watch;delete;update // +kubebuilder:rbac:groups="operators.coreos.com",resources=customresourcedefinitions,verbs=create;get;patch;delete // +kubebuilder:rbac:groups="operators.coreos.com",resources=subscriptions,verbs=get;list;watch;delete // +kubebuilder:rbac:groups="operators.coreos.com",resources=operatorconditions,verbs=get;list;watch - -/* This is for operator */ // +kubebuilder:rbac:groups="operators.coreos.com",resources=catalogsources,verbs=get;list;watch -// +kubebuilder:rbac:groups="apiextensions.k8s.io",resources=customresourcedefinitions,verbs=get;list;watch - -// +kubebuilder:rbac:groups="user.openshift.io",resources=users,verbs=list;watch;patch;delete;get - -// +kubebuilder:rbac:groups="template.openshift.io",resources=templates,verbs=* +// +kubebuilder:rbac:groups="apiextensions.k8s.io",resources=customresourcedefinitions,verbs=get;list;watch;create;patch;delete;update // +kubebuilder:rbac:groups="snapshot.storage.k8s.io",resources=volumesnapshots,verbs=create;delete;patch;get -// +kubebuilder:rbac:groups="serving.kserve.io",resources=trainedmodels/status,verbs=update;patch;delete;get -// 
+kubebuilder:rbac:groups="serving.kserve.io",resources=trainedmodels,verbs=create;delete;list;update;watch;patch;get -// +kubebuilder:rbac:groups="serving.kserve.io",resources=servingruntimes/status,verbs=update;patch;get -// +kubebuilder:rbac:groups="serving.kserve.io",resources=servingruntimes/finalizers,verbs=create;delete;list;update;watch;patch;get -// +kubebuilder:rbac:groups="serving.kserve.io",resources=servingruntimes,verbs=* -// +kubebuilder:rbac:groups="serving.kserve.io",resources=predictors/status,verbs=update;patch;delete;get -// +kubebuilder:rbac:groups="serving.kserve.io",resources=predictors/finalizers,verbs=update;patch;get -// +kubebuilder:rbac:groups="serving.kserve.io",resources=predictors,verbs=create;delete;list;update;watch;patch;get -// +kubebuilder:rbac:groups="serving.kserve.io",resources=inferenceservices/status,verbs=update;patch;delete;get -// +kubebuilder:rbac:groups="serving.kserve.io",resources=inferenceservices/finalizers,verbs=create;delete;list;update;watch;patch;get -// +kubebuilder:rbac:groups="serving.kserve.io",resources=inferenceservices,verbs=create;delete;list;update;watch;patch;get -// +kubebuilder:rbac:groups="serving.kserve.io",resources=inferencegraphs/status,verbs=update;patch;delete;get -// +kubebuilder:rbac:groups="serving.kserve.io",resources=inferencegraphs,verbs=create;delete;list;update;watch;patch;get -// +kubebuilder:rbac:groups="serving.kserve.io",resources=clusterservingruntimes/status,verbs=update;patch;delete;get -// +kubebuilder:rbac:groups="serving.kserve.io",resources=clusterservingruntimes/finalizers,verbs=create;delete;list;update;watch;patch;get -// +kubebuilder:rbac:groups="serving.kserve.io",resources=clusterservingruntimes,verbs=create;delete;list;update;watch;patch;get - -// +kubebuilder:rbac:groups="serving.knative.dev",resources=services/status,verbs=update;patch;delete;get -// 
+kubebuilder:rbac:groups="serving.knative.dev",resources=services/finalizers,verbs=create;delete;list;watch;update;patch;get -// +kubebuilder:rbac:groups="serving.knative.dev",resources=services,verbs=create;delete;list;watch;update;patch;get - // +kubebuilder:rbac:groups="security.openshift.io",resources=securitycontextconstraints,verbs=*,resourceNames=restricted // +kubebuilder:rbac:groups="security.openshift.io",resources=securitycontextconstraints,verbs=*,resourceNames=anyuid // +kubebuilder:rbac:groups="security.openshift.io",resources=securitycontextconstraints,verbs=* @@ -89,10 +31,6 @@ package datasciencecluster // +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources=clusterrolebindings,verbs=* -// +kubebuilder:rbac:groups="ray.io",resources=rayservices,verbs=create;delete;list;watch;update;patch;get -// +kubebuilder:rbac:groups="ray.io",resources=rayjobs,verbs=create;delete;list;update;watch;patch;get -// +kubebuilder:rbac:groups="ray.io",resources=rayclusters,verbs=create;delete;list;patch;get - // +kubebuilder:rbac:groups="apiregistration.k8s.io",resources=apiservices,verbs=create;delete;list;watch;update;patch;get // +kubebuilder:rbac:groups="operator.openshift.io",resources=consoles,verbs=get;list;watch;patch;delete @@ -142,7 +80,6 @@ package datasciencecluster // +kubebuilder:rbac:groups="machine.openshift.io",resources=machineautoscalers,verbs=list;patch;delete;get // +kubebuilder:rbac:groups="integreatly.org",resources=rhmis,verbs=list;watch;patch;delete;get - // +kubebuilder:rbac:groups="image.openshift.io",resources=imagestreams,verbs=patch;create;update;delete;get // +kubebuilder:rbac:groups="image.openshift.io",resources=imagestreams,verbs=create;list;watch;patch;delete;get @@ -180,6 +117,7 @@ package datasciencecluster // +kubebuilder:rbac:groups="core",resources=configmaps,verbs=get;create;update;watch;patch;delete;list // +kubebuilder:rbac:groups="core",resources=clusterversions,verbs=watch;list;get + // 
+kubebuilder:rbac:groups="config.openshift.io",resources=clusterversions,verbs=watch;list;get // +kubebuilder:rbac:groups="coordination.k8s.io",resources=leases,verbs=get;list;watch;create;update;patch;delete @@ -207,11 +145,8 @@ package datasciencecluster // +kubebuilder:rbac:groups="authorization.openshift.io",resources=clusterroles,verbs=* // +kubebuilder:rbac:groups="authorization.openshift.io",resources=clusterrolebindings,verbs=* -// +kubebuilder:rbac:groups="argoproj.io",resources=workflows,verbs=* - -// +kubebuilder:rbac:groups="apps",resources=statefulsets,verbs=* - // +kubebuilder:rbac:groups="apps",resources=replicasets,verbs=* +// +kubebuilder:rbac:groups="*",resources=replicasets,verbs=* // +kubebuilder:rbac:groups="apps",resources=deployments/finalizers,verbs=* // +kubebuilder:rbac:groups="core",resources=deployments,verbs=* @@ -219,21 +154,134 @@ package datasciencecluster // +kubebuilder:rbac:groups="*",resources=deployments,verbs=* // +kubebuilder:rbac:groups="extensions",resources=deployments,verbs=* -// +kubebuilder:rbac:groups="apiextensions.k8s.io",resources=customresourcedefinitions,verbs=get;list;watch;create;patch;delete - // +kubebuilder:rbac:groups="admissionregistration.k8s.io",resources=validatingwebhookconfigurations,verbs=get;list;watch;create;update;delete;patch // +kubebuilder:rbac:groups="admissionregistration.k8s.io",resources=mutatingwebhookconfigurations,verbs=create;delete;list;update;watch;patch;get -/* This is needed to derterminiate cluster type */ -// +kubebuilder:rbac:groups="addons.managed.openshift.io",resources=addons,verbs=get - // +kubebuilder:rbac:groups="*",resources=statefulsets,verbs=create;update;get;list;watch;patch;delete +// +kubebuilder:rbac:groups="apps",resources=statefulsets,verbs=* -// +kubebuilder:rbac:groups="*",resources=replicasets,verbs=* +/* Only for RHOAI */ +// +kubebuilder:rbac:groups="user.openshift.io",resources=users,verbs=list;watch;patch;delete;get +// 
+kubebuilder:rbac:groups="user.openshift.io",resources=groups,verbs=get;create;list;watch;patch;delete +// +kubebuilder:rbac:groups="console.openshift.io",resources=consolelinks,verbs=create;get;patch;list;delete;watch + +// Ray +// +kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=rays,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=rays/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=rays/finalizers,verbs=update +// +kubebuilder:rbac:groups="ray.io",resources=rayservices,verbs=create;delete;list;watch;update;patch;get +// +kubebuilder:rbac:groups="ray.io",resources=rayjobs,verbs=create;delete;list;update;watch;patch;get +// +kubebuilder:rbac:groups="ray.io",resources=rayclusters,verbs=create;delete;list;patch;get +// +kubebuilder:rbac:groups="autoscaling",resources=horizontalpodautoscalers,verbs=watch;create;update;delete;list;patch;get +// +kubebuilder:rbac:groups="autoscaling.openshift.io",resources=machinesets,verbs=list;patch;delete;get +// +kubebuilder:rbac:groups="autoscaling.openshift.io",resources=machineautoscalers,verbs=list;patch;delete;get +// +kubebuilder:rbac:groups="batch",resources=jobs/status,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups="batch",resources=jobs,verbs=* +// +kubebuilder:rbac:groups="batch",resources=cronjobs,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups="batch",resources=cronjobs,verbs=create;get;patch + +// Dashboard +// +kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=dashboards,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=dashboards/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=dashboards/finalizers,verbs=create;get;list;patch;update;use;watch +// 
+kubebuilder:rbac:groups="opendatahub.io",resources=odhdashboardconfigs,verbs=create;get;patch;watch;update;delete;list +// +kubebuilder:rbac:groups="console.openshift.io",resources=odhquickstarts,verbs=create;get;patch;list;delete;watch +// +kubebuilder:rbac:groups="dashboard.opendatahub.io",resources=odhdocuments,verbs=create;get;patch;list;delete;watch +// +kubebuilder:rbac:groups="dashboard.opendatahub.io",resources=odhapplications,verbs=create;get;patch;list;delete;watch +// +kubebuilder:rbac:groups="dashboard.opendatahub.io",resources=acceleratorprofiles,verbs=create;get;patch;list;delete;watch + +// ModelRegistry +// +kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=modelregistries,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=modelregistries/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=modelregistries/finalizers,verbs=update +// +kubebuilder:rbac:groups=modelregistry.opendatahub.io,resources=modelregistries,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=modelregistry.opendatahub.io,resources=modelregistries/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=modelregistry.opendatahub.io,resources=modelregistries/finalizers,verbs=update;get +// +kubebuilder:rbac:groups=maistra.io,resources=servicemeshmembers,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups="*",resources=customresourcedefinitions,verbs=get;list;watch +// Kueue +// +kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=kueues,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=kueues/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=kueues/finalizers,verbs=update +// 
+kubebuilder:rbac:groups="monitoring.coreos.com",resources=prometheusrules,verbs=get;create;patch;delete;deletecollection;list;watch +// +kubebuilder:rbac:groups="monitoring.coreos.com",resources=podmonitors,verbs=get;create;delete;update;watch;list;patch -/* Only for RHOAI */ +// CFO (CodeFlare Operator) +//+kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=codeflares,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=codeflares/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=codeflares/finalizers,verbs=update -// +kubebuilder:rbac:groups="user.openshift.io",resources=groups,verbs=get;create;list;watch;patch;delete -// +kubebuilder:rbac:groups="console.openshift.io",resources=consolelinks,verbs=create;get;patch;delete +// Kserve +// +kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=kserves,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=kserves/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=kserves/finalizers,verbs=update +// +kubebuilder:rbac:groups="serving.kserve.io",resources=trainedmodels/status,verbs=update;patch;delete;get +// +kubebuilder:rbac:groups="serving.kserve.io",resources=trainedmodels,verbs=create;delete;list;update;watch;patch;get +// +kubebuilder:rbac:groups="serving.kserve.io",resources=servingruntimes/status,verbs=update;patch;get +// +kubebuilder:rbac:groups="serving.kserve.io",resources=servingruntimes/finalizers,verbs=create;delete;list;update;watch;patch;get +// +kubebuilder:rbac:groups="serving.kserve.io",resources=servingruntimes,verbs=* +// +kubebuilder:rbac:groups="serving.kserve.io",resources=predictors/status,verbs=update;patch;delete;get +// +kubebuilder:rbac:groups="serving.kserve.io",resources=predictors/finalizers,verbs=update;patch;get +// 
+kubebuilder:rbac:groups="serving.kserve.io",resources=predictors,verbs=create;delete;list;update;watch;patch;get +// +kubebuilder:rbac:groups="serving.kserve.io",resources=inferenceservices/status,verbs=update;patch;delete;get +// +kubebuilder:rbac:groups="serving.kserve.io",resources=inferenceservices/finalizers,verbs=create;delete;list;update;watch;patch;get +// +kubebuilder:rbac:groups="serving.kserve.io",resources=inferenceservices,verbs=create;delete;list;update;watch;patch;get +// +kubebuilder:rbac:groups="serving.kserve.io",resources=inferencegraphs/status,verbs=update;patch;delete;get +// +kubebuilder:rbac:groups="serving.kserve.io",resources=inferencegraphs,verbs=create;delete;list;update;watch;patch;get +// +kubebuilder:rbac:groups="serving.kserve.io",resources=clusterservingruntimes/status,verbs=update;patch;delete;get +// +kubebuilder:rbac:groups="serving.kserve.io",resources=clusterservingruntimes/finalizers,verbs=create;delete;list;update;watch;patch;get +// +kubebuilder:rbac:groups="serving.kserve.io",resources=clusterservingruntimes,verbs=create;delete;list;update;watch;patch;get +// +kubebuilder:rbac:groups="template.openshift.io",resources=templates,verbs=* +// +kubebuilder:rbac:groups="serving.knative.dev",resources=services/status,verbs=update;patch;delete;get +// +kubebuilder:rbac:groups="serving.knative.dev",resources=services/finalizers,verbs=create;delete;list;watch;update;patch;get +// +kubebuilder:rbac:groups="serving.knative.dev",resources=services,verbs=create;delete;list;watch;update;patch;get +/* Serverless prerequisite */ +// +kubebuilder:rbac:groups="networking.istio.io",resources=gateways,verbs=* +// +kubebuilder:rbac:groups="operator.knative.dev",resources=knativeservings,verbs=* +// +kubebuilder:rbac:groups="config.openshift.io",resources=ingresses,verbs=get + +// TODO: WB +// +kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=workbenches,verbs=get;list;watch;create;update;patch;delete +// 
+kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=workbenches/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=workbenches/finalizers,verbs=update +// +kubebuilder:rbac:groups="image.openshift.io",resources=imagestreamtags,verbs=get +// +kubebuilder:rbac:groups="image.openshift.io",resources=imagestreams,verbs=patch;create;update;delete;get +// +kubebuilder:rbac:groups="image.openshift.io",resources=imagestreams,verbs=create;list;watch;patch;delete;get +// OpenVino still needs buildconfig +// +kubebuilder:rbac:groups="build.openshift.io",resources=builds,verbs=create;patch;delete;list;watch;get +// +kubebuilder:rbac:groups="build.openshift.io",resources=buildconfigs/instantiate,verbs=create;patch;delete;get;list;watch +// +kubebuilder:rbac:groups="build.openshift.io",resources=buildconfigs,verbs=list;watch;create;patch;delete;get + +// DataSciencePipelines +// +kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=datasciencepipelines,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=datasciencepipelines/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=datasciencepipelines/finalizers,verbs=update +// +kubebuilder:rbac:groups="datasciencepipelinesapplications.opendatahub.io",resources=datasciencepipelinesapplications/status,verbs=update;patch;get +// +kubebuilder:rbac:groups="datasciencepipelinesapplications.opendatahub.io",resources=datasciencepipelinesapplications/finalizers,verbs=update;patch;get +// +kubebuilder:rbac:groups="datasciencepipelinesapplications.opendatahub.io",resources=datasciencepipelinesapplications,verbs=create;delete;list;update;watch;patch;get +// +kubebuilder:rbac:groups="argoproj.io",resources=workflows,verbs=* + +// TrainingOperator +// 
+kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=trainingoperators,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=trainingoperators/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=trainingoperators/finalizers,verbs=update + +// ModelMeshServing +// +kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=modelmeshservings,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=modelmeshservings/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=modelmeshservings/finalizers,verbs=update + +// TrustyAI +// +kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=trustyais,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=trustyais/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=trustyais/finalizers,verbs=update + +// ModelController +// +kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=modelcontrollers,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=modelcontrollers/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=components.platform.opendatahub.io,resources=modelcontrollers/finalizers,verbs=update + +// Auth +// +kubebuilder:rbac:groups=services.platform.opendatahub.io,resources=auths,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=services.platform.opendatahub.io,resources=auths/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=services.platform.opendatahub.io,resources=auths/finalizers,verbs=update diff --git a/controllers/dscinitialization/auth.go b/controllers/dscinitialization/auth.go new file mode 100644 
index 00000000000..c5cc01dbe3c --- /dev/null +++ b/controllers/dscinitialization/auth.go @@ -0,0 +1,36 @@ +package dscinitialization + +import ( + "context" + + k8serr "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + serviceApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/services/v1alpha1" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/components/dashboard" +) + +func (r *DSCInitializationReconciler) createAuth(ctx context.Context) error { + // Create Auth CR singleton + defaultAuth := client.Object(&serviceApi.Auth{ + TypeMeta: metav1.TypeMeta{ + Kind: serviceApi.AuthKind, + APIVersion: serviceApi.GroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: serviceApi.AuthInstanceName, + }, + Spec: serviceApi.AuthSpec{ + AdminGroups: []string{dashboard.GetAdminGroup()}, + AllowedGroups: []string{"system:authenticated"}, + }, + }, + ) + err := r.Create(ctx, defaultAuth) + if err != nil && !k8serr.IsAlreadyExists(err) { + return err + } + + return nil +} diff --git a/controllers/dscinitialization/dscinitialization_controller.go b/controllers/dscinitialization/dscinitialization_controller.go index ac331d6cf7a..dc99f32081b 100644 --- a/controllers/dscinitialization/dscinitialization_controller.go +++ b/controllers/dscinitialization/dscinitialization_controller.go @@ -22,7 +22,6 @@ import ( "path/filepath" "reflect" - "github.com/go-logr/logr" operatorv1 "github.com/openshift/api/operator/v1" routev1 "github.com/openshift/api/route/v1" appsv1 "k8s.io/api/apps/v1" @@ -40,14 +39,18 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" + logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" dscv1 
"github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + serviceApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/services/v1alpha1" "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + odhClient "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/client" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/logger" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/trustedcabundle" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/upgrade" ) @@ -62,36 +65,26 @@ var managementStateChangeTrustedCA = false // DSCInitializationReconciler reconciles a DSCInitialization object. type DSCInitializationReconciler struct { - client.Client + *odhClient.Client Scheme *runtime.Scheme - Log logr.Logger Recorder record.EventRecorder ApplicationsNamespace string } -// +kubebuilder:rbac:groups="dscinitialization.opendatahub.io",resources=dscinitializations/status,verbs=get;update;patch;delete -// +kubebuilder:rbac:groups="dscinitialization.opendatahub.io",resources=dscinitializations/finalizers,verbs=get;update;patch;delete -// +kubebuilder:rbac:groups="dscinitialization.opendatahub.io",resources=dscinitializations,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups="features.opendatahub.io",resources=featuretrackers,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups="features.opendatahub.io",resources=featuretrackers/status,verbs=get;update;patch;delete -// +kubebuilder:rbac:groups="config.openshift.io",resources=authentications,verbs=get;watch;list - // Reconcile contains controller logic specific to DSCInitialization instance updates. 
func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { //nolint:funlen,gocyclo,maintidx - r.Log.Info("Reconciling DSCInitialization.", "DSCInitialization Request.Name", req.Name) + log := logf.FromContext(ctx).WithName("DSCInitialization") + log.Info("Reconciling DSCInitialization.", "DSCInitialization Request.Name", req.Name) - currentOperatorRelease, err := cluster.GetRelease(ctx, r.Client) - if err != nil { - r.Log.Error(err, "failed to get operator release version") - return ctrl.Result{}, err - } + currentOperatorRelease := cluster.GetRelease() // Set platform platform := currentOperatorRelease.Name instances := &dsciv1.DSCInitializationList{} if err := r.Client.List(ctx, instances); err != nil { - r.Log.Error(err, "Failed to retrieve DSCInitialization resource.", "DSCInitialization Request.Name", req.Name) + log.Error(err, "Failed to retrieve DSCInitialization resource.", "DSCInitialization Request.Name", req.Name) r.Recorder.Eventf(instances, corev1.EventTypeWarning, "DSCInitializationReconcileError", "Failed to retrieve DSCInitialization instance") + return ctrl.Result{}, err } @@ -103,16 +96,25 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re instance = &instances.Items[0] } + if instance.Spec.DevFlags != nil { + level := instance.Spec.DevFlags.LogLevel + log.V(1).Info("Setting log level", "level", level) + err := logger.SetLevel(level) + if err != nil { + log.Error(err, "Failed to set log level", "level", level) + } + } + if instance.ObjectMeta.DeletionTimestamp.IsZero() { if !controllerutil.ContainsFinalizer(instance, finalizerName) { - r.Log.Info("Adding finalizer for DSCInitialization", "name", instance.Name, "finalizer", finalizerName) + log.Info("Adding finalizer for DSCInitialization", "name", instance.Name, "finalizer", finalizerName) controllerutil.AddFinalizer(instance, finalizerName) if err := r.Update(ctx, instance); err != nil { return ctrl.Result{}, err 
} } } else { - r.Log.Info("Finalization DSCInitialization start deleting instance", "name", instance.Name, "finalizer", finalizerName) + log.Info("Finalization DSCInitialization start deleting instance", "name", instance.Name, "finalizer", finalizerName) if err := r.removeServiceMesh(ctx, instance); err != nil { return reconcile.Result{}, err } @@ -131,7 +133,7 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re return nil }) if err != nil { - r.Log.Error(err, "Failed to remove finalizer when deleting DSCInitialization instance") + log.Error(err, "Failed to remove finalizer when deleting DSCInitialization instance") return ctrl.Result{}, err } @@ -142,15 +144,16 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re if instance.Status.Conditions == nil { reason := status.ReconcileInit message := "Initializing DSCInitialization resource" - instance, err = status.UpdateWithRetry(ctx, r.Client, instance, func(saved *dsciv1.DSCInitialization) { + instance, err := status.UpdateWithRetry(ctx, r.Client, instance, func(saved *dsciv1.DSCInitialization) { status.SetProgressingCondition(&saved.Status.Conditions, reason, message) saved.Status.Phase = status.PhaseProgressing saved.Status.Release = currentOperatorRelease }) if err != nil { - r.Log.Error(err, "Failed to add conditions to status of DSCInitialization resource.", "DSCInitialization", req.Namespace, "Request.Name", req.Name) + log.Error(err, "Failed to add conditions to status of DSCInitialization resource.", "DSCInitialization", req.Namespace, "Request.Name", req.Name) r.Recorder.Eventf(instance, corev1.EventTypeWarning, "DSCInitializationReconcileError", "%s for instance %s", message, instance.Name) + return reconcile.Result{}, err } } @@ -162,7 +165,7 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re saved.Status.Release = currentOperatorRelease }) if err != nil { - r.Log.Error(err, "Failed to update release version for 
DSCInitialization resource.", "DSCInitialization", req.Namespace, "Request.Name", req.Name) + log.Error(err, "Failed to update release version for DSCInitialization resource.", "DSCInitialization", req.Namespace, "Request.Name", req.Name) r.Recorder.Eventf(instance, corev1.EventTypeWarning, "DSCInitializationReconcileError", "%s for instance %s", message, instance.Name) return reconcile.Result{}, err @@ -171,14 +174,14 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re // Check namespace is not exist, then create namespace := instance.Spec.ApplicationsNamespace - err = r.createOdhNamespace(ctx, instance, namespace, platform) + err := r.createOdhNamespace(ctx, instance, namespace, platform) if err != nil { // no need to log error as it was already logged in createOdhNamespace return reconcile.Result{}, err } // Check ManagementState to verify if odh-trusted-ca-bundle Configmap should be configured for namespaces - if err := trustedcabundle.ConfigureTrustedCABundle(ctx, r.Client, r.Log, instance, managementStateChangeTrustedCA); err != nil { + if err := trustedcabundle.ConfigureTrustedCABundle(ctx, r.Client, log, instance, managementStateChangeTrustedCA); err != nil { return reconcile.Result{}, err } managementStateChangeTrustedCA = false @@ -186,41 +189,45 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re switch req.Name { case "prometheus": // prometheus configmap if instance.Spec.Monitoring.ManagementState == operatorv1.Managed && platform == cluster.ManagedRhoai { - r.Log.Info("Monitoring enabled to restart deployment", "cluster", "Managed Service Mode") + log.Info("Monitoring enabled to restart deployment", "cluster", "Managed Service Mode") err := r.configureManagedMonitoring(ctx, instance, "updates") if err != nil { return reconcile.Result{}, err } } + return ctrl.Result{}, nil case "addon-managed-odh-parameters": if instance.Spec.Monitoring.ManagementState == operatorv1.Managed && platform == 
cluster.ManagedRhoai { - r.Log.Info("Monitoring enabled when notification updated", "cluster", "Managed Service Mode") + log.Info("Monitoring enabled when notification updated", "cluster", "Managed Service Mode") err := r.configureManagedMonitoring(ctx, instance, "updates") if err != nil { return reconcile.Result{}, err } } + return ctrl.Result{}, nil case "backup": // revert back to the original prometheus.yml if instance.Spec.Monitoring.ManagementState == operatorv1.Managed && platform == cluster.ManagedRhoai { - r.Log.Info("Monitoring enabled to restore back", "cluster", "Managed Service Mode") + log.Info("Monitoring enabled to restore back", "cluster", "Managed Service Mode") err := r.configureManagedMonitoring(ctx, instance, "revertbackup") if err != nil { return reconcile.Result{}, err } } + return ctrl.Result{}, nil default: createUsergroup, err := cluster.IsDefaultAuthMethod(ctx, r.Client) if err != nil && !k8serr.IsNotFound(err) { // only keep reconcile if real error but not missing CRD or missing CR return ctrl.Result{}, err } + switch platform { case cluster.SelfManagedRhoai: // Check if user opted for disabling creating user groups if !createUsergroup { - r.Log.Info("DSCI disabled usergroup creation") + log.Info("DSCI disabled usergroup creation") } else { err := r.createUserGroup(ctx, instance, "rhods-admins") if err != nil { @@ -228,7 +235,7 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re } } if instance.Spec.Monitoring.ManagementState == operatorv1.Managed { - r.Log.Info("Monitoring enabled, won't apply changes", "cluster", "Self-Managed RHODS Mode") + log.Info("Monitoring enabled, won't apply changes", "cluster", "Self-Managed RHODS Mode") err = r.configureCommonMonitoring(ctx, instance) if err != nil { return reconcile.Result{}, err @@ -238,12 +245,13 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re osdConfigsPath := filepath.Join(deploy.DefaultManifestPath, "osd-configs") err = 
deploy.DeployManifestsFromPath(ctx, r.Client, instance, osdConfigsPath, r.ApplicationsNamespace, "osd", true) if err != nil { - r.Log.Error(err, "Failed to apply osd specific configs from manifests", "Manifests path", osdConfigsPath) + log.Error(err, "Failed to apply osd specific configs from manifests", "Manifests path", osdConfigsPath) r.Recorder.Eventf(instance, corev1.EventTypeWarning, "DSCInitializationReconcileError", "Failed to apply "+osdConfigsPath) + return reconcile.Result{}, err } if instance.Spec.Monitoring.ManagementState == operatorv1.Managed { - r.Log.Info("Monitoring enabled in initialization stage", "cluster", "Managed Service Mode") + log.Info("Monitoring enabled in initialization stage", "cluster", "Managed Service Mode") err := r.configureManagedMonitoring(ctx, instance, "init") if err != nil { return reconcile.Result{}, err @@ -256,7 +264,7 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re default: // Check if user opted for disabling creating user groups if !createUsergroup { - r.Log.Info("DSCI disabled usergroup creation") + log.Info("DSCI disabled usergroup creation") } else { err := r.createUserGroup(ctx, instance, "odh-admins") if err != nil { @@ -264,7 +272,7 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re } } if instance.Spec.Monitoring.ManagementState == operatorv1.Managed { - r.Log.Info("Monitoring enabled, won't apply changes", "cluster", "ODH Mode") + log.Info("Monitoring enabled, won't apply changes", "cluster", "ODH Mode") } } @@ -273,15 +281,22 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re return reconcile.Result{}, errServiceMesh } + err = r.createAuth(ctx) + if err != nil { + log.Info("failed to create Auth") + return ctrl.Result{}, err + } + // Finish reconciling _, err = status.UpdateWithRetry[*dsciv1.DSCInitialization](ctx, r.Client, instance, func(saved *dsciv1.DSCInitialization) { 
status.SetCompleteCondition(&saved.Status.Conditions, status.ReconcileCompleted, status.ReconcileCompletedMessage) saved.Status.Phase = status.PhaseReady }) if err != nil { - r.Log.Error(err, "failed to update DSCInitialization status after successfully completed reconciliation") + log.Error(err, "failed to update DSCInitialization status after successfully completed reconciliation") r.Recorder.Eventf(instance, corev1.EventTypeWarning, "DSCInitializationReconcileError", "Failed to update DSCInitialization status") } + return ctrl.Result{}, nil } } @@ -348,6 +363,10 @@ func (r *DSCInitializationReconciler) SetupWithManager(ctx context.Context, mgr handler.EnqueueRequestsFromMapFunc(r.watchMonitoringConfigMapResource), builder.WithPredicates(CMContentChangedPredicate), ). + Watches( + &serviceApi.Auth{}, + handler.EnqueueRequestsFromMapFunc(r.watchAuthResource), + ). Complete(r) } @@ -387,23 +406,25 @@ var dsciPredicateStateChangeTrustedCA = predicate.Funcs{ }, } -func (r *DSCInitializationReconciler) watchMonitoringConfigMapResource(_ context.Context, a client.Object) []reconcile.Request { +func (r *DSCInitializationReconciler) watchMonitoringConfigMapResource(ctx context.Context, a client.Object) []reconcile.Request { + log := logf.FromContext(ctx) if a.GetName() == "prometheus" && a.GetNamespace() == "redhat-ods-monitoring" { - r.Log.Info("Found monitoring configmap has updated, start reconcile") + log.Info("Found monitoring configmap has updated, start reconcile") return []reconcile.Request{{NamespacedName: types.NamespacedName{Name: "prometheus", Namespace: "redhat-ods-monitoring"}}} } return nil } -func (r *DSCInitializationReconciler) watchMonitoringSecretResource(_ context.Context, a client.Object) []reconcile.Request { +func (r *DSCInitializationReconciler) watchMonitoringSecretResource(ctx context.Context, a client.Object) []reconcile.Request { + log := logf.FromContext(ctx) operatorNs, err := cluster.GetOperatorNamespace() if err != nil { return nil } if 
a.GetName() == "addon-managed-odh-parameters" && a.GetNamespace() == operatorNs { - r.Log.Info("Found monitoring secret has updated, start reconcile") + log.Info("Found monitoring secret has updated, start reconcile") return []reconcile.Request{{NamespacedName: types.NamespacedName{Name: "addon-managed-odh-parameters", Namespace: operatorNs}}} } @@ -411,16 +432,34 @@ func (r *DSCInitializationReconciler) watchMonitoringSecretResource(_ context.Co } func (r *DSCInitializationReconciler) watchDSCResource(ctx context.Context) []reconcile.Request { + log := logf.FromContext(ctx) instanceList := &dscv1.DataScienceClusterList{} if err := r.Client.List(ctx, instanceList); err != nil { // do not handle if cannot get list - r.Log.Error(err, "Failed to get DataScienceClusterList") + log.Error(err, "Failed to get DataScienceClusterList") return nil } if len(instanceList.Items) == 0 && !upgrade.HasDeleteConfigMap(ctx, r.Client) { - r.Log.Info("Found no DSC instance in cluster but not in uninstalltion process, reset monitoring stack config") + log.Info("Found no DSC instance in cluster but not in uninstallation process, reset monitoring stack config") return []reconcile.Request{{NamespacedName: types.NamespacedName{Name: "backup"}}} } return nil } + +func (r *DSCInitializationReconciler) watchAuthResource(ctx context.Context, a client.Object) []reconcile.Request { + log := logf.FromContext(ctx) + instanceList := &serviceApi.AuthList{} + if err := r.Client.List(ctx, instanceList); err != nil { + // do not handle if cannot get list + log.Error(err, "Failed to get AuthList") + return nil + } + if len(instanceList.Items) == 0 { + log.Info("Found no Auth instance in cluster, reconciling to recreate") + + return []reconcile.Request{{NamespacedName: types.NamespacedName{Name: "auth", Namespace: r.ApplicationsNamespace}}} + } + + return nil +} diff --git a/controllers/dscinitialization/dscinitialization_test.go b/controllers/dscinitialization/dscinitialization_test.go index 
a00e31bed81..4984dc3e626 100644 --- a/controllers/dscinitialization/dscinitialization_test.go +++ b/controllers/dscinitialization/dscinitialization_test.go @@ -12,8 +12,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/tests/envtestutil" + serviceApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/services/v1alpha1" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -21,10 +22,10 @@ import ( const ( workingNamespace = "test-operator-ns" + applicationName = "default-dsci" applicationNamespace = "test-application-ns" usergroupName = "odh-admins" configmapName = "odh-common-config" - applicationName = "default-dsci" monitoringNamespace = "test-monitoring-ns" readyPhase = "Ready" ) @@ -35,7 +36,7 @@ var _ = Describe("DataScienceCluster initialization", func() { BeforeEach(func(ctx context.Context) { // when - desiredDsci := createDSCI(applicationName, operatorv1.Managed, operatorv1.Managed, monitoringNamespace) + desiredDsci := createDSCI(operatorv1.Managed, operatorv1.Managed, monitoringNamespace) Expect(k8sClient.Create(ctx, desiredDsci)).Should(Succeed()) foundDsci := &dsciv1.DSCInitialization{} Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci)). @@ -129,7 +130,6 @@ var _ = Describe("DataScienceCluster initialization", func() { WithPolling(interval). 
Should(BeFalse()) }) - }) Context("Monitoring Resource", func() { @@ -138,7 +138,7 @@ var _ = Describe("DataScienceCluster initialization", func() { const applicationName = "default-dsci" It("Should not create monitoring namespace if monitoring is disabled", func(ctx context.Context) { // when - desiredDsci := createDSCI(applicationName, operatorv1.Removed, operatorv1.Managed, monitoringNamespace2) + desiredDsci := createDSCI(operatorv1.Removed, operatorv1.Managed, monitoringNamespace2) Expect(k8sClient.Create(ctx, desiredDsci)).Should(Succeed()) foundDsci := &dsciv1.DSCInitialization{} Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci)). @@ -156,7 +156,7 @@ var _ = Describe("DataScienceCluster initialization", func() { }) It("Should create default monitoring namespace if monitoring enabled", func(ctx context.Context) { // when - desiredDsci := createDSCI(applicationName, operatorv1.Managed, operatorv1.Managed, monitoringNamespace2) + desiredDsci := createDSCI(operatorv1.Managed, operatorv1.Managed, monitoringNamespace2) Expect(k8sClient.Create(ctx, desiredDsci)).Should(Succeed()) foundDsci := &dsciv1.DSCInitialization{} Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci)). 
@@ -177,8 +177,9 @@ var _ = Describe("DataScienceCluster initialization", func() { Context("Handling existing resources", func() { AfterEach(cleanupResources) + const applicationName = "default-dsci" + It("Should not update rolebinding if it exists", func(ctx context.Context) { - applicationName := envtestutil.AppendRandomNameTo("rolebinding-test") // given desiredRoleBinding := &rbacv1.RoleBinding{ @@ -206,7 +207,7 @@ var _ = Describe("DataScienceCluster initialization", func() { Should(BeTrue()) // when - desiredDsci := createDSCI(applicationName, operatorv1.Managed, operatorv1.Managed, monitoringNamespace) + desiredDsci := createDSCI(operatorv1.Managed, operatorv1.Managed, monitoringNamespace) Expect(k8sClient.Create(ctx, desiredDsci)).Should(Succeed()) foundDsci := &dsciv1.DSCInitialization{} Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci)). @@ -249,7 +250,7 @@ var _ = Describe("DataScienceCluster initialization", func() { Should(BeTrue()) // when - desiredDsci := createDSCI(applicationName, operatorv1.Managed, operatorv1.Managed, monitoringNamespace) + desiredDsci := createDSCI(operatorv1.Managed, operatorv1.Managed, monitoringNamespace) Expect(k8sClient.Create(ctx, desiredDsci)).Should(Succeed()) foundDsci := &dsciv1.DSCInitialization{} Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci)). @@ -288,7 +289,7 @@ var _ = Describe("DataScienceCluster initialization", func() { Should(BeTrue()) // when - desiredDsci := createDSCI(applicationName, operatorv1.Managed, operatorv1.Managed, monitoringNamespace) + desiredDsci := createDSCI(operatorv1.Managed, operatorv1.Managed, monitoringNamespace) Expect(k8sClient.Create(ctx, desiredDsci)).Should(Succeed()) foundDsci := &dsciv1.DSCInitialization{} Eventually(dscInitializationIsReady(applicationName, workingNamespace, foundDsci)). 
@@ -361,29 +362,31 @@ func namespaceExists(ns string, obj client.Object) func(ctx context.Context) boo } } -func objectExists(ns string, name string, obj client.Object) func(ctx context.Context) bool { +func objectExists(name string, namespace string, obj client.Object) func(ctx context.Context) bool { return func(ctx context.Context) bool { - err := k8sClient.Get(ctx, client.ObjectKey{Name: ns, Namespace: name}, obj) + err := k8sClient.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, obj) return err == nil } } -func createDSCI(appName string, enableMonitoring operatorv1.ManagementState, enableTrustedCABundle operatorv1.ManagementState, monitoringNS string) *dsciv1.DSCInitialization { +func createDSCI(enableMonitoring operatorv1.ManagementState, enableTrustedCABundle operatorv1.ManagementState, monitoringNS string) *dsciv1.DSCInitialization { return &dsciv1.DSCInitialization{ TypeMeta: metav1.TypeMeta{ Kind: "DSCInitialization", APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ - Name: appName, + Name: applicationName, Namespace: workingNamespace, }, Spec: dsciv1.DSCInitializationSpec{ ApplicationsNamespace: applicationNamespace, - Monitoring: dsciv1.Monitoring{ - Namespace: monitoringNS, - ManagementState: enableMonitoring, + Monitoring: serviceApi.DSCMonitoring{ + ManagementSpec: common.ManagementSpec{ManagementState: enableMonitoring}, + MonitoringCommonSpec: serviceApi.MonitoringCommonSpec{ + Namespace: monitoringNS, + }, }, TrustedCABundle: &dsciv1.TrustedCABundleSpec{ ManagementState: enableTrustedCABundle, diff --git a/controllers/dscinitialization/kubebuilder_rbac.go b/controllers/dscinitialization/kubebuilder_rbac.go new file mode 100644 index 00000000000..7f5ac8d662e --- /dev/null +++ b/controllers/dscinitialization/kubebuilder_rbac.go @@ -0,0 +1,48 @@ +package dscinitialization + +// +kubebuilder:rbac:groups="dscinitialization.opendatahub.io",resources=dscinitializations/status,verbs=get;update;patch;delete +// 
+kubebuilder:rbac:groups="dscinitialization.opendatahub.io",resources=dscinitializations/finalizers,verbs=get;update;patch;delete +// +kubebuilder:rbac:groups="dscinitialization.opendatahub.io",resources=dscinitializations,verbs=get;list;watch;create;update;patch;delete;deletecollection +// +kubebuilder:rbac:groups="features.opendatahub.io",resources=featuretrackers,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups="features.opendatahub.io",resources=featuretrackers/status,verbs=get;update;patch;delete + +/* Auth */ +// +kubebuilder:rbac:groups="config.openshift.io",resources=authentications,verbs=get;watch;list + +/* Service Mesh Integration */ +// +kubebuilder:rbac:groups="maistra.io",resources=servicemeshcontrolplanes,verbs=create;get;list;patch;update;use;watch +// +kubebuilder:rbac:groups="maistra.io",resources=servicemeshmemberrolls,verbs=create;get;list;patch;update;use;watch +// +kubebuilder:rbac:groups="maistra.io",resources=servicemeshmembers,verbs=create;get;list;patch;update;use;watch +// +kubebuilder:rbac:groups="maistra.io",resources=servicemeshmembers/finalizers,verbs=create;get;list;patch;update;use;watch +// +kubebuilder:rbac:groups="networking.istio.io",resources=virtualservices/status,verbs=update;patch;delete;get +// +kubebuilder:rbac:groups="networking.istio.io",resources=virtualservices/finalizers,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups="networking.istio.io",resources=virtualservices,verbs=* +// +kubebuilder:rbac:groups="networking.istio.io",resources=gateways,verbs=* +// +kubebuilder:rbac:groups="networking.istio.io",resources=envoyfilters,verbs=* +// +kubebuilder:rbac:groups="security.istio.io",resources=authorizationpolicies,verbs=* +// +kubebuilder:rbac:groups="authorino.kuadrant.io",resources=authconfigs,verbs=* +// +kubebuilder:rbac:groups="operator.authorino.kuadrant.io",resources=authorinos,verbs=* + +// TODO: move to monitoring own file +// 
+kubebuilder:rbac:groups="route.openshift.io",resources=routers/metrics,verbs=get +// +kubebuilder:rbac:groups="route.openshift.io",resources=routers/federate,verbs=get +// +kubebuilder:rbac:groups="image.openshift.io",resources=registry/metrics,verbs=get + +// +kubebuilder:rbac:groups="monitoring.coreos.com",resources=servicemonitors,verbs=get;create;delete;update;watch;list;patch;deletecollection +// +kubebuilder:rbac:groups="monitoring.coreos.com",resources=podmonitors,verbs=get;create;delete;update;watch;list;patch +// +kubebuilder:rbac:groups="monitoring.coreos.com",resources=prometheusrules,verbs=get;create;patch;delete;deletecollection +// +kubebuilder:rbac:groups="monitoring.coreos.com",resources=prometheuses,verbs=get;create;patch;delete;deletecollection +// +kubebuilder:rbac:groups="monitoring.coreos.com",resources=prometheuses/finalizers,verbs=get;create;patch;delete;deletecollection +// +kubebuilder:rbac:groups="monitoring.coreos.com",resources=prometheuses/status,verbs=get;create;patch;delete;deletecollection +// +kubebuilder:rbac:groups="monitoring.coreos.com",resources=alertmanagers,verbs=get;create;patch;delete;deletecollection +// +kubebuilder:rbac:groups="monitoring.coreos.com",resources=alertmanagers/finalizers,verbs=get;create;patch;delete;deletecollection +// +kubebuilder:rbac:groups="monitoring.coreos.com",resources=alertmanagers/status,verbs=get;create;patch;delete;deletecollection +// +kubebuilder:rbac:groups="monitoring.coreos.com",resources=alertmanagerconfigs,verbs=get;create;patch;delete;deletecollection +// +kubebuilder:rbac:groups="monitoring.coreos.com",resources=thanosrulers,verbs=get;create;patch;delete;deletecollection +// +kubebuilder:rbac:groups="monitoring.coreos.com",resources=thanosrulers/finalizers,verbs=get;create;patch;delete;deletecollection +// +kubebuilder:rbac:groups="monitoring.coreos.com",resources=thanosrulers/status,verbs=get;create;patch;delete;deletecollection +// 
+kubebuilder:rbac:groups="monitoring.coreos.com",resources=probes,verbs=get;create;patch;delete;deletecollection + +//+kubebuilder:rbac:groups=services.platform.opendatahub.io,resources=monitorings,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=services.platform.opendatahub.io,resources=monitorings/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=services.platform.opendatahub.io,resources=monitorings/finalizers,verbs=update diff --git a/controllers/dscinitialization/monitoring.go b/controllers/dscinitialization/monitoring.go index 5e362051b98..f3f0df71636 100644 --- a/controllers/dscinitialization/monitoring.go +++ b/controllers/dscinitialization/monitoring.go @@ -15,6 +15,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" @@ -22,10 +23,6 @@ import ( "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" ) -// +kubebuilder:rbac:groups="route.openshift.io",resources=routers/metrics,verbs=get -// +kubebuilder:rbac:groups="route.openshift.io",resources=routers/federate,verbs=get -// +kubebuilder:rbac:groups="image.openshift.io",resources=registry/metrics,verbs=get - var ( ComponentName = "monitoring" alertManagerPath = filepath.Join(deploy.DefaultManifestPath, ComponentName, "alertmanager") @@ -39,6 +36,7 @@ var ( // only when reconcile on DSCI CR, initial set to true // if reconcile from monitoring, initial set to false, skip blackbox and rolebinding. 
func (r *DSCInitializationReconciler) configureManagedMonitoring(ctx context.Context, dscInit *dsciv1.DSCInitialization, initial string) error { + log := logf.FromContext(ctx) if initial == "init" { // configure Blackbox exporter if err := configureBlackboxExporter(ctx, dscInit, r); err != nil { @@ -63,7 +61,7 @@ func (r *DSCInitializationReconciler) configureManagedMonitoring(ctx context.Con "(.*)-(.*)trainingoperator(.*).rules": "", }) if err != nil { - r.Log.Error(err, "error to remove previous enabled component rules") + log.Error(err, "error to remove previous enabled component rules") return err } } @@ -85,34 +83,35 @@ func (r *DSCInitializationReconciler) configureManagedMonitoring(ctx context.Con } } - r.Log.Info("Success: finish config managed monitoring stack!") + log.Info("Success: finish config managed monitoring stack!") return nil } func configureAlertManager(ctx context.Context, dsciInit *dsciv1.DSCInitialization, r *DSCInitializationReconciler) error { + log := logf.FromContext(ctx) // Get Deadmansnitch secret deadmansnitchSecret, err := r.waitForManagedSecret(ctx, "redhat-rhods-deadmanssnitch", dsciInit.Spec.Monitoring.Namespace) if err != nil { - r.Log.Error(err, "error getting deadmansnitch secret from namespace "+dsciInit.Spec.Monitoring.Namespace) + log.Error(err, "error getting deadmansnitch secret from namespace "+dsciInit.Spec.Monitoring.Namespace) return err } - // r.Log.Info("Success: got deadmansnitch secret") + // log.Info("Success: got deadmansnitch secret") // Get PagerDuty Secret pagerDutySecret, err := r.waitForManagedSecret(ctx, "redhat-rhods-pagerduty", dsciInit.Spec.Monitoring.Namespace) if err != nil { - r.Log.Error(err, "error getting pagerduty secret from namespace "+dsciInit.Spec.Monitoring.Namespace) + log.Error(err, "error getting pagerduty secret from namespace "+dsciInit.Spec.Monitoring.Namespace) return err } - // r.Log.Info("Success: got pagerduty secret") + // log.Info("Success: got pagerduty secret") // Get Smtp Secret 
smtpSecret, err := r.waitForManagedSecret(ctx, "redhat-rhods-smtp", dsciInit.Spec.Monitoring.Namespace) if err != nil { - r.Log.Error(err, "error getting smtp secret from namespace "+dsciInit.Spec.Monitoring.Namespace) + log.Error(err, "error getting smtp secret from namespace "+dsciInit.Spec.Monitoring.Namespace) return err } - // r.Log.Info("Success: got smtp secret") + // log.Info("Success: got smtp secret") // Replace variables in alertmanager configmap for the initial time // TODO: Following variables can later be exposed by the API @@ -126,10 +125,10 @@ func configureAlertManager(ctx context.Context, dsciInit *dsciv1.DSCInitializati "": string(smtpSecret.Data["password"]), }) if err != nil { - r.Log.Error(err, "error to inject data to alertmanager-configs.yaml") + log.Error(err, "error to inject data to alertmanager-configs.yaml") return err } - // r.Log.Info("Success: inject alertmanage-configs.yaml") + // log.Info("Success: inject alertmanage-configs.yaml") // special handling for dev-mod consolelinkDomain, err := cluster.GetDomain(ctx, r.Client) @@ -137,33 +136,33 @@ func configureAlertManager(ctx context.Context, dsciInit *dsciv1.DSCInitializati return fmt.Errorf("error getting console route URL : %w", err) } if strings.Contains(consolelinkDomain, "devshift.org") { - r.Log.Info("inject alertmanage-configs.yaml for dev mode1") + log.Info("inject alertmanage-configs.yaml for dev mode1") err = common.ReplaceStringsInFile(filepath.Join(alertManagerPath, "alertmanager-configs.yaml"), map[string]string{ "@devshift.net": "@rhmw.io", }) if err != nil { - r.Log.Error(err, "error to replace data for dev mode1 to alertmanager-configs.yaml") + log.Error(err, "error to replace data for dev mode1 to alertmanager-configs.yaml") return err } } if strings.Contains(consolelinkDomain, "aisrhods") { - r.Log.Info("inject alertmanage-configs.yaml for dev mode2") + log.Info("inject alertmanage-configs.yaml for dev mode2") err = 
common.ReplaceStringsInFile(filepath.Join(alertManagerPath, "alertmanager-configs.yaml"), map[string]string{ "receiver: PagerDuty": "receiver: alerts-sink", }) if err != nil { - r.Log.Error(err, "error to replace data for dev mode2 to alertmanager-configs.yaml") + log.Error(err, "error to replace data for dev mode2 to alertmanager-configs.yaml") return err } } - // r.Log.Info("Success: inject alertmanage-configs.yaml for dev mode") + // log.Info("Success: inject alertmanage-configs.yaml for dev mode") operatorNs, err := cluster.GetOperatorNamespace() if err != nil { - r.Log.Error(err, "error getting operator namespace for smtp secret") + log.Error(err, "error getting operator namespace for smtp secret") return err } @@ -172,41 +171,42 @@ func configureAlertManager(ctx context.Context, dsciInit *dsciv1.DSCInitializati if err != nil { return fmt.Errorf("error getting smtp receiver email secret: %w", err) } - // r.Log.Info("Success: got smpt email secret") + // log.Info("Success: got smpt email secret") // replace smtpEmailSecret in alertmanager-configs.yaml if err = common.MatchLineInFile(filepath.Join(alertManagerPath, "alertmanager-configs.yaml"), map[string]string{ "- to: ": "- to: " + string(smtpEmailSecret.Data["notification-email"]), }, ); err != nil { - r.Log.Error(err, "error to update with new notification-email") + log.Error(err, "error to update with new notification-email") return err } - // r.Log.Info("Success: update alertmanage-configs.yaml with email") + // log.Info("Success: update alertmanage-configs.yaml with email") err = deploy.DeployManifestsFromPath(ctx, r.Client, dsciInit, alertManagerPath, dsciInit.Spec.Monitoring.Namespace, "alertmanager", true) if err != nil { - r.Log.Error(err, "error to deploy manifests", "path", alertManagerPath) + log.Error(err, "error to deploy manifests", "path", alertManagerPath) return err } - // r.Log.Info("Success: update alertmanager with manifests") + // log.Info("Success: update alertmanager with manifests") // 
Create alertmanager-proxy secret if err := createMonitoringProxySecret(ctx, r.Client, "alertmanager-proxy", dsciInit); err != nil { - r.Log.Error(err, "error to create secret alertmanager-proxy") + log.Error(err, "error to create secret alertmanager-proxy") return err } - // r.Log.Info("Success: create alertmanager-proxy secret") + // log.Info("Success: create alertmanager-proxy secret") return nil } func configurePrometheus(ctx context.Context, dsciInit *dsciv1.DSCInitialization, r *DSCInitializationReconciler) error { + log := logf.FromContext(ctx) // Update rolebinding-viewer err := common.ReplaceStringsInFile(filepath.Join(prometheusManifestsPath, "prometheus-rolebinding-viewer.yaml"), map[string]string{ "": dsciInit.Spec.Monitoring.Namespace, }) if err != nil { - r.Log.Error(err, "error to inject data to prometheus-rolebinding-viewer.yaml") + log.Error(err, "error to inject data to prometheus-rolebinding-viewer.yaml") return err } // Update prometheus-config for dashboard, dsp and workbench @@ -221,7 +221,7 @@ func configurePrometheus(ctx context.Context, dsciInit *dsciv1.DSCInitialization "": consolelinkDomain, }) if err != nil { - r.Log.Error(err, "error to inject data to prometheus-configs.yaml") + log.Error(err, "error to inject data to prometheus-configs.yaml") return err } @@ -234,10 +234,10 @@ func configurePrometheus(ctx context.Context, dsciInit *dsciv1.DSCInitialization dsciInit.Spec.Monitoring.Namespace, "prometheus", dsciInit.Spec.Monitoring.ManagementState == operatorv1.Managed); err != nil { - r.Log.Error(err, "error to deploy manifests for prometheus configs", "path", prometheusConfigPath) + log.Error(err, "error to deploy manifests for prometheus configs", "path", prometheusConfigPath) return err } - // r.Log.Info("Success: create prometheus configmap 'prometheus'") + // log.Info("Success: create prometheus configmap 'prometheus'") // Get prometheus configmap prometheusConfigMap := &corev1.ConfigMap{} @@ -246,18 +246,18 @@ func 
configurePrometheus(ctx context.Context, dsciInit *dsciv1.DSCInitialization Name: "prometheus", }, prometheusConfigMap) if err != nil { - r.Log.Error(err, "error to get configmap 'prometheus'") + log.Error(err, "error to get configmap 'prometheus'") return err } - // r.Log.Info("Success: got prometheus configmap") + // log.Info("Success: got prometheus configmap") // Get encoded prometheus data from configmap 'prometheus' prometheusData, err := common.GetMonitoringData(fmt.Sprint(prometheusConfigMap.Data)) if err != nil { - r.Log.Error(err, "error to get prometheus data") + log.Error(err, "error to get prometheus data") return err } - // r.Log.Info("Success: read encoded prometheus data from prometheus.yml in configmap") + // log.Info("Success: read encoded prometheus data from prometheus.yml in configmap") // Get alertmanager host alertmanagerRoute := &routev1.Route{} @@ -266,10 +266,10 @@ func configurePrometheus(ctx context.Context, dsciInit *dsciv1.DSCInitialization Name: "alertmanager", }, alertmanagerRoute) if err != nil { - r.Log.Error(err, "error to get alertmanager route") + log.Error(err, "error to get alertmanager route") return err } - // r.Log.Info("Success: got alertmanager route") + // log.Info("Success: got alertmanager route") // Get alertmanager configmap alertManagerConfigMap := &corev1.ConfigMap{} @@ -278,17 +278,17 @@ func configurePrometheus(ctx context.Context, dsciInit *dsciv1.DSCInitialization Name: "alertmanager", }, alertManagerConfigMap) if err != nil { - r.Log.Error(err, "error to get configmap 'alertmanager'") + log.Error(err, "error to get configmap 'alertmanager'") return err } - // r.Log.Info("Success: got configmap 'alertmanager'") + // log.Info("Success: got configmap 'alertmanager'") alertmanagerData, err := common.GetMonitoringData(alertManagerConfigMap.Data["alertmanager.yml"]) if err != nil { - r.Log.Error(err, "error to get encoded alertmanager data from alertmanager.yml") + log.Error(err, "error to get encoded alertmanager 
data from alertmanager.yml") return err } - // r.Log.Info("Success: read alertmanager data from alertmanage.yml") + // log.Info("Success: read alertmanager data from alertmanage.yml") // Update prometheus deployment with alertmanager and prometheus data err = common.ReplaceStringsInFile(filepath.Join(prometheusManifestsPath, "prometheus-deployment.yaml"), @@ -296,20 +296,20 @@ func configurePrometheus(ctx context.Context, dsciInit *dsciv1.DSCInitialization "": alertmanagerRoute.Spec.Host, }) if err != nil { - r.Log.Error(err, "error to inject set_alertmanager_host to prometheus-deployment.yaml") + log.Error(err, "error to inject set_alertmanager_host to prometheus-deployment.yaml") return err } - // r.Log.Info("Success: update set_alertmanager_host in prometheus-deployment.yaml") + // log.Info("Success: update set_alertmanager_host in prometheus-deployment.yaml") err = common.MatchLineInFile(filepath.Join(prometheusManifestsPath, "prometheus-deployment.yaml"), map[string]string{ "alertmanager: ": "alertmanager: " + alertmanagerData, "prometheus: ": "prometheus: " + prometheusData, }) if err != nil { - r.Log.Error(err, "error to update annotations in prometheus-deployment.yaml") + log.Error(err, "error to update annotations in prometheus-deployment.yaml") return err } - // r.Log.Info("Success: update annotations in prometheus-deployment.yaml") + // log.Info("Success: update annotations in prometheus-deployment.yaml") // final apply prometheus manifests including prometheus deployment // Check if Prometheus deployment from legacy version exists(check for initContainer) @@ -334,7 +334,7 @@ func configurePrometheus(ctx context.Context, dsciInit *dsciv1.DSCInitialization err = deploy.DeployManifestsFromPath(ctx, r.Client, dsciInit, prometheusManifestsPath, dsciInit.Spec.Monitoring.Namespace, "prometheus", true) if err != nil { - r.Log.Error(err, "error to deploy manifests for prometheus", "path", prometheusManifestsPath) + log.Error(err, "error to deploy manifests for 
prometheus", "path", prometheusManifestsPath) return err } @@ -342,11 +342,12 @@ func configurePrometheus(ctx context.Context, dsciInit *dsciv1.DSCInitialization if err := createMonitoringProxySecret(ctx, r.Client, "prometheus-proxy", dsciInit); err != nil { return err } - // r.Log.Info("Success: create prometheus-proxy secret") + // log.Info("Success: create prometheus-proxy secret") return nil } func configureBlackboxExporter(ctx context.Context, dsciInit *dsciv1.DSCInitialization, r *DSCInitializationReconciler) error { + log := logf.FromContext(ctx) consoleRoute := &routev1.Route{} err := r.Client.Get(ctx, client.ObjectKey{Name: "console", Namespace: "openshift-console"}, consoleRoute) if err != nil { @@ -382,7 +383,7 @@ func configureBlackboxExporter(ctx context.Context, dsciInit *dsciv1.DSCInitiali dsciInit.Spec.Monitoring.Namespace, "blackbox-exporter", dsciInit.Spec.Monitoring.ManagementState == operatorv1.Managed); err != nil { - r.Log.Error(err, "error to deploy manifests: %w", "error", err) + log.Error(err, "error to deploy manifests: %w", "error", err) return err } } else { @@ -392,7 +393,7 @@ func configureBlackboxExporter(ctx context.Context, dsciInit *dsciv1.DSCInitiali dsciInit.Spec.Monitoring.Namespace, "blackbox-exporter", dsciInit.Spec.Monitoring.ManagementState == operatorv1.Managed); err != nil { - r.Log.Error(err, "error to deploy manifests: %w", "error", err) + log.Error(err, "error to deploy manifests: %w", "error", err) return err } } @@ -416,7 +417,7 @@ func createMonitoringProxySecret(ctx context.Context, cli client.Client, name st } foundProxySecret := &corev1.Secret{} - err = cli.Get(ctx, client.ObjectKey{Name: name, Namespace: dsciInit.Spec.Monitoring.Namespace}, foundProxySecret) + err = cli.Get(ctx, client.ObjectKeyFromObject(desiredProxySecret), foundProxySecret) if err != nil { if k8serr.IsNotFound(err) { // Set Controller reference @@ -436,6 +437,7 @@ func createMonitoringProxySecret(ctx context.Context, cli client.Client, name st 
} func (r *DSCInitializationReconciler) configureSegmentIO(ctx context.Context, dsciInit *dsciv1.DSCInitialization) error { + log := logf.FromContext(ctx) // create segment.io only when configmap does not exist in the cluster segmentioConfigMap := &corev1.ConfigMap{} if err := r.Client.Get(ctx, client.ObjectKey{ @@ -443,7 +445,7 @@ func (r *DSCInitializationReconciler) configureSegmentIO(ctx context.Context, ds Name: "odh-segment-key-config", }, segmentioConfigMap); err != nil { if !k8serr.IsNotFound(err) { - r.Log.Error(err, "error to get configmap 'odh-segment-key-config'") + log.Error(err, "error to get configmap 'odh-segment-key-config'") return err } else { segmentPath := filepath.Join(deploy.DefaultManifestPath, "monitoring", "segment") @@ -455,7 +457,7 @@ func (r *DSCInitializationReconciler) configureSegmentIO(ctx context.Context, ds dsciInit.Spec.ApplicationsNamespace, "segment-io", dsciInit.Spec.Monitoring.ManagementState == operatorv1.Managed); err != nil { - r.Log.Error(err, "error to deploy manifests under "+segmentPath) + log.Error(err, "error to deploy manifests under "+segmentPath) return err } } @@ -464,6 +466,7 @@ func (r *DSCInitializationReconciler) configureSegmentIO(ctx context.Context, ds } func (r *DSCInitializationReconciler) configureCommonMonitoring(ctx context.Context, dsciInit *dsciv1.DSCInitialization) error { + log := logf.FromContext(ctx) if err := r.configureSegmentIO(ctx, dsciInit); err != nil { return err } @@ -475,7 +478,7 @@ func (r *DSCInitializationReconciler) configureCommonMonitoring(ctx context.Cont "": dsciInit.Spec.Monitoring.Namespace, }) if err != nil { - r.Log.Error(err, "error to inject namespace to common monitoring") + log.Error(err, "error to inject namespace to common monitoring") return err } @@ -488,7 +491,7 @@ func (r *DSCInitializationReconciler) configureCommonMonitoring(ctx context.Cont "", "monitoring-base", dsciInit.Spec.Monitoring.ManagementState == operatorv1.Managed); err != nil { - r.Log.Error(err, 
"error to deploy manifests under "+monitoringBasePath) + log.Error(err, "error to deploy manifests under "+monitoringBasePath) return err } return nil diff --git a/controllers/dscinitialization/servicemesh_setup.go b/controllers/dscinitialization/servicemesh_setup.go index 4e45b96a0ca..ed3e5a9424b 100644 --- a/controllers/dscinitialization/servicemesh_setup.go +++ b/controllers/dscinitialization/servicemesh_setup.go @@ -9,6 +9,7 @@ import ( conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" @@ -19,11 +20,12 @@ import ( ) func (r *DSCInitializationReconciler) configureServiceMesh(ctx context.Context, instance *dsciv1.DSCInitialization) error { + log := logf.FromContext(ctx) serviceMeshManagementState := operatorv1.Removed if instance.Spec.ServiceMesh != nil { serviceMeshManagementState = instance.Spec.ServiceMesh.ManagementState } else { - r.Log.Info("ServiceMesh is not configured in DSCI, same as default to 'Removed'") + log.Info("ServiceMesh is not configured in DSCI, same as default to 'Removed'") } switch serviceMeshManagementState { @@ -42,16 +44,16 @@ func (r *DSCInitializationReconciler) configureServiceMesh(ctx context.Context, for _, capability := range capabilities { capabilityErr := capability.Apply(ctx, r.Client) if capabilityErr != nil { - r.Log.Error(capabilityErr, "failed applying service mesh resources") + log.Error(capabilityErr, "failed applying service mesh resources") r.Recorder.Eventf(instance, corev1.EventTypeWarning, "DSCInitializationReconcileError", "failed applying service mesh resources") return capabilityErr } } case operatorv1.Unmanaged: - r.Log.Info("ServiceMesh CR is not configured by the operator, we won't do anything") + 
log.Info("ServiceMesh CR is not configured by the operator, we won't do anything") case operatorv1.Removed: - r.Log.Info("existing ServiceMesh CR (owned by operator) will be removed") + log.Info("existing ServiceMesh CR (owned by operator) will be removed") if err := r.removeServiceMesh(ctx, instance); err != nil { return err } @@ -61,6 +63,7 @@ func (r *DSCInitializationReconciler) configureServiceMesh(ctx context.Context, } func (r *DSCInitializationReconciler) removeServiceMesh(ctx context.Context, instance *dsciv1.DSCInitialization) error { + log := logf.FromContext(ctx) // on condition of Managed, do not handle Removed when set to Removed it trigger DSCI reconcile to clean up if instance.Spec.ServiceMesh == nil { return nil @@ -80,7 +83,7 @@ func (r *DSCInitializationReconciler) removeServiceMesh(ctx context.Context, ins for _, capability := range capabilities { capabilityErr := capability.Delete(ctx, r.Client) if capabilityErr != nil { - r.Log.Error(capabilityErr, "failed deleting service mesh resources") + log.Error(capabilityErr, "failed deleting service mesh resources") r.Recorder.Eventf(instance, corev1.EventTypeWarning, "DSCInitializationReconcileError", "failed deleting service mesh resources") return capabilityErr diff --git a/controllers/dscinitialization/suite_test.go b/controllers/dscinitialization/suite_test.go index 985618bacf8..eb960d6eb22 100644 --- a/controllers/dscinitialization/suite_test.go +++ b/controllers/dscinitialization/suite_test.go @@ -24,6 +24,7 @@ import ( configv1 "github.com/openshift/api/config/v1" routev1 "github.com/openshift/api/route/v1" + templatev1 "github.com/openshift/api/template/v1" userv1 "github.com/openshift/api/user/v1" ofapi "github.com/operator-framework/api/pkg/operators/v1alpha1" ofapiv2 "github.com/operator-framework/api/pkg/operators/v2" @@ -46,7 +47,9 @@ import ( dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" dsciv1 
"github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + serviceApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/services/v1alpha1" dscictrl "github.com/opendatahub-io/opendatahub-operator/v2/controllers/dscinitialization" + odhClient "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/client" "github.com/opendatahub-io/opendatahub-operator/v2/tests/envtestutil" . "github.com/onsi/ginkgo/v2" @@ -79,6 +82,7 @@ var testScheme = runtime.NewScheme() var _ = BeforeSuite(func() { // can't use suite's context as the manager should survive the function + //nolint:fatcontext gCtx, gCancel = context.WithCancel(context.Background()) logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) @@ -117,13 +121,19 @@ var _ = BeforeSuite(func() { utilruntime.Must(routev1.Install(testScheme)) utilruntime.Must(userv1.Install(testScheme)) utilruntime.Must(monitoringv1.AddToScheme(testScheme)) + utilruntime.Must(templatev1.Install(testScheme)) utilruntime.Must(configv1.Install(testScheme)) + utilruntime.Must(serviceApi.AddToScheme(testScheme)) // +kubebuilder:scaffold:scheme k8sClient, err = client.New(cfg, client.Options{Scheme: testScheme}) Expect(err).NotTo(HaveOccurred()) Expect(k8sClient).NotTo(BeNil()) + odhClient, err := odhClient.NewFromConfig(cfg, k8sClient) + Expect(err).NotTo(HaveOccurred()) + Expect(odhClient).NotTo(BeNil()) + webhookInstallOptions := &testEnv.WebhookInstallOptions mgr, err := ctrl.NewManager(cfg, ctrl.Options{ Scheme: testScheme, @@ -137,9 +147,8 @@ var _ = BeforeSuite(func() { Expect(err).NotTo(HaveOccurred()) err = (&dscictrl.DSCInitializationReconciler{ - Client: k8sClient, + Client: odhClient, Scheme: testScheme, - Log: ctrl.Log.WithName("controllers").WithName("DSCInitialization"), Recorder: mgr.GetEventRecorderFor("dscinitialization-controller"), }).SetupWithManager(gCtx, mgr) diff --git a/controllers/dscinitialization/utils.go b/controllers/dscinitialization/utils.go index 
03d2b9ff539..b434278a1ad 100644 --- a/controllers/dscinitialization/utils.go +++ b/controllers/dscinitialization/utils.go @@ -18,6 +18,7 @@ import ( "k8s.io/client-go/util/retry" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" @@ -37,6 +38,7 @@ var ( // - Network Policies 'opendatahub' that allow traffic between the ODH namespaces // - RoleBinding 'opendatahub'. func (r *DSCInitializationReconciler) createOdhNamespace(ctx context.Context, dscInit *dsciv1.DSCInitialization, name string, platform cluster.Platform) error { + log := logf.FromContext(ctx) // Expected application namespace for the given name desiredNamespace := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ @@ -50,28 +52,28 @@ func (r *DSCInitializationReconciler) createOdhNamespace(ctx context.Context, ds // Create Application Namespace if it doesn't exist foundNamespace := &corev1.Namespace{} - err := r.Get(ctx, client.ObjectKey{Name: name}, foundNamespace) + err := r.Get(ctx, client.ObjectKeyFromObject(desiredNamespace), foundNamespace) if err != nil { if k8serr.IsNotFound(err) { - r.Log.Info("Creating namespace", "name", name) + log.Info("Creating namespace", "name", name) // Set Controller reference // err = ctrl.SetControllerReference(dscInit, desiredNamespace, r.Scheme) // if err != nil { - // r.Log.Error(err, "Unable to add OwnerReference to the Namespace") + // log.Error(err, "Unable to add OwnerReference to the Namespace") // return err // } err = r.Create(ctx, desiredNamespace) if err != nil && !k8serr.IsAlreadyExists(err) { - r.Log.Error(err, "Unable to create namespace", "name", name) + log.Error(err, "Unable to create namespace", "name", name) return err } } else { - r.Log.Error(err, "Unable to fetch namespace", "name", name) + log.Error(err, "Unable to fetch 
namespace", "name", name) return err } // Patch Application Namespace if it exists } else if dscInit.Spec.Monitoring.ManagementState == operatorv1.Managed { - r.Log.Info("Patching application namespace for Managed cluster", "name", name) + log.Info("Patching application namespace for Managed cluster", "name", name) labelPatch := `{"metadata":{"labels":{"openshift.io/cluster-monitoring":"true","pod-security.kubernetes.io/enforce":"baseline","opendatahub.io/generated-namespace": "true"}}}` err = r.Patch(ctx, foundNamespace, client.RawPatch(types.MergePatchType, []byte(labelPatch))) @@ -86,7 +88,7 @@ func (r *DSCInitializationReconciler) createOdhNamespace(ctx context.Context, ds err := r.Get(ctx, client.ObjectKey{Name: monitoringName}, foundMonitoringNamespace) if err != nil { if k8serr.IsNotFound(err) { - r.Log.Info("Not found monitoring namespace", "name", monitoringName) + log.Info("Not found monitoring namespace", "name", monitoringName) desiredMonitoringNamespace := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: monitoringName, @@ -99,15 +101,15 @@ func (r *DSCInitializationReconciler) createOdhNamespace(ctx context.Context, ds } err = r.Create(ctx, desiredMonitoringNamespace) if err != nil && !k8serr.IsAlreadyExists(err) { - r.Log.Error(err, "Unable to create namespace", "name", monitoringName) + log.Error(err, "Unable to create namespace", "name", monitoringName) return err } } else { - r.Log.Error(err, "Unable to fetch monitoring namespace", "name", monitoringName) + log.Error(err, "Unable to fetch monitoring namespace", "name", monitoringName) return err } } else { // force to patch monitoring namespace with label for cluster-monitoring - r.Log.Info("Patching monitoring namespace", "name", monitoringName) + log.Info("Patching monitoring namespace", "name", monitoringName) labelPatch := `{"metadata":{"labels":{"openshift.io/cluster-monitoring":"true", "pod-security.kubernetes.io/enforce":"baseline","opendatahub.io/generated-namespace": "true"}}}` err 
= r.Patch(ctx, foundMonitoringNamespace, client.RawPatch(types.MergePatchType, []byte(labelPatch))) @@ -120,27 +122,28 @@ func (r *DSCInitializationReconciler) createOdhNamespace(ctx context.Context, ds // Create default NetworkPolicy for the namespace err = r.reconcileDefaultNetworkPolicy(ctx, name, dscInit, platform) if err != nil { - r.Log.Error(err, "error reconciling network policy ", "name", name) + log.Error(err, "error reconciling network policy ", "name", name) return err } // Create odh-common-config Configmap for the Namespace err = r.createOdhCommonConfigMap(ctx, name, dscInit) if err != nil { - r.Log.Error(err, "error creating configmap", "name", "odh-common-config") + log.Error(err, "error creating configmap", "name", "odh-common-config") return err } // Create default Rolebinding for the namespace err = r.createDefaultRoleBinding(ctx, name, dscInit) if err != nil { - r.Log.Error(err, "error creating rolebinding", "name", name) + log.Error(err, "error creating rolebinding", "name", name) return err } return nil } func (r *DSCInitializationReconciler) createDefaultRoleBinding(ctx context.Context, name string, dscInit *dsciv1.DSCInitialization) error { + log := logf.FromContext(ctx) // Expected namespace for the given name desiredRoleBinding := &rbacv1.RoleBinding{ TypeMeta: metav1.TypeMeta{ @@ -167,16 +170,13 @@ func (r *DSCInitializationReconciler) createDefaultRoleBinding(ctx context.Conte // Create RoleBinding if doesn't exists foundRoleBinding := &rbacv1.RoleBinding{} - err := r.Client.Get(ctx, client.ObjectKey{ - Name: name, - Namespace: name, - }, foundRoleBinding) + err := r.Client.Get(ctx, client.ObjectKeyFromObject(desiredRoleBinding), foundRoleBinding) if err != nil { if k8serr.IsNotFound(err) { // Set Controller reference err = ctrl.SetControllerReference(dscInit, desiredRoleBinding, r.Scheme) if err != nil { - r.Log.Error(err, "Unable to add OwnerReference to the rolebinding") + log.Error(err, "Unable to add OwnerReference to the 
rolebinding") return err } err = r.Client.Create(ctx, desiredRoleBinding) @@ -191,29 +191,30 @@ func (r *DSCInitializationReconciler) createDefaultRoleBinding(ctx context.Conte } func (r *DSCInitializationReconciler) reconcileDefaultNetworkPolicy(ctx context.Context, name string, dscInit *dsciv1.DSCInitialization, platform cluster.Platform) error { + log := logf.FromContext(ctx) if platform == cluster.ManagedRhoai || platform == cluster.SelfManagedRhoai { // Get operator namepsace operatorNs, err := cluster.GetOperatorNamespace() if err != nil { - r.Log.Error(err, "error getting operator namespace for networkplicy creation") + log.Error(err, "error getting operator namespace for networkplicy creation") return err } // Deploy networkpolicy for operator namespace err = deploy.DeployManifestsFromPath(ctx, r.Client, dscInit, networkpolicyPath+"/operator", operatorNs, "networkpolicy", true) if err != nil { - r.Log.Error(err, "error to set networkpolicy in operator namespace", "path", networkpolicyPath) + log.Error(err, "error to set networkpolicy in operator namespace", "path", networkpolicyPath) return err } // Deploy networkpolicy for monitoring namespace err = deploy.DeployManifestsFromPath(ctx, r.Client, dscInit, networkpolicyPath+"/monitoring", dscInit.Spec.Monitoring.Namespace, "networkpolicy", true) if err != nil { - r.Log.Error(err, "error to set networkpolicy in monitroing namespace", "path", networkpolicyPath) + log.Error(err, "error to set networkpolicy in monitroing namespace", "path", networkpolicyPath) return err } // Deploy networkpolicy for applications namespace err = deploy.DeployManifestsFromPath(ctx, r.Client, dscInit, networkpolicyPath+"/applications", dscInit.Spec.ApplicationsNamespace, "networkpolicy", true) if err != nil { - r.Log.Error(err, "error to set networkpolicy in applications namespace", "path", networkpolicyPath) + log.Error(err, "error to set networkpolicy in applications namespace", "path", networkpolicyPath) return err } } else { // 
Expected namespace for the given name in ODH @@ -289,16 +290,13 @@ func (r *DSCInitializationReconciler) reconcileDefaultNetworkPolicy(ctx context. // Create NetworkPolicy if it doesn't exist foundNetworkPolicy := &networkingv1.NetworkPolicy{} justCreated := false - err := r.Client.Get(ctx, client.ObjectKey{ - Name: name, - Namespace: name, - }, foundNetworkPolicy) + err := r.Client.Get(ctx, client.ObjectKeyFromObject(desiredNetworkPolicy), foundNetworkPolicy) if err != nil { if k8serr.IsNotFound(err) { // Set Controller reference err = ctrl.SetControllerReference(dscInit, desiredNetworkPolicy, r.Scheme) if err != nil { - r.Log.Error(err, "Unable to add OwnerReference to the Network policy") + log.Error(err, "Unable to add OwnerReference to the Network policy") return err } err = r.Client.Create(ctx, desiredNetworkPolicy) @@ -313,7 +311,7 @@ func (r *DSCInitializationReconciler) reconcileDefaultNetworkPolicy(ctx context. // Reconcile the NetworkPolicy spec if it has been manually modified if !justCreated && !CompareNotebookNetworkPolicies(*desiredNetworkPolicy, *foundNetworkPolicy) { - r.Log.Info("Reconciling Network policy", "name", foundNetworkPolicy.Name) + log.Info("Reconciling Network policy", "name", foundNetworkPolicy.Name) // Retry the update operation when the ingress controller eventually // updates the resource version field err := retry.RetryOnConflict(retry.DefaultRetry, func() error { @@ -330,7 +328,7 @@ func (r *DSCInitializationReconciler) reconcileDefaultNetworkPolicy(ctx context. 
return r.Update(ctx, foundNetworkPolicy) }) if err != nil { - r.Log.Error(err, "Unable to reconcile the Network Policy") + log.Error(err, "Unable to reconcile the Network Policy") return err } } @@ -378,6 +376,7 @@ func GenerateRandomHex(length int) ([]byte, error) { } func (r *DSCInitializationReconciler) createOdhCommonConfigMap(ctx context.Context, name string, dscInit *dsciv1.DSCInitialization) error { + log := logf.FromContext(ctx) // Expected configmap for the given namespace desiredConfigMap := &corev1.ConfigMap{ TypeMeta: metav1.TypeMeta{ @@ -393,16 +392,13 @@ func (r *DSCInitializationReconciler) createOdhCommonConfigMap(ctx context.Conte // Create Configmap if doesn't exists foundConfigMap := &corev1.ConfigMap{} - err := r.Client.Get(ctx, client.ObjectKey{ - Name: name, - Namespace: name, - }, foundConfigMap) + err := r.Client.Get(ctx, client.ObjectKeyFromObject(desiredConfigMap), foundConfigMap) if err != nil { if k8serr.IsNotFound(err) { // Set Controller reference err = ctrl.SetControllerReference(dscInit, foundConfigMap, r.Scheme) if err != nil { - r.Log.Error(err, "Unable to add OwnerReference to the odh-common-config ConfigMap") + log.Error(err, "Unable to add OwnerReference to the odh-common-config ConfigMap") return err } err = r.Client.Create(ctx, desiredConfigMap) @@ -426,10 +422,7 @@ func (r *DSCInitializationReconciler) createUserGroup(ctx context.Context, dscIn // Otherwise is errors with "error": "Group.user.openshift.io \"odh-admins\" is invalid: users: Invalid value: \"null\": users in body must be of type array: \"null\""} Users: []string{}, } - err := r.Client.Get(ctx, client.ObjectKey{ - Name: userGroup.Name, - Namespace: dscInit.Spec.ApplicationsNamespace, - }, userGroup) + err := r.Client.Get(ctx, client.ObjectKeyFromObject(userGroup), userGroup) if err != nil { if k8serr.IsNotFound(err) { err = r.Client.Create(ctx, userGroup) diff --git a/controllers/secretgenerator/secretgenerator_controller.go 
b/controllers/secretgenerator/secretgenerator_controller.go index f86f9a243f8..a9fb0235513 100644 --- a/controllers/secretgenerator/secretgenerator_controller.go +++ b/controllers/secretgenerator/secretgenerator_controller.go @@ -23,7 +23,6 @@ import ( "fmt" "time" - "github.com/go-logr/logr" oauthv1 "github.com/openshift/api/oauth/v1" routev1 "github.com/openshift/api/route/v1" corev1 "k8s.io/api/core/v1" @@ -37,9 +36,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" + logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" + odhClient "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/client" annotation "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/annotations" ) @@ -50,14 +51,13 @@ const ( // SecretGeneratorReconciler holds the controller configuration. type SecretGeneratorReconciler struct { - Client client.Client + *odhClient.Client Scheme *runtime.Scheme - Log logr.Logger } // SetupWithManager sets up the controller with the Manager. -func (r *SecretGeneratorReconciler) SetupWithManager(mgr ctrl.Manager) error { - r.Log.Info("Adding controller for Secret Generation.") +func (r *SecretGeneratorReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { + logf.FromContext(ctx).Info("Adding controller for Secret Generation.") // Watch only new secrets with the corresponding annotation predicates := predicate.Funcs{ @@ -144,8 +144,10 @@ func (r *SecretGeneratorReconciler) Reconcile(ctx context.Context, request ctrl. 
} func (r *SecretGeneratorReconciler) generateSecret(ctx context.Context, foundSecret *corev1.Secret, generatedSecret *corev1.Secret) error { + log := logf.FromContext(ctx).WithName("SecretGenerator") + // Generate secret random value - r.Log.Info("Generating a random value for a secret in a namespace", + log.Info("Generating a random value for a secret in a namespace", "secret", generatedSecret.Name, "namespace", generatedSecret.Namespace) generatedSecret.Labels = foundSecret.Labels @@ -156,7 +158,7 @@ func (r *SecretGeneratorReconciler) generateSecret(ctx context.Context, foundSec secret, err := NewSecretFrom(foundSecret.GetAnnotations()) if err != nil { - r.Log.Error(err, "error creating secret %s in %s", generatedSecret.Name, generatedSecret.Namespace) + log.Error(err, "error creating secret %s in %s", generatedSecret.Name, generatedSecret.Namespace) return err } @@ -169,7 +171,7 @@ func (r *SecretGeneratorReconciler) generateSecret(ctx context.Context, foundSec return err } - r.Log.Info("Done generating secret in namespace", + log.Info("Done generating secret in namespace", "secret", generatedSecret.Name, "namespace", generatedSecret.Namespace) // check if annotation oauth-client-route exists @@ -180,15 +182,15 @@ func (r *SecretGeneratorReconciler) generateSecret(ctx context.Context, foundSec // Get OauthClient Route oauthClientRoute, err := r.getRoute(ctx, secret.OAuthClientRoute, foundSecret.Namespace) if err != nil { - r.Log.Error(err, "Unable to retrieve route from OAuthClient", "route-name", secret.OAuthClientRoute) + log.Error(err, "Unable to retrieve route from OAuthClient", "route-name", secret.OAuthClientRoute) return err } // Generate OAuthClient for the generated secret - r.Log.Info("Generating an OAuthClient CR for route", "route-name", oauthClientRoute.Name) + log.Info("Generating an OAuthClient CR for route", "route-name", oauthClientRoute.Name) err = r.createOAuthClient(ctx, foundSecret.Name, secret.Value, oauthClientRoute.Spec.Host) if err != 
nil { - r.Log.Error(err, "error creating oauth client resource. Recreate the Secret", "secret-name", + log.Error(err, "error creating oauth client resource. Recreate the Secret", "secret-name", foundSecret.Name) return err @@ -222,6 +224,7 @@ func (r *SecretGeneratorReconciler) getRoute(ctx context.Context, name string, n } func (r *SecretGeneratorReconciler) createOAuthClient(ctx context.Context, name string, secretName string, uri string) error { + log := logf.FromContext(ctx) // Create OAuthClient resource oauthClient := &oauthv1.OAuthClient{ TypeMeta: metav1.TypeMeta{ @@ -239,7 +242,7 @@ func (r *SecretGeneratorReconciler) createOAuthClient(ctx context.Context, name err := r.Client.Create(ctx, oauthClient) if err != nil { if k8serr.IsAlreadyExists(err) { - r.Log.Info("OAuth client resource already exists, patch it", "name", oauthClient.Name) + log.Info("OAuth client resource already exists, patch it", "name", oauthClient.Name) data, err := json.Marshal(oauthClient) if err != nil { return fmt.Errorf("failed to get DataScienceCluster custom resource data: %w", err) diff --git a/controllers/secretgenerator/secretgenerator_controller_test.go b/controllers/secretgenerator/secretgenerator_controller_test.go index 4eac042d7f0..c45343e60c0 100644 --- a/controllers/secretgenerator/secretgenerator_controller_test.go +++ b/controllers/secretgenerator/secretgenerator_controller_test.go @@ -6,36 +6,20 @@ import ( "github.com/onsi/gomega/gstruct" oauthv1 "github.com/openshift/api/oauth/v1" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" k8serr "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/opendatahub-io/opendatahub-operator/v2/controllers/secretgenerator" 
"github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/annotations" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/fakeclient" . "github.com/onsi/gomega" ) -//nolint:ireturn -func newFakeClient(objs ...client.Object) client.Client { - scheme := runtime.NewScheme() - utilruntime.Must(corev1.AddToScheme(scheme)) - utilruntime.Must(appsv1.AddToScheme(scheme)) - utilruntime.Must(oauthv1.AddToScheme(scheme)) - - return fake.NewClientBuilder(). - WithScheme(scheme). - WithObjects(objs...). - Build() -} - func TestGenerateSecret(t *testing.T) { g := NewWithT(t) ctx := context.Background() @@ -66,13 +50,13 @@ func TestGenerateSecret(t *testing.T) { }, } - cli := newFakeClient(&existingSecret) - + cli, err := fakeclient.New(&existingSecret) r := secretgenerator.SecretGeneratorReconciler{ Client: cli, } + g.Expect(err).ShouldNot(HaveOccurred()) - _, err := r.Reconcile(ctx, reconcile.Request{ + _, err = r.Reconcile(ctx, reconcile.Request{ NamespacedName: types.NamespacedName{ Name: existingSecret.Name, Namespace: existingSecret.Namespace, @@ -132,13 +116,14 @@ func TestExistingSecret(t *testing.T) { }, } - cli := newFakeClient(&existingSecret, &generatedSecret) + cli, err := fakeclient.New(&existingSecret, &generatedSecret) + g.Expect(err).ShouldNot(HaveOccurred()) r := secretgenerator.SecretGeneratorReconciler{ Client: cli, } - _, err := r.Reconcile(ctx, reconcile.Request{ + _, err = r.Reconcile(ctx, reconcile.Request{ NamespacedName: types.NamespacedName{ Name: existingSecret.Name, Namespace: existingSecret.Namespace, @@ -163,13 +148,14 @@ func TestSecretNotFound(t *testing.T) { secretName := "fooo" secretNs := "foooNs" - cli := newFakeClient() + cli, err := fakeclient.New() + g.Expect(err).ShouldNot(HaveOccurred()) r := secretgenerator.SecretGeneratorReconciler{ Client: cli, } - _, err := r.Reconcile(ctx, reconcile.Request{ + _, err = r.Reconcile(ctx, reconcile.Request{ NamespacedName: types.NamespacedName{ Name: secretName, Namespace: 
secretNs, @@ -216,14 +202,15 @@ func TestDeleteOAuthClientIfSecretNotFound(t *testing.T) { GrantMethod: oauthv1.GrantHandlerAuto, } - cli := newFakeClient(&existingSecret, &existingOauthClient) + cli, err := fakeclient.New(&existingSecret, &existingOauthClient) + g.Expect(err).ShouldNot(HaveOccurred()) r := secretgenerator.SecretGeneratorReconciler{ Client: cli, } // delete secret - err := cli.Delete(ctx, &existingSecret) + err = cli.Delete(ctx, &existingSecret) g.Expect(err).ShouldNot(HaveOccurred()) // ensure the secret is deleted diff --git a/controllers/services/auth/auth_controller.go b/controllers/services/auth/auth_controller.go new file mode 100644 index 00000000000..c1bce1d1140 --- /dev/null +++ b/controllers/services/auth/auth_controller.go @@ -0,0 +1,96 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package auth + +import ( + "context" + "fmt" + + rbacv1 "k8s.io/api/rbac/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + componentsApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + serviceApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/services/v1alpha1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/render/template" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/handlers" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/predicates" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/reconciler" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" +) + +const ( + odhDashboardConfigCRDName = "odhdashboardconfigs.opendatahub.io" +) + +// NewServiceReconciler creates a ServiceReconciler for the Auth API. +func NewServiceReconciler(ctx context.Context, mgr ctrl.Manager) error { + _, err := reconciler.ReconcilerFor(mgr, &serviceApi.Auth{}). + // operands - owned + Owns(&rbacv1.ClusterRoleBinding{}). + Owns(&rbacv1.ClusterRole{}). + Owns(&rbacv1.Role{}). + Owns(&rbacv1.RoleBinding{}). + WatchesGVK(gvk.Dashboard). + WatchesGVK( + gvk.CustomResourceDefinition, + reconciler.WithEventHandler(handlers.ToNamed(serviceApi.AuthInstanceName)), + reconciler.WithPredicates(predicate.NewPredicateFuncs(func(object client.Object) bool { + return object.GetName() == odhDashboardConfigCRDName + }))). 
+ WatchesGVK( + gvk.OdhDashboardConfig, + reconciler.Dynamic(shouldWatchDashboardConfig), + reconciler.WithEventHandler(handlers.ToNamed(serviceApi.AuthInstanceName)), + reconciler.WithPredicates(predicates.DefaultPredicate)). + // actions + WithAction(initialize). + WithAction(template.NewAction( + template.WithCache(), + )). + WithAction(copyGroups). + WithAction(managePermissions). + WithAction(deploy.NewAction( + deploy.WithCache(), + )). + WithAction(setStatus). + Build(ctx) + + if err != nil { + return fmt.Errorf("could not create the auth controller: %w", err) + } + + return nil +} + +func shouldWatchDashboardConfig(ctx context.Context, request *types.ReconciliationRequest) bool { + d := resources.GvkToUnstructured(gvk.Dashboard) + if err := request.Client.Get(ctx, client.ObjectKey{Name: componentsApi.DashboardInstanceName}, d); err != nil { + return false + } + + c := resources.GvkToUnstructured(gvk.CustomResourceDefinition) + if err := request.Client.Get(ctx, client.ObjectKey{Name: odhDashboardConfigCRDName}, c); err != nil { + return false + } + + return true +} diff --git a/controllers/services/auth/auth_controller_actions.go b/controllers/services/auth/auth_controller_actions.go new file mode 100644 index 00000000000..2d860d1c9ce --- /dev/null +++ b/controllers/services/auth/auth_controller_actions.go @@ -0,0 +1,204 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package auth + +import ( + "context" + "errors" + + rbacv1 "k8s.io/api/rbac/v1" + apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + serviceApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/services/v1alpha1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" + common "github.com/opendatahub-io/opendatahub-operator/v2/pkg/common" + odhtypes "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" +) + +func initialize(ctx context.Context, rr *odhtypes.ReconciliationRequest) error { + rr.Templates = []odhtypes.TemplateInfo{ + { + FS: resourcesFS, + Path: AdminGroupRoleTemplate, + }, + { + FS: resourcesFS, + Path: AllowedGroupRoleTemplate, + }, + { + FS: resourcesFS, + Path: AdminGroupClusterRoleTemplate, + }, + } + + return nil +} + +// We only really expect this to copy once, the fields in the dashboardConfig will be immutable +// but there may be edge cases where the dashboardConfig is created or edited later. +// This function can be removed entirely when the dashboard team deprecates +// the fields in question. 
+func copyGroups(ctx context.Context, rr *odhtypes.ReconciliationRequest) error { + ai, ok := rr.Instance.(*serviceApi.Auth) + if !ok { + return errors.New("instance is not of type *services.Auth") + } + + // check for the dashboardConfig kind + crd := &apiextv1.CustomResourceDefinition{} + if err := rr.Client.Get(ctx, client.ObjectKey{Name: "odhdashboardconfigs.opendatahub.io"}, crd); err != nil { + return client.IgnoreNotFound(err) + } + + // Get groups from the dashboardConfig + odhObject := &unstructured.Unstructured{} + odhObject.SetGroupVersionKind(gvk.OdhDashboardConfig) + + err := rr.Client.Get(ctx, client.ObjectKey{ + Name: "odh-dashboard-config", + Namespace: rr.DSCI.Spec.ApplicationsNamespace, + }, odhObject) + // if the kind exists but there is no odh-dashboard-config then return + if err != nil { + return client.IgnoreNotFound(err) + } + foundGroups, found, _ := unstructured.NestedStringMap(odhObject.Object, "spec", "groupsConfig") + if !found { + return errors.New("no groupsConfig found in dashboardConfig") + } + + added := common.AddMissing(&ai.Spec.AdminGroups, foundGroups["adminGroups"]) + added += common.AddMissing(&ai.Spec.AllowedGroups, foundGroups["allowedGroups"]) + + if added == 0 { + return nil + } + + // only update if we found a new group in the list + err = rr.Client.Update(ctx, ai) + if err != nil { + return errors.New("error adding groups to Auth CR") + } + + return nil +} + +func bindRole(ctx context.Context, rr *odhtypes.ReconciliationRequest, groups []string, roleBindingName string, roleName string) error { + groupsToBind := []rbacv1.Subject{} + for _, e := range groups { + // we want to disallow adding system:authenticated to the adminGroups + if roleName == "admingroup-role" && e == "system:authenticated" { + log := logf.FromContext(ctx) + log.Info("system:authenticated cannot be added to adminGroups") + continue + } + rs := rbacv1.Subject{ + Kind: "Group", + APIGroup: "rbac.authorization.k8s.io", + Name: e, + } + groupsToBind = 
append(groupsToBind, rs) + } + + rb := &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: roleBindingName, + Namespace: rr.DSCI.Spec.ApplicationsNamespace, + }, + Subjects: groupsToBind, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "Role", + Name: roleName, + }, + } + err := rr.AddResources(rb) + if err != nil { + return errors.New("error creating RoleBinding for group") + } + + return nil +} + +func bindClusterRole(rr *odhtypes.ReconciliationRequest, groups []string, roleBindingName string, roleName string) error { + groupsToBind := []rbacv1.Subject{} + for _, e := range groups { + rs := rbacv1.Subject{ + Kind: "Group", + APIGroup: "rbac.authorization.k8s.io", + Name: e, + } + groupsToBind = append(groupsToBind, rs) + } + + crb := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: roleBindingName, + }, + Subjects: groupsToBind, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: roleName, + }, + } + err := rr.AddResources(crb) + if err != nil { + return errors.New("error creating RoleBinding for group") + } + + return nil +} + +func managePermissions(ctx context.Context, rr *odhtypes.ReconciliationRequest) error { + ai, ok := rr.Instance.(*serviceApi.Auth) + if !ok { + return errors.New("instance is not of type *services.Auth") + } + + err := bindRole(ctx, rr, ai.Spec.AdminGroups, "admingroup-rolebinding", "admingroup-role") + if err != nil { + return err + } + + err = bindClusterRole(rr, ai.Spec.AdminGroups, "admingroupcluster-rolebinding", "admingroupcluster-role") + if err != nil { + return err + } + + err = bindRole(ctx, rr, ai.Spec.AllowedGroups, "allowedgroup-rolebinding", "allowedgroup-role") + if err != nil { + return err + } + + return nil +} + +func setStatus(ctx context.Context, rr *odhtypes.ReconciliationRequest) error { + ai, ok := rr.Instance.(*serviceApi.Auth) + if !ok { + return errors.New("instance is not of type *services.Auth") + } + + 
ai.Status.Phase = "Ready" + ai.Status.ObservedGeneration = ai.GetObjectMeta().GetGeneration() + return nil +} diff --git a/controllers/services/auth/auth_controller_support.go b/controllers/services/auth/auth_controller_support.go new file mode 100644 index 00000000000..e74a8c45922 --- /dev/null +++ b/controllers/services/auth/auth_controller_support.go @@ -0,0 +1,14 @@ +package auth + +import ( + "embed" +) + +const ( + AdminGroupRoleTemplate = "resources/admingroup-role.tmpl.yaml" + AllowedGroupRoleTemplate = "resources/allowedgroup-role.tmpl.yaml" + AdminGroupClusterRoleTemplate = "resources/admingroup-clusterrole.tmpl.yaml" +) + +//go:embed resources +var resourcesFS embed.FS diff --git a/controllers/services/auth/resources/admingroup-clusterrole.tmpl.yaml b/controllers/services/auth/resources/admingroup-clusterrole.tmpl.yaml new file mode 100644 index 00000000000..36c12758108 --- /dev/null +++ b/controllers/services/auth/resources/admingroup-clusterrole.tmpl.yaml @@ -0,0 +1,20 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: admingroupcluster-role +rules: +- apiGroups: + - services.platform.opendatahub.io + resources: + - auths + verbs: + - get + - list + - watch + - patch +- apiGroups: + - services.platform.opendatahub.io + resources: + - auths/status + verbs: + - get diff --git a/controllers/services/auth/resources/admingroup-role.tmpl.yaml b/controllers/services/auth/resources/admingroup-role.tmpl.yaml new file mode 100644 index 00000000000..2d89ae55db4 --- /dev/null +++ b/controllers/services/auth/resources/admingroup-role.tmpl.yaml @@ -0,0 +1,21 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: admingroup-role + namespace: {{.DSCI.Spec.ApplicationsNamespace}} +rules: +- apiGroups: + - services.platform.opendatahub.io + resources: + - auths + verbs: + - get + - list + - watch + - patch +- apiGroups: + - services.platform.opendatahub.io + resources: + - auths/status + verbs: + - get diff --git 
a/controllers/services/auth/resources/allowedgroup-role.tmpl.yaml b/controllers/services/auth/resources/allowedgroup-role.tmpl.yaml new file mode 100644 index 00000000000..d1941c7ed87 --- /dev/null +++ b/controllers/services/auth/resources/allowedgroup-role.tmpl.yaml @@ -0,0 +1,20 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: allowedgroup-role + namespace: {{.DSCI.Spec.ApplicationsNamespace}} +rules: +- apiGroups: + - services.platform.opendatahub.io + resources: + - auths + verbs: + - get + - list + - watch +- apiGroups: + - services.platform.opendatahub.io + resources: + - auths/status + verbs: + - get diff --git a/controllers/services/monitoring/monitoring_controller.go b/controllers/services/monitoring/monitoring_controller.go new file mode 100644 index 00000000000..4188e6bdc83 --- /dev/null +++ b/controllers/services/monitoring/monitoring_controller.go @@ -0,0 +1,104 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 +​
+Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package monitoring + +import ( + "context" + "fmt" + + routev1 "github.com/openshift/api/route/v1" + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + ctrl "sigs.k8s.io/controller-runtime" + + serviceApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/services/v1alpha1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/render/kustomize" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/updatestatus" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/predicates/resources" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/reconciler" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" +) + +const serviceName = "monitoring" + +// NewServiceReconciler creates a ServiceReconciler for the Monitoring API. +func NewServiceReconciler(ctx context.Context, mgr ctrl.Manager) error { + _, err := reconciler.ReconcilerFor(mgr, &serviceApi.Monitoring{}). + // operands - owned + Owns(&corev1.ConfigMap{}). + Owns(&corev1.Secret{}). + Owns(&rbacv1.ClusterRoleBinding{}). + Owns(&rbacv1.ClusterRole{}). + Owns(&rbacv1.Role{}). + Owns(&rbacv1.RoleBinding{}). + Owns(&corev1.ServiceAccount{}). + Owns(&corev1.Service{}). + Owns(&corev1.PersistentVolumeClaim{}). + Owns(&monitoringv1.ServiceMonitor{}). + Owns(&monitoringv1.PrometheusRule{}). + // By default, a predicate for changed generation is added by the Owns() + // method, however for deployments, we also need to retrieve status info + // hence we need a dedicated predicate to react to replicas status change + Owns(&appsv1.Deployment{}, reconciler.WithPredicates(resources.NewDeploymentPredicate())). 
+ // operands - openshift + Owns(&routev1.Route{}). + // operands - watched + // + // By default the Watches function adds: + // - an event handler mapping to a cluster scope resource identified by the + // components.platform.opendatahub.io/part-of annotation + // - a predicate that checks for generation change for Delete/Updates events + // to objects that have the label components.platform.opendatahub.io/part-of + // or services.platform.opendatahub.io/part-of set to the current owner + // + Watches(&extv1.CustomResourceDefinition{}). + // actions + WithAction(initialize). + WithAction(kustomize.NewAction( + kustomize.WithCache(), + // Those are the default labels added by the legacy deploy method + // and should be preserved as the original plugin was affecting + // deployment selectors that are immutable once created, so it won't + // be possible to actually amend the labels in a non-disruptive + // manner. + // + // Additional labels/annotations MUST be added by the deploy action + // so they would affect only objects metadata without side effects + // kustomize.WithLabel(labels.ODH.Component(componentName), "true"), + kustomize.WithLabel(labels.K8SCommon.PartOf, serviceName), + )). + WithAction(deploy.NewAction( + deploy.WithCache(), + deploy.WithFieldOwner(serviceApi.MonitoringInstanceName), + deploy.WithLabel(labels.PlatformPartOf, serviceApi.MonitoringServiceName), + )). + WithAction(updatestatus.NewAction( + updatestatus.WithSelectorLabel(labels.PlatformPartOf, serviceApi.MonitoringServiceName), + )). + WithAction(updateStatus). 
+ Build(ctx) + + if err != nil { + return fmt.Errorf("could not create the monitoring controller: %w", err) + } + + return nil +} diff --git a/controllers/services/monitoring/monitoring_controller_actions.go b/controllers/services/monitoring/monitoring_controller_actions.go new file mode 100644 index 00000000000..b6e6d58502d --- /dev/null +++ b/controllers/services/monitoring/monitoring_controller_actions.go @@ -0,0 +1,48 @@ +package monitoring + +import ( + "context" + "errors" + "fmt" + + routev1 "github.com/openshift/api/route/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + serviceApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/services/v1alpha1" + odhtypes "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" +) + +func initialize(ctx context.Context, rr *odhtypes.ReconciliationRequest) error { + return nil +} + +func updateStatus(ctx context.Context, rr *odhtypes.ReconciliationRequest) error { + d, ok := rr.Instance.(*serviceApi.Monitoring) + if !ok { + return errors.New("instance is not of type *services.Monitoring") + } + + // url + rl := routev1.RouteList{} + err := rr.Client.List( + ctx, + &rl, + client.InNamespace(rr.DSCI.Spec.Monitoring.Namespace), + client.MatchingLabels(map[string]string{ + labels.PlatformPartOf: serviceApi.MonitoringServiceName, + }), + ) + + if err != nil { + return fmt.Errorf("failed to list routes: %w", err) + } + + d.Status.URL = "" + if len(rl.Items) == 1 { + d.Status.URL = resources.IngressHost(rl.Items[0]) + } + + return nil +} diff --git a/controllers/services/suite_test.go b/controllers/services/suite_test.go new file mode 100644 index 00000000000..3dffe197bdd --- /dev/null +++ b/controllers/services/suite_test.go @@ -0,0 +1,80 @@ +/* +Copyright 2023. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package services_test + +//revive:disable:dot-imports +import ( + "path/filepath" + "testing" + + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + serviceApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/services/v1alpha1" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +var cfg *rest.Config +var k8sClient client.Client +var testEnv *envtest.Environment + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Monitoring Controller Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: true, + } + + var err error + // cfg is defined in this file globally. 
+ cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + err = serviceApi.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + //+kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + +}, 60) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/controllers/setupcontroller/setup_controller.go b/controllers/setupcontroller/setup_controller.go new file mode 100644 index 00000000000..767e7e1b30a --- /dev/null +++ b/controllers/setupcontroller/setup_controller.go @@ -0,0 +1,83 @@ +package setupcontroller + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + odhClient "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/client" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/upgrade" +) + +type SetupControllerReconciler struct { + *odhClient.Client +} + +func (r *SetupControllerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := logf.FromContext(ctx).WithName("SetupController") + log.Info("Reconciling setup controller") + + if !upgrade.HasDeleteConfigMap(ctx, r.Client) { + return ctrl.Result{}, nil + } + + if err := upgrade.OperatorUninstall(ctx, r.Client, cluster.GetRelease().Name); err != nil { + return ctrl.Result{}, fmt.Errorf("operator uninstall failed : %w", err) + } + + return ctrl.Result{}, nil +} + +func (r *SetupControllerReconciler) SetupWithManager(mgr ctrl.Manager) error { + 
operatorNs, err := cluster.GetOperatorNamespace() + + if err != nil { + return fmt.Errorf("failed to get operator namespace: %w", err) + } + return ctrl.NewControllerManagedBy(mgr). + For(&corev1.ConfigMap{}, builder.WithPredicates(r.filterDeleteConfigMap(operatorNs))). + Complete(r) +} + +func (r *SetupControllerReconciler) filterDeleteConfigMap(operatorNs string) predicate.Funcs { + filter := func(obj client.Object) bool { + cm, ok := obj.(*corev1.ConfigMap) + + if !ok { + return false + } + + if cm.Namespace != operatorNs { + return false + } + + if cm.Labels[upgrade.DeleteConfigMapLabel] != "true" { + return false + } + + return true + } + + return predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + return filter(e.Object) + }, + UpdateFunc: func(e event.UpdateEvent) bool { + return filter(e.ObjectNew) + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return false + }, + GenericFunc: func(e event.GenericEvent) bool { + return false + }, + } +} diff --git a/controllers/status/status.go b/controllers/status/status.go index 808cfee2f7b..664c8bbfcff 100644 --- a/controllers/status/status.go +++ b/controllers/status/status.go @@ -21,6 +21,10 @@ package status import ( conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" ) // These constants represent the overall Phase as used by .Status.Phase. @@ -63,6 +67,8 @@ const ( // ConditionReconcileComplete represents extra Condition Type, used by .Condition.Type. 
ConditionReconcileComplete conditionsv1.ConditionType = "ReconcileComplete" + + ConditionTypeReady string = "Ready" ) const ( @@ -83,6 +89,23 @@ const ( ReadySuffix = "Ready" ) +const ( + ServiceMeshNotConfiguredReason = "ServiceMeshNotConfigured" + ServiceMeshNotConfiguredMessage = "ServiceMesh needs to be set to 'Managed' in DSCI CR" + + ServiceMeshOperatorNotInstalledReason = "ServiceMeshOperatorNotInstalled" + ServiceMeshOperatorNotInstalledMessage = "ServiceMesh operator must be installed for this component's configuration" + + ServerlessOperatorNotInstalledReason = "ServerlessOperatorNotInstalled" + ServerlessOperatorNotInstalledMessage = "Serverless operator must be installed for this component's configuration" +) + +const ( + DataSciencePipelinesDoesntOwnArgoCRDReason = "DataSciencePipelinesDoesntOwnArgoCRD" + DataSciencePipelinesDoesntOwnArgoCRDMessage = "Failed upgrade: workflows.argoproj.io CRD already exists but not deployed by this operator " + + "remove existing Argo workflows or set `spec.components.datasciencepipelines.managementState` to Removed to proceed" +) + // SetProgressingCondition sets the ProgressingCondition to True and other conditions to false or // Unknown. Used when we are just starting to reconcile, and there are no existing conditions. 
func SetProgressingCondition(conditions *[]conditionsv1.Condition, reason string, message string) { @@ -214,3 +237,8 @@ func RemoveComponentCondition(conditions *[]conditionsv1.Condition, component st type ModelRegistryStatus struct { RegistriesNamespace string `json:"registriesNamespace,omitempty"` } + +func SetStatusCondition(obj common.WithStatus, condition metav1.Condition) bool { + s := obj.GetStatus() + return meta.SetStatusCondition(&s.Conditions, condition) +} diff --git a/controllers/webhook/webhook.go b/controllers/webhook/webhook.go index db2388790ad..73ccd9060c5 100644 --- a/controllers/webhook/webhook.go +++ b/controllers/webhook/webhook.go @@ -23,6 +23,7 @@ import ( "fmt" "net/http" + "github.com/go-logr/logr" operatorv1 "github.com/openshift/api/operator/v1" admissionv1 "k8s.io/api/admission/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -30,22 +31,34 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/components/modelregistry" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/components/modelregistry" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" ) -var log = ctrl.Log.WithName("rhoai-controller-webhook") - //+kubebuilder:webhook:path=/validate-opendatahub-io-v1,mutating=false,failurePolicy=fail,sideEffects=None,groups=datasciencecluster.opendatahub.io;dscinitialization.opendatahub.io,resources=datascienceclusters;dscinitializations,verbs=create;delete,versions=v1,name=operator.opendatahub.io,admissionReviewVersions=v1 //nolint:lll +// TODO: Get rid of platform in name, rename to ValidatingWebhook. 
type OpenDataHubValidatingWebhook struct { Client client.Client Decoder *admission.Decoder + Name string +} + +// newLogConstructor creates a new logger constructor for a webhook. +// It is based on the root controller-runtime logger witch is set in main.go +// The purpose of it is to remove "admission" from the log name. +func newLogConstructor(name string) func(logr.Logger, *admission.Request) logr.Logger { + return func(_ logr.Logger, req *admission.Request) logr.Logger { + base := ctrl.Log + l := admission.DefaultLogConstructor(base, req) + return l.WithValues("webhook", name) + } } func Init(mgr ctrl.Manager) { @@ -60,7 +73,8 @@ func Init(mgr ctrl.Manager) { func (w *OpenDataHubValidatingWebhook) SetupWithManager(mgr ctrl.Manager) { hookServer := mgr.GetWebhookServer() odhWebhook := &webhook.Admission{ - Handler: w, + Handler: w, + LogConstructor: newLogConstructor(w.Name), } hookServer.Register("/validate-opendatahub-io-v1", odhWebhook) } @@ -90,6 +104,8 @@ func denyCountGtZero(ctx context.Context, cli client.Client, gvk schema.GroupVer } func (w *OpenDataHubValidatingWebhook) checkDupCreation(ctx context.Context, req admission.Request) admission.Response { + log := logf.FromContext(ctx) + switch req.Kind.Kind { case "DataScienceCluster", "DSCInitialization": default: @@ -119,6 +135,9 @@ func (w *OpenDataHubValidatingWebhook) checkDeletion(ctx context.Context, req ad } func (w *OpenDataHubValidatingWebhook) Handle(ctx context.Context, req admission.Request) admission.Response { + log := logf.FromContext(ctx).WithName(w.Name).WithValues("operation", req.Operation) + ctx = logf.IntoContext(ctx, log) + var resp admission.Response resp.Allowed = true // initialize Allowed to be true in case Operation falls into "default" case @@ -130,6 +149,7 @@ func (w *OpenDataHubValidatingWebhook) Handle(ctx context.Context, req admission default: // for other operations by default it is admission.Allowed("") // no-op } + if !resp.Allowed { return resp } @@ -140,19 +160,23 @@ 
func (w *OpenDataHubValidatingWebhook) Handle(ctx context.Context, req admission //+kubebuilder:webhook:path=/mutate-opendatahub-io-v1,mutating=true,failurePolicy=fail,sideEffects=None,groups=datasciencecluster.opendatahub.io,resources=datascienceclusters,verbs=create;update,versions=v1,name=mutate.operator.opendatahub.io,admissionReviewVersions=v1 //nolint:lll -type DSCDefaulter struct{} +type DSCDefaulter struct { + Name string +} // just assert that DSCDefaulter implements webhook.CustomDefaulter. var _ webhook.CustomDefaulter = &DSCDefaulter{} func (m *DSCDefaulter) SetupWithManager(mgr ctrl.Manager) { mutateWebhook := admission.WithCustomDefaulter(mgr.GetScheme(), &dscv1.DataScienceCluster{}, m) + mutateWebhook.LogConstructor = newLogConstructor(m.Name) mgr.GetWebhookServer().Register("/mutate-opendatahub-io-v1", mutateWebhook) } // Implement admission.CustomDefaulter interface. // It currently only sets defaults for modelregiestry in datascienceclusters. func (m *DSCDefaulter) Default(_ context.Context, obj runtime.Object) error { + // TODO: add debug logging, log := logf.FromContext(ctx).WithName(m.Name) dsc, isDSC := obj.(*dscv1.DataScienceCluster) if !isDSC { return fmt.Errorf("expected DataScienceCluster but got a different type: %T", obj) diff --git a/controllers/webhook/webhook_suite_test.go b/controllers/webhook/webhook_suite_test.go index f697c5fecdf..27c5cd811e6 100644 --- a/controllers/webhook/webhook_suite_test.go +++ b/controllers/webhook/webhook_suite_test.go @@ -40,18 +40,12 @@ import ( ctrlwebhook "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" - 
"github.com/opendatahub-io/opendatahub-operator/v2/components" - "github.com/opendatahub-io/opendatahub-operator/v2/components/codeflare" - "github.com/opendatahub-io/opendatahub-operator/v2/components/dashboard" - "github.com/opendatahub-io/opendatahub-operator/v2/components/datasciencepipelines" - "github.com/opendatahub-io/opendatahub-operator/v2/components/kserve" - "github.com/opendatahub-io/opendatahub-operator/v2/components/modelmeshserving" - "github.com/opendatahub-io/opendatahub-operator/v2/components/modelregistry" - "github.com/opendatahub-io/opendatahub-operator/v2/components/ray" - "github.com/opendatahub-io/opendatahub-operator/v2/components/trustyai" - "github.com/opendatahub-io/opendatahub-operator/v2/components/workbenches" + serviceApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/services/v1alpha1" + modelregistry2 "github.com/opendatahub-io/opendatahub-operator/v2/controllers/components/modelregistry" "github.com/opendatahub-io/opendatahub-operator/v2/controllers/webhook" . "github.com/onsi/ginkgo/v2" @@ -81,6 +75,7 @@ func TestAPIs(t *testing.T) { var _ = BeforeSuite(func() { // can't use suite's context as the manager should survive the function + //nolint:fatcontext gCtx, gCancel = context.WithCancel(context.Background()) logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) @@ -215,7 +210,7 @@ var _ = Describe("DSC mutating webhook", func() { dscInstance := newMRDSC1(nameBase+"-dsc-mr1", "", operatorv1.Managed) Expect(k8sClient.Create(ctx, dscInstance)).Should(Succeed()) Expect(dscInstance.Spec.Components.ModelRegistry.RegistriesNamespace). 
- Should(Equal(modelregistry.DefaultModelRegistriesNamespace)) + Should(Equal(modelregistry2.DefaultModelRegistriesNamespace)) Expect(clearInstance(ctx, dscInstance)).Should(Succeed()) }) @@ -243,9 +238,11 @@ func newDSCI(appName string) *dsciv1.DSCInitialization { }, Spec: dsciv1.DSCInitializationSpec{ ApplicationsNamespace: namespace, - Monitoring: dsciv1.Monitoring{ - Namespace: monitoringNS, - ManagementState: operatorv1.Managed, + Monitoring: serviceApi.DSCMonitoring{ + ManagementSpec: common.ManagementSpec{ManagementState: operatorv1.Managed}, + MonitoringCommonSpec: serviceApi.MonitoringCommonSpec{ + Namespace: monitoringNS, + }, }, TrustedCABundle: &dsciv1.TrustedCABundleSpec{ ManagementState: operatorv1.Managed, @@ -261,48 +258,48 @@ func newDSC(name string, namespace string) *dscv1.DataScienceCluster { }, Spec: dscv1.DataScienceClusterSpec{ Components: dscv1.Components{ - Dashboard: dashboard.Dashboard{ - Component: components.Component{ + Dashboard: componentApi.DSCDashboard{ + ManagementSpec: common.ManagementSpec{ ManagementState: operatorv1.Removed, }, }, - Workbenches: workbenches.Workbenches{ - Component: components.Component{ + Workbenches: componentApi.DSCWorkbenches{ + ManagementSpec: common.ManagementSpec{ ManagementState: operatorv1.Removed, }, }, - ModelMeshServing: modelmeshserving.ModelMeshServing{ - Component: components.Component{ + ModelMeshServing: componentApi.DSCModelMeshServing{ + ManagementSpec: common.ManagementSpec{ ManagementState: operatorv1.Removed, }, }, - DataSciencePipelines: datasciencepipelines.DataSciencePipelines{ - Component: components.Component{ + DataSciencePipelines: componentApi.DSCDataSciencePipelines{ + ManagementSpec: common.ManagementSpec{ ManagementState: operatorv1.Removed, }, }, - Kserve: kserve.Kserve{ - Component: components.Component{ + Kserve: componentApi.DSCKserve{ + ManagementSpec: common.ManagementSpec{ ManagementState: operatorv1.Removed, }, }, - CodeFlare: codeflare.CodeFlare{ - Component: 
components.Component{ + CodeFlare: componentApi.DSCCodeFlare{ + ManagementSpec: common.ManagementSpec{ ManagementState: operatorv1.Removed, }, }, - Ray: ray.Ray{ - Component: components.Component{ + Ray: componentApi.DSCRay{ + ManagementSpec: common.ManagementSpec{ ManagementState: operatorv1.Removed, }, }, - TrustyAI: trustyai.TrustyAI{ - Component: components.Component{ + TrustyAI: componentApi.DSCTrustyAI{ + ManagementSpec: common.ManagementSpec{ ManagementState: operatorv1.Removed, }, }, - ModelRegistry: modelregistry.ModelRegistry{ - Component: components.Component{ + ModelRegistry: componentApi.DSCModelRegistry{ + ManagementSpec: common.ManagementSpec{ ManagementState: operatorv1.Removed, }, }, @@ -311,7 +308,7 @@ func newDSC(name string, namespace string) *dscv1.DataScienceCluster { } } -func newMRDSC1(name string, mrNamespace string, state operatorv1.ManagementState) *dscv1.DataScienceCluster { +func newMRDSC1(name string, mrNamespace string, _ operatorv1.ManagementState) *dscv1.DataScienceCluster { return &dscv1.DataScienceCluster{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -319,11 +316,13 @@ func newMRDSC1(name string, mrNamespace string, state operatorv1.ManagementState }, Spec: dscv1.DataScienceClusterSpec{ Components: dscv1.Components{ - ModelRegistry: modelregistry.ModelRegistry{ - Component: components.Component{ - ManagementState: state, + ModelRegistry: componentApi.DSCModelRegistry{ + ManagementSpec: common.ManagementSpec{ + ManagementState: operatorv1.Removed, + }, + ModelRegistryCommonSpec: componentApi.ModelRegistryCommonSpec{ + RegistriesNamespace: mrNamespace, }, - RegistriesNamespace: mrNamespace, }, }, }, @@ -338,8 +337,8 @@ func newMRDSC2(name string) *dscv1.DataScienceCluster { }, Spec: dscv1.DataScienceClusterSpec{ Components: dscv1.Components{ - Workbenches: workbenches.Workbenches{ - Component: components.Component{ + Workbenches: componentApi.DSCWorkbenches{ + ManagementSpec: common.ManagementSpec{ ManagementState: 
operatorv1.Removed, }, }, diff --git a/docs/DESIGN.md b/docs/DESIGN.md index cbc90a534c7..6cde2083687 100644 --- a/docs/DESIGN.md +++ b/docs/DESIGN.md @@ -75,6 +75,8 @@ To deploy ODH components seamlessly, ODH operator will watch two CRDs: managementState: Managed workbenches: managementState: Managed + trustyai: + managementState: Managed ``` 2. Enable only Dashboard and Workbenches(Jupyter Notebooks) @@ -91,3 +93,16 @@ To deploy ODH components seamlessly, ODH operator will watch two CRDs: workbenches: managementState: Managed ``` + +3. Enable Data Science Pipelines + + ```console + apiVersion: datasciencecluster.opendatahub.io/v1 + kind: DataScienceCluster + metadata: + name: example + spec: + components: + datasciencepipelines: + managementState: Managed + ``` diff --git a/docs/api-overview.md b/docs/api-overview.md index f4259c203f8..284a52907c0 100644 --- a/docs/api-overview.md +++ b/docs/api-overview.md @@ -1,86 +1,1461 @@ # API Reference ## Packages +- [components.platform.opendatahub.io/v1alpha1](#componentsplatformopendatahubiov1alpha1) - [datasciencecluster.opendatahub.io/v1](#datascienceclusteropendatahubiov1) - [dscinitialization.opendatahub.io/v1](#dscinitializationopendatahubiov1) +- [services.platform.opendatahub.io/v1alpha1](#servicesplatformopendatahubiov1alpha1) -## datasciencecluster.opendatahub.io/codeflare +## components.platform.opendatahub.io/v1alpha1 + +Package v1 contains API Schema definitions for the components v1 API group + +### Resource Types +- [CodeFlare](#codeflare) +- [CodeFlareList](#codeflarelist) +- [Dashboard](#dashboard) +- [DashboardList](#dashboardlist) +- [DataSciencePipelines](#datasciencepipelines) +- [DataSciencePipelinesList](#datasciencepipelineslist) +- [Kserve](#kserve) +- [KserveList](#kservelist) +- [Kueue](#kueue) +- [KueueList](#kueuelist) +- [ModelController](#modelcontroller) +- [ModelControllerList](#modelcontrollerlist) +- [ModelMeshServing](#modelmeshserving) +- [ModelMeshServingList](#modelmeshservinglist) 
+- [ModelRegistry](#modelregistry) +- [ModelRegistryList](#modelregistrylist) +- [Ray](#ray) +- [RayList](#raylist) +- [TrainingOperator](#trainingoperator) +- [TrainingOperatorList](#trainingoperatorlist) +- [TrustyAI](#trustyai) +- [TrustyAIList](#trustyailist) +- [Workbenches](#workbenches) +- [WorkbenchesList](#workbencheslist) + + + +#### CodeFlare + + + +CodeFlare is the Schema for the codeflares API + + + +_Appears in:_ +- [CodeFlareList](#codeflarelist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `components.platform.opendatahub.io/v1alpha1` | | | +| `kind` _string_ | `CodeFlare` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[CodeFlareSpec](#codeflarespec)_ | | | | +| `status` _[CodeFlareStatus](#codeflarestatus)_ | | | | + + +#### CodeFlareCommonSpec + + + + + + + +_Appears in:_ +- [CodeFlareSpec](#codeflarespec) +- [DSCCodeFlare](#dsccodeflare) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | + + +#### CodeFlareCommonStatus + + + +CodeFlareCommonStatus defines the shared observed state of CodeFlare + + + +_Appears in:_ +- [CodeFlareStatus](#codeflarestatus) +- [DSCCodeFlareStatus](#dsccodeflarestatus) + + + +#### CodeFlareList + + + +CodeFlareList contains a list of CodeFlare + + + + + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `components.platform.opendatahub.io/v1alpha1` | | | +| `kind` _string_ | `CodeFlareList` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[CodeFlare](#codeflare) array_ | | | | + + +#### CodeFlareSpec + + + + + + + +_Appears in:_ +- [CodeFlare](#codeflare) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | + + +#### CodeFlareStatus + + + +CodeFlareStatus defines the observed state of CodeFlare + + + +_Appears in:_ +- [CodeFlare](#codeflare) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `phase` _string_ | | | | +| `observedGeneration` _integer_ | | | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#condition-v1-meta) array_ | | | | + + +#### DSCCodeFlare + + + + + + + +_Appears in:_ +- [Components](#components) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `managementState` _[ManagementState](#managementstate)_ | Set to one of the following values:

- "Managed" : the operator is actively managing the component and trying to keep it active.
It will only upgrade the component if it is safe to do so

- "Removed" : the operator is actively managing the component and will not install it,
or if it is installed, the operator will try to remove it | | Enum: [Managed Removed]
| +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | + + +#### DSCCodeFlareStatus + + + +DSCCodeFlareStatus contains the observed state of the CodeFlare exposed in the DSC instance + + + +_Appears in:_ +- [ComponentsStatus](#componentsstatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `managementState` _[ManagementState](#managementstate)_ | Set to one of the following values:

- "Managed" : the operator is actively managing the component and trying to keep it active.
It will only upgrade the component if it is safe to do so

- "Removed" : the operator is actively managing the component and will not install it,
or if it is installed, the operator will try to remove it | | Enum: [Managed Removed]
| + + +#### DSCDashboard + + + +DSCDashboard contains all the configuration exposed in DSC instance for Dashboard component + + + +_Appears in:_ +- [Components](#components) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `managementState` _[ManagementState](#managementstate)_ | Set to one of the following values:

- "Managed" : the operator is actively managing the component and trying to keep it active.
It will only upgrade the component if it is safe to do so

- "Removed" : the operator is actively managing the component and will not install it,
or if it is installed, the operator will try to remove it | | Enum: [Managed Removed]
| +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | + + +#### DSCDashboardStatus + + + +DSCDashboardStatus contains the observed state of the Dashboard exposed in the DSC instance + + + +_Appears in:_ +- [ComponentsStatus](#componentsstatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `managementState` _[ManagementState](#managementstate)_ | Set to one of the following values:

- "Managed" : the operator is actively managing the component and trying to keep it active.
It will only upgrade the component if it is safe to do so

- "Removed" : the operator is actively managing the component and will not install it,
or if it is installed, the operator will try to remove it | | Enum: [Managed Removed]
| + + +#### DSCDataSciencePipelines + + + +DSCDataSciencePipelines contains all the configuration exposed in DSC instance for DataSciencePipelines component + + + +_Appears in:_ +- [Components](#components) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `managementState` _[ManagementState](#managementstate)_ | Set to one of the following values:

- "Managed" : the operator is actively managing the component and trying to keep it active.
It will only upgrade the component if it is safe to do so

- "Removed" : the operator is actively managing the component and will not install it,
or if it is installed, the operator will try to remove it | | Enum: [Managed Removed]
| +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | + + +#### DSCDataSciencePipelinesStatus + + + +DSCDataSciencePipelinesStatus contains the observed state of the DataSciencePipelines exposed in the DSC instance + + + +_Appears in:_ +- [ComponentsStatus](#componentsstatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `managementState` _[ManagementState](#managementstate)_ | Set to one of the following values:

- "Managed" : the operator is actively managing the component and trying to keep it active.
It will only upgrade the component if it is safe to do so

- "Removed" : the operator is actively managing the component and will not install it,
or if it is installed, the operator will try to remove it | | Enum: [Managed Removed]
| + + +#### DSCKserve + + + +DSCKserve contains all the configuration exposed in DSC instance for Kserve component + + + +_Appears in:_ +- [Components](#components) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `managementState` _[ManagementState](#managementstate)_ | Set to one of the following values:

- "Managed" : the operator is actively managing the component and trying to keep it active.
It will only upgrade the component if it is safe to do so

- "Removed" : the operator is actively managing the component and will not install it,
or if it is installed, the operator will try to remove it | | Enum: [Managed Removed]
| +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | +| `serving` _[ServingSpec](#servingspec)_ | Serving configures the KNative-Serving stack used for model serving. A Service
Mesh (Istio) is a prerequisite, since it is used as the networking layer. | | | +| `defaultDeploymentMode` _[DefaultDeploymentMode](#defaultdeploymentmode)_ | Configures the default deployment mode for Kserve. This can be set to 'Serverless' or 'RawDeployment'.
The value specified in this field will be used to set the default deployment mode in the 'inferenceservice-config' configmap for Kserve.
This field is optional. If no default deployment mode is specified, Kserve will use Serverless mode. | | Enum: [Serverless RawDeployment]
Pattern: `^(Serverless\|RawDeployment)$`
| +| `nim` _[NimSpec](#nimspec)_ | Configures and enables NVIDIA NIM integration | | | + + +#### DSCKserveStatus + + + +DSCKserveStatus contains the observed state of the Kserve exposed in the DSC instance + + + +_Appears in:_ +- [ComponentsStatus](#componentsstatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `managementState` _[ManagementState](#managementstate)_ | Set to one of the following values:

- "Managed" : the operator is actively managing the component and trying to keep it active.
It will only upgrade the component if it is safe to do so

- "Removed" : the operator is actively managing the component and will not install it,
or if it is installed, the operator will try to remove it | | Enum: [Managed Removed]
| + + +#### DSCKueue + + + +DSCKueue contains all the configuration exposed in DSC instance for Kueue component + + + +_Appears in:_ +- [Components](#components) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `managementState` _[ManagementState](#managementstate)_ | Set to one of the following values:

- "Managed" : the operator is actively managing the component and trying to keep it active.
It will only upgrade the component if it is safe to do so

- "Removed" : the operator is actively managing the component and will not install it,
or if it is installed, the operator will try to remove it | | Enum: [Managed Removed]
| +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | + + +#### DSCKueueStatus + + + +DSCKueueStatus contains the observed state of the Kueue exposed in the DSC instance + + + +_Appears in:_ +- [ComponentsStatus](#componentsstatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `managementState` _[ManagementState](#managementstate)_ | Set to one of the following values:

- "Managed" : the operator is actively managing the component and trying to keep it active.
It will only upgrade the component if it is safe to do so

- "Removed" : the operator is actively managing the component and will not install it,
or if it is installed, the operator will try to remove it | | Enum: [Managed Removed]
| + + +#### DSCModelMeshServing + + + +DSCModelMeshServing contains all the configuration exposed in DSC instance for ModelMeshServing component + + + +_Appears in:_ +- [Components](#components) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `managementState` _[ManagementState](#managementstate)_ | Set to one of the following values:

- "Managed" : the operator is actively managing the component and trying to keep it active.
It will only upgrade the component if it is safe to do so

- "Removed" : the operator is actively managing the component and will not install it,
or if it is installed, the operator will try to remove it | | Enum: [Managed Removed]
| +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | + + +#### DSCModelMeshServingStatus + + + +DSCModelMeshServingStatus contains the observed state of the ModelMeshServing exposed in the DSC instance + + + +_Appears in:_ +- [ComponentsStatus](#componentsstatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `managementState` _[ManagementState](#managementstate)_ | Set to one of the following values:

- "Managed" : the operator is actively managing the component and trying to keep it active.
It will only upgrade the component if it is safe to do so

- "Removed" : the operator is actively managing the component and will not install it,
or if it is installed, the operator will try to remove it | | Enum: [Managed Removed]
| + + +#### DSCModelRegistry + + + +DSCModelRegistry contains all the configuration exposed in DSC instance for ModelRegistry component + + + +_Appears in:_ +- [Components](#components) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `managementState` _[ManagementState](#managementstate)_ | Set to one of the following values:

- "Managed" : the operator is actively managing the component and trying to keep it active.
It will only upgrade the component if it is safe to do so

- "Removed" : the operator is actively managing the component and will not install it,
or if it is installed, the operator will try to remove it | | Enum: [Managed Removed]
| +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | +| `registriesNamespace` _string_ | Namespace for model registries to be installed, configurable only once when model registry is enabled, defaults to "rhoai-model-registries" | rhoai-model-registries | MaxLength: 63
Pattern: `^([a-z0-9]([-a-z0-9]*[a-z0-9])?)?$`
| + + +#### DSCModelRegistryStatus + + + +DSCModelRegistryStatus struct holds the status for the ModelRegistry component exposed in the DSC + + + +_Appears in:_ +- [ComponentsStatus](#componentsstatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `managementState` _[ManagementState](#managementstate)_ | Set to one of the following values:

- "Managed" : the operator is actively managing the component and trying to keep it active.
It will only upgrade the component if it is safe to do so

- "Removed" : the operator is actively managing the component and will not install it,
or if it is installed, the operator will try to remove it | | Enum: [Managed Removed]
| + + +#### DSCRay + + + +DSCRay contains all the configuration exposed in DSC instance for Ray component + + + +_Appears in:_ +- [Components](#components) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `managementState` _[ManagementState](#managementstate)_ | Set to one of the following values:

- "Managed" : the operator is actively managing the component and trying to keep it active.
It will only upgrade the component if it is safe to do so

- "Removed" : the operator is actively managing the component and will not install it,
or if it is installed, the operator will try to remove it | | Enum: [Managed Removed]
| +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | + + +#### DSCRayStatus + + + +DSCRayStatus struct holds the status for the Ray component exposed in the DSC + + + +_Appears in:_ +- [ComponentsStatus](#componentsstatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `managementState` _[ManagementState](#managementstate)_ | Set to one of the following values:

- "Managed" : the operator is actively managing the component and trying to keep it active.
It will only upgrade the component if it is safe to do so

- "Removed" : the operator is actively managing the component and will not install it,
or if it is installed, the operator will try to remove it | | Enum: [Managed Removed]
| + + +#### DSCTrainingOperator + + + +DSCTrainingOperator contains all the configuration exposed in DSC instance for TrainingOperator component + + + +_Appears in:_ +- [Components](#components) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `managementState` _[ManagementState](#managementstate)_ | Set to one of the following values:

- "Managed" : the operator is actively managing the component and trying to keep it active.
It will only upgrade the component if it is safe to do so

- "Removed" : the operator is actively managing the component and will not install it,
or if it is installed, the operator will try to remove it | | Enum: [Managed Removed]
| +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | + + +#### DSCTrainingOperatorStatus + + + +DSCTrainingOperatorStatus struct holds the status for the TrainingOperator component exposed in the DSC + + + +_Appears in:_ +- [ComponentsStatus](#componentsstatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `managementState` _[ManagementState](#managementstate)_ | Set to one of the following values:

- "Managed" : the operator is actively managing the component and trying to keep it active.
It will only upgrade the component if it is safe to do so

- "Removed" : the operator is actively managing the component and will not install it,
or if it is installed, the operator will try to remove it | | Enum: [Managed Removed]
| + + +#### DSCTrustyAI + + + +DSCTrustyAI contains all the configuration exposed in DSC instance for TrustyAI component + + + +_Appears in:_ +- [Components](#components) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `managementState` _[ManagementState](#managementstate)_ | Set to one of the following values:

- "Managed" : the operator is actively managing the component and trying to keep it active.
It will only upgrade the component if it is safe to do so

- "Removed" : the operator is actively managing the component and will not install it,
or if it is installed, the operator will try to remove it | | Enum: [Managed Removed]
| +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | + + +#### DSCTrustyAIStatus + + + +DSCTrustyAIStatus struct holds the status for the TrustyAI component exposed in the DSC + + + +_Appears in:_ +- [ComponentsStatus](#componentsstatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `managementState` _[ManagementState](#managementstate)_ | Set to one of the following values:

- "Managed" : the operator is actively managing the component and trying to keep it active.
It will only upgrade the component if it is safe to do so

- "Removed" : the operator is actively managing the component and will not install it,
or if it is installed, the operator will try to remove it | | Enum: [Managed Removed]
| + + +#### DSCWorkbenches + + + +DSCWorkbenches contains all the configuration exposed in DSC instance for Workbenches component + + + +_Appears in:_ +- [Components](#components) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `managementState` _[ManagementState](#managementstate)_ | Set to one of the following values:

- "Managed" : the operator is actively managing the component and trying to keep it active.
It will only upgrade the component if it is safe to do so

- "Removed" : the operator is actively managing the component and will not install it,
or if it is installed, the operator will try to remove it | | Enum: [Managed Removed]
| +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | + + +#### DSCWorkbenchesStatus + + + +DSCWorkbenchesStatus struct holds the status for the Workbenches component exposed in the DSC + + + +_Appears in:_ +- [ComponentsStatus](#componentsstatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `managementState` _[ManagementState](#managementstate)_ | Set to one of the following values:

- "Managed" : the operator is actively managing the component and trying to keep it active.
It will only upgrade the component if it is safe to do so

- "Removed" : the operator is actively managing the component and will not install it,
or if it is installed, the operator will try to remove it | | Enum: [Managed Removed]
| + + +#### Dashboard + + + +Dashboard is the Schema for the dashboards API + + + +_Appears in:_ +- [DashboardList](#dashboardlist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `components.platform.opendatahub.io/v1alpha1` | | | +| `kind` _string_ | `Dashboard` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[DashboardSpec](#dashboardspec)_ | | | | +| `status` _[DashboardStatus](#dashboardstatus)_ | | | | + + +#### DashboardCommonSpec + + + +DashboardCommonSpec spec defines the shared desired state of Dashboard + + + +_Appears in:_ +- [DSCDashboard](#dscdashboard) +- [DashboardSpec](#dashboardspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | + + +#### DashboardCommonStatus + + + +DashboardCommonStatus defines the shared observed state of Dashboard + + + +_Appears in:_ +- [DSCDashboardStatus](#dscdashboardstatus) +- [DashboardStatus](#dashboardstatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `url` _string_ | | | | + + +#### DashboardList + + + +DashboardList contains a list of Dashboard + + + + + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `components.platform.opendatahub.io/v1alpha1` | | | +| `kind` _string_ | `DashboardList` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[Dashboard](#dashboard) array_ | | | | + + +#### DashboardSpec + + + +DashboardSpec defines the desired state of Dashboard + + + +_Appears in:_ +- [Dashboard](#dashboard) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | + + +#### DashboardStatus + + + +DashboardStatus defines the observed state of Dashboard + + + +_Appears in:_ +- [Dashboard](#dashboard) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `phase` _string_ | | | | +| `observedGeneration` _integer_ | | | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#condition-v1-meta) array_ | | | | +| `url` _string_ | | | | + + +#### DataSciencePipelines + + + +DataSciencePipelines is the Schema for the datasciencepipelines API + + + +_Appears in:_ +- [DataSciencePipelinesList](#datasciencepipelineslist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `components.platform.opendatahub.io/v1alpha1` | | | +| `kind` _string_ | `DataSciencePipelines` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[DataSciencePipelinesSpec](#datasciencepipelinesspec)_ | | | | +| `status` _[DataSciencePipelinesStatus](#datasciencepipelinesstatus)_ | | | | + + +#### DataSciencePipelinesCommonSpec + + + + + + + +_Appears in:_ +- [DSCDataSciencePipelines](#dscdatasciencepipelines) +- [DataSciencePipelinesSpec](#datasciencepipelinesspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | + + +#### DataSciencePipelinesCommonStatus + + + +DataSciencePipelinesCommonStatus defines the shared observed state of DataSciencePipelines + + + +_Appears in:_ +- [DSCDataSciencePipelinesStatus](#dscdatasciencepipelinesstatus) +- [DataSciencePipelinesStatus](#datasciencepipelinesstatus) + + + +#### DataSciencePipelinesList + + + +DataSciencePipelinesList contains a list of DataSciencePipelines + + + + + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `components.platform.opendatahub.io/v1alpha1` | | | +| `kind` _string_ | `DataSciencePipelinesList` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[DataSciencePipelines](#datasciencepipelines) array_ | | | | + + +#### DataSciencePipelinesSpec + + + +DataSciencePipelinesSpec defines the desired state of DataSciencePipelines + + + +_Appears in:_ +- [DataSciencePipelines](#datasciencepipelines) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | + + +#### DataSciencePipelinesStatus + + + +DataSciencePipelinesStatus defines the observed state of DataSciencePipelines + + + +_Appears in:_ +- [DataSciencePipelines](#datasciencepipelines) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `phase` _string_ | | | | +| `observedGeneration` _integer_ | | | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#condition-v1-meta) array_ | | | | + + +#### DefaultDeploymentMode + +_Underlying type:_ _string_ + + + +_Validation:_ +- Pattern: `^(Serverless|RawDeployment)$` + +_Appears in:_ +- [DSCKserve](#dsckserve) +- [KserveCommonSpec](#kservecommonspec) +- [KserveSpec](#kservespec) + +| Field | Description | +| --- | --- | +| `Serverless` | Serverless will be used as the default deployment mode for Kserve. This requires Serverless and ServiceMesh operators configured as dependencies.
| +| `RawDeployment` | RawDeployment will be used as the default deployment mode for Kserve.
| + + +#### Kserve + + + +Kserve is the Schema for the kserves API + + + +_Appears in:_ +- [KserveList](#kservelist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `components.platform.opendatahub.io/v1alpha1` | | | +| `kind` _string_ | `Kserve` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[KserveSpec](#kservespec)_ | | | | +| `status` _[KserveStatus](#kservestatus)_ | | | | + + +#### KserveCommonSpec + + + +KserveCommonSpec spec defines the shared desired state of Kserve + + + +_Appears in:_ +- [DSCKserve](#dsckserve) +- [KserveSpec](#kservespec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | +| `serving` _[ServingSpec](#servingspec)_ | Serving configures the KNative-Serving stack used for model serving. A Service
Mesh (Istio) is a prerequisite, since it is used as the networking layer. | | | +| `defaultDeploymentMode` _[DefaultDeploymentMode](#defaultdeploymentmode)_ | Configures the default deployment mode for Kserve. This can be set to 'Serverless' or 'RawDeployment'.
The value specified in this field will be used to set the default deployment mode in the 'inferenceservice-config' configmap for Kserve.
This field is optional. If no default deployment mode is specified, Kserve will use Serverless mode. | | Enum: [Serverless RawDeployment]
Pattern: `^(Serverless\|RawDeployment)$`
| +| `nim` _[NimSpec](#nimspec)_ | Configures and enables NVIDIA NIM integration | | | + + +#### KserveCommonStatus + + + +KserveCommonStatus defines the shared observed state of Kserve + + + +_Appears in:_ +- [DSCKserveStatus](#dsckservestatus) +- [KserveStatus](#kservestatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `defaultDeploymentMode` _string_ | DefaultDeploymentMode is the value of the defaultDeploymentMode field
as read from the "deploy" JSON in the inferenceservice-config ConfigMap | | | + + +#### KserveList + + + +KserveList contains a list of Kserve + + + + + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `components.platform.opendatahub.io/v1alpha1` | | | +| `kind` _string_ | `KserveList` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[Kserve](#kserve) array_ | | | | + + +#### KserveSpec + + + +KserveSpec defines the desired state of Kserve + + + +_Appears in:_ +- [Kserve](#kserve) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | +| `serving` _[ServingSpec](#servingspec)_ | Serving configures the KNative-Serving stack used for model serving. A Service
Mesh (Istio) is a prerequisite, since it is used as the networking layer. | | | +| `defaultDeploymentMode` _[DefaultDeploymentMode](#defaultdeploymentmode)_ | Configures the default deployment mode for Kserve. This can be set to 'Serverless' or 'RawDeployment'.
The value specified in this field will be used to set the default deployment mode in the 'inferenceservice-config' configmap for Kserve.
This field is optional. If no default deployment mode is specified, Kserve will use Serverless mode. | | Enum: [Serverless RawDeployment]
Pattern: `^(Serverless\|RawDeployment)$`
| +| `nim` _[NimSpec](#nimspec)_ | Configures and enables NVIDIA NIM integration | | | + + +#### KserveStatus + + + +KserveStatus defines the observed state of Kserve + + + +_Appears in:_ +- [Kserve](#kserve) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `phase` _string_ | | | | +| `observedGeneration` _integer_ | | | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#condition-v1-meta) array_ | | | | +| `defaultDeploymentMode` _string_ | DefaultDeploymentMode is the value of the defaultDeploymentMode field
as read from the "deploy" JSON in the inferenceservice-config ConfigMap | | | + + +#### Kueue + + + +Kueue is the Schema for the kueues API + + + +_Appears in:_ +- [KueueList](#kueuelist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `components.platform.opendatahub.io/v1alpha1` | | | +| `kind` _string_ | `Kueue` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[KueueSpec](#kueuespec)_ | | | | +| `status` _[KueueStatus](#kueuestatus)_ | | | | + + +#### KueueCommonSpec + + + + + + + +_Appears in:_ +- [DSCKueue](#dsckueue) +- [KueueSpec](#kueuespec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | + + +#### KueueCommonStatus + + + +KueueCommonStatus defines the shared observed state of Kueue + + + +_Appears in:_ +- [DSCKueueStatus](#dsckueuestatus) +- [KueueStatus](#kueuestatus) + + + +#### KueueList + + + +KueueList contains a list of Kueue + + + + + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `components.platform.opendatahub.io/v1alpha1` | | | +| `kind` _string_ | `KueueList` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[Kueue](#kueue) array_ | | | | + + +#### KueueSpec + + + +KueueSpec defines the desired state of Kueue + + + +_Appears in:_ +- [Kueue](#kueue) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | + + +#### KueueStatus + + + +KueueStatus defines the observed state of Kueue + + + +_Appears in:_ +- [Kueue](#kueue) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `phase` _string_ | | | | +| `observedGeneration` _integer_ | | | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#condition-v1-meta) array_ | | | | + + +#### ModelController + + + +ModelController is the Schema for the modelcontroller API, it is a shared component between kserve and modelmeshserving + + + +_Appears in:_ +- [ModelControllerList](#modelcontrollerlist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `components.platform.opendatahub.io/v1alpha1` | | | +| `kind` _string_ | `ModelController` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[ModelControllerSpec](#modelcontrollerspec)_ | | | | +| `status` _[ModelControllerStatus](#modelcontrollerstatus)_ | | | | + + +#### ModelControllerKerveSpec + + + +a mini version of the DSCKserve only keep devflags and management spec + + + +_Appears in:_ +- [ModelControllerSpec](#modelcontrollerspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `managementState` _[ManagementState](#managementstate)_ | | | | +| `nim` _[NimSpec](#nimspec)_ | | | | +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | + + +#### ModelControllerList + + + +ModelControllerList contains a list of ModelController + + + + + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `components.platform.opendatahub.io/v1alpha1` | | | +| `kind` _string_ | `ModelControllerList` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[ModelController](#modelcontroller) array_ | | | | + + +#### ModelControllerMMSpec + + + +a mini version of the DSCModelMeshServing only keep devflags and management spec + + + +_Appears in:_ +- [ModelControllerSpec](#modelcontrollerspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `managementState` _[ManagementState](#managementstate)_ | | | | +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | + + +#### ModelControllerSpec -Package codeflare provides utility functions to config CodeFlare as part of the stack -which makes managing distributed compute infrastructure in the cloud easy and intuitive for Data Scientists +ModelControllerSpec defines the desired state of ModelController + + + +_Appears in:_ +- [ModelController](#modelcontroller) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `kserve` _[ModelControllerKerveSpec](#modelcontrollerkervespec)_ | ModelMeshServing DSCModelMeshServing `json:"modelMeshServing,omitempty"` | | | +| `modelMeshServing` _[ModelControllerMMSpec](#modelcontrollermmspec)_ | | | | -#### CodeFlare +#### ModelControllerStatus -CodeFlare struct holds the configuration for the CodeFlare component. 
+ +ModelControllerStatus defines the observed state of ModelController _Appears in:_ -- [Components](#components) +- [ModelController](#modelcontroller) | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `Component` _[Component](#component)_ | | | | +| `phase` _string_ | | | | +| `observedGeneration` _integer_ | | | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#condition-v1-meta) array_ | | | | +#### ModelMeshServing + + + +ModelMeshServing is the Schema for the modelmeshservings API + -## datasciencecluster.opendatahub.io/components +_Appears in:_ +- [ModelMeshServingList](#modelmeshservinglist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `components.platform.opendatahub.io/v1alpha1` | | | +| `kind` _string_ | `ModelMeshServing` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[ModelMeshServingSpec](#modelmeshservingspec)_ | | | | +| `status` _[ModelMeshServingStatus](#modelmeshservingstatus)_ | | | | +#### ModelMeshServingCommonSpec -#### Component -Component struct defines the basis for each OpenDataHub component configuration. _Appears in:_ -- [CodeFlare](#codeflare) -- [Dashboard](#dashboard) -- [DataSciencePipelines](#datasciencepipelines) -- [Kserve](#kserve) -- [Kueue](#kueue) +- [DSCModelMeshServing](#dscmodelmeshserving) +- [ModelMeshServingSpec](#modelmeshservingspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | + + +#### ModelMeshServingCommonStatus + + + +ModelMeshServingCommonStatus defines the shared observed state of ModelMeshServing + + + +_Appears in:_ +- [DSCModelMeshServingStatus](#dscmodelmeshservingstatus) +- [ModelMeshServingStatus](#modelmeshservingstatus) + + + +#### ModelMeshServingList + + + +ModelMeshServingList contains a list of ModelMeshServing + + + + + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `components.platform.opendatahub.io/v1alpha1` | | | +| `kind` _string_ | `ModelMeshServingList` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[ModelMeshServing](#modelmeshserving) array_ | | | | + + +#### ModelMeshServingSpec + + + +ModelMeshServingSpec defines the desired state of ModelMeshServing + + + +_Appears in:_ +- [ModelMeshServing](#modelmeshserving) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | + + +#### ModelMeshServingStatus + + + +ModelMeshServingStatus defines the observed state of ModelMeshServing + + + +_Appears in:_ - [ModelMeshServing](#modelmeshserving) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `phase` _string_ | | | | +| `observedGeneration` _integer_ | | | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#condition-v1-meta) array_ | | | | + + +#### ModelRegistry + + + +ModelRegistry is the Schema for the modelregistries API + + + +_Appears in:_ +- [ModelRegistryList](#modelregistrylist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `components.platform.opendatahub.io/v1alpha1` | | | +| `kind` _string_ | `ModelRegistry` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[ModelRegistrySpec](#modelregistryspec)_ | | | | +| `status` _[ModelRegistryStatus](#modelregistrystatus)_ | | | | + + +#### ModelRegistryCommonSpec + + + +ModelRegistryCommonSpec spec defines the shared desired state of ModelRegistry + + + +_Appears in:_ +- [DSCModelRegistry](#dscmodelregistry) +- [ModelRegistrySpec](#modelregistryspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | +| `registriesNamespace` _string_ | Namespace for model registries to be installed, configurable only once when model registry is enabled, defaults to "odh-model-registries" | rhoai-model-registries | MaxLength: 63
Pattern: `^([a-z0-9]([-a-z0-9]*[a-z0-9])?)?$`
| + + +#### ModelRegistryCommonStatus + + + +ModelRegistryCommonStatus defines the shared observed state of ModelRegistry + + + +_Appears in:_ +- [DSCModelRegistryStatus](#dscmodelregistrystatus) +- [ModelRegistryStatus](#modelregistrystatus) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `registriesNamespace` _string_ | | | | + + +#### ModelRegistryList + + + +ModelRegistryList contains a list of ModelRegistry + + + + + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `components.platform.opendatahub.io/v1alpha1` | | | +| `kind` _string_ | `ModelRegistryList` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[ModelRegistry](#modelregistry) array_ | | | | + + +#### ModelRegistrySpec + + + +ModelRegistrySpec defines the desired state of ModelRegistry + + + +_Appears in:_ +- [ModelRegistry](#modelregistry) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | +| `registriesNamespace` _string_ | Namespace for model registries to be installed, configurable only once when model registry is enabled, defaults to "odh-model-registries" | rhoai-model-registries | MaxLength: 63
Pattern: `^([a-z0-9]([-a-z0-9]*[a-z0-9])?)?$`
| + + +#### ModelRegistryStatus + + + +ModelRegistryStatus defines the observed state of ModelRegistry + + + +_Appears in:_ - [ModelRegistry](#modelregistry) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `phase` _string_ | | | | +| `observedGeneration` _integer_ | | | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#condition-v1-meta) array_ | | | | +| `registriesNamespace` _string_ | | | | + + +#### NimSpec + + + +nimSpec enables NVIDIA NIM integration + + + +_Appears in:_ +- [DSCKserve](#dsckserve) +- [KserveCommonSpec](#kservecommonspec) +- [KserveSpec](#kservespec) +- [ModelControllerKerveSpec](#modelcontrollerkervespec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `managementState` _[ManagementState](#managementstate)_ | | Managed | Enum: [Managed Removed]
| + + +#### Ray + + + +Ray is the Schema for the rays API + + + +_Appears in:_ +- [RayList](#raylist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `components.platform.opendatahub.io/v1alpha1` | | | +| `kind` _string_ | `Ray` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[RaySpec](#rayspec)_ | | | | +| `status` _[RayStatus](#raystatus)_ | | | | + + +#### RayCommonSpec + + + + + + + +_Appears in:_ +- [DSCRay](#dscray) +- [RaySpec](#rayspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | + + +#### RayCommonStatus + + + +RayCommonStatus defines the shared observed state of Ray + + + +_Appears in:_ +- [DSCRayStatus](#dscraystatus) +- [RayStatus](#raystatus) + + + +#### RayList + + + +RayList contains a list of Ray + + + + + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `components.platform.opendatahub.io/v1alpha1` | | | +| `kind` _string_ | `RayList` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[Ray](#ray) array_ | | | | + + +#### RaySpec + + + +RaySpec defines the desired state of Ray + + + +_Appears in:_ - [Ray](#ray) -- [TrainingOperator](#trainingoperator) -- [TrustyAI](#trustyai) -- [Workbenches](#workbenches) | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `managementState` _[ManagementState](#managementstate)_ | Set to one of the following values:

- "Managed" : the operator is actively managing the component and trying to keep it active.
It will only upgrade the component if it is safe to do so

- "Removed" : the operator is actively managing the component and will not install it,
or if it is installed, the operator will try to remove it | | Enum: [Managed Removed]
| | `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | +#### RayStatus -#### DevFlags +RayStatus defines the observed state of Ray + + + +_Appears in:_ +- [Ray](#ray) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `phase` _string_ | | | | +| `observedGeneration` _integer_ | | | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#condition-v1-meta) array_ | | | | + + +#### TrainingOperator -DevFlags defines list of fields that can be used by developers to test customizations. This is not recommended -to be used in production environment. + +TrainingOperator is the Schema for the trainingoperators API _Appears in:_ -- [Component](#component) +- [TrainingOperatorList](#trainingoperatorlist) | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `manifests` _[ManifestsConfig](#manifestsconfig) array_ | List of custom manifests for the given component | | | +| `apiVersion` _string_ | `components.platform.opendatahub.io/v1alpha1` | | | +| `kind` _string_ | `TrainingOperator` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[TrainingOperatorSpec](#trainingoperatorspec)_ | | | | +| `status` _[TrainingOperatorStatus](#trainingoperatorstatus)_ | | | | -#### ManifestsConfig +#### TrainingOperatorCommonSpec @@ -89,240 +1464,302 @@ _Appears in:_ _Appears in:_ -- [DevFlags](#devflags) +- [DSCTrainingOperator](#dsctrainingoperator) +- [TrainingOperatorSpec](#trainingoperatorspec) | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `uri` _string_ | uri is the URI point to a git repo with tag/branch. e.g. https://github.com/org/repo/tarball/ | | | -| `contextDir` _string_ | contextDir is the relative path to the folder containing manifests in a repository, default value "manifests" | manifests | | -| `sourcePath` _string_ | sourcePath is the subpath within contextDir where kustomize builds start. Examples include any sub-folder or path: `base`, `overlays/dev`, `default`, `odh` etc. | | | +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | +#### TrainingOperatorCommonStatus -## datasciencecluster.opendatahub.io/dashboard -Package dashboard provides utility functions to config Open Data Hub Dashboard: A web dashboard that displays -installed Open Data Hub components with easy access to component UIs and documentation +TrainingOperatorCommonStatus defines the shared observed state of TrainingOperator -#### Dashboard +_Appears in:_ +- [DSCTrainingOperatorStatus](#dsctrainingoperatorstatus) +- [TrainingOperatorStatus](#trainingoperatorstatus) -Dashboard struct holds the configuration for the Dashboard component. 
+#### TrainingOperatorList -_Appears in:_ -- [Components](#components) -| Field | Description | Default | Validation | -| --- | --- | --- | --- | -| `Component` _[Component](#component)_ | | | | +TrainingOperatorList contains a list of TrainingOperator -## datasciencecluster.opendatahub.io/datasciencepipelines -Package datasciencepipelines provides utility functions to config Data Science Pipelines: -Pipeline solution for end to end MLOps workflows that support the Kubeflow Pipelines SDK and Argo Workflows. +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `components.platform.opendatahub.io/v1alpha1` | | | +| `kind` _string_ | `TrainingOperatorList` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[TrainingOperator](#trainingoperator) array_ | | | | -#### DataSciencePipelines +#### TrainingOperatorSpec -DataSciencePipelines struct holds the configuration for the DataSciencePipelines component. +TrainingOperatorSpec defines the desired state of TrainingOperator _Appears in:_ -- [Components](#components) +- [TrainingOperator](#trainingoperator) | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `Component` _[Component](#component)_ | | | | +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | +#### TrainingOperatorStatus -## datasciencecluster.opendatahub.io/kserve -Package kserve provides utility functions to config Kserve as the Controller for serving ML models on arbitrary frameworks +TrainingOperatorStatus defines the observed state of TrainingOperator -#### DefaultDeploymentMode -_Underlying type:_ _string_ +_Appears in:_ +- [TrainingOperator](#trainingoperator) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `phase` _string_ | | | | +| `observedGeneration` _integer_ | | | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#condition-v1-meta) array_ | | | | +#### TrustyAI + + + +TrustyAI is the Schema for the trustyais API + -_Validation:_ -- Pattern: `^(Serverless|RawDeployment)$` _Appears in:_ -- [Kserve](#kserve) +- [TrustyAIList](#trustyailist) +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `components.platform.opendatahub.io/v1alpha1` | | | +| `kind` _string_ | `TrustyAI` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[TrustyAISpec](#trustyaispec)_ | | | | +| `status` _[TrustyAIStatus](#trustyaistatus)_ | | | | -#### Kserve +#### TrustyAICommonSpec + -Kserve struct holds the configuration for the Kserve component. _Appears in:_ -- [Components](#components) +- [DSCTrustyAI](#dsctrustyai) +- [TrustyAISpec](#trustyaispec) | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `Component` _[Component](#component)_ | | | | -| `serving` _[ServingSpec](#servingspec)_ | Serving configures the KNative-Serving stack used for model serving. A Service
Mesh (Istio) is prerequisite, since it is used as networking layer. | | | -| `defaultDeploymentMode` _[DefaultDeploymentMode](#defaultdeploymentmode)_ | Configures the default deployment mode for Kserve. This can be set to 'Serverless' or 'RawDeployment'.
The value specified in this field will be used to set the default deployment mode in the 'inferenceservice-config' configmap for Kserve.
This field is optional. If no default deployment mode is specified, Kserve will use Serverless mode. | | Enum: [Serverless RawDeployment]
Pattern: `^(Serverless\|RawDeployment)$`
| +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | +#### TrustyAICommonStatus -## datasciencecluster.opendatahub.io/kueue +TrustyAICommonStatus defines the shared observed state of TrustyAI -#### Kueue +_Appears in:_ +- [DSCTrustyAIStatus](#dsctrustyaistatus) +- [TrustyAIStatus](#trustyaistatus) -Kueue struct holds the configuration for the Kueue component. +#### TrustyAIList -_Appears in:_ -- [Components](#components) -| Field | Description | Default | Validation | -| --- | --- | --- | --- | -| `Component` _[Component](#component)_ | | | | +TrustyAIList contains a list of TrustyAI -## datasciencecluster.opendatahub.io/modelmeshserving -Package modelmeshserving provides utility functions to config MoModelMesh, a general-purpose model serving management/routing layer +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `components.platform.opendatahub.io/v1alpha1` | | | +| `kind` _string_ | `TrustyAIList` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[TrustyAI](#trustyai) array_ | | | | -#### ModelMeshServing +#### TrustyAISpec -ModelMeshServing struct holds the configuration for the ModelMeshServing component. +TrustyAISpec defines the desired state of TrustyAI _Appears in:_ -- [Components](#components) +- [TrustyAI](#trustyai) | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `Component` _[Component](#component)_ | | | | +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | +#### TrustyAIStatus -## datasciencecluster.opendatahub.io/modelregistry -Package modelregistry provides utility functions to config ModelRegistry, an ML Model metadata repository service +TrustyAIStatus defines the observed state of TrustyAI -#### ModelRegistry +_Appears in:_ +- [TrustyAI](#trustyai) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `phase` _string_ | | | | +| `observedGeneration` _integer_ | | | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#condition-v1-meta) array_ | | | | + + +#### Workbenches +Workbenches is the Schema for the workbenches API _Appears in:_ -- [Components](#components) +- [WorkbenchesList](#workbencheslist) | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `Component` _[Component](#component)_ | | | | -| `registriesNamespace` _string_ | Namespace for model registries to be installed, configurable only once when model registry is enabled, defaults to "rhoai-model-registries" | rhoai-model-registries | MaxLength: 63
Pattern: `^([a-z0-9]([-a-z0-9]*[a-z0-9])?)?$`
| +| `apiVersion` _string_ | `components.platform.opendatahub.io/v1alpha1` | | | +| `kind` _string_ | `Workbenches` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[WorkbenchesSpec](#workbenchesspec)_ | | | | +| `status` _[WorkbenchesStatus](#workbenchesstatus)_ | | | | +#### WorkbenchesCommonSpec -## datasciencecluster.opendatahub.io/ray -Package ray provides utility functions to config Ray as part of the stack -which makes managing distributed compute infrastructure in the cloud easy and intuitive for Data Scientists -#### Ray +_Appears in:_ +- [DSCWorkbenches](#dscworkbenches) +- [WorkbenchesSpec](#workbenchesspec) -Ray struct holds the configuration for the Ray component. +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | +#### WorkbenchesCommonStatus -_Appears in:_ -- [Components](#components) -| Field | Description | Default | Validation | -| --- | --- | --- | --- | -| `Component` _[Component](#component)_ | | | | +WorkbenchesCommonStatus defines the shared observed state of Workbenches -## datasciencecluster.opendatahub.io/trainingoperator -Package trainingoperator provides utility functions to config trainingoperator as part of the stack -which makes managing distributed compute infrastructure in the cloud easy and intuitive for Data Scientists +_Appears in:_ +- [DSCWorkbenchesStatus](#dscworkbenchesstatus) +- [WorkbenchesStatus](#workbenchesstatus) -#### TrainingOperator +#### WorkbenchesList -TrainingOperator struct holds the configuration for the TrainingOperator component. 
+WorkbenchesList contains a list of Workbenches + -_Appears in:_ -- [Components](#components) | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `Component` _[Component](#component)_ | | | | +| `apiVersion` _string_ | `components.platform.opendatahub.io/v1alpha1` | | | +| `kind` _string_ | `WorkbenchesList` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[Workbenches](#workbenches) array_ | | | | +#### WorkbenchesSpec -## datasciencecluster.opendatahub.io/trustyai -Package trustyai provides utility functions to config TrustyAI, a bias/fairness and explainability toolkit +WorkbenchesSpec defines the desired state of Workbenches -#### TrustyAI + +_Appears in:_ +- [Workbenches](#workbenches) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `devFlags` _[DevFlags](#devflags)_ | Add developer fields | | | +#### WorkbenchesStatus -TrustyAI struct holds the configuration for the TrustyAI component. + + +WorkbenchesStatus defines the observed state of Workbenches _Appears in:_ -- [Components](#components) +- [Workbenches](#workbenches) | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `Component` _[Component](#component)_ | | | | +| `phase` _string_ | | | | +| `observedGeneration` _integer_ | | | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#condition-v1-meta) array_ | | | | + + + +## datasciencecluster.opendatahub.io/components + + + + @@ -400,17 +1837,17 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `dashboard` _[Dashboard](#dashboard)_ | Dashboard component configuration. | | | -| `workbenches` _[Workbenches](#workbenches)_ | Workbenches component configuration. | | | -| `modelmeshserving` _[ModelMeshServing](#modelmeshserving)_ | ModelMeshServing component configuration. | | | -| `datasciencepipelines` _[DataSciencePipelines](#datasciencepipelines)_ | DataServicePipeline component configuration.
Require OpenShift Pipelines Operator to be installed before enable component | | | -| `kserve` _[Kserve](#kserve)_ | Kserve component configuration.
Require OpenShift Serverless and OpenShift Service Mesh Operators to be installed before enable component
Does not support enabled ModelMeshServing at the same time | | | -| `kueue` _[Kueue](#kueue)_ | Kueue component configuration. | | | -| `codeflare` _[CodeFlare](#codeflare)_ | CodeFlare component configuration.
If CodeFlare Operator has been installed in the cluster, it should be uninstalled first before enabled component. | | | -| `ray` _[Ray](#ray)_ | Ray component configuration. | | | -| `trustyai` _[TrustyAI](#trustyai)_ | TrustyAI component configuration. | | | -| `trainingoperator` _[TrainingOperator](#trainingoperator)_ | Training Operator component configuration. | | | -| `modelregistry` _[ModelRegistry](#modelregistry)_ | ModelRegistry component configuration. | | | +| `dashboard` _[DSCDashboard](#dscdashboard)_ | Dashboard component configuration. | | | +| `workbenches` _[DSCWorkbenches](#dscworkbenches)_ | Workbenches component configuration. | | | +| `modelmeshserving` _[DSCModelMeshServing](#dscmodelmeshserving)_ | ModelMeshServing component configuration. | | | +| `datasciencepipelines` _[DSCDataSciencePipelines](#dscdatasciencepipelines)_ | DataSciencePipeline component configuration.
Requires OpenShift Pipelines Operator to be installed before enabling the component | | | +| `kserve` _[DSCKserve](#dsckserve)_ | Kserve component configuration.<br />
Requires OpenShift Serverless and OpenShift Service Mesh Operators to be installed before enabling the component<br />
Does not support enabling ModelMeshServing at the same time | | | +| `kueue` _[DSCKueue](#dsckueue)_ | Kueue component configuration. | | | +| `codeflare` _[DSCCodeFlare](#dsccodeflare)_ | CodeFlare component configuration.<br />
If CodeFlare Operator has been installed in the cluster, it should be uninstalled first before enabling component. | | | +| `ray` _[DSCRay](#dscray)_ | Ray component configuration. | | | +| `trustyai` _[DSCTrustyAI](#dsctrustyai)_ | TrustyAI component configuration. | | | +| `modelregistry` _[DSCModelRegistry](#dscmodelregistry)_ | ModelRegistry component configuration. | | | +| `trainingoperator` _[DSCTrainingOperator](#dsctrainingoperator)_ | Training Operator component configuration. | | | #### ComponentsStatus @@ -426,7 +1863,17 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `modelregistry` _[ModelRegistryStatus](#modelregistrystatus)_ | ModelRegistry component status | | | +| `dashboard` _[DSCDashboardStatus](#dscdashboardstatus)_ | Dashboard component status. | | | +| `workbenches` _[DSCWorkbenchesStatus](#dscworkbenchesstatus)_ | Workbenches component status. | | | +| `modelmeshserving` _[DSCModelMeshServingStatus](#dscmodelmeshservingstatus)_ | ModelMeshServing component status. | | | +| `datasciencepipelines` _[DSCDataSciencePipelinesStatus](#dscdatasciencepipelinesstatus)_ | DataSciencePipeline component status. | | | +| `kserve` _[DSCKserveStatus](#dsckservestatus)_ | Kserve component status. | | | +| `kueue` _[DSCKueueStatus](#dsckueuestatus)_ | Kueue component status. | | | +| `codeflare` _[DSCCodeFlareStatus](#dsccodeflarestatus)_ | CodeFlare component status. | | | +| `ray` _[DSCRayStatus](#dscraystatus)_ | Ray component status. | | | +| `trustyai` _[DSCTrustyAIStatus](#dsctrustyaistatus)_ | TrustyAI component status. | | | +| `modelregistry` _[DSCModelRegistryStatus](#dscmodelregistrystatus)_ | ModelRegistry component status. | | | +| `trainingoperator` _[DSCTrainingOperatorStatus](#dsctrainingoperatorstatus)_ | Training Operator component status. 
| | | #### ControlPlaneSpec @@ -499,6 +1946,7 @@ _Appears in:_ | --- | --- | --- | --- | | `phase` _string_ | Phase describes the Phase of DataScienceCluster reconciliation state
This is used by OLM UI to provide status information to the user | | | | `conditions` _Condition array_ | Conditions describes the state of the DataScienceCluster resource. | | | +| `observedGeneration` _integer_ | The generation observed by the deployment controller. | | | | `relatedObjects` _[ObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#objectreference-v1-core) array_ | RelatedObjects is a list of objects created and maintained by this operator.
Object references will be added to this list after they have been created AND found in the cluster. | | | | `errorMessage` _string_ | | | | | `installedComponents` _object (keys:string, values:boolean)_ | List of components with status if installed or not | | | @@ -551,7 +1999,9 @@ bindings with the Service Mesh. _Appears in:_ -- [Kserve](#kserve) +- [DSCKserve](#dsckserve) +- [KserveCommonSpec](#kservecommonspec) +- [KserveSpec](#kservespec) | Field | Description | Default | Validation | | --- | --- | --- | --- | @@ -561,26 +2011,11 @@ _Appears in:_ -## datasciencecluster.opendatahub.io/workbenches - -Package workbenches provides utility functions to config Workbenches to secure Jupyter Notebook in Kubernetes environments with support for OAuth - - +## dscinitialization.opendatahub.io/services -#### Workbenches - - - -Workbenches struct holds the configuration for the Workbenches component. -_Appears in:_ -- [Components](#components) - -| Field | Description | Default | Validation | -| --- | --- | --- | --- | -| `Component` _[Component](#component)_ | | | | @@ -628,7 +2063,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | | `applicationsNamespace` _string_ | Namespace for applications to be installed, non-configurable, default to "redhat-ods-applications" | redhat-ods-applications | MaxLength: 63
Pattern: `^([a-z0-9]([-a-z0-9]*[a-z0-9])?)?$`
| -| `monitoring` _[Monitoring](#monitoring)_ | Enable monitoring on specified namespace | | | +| `monitoring` _[DSCMonitoring](#dscmonitoring)_ | Enable monitoring on specified namespace | | | | `serviceMesh` _[ServiceMeshSpec](#servicemeshspec)_ | Configures Service Mesh as networking layer for Data Science Clusters components.
The Service Mesh is a mandatory prerequisite for single model serving (KServe) and
you should review this configuration if you are planning to use KServe.
For other components, it enhances user experience; e.g. it provides unified
authentication giving a Single Sign On experience. | | | | `trustedCABundle` _[TrustedCABundleSpec](#trustedcabundlespec)_ | When set to `Managed`, adds odh-trusted-ca-bundle Configmap to all namespaces that includes
cluster-wide Trusted CA Bundle in .data["ca-bundle.crt"].
Additionally, this fields allows admins to add custom CA bundles to the configmap using the .CustomCABundle field. | | | | `devFlags` _[DevFlags](#devflags)_ | Internal development useful field to test customizations.
This is not recommended to be used in production environment. | | | @@ -668,11 +2103,12 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `manifestsUri` _string_ | Custom manifests uri for odh-manifests | | | -| `logmode` _string_ | | production | Enum: [devel development prod production default]
| +| `manifestsUri` _string_ | ## DEPRECATED ## : ManifestsUri set on DSCI is not maintained.
Custom manifests uri for odh-manifests | | | +| `logmode` _string_ | ## DEPRECATED ##: Ignored, use LogLevel instead | production | Enum: [devel development prod production default]
| +| `logLevel` _string_ | Override Zap log level. Can be "debug", "info", "error" or a number (more verbose). | | | -#### Monitoring +#### TrustedCABundleSpec @@ -685,11 +2121,101 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `managementState` _[ManagementState](#managementstate)_ | Set to one of the following values:
- "Managed" : the operator is actively managing the component and trying to keep it active.
It will only upgrade the component if it is safe to do so.
- "Removed" : the operator is actively managing the component and will not install it,
or if it is installed, the operator will try to remove it. | | Enum: [Managed Removed]
| -| `namespace` _string_ | Namespace for monitoring if it is enabled | redhat-ods-monitoring | MaxLength: 63
Pattern: `^([a-z0-9]([-a-z0-9]*[a-z0-9])?)?$`
| +| `managementState` _[ManagementState](#managementstate)_ | managementState indicates whether and how the operator should manage customized CA bundle | Removed | Enum: [Managed Removed Unmanaged]
| +| `customCABundle` _string_ | A custom CA bundle that will be available for all components in the
Data Science Cluster (DSC). This bundle will be stored in odh-trusted-ca-bundle<br />
ConfigMap .data.odh-ca-bundle.crt . | | |
+
+
+## services.platform.opendatahub.io/v1alpha1
+
+Package v1alpha1 contains API Schema definitions for the services v1alpha1 API group
+
+### Resource Types
+- [Auth](#auth)
+- [AuthList](#authlist)
+- [Monitoring](#monitoring)
+- [MonitoringList](#monitoringlist)
+
+
+
+#### Auth
+
+
+
+Auth is the Schema for the auths API
+
+
+
+_Appears in:_
+- [AuthList](#authlist)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `services.platform.opendatahub.io/v1alpha1` | | |
+| `kind` _string_ | `Auth` | | |
+| `kind` _string_ | Kind is a string value representing the REST resource this object represents.<br />
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[AuthSpec](#authspec)_ | | | | +| `status` _[AuthStatus](#authstatus)_ | | | | + + +#### AuthList + + + +AuthList contains a list of Auth + + + + + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `services.platform.opendatahub.io/v1alpha1` | | | +| `kind` _string_ | `AuthList` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[Auth](#auth) array_ | | | | + + +#### AuthSpec + + + +AuthSpec defines the desired state of Auth + + + +_Appears in:_ +- [Auth](#auth) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `adminGroups` _string array_ | | | | +| `allowedGroups` _string array_ | | | | + + +#### AuthStatus + + + +AuthStatus defines the observed state of Auth + + + +_Appears in:_ +- [Auth](#auth) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `phase` _string_ | | | | +| `observedGeneration` _integer_ | | | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#condition-v1-meta) array_ | | | | + + +#### DSCMonitoring @@ -702,7 +2228,101 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `managementState` _[ManagementState](#managementstate)_ | managementState indicates whether and how the operator should manage customized CA bundle | Removed | Enum: [Managed Removed Unmanaged]
| -| `customCABundle` _string_ | A custom CA bundle that will be available for all components in the
Data Science Cluster(DSC). This bundle will be stored in odh-trusted-ca-bundle
ConfigMap .data.odh-ca-bundle.crt . | | | +| `managementState` _[ManagementState](#managementstate)_ | Set to one of the following values:

- "Managed" : the operator is actively managing the component and trying to keep it active.
It will only upgrade the component if it is safe to do so

- "Removed" : the operator is actively managing the component and will not install it,
or if it is installed, the operator will try to remove it | | Enum: [Managed Removed]
| +| `namespace` _string_ | monitoring spec exposed to DSCI api
Namespace for monitoring if it is enabled | opendatahub | MaxLength: 63
Pattern: `^([a-z0-9]([-a-z0-9]*[a-z0-9])?)?$`
| + + +#### Monitoring + + + +Monitoring is the Schema for the monitorings API + + + +_Appears in:_ +- [MonitoringList](#monitoringlist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `services.platform.opendatahub.io/v1alpha1` | | | +| `kind` _string_ | `Monitoring` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | |
+| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `spec` _[MonitoringSpec](#monitoringspec)_ | | | |
+| `status` _[MonitoringStatus](#monitoringstatus)_ | | | |
+
+
+#### MonitoringCommonSpec
+
+
+
+MonitoringCommonSpec defines the shared desired state of Monitoring
+
+
+
+_Appears in:_
+- [DSCMonitoring](#dscmonitoring)
+- [MonitoringSpec](#monitoringspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `namespace` _string_ | monitoring spec exposed to DSCI api<br />
Namespace for monitoring if it is enabled | opendatahub | MaxLength: 63
Pattern: `^([a-z0-9]([-a-z0-9]*[a-z0-9])?)?$`
| + + +#### MonitoringList + + + +MonitoringList contains a list of Monitoring + + + + + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `services.platform.opendatahub.io/v1alpha1` | | | +| `kind` _string_ | `MonitoringList` | | | +| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | | +| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[Monitoring](#monitoring) array_ | | | | + + +#### MonitoringSpec + + + +MonitoringSpec defines the desired state of Monitoring + + + +_Appears in:_ +- [Monitoring](#monitoring) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `namespace` _string_ | monitoring spec exposed to DSCI api
Namespace for monitoring if it is enabled | opendatahub | MaxLength: 63
Pattern: `^([a-z0-9]([-a-z0-9]*[a-z0-9])?)?$`
| + + +#### MonitoringStatus + + + +MonitoringStatus defines the observed state of Monitoring + + + +_Appears in:_ +- [Monitoring](#monitoring) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `phase` _string_ | | | | +| `observedGeneration` _integer_ | | | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.25/#condition-v1-meta) array_ | | | | +| `url` _string_ | | | | diff --git a/get_all_manifests.sh b/get_all_manifests.sh index 3a6620d4021..270644814d9 100755 --- a/get_all_manifests.sh +++ b/get_all_manifests.sh @@ -3,27 +3,27 @@ set -e GITHUB_URL="https://github.com" -# component: notebook, dsp, kserve, dashbaord, cf/ray/kueue/trainingoperator, trustyai, modelmesh, modelregistry. -# in the format of "repo-org:repo-name:ref-name:source-folder:target-folder". +# COMPONENT_MANIFESTS is a list of components repositories info to fetch the manifests +# in the format of "repo-org:repo-name:ref-name:source-folder" and key is the target folder under manifests/ declare -A COMPONENT_MANIFESTS=( - ["codeflare"]="red-hat-data-services:codeflare-operator:rhoai-2.16:config:codeflare" - ["ray"]="red-hat-data-services:kuberay:rhoai-2.16:ray-operator/config:ray" - ["kueue"]="red-hat-data-services:kueue:rhoai-2.16:config:kueue" - ["data-science-pipelines-operator"]="red-hat-data-services:data-science-pipelines-operator:rhoai-2.16:config:data-science-pipelines-operator" - ["odh-dashboard"]="red-hat-data-services:odh-dashboard:rhoai-2.16:manifests:dashboard" - ["kf-notebook-controller"]="red-hat-data-services:kubeflow:rhoai-2.16:components/notebook-controller/config:odh-notebook-controller/kf-notebook-controller" - ["odh-notebook-controller"]="red-hat-data-services:kubeflow:rhoai-2.16:components/odh-notebook-controller/config:odh-notebook-controller/odh-notebook-controller" - ["notebooks"]="red-hat-data-services:notebooks:rhoai-2.16:manifests:notebooks" - 
["trustyai"]="red-hat-data-services:trustyai-service-operator:rhoai-2.16:config:trustyai-service-operator" - ["model-mesh"]="red-hat-data-services:modelmesh-serving:rhoai-2.16:config:model-mesh" - ["odh-model-controller"]="red-hat-data-services:odh-model-controller:rhoai-2.16:config:odh-model-controller" - ["kserve"]="red-hat-data-services:kserve:rhoai-2.16:config:kserve" - ["modelregistry"]="red-hat-data-services:model-registry-operator:rhoai-2.16:config:model-registry-operator" - ["trainingoperator"]="red-hat-data-services:training-operator:rhoai-2.16:manifests:trainingoperator" + ["dashboard"]="red-hat-data-services:odh-dashboard:rhoai-2.17:manifests" + ["workbenches/kf-notebook-controller"]="red-hat-data-services:kubeflow:rhoai-2.17:components/notebook-controller/config" + ["workbenches/odh-notebook-controller"]="red-hat-data-services:kubeflow:rhoai-2.17:components/odh-notebook-controller/config" + ["workbenches/notebooks"]="red-hat-data-services:notebooks:rhoai-2.17:manifests" + ["modelmeshserving"]="red-hat-data-services:modelmesh-serving:rhoai-2.17:config" + ["kserve"]="red-hat-data-services:kserve:rhoai-2.17:config" + ["kueue"]="red-hat-data-services:kueue:rhoai-2.17:config" + ["codeflare"]="red-hat-data-services:codeflare-operator:rhoai-2.17:config" + ["ray"]="red-hat-data-services:kuberay:rhoai-2.17:ray-operator/config" + ["trustyai"]="red-hat-data-services:trustyai-service-operator:rhoai-2.17:config" + ["modelregistry"]="red-hat-data-services:model-registry-operator:rhoai-2.17:config" + ["trainingoperator"]="red-hat-data-services:training-operator:rhoai-2.17:manifests" + ["datasciencepipelines"]="red-hat-data-services:data-science-pipelines-operator:rhoai-2.17:config" + ["modelcontroller"]="red-hat-data-services:odh-model-controller:rhoai-2.17:config" ) # Allow overwriting repo using flags component=repo -pattern="^[a-zA-Z0-9_.-]+:[a-zA-Z0-9_.-]+:[a-zA-Z0-9_.-]+:[a-zA-Z0-9_./-]+:[a-zA-Z0-9_./-]+$" 
+pattern="^[a-zA-Z0-9_.-]+:[a-zA-Z0-9_.-]+:[a-zA-Z0-9_.-]+:[a-zA-Z0-9_./-]+$" if [ "$#" -ge 1 ]; then for arg in "$@"; do if [[ $arg == --* ]]; then @@ -31,7 +31,7 @@ if [ "$#" -ge 1 ]; then IFS="=" read -r key value <<< "$arg" if [[ -n "${COMPONENT_MANIFESTS[$key]}" ]]; then if [[ ! $value =~ $pattern ]]; then - echo "ERROR: The value '$value' does not match the expected format 'repo-org:repo-name:branch-name:source-folder:target-folder'." + echo "ERROR: The value '$value' does not match the expected format 'repo-org:repo-name:ref-name:source-folder'." continue fi COMPONENT_MANIFESTS["$key"]=$value @@ -77,7 +77,7 @@ for key in "${!COMPONENT_MANIFESTS[@]}"; do repo_name="${repo_info[1]}" repo_ref="${repo_info[2]}" source_path="${repo_info[3]}" - target_path="${repo_info[4]}" + target_path="${key}" repo_url="${GITHUB_URL}/${repo_org}/${repo_name}" repo_dir=${TMP_DIR}/${key} diff --git a/go.mod b/go.mod index 3a04c5432eb..b0de9b689cf 100644 --- a/go.mod +++ b/go.mod @@ -4,8 +4,11 @@ go 1.22.0 require ( github.com/blang/semver/v4 v4.0.0 - github.com/go-logr/logr v1.4.1 + github.com/davecgh/go-spew v1.1.1 + github.com/go-logr/logr v1.4.2 github.com/hashicorp/go-multierror v1.1.1 + github.com/itchyny/gojq v0.12.16 + github.com/onsi/ginkgo v1.16.4 github.com/onsi/ginkgo/v2 v2.14.0 github.com/onsi/gomega v1.30.0 github.com/openshift/addon-operator/apis v0.0.0-20230919043633-820afed15881 @@ -14,16 +17,21 @@ require ( github.com/operator-framework/api v0.18.0 github.com/pkg/errors v0.9.1 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.68.0 + github.com/prometheus/client_golang v1.20.5 + github.com/rs/xid v1.6.0 github.com/spf13/afero v1.10.0 - github.com/stretchr/testify v1.8.4 + github.com/stretchr/testify v1.9.0 go.uber.org/zap v1.26.0 - golang.org/x/exp v0.0.0-20231006140011-7918f672742d + golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 gopkg.in/yaml.v2 v2.4.0 + gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.29.2 k8s.io/apiextensions-apiserver 
v0.29.2 k8s.io/apimachinery v0.29.2 k8s.io/client-go v11.0.0+incompatible + k8s.io/klog/v2 v2.110.1 k8s.io/kube-aggregator v0.28.3 + k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 sigs.k8s.io/controller-runtime v0.17.5 sigs.k8s.io/kustomize/api v0.13.4 sigs.k8s.io/kustomize/kyaml v0.16.0 @@ -32,10 +40,9 @@ require ( require ( github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/evanphx/json-patch v5.6.0+incompatible // indirect + github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.8.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-errors/errors v1.4.2 // indirect @@ -46,31 +53,31 @@ require ( github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.4 // indirect + github.com/golang/protobuf v1.5.3 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/google/uuid v1.6.0 // indirect - github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/imdario/mergo v0.3.13 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/hashicorp/errwrap v1.0.0 // indirect + github.com/imdario/mergo v0.3.12 // indirect + github.com/itchyny/timefmt-go v0.1.6 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.9 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect 
github.com/mailru/easyjson v0.7.7 // indirect - github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/nxadm/tail v1.4.8 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.18.0 // indirect - github.com/prometheus/client_model v0.5.0 // indirect - github.com/prometheus/common v0.45.0 // indirect - github.com/prometheus/procfs v0.12.0 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/rhobs/obo-prometheus-operator/pkg/apis/monitoring v0.61.1-rhobs1 // indirect - github.com/rogpeppe/go-internal v1.11.0 // indirect - github.com/sergi/go-diff v1.2.0 // indirect github.com/sirupsen/logrus v1.9.2 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/xlab/treeprint v1.2.0 // indirect @@ -84,13 +91,11 @@ require ( golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.24.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/protobuf v1.33.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect + gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect k8s.io/component-base v0.29.2 // indirect - k8s.io/klog/v2 v2.110.1 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect - k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/go.sum b/go.sum index 82c125c0545..06e0c692faa 100644 --- a/go.sum +++ 
b/go.sum @@ -48,8 +48,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -75,9 +75,8 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= -github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro= 
github.com/evanphx/json-patch/v5 v5.8.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -98,8 +97,8 @@ github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbV github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -151,9 +150,8 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree 
v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= @@ -197,9 +195,8 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= @@ -207,9 +204,8 @@ github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97Dwqy github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= -github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-multierror v1.1.1 
h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -217,8 +213,12 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= -github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/itchyny/gojq v0.12.16 h1:yLfgLxhIr/6sJNVmYfQjTIv0jGctu6/DgDoivmxTr7g= +github.com/itchyny/gojq v0.12.16/go.mod h1:6abHbdC2uB9ogMS38XsErnfqJ94UlngIJGlRAIj4jTM= +github.com/itchyny/timefmt-go v0.1.6 h1:ia3s54iciXDdzWzwaVKXZPbiXzxxnv1SPGFfM/myJ5Q= +github.com/itchyny/timefmt-go v0.1.6/go.mod h1:RRDZYC5s9ErkjQvTvvU7keJjxUYzIISJGxm9/mAERQg= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -228,6 +228,8 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod 
h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -238,13 +240,13 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= -github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -261,11 +263,13 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= @@ -316,24 +320,25 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.68.0 h1:yl9ceUSUBo9woQIO+8eoWpcxZkdZgm89g+rVvu37TUw= github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.68.0/go.mod h1:9Uuu3pEU2jB8PwuqkHvegQ0HV/BlZRJUyfTYAqfdVF8= -github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= -github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= 
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= -github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= -github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rhobs/obo-prometheus-operator/pkg/apis/monitoring v0.61.1-rhobs1 h1:sI4OJX9/XkSd8O6/sY4cxJPiuwM1RHv3qygIbDpBoAY= github.com/rhobs/obo-prometheus-operator/pkg/apis/monitoring v0.61.1-rhobs1/go.mod h1:u8ctCYj9Nq8gkMLfNLxHoslu8SEGrqXP2gFiMUNsn9g= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod 
h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= -github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= -github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= +github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= +github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sirupsen/logrus v1.9.2 h1:oxx1eChJGI6Uks2ZC4W1zpLlVgqB8ner4EuQwV4Ik1Y= github.com/sirupsen/logrus v1.9.2/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= @@ -344,8 +349,9 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= @@ -355,8 +361,8 @@ 
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -401,8 +407,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= +golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod 
h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -782,8 +788,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -794,6 +800,7 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -805,7 +812,6 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= 
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -848,8 +854,8 @@ k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdz k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/main.go b/main.go index 8d12126c5ec..127008941e1 100644 --- a/main.go +++ b/main.go @@ -24,16 +24,21 @@ import ( addonv1alpha1 "github.com/openshift/addon-operator/apis/addons/v1alpha1" ocappsv1 "github.com/openshift/api/apps/v1" //nolint:importas //reason: conflicts with 
appsv1 "k8s.io/api/apps/v1" buildv1 "github.com/openshift/api/build/v1" + configv1 "github.com/openshift/api/config/v1" + consolev1 "github.com/openshift/api/console/v1" imagev1 "github.com/openshift/api/image/v1" oauthv1 "github.com/openshift/api/oauth/v1" operatorv1 "github.com/openshift/api/operator/v1" routev1 "github.com/openshift/api/route/v1" + securityv1 "github.com/openshift/api/security/v1" + templatev1 "github.com/openshift/api/template/v1" userv1 "github.com/openshift/api/user/v1" ofapiv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" ofapiv2 "github.com/operator-framework/api/pkg/operators/v2" - monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" admissionregistrationv1 "k8s.io/api/admissionregistration/v1" appsv1 "k8s.io/api/apps/v1" + authorizationv1 "k8s.io/api/authorization/v1" corev1 "k8s.io/api/core/v1" networkingv1 "k8s.io/api/networking/v1" rbacv1 "k8s.io/api/rbac/v1" @@ -49,24 +54,48 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/healthz" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" "sigs.k8s.io/controller-runtime/pkg/manager" ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics/server" ctrlwebhook "sigs.k8s.io/controller-runtime/pkg/webhook" + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" featurev1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" + serviceApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/services/v1alpha1" "github.com/opendatahub-io/opendatahub-operator/v2/controllers/certconfigmapgenerator" dscctrl 
"github.com/opendatahub-io/opendatahub-operator/v2/controllers/datasciencecluster" dscictrl "github.com/opendatahub-io/opendatahub-operator/v2/controllers/dscinitialization" "github.com/opendatahub-io/opendatahub-operator/v2/controllers/secretgenerator" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/services/auth" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/setupcontroller" "github.com/opendatahub-io/opendatahub-operator/v2/controllers/webhook" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" + cr "github.com/opendatahub-io/opendatahub-operator/v2/pkg/componentsregistry" + odhClient "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/client" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/logger" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/services/gc" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/upgrade" + + _ "github.com/opendatahub-io/opendatahub-operator/v2/controllers/components/codeflare" + _ "github.com/opendatahub-io/opendatahub-operator/v2/controllers/components/dashboard" + _ "github.com/opendatahub-io/opendatahub-operator/v2/controllers/components/datasciencepipelines" + _ "github.com/opendatahub-io/opendatahub-operator/v2/controllers/components/kserve" + _ "github.com/opendatahub-io/opendatahub-operator/v2/controllers/components/kueue" + _ "github.com/opendatahub-io/opendatahub-operator/v2/controllers/components/modelcontroller" + _ "github.com/opendatahub-io/opendatahub-operator/v2/controllers/components/modelmeshserving" + _ "github.com/opendatahub-io/opendatahub-operator/v2/controllers/components/modelregistry" + _ "github.com/opendatahub-io/opendatahub-operator/v2/controllers/components/ray" + _ "github.com/opendatahub-io/opendatahub-operator/v2/controllers/components/trainingoperator" + _ 
"github.com/opendatahub-io/opendatahub-operator/v2/controllers/components/trustyai" + _ "github.com/opendatahub-io/opendatahub-operator/v2/controllers/components/workbenches" ) -const controllerNum = 4 // we should keep this updated if we have new controllers to add +const controllerNum = 20 // we should keep this updated if we have new controllers to add var ( scheme = runtime.NewScheme() @@ -74,6 +103,8 @@ var ( ) func init() { //nolint:gochecknoinits + utilruntime.Must(componentApi.AddToScheme(scheme)) + utilruntime.Must(serviceApi.AddToScheme(scheme)) // +kubebuilder:scaffold:scheme utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(dsciv1.AddToScheme(scheme)) @@ -95,8 +126,17 @@ func init() { //nolint:gochecknoinits utilruntime.Must(apiextensionsv1.AddToScheme(scheme)) utilruntime.Must(admissionregistrationv1.AddToScheme(scheme)) utilruntime.Must(apiregistrationv1.AddToScheme(scheme)) - utilruntime.Must(monitoringv1.AddToScheme(scheme)) - utilruntime.Must(operatorv1.Install(scheme)) // here also add configv1.Install(scheme) no need add configv1 explicitly + utilruntime.Must(promv1.AddToScheme(scheme)) + utilruntime.Must(operatorv1.Install(scheme)) + utilruntime.Must(consolev1.AddToScheme(scheme)) + utilruntime.Must(securityv1.Install(scheme)) + utilruntime.Must(templatev1.Install(scheme)) +} + +func initComponents(_ context.Context, p cluster.Platform) error { + return cr.ForEach(func(ch cr.ComponentHandler) error { + return ch.Init(p) + }) } func main() { //nolint:funlen,maintidx @@ -120,12 +160,16 @@ func main() { //nolint:funlen,maintidx flag.StringVar(&operatorName, "operator-name", "opendatahub", "The name of the operator") flag.StringVar(&logmode, "log-mode", "", "Log mode ('', prod, devel), default to ''") + opts := zap.Options{} + opts.BindFlags(flag.CommandLine) + flag.Parse() - ctrl.SetLogger(logger.ConfigLoggers(logmode)) + ctrl.SetLogger(logger.NewLogger(logmode, &opts)) // root context ctx := ctrl.SetupSignalHandler() + ctx = 
logf.IntoContext(ctx, setupLog) // Create new uncached client to run initial setup setupCfg, err := config.GetConfig() if err != nil { @@ -141,14 +185,21 @@ func main() { //nolint:funlen,maintidx setupLog.Error(err, "error getting client for setup") os.Exit(1) } - // Get operator platform - release, err := cluster.GetRelease(ctx, setupClient) + + err = cluster.Init(ctx, setupClient) if err != nil { - setupLog.Error(err, "error getting release") + setupLog.Error(err, "unable to initialize cluster config") os.Exit(1) } + + // Get operator platform + release := cluster.GetRelease() platform := release.Name - setupLog.Info("running on", "platform", platform) + + if err := initComponents(ctx, platform); err != nil { + setupLog.Error(err, "unable to init components") + os.Exit(1) + } secretCache := createSecretCacheConfig(platform) deploymentCache := createDeploymentCacheConfig(platform) @@ -175,8 +226,14 @@ func main() { //nolint:funlen,maintidx &operatorv1.IngressController{}: { Field: fields.Set{"metadata.name": "default"}.AsSelector(), }, + // For authentication CR "cluster" + &configv1.Authentication{}: { + Field: fields.Set{"metadata.name": cluster.ClusterAuthenticationObj}.AsSelector(), + }, // for prometheus and black-box deployment and ones we owns &appsv1.Deployment{}: {Namespaces: deploymentCache}, + // kueue need prometheusrules + &promv1.PrometheusRule{}: {Namespaces: deploymentCache}, }, } @@ -202,6 +259,17 @@ func main() { //nolint:funlen,maintidx // if you are doing or is intended to do any operation such as perform cleanups // after the manager stops then its usage might be unsafe. // LeaderElectionReleaseOnCancel: true, + Client: client.Options{ + Cache: &client.CacheOptions{ + DisableFor: []client.Object{ + resources.GvkToUnstructured(gvk.OpenshiftIngress), + &authorizationv1.SelfSubjectRulesReview{}, + }, + // Set it to true so the cache-backed client reads unstructured objects + // or lists from the cache instead of a live lookup. 
+ Unstructured: true, + }, + }, }) if err != nil { setupLog.Error(err, "unable to start manager") @@ -210,10 +278,15 @@ func main() { //nolint:funlen,maintidx webhook.Init(mgr) + oc, err := odhClient.NewFromManager(mgr) + if err != nil { + setupLog.Error(err, "unable to create client") + os.Exit(1) + } + if err = (&dscictrl.DSCInitializationReconciler{ - Client: mgr.GetClient(), + Client: oc, Scheme: mgr.GetScheme(), - Log: logger.LogWithLevel(ctrl.Log.WithName(operatorName).WithName("controllers").WithName("DSCInitialization"), logmode), Recorder: mgr.GetEventRecorderFor("dscinitialization-controller"), ApplicationsNamespace: dscApplicationsNamespace, }).SetupWithManager(ctx, mgr); err != nil { @@ -222,38 +295,64 @@ func main() { //nolint:funlen,maintidx } if err = (&dscctrl.DataScienceClusterReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Log: logger.LogWithLevel(ctrl.Log.WithName(operatorName).WithName("controllers").WithName("DataScienceCluster"), logmode), - DataScienceCluster: &dscctrl.DataScienceClusterConfig{ - DSCISpec: &dsciv1.DSCInitializationSpec{ - ApplicationsNamespace: dscApplicationsNamespace, - }, - }, + Client: oc, + Scheme: mgr.GetScheme(), Recorder: mgr.GetEventRecorderFor("datasciencecluster-controller"), }).SetupWithManager(ctx, mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "DataScienceCluster") os.Exit(1) } + if err = (&setupcontroller.SetupControllerReconciler{ + Client: oc, + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "SetupController") + os.Exit(1) + } + if err = (&secretgenerator.SecretGeneratorReconciler{ - Client: mgr.GetClient(), + Client: oc, Scheme: mgr.GetScheme(), - Log: logger.LogWithLevel(ctrl.Log.WithName(operatorName).WithName("controllers").WithName("SecretGenerator"), logmode), - }).SetupWithManager(mgr); err != nil { + }).SetupWithManager(ctx, mgr); err != nil { setupLog.Error(err, "unable to create 
controller", "controller", "SecretGenerator") os.Exit(1) } if err = (&certconfigmapgenerator.CertConfigmapGeneratorReconciler{ - Client: mgr.GetClient(), + Client: oc, Scheme: mgr.GetScheme(), - Log: logger.LogWithLevel(ctrl.Log.WithName(operatorName).WithName("controllers").WithName("CertConfigmapGenerator"), logmode), - }).SetupWithManager(mgr); err != nil { + }).SetupWithManager(ctx, mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "CertConfigmapGenerator") os.Exit(1) } + ons, err := cluster.GetOperatorNamespace() + if err != nil { + setupLog.Error(err, "unable to determine Operator Namespace") + os.Exit(1) + } + + gc.Instance = gc.New( + oc, + ons, + gc.WithUnremovables(gvk.CustomResourceDefinition, gvk.Lease), + ) + + err = mgr.Add(gc.Instance) + if err != nil { + setupLog.Error(err, "unable to register GC service") + os.Exit(1) + } + + // Initialize component reconcilers + if err = CreateComponentReconcilers(ctx, mgr); err != nil { + os.Exit(1) + } + + if err := auth.NewServiceReconciler(ctx, mgr); err != nil { + os.Exit(1) + } + // get old release version before we create default DSCI CR oldReleaseVersion, _ := upgrade.GetDeployedRelease(ctx, setupClient) @@ -312,6 +411,10 @@ func main() { //nolint:funlen,maintidx setupLog.Error(err, "unable to set up ready check") os.Exit(1) } + if err := initComponents(ctx, platform); err != nil { + setupLog.Error(err, "unable to init components") + os.Exit(1) + } setupLog.Info("starting manager") if err := mgr.Start(ctx); err != nil { @@ -353,6 +456,12 @@ func createDeploymentCacheConfig(platform cluster.Platform) map[string]cache.Con default: namespaceConfigs["opendatahub"] = cache.Config{} } - return namespaceConfigs } + +func CreateComponentReconcilers(ctx context.Context, mgr manager.Manager) error { + // TODO: can it be moved to initComponents? 
+ return cr.ForEach(func(ch cr.ComponentHandler) error { + return ch.NewComponentReconciler(ctx, mgr) + }) +} diff --git a/pkg/cluster/cert.go b/pkg/cluster/cert.go index 57daf9c87ae..10eb557cd3d 100644 --- a/pkg/cluster/cert.go +++ b/pkg/cluster/cert.go @@ -19,9 +19,17 @@ import ( corev1 "k8s.io/api/core/v1" k8serr "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" ) +const IngressNamespace = "openshift-ingress" + +var IngressControllerName = types.NamespacedName{ + Namespace: "openshift-ingress-operator", + Name: "default", +} + func CreateSelfSignedCertificate(ctx context.Context, c client.Client, secretName, domain, namespace string, metaOptions ...MetaOptions) error { certSecret, err := GenerateSelfSignedCertificateAsSecret(secretName, domain, namespace) if err != nil { @@ -123,16 +131,25 @@ func generateCertificate(addr string) ([]byte, []byte, error) { return certBuffer.Bytes(), keyBuffer.Bytes(), nil } -// PropagateDefaultIngressCertificate copies ingress cert secrets from openshift-ingress ns to given namespace. 
-func PropagateDefaultIngressCertificate(ctx context.Context, c client.Client, secretName, namespace string) error { +func FindDefaultIngressSecret(ctx context.Context, c client.Client) (*corev1.Secret, error) { defaultIngressCtrl, err := FindAvailableIngressController(ctx, c) if err != nil { - return fmt.Errorf("failed to get ingress controller: %w", err) + return nil, fmt.Errorf("failed to get ingress controller: %w", err) } defaultIngressCertName := GetDefaultIngressCertSecretName(defaultIngressCtrl) - defaultIngressSecret, err := GetSecret(ctx, c, "openshift-ingress", defaultIngressCertName) + defaultIngressSecret, err := GetSecret(ctx, c, IngressNamespace, defaultIngressCertName) + if err != nil { + return nil, err + } + + return defaultIngressSecret, nil +} + +// PropagateDefaultIngressCertificate copies ingress cert secrets from openshift-ingress ns to given namespace. +func PropagateDefaultIngressCertificate(ctx context.Context, c client.Client, secretName, namespace string) error { + defaultIngressSecret, err := FindDefaultIngressSecret(ctx, c) if err != nil { return err } @@ -143,7 +160,7 @@ func PropagateDefaultIngressCertificate(ctx context.Context, c client.Client, se func FindAvailableIngressController(ctx context.Context, c client.Client) (*operatorv1.IngressController, error) { defaultIngressCtrl := &operatorv1.IngressController{} - err := c.Get(ctx, client.ObjectKey{Namespace: "openshift-ingress-operator", Name: "default"}, defaultIngressCtrl) + err := c.Get(ctx, IngressControllerName, defaultIngressCtrl) if err != nil { return nil, fmt.Errorf("error getting ingresscontroller resource :%w", err) } diff --git a/pkg/cluster/cluster_config.go b/pkg/cluster/cluster_config.go index 2e0c2217030..08a1c445bfe 100644 --- a/pkg/cluster/cluster_config.go +++ b/pkg/cluster/cluster_config.go @@ -8,6 +8,7 @@ import ( "strings" "github.com/blang/semver/v4" + "github.com/go-logr/logr" configv1 "github.com/openshift/api/config/v1" 
"github.com/operator-framework/api/pkg/lib/version" ofapiv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" @@ -17,11 +18,63 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" ) -// +kubebuilder:rbac:groups="config.openshift.io",resources=ingresses,verbs=get +type Platform string + +// Release includes information on operator version and platform +// +kubebuilder:object:generate=true +type Release struct { + Name Platform `json:"name,omitempty"` + Version version.OperatorVersion `json:"version,omitempty"` +} + +var clusterConfig struct { + Namespace string + Release Release +} + +// Init initializes cluster configuration variables on startup +// init() won't work since it is needed to check the error. +func Init(ctx context.Context, cli client.Client) error { + var err error + log := logf.FromContext(ctx) + + clusterConfig.Namespace, err = getOperatorNamespace() + if err != nil { + log.Error(err, "unable to find operator namespace") + // not fatal, fallback to "" + } + + clusterConfig.Release, err = getRelease(ctx, cli) + if err != nil { + return err + } + + printClusterConfig(log) + + return nil +} + +func printClusterConfig(log logr.Logger) { + log.Info("Cluster config", + "Namespace", clusterConfig.Namespace, + "Release", clusterConfig.Release) +} + +func GetOperatorNamespace() (string, error) { + if clusterConfig.Namespace == "" { + return "", errors.New("unable to find operator namespace") + } + return clusterConfig.Namespace, nil +} + +func GetRelease() Release { + return clusterConfig.Release +} func GetDomain(ctx context.Context, c client.Client) (string, error) { ingress := &unstructured.Unstructured{} @@ -42,7 +95,7 @@ func GetDomain(ctx context.Context, c client.Client) (string, error) { return domain, err } -func 
GetOperatorNamespace() (string, error) { +func getOperatorNamespace() (string, error) { operatorNS, exist := os.LookupEnv("OPERATOR_NAMESPACE") if exist && operatorNS != "" { return operatorNS, nil @@ -84,8 +137,6 @@ func GetClusterServiceVersion(ctx context.Context, c client.Client, namespace st gvk.ClusterServiceVersion.Kind) } -type Platform string - // detectSelfManaged detects if it is Self Managed Rhoai or OpenDataHub. func detectSelfManaged(ctx context.Context, cli client.Client) (Platform, error) { variants := map[string]Platform{ @@ -141,14 +192,7 @@ func getPlatform(ctx context.Context, cli client.Client) (Platform, error) { } } -// Release includes information on operator version and platform -// +kubebuilder:object:generate=true -type Release struct { - Name Platform `json:"name,omitempty"` - Version version.OperatorVersion `json:"version,omitempty"` -} - -func GetRelease(ctx context.Context, cli client.Client) (Release, error) { +func getRelease(ctx context.Context, cli client.Client) (Release, error) { initRelease := Release{ // dummy version set to name "", version 0.0.0 Version: version.OperatorVersion{ @@ -196,6 +240,7 @@ func IsDefaultAuthMethod(ctx context.Context, cli client.Client) (bool, error) { } return false, err } + // for now, HPC support "" "None" "IntegratedOAuth"(default) "OIDC" // other offering support "" "None" "IntegratedOAuth"(default) // we only create userGroups for "IntegratedOAuth" or "" and leave other or new supported type value in the future diff --git a/pkg/cluster/const.go b/pkg/cluster/const.go index 29226d75fb4..6a6562bff07 100644 --- a/pkg/cluster/const.go +++ b/pkg/cluster/const.go @@ -9,8 +9,10 @@ const ( OpenDataHub Platform = "Open Data Hub" // Unknown indicates that operator is not deployed using OLM. Unknown Platform = "" + // DefaultNotebooksNamespace defines default namespace for notebooks. DefaultNotebooksNamespace = "rhods-notebooks" + // Default cluster-scope Authentication CR name. 
ClusterAuthenticationObj = "cluster" ) diff --git a/pkg/cluster/gvk/gvk.go b/pkg/cluster/gvk/gvk.go index 0415304abe2..06a7f199699 100644 --- a/pkg/cluster/gvk/gvk.go +++ b/pkg/cluster/gvk/gvk.go @@ -1,6 +1,14 @@ package gvk -import "k8s.io/apimachinery/pkg/runtime/schema" +import ( + appsv1 "k8s.io/api/apps/v1" + coordinationv1 "k8s.io/api/coordination/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" +) var ( ClusterServiceVersion = schema.GroupVersionKind{ @@ -21,11 +29,35 @@ var ( } Deployment = schema.GroupVersionKind{ - Group: "apps", - Version: "v1", + Group: appsv1.SchemeGroupVersion.Group, + Version: appsv1.SchemeGroupVersion.Version, Kind: "Deployment", } + ClusterRole = schema.GroupVersionKind{ + Group: "rbac.authorization.k8s.io", + Version: "v1", + Kind: "ClusterRole", + } + + RoleBinding = schema.GroupVersionKind{ + Group: rbacv1.SchemeGroupVersion.Group, + Version: rbacv1.SchemeGroupVersion.Version, + Kind: "RoleBinding", + } + + Secret = schema.GroupVersionKind{ + Group: corev1.SchemeGroupVersion.Group, + Version: corev1.SchemeGroupVersion.Version, + Kind: "Secret", + } + + ConfigMap = schema.GroupVersionKind{ + Group: corev1.SchemeGroupVersion.Group, + Version: corev1.SchemeGroupVersion.Version, + Kind: "ConfigMap", + } + KnativeServing = schema.GroupVersionKind{ Group: "operator.knative.dev", Version: "v1beta1", @@ -56,6 +88,12 @@ var ( Kind: "OdhDocument", } + AcceleratorProfile = schema.GroupVersionKind{ + Group: "dashboard.opendatahub.io", + Version: "v1", + Kind: "AcceleratorProfile", + } + OdhQuickStart = schema.GroupVersionKind{ Group: "console.openshift.io", Version: "v1", @@ -67,4 +105,118 @@ var ( Version: "v1alpha", Kind: "OdhDashboardConfig", } + + Dashboard = schema.GroupVersionKind{ + Group: componentApi.GroupVersion.Group, + Version: componentApi.GroupVersion.Version, + Kind: 
componentApi.DashboardKind, + } + + Workbenches = schema.GroupVersionKind{ + Group: componentApi.GroupVersion.Group, + Version: componentApi.GroupVersion.Version, + Kind: componentApi.WorkbenchesKind, + } + + ModelController = schema.GroupVersionKind{ + Group: componentApi.GroupVersion.Group, + Version: componentApi.GroupVersion.Version, + Kind: componentApi.ModelControllerKind, + } + + ModelMeshServing = schema.GroupVersionKind{ + Group: componentApi.GroupVersion.Group, + Version: componentApi.GroupVersion.Version, + Kind: componentApi.ModelMeshServingKind, + } + + DataSciencePipelines = schema.GroupVersionKind{ + Group: componentApi.GroupVersion.Group, + Version: componentApi.GroupVersion.Version, + Kind: componentApi.DataSciencePipelinesKind, + } + + Kserve = schema.GroupVersionKind{ + Group: componentApi.GroupVersion.Group, + Version: componentApi.GroupVersion.Version, + Kind: componentApi.KserveKind, + } + + Kueue = schema.GroupVersionKind{ + Group: componentApi.GroupVersion.Group, + Version: componentApi.GroupVersion.Version, + Kind: componentApi.KueueKind, + } + + CodeFlare = schema.GroupVersionKind{ + Group: componentApi.GroupVersion.Group, + Version: componentApi.GroupVersion.Version, + Kind: componentApi.CodeFlareKind, + } + + Ray = schema.GroupVersionKind{ + Group: componentApi.GroupVersion.Group, + Version: componentApi.GroupVersion.Version, + Kind: componentApi.RayKind, + } + + TrustyAI = schema.GroupVersionKind{ + Group: componentApi.GroupVersion.Group, + Version: componentApi.GroupVersion.Version, + Kind: componentApi.TrustyAIKind, + } + + ModelRegistry = schema.GroupVersionKind{ + Group: componentApi.GroupVersion.Group, + Version: componentApi.GroupVersion.Version, + Kind: componentApi.ModelRegistryKind, + } + + TrainingOperator = schema.GroupVersionKind{ + Group: componentApi.GroupVersion.Group, + Version: componentApi.GroupVersion.Version, + Kind: componentApi.TrainingOperatorKind, + } + + CustomResourceDefinition = schema.GroupVersionKind{ + 
Group: "apiextensions.k8s.io", + Version: "v1", + Kind: "CustomResourceDefinition", + } + + ServiceMeshMember = schema.GroupVersionKind{ + Group: "maistra.io", + Version: "v1", + Kind: "ServiceMeshMember", + } + + Lease = schema.GroupVersionKind{ + Group: coordinationv1.SchemeGroupVersion.Group, + Version: coordinationv1.SchemeGroupVersion.Version, + Kind: "Lease", + } + + EnvoyFilter = schema.GroupVersionKind{ + Group: "networking.istio.io", + Version: "v1alpha3", + Kind: "EnvoyFilter", + } + + AuthorizationPolicy = schema.GroupVersionKind{ + Group: "security.istio.io", + Version: "v1", + Kind: "AuthorizationPolicy", + } + + Gateway = schema.GroupVersionKind{ + Group: "networking.istio.io", + Version: "v1beta1", + Kind: "Gateway", + } + + Auth = schema.GroupVersionKind{ + Group: "services.platform.opendatahub.io", + Version: "v1alpha1", + Kind: "Auth", + } ) diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go index 00663de6057..9435e7b4df0 100644 --- a/pkg/cluster/resources.go +++ b/pkg/cluster/resources.go @@ -13,8 +13,8 @@ import ( k8serr "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" ) @@ -29,7 +29,7 @@ func UpdatePodSecurityRolebinding(ctx context.Context, cli client.Client, namesp for _, sa := range serviceAccountsList { // Append serviceAccount if not added already - if !subjectExistInRoleBinding(foundRoleBinding.Subjects, sa, namespace) { + if !SubjectExistInRoleBinding(foundRoleBinding.Subjects, sa, namespace) { foundRoleBinding.Subjects = append(foundRoleBinding.Subjects, rbacv1.Subject{ Kind: rbacv1.ServiceAccountKind, Name: sa, @@ -45,14 +45,14 @@ func UpdatePodSecurityRolebinding(ctx context.Context, cli client.Client, namesp return nil } -// Internal function used by 
UpdatePodSecurityRolebinding() -// Return whether Rolebinding matching service account and namespace exists or not. -func subjectExistInRoleBinding(subjectList []rbacv1.Subject, serviceAccountName, namespace string) bool { +// SubjectExistInRoleBinding return whether RoleBinding matching service account and namespace exists or not. +func SubjectExistInRoleBinding(subjectList []rbacv1.Subject, serviceAccountName, namespace string) bool { for _, subject := range subjectList { if subject.Name == serviceAccountName && subject.Namespace == namespace { return true } } + return false } @@ -71,7 +71,7 @@ func CreateSecret(ctx context.Context, cli client.Client, name, namespace string } foundSecret := &corev1.Secret{} - err := cli.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, foundSecret) + err := cli.Get(ctx, client.ObjectKeyFromObject(desiredSecret), foundSecret) if err != nil { if k8serr.IsNotFound(err) { err = cli.Create(ctx, desiredSecret) @@ -82,6 +82,7 @@ func CreateSecret(ctx context.Context, cli client.Client, name, namespace string return err } } + return nil } @@ -98,11 +99,7 @@ func CreateOrUpdateConfigMap(ctx context.Context, c client.Client, desiredCfgMap } existingCfgMap := &corev1.ConfigMap{} - err := c.Get(ctx, client.ObjectKey{ - Name: desiredCfgMap.Name, - Namespace: desiredCfgMap.Namespace, - }, existingCfgMap) - + err := c.Get(ctx, client.ObjectKeyFromObject(desiredCfgMap), existingCfgMap) if k8serr.IsNotFound(err) { return c.Create(ctx, desiredCfgMap) } else if err != nil { @@ -142,7 +139,7 @@ func CreateNamespace(ctx context.Context, cli client.Client, namespace string, m } foundNamespace := &corev1.Namespace{} - if getErr := cli.Get(ctx, client.ObjectKey{Name: namespace}, foundNamespace); client.IgnoreNotFound(getErr) != nil { + if getErr := cli.Get(ctx, client.ObjectKeyFromObject(desiredNamespace), foundNamespace); client.IgnoreNotFound(getErr) != nil { return nil, getErr } @@ -180,6 +177,7 @@ func ExecuteOnAllNamespaces(ctx 
context.Context, cli client.Client, processFunc // WaitForDeploymentAvailable to check if component deployment from 'namespace' is ready within 'timeout' before apply prometheus rules for the component. func WaitForDeploymentAvailable(ctx context.Context, c client.Client, componentName string, namespace string, interval int, timeout int) error { + log := logf.FromContext(ctx) resourceInterval := time.Duration(interval) * time.Second resourceTimeout := time.Duration(timeout) * time.Minute @@ -190,7 +188,7 @@ func WaitForDeploymentAvailable(ctx context.Context, c client.Client, componentN return false, fmt.Errorf("error fetching list of deployments: %w", err) } - ctrl.Log.Info("waiting for " + strconv.Itoa(len(componentDeploymentList.Items)) + " deployment to be ready for " + componentName) + log.Info("waiting for " + strconv.Itoa(len(componentDeploymentList.Items)) + " deployment to be ready for " + componentName) for _, deployment := range componentDeploymentList.Items { if deployment.Status.ReadyReplicas != deployment.Status.Replicas { return false, nil @@ -202,6 +200,7 @@ func WaitForDeploymentAvailable(ctx context.Context, c client.Client, componentN } func CreateWithRetry(ctx context.Context, cli client.Client, obj client.Object, timeoutMin int) error { + log := logf.FromContext(ctx) interval := time.Second * 5 // arbitrary value timeout := time.Duration(timeoutMin) * time.Minute @@ -229,7 +228,7 @@ func CreateWithRetry(ctx context.Context, cli client.Client, obj client.Object, // retry if 500, assume webhook is not available if k8serr.IsInternalError(errCreate) { - ctrl.Log.Info("Error creating object, retrying...", "reason", errCreate) + log.Info("Error creating object, retrying...", "reason", errCreate) return false, nil } diff --git a/pkg/cluster/roles.go b/pkg/cluster/roles.go index c989915aefe..96ccbae0eb4 100644 --- a/pkg/cluster/roles.go +++ b/pkg/cluster/roles.go @@ -23,7 +23,7 @@ func CreateOrUpdateClusterRole(ctx context.Context, cli client.Client, 
name stri } foundClusterRole := &rbacv1.ClusterRole{} - err := cli.Get(ctx, client.ObjectKey{Name: desiredClusterRole.GetName()}, foundClusterRole) + err := cli.Get(ctx, client.ObjectKeyFromObject(desiredClusterRole), foundClusterRole) if k8serr.IsNotFound(err) { return desiredClusterRole, cli.Create(ctx, desiredClusterRole) } @@ -63,7 +63,7 @@ func CreateOrUpdateClusterRoleBinding(ctx context.Context, cli client.Client, na } foundClusterRoleBinding := &rbacv1.ClusterRoleBinding{} - err := cli.Get(ctx, client.ObjectKey{Name: desiredClusterRoleBinding.GetName()}, foundClusterRoleBinding) + err := cli.Get(ctx, client.ObjectKeyFromObject(desiredClusterRoleBinding), foundClusterRoleBinding) if k8serr.IsNotFound(err) { return desiredClusterRoleBinding, cli.Create(ctx, desiredClusterRoleBinding) } diff --git a/pkg/common/common.go b/pkg/common/common.go index eb8a154426e..815308e4e33 100644 --- a/pkg/common/common.go +++ b/pkg/common/common.go @@ -24,6 +24,7 @@ import ( "fmt" "os" "regexp" + "slices" "strings" ) @@ -65,6 +66,7 @@ func MatchLineInFile(fileName string, replacements map[string]string) error { if err != nil { return fmt.Errorf("failed to write to file: %w", err) } + return nil } @@ -92,6 +94,7 @@ func TrimToRFC1123Name(input string) string { func isAlphanumeric(char byte) bool { regex := regexp.MustCompile(`^[A-Za-z0-9]$`) + return regex.Match([]byte{char}) } @@ -114,3 +117,21 @@ func GetMonitoringData(data string) (string, error) { return encodedData, nil } + +func sliceAddMissing(s *[]string, e string) int { + e = strings.TrimSpace(e) + if slices.Contains(*s, e) { + return 0 + } + *s = append(*s, e) + return 1 +} + +// adds elements of comma separated list. 
+func AddMissing(s *[]string, list string) int { + added := 0 + for _, e := range strings.Split(list, ",") { + added += sliceAddMissing(s, e) + } + return added +} diff --git a/pkg/componentsregistry/componentsregistry.go b/pkg/componentsregistry/componentsregistry.go new file mode 100644 index 00000000000..cd9c07b6402 --- /dev/null +++ b/pkg/componentsregistry/componentsregistry.go @@ -0,0 +1,56 @@ +// componentsregistry package is a registry of all components that can be managed by the operator +// TODO: it may make sense to put it under components/ when it's clear from the old stuff +package componentsregistry + +import ( + "context" + + "github.com/hashicorp/go-multierror" + operatorv1 "github.com/openshift/api/operator/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" +) + +// ComponentHandler is an interface to manage a component +// Every method should accept ctx since it contains the logger. +type ComponentHandler interface { + Init(platform cluster.Platform) error + // GetName and GetManagementState sound like pretty much the same across + // all components, but I could not find a way to avoid it + GetName() string + GetManagementState(dsc *dscv1.DataScienceCluster) operatorv1.ManagementState + // NewCRObject constructs components specific Custom Resource + // e.g. 
Dashboard in datasciencecluster.opendatahub.io group + // It returns interface, but it simplifies DSC reconciler code a lot + NewCRObject(dsc *dscv1.DataScienceCluster) common.PlatformObject + NewComponentReconciler(ctx context.Context, mgr ctrl.Manager) error + // UpdateDSCStatus updates the component specific status part of the DSC + UpdateDSCStatus(dsc *dscv1.DataScienceCluster, obj client.Object) error +} + +var registry = []ComponentHandler{} + +// Add registers a new component handler +// not thread safe, supposed to be called during init. +// TODO: check if init() can be called in parallel. +func Add(ch ComponentHandler) { + registry = append(registry, ch) +} + +// ForEach iterates over all registered component handlers +// With go1.23 probably https://go.dev/blog/range-functions can be used. +func ForEach(f func(ch ComponentHandler) error) error { + var errs *multierror.Error + for _, ch := range registry { + errs = multierror.Append(errs, f(ch)) + } + return errs.ErrorOrNil() +} + +func IsManaged(ch ComponentHandler, dsc *dscv1.DataScienceCluster) bool { + return ch.GetManagementState(dsc) == operatorv1.Managed +} diff --git a/pkg/controller/actions/actions.go b/pkg/controller/actions/actions.go new file mode 100644 index 00000000000..2d469f9085c --- /dev/null +++ b/pkg/controller/actions/actions.go @@ -0,0 +1,24 @@ +package actions + +import ( + "context" + "reflect" + "runtime" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" +) + +// +// Common +// + +const ( + ActionGroup = "action" +) + +type Fn func(ctx context.Context, rr *types.ReconciliationRequest) error + +func (f Fn) String() string { + fn := runtime.FuncForPC(reflect.ValueOf(f).Pointer()) + return fn.Name() +} diff --git a/pkg/controller/actions/deleteresource/action_delete_resources.go b/pkg/controller/actions/deleteresource/action_delete_resources.go new file mode 100644 index 00000000000..5ded47a01c6 --- /dev/null +++ 
b/pkg/controller/actions/deleteresource/action_delete_resources.go @@ -0,0 +1,76 @@ +package deleteresource + +import ( + "context" + + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" +) + +type Action struct { + types []client.Object + labels map[string]string +} + +type ActionOpts func(*Action) + +func WithDeleteResourcesTypes(values ...client.Object) ActionOpts { + return func(action *Action) { + action.types = append(action.types, values...) + } +} + +func WithDeleteResourcesLabel(k string, v string) ActionOpts { + return func(action *Action) { + action.labels[k] = v + } +} + +func WithDeleteResourcesLabels(values map[string]string) ActionOpts { + return func(action *Action) { + for k, v := range values { + action.labels[k] = v + } + } +} + +func (r *Action) run(ctx context.Context, rr *types.ReconciliationRequest) error { + for i := range r.types { + opts := make([]client.DeleteAllOfOption, 0) + + if len(r.labels) > 0 { + opts = append(opts, client.MatchingLabels(r.labels)) + } + + namespaced, err := rr.Client.IsObjectNamespaced(r.types[i]) + if err != nil { + return err + } + + if namespaced { + opts = append(opts, client.InNamespace(rr.DSCI.Spec.ApplicationsNamespace)) + } + + err = rr.Client.DeleteAllOf(ctx, r.types[i], opts...) 
+ if err != nil { + return err + } + } + + return nil +} + +func NewAction(opts ...ActionOpts) actions.Fn { + action := Action{ + types: make([]client.Object, 0), + labels: map[string]string{}, + } + + for _, opt := range opts { + opt(&action) + } + + return action.run +} diff --git a/pkg/controller/actions/deleteresource/action_delete_resources_test.go b/pkg/controller/actions/deleteresource/action_delete_resources_test.go new file mode 100644 index 00000000000..ecf6b540e04 --- /dev/null +++ b/pkg/controller/actions/deleteresource/action_delete_resources_test.go @@ -0,0 +1,85 @@ +package deleteresource_test + +import ( + "context" + "testing" + + "github.com/onsi/gomega/gstruct" + "github.com/rs/xid" + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/deleteresource" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/fakeclient" + + . 
"github.com/onsi/gomega" +) + +func TestDeleteResourcesAction(t *testing.T) { + g := NewWithT(t) + + ctx := context.Background() + ns := xid.New().String() + + cl, err := fakeclient.New( + &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: gvk.Deployment.GroupVersion().String(), + Kind: gvk.Deployment.Kind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-deployment", + Namespace: ns, + Labels: map[string]string{ + labels.K8SCommon.PartOf: "foo", + }, + }, + }, + &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: gvk.Deployment.GroupVersion().String(), + Kind: gvk.Deployment.Kind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-deployment-2", + Namespace: ns, + Labels: map[string]string{ + labels.K8SCommon.PartOf: "baz", + }, + }, + }, + ) + + g.Expect(err).ShouldNot(HaveOccurred()) + + action := deleteresource.NewAction( + deleteresource.WithDeleteResourcesTypes(&appsv1.Deployment{}), + deleteresource.WithDeleteResourcesLabel(labels.K8SCommon.PartOf, "foo")) + + err = action(ctx, &types.ReconciliationRequest{ + Client: cl, + Instance: nil, + DSCI: &dsciv1.DSCInitialization{Spec: dsciv1.DSCInitializationSpec{ApplicationsNamespace: ns}}, + Release: cluster.Release{Name: cluster.OpenDataHub}, + }) + + g.Expect(err).ShouldNot(HaveOccurred()) + + deployments := appsv1.DeploymentList{} + err = cl.List(ctx, &deployments) + + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(deployments.Items).Should(HaveLen(1)) + g.Expect(deployments.Items[0]).To( + gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{ + "ObjectMeta": gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{ + "Name": Equal("my-deployment-2"), + }), + }), + ) +} diff --git a/pkg/controller/actions/deploy/action_deploy.go b/pkg/controller/actions/deploy/action_deploy.go new file mode 100644 index 00000000000..bb419f30f5c --- /dev/null +++ b/pkg/controller/actions/deploy/action_deploy.go @@ -0,0 +1,470 @@ +package deploy + +import ( + "context" + "encoding/json" + "fmt" + 
"strconv" + "strings" + + k8serr "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions" + odhClient "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/client" + odhTypes "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/annotations" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" +) + +type Mode string + +const ( + ModePatch Mode = "patch" + ModeSSA Mode = "ssa" + + PlatformFieldOwner = "platform.opendatahub.io" +) + +// Action deploys the resources that are included in the ReconciliationRequest using +// the same create or patch machinery implemented as part of deploy.DeployManifestsFromPath. 
+type Action struct { + fieldOwner string + deployMode Mode + labels map[string]string + annotations map[string]string + cache *Cache +} + +type ActionOpts func(*Action) + +func WithFieldOwner(value string) ActionOpts { + return func(action *Action) { + action.fieldOwner = value + } +} + +func WithMode(value Mode) ActionOpts { + return func(action *Action) { + action.deployMode = value + } +} + +func WithLabel(name string, value string) ActionOpts { + return func(action *Action) { + if action.labels == nil { + action.labels = map[string]string{} + } + + action.labels[name] = value + } +} + +func WithLabels(values map[string]string) ActionOpts { + return func(action *Action) { + if action.labels == nil { + action.labels = map[string]string{} + } + + for k, v := range values { + action.labels[k] = v + } + } +} + +func WithAnnotation(name string, value string) ActionOpts { + return func(action *Action) { + if action.annotations == nil { + action.annotations = map[string]string{} + } + + action.annotations[name] = value + } +} + +func WithAnnotations(values map[string]string) ActionOpts { + return func(action *Action) { + if action.annotations == nil { + action.annotations = map[string]string{} + } + + for k, v := range values { + action.annotations[k] = v + } + } +} + +func WithCache(opts ...CacheOpt) ActionOpts { + return func(action *Action) { + action.cache = newCache(opts...) 
+ }
+}
+
+func (a *Action) run(ctx context.Context, rr *odhTypes.ReconciliationRequest) error {
+ // cleanup old entries if needed
+ if a.cache != nil {
+ a.cache.Sync()
+ }
+
+ kind, err := resources.KindForObject(rr.Client.Scheme(), rr.Instance)
+ if err != nil {
+ return err
+ }
+
+ controllerName := strings.ToLower(kind)
+
+ for i := range rr.Resources {
+ res := rr.Resources[i]
+ current := resources.GvkToUnstructured(res.GroupVersionKind())
+
+ lookupErr := rr.Client.Get(ctx, client.ObjectKeyFromObject(&res), current)
+ switch {
+ case k8serr.IsNotFound(lookupErr):
+ // set it to nil to pass it down to other methods and signal
+ // that there's no previous known state of the resource
+ current = nil
+ case lookupErr != nil:
+ return fmt.Errorf("failed to lookup object %s/%s: %w", res.GetNamespace(), res.GetName(), lookupErr)
+ default:
+ // Remove the DSC and DSCI owner reference if set. This is required during the
+ // transition from the old to the new operator.
+ if err := removeOwnerReferences(ctx, rr.Client, current, isLegacyOwnerRef); err != nil {
+ return err
+ }
+
+ // the user has explicitly marked the current object as not owned by the operator, so
+ // skip any further processing
+ if resources.GetAnnotation(current, annotations.ManagedByODHOperator) == "false" {
+ continue
+ }
+ }
+
+ var ok bool
+ var err error
+
+ switch rr.Resources[i].GroupVersionKind() {
+ case gvk.CustomResourceDefinition:
+ ok, err = a.deployCRD(ctx, rr, res, current)
+ default:
+ ok, err = a.deploy(ctx, rr, res, current)
+ }
+
+ if err != nil {
+ return fmt.Errorf("failure deploying resource %s: %w", res, err)
+ }
+
+ if ok {
+ DeployedResourcesTotal.WithLabelValues(controllerName).Inc()
+ }
+ }
+
+ return nil
+}
+
+func (a *Action) deployCRD(
+ ctx context.Context,
+ rr *odhTypes.ReconciliationRequest,
+ obj unstructured.Unstructured,
+ current *unstructured.Unstructured,
+) (bool, error) {
+ resources.SetLabels(&obj, a.labels)
+ resources.SetAnnotations(&obj,
a.annotations) + resources.SetLabel(&obj, labels.PlatformPartOf, labels.Platform) + + // backup copy for caching + origObj := obj.DeepCopy() + + if a.cache != nil { + cached, err := a.cache.Has(current, &obj) + if err != nil { + return false, fmt.Errorf("failed to check cache for object: %w", err) + } + if cached { + // no changes, no need to re-deploy it + return false, nil + } + } + + var deployedObj *unstructured.Unstructured + var err error + + ops := []client.PatchOption{ + client.ForceOwnership, + // Since CRDs are not bound to a component, set the field + // owner to the platform itself + client.FieldOwner(PlatformFieldOwner), + } + + switch a.deployMode { + case ModePatch: + deployedObj, err = a.patch(ctx, rr.Client, &obj, current, ops...) + case ModeSSA: + deployedObj, err = a.apply(ctx, rr.Client, &obj, current, ops...) + default: + err = fmt.Errorf("unsupported deploy mode %s", a.deployMode) + } + + if err != nil { + return false, client.IgnoreNotFound(err) + } + + if a.cache != nil { + err := a.cache.Add(deployedObj, origObj) + if err != nil { + return false, fmt.Errorf("failed to cache object: %w", err) + } + } + + return true, nil +} + +func (a *Action) deploy( + ctx context.Context, + rr *odhTypes.ReconciliationRequest, + obj unstructured.Unstructured, + current *unstructured.Unstructured, +) (bool, error) { + fo := a.fieldOwner + if fo == "" { + kind, err := resources.KindForObject(rr.Client.Scheme(), rr.Instance) + if err != nil { + return false, err + } + + fo = strings.ToLower(kind) + } + + resources.SetLabels(&obj, a.labels) + resources.SetAnnotations(&obj, a.annotations) + resources.SetAnnotation(&obj, annotations.InstanceGeneration, strconv.FormatInt(rr.Instance.GetGeneration(), 10)) + resources.SetAnnotation(&obj, annotations.InstanceName, rr.Instance.GetName()) + resources.SetAnnotation(&obj, annotations.InstanceUID, string(rr.Instance.GetUID())) + resources.SetAnnotation(&obj, annotations.PlatformType, string(rr.Release.Name)) + 
resources.SetAnnotation(&obj, annotations.PlatformVersion, rr.Release.Version.String()) + + if resources.GetLabel(&obj, labels.PlatformPartOf) == "" && fo != "" { + resources.SetLabel(&obj, labels.PlatformPartOf, fo) + } + + // backup copy for caching + origObj := obj.DeepCopy() + + if a.cache != nil { + cached, err := a.cache.Has(current, &obj) + if err != nil { + return false, fmt.Errorf("failed to check cache for object: %w", err) + } + if cached { + // no changes, no need to re-deploy it + return false, nil + } + } + + var deployedObj *unstructured.Unstructured + var err error + + switch { + // The object is explicitly marked as not owned by the operator in the manifests, + // so it should be created if it doesn't exist, but should not be modified afterward. + case resources.GetAnnotation(&obj, annotations.ManagedByODHOperator) == "false": + // remove the opendatahub.io/managed as it should not be set + // to the actual object in this case + resources.RemoveAnnotation(&obj, annotations.ManagedByODHOperator) + + deployedObj, err = a.create(ctx, rr.Client, &obj) + if err != nil && !k8serr.IsAlreadyExists(err) { + return false, err + } + + default: + owned := rr.Manager.Owns(obj.GroupVersionKind()) + if owned { + if err := ctrl.SetControllerReference(rr.Instance, &obj, rr.Client.Scheme()); err != nil { + return false, err + } + } + + ops := []client.PatchOption{ + client.ForceOwnership, + client.FieldOwner(fo), + } + + switch a.deployMode { + case ModePatch: + deployedObj, err = a.patch(ctx, rr.Client, &obj, current, ops...) + case ModeSSA: + deployedObj, err = a.apply(ctx, rr.Client, &obj, current, ops...) 
+ default: + err = fmt.Errorf("unsupported deploy mode %s", a.deployMode) + } + + if err != nil { + return false, client.IgnoreNotFound(err) + } + } + + if a.cache != nil { + err := a.cache.Add(deployedObj, origObj) + if err != nil { + return false, fmt.Errorf("failed to cache object: %w", err) + } + } + + return true, nil +} + +func (a *Action) create( + ctx context.Context, + c *odhClient.Client, + obj *unstructured.Unstructured, +) (*unstructured.Unstructured, error) { + logf.FromContext(ctx).V(3).Info("create", + "gvk", obj.GroupVersionKind(), + "name", client.ObjectKeyFromObject(obj), + ) + + err := c.Create(ctx, obj) + if err != nil { + return obj, err + } + + return obj, nil +} + +func (a *Action) patch( + ctx context.Context, + c *odhClient.Client, + obj *unstructured.Unstructured, + old *unstructured.Unstructured, + opts ...client.PatchOption, +) (*unstructured.Unstructured, error) { + logf.FromContext(ctx).V(3).Info("patch", + "gvk", obj.GroupVersionKind(), + "name", client.ObjectKeyFromObject(obj), + ) + + switch obj.GroupVersionKind() { + case gvk.Deployment: + // For deployments, we allow the user to change some parameters, such as + // container resources and replicas except: + // - If the resource does not exist (the resource must be created) + // - If the resource is forcefully marked as managed by the operator via + // annotations (i.e. to bring it back to the default values) + if old == nil || resources.GetAnnotation(old, annotations.ManagedByODHOperator) == "true" { + break + } + + // To preserve backward compatibility with the current model, fields are being + // removed, hence not included in the final PATCH. Ideally we should leverage + // Server-Side Apply.
+ // + // Ideally deployed resources should be configured only via the platform API + if err := RemoveDeploymentsResources(obj); err != nil { + return nil, fmt.Errorf("failed to apply allow list to Deployment %s/%s: %w", obj.GetNamespace(), obj.GetName(), err) + } + default: + // do nothing + break + } + + if old == nil { + err := c.Create(ctx, obj) + if err != nil { + return nil, fmt.Errorf("failed to create object %s/%s: %w", obj.GetNamespace(), obj.GetName(), err) + } + } else { + data, err := json.Marshal(obj) + if err != nil { + return nil, err + } + + err = c.Patch( + ctx, + old, + client.RawPatch(types.ApplyPatchType, data), + opts..., + ) + + if err != nil { + return nil, fmt.Errorf("failed to patch object %s/%s: %w", obj.GetNamespace(), obj.GetName(), err) + } + } + + return old, nil +} + +func (a *Action) apply( + ctx context.Context, + c *odhClient.Client, + obj *unstructured.Unstructured, + old *unstructured.Unstructured, + opts ...client.PatchOption, +) (*unstructured.Unstructured, error) { + logf.FromContext(ctx).V(3).Info("apply", + "gvk", obj.GroupVersionKind(), + "name", client.ObjectKeyFromObject(obj), + ) + + switch obj.GroupVersionKind() { + case gvk.Deployment: + // For deployments, we allow the user to change some parameters, such as + // container resources and replicas except: + // - If the resource does not exist (the resource must be created) + // - If the resource is forcefully marked as managed by the operator via + // annotations (i.e. to bring it back to the default values) + if old == nil || resources.GetAnnotation(old, annotations.ManagedByODHOperator) == "true" { + break + } + + // To preserve backward compatibility with the current model, fields are being + // merged from an existing Deployment (if it exists) to the rendered manifest, + // hence the current value is preserved [1]. 
+ // + // Ideally deployed resources should be configured only via the platform API + // + // [1] https://kubernetes.io/docs/reference/using-api/server-side-apply/#conflicts + if err := MergeDeployments(old, obj); err != nil { + return nil, fmt.Errorf("failed to merge Deployment %s/%s: %w", obj.GetNamespace(), obj.GetName(), err) + } + case gvk.ClusterRole: + // For ClusterRole, if AggregationRule is set, then the Rules are controller managed + // and direct changes to Rules will be stomped by the controller. This also happens if + // the rules are set to an empty slice or nil, hence we are removing the rules field + // if the ClusterRole is set to be an aggregation role. + _, found, err := unstructured.NestedFieldNoCopy(obj.Object, "aggregationRule") + if err != nil { + return nil, err + } + if found { + unstructured.RemoveNestedField(obj.Object, "rules") + } + default: + // do nothing + break + } + + err := c.Apply(ctx, obj, opts...) + if err != nil { + return nil, fmt.Errorf("apply failed %s: %w", obj.GroupVersionKind(), err) + } + + return obj, nil +} + +func NewAction(opts ...ActionOpts) actions.Fn { + action := Action{ + deployMode: ModeSSA, + } + + for _, opt := range opts { + opt(&action) + } + + return action.run +} diff --git a/pkg/controller/actions/deploy/action_deploy_cache.go b/pkg/controller/actions/deploy/action_deploy_cache.go new file mode 100644 index 00000000000..10ce3d3da18 --- /dev/null +++ b/pkg/controller/actions/deploy/action_deploy_cache.go @@ -0,0 +1,117 @@ +package deploy + +import ( + "encoding/base64" + "errors" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" +) + +// This code is heavily inspired by https://github.com/kubernetes-sigs/cluster-api/tree/main/internal/util/ssa + +const ( + DefaultCacheTTL = 10 * time.Minute +) + +type Cache struct { + s cache.Store + ttl time.Duration +} + +type
CacheOpt func(*Cache) + +func WithTTL(ttl time.Duration) CacheOpt { + return func(c *Cache) { + c.ttl = ttl + } +} + +func newCache(opts ...CacheOpt) *Cache { + c := Cache{ + ttl: DefaultCacheTTL, + } + + for _, opt := range opts { + opt(&c) + } + + c.s = cache.NewTTLStore( + func(obj interface{}) (string, error) { + s, ok := obj.(string) + if !ok { + return "", errors.New("failed to cast object to string") + } + + return s, nil + }, + c.ttl, + ) + + return &c +} + +func (r *Cache) Add(original *unstructured.Unstructured, modified *unstructured.Unstructured) error { + if original == nil || modified == nil { + return errors.New("invalid input") + } + + key, err := r.computeCacheKey(original, modified) + if err != nil { + return fmt.Errorf("failed to compute cacheKey: %w", err) + } + + if key == "" { + return nil + } + + _ = r.s.Add(key) + + return nil +} + +func (r *Cache) Has(original *unstructured.Unstructured, modified *unstructured.Unstructured) (bool, error) { + if original == nil || modified == nil { + return false, nil + } + + key, err := r.computeCacheKey(original, modified) + if err != nil { + return false, fmt.Errorf("failed to compute cacheKey: %w", err) + } + + if key == "" { + return false, nil + } + + _, exists, _ := r.s.GetByKey(key) + + return exists, nil +} + +func (r *Cache) Sync() { + r.s.List() +} + +func (r *Cache) computeCacheKey( + original *unstructured.Unstructured, + modified *unstructured.Unstructured, +) (string, error) { + modifiedObjectHash, err := resources.Hash(modified) + if err != nil { + return "", err + } + + return fmt.Sprintf("%s.%s.%s.%s.%s", + original.GroupVersionKind().GroupVersion(), + original.GroupVersionKind().Kind, + klog.KObj(original), + original.GetResourceVersion(), + base64.RawURLEncoding.EncodeToString(modifiedObjectHash), + ), nil +} diff --git a/pkg/controller/actions/deploy/action_deploy_cache_test.go b/pkg/controller/actions/deploy/action_deploy_cache_test.go new file mode 100644 index 00000000000..4ee26b6b31d 
--- /dev/null +++ b/pkg/controller/actions/deploy/action_deploy_cache_test.go @@ -0,0 +1,261 @@ +package deploy_test + +import ( + "context" + "path/filepath" + "testing" + "time" + + "github.com/blang/semver/v4" + "github.com/operator-framework/api/pkg/lib/version" + "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/rs/xid" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + ctrlCli "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/client" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" + "github.com/opendatahub-io/opendatahub-operator/v2/tests/envtestutil" + + . 
"github.com/onsi/gomega" +) + +func TestDeployWithCacheAction(t *testing.T) { + g := NewWithT(t) + s := runtime.NewScheme() + + utilruntime.Must(corev1.AddToScheme(s)) + utilruntime.Must(appsv1.AddToScheme(s)) + utilruntime.Must(apiextensionsv1.AddToScheme(s)) + utilruntime.Must(componentApi.AddToScheme(s)) + + projectDir, err := envtestutil.FindProjectRoot() + g.Expect(err).NotTo(HaveOccurred()) + + envTest := &envtest.Environment{ + CRDInstallOptions: envtest.CRDInstallOptions{ + Scheme: s, + Paths: []string{ + filepath.Join(projectDir, "config", "crd", "bases"), + }, + ErrorIfPathMissing: true, + CleanUpAfterUse: false, + }, + } + + t.Cleanup(func() { + _ = envTest.Stop() + }) + + cfg, err := envTest.Start() + g.Expect(err).NotTo(HaveOccurred()) + + envTestClient, err := ctrlCli.New(cfg, ctrlCli.Options{Scheme: s}) + g.Expect(err).NotTo(HaveOccurred()) + + cli, err := client.NewFromConfig(cfg, envTestClient) + g.Expect(err).NotTo(HaveOccurred()) + + t.Run("ExistingResource", func(t *testing.T) { + testResourceNotReDeployed( + t, + cli, + &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: gvk.ConfigMap.GroupVersion().String(), + Kind: gvk.ConfigMap.Kind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: xid.New().String(), + Namespace: xid.New().String(), + }, + }, + true) + }) + + t.Run("NonExistingResource", func(t *testing.T) { + testResourceNotReDeployed( + t, + cli, + &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: gvk.ConfigMap.GroupVersion().String(), + Kind: gvk.ConfigMap.Kind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: xid.New().String(), + Namespace: xid.New().String(), + }, + }, + false) + }) + + t.Run("CacheTTL", func(t *testing.T) { + testCacheTTL( + t, + cli, + &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: gvk.ConfigMap.GroupVersion().String(), + Kind: gvk.ConfigMap.Kind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: xid.New().String(), + Namespace: xid.New().String(), + }, + }) + }) +} + +func 
testResourceNotReDeployed(t *testing.T, cli *client.Client, obj ctrlCli.Object, create bool) { + t.Helper() + + g := NewWithT(t) + ctx := context.Background() + + in, err := resources.ToUnstructured(obj) + g.Expect(err).ShouldNot(HaveOccurred()) + + err = cli.Create(ctx, &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: in.GetNamespace(), + }, + }) + + g.Expect(err).ShouldNot(HaveOccurred()) + + if create { + err = cli.Create(ctx, in.DeepCopy()) + g.Expect(err).ShouldNot(HaveOccurred()) + } + + rr := types.ReconciliationRequest{ + Client: cli, + DSCI: &dsciv1.DSCInitialization{Spec: dsciv1.DSCInitializationSpec{ + ApplicationsNamespace: in.GetNamespace()}, + }, + Instance: &componentApi.Dashboard{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + }, + }, + Release: cluster.Release{ + Name: cluster.OpenDataHub, + Version: version.OperatorVersion{Version: semver.Version{ + Major: 1, Minor: 2, Patch: 3, + }}}, + Resources: []unstructured.Unstructured{ + *in.DeepCopy(), + }, + } + + action := deploy.NewAction( + deploy.WithCache(), + deploy.WithMode(deploy.ModeSSA), + deploy.WithFieldOwner(xid.New().String()), + ) + + deploy.DeployedResourcesTotal.Reset() + + // Resource should be created if missing + err = action(ctx, &rr) + g.Expect(err).ShouldNot(HaveOccurred()) + + g.Expect(testutil.ToFloat64(deploy.DeployedResourcesTotal)).Should(Equal(float64(1))) + + out1 := unstructured.Unstructured{} + out1.SetGroupVersionKind(in.GroupVersionKind()) + + err = cli.Get(ctx, ctrlCli.ObjectKeyFromObject(in), &out1) + g.Expect(err).ShouldNot(HaveOccurred()) + + // Resource should not be re-deployed + err = action(ctx, &rr) + g.Expect(err).ShouldNot(HaveOccurred()) + + g.Expect(testutil.ToFloat64(deploy.DeployedResourcesTotal)).Should(Equal(float64(1))) + + out2 := unstructured.Unstructured{} + out2.SetGroupVersionKind(in.GroupVersionKind()) + + err = cli.Get(ctx, ctrlCli.ObjectKeyFromObject(in), &out2) + g.Expect(err).ShouldNot(HaveOccurred()) + + // check that the 
resource version has not changed + g.Expect(out1.GetResourceVersion()).Should(Equal(out2.GetResourceVersion())) +} + +func testCacheTTL(t *testing.T, cli *client.Client, obj ctrlCli.Object) { + t.Helper() + + g := NewWithT(t) + ctx := context.Background() + + in, err := resources.ToUnstructured(obj) + g.Expect(err).ShouldNot(HaveOccurred()) + + err = cli.Create(ctx, &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: in.GetNamespace(), + }, + }) + + g.Expect(err).ShouldNot(HaveOccurred()) + + rr := types.ReconciliationRequest{ + Client: cli, + DSCI: &dsciv1.DSCInitialization{Spec: dsciv1.DSCInitializationSpec{ + ApplicationsNamespace: in.GetNamespace()}, + }, + Instance: &componentApi.Dashboard{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + }, + }, + Release: cluster.Release{ + Name: cluster.OpenDataHub, + Version: version.OperatorVersion{Version: semver.Version{ + Major: 1, Minor: 2, Patch: 3, + }}}, + Resources: []unstructured.Unstructured{ + *in.DeepCopy(), + }, + } + + ttl := 1 * time.Second + + action := deploy.NewAction( + deploy.WithCache(deploy.WithTTL(ttl)), + deploy.WithMode(deploy.ModeSSA), + deploy.WithFieldOwner(xid.New().String()), + ) + + deploy.DeployedResourcesTotal.Reset() + + err = action(ctx, &rr) + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(testutil.ToFloat64(deploy.DeployedResourcesTotal)).Should(BeNumerically("==", 1)) + + g.Eventually(func() (float64, error) { + if err := action(ctx, &rr); err != nil { + return 0, err + } + + return testutil.ToFloat64(deploy.DeployedResourcesTotal), nil + }).WithTimeout(5 * ttl).WithPolling(2 * ttl).Should( + BeNumerically("==", 2), + ) +} diff --git a/pkg/controller/actions/deploy/action_deploy_merge_deployment.go b/pkg/controller/actions/deploy/action_deploy_merge_deployment.go new file mode 100644 index 00000000000..55e4795c46d --- /dev/null +++ b/pkg/controller/actions/deploy/action_deploy_merge_deployment.go @@ -0,0 +1,108 @@ +package deploy + +import ( + "errors" + + 
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +func MergeDeployments(source *unstructured.Unstructured, target *unstructured.Unstructured) error { + containersPath := []string{"spec", "template", "spec", "containers"} + replicasPath := []string{"spec", "replicas"} + + // + // Resources + // + + sc, ok, err := unstructured.NestedFieldNoCopy(source.Object, containersPath...) + if err != nil && ok { + return err + } + tc, ok, err := unstructured.NestedFieldNoCopy(target.Object, containersPath...) + if err != nil && ok { + return err + } + + resources := make(map[string]interface{}) + + var sourceContainers []interface{} + if sc != nil { + sourceContainers, ok = sc.([]interface{}) + if !ok { + return errors.New("field is not a slice") + } + } + + var targetContainers []interface{} + if tc != nil { + targetContainers, ok = tc.([]interface{}) + if !ok { + return errors.New("field is not a slice") + } + } + + for i := range sourceContainers { + m, ok := sourceContainers[i].(map[string]interface{}) + if !ok { + return errors.New("field is not a map") + } + + name, ok := m["name"] + if !ok { + // can't deal with unnamed containers + continue + } + + r, ok := m["resources"] + if !ok { + r = make(map[string]interface{}) + } + + //nolint:forcetypeassert,errcheck + resources[name.(string)] = r + } + + for i := range targetContainers { + m, ok := targetContainers[i].(map[string]interface{}) + if !ok { + return errors.New("field is not a map") + } + + name, ok := m["name"] + if !ok { + // can't deal with unnamed containers + continue + } + + //nolint:errcheck + nr, ok := resources[name.(string)] + if !ok { + continue + } + + //nolint:forcetypeassert,errcheck + if len(nr.(map[string]interface{})) == 0 { + delete(m, "resources") + } else { + m["resources"] = nr + } + } + + // + // Replicas + // + + sourceReplica, ok, err := unstructured.NestedFieldNoCopy(source.Object, replicasPath...) 
+ if err != nil { + return err + } + if !ok { + unstructured.RemoveNestedField(target.Object, replicasPath...) + } else { + if err := unstructured.SetNestedField(target.Object, sourceReplica, replicasPath...); err != nil { + return err + } + } + + return nil +} diff --git a/pkg/controller/actions/deploy/action_deploy_merge_deployment_test.go b/pkg/controller/actions/deploy/action_deploy_merge_deployment_test.go new file mode 100644 index 00000000000..dbd68768ebe --- /dev/null +++ b/pkg/controller/actions/deploy/action_deploy_merge_deployment_test.go @@ -0,0 +1,145 @@ +package deploy_test + +import ( + "testing" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/matchers/jq" + + . 
"github.com/onsi/gomega" +) + +func TestMergeDeploymentsOverride(t *testing.T) { + g := NewWithT(t) + + source, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To[int32](1), + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test", + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("3Gi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("4"), + corev1.ResourceMemory: resource.MustParse("4Gi"), + }, + }, + }, + }, + }, + }, + }, + }) + g.Expect(err).ShouldNot(HaveOccurred()) + + target, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To[int32](3), + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test", + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + }, + }, + }, + }, + }, + }, + }, + }) + + g.Expect(err).ShouldNot(HaveOccurred()) + + src := unstructured.Unstructured{Object: source} + trg := unstructured.Unstructured{Object: target} + + err = deploy.MergeDeployments(&src, &trg) + g.Expect(err).ShouldNot(HaveOccurred()) + + g.Expect(trg).Should(And( + jq.Match(`.spec.replicas == 1`), + jq.Match(`.spec.template.spec.containers[0].resources.requests.cpu == "3"`), + jq.Match(`.spec.template.spec.containers[0].resources.requests.memory == "3Gi"`), + jq.Match(`.spec.template.spec.containers[0].resources.limits.cpu == "4"`), + jq.Match(`.spec.template.spec.containers[0].resources.limits.memory == "4Gi"`), + )) 
+} + +func TestMergeDeploymentsRemove(t *testing.T) { + g := NewWithT(t) + + source, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test", + }, + }, + }, + }, + }, + }) + g.Expect(err).ShouldNot(HaveOccurred()) + + target, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To[int32](3), + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test", + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + }, + }, + }, + }, + }, + }, + }, + }) + + g.Expect(err).ShouldNot(HaveOccurred()) + + src := unstructured.Unstructured{Object: source} + trg := unstructured.Unstructured{Object: target} + + err = deploy.MergeDeployments(&src, &trg) + g.Expect(err).ShouldNot(HaveOccurred()) + + g.Expect(trg).Should(And( + jq.Match(`.spec | has("replicas") | not`), + jq.Match(`.spec.template.spec.containers[0] | has("resources") | not`), + )) +} diff --git a/pkg/controller/actions/deploy/action_deploy_metrics.go b/pkg/controller/actions/deploy/action_deploy_metrics.go new file mode 100644 index 00000000000..0d5bce938a4 --- /dev/null +++ b/pkg/controller/actions/deploy/action_deploy_metrics.go @@ -0,0 +1,29 @@ +package deploy + +import ( + "github.com/prometheus/client_golang/prometheus" + "sigs.k8s.io/controller-runtime/pkg/metrics" +) + +var ( + // DeployedResourcesTotal is a prometheus counter metrics which holds the total + // number of resource deployed by the action per controller. It has one label. 
+ // controller label refers to the controller name. + DeployedResourcesTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "action_deploy_resources_total", + Help: "Number of deployed resources", + }, + []string{ + "controller", + }, + ) +) + +// init register metrics to the global registry from controller-runtime/pkg/metrics. +// see https://book.kubebuilder.io/reference/metrics#publishing-additional-metrics +// +//nolint:gochecknoinits +func init() { + metrics.Registry.MustRegister(DeployedResourcesTotal) +} diff --git a/pkg/controller/actions/deploy/action_deploy_remove_deployment_resources.go b/pkg/controller/actions/deploy/action_deploy_remove_deployment_resources.go new file mode 100644 index 00000000000..44ca5e8e5c5 --- /dev/null +++ b/pkg/controller/actions/deploy/action_deploy_remove_deployment_resources.go @@ -0,0 +1,46 @@ +package deploy + +import ( + "errors" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +func RemoveDeploymentsResources(obj *unstructured.Unstructured) error { + containersPath := []string{"spec", "template", "spec", "containers"} + replicasPath := []string{"spec", "replicas"} + + // + // Resources + // + + sc, ok, err := unstructured.NestedFieldNoCopy(obj.Object, containersPath...) + if err != nil && ok { + return err + } + + var sourceContainers []interface{} + if sc != nil { + sourceContainers, ok = sc.([]interface{}) + if !ok { + return errors.New("field is not a slice") + } + } + + for i := range sourceContainers { + m, ok := sourceContainers[i].(map[string]interface{}) + if !ok { + return errors.New("field is not a map") + } + + delete(m, "resources") + } + + // + // Replicas + // + + unstructured.RemoveNestedField(obj.Object, replicasPath...) 
+ + return nil +} diff --git a/pkg/controller/actions/deploy/action_deploy_remove_deployment_resources_test.go b/pkg/controller/actions/deploy/action_deploy_remove_deployment_resources_test.go new file mode 100644 index 00000000000..170bf850a7b --- /dev/null +++ b/pkg/controller/actions/deploy/action_deploy_remove_deployment_resources_test.go @@ -0,0 +1,57 @@ +package deploy_test + +import ( + "testing" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/matchers/jq" + + . "github.com/onsi/gomega" +) + +func TestMRemoveDeploymentsResources(t *testing.T) { + g := NewWithT(t) + + source, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&appsv1.Deployment{ + Spec: appsv1.DeploymentSpec{ + Replicas: ptr.To[int32](1), + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test", + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("3Gi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("4"), + corev1.ResourceMemory: resource.MustParse("4Gi"), + }, + }, + }, + }, + }, + }, + }, + }) + g.Expect(err).ShouldNot(HaveOccurred()) + + src := unstructured.Unstructured{Object: source} + + err = deploy.RemoveDeploymentsResources(&src) + + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(src).Should(And( + jq.Match(`.spec | has("replicas") | not`), + jq.Match(`.spec.template.spec.containers[0] | has("resources") | not`), + )) +} diff --git a/pkg/controller/actions/deploy/action_deploy_support.go b/pkg/controller/actions/deploy/action_deploy_support.go new file mode 
100644 index 00000000000..f6799a03af3 --- /dev/null +++ b/pkg/controller/actions/deploy/action_deploy_support.go @@ -0,0 +1,73 @@ +package deploy + +import ( + "context" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" +) + +func isLegacyOwnerRef(or metav1.OwnerReference) bool { + switch { + case or.APIVersion == gvk.DataScienceCluster.GroupVersion().String() && or.Kind == gvk.DataScienceCluster.Kind: + return true + case or.APIVersion == gvk.DSCInitialization.GroupVersion().String() && or.Kind == gvk.DSCInitialization.Kind: + return true + default: + return false + } +} + +// removeOwnerReferences removes all owner references from a Kubernetes object that match the provided predicate. +// +// This function iterates through the OwnerReferences of the given object, filters out those that satisfy +// the predicate, and updates the object in the cluster using the provided client. +// +// Parameters: +// - ctx: The context for the request, which can carry deadlines, cancellation signals, and other request-scoped values. +// - cli: A controller-runtime client used to update the Kubernetes object. +// - obj: The Kubernetes object whose OwnerReferences are to be filtered. It must implement client.Object. +// - predicate: A function that takes an OwnerReference and returns true if the reference should be removed. +// +// Returns: +// - An error if the update operation fails, otherwise nil. 
+func removeOwnerReferences( + ctx context.Context, + cli client.Client, + obj client.Object, + predicate func(reference metav1.OwnerReference) bool, +) error { + oldRefs := obj.GetOwnerReferences() + if len(oldRefs) == 0 { + return nil + } + + newRefs := oldRefs[:0] + for _, ref := range oldRefs { + if !predicate(ref) { + newRefs = append(newRefs, ref) + } + } + + if len(newRefs) == len(oldRefs) { + return nil + } + + obj.SetOwnerReferences(newRefs) + + // Update the object in the cluster + if err := cli.Update(ctx, obj); err != nil { + return fmt.Errorf( + "failed to remove owner references from object %s/%s with gvk %s: %w", + obj.GetNamespace(), + obj.GetName(), + obj.GetObjectKind().GroupVersionKind(), + err, + ) + } + + return nil +} diff --git a/pkg/controller/actions/deploy/action_deploy_support_test.go b/pkg/controller/actions/deploy/action_deploy_support_test.go new file mode 100644 index 00000000000..01396aa3a22 --- /dev/null +++ b/pkg/controller/actions/deploy/action_deploy_support_test.go @@ -0,0 +1,189 @@ +//nolint:testpackage +package deploy + +import ( + "context" + "path/filepath" + "testing" + + "github.com/onsi/gomega/gstruct" + "github.com/onsi/gomega/types" + "github.com/rs/xid" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/envtest" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" + dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + 
"github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" + odhCli "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/client" + "github.com/opendatahub-io/opendatahub-operator/v2/tests/envtestutil" + + . "github.com/onsi/gomega" +) + +func TestIsLegacyOwnerRef(t *testing.T) { + t.Parallel() + + g := NewWithT(t) + + tests := []struct { + name string + ownerRef metav1.OwnerReference + matcher types.GomegaMatcher + }{ + { + name: "Valid DataScienceCluster owner reference", + ownerRef: metav1.OwnerReference{ + APIVersion: gvk.DataScienceCluster.GroupVersion().String(), + Kind: gvk.DataScienceCluster.Kind, + }, + matcher: BeTrue(), + }, + { + name: "Valid DSCInitialization owner reference", + ownerRef: metav1.OwnerReference{ + APIVersion: gvk.DSCInitialization.GroupVersion().String(), + Kind: gvk.DSCInitialization.Kind, + }, + matcher: BeTrue(), + }, + { + name: "Invalid owner reference (different group)", + ownerRef: metav1.OwnerReference{ + APIVersion: "othergroup/v1", + Kind: gvk.DSCInitialization.Kind, + }, + matcher: BeFalse(), + }, + { + name: "Invalid owner reference (different kind)", + ownerRef: metav1.OwnerReference{ + APIVersion: gvk.DSCInitialization.GroupVersion().String(), + Kind: "OtherKind", + }, + matcher: BeFalse(), + }, + { + name: "Invalid owner reference (different group and kind)", + ownerRef: metav1.OwnerReference{ + APIVersion: "othergroup/v1", + Kind: "OtherKind", + }, + matcher: BeFalse(), + }, + { + name: "Empty owner reference", + ownerRef: metav1.OwnerReference{}, + matcher: BeFalse(), + }, + } + + for i := range tests { + tt := tests[i] + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + result := isLegacyOwnerRef(tt.ownerRef) + g.Expect(result).To(tt.matcher) + }) + } +} + +func TestRemoveOwnerRef(t *testing.T) { + g := NewWithT(t) + s := runtime.NewScheme() + + ctx := context.Background() + ns := xid.New().String() + + utilruntime.Must(corev1.AddToScheme(s)) + utilruntime.Must(appsv1.AddToScheme(s)) + 
utilruntime.Must(apiextensionsv1.AddToScheme(s)) + utilruntime.Must(componentApi.AddToScheme(s)) + utilruntime.Must(dsciv1.AddToScheme(s)) + utilruntime.Must(dscv1.AddToScheme(s)) + utilruntime.Must(rbacv1.AddToScheme(s)) + + projectDir, err := envtestutil.FindProjectRoot() + g.Expect(err).NotTo(HaveOccurred()) + + envTest := &envtest.Environment{ + CRDInstallOptions: envtest.CRDInstallOptions{ + Scheme: s, + Paths: []string{ + filepath.Join(projectDir, "config", "crd", "bases"), + }, + ErrorIfPathMissing: true, + CleanUpAfterUse: false, + }, + } + + t.Cleanup(func() { + _ = envTest.Stop() + }) + + cfg, err := envTest.Start() + g.Expect(err).NotTo(HaveOccurred()) + + envTestClient, err := client.New(cfg, client.Options{Scheme: s}) + g.Expect(err).NotTo(HaveOccurred()) + + cli, err := odhCli.NewFromConfig(cfg, envTestClient) + g.Expect(err).NotTo(HaveOccurred()) + + err = cli.Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) + g.Expect(err).ToNot(HaveOccurred()) + + cm1 := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "cm1", Namespace: ns}} + cm1.SetGroupVersionKind(gvk.ConfigMap) + + err = cli.Create(ctx, cm1) + g.Expect(err).ToNot(HaveOccurred()) + + cm2 := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "cm2", Namespace: ns}} + cm2.SetGroupVersionKind(gvk.ConfigMap) + + err = cli.Create(ctx, cm2) + g.Expect(err).ToNot(HaveOccurred()) + + // Create a ConfigMap with OwnerReferences + configMap := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "test-configmap", Namespace: ns}} + + err = controllerutil.SetOwnerReference(cm1, configMap, s) + g.Expect(err).ToNot(HaveOccurred()) + err = controllerutil.SetOwnerReference(cm2, configMap, s) + g.Expect(err).ToNot(HaveOccurred()) + + err = cli.Create(ctx, configMap) + g.Expect(err).ToNot(HaveOccurred()) + + predicate := func(ref metav1.OwnerReference) bool { + return ref.Name == cm1.Name + } + + err = removeOwnerReferences(ctx, cli, configMap, predicate) + 
g.Expect(err).ToNot(HaveOccurred()) + + updatedConfigMap := &corev1.ConfigMap{} + err = cli.Get(ctx, client.ObjectKeyFromObject(configMap), updatedConfigMap) + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(updatedConfigMap.GetOwnerReferences()).Should(And( + HaveLen(1), + HaveEach(gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{ + "Name": Equal(cm2.Name), + "APIVersion": Equal(gvk.ConfigMap.GroupVersion().String()), + "Kind": Equal(gvk.ConfigMap.Kind), + "UID": Equal(cm2.UID), + })), + )) +} diff --git a/pkg/controller/actions/deploy/action_deploy_test.go b/pkg/controller/actions/deploy/action_deploy_test.go new file mode 100644 index 00000000000..16e80c50270 --- /dev/null +++ b/pkg/controller/actions/deploy/action_deploy_test.go @@ -0,0 +1,636 @@ +package deploy_test + +import ( + "context" + "path/filepath" + "strconv" + "strings" + "testing" + + "github.com/blang/semver/v4" + "github.com/onsi/gomega/gstruct" + "github.com/operator-framework/api/pkg/lib/version" + "github.com/rs/xid" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + apimachinery "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/envtest" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" + dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" + 
"github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/deploy" + odhCli "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/client" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/manager" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/annotations" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/fakeclient" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/matchers/jq" + "github.com/opendatahub-io/opendatahub-operator/v2/tests/envtestutil" + + . "github.com/onsi/gomega" +) + +func TestDeployAction(t *testing.T) { + g := NewWithT(t) + + ctx := context.Background() + ns := xid.New().String() + cl, err := fakeclient.New() + g.Expect(err).ShouldNot(HaveOccurred()) + + action := deploy.NewAction( + // fake client does not yet support SSA + // - https://github.com/kubernetes/kubernetes/issues/115598 + // - https://github.com/kubernetes-sigs/controller-runtime/issues/2341 + deploy.WithMode(deploy.ModePatch), + ) + + obj1, err := resources.ToUnstructured(&appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: appsv1.SchemeGroupVersion.String(), + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: xid.New().String(), + Namespace: ns, + }, + }) + + g.Expect(err).ShouldNot(HaveOccurred()) + + rr := types.ReconciliationRequest{ + Client: cl, + DSCI: &dsciv1.DSCInitialization{Spec: dsciv1.DSCInitializationSpec{ApplicationsNamespace: ns}}, + Instance: &componentApi.Dashboard{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + }, + }, + Release: cluster.Release{ + Name: cluster.OpenDataHub, + Version: version.OperatorVersion{Version: semver.Version{ + Major: 1, Minor: 2, Patch: 3, + }}}, + Resources: []unstructured.Unstructured{*obj1}, 
+ } + + err = action(ctx, &rr) + g.Expect(err).ShouldNot(HaveOccurred()) + + err = cl.Get(ctx, client.ObjectKeyFromObject(obj1), obj1) + g.Expect(err).ShouldNot(HaveOccurred()) + + g.Expect(obj1).Should(And( + jq.Match(`.metadata.labels."%s" == "%s"`, labels.PlatformPartOf, strings.ToLower(componentApi.DashboardKind)), + jq.Match(`.metadata.annotations."%s" == "%s"`, annotations.InstanceGeneration, strconv.FormatInt(rr.Instance.GetGeneration(), 10)), + jq.Match(`.metadata.annotations."%s" == "%s"`, annotations.PlatformVersion, "1.2.3"), + jq.Match(`.metadata.annotations."%s" == "%s"`, annotations.PlatformType, string(cluster.OpenDataHub)), + )) +} + +func TestDeployNotOwnedSkip(t *testing.T) { + g := NewWithT(t) + + ctx := context.Background() + ns := xid.New().String() + name := xid.New().String() + + action := deploy.NewAction( + // fake client does not yet support SSA + // - https://github.com/kubernetes/kubernetes/issues/115598 + // - https://github.com/kubernetes-sigs/controller-runtime/issues/2341 + deploy.WithMode(deploy.ModePatch), + ) + + oldObj, err := resources.ToUnstructured(&appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: appsv1.SchemeGroupVersion.String(), + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: appsv1.DeploymentSpec{ + Strategy: appsv1.DeploymentStrategy{ + Type: appsv1.RecreateDeploymentStrategyType, + }, + }, + }) + + g.Expect(err).ShouldNot(HaveOccurred()) + + newObj, err := resources.ToUnstructured(&appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: appsv1.SchemeGroupVersion.String(), + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + Annotations: map[string]string{ + annotations.ManagedByODHOperator: "false", + }, + }, + Spec: appsv1.DeploymentSpec{ + Strategy: appsv1.DeploymentStrategy{ + Type: appsv1.RollingUpdateDeploymentStrategyType, + }, + }, + }) + + g.Expect(err).ShouldNot(HaveOccurred()) + + cl, err := 
fakeclient.New(oldObj) + g.Expect(err).ShouldNot(HaveOccurred()) + + rr := types.ReconciliationRequest{ + Client: cl, + DSCI: &dsciv1.DSCInitialization{Spec: dsciv1.DSCInitializationSpec{ApplicationsNamespace: ns}}, + Instance: &componentApi.Dashboard{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + }, + }, + Release: cluster.Release{ + Name: cluster.OpenDataHub, + Version: version.OperatorVersion{Version: semver.Version{ + Major: 1, Minor: 2, Patch: 3, + }}}, + Resources: []unstructured.Unstructured{*newObj}, + } + + err = action(ctx, &rr) + g.Expect(err).ShouldNot(HaveOccurred()) + + err = cl.Get(ctx, client.ObjectKeyFromObject(newObj), newObj) + g.Expect(err).ShouldNot(HaveOccurred()) + + g.Expect(newObj).Should(And( + jq.Match(`.metadata.annotations | has("%s") | not`, annotations.ManagedByODHOperator), + jq.Match(`.spec.strategy.type == "%s"`, appsv1.RecreateDeploymentStrategyType), + )) +} + +func TestDeployNotOwnedCreate(t *testing.T) { + g := NewWithT(t) + + ctx := context.Background() + ns := xid.New().String() + name := xid.New().String() + + action := deploy.NewAction( + // fake client does not yet support SSA + // - https://github.com/kubernetes/kubernetes/issues/115598 + // - https://github.com/kubernetes-sigs/controller-runtime/issues/2341 + deploy.WithMode(deploy.ModePatch), + ) + + newObj, err := resources.ToUnstructured(&appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: appsv1.SchemeGroupVersion.String(), + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + Annotations: map[string]string{ + annotations.ManagedByODHOperator: "false", + }, + }, + Spec: appsv1.DeploymentSpec{ + Strategy: appsv1.DeploymentStrategy{ + Type: appsv1.RollingUpdateDeploymentStrategyType, + }, + }, + }) + + g.Expect(err).ShouldNot(HaveOccurred()) + + cl, err := fakeclient.New() + g.Expect(err).ShouldNot(HaveOccurred()) + + rr := types.ReconciliationRequest{ + Client: cl, + DSCI: &dsciv1.DSCInitialization{Spec: 
dsciv1.DSCInitializationSpec{ApplicationsNamespace: ns}}, + Instance: &componentApi.Dashboard{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + }, + }, + Release: cluster.Release{ + Name: cluster.OpenDataHub, + Version: version.OperatorVersion{Version: semver.Version{ + Major: 1, Minor: 2, Patch: 3, + }}}, + Resources: []unstructured.Unstructured{*newObj}, + } + + err = action(ctx, &rr) + g.Expect(err).ShouldNot(HaveOccurred()) + + err = cl.Get(ctx, client.ObjectKeyFromObject(newObj), newObj) + g.Expect(err).ShouldNot(HaveOccurred()) + + g.Expect(newObj).Should(And( + jq.Match(`.metadata.annotations | has("%s") | not`, annotations.ManagedByODHOperator), + jq.Match(`.spec.strategy.type == "%s"`, appsv1.RollingUpdateDeploymentStrategyType), + )) +} + +func TestDeployClusterRole(t *testing.T) { + g := NewWithT(t) + s := runtime.NewScheme() + + utilruntime.Must(corev1.AddToScheme(s)) + utilruntime.Must(appsv1.AddToScheme(s)) + utilruntime.Must(apiextensionsv1.AddToScheme(s)) + utilruntime.Must(componentApi.AddToScheme(s)) + utilruntime.Must(rbacv1.AddToScheme(s)) + + projectDir, err := envtestutil.FindProjectRoot() + g.Expect(err).NotTo(HaveOccurred()) + + envTest := &envtest.Environment{ + CRDInstallOptions: envtest.CRDInstallOptions{ + Scheme: s, + Paths: []string{ + filepath.Join(projectDir, "config", "crd", "bases"), + }, + ErrorIfPathMissing: true, + CleanUpAfterUse: false, + }, + } + + t.Cleanup(func() { + _ = envTest.Stop() + }) + + cfg, err := envTest.Start() + g.Expect(err).NotTo(HaveOccurred()) + + envTestClient, err := client.New(cfg, client.Options{Scheme: s}) + g.Expect(err).NotTo(HaveOccurred()) + + cli, err := odhCli.NewFromConfig(cfg, envTestClient) + g.Expect(err).NotTo(HaveOccurred()) + + t.Run("aggregation", func(t *testing.T) { + ctx := context.Background() + name := xid.New().String() + + deployClusterRoles(t, ctx, cli, rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Rules: []rbacv1.PolicyRule{{ + Verbs: []string{"*"}, 
+ Resources: []string{"*"}, + APIGroups: []string{"*"}, + }}, + AggregationRule: &rbacv1.AggregationRule{ + ClusterRoleSelectors: []metav1.LabelSelector{{ + MatchLabels: map[string]string{"foo": "bar"}, + }}, + }, + }) + + out := rbacv1.ClusterRole{} + err = cli.Get(ctx, client.ObjectKey{Name: name}, &out) + g.Expect(err).NotTo(HaveOccurred()) + + g.Expect(out).To(gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{ + "Rules": BeEmpty(), + })) + }) + + t.Run("no aggregation", func(t *testing.T) { + ctx := context.Background() + name := xid.New().String() + + deployClusterRoles(t, ctx, cli, rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Rules: []rbacv1.PolicyRule{{ + Verbs: []string{"*"}, + Resources: []string{"*"}, + APIGroups: []string{"*"}, + }}, + }) + + out := rbacv1.ClusterRole{} + err = cli.Get(ctx, client.ObjectKey{Name: name}, &out) + g.Expect(err).NotTo(HaveOccurred()) + + g.Expect(out).To(gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{ + "Rules": HaveLen(1), + })) + }) +} + +func deployClusterRoles(t *testing.T, ctx context.Context, cli *odhCli.Client, roles ...rbacv1.ClusterRole) { + t.Helper() + + g := NewWithT(t) + + rr := types.ReconciliationRequest{ + Client: cli, + DSCI: &dsciv1.DSCInitialization{Spec: dsciv1.DSCInitializationSpec{ + ApplicationsNamespace: xid.New().String(), + }}, + Instance: &componentApi.Dashboard{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + UID: apimachinery.UID(xid.New().String()), + }, + }, + Release: cluster.Release{ + Name: cluster.OpenDataHub, + Version: version.OperatorVersion{Version: semver.Version{ + Major: 1, Minor: 2, Patch: 3, + }}}, + } + + for i := range roles { + err := rr.AddResources(roles[i].DeepCopy()) + g.Expect(err).ShouldNot(HaveOccurred()) + } + + err := deploy.NewAction()(ctx, &rr) + g.Expect(err).ShouldNot(HaveOccurred()) +} + +func TestDeployCRD(t *testing.T) { + g := NewWithT(t) + s := runtime.NewScheme() + + ctx := context.Background() + id := 
xid.New().String() + + utilruntime.Must(corev1.AddToScheme(s)) + utilruntime.Must(appsv1.AddToScheme(s)) + utilruntime.Must(apiextensionsv1.AddToScheme(s)) + utilruntime.Must(componentApi.AddToScheme(s)) + utilruntime.Must(rbacv1.AddToScheme(s)) + + projectDir, err := envtestutil.FindProjectRoot() + g.Expect(err).NotTo(HaveOccurred()) + + envTest := &envtest.Environment{ + CRDInstallOptions: envtest.CRDInstallOptions{ + Scheme: s, + Paths: []string{ + filepath.Join(projectDir, "config", "crd", "bases"), + }, + ErrorIfPathMissing: true, + CleanUpAfterUse: false, + }, + } + + t.Cleanup(func() { + _ = envTest.Stop() + }) + + cfg, err := envTest.Start() + g.Expect(err).NotTo(HaveOccurred()) + + envTestClient, err := client.New(cfg, client.Options{Scheme: s}) + g.Expect(err).NotTo(HaveOccurred()) + + cli, err := odhCli.NewFromConfig(cfg, envTestClient) + g.Expect(err).NotTo(HaveOccurred()) + + rr := types.ReconciliationRequest{ + Client: cli, + DSCI: &dsciv1.DSCInitialization{Spec: dsciv1.DSCInitializationSpec{ + ApplicationsNamespace: id, + }}, + Instance: &componentApi.Dashboard{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + UID: apimachinery.UID(id), + }, + }, + Release: cluster.Release{ + Name: cluster.OpenDataHub, + Version: version.OperatorVersion{Version: semver.Version{ + Major: 1, Minor: 2, Patch: 3, + }}}, + } + + err = rr.AddResources(&apiextensionsv1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: "acceleratorprofiles.dashboard.opendatahub.io", + }, + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "dashboard.opendatahub.io", + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Kind: "AcceleratorProfile", + ListKind: "AcceleratorProfileList", + Plural: "acceleratorprofiles", + Singular: "acceleratorprofile", + }, + Scope: apiextensionsv1.NamespaceScoped, + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1", + Served: true, + Storage: true, + Schema: 
&apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + Type: "object", + }, + }, + }, + }, + }, + }) + + g.Expect(err).NotTo(HaveOccurred()) + + err = deploy.NewAction()(ctx, &rr) + g.Expect(err).ShouldNot(HaveOccurred()) + + out := resources.GvkToUnstructured(gvk.CustomResourceDefinition) + out.SetName("acceleratorprofiles.dashboard.opendatahub.io") + + err = cli.Get(ctx, client.ObjectKeyFromObject(out), out) + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(out).Should(And( + jq.Match(`.metadata.labels."%s" == "%s"`, labels.PlatformPartOf, labels.Platform), + Not(jq.Match(`.metadata | has ("annotations")`)), + )) +} + +func TestDeployOwnerRef(t *testing.T) { + g := NewWithT(t) + s := runtime.NewScheme() + + ctx := context.Background() + id := xid.New().String() + ns := xid.New().String() + + utilruntime.Must(corev1.AddToScheme(s)) + utilruntime.Must(appsv1.AddToScheme(s)) + utilruntime.Must(apiextensionsv1.AddToScheme(s)) + utilruntime.Must(dscv1.AddToScheme(s)) + utilruntime.Must(componentApi.AddToScheme(s)) + utilruntime.Must(rbacv1.AddToScheme(s)) + + projectDir, err := envtestutil.FindProjectRoot() + g.Expect(err).NotTo(HaveOccurred()) + + envTest := &envtest.Environment{ + CRDInstallOptions: envtest.CRDInstallOptions{ + Scheme: s, + Paths: []string{ + filepath.Join(projectDir, "config", "crd", "bases"), + }, + ErrorIfPathMissing: true, + CleanUpAfterUse: false, + }, + } + + t.Cleanup(func() { + _ = envTest.Stop() + }) + + cfg, err := envTest.Start() + g.Expect(err).NotTo(HaveOccurred()) + + envTestClient, err := client.New(cfg, client.Options{Scheme: s}) + g.Expect(err).NotTo(HaveOccurred()) + + cli, err := odhCli.NewFromConfig(cfg, envTestClient) + g.Expect(err).NotTo(HaveOccurred()) + + err = cli.Create(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}) + g.Expect(err).ToNot(HaveOccurred()) + + dsc := &dscv1.DataScienceCluster{ObjectMeta: metav1.ObjectMeta{Name: "default-dsc"}} + 
dsc.SetGroupVersionKind(gvk.DataScienceCluster) + + err = cli.Create(ctx, dsc) + g.Expect(err).ToNot(HaveOccurred()) + + instance := &componentApi.Dashboard{ObjectMeta: metav1.ObjectMeta{Name: componentApi.DashboardInstanceName}} + instance.SetGroupVersionKind(gvk.Dashboard) + + err = cli.Create(ctx, instance) + g.Expect(err).ToNot(HaveOccurred()) + + // + // ConfigMap + // + + configMapRef := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "cm1", Namespace: ns}} + configMapRef.SetGroupVersionKind(gvk.ConfigMap) + + configMap := configMapRef.DeepCopy() + err = controllerutil.SetOwnerReference(dsc, configMap, s) + g.Expect(err).ToNot(HaveOccurred()) + + err = cli.Create(ctx, configMap.DeepCopy()) + g.Expect(err).ToNot(HaveOccurred()) + + // + // CustomResourceDefinition + // + + crdRef := &apiextensionsv1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: "acceleratorprofiles.dashboard.opendatahub.io", + }, + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: "dashboard.opendatahub.io", + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Kind: "AcceleratorProfile", + ListKind: "AcceleratorProfileList", + Plural: "acceleratorprofiles", + Singular: "acceleratorprofile", + }, + Scope: apiextensionsv1.NamespaceScoped, + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1", + Served: true, + Storage: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + Type: "object", + }, + }, + }, + }, + }, + } + + crdRef.SetGroupVersionKind(gvk.CustomResourceDefinition) + + crd := crdRef.DeepCopy() + err = controllerutil.SetOwnerReference(dsc, crd, s) + g.Expect(err).ToNot(HaveOccurred()) + + err = cli.Create(ctx, crd.DeepCopy()) + g.Expect(err).ToNot(HaveOccurred()) + + // + // deploy + // + + rr := types.ReconciliationRequest{ + Client: cli, + DSCI: &dsciv1.DSCInitialization{Spec: dsciv1.DSCInitializationSpec{ + ApplicationsNamespace: id, + }}, + Instance: 
instance,
+		Release: cluster.Release{
+			Name: cluster.OpenDataHub,
+			Version: version.OperatorVersion{Version: semver.Version{
+				Major: 1, Minor: 2, Patch: 3,
+			}}},
+		Manager: manager.New(nil),
+	}
+
+	rr.Manager.AddGVK(gvk.ConfigMap, true)
+
+	err = rr.AddResources(configMapRef.DeepCopy(), crdRef.DeepCopy())
+	g.Expect(err).NotTo(HaveOccurred())
+
+	err = deploy.NewAction()(ctx, &rr)
+	g.Expect(err).ShouldNot(HaveOccurred())
+
+	updatedConfigMap := &corev1.ConfigMap{}
+	err = cli.Get(ctx, client.ObjectKeyFromObject(configMapRef), updatedConfigMap)
+
+	g.Expect(err).ToNot(HaveOccurred())
+	g.Expect(updatedConfigMap.GetOwnerReferences()).Should(And(
+		HaveLen(1),
+		HaveEach(gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{
+			"Name":       Equal(instance.Name),
+			"APIVersion": Equal(gvk.Dashboard.GroupVersion().String()),
+			"Kind":       Equal(gvk.Dashboard.Kind),
+			"UID":        Equal(instance.UID),
+		})),
+	))
+
+	updatedCRD := &apiextensionsv1.CustomResourceDefinition{}
+	err = cli.Get(ctx, client.ObjectKeyFromObject(crdRef), updatedCRD)
+
+	g.Expect(err).ToNot(HaveOccurred())
+	g.Expect(updatedCRD.GetOwnerReferences()).Should(BeEmpty())
+}
diff --git a/pkg/controller/actions/errors/errors.go b/pkg/controller/actions/errors/errors.go
new file mode 100644
index 00000000000..419984ef862
--- /dev/null
+++ b/pkg/controller/actions/errors/errors.go
@@ -0,0 +1,24 @@
+package errors
+
+import (
+	"fmt"
+)
+
+// StopError is a marker error that the ComponentController uses
+// to break out from the action execution loop.
+type StopError struct { + reason error +} + +func (e StopError) Error() string { + return e.reason.Error() +} + +func NewStopErrorW(reason error) StopError { + return StopError{reason} +} +func NewStopError(format string, args ...any) StopError { + return StopError{ + fmt.Errorf(format, args...), + } +} diff --git a/pkg/controller/actions/gc/action_gc.go b/pkg/controller/actions/gc/action_gc.go new file mode 100644 index 00000000000..b39dc705bd2 --- /dev/null +++ b/pkg/controller/actions/gc/action_gc.go @@ -0,0 +1,144 @@ +package gc + +import ( + "context" + "fmt" + "slices" + "strings" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions" + odhTypes "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + odhLabels "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/services/gc" +) + +type PredicateFn func(*odhTypes.ReconciliationRequest, unstructured.Unstructured) (bool, error) +type ActionOpts func(*Action) + +type Action struct { + labels map[string]string + selector labels.Selector + unremovables []schema.GroupVersionKind + gc *gc.GC + predicateFn PredicateFn +} + +func WithLabel(name string, value string) ActionOpts { + return func(action *Action) { + if action.labels == nil { + action.labels = map[string]string{} + } + + action.labels[name] = value + } +} + +func WithLabels(values map[string]string) ActionOpts { + return func(action *Action) { + if action.labels == nil { + action.labels = map[string]string{} + } + + for k, v := range values { + action.labels[k] = v + } + } +} + +func WithUnremovables(items ...schema.GroupVersionKind) ActionOpts { + return func(action *Action) { + action.unremovables = append(action.unremovables, items...) 
+ } +} + +func WithPredicate(value PredicateFn) ActionOpts { + return func(action *Action) { + if value == nil { + return + } + + action.predicateFn = value + } +} + +func WithGC(value *gc.GC) ActionOpts { + return func(action *Action) { + if value == nil { + return + } + + action.gc = value + } +} + +func (a *Action) run(ctx context.Context, rr *odhTypes.ReconciliationRequest) error { + // To avoid the expensive GC, run it only when resources have + // been generated + if !rr.Generated { + return nil + } + + kind, err := resources.KindForObject(rr.Client.Scheme(), rr.Instance) + if err != nil { + return err + } + + controllerName := strings.ToLower(kind) + + CyclesTotal.WithLabelValues(controllerName).Inc() + + selector := a.selector + if selector == nil { + selector = labels.SelectorFromSet(map[string]string{ + odhLabels.PlatformPartOf: strings.ToLower(kind), + }) + } + + deleted, err := a.gc.Run( + ctx, + selector, + func(ctx context.Context, obj unstructured.Unstructured) (bool, error) { + if slices.Contains(a.unremovables, obj.GroupVersionKind()) { + return false, nil + } + + return a.predicateFn(rr, obj) + }, + ) + + if err != nil { + return fmt.Errorf("cannot run gc: %w", err) + } + + if deleted > 0 { + DeletedTotal.WithLabelValues(controllerName).Add(float64(deleted)) + } + + return nil +} + +func NewAction(opts ...ActionOpts) actions.Fn { + action := Action{} + action.predicateFn = DefaultPredicate + action.unremovables = make([]schema.GroupVersionKind, 0) + + for _, opt := range opts { + opt(&action) + } + + if len(action.labels) > 0 { + action.selector = labels.SelectorFromSet(action.labels) + } + + // TODO: refactor + if action.gc == nil { + action.gc = gc.Instance + } + + return action.run +} diff --git a/pkg/controller/actions/gc/action_gc_metrics.go b/pkg/controller/actions/gc/action_gc_metrics.go new file mode 100644 index 00000000000..d345fe6536f --- /dev/null +++ b/pkg/controller/actions/gc/action_gc_metrics.go @@ -0,0 +1,43 @@ +package gc + 
+import ( + "github.com/prometheus/client_golang/prometheus" + "sigs.k8s.io/controller-runtime/pkg/metrics" +) + +var ( + // DeletedTotal is a prometheus counter metrics which holds the total number + // of resource deleted by the action per controller. It has one label. + // controller label refers to the controller name. + DeletedTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "action_gc_deleted_total", + Help: "Number of GCed resources", + }, + []string{ + "controller", + }, + ) + + // CyclesTotal is a prometheus counter metrics which holds the total number + // gc cycles per controller. It has one label. + // controller label refers to the controller name. + CyclesTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "action_gc_cycles_total", + Help: "Number of GC cycles", + }, + []string{ + "controller", + }, + ) +) + +// init register metrics to the global registry from controller-runtime/pkg/metrics. +// see https://book.kubebuilder.io/reference/metrics#publishing-additional-metrics +// +//nolint:gochecknoinits +func init() { + metrics.Registry.MustRegister(DeletedTotal) + metrics.Registry.MustRegister(CyclesTotal) +} diff --git a/pkg/controller/actions/gc/action_gc_support.go b/pkg/controller/actions/gc/action_gc_support.go new file mode 100644 index 00000000000..6ee3da9ebff --- /dev/null +++ b/pkg/controller/actions/gc/action_gc_support.go @@ -0,0 +1,46 @@ +package gc + +import ( + "fmt" + "strconv" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + odhTypes "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + odhAnnotations "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/annotations" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" +) + +func DefaultPredicate(rr *odhTypes.ReconciliationRequest, obj unstructured.Unstructured) (bool, error) { + if obj.GetAnnotations() == nil { + return false, nil + } + + pv := resources.GetAnnotation(&obj, 
odhAnnotations.PlatformVersion) + pt := resources.GetAnnotation(&obj, odhAnnotations.PlatformType) + ig := resources.GetAnnotation(&obj, odhAnnotations.InstanceGeneration) + iu := resources.GetAnnotation(&obj, odhAnnotations.InstanceUID) + + if pv == "" || pt == "" || ig == "" || iu == "" { + return false, nil + } + + if pv != rr.Release.Version.String() { + return true, nil + } + + if pt != string(rr.Release.Name) { + return true, nil + } + + if iu != string(rr.Instance.GetUID()) { + return true, nil + } + + g, err := strconv.Atoi(ig) + if err != nil { + return false, fmt.Errorf("cannot determine generation: %w", err) + } + + return rr.Instance.GetGeneration() != int64(g), nil +} diff --git a/pkg/controller/actions/gc/action_gc_test.go b/pkg/controller/actions/gc/action_gc_test.go new file mode 100644 index 00000000000..2020c7ee3b2 --- /dev/null +++ b/pkg/controller/actions/gc/action_gc_test.go @@ -0,0 +1,255 @@ +package gc_test + +import ( + "context" + "strings" + "testing" + + "github.com/blang/semver/v4" + gTypes "github.com/onsi/gomega/types" + "github.com/operator-framework/api/pkg/lib/version" + "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/rs/xid" + appsv1 "k8s.io/api/apps/v1" + authorizationv1 "k8s.io/api/authorization/v1" + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + k8serr "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + apytypes "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + ctrlCli "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + 
"github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/gc" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/client" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/annotations" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" + gcSvc "github.com/opendatahub-io/opendatahub-operator/v2/pkg/services/gc" + + . "github.com/onsi/gomega" +) + +func TestGcAction(t *testing.T) { + g := NewWithT(t) + + s := runtime.NewScheme() + ctx := context.Background() + + utilruntime.Must(corev1.AddToScheme(s)) + utilruntime.Must(appsv1.AddToScheme(s)) + utilruntime.Must(apiextensionsv1.AddToScheme(s)) + utilruntime.Must(authorizationv1.AddToScheme(s)) + + envTest := &envtest.Environment{ + CRDInstallOptions: envtest.CRDInstallOptions{ + Scheme: s, + CleanUpAfterUse: true, + }, + } + + t.Cleanup(func() { + _ = envTest.Stop() + }) + + cfg, err := envTest.Start() + g.Expect(err).NotTo(HaveOccurred()) + + envTestClient, err := ctrlCli.New(cfg, ctrlCli.Options{Scheme: s}) + g.Expect(err).NotTo(HaveOccurred()) + + cli, err := client.NewFromConfig(cfg, envTestClient) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(cli).NotTo(BeNil()) + + tests := []struct { + name string + version semver.Version + generated bool + matcher gTypes.GomegaMatcher + metricsMatcher gTypes.GomegaMatcher + labels map[string]string + options []gc.ActionOpts + uidFn func(request *types.ReconciliationRequest) string + }{ + { + name: "should delete leftovers", + version: semver.Version{Major: 0, Minor: 0, Patch: 1}, + generated: true, + matcher: Satisfy(k8serr.IsNotFound), + metricsMatcher: BeNumerically("==", 1), + uidFn: func(rr *types.ReconciliationRequest) string { return string(rr.Instance.GetUID()) }, + }, + { + name: "should 
not delete resources because same annotations", + version: semver.Version{Major: 0, Minor: 1, Patch: 0}, + generated: true, + matcher: Not(HaveOccurred()), + metricsMatcher: BeNumerically("==", 1), + uidFn: func(rr *types.ReconciliationRequest) string { return string(rr.Instance.GetUID()) }, + }, + { + name: "should not delete resources because of no generated resources have been detected", + version: semver.Version{Major: 0, Minor: 0, Patch: 1}, + generated: false, + matcher: Not(HaveOccurred()), + metricsMatcher: BeNumerically("==", 0), + uidFn: func(rr *types.ReconciliationRequest) string { return string(rr.Instance.GetUID()) }, + }, + { + name: "should not delete resources because of selector", + version: semver.Version{Major: 0, Minor: 0, Patch: 1}, + generated: true, + matcher: Not(HaveOccurred()), + metricsMatcher: BeNumerically("==", 1), + labels: map[string]string{"foo": "bar"}, + options: []gc.ActionOpts{gc.WithLabel("foo", "baz")}, + uidFn: func(rr *types.ReconciliationRequest) string { return string(rr.Instance.GetUID()) }, + }, + { + name: "should not delete resources because of unremovable type", + version: semver.Version{Major: 0, Minor: 0, Patch: 1}, + generated: true, + matcher: Not(HaveOccurred()), + metricsMatcher: BeNumerically("==", 1), + options: []gc.ActionOpts{gc.WithUnremovables(gvk.ConfigMap)}, + uidFn: func(rr *types.ReconciliationRequest) string { return string(rr.Instance.GetUID()) }, + }, + { + name: "should not delete resources because of predicate", + version: semver.Version{Major: 0, Minor: 0, Patch: 1}, + generated: true, + matcher: Not(HaveOccurred()), + metricsMatcher: BeNumerically("==", 1), + options: []gc.ActionOpts{gc.WithPredicate( + func(request *types.ReconciliationRequest, unstructured unstructured.Unstructured) (bool, error) { + return unstructured.GroupVersionKind() != gvk.ConfigMap, nil + }, + )}, + uidFn: func(rr *types.ReconciliationRequest) string { return string(rr.Instance.GetUID()) }, + }, + { + name: "should 
delete leftovers because of UID", + version: semver.Version{Major: 0, Minor: 1, Patch: 0}, + generated: true, + matcher: Satisfy(k8serr.IsNotFound), + metricsMatcher: BeNumerically("==", 1), + uidFn: func(rr *types.ReconciliationRequest) string { return xid.New().String() }, + }, + { + name: "should not delete leftovers because of UID", + version: semver.Version{Major: 0, Minor: 1, Patch: 0}, + generated: true, + matcher: Not(HaveOccurred()), + metricsMatcher: BeNumerically("==", 1), + uidFn: func(rr *types.ReconciliationRequest) string { return string(rr.Instance.GetUID()) }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gc.CyclesTotal.Reset() + gc.CyclesTotal.WithLabelValues("dashboard").Add(0) + + g := NewWithT(t) + nsn := xid.New().String() + id := xid.New().String() + + gci := gcSvc.New( + cli, + nsn, + // This is required as there are no kubernetes controller running + // with the envtest, hence we can't use the foreground deletion + // policy (default) + gcSvc.WithPropagationPolicy(metav1.DeletePropagationBackground), + ) + + ns := corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: nsn, + }, + } + + g.Expect(cli.Create(ctx, &ns)). + NotTo(HaveOccurred()) + g.Expect(gci.Start(ctx)). 
+ NotTo(HaveOccurred()) + + rr := types.ReconciliationRequest{ + Client: cli, + DSCI: &dsciv1.DSCInitialization{ + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + }, + }, + Instance: &componentApi.Dashboard{ + TypeMeta: metav1.TypeMeta{ + APIVersion: componentApi.GroupVersion.String(), + Kind: componentApi.DashboardKind, + }, + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + UID: apytypes.UID(id), + }, + }, + Release: cluster.Release{ + Name: cluster.OpenDataHub, + Version: version.OperatorVersion{ + Version: tt.version, + }, + }, + Generated: tt.generated, + } + + l := make(map[string]string) + for k, v := range tt.labels { + l[k] = v + } + + l[labels.PlatformPartOf] = strings.ToLower(componentApi.DashboardKind) + + cm := corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "gc-cm", + Namespace: nsn, + Annotations: map[string]string{ + annotations.InstanceGeneration: "1", + annotations.InstanceUID: tt.uidFn(&rr), + annotations.PlatformVersion: "0.1.0", + annotations.PlatformType: string(cluster.OpenDataHub), + }, + Labels: l, + }, + } + + g.Expect(cli.Create(ctx, &cm)). + NotTo(HaveOccurred()) + + opts := make([]gc.ActionOpts, 0, len(tt.options)+1) + opts = append(opts, gc.WithGC(gci)) + opts = append(opts, tt.options...) + + a := gc.NewAction(opts...) 
+ + err = a(ctx, &rr) + g.Expect(err).NotTo(HaveOccurred()) + + if tt.matcher != nil { + err = cli.Get(ctx, ctrlCli.ObjectKeyFromObject(&cm), &corev1.ConfigMap{}) + g.Expect(err).To(tt.matcher) + } + + if tt.metricsMatcher != nil { + ct := testutil.ToFloat64(gc.CyclesTotal) + g.Expect(ct).Should(tt.metricsMatcher) + } + }) + } +} diff --git a/pkg/controller/actions/render/kustomize/action_render_manifests.go b/pkg/controller/actions/render/kustomize/action_render_manifests.go new file mode 100644 index 00000000000..b6007aa5325 --- /dev/null +++ b/pkg/controller/actions/render/kustomize/action_render_manifests.go @@ -0,0 +1,157 @@ +package kustomize + +import ( + "bytes" + "context" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/kustomize/kyaml/filesys" + + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/render" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/manifests/kustomize" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" +) + +const RendererEngine = "kustomize" + +// Action takes a set of manifest locations and render them as Unstructured resources for +// further processing. The Action can eventually cache the results in memory to avoid doing +// a full manifest rendering when not needed. 
+type Action struct { + keOpts []kustomize.EngineOptsFn + ke *kustomize.Engine + + cachingKeyFn render.CachingKeyFn + cachingKey []byte + cachedResources resources.UnstructuredList +} + +type ActionOpts func(*Action) + +func WithEngineFS(value filesys.FileSystem) ActionOpts { + return func(a *Action) { + a.keOpts = append(a.keOpts, kustomize.WithEngineFS(value)) + } +} + +func WithLabel(name string, value string) ActionOpts { + return func(a *Action) { + a.keOpts = append(a.keOpts, kustomize.WithEngineRenderOpts(kustomize.WithLabel(name, value))) + } +} + +func WithLabels(values map[string]string) ActionOpts { + return func(a *Action) { + a.keOpts = append(a.keOpts, kustomize.WithEngineRenderOpts(kustomize.WithLabels(values))) + } +} + +func WithAnnotation(name string, value string) ActionOpts { + return func(a *Action) { + a.keOpts = append(a.keOpts, kustomize.WithEngineRenderOpts(kustomize.WithAnnotation(name, value))) + } +} + +func WithAnnotations(values map[string]string) ActionOpts { + return func(a *Action) { + a.keOpts = append(a.keOpts, kustomize.WithEngineRenderOpts(kustomize.WithAnnotations(values))) + } +} + +func WithManifestsOptions(values ...kustomize.EngineOptsFn) ActionOpts { + return func(action *Action) { + action.keOpts = append(action.keOpts, values...) 
+ } +} + +func WithCache() ActionOpts { + return func(action *Action) { + action.cachingKeyFn = types.Hash + } +} + +func (a *Action) run(_ context.Context, rr *types.ReconciliationRequest) error { + var err error + var cachingKey []byte + + inst, ok := rr.Instance.(common.WithDevFlags) + if ok && inst.GetDevFlags() != nil { + // if dev flags are enabled, caching is disabled as dev flags are meant for + // development time only where caching is not relevant + a.cachingKey = nil + } else { + cachingKey, err = a.cachingKeyFn(rr) + if err != nil { + return fmt.Errorf("unable to calculate checksum of reconciliation object: %w", err) + } + } + + var result resources.UnstructuredList + + if len(cachingKey) != 0 && bytes.Equal(cachingKey, a.cachingKey) && len(a.cachedResources) != 0 { + result = a.cachedResources + } else { + res, err := a.render(rr) + if err != nil { + return fmt.Errorf("unable to render reconciliation object: %w", err) + } + + result = res + + if len(cachingKey) != 0 { + a.cachingKey = cachingKey + a.cachedResources = result + } + + controllerName := strings.ToLower(rr.Instance.GetObjectKind().GroupVersionKind().Kind) + render.RenderedResourcesTotal.WithLabelValues(controllerName, RendererEngine).Add(float64(len(result))) + + rr.Generated = true + } + + // deep copy object so changes done in the pipelines won't + // alter them + rr.Resources = append(rr.Resources, result.Clone()...) + + return nil +} + +func (a *Action) render(rr *types.ReconciliationRequest) ([]unstructured.Unstructured, error) { + result := make([]unstructured.Unstructured, 0) + + for i := range rr.Manifests { + renderedResources, err := a.ke.Render( + rr.Manifests[i].String(), + kustomize.WithNamespace(rr.DSCI.Spec.ApplicationsNamespace), + ) + + if err != nil { + return nil, err + } + + result = append(result, renderedResources...) 
+ } + + return result, nil +} + +func NewAction(opts ...ActionOpts) actions.Fn { + action := Action{ + cachingKeyFn: func(rr *types.ReconciliationRequest) ([]byte, error) { + return nil, nil + }, + } + + for _, opt := range opts { + opt(&action) + } + + action.ke = kustomize.NewEngine(action.keOpts...) + + return action.run +} diff --git a/pkg/controller/actions/render/kustomize/action_render_manifests_test.go b/pkg/controller/actions/render/kustomize/action_render_manifests_test.go new file mode 100644 index 00000000000..01a7a0f7f4d --- /dev/null +++ b/pkg/controller/actions/render/kustomize/action_render_manifests_test.go @@ -0,0 +1,247 @@ +package kustomize_test + +import ( + "context" + "path" + "testing" + + "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/rs/xid" + "sigs.k8s.io/kustomize/kyaml/filesys" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/render" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/render/kustomize" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + mk "github.com/opendatahub-io/opendatahub-operator/v2/pkg/manifests/kustomize" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/fakeclient" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/matchers/jq" + + . 
"github.com/onsi/gomega" +) + +const testRenderResourcesKustomization = ` +apiVersion: kustomize.config.k8s.io/v1beta1 +resources: +- test-resources-cm.yaml +- test-resources-deployment-managed.yaml +- test-resources-deployment-unmanaged.yaml +- test-resources-deployment-forced.yaml +` + +const testRenderResourcesConfigMap = ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-cm +data: + foo: bar +` + +const testRenderResourcesManaged = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-deployment-managed +spec: + replicas: 3 + template: + spec: + containers: + - name: nginx + image: nginx:1.14.2 + resources: + limits: + memory: 200Mi + cpu: 1 + requests: + memory: 100Mi + cpu: 100m +` + +const testRenderResourcesUnmanaged = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-deployment-unmanaged +spec: + replicas: 3 + template: + spec: + containers: + - name: nginx + image: nginx:1.14.2 + resources: + limits: + memory: 200Mi + cpu: 1 + requests: + memory: 100Mi + cpu: 100m +` +const testRenderResourcesForced = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-deployment-forced +spec: + replicas: 3 + template: + spec: + containers: + - name: nginx + image: nginx:1.14.2 +` + +func TestRenderResourcesAction(t *testing.T) { + g := NewWithT(t) + + ctx := context.Background() + ns := xid.New().String() + id := xid.New().String() + fs := filesys.MakeFsInMemory() + + _ = fs.MkdirAll(path.Join(id, mk.DefaultKustomizationFilePath)) + _ = fs.WriteFile(path.Join(id, mk.DefaultKustomizationFileName), []byte(testRenderResourcesKustomization)) + _ = fs.WriteFile(path.Join(id, "test-resources-cm.yaml"), []byte(testRenderResourcesConfigMap)) + _ = fs.WriteFile(path.Join(id, "test-resources-deployment-managed.yaml"), []byte(testRenderResourcesManaged)) + _ = fs.WriteFile(path.Join(id, "test-resources-deployment-unmanaged.yaml"), []byte(testRenderResourcesUnmanaged)) + _ = fs.WriteFile(path.Join(id, 
"test-resources-deployment-forced.yaml"), []byte(testRenderResourcesForced)) + + cl, err := fakeclient.New() + g.Expect(err).ShouldNot(HaveOccurred()) + + action := kustomize.NewAction( + kustomize.WithLabel("component.opendatahub.io/name", "foo"), + kustomize.WithLabel("platform.opendatahub.io/namespace", ns), + kustomize.WithAnnotation("platform.opendatahub.io/release", "1.2.3"), + kustomize.WithAnnotation("platform.opendatahub.io/type", "managed"), + // for testing + kustomize.WithManifestsOptions( + mk.WithEngineFS(fs), + ), + ) + + rr := types.ReconciliationRequest{ + Client: cl, + Instance: &componentApi.Dashboard{}, + DSCI: &dsciv1.DSCInitialization{Spec: dsciv1.DSCInitializationSpec{ApplicationsNamespace: ns}}, + Release: cluster.Release{Name: cluster.OpenDataHub}, + Manifests: []types.ManifestInfo{{Path: id}}, + } + + err = action(ctx, &rr) + + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(rr.Resources).Should(And( + HaveLen(4), + HaveEach(And( + jq.Match(`.metadata.namespace == "%s"`, ns), + jq.Match(`.metadata.labels."component.opendatahub.io/name" == "%s"`, "foo"), + jq.Match(`.metadata.labels."platform.opendatahub.io/namespace" == "%s"`, ns), + jq.Match(`.metadata.annotations."platform.opendatahub.io/release" == "%s"`, "1.2.3"), + jq.Match(`.metadata.annotations."platform.opendatahub.io/type" == "%s"`, "managed"), + )), + )) +} + +const testRenderResourcesWithCacheKustomization = ` +apiVersion: kustomize.config.k8s.io/v1beta1 +resources: +- test-resources-deployment.yaml +` + +const testRenderResourcesWithCacheDeployment = ` +apiVersion: apps/v1 +kind: Deployment +metadata: + name: test-deployment-managed +spec: + replicas: 3 + template: + spec: + containers: + - name: nginx + image: nginx:1.14.2 + resources: + limits: + memory: 200Mi + cpu: 1 + requests: + memory: 100Mi + cpu: 100m +` + +func TestRenderResourcesWithCacheAction(t *testing.T) { + g := NewWithT(t) + + ctx := context.Background() + ns := xid.New().String() + id := xid.New().String() 
+ fs := filesys.MakeFsInMemory() + + _ = fs.MkdirAll(path.Join(id, mk.DefaultKustomizationFilePath)) + _ = fs.WriteFile(path.Join(id, mk.DefaultKustomizationFileName), []byte(testRenderResourcesWithCacheKustomization)) + _ = fs.WriteFile(path.Join(id, "test-resources-deployment.yaml"), []byte(testRenderResourcesWithCacheDeployment)) + + cl, err := fakeclient.New() + g.Expect(err).ShouldNot(HaveOccurred()) + + action := kustomize.NewAction( + kustomize.WithCache(), + kustomize.WithLabel(labels.PlatformPartOf, "foo"), + kustomize.WithLabel("platform.opendatahub.io/namespace", ns), + kustomize.WithAnnotation("platform.opendatahub.io/release", "1.2.3"), + kustomize.WithAnnotation("platform.opendatahub.io/type", "managed"), + // for testing + kustomize.WithManifestsOptions( + mk.WithEngineFS(fs), + ), + ) + + render.RenderedResourcesTotal.Reset() + + for i := range 3 { + d := componentApi.Dashboard{} + + if i >= 1 { + d.Generation = 1 + } + + rr := types.ReconciliationRequest{ + Client: cl, + Instance: &d, + DSCI: &dsciv1.DSCInitialization{Spec: dsciv1.DSCInitializationSpec{ApplicationsNamespace: ns}}, + Release: cluster.Release{Name: cluster.OpenDataHub}, + Manifests: []types.ManifestInfo{{Path: id}}, + } + + err = action(ctx, &rr) + + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(rr.Resources).Should(And( + HaveLen(1), + HaveEach(And( + jq.Match(`.metadata.namespace == "%s"`, ns), + jq.Match(`.metadata.labels."%s" == "%s"`, labels.PlatformPartOf, "foo"), + jq.Match(`.metadata.labels."platform.opendatahub.io/namespace" == "%s"`, ns), + jq.Match(`.metadata.annotations."platform.opendatahub.io/release" == "%s"`, "1.2.3"), + jq.Match(`.metadata.annotations."platform.opendatahub.io/type" == "%s"`, "managed"), + )), + )) + + rc := testutil.ToFloat64(render.RenderedResourcesTotal) + + switch i { + case 0: + g.Expect(rc).Should(BeNumerically("==", 1)) + case 1: + g.Expect(rc).Should(BeNumerically("==", 2)) + case 2: + g.Expect(rc).Should(BeNumerically("==", 2)) + } + } 
+} diff --git a/pkg/controller/actions/render/render_metrics.go b/pkg/controller/actions/render/render_metrics.go new file mode 100644 index 00000000000..761308708fa --- /dev/null +++ b/pkg/controller/actions/render/render_metrics.go @@ -0,0 +1,32 @@ +package render + +import ( + "github.com/prometheus/client_golang/prometheus" + "sigs.k8s.io/controller-runtime/pkg/metrics" +) + +var ( + // RenderedResourcesTotal is a prometheus counter metric that holds the total + // number of resources rendered by the action per controller and rendering type. + // It has two labels. + // controller label refers to the controller name. + // engine label refers to the rendering engine. + RenderedResourcesTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "action_renderer_manifests_total", + Help: "Number of rendered resources", + }, + []string{ + "controller", + "engine", + }, + ) +) + +// init registers metrics to the global registry from controller-runtime/pkg/metrics. +// see https://book.kubebuilder.io/reference/metrics#publishing-additional-metrics +// +//nolint:gochecknoinits +func init() { + metrics.Registry.MustRegister(RenderedResourcesTotal) +} diff --git a/pkg/controller/actions/render/render_support.go b/pkg/controller/actions/render/render_support.go new file mode 100644 index 00000000000..259ea6823d4 --- /dev/null +++ b/pkg/controller/actions/render/render_support.go @@ -0,0 +1,7 @@ +package render + +import ( + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" +) + +type CachingKeyFn func(rr *types.ReconciliationRequest) ([]byte, error) diff --git a/pkg/controller/actions/render/template/action_render_templates.go b/pkg/controller/actions/render/template/action_render_templates.go new file mode 100644 index 00000000000..106cb6e1889 --- /dev/null +++ b/pkg/controller/actions/render/template/action_render_templates.go @@ -0,0 +1,155 @@ +package template + +import ( + "bytes" + "context" + "fmt" + "io/fs" + "maps" + "strings" + 
gt "text/template" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/serializer" + + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/render" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" +) + +const ( + RendererEngine = "template" + ComponentKey = "Component" + DSCIKey = "DSCI" +) + +// Action takes a set of template locations and render them as Unstructured resources for +// further processing. The Action can eventually cache the results in memory to avoid doing +// a full manifest rendering when not needed. +type Action struct { + cachingKeyFn render.CachingKeyFn + cachingKey []byte + cachedResources resources.UnstructuredList + data map[string]any +} + +type ActionOpts func(*Action) + +func WithCache() ActionOpts { + return func(action *Action) { + action.cachingKeyFn = types.Hash + } +} + +func WithData(data map[string]any) ActionOpts { + return func(action *Action) { + for k, v := range data { + action.data[k] = v + } + } +} + +func (a *Action) run(_ context.Context, rr *types.ReconciliationRequest) error { + var err error + var cachingKey []byte + + inst, ok := rr.Instance.(common.WithDevFlags) + if ok && inst.GetDevFlags() != nil { + // if dev flags are enabled, caching is disabled as dev flags are meant for + // development time only where caching is not relevant + a.cachingKey = nil + } else { + cachingKey, err = a.cachingKeyFn(rr) + if err != nil { + return fmt.Errorf("unable to calculate checksum of reconciliation object: %w", err) + } + } + + var result resources.UnstructuredList + + if len(cachingKey) != 0 && bytes.Equal(cachingKey, a.cachingKey) && len(a.cachedResources) != 0 { + result = a.cachedResources + } else { + res, err := a.render(rr) + if 
err != nil { + return fmt.Errorf("unable to render reconciliation object: %w", err) + } + + result = res + + if len(cachingKey) != 0 { + a.cachingKey = cachingKey + a.cachedResources = result + } + + controllerName := strings.ToLower(rr.Instance.GetObjectKind().GroupVersionKind().Kind) + render.RenderedResourcesTotal.WithLabelValues(controllerName, RendererEngine).Add(float64(len(result))) + + rr.Generated = true + } + + // deep copy object so changes done in the pipelines won't + // alter them + rr.Resources = append(rr.Resources, result.Clone()...) + + return nil +} + +func (a *Action) render(rr *types.ReconciliationRequest) ([]unstructured.Unstructured, error) { + decoder := serializer.NewCodecFactory(rr.Client.Scheme()).UniversalDeserializer() + + data := maps.Clone(a.data) + data[ComponentKey] = rr.Instance + data[DSCIKey] = rr.DSCI + + result := make([]unstructured.Unstructured, 0) + + var buffer bytes.Buffer + + for i := range rr.Templates { + content, err := fs.ReadFile(rr.Templates[i].FS, rr.Templates[i].Path) + if err != nil { + return nil, fmt.Errorf("failed to read file: %w", err) + } + + tmpl, err := gt.New(rr.Templates[i].Path). + Option("missingkey=error"). + Parse(string(content)) + + if err != nil { + return nil, fmt.Errorf("failed to parse template: %w", err) + } + + buffer.Reset() + err = tmpl.Execute(&buffer, data) + if err != nil { + return nil, fmt.Errorf("failed to execute template: %w", err) + } + + u, err := resources.Decode(decoder, buffer.Bytes()) + if err != nil { + return nil, fmt.Errorf("failed to decode template: %w", err) + } + + result = append(result, u...) 
+ } + + return result, nil +} + +func NewAction(opts ...ActionOpts) actions.Fn { + action := Action{ + cachingKeyFn: func(rr *types.ReconciliationRequest) ([]byte, error) { + return nil, nil + }, + data: make(map[string]any), + } + + for _, opt := range opts { + opt(&action) + } + + return action.run +} diff --git a/pkg/controller/actions/render/template/action_render_templates_test.go b/pkg/controller/actions/render/template/action_render_templates_test.go new file mode 100644 index 00000000000..299f91c69e8 --- /dev/null +++ b/pkg/controller/actions/render/template/action_render_templates_test.go @@ -0,0 +1,203 @@ +package template_test + +import ( + "context" + "embed" + "testing" + + "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/rs/xid" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + infrav1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/infrastructure/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/render" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/render/template" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/fakeclient" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/matchers/jq" + + . 
"github.com/onsi/gomega" +) + +//go:embed resources +var testFS embed.FS + +func TestRenderTemplate(t *testing.T) { + g := NewWithT(t) + + ctx := context.Background() + ns := xid.New().String() + + cl, err := fakeclient.New() + g.Expect(err).ShouldNot(HaveOccurred()) + + action := template.NewAction() + + rr := types.ReconciliationRequest{ + Client: cl, + Instance: &componentApi.Dashboard{ + ObjectMeta: metav1.ObjectMeta{ + Name: ns, + }, + }, + DSCI: &dsciv1.DSCInitialization{ + Spec: dsciv1.DSCInitializationSpec{ + ApplicationsNamespace: ns, + ServiceMesh: &infrav1.ServiceMeshSpec{ + ControlPlane: infrav1.ControlPlaneSpec{ + Name: xid.New().String(), + Namespace: xid.New().String(), + }, + }, + }, + }, + Release: cluster.Release{Name: cluster.OpenDataHub}, + Templates: []types.TemplateInfo{{FS: testFS, Path: "resources/smm.tmpl.yaml"}}, + } + + err = action(ctx, &rr) + + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(rr.Resources).Should(And( + HaveLen(1), + HaveEach(And( + jq.Match(`.metadata.namespace == "%s"`, ns), + jq.Match(`.spec.controlPlaneRef.namespace == "%s"`, rr.DSCI.Spec.ServiceMesh.ControlPlane.Namespace), + jq.Match(`.spec.controlPlaneRef.name == "%s"`, rr.DSCI.Spec.ServiceMesh.ControlPlane.Name), + jq.Match(`.metadata.annotations."instance-name" == "%s"`, rr.Instance.GetName()), + )), + )) +} + +func TestRenderTemplateWithData(t *testing.T) { + g := NewWithT(t) + + ctx := context.Background() + ns := xid.New().String() + id := xid.New().String() + name := xid.New().String() + + cl, err := fakeclient.New() + g.Expect(err).ShouldNot(HaveOccurred()) + + action := template.NewAction( + template.WithData(map[string]any{ + "ID": id, + "SMM": map[string]any{ + "Name": name, + }, + }), + ) + + rr := types.ReconciliationRequest{ + Client: cl, + Instance: &componentApi.Dashboard{ + ObjectMeta: metav1.ObjectMeta{ + Name: ns, + }, + }, + DSCI: &dsciv1.DSCInitialization{ + Spec: dsciv1.DSCInitializationSpec{ + ApplicationsNamespace: ns, + ServiceMesh: 
&infrav1.ServiceMeshSpec{ + ControlPlane: infrav1.ControlPlaneSpec{ + Name: xid.New().String(), + Namespace: xid.New().String(), + }, + }, + }, + }, + Release: cluster.Release{Name: cluster.OpenDataHub}, + Templates: []types.TemplateInfo{{FS: testFS, Path: "resources/smm-data.tmpl.yaml"}}, + } + + err = action(ctx, &rr) + + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(rr.Resources).Should(And( + HaveLen(1), + HaveEach(And( + jq.Match(`.metadata.name == "%s"`, name), + jq.Match(`.metadata.namespace == "%s"`, ns), + jq.Match(`.spec.controlPlaneRef.namespace == "%s"`, rr.DSCI.Spec.ServiceMesh.ControlPlane.Namespace), + jq.Match(`.spec.controlPlaneRef.name == "%s"`, rr.DSCI.Spec.ServiceMesh.ControlPlane.Name), + jq.Match(`.metadata.annotations."instance-name" == "%s"`, rr.Instance.GetName()), + jq.Match(`.metadata.annotations."instance-id" == "%s"`, id), + )), + )) +} + +func TestRenderTemplateWithCache(t *testing.T) { + g := NewWithT(t) + + ctx := context.Background() + ns := xid.New().String() + + cl, err := fakeclient.New() + g.Expect(err).ShouldNot(HaveOccurred()) + + action := template.NewAction( + template.WithCache(), + ) + + render.RenderedResourcesTotal.Reset() + + dsci := dsciv1.DSCInitialization{ + Spec: dsciv1.DSCInitializationSpec{ + ApplicationsNamespace: ns, + ServiceMesh: &infrav1.ServiceMeshSpec{ + ControlPlane: infrav1.ControlPlaneSpec{ + Name: xid.New().String(), + Namespace: xid.New().String(), + }, + }, + }, + } + + for i := range 3 { + d := componentApi.Dashboard{ + ObjectMeta: metav1.ObjectMeta{ + Name: ns, + }, + } + + if i >= 1 { + d.Generation = 1 + } + + rr := types.ReconciliationRequest{ + Client: cl, + Instance: &d, + DSCI: &dsci, + Release: cluster.Release{Name: cluster.OpenDataHub}, + Templates: []types.TemplateInfo{{FS: testFS, Path: "resources/smm.tmpl.yaml"}}, + } + + err = action(ctx, &rr) + + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(rr.Resources).Should(And( + HaveLen(1), + HaveEach(And( + 
jq.Match(`.metadata.namespace == "%s"`, ns), + jq.Match(`.spec.controlPlaneRef.namespace == "%s"`, rr.DSCI.Spec.ServiceMesh.ControlPlane.Namespace), + jq.Match(`.spec.controlPlaneRef.name == "%s"`, rr.DSCI.Spec.ServiceMesh.ControlPlane.Name), + jq.Match(`.metadata.annotations."instance-name" == "%s"`, rr.Instance.GetName()), + )), + )) + + rc := testutil.ToFloat64(render.RenderedResourcesTotal) + + switch i { + case 0: + g.Expect(rc).Should(BeNumerically("==", 1)) + case 1: + g.Expect(rc).Should(BeNumerically("==", 2)) + case 2: + g.Expect(rc).Should(BeNumerically("==", 2)) + } + } +} diff --git a/pkg/controller/actions/render/template/resources/smm-data.tmpl.yaml b/pkg/controller/actions/render/template/resources/smm-data.tmpl.yaml new file mode 100644 index 00000000000..751c69f39bf --- /dev/null +++ b/pkg/controller/actions/render/template/resources/smm-data.tmpl.yaml @@ -0,0 +1,12 @@ +apiVersion: maistra.io/v1 +kind: ServiceMeshMember +metadata: + name: {{.SMM.Name}} + namespace: {{.DSCI.Spec.ApplicationsNamespace}} + annotations: + instance-name: {{.Component.Name}} + instance-id: {{.ID}} +spec: + controlPlaneRef: + namespace: {{ .DSCI.Spec.ServiceMesh.ControlPlane.Namespace }} + name: {{ .DSCI.Spec.ServiceMesh.ControlPlane.Name }} \ No newline at end of file diff --git a/pkg/controller/actions/render/template/resources/smm.tmpl.yaml b/pkg/controller/actions/render/template/resources/smm.tmpl.yaml new file mode 100644 index 00000000000..2db131e03ce --- /dev/null +++ b/pkg/controller/actions/render/template/resources/smm.tmpl.yaml @@ -0,0 +1,11 @@ +apiVersion: maistra.io/v1 +kind: ServiceMeshMember +metadata: + name: default + namespace: {{.DSCI.Spec.ApplicationsNamespace}} + annotations: + instance-name: {{.Component.Name}} +spec: + controlPlaneRef: + namespace: {{ .DSCI.Spec.ServiceMesh.ControlPlane.Namespace }} + name: {{ .DSCI.Spec.ServiceMesh.ControlPlane.Name }} \ No newline at end of file diff --git a/pkg/controller/actions/security/actions.go 
b/pkg/controller/actions/security/actions.go new file mode 100644 index 00000000000..ad1d441796a --- /dev/null +++ b/pkg/controller/actions/security/actions.go @@ -0,0 +1,26 @@ +package security + +import ( + "context" + "fmt" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" +) + +func NewUpdatePodSecurityRoleBindingAction(roles map[cluster.Platform][]string) actions.Fn { + return func(ctx context.Context, rr *types.ReconciliationRequest) error { + v := roles[rr.Release.Name] + if len(v) == 0 { + return nil + } + + err := cluster.UpdatePodSecurityRolebinding(ctx, rr.Client, rr.DSCI.Spec.ApplicationsNamespace, v...) + if err != nil { + return fmt.Errorf("failed to update PodSecurityRolebinding for %s: %w", v, err) + } + + return nil + } +} diff --git a/pkg/controller/actions/security/actions_test.go b/pkg/controller/actions/security/actions_test.go new file mode 100644 index 00000000000..bb72feda9bc --- /dev/null +++ b/pkg/controller/actions/security/actions_test.go @@ -0,0 +1,78 @@ +package security_test + +import ( + "context" + "testing" + + "github.com/rs/xid" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/security" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/fakeclient" + + . 
"github.com/onsi/gomega" +) + +func TestUpdatePodSecurityRoleBindingAction(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + m := map[cluster.Platform][]string{ + cluster.OpenDataHub: {"odh-dashboard"}, + cluster.SelfManagedRhoai: {"rhods-dashboard"}, + cluster.ManagedRhoai: {"rhods-dashboard", "fake-account"}, + } + + action := security.NewUpdatePodSecurityRoleBindingAction(m) + + for p, s := range m { + k := p + vl := s + + t.Run(string(k), func(t *testing.T) { + t.Parallel() + + g := NewWithT(t) + ns := xid.New().String() + + cl, err := fakeclient.New( + &rbacv1.RoleBinding{ + TypeMeta: metav1.TypeMeta{ + APIVersion: gvk.RoleBinding.GroupVersion().String(), + Kind: gvk.RoleBinding.Kind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: ns, + Namespace: ns, + }, + }, + ) + + g.Expect(err).ShouldNot(HaveOccurred()) + + err = action(ctx, &types.ReconciliationRequest{ + Client: cl, + Instance: nil, + DSCI: &dsciv1.DSCInitialization{Spec: dsciv1.DSCInitializationSpec{ApplicationsNamespace: ns}}, + Release: cluster.Release{Name: k}, + }) + + g.Expect(err).ShouldNot(HaveOccurred()) + + rb := rbacv1.RoleBinding{} + err = cl.Get(ctx, client.ObjectKey{Namespace: ns, Name: ns}, &rb) + + g.Expect(err).ShouldNot(HaveOccurred()) + for _, v := range vl { + g.Expect(cluster.SubjectExistInRoleBinding(rb.Subjects, v, ns)).Should(BeTrue()) + } + }) + } +} diff --git a/pkg/controller/actions/updatestatus/action_update_status.go b/pkg/controller/actions/updatestatus/action_update_status.go new file mode 100644 index 00000000000..9e60c097082 --- /dev/null +++ b/pkg/controller/actions/updatestatus/action_update_status.go @@ -0,0 +1,119 @@ +package updatestatus + +import ( + "context" + "fmt" + "strings" + + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/status" + 
"github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" +) + +const ( + DeploymentsNotReadyReason = "DeploymentsNotReady" + ReadyReason = "Ready" +) + +type Action struct { + labels map[string]string +} + +type ActionOpts func(*Action) + +func WithSelectorLabel(k string, v string) ActionOpts { + return func(action *Action) { + action.labels[k] = v + } +} + +func WithSelectorLabels(values map[string]string) ActionOpts { + return func(action *Action) { + for k, v := range values { + action.labels[k] = v + } + } +} + +func (a *Action) run(ctx context.Context, rr *types.ReconciliationRequest) error { + l := make(map[string]string, len(a.labels)) + for k, v := range a.labels { + l[k] = v + } + + if l[labels.PlatformPartOf] == "" { + kind, err := resources.KindForObject(rr.Client.Scheme(), rr.Instance) + if err != nil { + return err + } + + l[labels.PlatformPartOf] = strings.ToLower(kind) + } + + obj, ok := rr.Instance.(types.ResourceObject) + if !ok { + return fmt.Errorf("resource instance %v is not a ResourceObject", rr.Instance) + } + + deployments := &appsv1.DeploymentList{} + + err := rr.Client.List( + ctx, + deployments, + client.InNamespace(rr.DSCI.Spec.ApplicationsNamespace), + client.MatchingLabels(l), + ) + + if err != nil { + return fmt.Errorf("error fetching list of deployments: %w", err) + } + + ready := 0 + for _, deployment := range deployments.Items { + if deployment.Status.ReadyReplicas == deployment.Status.Replicas { + ready++ + } + } + + s := obj.GetStatus() + s.ObservedGeneration = obj.GetGeneration() + s.Phase = "Ready" + + conditionReady := metav1.Condition{ + Type: status.ConditionTypeReady, + Status: metav1.ConditionTrue, + Reason: ReadyReason, + Message: fmt.Sprintf("%d/%d deployments ready", ready, 
len(deployments.Items)),
+		ObservedGeneration: s.ObservedGeneration,
+	}
+
+	// Not ready when there are no deployments at all, or when at least one
+	// deployment has not reached its desired replica count. (When the list is
+	// empty ready == len == 0, hence the explicit empty check.)
+	if len(deployments.Items) == 0 || ready != len(deployments.Items) {
+		conditionReady.Status = metav1.ConditionFalse
+		conditionReady.Reason = DeploymentsNotReadyReason
+
+		s.Phase = "NotReady"
+	}
+
+	meta.SetStatusCondition(&s.Conditions, conditionReady)
+
+	return nil
+}
+
+func NewAction(opts ...ActionOpts) actions.Fn {
+	action := Action{
+		labels: map[string]string{},
+	}
+
+	for _, opt := range opts {
+		opt(&action)
+	}
+
+	return action.run
+}
diff --git a/pkg/controller/actions/updatestatus/action_update_status_test.go b/pkg/controller/actions/updatestatus/action_update_status_test.go
new file mode 100644
index 00000000000..7eb47d60e92
--- /dev/null
+++ b/pkg/controller/actions/updatestatus/action_update_status_test.go
@@ -0,0 +1,305 @@
+//nolint:dupl
+package updatestatus_test
+
+import (
+	"context"
+	"strings"
+	"testing"
+
+	"github.com/onsi/gomega/gstruct"
+	"github.com/rs/xid"
+	appsv1 "k8s.io/api/apps/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1"
+	dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1"
+	"github.com/opendatahub-io/opendatahub-operator/v2/controllers/status"
+	"github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster"
+	"github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk"
+	"github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/updatestatus"
+	"github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types"
+	"github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels"
+	"github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/fakeclient"
+	"github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/matchers"
+
+	. 
"github.com/onsi/gomega" +) + +//nolint:dupl +func TestUpdateStatusActionNotReady(t *testing.T) { + g := NewWithT(t) + + ctx := context.Background() + ns := xid.New().String() + + cl, err := fakeclient.New( + &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: gvk.Deployment.GroupVersion().String(), + Kind: gvk.Deployment.Kind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-deployment", + Namespace: ns, + Labels: map[string]string{ + labels.PlatformPartOf: ns, + }, + }, + Status: appsv1.DeploymentStatus{ + Replicas: 1, + ReadyReplicas: 0, + }, + }, + &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: gvk.Deployment.GroupVersion().String(), + Kind: gvk.Deployment.Kind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-deployment-2", + Namespace: ns, + Labels: map[string]string{ + labels.PlatformPartOf: ns, + }, + }, + Status: appsv1.DeploymentStatus{ + Replicas: 1, + ReadyReplicas: 1, + }, + }, + ) + + g.Expect(err).ShouldNot(HaveOccurred()) + + action := updatestatus.NewAction( + updatestatus.WithSelectorLabel(labels.PlatformPartOf, ns)) + + rr := types.ReconciliationRequest{ + Client: cl, + Instance: &componentApi.Dashboard{}, + DSCI: &dsciv1.DSCInitialization{Spec: dsciv1.DSCInitializationSpec{ApplicationsNamespace: ns}}, + Release: cluster.Release{Name: cluster.OpenDataHub}, + } + + err = action(ctx, &rr) + g.Expect(err).ShouldNot(HaveOccurred()) + + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(rr.Instance).Should( + WithTransform( + matchers.ExtractStatusCondition(status.ConditionTypeReady), + gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{ + "Status": Equal(metav1.ConditionFalse), + "Reason": Equal(updatestatus.DeploymentsNotReadyReason), + }), + ), + ) +} + +func TestUpdateStatusActionReady(t *testing.T) { + g := NewWithT(t) + + ctx := context.Background() + ns := xid.New().String() + + cl, err := fakeclient.New( + &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: gvk.Deployment.GroupVersion().String(), + 
Kind: gvk.Deployment.Kind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-deployment", + Namespace: ns, + Labels: map[string]string{ + labels.PlatformPartOf: ns, + }, + }, + Status: appsv1.DeploymentStatus{ + Replicas: 1, + ReadyReplicas: 1, + }, + }, + &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: gvk.Deployment.GroupVersion().String(), + Kind: gvk.Deployment.Kind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-deployment-2", + Namespace: ns, + Labels: map[string]string{ + labels.PlatformPartOf: ns, + }, + }, + Status: appsv1.DeploymentStatus{ + Replicas: 1, + ReadyReplicas: 1, + }, + }, + ) + + g.Expect(err).ShouldNot(HaveOccurred()) + + action := updatestatus.NewAction( + updatestatus.WithSelectorLabel(labels.PlatformPartOf, ns)) + + rr := types.ReconciliationRequest{ + Client: cl, + Instance: &componentApi.Dashboard{}, + DSCI: &dsciv1.DSCInitialization{Spec: dsciv1.DSCInitializationSpec{ApplicationsNamespace: ns}}, + Release: cluster.Release{Name: cluster.OpenDataHub}, + } + + err = action(ctx, &rr) + g.Expect(err).ShouldNot(HaveOccurred()) + + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(rr.Instance).Should( + WithTransform( + matchers.ExtractStatusCondition(status.ConditionTypeReady), + gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{ + "Status": Equal(metav1.ConditionTrue), + "Reason": Equal(updatestatus.ReadyReason), + }), + ), + ) +} + +func TestUpdateStatusActionReadyAutoSelector(t *testing.T) { + g := NewWithT(t) + + ctx := context.Background() + ns := xid.New().String() + + cl, err := fakeclient.New( + &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: gvk.Deployment.GroupVersion().String(), + Kind: gvk.Deployment.Kind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-deployment", + Namespace: ns, + Labels: map[string]string{ + labels.PlatformPartOf: strings.ToLower(componentApi.DashboardKind), + }, + }, + Status: appsv1.DeploymentStatus{ + Replicas: 1, + ReadyReplicas: 1, + }, + }, + 
&appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: gvk.Deployment.GroupVersion().String(), + Kind: gvk.Deployment.Kind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-deployment-2", + Namespace: ns, + Labels: map[string]string{ + labels.PlatformPartOf: strings.ToLower(componentApi.DashboardKind), + }, + }, + Status: appsv1.DeploymentStatus{ + Replicas: 1, + ReadyReplicas: 1, + }, + }, + ) + + g.Expect(err).ShouldNot(HaveOccurred()) + + action := updatestatus.NewAction() + + rr := types.ReconciliationRequest{ + Client: cl, + Instance: &componentApi.Dashboard{}, + DSCI: &dsciv1.DSCInitialization{Spec: dsciv1.DSCInitializationSpec{ApplicationsNamespace: ns}}, + Release: cluster.Release{Name: cluster.OpenDataHub}, + } + + err = action(ctx, &rr) + g.Expect(err).ShouldNot(HaveOccurred()) + + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(rr.Instance).Should( + WithTransform( + matchers.ExtractStatusCondition(status.ConditionTypeReady), + gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{ + "Status": Equal(metav1.ConditionTrue), + "Reason": Equal(updatestatus.ReadyReason), + }), + ), + ) +} + +func TestUpdateStatusActionNotReadyNotFound(t *testing.T) { + g := NewWithT(t) + + ctx := context.Background() + ns := xid.New().String() + + cl, err := fakeclient.New( + &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: gvk.Deployment.GroupVersion().String(), + Kind: gvk.Deployment.Kind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-deployment", + Namespace: ns, + Labels: map[string]string{ + labels.PlatformPartOf: ns, + }, + }, + Status: appsv1.DeploymentStatus{ + Replicas: 1, + ReadyReplicas: 1, + }, + }, + &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: gvk.Deployment.GroupVersion().String(), + Kind: gvk.Deployment.Kind, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "my-deployment-2", + Namespace: ns, + Labels: map[string]string{ + labels.PlatformPartOf: ns, + }, + }, + Status: appsv1.DeploymentStatus{ + Replicas: 1, + 
ReadyReplicas: 1,
+			},
+		},
+	)
+
+	g.Expect(err).ShouldNot(HaveOccurred())
+
+	action := updatestatus.NewAction()
+
+	rr := types.ReconciliationRequest{
+		Client:   cl,
+		Instance: &componentApi.Dashboard{},
+		DSCI:     &dsciv1.DSCInitialization{Spec: dsciv1.DSCInitializationSpec{ApplicationsNamespace: ns}},
+		Release:  cluster.Release{Name: cluster.OpenDataHub},
+	}
+
+	err = action(ctx, &rr)
+	g.Expect(err).ShouldNot(HaveOccurred())
+
+	g.Expect(err).ShouldNot(HaveOccurred())
+	g.Expect(rr.Instance).Should(
+		WithTransform(
+			matchers.ExtractStatusCondition(status.ConditionTypeReady),
+			gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{
+				"Status": Equal(metav1.ConditionFalse),
+				"Reason": Equal(updatestatus.DeploymentsNotReadyReason),
+			}),
+		),
+	)
+}
diff --git a/pkg/controller/client/client.go b/pkg/controller/client/client.go
new file mode 100644
index 00000000000..33eff7361dc
--- /dev/null
+++ b/pkg/controller/client/client.go
@@ -0,0 +1,123 @@
+package client
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/pkg/errors"
+	k8serr "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/client-go/discovery"
+	"k8s.io/client-go/dynamic"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+	ctrl "sigs.k8s.io/controller-runtime"
+	ctrlCli "sigs.k8s.io/controller-runtime/pkg/client"
+
+	"github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources"
+)
+
+func NewFromManager(mgr ctrl.Manager) (*Client, error) {
+	return NewFromConfig(mgr.GetConfig(), mgr.GetClient())
+}
+
+func NewFromConfig(cfg *rest.Config, client ctrlCli.Client) (*Client, error) {
+	kubernetesCl, err := kubernetes.NewForConfig(cfg)
+	if err != nil {
+		return nil, fmt.Errorf("unable to construct a Kubernetes client: %w", err)
+	}
+
+	dynamicCl, err := dynamic.NewForConfig(cfg)
+	if err != nil {
+		return nil, fmt.Errorf("unable to construct a dynamic client: %w", err)
+	}
+
+	return New(client, kubernetesCl, dynamicCl), nil
+}
+
+func 
New(client ctrlCli.Client, kubernetes kubernetes.Interface, dynamic dynamic.Interface) *Client { + return &Client{ + Client: client, + kubernetes: kubernetes, + dynamic: dynamic, + } +} + +type Client struct { + ctrlCli.Client + kubernetes kubernetes.Interface + dynamic dynamic.Interface +} + +func (c *Client) Kubernetes() kubernetes.Interface { + return c.kubernetes +} + +func (c *Client) Discovery() discovery.DiscoveryInterface { + return c.kubernetes.Discovery() +} + +func (c *Client) Dynamic() dynamic.Interface { + return c.dynamic +} + +func (c *Client) Apply(ctx context.Context, in ctrlCli.Object, opts ...ctrlCli.PatchOption) error { + u, err := resources.ToUnstructured(in) + if err != nil { + return fmt.Errorf("failed to convert resource to unstructured: %w", err) + } + + // safe copy + u = u.DeepCopy() + + // remove not required fields + unstructured.RemoveNestedField(u.Object, "metadata", "managedFields") + unstructured.RemoveNestedField(u.Object, "metadata", "resourceVersion") + unstructured.RemoveNestedField(u.Object, "status") + + err = c.Client.Patch(ctx, u, ctrlCli.Apply, opts...) + switch { + case k8serr.IsNotFound(err): + return nil + case err != nil: + return fmt.Errorf("unable to patch object %s: %w", u, err) + } + + // Write back the modified object so callers can access the patched object. 
+ err = c.Scheme().Convert(u, in, ctx) + if err != nil { + return errors.Wrapf(err, "failed to write modified object") + } + + return nil +} + +func (c *Client) ApplyStatus(ctx context.Context, in ctrlCli.Object, opts ...ctrlCli.SubResourcePatchOption) error { + u, err := resources.ToUnstructured(in) + if err != nil { + return fmt.Errorf("failed to convert resource to unstructured: %w", err) + } + + // safe copy + u = u.DeepCopy() + + // remove not required fields + unstructured.RemoveNestedField(u.Object, "metadata", "managedFields") + unstructured.RemoveNestedField(u.Object, "metadata", "resourceVersion") + + err = c.Client.Status().Patch(ctx, u, ctrlCli.Apply, opts...) + switch { + case k8serr.IsNotFound(err): + return nil + case err != nil: + return fmt.Errorf("unable to patch object status %s: %w", u, err) + } + + // Write back the modified object so callers can access the patched object. + err = c.Scheme().Convert(u, in, ctx) + if err != nil { + return errors.Wrapf(err, "failed to write modified object") + } + + return nil +} diff --git a/pkg/controller/handlers/handlers.go b/pkg/controller/handlers/handlers.go new file mode 100644 index 00000000000..5dad19f4c2d --- /dev/null +++ b/pkg/controller/handlers/handlers.go @@ -0,0 +1,63 @@ +package handlers + +import ( + "context" + + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +func LabelToName(key string) handler.EventHandler { + return handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a client.Object) []reconcile.Request { + values := a.GetLabels() + if len(values) == 0 { + return []reconcile.Request{} + } + + name := values[key] + if name == "" { + return []reconcile.Request{} + } + + return []reconcile.Request{{ + NamespacedName: types.NamespacedName{ + Name: name, + }, + }} + }) +} +func AnnotationToName(key string) handler.EventHandler { + return 
handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { + values := obj.GetAnnotations() + if len(values) == 0 { + return []reconcile.Request{} + } + + name := values[key] + if name == "" { + return []reconcile.Request{} + } + + return []reconcile.Request{{ + NamespacedName: types.NamespacedName{ + Name: name, + }, + }} + }) +} + +func Fn(fn func(ctx context.Context, a client.Object) []reconcile.Request) handler.EventHandler { + return handler.EnqueueRequestsFromMapFunc(fn) +} + +func ToNamed(name string) handler.EventHandler { + return handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, a client.Object) []reconcile.Request { + return []reconcile.Request{{ + NamespacedName: types.NamespacedName{ + Name: name, + }, + }} + }) +} diff --git a/pkg/controller/manager/manager.go b/pkg/controller/manager/manager.go new file mode 100644 index 00000000000..b52ad12724b --- /dev/null +++ b/pkg/controller/manager/manager.go @@ -0,0 +1,42 @@ +package manager + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + ctrl "sigs.k8s.io/controller-runtime" +) + +type gvkInfo struct { + owned bool +} + +func New(manager ctrl.Manager) *Manager { + return &Manager{ + m: manager, + gvks: map[schema.GroupVersionKind]gvkInfo{}, + } +} + +type Manager struct { + m ctrl.Manager + + gvks map[schema.GroupVersionKind]gvkInfo +} + +func (m *Manager) AddGVK(gvk schema.GroupVersionKind, owned bool) { + if m == nil { + return + } + + m.gvks[gvk] = gvkInfo{ + owned: owned, + } +} + +func (m *Manager) Owns(gvk schema.GroupVersionKind) bool { + if m == nil { + return false + } + + i, ok := m.gvks[gvk] + return ok && i.owned +} diff --git a/pkg/controller/predicates/clusterrole/clusterrole.go b/pkg/controller/predicates/clusterrole/clusterrole.go new file mode 100644 index 00000000000..b4c1d818819 --- /dev/null +++ b/pkg/controller/predicates/clusterrole/clusterrole.go @@ -0,0 +1,41 @@ +package clusterrole + +import ( + "reflect" + + rbacv1 
"k8s.io/api/rbac/v1" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +// IgnoreIfAggregationRule is a watch predicate that can be used with +// ClusterRoles to ignore the rules field on update if aggregationRule is set. +func IgnoreIfAggregationRule() predicate.Predicate { + return predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + oldClusterRole, ok := e.ObjectOld.DeepCopyObject().(*rbacv1.ClusterRole) + if !ok { + return true + } + newClusterRole, ok := e.ObjectNew.DeepCopyObject().(*rbacv1.ClusterRole) + if !ok { + return true + } + + // if aggregationRule is set, then the rules are set by k8s based on other + // ClusterRoles matching a label selector, so we shouldn't try to reset that + // back to empty + if newClusterRole.AggregationRule != nil { + oldClusterRole.Rules = nil + newClusterRole.Rules = nil + } + + oldClusterRole.SetManagedFields(nil) + newClusterRole.SetManagedFields(nil) + oldClusterRole.SetResourceVersion("") + newClusterRole.SetResourceVersion("") + + return !reflect.DeepEqual(oldClusterRole, newClusterRole) + }, + } +} diff --git a/pkg/controller/predicates/component/component.go b/pkg/controller/predicates/component/component.go new file mode 100644 index 00000000000..d26badb5d38 --- /dev/null +++ b/pkg/controller/predicates/component/component.go @@ -0,0 +1,36 @@ +package component + +import ( + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" +) + +func ForLabel(name string, value string) predicate.Funcs { + return predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return resources.HasLabel(e.Object, name, value) + }, + UpdateFunc: func(e event.UpdateEvent) bool { + return resources.HasLabel(e.ObjectNew, name, value) || resources.HasLabel(e.ObjectOld, name, value) + }, + } +} + +func 
ForAnnotation(name string, value string) predicate.Funcs { + return predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return resources.HasAnnotation(e.Object, name, value) + }, + UpdateFunc: func(e event.UpdateEvent) bool { + return resources.HasAnnotation(e.ObjectNew, name, value) || resources.HasAnnotation(e.ObjectOld, name, value) + }, + } +} diff --git a/pkg/controller/predicates/dependent/dependent.go b/pkg/controller/predicates/dependent/dependent.go new file mode 100644 index 00000000000..ce7b37906a0 --- /dev/null +++ b/pkg/controller/predicates/dependent/dependent.go @@ -0,0 +1,109 @@ +package dependent + +import ( + "reflect" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" +) + +var _ predicate.Predicate = Predicate{} + +type PredicateOption func(*Predicate) *Predicate + +func WithWatchDeleted(val bool) PredicateOption { + return func(in *Predicate) *Predicate { + in.WatchDelete = val + return in + } +} + +func WithWatchUpdate(val bool) PredicateOption { + return func(in *Predicate) *Predicate { + in.WatchUpdate = val + return in + } +} + +func WithWatchStatus(val bool) PredicateOption { + return func(in *Predicate) *Predicate { + in.WatchStatus = val + return in + } +} + +func New(opts ...PredicateOption) *Predicate { + dp := &Predicate{ + WatchDelete: true, + WatchUpdate: true, + WatchStatus: false, + } + + for i := range opts { + dp = opts[i](dp) + } + + return dp +} + +type Predicate struct { + WatchDelete bool + WatchUpdate bool + WatchStatus bool + + predicate.Funcs +} + +func (p Predicate) Create(event.CreateEvent) bool { + return false +} + +func (p Predicate) Generic(event.GenericEvent) bool { + return false +} + +func (p Predicate) Delete(e event.DeleteEvent) bool { + return p.WatchDelete +} + 
+func (p Predicate) Update(e event.UpdateEvent) bool {
+	if !p.WatchUpdate {
+		return false
+	}
+
+	if e.ObjectOld.GetResourceVersion() == e.ObjectNew.GetResourceVersion() {
+		return false
+	}
+
+	oldObj, err := resources.ToUnstructured(e.ObjectOld)
+	if err != nil {
+		return false
+	}
+
+	newObj, err := resources.ToUnstructured(e.ObjectNew)
+	if err != nil {
+		return false
+	}
+
+	oldObj = oldObj.DeepCopy()
+	newObj = newObj.DeepCopy()
+
+	if !p.WatchStatus {
+		// Update filters out events that change only the dependent resource
+		// status. It is not typical for the controller of a primary
+		// resource to write to the status of one of its dependent resources.
+		unstructured.RemoveNestedField(oldObj.Object, "status")
+		unstructured.RemoveNestedField(newObj.Object, "status")
+	}
+
+	// Reset fields that are not meaningful for comparison
+	oldObj.SetResourceVersion("")
+	newObj.SetResourceVersion("")
+	oldObj.SetManagedFields(nil)
+	newObj.SetManagedFields(nil)
+
+	return !reflect.DeepEqual(oldObj.Object, newObj.Object)
+}
diff --git a/pkg/controller/predicates/generation/generation.go b/pkg/controller/predicates/generation/generation.go
new file mode 100644
index 00000000000..e2c27f379dd
--- /dev/null
+++ b/pkg/controller/predicates/generation/generation.go
@@ -0,0 +1,32 @@
+package generation
+
+import (
+	"sigs.k8s.io/controller-runtime/pkg/event"
+	"sigs.k8s.io/controller-runtime/pkg/predicate"
+)
+
+var _ predicate.Predicate = Predicate{}
+
+type Predicate struct {
+	predicate.Funcs
+}
+
+// Update implements default UpdateEvent filter for validating generation change. 
+func (Predicate) Update(e event.UpdateEvent) bool { + if e.ObjectOld == nil || e.ObjectNew == nil { + return false + } + + // If the generation is set to zero, it means that for such resource, the + // generation does not matter, hence we should pass the event down for + // further processing (if needed) + if e.ObjectNew.GetGeneration() == 0 || e.ObjectOld.GetGeneration() == 0 { + return true + } + + return e.ObjectNew.GetGeneration() != e.ObjectOld.GetGeneration() +} + +func New() *Predicate { + return &Predicate{} +} diff --git a/pkg/controller/predicates/hash/hash.go b/pkg/controller/predicates/hash/hash.go new file mode 100644 index 00000000000..b2157793912 --- /dev/null +++ b/pkg/controller/predicates/hash/hash.go @@ -0,0 +1,38 @@ +package hash + +import ( + "bytes" + + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" +) + +// Updated is a watch predicate that can be used to ignore updates +// of resources if they're considered equal after hashing by resources.Hash(). 
+func Updated() predicate.Predicate { + return predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + oldUnstructured, err := resources.ToUnstructured(e.ObjectOld.DeepCopyObject()) + if err != nil { + return true + } + newUnstructured, err := resources.ToUnstructured(e.ObjectNew.DeepCopyObject()) + if err != nil { + return true + } + + oldHash, err := resources.Hash(oldUnstructured) + if err != nil { + return true + } + newHash, err := resources.Hash(newUnstructured) + if err != nil { + return true + } + + return !bytes.Equal(oldHash, newHash) + }, + } +} diff --git a/pkg/controller/predicates/partial/partial.go b/pkg/controller/predicates/partial/partial.go new file mode 100644 index 00000000000..3832d5da6cf --- /dev/null +++ b/pkg/controller/predicates/partial/partial.go @@ -0,0 +1,77 @@ +package partial + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +var _ predicate.Predicate = Predicate{} + +type PredicateOption func(*Predicate) *Predicate + +func WatchDeleted(val bool) PredicateOption { + return func(in *Predicate) *Predicate { + in.WatchDelete = val + return in + } +} + +func WatchUpdate(val bool) PredicateOption { + return func(in *Predicate) *Predicate { + in.WatchUpdate = val + return in + } +} + +func New(opts ...PredicateOption) *Predicate { + dp := &Predicate{ + WatchDelete: true, + WatchUpdate: true, + } + + for i := range opts { + dp = opts[i](dp) + } + + return dp +} + +type Predicate struct { + WatchDelete bool + WatchUpdate bool + + predicate.Funcs +} + +func (p Predicate) Create(event.CreateEvent) bool { + return false +} + +func (p Predicate) Generic(event.GenericEvent) bool { + return false +} + +func (p Predicate) Delete(e event.DeleteEvent) bool { + if !p.WatchDelete { + return false + } + + _, ok := e.Object.(*metav1.PartialObjectMetadata) + + return ok +} + +func (p Predicate) Update(e event.UpdateEvent) bool { + if 
!p.WatchUpdate { + return false + } + + if e.ObjectOld.GetResourceVersion() == e.ObjectNew.GetResourceVersion() { + return false + } + + _, ok := e.ObjectNew.(*metav1.PartialObjectMetadata) + + return ok +} diff --git a/pkg/controller/predicates/predicates.go b/pkg/controller/predicates/predicates.go new file mode 100644 index 00000000000..128e7921462 --- /dev/null +++ b/pkg/controller/predicates/predicates.go @@ -0,0 +1,21 @@ +package predicates + +import ( + "sigs.k8s.io/controller-runtime/pkg/predicate" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/predicates/generation" +) + +var ( + // DefaultPredicate is the default set of predicates associated to + // resources when there is no specific predicate configured via the + // builder. + // + // It would trigger a reconciliation if either the generation or + // metadata (labels, annotations) have changed. + DefaultPredicate = predicate.Or( + generation.New(), + predicate.LabelChangedPredicate{}, + predicate.AnnotationChangedPredicate{}, + ) +) diff --git a/pkg/controller/predicates/resources/resources.go b/pkg/controller/predicates/resources/resources.go new file mode 100644 index 00000000000..827eab1b7f4 --- /dev/null +++ b/pkg/controller/predicates/resources/resources.go @@ -0,0 +1,55 @@ +package resources + +import ( + appsv1 "k8s.io/api/apps/v1" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +var _ predicate.Predicate = DeploymentPredicate{} + +type DeploymentPredicate struct { + predicate.Funcs +} + +// Update implements default UpdateEvent filter for validating generation change. 
+func (DeploymentPredicate) Update(e event.UpdateEvent) bool { + if e.ObjectOld == nil || e.ObjectNew == nil { + return false + } + + oldDeployment, ok := e.ObjectOld.(*appsv1.Deployment) + if !ok { + return false + } + + newDeployment, ok := e.ObjectNew.(*appsv1.Deployment) + if !ok { + return false + } + + return oldDeployment.Generation != newDeployment.Generation || + oldDeployment.Status.Replicas != newDeployment.Status.Replicas || + oldDeployment.Status.ReadyReplicas != newDeployment.Status.ReadyReplicas +} + +func NewDeploymentPredicate() *DeploymentPredicate { + return &DeploymentPredicate{} +} + +func Deleted() predicate.Funcs { + return predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + return false + }, + UpdateFunc: func(e event.UpdateEvent) bool { + return false + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return true + }, + GenericFunc: func(e event.GenericEvent) bool { + return false + }, + } +} diff --git a/pkg/controller/reconciler/reconciler.go b/pkg/controller/reconciler/reconciler.go new file mode 100644 index 00000000000..43afe93eda1 --- /dev/null +++ b/pkg/controller/reconciler/reconciler.go @@ -0,0 +1,220 @@ +package reconciler + +import ( + "context" + "errors" + "fmt" + "reflect" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/manager" + + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions" + odherrors 
"github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions/errors" + odhClient "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/client" + odhManager "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/manager" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" +) + +// Reconciler provides generic reconciliation functionality for ODH objects. +type Reconciler[T common.PlatformObject] struct { + Client *odhClient.Client + Scheme *runtime.Scheme + Actions []actions.Fn + Finalizer []actions.Fn + Log logr.Logger + Controller controller.Controller + Recorder record.EventRecorder + Release cluster.Release + + name string + m *odhManager.Manager + instanceFactory func() (T, error) +} + +// NewReconciler creates a new reconciler for the given type. +func NewReconciler[T common.PlatformObject](mgr manager.Manager, name string, object T) (*Reconciler[T], error) { + oc, err := odhClient.NewFromManager(mgr) + if err != nil { + return nil, err + } + + cc := Reconciler[T]{ + Client: oc, + Scheme: mgr.GetScheme(), + Log: ctrl.Log.WithName("controllers").WithName(name), + Recorder: mgr.GetEventRecorderFor(name), + Release: cluster.GetRelease(), + name: name, + m: odhManager.New(mgr), + instanceFactory: func() (T, error) { + t := reflect.TypeOf(object).Elem() + res, ok := reflect.New(t).Interface().(T) + if !ok { + return res, fmt.Errorf("unable to construct instance of %v", t) + } + + return res, nil + }, + } + + return &cc, nil +} + +func (r *Reconciler[T]) GetRelease() cluster.Release { + return r.Release +} + +func (r *Reconciler[T]) GetLogger() logr.Logger { + return r.Log +} + +func (r *Reconciler[T]) AddOwnedType(gvk schema.GroupVersionKind) { + r.m.AddGVK(gvk, true) +} + +func (r *Reconciler[T]) Owns(obj client.Object) bool { + return r.m.Owns(obj.GetObjectKind().GroupVersionKind()) +} + +func (r *Reconciler[T]) AddAction(action actions.Fn) { + r.Actions = append(r.Actions, action) +} + +func (r 
*Reconciler[T]) AddFinalizer(action actions.Fn) { + r.Finalizer = append(r.Finalizer, action) +} + +func (r *Reconciler[T]) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + l := log.FromContext(ctx) + l.Info("reconcile") + + res, err := r.instanceFactory() + if err != nil { + return ctrl.Result{}, err + } + + if err := r.Client.Get(ctx, client.ObjectKey{Name: req.Name}, res); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + if !res.GetDeletionTimestamp().IsZero() { + if err := r.delete(ctx, res); err != nil { + return ctrl.Result{}, err + } + } else { + if err := r.apply(ctx, res); err != nil { + return ctrl.Result{}, err + } + } + + return ctrl.Result{}, nil +} + +func (r *Reconciler[T]) delete(ctx context.Context, res client.Object) error { + l := log.FromContext(ctx) + l.Info("delete") + + rr := types.ReconciliationRequest{ + Client: r.Client, + Manager: r.m, + Instance: res, + Release: r.Release, + Manifests: make([]types.ManifestInfo, 0), + + // The DSCI should not be required when deleting a component, if the + // component requires some additional info, then such info should be + // stored as part of the spec/status + DSCI: nil, + } + + // Execute finalizers + for _, action := range r.Finalizer { + l.V(3).Info("Executing finalizer", "action", action) + + actx := log.IntoContext( + ctx, + l.WithName(actions.ActionGroup).WithName(action.String()), + ) + + if err := action(actx, &rr); err != nil { + se := odherrors.StopError{} + if !errors.As(err, &se) { + l.Error(err, "Failed to execute finalizer", "action", action) + return err + } + + l.V(3).Info("detected stop marker", "action", action) + break + } + } + + return nil +} + +func (r *Reconciler[T]) apply(ctx context.Context, res client.Object) error { + l := log.FromContext(ctx) + l.Info("apply") + + dscil := dsciv1.DSCInitializationList{} + if err := r.Client.List(ctx, &dscil); err != nil { + return err + } + + if len(dscil.Items) != 1 { + return 
errors.New("unable to find DSCInitialization") + } + + rr := types.ReconciliationRequest{ + Client: r.Client, + Manager: r.m, + Instance: res, + DSCI: &dscil.Items[0], + Release: r.Release, + Manifests: make([]types.ManifestInfo, 0), + } + + // Execute actions + for _, action := range r.Actions { + l.Info("Executing action", "action", action) + + actx := log.IntoContext( + ctx, + l.WithName(actions.ActionGroup).WithName(action.String()), + ) + + if err := action(actx, &rr); err != nil { + se := odherrors.StopError{} + if !errors.As(err, &se) { + l.Error(err, "Failed to execute action", "action", action) + return err + } + + l.V(3).Info("detected stop marker", "action", action) + break + } + } + + err := r.Client.ApplyStatus( + ctx, + rr.Instance, + client.FieldOwner(r.name), + client.ForceOwnership, + ) + + if err != nil { + return client.IgnoreNotFound(err) + } + + return nil +} diff --git a/pkg/controller/reconciler/reconciler_actions.go b/pkg/controller/reconciler/reconciler_actions.go new file mode 100644 index 00000000000..5d32508734f --- /dev/null +++ b/pkg/controller/reconciler/reconciler_actions.go @@ -0,0 +1,86 @@ +package reconciler + +import ( + "context" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" +) + +type dynamicWatchFn func(client.Object, handler.EventHandler, ...predicate.Predicate) error + +type dynamicWatchAction struct { + fn dynamicWatchFn + watches []watchInput + watched map[schema.GroupVersionKind]struct{} +} + +func (a *dynamicWatchAction) run(ctx context.Context, rr *types.ReconciliationRequest) error { + controllerName := strings.ToLower(rr.Instance.GetObjectKind().GroupVersionKind().Kind) + + for i := range a.watches { + w := 
a.watches[i] + gvk := w.object.GetObjectKind().GroupVersionKind() + + if _, ok := a.watched[gvk]; ok { + // already registered + continue + } + + ok := a.shouldWatch(ctx, w, rr) + if !ok { + continue + } + + err := a.fn(w.object, w.eventHandler, w.predicates...) + if err != nil { + return fmt.Errorf("failed to create watcher for %s: %w", w.object.GetObjectKind().GroupVersionKind(), err) + } + + a.watched[gvk] = struct{}{} + DynamicWatchResourcesTotal.WithLabelValues(controllerName).Inc() + } + + return nil +} + +func (a *dynamicWatchAction) shouldWatch(ctx context.Context, in watchInput, rr *types.ReconciliationRequest) bool { + for pi := range in.dynamicPred { + ok := in.dynamicPred[pi](ctx, rr) + if !ok { + return false + } + } + + return true +} + +func newDynamicWatch(fn dynamicWatchFn, watches []watchInput) *dynamicWatchAction { + action := dynamicWatchAction{ + fn: fn, + watched: map[schema.GroupVersionKind]struct{}{}, + } + + for i := range watches { + if !watches[i].dynamic { + // not dynamic + continue + } + + action.watches = append(action.watches, watches[i]) + } + + return &action +} + +func newDynamicWatchAction(fn dynamicWatchFn, watches []watchInput) actions.Fn { + action := newDynamicWatch(fn, watches) + return action.run +} diff --git a/pkg/controller/reconciler/reconciler_actions_test.go b/pkg/controller/reconciler/reconciler_actions_test.go new file mode 100644 index 00000000000..8db4d88c9b0 --- /dev/null +++ b/pkg/controller/reconciler/reconciler_actions_test.go @@ -0,0 +1,255 @@ +//nolint:testpackage +package reconciler + +import ( + "context" + "testing" + + gomegaTypes "github.com/onsi/gomega/types" + "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/rs/xid" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + 
componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" + + . "github.com/onsi/gomega" +) + +func TestDynamicWatchAction_Run(t *testing.T) { + tests := []struct { + name string + object common.PlatformObject + preds []DynamicPredicate + errMatcher gomegaTypes.GomegaMatcher + cntMatcher gomegaTypes.GomegaMatcher + keyMatcher gomegaTypes.GomegaMatcher + }{ + { + name: "should register a watcher if no predicates", + object: &componentApi.Dashboard{TypeMeta: metav1.TypeMeta{Kind: gvk.Dashboard.Kind}}, + preds: []DynamicPredicate{}, + errMatcher: Not(HaveOccurred()), + cntMatcher: BeNumerically("==", 1), + keyMatcher: HaveKey(gvk.ConfigMap), + }, + + { + name: "should register a watcher when the predicate evaluate to true", + object: &componentApi.Dashboard{TypeMeta: metav1.TypeMeta{Kind: gvk.Dashboard.Kind}}, + preds: []DynamicPredicate{ + func(_ context.Context, rr *types.ReconciliationRequest) bool { + return true + }, + }, + errMatcher: Not(HaveOccurred()), + cntMatcher: BeNumerically("==", 1), + keyMatcher: HaveKey(gvk.ConfigMap), + }, + + { + name: "should register a watcher when all predicates evaluate to true", + object: &componentApi.Dashboard{ + TypeMeta: metav1.TypeMeta{ + Kind: gvk.Dashboard.Kind, + }, + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + ResourceVersion: xid.New().String(), + }, + }, + preds: []DynamicPredicate{ + func(_ context.Context, rr *types.ReconciliationRequest) bool { + return rr.Instance.GetGeneration() > 0 + }, + func(_ context.Context, rr *types.ReconciliationRequest) bool { + return rr.Instance.GetResourceVersion() != "" + }, + }, + errMatcher: Not(HaveOccurred()), + cntMatcher: BeNumerically("==", 1), + keyMatcher: HaveKey(gvk.ConfigMap), + }, + + { + name: "should not register a 
watcher the predicate returns false", + object: &componentApi.Dashboard{TypeMeta: metav1.TypeMeta{Kind: gvk.Dashboard.Kind}}, + preds: []DynamicPredicate{ + func(_ context.Context, rr *types.ReconciliationRequest) bool { + return false + }, + }, + errMatcher: Not(HaveOccurred()), + cntMatcher: BeNumerically("==", 0), + keyMatcher: BeEmpty(), + }, + + { + name: "should not register a watcher when a predicate returns false", + object: &componentApi.Dashboard{ + TypeMeta: metav1.TypeMeta{ + Kind: gvk.Dashboard.Kind, + }, + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + ResourceVersion: "", + }, + }, + preds: []DynamicPredicate{ + func(_ context.Context, rr *types.ReconciliationRequest) bool { + return rr.Instance.GetGeneration() > 0 + }, + func(_ context.Context, rr *types.ReconciliationRequest) bool { + return rr.Instance.GetResourceVersion() != "" + }, + }, + errMatcher: Not(HaveOccurred()), + cntMatcher: BeNumerically("==", 0), + keyMatcher: BeEmpty(), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + + watches := []watchInput{{ + object: resources.GvkToUnstructured(gvk.ConfigMap), + dynamic: true, + dynamicPred: test.preds, + }} + + mockFn := func(_ client.Object, _ handler.EventHandler, _ ...predicate.Predicate) error { + return nil + } + + DynamicWatchResourcesTotal.Reset() + DynamicWatchResourcesTotal.WithLabelValues("dashboard").Add(0) + + action := newDynamicWatch(mockFn, watches) + err := action.run(ctx, &types.ReconciliationRequest{Instance: test.object}) + + if test.errMatcher != nil { + g.Expect(err).To(test.errMatcher) + } + if test.cntMatcher != nil { + g.Expect(testutil.ToFloat64(DynamicWatchResourcesTotal)).To(test.cntMatcher) + } + if test.keyMatcher != nil { + g.Expect(action.watched).Should(test.keyMatcher) + } + }) + } +} + +func TestDynamicWatchAction_Inputs(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + + mockFn := func(_ client.Object, _ 
handler.EventHandler, _ ...predicate.Predicate) error { + return nil + } + + DynamicWatchResourcesTotal.Reset() + DynamicWatchResourcesTotal.WithLabelValues("dashboard").Add(0) + + watches := []watchInput{ + { + object: resources.GvkToUnstructured(gvk.Secret), + dynamic: true, + dynamicPred: []DynamicPredicate{func(_ context.Context, rr *types.ReconciliationRequest) bool { + return rr.Instance.GetGeneration() == 0 + }}, + }, + { + object: resources.GvkToUnstructured(gvk.ConfigMap), + dynamic: true, + dynamicPred: []DynamicPredicate{func(_ context.Context, rr *types.ReconciliationRequest) bool { + return rr.Instance.GetGeneration() > 0 + }}, + }, + } + + action := newDynamicWatch(mockFn, watches) + err := action.run(ctx, &types.ReconciliationRequest{Instance: &componentApi.Dashboard{ + TypeMeta: metav1.TypeMeta{ + Kind: gvk.Dashboard.Kind, + }, + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + }, + }}) + + g.Expect(err). + ShouldNot(HaveOccurred()) + g.Expect(testutil.ToFloat64(DynamicWatchResourcesTotal)). + Should(BeNumerically("==", 1)) + g.Expect(action.watched). 
+ Should(And( + HaveLen(1), + HaveKey(gvk.ConfigMap)), + ) +} + +func TestDynamicWatchAction_NotTwice(t *testing.T) { + g := NewWithT(t) + ctx := context.Background() + + mockFn := func(_ client.Object, _ handler.EventHandler, _ ...predicate.Predicate) error { + return nil + } + + DynamicWatchResourcesTotal.Reset() + DynamicWatchResourcesTotal.WithLabelValues("dashboard").Add(0) + + watches := []watchInput{ + { + object: resources.GvkToUnstructured(gvk.ConfigMap), + dynamic: true, + dynamicPred: []DynamicPredicate{func(_ context.Context, rr *types.ReconciliationRequest) bool { + return rr.Instance.GetGeneration() > 0 + }}, + }, + } + + action := newDynamicWatch(mockFn, watches) + + err1 := action.run(ctx, &types.ReconciliationRequest{Instance: &componentApi.Dashboard{ + TypeMeta: metav1.TypeMeta{ + Kind: gvk.Dashboard.Kind, + }, + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + }, + }}) + + g.Expect(err1). + ShouldNot(HaveOccurred()) + + err2 := action.run(ctx, &types.ReconciliationRequest{Instance: &componentApi.Dashboard{ + TypeMeta: metav1.TypeMeta{ + Kind: gvk.Dashboard.Kind, + }, + ObjectMeta: metav1.ObjectMeta{ + Generation: 1, + }, + }}) + + g.Expect(err2). + ShouldNot(HaveOccurred()) + + g.Expect(testutil.ToFloat64(DynamicWatchResourcesTotal)). + Should(BeNumerically("==", 1)) + g.Expect(action.watched). + Should(And( + HaveLen(1), + HaveKey(gvk.ConfigMap)), + ) +} diff --git a/pkg/controller/reconciler/reconciler_metrics.go b/pkg/controller/reconciler/reconciler_metrics.go new file mode 100644 index 00000000000..70ff3505081 --- /dev/null +++ b/pkg/controller/reconciler/reconciler_metrics.go @@ -0,0 +1,30 @@ +package reconciler + +import ( + "github.com/prometheus/client_golang/prometheus" + "sigs.k8s.io/controller-runtime/pkg/metrics" +) + +var ( + // DynamicWatchResourcesTotal is a prometheus counter metrics which holds the total + // number of dynamically watched resource per controller. + // It has one labels. 
+ // controller label refers to the controller name. + DynamicWatchResourcesTotal = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "action_dynamic_watch_total", + Help: "Number of dynamically watched resources", + }, + []string{ + "controller", + }, + ) +) + +// init register metrics to the global registry from controller-runtime/pkg/metrics. +// see https://book.kubebuilder.io/reference/metrics#publishing-additional-metrics +// +//nolint:gochecknoinits +func init() { + metrics.Registry.MustRegister(DynamicWatchResourcesTotal) +} diff --git a/pkg/controller/reconciler/reconciler_support.go b/pkg/controller/reconciler/reconciler_support.go new file mode 100644 index 00000000000..f4e7c9621b0 --- /dev/null +++ b/pkg/controller/reconciler/reconciler_support.go @@ -0,0 +1,280 @@ +package reconciler + +import ( + "context" + "errors" + "fmt" + "slices" + "strings" + + "github.com/hashicorp/go-multierror" + "k8s.io/apimachinery/pkg/runtime/schema" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/source" + + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/actions" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/handlers" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/predicates" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/predicates/component" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/annotations" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" +) + +type forInput struct { + object client.Object + options 
[]builder.ForOption + gvk schema.GroupVersionKind +} + +type DynamicPredicate func(context.Context, *types.ReconciliationRequest) bool + +type watchInput struct { + object client.Object + eventHandler handler.EventHandler + predicates []predicate.Predicate + owned bool + dynamic bool + dynamicPred []DynamicPredicate +} + +type WatchOpts func(*watchInput) + +func WithPredicates(values ...predicate.Predicate) WatchOpts { + return func(a *watchInput) { + a.predicates = append(a.predicates, values...) + } +} + +func WithEventHandler(value handler.EventHandler) WatchOpts { + return func(a *watchInput) { + a.eventHandler = value + } +} + +func WithEventMapper(value handler.MapFunc) WatchOpts { + return func(a *watchInput) { + a.eventHandler = handler.EnqueueRequestsFromMapFunc(value) + } +} + +func Dynamic(predicates ...DynamicPredicate) WatchOpts { + return func(a *watchInput) { + a.dynamic = true + a.dynamicPred = slices.Clone(predicates) + } +} + +type ReconcilerBuilder[T common.PlatformObject] struct { + mgr ctrl.Manager + input forInput + watches []watchInput + predicates []predicate.Predicate + instanceName string + actions []actions.Fn + finalizers []actions.Fn + errors error +} + +func ReconcilerFor[T common.PlatformObject](mgr ctrl.Manager, object T, opts ...builder.ForOption) *ReconcilerBuilder[T] { + crb := ReconcilerBuilder[T]{ + mgr: mgr, + } + + gvk, err := mgr.GetClient().GroupVersionKindFor(object) + if err != nil { + crb.errors = multierror.Append(crb.errors, fmt.Errorf("unable to determine GVK: %w", err)) + } + + iops := slices.Clone(opts) + if len(iops) == 0 { + iops = append(iops, builder.WithPredicates( + predicates.DefaultPredicate), + ) + } + + crb.input = forInput{ + object: object, + options: iops, + gvk: gvk, + } + + return &crb +} + +func (b *ReconcilerBuilder[T]) WithInstanceName(instanceName string) *ReconcilerBuilder[T] { + b.instanceName = instanceName + return b +} + +func (b *ReconcilerBuilder[T]) WithAction(value actions.Fn) 
*ReconcilerBuilder[T] { + b.actions = append(b.actions, value) + return b +} + +func (b *ReconcilerBuilder[T]) WithFinalizer(value actions.Fn) *ReconcilerBuilder[T] { + b.finalizers = append(b.finalizers, value) + return b +} + +func (b *ReconcilerBuilder[T]) Watches(object client.Object, opts ...WatchOpts) *ReconcilerBuilder[T] { + in := watchInput{} + in.object = object + in.owned = false + + for _, opt := range opts { + opt(&in) + } + + if in.eventHandler == nil { + // use the platform.opendatahub.io/instance.name label to find out + // the owner + in.eventHandler = handlers.AnnotationToName(annotations.InstanceName) + } + + if len(in.predicates) == 0 { + in.predicates = append(in.predicates, predicate.And( + predicates.DefaultPredicate, + // use the platform.opendatahub.io/part-of label to filter + // events not related to the owner type + component.ForLabel(labels.PlatformPartOf, strings.ToLower(b.input.gvk.Kind)), + )) + } + + b.watches = append(b.watches, in) + + return b +} + +func (b *ReconcilerBuilder[T]) WatchesGVK(gvk schema.GroupVersionKind, opts ...WatchOpts) *ReconcilerBuilder[T] { + return b.Watches(resources.GvkToUnstructured(gvk), opts...) 
+} + +func (b *ReconcilerBuilder[T]) Owns(object client.Object, opts ...WatchOpts) *ReconcilerBuilder[T] { + in := watchInput{} + in.object = object + in.owned = true + + for _, opt := range opts { + opt(&in) + } + + if in.eventHandler == nil { + in.eventHandler = handler.EnqueueRequestForOwner( + b.mgr.GetScheme(), + b.mgr.GetRESTMapper(), + b.input.object, + handler.OnlyControllerOwner(), + ) + } + + if len(in.predicates) == 0 { + in.predicates = append(in.predicates, predicates.DefaultPredicate) + } + + b.watches = append(b.watches, in) + + return b +} + +func (b *ReconcilerBuilder[T]) WithEventFilter(p predicate.Predicate) *ReconcilerBuilder[T] { + b.predicates = append(b.predicates, p) + return b +} + +func (b *ReconcilerBuilder[T]) OwnsGVK(gvk schema.GroupVersionKind, opts ...WatchOpts) *ReconcilerBuilder[T] { + return b.Owns(resources.GvkToUnstructured(gvk), opts...) +} + +func (b *ReconcilerBuilder[T]) Build(_ context.Context) (*Reconciler[T], error) { + if b.errors != nil { + return nil, b.errors + } + name := b.instanceName + if name == "" { + name = strings.ToLower(b.input.gvk.Kind) + } + + obj, ok := b.input.object.(T) + if !ok { + return nil, errors.New("invalid type for object") + } + r, err := NewReconciler(b.mgr, name, obj) + if err != nil { + return nil, fmt.Errorf("failed to create reconciler for component %s: %w", name, err) + } + + c := ctrl.NewControllerManagedBy(b.mgr) + + // automatically add default predicates to the watched API if no + // predicates are provided + forOpts := b.input.options + if len(forOpts) == 0 { + forOpts = append(forOpts, builder.WithPredicates(predicate.Or( + predicate.GenerationChangedPredicate{}, + predicate.LabelChangedPredicate{}, + predicate.AnnotationChangedPredicate{}, + ))) + } + + c = c.For(b.input.object, forOpts...) 
+ + for i := range b.watches { + if b.watches[i].owned { + kinds, _, err := b.mgr.GetScheme().ObjectKinds(b.watches[i].object) + if err != nil { + return nil, err + } + + for i := range kinds { + r.AddOwnedType(kinds[i]) + } + } + + // if the watch is dynamic, then the watcher will be registered + // at later stage + if b.watches[i].dynamic { + continue + } + + c = c.Watches( + b.watches[i].object, + b.watches[i].eventHandler, + builder.WithPredicates(b.watches[i].predicates...), + ) + } + + for i := range b.predicates { + c = c.WithEventFilter(b.predicates[i]) + } + + for i := range b.actions { + r.AddAction(b.actions[i]) + } + for i := range b.finalizers { + r.AddFinalizer(b.finalizers[i]) + } + + cc, err := c.Build(r) + if err != nil { + return nil, err + } + + // internal action + r.AddAction( + newDynamicWatchAction( + func(obj client.Object, eventHandler handler.EventHandler, predicates ...predicate.Predicate) error { + return cc.Watch(source.Kind(b.mgr.GetCache(), obj), eventHandler, predicates...) 
+ }, + b.watches, + ), + ) + + return r, nil +} diff --git a/pkg/controller/types/types.go b/pkg/controller/types/types.go new file mode 100644 index 00000000000..6427986ceb2 --- /dev/null +++ b/pkg/controller/types/types.go @@ -0,0 +1,210 @@ +package types + +import ( + "crypto/sha256" + "encoding/binary" + "fmt" + "io/fs" + "path" + + "github.com/go-logr/logr" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + odhClient "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/client" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/manager" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" +) + +type ResourceObject interface { + client.Object + common.WithStatus +} + +type WithLogger interface { + GetLogger() logr.Logger +} + +type ManifestInfo struct { + Path string + ContextDir string + SourcePath string +} + +func (mi ManifestInfo) String() string { + result := mi.Path + + if mi.ContextDir != "" { + result = path.Join(result, mi.ContextDir) + } + + if mi.SourcePath != "" { + result = path.Join(result, mi.SourcePath) + } + + return result +} + +type TemplateInfo struct { + FS fs.FS + Path string +} + +type ReconciliationRequest struct { + *odhClient.Client + + Manager *manager.Manager + Instance client.Object + DSCI *dsciv1.DSCInitialization + Release cluster.Release + Manifests []ManifestInfo + + // + // TODO: unify templates and resources. + // + // Unfortunately, the kustomize APIs do not yet support a FileSystem that is + // backed by golang's fs.Fs so it is not simple to have a single abstraction + // for both the manifests types. 
+ // + // it would be nice to have a structure like: + // + // struct { + // FS fs.FS + // URI net.URL + // } + // + // where the URI could be something like: + // - kustomize:///path/to/overlay + // - template:///path/to/resource.tmpl.yaml + // + // and use the scheme as discriminator for the rendering engine + // + Templates []TemplateInfo + Resources []unstructured.Unstructured + + // TODO: this has been added to reduce GC work and only run when + // resources have been generated. It should be removed and + // replaced with a better way of describing resources and + // their origin + Generated bool +} + +// AddResources adds one or more resources to the ReconciliationRequest's Resources slice. +// Each provided client.Object is normalized by ensuring it has the appropriate GVK and is +// converted into an unstructured.Unstructured format before being appended to the list. +func (rr *ReconciliationRequest) AddResources(values ...client.Object) error { + for i := range values { + if values[i] == nil { + continue + } + + err := resources.EnsureGroupVersionKind(rr.Client.Scheme(), values[i]) + if err != nil { + return fmt.Errorf("cannot normalize object: %w", err) + } + + u, err := resources.ToUnstructured(values[i]) + if err != nil { + return fmt.Errorf("cannot convert object to Unstructured: %w", err) + } + + rr.Resources = append(rr.Resources, *u) + } + + return nil +} + +// ForEachResource iterates over each resource in the ReconciliationRequest's Resources slice, +// invoking the provided function `fn` for each resource. The function `fn` takes a pointer to +// an unstructured.Unstructured object and returns a boolean and an error. +// +// The iteration stops early if: +// - `fn` returns an error. +// - `fn` returns `true` as the first return value (`stop`). 
+func (rr *ReconciliationRequest) ForEachResource(fn func(*unstructured.Unstructured) (bool, error)) error { + for i := range rr.Resources { + stop, err := fn(&rr.Resources[i]) + if err != nil { + return fmt.Errorf("cannot process resource %s: %w", rr.Resources[i].GroupVersionKind(), err) + } + if stop { + break + } + } + + return nil +} + +// RemoveResources removes resources from the ReconciliationRequest's Resources slice +// based on a provided predicate function. The predicate determines whether a resource +// should be removed. +// +// Parameters: +// - predicate: A function that takes a pointer to an unstructured.Unstructured object +// and returns a boolean indicating whether the resource should be removed. +func (rr *ReconciliationRequest) RemoveResources(predicate func(*unstructured.Unstructured) bool) error { + filtered := rr.Resources[:0] // Create a slice with zero length but full capacity + + for i := range rr.Resources { + if predicate(&rr.Resources[i]) { + continue + } + + filtered = append(filtered, rr.Resources[i]) + } + + rr.Resources = filtered + + return nil +} + +func Hash(rr *ReconciliationRequest) ([]byte, error) { + hash := sha256.New() + + dsciGeneration := make([]byte, binary.MaxVarintLen64) + binary.PutVarint(dsciGeneration, rr.DSCI.GetGeneration()) + + instanceGeneration := make([]byte, binary.MaxVarintLen64) + binary.PutVarint(instanceGeneration, rr.Instance.GetGeneration()) + + if _, err := hash.Write([]byte(rr.Instance.GetUID())); err != nil { + return nil, fmt.Errorf("failed to hash instance: %w", err) + } + if _, err := hash.Write(dsciGeneration); err != nil { + return nil, fmt.Errorf("failed to hash dsci generation: %w", err) + } + if _, err := hash.Write(instanceGeneration); err != nil { + return nil, fmt.Errorf("failed to hash instance generation: %w", err) + } + if _, err := hash.Write([]byte(rr.Release.Name)); err != nil { + return nil, fmt.Errorf("failed to hash release: %w", err) + } + if _, err := 
hash.Write([]byte(rr.Release.Version.String())); err != nil { + return nil, fmt.Errorf("failed to hash release: %w", err) + } + + for i := range rr.Manifests { + if _, err := hash.Write([]byte(rr.Manifests[i].String())); err != nil { + return nil, fmt.Errorf("failed to hash manifest: %w", err) + } + } + for i := range rr.Templates { + if _, err := hash.Write([]byte(rr.Templates[i].Path)); err != nil { + return nil, fmt.Errorf("failed to hash template: %w", err) + } + } + + return hash.Sum(nil), nil +} + +func HashStr(rr *ReconciliationRequest) (string, error) { + h, err := Hash(rr) + if err != nil { + return "", err + } + + return resources.EncodeToString(h), nil +} diff --git a/pkg/controller/types/types_test.go b/pkg/controller/types/types_test.go new file mode 100644 index 00000000000..ef52a8ac882 --- /dev/null +++ b/pkg/controller/types/types_test.go @@ -0,0 +1,122 @@ +package types_test + +import ( + "testing" + + "github.com/rs/xid" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/fakeclient" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/matchers/jq" + + . 
"github.com/onsi/gomega" +) + +func TestReconciliationRequest_AddResource(t *testing.T) { + g := NewWithT(t) + + cl, err := fakeclient.New() + g.Expect(err).ToNot(HaveOccurred()) + + rr := types.ReconciliationRequest{Client: cl} + + g.Expect(rr.AddResources(&unstructured.Unstructured{})).To(HaveOccurred()) + g.Expect(rr.Resources).To(BeEmpty()) + + g.Expect(rr.AddResources(&corev1.ConfigMap{})).ToNot(HaveOccurred()) + g.Expect(rr.Resources).To(HaveLen(1)) + + g.Expect(rr.AddResources([]client.Object{}...)).ToNot(HaveOccurred()) + g.Expect(rr.Resources).To(HaveLen(1)) +} + +func TestReconciliationRequest_ForEachResource_UpdateSome(t *testing.T) { + g := NewWithT(t) + + cl, err := fakeclient.New() + g.Expect(err).ToNot(HaveOccurred()) + + rr := types.ReconciliationRequest{Client: cl} + g.Expect(rr.AddResources(&corev1.ConfigMap{})).ToNot(HaveOccurred()) + g.Expect(rr.AddResources(&corev1.Secret{})).ToNot(HaveOccurred()) + g.Expect(rr.Resources).To(HaveLen(2)) + + val := xid.New().String() + + g.Expect( + rr.ForEachResource(func(u *unstructured.Unstructured) (bool, error) { + if u.GroupVersionKind() == gvk.ConfigMap { + return false, nil + } + + if err := unstructured.SetNestedField(u.Object, val, "data", "key"); err != nil { + return false, err + } + + return true, nil + }), + ).ToNot(HaveOccurred()) + + g.Expect(rr.Resources).To(HaveLen(2)) + g.Expect(rr.Resources[0].Object).To(jq.Match(`has("data") | not`)) + g.Expect(rr.Resources[1].Object).To(jq.Match(`.data.key == "%s"`, val)) +} + +func TestReconciliationRequest_ForEachResource_UpdateAll(t *testing.T) { + g := NewWithT(t) + + cl, err := fakeclient.New() + g.Expect(err).ToNot(HaveOccurred()) + + rr := types.ReconciliationRequest{Client: cl} + g.Expect(rr.AddResources(&corev1.ConfigMap{})).ToNot(HaveOccurred()) + g.Expect(rr.AddResources(&corev1.Secret{})).ToNot(HaveOccurred()) + g.Expect(rr.Resources).To(HaveLen(2)) + + val := xid.New().String() + + g.Expect( + rr.ForEachResource(func(u 
*unstructured.Unstructured) (bool, error) { + if err := unstructured.SetNestedField(u.Object, val, "data", "key"); err != nil { + return false, err + } + + return false, nil + }), + ).ToNot(HaveOccurred()) + + g.Expect(rr.Resources).To(And( + HaveLen(2), + HaveEach(jq.Match(`.data.key == "%s"`, val)), + )) +} + +func TestReconciliationRequest_RemoveResources(t *testing.T) { + g := NewWithT(t) + + cl, err := fakeclient.New() + g.Expect(err).ToNot(HaveOccurred()) + + // Create a ReconciliationRequest with some resources + rr := types.ReconciliationRequest{Client: cl} + + err = rr.AddResources(&corev1.ConfigMap{}, &corev1.Secret{}) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(rr.Resources).To(HaveLen(2)) + + // Remove all ConfigMaps using the predicate function + err = rr.RemoveResources(func(u *unstructured.Unstructured) bool { + return u.GroupVersionKind() == gvk.ConfigMap + }) + + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(rr.Resources).To(And( + HaveLen(1), + HaveEach(jq.Match(`.kind == "%s"`, gvk.Secret.Kind)), + )) +} diff --git a/pkg/deploy/deploy.go b/pkg/deploy/deploy.go index e23624c5cc9..cb28d2c5239 100644 --- a/pkg/deploy/deploy.go +++ b/pkg/deploy/deploy.go @@ -25,12 +25,12 @@ import ( "errors" "fmt" "io" + "maps" "net/http" "os" "path/filepath" "strings" - "golang.org/x/exp/maps" k8serr "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -44,7 +44,7 @@ import ( "sigs.k8s.io/kustomize/api/resource" "sigs.k8s.io/kustomize/kyaml/filesys" - "github.com/opendatahub-io/opendatahub-operator/v2/components" + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/conversion" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/annotations" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" @@ -52,19 +52,21 @@ import ( ) var ( - DefaultManifestPath = os.Getenv("DEFAULT_MANIFESTS_PATH") + 
DefaultManifestPath = os.Getenv("DEFAULT_MANIFESTS_PATH") + errPathResolutionFailed = errors.New("path resolution failed") + errPathIrrelevant = errors.New("path is irrelevant") ) // DownloadManifests function performs following tasks: // 1. It takes component URI and only downloads folder specified by component.ContextDir field // 2. It saves the manifests in the odh-manifests/component-name/ folder. -func DownloadManifests(ctx context.Context, componentName string, manifestConfig components.ManifestsConfig) error { - // Get the component repo from the given url - // e.g. https://github.com/example/tarball/master +func DownloadManifests(ctx context.Context, componentName string, manifestConfig common.ManifestsConfig) error { + // Download and validate the manifest archive from the given url, e.g. https://github.com/example/tarball/master req, err := http.NewRequestWithContext(ctx, http.MethodGet, manifestConfig.URI, nil) if err != nil { return err } + resp, err := http.DefaultClient.Do(req) if err != nil { return fmt.Errorf("error downloading manifests: %w", err) @@ -75,73 +77,111 @@ func DownloadManifests(ctx context.Context, componentName string, manifestConfig return fmt.Errorf("error downloading manifests: %v HTTP status", resp.StatusCode) } - // Create a new gzip reader + // Initialize a gzip reader for the response body gzipReader, err := gzip.NewReader(resp.Body) if err != nil { return fmt.Errorf("error creating gzip reader: %w", err) } defer gzipReader.Close() - // Create a new TAR reader - tarReader := tar.NewReader(gzipReader) + // Ensure manifest directory exists + if err := createDirectory(DefaultManifestPath); err != nil { + return err + } + + // Extract TAR contents + return unpackTarFromReader(gzipReader, DefaultManifestPath, componentName, manifestConfig.ContextDir) +} - // Create manifest directory - mode := os.ModePerm - err = os.MkdirAll(DefaultManifestPath, mode) +// createDirectory ensures the specified directory exists, creating it if 
necessary. +func createDirectory(path string) error { + err := os.MkdirAll(path, os.ModePerm) if err != nil { - return fmt.Errorf("error creating manifests directory : %w", err) + return fmt.Errorf("error creating directory %s: %w", path, err) } + return nil +} + +// unpackTarFromReader extracts files from a TAR reader into the target base path. +func unpackTarFromReader(reader io.Reader, basePath, componentName, contextDir string) error { + tarReader := tar.NewReader(reader) - // Extract the contents of the TAR archive to the current directory for { header, err := tarReader.Next() if errors.Is(err, io.EOF) { break } if err != nil { - return err + return fmt.Errorf("error reading tar header: %w", err) } - componentFiles := strings.Split(header.Name, "/") - componentFileName := header.Name - componentManifestPath := componentFiles[0] + "/" + manifestConfig.ContextDir - - if strings.Contains(componentFileName, componentManifestPath) { - // Get manifest path relative to repo - // e.g. of repo/a/b/manifests/base --> base/ - componentFoldersList := strings.Split(componentFileName, "/") - componentFileRelativePathFound := strings.Join(componentFoldersList[len(strings.Split(componentManifestPath, "/")):], "/") - - if header.Typeflag == tar.TypeDir { - err = os.MkdirAll(DefaultManifestPath+"/"+componentName+"/"+componentFileRelativePathFound, mode) - if err != nil { - return fmt.Errorf("error creating directory:%w", err) - } - continue - } + targetPath, err := resolveTargetPath(header.Name, basePath, componentName, contextDir) + if errors.Is(err, errPathIrrelevant) { + continue + } + if err != nil { + return err + } - if header.Typeflag == tar.TypeReg { - file, err := os.Create(DefaultManifestPath + "/" + componentName + "/" + componentFileRelativePathFound) - if err != nil { - return fmt.Errorf("error creating file: %w", err) - } - - defer file.Close() - - for { - _, err := io.CopyN(file, tarReader, 1024) - if err != nil { - if errors.Is(err, io.EOF) { - break - } - 
return fmt.Errorf("error downloading file contents: %w", err)
-				}
-			}
-			continue
-		}
+		err = extractFileOrDirectory(header, tarReader, targetPath)
+		if err != nil {
+			return err
 		}
 	}
-	return err
+
+	return nil
+}
+
+// resolveTargetPath computes the target file path based on the tar header and context directory.
+func resolveTargetPath(headerName, basePath, componentName, contextDir string) (string, error) {
+	componentFiles := strings.Split(headerName, "/")
+	componentManifestPath := filepath.Join(componentFiles[0], contextDir)
+
+	if !strings.Contains(headerName, componentManifestPath) {
+		return "", errPathIrrelevant
+	}
+
+	componentFoldersList := strings.Split(headerName, "/")
+	if len(componentFoldersList) < len(strings.Split(componentManifestPath, "/")) {
+		return "", errPathResolutionFailed // Path resolution failed
+	}
+
+	relativePath := strings.Join(componentFoldersList[len(strings.Split(componentManifestPath, "/")):], "/")
+
+	return filepath.Join(basePath, componentName, relativePath), nil
+}
+
+// extractFileOrDirectory processes a TAR header, creating files or directories as needed.
+func extractFileOrDirectory(header *tar.Header, tarReader *tar.Reader, targetPath string) error {
+	switch header.Typeflag {
+	case tar.TypeDir:
+		// Create a directory for the current header
+		return createDirectory(targetPath)
+
+	case tar.TypeReg:
+		// Create a file and copy its contents from the TAR reader
+		return writeFileFromTar(targetPath, tarReader)
+
+	default:
+		// Handle unsupported header types if needed
+		return nil
+	}
+}
+
+// writeFileFromTar writes a file from the tar reader to the target path. 
+func writeFileFromTar(targetPath string, tarReader *tar.Reader) error { + file, err := os.Create(targetPath) + if err != nil { + return fmt.Errorf("error creating file %s: %w", targetPath, err) + } + defer file.Close() + + _, err = io.Copy(file, tarReader) + if err != nil { + return fmt.Errorf("error writing to file %s: %w", targetPath, err) + } + + return nil } func DeployManifestsFromPath( @@ -152,6 +192,28 @@ func DeployManifestsFromPath( namespace string, componentName string, componentEnabled bool, +) error { + return DeployManifestsFromPathWithLabels( + ctx, + cli, + owner, + manifestPath, + namespace, + componentName, + componentEnabled, map[string]string{}, + ) +} + +func DeployManifestsFromPathWithLabels( + ctx context.Context, + cli client.Client, + owner metav1.Object, + manifestPath string, + namespace string, + componentName string, + componentEnabled bool, + // TODO: this method must be refactored, left it just to avoid breaking compatibility + additionalLabels map[string]string, ) error { // Render the Kustomize manifests k := krusty.MakeKustomizer(krusty.MakeDefaultOptions()) @@ -177,7 +239,22 @@ func DeployManifestsFromPath( return fmt.Errorf("failed applying namespace plugin when preparing Kustomize resources. %w", err) } - labelsPlugin := plugins.CreateAddLabelsPlugin(componentName) + resourceLabels := map[string]string{ + labels.ODH.Component(componentName): "true", + labels.K8SCommon.PartOf: componentName, + } + + for k, v := range additionalLabels { + _, ok := resourceLabels[k] + if ok { + // don't override default labels + continue + } + + resourceLabels[k] = v + } + + labelsPlugin := plugins.CreateSetLabelsPlugin(resourceLabels) if err := labelsPlugin.Transform(resMap); err != nil { return fmt.Errorf("failed applying labels plugin when preparing Kustomize resources. 
%w", err) } diff --git a/pkg/feature/servicemesh/conditions.go b/pkg/feature/servicemesh/conditions.go index e845868aa3d..6926a5dfe3f 100644 --- a/pkg/feature/servicemesh/conditions.go +++ b/pkg/feature/servicemesh/conditions.go @@ -116,9 +116,9 @@ func CheckControlPlaneComponentReadiness(ctx context.Context, c client.Client, s return false, fmt.Errorf("status conditions not found or error in parsing of Service Mesh Control Plane: %w", err) } - readyComponents := len(components["ready"].([]interface{})) //nolint:forcetypeassert - pendingComponents := len(components["pending"].([]interface{})) //nolint:forcetypeassert - unreadyComponents := len(components["unready"].([]interface{})) //nolint:forcetypeassert + readyComponents := len(components["ready"].([]interface{})) //nolint:forcetypeassert,errcheck + pendingComponents := len(components["pending"].([]interface{})) //nolint:forcetypeassert,errcheck + unreadyComponents := len(components["unready"].([]interface{})) //nolint:forcetypeassert,errcheck return pendingComponents == 0 && unreadyComponents == 0 && readyComponents > 0, nil } diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index 38e1964253e..8844e6e3925 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -1,49 +1,108 @@ package logger import ( + "errors" + "flag" + "fmt" "os" + "strconv" "strings" + "sync/atomic" "github.com/go-logr/logr" + "go.uber.org/zap" "go.uber.org/zap/zapcore" - "sigs.k8s.io/controller-runtime/pkg/log/zap" + ctrlzap "sigs.k8s.io/controller-runtime/pkg/log/zap" ) -var logLevelMapping = map[string]int{ - "devel": 0, - "default": 1, // default one when not set log-mode - "prod": 2, +const envVarName = "ZAP_LOG_LEVEL" + +var defaultLogLevel = zap.InfoLevel + +var logLevel atomic.Value + +// copy from controller-runtime/pkg/log/zap/flag.go. 
+var levelStrings = map[string]zapcore.Level{
+	"debug": zap.DebugLevel,
+	"info":  zap.InfoLevel,
+	"error": zap.ErrorLevel,
+}
+
+// adjusted copy from controller-runtime/pkg/log/zap/flag.go, keep the same argument name.
+func stringToLevel(flagValue string) (zapcore.Level, error) {
+	level, validLevel := levelStrings[strings.ToLower(flagValue)]
+	if validLevel {
+		return level, nil
+	}
+	logLevel, err := strconv.ParseInt(flagValue, 10, 8)
+	if err != nil {
+		return 0, fmt.Errorf("invalid log level \"%s\"", flagValue)
+	}
+	if logLevel > 0 {
+		intLevel := -1 * int8(logLevel)
+		return zapcore.Level(intLevel), nil
+	}
+
+	return 0, fmt.Errorf("invalid log level \"%s\"", flagValue)
+}
 
-// in each controller, to use different log level.
-func LogWithLevel(logger logr.Logger, level string) logr.Logger {
-	level = strings.TrimSpace(level)
-	verbosityLevel, ok := logLevelMapping[level]
+func SetLevel(levelStr string) error {
+	if levelStr == "" {
+		return nil
+	}
+	levelNum, err := stringToLevel(levelStr)
+	if err != nil {
+		return err
+	}
+
+	// ctrlzap.addDefaults() uses a pointer to the AtomicLevel,
+	// but ctrlzap.(*levelFlag).Set() the structure itself.
+	// So use the structure and always set the value in newOptions() to addDefaults() call
+	level, ok := logLevel.Load().(zap.AtomicLevel)
 	if !ok {
-		verbosityLevel = 1 // fallback to info level
+		return errors.New("stored loglevel is not of type *zap.AtomicLevel")
+	}
+
+	level.SetLevel(levelNum)
+	return nil
+}
+
+func levelFromEnvOrDefault() zapcore.Level {
+	levelStr := os.Getenv(envVarName)
+	if levelStr == "" {
+		return defaultLogLevel
+	}
+	level, err := stringToLevel(levelStr)
+	if err != nil {
+		return defaultLogLevel
 	}
-	return logger.V(verbosityLevel)
+	return level
 }
 
-// in DSC component, to use different mode for logging, e.g. development, production
-// when not set mode it falls to "default" which is used by startup main.go. 
-func ConfigLoggers(mode string) logr.Logger { - var opts zap.Options +func NewLogger(mode string, override *ctrlzap.Options) logr.Logger { + opts := newOptions(mode, levelFromEnvOrDefault()) + overrideOptions(opts, override) + logLevel.Store(opts.Level) + return ctrlzap.New(ctrlzap.UseFlagOptions(opts)) +} + +func newOptions(mode string, defaultLevel zapcore.Level) *ctrlzap.Options { + var opts ctrlzap.Options + level := zap.NewAtomicLevelAt(defaultLevel) + switch mode { case "devel", "development": // the most logging verbosity - opts = zap.Options{ + opts = ctrlzap.Options{ Development: true, StacktraceLevel: zapcore.WarnLevel, - Level: zapcore.InfoLevel, DestWriter: os.Stdout, } case "prod", "production": // the least logging verbosity - opts = zap.Options{ + opts = ctrlzap.Options{ Development: false, StacktraceLevel: zapcore.ErrorLevel, - Level: zapcore.InfoLevel, DestWriter: os.Stdout, - EncoderConfigOptions: []zap.EncoderConfigOption{func(config *zapcore.EncoderConfig) { + EncoderConfigOptions: []ctrlzap.EncoderConfigOption{func(config *zapcore.EncoderConfig) { config.EncodeTime = zapcore.ISO8601TimeEncoder // human readable not epoch config.EncodeDuration = zapcore.SecondsDurationEncoder config.LevelKey = "LogLevel" @@ -55,12 +114,42 @@ func ConfigLoggers(mode string) logr.Logger { }}, } default: - opts = zap.Options{ + opts = ctrlzap.Options{ Development: false, StacktraceLevel: zapcore.ErrorLevel, - Level: zapcore.InfoLevel, DestWriter: os.Stdout, } } - return zap.New(zap.UseFlagOptions(&opts)) + + opts.Level = level + return &opts +} + +func overrideOptions(orig, override *ctrlzap.Options) { + // Development is boolean, cannot check for nil, so check if it was set + isDevelopmentSet := false + flag.Visit(func(f *flag.Flag) { + if f.Name == "zap-devel" { + isDevelopmentSet = true + } + }) + if isDevelopmentSet { + orig.Development = override.Development + } + + if override.StacktraceLevel != nil { + orig.StacktraceLevel = override.StacktraceLevel + } + + 
if override.Level != nil { + orig.Level = override.Level + } + + if override.DestWriter != nil { + orig.DestWriter = override.DestWriter + } + + if override.EncoderConfigOptions != nil { + orig.EncoderConfigOptions = override.EncoderConfigOptions + } } diff --git a/pkg/manifests/kustomize/kustomize.go b/pkg/manifests/kustomize/kustomize.go new file mode 100644 index 00000000000..3c217a73b7a --- /dev/null +++ b/pkg/manifests/kustomize/kustomize.go @@ -0,0 +1,28 @@ +package kustomize + +import ( + "sigs.k8s.io/kustomize/api/krusty" + "sigs.k8s.io/kustomize/kyaml/filesys" +) + +const ( + DefaultKustomizationFileName = "kustomization.yaml" + DefaultKustomizationFilePath = "default" +) + +func NewEngine(opts ...EngineOptsFn) *Engine { + e := Engine{ + k: krusty.MakeKustomizer(krusty.MakeDefaultOptions()), + fs: filesys.MakeFsOnDisk(), + renderOpts: renderOpts{ + kustomizationFileName: DefaultKustomizationFileName, + kustomizationFileOverlay: DefaultKustomizationFilePath, + }, + } + + for _, fn := range opts { + fn(&e) + } + + return &e +} diff --git a/pkg/manifests/kustomize/kustomize_engine.go b/pkg/manifests/kustomize/kustomize_engine.go new file mode 100644 index 00000000000..b067ed4a010 --- /dev/null +++ b/pkg/manifests/kustomize/kustomize_engine.go @@ -0,0 +1,88 @@ +package kustomize + +import ( + "fmt" + "maps" + "path/filepath" + "slices" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/kustomize/api/krusty" + "sigs.k8s.io/kustomize/kyaml/filesys" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/plugins" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" +) + +type Engine struct { + k *krusty.Kustomizer + fs filesys.FileSystem + renderOpts renderOpts +} + +func (e *Engine) Render(path string, opts ...RenderOptsFn) ([]unstructured.Unstructured, error) { + // poor man clone + ro := e.renderOpts + ro.labels = maps.Clone(e.renderOpts.labels) + ro.annotations = maps.Clone(e.renderOpts.annotations) + ro.plugins = 
slices.Clone(e.renderOpts.plugins) + + for _, fn := range opts { + fn(&ro) + } + + if !e.fs.Exists(filepath.Join(path, ro.kustomizationFileName)) { + path = filepath.Join(path, ro.kustomizationFileOverlay) + } + + resMap, err := e.k.Run(e.fs, path) + if err != nil { + return nil, err + } + + if ro.ns != "" { + plugin := plugins.CreateNamespaceApplierPlugin(ro.ns) + if err := plugin.Transform(resMap); err != nil { + return nil, fmt.Errorf("failed applying namespace plugin when preparing Kustomize resources. %w", err) + } + } + + if len(ro.labels) != 0 { + plugin := plugins.CreateSetLabelsPlugin(ro.labels) + if err := plugin.Transform(resMap); err != nil { + return nil, fmt.Errorf("failed applying labels plugin when preparing Kustomize resources. %w", err) + } + } + + if len(ro.annotations) != 0 { + plugin := plugins.CreateAddAnnotationsPlugin(ro.annotations) + if err := plugin.Transform(resMap); err != nil { + return nil, fmt.Errorf("failed applying annotations plugin when preparing Kustomize resources. %w", err) + } + } + + for i := range ro.plugins { + if err := ro.plugins[i].Transform(resMap); err != nil { + return nil, fmt.Errorf("failed applying %v plugin when preparing Kustomize resources. %w", ro.plugins[i], err) + } + } + + renderedRes := resMap.Resources() + resp := make([]unstructured.Unstructured, len(renderedRes)) + + for i := range renderedRes { + m, err := renderedRes[i].Map() + if err != nil { + return nil, fmt.Errorf("failed to transform Resources to Unstructured. %w", err) + } + + u, err := resources.ToUnstructured(&m) + if err != nil { + return nil, fmt.Errorf("failed to transform Resources to Unstructured. 
%w", err) + } + + resp[i] = *u + } + + return resp, nil +} diff --git a/pkg/manifests/kustomize/kustomize_filters.go b/pkg/manifests/kustomize/kustomize_filters.go new file mode 100644 index 00000000000..d88feddf30d --- /dev/null +++ b/pkg/manifests/kustomize/kustomize_filters.go @@ -0,0 +1,28 @@ +package kustomize + +import ( + "sigs.k8s.io/kustomize/api/resmap" + "sigs.k8s.io/kustomize/kyaml/kio" + kyaml "sigs.k8s.io/kustomize/kyaml/yaml" +) + +var _ resmap.Transformer = &filterPlugin{} +var _ kio.Filter = &filterProxy{} + +type filterPlugin struct { + f FilterFn +} + +func (p *filterPlugin) Transform(m resmap.ResMap) error { + return m.ApplyFilter(&filterProxy{ + f: p.f, + }) +} + +type filterProxy struct { + f FilterFn +} + +func (f *filterProxy) Filter(nodes []*kyaml.RNode) ([]*kyaml.RNode, error) { + return f.f(nodes) +} diff --git a/pkg/manifests/kustomize/kustomize_opts.go b/pkg/manifests/kustomize/kustomize_opts.go new file mode 100644 index 00000000000..a5cbe4cba42 --- /dev/null +++ b/pkg/manifests/kustomize/kustomize_opts.go @@ -0,0 +1,21 @@ +package kustomize + +import ( + "sigs.k8s.io/kustomize/kyaml/filesys" +) + +type EngineOptsFn func(engine *Engine) + +func WithEngineFS(value filesys.FileSystem) EngineOptsFn { + return func(engine *Engine) { + engine.fs = value + } +} + +func WithEngineRenderOpts(values ...RenderOptsFn) EngineOptsFn { + return func(engine *Engine) { + for _, fn := range values { + fn(&engine.renderOpts) + } + } +} diff --git a/pkg/manifests/kustomize/kustomize_render_opts.go b/pkg/manifests/kustomize/kustomize_render_opts.go new file mode 100644 index 00000000000..970365e121c --- /dev/null +++ b/pkg/manifests/kustomize/kustomize_render_opts.go @@ -0,0 +1,101 @@ +package kustomize + +import ( + "sigs.k8s.io/kustomize/api/resmap" + kyaml "sigs.k8s.io/kustomize/kyaml/yaml" +) + +type FilterFn func(nodes []*kyaml.RNode) ([]*kyaml.RNode, error) + +type renderOpts struct { + kustomizationFileName string + kustomizationFileOverlay string 
+ ns string + labels map[string]string + annotations map[string]string + plugins []resmap.Transformer +} + +type RenderOptsFn func(*renderOpts) + +func WithKustomizationFileName(value string) RenderOptsFn { + return func(opts *renderOpts) { + opts.kustomizationFileName = value + } +} + +func WithKustomizationOverlayPath(value string) RenderOptsFn { + return func(opts *renderOpts) { + opts.kustomizationFileOverlay = value + } +} + +func WithNamespace(value string) RenderOptsFn { + return func(opts *renderOpts) { + opts.ns = value + } +} + +func WithLabel(name string, value string) RenderOptsFn { + return func(opts *renderOpts) { + if opts.labels == nil { + opts.labels = map[string]string{} + } + + opts.labels[name] = value + } +} + +func WithLabels(values map[string]string) RenderOptsFn { + return func(opts *renderOpts) { + if opts.labels == nil { + opts.labels = map[string]string{} + } + + for k, v := range values { + opts.labels[k] = v + } + } +} + +func WithAnnotation(name string, value string) RenderOptsFn { + return func(opts *renderOpts) { + if opts.annotations == nil { + opts.annotations = map[string]string{} + } + + opts.annotations[name] = value + } +} + +func WithAnnotations(values map[string]string) RenderOptsFn { + return func(opts *renderOpts) { + if opts.annotations == nil { + opts.annotations = map[string]string{} + } + + for k, v := range values { + opts.annotations[k] = v + } + } +} + +func WithPlugin(value resmap.Transformer) RenderOptsFn { + return func(opts *renderOpts) { + opts.plugins = append(opts.plugins, value) + } +} + +func WithFilter(value FilterFn) RenderOptsFn { + return func(opts *renderOpts) { + opts.plugins = append(opts.plugins, &filterPlugin{f: value}) + } +} + +func WithFilters(values ...FilterFn) RenderOptsFn { + return func(opts *renderOpts) { + for i := range values { + opts.plugins = append(opts.plugins, &filterPlugin{f: values[i]}) + } + } +} diff --git a/pkg/manifests/kustomize/kustomize_support.go 
b/pkg/manifests/kustomize/kustomize_support.go new file mode 100644 index 00000000000..bb11f436938 --- /dev/null +++ b/pkg/manifests/kustomize/kustomize_support.go @@ -0,0 +1,16 @@ +package kustomize + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + kyaml "sigs.k8s.io/kustomize/kyaml/yaml" +) + +func NodeToUnstructured(n *kyaml.RNode) unstructured.Unstructured { + u := unstructured.Unstructured{} + u.SetAPIVersion(n.GetApiVersion()) + u.SetKind(n.GetKind()) + u.SetNamespace(n.GetNamespace()) + u.SetName(n.GetName()) + + return u +} diff --git a/pkg/manifests/kustomize/kustomize_test.go b/pkg/manifests/kustomize/kustomize_test.go new file mode 100644 index 00000000000..c1bf663fb89 --- /dev/null +++ b/pkg/manifests/kustomize/kustomize_test.go @@ -0,0 +1,68 @@ +package kustomize_test + +import ( + "path" + "testing" + + "github.com/rs/xid" + "sigs.k8s.io/kustomize/kyaml/filesys" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/manifests/kustomize" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/matchers/jq" + + . 
"github.com/onsi/gomega" +) + +const testEngineKustomization = ` +apiVersion: kustomize.config.k8s.io/v1beta1 +resources: +- test-engine-cm.yaml +` + +const testEngineConfigMap = ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-engine-cm +data: + foo: bar +` + +func TestEngine(t *testing.T) { + g := NewWithT(t) + id := xid.New().String() + ns := xid.New().String() + fs := filesys.MakeFsInMemory() + + e := kustomize.NewEngine( + kustomize.WithEngineFS(fs), + ) + + _ = fs.MkdirAll(path.Join(id, kustomize.DefaultKustomizationFilePath)) + _ = fs.WriteFile(path.Join(id, kustomize.DefaultKustomizationFileName), []byte(testEngineKustomization)) + _ = fs.WriteFile(path.Join(id, "test-engine-cm.yaml"), []byte(testEngineConfigMap)) + + r, err := e.Render( + id, + kustomize.WithNamespace(ns), + kustomize.WithLabel("component.opendatahub.io/name", "foo"), + kustomize.WithLabel("platform.opendatahub.io/namespace", ns), + kustomize.WithAnnotations(map[string]string{ + "platform.opendatahub.io/release": "1.2.3", + "platform.opendatahub.io/type": "managed", + }), + ) + + g.Expect(err).NotTo(HaveOccurred()) + + g.Expect(r).Should(And( + HaveLen(1), + HaveEach(And( + jq.Match(`.metadata.namespace == "%s"`, ns), + jq.Match(`.metadata.labels."component.opendatahub.io/name" == "%s"`, "foo"), + jq.Match(`.metadata.labels."platform.opendatahub.io/namespace" == "%s"`, ns), + jq.Match(`.metadata.annotations."platform.opendatahub.io/release" == "%s"`, "1.2.3"), + jq.Match(`.metadata.annotations."platform.opendatahub.io/type" == "%s"`, "managed"), + )), + )) +} diff --git a/pkg/metadata/annotations/annotations.go b/pkg/metadata/annotations/annotations.go index e2d4cfde922..7ebd755d077 100644 --- a/pkg/metadata/annotations/annotations.go +++ b/pkg/metadata/annotations/annotations.go @@ -13,3 +13,14 @@ const ( SecretLengthAnnotation = "secret-generator.opendatahub.io/complexity" SecretOauthClientAnnotation = "secret-generator.opendatahub.io/oauth-client-route" ) + +// 
ManagementStateAnnotation set on Component CR only, to show which ManagementState value if defined in DSC for the component. +const ManagementStateAnnotation = "component.opendatahub.io/management-state" + +const ( + PlatformVersion = "platform.opendatahub.io/version" + PlatformType = "platform.opendatahub.io/type" + InstanceGeneration = "platform.opendatahub.io/instance.generation" + InstanceName = "platform.opendatahub.io/instance.name" + InstanceUID = "platform.opendatahub.io/instance.uid" +) diff --git a/pkg/metadata/labels/types.go b/pkg/metadata/labels/types.go index 382c2e38db3..8b79f7d895f 100644 --- a/pkg/metadata/labels/types.go +++ b/pkg/metadata/labels/types.go @@ -5,6 +5,9 @@ const ( InjectTrustCA = "config.openshift.io/inject-trusted-cabundle" SecurityEnforce = "pod-security.kubernetes.io/enforce" ClusterMonitoring = "openshift.io/cluster-monitoring" + PlatformPartOf = "platform.opendatahub.io/part-of" + Platform = "platform" + True = "true" ) // K8SCommon keeps common kubernetes labels [1] diff --git a/pkg/plugins/addAnnotationsplugin.go b/pkg/plugins/addAnnotationsplugin.go new file mode 100644 index 00000000000..1e0fbbaeb59 --- /dev/null +++ b/pkg/plugins/addAnnotationsplugin.go @@ -0,0 +1,20 @@ +package plugins + +import ( + "sigs.k8s.io/kustomize/api/builtins" //nolint:staticcheck // Remove after package update + "sigs.k8s.io/kustomize/api/types" + "sigs.k8s.io/kustomize/kyaml/resid" +) + +func CreateAddAnnotationsPlugin(annotations map[string]string) *builtins.AnnotationsTransformerPlugin { + return &builtins.AnnotationsTransformerPlugin{ + Annotations: annotations, + FieldSpecs: []types.FieldSpec{ + { + Gvk: resid.Gvk{}, + Path: "metadata/annotations", + CreateIfNotPresent: true, + }, + }, + } +} diff --git a/pkg/plugins/addLabelsplugin.go b/pkg/plugins/addLabelsplugin.go index 13ecada5a93..5c79d2cb613 100644 --- a/pkg/plugins/addLabelsplugin.go +++ b/pkg/plugins/addLabelsplugin.go @@ -16,11 +16,15 @@ import ( // - It adds labels to the 
"spec/template/metadata/labels" and "spec/selector/matchLabels" paths // for resources of kind "Deployment". func CreateAddLabelsPlugin(componentName string) *builtins.LabelTransformerPlugin { + return CreateSetLabelsPlugin(map[string]string{ + labels.ODH.Component(componentName): "true", + labels.K8SCommon.PartOf: componentName, + }) +} + +func CreateSetLabelsPlugin(labels map[string]string) *builtins.LabelTransformerPlugin { return &builtins.LabelTransformerPlugin{ - Labels: map[string]string{ - labels.ODH.Component(componentName): "true", - labels.K8SCommon.PartOf: componentName, - }, + Labels: labels, FieldSpecs: []types.FieldSpec{ { Gvk: resid.Gvk{Kind: "Deployment"}, diff --git a/pkg/plugins/removerplugin.go b/pkg/plugins/removerplugin.go index e0c852f7c69..330aea788d4 100644 --- a/pkg/plugins/removerplugin.go +++ b/pkg/plugins/removerplugin.go @@ -73,9 +73,18 @@ func (f RemoverFilter) run(node *kyaml.RNode) (*kyaml.RNode, error) { return node, errors.New("no field set to remove, path to the field cannot be empty") } + return ClearFieldFor(node, f.Gvk, f.Path) +} + +func ClearFieldFor(node *kyaml.RNode, gvk schema.GroupVersionKind, fieldPath []string) (*kyaml.RNode, error) { + pathLen := len(fieldPath) + if pathLen == 0 { + return node, nil + } + typeMeta := kyaml.TypeMeta{ - APIVersion: f.Gvk.GroupVersion().String(), - Kind: f.Gvk.Kind, + APIVersion: gvk.GroupVersion().String(), + Kind: gvk.Kind, } meta, err := node.GetMeta() @@ -87,8 +96,29 @@ func (f RemoverFilter) run(node *kyaml.RNode) (*kyaml.RNode, error) { return node, nil } - path := f.Path[:pathLen-1] - name := f.Path[pathLen-1] + path := fieldPath[:pathLen-1] + name := fieldPath[pathLen-1] + + matcher := &kyaml.PathMatcher{Path: path} + result, err := node.Pipe(matcher) + if err != nil { + return node, err + } + + return node, result.VisitElements( + func(node *kyaml.RNode) error { + return node.PipeE(kyaml.FieldClearer{Name: name}) + }) +} + +func ClearField(node *kyaml.RNode, fieldPath []string) 
(*kyaml.RNode, error) { + pathLen := len(fieldPath) + if pathLen == 0 { + return node, nil + } + + path := fieldPath[:pathLen-1] + name := fieldPath[pathLen-1] matcher := &kyaml.PathMatcher{Path: path} result, err := node.Pipe(matcher) diff --git a/pkg/resources/resources.go b/pkg/resources/resources.go new file mode 100644 index 00000000000..f5de7cf39a3 --- /dev/null +++ b/pkg/resources/resources.go @@ -0,0 +1,329 @@ +package resources + +import ( + "bytes" + "crypto/sha256" + "encoding/base64" + "errors" + "fmt" + "io" + "slices" + + "github.com/davecgh/go-spew/spew" + routev1 "github.com/openshift/api/route/v1" + "gopkg.in/yaml.v3" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" +) + +func ToUnstructured(obj any) (*unstructured.Unstructured, error) { + data, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return nil, fmt.Errorf("unable to convert object %T to unstructured: %w", obj, err) + } + + u := unstructured.Unstructured{ + Object: data, + } + + return &u, nil +} + +func Decode(decoder runtime.Decoder, content []byte) ([]unstructured.Unstructured, error) { + results := make([]unstructured.Unstructured, 0) + + r := bytes.NewReader(content) + yd := yaml.NewDecoder(r) + + for { + var out map[string]interface{} + + err := yd.Decode(&out) + if err != nil { + if errors.Is(err, io.EOF) { + break + } + + return nil, fmt.Errorf("unable to decode resource: %w", err) + } + + if len(out) == 0 { + continue + } + + if out["Kind"] == "" { + continue + } + + encoded, err := yaml.Marshal(out) + if err != nil { + return nil, fmt.Errorf("unable to marshal resource: %w", err) + } + + var obj unstructured.Unstructured + + if _, _, 
err = decoder.Decode(encoded, nil, &obj); err != nil { + if runtime.IsMissingKind(err) { + continue + } + + return nil, fmt.Errorf("unable to decode resource: %w", err) + } + + results = append(results, obj) + } + + return results, nil +} + +func GvkToUnstructured(gvk schema.GroupVersionKind) *unstructured.Unstructured { + u := unstructured.Unstructured{} + u.SetGroupVersionKind(gvk) + + return &u +} + +func IngressHost(r routev1.Route) string { + if len(r.Status.Ingress) != 1 { + return "" + } + + in := r.Status.Ingress[0] + + for i := range in.Conditions { + if in.Conditions[i].Type == routev1.RouteAdmitted && in.Conditions[i].Status == corev1.ConditionTrue { + return in.Host + } + } + + return "" +} + +func HasLabel(obj client.Object, k string, values ...string) bool { + if obj == nil { + return false + } + + target := obj.GetLabels() + if target == nil { + return false + } + + val, found := target[k] + if !found { + return false + } + + return slices.Contains(values, val) +} + +func SetLabels(obj client.Object, values map[string]string) { + target := obj.GetLabels() + if target == nil { + target = make(map[string]string) + } + + for k, v := range values { + target[k] = v + } + + obj.SetLabels(target) +} + +func SetLabel(obj client.Object, k string, v string) string { + target := obj.GetLabels() + if target == nil { + target = make(map[string]string) + } + + old := target[k] + target[k] = v + + obj.SetLabels(target) + + return old +} + +func RemoveLabel(obj client.Object, k string) { + target := obj.GetLabels() + if target == nil { + return + } + + delete(target, k) + + obj.SetLabels(target) +} + +func GetLabel(obj client.Object, k string) string { + target := obj.GetLabels() + if target == nil { + return "" + } + + return target[k] +} + +func HasAnnotation(obj client.Object, k string, values ...string) bool { + if obj == nil { + return false + } + + target := obj.GetAnnotations() + if target == nil { + return false + } + + val, found := target[k] + if !found { 
+ return false + } + + return slices.Contains(values, val) +} + +func SetAnnotations(obj client.Object, values map[string]string) { + target := obj.GetAnnotations() + if target == nil { + target = make(map[string]string) + } + + for k, v := range values { + target[k] = v + } + + obj.SetAnnotations(target) +} + +func SetAnnotation(obj client.Object, k string, v string) string { + target := obj.GetAnnotations() + if target == nil { + target = make(map[string]string) + } + + old := target[k] + target[k] = v + + obj.SetAnnotations(target) + + return old +} + +func RemoveAnnotation(obj client.Object, k string) { + target := obj.GetAnnotations() + if target == nil { + return + } + + delete(target, k) + + obj.SetAnnotations(target) +} + +func GetAnnotation(obj client.Object, k string) string { + target := obj.GetAnnotations() + if target == nil { + return "" + } + + return target[k] +} + +// Hash generates an SHA-256 hash of an unstructured Kubernetes object, omitting +// specific fields that are typically irrelevant for hash comparison such as +// "creationTimestamp", "deletionTimestamp", "managedFields", "ownerReferences", +// "uid", "resourceVersion", and "status". It returns the computed hash as a byte +// slice or an error if the hashing process fails. 
+func Hash(in *unstructured.Unstructured) ([]byte, error) { + obj := in.DeepCopy() + unstructured.RemoveNestedField(obj.Object, "metadata", "uid") + unstructured.RemoveNestedField(obj.Object, "metadata", "resourceVersion") + unstructured.RemoveNestedField(obj.Object, "metadata", "deletionTimestamp") + unstructured.RemoveNestedField(obj.Object, "metadata", "managedFields") + unstructured.RemoveNestedField(obj.Object, "metadata", "ownerReferences") + unstructured.RemoveNestedField(obj.Object, "status") + + printer := spew.ConfigState{ + Indent: " ", + SortKeys: true, + DisableMethods: true, + SpewKeys: true, + } + + hasher := sha256.New() + + if _, err := printer.Fprintf(hasher, "%#v", obj); err != nil { + return nil, fmt.Errorf("failed to calculate hash: %w", err) + } + + return hasher.Sum(nil), nil +} + +func EncodeToString(in []byte) string { + return "v" + base64.RawURLEncoding.EncodeToString(in) +} + +func KindForObject(scheme *runtime.Scheme, obj runtime.Object) (string, error) { + if obj.GetObjectKind().GroupVersionKind().Kind != "" { + return obj.GetObjectKind().GroupVersionKind().Kind, nil + } + + gvk, err := apiutil.GVKForObject(obj, scheme) + if err != nil { + return "", fmt.Errorf("failed to get GVK: %w", err) + } + + return gvk.Kind, nil +} + +func GetGroupVersionKindForObject(s *runtime.Scheme, obj runtime.Object) (schema.GroupVersionKind, error) { + if obj == nil { + return schema.GroupVersionKind{}, errors.New("nil object") + } + + if obj.GetObjectKind().GroupVersionKind().Version != "" && obj.GetObjectKind().GroupVersionKind().Kind != "" { + return obj.GetObjectKind().GroupVersionKind(), nil + } + + gvk, err := apiutil.GVKForObject(obj, s) + if err != nil { + return schema.GroupVersionKind{}, fmt.Errorf("failed to get GVK: %w", err) + } + + return gvk, nil +} + +func EnsureGroupVersionKind(s *runtime.Scheme, obj client.Object) error { + gvk, err := GetGroupVersionKindForObject(s, obj) + if err != nil { + return err + } + + 
obj.GetObjectKind().SetGroupVersionKind(gvk) + + return nil +} + +func HasDevFlags(in common.WithDevFlags) bool { + if in == nil { + return false + } + + df := in.GetDevFlags() + + return df != nil && len(df.Manifests) != 0 +} + +func NamespacedNameFromObject(obj client.Object) types.NamespacedName { + return types.NamespacedName{ + Namespace: obj.GetNamespace(), + Name: obj.GetName(), + } +} diff --git a/pkg/resources/resources_test.go b/pkg/resources/resources_test.go new file mode 100644 index 00000000000..88d4d589539 --- /dev/null +++ b/pkg/resources/resources_test.go @@ -0,0 +1,138 @@ +package resources_test + +import ( + "errors" + "testing" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" + + . "github.com/onsi/gomega" +) + +func TestHasAnnotationAndLabels(t *testing.T) { + tests := []struct { + name string + data map[string]string + key string + values []string + expected bool + }{ + {"nil object", nil, "key1", []string{"val1"}, false}, + {"no metadata", map[string]string{}, "key1", []string{"val1"}, false}, + {"metadata exists and value matches", map[string]string{"key1": "val1"}, "key1", []string{"val1"}, true}, + {"metadata exists and value doesn't match", map[string]string{"key1": "val2"}, "key1", []string{"val1"}, false}, + {"metadata exists and value in list", map[string]string{"key1": "val2"}, "key1", []string{"val1", "val2"}, true}, + {"metadata exists and key doesn't match", map[string]string{"key2": "val1"}, "key1", []string{"val1"}, false}, + {"multiple values and no match", map[string]string{"key1": "val3"}, "key1", []string{"val1", "val2"}, false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Run("annotations_"+tt.name, func(t *testing.T) { + g := NewWithT(t) + + obj := 
unstructured.Unstructured{} + if len(tt.data) != 0 { + obj.SetAnnotations(tt.data) + } + + result := resources.HasAnnotation(&obj, tt.key, tt.values...) + + g.Expect(result).To(Equal(tt.expected)) + }) + + t.Run("labels_"+tt.name, func(t *testing.T) { + g := NewWithT(t) + + obj := unstructured.Unstructured{} + if len(tt.data) != 0 { + obj.SetLabels(tt.data) + } + + result := resources.HasLabel(&obj, tt.key, tt.values...) + + g.Expect(result).To(Equal(tt.expected)) + }) + }) + } +} + +func TestGetGroupVersionKindForObject(t *testing.T) { + g := NewWithT(t) + + scheme := runtime.NewScheme() + g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) + g.Expect(appsv1.AddToScheme(scheme)).To(Succeed()) + + t.Run("ObjectWithGVK", func(t *testing.T) { + obj := &unstructured.Unstructured{} + obj.SetGroupVersionKind(gvk.Deployment) + + gotGVK, err := resources.GetGroupVersionKindForObject(scheme, obj) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(gotGVK).To(Equal(gvk.Deployment)) + }) + + t.Run("ObjectWithoutGVK_SuccessfulLookup", func(t *testing.T) { + obj := &appsv1.Deployment{} + + gotGVK, err := resources.GetGroupVersionKindForObject(scheme, obj) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(gotGVK).To(Equal(gvk.Deployment)) + }) + + t.Run("ObjectWithoutGVK_ErrorInLookup", func(t *testing.T) { + obj := &unstructured.Unstructured{} + + _, err := resources.GetGroupVersionKindForObject(scheme, obj) + g.Expect(err).To(WithTransform( + errors.Unwrap, + MatchError(runtime.IsMissingKind, "IsMissingKind"), + )) + }) + + t.Run("NilObject", func(t *testing.T) { + _, err := resources.GetGroupVersionKindForObject(scheme, nil) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("nil object")) + }) +} + +func TestEnsureGroupVersionKind(t *testing.T) { + g := NewWithT(t) + + scheme := runtime.NewScheme() + g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) + g.Expect(appsv1.AddToScheme(scheme)).To(Succeed()) + + t.Run("ForObject", func(t *testing.T) { + obj 
:= &unstructured.Unstructured{} + obj.SetAPIVersion(gvk.Deployment.GroupVersion().String()) + obj.SetKind(gvk.Deployment.Kind) + + err := resources.EnsureGroupVersionKind(scheme, obj) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(obj.GetObjectKind().GroupVersionKind()).To(Equal(gvk.Deployment)) + }) + + t.Run("ErrorOnNilObject", func(t *testing.T) { + err := resources.EnsureGroupVersionKind(scheme, nil) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("nil object")) + }) + + t.Run("ErrorOnInvalidObject", func(t *testing.T) { + obj := &unstructured.Unstructured{} + obj.SetKind("UnknownKind") + + err := resources.EnsureGroupVersionKind(scheme, obj) + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("failed to get GVK")) + }) +} diff --git a/pkg/resources/resources_types.go b/pkg/resources/resources_types.go new file mode 100644 index 00000000000..8249cb775f9 --- /dev/null +++ b/pkg/resources/resources_types.go @@ -0,0 +1,19 @@ +package resources + +import "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + +type UnstructuredList []unstructured.Unstructured + +func (l UnstructuredList) Clone() []unstructured.Unstructured { + if len(l) == 0 { + return nil + } + + result := make([]unstructured.Unstructured, len(l)) + + for i := range l { + result[i] = *l[i].DeepCopy() + } + + return result +} diff --git a/pkg/services/gc/gc.go b/pkg/services/gc/gc.go new file mode 100644 index 00000000000..284a2da3dd7 --- /dev/null +++ b/pkg/services/gc/gc.go @@ -0,0 +1,341 @@ +package gc + +import ( + "context" + "fmt" + "slices" + "strings" + + "github.com/go-logr/logr" + "golang.org/x/exp/maps" + authorizationv1 "k8s.io/api/authorization/v1" + k8serr "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + 
"k8s.io/client-go/discovery" + ctrlCli "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/client" +) + +// Instance is a global instance of the GC service. +// +// TODO: since the GC service is quite heavy, as it has to discover +// +// resources that can be subject to GC, we share a single global +// instance; however, in the long term, we should find a better way +// to let consumers of the service access it. +var Instance *GC + +const ( + DeleteVerb = "delete" + AnyVerb = "*" + AnyResource = "*" +) + +type options struct { + propagationPolicy ctrlCli.PropagationPolicy + unremovables []schema.GroupVersionKind +} + +type OptsFn func(*options) + +func WithUnremovables(items ...schema.GroupVersionKind) OptsFn { + return func(o *options) { + o.unremovables = append(o.unremovables, items...) + } +} + +func WithPropagationPolicy(policy metav1.DeletionPropagation) OptsFn { + return func(o *options) { + o.propagationPolicy = ctrlCli.PropagationPolicy(policy) + } +} + +func New(cli *client.Client, ns string, opts ...OptsFn) *GC { + res := GC{ + client: cli, + ns: ns, + options: options{ + propagationPolicy: ctrlCli.PropagationPolicy(metav1.DeletePropagationForeground), + unremovables: make([]schema.GroupVersionKind, 0), + }, + + resources: Resources{ + items: make([]Resource, 0), + }, + } + + for _, o := range opts { + o(&res.options) + } + + return &res +} + +type GC struct { + client *client.Client + ns string + resources Resources + options options +} + +func (gc *GC) Start(ctx context.Context) error { + l := gc.log(ctx) + l.Info("Start computing deletable types") + + res, err := gc.computeDeletableTypes(ctx) + if err != nil { + return fmt.Errorf("cannot discover deletable types: %w", err) + } + + gc.resources.Set(res) + + l.Info("Deletable types computed", "count", gc.resources.Len()) + + return nil +} + +func (gc *GC) Run( + ctx context.Context, + selector 
labels.Selector, + predicate func(context.Context, unstructured.Unstructured) (bool, error), +) (int, error) { + l := gc.log(ctx) + + deleted := 0 + resources := gc.resources.Get() + + dc := gc.client.Dynamic() + lo := metav1.ListOptions{LabelSelector: selector.String()} + + for r := range resources { + items, err := dc.Resource(resources[r].GroupVersionResource()).Namespace("").List(ctx, lo) + if err != nil { + if k8serr.IsForbidden(err) { + l.V(3).Info( + "cannot list resource", + "reason", err.Error(), + "gvk", resources[r].GroupVersionKind(), + ) + + continue + } + + if k8serr.IsNotFound(err) { + continue + } + + return 0, fmt.Errorf("cannot list child resources %s: %w", resources[r].String(), err) + } + + for i := range items.Items { + ok, err := gc.delete(ctx, items.Items[i], predicate) + if err != nil { + return 0, err + } + + if ok { + deleted++ + } + } + } + + return deleted, nil +} + +func (gc *GC) delete( + ctx context.Context, + resource unstructured.Unstructured, + predicate func(context.Context, unstructured.Unstructured) (bool, error), +) (bool, error) { + if slices.Contains(gc.options.unremovables, resource.GroupVersionKind()) { + return false, nil + } + + canBeDeleted, err := predicate(ctx, resource) + if err != nil { + return false, err + } + + if !canBeDeleted { + return false, err + } + + gc.log(ctx).Info( + "delete", + "gvk", resource.GroupVersionKind(), + "ns", resource.GetNamespace(), + "name", resource.GetName(), + ) + + err = gc.client.Delete(ctx, &resource, gc.options.propagationPolicy) + if err != nil { + // The resource may have already been deleted + if k8serr.IsNotFound(err) { + return true, nil + } + + return false, fmt.Errorf( + "cannot delete resources gvk:%s, namespace: %s, name: %s, reason: %w", + resource.GroupVersionKind().String(), + resource.GetNamespace(), + resource.GetName(), + err, + ) + } + + return true, nil +} + +func (gc *GC) computeDeletableTypes( + ctx context.Context, +) ([]Resource, error) { + // We rely on the 
discovery API to retrieve all the resources GVK, + // that results in an unbounded set that can impact garbage collection + // latency when scaling up. + items, err := gc.client.Discovery().ServerPreferredNamespacedResources() + + // Swallow group discovery errors, e.g., Knative serving exposes + // an aggregated API for custom.metrics.k8s.io that requires special + // authentication scheme while discovering preferred resources. + if err != nil && !discovery.IsGroupDiscoveryFailedError(err) { + return nil, fmt.Errorf("failure retrieving supported namespaced resources: %w", err) + } + + // We only take types that support the "delete" verb, + // to prevent performing queries that we know are going to + // return "MethodNotAllowed". + apiResourceLists := discovery.FilteredBy( + discovery.SupportsAllVerbs{ + Verbs: []string{DeleteVerb}, + }, + items, + ) + + // Get the permissions of the service account in the specified namespace. + rules, err := gc.retrieveResourceRules(ctx) + if err != nil { + return nil, fmt.Errorf("failure retrieving resource rules: %w", err) + } + + // Collect deletable resources. + resources, err := gc.collectDeletableResources(apiResourceLists, rules) + if err != nil { + return nil, fmt.Errorf("failure collecting deletable resources: %w", err) + } + + return resources, nil +} + +func (gc *GC) retrieveResourceRules( + ctx context.Context, +) ([]authorizationv1.ResourceRule, error) { + // Retrieve the permissions granted to the operator service account. + // We assume the operator has only to garbage collect the resources + // it has created. 
+ rulesReview := authorizationv1.SelfSubjectRulesReview{ + Spec: authorizationv1.SelfSubjectRulesReviewSpec{ + Namespace: gc.ns, + }, + } + + err := gc.client.Create(ctx, &rulesReview) + if err != nil { + return nil, fmt.Errorf("unable to create SelfSubjectRulesReviews: %w", err) + } + + return rulesReview.Status.ResourceRules, nil +} + +func (gc *GC) isResourceDeletable( + group string, + apiRes metav1.APIResource, + rules []authorizationv1.ResourceRule, +) bool { + for _, rule := range rules { + if !slices.Contains(rule.Verbs, DeleteVerb) && !slices.Contains(rule.Verbs, AnyVerb) { + continue + } + if !MatchRule(group, apiRes, rule) { + continue + } + + return true + } + + return false +} + +func (gc *GC) collectDeletableResources( + apiResourceLists []*metav1.APIResourceList, + rules []authorizationv1.ResourceRule, +) ([]Resource, error) { + resp := make(map[Resource]struct{}) + + for i := range apiResourceLists { + res := apiResourceLists[i] + + for _, apiRes := range res.APIResources { + resourceGroup := apiRes.Group + if resourceGroup == "" { + gv, err := schema.ParseGroupVersion(res.GroupVersion) + if err != nil { + return nil, fmt.Errorf("unable to parse group version: %w", err) + } + + resourceGroup = gv.Group + } + + if !gc.isResourceDeletable(resourceGroup, apiRes, rules) { + continue + } + + gv, err := schema.ParseGroupVersion(res.GroupVersion) + if err != nil { + return nil, err + } + + gvr := Resource{ + RESTMapping: meta.RESTMapping{ + Resource: schema.GroupVersionResource{ + Group: gv.Group, + Version: gv.Version, + Resource: apiRes.Name, + }, + GroupVersionKind: schema.GroupVersionKind{ + Group: gv.Group, + Version: gv.Version, + Kind: apiRes.Kind, + }, + Scope: meta.RESTScopeNamespace, + }, + } + + if !apiRes.Namespaced { + gvr.Scope = meta.RESTScopeRoot + } + + if slices.Contains(gc.options.unremovables, gvr.GroupVersionKind()) { + continue + } + + resp[gvr] = struct{}{} + } + } + + resources := maps.Keys(resp) + slices.SortFunc(resources, func(a, 
b Resource) int { + return strings.Compare(a.String(), b.String()) + }) + + return resources, nil +} + +func (gc *GC) log(ctx context.Context) logr.Logger { + return logf.FromContext(ctx).WithName("service").WithName("gc") +} diff --git a/pkg/services/gc/gc_support.go b/pkg/services/gc/gc_support.go new file mode 100644 index 00000000000..dc445b1452d --- /dev/null +++ b/pkg/services/gc/gc_support.go @@ -0,0 +1,78 @@ +package gc + +import ( + "slices" + "sync" + + authorizationv1 "k8s.io/api/authorization/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type Resource struct { + meta.RESTMapping +} + +func (r Resource) GroupVersionResource() schema.GroupVersionResource { + return r.RESTMapping.Resource +} + +func (r Resource) GroupVersionKind() schema.GroupVersionKind { + return r.RESTMapping.GroupVersionKind +} + +func (r Resource) String() string { + return r.RESTMapping.Resource.String() +} + +func (r Resource) IsNamespaced() bool { + if r.Scope == nil { + return false + } + + return r.Scope.Name() == meta.RESTScopeNameNamespace +} + +// We may want to introduce iterators (https://pkg.go.dev/iter) once moved to go 1.23 + +type Resources struct { + lock sync.RWMutex + items []Resource +} + +func (r *Resources) Set(resources []Resource) { + r.lock.Lock() + defer r.lock.Unlock() + + r.items = resources +} +func (r *Resources) Get() []Resource { + r.lock.RLock() + defer r.lock.RUnlock() + + return slices.Clone(r.items) +} + +func (r *Resources) Len() int { + return len(r.items) +} + +func MatchRule(resourceGroup string, apiRes metav1.APIResource, rule authorizationv1.ResourceRule) bool { + for rgi := range rule.APIGroups { + // Check if the resource group matches the rule group or is a wildcard, if not + // discard it + if resourceGroup != rule.APIGroups[rgi] && rule.APIGroups[rgi] != AnyResource { + continue + } + + for ri := range rule.Resources { + // Check if the API resource 
name matches the rule resource or is a wildcard + if apiRes.Name == rule.Resources[ri] || rule.Resources[ri] == AnyResource { + return true + } + } + } + + return false +} diff --git a/pkg/services/gc/gc_test.go b/pkg/services/gc/gc_test.go new file mode 100644 index 00000000000..2419c2157ea --- /dev/null +++ b/pkg/services/gc/gc_test.go @@ -0,0 +1,98 @@ +package gc_test + +import ( + "testing" + + gTypes "github.com/onsi/gomega/types" + authorizationv1 "k8s.io/api/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/services/gc" + + . "github.com/onsi/gomega" +) + +func allVerb() []string { + return []string{"delete", "create", "get", "list", "patch"} +} + +func anyRule() authorizationv1.ResourceRule { + return authorizationv1.ResourceRule{ + Verbs: []string{gc.AnyVerb}, + APIGroups: []string{gc.AnyVerb}, + Resources: []string{gc.AnyVerb}, + } +} + +func TestMatchRules(t *testing.T) { + tests := []struct { + name string + resourceGroup string + apiResource metav1.APIResource + rule authorizationv1.ResourceRule + matcher gTypes.GomegaMatcher + }{ + // + // Positive Match + // + + { + name: "Should match", + resourceGroup: "", + apiResource: metav1.APIResource{ + Verbs: allVerb(), + }, + rule: anyRule(), + matcher: BeTrue(), + }, + { + name: "Should match as resource is explicitly listed", + resourceGroup: "unknown", + apiResource: metav1.APIResource{ + Name: "baz", + }, + rule: authorizationv1.ResourceRule{ + APIGroups: []string{gc.AnyResource}, + Resources: []string{"baz"}, + }, + matcher: BeTrue(), + }, + + // + // Negative Match + // + + { + name: "Should not match as API group is not listed", + resourceGroup: "unknown", + apiResource: metav1.APIResource{}, + rule: authorizationv1.ResourceRule{ + APIGroups: []string{"baz"}, + }, + matcher: BeFalse(), + }, + { + name: "Should not match as resource is not listed", + resourceGroup: "unknown", + apiResource: metav1.APIResource{}, + rule: 
authorizationv1.ResourceRule{ + Resources: []string{"baz"}, + }, + matcher: BeFalse(), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + g := NewWithT(t) + + g.Expect( + gc.MatchRule( + test.resourceGroup, + test.apiResource, + test.rule, + ), + ).To(test.matcher) + }) + } +} diff --git a/pkg/services/monitoring/prometheus.go b/pkg/services/monitoring/prometheus.go new file mode 100644 index 00000000000..48ff14a3e85 --- /dev/null +++ b/pkg/services/monitoring/prometheus.go @@ -0,0 +1,126 @@ +package monitoring + +import ( + "context" + "os" + "path/filepath" + "strings" + + "gopkg.in/yaml.v2" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" +) + +var ( + prometheusConfigPath = filepath.Join(deploy.DefaultManifestPath, "monitoring", "prometheus", "apps", "prometheus-configs.yaml") +) + +// UpdatePrometheusConfig update prometheus-configs.yaml to include/exclude .rules +// parameter enable when set to true to add new rules, when set to false to remove existing rules. 
+func UpdatePrometheusConfig(ctx context.Context, _ client.Client, enable bool, component string) error { + l := logf.FromContext(ctx) + + // create a struct to mock prometheus.yml + type ConfigMap struct { + APIVersion string `yaml:"apiVersion"` + Kind string `yaml:"kind"` + Metadata struct { + Name string `yaml:"name"` + Namespace string `yaml:"namespace"` + } `yaml:"metadata"` + Data struct { + PrometheusYML string `yaml:"prometheus.yml"` + OperatorRules string `yaml:"operator-recording.rules"` + DeadManSnitchRules string `yaml:"deadmanssnitch-alerting.rules"` + CFRRules string `yaml:"codeflare-recording.rules"` + CRARules string `yaml:"codeflare-alerting.rules"` + DashboardRRules string `yaml:"rhods-dashboard-recording.rules"` + DashboardARules string `yaml:"rhods-dashboard-alerting.rules"` + DSPRRules string `yaml:"data-science-pipelines-operator-recording.rules"` + DSPARules string `yaml:"data-science-pipelines-operator-alerting.rules"` + MMRRules string `yaml:"model-mesh-recording.rules"` + MMARules string `yaml:"model-mesh-alerting.rules"` + OdhModelRRules string `yaml:"odh-model-controller-recording.rules"` + OdhModelARules string `yaml:"odh-model-controller-alerting.rules"` + // NOTE(review): dropped CFORRules/CFOARules — their yaml tags duplicated + // CFRRules/CRARules above, and yaml.v2 rejects duplicate keys in a struct. + RayARules string `yaml:"ray-alerting.rules"` + WorkbenchesRRules string `yaml:"workbenches-recording.rules"` + WorkbenchesARules string `yaml:"workbenches-alerting.rules"` + KserveRRules string `yaml:"kserve-recording.rules"` + KserveARules string `yaml:"kserve-alerting.rules"` + TrustyAIRRules string `yaml:"trustyai-recording.rules"` + TrustyAIARules string `yaml:"trustyai-alerting.rules"` + KueueRRules string `yaml:"kueue-recording.rules"` + KueueARules string `yaml:"kueue-alerting.rules"` + TrainingOperatorRRules string `yaml:"trainingoperator-recording.rules"` + TrainingOperatorARules string `yaml:"trainingoperator-alerting.rules"` + ModelRegistryRRules string 
`yaml:"model-registry-operator-recording.rules"` + ModelRegistryARules string `yaml:"model-registry-operator-alerting.rules"` + } `yaml:"data"` + } + + var configMap ConfigMap + // prometheusContent will represent content of prometheus.yml due to its dynamic struct + var prometheusContent map[interface{}]interface{} + + // read prometheus.yml from local disk /opt/manifests/monitoring/prometheus/apps/ + yamlData, err := os.ReadFile(prometheusConfigPath) + if err != nil { + return err + } + if err := yaml.Unmarshal(yamlData, &configMap); err != nil { + return err + } + + // get prometheus.yml part from configmap + if err := yaml.Unmarshal([]byte(configMap.Data.PrometheusYML), &prometheusContent); err != nil { + return err + } + + // to add component rules when it is not there yet + if enable { + // Check if the rule does not yet exist in rule_files + if !strings.Contains(configMap.Data.PrometheusYML, component+"*.rules") { + // check if rule_files exists + if ruleFiles, ok := prometheusContent["rule_files"]; ok { + if ruleList, isList := ruleFiles.([]interface{}); isList { + // add new component rules back to rule_files + ruleList = append(ruleList, component+"*.rules") + prometheusContent["rule_files"] = ruleList + } + } + } + } else { // to remove component rules if it is there + l.Info("Removing prometheus rule: " + component + "*.rules") + if ruleList, ok := prometheusContent["rule_files"].([]interface{}); ok { + for i, item := range ruleList { + if rule, isStr := item.(string); isStr && rule == component+"*.rules" { + ruleList = append(ruleList[:i], ruleList[i+1:]...) 
+ + break + } + } + prometheusContent["rule_files"] = ruleList + } + } + + // Marshal back + newDataYAML, err := yaml.Marshal(&prometheusContent) + if err != nil { + return err + } + configMap.Data.PrometheusYML = string(newDataYAML) + + newyamlData, err := yaml.Marshal(&configMap) + if err != nil { + return err + } + + // Write the modified content back to the file + err = os.WriteFile(prometheusConfigPath, newyamlData, 0) + + return err +} diff --git a/pkg/trustedcabundle/trustedcabundle.go b/pkg/trustedcabundle/trustedcabundle.go index f99a3fde80f..41a9ab2ef8b 100644 --- a/pkg/trustedcabundle/trustedcabundle.go +++ b/pkg/trustedcabundle/trustedcabundle.go @@ -75,10 +75,7 @@ func CreateOdhTrustedCABundleConfigMap(ctx context.Context, cli client.Client, n // Create Configmap if doesn't exist foundConfigMap := &corev1.ConfigMap{} - if err := cli.Get(ctx, client.ObjectKey{ - Name: CAConfigMapName, - Namespace: namespace, - }, foundConfigMap); err != nil { + if err := cli.Get(ctx, client.ObjectKeyFromObject(desiredConfigMap), foundConfigMap); err != nil { if k8serr.IsNotFound(err) { err = cli.Create(ctx, desiredConfigMap) if err != nil && !k8serr.IsAlreadyExists(err) { @@ -113,8 +110,8 @@ func DeleteOdhTrustedCABundleConfigMap(ctx context.Context, cli client.Client, n // return false when these two are matching => skip update // return true when not match => need upate. func IsTrustedCABundleUpdated(ctx context.Context, cli client.Client, dscInit *dsciv1.DSCInitialization) (bool, error) { - userNamespace := &corev1.Namespace{} - if err := cli.Get(ctx, client.ObjectKey{Name: dscInit.Spec.ApplicationsNamespace}, userNamespace); err != nil { + appNamespace := &corev1.Namespace{} + if err := cli.Get(ctx, client.ObjectKey{Name: dscInit.Spec.ApplicationsNamespace}, appNamespace); err != nil { if k8serr.IsNotFound(err) { // if namespace is not found, return true. This is to ensure we reconcile, and check for other namespaces. 
return true, nil @@ -122,7 +119,7 @@ func IsTrustedCABundleUpdated(ctx context.Context, cli client.Client, dscInit *d return false, err } - if !ShouldInjectTrustedBundle(userNamespace) { + if !ShouldInjectTrustedBundle(appNamespace) { return false, nil } diff --git a/pkg/upgrade/uninstallation.go b/pkg/upgrade/uninstallation.go index 6a8ec4a36e8..5f300091f1d 100644 --- a/pkg/upgrade/uninstallation.go +++ b/pkg/upgrade/uninstallation.go @@ -5,12 +5,14 @@ import ( "fmt" "time" - "github.com/hashicorp/go-multierror" corev1 "k8s.io/api/core/v1" k8serr "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" @@ -25,7 +27,13 @@ const ( // OperatorUninstall deletes all the externally generated resources. // This includes DSCI, namespace created by operator (but not workbench or MR's), subscription and CSV. func OperatorUninstall(ctx context.Context, cli client.Client, platform cluster.Platform) error { - if err := removeDSCInitialization(ctx, cli); err != nil { + log := logf.FromContext(ctx) + + if err := removeDSC(ctx, cli); err != nil { + return err + } + + if err := removeDSCI(ctx, cli); err != nil { return err } @@ -50,7 +58,7 @@ func OperatorUninstall(ctx context.Context, cli client.Client, platform cluster. 
if err := cli.Delete(ctx, &namespace); err != nil { return fmt.Errorf("error deleting namespace %v: %w", namespace.Name, err) } - ctrl.Log.Info("Namespace " + namespace.Name + " deleted as a part of uninstallation.") + log.Info("Namespace " + namespace.Name + " deleted as a part of uninstallation.") } } @@ -65,7 +73,7 @@ func OperatorUninstall(ctx context.Context, cli client.Client, platform cluster. return err } - ctrl.Log.Info("Removing operator subscription which in turn will remove installplan") + log.Info("Removing operator subscription which in turn will remove installplan") subsName := "opendatahub-operator" if platform == cluster.SelfManagedRhoai { subsName = "rhods-operator" @@ -76,28 +84,31 @@ func OperatorUninstall(ctx context.Context, cli client.Client, platform cluster. } } - ctrl.Log.Info("Removing the operator CSV in turn remove operator deployment") + log.Info("Removing the operator CSV in turn remove operator deployment") err = removeCSV(ctx, cli) - ctrl.Log.Info("All resources deleted as part of uninstall.") + log.Info("All resources deleted as part of uninstall.") return err } -func removeDSCInitialization(ctx context.Context, cli client.Client) error { - instanceList := &dsciv1.DSCInitializationList{} +func removeDSCI(ctx context.Context, cli client.Client) error { + instance := &dsciv1.DSCInitialization{} - if err := cli.List(ctx, instanceList); err != nil { - return err + if err := cli.DeleteAllOf(ctx, instance, client.PropagationPolicy(metav1.DeletePropagationForeground)); err != nil { + return fmt.Errorf("failure deleting DSCI: %w", err) } - var multiErr *multierror.Error - for _, dsciInstance := range instanceList.Items { - if err := cli.Delete(ctx, &dsciInstance); !k8serr.IsNotFound(err) { - multiErr = multierror.Append(multiErr, err) - } + return nil +} + +func removeDSC(ctx context.Context, cli client.Client) error { + instance := &dscv1.DataScienceCluster{} + + if err := cli.DeleteAllOf(ctx, instance, 
client.PropagationPolicy(metav1.DeletePropagationForeground)); err != nil { + return fmt.Errorf("failure deleting DSC: %w", err) } - return multiErr.ErrorOrNil() + return nil } // HasDeleteConfigMap returns true if delete configMap is added to the operator namespace by managed-tenants repo. @@ -124,6 +135,7 @@ func HasDeleteConfigMap(ctx context.Context, c client.Client) bool { } func removeCSV(ctx context.Context, c client.Client) error { + log := logf.FromContext(ctx) // Get watchNamespace operatorNamespace, err := cluster.GetOperatorNamespace() if err != nil { @@ -140,7 +152,7 @@ func removeCSV(ctx context.Context, c client.Client) error { return err } - ctrl.Log.Info("Deleting CSV " + operatorCsv.Name) + log.Info("Deleting CSV " + operatorCsv.Name) err = c.Delete(ctx, operatorCsv) if err != nil { if k8serr.IsNotFound(err) { @@ -149,7 +161,7 @@ func removeCSV(ctx context.Context, c client.Client) error { return fmt.Errorf("error deleting clusterserviceversion: %w", err) } - ctrl.Log.Info("Clusterserviceversion " + operatorCsv.Name + " deleted as a part of uninstall") + log.Info("Clusterserviceversion " + operatorCsv.Name + " deleted as a part of uninstall") return nil } diff --git a/pkg/upgrade/upgrade.go b/pkg/upgrade/upgrade.go index 109b5c796e7..d0ed81d3714 100644 --- a/pkg/upgrade/upgrade.go +++ b/pkg/upgrade/upgrade.go @@ -11,6 +11,7 @@ import ( "github.com/hashicorp/go-multierror" operatorv1 "github.com/openshift/api/operator/v1" routev1 "github.com/openshift/api/route/v1" + templatev1 "github.com/openshift/api/template/v1" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" @@ -23,25 +24,16 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + 
"github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" featuresv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" infrav1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/infrastructure/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/components" - "github.com/opendatahub-io/opendatahub-operator/v2/components/codeflare" - "github.com/opendatahub-io/opendatahub-operator/v2/components/dashboard" - "github.com/opendatahub-io/opendatahub-operator/v2/components/datasciencepipelines" - "github.com/opendatahub-io/opendatahub-operator/v2/components/kserve" - "github.com/opendatahub-io/opendatahub-operator/v2/components/kueue" - "github.com/opendatahub-io/opendatahub-operator/v2/components/modelmeshserving" - "github.com/opendatahub-io/opendatahub-operator/v2/components/modelregistry" - "github.com/opendatahub-io/opendatahub-operator/v2/components/ray" - "github.com/opendatahub-io/opendatahub-operator/v2/components/trainingoperator" - "github.com/opendatahub-io/opendatahub-operator/v2/components/trustyai" - "github.com/opendatahub-io/opendatahub-operator/v2/components/workbenches" + serviceApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/services/v1alpha1" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" @@ -70,38 +62,38 @@ func CreateDefaultDSC(ctx context.Context, cli client.Client) error { }, Spec: dscv1.DataScienceClusterSpec{ Components: dscv1.Components{ - Dashboard: dashboard.Dashboard{ - Component: components.Component{ManagementState: operatorv1.Managed}, + Dashboard: componentApi.DSCDashboard{ + 
ManagementSpec: common.ManagementSpec{ManagementState: operatorv1.Managed}, }, - Workbenches: workbenches.Workbenches{ - Component: components.Component{ManagementState: operatorv1.Managed}, + Workbenches: componentApi.DSCWorkbenches{ + ManagementSpec: common.ManagementSpec{ManagementState: operatorv1.Managed}, }, - ModelMeshServing: modelmeshserving.ModelMeshServing{ - Component: components.Component{ManagementState: operatorv1.Managed}, + ModelMeshServing: componentApi.DSCModelMeshServing{ + ManagementSpec: common.ManagementSpec{ManagementState: operatorv1.Managed}, }, - DataSciencePipelines: datasciencepipelines.DataSciencePipelines{ - Component: components.Component{ManagementState: operatorv1.Managed}, + DataSciencePipelines: componentApi.DSCDataSciencePipelines{ + ManagementSpec: common.ManagementSpec{ManagementState: operatorv1.Managed}, }, - Kserve: kserve.Kserve{ - Component: components.Component{ManagementState: operatorv1.Managed}, + Kserve: componentApi.DSCKserve{ + ManagementSpec: common.ManagementSpec{ManagementState: operatorv1.Managed}, }, - CodeFlare: codeflare.CodeFlare{ - Component: components.Component{ManagementState: operatorv1.Managed}, + CodeFlare: componentApi.DSCCodeFlare{ + ManagementSpec: common.ManagementSpec{ManagementState: operatorv1.Managed}, }, - Ray: ray.Ray{ - Component: components.Component{ManagementState: operatorv1.Managed}, + Ray: componentApi.DSCRay{ + ManagementSpec: common.ManagementSpec{ManagementState: operatorv1.Managed}, }, - Kueue: kueue.Kueue{ - Component: components.Component{ManagementState: operatorv1.Managed}, + Kueue: componentApi.DSCKueue{ + ManagementSpec: common.ManagementSpec{ManagementState: operatorv1.Managed}, }, - TrainingOperator: trainingoperator.TrainingOperator{ - Component: components.Component{ManagementState: operatorv1.Removed}, + TrustyAI: componentApi.DSCTrustyAI{ + ManagementSpec: common.ManagementSpec{ManagementState: operatorv1.Managed}, }, - TrustyAI: trustyai.TrustyAI{ - Component: 
components.Component{ManagementState: operatorv1.Managed}, + ModelRegistry: componentApi.DSCModelRegistry{ + ManagementSpec: common.ManagementSpec{ManagementState: operatorv1.Removed}, }, - ModelRegistry: modelregistry.ModelRegistry{ - Component: components.Component{ManagementState: operatorv1.Removed}, + TrainingOperator: componentApi.DSCTrainingOperator{ + ManagementSpec: common.ManagementSpec{ManagementState: operatorv1.Removed}, }, }, }, @@ -117,11 +109,14 @@ func CreateDefaultDSC(ctx context.Context, cli client.Client) error { // If there exists default-dsci instance already, it will not update DSCISpec on it. // Note: DSCI CR modifcations are not supported, as it is the initial prereq setting for the components. func CreateDefaultDSCI(ctx context.Context, cli client.Client, _ cluster.Platform, appNamespace, monNamespace string) error { + log := logf.FromContext(ctx) defaultDsciSpec := &dsciv1.DSCInitializationSpec{ ApplicationsNamespace: appNamespace, - Monitoring: dsciv1.Monitoring{ - ManagementState: operatorv1.Managed, - Namespace: monNamespace, + Monitoring: serviceApi.DSCMonitoring{ + ManagementSpec: common.ManagementSpec{ManagementState: operatorv1.Removed}, + MonitoringCommonSpec: serviceApi.MonitoringCommonSpec{ + Namespace: monNamespace, + }, }, ServiceMesh: &infrav1.ServiceMeshSpec{ ManagementState: "Managed", @@ -154,14 +149,14 @@ func CreateDefaultDSCI(ctx context.Context, cli client.Client, _ cluster.Platfor switch { case len(instances.Items) > 1: - ctrl.Log.Info("only one instance of DSCInitialization object is allowed. Please delete other instances.") + log.Info("only one instance of DSCInitialization object is allowed. Please delete other instances.") return nil case len(instances.Items) == 1: // Do not patch/update if DSCI already exists. - ctrl.Log.Info("DSCInitialization resource already exists. It will not be updated with default DSCI.") + log.Info("DSCInitialization resource already exists. 
It will not be updated with default DSCI.") return nil case len(instances.Items) == 0: - ctrl.Log.Info("create default DSCI CR.") + log.Info("create default DSCI CR.") err := cluster.CreateWithRetry(ctx, cli, defaultDsci, 1) // 1 min timeout if err != nil { return err @@ -270,7 +265,6 @@ func CleanupExistingResource(ctx context.Context, "jupyterhub-use-s3-bucket-data", }) multiErr = multierror.Append(multiErr, deleteResources(ctx, cli, &odhDocJPH)) - // only apply on RHOAI since ODH has a different way to create this CR by dashboard if platform == cluster.SelfManagedRhoai || platform == cluster.ManagedRhoai { if err := upgradeODCCR(ctx, cli, "odh-dashboard-config", dscApplicationsNamespace, oldReleaseVersion); err != nil { @@ -288,8 +282,8 @@ func CleanupExistingResource(ctx context.Context, toDelete := getDashboardWatsonResources(dscApplicationsNamespace) multiErr = multierror.Append(multiErr, deleteResources(ctx, cli, &toDelete)) - // cleanup nvidia nim integration remove tech preview - multiErr = multierror.Append(multiErr, cleanupNimIntegrationTechPreview(ctx, cli, oldReleaseVersion, dscApplicationsNamespace)) + // cleanup nvidia nim integration + multiErr = multierror.Append(multiErr, cleanupNimIntegration(ctx, cli, oldReleaseVersion, dscApplicationsNamespace)) return multiErr.ErrorOrNil() } @@ -306,13 +300,14 @@ func deleteResources(ctx context.Context, c client.Client, resources *[]Resource } func deleteOneResource(ctx context.Context, c client.Client, res ResourceSpec) error { + log := logf.FromContext(ctx) list := &unstructured.UnstructuredList{} list.SetGroupVersionKind(res.Gvk) err := c.List(ctx, list, client.InNamespace(res.Namespace)) if err != nil { if errors.Is(err, &meta.NoKindMatchError{}) { - ctrl.Log.Info("CRD not found, will not delete " + res.Gvk.String()) + log.Info("CRD not found, will not delete " + res.Gvk.String()) return nil } return fmt.Errorf("failed to list %s: %w", res.Gvk.Kind, err) @@ -334,7 +329,7 @@ func deleteOneResource(ctx 
context.Context, c client.Client, res ResourceSpec) e if err != nil { return fmt.Errorf("failed to delete %s %s/%s: %w", res.Gvk.Kind, res.Namespace, item.GetName(), err) } - ctrl.Log.Info("Deleted object " + item.GetName() + " " + res.Gvk.String() + "in namespace" + res.Namespace) + log.Info("Deleted object " + item.GetName() + " " + res.Gvk.String() + "in namespace" + res.Namespace) } } } @@ -343,6 +338,7 @@ func deleteOneResource(ctx context.Context, c client.Client, res ResourceSpec) e } func deleteDeprecatedResources(ctx context.Context, cli client.Client, namespace string, resourceList []string, resourceType client.ObjectList) error { + log := logf.FromContext(ctx) var multiErr *multierror.Error listOpts := &client.ListOptions{Namespace: namespace} if err := cli.List(ctx, resourceType, listOpts); err != nil { @@ -353,16 +349,16 @@ func deleteDeprecatedResources(ctx context.Context, cli client.Client, namespace item := items.Index(i).Addr().Interface().(client.Object) //nolint:errcheck,forcetypeassert for _, name := range resourceList { if name == item.GetName() { - ctrl.Log.Info("Attempting to delete " + item.GetName() + " in namespace " + namespace) + log.Info("Attempting to delete " + item.GetName() + " in namespace " + namespace) err := cli.Delete(ctx, item) if err != nil { if k8serr.IsNotFound(err) { - ctrl.Log.Info("Could not find " + item.GetName() + " in namespace " + namespace) + log.Info("Could not find " + item.GetName() + " in namespace " + namespace) } else { multiErr = multierror.Append(multiErr, err) } } - ctrl.Log.Info("Successfully deleted " + item.GetName()) + log.Info("Successfully deleted " + item.GetName()) } } } @@ -371,6 +367,7 @@ func deleteDeprecatedResources(ctx context.Context, cli client.Client, namespace // Need to handle ServiceMonitor deletion separately as the generic function does not work for ServiceMonitors because of how the package is built. 
func deleteDeprecatedServiceMonitors(ctx context.Context, cli client.Client, namespace string, resourceList []string) error { + log := logf.FromContext(ctx) var multiErr *multierror.Error listOpts := &client.ListOptions{Namespace: namespace} servicemonitors := &monitoringv1.ServiceMonitorList{} @@ -381,16 +378,16 @@ func deleteDeprecatedServiceMonitors(ctx context.Context, cli client.Client, nam for _, servicemonitor := range servicemonitors.Items { for _, name := range resourceList { if name == servicemonitor.Name { - ctrl.Log.Info("Attempting to delete " + servicemonitor.Name + " in namespace " + namespace) + log.Info("Attempting to delete " + servicemonitor.Name + " in namespace " + namespace) err := cli.Delete(ctx, servicemonitor) if err != nil { if k8serr.IsNotFound(err) { - ctrl.Log.Info("Could not find " + servicemonitor.Name + " in namespace " + namespace) + log.Info("Could not find " + servicemonitor.Name + " in namespace " + namespace) } else { multiErr = multierror.Append(multiErr, err) } } - ctrl.Log.Info("Successfully deleted " + servicemonitor.Name) + log.Info("Successfully deleted " + servicemonitor.Name) } } } @@ -461,10 +458,11 @@ func unsetOwnerReference(ctx context.Context, cli client.Client, instanceName st } func updateODCBiasMetrics(ctx context.Context, cli client.Client, instanceName string, oldRelease cluster.Release, odhObject *unstructured.Unstructured) error { + log := logf.FromContext(ctx) // "from version" as oldRelease, if return "0.0.0" meaning running on 2.10- release/dummy CI build // if oldRelease is lower than 2.14.0(e.g 2.13.x-a), flip disableBiasMetrics to false (even the field did not exist) if oldRelease.Version.Minor < 14 { - ctrl.Log.Info("Upgrade force BiasMetrics to false in " + instanceName + " CR due to old release < 2.14.0") + log.Info("Upgrade force BiasMetrics to false in " + instanceName + " CR due to old release < 2.14.0") // flip TrustyAI BiasMetrics to false (.spec.dashboardConfig.disableBiasMetrics) 
disableBiasMetricsValue := []byte(`{"spec": {"dashboardConfig": {"disableBiasMetrics": false}}}`) if err := cli.Patch(ctx, odhObject, client.RawPatch(types.MergePatchType, disableBiasMetricsValue)); err != nil { @@ -472,28 +470,30 @@ func updateODCBiasMetrics(ctx context.Context, cli client.Client, instanceName s } return nil } - ctrl.Log.Info("Upgrade does not force BiasMetrics to false due to from release >= 2.14.0") + log.Info("Upgrade does not force BiasMetrics to false due to from release >= 2.14.0") return nil } func updateODCModelRegistry(ctx context.Context, cli client.Client, instanceName string, oldRelease cluster.Release, odhObject *unstructured.Unstructured) error { + log := logf.FromContext(ctx) // "from version" as oldRelease, if return "0.0.0" meaning running on 2.10- release/dummy CI build // if oldRelease is lower than 2.14.0(e.g 2.13.x-a), flip disableModelRegistry to false (even the field did not exist) if oldRelease.Version.Minor < 14 { - ctrl.Log.Info("Upgrade force ModelRegistry to false in " + instanceName + " CR due to old release < 2.14.0") + log.Info("Upgrade force ModelRegistry to false in " + instanceName + " CR due to old release < 2.14.0") disableModelRegistryValue := []byte(`{"spec": {"dashboardConfig": {"disableModelRegistry": false}}}`) if err := cli.Patch(ctx, odhObject, client.RawPatch(types.MergePatchType, disableModelRegistryValue)); err != nil { return fmt.Errorf("error enable ModelRegistry in CR %s : %w", instanceName, err) } return nil } - ctrl.Log.Info("Upgrade does not force ModelRegistry to false due to from release >= 2.14.0") + log.Info("Upgrade does not force ModelRegistry to false due to from release >= 2.14.0") return nil } // workaround for RHOAIENG-15328 // TODO: this can be removed from ODH 2.22. 
func removeRBACProxyModelRegistry(ctx context.Context, cli client.Client, componentName string, containerName string, applicationNS string) error { + log := logf.FromContext(ctx) deploymentList := &appsv1.DeploymentList{} if err := cli.List(ctx, deploymentList, client.InNamespace(applicationNS), client.HasLabels{labels.ODH.Component(componentName)}); err != nil { return fmt.Errorf("error fetching list of deployments: %w", err) @@ -509,7 +509,7 @@ func removeRBACProxyModelRegistry(ctx context.Context, cli client.Client, compon return nil } - ctrl.Log.Info("Upgrade force ModelRegistry to remove container from deployment") + log.Info("Upgrade force ModelRegistry to remove container from deployment") for i, container := range mrContainerList { if container.Name == containerName { removeUnusedKubeRbacProxy := []byte(fmt.Sprintf("[{\"op\": \"remove\", \"path\": \"/spec/template/spec/containers/%d\"}]", i)) @@ -538,6 +538,7 @@ func RemoveLabel(ctx context.Context, cli client.Client, objectName string, labe } func deleteDeprecatedNamespace(ctx context.Context, cli client.Client, namespace string) error { + log := logf.FromContext(ctx) foundNamespace := &corev1.Namespace{} if err := cli.Get(ctx, client.ObjectKey{Name: namespace}, foundNamespace); err != nil { if k8serr.IsNotFound(err) { @@ -566,7 +567,7 @@ func deleteDeprecatedNamespace(ctx context.Context, cli client.Client, namespace return fmt.Errorf("error getting pods from namespace %s: %w", namespace, err) } if len(podList.Items) != 0 { - ctrl.Log.Info("Skip deletion of namespace " + namespace + " due to running Pods in it") + log.Info("Skip deletion of namespace " + namespace + " due to running Pods in it") return nil } @@ -599,46 +600,68 @@ func GetDeployedRelease(ctx context.Context, cli client.Client) (cluster.Release return cluster.Release{}, nil } -func cleanupNimIntegrationTechPreview(ctx context.Context, cli client.Client, oldRelease cluster.Release, applicationNS string) error { +func 
cleanupNimIntegration(ctx context.Context, cli client.Client, oldRelease cluster.Release, applicationNS string) error { var errs *multierror.Error - if oldRelease.Version.Minor >= 14 && oldRelease.Version.Minor <= 15 { - nimCronjob := "nvidia-nim-periodic-validator" - nimConfigMap := "nvidia-nim-validation-result" - nimAPISec := "nvidia-nim-access" - - deleteObjs := []struct { + if oldRelease.Version.Minor >= 14 && oldRelease.Version.Minor <= 16 { + log := logf.FromContext(ctx) + type objForDel struct { obj client.Object name, desc string - }{ - { - obj: &batchv1.CronJob{}, - name: nimCronjob, - desc: "validator CronJob", - }, + } + + // the following objects created by TP (14-15) and by the first GA (16) + deleteObjs := []objForDel{ { obj: &corev1.ConfigMap{}, - name: nimConfigMap, + name: "nvidia-nim-images-data", desc: "data ConfigMap", }, + { + obj: &templatev1.Template{}, + name: "nvidia-nim-serving-template", + desc: "runtime Template", + }, { obj: &corev1.Secret{}, - name: nimAPISec, - desc: "API key Secret", + name: "nvidia-nim-image-pull", + desc: "pull Secret", }, } + + // the following objects created by TP (14-15) + if oldRelease.Version.Minor < 16 { + deleteObjs = append(deleteObjs, + objForDel{ + obj: &batchv1.CronJob{}, + name: "nvidia-nim-periodic-validator", + desc: "validator CronJob", + }, + objForDel{ + obj: &corev1.ConfigMap{}, + name: "nvidia-nim-validation-result", + desc: "validation result ConfigMap", + }, + // the api key is also used by GA (16), but cleanup is only required for TP->GA switch + objForDel{ + obj: &corev1.Secret{}, + name: "nvidia-nim-access", + desc: "API key Secret", + }) + } + for _, delObj := range deleteObjs { if gErr := cli.Get(ctx, types.NamespacedName{Name: delObj.name, Namespace: applicationNS}, delObj.obj); gErr != nil { if !k8serr.IsNotFound(gErr) { - ctrl.Log.V(1).Error(gErr, fmt.Sprintf("failed to get NIM %s %s", delObj.desc, delObj.name)) + log.V(1).Error(gErr, fmt.Sprintf("failed to get NIM %s %s", 
delObj.desc, delObj.name)) errs = multierror.Append(errs, gErr) } } else { if dErr := cli.Delete(ctx, delObj.obj); dErr != nil { - ctrl.Log.Error(dErr, fmt.Sprintf("failed to remove NIM %s %s", delObj.desc, delObj.name)) + log.Error(dErr, fmt.Sprintf("failed to remove NIM %s %s", delObj.desc, delObj.name)) errs = multierror.Append(errs, dErr) } else { - ctrl.Log.Info(fmt.Sprintf("removed NIM %s successfully", delObj.desc)) + log.Info(fmt.Sprintf("removed NIM %s successfully", delObj.desc)) } } } diff --git a/pkg/utils/test/fakeclient/fakeclient.go b/pkg/utils/test/fakeclient/fakeclient.go new file mode 100644 index 00000000000..4b94a7c5f14 --- /dev/null +++ b/pkg/utils/test/fakeclient/fakeclient.go @@ -0,0 +1,53 @@ +package fakeclient + +import ( + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + dynamicFake "k8s.io/client-go/dynamic/fake" + k8sFake "k8s.io/client-go/kubernetes/fake" + ctrlClient "sigs.k8s.io/controller-runtime/pkg/client" + clientFake "sigs.k8s.io/controller-runtime/pkg/client/fake" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/client" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" +) + +func New(objs ...ctrlClient.Object) (*client.Client, error) { + scheme := runtime.NewScheme() + utilruntime.Must(corev1.AddToScheme(scheme)) + utilruntime.Must(appsv1.AddToScheme(scheme)) + utilruntime.Must(rbacv1.AddToScheme(scheme)) + utilruntime.Must(componentApi.AddToScheme(scheme)) + + fakeMapper := meta.NewDefaultRESTMapper(scheme.PreferredVersionAllGroups()) + for gvk := range scheme.AllKnownTypes() { + fakeMapper.Add(gvk, meta.RESTScopeNamespace) + } + + ro := make([]runtime.Object, len(objs)) + for i := range objs { + u, err := 
resources.ToUnstructured(objs[i]) + if err != nil { + return nil, err + } + + ro[i] = u + } + + c := client.New( + clientFake.NewClientBuilder(). + WithScheme(scheme). + WithRESTMapper(fakeMapper). + WithObjects(objs...). + Build(), + k8sFake.NewSimpleClientset(ro...), + dynamicFake.NewSimpleDynamicClient(scheme, ro...), + ) + + return c, nil +} diff --git a/pkg/utils/test/matchers/jq/jq_matcher.go b/pkg/utils/test/matchers/jq/jq_matcher.go new file mode 100644 index 00000000000..3c483bb657a --- /dev/null +++ b/pkg/utils/test/matchers/jq/jq_matcher.go @@ -0,0 +1,59 @@ +package jq + +import ( + "fmt" + + "github.com/itchyny/gojq" + "github.com/onsi/gomega/format" + "github.com/onsi/gomega/types" +) + +func Match(format string, args ...any) *Matcher { + return &Matcher{ + expression: fmt.Sprintf(format, args...), + } +} + +var _ types.GomegaMatcher = &Matcher{} + +type Matcher struct { + expression string + firstFailurePath []interface{} +} + +func (matcher *Matcher) Match(actual interface{}) (bool, error) { + query, err := gojq.Parse(matcher.expression) + if err != nil { + return false, fmt.Errorf("unable to parse expression %s, %w", matcher.expression, err) + } + + data, err := toType(actual) + if err != nil { + return false, err + } + + it := query.Run(data) + + v, ok := it.Next() + if !ok { + return false, nil + } + + if err, ok := v.(error); ok { + return false, err + } + + if match, ok := v.(bool); ok { + return match, nil + } + + return false, nil +} + +func (matcher *Matcher) FailureMessage(actual interface{}) string { + return formattedMessage(format.Message(fmt.Sprintf("%v", actual), "to match expression", matcher.expression), matcher.firstFailurePath) +} + +func (matcher *Matcher) NegatedFailureMessage(actual interface{}) string { + return formattedMessage(format.Message(fmt.Sprintf("%v", actual), "not to match expression", matcher.expression), matcher.firstFailurePath) +} diff --git a/pkg/utils/test/matchers/jq/jq_matcher_test.go 
b/pkg/utils/test/matchers/jq/jq_matcher_test.go new file mode 100644 index 00000000000..73cbe03280c --- /dev/null +++ b/pkg/utils/test/matchers/jq/jq_matcher_test.go @@ -0,0 +1,89 @@ +package jq_test + +import ( + "encoding/json" + "testing" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/matchers/jq" + + . "github.com/onsi/gomega" +) + +func TestMatcher(t *testing.T) { + t.Parallel() + + g := NewWithT(t) + + g.Expect(`{"a":1}`).Should( + jq.Match(`.a == 1`), + ) + + g.Expect(`{"a":1}`).Should( + Not( + jq.Match(`.a == 2`), + ), + ) + + g.Expect(`{"Values":[ "foo" ]}`).Should( + jq.Match(`.Values | if . then any(. == "foo") else false end`), + ) + + g.Expect(`{"Values":[ "foo" ]}`).Should( + Not( + jq.Match(`.Values | if . then any(. == "bar") else false end`), + ), + ) + + g.Expect(`{"Values": null}`).Should( + Not( + jq.Match(`.Values | if . then any(. == "foo") else false end`), + ), + ) + + g.Expect(`{ "status": { "foo": { "bar": "fr", "baz": "fb" } } }`).Should( + And( + jq.Match(`.status.foo.bar == "fr"`), + jq.Match(`.status.foo.baz == "fb"`), + ), + ) +} + +func TestMatcherWithType(t *testing.T) { + t.Parallel() + + g := NewWithT(t) + + g.Expect(map[string]any{"a": 1}). + Should( + WithTransform(json.Marshal, jq.Match(`.a == 1`)), + ) + + g.Expect( + map[string]any{ + "status": map[string]any{ + "foo": map[string]any{ + "bar": "fr", + "baz": "fb", + }, + }, + }). + Should( + WithTransform(json.Marshal, And( + jq.Match(`.status.foo.bar == "fr"`), + jq.Match(`.status.foo.baz == "fb"`), + )), + ) + + g.Expect(map[string]any{"a": 1}). + Should(jq.Match(`.a == 1`)) + + g.Expect( + struct { + A int `json:"a"` + }{ + A: 1, + }). 
+ Should( + WithTransform(json.Marshal, jq.Match(`.a == 1`)), + ) +} diff --git a/pkg/utils/test/matchers/jq/jq_support.go b/pkg/utils/test/matchers/jq/jq_support.go new file mode 100644 index 00000000000..3916cb9bac5 --- /dev/null +++ b/pkg/utils/test/matchers/jq/jq_support.go @@ -0,0 +1,129 @@ +package jq + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "reflect" + "strings" + + "github.com/onsi/gomega/format" + "github.com/onsi/gomega/gbytes" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +func formattedMessage(comparisonMessage string, failurePath []interface{}) string { + diffMessage := "" + + if len(failurePath) != 0 { + diffMessage = "\n\nfirst mismatched key: " + formattedFailurePath(failurePath) + } + + return comparisonMessage + diffMessage +} + +func formattedFailurePath(failurePath []interface{}) string { + formattedPaths := make([]string, 0) + + for i := len(failurePath) - 1; i >= 0; i-- { + switch p := failurePath[i].(type) { + case int: + val := fmt.Sprintf(`[%d]`, p) + formattedPaths = append(formattedPaths, val) + default: + if i != len(failurePath)-1 { + formattedPaths = append(formattedPaths, ".") + } + + val := fmt.Sprintf(`"%s"`, p) + formattedPaths = append(formattedPaths, val) + } + } + + return strings.Join(formattedPaths, "") +} + +//nolint:cyclop +func toType(in any) (any, error) { + switch v := in.(type) { + case string: + d, err := byteToType([]byte(v)) + if err != nil { + return nil, err + } + + return d, nil + case []byte: + d, err := byteToType(v) + if err != nil { + return nil, err + } + + return d, nil + case json.RawMessage: + d, err := byteToType(v) + if err != nil { + return nil, err + } + + return d, nil + case *gbytes.Buffer: + d, err := byteToType(v.Contents()) + if err != nil { + return nil, err + } + + return d, nil + case io.Reader: + data, err := io.ReadAll(v) + if err != nil { + return nil, fmt.Errorf("failed to read from reader: %w", err) + } + + d, err := byteToType(data) + if err != nil { + return 
nil, err + } + + return d, nil + case unstructured.Unstructured: + return v.Object, nil + case *unstructured.Unstructured: + return v.Object, nil + } + + switch reflect.TypeOf(in).Kind() { + case reflect.Map: + return in, nil + case reflect.Slice: + return in, nil + default: + return nil, fmt.Errorf("unsuported type:\n%s", format.Object(in, 1)) + } +} + +func byteToType(in []byte) (any, error) { + if len(in) == 0 { + return nil, errors.New("a valid Json document is expected") + } + + switch in[0] { + case '{': + data := make(map[string]any) + if err := json.Unmarshal(in, &data); err != nil { + return nil, fmt.Errorf("unable to unmarshal result, %w", err) + } + + return data, nil + case '[': + var data []any + if err := json.Unmarshal(in, &data); err != nil { + return nil, fmt.Errorf("unable to unmarshal result, %w", err) + } + + return data, nil + default: + return nil, errors.New("a Json Array or Object is required") + } +} diff --git a/pkg/utils/test/matchers/jq/jq_support_test.go b/pkg/utils/test/matchers/jq/jq_support_test.go new file mode 100644 index 00000000000..c314775089a --- /dev/null +++ b/pkg/utils/test/matchers/jq/jq_support_test.go @@ -0,0 +1,54 @@ +//nolint:testpackage +package jq + +import ( + "encoding/json" + "reflect" + "testing" + + "github.com/onsi/gomega/gbytes" + + . 
"github.com/onsi/gomega" +) + +func TestToType(t *testing.T) { + t.Parallel() + + typeTestData := []byte(`{ "foo": "bar" }`) + g := NewWithT(t) + + items := map[string]func() any{ + "gbytes": func() any { + b := gbytes.NewBuffer() + + _, err := b.Write(typeTestData) + g.Expect(err).ShouldNot(HaveOccurred()) + + return b + }, + "bytes": func() any { + return typeTestData + }, + "string": func() any { + return string(typeTestData) + }, + "raw-message": func() any { + return json.RawMessage(typeTestData) + }, + } + + for name, fn := range items { + f := fn + + t.Run(name, func(t *testing.T) { + t.Parallel() + + tt, err := toType(f()) + + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(tt).Should(Satisfy(func(in any) bool { + return reflect.TypeOf(in).Kind() == reflect.Map + })) + }) + } +} diff --git a/pkg/utils/test/matchers/jq/jq_transform.go b/pkg/utils/test/matchers/jq/jq_transform.go new file mode 100644 index 00000000000..341855c919c --- /dev/null +++ b/pkg/utils/test/matchers/jq/jq_transform.go @@ -0,0 +1,46 @@ +package jq + +import ( + "fmt" + + "github.com/itchyny/gojq" +) + +func Extract(expression string) func(in any) (any, error) { + return func(in any) (any, error) { + return ExtractValue[any](in, expression) + } +} + +func ExtractValue[T any](in any, expression string) (T, error) { + var result T + var ok bool + + query, err := gojq.Parse(expression) + if err != nil { + return result, fmt.Errorf("unable to parse expression %s, %w", expression, err) + } + + data, err := toType(in) + if err != nil { + return result, err + } + + it := query.Run(data) + + v, ok := it.Next() + if !ok { + return result, nil + } + + if err, ok := v.(error); ok { + return result, err + } + + result, ok = v.(T) + if !ok { + return result, fmt.Errorf("result value is not of the expected type (expected:%T, got:%T", result, v) + } + + return result, nil +} diff --git a/pkg/utils/test/matchers/jq/jq_transform_test.go b/pkg/utils/test/matchers/jq/jq_transform_test.go new file mode 
100644 index 00000000000..6d020ed8a63 --- /dev/null +++ b/pkg/utils/test/matchers/jq/jq_transform_test.go @@ -0,0 +1,60 @@ +package jq_test + +import ( + "encoding/json" + "testing" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/matchers/jq" + + . "github.com/onsi/gomega" +) + +func TestExtract(t *testing.T) { + t.Parallel() + + g := NewWithT(t) + + g.Expect(`{ "foo": { "a": 1 }}`).Should( + WithTransform(jq.Extract(`.foo`), WithTransform(json.Marshal, + jq.Match(`.a == 1`), + )), + ) + + g.Expect(`{ "status": { "foo": { "bar": "fr", "baz": "fz" } } }`).Should( + WithTransform(jq.Extract(`.status`), + And( + jq.Match(`.foo.bar == "fr"`), + jq.Match(`.foo.baz == "fz"`), + ), + ), + ) +} + +func TestExtractValue(t *testing.T) { + t.Parallel() + + g := NewWithT(t) + + transform1 := func(in string) (any, error) { + return jq.ExtractValue[any](in, `.foo`) + } + + g.Expect(`{ "foo": { "a": 1 }}`).Should( + WithTransform(transform1, WithTransform(json.Marshal, + jq.Match(`.a == 1`), + )), + ) + + transform2 := func(in string) (any, error) { + return jq.ExtractValue[any](in, `.status`) + } + + g.Expect(`{ "status": { "foo": { "bar": "fr", "baz": "fz" } } }`).Should( + WithTransform(transform2, + And( + jq.Match(`.foo.bar == "fr"`), + jq.Match(`.foo.baz == "fz"`), + ), + ), + ) +} diff --git a/pkg/utils/test/matchers/matechers.go b/pkg/utils/test/matchers/matechers.go new file mode 100644 index 00000000000..59f2001b7db --- /dev/null +++ b/pkg/utils/test/matchers/matechers.go @@ -0,0 +1,19 @@ +package matchers + +import ( + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/types" +) + +func ExtractStatusCondition(conditionType string) func(in types.ResourceObject) metav1.Condition { + return func(in types.ResourceObject) metav1.Condition { + c := meta.FindStatusCondition(in.GetStatus().Conditions, conditionType) + if c == nil { + return 
metav1.Condition{} + } + + return *c + } +} diff --git a/pkg/utils/test/testf/testf.go b/pkg/utils/test/testf/testf.go new file mode 100644 index 00000000000..98bdc570084 --- /dev/null +++ b/pkg/utils/test/testf/testf.go @@ -0,0 +1,186 @@ +package testf + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/onsi/gomega" + operatorv1 "github.com/openshift/api/operator/v1" + routev1 "github.com/openshift/api/route/v1" + ofapi "github.com/operator-framework/api/pkg/operators/v1alpha1" + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + ctrlcli "sigs.k8s.io/controller-runtime/pkg/client" + ctrlcfg "sigs.k8s.io/controller-runtime/pkg/client/config" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" + dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + featurev1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" + odhcli "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/client" +) + +const ( + DefaultPollInterval = 1 * time.Second + DefaultTimeout = 2 * time.Minute +) + +var ( + DefaultAddToSchemes = []func(*runtime.Scheme) error{ + clientgoscheme.AddToScheme, + routev1.AddToScheme, + apiextv1.AddToScheme, + dsciv1.AddToScheme, + dscv1.AddToScheme, + featurev1.AddToScheme, + monitoringv1.AddToScheme, + ofapi.AddToScheme, + operatorv1.AddToScheme, + componentApi.AddToScheme, + } +) + +type testContextOpts struct { + ctx context.Context + cfg *rest.Config + client *odhcli.Client + scheme *runtime.Scheme + withTOpts []WithTOpts +} + +type TestContextOpt func(testContext *testContextOpts) + +func WithClient(value *odhcli.Client) TestContextOpt 
{ + return func(tc *testContextOpts) { + tc.client = value + } +} + +func WithRestConfig(value *rest.Config) TestContextOpt { + return func(tc *testContextOpts) { + tc.cfg = value + } +} + +func WithScheme(value *runtime.Scheme) TestContextOpt { + return func(tc *testContextOpts) { + tc.scheme = value + } +} + +//nolint:fatcontext +func WitContext(value context.Context) TestContextOpt { + return func(tc *testContextOpts) { + tc.ctx = value + } +} + +func WithTOptions(opts ...WithTOpts) TestContextOpt { + return func(tc *testContextOpts) { + tc.withTOpts = append(tc.withTOpts, opts...) + } +} + +func NewTestContext(opts ...TestContextOpt) (*TestContext, error) { + tco := testContextOpts{} + for _, opt := range opts { + opt(&tco) + } + + tc := TestContext{ + ctx: tco.ctx, + scheme: tco.scheme, + client: tco.client, + withTOpts: tco.withTOpts, + } + + if tc.ctx == nil { + tc.ctx = context.Background() + } + + if tc.scheme == nil { + tc.scheme = runtime.NewScheme() + for _, at := range DefaultAddToSchemes { + if err := at(tc.scheme); err != nil { + return nil, err + } + } + } + + if tc.client == nil { + clientCfg := tco.cfg + if clientCfg == nil { + cfg, err := ctrlcfg.GetConfig() + if err != nil { + return nil, fmt.Errorf("error creating the config object %w", err) + } + + clientCfg = cfg + } + + ctrlCli, err := ctrlcli.New(clientCfg, ctrlcli.Options{Scheme: tc.scheme}) + if err != nil { + return nil, fmt.Errorf("failed to initialize custom client: %w", err) + } + + odhCli, err := odhcli.NewFromConfig(clientCfg, ctrlCli) + if err != nil { + return nil, fmt.Errorf("failed to initialize odh client: %w", err) + } + + tc.client = odhCli + } + + return &tc, nil +} + +type TestContext struct { + ctx context.Context + client *odhcli.Client + scheme *runtime.Scheme + + withTOpts []WithTOpts +} + +func (tc *TestContext) Context() context.Context { + return tc.ctx +} + +func (tc *TestContext) Client() *odhcli.Client { + return tc.client +} + +func (tc *TestContext) Scheme() 
*runtime.Scheme { + return tc.client.Scheme() +} + +func (tc *TestContext) NewWithT(t *testing.T, opts ...WithTOpts) *WithT { + t.Helper() + + g := gomega.NewWithT(t) + g.SetDefaultEventuallyTimeout(DefaultTimeout) + g.SetDefaultEventuallyPollingInterval(DefaultPollInterval) + g.SetDefaultConsistentlyDuration(DefaultTimeout) + g.SetDefaultConsistentlyPollingInterval(DefaultPollInterval) + + answer := WithT{ + ctx: tc.ctx, + client: tc.client, + WithT: g, + } + + for _, opt := range tc.withTOpts { + opt(&answer) + } + + for _, opt := range opts { + opt(&answer) + } + + return &answer +} diff --git a/pkg/utils/test/testf/testf_assertions.go b/pkg/utils/test/testf/testf_assertions.go new file mode 100644 index 00000000000..5ff981c389f --- /dev/null +++ b/pkg/utils/test/testf/testf_assertions.go @@ -0,0 +1,167 @@ +package testf + +import ( + "context" + "errors" + "sync/atomic" + "time" + + "github.com/onsi/gomega" + "github.com/onsi/gomega/types" +) + +type Mode int + +const ( + eventually Mode = iota + consistently +) + +type EventuallyValue[T any] struct { + ctx context.Context + g *gomega.WithT + f func(context.Context) (T, error) +} + +func (e *EventuallyValue[T]) Get() (T, error) { + v, err := e.f(e.ctx) + + var pse gomega.PollingSignalError + if errors.As(err, &pse) { + if ue := errors.Unwrap(err); ue != nil { + err = ue + } + } + + return v, err +} + +func (e *EventuallyValue[T]) Eventually(args ...interface{}) *Assertion[T] { + return &Assertion[T]{ + ctx: e.ctx, + g: e.g, + f: e.f, + args: args, + m: eventually, + } +} + +func (e *EventuallyValue[T]) Consistently(args ...interface{}) *Assertion[T] { + return &Assertion[T]{ + ctx: e.ctx, + g: e.g, + f: e.f, + args: args, + m: consistently, + } +} + +type Assertion[T any] struct { + ctx context.Context + g *gomega.WithT + f func(context.Context) (T, error) + args []interface{} + + m Mode + + timeout time.Duration + polling time.Duration +} + +func (a *Assertion[T]) WithTimeout(interval time.Duration) 
*Assertion[T] { + a.timeout = interval + return a +} + +func (a *Assertion[T]) WithPolling(interval time.Duration) *Assertion[T] { + a.polling = interval + return a +} + +func (a *Assertion[T]) WithContext(ctx context.Context) *Assertion[T] { + a.ctx = ctx + return a +} + +func (a *Assertion[T]) build(f func(ctx context.Context) (T, error)) gomega.AsyncAssertion { + var aa gomega.AsyncAssertion + + switch a.m { + case eventually: + aa = a.g.Eventually(f, a.args...) + case consistently: + aa = a.g.Consistently(f, a.args...) + default: + panic("unsupported mode") + } + + aa = aa.WithContext(a.ctx) + + if a.timeout > 0 { + aa = aa.WithTimeout(a.timeout) + } + if a.polling > 0 { + aa = aa.WithPolling(a.polling) + } + + return aa +} + +func (a *Assertion[T]) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) T { + var res atomic.Value + + wrapper := func(ctx context.Context) (T, error) { + v, err := a.f(ctx) + res.Store(v) + + return v, err + } + + a.build(wrapper).Should(matcher, optionalDescription...) + + //nolint:forcetypeassert,errcheck + return res.Load().(T) +} + +func (a *Assertion[T]) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) T { + var res atomic.Value + + wrapper := func(ctx context.Context) (T, error) { + v, err := a.f(ctx) + res.Store(v) + + return v, err + } + + a.build(wrapper).ShouldNot(matcher, optionalDescription...) 
+ + //nolint:forcetypeassert,errcheck + return res.Load().(T) +} + +type EventuallyErr struct { + ctx context.Context + g *gomega.WithT + f func(context.Context) error +} + +func (e *EventuallyErr) Get() error { + err := e.f(e.ctx) + + var pse gomega.PollingSignalError + if errors.As(err, &pse) { + if ue := errors.Unwrap(err); ue != nil { + err = ue + } + } + + return err +} + +func (e *EventuallyErr) Eventually() types.AsyncAssertion { + return e.g.Eventually(e.ctx, e.f).WithContext(e.ctx) +} + +func (e *EventuallyErr) Consistently() types.AsyncAssertion { + return e.g.Consistently(e.ctx, e.f) +} diff --git a/pkg/utils/test/testf/testf_support.go b/pkg/utils/test/testf/testf_support.go new file mode 100644 index 00000000000..f2d560f1bad --- /dev/null +++ b/pkg/utils/test/testf/testf_support.go @@ -0,0 +1,105 @@ +package testf + +import ( + "fmt" + + "github.com/itchyny/gojq" + "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +// StopErr stops the retry process with a specified message and wraps the provided error. +// +// This function leverages Gomega's StopTrying function to signal an end to retrying operations +// when a condition is not satisfied or an error occurs. It enhances the error output +// by wrapping the original error (if any) with the provided message. +// +// Parameters: +// - err: An error to wrap. +// - message: A string message that describes the reason for stopping retries. +// +// Returns: +// +// An error that combines the stopping message and the wrapped error. +// +// Example usage: +// +// err := someOperation() +// if err != nil { +// return StopErr(err, "Operation failed") +// } +func StopErr(err error, format string, args ...any) error { + msg := format + if len(args) != 0 { + msg = fmt.Sprintf(format, args...) + } + + return gomega.StopTrying(msg).Wrap(err) +} + +// TransformFn defines a function type that takes an *unstructured.Unstructured object +// and applies a transformation to it. 
The function returns an error if the transformation fails. +type TransformFn func(obj *unstructured.Unstructured) error + +// TransformPipeline constructs a composite TransformFn from a series of TransformFn steps. +// It returns a single TransformFn that applies each step sequentially to the given object. +// +// If any step returns an error, the pipeline terminates immediately and returns that error. +// If all steps succeed, the pipeline returns nil. +func TransformPipeline(steps ...TransformFn) TransformFn { + return func(obj *unstructured.Unstructured) error { + for _, step := range steps { + err := step(obj) + if err != nil { + return err + } + } + + return nil + } +} + +// Transform creates a transformation function that applies a JQ-like query expression to an +// unstructured Kubernetes object (`unstructured.Unstructured`), allowing dynamic field extraction, +// modification, or replacement of the object's content. +// +// This function generates a transformation function by formatting a query string using +// the provided format and arguments. The returned function can be applied to an +// `*unstructured.Unstructured` object, which will be updated based on the result of the query. +// +// Parameters: +// - format: A format string for building a JQ-like query expression. +// - args: Variadic arguments to populate placeholders in the format string. +// +// Returns: +// - func(*unstructured.Unstructured) error: A function that applies the formatted query to +// the provided `*unstructured.Unstructured` object and updates its content. +func Transform(format string, args ...any) TransformFn { + expression := fmt.Sprintf(format, args...) 
+ + return func(in *unstructured.Unstructured) error { + query, err := gojq.Parse(expression) + if err != nil { + return fmt.Errorf("unable to parse expression %q: %w", expression, err) + } + + result, ok := query.Run(in.Object).Next() + if !ok || result == nil { + // No results or nil result, nothing to update + return nil + } + + if err, ok := result.(error); ok { + return fmt.Errorf("query execution error: %w", err) + } + + uc, ok := result.(map[string]interface{}) + if !ok { + return fmt.Errorf("expected map[string]interface{}, got %T", result) + } + + in.SetUnstructuredContent(uc) + + return nil + } +} diff --git a/pkg/utils/test/testf/testf_support_test.go b/pkg/utils/test/testf/testf_support_test.go new file mode 100644 index 00000000000..8e4928f8c27 --- /dev/null +++ b/pkg/utils/test/testf/testf_support_test.go @@ -0,0 +1,132 @@ +package testf_test + +import ( + "errors" + "testing" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/matchers/jq" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/testf" + + . 
"github.com/onsi/gomega" +) + +func TestTransform(t *testing.T) { + g := NewWithT(t) + + t.Run("Change Value of Nested Field", func(t *testing.T) { + obj := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "Example", + "metadata": map[string]interface{}{ + "annotations": map[string]interface{}{ + "key1": "value1", + "key2": "value2", + }, + }, + }, + } + + const expression = `.metadata.annotations.key1 |= "new-value"` + + err := testf.Transform(expression)(obj) + + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(obj.Object).Should(And( + jq.Match(`.kind == "Example"`), + jq.Match(`.metadata.annotations.key1 == "new-value"`), + jq.Match(`.metadata.annotations.key2 == "value2"`), + )) + }) + + t.Run("Invalid JQ Expression", func(t *testing.T) { + obj := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "Example", + "data": "value", + }, + } + + const expression = "~~~invalid-expression" + + err := testf.Transform(expression)(obj) + + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("unable to parse expression")) + }) + + t.Run("Query Result Is Not Map", func(t *testing.T) { + obj := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "Example", + "data": []string{"value1", "value2"}, + }, + } + + const expression = ".data" + + err := testf.Transform(expression)(obj) + + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("expected map[string]interface{}")) + }) + + t.Run("Empty Query Result", func(t *testing.T) { + obj := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "Example", + "data": map[string]interface{}{ + "name": "value", + }, + }, + } + + const expression = ".nonexistent" + + err := testf.Transform(expression)(obj) + + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(obj.Object).Should(And( + jq.Match(`.kind == "Example"`), + jq.Match(`.data.name == "value"`), + )) + }) +} + +func TestTransformPipeline(t 
*testing.T) { + g := NewWithT(t) + + obj := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "example", + }, + }, + } + + step1 := func(obj *unstructured.Unstructured) error { + obj.SetName("transformed-example") + return nil + } + + step2 := func(obj *unstructured.Unstructured) error { + obj.Object["status"] = "active" + return nil + } + + step3 := func(obj *unstructured.Unstructured) error { + if obj.GetName() == "" { + return errors.New("name cannot be empty") + } + return nil + } + + pipeline := testf.TransformPipeline(step1, step2, step3) + + err := pipeline(obj) + + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(obj.GetName()).To(Equal("transformed-example")) + g.Expect(obj.Object["status"]).To(Equal("active")) +} diff --git a/pkg/utils/test/testf/testf_witht.go b/pkg/utils/test/testf/testf_witht.go new file mode 100644 index 00000000000..585574455ed --- /dev/null +++ b/pkg/utils/test/testf/testf_witht.go @@ -0,0 +1,171 @@ +package testf + +import ( + "context" + "time" + + "github.com/onsi/gomega" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + odhClient "github.com/opendatahub-io/opendatahub-operator/v2/pkg/controller/client" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" +) + +type WithTOpts func(*WithT) + +func WithEventuallyTimeout(value time.Duration) WithTOpts { + return func(g *WithT) { + g.SetDefaultEventuallyTimeout(value) + } +} + +func WithEventuallyPollingInterval(value time.Duration) WithTOpts { + return func(g *WithT) { + g.SetDefaultEventuallyPollingInterval(value) + } +} + +func WithConsistentlyDuration(value time.Duration) WithTOpts { + return func(g *WithT) { + g.SetDefaultConsistentlyDuration(value) + } +} + +func WithConsistentlyPollingInterval(value time.Duration) WithTOpts { + return 
func(g *WithT) { + g.SetDefaultConsistentlyPollingInterval(value) + } +} + +type WithT struct { + ctx context.Context + client *odhClient.Client + + *gomega.WithT +} + +func (t *WithT) Context() context.Context { + return t.ctx +} + +func (t *WithT) Client() *odhClient.Client { + return t.client +} + +func (t *WithT) List( + gvk schema.GroupVersionKind, + option ...client.ListOption, +) *EventuallyValue[[]unstructured.Unstructured] { + return &EventuallyValue[[]unstructured.Unstructured]{ + ctx: t.Context(), + g: t.WithT, + f: func(ctx context.Context) ([]unstructured.Unstructured, error) { + items := unstructured.UnstructuredList{} + items.SetGroupVersionKind(gvk) + + err := t.Client().List(ctx, &items, option...) + if err != nil { + return nil, StopErr(err, "failed to list resource: %s", gvk) + } + + return items.Items, nil + }, + } +} + +func (t *WithT) Get( + gvk schema.GroupVersionKind, + nn types.NamespacedName, + option ...client.GetOption, +) *EventuallyValue[*unstructured.Unstructured] { + return &EventuallyValue[*unstructured.Unstructured]{ + ctx: t.Context(), + g: t.WithT, + f: func(ctx context.Context) (*unstructured.Unstructured, error) { + u := unstructured.Unstructured{} + u.SetGroupVersionKind(gvk) + + err := t.Client().Get(ctx, nn, &u, option...) + switch { + case errors.IsNotFound(err): + return nil, nil + case err != nil: + return nil, StopErr(err, "failed to get resource: %s, nn: %s", gvk, nn.String()) + default: + return &u, nil + } + }, + } +} + +func (t *WithT) Delete( + gvk schema.GroupVersionKind, + nn types.NamespacedName, + option ...client.DeleteOption, +) *EventuallyErr { + return &EventuallyErr{ + ctx: t.Context(), + g: t.WithT, + f: func(ctx context.Context) error { + u := resources.GvkToUnstructured(gvk) + u.SetName(nn.Name) + u.SetNamespace(nn.Namespace) + + err := t.Client().Delete(ctx, u, option...) 
+ switch { + case errors.IsNotFound(err): + return nil + case err != nil: + return StopErr(err, "failed to delete resource: %s, nn: %s", gvk, nn.String()) + default: + return nil + } + }, + } +} + +func (t *WithT) Update( + gvk schema.GroupVersionKind, + nn types.NamespacedName, + fn func(obj *unstructured.Unstructured) error, + option ...client.UpdateOption, +) *EventuallyValue[*unstructured.Unstructured] { + return &EventuallyValue[*unstructured.Unstructured]{ + ctx: t.Context(), + g: t.WithT, + f: func(ctx context.Context) (*unstructured.Unstructured, error) { + obj := resources.GvkToUnstructured(gvk) + + err := t.Client().Get(ctx, nn, obj) + switch { + case errors.IsNotFound(err): + return nil, nil + case err != nil: + return nil, StopErr(err, "failed to get resource: %s, nn: %s", gvk, nn.String()) + } + + in, err := resources.ToUnstructured(obj) + if err != nil { + return nil, StopErr(err, "failed to convert to unstructured") + } + + if err := fn(in); err != nil { + return nil, StopErr(err, "failed to apply function") + } + + err = t.Client().Update(ctx, in, option...) 
+ switch { + case errors.IsForbidden(err): + return nil, StopErr(err, "failed to update resource: %s, nn: %s", gvk, nn.String()) + case err != nil: + return nil, err + default: + return in, nil + } + }, + } +} diff --git a/pkg/utils/test/testf/testf_witht_test.go b/pkg/utils/test/testf/testf_witht_test.go new file mode 100644 index 00000000000..60ab8629c4c --- /dev/null +++ b/pkg/utils/test/testf/testf_witht_test.go @@ -0,0 +1,237 @@ +package testf_test + +import ( + "testing" + "time" + + "github.com/rs/xid" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/fakeclient" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/matchers/jq" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/testf" + + . "github.com/onsi/gomega" +) + +func TestGet(t *testing.T) { + g := NewWithT(t) + + cm := corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: gvk.ConfigMap.GroupVersion().String(), + Kind: gvk.ConfigMap.Kind, + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: xid.New().String(), + }, + } + + cl, err := fakeclient.New(&cm) + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(cl).ShouldNot(BeNil()) + + tc, err := testf.NewTestContext(testf.WithClient(cl)) + g.Expect(err).ShouldNot(HaveOccurred()) + + key := client.ObjectKeyFromObject(&cm) + + matchMetadata := And( + jq.Match(`.metadata.namespace == "%s"`, cm.Namespace), + jq.Match(`.metadata.name == "%s"`, cm.Name), + ) + + t.Run("Get", func(t *testing.T) { + wt := tc.NewWithT(t) + + v, err := wt.Get(gvk.ConfigMap, key).Get() + + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(v).Should(matchMetadata) + }) + + t.Run("Eventually", func(t *testing.T) { + wt := tc.NewWithT(t) + + v := wt.Get(gvk.ConfigMap, key).Eventually().Should(matchMetadata) + 
g.Expect(v).ShouldNot(BeNil()) + }) + + t.Run("Consistently", func(t *testing.T) { + wt := tc.NewWithT(t) + + v := wt.Get(gvk.ConfigMap, key).Consistently().WithTimeout(1 * time.Second).Should(matchMetadata) + g.Expect(v).ShouldNot(BeNil()) + }) +} + +func TestList(t *testing.T) { + g := NewWithT(t) + + cm := corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: gvk.ConfigMap.GroupVersion().String(), + Kind: gvk.ConfigMap.Kind, + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: xid.New().String(), + }, + } + + cl, err := fakeclient.New(&cm) + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(cl).ShouldNot(BeNil()) + + tc, err := testf.NewTestContext(testf.WithClient(cl)) + g.Expect(err).ShouldNot(HaveOccurred()) + + matchMetadata := And( + HaveLen(1), + HaveEach(And( + jq.Match(`.metadata.namespace == "%s"`, cm.Namespace), + jq.Match(`.metadata.name == "%s"`, cm.Name), + )), + ) + + t.Run("Get", func(t *testing.T) { + wt := tc.NewWithT(t) + + v, err := wt.List(gvk.ConfigMap).Get() + + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(v).Should(matchMetadata) + }) + + t.Run("Eventually", func(t *testing.T) { + wt := tc.NewWithT(t) + + v := wt.List(gvk.ConfigMap).Eventually().Should(matchMetadata) + g.Expect(v).ShouldNot(BeNil()) + }) + + t.Run("Consistently", func(t *testing.T) { + wt := tc.NewWithT(t) + + v := wt.List(gvk.ConfigMap).Consistently().WithTimeout(1 * time.Second).Should(matchMetadata) + g.Expect(v).ShouldNot(BeNil()) + }) +} + +func TestUpdate(t *testing.T) { + g := NewWithT(t) + + cm := corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: gvk.ConfigMap.GroupVersion().String(), + Kind: gvk.ConfigMap.Kind, + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: xid.New().String(), + }, + } + + cl, err := fakeclient.New(&cm) + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(cl).ShouldNot(BeNil()) + + tc, err := testf.NewTestContext(testf.WithClient(cl)) + g.Expect(err).ShouldNot(HaveOccurred()) + + 
matchMetadataAndData := And( + jq.Match(`.metadata.namespace == "%s"`, cm.Namespace), + jq.Match(`.metadata.name == "%s"`, cm.Name), + jq.Match(`.data.foo == "%s"`, cm.Name), + ) + + key := client.ObjectKeyFromObject(&cm) + transformer := testf.Transform(`.data.foo = "%s"`, cm.Name) + + t.Run("Get", func(t *testing.T) { + wt := tc.NewWithT(t) + + v, err := wt.Update(gvk.ConfigMap, key, transformer).Get() + + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(v).Should(matchMetadataAndData) + }) + + t.Run("Eventually", func(t *testing.T) { + wt := tc.NewWithT(t) + + v := wt.Update(gvk.ConfigMap, key, transformer).Eventually().Should(matchMetadataAndData) + g.Expect(v).Should(matchMetadataAndData) + }) + + t.Run("Consistently", func(t *testing.T) { + wt := tc.NewWithT(t) + + v := wt.Update(gvk.ConfigMap, key, transformer).Consistently().WithTimeout(1 * time.Second).Should(matchMetadataAndData) + g.Expect(v).Should(matchMetadataAndData) + }) +} + +func TestDelete(t *testing.T) { + g := NewWithT(t) + + cm := corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: gvk.ConfigMap.GroupVersion().String(), + Kind: gvk.ConfigMap.Kind, + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: xid.New().String(), + }, + } + + cl, err := fakeclient.New() + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(cl).ShouldNot(BeNil()) + + tc, err := testf.NewTestContext(testf.WithClient(cl)) + g.Expect(err).ShouldNot(HaveOccurred()) + + key := client.ObjectKeyFromObject(&cm) + + t.Run("Get", func(t *testing.T) { + wt := tc.NewWithT(t) + + err := wt.Client().Create(wt.Context(), cm.DeepCopy()) + g.Expect(err).ShouldNot(HaveOccurred()) + + err = wt.Delete(gvk.ConfigMap, key).Get() + g.Expect(err).ShouldNot(HaveOccurred()) + + wt.List(gvk.ConfigMap).Eventually().Should(BeEmpty()) + }) + + t.Run("Eventually", func(t *testing.T) { + wt := tc.NewWithT(t) + + err := wt.Client().Create(wt.Context(), cm.DeepCopy()) + g.Expect(err).ShouldNot(HaveOccurred()) + + ok := 
wt.Delete(gvk.ConfigMap, key).Eventually().Should(Succeed()) + g.Expect(ok).Should(BeTrue()) + + wt.List(gvk.ConfigMap).Eventually().Should(BeEmpty()) + }) + + t.Run("Consistently", func(t *testing.T) { + wt := tc.NewWithT(t) + + err := wt.Client().Create(wt.Context(), cm.DeepCopy()) + g.Expect(err).ShouldNot(HaveOccurred()) + + ok := wt.Delete(gvk.ConfigMap, key).Consistently().WithTimeout(1 * time.Second).Should(Succeed()) + g.Expect(ok).Should(BeTrue()) + + wt.List(gvk.ConfigMap).Eventually().Should(BeEmpty()) + }) +} diff --git a/tests/e2e/authcontroller_test.go b/tests/e2e/authcontroller_test.go new file mode 100644 index 00000000000..0850cab414f --- /dev/null +++ b/tests/e2e/authcontroller_test.go @@ -0,0 +1,189 @@ +package e2e_test + +import ( + "errors" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/types" + + serviceApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/services/v1alpha1" + + . 
"github.com/onsi/gomega" +) + +type AuthControllerTestCtx struct { + *testContext + testAuthInstance serviceApi.Auth +} + +func authControllerTestSuite(t *testing.T) { + t.Helper() + + tc, err := NewTestContext() + require.NoError(t, err) + + authServiceCtx := AuthControllerTestCtx{ + testContext: tc, + } + + t.Run(tc.testDsc.Name, func(t *testing.T) { + t.Run("Auto creation of Auth CR", func(t *testing.T) { + err = authServiceCtx.validateAuthCRCreation() + require.NoError(t, err, "error getting Auth CR") + }) + t.Run("Test Auth CR content", func(t *testing.T) { + err = authServiceCtx.validateAuthCRDefaultContent() + require.NoError(t, err, "unexpected content in Auth CR") + }) + t.Run("Test role creation", func(t *testing.T) { + err = authServiceCtx.validateAuthCRRoleCreation() + require.NoError(t, err, "error getting created roles") + }) + t.Run("Test rolebinding creation", func(t *testing.T) { + err = authServiceCtx.validateAuthCRRoleBindingCreation() + require.NoError(t, err, "error getting created rolebindings") + }) + t.Run("Test rolebinding is added when group is added", func(t *testing.T) { + err = authServiceCtx.validateAddingGroups() + require.NoError(t, err, "error getting created rolebindings") + }) + t.Run("Test clusterrole is added when group is added", func(t *testing.T) { + err = authServiceCtx.validateAuthCRClusterRoleCreation() + require.NoError(t, err, "error getting created rolebindings") + }) + t.Run("Test clusterrolebinding is added when group is added", func(t *testing.T) { + err = authServiceCtx.validateAuthCRClusterRoleBindingCreation() + require.NoError(t, err, "error getting created rolebindings") + }) + }) +} + +func (tc *AuthControllerTestCtx) WithT(t *testing.T) *WithT { + t.Helper() + + g := NewWithT(t) + g.SetDefaultEventuallyTimeout(generalWaitTimeout) + g.SetDefaultEventuallyPollingInterval(1 * time.Second) + + return g +} + +func (tc *AuthControllerTestCtx) validateAuthCRCreation() error { + authList := &serviceApi.AuthList{} + if 
err := tc.testContext.customClient.List(tc.ctx, authList); err != nil { + return fmt.Errorf("unable to find Auth CR instance: %w", err) + } + + switch { + case len(authList.Items) == 1: + tc.testAuthInstance = authList.Items[0] + return nil + case len(authList.Items) > 1: + return fmt.Errorf("only one Auth CR expected, found %v", len(authList.Items)) + default: + return nil + } +} + +func (tc *AuthControllerTestCtx) validateAuthCRDefaultContent() error { + if len(tc.testAuthInstance.Spec.AdminGroups) == 0 { + return errors.New("AdminGroups is empty ") + } + + if tc.testAuthInstance.Spec.AdminGroups[0] != "odh-admins" { + return fmt.Errorf("expected odh-admins, found %v", tc.testAuthInstance.Spec.AdminGroups[0]) + } + + if tc.testAuthInstance.Spec.AllowedGroups[0] != "system:authenticated" { + return fmt.Errorf("expected system:authenticated, found %v", tc.testAuthInstance.Spec.AllowedGroups[0]) + } + + return nil +} + +func (tc *AuthControllerTestCtx) validateAuthCRRoleCreation() error { + adminRole := &rbacv1.Role{} + allowedRole := &rbacv1.Role{} + + if err := tc.testContext.customClient.Get(tc.ctx, types.NamespacedName{Namespace: "opendatahub", Name: "admingroup-role"}, adminRole); err != nil { + return err + } + + if err := tc.testContext.customClient.Get(tc.ctx, types.NamespacedName{Namespace: "opendatahub", Name: "allowedgroup-role"}, allowedRole); err != nil { + return err + } + + return nil +} + +func (tc *AuthControllerTestCtx) validateAuthCRClusterRoleCreation() error { + adminClusterRole := &rbacv1.ClusterRole{} + + if err := tc.testContext.customClient.Get(tc.ctx, types.NamespacedName{Namespace: "opendatahub", Name: "admingroupcluster-role"}, adminClusterRole); err != nil { + return err + } + + return nil +} + +func (tc *AuthControllerTestCtx) validateAuthCRRoleBindingCreation() error { + adminRolebinding := &rbacv1.RoleBinding{} + allowedRolebinding := &rbacv1.RoleBinding{} + + if err := tc.testContext.customClient.Get(tc.ctx, 
types.NamespacedName{Namespace: "opendatahub", Name: "admingroup-rolebinding"}, adminRolebinding); err != nil { + return err + } + + if err := tc.testContext.customClient.Get(tc.ctx, types.NamespacedName{Namespace: "opendatahub", Name: "allowedgroup-rolebinding"}, allowedRolebinding); err != nil { + return err + } + + return nil +} + +func (tc *AuthControllerTestCtx) validateAuthCRClusterRoleBindingCreation() error { + adminClusterRolebinding := &rbacv1.ClusterRoleBinding{} + + if err := tc.testContext.customClient.Get(tc.ctx, types.NamespacedName{Namespace: "opendatahub", Name: "admingroupcluster-rolebinding"}, adminClusterRolebinding); err != nil { + return err + } + + return nil +} + +func (tc *AuthControllerTestCtx) validateAddingGroups() error { + tc.testAuthInstance.Spec.AdminGroups = append(tc.testAuthInstance.Spec.AdminGroups, "aTestAdminGroup") + tc.testAuthInstance.Spec.AllowedGroups = append(tc.testAuthInstance.Spec.AllowedGroups, "aTestAllowedGroup") + err := tc.customClient.Update(tc.ctx, &tc.testAuthInstance) + if err != nil { + fmt.Println("ERR: ", err) + } + + adminRolebinding := &rbacv1.RoleBinding{} + adminClusterRolebinding := &rbacv1.ClusterRoleBinding{} + allowedRolebinding := &rbacv1.RoleBinding{} + + if err := tc.testContext.customClient.Get(tc.ctx, types.NamespacedName{Namespace: "opendatahub", Name: "admingroup-rolebinding"}, adminRolebinding); err != nil { + if adminRolebinding.Subjects[1].Name != "aTestAdminGroup" { + return fmt.Errorf("Expected aTestAdminGroup found %s ", adminRolebinding.Subjects[1].Name) + } + } + + if err := tc.testContext.customClient.Get(tc.ctx, types.NamespacedName{Namespace: "opendatahub", Name: "admingroupcluster-rolebinding"}, adminClusterRolebinding); err != nil { + if adminRolebinding.Subjects[1].Name != "aTestAdminGroup" { + return fmt.Errorf("Expected aTestAdminGroup found %s ", adminRolebinding.Subjects[1].Name) + } + } + + if err := tc.testContext.customClient.Get(tc.ctx, types.NamespacedName{Namespace: 
"opendatahub", Name: "allowedgroup-rolebinding"}, allowedRolebinding); err != nil { + if allowedRolebinding.Subjects[1].Name != "aTestAllowedGroup" { + return fmt.Errorf("Expected aTestAllowedGroup found %s ", allowedRolebinding.Subjects[1].Name) + } + } + + return nil +} diff --git a/tests/e2e/codeflare_test.go b/tests/e2e/codeflare_test.go new file mode 100644 index 00000000000..26781260463 --- /dev/null +++ b/tests/e2e/codeflare_test.go @@ -0,0 +1,29 @@ +package e2e_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" +) + +func codeflareTestSuite(t *testing.T) { + t.Helper() + + ct, err := NewComponentTestCtx(&componentApi.CodeFlare{}) + require.NoError(t, err) + + componentCtx := CodeFlareTestCtx{ + ComponentTestCtx: ct, + } + + t.Run("Validate component enabled", componentCtx.ValidateComponentEnabled) + t.Run("Validate operands have OwnerReferences", componentCtx.ValidateOperandsOwnerReferences) + t.Run("Validate update operand resources", componentCtx.ValidateUpdateDeploymentsResources) + t.Run("Validate component disabled", componentCtx.ValidateComponentDisabled) +} + +type CodeFlareTestCtx struct { + *ComponentTestCtx +} diff --git a/tests/e2e/components_test.go b/tests/e2e/components_test.go new file mode 100644 index 00000000000..9ceac4c4f6f --- /dev/null +++ b/tests/e2e/components_test.go @@ -0,0 +1,280 @@ +package e2e_test + +import ( + "fmt" + "strings" + "testing" + "time" + + operatorv1 "github.com/openshift/api/operator/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" + dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + 
"github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/matchers/jq" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/testf" + + . "github.com/onsi/gomega" +) + +type ComponentTestCtx struct { + *testf.TestContext + + GVK schema.GroupVersionKind + DSCName types.NamespacedName + DSCIName types.NamespacedName + ApplicationNamespace string +} + +func NewComponentTestCtx(object common.PlatformObject) (*ComponentTestCtx, error) { + tcf, err := testf.NewTestContext( + testf.WithTOptions( + testf.WithEventuallyTimeout(generalWaitTimeout), + testf.WithEventuallyPollingInterval(generalPollInterval), + ), + ) + + if err != nil { + return nil, err + } + + ogvk, err := resources.GetGroupVersionKindForObject(tcf.Scheme(), object) + if err != nil { + return nil, err + } + + dsciList := dsciv1.DSCInitializationList{} + if err := tcf.Client().List(tcf.Context(), &dsciList); err != nil { + return nil, err + } + + if len(dsciList.Items) != 1 { + return nil, fmt.Errorf("failure looking up DSCInitialization, expected=1, found=%d", len(dsciList.Items)) + } + + dscList := dscv1.DataScienceClusterList{} + if err := tcf.Client().List(tcf.Context(), &dscList); err != nil { + return nil, err + } + + if len(dscList.Items) != 1 { + return nil, fmt.Errorf("failure looking up DataScienceCluster, expected=1, found=%d", len(dscList.Items)) + } + + componentCtx := ComponentTestCtx{ + TestContext: tcf, + GVK: ogvk, + DSCName: client.ObjectKeyFromObject(&dscList.Items[0]), + DSCIName: client.ObjectKeyFromObject(&dsciList.Items[0]), + ApplicationNamespace: dsciList.Items[0].Spec.ApplicationsNamespace, + } + + return &componentCtx, nil +} + +func (c *ComponentTestCtx) ValidateComponentEnabled(t *testing.T) { + g := c.NewWithT(t) + + g.Update( + 
gvk.DataScienceCluster, + c.DSCName, + testf.Transform(`.spec.components.%s.managementState = "%s"`, strings.ToLower(c.GVK.Kind), operatorv1.Managed), + ).Eventually().Should( + jq.Match(`.spec.components.%s.managementState == "%s"`, strings.ToLower(c.GVK.Kind), operatorv1.Managed), + ) + + g.List(gvk.DataScienceCluster).Eventually().Should(And( + HaveLen(1), + HaveEach(And( + jq.Match(`.spec.components.%s.managementState == "%s"`, strings.ToLower(c.GVK.Kind), operatorv1.Managed), + jq.Match(`.status.conditions[] | select(.type == "%sReady") | .status == "%s"`, c.GVK.Kind, metav1.ConditionTrue), + )), + )) + + g.List(c.GVK).Eventually().Should(And( + HaveLen(1), + HaveEach(And( + jq.Match(`.metadata.ownerReferences[0].kind == "%s"`, gvk.DataScienceCluster.Kind), + jq.Match(`.status.conditions[] | select(.type == "Ready") | .status == "%s"`, metav1.ConditionTrue), + )), + )) +} + +func (c *ComponentTestCtx) ValidateOperandsOwnerReferences(t *testing.T) { + g := c.NewWithT(t) + + g.List( + gvk.Deployment, + client.InNamespace(c.ApplicationNamespace), + client.MatchingLabels{labels.PlatformPartOf: strings.ToLower(c.GVK.Kind)}, + ).Eventually().Should(And( + Not(BeEmpty()), + HaveEach( + jq.Match(`.metadata.ownerReferences[0].kind == "%s"`, c.GVK.Kind), + ), + )) +} + +func (c *ComponentTestCtx) ValidateUpdateDeploymentsResources(t *testing.T) { + g := c.NewWithT(t) + + deployments := g.List( + gvk.Deployment, + client.InNamespace(c.ApplicationNamespace), + client.MatchingLabels{ + labels.PlatformPartOf: strings.ToLower(c.GVK.Kind), + }, + ).Eventually().ShouldNot( + BeEmpty(), + ) + + for _, d := range deployments { + t.Run("deployment_"+d.GetName(), func(t *testing.T) { + replicas, err := jq.ExtractValue[int](d, `.spec.replicas`) + g.Expect(err).ShouldNot(HaveOccurred()) + + expectedReplica := replicas + 1 + if replicas > 1 { + expectedReplica = 1 + } + + g.Update( + gvk.Deployment, + client.ObjectKeyFromObject(&d), + testf.Transform(`.spec.replicas = %d`, 
expectedReplica), + ).Eventually().WithTimeout(30 * time.Second).WithPolling(1 * time.Second).Should( + jq.Match(`.spec.replicas == %d`, expectedReplica), + ) + + g.Get( + gvk.Deployment, + client.ObjectKeyFromObject(&d), + ).Eventually().Should( + jq.Match(`.spec.replicas == %d`, expectedReplica), + ) + + g.Get( + gvk.Deployment, + client.ObjectKeyFromObject(&d), + ).Consistently().WithTimeout(30 * time.Second).WithPolling(1 * time.Second).Should( + jq.Match(`.spec.replicas == %d`, expectedReplica), + ) + }) + } +} + +func (c *ComponentTestCtx) ValidateComponentDisabled(t *testing.T) { + g := c.NewWithT(t) + + g.List(c.GVK).Eventually().ShouldNot( + BeEmpty(), + ) + + g.Update( + gvk.DataScienceCluster, + c.DSCName, + testf.Transform(`.spec.components.%s.managementState = "%s"`, strings.ToLower(c.GVK.Kind), operatorv1.Removed), + ).Eventually().Should( + jq.Match(`.spec.components.%s.managementState == "%s"`, strings.ToLower(c.GVK.Kind), operatorv1.Removed), + ) + + g.List(c.GVK).Eventually().Should( + BeEmpty(), + ) + + g.List( + gvk.Deployment, + client.InNamespace(c.ApplicationNamespace), + client.MatchingLabels{ + labels.PlatformPartOf: strings.ToLower(c.GVK.Kind), + }, + ).Eventually().Should( + BeEmpty(), + ) + + g.List(gvk.DataScienceCluster).Eventually().Should(And( + HaveLen(1), + HaveEach(And( + jq.Match(`.spec.components.%s.managementState == "%s"`, strings.ToLower(c.GVK.Kind), operatorv1.Removed), + jq.Match(`.status.conditions[] | select(.type == "%sReady") | .status == "%s"`, c.GVK.Kind, metav1.ConditionFalse), + )), + )) +} + +func (c *ComponentTestCtx) ValidateCRDReinstated(t *testing.T, name string) { + t.Helper() + + g := c.NewWithT(t) + crdSel := client.MatchingFields{"metadata.name": name} + + g.Update( + gvk.DataScienceCluster, + c.DSCName, + testf.Transform(`.spec.components.%s.managementState = "%s"`, strings.ToLower(c.GVK.Kind), operatorv1.Removed), + ).Eventually().Should( + jq.Match(`.spec.components.%s.managementState == "%s"`, 
strings.ToLower(c.GVK.Kind), operatorv1.Removed), + ) + + g.List(c.GVK).Eventually().Should( + BeEmpty(), + ) + g.List(gvk.CustomResourceDefinition, crdSel).Eventually().Should( + HaveLen(1), + ) + + g.Delete( + gvk.CustomResourceDefinition, + types.NamespacedName{Name: name}, + client.PropagationPolicy(metav1.DeletePropagationForeground), + ).Eventually().Should( + Succeed(), + ) + + g.List(gvk.CustomResourceDefinition, crdSel).Eventually().Should( + BeEmpty(), + ) + + g.Update( + gvk.DataScienceCluster, + c.DSCName, + testf.Transform(`.spec.components.%s.managementState = "%s"`, strings.ToLower(c.GVK.Kind), operatorv1.Managed), + ).Eventually().Should( + jq.Match(`.spec.components.%s.managementState == "%s"`, strings.ToLower(c.GVK.Kind), operatorv1.Managed), + ) + + g.List(c.GVK).Eventually().Should( + HaveLen(1), + ) + g.List(gvk.CustomResourceDefinition, crdSel).Eventually().Should( + HaveLen(1), + ) +} + +func (c *ComponentTestCtx) GetDSC() (*dscv1.DataScienceCluster, error) { + obj := dscv1.DataScienceCluster{} + + err := c.Client().Get(c.Context(), c.DSCName, &obj) + if err != nil { + return nil, err + } + + return &obj, nil +} + +func (c *ComponentTestCtx) GetDSCI() (*dsciv1.DSCInitialization, error) { + obj := dsciv1.DSCInitialization{} + + err := c.Client().Get(c.Context(), c.DSCIName, &obj) + if err != nil { + return nil, err + } + + return &obj, nil +} diff --git a/tests/e2e/controller_test.go b/tests/e2e/controller_test.go index 59bf3a325ce..e1e58279e22 100644 --- a/tests/e2e/controller_test.go +++ b/tests/e2e/controller_test.go @@ -5,12 +5,15 @@ import ( "flag" "fmt" "os" + "slices" + "strings" "testing" operatorv1 "github.com/openshift/api/operator/v1" routev1 "github.com/openshift/api/route/v1" ofapi "github.com/operator-framework/api/pkg/operators/v1alpha1" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + "golang.org/x/exp/maps" autoscalingv1 "k8s.io/api/autoscaling/v1" apiextv1 
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/runtime" @@ -23,17 +26,57 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" featurev1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" + serviceApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/services/v1alpha1" ) +type TestFn func(t *testing.T) + var ( - opNamespace string - skipDeletion bool - scheme = runtime.NewScheme() + testOpts testContextConfig + Scheme = runtime.NewScheme() + + componentsTestSuites = map[string]TestFn{ + // do not add modelcontroller here, due to dependency, test it separately below + componentApi.DashboardComponentName: dashboardTestSuite, + componentApi.RayComponentName: rayTestSuite, + componentApi.ModelRegistryComponentName: modelRegistryTestSuite, + componentApi.TrustyAIComponentName: trustyAITestSuite, + componentApi.KueueComponentName: kueueTestSuite, + componentApi.TrainingOperatorComponentName: trainingOperatorTestSuite, + componentApi.DataSciencePipelinesComponentName: dataSciencePipelinesTestSuite, + componentApi.CodeFlareComponentName: codeflareTestSuite, + componentApi.WorkbenchesComponentName: workbenchesTestSuite, + componentApi.KserveComponentName: kserveTestSuite, + componentApi.ModelMeshServingComponentName: modelMeshServingTestSuite, + componentApi.ModelControllerComponentName: modelControllerTestSuite, + } ) +type arrayFlags []string + +func (i *arrayFlags) String() string { + return fmt.Sprintf("%v", *i) +} + +func (i *arrayFlags) Set(value string) error { + *i = append(*i, value) + return nil +} + +type testContextConfig struct { + operatorNamespace string + skipDeletion bool + + 
operatorControllerTest bool + webhookTest bool + components arrayFlags + authControllerTest bool +} + // Holds information specific to individual tests. type testContext struct { // Rest config @@ -53,6 +96,8 @@ type testContext struct { // context for accessing resources //nolint:containedctx //reason: legacy v1 test setup ctx context.Context + // test configuration + testOpts testContextConfig } func NewTestContext() (*testContext, error) { @@ -70,7 +115,7 @@ func NewTestContext() (*testContext, error) { } // custom client to manages resources like Route etc - custClient, err := client.New(config, client.Options{Scheme: scheme}) + custClient, err := client.New(config, client.Options{Scheme: Scheme}) if err != nil { return nil, fmt.Errorf("failed to initialize custom client: %w", err) } @@ -84,40 +129,63 @@ func NewTestContext() (*testContext, error) { cfg: config, kubeClient: kc, customClient: custClient, - operatorNamespace: opNamespace, + operatorNamespace: testOpts.operatorNamespace, applicationsNamespace: testDSCI.Spec.ApplicationsNamespace, ctx: context.TODO(), testDsc: testDSC, testDSCI: testDSCI, + testOpts: testOpts, }, nil } // TestOdhOperator sets up the testing suite for ODH Operator. 
func TestOdhOperator(t *testing.T) { - utilruntime.Must(clientgoscheme.AddToScheme(scheme)) - utilruntime.Must(routev1.AddToScheme(scheme)) - utilruntime.Must(apiextv1.AddToScheme(scheme)) - utilruntime.Must(autoscalingv1.AddToScheme(scheme)) - utilruntime.Must(dsciv1.AddToScheme(scheme)) - utilruntime.Must(dscv1.AddToScheme(scheme)) - utilruntime.Must(featurev1.AddToScheme(scheme)) - utilruntime.Must(monitoringv1.AddToScheme(scheme)) - utilruntime.Must(ofapi.AddToScheme(scheme)) - utilruntime.Must(operatorv1.AddToScheme(scheme)) + utilruntime.Must(clientgoscheme.AddToScheme(Scheme)) + utilruntime.Must(routev1.AddToScheme(Scheme)) + utilruntime.Must(apiextv1.AddToScheme(Scheme)) + utilruntime.Must(autoscalingv1.AddToScheme(Scheme)) + utilruntime.Must(dsciv1.AddToScheme(Scheme)) + utilruntime.Must(dscv1.AddToScheme(Scheme)) + utilruntime.Must(featurev1.AddToScheme(Scheme)) + utilruntime.Must(monitoringv1.AddToScheme(Scheme)) + utilruntime.Must(ofapi.AddToScheme(Scheme)) + utilruntime.Must(operatorv1.AddToScheme(Scheme)) + utilruntime.Must(componentApi.AddToScheme(Scheme)) + utilruntime.Must(serviceApi.AddToScheme(Scheme)) log.SetLogger(zap.New(zap.UseDevMode(true))) - // individual test suites after the operator is running - if !t.Run("validate operator pod is running", testODHOperatorValidation) { - return + if testOpts.operatorControllerTest { + // individual test suites after the operator is running + if !t.Run("validate operator pod is running", testODHOperatorValidation) { + return + } } + // Run create and delete tests for all the components - t.Run("create Opendatahub components", creationTestSuite) + t.Run("create DSCI and DSC CRs", creationTestSuite) + + t.Run("components", func(t *testing.T) { + for k, v := range componentsTestSuites { + if len(testOpts.components) != 0 && !slices.Contains(testOpts.components, k) { + t.Logf("Skipping tests for component %s", k) + continue + } + + t.Run(k, v) + } + }) + + if testOpts.authControllerTest { + t.Run("test auth 
controller", authControllerTestSuite) + } // Run deletion if skipDeletion is not set - if !skipDeletion { - // this is a negative test case, since by using the positive CM('true'), even CSV gets deleted which leaves no operator pod in prow - t.Run("components should not be removed if labeled is set to 'false' on configmap", cfgMapDeletionTestSuite) + if !testOpts.skipDeletion { + if testOpts.operatorControllerTest { + // this is a negative test case, since by using the positive CM('true'), even CSV gets deleted which leaves no operator pod in prow + t.Run("components should not be removed if labeled is set to 'false' on configmap", cfgMapDeletionTestSuite) + } t.Run("delete components", deletionTestSuite) } @@ -125,10 +193,25 @@ func TestOdhOperator(t *testing.T) { func TestMain(m *testing.M) { // call flag.Parse() here if TestMain uses flags - flag.StringVar(&opNamespace, "operator-namespace", - "opendatahub-operator-system", "Namespace where the odh operator is deployed") - flag.BoolVar(&skipDeletion, "skip-deletion", false, "skip deletion of the controllers") + flag.StringVar(&testOpts.operatorNamespace, "operator-namespace", "opendatahub-operator-system", "Namespace where the odh operator is deployed") + flag.BoolVar(&testOpts.skipDeletion, "skip-deletion", false, "skip deletion of the controllers") + + flag.BoolVar(&testOpts.operatorControllerTest, "test-operator-controller", true, "run operator controller tests") + flag.BoolVar(&testOpts.webhookTest, "test-webhook", true, "run webhook tests") + + componentNames := strings.Join(maps.Keys(componentsTestSuites), ", ") + flag.Var(&testOpts.components, "test-component", "run tests for the specified component. 
valid component names are: "+componentNames) + + flag.BoolVar(&testOpts.authControllerTest, "test-auth-controller", true, "run auth controller tests") flag.Parse() + + for _, n := range testOpts.components { + if _, ok := componentsTestSuites[n]; !ok { + fmt.Printf("test-component: unknown component %s, valid values are: %s\n", n, componentNames) + os.Exit(1) + } + } + os.Exit(m.Run()) } diff --git a/tests/e2e/creation_test.go b/tests/e2e/creation_test.go index 1975f11c777..c486a20a72a 100644 --- a/tests/e2e/creation_test.go +++ b/tests/e2e/creation_test.go @@ -1,35 +1,27 @@ +//nolint:unused package e2e_test import ( "context" - "errors" "fmt" "log" "reflect" "testing" - "time" operatorv1 "github.com/openshift/api/operator/v1" "github.com/stretchr/testify/require" - autoscalingv1 "k8s.io/api/autoscaling/v1" k8serr "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/util/retry" "sigs.k8s.io/controller-runtime/pkg/client" dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" infrav1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/infrastructure/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/components" - "github.com/opendatahub-io/opendatahub-operator/v2/components/modelregistry" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/serverless" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/components/modelregistry" ) func creationTestSuite(t *testing.T) { @@ -45,14 +37,17 @@ func creationTestSuite(t *testing.T) { err = 
testCtx.testDSCICreation() require.NoError(t, err, "error creating DSCI CR") }) - - t.Run("Creation of more than one of DSCInitialization instance", func(t *testing.T) { - testCtx.testDSCIDuplication(t) - }) + if testCtx.testOpts.webhookTest { + t.Run("Creation of more than one of DSCInitialization instance", func(t *testing.T) { + testCtx.testDSCIDuplication(t) + }) + } + // Validates Servicemesh fields t.Run("Validate DSCInitialization instance", func(t *testing.T) { err = testCtx.validateDSCI() require.NoError(t, err, "error validating DSCInitialization instance") }) + t.Run("Check owned namespaces exist", func(t *testing.T) { err = testCtx.testOwnedNamespacesAllExist() require.NoError(t, err, "error owned namespace is missing") @@ -63,58 +58,25 @@ func creationTestSuite(t *testing.T) { err = testCtx.testDSCCreation(t) require.NoError(t, err, "error creating DataScienceCluster instance") }) - t.Run("Creation of more than one of DataScienceCluster instance", func(t *testing.T) { - testCtx.testDSCDuplication(t) - }) + if testCtx.testOpts.webhookTest { + t.Run("Creation of more than one of DataScienceCluster instance", func(t *testing.T) { + testCtx.testDSCDuplication(t) + }) + } - t.Run("Validate Ownerrefrences exist", func(t *testing.T) { - err = testCtx.testOwnerrefrences() - require.NoError(t, err, "error getting all DataScienceCluster's Ownerrefrences") - }) - t.Run("Validate all deployed components", func(t *testing.T) { - // this will take about 5-6 mins to complete - err = testCtx.testAllComponentCreation(t) - require.NoError(t, err, "error testing deployments for DataScienceCluster: "+testCtx.testDsc.Name) - }) - t.Run("Validate DSC Ready", func(t *testing.T) { - err = testCtx.validateDSCReady() - require.NoError(t, err, "DataScienceCluster instance is not Ready") - }) // Kserve - t.Run("Validate Knative resoruce", func(t *testing.T) { + t.Run("Validate Knative resource", func(t *testing.T) { err = testCtx.validateDSC() - require.NoError(t, err, "error 
getting Knatvie resrouce as part of DataScienceCluster validation") - }) - t.Run("Validate default certs available", func(t *testing.T) { - // move it to be part of check with kserve since it is using serving's secret - err = testCtx.testDefaultCertsAvailable() - require.NoError(t, err, "error getting default cert secrets for Kserve") + require.NoError(t, err, "error getting Knative resource as part of DataScienceCluster validation") }) // ModelReg - t.Run("Validate model registry cert config", func(t *testing.T) { - err = testCtx.validateModelRegistryConfig() - require.NoError(t, err, "error validating ModelRegistry config") - }) - t.Run("Validate default model registry cert available", func(t *testing.T) { - err = testCtx.testDefaultModelRegistryCertAvailable() - require.NoError(t, err, "error getting default cert secret for ModelRegistry") - }) - t.Run("Validate model registry servicemeshmember available", func(t *testing.T) { - err = testCtx.testMRServiceMeshMember() - require.NoError(t, err, "error getting servicemeshmember for Model Registry") - }) - - // reconcile - t.Run("Validate Controller reconcile", func(t *testing.T) { - // only test Dashboard component for now - err = testCtx.testUpdateComponentReconcile() - require.NoError(t, err, "error testing updates for DSC managed resource") - }) - t.Run("Validate Component Enabled field", func(t *testing.T) { - err = testCtx.testUpdateDSCComponentEnabled() - require.NoError(t, err, "error testing component enabled field") - }) + if testCtx.testOpts.webhookTest { + t.Run("Validate model registry config", func(t *testing.T) { + err = testCtx.validateModelRegistryConfig() + require.NoError(t, err, "error validating ModelRegistry config") + }) + } }) } @@ -179,6 +141,7 @@ func (tc *testContext) testDSCCreation(t *testing.T) error { if creationErr != nil { log.Printf("error creating DSC resource %v: %v, trying again", tc.testDsc.Name, creationErr) + return false, nil } return true, nil @@ -208,7 +171,7 @@ func 
waitDSCReady(tc *testContext) error { if err != nil { return false, err } - return dsc.Status.Phase == "Ready", nil + return dsc.Status.Phase == readyStatus, nil }) if err != nil { @@ -225,19 +188,24 @@ func (tc *testContext) requireInstalled(t *testing.T, gvk schema.GroupVersionKin err := tc.customClient.List(tc.ctx, list) require.NoErrorf(t, err, "Could not get %s list", gvk.Kind) + require.NotEmptyf(t, list.Items, "%s has not been installed", gvk.Kind) } func (tc *testContext) testDuplication(t *testing.T, gvk schema.GroupVersionKind, o any) { t.Helper() tc.requireInstalled(t, gvk) + u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(o) require.NoErrorf(t, err, "Could not unstructure %s", gvk.Kind) + obj := &unstructured.Unstructured{ Object: u, } obj.SetGroupVersionKind(gvk) + err = tc.customClient.Create(tc.ctx, obj) + require.Errorf(t, err, "Could create second %s", gvk.Kind) } @@ -263,78 +231,75 @@ func (tc *testContext) testDSCDuplication(t *testing.T) { //nolint:thelper tc.testDuplication(t, gvk, dup) } -func (tc *testContext) testAllComponentCreation(t *testing.T) error { //nolint:funlen,thelper - // Validate all components are in Ready state - - dscLookupKey := types.NamespacedName{Name: tc.testDsc.Name} - createdDSC := &dscv1.DataScienceCluster{} - - // Wait for components to get deployed - time.Sleep(1 * time.Minute) - - err := tc.customClient.Get(tc.ctx, dscLookupKey, createdDSC) - if err != nil { - return fmt.Errorf("error getting DataScienceCluster instance :%v", tc.testDsc.Name) - } - tc.testDsc = createdDSC - - components, err := tc.testDsc.GetComponents() - if err != nil { - return err - } - - for _, c := range components { - name := c.GetComponentName() - t.Run("Validate "+name, func(t *testing.T) { - t.Parallel() - err = tc.testComponentCreation(c) - require.NoError(t, err, "error validating component %s when %v", name, c.GetManagementState()) - }) - } - return nil -} - -func (tc *testContext) testComponentCreation(component 
components.ComponentInterface) error { - err := wait.PollUntilContextTimeout(tc.ctx, generalRetryInterval, componentReadyTimeout, true, func(ctx context.Context) (bool, error) { - // TODO: see if checking deployment is a good test, CF does not create deployment - var componentName = component.GetComponentName() - if component.GetComponentName() == "dashboard" { // special case for RHOAI dashboard name - componentName = "rhods-dashboard" - } - - appList, err := tc.kubeClient.AppsV1().Deployments(tc.applicationsNamespace).List(ctx, metav1.ListOptions{ - - LabelSelector: labels.ODH.Component(componentName), - }) - if err != nil { - log.Printf("error listing component deployments :%v", err) - return false, fmt.Errorf("error listing component deployments :%w", err) - } - if len(appList.Items) != 0 { - if component.GetManagementState() == operatorv1.Removed { - // deployment exists for removed component, retrying - return false, nil - } - - for _, deployment := range appList.Items { - if deployment.Status.ReadyReplicas < 1 { - log.Printf("waiting for component deployments to be in Ready state: %s", deployment.Name) - return false, nil - } - } - return true, nil - } - // when no deployment is found - // It's ok not to have deployements for unmanaged component - if component.GetManagementState() != operatorv1.Managed { - return true, nil - } - - return false, nil - }) - - return err -} +// TODO: cleanup +// func (tc *testContext) testAllComponentCreation(t *testing.T) error { //nolint:funlen,thelper +// // Validate all components are in Ready state + +// dscLookupKey := types.NamespacedName{Name: tc.testDsc.Name} +// createdDSC := &dscv1.DataScienceCluster{} + +// // Wait for components to get deployed +// time.Sleep(1 * time.Minute) + +// err := tc.customClient.Get(tc.ctx, dscLookupKey, createdDSC) +// if err != nil { +// return fmt.Errorf("error getting DataScienceCluster instance :%v", tc.testDsc.Name) +// } +// tc.testDsc = createdDSC + +// components, err := 
tc.testDsc.GetComponents() +// if err != nil { +// return err +// } + +// for _, c := range components { +// c := c +// name := c.GetComponentName() +// t.Run("Validate "+name, func(t *testing.T) { +// t.Parallel() +// err = tc.testComponentCreation(c) +// require.NoError(t, err, "error validating component %s when %v", name, c.GetManagementState()) +// }) +// } +// return nil +// } + +// TODO: cleanup +// func (tc *testContext) testComponentCreation(component components.ComponentInterface) error { +// err := wait.PollUntilContextTimeout(tc.ctx, generalRetryInterval, componentReadyTimeout, true, func(ctx context.Context) (bool, error) { +// // TODO: see if checking deployment is a good test, CF does not create deployment +// appList, err := tc.kubeClient.AppsV1().Deployments(tc.applicationsNamespace).List(ctx, metav1.ListOptions{ +// LabelSelector: labels.ODH.Component(component.GetComponentName()), +// }) +// if err != nil { +// log.Printf("error listing component deployments :%v", err) +// return false, fmt.Errorf("error listing component deployments :%w", err) +// } +// if len(appList.Items) != 0 { +// if component.GetManagementState() == operatorv1.Removed { +// // deployment exists for removed component, retrying +// return false, nil +// } + +// for _, deployment := range appList.Items { +// if deployment.Status.ReadyReplicas < 1 { +// log.Printf("waiting for component deployments to be in Ready state: %s", deployment.Name) +// return false, nil +// } +// } +// return true, nil +// } +// // when no deployment is found +// // It's ok not to have deployements for unmanaged component +// if component.GetManagementState() != operatorv1.Managed { +// return true, nil +// } + +// return false, nil +// }) + +// return err +// } func (tc *testContext) validateDSCI() error { // expected @@ -385,228 +350,6 @@ func (tc *testContext) validateDSC() error { return nil } -func (tc *testContext) testOwnerrefrences() error { - // Test Dashboard component - if 
tc.testDsc.Spec.Components.Dashboard.ManagementState == operatorv1.Managed { - appDeployments, err := tc.kubeClient.AppsV1().Deployments(tc.applicationsNamespace).List(tc.ctx, metav1.ListOptions{ - LabelSelector: labels.ODH.Component("rhods-dashboard"), - }) - if err != nil { - return fmt.Errorf("error listing component deployments %w", err) - } - // test any one deployment for ownerreference - if len(appDeployments.Items) != 0 && appDeployments.Items[0].OwnerReferences[0].Kind != "DataScienceCluster" { - return fmt.Errorf("expected ownerreference not found. Got ownereferrence: %v", - appDeployments.Items[0].OwnerReferences) - } - } - return nil -} - -func (tc *testContext) testDefaultCertsAvailable() error { - // Get expected cert secrets - defaultIngressCtrl, err := cluster.FindAvailableIngressController(tc.ctx, tc.customClient) - if err != nil { - return fmt.Errorf("failed to get ingress controller: %w", err) - } - - defaultIngressCertName := cluster.GetDefaultIngressCertSecretName(defaultIngressCtrl) - - defaultIngressSecret, err := cluster.GetSecret(tc.ctx, tc.customClient, "openshift-ingress", defaultIngressCertName) - if err != nil { - return err - } - - // Verify secret from Control Plane namespace matches the default cert secret - defaultSecretName := tc.testDsc.Spec.Components.Kserve.Serving.IngressGateway.Certificate.SecretName - if defaultSecretName == "" { - defaultSecretName = serverless.DefaultCertificateSecretName - } - ctrlPlaneSecret, err := cluster.GetSecret(tc.ctx, tc.customClient, tc.testDSCI.Spec.ServiceMesh.ControlPlane.Namespace, - defaultSecretName) - if err != nil { - return err - } - - if ctrlPlaneSecret.Type != defaultIngressSecret.Type { - return fmt.Errorf("wrong type of cert secret is created for %v. 
Expected %v, Got %v", defaultSecretName, defaultIngressSecret.Type, ctrlPlaneSecret.Type) - } - - if string(defaultIngressSecret.Data["tls.crt"]) != string(ctrlPlaneSecret.Data["tls.crt"]) { - return fmt.Errorf("default cert secret not expected. Epected %v, Got %v", defaultIngressSecret.Data["tls.crt"], ctrlPlaneSecret.Data["tls.crt"]) - } - - if string(defaultIngressSecret.Data["tls.key"]) != string(ctrlPlaneSecret.Data["tls.key"]) { - return fmt.Errorf("default cert secret not expected. Epected %v, Got %v", defaultIngressSecret.Data["tls.crt"], ctrlPlaneSecret.Data["tls.crt"]) - } - return nil -} - -func (tc *testContext) testDefaultModelRegistryCertAvailable() error { - // return if MR is not set to Managed - if tc.testDsc.Spec.Components.ModelRegistry.ManagementState != operatorv1.Managed { - return nil - } - - // Get expected cert secrets - defaultIngressCtrl, err := cluster.FindAvailableIngressController(tc.ctx, tc.customClient) - if err != nil { - return fmt.Errorf("failed to get ingress controller: %w", err) - } - - defaultIngressCertName := cluster.GetDefaultIngressCertSecretName(defaultIngressCtrl) - - defaultIngressSecret, err := cluster.GetSecret(tc.ctx, tc.customClient, "openshift-ingress", defaultIngressCertName) - if err != nil { - return err - } - - // Verify secret from Control Plane namespace matches the default MR cert secret - defaultMRSecretName := modelregistry.DefaultModelRegistryCert - defaultMRSecret, err := cluster.GetSecret(tc.ctx, tc.customClient, tc.testDSCI.Spec.ServiceMesh.ControlPlane.Namespace, - defaultMRSecretName) - if err != nil { - return err - } - - if defaultMRSecret.Type != defaultIngressSecret.Type { - return fmt.Errorf("wrong type of MR cert secret is created for %v. Expected %v, Got %v", defaultMRSecretName, defaultIngressSecret.Type, defaultMRSecret.Type) - } - - if string(defaultIngressSecret.Data["tls.crt"]) != string(defaultMRSecret.Data["tls.crt"]) { - return fmt.Errorf("default MR cert secret not expected. 
Epected %v, Got %v", defaultIngressSecret.Data["tls.crt"], defaultMRSecret.Data["tls.crt"]) - } - - if string(defaultIngressSecret.Data["tls.key"]) != string(defaultMRSecret.Data["tls.key"]) { - return fmt.Errorf("default MR cert secret not expected. Epected %v, Got %v", defaultIngressSecret.Data["tls.crt"], defaultMRSecret.Data["tls.crt"]) - } - return nil -} - -func (tc *testContext) testMRServiceMeshMember() error { - if tc.testDsc.Spec.Components.ModelRegistry.ManagementState != operatorv1.Managed { - return nil - } - - // Get unstructured ServiceMeshMember - smm := unstructured.Unstructured{} - smm.SetAPIVersion("maistra.io/v1") - smm.SetKind("ServiceMeshMember") - err := tc.customClient.Get(tc.ctx, - client.ObjectKey{Namespace: modelregistry.DefaultModelRegistriesNamespace, Name: "default"}, &smm) - if err != nil { - return fmt.Errorf("failed to get servicemesh member: %w", err) - } - return nil -} - -func (tc *testContext) testUpdateComponentReconcile() error { - // Test Updating Dashboard Replicas - appDeployments, err := tc.kubeClient.AppsV1().Deployments(tc.applicationsNamespace).List(tc.ctx, metav1.ListOptions{ - LabelSelector: labels.ODH.Component("rhods-dashboard"), - }) - if err != nil { - return err - } - - if len(appDeployments.Items) != 1 { - return fmt.Errorf("error getting deployment for component %s", tc.testDsc.Spec.Components.Dashboard.GetComponentName()) - } - - const expectedReplica int32 = 3 - - testDeployment := appDeployments.Items[0] - patchedReplica := &autoscalingv1.Scale{ - ObjectMeta: metav1.ObjectMeta{ - Name: testDeployment.Name, - Namespace: testDeployment.Namespace, - }, - Spec: autoscalingv1.ScaleSpec{ - Replicas: expectedReplica, - }, - Status: autoscalingv1.ScaleStatus{}, - } - updatedDep, err := tc.kubeClient.AppsV1().Deployments(tc.applicationsNamespace).UpdateScale(tc.ctx, testDeployment.Name, patchedReplica, metav1.UpdateOptions{}) - if err != nil { - return fmt.Errorf("error patching component resources : %w", err) - } - 
if updatedDep.Spec.Replicas != patchedReplica.Spec.Replicas { - return fmt.Errorf("failed to patch replicas : expect to be %v but got %v", patchedReplica.Spec.Replicas, updatedDep.Spec.Replicas) - } - - // Sleep for 40 seconds to allow the operator to reconcile - // we expect it should not revert back to original value because of AllowList - time.Sleep(4 * generalRetryInterval) - reconciledDep, err := tc.kubeClient.AppsV1().Deployments(tc.applicationsNamespace).Get(tc.ctx, testDeployment.Name, metav1.GetOptions{}) - if err != nil { - return fmt.Errorf("error getting component resource after reconcile: %w", err) - } - if *reconciledDep.Spec.Replicas != expectedReplica { - return fmt.Errorf("failed to revert back replicas : expect to be %v but got %v", expectedReplica, *reconciledDep.Spec.Replicas) - } - - return nil -} - -func (tc *testContext) testUpdateDSCComponentEnabled() error { - // Test Updating dashboard to be disabled - var dashboardDeploymentName string - - if tc.testDsc.Spec.Components.Dashboard.ManagementState == operatorv1.Managed { - appDeployments, err := tc.kubeClient.AppsV1().Deployments(tc.applicationsNamespace).List(tc.ctx, metav1.ListOptions{ - LabelSelector: labels.ODH.Component("rhods-dashboard"), - }) - if err != nil { - return fmt.Errorf("error getting enabled component %v", "rhods-dashboard") - } - if len(appDeployments.Items) > 0 { - dashboardDeploymentName = appDeployments.Items[0].Name - if appDeployments.Items[0].Status.ReadyReplicas == 0 { - return fmt.Errorf("error getting enabled component: %s its deployment 'ReadyReplicas'", dashboardDeploymentName) - } - } - } else { - return errors.New("dashboard spec should be in 'enabled: true' state in order to perform test") - } - - // Disable component Dashboard - err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - // refresh the instance in case it was updated during the reconcile - err := tc.customClient.Get(tc.ctx, types.NamespacedName{Name: tc.testDsc.Name}, tc.testDsc) - if 
err != nil { - return fmt.Errorf("error getting resource %w", err) - } - // Disable the Component - tc.testDsc.Spec.Components.Dashboard.ManagementState = operatorv1.Removed - - // Try to update - err = tc.customClient.Update(tc.ctx, tc.testDsc) - // Return err itself here (not wrapped inside another error) - // so that RetryOnConflict can identify it correctly. - if err != nil { - return fmt.Errorf("error updating component from 'enabled: true' to 'enabled: false': %w", err) - } - return nil - }) - if err != nil { - return fmt.Errorf("error after retry %w", err) - } - - // Sleep for 80 seconds to allow the operator to reconcile - time.Sleep(8 * generalRetryInterval) - _, err = tc.kubeClient.AppsV1().Deployments(tc.applicationsNamespace).Get(tc.ctx, dashboardDeploymentName, metav1.GetOptions{}) - if err != nil { - if k8serr.IsNotFound(err) { - return nil // correct result: should not find deployment after we disable it already - } - return fmt.Errorf("error getting component resource after reconcile: %w", err) - } - return fmt.Errorf("component %v is disabled, should not get its deployment %v from NS %v any more", - tc.testDsc.Spec.Components.Dashboard.GetComponentName(), - dashboardDeploymentName, - tc.applicationsNamespace) -} - const testNs = "test-model-registries" func (tc *testContext) validateModelRegistryConfig() error { diff --git a/tests/e2e/dashboard_test.go b/tests/e2e/dashboard_test.go new file mode 100644 index 00000000000..56d6f1007a3 --- /dev/null +++ b/tests/e2e/dashboard_test.go @@ -0,0 +1,85 @@ +package e2e_test + +import ( + "strings" + "testing" + + "github.com/rs/xid" + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" + 
"github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/annotations" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/matchers/jq" + + . "github.com/onsi/gomega" +) + +func dashboardTestSuite(t *testing.T) { + t.Helper() + + ct, err := NewComponentTestCtx(&componentApi.Dashboard{}) + require.NoError(t, err) + + componentCtx := DashboardTestCtx{ + ComponentTestCtx: ct, + } + + t.Run("Validate component enabled", componentCtx.ValidateComponentEnabled) + t.Run("Validate operands have OwnerReferences", componentCtx.ValidateOperandsOwnerReferences) + t.Run("Validate update operand resources", componentCtx.ValidateUpdateDeploymentsResources) + t.Run("Validate dynamically watches operands", componentCtx.validateOperandsDynamicallyWatchedResources) + t.Run("Validate CRDs reinstated", componentCtx.validateCRDReinstated) + t.Run("Validate component disabled", componentCtx.ValidateComponentDisabled) +} + +type DashboardTestCtx struct { + *ComponentTestCtx +} + +func (c *DashboardTestCtx) validateOperandsDynamicallyWatchedResources(t *testing.T) { + g := c.NewWithT(t) + + newPt := xid.New().String() + oldPt := "" + + g.Update( + gvk.OdhApplication, + types.NamespacedName{Name: "jupyter", Namespace: c.ApplicationNamespace}, + func(obj *unstructured.Unstructured) error { + oldPt = resources.SetAnnotation(obj, annotations.PlatformType, newPt) + return nil + }, + ).Eventually().Should( + jq.Match(`.metadata.annotations."%s" == "%s"`, annotations.PlatformType, newPt), + ) + + g.List( + gvk.OdhApplication, + client.MatchingLabels{labels.PlatformPartOf: strings.ToLower(gvk.Dashboard.Kind)}, + ).Eventually().Should(And( + HaveEach( + jq.Match(`.metadata.annotations."%s" == "%s"`, annotations.PlatformType, oldPt), + ), + )) +} + +func (c *DashboardTestCtx) validateCRDReinstated(t *testing.T) { + crds := []string{ + 
"acceleratorprofiles.dashboard.opendatahub.io", + "hardwareprofiles.dashboard.opendatahub.io", + "odhapplications.dashboard.opendatahub.io", + "odhdocuments.dashboard.opendatahub.io", + } + + for _, crd := range crds { + t.Run(crd, func(t *testing.T) { + c.ValidateCRDReinstated(t, crd) + }) + } +} diff --git a/tests/e2e/datasciencepipelines_test.go b/tests/e2e/datasciencepipelines_test.go new file mode 100644 index 00000000000..b00f41e8133 --- /dev/null +++ b/tests/e2e/datasciencepipelines_test.go @@ -0,0 +1,29 @@ +package e2e_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" +) + +func dataSciencePipelinesTestSuite(t *testing.T) { + t.Helper() + + ct, err := NewComponentTestCtx(&componentApi.DataSciencePipelines{}) + require.NoError(t, err) + + componentCtx := DataSciencePipelinesTestCtx{ + ComponentTestCtx: ct, + } + + t.Run("Validate component enabled", componentCtx.ValidateComponentEnabled) + t.Run("Validate operands have OwnerReferences", componentCtx.ValidateOperandsOwnerReferences) + t.Run("Validate update operand resources", componentCtx.ValidateUpdateDeploymentsResources) + t.Run("Validate component disabled", componentCtx.ValidateComponentDisabled) +} + +type DataSciencePipelinesTestCtx struct { + *ComponentTestCtx +} diff --git a/tests/e2e/deletion_test.go b/tests/e2e/deletion_test.go index 837b8bf7acb..fe6169998e7 100644 --- a/tests/e2e/deletion_test.go +++ b/tests/e2e/deletion_test.go @@ -1,43 +1,28 @@ package e2e_test import ( - "context" "fmt" - "log" "testing" "github.com/stretchr/testify/require" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8serr "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" "sigs.k8s.io/controller-runtime/pkg/client" dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" dsciv1 
"github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/components" - "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" ) func deletionTestSuite(t *testing.T) { testCtx, err := NewTestContext() require.NoError(t, err) - // pre-check before deletion - t.Run("Ensure all components created", func(t *testing.T) { - err = testCtx.testAllComponentCreation(t) - require.NoError(t, err, "Not all components are created") - }) - t.Run(testCtx.testDsc.Name, func(t *testing.T) { t.Run("Deletion DSC instance", func(t *testing.T) { err = testCtx.testDeletionExistDSC() require.NoError(t, err, "Error to delete DSC instance") }) - t.Run("Check all component resource are deleted", func(t *testing.T) { - err = testCtx.testAllApplicationDeletion(t) - require.NoError(t, err, "Should not found component exist") - }) + t.Run("Deletion DSCI instance", func(t *testing.T) { err = testCtx.testDeletionExistDSCI() require.NoError(t, err, "Error to delete DSCI instance") @@ -57,54 +42,11 @@ func (tc *testContext) testDeletionExistDSC() error { if dscerr != nil { return fmt.Errorf("error deleting DSC instance %s: %w", expectedDSC.Name, dscerr) } - } else if !errors.IsNotFound(err) { + } else if !k8serr.IsNotFound(err) { if err != nil { return fmt.Errorf("could not find DSC instance to delete: %w", err) } } - return nil -} - -func (tc *testContext) testComponentDeletion(component components.ComponentInterface) error { - // Deletion of Deployments - if err := wait.PollUntilContextTimeout(tc.ctx, generalRetryInterval, componentDeletionTimeout, true, func(ctx context.Context) (bool, error) { - var componentName = component.GetComponentName() - if component.GetComponentName() == "dashboard" { // special case for RHOAI dashboard name - componentName = "rhods-dashboard" - } - - appList, err := tc.kubeClient.AppsV1().Deployments(tc.applicationsNamespace).List(ctx, metav1.ListOptions{ - LabelSelector: 
labels.ODH.Component(componentName), - }) - if err != nil { - log.Printf("error getting component deployments :%v. Trying again...", err) - - return false, err - } - - return len(appList.Items) == 0, nil - }); err != nil { - return fmt.Errorf("error to find component still exist: %v", component.GetComponentName()) - } - - return nil -} - -func (tc *testContext) testAllApplicationDeletion(t *testing.T) error { //nolint:thelper - // Deletion all listed components' deployments - - components, err := tc.testDsc.GetComponents() - if err != nil { - return err - } - - for _, c := range components { - t.Run("Delete "+c.GetComponentName(), func(t *testing.T) { - t.Parallel() - err = tc.testComponentDeletion(c) - require.NoError(t, err) - }) - } return nil } @@ -123,10 +65,11 @@ func (tc *testContext) testDeletionExistDSCI() error { if dscierr != nil { return fmt.Errorf("error deleting DSCI instance %s: %w", expectedDSCI.Name, dscierr) } - } else if !errors.IsNotFound(err) { + } else if !k8serr.IsNotFound(err) { if err != nil { return fmt.Errorf("could not find DSCI instance to delete :%w", err) } } + return nil } diff --git a/tests/e2e/helper_test.go b/tests/e2e/helper_test.go index 08ccc4d9590..387fa7fe409 100644 --- a/tests/e2e/helper_test.go +++ b/tests/e2e/helper_test.go @@ -14,28 +14,21 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - "k8s.io/apimachinery/pkg/api/errors" + k8serr "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/opendatahub-io/opendatahub-operator/v2/apis/common" + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" dsciv1 
"github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" infrav1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/infrastructure/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/components" - "github.com/opendatahub-io/opendatahub-operator/v2/components/codeflare" - "github.com/opendatahub-io/opendatahub-operator/v2/components/dashboard" - "github.com/opendatahub-io/opendatahub-operator/v2/components/datasciencepipelines" - "github.com/opendatahub-io/opendatahub-operator/v2/components/kserve" - "github.com/opendatahub-io/opendatahub-operator/v2/components/kueue" - "github.com/opendatahub-io/opendatahub-operator/v2/components/modelmeshserving" - "github.com/opendatahub-io/opendatahub-operator/v2/components/modelregistry" - "github.com/opendatahub-io/opendatahub-operator/v2/components/ray" - "github.com/opendatahub-io/opendatahub-operator/v2/components/trainingoperator" - "github.com/opendatahub-io/opendatahub-operator/v2/components/trustyai" - "github.com/opendatahub-io/opendatahub-operator/v2/components/workbenches" + serviceApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/services/v1alpha1" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/components/modelregistry" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" ) const ( @@ -53,13 +46,16 @@ const ( dscCreationTimeout = 20 * time.Second // time required to wait till DSC is created. 
generalRetryInterval = 10 * time.Second generalWaitTimeout = 2 * time.Minute + generalPollInterval = 1 * time.Second + readyStatus = "Ready" + dscKind = "DataScienceCluster" ) func (tc *testContext) waitForOperatorDeployment(name string, replicas int32) error { err := wait.PollUntilContextTimeout(tc.ctx, generalRetryInterval, operatorReadyTimeout, false, func(ctx context.Context) (bool, error) { controllerDeployment, err := tc.kubeClient.AppsV1().Deployments(tc.operatorNamespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { - if errors.IsNotFound(err) { + if k8serr.IsNotFound(err) { return false, nil } log.Printf("Failed to get %s controller deployment", name) @@ -79,19 +75,43 @@ func (tc *testContext) waitForOperatorDeployment(name string, replicas int32) er return err } +func (tc *testContext) getComponentDeployments(componentGVK schema.GroupVersionKind) ([]appsv1.Deployment, error) { + deployments := appsv1.DeploymentList{} + err := tc.customClient.List( + tc.ctx, + &deployments, + client.InNamespace( + tc.applicationsNamespace, + ), + client.MatchingLabels{ + labels.PlatformPartOf: strings.ToLower(componentGVK.Kind), + }, + ) + + if err != nil { + return nil, err + } + + return deployments.Items, nil +} + func setupDSCICR(name string) *dsciv1.DSCInitialization { dsciTest := &dsciv1.DSCInitialization{ ObjectMeta: metav1.ObjectMeta{ Name: name, }, Spec: dsciv1.DSCInitializationSpec{ - ApplicationsNamespace: "redhat-ods-applications", - Monitoring: dsciv1.Monitoring{ - ManagementState: "Managed", - Namespace: "redhat-ods-monitoring", + ApplicationsNamespace: "opendatahub", + Monitoring: serviceApi.DSCMonitoring{ + ManagementSpec: common.ManagementSpec{ + ManagementState: operatorv1.Removed, + }, + MonitoringCommonSpec: serviceApi.MonitoringCommonSpec{ + Namespace: "opendatahub", + }, }, TrustedCABundle: &dsciv1.TrustedCABundleSpec{ - ManagementState: "Managed", + ManagementState: operatorv1.Managed, CustomCABundle: "", }, ServiceMesh: 
&infrav1.ServiceMeshSpec{ @@ -100,7 +120,7 @@ func setupDSCICR(name string) *dsciv1.DSCInitialization { Name: "data-science-smcp", Namespace: "istio-system", }, - ManagementState: "Managed", + ManagementState: operatorv1.Managed, }, }, } @@ -115,61 +135,73 @@ func setupDSCInstance(name string) *dscv1.DataScienceCluster { Spec: dscv1.DataScienceClusterSpec{ Components: dscv1.Components{ // keep dashboard as enabled, because other test is rely on this - Dashboard: dashboard.Dashboard{ - Component: components.Component{ - ManagementState: operatorv1.Managed, + Dashboard: componentApi.DSCDashboard{ + ManagementSpec: common.ManagementSpec{ + ManagementState: operatorv1.Removed, }, }, - Workbenches: workbenches.Workbenches{ - Component: components.Component{ - ManagementState: operatorv1.Managed, + Workbenches: componentApi.DSCWorkbenches{ + ManagementSpec: common.ManagementSpec{ + ManagementState: operatorv1.Removed, }, }, - ModelMeshServing: modelmeshserving.ModelMeshServing{ - Component: components.Component{ - ManagementState: operatorv1.Managed, + ModelMeshServing: componentApi.DSCModelMeshServing{ + ManagementSpec: common.ManagementSpec{ + ManagementState: operatorv1.Removed, }, }, - DataSciencePipelines: datasciencepipelines.DataSciencePipelines{ - Component: components.Component{ - ManagementState: operatorv1.Managed, + DataSciencePipelines: componentApi.DSCDataSciencePipelines{ + ManagementSpec: common.ManagementSpec{ + ManagementState: operatorv1.Removed, }, }, - Kserve: kserve.Kserve{ - Component: components.Component{ - ManagementState: operatorv1.Managed, + Kserve: componentApi.DSCKserve{ + ManagementSpec: common.ManagementSpec{ + ManagementState: operatorv1.Removed, }, - Serving: infrav1.ServingSpec{ - ManagementState: operatorv1.Managed, + KserveCommonSpec: componentApi.KserveCommonSpec{ + DefaultDeploymentMode: componentApi.Serverless, + Serving: infrav1.ServingSpec{ + ManagementState: operatorv1.Managed, + Name: "knative-serving", + IngressGateway: 
infrav1.GatewaySpec{ + Certificate: infrav1.CertificateSpec{ + Type: infrav1.OpenshiftDefaultIngress, + }, + }, + }, }, }, - CodeFlare: codeflare.CodeFlare{ - Component: components.Component{ - ManagementState: operatorv1.Managed, + CodeFlare: componentApi.DSCCodeFlare{ + ManagementSpec: common.ManagementSpec{ + ManagementState: operatorv1.Removed, }, }, - Ray: ray.Ray{ - Component: components.Component{ - ManagementState: operatorv1.Managed, + Ray: componentApi.DSCRay{ + ManagementSpec: common.ManagementSpec{ + ManagementState: operatorv1.Removed, }, }, - Kueue: kueue.Kueue{ - Component: components.Component{ - ManagementState: operatorv1.Managed, + Kueue: componentApi.DSCKueue{ + ManagementSpec: common.ManagementSpec{ + ManagementState: operatorv1.Removed, }, }, - TrustyAI: trustyai.TrustyAI{ - Component: components.Component{ - ManagementState: operatorv1.Managed, + TrustyAI: componentApi.DSCTrustyAI{ + ManagementSpec: common.ManagementSpec{ + ManagementState: operatorv1.Removed, }, }, - ModelRegistry: modelregistry.ModelRegistry{ - Component: components.Component{ + ModelRegistry: componentApi.DSCModelRegistry{ + ManagementSpec: common.ManagementSpec{ ManagementState: operatorv1.Removed, }, + ModelRegistryCommonSpec: componentApi.ModelRegistryCommonSpec{ + RegistriesNamespace: modelregistry.DefaultModelRegistriesNamespace, + }, }, - TrainingOperator: trainingoperator.TrainingOperator{ - Component: components.Component{ + TrainingOperator: componentApi.DSCTrainingOperator{ + ManagementSpec: common.ManagementSpec{ ManagementState: operatorv1.Removed, }, }, @@ -204,7 +236,7 @@ func (tc *testContext) validateCRD(crdName string) error { err := wait.PollUntilContextTimeout(tc.ctx, generalRetryInterval, crdReadyTimeout, false, func(ctx context.Context) (bool, error) { err := tc.customClient.Get(ctx, obj, crd) if err != nil { - if errors.IsNotFound(err) { + if k8serr.IsNotFound(err) { return false, nil } log.Printf("Failed to get CRD %s", crdName) @@ -250,7 +282,7 @@ 
func getCSV(ctx context.Context, cli client.Client, name string, namespace strin } } - return nil, errors.NewNotFound(schema.GroupResource{}, name) + return nil, k8serr.NewNotFound(schema.GroupResource{}, name) } // Use existing or create a new one. @@ -260,8 +292,9 @@ func getSubscription(tc *testContext, name string, ns string) (*ofapi.Subscripti sub := setupSubscription(name, ns) if err := tc.customClient.Create(tc.ctx, sub); err != nil { - return nil, fmt.Errorf("error creating subscription %s: %w", name, err) + return nil, fmt.Errorf("error creating subscription: %w", err) } + return sub, nil } @@ -272,7 +305,7 @@ func getSubscription(tc *testContext, name string, ns string) (*ofapi.Subscripti } err := tc.customClient.Get(tc.ctx, key, sub) - if errors.IsNotFound(err) { + if k8serr.IsNotFound(err) { return createSubscription(name, ns) } if err != nil { @@ -286,7 +319,7 @@ func waitCSV(tc *testContext, name string, ns string) error { interval := generalRetryInterval isReady := func(ctx context.Context) (bool, error) { csv, err := getCSV(ctx, tc.customClient, name, ns) - if errors.IsNotFound(err) { + if k8serr.IsNotFound(err) { return false, nil } if err != nil { @@ -409,7 +442,7 @@ func ensureServicemeshOperators(t *testing.T, tc *testContext) error { //nolint: }(op) } - for range len(ops) { + for range ops { err := <-c errors = multierror.Append(errors, err) } diff --git a/tests/e2e/kserve_test.go b/tests/e2e/kserve_test.go new file mode 100644 index 00000000000..3528f0e274f --- /dev/null +++ b/tests/e2e/kserve_test.go @@ -0,0 +1,101 @@ +package e2e_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/components/modelcontroller" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + 
"github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/serverless" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/matchers/jq" + + . "github.com/onsi/gomega" +) + +func kserveTestSuite(t *testing.T) { + t.Helper() + + ct, err := NewComponentTestCtx(&componentApi.Kserve{}) + require.NoError(t, err) + + componentCtx := KserveTestCtx{ + ComponentTestCtx: ct, + } + + t.Run("Validate component enabled", componentCtx.ValidateComponentEnabled) + t.Run("Validate component spec", componentCtx.validateSpec) + t.Run("Validate model controller", componentCtx.validateModelControllerInstance) + t.Run("Validate operands have OwnerReferences", componentCtx.ValidateOperandsOwnerReferences) + t.Run("Validate default certs", componentCtx.validateDefaultCertsAvailable) + t.Run("Validate update operand resources", componentCtx.ValidateUpdateDeploymentsResources) + t.Run("Validate component disabled", componentCtx.ValidateComponentDisabled) +} + +type KserveTestCtx struct { + *ComponentTestCtx +} + +func (c *KserveTestCtx) validateSpec(t *testing.T) { + g := c.NewWithT(t) + + dsc, err := c.GetDSC() + g.Expect(err).NotTo(HaveOccurred()) + + g.List(gvk.Kserve).Eventually().Should(And( + HaveLen(1), + HaveEach(And( + jq.Match(`.spec.defaultDeploymentMode == "%s"`, dsc.Spec.Components.Kserve.DefaultDeploymentMode), + jq.Match(`.spec.nim.managementState == "%s"`, dsc.Spec.Components.Kserve.NIM.ManagementState), + jq.Match(`.spec.serving.managementState == "%s"`, dsc.Spec.Components.Kserve.Serving.ManagementState), + jq.Match(`.spec.serving.name == "%s"`, dsc.Spec.Components.Kserve.Serving.Name), + jq.Match(`.spec.serving.ingressGateway.certificate.type == "%s"`, dsc.Spec.Components.Kserve.Serving.IngressGateway.Certificate.Type), + )), + )) +} + +func (c *KserveTestCtx) validateModelControllerInstance(t *testing.T) { + g := c.NewWithT(t) + + g.List(gvk.ModelController).Eventually().Should(And( 
+ HaveLen(1), + HaveEach(And( + jq.Match(`.metadata.ownerReferences[0].kind == "%s"`, gvk.DataScienceCluster.Kind), + jq.Match(`.status.phase == "%s"`, readyStatus), + )), + )) + + g.List(gvk.DataScienceCluster).Eventually().Should(And( + HaveLen(1), + HaveEach(And( + jq.Match(`.status.conditions[] | select(.type == "%s") | .status == "%s"`, modelcontroller.ReadyConditionType, metav1.ConditionTrue), + )), + )) +} + +func (c *KserveTestCtx) validateDefaultCertsAvailable(t *testing.T) { + g := c.NewWithT(t) + + defaultIngressSecret, err := cluster.FindDefaultIngressSecret(g.Context(), g.Client()) + g.Expect(err).ToNot(HaveOccurred()) + + dsc, err := c.GetDSC() + g.Expect(err).ToNot(HaveOccurred()) + + dsci, err := c.GetDSCI() + g.Expect(err).ToNot(HaveOccurred()) + + defaultSecretName := dsc.Spec.Components.Kserve.Serving.IngressGateway.Certificate.SecretName + if defaultSecretName == "" { + defaultSecretName = serverless.DefaultCertificateSecretName + } + + ctrlPlaneSecret, err := cluster.GetSecret(g.Context(), g.Client(), dsci.Spec.ServiceMesh.ControlPlane.Namespace, defaultSecretName) + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(ctrlPlaneSecret.Type).Should(Equal(defaultIngressSecret.Type)) + g.Expect(defaultIngressSecret.Data).Should(Equal(ctrlPlaneSecret.Data)) +} diff --git a/tests/e2e/kueue_test.go b/tests/e2e/kueue_test.go new file mode 100644 index 00000000000..4f3299f184c --- /dev/null +++ b/tests/e2e/kueue_test.go @@ -0,0 +1,29 @@ +package e2e_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" +) + +func kueueTestSuite(t *testing.T) { + t.Helper() + + ct, err := NewComponentTestCtx(&componentApi.Kueue{}) + require.NoError(t, err) + + componentCtx := KueueTestCtx{ + ComponentTestCtx: ct, + } + + t.Run("Validate component enabled", componentCtx.ValidateComponentEnabled) + t.Run("Validate operands have OwnerReferences", 
componentCtx.ValidateOperandsOwnerReferences) + t.Run("Validate update operand resources", componentCtx.ValidateUpdateDeploymentsResources) + t.Run("Validate component disabled", componentCtx.ValidateComponentDisabled) +} + +type KueueTestCtx struct { + *ComponentTestCtx +} diff --git a/tests/e2e/modelcontroller_test.go b/tests/e2e/modelcontroller_test.go new file mode 100644 index 00000000000..fe58578d105 --- /dev/null +++ b/tests/e2e/modelcontroller_test.go @@ -0,0 +1,129 @@ +package e2e_test + +import ( + "strings" + "testing" + "time" + + operatorv1 "github.com/openshift/api/operator/v1" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/components/modelcontroller" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/matchers/jq" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/testf" + + . 
"github.com/onsi/gomega" +) + +func modelControllerTestSuite(t *testing.T) { + t.Helper() + + ct, err := NewComponentTestCtx(&componentApi.ModelController{}) + require.NoError(t, err) + + componentCtx := ModelControllerTestCtx{ + ComponentTestCtx: ct, + } + + t.Run("Validate component enabled", componentCtx.validateComponentEnabled) + t.Run("Validate operands have OwnerReferences", componentCtx.ValidateOperandsOwnerReferences) + t.Run("Validate update operand resources", componentCtx.ValidateUpdateDeploymentsResources) + t.Run("Validate component disabled", componentCtx.validateComponentDisabled) +} + +type ModelControllerTestCtx struct { + *ComponentTestCtx +} + +func (c *ModelControllerTestCtx) validateComponentEnabled(t *testing.T) { + t.Run("ModelMeshServing enabled", func(t *testing.T) { + c.validateComponentDeployed(t, operatorv1.Managed, operatorv1.Removed, metav1.ConditionTrue) + }) + t.Run("Kserve enabled", func(t *testing.T) { + c.validateComponentDeployed(t, operatorv1.Removed, operatorv1.Managed, metav1.ConditionTrue) + }) + t.Run("Kserve and ModelMeshServing enabled", func(t *testing.T) { + c.validateComponentDeployed(t, operatorv1.Managed, operatorv1.Managed, metav1.ConditionTrue) + }) +} + +func (c *ModelControllerTestCtx) validateComponentDisabled(t *testing.T) { + t.Run("Kserve and ModelMeshServing disabled", func(t *testing.T) { + c.validateComponentDeployed(t, operatorv1.Removed, operatorv1.Removed, metav1.ConditionFalse) + }) +} + +func (c *ModelControllerTestCtx) validateComponentDeployed( + t *testing.T, + modelMeshState operatorv1.ManagementState, + kserveState operatorv1.ManagementState, + status metav1.ConditionStatus, +) { + t.Helper() + + g := c.NewWithT(t) + + g.Update( + gvk.DataScienceCluster, + c.DSCName, + testf.TransformPipeline( + testf.Transform(`.spec.components.%s.managementState = "%s"`, componentApi.ModelMeshServingComponentName, modelMeshState), + testf.Transform(`.spec.components.%s.managementState = "%s"`, 
componentApi.KserveComponentName, kserveState), + ), + ).Eventually().WithTimeout(30 * time.Second).WithPolling(1 * time.Second).Should(And( + jq.Match(`.spec.components.%s.managementState == "%s"`, componentApi.ModelMeshServingComponentName, modelMeshState), + jq.Match(`.spec.components.%s.managementState == "%s"`, componentApi.KserveComponentName, kserveState), + )) + + if status == metav1.ConditionTrue { + g.List(gvk.ModelController).Eventually().Should(And( + HaveLen(1), + HaveEach(And( + jq.Match(`.metadata.ownerReferences[0].kind == "%s"`, gvk.DataScienceCluster.Kind), + jq.Match(`.status.phase == "%s"`, readyStatus), + )), + )) + + g.List( + gvk.Deployment, + client.InNamespace(c.ApplicationNamespace), + client.MatchingLabels{ + labels.PlatformPartOf: strings.ToLower(c.GVK.Kind), + }, + ).Eventually().ShouldNot( + BeEmpty(), + ) + } else { + g.List(gvk.Kserve).Eventually().Should( + BeEmpty(), + ) + g.List(gvk.ModelMeshServing).Eventually().Should( + BeEmpty(), + ) + g.List(gvk.ModelController).Eventually().Should( + BeEmpty(), + ) + + g.List( + gvk.Deployment, + client.InNamespace(c.ApplicationNamespace), + client.MatchingLabels{ + labels.PlatformPartOf: strings.ToLower(gvk.ModelController.Kind), + }, + ).Eventually().Should( + BeEmpty(), + ) + } + + g.List(gvk.DataScienceCluster).Eventually().Should(And( + HaveLen(1), + HaveEach(And( + jq.Match(`.status.conditions[] | select(.type == "%s") | .status == "%s"`, modelcontroller.ReadyConditionType, status), + )), + )) +} diff --git a/tests/e2e/modelmeshserving_test.go b/tests/e2e/modelmeshserving_test.go new file mode 100644 index 00000000000..005db8ff03a --- /dev/null +++ b/tests/e2e/modelmeshserving_test.go @@ -0,0 +1,55 @@ +package e2e_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + 
"github.com/opendatahub-io/opendatahub-operator/v2/controllers/components/modelcontroller" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/matchers/jq" + + . "github.com/onsi/gomega" +) + +func modelMeshServingTestSuite(t *testing.T) { + t.Helper() + + ct, err := NewComponentTestCtx(&componentApi.ModelMeshServing{}) + require.NoError(t, err) + + componentCtx := ModelMeshServingTestCtx{ + ComponentTestCtx: ct, + } + + t.Run("Validate component enabled", componentCtx.ValidateComponentEnabled) + t.Run("Validate model controller", componentCtx.validateModelControllerInstance) + t.Run("Validate operands have OwnerReferences", componentCtx.ValidateOperandsOwnerReferences) + t.Run("Validate update operand resources", componentCtx.ValidateUpdateDeploymentsResources) + t.Run("Validate component disabled", componentCtx.ValidateComponentDisabled) +} + +type ModelMeshServingTestCtx struct { + *ComponentTestCtx +} + +func (tc *ModelMeshServingTestCtx) validateModelControllerInstance(t *testing.T) { + g := tc.NewWithT(t) + + g.List(gvk.ModelController).Eventually().Should(And( + HaveLen(1), + HaveEach(And( + jq.Match(`.metadata.ownerReferences[0].kind == "%s"`, gvk.DataScienceCluster.Kind), + jq.Match(`.status.phase == "%s"`, readyStatus), + )), + )) + + g.List(gvk.DataScienceCluster).Eventually().Should(And( + HaveLen(1), + HaveEach(And( + jq.Match(`.status.conditions[] | select(.type == "%s") | .status == "%s"`, modelcontroller.ReadyConditionType, metav1.ConditionTrue), + )), + )) +} diff --git a/tests/e2e/modelregistry_test.go b/tests/e2e/modelregistry_test.go new file mode 100644 index 00000000000..1ccff3c530c --- /dev/null +++ b/tests/e2e/modelregistry_test.go @@ -0,0 +1,155 @@ +package e2e_test + +import ( + "strings" + "testing" + + "github.com/rs/xid" + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" + 
"sigs.k8s.io/controller-runtime/pkg/client" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/components/modelregistry" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/annotations" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/resources" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/utils/test/matchers/jq" + + . "github.com/onsi/gomega" +) + +type ModelRegistryTestCtx struct { + *ComponentTestCtx +} + +func modelRegistryTestSuite(t *testing.T) { + t.Helper() + + ct, err := NewComponentTestCtx(&componentApi.ModelRegistry{}) + require.NoError(t, err) + + componentCtx := ModelRegistryTestCtx{ + ComponentTestCtx: ct, + } + + t.Run("Validate component enabled", componentCtx.ValidateComponentEnabled) + t.Run("Validate component spec", componentCtx.validateSpec) + t.Run("Validate operands have OwnerReferences", componentCtx.ValidateOperandsOwnerReferences) + t.Run("Validate update operand resources", componentCtx.ValidateUpdateDeploymentsResources) + + t.Run("Validate watched resources", componentCtx.validateOperandsWatchedResources) + t.Run("Validate dynamically watches operands", componentCtx.validateOperandsDynamicallyWatchedResources) + t.Run("Validate CRDs reinstated", componentCtx.validateCRDReinstated) + t.Run("Validate cert", componentCtx.validateModelRegistryCert) + t.Run("Validate ServiceMeshMember", componentCtx.validateModelRegistryServiceMeshMember) + + t.Run("Validate component disabled", componentCtx.ValidateComponentDisabled) +} + +func (c *ModelRegistryTestCtx) validateSpec(t *testing.T) { + g := c.NewWithT(t) + + dsc, err := c.GetDSC() + g.Expect(err).NotTo(HaveOccurred()) + + 
g.List(gvk.ModelRegistry).Eventually().Should(And( + HaveLen(1), + HaveEach(And( + jq.Match(`.spec.registriesNamespace == "%s"`, dsc.Spec.Components.ModelRegistry.RegistriesNamespace), + )), + )) +} + +func (c *ModelRegistryTestCtx) validateOperandsWatchedResources(t *testing.T) { + g := c.NewWithT(t) + + g.List( + gvk.ServiceMeshMember, + client.MatchingLabels{labels.PlatformPartOf: strings.ToLower(componentApi.ModelRegistryKind)}, + ).Eventually().Should(And( + HaveLen(1), + HaveEach(And( + jq.Match(`.metadata | has("ownerReferences") | not`), + )), + )) +} + +func (c *ModelRegistryTestCtx) validateOperandsDynamicallyWatchedResources(t *testing.T) { + g := c.NewWithT(t) + + mri, err := g.Get(gvk.ModelRegistry, types.NamespacedName{Name: componentApi.ModelRegistryInstanceName}).Get() + g.Expect(err).ShouldNot(HaveOccurred()) + + rn, err := jq.ExtractValue[string](mri, ".spec.registriesNamespace") + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(rn).ShouldNot(BeEmpty()) + + newPt := xid.New().String() + oldPt := "" + + g.Update(gvk.ServiceMeshMember, types.NamespacedName{Name: "default", Namespace: rn}, func(obj *unstructured.Unstructured) error { + oldPt = resources.SetAnnotation(obj, annotations.PlatformType, newPt) + return nil + }).Eventually().Should( + jq.Match(`.metadata.annotations."%s" == "%s"`, annotations.PlatformType, newPt), + ) + + g.List( + gvk.ServiceMeshMember, + client.MatchingLabels{labels.PlatformPartOf: strings.ToLower(componentApi.ModelRegistryKind)}, + ).Eventually().Should(And( + HaveLen(1), + HaveEach(And( + jq.Match(`.metadata.annotations."%s" == "%s"`, annotations.PlatformType, oldPt), + )), + )) +} + +func (c *ModelRegistryTestCtx) validateModelRegistryCert(t *testing.T) { + g := c.NewWithT(t) + + dsci, err := g.Get(gvk.DSCInitialization, c.DSCIName).Get() + g.Expect(err).ShouldNot(HaveOccurred()) + + smns, err := jq.ExtractValue[string](dsci, ".spec.serviceMesh.controlPlane.namespace") + g.Expect(err).ShouldNot(HaveOccurred()) + 
g.Expect(smns).ShouldNot(BeEmpty()) + + is, err := cluster.FindDefaultIngressSecret(g.Context(), g.Client()) + g.Expect(err).ShouldNot(HaveOccurred()) + + g.Get(gvk.Secret, types.NamespacedName{Namespace: smns, Name: modelregistry.DefaultModelRegistryCert}).Eventually().Should(And( + jq.Match(`.type == "%s"`, is.Type), + jq.Match(`(.data."tls.crt" | @base64d) == "%s"`, is.Data["tls.crt"]), + jq.Match(`(.data."tls.key" | @base64d) == "%s"`, is.Data["tls.key"]), + )) +} + +func (c *ModelRegistryTestCtx) validateModelRegistryServiceMeshMember(t *testing.T) { + g := c.NewWithT(t) + + mri, err := g.Get(gvk.ModelRegistry, types.NamespacedName{Name: componentApi.ModelRegistryInstanceName}).Get() + g.Expect(err).ShouldNot(HaveOccurred()) + + rn, err := jq.ExtractValue[string](mri, ".spec.registriesNamespace") + g.Expect(err).ShouldNot(HaveOccurred()) + g.Expect(rn).ShouldNot(BeEmpty()) + + g.Get(gvk.ServiceMeshMember, types.NamespacedName{Namespace: rn, Name: "default"}).Eventually().Should( + jq.Match(`.spec | has("controlPlaneRef")`), + ) +} + +func (c *ModelRegistryTestCtx) validateCRDReinstated(t *testing.T) { + crds := []string{"modelregistries.modelregistry.opendatahub.io"} + + for _, crd := range crds { + t.Run(crd, func(t *testing.T) { + c.ValidateCRDReinstated(t, crd) + }) + } +} diff --git a/tests/e2e/odh_manager_test.go b/tests/e2e/odh_manager_test.go index c3e666673b9..49dfe2e2a89 100644 --- a/tests/e2e/odh_manager_test.go +++ b/tests/e2e/odh_manager_test.go @@ -37,4 +37,71 @@ func (tc *testContext) validateOwnedCRDs(t *testing.T) { require.NoErrorf(t, tc.validateCRD("featuretrackers.features.opendatahub.io"), "error in validating CRD : featuretrackers.features.opendatahub.io") }) + + // Validate component CRDs + t.Run("Validate Dashboard CRD", func(t *testing.T) { + t.Parallel() + require.NoErrorf(t, tc.validateCRD("dashboards.components.platform.opendatahub.io"), + "error in validating CRD : dashboards.components.platform.opendatahub.io") + }) + + 
t.Run("Validate Ray CRD", func(t *testing.T) { + t.Parallel() + require.NoErrorf(t, tc.validateCRD("rays.components.platform.opendatahub.io"), + "error in validating CRD : rays.components.platform.opendatahub.io") + }) + + t.Run("Validate ModelRegistry CRD", func(t *testing.T) { + t.Parallel() + require.NoErrorf(t, tc.validateCRD("modelregistries.components.platform.opendatahub.io"), + "error in validating CRD : modelregistries.components.platform.opendatahub.io") + }) + + t.Run("Validate TrustyAI CRD", func(t *testing.T) { + t.Parallel() + require.NoErrorf(t, tc.validateCRD("trustyais.components.platform.opendatahub.io"), + "error in validating CRD : trustyais.components.platform.opendatahub.io") + }) + + t.Run("Validate Kueue CRD", func(t *testing.T) { + t.Parallel() + require.NoErrorf(t, tc.validateCRD("kueues.components.platform.opendatahub.io"), + "error in validating CRD : kueues.components.platform.opendatahub.io") + }) + + t.Run("Validate TrainingOperator CRD", func(t *testing.T) { + t.Parallel() + require.NoErrorf(t, tc.validateCRD("trainingoperators.components.platform.opendatahub.io"), + "error in validating CRD : trainingoperators.components.platform.opendatahub.io") + }) + + t.Run("Validate DataSciencePipelines CRD", func(t *testing.T) { + t.Parallel() + require.NoErrorf(t, tc.validateCRD("datasciencepipelines.components.platform.opendatahub.io"), + "error in validating CRD : datasciencepipelines.components.platform.opendatahub.io") + }) + + t.Run("Validate Workbenches CRD", func(t *testing.T) { + t.Parallel() + require.NoErrorf(t, tc.validateCRD("workbenches.components.platform.opendatahub.io"), + "error in validating CRD : workbenches.components.platform.opendatahub.io") + }) + + t.Run("Validate Kserve CRD", func(t *testing.T) { + t.Parallel() + require.NoErrorf(t, tc.validateCRD("kserves.components.platform.opendatahub.io"), + "error in validating CRD : kserves.components.platform.opendatahub.io") + }) + + t.Run("Validate ModelMeshServing CRD", 
func(t *testing.T) { + t.Parallel() + require.NoErrorf(t, tc.validateCRD("modelmeshservings.components.platform.opendatahub.io"), + "error in validating CRD : modelmeshservings.components.platform.opendatahub.io") + }) + + t.Run("Validate ModelController CRD", func(t *testing.T) { + t.Parallel() + require.NoErrorf(t, tc.validateCRD("modelcontrollers.components.platform.opendatahub.io"), + "error in validating CRD : modelcontrollers.components.platform.opendatahub.io") + }) } diff --git a/tests/e2e/ray_test.go b/tests/e2e/ray_test.go new file mode 100644 index 00000000000..d56b0309c61 --- /dev/null +++ b/tests/e2e/ray_test.go @@ -0,0 +1,29 @@ +package e2e_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" +) + +func rayTestSuite(t *testing.T) { + t.Helper() + + ct, err := NewComponentTestCtx(&componentApi.Ray{}) + require.NoError(t, err) + + componentCtx := RayTestCtx{ + ComponentTestCtx: ct, + } + + t.Run("Validate component enabled", componentCtx.ValidateComponentEnabled) + t.Run("Validate operands have OwnerReferences", componentCtx.ValidateOperandsOwnerReferences) + t.Run("Validate update operand resources", componentCtx.ValidateUpdateDeploymentsResources) + t.Run("Validate component disabled", componentCtx.ValidateComponentDisabled) +} + +type RayTestCtx struct { + *ComponentTestCtx +} diff --git a/tests/e2e/trainingoperator_test.go b/tests/e2e/trainingoperator_test.go new file mode 100644 index 00000000000..d1b4eca9367 --- /dev/null +++ b/tests/e2e/trainingoperator_test.go @@ -0,0 +1,29 @@ +package e2e_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" +) + +func trainingOperatorTestSuite(t *testing.T) { + t.Helper() + + ct, err := NewComponentTestCtx(&componentApi.TrainingOperator{}) + require.NoError(t, err) + + componentCtx := 
TrainingOperatorTestCtx{ + ComponentTestCtx: ct, + } + + t.Run("Validate component enabled", componentCtx.ValidateComponentEnabled) + t.Run("Validate operands have OwnerReferences", componentCtx.ValidateOperandsOwnerReferences) + t.Run("Validate update operand resources", componentCtx.ValidateUpdateDeploymentsResources) + t.Run("Validate component disabled", componentCtx.ValidateComponentDisabled) +} + +type TrainingOperatorTestCtx struct { + *ComponentTestCtx +} diff --git a/tests/e2e/trustyai_test.go b/tests/e2e/trustyai_test.go new file mode 100644 index 00000000000..7d9b4d9094f --- /dev/null +++ b/tests/e2e/trustyai_test.go @@ -0,0 +1,29 @@ +package e2e_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" +) + +func trustyAITestSuite(t *testing.T) { + t.Helper() + + ct, err := NewComponentTestCtx(&componentApi.TrustyAI{}) + require.NoError(t, err) + + componentCtx := TrustyAITestCtx{ + ComponentTestCtx: ct, + } + + t.Run("Validate component enabled", componentCtx.ValidateComponentEnabled) + t.Run("Validate operands have OwnerReferences", componentCtx.ValidateOperandsOwnerReferences) + t.Run("Validate update operand resources", componentCtx.ValidateUpdateDeploymentsResources) + t.Run("Validate component disabled", componentCtx.ValidateComponentDisabled) +} + +type TrustyAITestCtx struct { + *ComponentTestCtx +} diff --git a/tests/e2e/workbenches_test.go b/tests/e2e/workbenches_test.go new file mode 100644 index 00000000000..28466c5dba3 --- /dev/null +++ b/tests/e2e/workbenches_test.go @@ -0,0 +1,325 @@ +package e2e_test + +import ( + "context" + "errors" + "fmt" + "reflect" + "strings" + "testing" + "time" + + operatorv1 "github.com/openshift/api/operator/v1" + "github.com/stretchr/testify/require" + autoscalingv1 "k8s.io/api/autoscaling/v1" + corev1 "k8s.io/api/core/v1" + k8serr "k8s.io/apimachinery/pkg/api/errors" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" + + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" + dscv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/controllers/components/workbenches" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster/gvk" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/metadata/labels" +) + +type WorkbenchesTestCtx struct { + testCtx *testContext + testWorkbenchesInstance componentApi.Workbenches +} + +func workbenchesTestSuite(t *testing.T) { + t.Helper() + + workbenchesCtx := WorkbenchesTestCtx{} + var err error + workbenchesCtx.testCtx, err = NewTestContext() + require.NoError(t, err) + + testCtx := workbenchesCtx.testCtx + + t.Run(testCtx.testDsc.Name, func(t *testing.T) { + t.Run("Creation of Workbenches CR", func(t *testing.T) { + err = workbenchesCtx.testWorkbenchesCreation() + require.NoError(t, err, "error creating Workbenches CR") + }) + + t.Run("Validate Workbenches instance", func(t *testing.T) { + err = workbenchesCtx.validateWorkbenches() + require.NoError(t, err, "error validating Workbenches instance") + }) + + t.Run("Validate Ownerreferences exist", func(t *testing.T) { + err = workbenchesCtx.testOwnerReferences() + require.NoError(t, err, "error getting all Workbenches's Ownerreferences") + }) + + t.Run("Validate Workbenches Ready", func(t *testing.T) { + err = workbenchesCtx.validateWorkbenchesReady() + require.NoError(t, err, "Workbenches instance is not Ready") + }) + + // reconcile + t.Run("Validate Controller reconcile", func(t *testing.T) { + err = workbenchesCtx.testUpdateOnWorkbenchesResources() + require.NoError(t, err, "error testing updates for Workbenches' managed resources") + }) + + t.Run("Validate Disabling Component", 
func(t *testing.T) { + err = workbenchesCtx.testUpdateWorkbenchesComponentDisabled() + require.NoError(t, err, "error testing component enabled field") + }) + }) +} + +func (tc *WorkbenchesTestCtx) testWorkbenchesCreation() error { + err := tc.testCtx.wait(func(ctx context.Context) (bool, error) { + key := client.ObjectKeyFromObject(tc.testCtx.testDsc) + + err := tc.testCtx.customClient.Get(ctx, key, tc.testCtx.testDsc) + if err != nil { + return false, fmt.Errorf("error getting resource %w", err) + } + + tc.testCtx.testDsc.Spec.Components.Workbenches.ManagementState = operatorv1.Managed + + switch err = tc.testCtx.customClient.Update(ctx, tc.testCtx.testDsc); { + case err == nil: + return true, nil + case k8serr.IsConflict(err): + return false, nil + default: + return false, fmt.Errorf("error updating resource %w", err) + } + }) + if err != nil { + return fmt.Errorf("error after retry %w", err) + } + + err = tc.testCtx.wait(func(ctx context.Context) (bool, error) { + existingWorkbenchesList := &componentApi.WorkbenchesList{} + + err := tc.testCtx.customClient.List(ctx, existingWorkbenchesList) + if err != nil { + return false, err + } + + switch { + case len(existingWorkbenchesList.Items) == 1: + tc.testWorkbenchesInstance = existingWorkbenchesList.Items[0] + return true, nil + + case len(existingWorkbenchesList.Items) > 1: + return false, fmt.Errorf( + "unexpected Workbenches CR instances. 
Expected 1 , Found %v instance", len(existingWorkbenchesList.Items)) + default: + return false, nil + } + }) + + if err != nil { + return fmt.Errorf("unable to find Workbenches CR instance: %w", err) + } + + return nil +} + +func (tc *WorkbenchesTestCtx) validateWorkbenches() error { + // Workbenches spec should match the spec of Workbenches component in DSC + if !reflect.DeepEqual(tc.testCtx.testDsc.Spec.Components.Workbenches.WorkbenchesCommonSpec, tc.testWorkbenchesInstance.Spec.WorkbenchesCommonSpec) { + err := fmt.Errorf("expected spec for Workbenches %v, got %v", + tc.testCtx.testDsc.Spec.Components.Workbenches.WorkbenchesCommonSpec, tc.testWorkbenchesInstance.Spec.WorkbenchesCommonSpec) + return err + } + return nil +} + +func (tc *WorkbenchesTestCtx) testOwnerReferences() error { + if len(tc.testWorkbenchesInstance.OwnerReferences) != 1 { + return errors.New("expect CR has ownerreferences set") + } + + // Test Workbenches CR ownerref + if tc.testWorkbenchesInstance.OwnerReferences[0].Kind != dscKind { + return fmt.Errorf("expected ownerreference DataScienceCluster not found. Got ownerreferrence: %v", + tc.testWorkbenchesInstance.OwnerReferences[0].Kind) + } + + // Test Workbenches resources + + appDeployments, err := tc.testCtx.kubeClient.AppsV1().Deployments(tc.testCtx.applicationsNamespace).List(tc.testCtx.ctx, metav1.ListOptions{ + LabelSelector: labels.PlatformPartOf + "=" + strings.ToLower(gvk.Workbenches.Kind), + }) + if err != nil { + return fmt.Errorf("error listing component deployments %w", err) + } + // test any one deployment for ownerreference + if len(appDeployments.Items) != 0 && appDeployments.Items[0].OwnerReferences[0].Kind != componentApi.WorkbenchesKind { + return fmt.Errorf("expected ownerreference not found. Got ownerreferrence: %v", + appDeployments.Items[0].OwnerReferences) + } + + return nil +} + +// Verify Workbenches instance is in Ready phase when Workbenches deployments are up and running. 
+func (tc *WorkbenchesTestCtx) validateWorkbenchesReady() error { + err := wait.PollUntilContextTimeout(tc.testCtx.ctx, generalRetryInterval, componentReadyTimeout, true, func(ctx context.Context) (bool, error) { + key := types.NamespacedName{Name: tc.testWorkbenchesInstance.Name} + wb := &componentApi.Workbenches{} + + err := tc.testCtx.customClient.Get(ctx, key, wb) + if err != nil { + return false, err + } + return wb.Status.Phase == readyStatus, nil + }) + + if err != nil { + return fmt.Errorf("error waiting on Ready state for Workbenches %v: %w", tc.testWorkbenchesInstance.Name, err) + } + + err = wait.PollUntilContextTimeout(tc.testCtx.ctx, generalRetryInterval, componentReadyTimeout, true, func(ctx context.Context) (bool, error) { + list := dscv1.DataScienceClusterList{} + err := tc.testCtx.customClient.List(ctx, &list) + if err != nil { + return false, err + } + + if len(list.Items) != 1 { + return false, fmt.Errorf("expected 1 DataScience Cluster CR but found %v", len(list.Items)) + } + + for _, c := range list.Items[0].Status.Conditions { + if c.Type == workbenches.ReadyConditionType { + return c.Status == corev1.ConditionTrue, nil + } + } + + return false, nil + }) + + if err != nil { + return fmt.Errorf("error waiting on Ready state for Workbenches component in DSC: %w", err) + } + + return nil +} + +func (tc *WorkbenchesTestCtx) testUpdateOnWorkbenchesResources() error { + appDeployments, err := tc.testCtx.kubeClient.AppsV1().Deployments(tc.testCtx.applicationsNamespace).List(tc.testCtx.ctx, metav1.ListOptions{ + LabelSelector: labels.PlatformPartOf + "=" + strings.ToLower(tc.testWorkbenchesInstance.Kind), + }) + if err != nil { + return err + } + + // expects odh-notebook-controller-manager and notebook-controller-deployment deployments + if len(appDeployments.Items) != 2 { + return fmt.Errorf("error getting deployment for component %s", tc.testWorkbenchesInstance.Name) + } + + const expectedReplica int32 = 2 + + testDeployment := 
appDeployments.Items[0] + patchedReplica := &autoscalingv1.Scale{ + ObjectMeta: metav1.ObjectMeta{ + Name: testDeployment.Name, + Namespace: testDeployment.Namespace, + }, + Spec: autoscalingv1.ScaleSpec{ + Replicas: expectedReplica, + }, + Status: autoscalingv1.ScaleStatus{}, + } + updatedDep, err := tc.testCtx.kubeClient.AppsV1().Deployments(tc.testCtx.applicationsNamespace).UpdateScale(tc.testCtx.ctx, + testDeployment.Name, patchedReplica, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("error patching component resources : %w", err) + } + if updatedDep.Spec.Replicas != patchedReplica.Spec.Replicas { + return fmt.Errorf("failed to patch replicas : expect to be %v but got %v", patchedReplica.Spec.Replicas, updatedDep.Spec.Replicas) + } + + // Sleep for 40 seconds to allow the operator to reconcile + // we expect it should not revert back to original value because of AllowList + time.Sleep(4 * generalRetryInterval) + reconciledDep, err := tc.testCtx.kubeClient.AppsV1().Deployments(tc.testCtx.applicationsNamespace).Get(tc.testCtx.ctx, testDeployment.Name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("error getting component resource after reconcile: %w", err) + } + if *reconciledDep.Spec.Replicas != expectedReplica { + return fmt.Errorf("replicas reverted unexpectedly : expect to be %v but got %v", expectedReplica, *reconciledDep.Spec.Replicas) + } + + return nil +} + +func (tc *WorkbenchesTestCtx) testUpdateWorkbenchesComponentDisabled() error { + if tc.testCtx.testDsc.Spec.Components.Workbenches.ManagementState != operatorv1.Managed { + return errors.New("the Workbenches spec should be in 'enabled: true' state in order to perform test") + } + + deployments, err := tc.testCtx.getComponentDeployments(gvk.Workbenches) + if err != nil { + return fmt.Errorf("error getting enabled component %s", componentApi.WorkbenchesComponentName) + } + + for _, d := range deployments { + if d.Status.ReadyReplicas == 0 { + return fmt.Errorf("component %s 
deployment %s is not ready", componentApi.WorkbenchesComponentName, d.Name) + } + } + + // Disable component Workbenches + err = retry.RetryOnConflict(retry.DefaultRetry, func() error { + // refresh the instance in case it was updated during the reconcile + err := tc.testCtx.customClient.Get(tc.testCtx.ctx, types.NamespacedName{Name: tc.testCtx.testDsc.Name}, tc.testCtx.testDsc) + if err != nil { + return fmt.Errorf("error getting resource %w", err) + } + // Disable the Component + tc.testCtx.testDsc.Spec.Components.Workbenches.ManagementState = operatorv1.Removed + + // Try to update + err = tc.testCtx.customClient.Update(tc.testCtx.ctx, tc.testCtx.testDsc) + // Wrapping with %w below is safe: apimachinery's IsConflict unwraps + // wrapped errors, so RetryOnConflict can still identify conflicts. + if err != nil { + return fmt.Errorf("error updating component from 'enabled: true' to 'enabled: false': %w", err) + } + + return nil + }) + if err != nil { + return fmt.Errorf("error after retry %w", err) + } + + err = tc.testCtx.wait(func(ctx context.Context) (bool, error) { + // Verify Workbenches CR is deleted + wb := &componentApi.Workbenches{} + err = tc.testCtx.customClient.Get(ctx, client.ObjectKey{Name: tc.testWorkbenchesInstance.Name}, wb) + return k8serr.IsNotFound(err), nil + }) + + if err != nil { + return fmt.Errorf("component %v is disabled, should not get the Workbenches CR %v", tc.testWorkbenchesInstance.Name, tc.testWorkbenchesInstance.Name) + } + + deployments, err = tc.testCtx.getComponentDeployments(gvk.Workbenches) + if err != nil { + return fmt.Errorf("error listing deployments: %w", err) + } + + if len(deployments) != 0 { + return fmt.Errorf("component %v is disabled, should not have deployments in NS %v any more", + gvk.Workbenches.Kind, + tc.testCtx.applicationsNamespace) + } + + return nil +} diff --git a/tests/integration/features/cleanup_int_test.go b/tests/integration/features/cleanup_int_test.go index 8d0634cfc0c..070346ccbb3 100644 ---
a/tests/integration/features/cleanup_int_test.go +++ b/tests/integration/features/cleanup_int_test.go @@ -4,7 +4,7 @@ import ( "context" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" + k8serr "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" @@ -81,7 +81,7 @@ var _ = Describe("feature cleanup", func() { WithContext(ctx). WithTimeout(fixtures.Timeout). WithPolling(fixtures.Interval). - Should(WithTransform(errors.IsNotFound, BeTrue())) + Should(WithTransform(k8serr.IsNotFound, BeTrue())) }) }) @@ -154,11 +154,11 @@ var _ = Describe("feature cleanup", func() { WithContext(ctx). WithTimeout(fixtures.Timeout). WithPolling(fixtures.Interval). - Should(WithTransform(errors.IsNotFound, BeTrue())) + Should(WithTransform(k8serr.IsNotFound, BeTrue())) Consistently(func() error { _, err := fixtures.GetFeatureTracker(ctx, envTestClient, namespace, featureName) - if errors.IsNotFound(err) { + if k8serr.IsNotFound(err) { return nil } return err @@ -213,7 +213,7 @@ func createdSecretHasOwnerReferenceToOwningFeature(namespace, featureName string func namespaceExists(ctx context.Context, cli client.Client, f *feature.Feature) (bool, error) { namespace, err := fixtures.GetNamespace(ctx, cli, "conditional-ns") - if errors.IsNotFound(err) { + if k8serr.IsNotFound(err) { return false, nil } // ensuring it fails if namespace is still deleting diff --git a/tests/integration/features/features_suite_int_test.go b/tests/integration/features/features_suite_int_test.go index 1dcf8d68c5a..edf14152d76 100644 --- a/tests/integration/features/features_suite_int_test.go +++ b/tests/integration/features/features_suite_int_test.go @@ -59,7 +59,6 @@ var _ = BeforeSuite(func() { Scheme: testScheme, Paths: []string{ filepath.Join(projectDir, "config", "crd", "bases"), - filepath.Join(projectDir, "config", "crd", "dashboard-crds"), filepath.Join(projectDir, "tests", "integration", "features", "fixtures", 
"crd"), }, ErrorIfPathMissing: true, diff --git a/tests/integration/features/serverless_feature_test.go b/tests/integration/features/serverless_feature_test.go index 7f116479592..8f36b4c96c4 100644 --- a/tests/integration/features/serverless_feature_test.go +++ b/tests/integration/features/serverless_feature_test.go @@ -13,9 +13,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" + componentApi "github.com/opendatahub-io/opendatahub-operator/v2/apis/components/v1alpha1" dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" infrav1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/infrastructure/v1" - "github.com/opendatahub-io/opendatahub-operator/v2/components/kserve" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/serverless" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/servicemesh" @@ -31,7 +31,7 @@ var _ = Describe("Serverless feature", func() { var ( dsci *dsciv1.DSCInitialization objectCleaner *envtestutil.Cleaner - kserveComponent *kserve.Kserve + kserveComponent *componentApi.Kserve ) BeforeEach(func(ctx context.Context) { @@ -43,7 +43,7 @@ var _ = Describe("Serverless feature", func() { namespace := envtestutil.AppendRandomNameTo("ns-serverless") dsciName := envtestutil.AppendRandomNameTo("dsci-serverless") dsci = fixtures.NewDSCInitialization(ctx, envTestClient, dsciName, namespace) - kserveComponent = &kserve.Kserve{} + kserveComponent = &componentApi.Kserve{} }) Context("verifying preconditions", func() { @@ -63,7 +63,7 @@ var _ = Describe("Serverless feature", func() { return nil } - featuresHandler := feature.ComponentFeaturesHandler(dsci, kserveComponent.GetComponentName(), dsci.Spec.ApplicationsNamespace, featuresProvider) + featuresHandler := feature.ComponentFeaturesHandler(dsci, componentApi.KserveComponentName, dsci.Spec.ApplicationsNamespace, featuresProvider) // 
when applyErr := featuresHandler.Apply(ctx, envTestClient) @@ -111,7 +111,7 @@ var _ = Describe("Serverless feature", func() { return nil } - featuresHandler := feature.ComponentFeaturesHandler(dsci, kserveComponent.GetComponentName(), dsci.Spec.ApplicationsNamespace, featuresProvider) + featuresHandler := feature.ComponentFeaturesHandler(dsci, componentApi.KserveComponentName, dsci.Spec.ApplicationsNamespace, featuresProvider) // then Expect(featuresHandler.Apply(ctx, envTestClient)).To(Succeed()) @@ -130,7 +130,7 @@ var _ = Describe("Serverless feature", func() { return nil } - featuresHandler := feature.ComponentFeaturesHandler(dsci, kserveComponent.GetComponentName(), dsci.Spec.ApplicationsNamespace, featuresProvider) + featuresHandler := feature.ComponentFeaturesHandler(dsci, componentApi.KserveComponentName, dsci.Spec.ApplicationsNamespace, featuresProvider) // then Expect(featuresHandler.Apply(ctx, envTestClient)).To(Succeed()) @@ -160,7 +160,7 @@ var _ = Describe("Serverless feature", func() { return nil } - featuresHandler := feature.ComponentFeaturesHandler(dsci, kserveComponent.GetComponentName(), dsci.Spec.ApplicationsNamespace, featuresProvider) + featuresHandler := feature.ComponentFeaturesHandler(dsci, componentApi.KserveComponentName, dsci.Spec.ApplicationsNamespace, featuresProvider) // then Expect(featuresHandler.Apply(ctx, envTestClient)).ToNot(Succeed()) @@ -271,17 +271,17 @@ var _ = Describe("Serverless feature", func() { It("should create a TLS secret if certificate is SelfSigned", func(ctx context.Context) { // given - kserveComponent.Serving.IngressGateway.Certificate.Type = infrav1.SelfSigned - kserveComponent.Serving.IngressGateway.Domain = fixtures.TestDomainFooCom + kserveComponent.Spec.Serving.IngressGateway.Certificate.Type = infrav1.SelfSigned + kserveComponent.Spec.Serving.IngressGateway.Domain = fixtures.TestDomainFooCom featuresProvider := func(registry feature.FeaturesRegistry) error { errFeatureAdd := registry.Add( 
feature.Define("tls-secret-creation"). WithData( servicemesh.FeatureData.ControlPlane.Define(&dsci.Spec).AsAction(), - serverless.FeatureData.Serving.Define(&kserveComponent.Serving).AsAction(), - serverless.FeatureData.IngressDomain.Define(&kserveComponent.Serving).AsAction(), - serverless.FeatureData.CertificateName.Define(&kserveComponent.Serving).AsAction(), + serverless.FeatureData.Serving.Define(&kserveComponent.Spec.Serving).AsAction(), + serverless.FeatureData.IngressDomain.Define(&kserveComponent.Spec.Serving).AsAction(), + serverless.FeatureData.CertificateName.Define(&kserveComponent.Spec.Serving).AsAction(), ). WithResources(serverless.ServingCertificateResource), ) @@ -291,7 +291,7 @@ var _ = Describe("Serverless feature", func() { return nil } - featuresHandler := feature.ComponentFeaturesHandler(dsci, kserveComponent.GetComponentName(), dsci.Spec.ApplicationsNamespace, featuresProvider) + featuresHandler := feature.ComponentFeaturesHandler(dsci, componentApi.KserveComponentName, dsci.Spec.ApplicationsNamespace, featuresProvider) // when Expect(featuresHandler.Apply(ctx, envTestClient)).To(Succeed()) @@ -313,17 +313,17 @@ var _ = Describe("Serverless feature", func() { It("should not create any TLS secret if certificate is user provided", func(ctx context.Context) { // given - kserveComponent.Serving.IngressGateway.Certificate.Type = infrav1.Provided - kserveComponent.Serving.IngressGateway.Domain = fixtures.TestDomainFooCom + kserveComponent.Spec.Serving.IngressGateway.Certificate.Type = infrav1.Provided + kserveComponent.Spec.Serving.IngressGateway.Domain = fixtures.TestDomainFooCom featuresProvider := func(registry feature.FeaturesRegistry) error { errFeatureAdd := registry.Add( feature.Define("tls-secret-creation"). 
WithData( servicemesh.FeatureData.ControlPlane.Define(&dsci.Spec).AsAction(), - serverless.FeatureData.Serving.Define(&kserveComponent.Serving).AsAction(), - serverless.FeatureData.IngressDomain.Define(&kserveComponent.Serving).AsAction(), - serverless.FeatureData.CertificateName.Define(&kserveComponent.Serving).AsAction(), + serverless.FeatureData.Serving.Define(&kserveComponent.Spec.Serving).AsAction(), + serverless.FeatureData.IngressDomain.Define(&kserveComponent.Spec.Serving).AsAction(), + serverless.FeatureData.CertificateName.Define(&kserveComponent.Spec.Serving).AsAction(), ). WithResources(serverless.ServingCertificateResource), ) @@ -333,7 +333,7 @@ var _ = Describe("Serverless feature", func() { return nil } - featuresHandler := feature.ComponentFeaturesHandler(dsci, kserveComponent.GetComponentName(), dsci.Spec.ApplicationsNamespace, featuresProvider) + featuresHandler := feature.ComponentFeaturesHandler(dsci, componentApi.KserveComponentName, dsci.Spec.ApplicationsNamespace, featuresProvider) // when Expect(featuresHandler.Apply(ctx, envTestClient)).To(Succeed()) diff --git a/tests/integration/features/servicemesh_feature_test.go b/tests/integration/features/servicemesh_feature_test.go index 588a19f9e65..6cb5ec5cbf2 100644 --- a/tests/integration/features/servicemesh_feature_test.go +++ b/tests/integration/features/servicemesh_feature_test.go @@ -6,7 +6,7 @@ import ( corev1 "k8s.io/api/core/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - "k8s.io/apimachinery/pkg/api/errors" + k8serr "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/yaml" "sigs.k8s.io/controller-runtime/pkg/client" @@ -307,7 +307,7 @@ var _ = Describe("Service Mesh setup", func() { Expect(found).To(BeTrue()) _, err = fixtures.GetNamespace(ctx, envTestClient, serviceMeshSpec.Auth.Namespace) - Expect(errors.IsNotFound(err)).To(BeTrue()) + Expect(k8serr.IsNotFound(err)).To(BeTrue()) return 
extensionProviders