diff --git a/bundle-ocp/manifests/instaslice-operator-controller-manager-metrics-service_v1_service.yaml b/bundle-ocp/manifests/instaslice-operator-controller-manager-metrics-service_v1_service.yaml index 686477d8..634a8283 100644 --- a/bundle-ocp/manifests/instaslice-operator-controller-manager-metrics-service_v1_service.yaml +++ b/bundle-ocp/manifests/instaslice-operator-controller-manager-metrics-service_v1_service.yaml @@ -3,7 +3,6 @@ kind: Service metadata: creationTimestamp: null labels: - app.kubernetes.io/component: kube-rbac-proxy app.kubernetes.io/created-by: instaslice-operator app.kubernetes.io/instance: controller-manager-metrics-service app.kubernetes.io/managed-by: kustomize diff --git a/bundle-ocp/manifests/instaslice-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml b/bundle-ocp/manifests/instaslice-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml index 9912b62b..289fd8e4 100644 --- a/bundle-ocp/manifests/instaslice-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml +++ b/bundle-ocp/manifests/instaslice-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml @@ -3,7 +3,6 @@ kind: ClusterRole metadata: creationTimestamp: null labels: - app.kubernetes.io/component: kube-rbac-proxy app.kubernetes.io/created-by: instaslice-operator app.kubernetes.io/instance: metrics-reader app.kubernetes.io/managed-by: kustomize diff --git a/bundle-ocp/manifests/instaslice-operator.clusterserviceversion.yaml b/bundle-ocp/manifests/instaslice-operator.clusterserviceversion.yaml index eb094dfe..4a4c139d 100644 --- a/bundle-ocp/manifests/instaslice-operator.clusterserviceversion.yaml +++ b/bundle-ocp/manifests/instaslice-operator.clusterserviceversion.yaml @@ -248,29 +248,6 @@ spec: - mountPath: /tmp/k8s-webhook-server/serving-certs name: cert readOnly: true - - args: - - --secure-listen-address=0.0.0.0:8443 - - --upstream=http://127.0.0.1:8080/ - - --logtostderr=true - - --v=0 - image: 
registry.redhat.io/openshift4/ose-kube-rbac-proxy-rhel9@sha256:90b19de8a962e4b99cf336af1a51e6288ce493e35644f3fb8b9077b76e7ff98a - name: kube-rbac-proxy - ports: - - containerPort: 8443 - name: https - protocol: TCP - resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 5m - memory: 64Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL securityContext: runAsNonRoot: true seccompProfile: diff --git a/bundle/manifests/instaslice-operator-controller-manager-metrics-service_v1_service.yaml b/bundle/manifests/instaslice-operator-controller-manager-metrics-service_v1_service.yaml index 686477d8..634a8283 100644 --- a/bundle/manifests/instaslice-operator-controller-manager-metrics-service_v1_service.yaml +++ b/bundle/manifests/instaslice-operator-controller-manager-metrics-service_v1_service.yaml @@ -3,7 +3,6 @@ kind: Service metadata: creationTimestamp: null labels: - app.kubernetes.io/component: kube-rbac-proxy app.kubernetes.io/created-by: instaslice-operator app.kubernetes.io/instance: controller-manager-metrics-service app.kubernetes.io/managed-by: kustomize diff --git a/bundle/manifests/instaslice-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml b/bundle/manifests/instaslice-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml index 9912b62b..289fd8e4 100644 --- a/bundle/manifests/instaslice-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml +++ b/bundle/manifests/instaslice-operator-metrics-reader_rbac.authorization.k8s.io_v1_clusterrole.yaml @@ -3,7 +3,6 @@ kind: ClusterRole metadata: creationTimestamp: null labels: - app.kubernetes.io/component: kube-rbac-proxy app.kubernetes.io/created-by: instaslice-operator app.kubernetes.io/instance: metrics-reader app.kubernetes.io/managed-by: kustomize diff --git a/bundle/manifests/instaslice-operator.clusterserviceversion.yaml b/bundle/manifests/instaslice-operator.clusterserviceversion.yaml index 26cc3214..af975eff 
100644 --- a/bundle/manifests/instaslice-operator.clusterserviceversion.yaml +++ b/bundle/manifests/instaslice-operator.clusterserviceversion.yaml @@ -243,29 +243,6 @@ spec: - mountPath: /tmp/k8s-webhook-server/serving-certs name: cert readOnly: true - - args: - - --secure-listen-address=0.0.0.0:8443 - - --upstream=http://127.0.0.1:8080/ - - --logtostderr=true - - --v=0 - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.15.0 - name: kube-rbac-proxy - ports: - - containerPort: 8443 - name: https - protocol: TCP - resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 5m - memory: 64Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL securityContext: runAsNonRoot: false serviceAccountName: instaslice-operator-controller-manager diff --git a/cmd/controller/main.go b/cmd/controller/main.go index 5ff1612f..39bd08e9 100644 --- a/cmd/controller/main.go +++ b/cmd/controller/main.go @@ -33,6 +33,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/log/zap" + metricsfilters "sigs.k8s.io/controller-runtime/pkg/metrics/filters" metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" @@ -66,7 +67,7 @@ func main() { flag.BoolVar(&enableLeaderElection, "leader-elect", false, "Enable leader election for controller manager. 
"+ "Enabling this will ensure there is only one active controller manager.") - flag.BoolVar(&secureMetrics, "metrics-secure", false, + flag.BoolVar(&secureMetrics, "metrics-secure", true, "If set the metrics endpoint is served securely") flag.BoolVar(&enableHTTP2, "enable-http2", false, "If set, HTTP/2 will be enabled for the metrics and webhook servers") @@ -104,6 +105,9 @@ func main() { BindAddress: metricsAddr, SecureServing: secureMetrics, TLSOpts: tlsOpts, + // enable the authN and authZ filter to support the deprecation of gcr.io/kubebuilder/kube-rbac-proxy image + // Ref: https://github.com/openshift/instaslice-operator/issues/291 + FilterProvider: metricsfilters.WithAuthenticationAndAuthorization, }, WebhookServer: webhookServer, HealthProbeBindAddress: probeAddr, diff --git a/cmd/daemonset/main.go b/cmd/daemonset/main.go index 5c502a5b..05dec69d 100644 --- a/cmd/daemonset/main.go +++ b/cmd/daemonset/main.go @@ -33,6 +33,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/log/zap" + metricsfilters "sigs.k8s.io/controller-runtime/pkg/metrics/filters" metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" "sigs.k8s.io/controller-runtime/pkg/webhook" @@ -65,7 +66,7 @@ func main() { flag.BoolVar(&enableLeaderElection, "leader-elect", false, "Enable leader election for controller manager. 
"+ "Enabling this will ensure there is only one active controller manager.") - flag.BoolVar(&secureMetrics, "metrics-secure", false, + flag.BoolVar(&secureMetrics, "metrics-secure", true, "If set the metrics endpoint is served securely") flag.BoolVar(&enableHTTP2, "enable-http2", false, "If set, HTTP/2 will be enabled for the metrics and webhook servers") @@ -106,6 +107,9 @@ func main() { BindAddress: metricsAddr, SecureServing: secureMetrics, TLSOpts: tlsOpts, + // enable the authN and authZ filter to support the deprecation of gcr.io/kubebuilder/kube-rbac-proxy image + // Ref: https://github.com/openshift/instaslice-operator/issues/291 + FilterProvider: metricsfilters.WithAuthenticationAndAuthorization, }, WebhookServer: webhookServer, HealthProbeBindAddress: probeAddr, diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index 717eec6d..4b68fe82 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -25,12 +25,14 @@ resources: - ../certmanager # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. #- ../prometheus +- manager_metrics_service.yaml patches: # Protect the /metrics endpoint by putting it behind auth. # If you want your controller-manager to expose the /metrics # endpoint w/o any authn/z, please comment the following line. 
-- path: manager_auth_proxy_patch.yaml +#- path: manager_auth_proxy_patch.yaml +- path: manager_config_patch.yaml # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in # crd/kustomization.yaml diff --git a/config/default/manager_config_patch.yaml b/config/default/manager_config_patch.yaml index f6f58916..550a5740 100644 --- a/config/default/manager_config_patch.yaml +++ b/config/default/manager_config_patch.yaml @@ -8,3 +8,7 @@ spec: spec: containers: - name: manager + args: + - "--health-probe-bind-address=:8081" + - "--metrics-bind-address=:8443" + - "--leader-elect" diff --git a/config/default/manager_metrics_service.yaml b/config/default/manager_metrics_service.yaml new file mode 100644 index 00000000..b5db6676 --- /dev/null +++ b/config/default/manager_metrics_service.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: service + app.kubernetes.io/instance: controller-manager-metrics-service + app.kubernetes.io/created-by: instaslice-operator + app.kubernetes.io/part-of: instaslice-operator + app.kubernetes.io/managed-by: kustomize + name: controller-manager-metrics-service + namespace: system +spec: + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: 8443 + selector: + control-plane: controller-manager diff --git a/config/rbac/auth_proxy_client_clusterrole.yaml b/config/rbac/auth_proxy_client_clusterrole.yaml index dc5dd1e3..510f00e4 100644 --- a/config/rbac/auth_proxy_client_clusterrole.yaml +++ b/config/rbac/auth_proxy_client_clusterrole.yaml @@ -4,7 +4,6 @@ metadata: labels: app.kubernetes.io/name: clusterrole app.kubernetes.io/instance: metrics-reader - app.kubernetes.io/component: kube-rbac-proxy app.kubernetes.io/created-by: instaslice-operator app.kubernetes.io/part-of: instaslice-operator app.kubernetes.io/managed-by: kustomize diff --git a/config/rbac/auth_proxy_metrics_reader_role_binding.yaml 
b/config/rbac/auth_proxy_metrics_reader_role_binding.yaml new file mode 100644 index 00000000..7256d5b9 --- /dev/null +++ b/config/rbac/auth_proxy_metrics_reader_role_binding.yaml @@ -0,0 +1,18 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: clusterrolebinding + app.kubernetes.io/instance: metrics-reader-rolebinding + app.kubernetes.io/created-by: instaslice-operator + app.kubernetes.io/part-of: instaslice-operator + app.kubernetes.io/managed-by: kustomize + name: metrics-reader-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: metrics-reader +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/config/rbac/auth_proxy_role.yaml b/config/rbac/auth_proxy_role.yaml index 553163f5..92362732 100644 --- a/config/rbac/auth_proxy_role.yaml +++ b/config/rbac/auth_proxy_role.yaml @@ -4,7 +4,6 @@ metadata: labels: app.kubernetes.io/name: clusterrole app.kubernetes.io/instance: proxy-role - app.kubernetes.io/component: kube-rbac-proxy app.kubernetes.io/created-by: instaslice-operator app.kubernetes.io/part-of: instaslice-operator app.kubernetes.io/managed-by: kustomize diff --git a/config/rbac/auth_proxy_role_binding.yaml b/config/rbac/auth_proxy_role_binding.yaml index 135ddb09..ba27ea69 100644 --- a/config/rbac/auth_proxy_role_binding.yaml +++ b/config/rbac/auth_proxy_role_binding.yaml @@ -4,7 +4,6 @@ metadata: labels: app.kubernetes.io/name: clusterrolebinding app.kubernetes.io/instance: proxy-rolebinding - app.kubernetes.io/component: kube-rbac-proxy app.kubernetes.io/created-by: instaslice-operator app.kubernetes.io/part-of: instaslice-operator app.kubernetes.io/managed-by: kustomize diff --git a/config/rbac/auth_proxy_service.yaml b/config/rbac/auth_proxy_service.yaml index b2575806..27610fe6 100644 --- a/config/rbac/auth_proxy_service.yaml +++ b/config/rbac/auth_proxy_service.yaml @@ -5,7 +5,6 @@ metadata: control-plane: 
controller-manager app.kubernetes.io/name: service app.kubernetes.io/instance: controller-manager-metrics-service - app.kubernetes.io/component: kube-rbac-proxy app.kubernetes.io/created-by: instaslice-operator app.kubernetes.io/part-of: instaslice-operator app.kubernetes.io/managed-by: kustomize diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml index f5f822c0..88cd8562 100644 --- a/config/rbac/kustomization.yaml +++ b/config/rbac/kustomization.yaml @@ -12,10 +12,11 @@ resources: # Comment the following 4 lines if you want to disable # the auth proxy (https://github.com/brancz/kube-rbac-proxy) # which protects your /metrics endpoint. -- auth_proxy_service.yaml +#- auth_proxy_service.yaml - auth_proxy_role.yaml - auth_proxy_role_binding.yaml - auth_proxy_client_clusterrole.yaml +- auth_proxy_metrics_reader_role_binding.yaml #- instaslice-operator-scc.yaml #- openshift_cluster_role.yaml #- openshift_scc_cluster_role_binding.yaml diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index ececdaa4..f68ab2ea 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -21,8 +21,13 @@ import ( "fmt" "log" "os" + "os/exec" + "path/filepath" "time" + "github.com/openshift/instaslice-operator/test/utils" + "k8s.io/apimachinery/pkg/util/json" + . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" @@ -63,6 +68,11 @@ var ( k8sClient client.Client ) +const ( + instasliceMetricSvc = "instaslice-operator-controller-manager-metrics-service" + instasliceServiceAccount = "instaslice-operator-controller-manager" +) + type TemplateVars struct { NodeName string } @@ -190,6 +200,71 @@ var _ = Describe("controller", Ordered, func() { return fmt.Errorf("finalizer %s not found on Pod %s", controller.FinalizerName, pod.Name) }, time.Minute, 5*time.Second).Should(Succeed(), "Failed to verify finalizer on Pod") }) + It("should ensure the metrics endpoint is serving metrics", func() { + By("creating a ClusterRole to access /metrics endpoint") + clusterRole := resources.GetClusterRole() + err := k8sClient.Create(ctx, clusterRole) + Expect(err).NotTo(HaveOccurred(), "Failed to create the ClusterRole") + + DeferCleanup(func() { + err = k8sClient.Delete(ctx, clusterRole) + if err != nil { + log.Printf("Error deleting the ClusterRole %+v: %+v", clusterRole, err) + } + }) + By("creating a ClusterRoleBinding for the service account to allow access to metrics") + clusterRoleBinding := resources.GetClusterRoleBinding() + err = k8sClient.Create(ctx, clusterRoleBinding) + Expect(err).NotTo(HaveOccurred(), "Failed to create the ClusterRoleBinding") + + DeferCleanup(func() { + err = k8sClient.Delete(ctx, clusterRoleBinding) + if err != nil { + log.Printf("Error deleting the ClusterRoleBinding %+v: %+v", clusterRoleBinding, err) + } + }) + + By("validating that the metrics service is available") + var svc corev1.Service + err = k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: instasliceMetricSvc}, &svc) + Expect(err).NotTo(HaveOccurred(), "Metrics service should exist") + + By("getting the service account token") + token, err := serviceAccountToken() + Expect(err).NotTo(HaveOccurred()) + Expect(token).NotTo(BeEmpty()) + + By("waiting for the metrics endpoint to be ready") + verifyMetricsEndpointReady := func(g Gomega) { + var endPoints corev1.Endpoints + 
err = k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: instasliceMetricSvc}, &endPoints) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(endPoints.Subsets).NotTo(BeEmpty(), "Metrics endpoint has no subsets") + g.Expect(endPoints.Subsets[0].Ports).NotTo(BeEmpty(), "Metrics endpoint has no ports") + g.Expect(endPoints.Subsets[0].String()).To(ContainSubstring("8443"), "Metrics endpoint is not ready") + } + Eventually(verifyMetricsEndpointReady).Should(Succeed()) + + By("creating the curl-metrics pod to access the metrics endpoint") + metricsPod := resources.GetMetricPod(token) + err = k8sClient.Create(ctx, metricsPod) + Expect(err).NotTo(HaveOccurred(), "Failed to create curl-metrics pod") + + By("waiting for the curl-metrics pod to complete.") + verifyCurlUp := func(g Gomega) { + var pod corev1.Pod + err = k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: metricsPod.Name}, &pod) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(pod.Status.Phase).To(Equal(corev1.PodSucceeded), "Metrics pod status not matched") + } + Eventually(verifyCurlUp, 5*time.Minute).Should(Succeed()) + + By("getting the metrics by checking curl-metrics logs") + metricsOutput := getMetricsOutput() + Expect(metricsOutput).To(ContainSubstring( + "controller_runtime_reconcile_total", + )) + }) It("should create a pod with no requests and check the allocation in instaslice object", func() { pod := resources.GetVectorAddNoReqPod() err := k8sClient.Create(ctx, pod) @@ -544,3 +619,62 @@ func getNodeName(label map[string]string) (string, error) { return "", fmt.Errorf("no node name found for pods with label: %v", label) } + +// tokenRequest is a simplified representation of the Kubernetes TokenRequest API response, +// containing only the token field that we need to extract. +type tokenRequest struct { + Status struct { + Token string `json:"token"` + } `json:"status"` +} + +// serviceAccountToken returns a token for the specified service account in the given namespace. 
+// It uses the Kubernetes TokenRequest API to generate a token by directly sending a request +// and parsing the resulting token from the API response. +func serviceAccountToken() (string, error) { + const tokenRequestRawString = `{ + "apiVersion": "authentication.k8s.io/v1", + "kind": "TokenRequest" + }` + + // Temporary file to store the token request + secretName := fmt.Sprintf("%s-token-request", instasliceServiceAccount) + tokenRequestFile := filepath.Join("/tmp", secretName) + err := os.WriteFile(tokenRequestFile, []byte(tokenRequestRawString), os.FileMode(0o644)) + if err != nil { + return "", err + } + + var out string + verifyTokenCreation := func(g Gomega) { + // Execute kubectl command to create the token + cmd := exec.Command("kubectl", "create", "--raw", fmt.Sprintf( + "/api/v1/namespaces/%s/serviceaccounts/%s/token", + namespace, + instasliceServiceAccount, + ), "-f", tokenRequestFile) + + output, err := cmd.CombinedOutput() + g.Expect(err).NotTo(HaveOccurred()) + + // Parse the JSON output to extract the token + var token tokenRequest + err = json.Unmarshal(output, &token) + g.Expect(err).NotTo(HaveOccurred()) + + out = token.Status.Token + } + Eventually(verifyTokenCreation).Should(Succeed()) + + return out, err +} + +// getMetricsOutput retrieves and returns the logs from the curl pod used to access the metrics endpoint. +func getMetricsOutput() string { + By("getting the curl-metrics logs") + cmd := exec.Command("kubectl", "logs", "curl-metrics", "-n", namespace) + metricsOutput, err := utils.Run(cmd) + Expect(err).NotTo(HaveOccurred(), "Failed to retrieve logs from curl pod") + Expect(metricsOutput).To(ContainSubstring("< HTTP/1.1 200 OK")) + return string(metricsOutput) +} diff --git a/test/e2e/resources/resource_generator.go b/test/e2e/resources/resource_generator.go index fa6bb565..a9a0e112 100644 --- a/test/e2e/resources/resource_generator.go +++ b/test/e2e/resources/resource_generator.go @@ -17,12 +17,15 @@ limitations under the License. 
package resources import ( + "fmt" + "github.com/openshift/instaslice-operator/api/v1alpha1" inferencev1alpha1 "github.com/openshift/instaslice-operator/api/v1alpha1" "github.com/openshift/instaslice-operator/internal/controller" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" + rbac "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -461,3 +464,76 @@ func GetMultiPods() []*corev1.Pod { } return pods } + +func GetClusterRoleBinding() *rbac.ClusterRoleBinding { + sub := rbac.Subject{ + Kind: "ServiceAccount", + Name: "instaslice-operator-controller-manager", + Namespace: controller.InstaSliceOperatorNamespace, + } + return &rbac.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "metrics-reader-rolebinding", + Namespace: controller.InstaSliceOperatorNamespace, + Labels: map[string]string{ + "app.kubernetes.io/name": "clusterrolebinding", + "app.kubernetes.io/instance": "metrics-reader-rolebinding", + "app.kubernetes.io/component": "rbac", + "app.kubernetes.io/created-by": "instaslice-operator", + }, + }, + Subjects: []rbac.Subject{sub}, + RoleRef: rbac.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: "metrics-reader", + }, + } +} + +func GetClusterRole() *rbac.ClusterRole { + policyRule := rbac.PolicyRule{ + Verbs: []string{"get"}, + NonResourceURLs: []string{"/metrics"}, + } + return &rbac.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: "metrics-reader", + Namespace: controller.InstaSliceOperatorNamespace, + Labels: map[string]string{ + "app.kubernetes.io/name": "clusterrole", + "app.kubernetes.io/instance": "metrics-reader", + "app.kubernetes.io/component": "rbac", + "app.kubernetes.io/created-by": "instaslice-operator", + }, + }, + Rules: []rbac.PolicyRule{policyRule}, + } +} + +func GetMetricPod(token string) *corev1.Pod { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "curl-metrics", + Namespace: 
controller.InstaSliceOperatorNamespace, + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + TerminationGracePeriodSeconds: func(i int64) *int64 { return &i }(0), + ServiceAccountName: "instaslice-operator-controller-manager", + Containers: []corev1.Container{ + { + Name: "metrics-consumer", + Image: "curlimages/curl:7.78.0", + Command: []string{ + "/bin/sh", + }, + Args: []string{"-c", fmt.Sprintf( + "curl -v -k -H 'Authorization: Bearer %s' https://%s.%s.svc.cluster.local:8443/metrics", + token, "instaslice-operator-controller-manager-metrics-service", controller.InstaSliceOperatorNamespace)}, + }, + }, + }, + } + return pod +}