From ffa087d7c4232dfffc98c5caa4f07e2de81c7906 Mon Sep 17 00:00:00 2001 From: Israel Blancas Date: Fri, 20 Sep 2024 13:37:03 +0200 Subject: [PATCH 1/3] Add must-gather Signed-off-by: Israel Blancas --- .../workflows/reusable-publish-images.yaml | 6 + Makefile | 13 + cmd/gather/Dockerfile | 34 ++ cmd/gather/README.md | 36 ++ cmd/gather/cluster/cluster.go | 419 ++++++++++++++++++ cmd/gather/cluster/write.go | 109 +++++ cmd/gather/config/config.go | 75 ++++ cmd/gather/main.go | 84 ++++ go.mod | 5 +- go.sum | 9 + 10 files changed, 789 insertions(+), 1 deletion(-) create mode 100644 cmd/gather/Dockerfile create mode 100644 cmd/gather/README.md create mode 100644 cmd/gather/cluster/cluster.go create mode 100644 cmd/gather/cluster/write.go create mode 100644 cmd/gather/config/config.go create mode 100644 cmd/gather/main.go diff --git a/.github/workflows/reusable-publish-images.yaml b/.github/workflows/reusable-publish-images.yaml index 6a063e5d5..cb21de198 100644 --- a/.github/workflows/reusable-publish-images.yaml +++ b/.github/workflows/reusable-publish-images.yaml @@ -84,3 +84,9 @@ jobs: run: make bundle bundle-build bundle-push catalog-build catalog-push env: IMG_PREFIX: ghcr.io/${{ github.repository }} + + - name: Publish must-gather image + if: ${{ inputs.publish_bundle }} + run: make container-must-gather container-must-gather-push + env: + IMG_PREFIX: ghcr.io/${{ github.repository }} diff --git a/Makefile b/Makefile index 8e46b1cee..81511b88e 100644 --- a/Makefile +++ b/Makefile @@ -15,6 +15,7 @@ JAEGER_QUERY_IMAGE ?= docker.io/jaegertracing/jaeger-query:$(JAEGER_QUERY_VERSIO TEMPO_QUERY_IMAGE ?= docker.io/grafana/tempo-query:$(TEMPO_QUERY_VERSION) TEMPO_GATEWAY_IMAGE ?= quay.io/observatorium/api:$(TEMPO_GATEWAY_VERSION) TEMPO_GATEWAY_OPA_IMAGE ?= quay.io/observatorium/opa-openshift:$(TEMPO_GATEWAY_OPA_VERSION) +MUSTGATHER_IMAGE ?= ${IMG_PREFIX}/must-gather:$(OPERATOR_VERSION) OAUTH_PROXY_IMAGE ?= quay.io/openshift/origin-oauth-proxy:$(OAUTH_PROXY_VERSION) VERSION_PKG ?= github.com/grafana/tempo-operator/internal/version @@ -146,6 +147,10 @@ test: manifests generate fmt setup-envtest ## Run tests. build: generate fmt ## Build manager binary. CGO_ENABLED=0 go build -o bin/manager -ldflags ${LD_FLAGS} main.go +.PHONY: must-gather +must-gather: + CGO_ENABLED=0 go build -o bin/must-gather ./cmd/gather/main.go + .PHONY: run run: manifests generate ## Run a controller from your host. @echo -e "\033[33mRemoving tempo-operator from the cluster. Webhooks are disabled, use the normal deployment method to enable full operator functionality.\033[0m" @@ -161,6 +166,14 @@ run: manifests generate ## Run a controller from your host. RELATED_IMAGE_OAUTH_PROXY=$(OAUTH_PROXY_IMAGE) \ go run -ldflags ${LD_FLAGS} ./main.go --zap-log-level=info start +.PHONY: container-must-gather +container-must-gather: + docker build -f cmd/gather/Dockerfile --load -t ${MUSTGATHER_IMAGE} . + +.PHONY: container-must-gather-push +container-must-gather-push: + docker push ${MUSTGATHER_IMAGE} + .PHONY: docker-build docker-build: ## Build docker image with the manager. docker buildx build --load --platform linux/${ARCH} --build-arg OPERATOR_VERSION --build-arg TEMPO_VERSION -t ${IMG} . 
diff --git a/cmd/gather/Dockerfile b/cmd/gather/Dockerfile new file mode 100644 index 000000000..cc71e8627 --- /dev/null +++ b/cmd/gather/Dockerfile @@ -0,0 +1,34 @@ +# Build the must-gather binary +FROM golang:1.22 as builder + +WORKDIR /workspace +# Cache tool dependencies +COPY Makefile Makefile +RUN make controller-gen kustomize +# Copy the Go Modules manifests +COPY go.mod go.mod +COPY go.sum go.sum +# cache deps before building and copying source so that we don't need to re-download as much +# and so that source changes don't invalidate our downloaded layer +RUN go mod download + +# Copy the go source +COPY . . + +RUN make must-gather + + +FROM registry.access.redhat.com/ubi9-minimal:9.2 + +RUN INSTALL_PKGS=" \ + rsync \ + tar \ + " && \ + microdnf install -y $INSTALL_PKGS && \ + microdnf clean all + +COPY --from=builder /workspace/bin/must-gather /usr/bin/must-gather + +USER 65532:65532 + +ENTRYPOINT ["/usr/bin/must-gather"] diff --git a/cmd/gather/README.md b/cmd/gather/README.md new file mode 100644 index 000000000..fa8630fd8 --- /dev/null +++ b/cmd/gather/README.md @@ -0,0 +1,36 @@ +# Tempo Operator Must-Gather + +The Tempo Operator `must-gather` tool is designed to collect comprehensive information about Tempo components within an OpenShift cluster. This utility extends the functionality of [OpenShift must-gather](https://github.com/openshift/must-gather) by specifically targeting and retrieving data related to the Tempo Operator, helping with diagnostics and troubleshooting. + +You can also use this utility to gather information about the objects deployed by the Tempo Operator even if you don't use OpenShift. + +## What is a Must-Gather? + +The `must-gather` tool is a utility that collects logs, cluster information, and resource configurations related to a specific operator or application in an OpenShift cluster. It helps cluster administrators and developers diagnose issues by providing a snapshot of the cluster's state related to the targeted component. More information is available [in the official documentation](https://docs.openshift.com/container-platform/4.16/support/gathering-cluster-data.html). + +## Usage + +First, you need to build and push the image: +```sh +make container-must-gather container-must-gather-push +``` + +To run the must-gather tool for the Tempo Operator, use one of the following approaches, depending on how you want to source the image and on the namespace where the operator is deployed. + +### Using the image from the Operator deployment + +If you want to run the collector from the image against a running cluster, use the following command: + +```sh +oc adm must-gather --image=<must-gather image> -- /usr/bin/must-gather --operator-namespace tempo-operator-system +``` + +### Using it as a CLI + +Build and run the binary: +```sh +make must-gather +./bin/must-gather --help +``` + +This is the recommended approach if you are not using OpenShift; an example invocation is shown below.
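As a rough sketch, a local run could look like this (the flag names come from `cmd/gather/config/config.go`; the namespace and output directory are only examples, adjust them to your cluster):

```sh
# Example invocation: gather data for an operator assumed to be deployed in the
# tempo-operator-system namespace and store the output in /tmp/tempo-must-gather.
./bin/must-gather \
  --operator-namespace tempo-operator-system \
  --collection-dir /tmp/tempo-must-gather \
  --kubeconfig ~/.kube/config
```

If `--operator-namespace` is omitted, the tool locates the operator deployment by its `app.kubernetes.io/name=tempo-operator` label; if `--kubeconfig` is omitted, it first tries the in-cluster config and then falls back to `~/.kube/config`.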
\ No newline at end of file diff --git a/cmd/gather/cluster/cluster.go b/cmd/gather/cluster/cluster.go new file mode 100644 index 000000000..c98a5d8ed --- /dev/null +++ b/cmd/gather/cluster/cluster.go @@ -0,0 +1,419 @@ +package cluster + +import ( + "context" + "fmt" + "log" + "os" + "path/filepath" + "reflect" + "strings" + + tempov1alpha1 "github.com/grafana/tempo-operator/apis/tempo/v1alpha1" + "github.com/grafana/tempo-operator/cmd/gather/config" + operatorsv1 "github.com/operator-framework/api/pkg/operators/v1" + operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + routev1 "github.com/openshift/api/route/v1" + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + autoscalingv2 "k8s.io/api/autoscaling/v2" + networkingv1 "k8s.io/api/networking/v1" + policy1 "k8s.io/api/policy/v1" + rbacv1 "k8s.io/api/rbac/v1" +) + +type Cluster struct { + config *config.Config + apiAvailabilityCache map[schema.GroupVersionResource]bool +} + +func NewCluster(cfg *config.Config) Cluster { + return Cluster{ + config: cfg, + apiAvailabilityCache: make(map[schema.GroupVersionResource]bool), + } +} + +func (c *Cluster) getOperatorNamespace() (string, error) { + if c.config.OperatorNamespace != "" { + return c.config.OperatorNamespace, nil + } + + deployment, err := c.getOperatorDeployment() + if err != nil { + return "", err + } + + c.config.OperatorNamespace = deployment.Namespace + + return c.config.OperatorNamespace, nil +} + +func (c *Cluster) getOperatorDeployment() (appsv1.Deployment, error) { + operatorDeployments := appsv1.DeploymentList{} + err := c.config.KubernetesClient.List(context.TODO(), &operatorDeployments, &client.ListOptions{ + LabelSelector: labels.SelectorFromSet(labels.Set{ + "app.kubernetes.io/name": "tempo-operator", + }), + }) + + if err != nil { + return appsv1.Deployment{}, err + } + + if len(operatorDeployments.Items) == 0 { + return appsv1.Deployment{}, fmt.Errorf("operator not found") + } + + return operatorDeployments.Items[0], nil + +} + +func (c *Cluster) GetOperatorLogs() error { + deployment, err := c.getOperatorDeployment() + if err != nil { + return err + } + + labelSelector := labels.Set(deployment.Spec.Selector.MatchLabels).AsSelectorPreValidated() + operatorPods := corev1.PodList{} + err = c.config.KubernetesClient.List(context.TODO(), &operatorPods, &client.ListOptions{ + LabelSelector: labelSelector, + }) + if err != nil { + return err + } + + pod := operatorPods.Items[0] + c.getPodLogs(pod.Name, pod.Namespace, "manager") + return nil +} + +func (c *Cluster) getPodLogs(podName, namespace, container string) { + pods := c.config.KubernetesClientSet.CoreV1().Pods(namespace) + writeLogToFile(c.config.CollectionDir, podName, container, pods) +} + +func (c *Cluster) GetOperatorDeploymentInfo() error { + err := os.MkdirAll(c.config.CollectionDir, os.ModePerm) + if err != nil { + return err + } + + deployment, err := c.getOperatorDeployment() + if err != nil { + return err + } + + writeToFile(c.config.CollectionDir, &deployment) + + return nil +} + +func (c *Cluster) GetOLMInfo() error { + if !c.isAPIAvailable(schema.GroupVersionResource{ + Group: operatorsv1.SchemeGroupVersion.Group, + Version: operatorsv1.SchemeGroupVersion.Version, + Resource: "Operator", + }) { + log.Println("OLM info not 
available") + return nil + } + + outputDir := filepath.Join(c.config.CollectionDir, "olm") + err := os.MkdirAll(outputDir, os.ModePerm) + if err != nil { + return err + } + + operatorNamespace, err := c.getOperatorNamespace() + if err != nil { + return err + } + + // Operators + operators := operatorsv1.OperatorList{} + err = c.config.KubernetesClient.List(context.TODO(), &operators, &client.ListOptions{ + Namespace: operatorNamespace, + }) + if err != nil { + return err + } + for _, o := range operators.Items { + o := o + writeToFile(outputDir, &o) + + } + + // OperatorGroups + operatorGroups := operatorsv1.OperatorGroupList{} + err = c.config.KubernetesClient.List(context.TODO(), &operatorGroups, &client.ListOptions{ + Namespace: operatorNamespace, + }) + if err != nil { + return err + } + for _, o := range operatorGroups.Items { + o := o + if strings.Contains(o.Name, "tempo") { + writeToFile(outputDir, &o) + } + } + + // Subscription + subscriptions := operatorsv1alpha1.SubscriptionList{} + err = c.config.KubernetesClient.List(context.TODO(), &subscriptions, &client.ListOptions{ + Namespace: operatorNamespace, + }) + if err != nil { + return err + } + for _, o := range subscriptions.Items { + o := o + writeToFile(outputDir, &o) + } + + // InstallPlan + ips := operatorsv1alpha1.InstallPlanList{} + err = c.config.KubernetesClient.List(context.TODO(), &ips, &client.ListOptions{ + Namespace: operatorNamespace, + }) + if err != nil { + return err + } + for _, o := range ips.Items { + o := o + writeToFile(outputDir, &o) + } + + // ClusterServiceVersion + csvs := operatorsv1alpha1.ClusterServiceVersionList{} + err = c.config.KubernetesClient.List(context.TODO(), &csvs, &client.ListOptions{ + Namespace: operatorNamespace, + }) + if err != nil { + return err + } + for _, o := range csvs.Items { + o := o + if strings.Contains(o.Name, "tempo") { + writeToFile(outputDir, &o) + } + } + + return nil +} + +func (c *Cluster) GetTempoStacks() error { + tempoStacks := tempov1alpha1.TempoStackList{} + + err := c.config.KubernetesClient.List(context.TODO(), &tempoStacks) + if err != nil { + return err + } + + log.Println("TempoStacks found:", len(tempoStacks.Items)) + + errorDetected := false + + for _, tempoStack := range tempoStacks.Items { + tempoStack := tempoStack + err := c.processTempoStack(&tempoStack) + if err != nil { + log.Fatalln(err) + errorDetected = true + } + } + + if errorDetected { + return fmt.Errorf("something failed while getting the tempostacks") + } + return nil +} + +func (c *Cluster) GetTempoMonolithics() error { + tempoMonolithics := tempov1alpha1.TempoMonolithicList{} + + err := c.config.KubernetesClient.List(context.TODO(), &tempoMonolithics) + if err != nil { + return err + } + + log.Println("TempoMonolithic found:", len(tempoMonolithics.Items)) + + errorDetected := false + + for _, tempoMonolithic := range tempoMonolithics.Items { + tempoMonolithic := tempoMonolithic + err := c.processTempoMonolithic(&tempoMonolithic) + if err != nil { + log.Fatalln(err) + errorDetected = true + } + } + + if errorDetected { + return fmt.Errorf("something failed while getting the tempomonolithics") + } + return nil +} + +func (c *Cluster) processTempoStack(tempoStack *tempov1alpha1.TempoStack) error { + log.Printf("Processing TempoStack %s/%s", tempoStack.Namespace, tempoStack.Name) + folder, err := createTempoStackFolder(c.config.CollectionDir, tempoStack) + if err != nil { + return err + } + writeToFile(folder, tempoStack) + + err = c.processOwnedResources(tempoStack, folder) + if err != nil 
{ + return err + } + + return nil +} + +func (c *Cluster) processTempoMonolithic(tempoMonolithic *tempov1alpha1.TempoMonolithic) error { + log.Printf("Processing TempoMonolithic %s/%s", tempoMonolithic.Namespace, tempoMonolithic.Name) + folder, err := createTempoMonolithicFolder(c.config.CollectionDir, tempoMonolithic) + if err != nil { + return err + } + writeToFile(folder, tempoMonolithic) + + err = c.processOwnedResources(tempoMonolithic, folder) + if err != nil { + return err + } + + return nil +} + +func (c *Cluster) processOwnedResources(owner interface{}, folder string) error { + resourceTypes := []struct { + list client.ObjectList + apiCheck func() bool + }{ + {&appsv1.DaemonSetList{}, func() bool { return true }}, + {&appsv1.DeploymentList{}, func() bool { return true }}, + {&appsv1.StatefulSetList{}, func() bool { return true }}, + {&rbacv1.ClusterRoleList{}, func() bool { return true }}, + {&rbacv1.ClusterRoleBindingList{}, func() bool { return true }}, + {&corev1.ConfigMapList{}, func() bool { return true }}, + {&corev1.PersistentVolumeList{}, func() bool { return true }}, + {&corev1.PersistentVolumeClaimList{}, func() bool { return true }}, + {&corev1.PodList{}, func() bool { return true }}, + {&corev1.ServiceList{}, func() bool { return true }}, + {&corev1.ServiceAccountList{}, func() bool { return true }}, + {&autoscalingv2.HorizontalPodAutoscalerList{}, func() bool { return true }}, + {&networkingv1.IngressList{}, func() bool { return true }}, + {&policy1.PodDisruptionBudgetList{}, func() bool { return true }}, + {&monitoringv1.PodMonitorList{}, c.isMonitoringAPIAvailable}, + {&monitoringv1.ServiceMonitorList{}, c.isMonitoringAPIAvailable}, + {&routev1.RouteList{}, c.isRouteAPIAvailable}, + } + + for _, rt := range resourceTypes { + if rt.apiCheck() { + if err := c.processResourceType(rt.list, owner, folder); err != nil { + return err + } + } + } + + return nil +} + +func (c *Cluster) processResourceType(list client.ObjectList, owner interface{}, folder string) error { + resources, err := c.getOwnerResources(list, owner) + if err != nil { + return fmt.Errorf("failed to get resources: %w", err) + } + for _, resource := range resources { + writeToFile(folder, resource) + } + return nil +} + +func (c *Cluster) isMonitoringAPIAvailable() bool { + return c.isAPIAvailable(schema.GroupVersionResource{ + Group: monitoringv1.SchemeGroupVersion.Group, + Version: monitoringv1.SchemeGroupVersion.Version, + Resource: "ServiceMonitor", + }) +} + +func (c *Cluster) isRouteAPIAvailable() bool { + return c.isAPIAvailable(schema.GroupVersionResource{ + Group: routev1.GroupName, + Version: routev1.GroupVersion.Version, + Resource: "Route", + }) +} + +func (c *Cluster) isAPIAvailable(gvr schema.GroupVersionResource) bool { + if result, ok := c.apiAvailabilityCache[gvr]; ok { + return result + } + + rm := c.config.KubernetesClient.RESTMapper() + + gvk, err := rm.KindFor(gvr) + result := err == nil && !gvk.Empty() + c.apiAvailabilityCache[gvr] = result + + return result +} + +func (c *Cluster) getOwnerResources(objList client.ObjectList, owner interface{}) ([]client.Object, error) { + err := c.config.KubernetesClient.List(context.TODO(), objList, &client.ListOptions{ + LabelSelector: labels.SelectorFromSet(labels.Set{ + "app.kubernetes.io/managed-by": "tempo-operator", + }), + }) + if err != nil { + return nil, err + } + + resources := []client.Object{} + + items := reflect.ValueOf(objList).Elem().FieldByName("Items") + for i := 0; i < items.Len(); i++ { + item := 
items.Index(i).Addr().Interface().(client.Object) + if hasOwnerReference(item, owner) { + resources = append(resources, item) + } + } + return resources, nil + +} + +func hasOwnerReference(obj client.Object, owner interface{}) bool { + var ownerKind string + var ownerUID types.UID + + switch o := owner.(type) { + case *tempov1alpha1.TempoStack: + ownerKind = o.Kind + ownerUID = o.UID + case *tempov1alpha1.TempoMonolithic: + ownerKind = o.Kind + ownerUID = o.UID + default: + return false + } + + for _, ownerRef := range obj.GetOwnerReferences() { + if ownerRef.Kind == ownerKind && ownerRef.UID == ownerUID { + return true + } + } + return false +} diff --git a/cmd/gather/cluster/write.go b/cmd/gather/cluster/write.go new file mode 100644 index 000000000..917efe30d --- /dev/null +++ b/cmd/gather/cluster/write.go @@ -0,0 +1,109 @@ +package cluster + +import ( + "context" + "fmt" + "io" + "log" + "os" + "path/filepath" + "reflect" + "strings" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer/json" + cgocorev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + tempov1alpha1 "github.com/grafana/tempo-operator/apis/tempo/v1alpha1" +) + +func createTempoStackFolder(collectionDir string, tempoStack *tempov1alpha1.TempoStack) (string, error) { + outputDir := filepath.Join(collectionDir, "namespaces", tempoStack.Namespace, "tempostack", tempoStack.Name) + err := os.MkdirAll(outputDir, os.ModePerm) + if err != nil { + return "", err + } + return outputDir, nil +} + +func createTempoMonolithicFolder(collectionDir string, tempoMonolith *tempov1alpha1.TempoMonolithic) (string, error) { + outputDir := filepath.Join(collectionDir, "namespaces", tempoMonolith.Namespace, "tempomonolithic", tempoMonolith.Name) + err := os.MkdirAll(outputDir, os.ModePerm) + if err != nil { + return "", err + } + return outputDir, nil +} + +func createFile(outputDir string, obj client.Object) (*os.File, error) { + kind := obj.GetObjectKind().GroupVersionKind().Kind + + if kind == "" { + // reflect.TypeOf(obj) will return something like *v1.Deployment. 
We remove the first part + prefix, typeName, found := strings.Cut(reflect.TypeOf(obj).String(), ".") + if found { + kind = typeName + } else { + kind = prefix + } + } + + kind = strings.ToLower(kind) + name := strings.ReplaceAll(obj.GetName(), ".", "-") + + path := filepath.Join(outputDir, fmt.Sprintf("%s-%s.yaml", kind, name)) + return os.Create(path) +} + +func writeLogToFile(outputDir, podName, container string, p cgocorev1.PodInterface) { + req := p.GetLogs(podName, &corev1.PodLogOptions{Container: container}) + podLogs, err := req.Stream(context.Background()) + if err != nil { + log.Fatalf("Error getting pod logs: %v\n", err) + return + } + defer podLogs.Close() + + err = os.MkdirAll(outputDir, os.ModePerm) + if err != nil { + log.Fatalln(err) + return + } + + outputFile, err := os.Create(filepath.Join(outputDir, podName)) + if err != nil { + log.Fatalf("Error getting pod logs: %v\n", err) + return + } + + _, err = io.Copy(outputFile, podLogs) + if err != nil { + log.Fatalf("Error copying logs to file: %v\n", err) + } +} + +func writeToFile(outputDir string, o client.Object) { + // Open or create the file for writing + outputFile, err := createFile(outputDir, o) + if err != nil { + log.Fatalf("Failed to create file: %v", err) + } + defer outputFile.Close() + + unstructuredDeployment, err := runtime.DefaultUnstructuredConverter.ToUnstructured(o) + if err != nil { + log.Fatalf("Error converting deployment to unstructured: %v", err) + } + + unstructuredObj := &unstructured.Unstructured{Object: unstructuredDeployment} + + // Serialize the unstructured object to YAML + serializer := json.NewYAMLSerializer(json.DefaultMetaFactory, nil, nil) + err = serializer.Encode(unstructuredObj, outputFile) + if err != nil { + log.Fatalf("Error encoding to YAML: %v", err) + } +} diff --git a/cmd/gather/config/config.go b/cmd/gather/config/config.go new file mode 100644 index 000000000..87cdddf1a --- /dev/null +++ b/cmd/gather/config/config.go @@ -0,0 +1,75 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package config + +import ( + "fmt" + "path/filepath" + + "github.com/spf13/pflag" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/util/homedir" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type Config struct { + CollectionDir string + OperatorName string + OperatorNamespace string + KubernetesClient client.Client + KubernetesClientSet *kubernetes.Clientset +} + +func NewConfig(scheme *runtime.Scheme) (Config, error) { + var operatorName, operatorNamespace, collectionDir, kubeconfigPath string + + pflag.StringVar(&operatorName, "operator-name", "tempo-operator", "Operator name") + pflag.StringVar(&operatorNamespace, "operator-namespace", "", "Namespace where the operator was deployed") + pflag.StringVar(&collectionDir, "collection-dir", filepath.Join(homedir.HomeDir(), "/must-gather"), "Absolute path to the directory where the collected information will be written") + pflag.StringVar(&kubeconfigPath, "kubeconfig", "", "Path to the kubeconfig file") + pflag.Parse() + + config, err := rest.InClusterConfig() + if err != nil { + if kubeconfigPath == "" { + kubeconfigPath = filepath.Join(homedir.HomeDir(), ".kube", "config") + } + config, err = clientcmd.BuildConfigFromFlags("", kubeconfigPath) + if err != nil { + return Config{}, fmt.Errorf("failed to create Kubernetes client config: %w", err) + } + } + + clusterClient, err := client.New(config, client.Options{Scheme: scheme}) + if err != nil { + return Config{}, fmt.Errorf("creating the Kubernetes client: %w", err) + } + + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return Config{}, fmt.Errorf("creating the Kubernetes clientset: %w", err) + } + + return Config{ + CollectionDir: collectionDir, + KubernetesClient: clusterClient, + KubernetesClientSet: clientset, + OperatorName: operatorName, + OperatorNamespace: operatorNamespace, + }, nil +} diff --git a/cmd/gather/main.go b/cmd/gather/main.go new file mode 100644 index 000000000..149cf0bd1 --- /dev/null +++ b/cmd/gather/main.go @@ -0,0 +1,84 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package main + +import ( + "log" + "os" + + tempov1alpha1 "github.com/grafana/tempo-operator/apis/tempo/v1alpha1" + routev1 "github.com/openshift/api/route/v1" + operatorsv1 "github.com/operator-framework/api/pkg/operators/v1" + operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" + monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" + appsv1 "k8s.io/api/apps/v1" + autoscalingv2 "k8s.io/api/autoscaling/v2" + corev1 "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + policyV1 "k8s.io/api/policy/v1" + rbacv1 "k8s.io/api/rbac/v1" + k8sruntime "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + + "github.com/grafana/tempo-operator/cmd/gather/cluster" + "github.com/grafana/tempo-operator/cmd/gather/config" +) + +var scheme *k8sruntime.Scheme + +func init() { + scheme = k8sruntime.NewScheme() + utilruntime.Must(tempov1alpha1.AddToScheme(scheme)) + utilruntime.Must(appsv1.AddToScheme(scheme)) + utilruntime.Must(corev1.AddToScheme(scheme)) + utilruntime.Must(networkingv1.AddToScheme(scheme)) + utilruntime.Must(autoscalingv2.AddToScheme(scheme)) + utilruntime.Must(rbacv1.AddToScheme(scheme)) + utilruntime.Must(policyV1.AddToScheme(scheme)) + utilruntime.Must(monitoringv1.AddToScheme(scheme)) + utilruntime.Must(routev1.AddToScheme(scheme)) + utilruntime.Must(operatorsv1.AddToScheme(scheme)) + utilruntime.Must(operatorsv1alpha1.AddToScheme(scheme)) +} + +func main() { + config, err := config.NewConfig(scheme) + if err != nil { + log.Fatalln(err) + os.Exit(1) + } + + cluster := cluster.NewCluster(&config) + err = cluster.GetOperatorLogs() + if err != nil { + log.Fatalln(err) + } + err = cluster.GetOperatorDeploymentInfo() + if err != nil { + log.Fatalln(err) + } + err = cluster.GetOLMInfo() + if err != nil { + log.Fatalln(err) + } + err = cluster.GetTempoStacks() + if err != nil { + log.Fatalln(err) + } + err = cluster.GetTempoMonolithics() + if err != nil { + log.Fatalln(err) + } +} diff --git a/go.mod b/go.mod index 21a784f0b..b48832974 100644 --- a/go.mod +++ b/go.mod @@ -14,10 +14,12 @@ require ( github.com/onsi/ginkgo/v2 v2.19.0 github.com/onsi/gomega v1.33.1 github.com/openshift/library-go v0.0.0-20220622115547-84d884f4c9f6 + github.com/operator-framework/api v0.23.0 github.com/operator-framework/operator-lib v0.13.0 github.com/prometheus/client_golang v1.19.1 github.com/prometheus/common v0.55.0 github.com/spf13/cobra v1.8.1 + github.com/spf13/pflag v1.0.5 go.opentelemetry.io/otel v1.28.0 go.opentelemetry.io/otel/exporters/prometheus v0.50.0 go.opentelemetry.io/otel/metric v1.28.0 @@ -46,6 +48,7 @@ require ( github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver v3.5.1+incompatible // indirect + github.com/blang/semver/v4 v4.0.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.11.2 // indirect @@ -84,7 +87,7 @@ require ( github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/procfs v0.15.1 // indirect - github.com/spf13/pflag v1.0.5 // indirect + github.com/sirupsen/logrus v1.9.2 // indirect github.com/stretchr/objx v0.5.2 // indirect go.mongodb.org/mongo-driver v1.14.0 // indirect go.opentelemetry.io/otel/sdk v1.28.0 // indirect diff --git a/go.sum b/go.sum index 
eec126baf..7f25aad2b 100644 --- a/go.sum +++ b/go.sum @@ -8,6 +8,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= @@ -109,6 +111,8 @@ github.com/openshift/api v0.0.0-20230223193310-d964c7a58d75 h1:OQJsfiach1cKBI1xU github.com/openshift/api v0.0.0-20230223193310-d964c7a58d75/go.mod h1:ctXNyWanKEjGj8sss1KjjHQ3ENKFm33FFnS5BKaIPh4= github.com/openshift/library-go v0.0.0-20220622115547-84d884f4c9f6 h1:lmfmsIGq62lmj17qrZh4Gbbb86WvJw6pLhCNwNjB2Yk= github.com/openshift/library-go v0.0.0-20220622115547-84d884f4c9f6/go.mod h1:AMZwYwSdbvALDl3QobEzcJ2IeDO7DYLsr42izKzh524= +github.com/operator-framework/api v0.23.0 h1:kHymOwcHBpBVujT49SKOCd4EVG7Odwj4wl3NbOR2LLA= +github.com/operator-framework/api v0.23.0/go.mod h1:oKcFOz+Xc1UhMi2Pzcp6qsO7wjS4r+yP7EQprQBXrfM= github.com/operator-framework/operator-lib v0.13.0 h1:+TWgJhbJqyNix9m1LmHK5gY/lb3CGqZX3Wvl7K0k+6I= github.com/operator-framework/operator-lib v0.13.0/go.mod h1:RDs1wGdOKWSMCO+BYSbqmmKGnD5jOP7TVP+KvoX8jMg= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -129,6 +133,8 @@ github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoG github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sirupsen/logrus v1.9.2 h1:oxx1eChJGI6Uks2ZC4W1zpLlVgqB8ner4EuQwV4Ik1Y= +github.com/sirupsen/logrus v1.9.2/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -138,6 +144,7 @@ github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -183,6 +190,7 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= @@ -216,6 +224,7 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/api v0.29.4 h1:WEnF/XdxuCxdG3ayHNRR8yH3cI1B/llkWBma6bq4R3w= From 0f47b0fd788af33e4516328181336f0d21f2c619 Mon Sep 17 00:00:00 2001 From: Israel Blancas Date: Mon, 23 Sep 2024 10:46:21 +0200 Subject: [PATCH 2/3] Fix lint Signed-off-by: Israel Blancas --- cmd/gather/cluster/cluster.go | 49 ++++++++++++++++++++--------------- cmd/gather/cluster/write.go | 18 ++++++++++--- cmd/gather/config/config.go | 2 ++ cmd/gather/main.go | 3 ++- 4 files changed, 46 insertions(+), 26 deletions(-) diff --git a/cmd/gather/cluster/cluster.go b/cmd/gather/cluster/cluster.go index c98a5d8ed..4af874cd8 100644 --- a/cmd/gather/cluster/cluster.go +++ b/cmd/gather/cluster/cluster.go @@ -9,8 +9,6 @@ import ( "reflect" "strings" - tempov1alpha1 "github.com/grafana/tempo-operator/apis/tempo/v1alpha1" - "github.com/grafana/tempo-operator/cmd/gather/config" operatorsv1 "github.com/operator-framework/api/pkg/operators/v1" operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" appsv1 "k8s.io/api/apps/v1" @@ -20,6 +18,9 @@ import ( "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" + tempov1alpha1 "github.com/grafana/tempo-operator/apis/tempo/v1alpha1" + "github.com/grafana/tempo-operator/cmd/gather/config" + routev1 "github.com/openshift/api/route/v1" monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" autoscalingv2 "k8s.io/api/autoscaling/v2" @@ -28,19 +29,20 @@ import ( rbacv1 "k8s.io/api/rbac/v1" ) -type Cluster struct { +type cluster struct { config *config.Config apiAvailabilityCache map[schema.GroupVersionResource]bool } -func NewCluster(cfg *config.Config) Cluster { - return Cluster{ +// NewCluster creates a new cluster. 
+func NewCluster(cfg *config.Config) cluster { + return cluster{ config: cfg, apiAvailabilityCache: make(map[schema.GroupVersionResource]bool), } } -func (c *Cluster) getOperatorNamespace() (string, error) { +func (c *cluster) getOperatorNamespace() (string, error) { if c.config.OperatorNamespace != "" { return c.config.OperatorNamespace, nil } @@ -55,7 +57,7 @@ func (c *Cluster) getOperatorNamespace() (string, error) { return c.config.OperatorNamespace, nil } -func (c *Cluster) getOperatorDeployment() (appsv1.Deployment, error) { +func (c *cluster) getOperatorDeployment() (appsv1.Deployment, error) { operatorDeployments := appsv1.DeploymentList{} err := c.config.KubernetesClient.List(context.TODO(), &operatorDeployments, &client.ListOptions{ LabelSelector: labels.SelectorFromSet(labels.Set{ @@ -75,7 +77,8 @@ func (c *Cluster) getOperatorDeployment() (appsv1.Deployment, error) { } -func (c *Cluster) GetOperatorLogs() error { +// GetOperatorLogs gets the operator logs from the cluster. +func (c *cluster) GetOperatorLogs() error { deployment, err := c.getOperatorDeployment() if err != nil { return err @@ -95,12 +98,13 @@ func (c *Cluster) GetOperatorLogs() error { return nil } -func (c *Cluster) getPodLogs(podName, namespace, container string) { +func (c *cluster) getPodLogs(podName, namespace, container string) { pods := c.config.KubernetesClientSet.CoreV1().Pods(namespace) writeLogToFile(c.config.CollectionDir, podName, container, pods) } -func (c *Cluster) GetOperatorDeploymentInfo() error { +// GetOperatorDeploymentInfo gets the operator deployment info from the cluster. +func (c *cluster) GetOperatorDeploymentInfo() error { err := os.MkdirAll(c.config.CollectionDir, os.ModePerm) if err != nil { return err @@ -116,7 +120,8 @@ func (c *Cluster) GetOperatorDeploymentInfo() error { return nil } -func (c *Cluster) GetOLMInfo() error { +// GetOLMInfo gets the OLM info from the cluster. +func (c *cluster) GetOLMInfo() error { if !c.isAPIAvailable(schema.GroupVersionResource{ Group: operatorsv1.SchemeGroupVersion.Group, Version: operatorsv1.SchemeGroupVersion.Version, @@ -210,7 +215,8 @@ func (c *Cluster) GetOLMInfo() error { return nil } -func (c *Cluster) GetTempoStacks() error { +// GetTempoStacks gets all the TempoStacks in the cluster and resources owned by them. +func (c *cluster) GetTempoStacks() error { tempoStacks := tempov1alpha1.TempoStackList{} err := c.config.KubernetesClient.List(context.TODO(), &tempoStacks) @@ -237,7 +243,8 @@ func (c *Cluster) GetTempoStacks() error { return nil } -func (c *Cluster) GetTempoMonolithics() error { +// GetTempoMonolithics gets all the TempoMonolithics in the cluster and resources owned by them. 
+func (c *cluster) GetTempoMonolithics() error { tempoMonolithics := tempov1alpha1.TempoMonolithicList{} err := c.config.KubernetesClient.List(context.TODO(), &tempoMonolithics) @@ -264,7 +271,7 @@ func (c *Cluster) GetTempoMonolithics() error { return nil } -func (c *Cluster) processTempoStack(tempoStack *tempov1alpha1.TempoStack) error { +func (c *cluster) processTempoStack(tempoStack *tempov1alpha1.TempoStack) error { log.Printf("Processing TempoStack %s/%s", tempoStack.Namespace, tempoStack.Name) folder, err := createTempoStackFolder(c.config.CollectionDir, tempoStack) if err != nil { @@ -280,7 +287,7 @@ func (c *Cluster) processTempoStack(tempoStack *tempov1alpha1.TempoStack) error return nil } -func (c *Cluster) processTempoMonolithic(tempoMonolithic *tempov1alpha1.TempoMonolithic) error { +func (c *cluster) processTempoMonolithic(tempoMonolithic *tempov1alpha1.TempoMonolithic) error { log.Printf("Processing TempoMonolithic %s/%s", tempoMonolithic.Namespace, tempoMonolithic.Name) folder, err := createTempoMonolithicFolder(c.config.CollectionDir, tempoMonolithic) if err != nil { @@ -296,7 +303,7 @@ func (c *Cluster) processTempoMonolithic(tempoMonolithic *tempov1alpha1.TempoMon return nil } -func (c *Cluster) processOwnedResources(owner interface{}, folder string) error { +func (c *cluster) processOwnedResources(owner interface{}, folder string) error { resourceTypes := []struct { list client.ObjectList apiCheck func() bool @@ -331,7 +338,7 @@ func (c *Cluster) processOwnedResources(owner interface{}, folder string) error return nil } -func (c *Cluster) processResourceType(list client.ObjectList, owner interface{}, folder string) error { +func (c *cluster) processResourceType(list client.ObjectList, owner interface{}, folder string) error { resources, err := c.getOwnerResources(list, owner) if err != nil { return fmt.Errorf("failed to get resources: %w", err) @@ -342,7 +349,7 @@ func (c *Cluster) processResourceType(list client.ObjectList, owner interface{}, return nil } -func (c *Cluster) isMonitoringAPIAvailable() bool { +func (c *cluster) isMonitoringAPIAvailable() bool { return c.isAPIAvailable(schema.GroupVersionResource{ Group: monitoringv1.SchemeGroupVersion.Group, Version: monitoringv1.SchemeGroupVersion.Version, @@ -350,7 +357,7 @@ func (c *Cluster) isMonitoringAPIAvailable() bool { }) } -func (c *Cluster) isRouteAPIAvailable() bool { +func (c *cluster) isRouteAPIAvailable() bool { return c.isAPIAvailable(schema.GroupVersionResource{ Group: routev1.GroupName, Version: routev1.GroupVersion.Version, @@ -358,7 +365,7 @@ func (c *Cluster) isRouteAPIAvailable() bool { }) } -func (c *Cluster) isAPIAvailable(gvr schema.GroupVersionResource) bool { +func (c *cluster) isAPIAvailable(gvr schema.GroupVersionResource) bool { if result, ok := c.apiAvailabilityCache[gvr]; ok { return result } @@ -372,7 +379,7 @@ func (c *Cluster) isAPIAvailable(gvr schema.GroupVersionResource) bool { return result } -func (c *Cluster) getOwnerResources(objList client.ObjectList, owner interface{}) ([]client.Object, error) { +func (c *cluster) getOwnerResources(objList client.ObjectList, owner interface{}) ([]client.Object, error) { err := c.config.KubernetesClient.List(context.TODO(), objList, &client.ListOptions{ LabelSelector: labels.SelectorFromSet(labels.Set{ "app.kubernetes.io/managed-by": "tempo-operator", diff --git a/cmd/gather/cluster/write.go b/cmd/gather/cluster/write.go index 917efe30d..ca0932959 100644 --- a/cmd/gather/cluster/write.go +++ b/cmd/gather/cluster/write.go @@ -54,7 +54,7 @@ func 
createFile(outputDir string, obj client.Object) (*os.File, error) { kind = strings.ToLower(kind) name := strings.ReplaceAll(obj.GetName(), ".", "-") - path := filepath.Join(outputDir, fmt.Sprintf("%s-%s.yaml", kind, name)) + path := filepath.Clean(filepath.Join(outputDir, fmt.Sprintf("%s-%s.yaml", kind, name))) return os.Create(path) } @@ -65,7 +65,12 @@ func writeLogToFile(outputDir, podName, container string, p cgocorev1.PodInterfa log.Fatalf("Error getting pod logs: %v\n", err) return } - defer podLogs.Close() + defer func() { + err := podLogs.Close() + if err != nil { + log.Fatalf("Error closing pod logs: %v", err) + } + }() err = os.MkdirAll(outputDir, os.ModePerm) if err != nil { @@ -73,7 +78,7 @@ func writeLogToFile(outputDir, podName, container string, p cgocorev1.PodInterfa return } - outputFile, err := os.Create(filepath.Join(outputDir, podName)) + outputFile, err := os.Create(filepath.Clean(filepath.Join(outputDir, podName))) if err != nil { log.Fatalf("Error getting pod logs: %v\n", err) return @@ -91,7 +96,12 @@ func writeToFile(outputDir string, o client.Object) { if err != nil { log.Fatalf("Failed to create file: %v", err) } - defer outputFile.Close() + defer func() { + err := outputFile.Close() + if err != nil { + log.Fatalf("Error closing file: %v", err) + } + }() unstructuredDeployment, err := runtime.DefaultUnstructuredConverter.ToUnstructured(o) if err != nil { diff --git a/cmd/gather/config/config.go b/cmd/gather/config/config.go index 87cdddf1a..b231d4fc2 100644 --- a/cmd/gather/config/config.go +++ b/cmd/gather/config/config.go @@ -27,6 +27,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) +// Config is the must-gather config. type Config struct { CollectionDir string OperatorName string @@ -35,6 +36,7 @@ type Config struct { KubernetesClientSet *kubernetes.Clientset } +// NewConfig creates a new must-gather config. func NewConfig(scheme *runtime.Scheme) (Config, error) { var operatorName, operatorNamespace, collectionDir, kubeconfigPath string diff --git a/cmd/gather/main.go b/cmd/gather/main.go index 149cf0bd1..ea358bbe6 100644 --- a/cmd/gather/main.go +++ b/cmd/gather/main.go @@ -18,7 +18,6 @@ import ( "log" "os" - tempov1alpha1 "github.com/grafana/tempo-operator/apis/tempo/v1alpha1" routev1 "github.com/openshift/api/route/v1" operatorsv1 "github.com/operator-framework/api/pkg/operators/v1" operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" @@ -32,6 +31,8 @@ import ( k8sruntime "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + tempov1alpha1 "github.com/grafana/tempo-operator/apis/tempo/v1alpha1" + "github.com/grafana/tempo-operator/cmd/gather/cluster" "github.com/grafana/tempo-operator/cmd/gather/config" ) From c88c945d894bf37433175d05f71851957aa221cc Mon Sep 17 00:00:00 2001 From: Israel Blancas Date: Mon, 23 Sep 2024 10:48:33 +0200 Subject: [PATCH 3/3] Add changelog Signed-off-by: Israel Blancas --- .chloggen/add-must-gather.yaml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100755 .chloggen/add-must-gather.yaml diff --git a/.chloggen/add-must-gather.yaml b/.chloggen/add-must-gather.yaml new file mode 100755 index 000000000..4ddf81131 --- /dev/null +++ b/.chloggen/add-must-gather.yaml @@ -0,0 +1,16 @@ +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: new_component + +# The name of the component, or a single word describing the area of concern, (e.g. 
tempostack, tempomonolithic, github action) +component: must-gather + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add must-gather to collect information about the components deployed by the operator in a cluster. + +# One or more tracking issues related to the change +issues: [1033] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext:
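For reference, based on the folder- and file-naming logic in `cmd/gather/cluster/cluster.go` and `cmd/gather/cluster/write.go`, the collected output is organized roughly as follows (the namespace `observability`, the TempoStack name `simplest`, and the individual resource files are illustrative placeholders):

```
<collection-dir>/
├── <operator-pod-name>                # operator pod logs
├── deployment-<operator-name>.yaml    # operator Deployment
├── olm/                               # Operators, OperatorGroups, Subscriptions, InstallPlans, CSVs
└── namespaces/
    └── observability/
        ├── tempostack/
        │   └── simplest/
        │       ├── tempostack-simplest.yaml
        │       ├── statefulset-<name>.yaml
        │       └── configmap-<name>.yaml
        └── tempomonolithic/
            └── <name>/                # same layout for TempoMonolithic instances
```

Each object is written to its own YAML file named `<kind>-<name>.yaml`, and only objects labeled `app.kubernetes.io/managed-by: tempo-operator` with an owner reference to the corresponding TempoStack or TempoMonolithic are collected.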