From 626784f1add7972907166b5b486747355b947d27 Mon Sep 17 00:00:00 2001 From: Bastian Krol Date: Thu, 23 May 2024 15:41:47 +0200 Subject: [PATCH] feat: add support for more resource types * CronJob * DaemonSet * Job * StatefulSet --- internal/controller/dash0_controller.go | 281 ++++++++++++++++-- internal/controller/dash0_controller_test.go | 236 ++++++++++----- internal/k8sresources/modify.go | 128 +++++--- internal/k8sresources/modify_test.go | 75 ++--- internal/webhook/dash0_webhook.go | 170 +++++++++-- internal/webhook/dash0_webhook_test.go | 51 +++- .../node.js/express/build-and-deploy.sh | 2 +- test-resources/node.js/express/daemonset.yaml | 39 +++ .../express/{deploy.yaml => deployment.yaml} | 0 test-resources/node.js/express/undeploy.sh | 2 +- test/e2e/e2e_helpers.go | 81 +++-- test/e2e/e2e_test.go | 29 +- test/util/helpers.go | 66 +++- test/util/resources.go | 214 +++++++++++-- test/util/verification.go | 87 +++++- 15 files changed, 1169 insertions(+), 292 deletions(-) create mode 100644 test-resources/node.js/express/daemonset.yaml rename test-resources/node.js/express/{deploy.yaml => deployment.yaml} (100%) diff --git a/internal/controller/dash0_controller.go b/internal/controller/dash0_controller.go index 882335ee..ca9fd5c4 100644 --- a/internal/controller/dash0_controller.go +++ b/internal/controller/dash0_controller.go @@ -5,10 +5,12 @@ package controller import ( "context" + "errors" "fmt" "github.com/go-logr/logr" appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -32,6 +34,25 @@ type Dash0Reconciler struct { Versions k8sresources.Versions } +type ImmutableResourceError struct { + resourceType string + resource string +} + +func (e ImmutableResourceError) Error() string { + return fmt.Sprintf( + "Dash0 cannot instrument the existing %s %s, since the this type of resource is immutable.", + e.resourceType, + 
e.resource, + ) +} + +var ( + labelNotSetFilter = metav1.ListOptions{ + LabelSelector: fmt.Sprintf("!%s", util.Dash0AutoInstrumentationLabel), + } +) + func (r *Dash0Reconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&operatorv1alpha1.Dash0{}). @@ -168,52 +189,276 @@ func (r *Dash0Reconciler) refreshStatus(ctx context.Context, dash0CustomResource func (r *Dash0Reconciler) modifyExistingResources(ctx context.Context, dash0CustomResource *operatorv1alpha1.Dash0) error { namespace := dash0CustomResource.Namespace - listOptions := metav1.ListOptions{ - LabelSelector: fmt.Sprintf("!%s", util.Dash0AutoInstrumentationLabel), + errCronJobs := r.findAndModifyCronJobs(ctx, namespace) + errDaemonSets := r.findAndModifyDaemonSets(ctx, namespace) + errDeployments := r.findAndModifyDeployments(ctx, namespace) + errJobs := r.findAndHandleJobs(ctx, namespace) + errStatefulSets := r.findAndModifyStatefulSets(ctx, namespace) + combinedErrors := errors.Join( + errCronJobs, + errDaemonSets, + errDeployments, + errJobs, + errStatefulSets, + ) + if combinedErrors != nil { + return combinedErrors } + return nil +} - deploymentsInNamespace, err := r.ClientSet.AppsV1().Deployments(namespace).List(ctx, listOptions) +func (r *Dash0Reconciler) findAndModifyCronJobs(ctx context.Context, namespace string) error { + matchingResourcesInNamespace, err := r.ClientSet.BatchV1().CronJobs(namespace).List(ctx, labelNotSetFilter) if err != nil { - return fmt.Errorf("error when querying deployments: %w", err) + return fmt.Errorf("error when querying cron jobs: %w", err) } + for _, resource := range matchingResourcesInNamespace.Items { + r.modifyCronJob(ctx, resource) + } + return nil +} + +func (r *Dash0Reconciler) modifyCronJob(ctx context.Context, cronJob batchv1.CronJob) { + if cronJob.DeletionTimestamp != nil { + // do not modify resources that are being deleted + return + } + logger := log.FromContext(ctx).WithValues( + "resource type", + "CronJob", + 
"resource namespace", + cronJob.GetNamespace(), + "resource name", + cronJob.GetName(), + ) + hasBeenModified := false + retryErr := util.Retry("modifying cron job", func() error { + if err := r.Client.Get(ctx, client.ObjectKey{ + Namespace: cronJob.GetNamespace(), + Name: cronJob.GetName(), + }, &cronJob); err != nil { + return fmt.Errorf("error when fetching cron job %s/%s: %w", cronJob.GetNamespace(), cronJob.GetName(), err) + } + hasBeenModified = k8sresources. + NewResourceModifier(r.Versions, logger). + ModifyCronJob(&cronJob, cronJob.GetNamespace()) + if hasBeenModified { + return r.Client.Update(ctx, &cronJob) + } else { + return nil + } + }, &logger) + + r.postProcess(&cronJob, hasBeenModified, logger, retryErr) +} - for _, deployment := range deploymentsInNamespace.Items { - r.modifySingleResource(ctx, deployment) +func (r *Dash0Reconciler) findAndModifyDaemonSets(ctx context.Context, namespace string) error { + matchingResourcesInNamespace, err := r.ClientSet.AppsV1().DaemonSets(namespace).List(ctx, labelNotSetFilter) + if err != nil { + return fmt.Errorf("error when querying daemon sets: %w", err) + } + for _, resource := range matchingResourcesInNamespace.Items { + r.modifyDaemonSet(ctx, resource) } return nil } -func (r *Dash0Reconciler) modifySingleResource(ctx context.Context, deployment appsv1.Deployment) { - logger := log.FromContext(ctx).WithValues("resource type", "deployment", "resource namespace", deployment.GetNamespace(), "resource name", deployment.GetName()) +func (r *Dash0Reconciler) modifyDaemonSet(ctx context.Context, daemonSet appsv1.DaemonSet) { + if daemonSet.DeletionTimestamp != nil { + // do not modify resources that are being deleted + return + } + logger := log.FromContext(ctx).WithValues( + "resource type", + "DaemonSet", + "resource namespace", + daemonSet.GetNamespace(), + "resource name", + daemonSet.GetName(), + ) hasBeenModified := false - retryErr := util.Retry("Modifying deployment", func() error { + retryErr := 
util.Retry("modifying daemon set", func() error { + if err := r.Client.Get(ctx, client.ObjectKey{ + Namespace: daemonSet.GetNamespace(), + Name: daemonSet.GetName(), + }, &daemonSet); err != nil { + return fmt.Errorf("error when fetching daemon set %s/%s: %w", daemonSet.GetNamespace(), daemonSet.GetName(), err) + } + hasBeenModified = k8sresources. + NewResourceModifier(r.Versions, logger). + ModifyDaemonSet(&daemonSet, daemonSet.GetNamespace()) + if hasBeenModified { + return r.Client.Update(ctx, &daemonSet) + } else { + return nil + } + }, &logger) + + r.postProcess(&daemonSet, hasBeenModified, logger, retryErr) +} + +func (r *Dash0Reconciler) findAndModifyDeployments(ctx context.Context, namespace string) error { + matchingResourcesInNamespace, err := r.ClientSet.AppsV1().Deployments(namespace).List(ctx, labelNotSetFilter) + if err != nil { + return fmt.Errorf("error when querying deployments: %w", err) + } + for _, resource := range matchingResourcesInNamespace.Items { + r.modifyDeployment(ctx, resource) + } + return nil +} + +func (r *Dash0Reconciler) modifyDeployment(ctx context.Context, deployment appsv1.Deployment) { + if deployment.DeletionTimestamp != nil { + // do not modify resources that are being deleted + return + } + logger := log.FromContext(ctx).WithValues( + "resource type", + "Deployment", + "resource namespace", + deployment.GetNamespace(), + "resource name", + deployment.GetName(), + ) + hasBeenModified := false + retryErr := util.Retry("modifying deployment", func() error { if err := r.Client.Get(ctx, client.ObjectKey{ Namespace: deployment.GetNamespace(), Name: deployment.GetName(), }, &deployment); err != nil { return fmt.Errorf("error when fetching deployment %s/%s: %w", deployment.GetNamespace(), deployment.GetName(), err) } - hasBeenModified = k8sresources.ModifyDeployment( - &deployment, - deployment.GetNamespace(), - r.Versions, + hasBeenModified = k8sresources. + NewResourceModifier(r.Versions, logger). 
+ ModifyDeployment(&deployment, deployment.GetNamespace()) + if hasBeenModified { + return r.Client.Update(ctx, &deployment) + } else { + return nil + } + }, &logger) + + r.postProcess(&deployment, hasBeenModified, logger, retryErr) +} + +func (r *Dash0Reconciler) findAndHandleJobs(ctx context.Context, namespace string) error { + matchingResourcesInNamespace, err := r.ClientSet.BatchV1().Jobs(namespace).List(ctx, labelNotSetFilter) + if err != nil { + return fmt.Errorf("error when querying jobs: %w", err) + } + + for _, job := range matchingResourcesInNamespace.Items { + r.handleJob(ctx, job) + } + return nil +} + +func (r *Dash0Reconciler) handleJob(ctx context.Context, job batchv1.Job) { + if job.DeletionTimestamp != nil { + // do not modify resources that are being deleted + return + } + logger := log.FromContext(ctx).WithValues( + "resource type", + "Job", + "resource namespace", + job.GetNamespace(), + "resource name", + job.GetName(), + ) + retryErr := util.Retry("labelling immutable job", func() error { + if err := r.Client.Get(ctx, client.ObjectKey{ + Namespace: job.GetNamespace(), + Name: job.GetName(), + }, &job); err != nil { + return fmt.Errorf("error when fetching job %s/%s: %w", job.GetNamespace(), job.GetName(), err) + } + k8sresources. + NewResourceModifier(r.Versions, logger). 
+ AddLabelsToImmutableJob(&job) + return r.Client.Update(ctx, &job) + }, &logger) + + if retryErr != nil { + r.postProcess(&job, false, logger, retryErr) + } else { + r.postProcess( + &job, + false, logger, + ImmutableResourceError{ + resourceType: "job", + resource: fmt.Sprintf("%s/%s", job.GetNamespace(), job.GetName()), + }, ) + } +} + +func (r *Dash0Reconciler) findAndModifyStatefulSets(ctx context.Context, namespace string) error { + matchingResourcesInNamespace, err := r.ClientSet.AppsV1().StatefulSets(namespace).List(ctx, labelNotSetFilter) + if err != nil { + return fmt.Errorf("error when querying stateful sets: %w", err) + } + for _, resource := range matchingResourcesInNamespace.Items { + r.modifyStatefulSet(ctx, resource) + } + return nil +} + +func (r *Dash0Reconciler) modifyStatefulSet(ctx context.Context, statefulSet appsv1.StatefulSet) { + if statefulSet.DeletionTimestamp != nil { + // do not modify resources that are being deleted + return + } + logger := log.FromContext(ctx).WithValues( + "resource type", + "StatefulSet", + "resource namespace", + statefulSet.GetNamespace(), + "resource name", + statefulSet.GetName(), + ) + hasBeenModified := false + retryErr := util.Retry("modifying stateful set", func() error { + if err := r.Client.Get(ctx, client.ObjectKey{ + Namespace: statefulSet.GetNamespace(), + Name: statefulSet.GetName(), + }, &statefulSet); err != nil { + return fmt.Errorf("error when fetching stateful set %s/%s: %w", statefulSet.GetNamespace(), statefulSet.GetName(), err) + } + hasBeenModified = k8sresources. + NewResourceModifier(r.Versions, logger). 
+ ModifyStatefulSet(&statefulSet, statefulSet.GetNamespace()) if hasBeenModified { - return r.Client.Update(ctx, &deployment) + return r.Client.Update(ctx, &statefulSet) } else { return nil } }, &logger) + r.postProcess(&statefulSet, hasBeenModified, logger, retryErr) +} + +func (r *Dash0Reconciler) postProcess( + resource runtime.Object, + hasBeenModified bool, + logger logr.Logger, + retryErr error, +) { if retryErr != nil { - logger.Error(retryErr, "Dash0 instrumentation by controller has not been successful.") - util.QueueFailedInstrumentationEvent(r.Recorder, &deployment, "controller", retryErr) + e := &ImmutableResourceError{} + if errors.As(retryErr, e) { + logger.Info(e.Error()) + } else { + logger.Error(retryErr, "Dash0 instrumentation by controller has not been successful.") + } + util.QueueFailedInstrumentationEvent(r.Recorder, resource, "controller", retryErr) } else if !hasBeenModified { logger.Info("Dash0 instrumentation already present, no modification by controller is necessary.") - util.QueueAlreadyInstrumentedEvent(r.Recorder, &deployment, "controller") + util.QueueAlreadyInstrumentedEvent(r.Recorder, resource, "controller") } else { logger.Info("The controller has added Dash0 instrumentation to the resource.") - util.QueueSuccessfulInstrumentationEvent(r.Recorder, &deployment, "controller") + util.QueueSuccessfulInstrumentationEvent(r.Recorder, resource, "controller") } } diff --git a/internal/controller/dash0_controller_test.go b/internal/controller/dash0_controller_test.go index b9a2cfa8..45b635ff 100644 --- a/internal/controller/dash0_controller_test.go +++ b/internal/controller/dash0_controller_test.go @@ -7,22 +7,32 @@ import ( "context" "time" - "k8s.io/apimachinery/pkg/api/meta" + . "github.com/dash0hq/dash0-operator/test/util" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - . 
"github.com/dash0hq/dash0-operator/test/util" - + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" operatorv1alpha1 "github.com/dash0hq/dash0-operator/api/v1alpha1" "github.com/dash0hq/dash0-operator/internal/k8sresources" ) +const ( + dash0CustomResourceName = "dash0-test-resource" +) + var ( + dash0CustomResourceQualifiedName = types.NamespacedName{ + Namespace: TestNamespaceName, + Name: dash0CustomResourceName, + } + namespace = dash0CustomResourceQualifiedName.Namespace + timeout = 15 * time.Second pollingInterval = 50 * time.Millisecond @@ -34,33 +44,27 @@ var ( var _ = Describe("Dash0 Controller", func() { Context("When reconciling a resource", func() { - const resourceName = "dash0-test-resource" ctx := context.Background() + var createdObjects []client.Object - dash0ResourceName := types.NamespacedName{ - Namespace: TestNamespaceName, - Name: resourceName, - } - namespace := dash0ResourceName.Namespace var reconciler *Dash0Reconciler BeforeEach(func() { - By("creating the custom resource for the Kind Dash0", func() { - EnsureTestNamespaceExists(ctx, k8sClient, namespace) - EnsureResourceExists( - ctx, - k8sClient, - dash0ResourceName, - &operatorv1alpha1.Dash0{}, - &operatorv1alpha1.Dash0{ - ObjectMeta: metav1.ObjectMeta{ - Name: dash0ResourceName.Name, - Namespace: dash0ResourceName.Namespace, - }, + By("creating the custom resource for the Kind Dash0") + EnsureTestNamespaceExists(ctx, k8sClient, namespace) + EnsureDash0CustomResourceExists( + ctx, + k8sClient, + dash0CustomResourceQualifiedName, + &operatorv1alpha1.Dash0{}, + &operatorv1alpha1.Dash0{ + ObjectMeta: metav1.ObjectMeta{ + Name: dash0CustomResourceQualifiedName.Name, + Namespace: dash0CustomResourceQualifiedName.Namespace, }, - ) - }) + }, + ) reconciler = &Dash0Reconciler{ Client: k8sClient, @@ -69,98 +73,168 @@ var _ = Describe("Dash0 
Controller", func() { Scheme: k8sClient.Scheme(), Versions: versions, } + createdObjects = make([]client.Object, 0) }) AfterEach(func() { - By("Cleanup the Dash0 resource instance", func() { - resource := &operatorv1alpha1.Dash0{} - err := k8sClient.Get(ctx, dash0ResourceName, resource) - Expect(err).NotTo(HaveOccurred()) - Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) - }) + By("Cleanup the Dash0 resource instance") + dash0CustomResource := &operatorv1alpha1.Dash0{} + err := k8sClient.Get(ctx, dash0CustomResourceQualifiedName, dash0CustomResource) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient.Delete(ctx, dash0CustomResource)).To(Succeed()) + + for _, object := range createdObjects { + Expect(k8sClient.Delete(ctx, object, &client.DeleteOptions{ + GracePeriodSeconds: new(int64), + })).To(Succeed()) + } + + DeleteAllEvents(ctx, clientset, namespace) + allEvents, err := clientset.CoreV1().Events(namespace).List(ctx, metav1.ListOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(allEvents.Items).To(BeEmpty()) }) It("should successfully run the first reconcile (no modifiable resources exist)", func() { - By("Reconciling the created resource", func() { - _, err := reconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: dash0ResourceName, - }) - Expect(err).NotTo(HaveOccurred()) + By("Reconciling the created resource") + _, err := reconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: dash0CustomResourceQualifiedName, }) + Expect(err).NotTo(HaveOccurred()) - verifyStatusConditions(ctx, dash0ResourceName) + verifyStatusConditions(ctx, dash0CustomResourceQualifiedName) }) It("should successfully run multiple reconciles (no modifiable resources exist)", func() { - By("First reconcile request", func() { - _, err := reconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: dash0ResourceName, - }) - Expect(err).NotTo(HaveOccurred()) + By("First reconcile request") + _, err := reconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: 
dash0CustomResourceQualifiedName, }) + Expect(err).NotTo(HaveOccurred()) - firstAvailableStatusCondition := verifyStatusConditions(ctx, dash0ResourceName) + firstAvailableStatusCondition := verifyStatusConditions(ctx, dash0CustomResourceQualifiedName) originalTransitionTimestamp := firstAvailableStatusCondition.LastTransitionTime.Time time.Sleep(50 * time.Millisecond) - By("Second reconcile request", func() { - _, err := reconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: dash0ResourceName, - }) - Expect(err).NotTo(HaveOccurred()) + By("Second reconcile request") + _, err = reconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: dash0CustomResourceQualifiedName, }) + Expect(err).NotTo(HaveOccurred()) // The LastTransitionTime should not change with subsequent reconciliations. - secondAvailableCondition := verifyStatusConditions(ctx, dash0ResourceName) + secondAvailableCondition := verifyStatusConditions(ctx, dash0CustomResourceQualifiedName) Expect(secondAvailableCondition.LastTransitionTime.Time).To(Equal(originalTransitionTimestamp)) }) + It("should modify an existing cron job", func() { + name := CronJobName + By("Inititalize a cron job") + cronJob := CreateBasicCronJob(ctx, k8sClient, namespace, name) + createdObjects = append(createdObjects, cronJob) + + By("Reconciling the created resource") + _, err := reconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: dash0CustomResourceQualifiedName, + }) + Expect(err).NotTo(HaveOccurred()) + + verifyStatusConditionAndSuccessEvent(ctx, namespace, name) + VerifyModifiedCronJob(GetCronJob(ctx, k8sClient, namespace, name), BasicPodSpecExpectations) + }) + + It("should modify an existing daemon set", func() { + name := DaemonSetName + By("Inititalize a daemon set") + daemonSet := CreateBasicDaemonSet(ctx, k8sClient, namespace, name) + createdObjects = append(createdObjects, daemonSet) + + By("Reconciling the created resource") + _, err := reconciler.Reconcile(ctx, reconcile.Request{ + 
NamespacedName: dash0CustomResourceQualifiedName, + }) + Expect(err).NotTo(HaveOccurred()) + + verifyStatusConditionAndSuccessEvent(ctx, namespace, name) + VerifyModifiedDaemonSet(GetDaemonSet(ctx, k8sClient, namespace, name), BasicPodSpecExpectations) + }) + It("should modify an existing deployment", func() { - deploymentName := DeploymentNameExisting - By("Inititalize a deployment", func() { - CreateBasicDeployment(ctx, k8sClient, namespace, deploymentName) + name := DeploymentName + By("Inititalize a deployment") + deployment := CreateBasicDeployment(ctx, k8sClient, namespace, name) + createdObjects = append(createdObjects, deployment) + + By("Reconciling the created resource") + _, err := reconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: dash0CustomResourceQualifiedName, }) + Expect(err).NotTo(HaveOccurred()) + + verifyStatusConditionAndSuccessEvent(ctx, namespace, name) + VerifyModifiedDeployment(GetDeployment(ctx, k8sClient, namespace, name), BasicPodSpecExpectations) + }) + + It("should record a failure event for an existing job and add labels", func() { + name := JobName + By("Inititalize a job") + job := CreateBasicJob(ctx, k8sClient, namespace, name) + createdObjects = append(createdObjects, job) - By("Reconciling the created resource", func() { - _, err := reconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: dash0ResourceName, - }) - Expect(err).NotTo(HaveOccurred()) + By("Reconciling the created resource") + _, err := reconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: dash0CustomResourceQualifiedName, }) + Expect(err).NotTo(HaveOccurred()) + + verifyStatusConditions(ctx, dash0CustomResourceQualifiedName) + VerifyFailureEvent( + ctx, + clientset, + namespace, + name, + "controller", + "Dash0 instrumentation by controller has not been successful. 
Error message: Dash0 cannot"+ + " instrument the existing job test-namespace/job, since the this type of resource is immutable.", + ) + VerifyUnmodifiedJob(GetJob(ctx, k8sClient, namespace, name)) + }) + + It("should modify an stateful set", func() { + name := StatefulSetName + By("Inititalize a stateful set") + statefulSet := CreateBasicStatefulSet(ctx, k8sClient, namespace, name) + createdObjects = append(createdObjects, statefulSet) - verifyStatusConditions(ctx, dash0ResourceName) - VerifyModifiedDeployment(GetDeployment(ctx, k8sClient, namespace, deploymentName), DeploymentExpectations{ - Volumes: 1, - Dash0VolumeIdx: 0, - InitContainers: 1, - Dash0InitContainerIdx: 0, - Containers: []ContainerExpectations{{ - VolumeMounts: 1, - Dash0VolumeMountIdx: 0, - EnvVars: 2, - NodeOptionsEnvVarIdx: 0, - Dash0CollectorBaseUrlEnvVarIdx: 1, - Dash0CollectorBaseUrlEnvVarExpectedValue: "http://dash0-opentelemetry-collector-daemonset.test-namespace.svc.cluster.local:4318", - }}, + By("Reconciling the created resource") + _, err := reconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: dash0CustomResourceQualifiedName, }) - VerifySuccessEvent(ctx, clientset, namespace, deploymentName, "controller") + Expect(err).NotTo(HaveOccurred()) + + verifyStatusConditionAndSuccessEvent(ctx, namespace, name) + VerifyModifiedStatefulSet(GetStatefulSet(ctx, k8sClient, namespace, name), BasicPodSpecExpectations) }) }) }) +func verifyStatusConditionAndSuccessEvent(ctx context.Context, namespace string, name string) { + verifyStatusConditions(ctx, dash0CustomResourceQualifiedName) + VerifySuccessEvent(ctx, clientset, namespace, name, "controller") +} + func verifyStatusConditions(ctx context.Context, typeNamespacedName types.NamespacedName) *metav1.Condition { var available *metav1.Condition - By("Verifying status conditions", func() { - Eventually(func(g Gomega) { - dash0 := &operatorv1alpha1.Dash0{} - g.Expect(k8sClient.Get(ctx, typeNamespacedName, dash0)).To(Succeed()) - available = 
meta.FindStatusCondition(dash0.Status.Conditions, string(operatorv1alpha1.ConditionTypeAvailable)) - g.Expect(available).NotTo(BeNil()) - g.Expect(available.Status).To(Equal(metav1.ConditionTrue)) - degraded := meta.FindStatusCondition(dash0.Status.Conditions, string(operatorv1alpha1.ConditionTypeDegraded)) - g.Expect(degraded).To(BeNil()) - }, timeout, pollingInterval).Should(Succeed()) - }) + By("Verifying status conditions") + Eventually(func(g Gomega) { + dash0 := &operatorv1alpha1.Dash0{} + g.Expect(k8sClient.Get(ctx, typeNamespacedName, dash0)).To(Succeed()) + available = meta.FindStatusCondition(dash0.Status.Conditions, string(operatorv1alpha1.ConditionTypeAvailable)) + g.Expect(available).NotTo(BeNil()) + g.Expect(available.Status).To(Equal(metav1.ConditionTrue)) + degraded := meta.FindStatusCondition(dash0.Status.Conditions, string(operatorv1alpha1.ConditionTypeDegraded)) + g.Expect(degraded).To(BeNil()) + }, timeout, pollingInterval).Should(Succeed()) return available } diff --git a/internal/k8sresources/modify.go b/internal/k8sresources/modify.go index 5d2db217..800e6752 100644 --- a/internal/k8sresources/modify.go +++ b/internal/k8sresources/modify.go @@ -7,12 +7,14 @@ import ( "fmt" "reflect" "slices" + "strconv" "github.com/go-logr/logr" appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const ( @@ -25,10 +27,10 @@ const ( dash0InstrumentationDirectory = "/opt/dash0/instrumentation" // envVarLdPreloadName = "LD_PRELOAD" // envVarLdPreloadValue = "/opt/dash0/preload/inject.so" - envVarNodeOptionsName = "NODE_OPTIONS" - envVarNodeOptionsValue = "--require /opt/dash0/instrumentation/node.js/node_modules/@dash0/opentelemetry/src/index.js" - envVarDash0CollectorBaseUrlName = "DASH0_OTEL_COLLECTOR_BASE_URL" - envVarDash0CollectorBaseUrlNameValueTemplate = 
"http://dash0-opentelemetry-collector-daemonset.%s.svc.cluster.local:4318" + envVarNodeOptionsName = "NODE_OPTIONS" + envVarNodeOptionsValue = "--require /opt/dash0/instrumentation/node.js/node_modules/@dash0/opentelemetry/src/index.js" + envVarDash0CollectorBaseUrlName = "DASH0_OTEL_COLLECTOR_BASE_URL" + envVarDash0CollectorBaseUrlValueTemplate = "http://dash0-opentelemetry-collector-daemonset.%s.svc.cluster.local:4318" instrumentedLabelKey = "dash0.instrumented" operatorVersionLabelKey = "dash0.operator.version" @@ -48,39 +50,67 @@ type Versions struct { InitContainerImageVersion string } -func ModifyDeployment( - deployment *appsv1.Deployment, - namespace string, - versions Versions, - logger logr.Logger, -) bool { - podTemplateSpec := &deployment.Spec.Template - hasBeenModified := modifyPodSpec( +type ResourceModifier struct { + versions Versions + logger logr.Logger +} + +func NewResourceModifier(versions Versions, logger logr.Logger) *ResourceModifier { + return &ResourceModifier{ + versions: versions, + logger: logger, + } +} + +func (m *ResourceModifier) ModifyCronJob(cronJob *batchv1.CronJob, namespace string) bool { + return m.modifyResource(&cronJob.Spec.JobTemplate.Spec.Template, &cronJob.ObjectMeta, namespace) +} + +func (m *ResourceModifier) ModifyDaemonSet(daemonSet *appsv1.DaemonSet, namespace string) bool { + return m.modifyResource(&daemonSet.Spec.Template, &daemonSet.ObjectMeta, namespace) +} + +func (m *ResourceModifier) ModifyDeployment(deployment *appsv1.Deployment, namespace string) bool { + return m.modifyResource(&deployment.Spec.Template, &deployment.ObjectMeta, namespace) +} + +func (m *ResourceModifier) ModifyJob(job *batchv1.Job, namespace string) bool { + return m.modifyResource(&job.Spec.Template, &job.ObjectMeta, namespace) +} + +func (m *ResourceModifier) AddLabelsToImmutableJob(job *batchv1.Job) { + m.addInstrumentationLabels(&job.ObjectMeta, m.versions, false) +} + +func (m *ResourceModifier) ModifyStatefulSet(statefulSet 
*appsv1.StatefulSet, namespace string) bool { + return m.modifyResource(&statefulSet.Spec.Template, &statefulSet.ObjectMeta, namespace) +} + +func (m *ResourceModifier) modifyResource(podTemplateSpec *corev1.PodTemplateSpec, meta *metav1.ObjectMeta, namespace string) bool { + hasBeenModified := m.modifyPodSpec( &podTemplateSpec.Spec, - namespace, - versions.InitContainerImageVersion, - logger, + fmt.Sprintf(envVarDash0CollectorBaseUrlValueTemplate, namespace), ) if hasBeenModified { - addInstrumentationLabels(&deployment.ObjectMeta, versions) - addInstrumentationLabels(&podTemplateSpec.ObjectMeta, versions) + m.addInstrumentationLabels(meta, m.versions, true) + m.addInstrumentationLabels(&podTemplateSpec.ObjectMeta, m.versions, true) } return hasBeenModified } -func modifyPodSpec(podSpec *corev1.PodSpec, namespace string, initContainerImageVersion string, logger logr.Logger) bool { +func (m *ResourceModifier) modifyPodSpec(podSpec *corev1.PodSpec, dash0CollectorBaseUrl string) bool { originalSpec := podSpec.DeepCopy() - addInstrumentationVolume(podSpec) - addInitContainer(podSpec, initContainerImageVersion) + m.addInstrumentationVolume(podSpec) + m.addInitContainer(podSpec) for idx := range podSpec.Containers { container := &podSpec.Containers[idx] - instrumentContainer(container, namespace, logger) + m.instrumentContainer(container, dash0CollectorBaseUrl) } return !reflect.DeepEqual(originalSpec, podSpec) } -func addInstrumentationVolume(podSpec *corev1.PodSpec) { +func (m *ResourceModifier) addInstrumentationVolume(podSpec *corev1.PodSpec) { if podSpec.Volumes == nil { podSpec.Volumes = make([]corev1.Volume, 0) } @@ -103,7 +133,7 @@ func addInstrumentationVolume(podSpec *corev1.PodSpec) { } } -func addInitContainer(podSpec *corev1.PodSpec, initContainerImageVersion string) { +func (m *ResourceModifier) addInitContainer(podSpec *corev1.PodSpec) { // The init container has all the instrumentation packages (e.g. 
the Dash0 Node.js distribution etc.), stored under // /dash0/instrumentation. Its main responsibility is to copy these files to the Kubernetes volume created and mounted in // addInstrumentationVolume (mounted at /opt/dash0/instrumentation in the init container and also in the target containers). @@ -114,7 +144,7 @@ func addInitContainer(podSpec *corev1.PodSpec, initContainerImageVersion string) idx := slices.IndexFunc(podSpec.InitContainers, func(c corev1.Container) bool { return c.Name == initContainerName }) - initContainer := createInitContainer(podSpec, initContainerImageVersion) + initContainer := m.createInitContainer(podSpec) if idx < 0 { podSpec.InitContainers = append(podSpec.InitContainers, *initContainer) } else { @@ -122,7 +152,7 @@ func addInitContainer(podSpec *corev1.PodSpec, initContainerImageVersion string) } } -func createInitContainer(podSpec *corev1.PodSpec, initContainerImageVersion string) *corev1.Container { +func (m *ResourceModifier) createInitContainer(podSpec *corev1.PodSpec) *corev1.Container { initContainerUser := &defaultInitContainerUser initContainerGroup := &defaultInitContainerGroup @@ -137,7 +167,7 @@ func createInitContainer(podSpec *corev1.PodSpec, initContainerImageVersion stri return &corev1.Container{ Name: initContainerName, - Image: fmt.Sprintf(initContainerImageTemplate, initContainerImageVersion), + Image: fmt.Sprintf(initContainerImageTemplate, m.versions.InitContainerImageVersion), Env: []corev1.EnvVar{ { Name: dash0DirectoryEnvVarName, @@ -162,13 +192,13 @@ func createInitContainer(podSpec *corev1.PodSpec, initContainerImageVersion stri } } -func instrumentContainer(container *corev1.Container, namespace string, logger logr.Logger) { - logger = logger.WithValues("container", container.Name) - addMount(container) - addEnvironmentVariables(container, namespace, logger) +func (m *ResourceModifier) instrumentContainer(container *corev1.Container, dash0CollectorBaseUrl string) { + perContainerLogger := 
m.logger.WithValues("container", container.Name) + m.addMount(container) + m.addEnvironmentVariables(container, dash0CollectorBaseUrl, perContainerLogger) } -func addMount(container *corev1.Container) { +func (m *ResourceModifier) addMount(container *corev1.Container) { if container.VolumeMounts == nil { container.VolumeMounts = make([]corev1.VolumeMount, 0) } @@ -187,17 +217,23 @@ func addMount(container *corev1.Container) { } } -func addEnvironmentVariables(container *corev1.Container, namespace string, logger logr.Logger) { +func (m *ResourceModifier) addEnvironmentVariables(container *corev1.Container, dash0CollectorBaseUrl string, perContainerLogger logr.Logger) { // For now, we directly modify NODE_OPTIONS. Consider migrating to an LD_PRELOAD hook at some point. - addOrPrependToEnvironmentVariable(container, envVarNodeOptionsName, envVarNodeOptionsValue, logger) + m.addOrPrependToEnvironmentVariable(container, envVarNodeOptionsName, envVarNodeOptionsValue, perContainerLogger) - addOrReplaceEnvironmentVariable( + m.addOrReplaceEnvironmentVariable( container, envVarDash0CollectorBaseUrlName, - fmt.Sprintf(envVarDash0CollectorBaseUrlNameValueTemplate, namespace)) + dash0CollectorBaseUrl, + ) } -func addOrPrependToEnvironmentVariable(container *corev1.Container, name string, value string, logger logr.Logger) { +func (m *ResourceModifier) addOrPrependToEnvironmentVariable( + container *corev1.Container, + name string, + value string, + perContainerLogger logr.Logger, +) { if container.Env == nil { container.Env = make([]corev1.EnvVar, 0) } @@ -214,7 +250,7 @@ func addOrPrependToEnvironmentVariable(container *corev1.Container, name string, envVar := container.Env[idx] previousValue := envVar.Value if previousValue == "" && envVar.ValueFrom != nil { - logger.Info( + perContainerLogger.Info( fmt.Sprintf( "Dash0 cannot prepend anything to the environment variable %s as it is specified via "+ "ValueFrom. 
This container will not be instrumented.", @@ -225,7 +261,7 @@ func addOrPrependToEnvironmentVariable(container *corev1.Container, name string, } } -func addOrReplaceEnvironmentVariable(container *corev1.Container, name string, value string) { +func (m *ResourceModifier) addOrReplaceEnvironmentVariable(container *corev1.Container, name string, value string) { if container.Env == nil { container.Env = make([]corev1.EnvVar, 0) } @@ -244,13 +280,17 @@ func addOrReplaceEnvironmentVariable(container *corev1.Container, name string, v } } -func addInstrumentationLabels(meta *v1.ObjectMeta, labelInformation Versions) { - addLabel(meta, instrumentedLabelKey, "true") - addLabel(meta, operatorVersionLabelKey, labelInformation.OperatorVersion) - addLabel(meta, initContainerImageVersionLabelKey, labelInformation.InitContainerImageVersion) +func (m *ResourceModifier) addInstrumentationLabels( + meta *metav1.ObjectMeta, + labelInformation Versions, + hasBeenInstrumented bool, +) { + m.addLabel(meta, instrumentedLabelKey, strconv.FormatBool(hasBeenInstrumented)) + m.addLabel(meta, operatorVersionLabelKey, labelInformation.OperatorVersion) + m.addLabel(meta, initContainerImageVersionLabelKey, labelInformation.InitContainerImageVersion) } -func addLabel(meta *v1.ObjectMeta, key string, value string) { +func (m *ResourceModifier) addLabel(meta *metav1.ObjectMeta, key string, value string) { if meta.Labels == nil { meta.Labels = make(map[string]string, 1) } diff --git a/internal/k8sresources/modify_test.go b/internal/k8sresources/modify_test.go index d15c070d..51eff4b5 100644 --- a/internal/k8sresources/modify_test.go +++ b/internal/k8sresources/modify_test.go @@ -28,45 +28,23 @@ var ( var _ = Describe("Dash0 Resource Modification", func() { ctx := context.Background() + resourceModifier := NewResourceModifier(versions, log.FromContext(ctx)) - Context("when mutating new deployments", func() { + Context("when mutating new resources", func() { It("should inject Dash into a new basic 
deployment", func() { deployment := BasicDeployment(TestNamespaceName, DeploymentName) - result := ModifyDeployment( - deployment, - TestNamespaceName, - versions, - log.FromContext(ctx), - ) + result := resourceModifier.ModifyDeployment(deployment, TestNamespaceName) Expect(result).To(BeTrue()) - VerifyModifiedDeployment(deployment, DeploymentExpectations{ - Volumes: 1, - Dash0VolumeIdx: 0, - InitContainers: 1, - Dash0InitContainerIdx: 0, - Containers: []ContainerExpectations{{ - VolumeMounts: 1, - Dash0VolumeMountIdx: 0, - EnvVars: 2, - NodeOptionsEnvVarIdx: 0, - Dash0CollectorBaseUrlEnvVarIdx: 1, - Dash0CollectorBaseUrlEnvVarExpectedValue: "http://dash0-opentelemetry-collector-daemonset.test-namespace.svc.cluster.local:4318", - }}, - }) + VerifyModifiedDeployment(deployment, BasicPodSpecExpectations) }) It("should inject Dash into a new deployment that has multiple Containers, and already has Volumes and init Containers", func() { deployment := DeploymentWithMoreBellsAndWhistles(TestNamespaceName, DeploymentName) - result := ModifyDeployment( - deployment, - TestNamespaceName, - versions, - log.FromContext(ctx), - ) + result := resourceModifier.ModifyDeployment(deployment, TestNamespaceName) Expect(result).To(BeTrue()) - VerifyModifiedDeployment(deployment, DeploymentExpectations{ + VerifyModifiedDeployment(deployment, PodSpecExpectations{ Volumes: 3, Dash0VolumeIdx: 2, InitContainers: 3, @@ -94,15 +72,10 @@ var _ = Describe("Dash0 Resource Modification", func() { It("should update existing Dash artifacts in a new deployment", func() { deployment := DeploymentWithExistingDash0Artifacts(TestNamespaceName, DeploymentName) - result := ModifyDeployment( - deployment, - TestNamespaceName, - versions, - log.FromContext(ctx), - ) + result := resourceModifier.ModifyDeployment(deployment, TestNamespaceName) Expect(result).To(BeTrue()) - VerifyModifiedDeployment(deployment, DeploymentExpectations{ + VerifyModifiedDeployment(deployment, PodSpecExpectations{ Volumes: 3, 
Dash0VolumeIdx: 1, InitContainers: 3, @@ -129,5 +102,37 @@ var _ = Describe("Dash0 Resource Modification", func() { }, }) }) + + It("should inject Dash into a new basic cron job", func() { + resource := BasicCronJob(TestNamespaceName, CronJobName) + result := resourceModifier.ModifyCronJob(resource, TestNamespaceName) + + Expect(result).To(BeTrue()) + VerifyModifiedCronJob(resource, BasicPodSpecExpectations) + }) + + It("should inject Dash into a new basic daemon set", func() { + resource := BasicDaemonSet(TestNamespaceName, DaemonSetName) + result := resourceModifier.ModifyDaemonSet(resource, TestNamespaceName) + + Expect(result).To(BeTrue()) + VerifyModifiedDaemonSet(resource, BasicPodSpecExpectations) + }) + + It("should inject Dash into a new basic job", func() { + resource := BasicJob(TestNamespaceName, JobName) + result := resourceModifier.ModifyJob(resource, TestNamespaceName) + + Expect(result).To(BeTrue()) + VerifyModifiedJob(resource, BasicPodSpecExpectations) + }) + + It("should inject Dash into a new basic stateful set", func() { + resource := BasicStatefulSet(TestNamespaceName, StatefulSetName) + result := resourceModifier.ModifyStatefulSet(resource, TestNamespaceName) + + Expect(result).To(BeTrue()) + VerifyModifiedStatefulSet(resource, BasicPodSpecExpectations) + }) }) }) diff --git a/internal/webhook/dash0_webhook.go b/internal/webhook/dash0_webhook.go index efd962ae..bea096b5 100644 --- a/internal/webhook/dash0_webhook.go +++ b/internal/webhook/dash0_webhook.go @@ -12,7 +12,10 @@ import ( "github.com/dash0hq/dash0-operator/internal/k8sresources" "github.com/dash0hq/dash0-operator/internal/util" + "github.com/go-logr/logr" appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" @@ -23,6 +26,22 @@ import ( var ( log = logf.Log.WithName("dash0-webhook") decoder = scheme.Codecs.UniversalDecoder() + + 
cronJobGroup = "batch" + cronJobVersion = "v1" + cronJobKind = "CronJob" + daemonSetGroup = "apps" + daemonSetVersion = "v1" + daemonSetKind = "DaemonSet" + deploymentGroup = "apps" + deploymentVersion = "v1" + deploymentKind = "Deployment" + jobGroup = "batch" + jobVersion = "v1" + jobKind = "Job" + statefulSetGroup = "apps" + statefulSetVersion = "v1" + statefulSetKind = "StatefulSet" ) type Handler struct { @@ -52,33 +71,136 @@ func (h *Handler) Handle(_ context.Context, request admission.Request) admission group := gkv.Group version := gkv.Version kind := gkv.Kind + gvkLabel := fmt.Sprintf("%s/%s.%s", group, version, kind) - if group == "apps" && version == "v1" && kind == "Deployment" { - deployment := &appsv1.Deployment{} - if _, _, err := decoder.Decode(request.Object.Raw, nil, deployment); err != nil { - err := fmt.Errorf("cannot parse resource into a %s/%s.%s: %w", group, version, kind, err) - util.QueueFailedInstrumentationEvent(h.Recorder, deployment, "webhook", err) - return admission.Errored(http.StatusInternalServerError, err) - } - - hasBeenModified := k8sresources.ModifyDeployment(deployment, request.Namespace, h.Versions, logger) - if !hasBeenModified { - logger.Info("Dash0 instrumentation already present, no modification by webhook is necessary.") - util.QueueAlreadyInstrumentedEvent(h.Recorder, deployment, "webhook") - return admission.Allowed("no changes") - } - - marshalled, err := json.Marshal(deployment) - if err != nil { - util.QueueFailedInstrumentationEvent(h.Recorder, deployment, "webhook", err) - return admission.Allowed(fmt.Errorf("error when marshalling modfied resource to JSON: %w", err).Error()) - } - - logger.Info("The webhook has added Dash0 instrumentation to the resource.") - util.QueueSuccessfulInstrumentationEvent(h.Recorder, deployment, "webhook") - return admission.PatchResponseFromRaw(request.Object.Raw, marshalled) + if group == cronJobGroup && version == cronJobVersion && kind == cronJobKind { + return 
h.handleCronJob(request, gvkLabel, logger) + } else if group == daemonSetGroup && version == daemonSetVersion && kind == daemonSetKind { + return h.handleDaemonSet(request, gvkLabel, logger) + } else if group == deploymentGroup && version == deploymentVersion && kind == deploymentKind { + return h.handleDeployment(request, gvkLabel, logger) + } else if group == jobGroup && version == jobVersion && kind == jobKind { + return h.handleJob(request, gvkLabel, logger) + } else if group == statefulSetGroup && version == statefulSetVersion && kind == statefulSetKind { + return h.handleStatefulSet(request, gvkLabel, logger) } else { logger.Info("resource type not supported", "group", group, "version", version, "kind", kind) return admission.Allowed("unknown resource type") } } + +func (h *Handler) handleCronJob( + request admission.Request, + gvkLabel string, + logger logr.Logger, +) admission.Response { + cronJob := &batchv1.CronJob{} + responseIfFailed, failed := h.preProcess(request, gvkLabel, cronJob) + if failed { + return responseIfFailed + } + hasBeenModified := k8sresources. + NewResourceModifier(h.Versions, logger). + ModifyCronJob(cronJob, request.Namespace) + return h.postProcess(request, cronJob, hasBeenModified, logger) +} + +func (h *Handler) handleDaemonSet( + request admission.Request, + gvkLabel string, + logger logr.Logger, +) admission.Response { + daemonSet := &appsv1.DaemonSet{} + responseIfFailed, failed := h.preProcess(request, gvkLabel, daemonSet) + if failed { + return responseIfFailed + } + hasBeenModified := k8sresources. + NewResourceModifier(h.Versions, logger). 
+ ModifyDaemonSet(daemonSet, request.Namespace) + return h.postProcess(request, daemonSet, hasBeenModified, logger) +} + +func (h *Handler) handleDeployment( + request admission.Request, + gvkLabel string, + logger logr.Logger, +) admission.Response { + deployment := &appsv1.Deployment{} + responseIfFailed, failed := h.preProcess(request, gvkLabel, deployment) + if failed { + return responseIfFailed + } + hasBeenModified := k8sresources. + NewResourceModifier(h.Versions, logger). + ModifyDeployment(deployment, request.Namespace) + return h.postProcess(request, deployment, hasBeenModified, logger) +} + +func (h *Handler) handleJob( + request admission.Request, + gvkLabel string, + logger logr.Logger, +) admission.Response { + job := &batchv1.Job{} + responseIfFailed, failed := h.preProcess(request, gvkLabel, job) + if failed { + return responseIfFailed + } + hasBeenModified := k8sresources. + NewResourceModifier(h.Versions, logger). + ModifyJob(job, request.Namespace) + return h.postProcess(request, job, hasBeenModified, logger) +} + +func (h *Handler) handleStatefulSet( + request admission.Request, + gvkLabel string, + logger logr.Logger, +) admission.Response { + statefulSet := &appsv1.StatefulSet{} + responseIfFailed, failed := h.preProcess(request, gvkLabel, statefulSet) + if failed { + return responseIfFailed + } + hasBeenModified := k8sresources. + NewResourceModifier(h.Versions, logger). 
+ ModifyStatefulSet(statefulSet, request.Namespace) + return h.postProcess(request, statefulSet, hasBeenModified, logger) +} + +func (h *Handler) preProcess( + request admission.Request, + gvkLabel string, + resource runtime.Object, +) (admission.Response, bool) { + if _, _, err := decoder.Decode(request.Object.Raw, nil, resource); err != nil { + err := fmt.Errorf("cannot parse resource into a %s: %w", gvkLabel, err) + util.QueueFailedInstrumentationEvent(h.Recorder, resource, "webhook", err) + return admission.Errored(http.StatusInternalServerError, err), true + } + return admission.Response{}, false +} + +func (h *Handler) postProcess( + request admission.Request, + resource runtime.Object, + hasBeenModified bool, + logger logr.Logger, +) admission.Response { + if !hasBeenModified { + logger.Info("Dash0 instrumentation already present, no modification by webhook is necessary.") + util.QueueAlreadyInstrumentedEvent(h.Recorder, resource, "webhook") + return admission.Allowed("no changes") + } + + marshalled, err := json.Marshal(resource) + if err != nil { + util.QueueFailedInstrumentationEvent(h.Recorder, resource, "webhook", err) + return admission.Allowed(fmt.Errorf("error when marshalling modified resource to JSON: %w", err).Error()) + } + + logger.Info("The webhook has added Dash0 instrumentation to the resource.") + util.QueueSuccessfulInstrumentationEvent(h.Recorder, resource, "webhook") + return admission.PatchResponseFromRaw(request.Object.Raw, marshalled) +} diff --git a/internal/webhook/dash0_webhook_test.go b/internal/webhook/dash0_webhook_test.go index 1a2ebb1c..788a1a29 100644 --- a/internal/webhook/dash0_webhook_test.go +++ b/internal/webhook/dash0_webhook_test.go @@ -25,20 +25,7 @@ var _ = Describe("Dash0 Webhook", func() { Expect(k8sClient.Create(ctx, deployment)).Should(Succeed()) deployment = GetDeployment(ctx, k8sClient, TestNamespaceName, DeploymentName) - VerifyModifiedDeployment(deployment, DeploymentExpectations{ - Volumes: 1, - 
Dash0VolumeIdx: 0, - InitContainers: 1, - Dash0InitContainerIdx: 0, - Containers: []ContainerExpectations{{ - VolumeMounts: 1, - Dash0VolumeMountIdx: 0, - EnvVars: 2, - NodeOptionsEnvVarIdx: 0, - Dash0CollectorBaseUrlEnvVarIdx: 1, - Dash0CollectorBaseUrlEnvVarExpectedValue: "http://dash0-opentelemetry-collector-daemonset.test-namespace.svc.cluster.local:4318", - }}, - }) + VerifyModifiedDeployment(deployment, BasicPodSpecExpectations) }) It("should inject Dash into a new deployment that has multiple Containers, and already has Volumes and init Containers", func() { @@ -46,7 +33,7 @@ var _ = Describe("Dash0 Webhook", func() { Expect(k8sClient.Create(ctx, deployment)).Should(Succeed()) deployment = GetDeployment(ctx, k8sClient, TestNamespaceName, DeploymentName) - VerifyModifiedDeployment(deployment, DeploymentExpectations{ + VerifyModifiedDeployment(deployment, PodSpecExpectations{ Volumes: 3, Dash0VolumeIdx: 2, InitContainers: 3, @@ -78,7 +65,7 @@ var _ = Describe("Dash0 Webhook", func() { Expect(k8sClient.Create(ctx, deployment)).Should(Succeed()) deployment = GetDeployment(ctx, k8sClient, TestNamespaceName, DeploymentName) - VerifyModifiedDeployment(deployment, DeploymentExpectations{ + VerifyModifiedDeployment(deployment, PodSpecExpectations{ Volumes: 3, Dash0VolumeIdx: 1, InitContainers: 3, @@ -105,5 +92,37 @@ var _ = Describe("Dash0 Webhook", func() { }, }) }) + + It("should inject Dash into a new basic cron job", func() { + cronJob := BasicCronJob(TestNamespaceName, CronJobName) + Expect(k8sClient.Create(ctx, cronJob)).Should(Succeed()) + + cronJob = GetCronJob(ctx, k8sClient, TestNamespaceName, CronJobName) + VerifyModifiedCronJob(cronJob, BasicPodSpecExpectations) + }) + + It("should inject Dash into a new basic daemon set", func() { + daemonSet := BasicDaemonSet(TestNamespaceName, DaemonSetName) + Expect(k8sClient.Create(ctx, daemonSet)).Should(Succeed()) + + daemonSet = GetDaemonSet(ctx, k8sClient, TestNamespaceName, DaemonSetName) + 
VerifyModifiedDaemonSet(daemonSet, BasicPodSpecExpectations) + }) + + It("should inject Dash into a new basic job", func() { + job := BasicJob(TestNamespaceName, JobName) + Expect(k8sClient.Create(ctx, job)).Should(Succeed()) + + job = GetJob(ctx, k8sClient, TestNamespaceName, JobName) + VerifyModifiedJob(job, BasicPodSpecExpectations) + }) + + It("should inject Dash into a new basic stateful set", func() { + statefulSet := BasicStatefulSet(TestNamespaceName, StatefulSetName) + Expect(k8sClient.Create(ctx, statefulSet)).Should(Succeed()) + + statefulSet = GetStatefulSet(ctx, k8sClient, TestNamespaceName, StatefulSetName) + VerifyModifiedStatefulSet(statefulSet, BasicPodSpecExpectations) + }) }) }) diff --git a/test-resources/node.js/express/build-and-deploy.sh b/test-resources/node.js/express/build-and-deploy.sh index 6ef644bc..215622dd 100755 --- a/test-resources/node.js/express/build-and-deploy.sh +++ b/test-resources/node.js/express/build-and-deploy.sh @@ -14,4 +14,4 @@ if [[ -z ${SKIP_DOCKER_BUILD:-} ]]; then fi ./undeploy.sh ${target_namespace} -kubectl apply -n ${target_namespace} -f deploy.yaml +kubectl apply -n ${target_namespace} -f deployment.yaml diff --git a/test-resources/node.js/express/daemonset.yaml b/test-resources/node.js/express/daemonset.yaml new file mode 100644 index 00000000..7d3635d9 --- /dev/null +++ b/test-resources/node.js/express/daemonset.yaml @@ -0,0 +1,39 @@ +# SPDX-FileCopyrightText: Copyright 2024 Dash0 Inc. 
+# SPDX-License-Identifier: Apache-2.0 + +apiVersion: v1 +kind: Service +metadata: + name: dash0-operator-nodejs-20-express-test-service +spec: + selector: + app: dash0-operator-nodejs-20-express-test-app + ports: + - port: 1207 + targetPort: 1207 + type: LoadBalancer +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: dash0-operator-nodejs-20-express-test-daemonset + labels: + app: dash0-operator-nodejs-20-express-test-app +spec: + selector: + matchLabels: + app: dash0-operator-nodejs-20-express-test-app + template: + metadata: + labels: + app: dash0-operator-nodejs-20-express-test-app + spec: + containers: + - name: dash0-operator-nodejs-20-express-test-app + image: "dash0-operator-nodejs-20-express-test-app:latest" + env: + - name: DASH0_DEBUG + value: "true" + ports: + - containerPort: 1207 + imagePullPolicy: Never \ No newline at end of file diff --git a/test-resources/node.js/express/deploy.yaml b/test-resources/node.js/express/deployment.yaml similarity index 100% rename from test-resources/node.js/express/deploy.yaml rename to test-resources/node.js/express/deployment.yaml diff --git a/test-resources/node.js/express/undeploy.sh b/test-resources/node.js/express/undeploy.sh index 2074a892..7a488f07 100755 --- a/test-resources/node.js/express/undeploy.sh +++ b/test-resources/node.js/express/undeploy.sh @@ -9,4 +9,4 @@ cd "$(dirname ${BASH_SOURCE})" target_namespace=${1:-default} -kubectl delete -n ${target_namespace} -f deploy.yaml || true +kubectl delete -n ${target_namespace} -f deployment.yaml || true diff --git a/test/e2e/e2e_helpers.go b/test/e2e/e2e_helpers.go index dd720be1..af48149f 100644 --- a/test/e2e/e2e_helpers.go +++ b/test/e2e/e2e_helpers.go @@ -292,17 +292,27 @@ func UndeployDash0Resource(namespace string) { "kubectl", "delete", "-n", namespace, "-k", "config/samples"))).To(Succeed()) } -func InstallNodeJsDeployment(namespace string) error { - imageName := "dash0-operator-nodejs-20-express-test-app" - err := 
RunMultipleFromStrings([][]string{ - {"docker", "build", "test-resources/node.js/express", "-t", imageName}, - {"kubectl", "apply", "-f", "test-resources/node.js/express/deploy.yaml", "--namespace", namespace}, - }) - if err != nil { - return err - } +func InstallNodeJsDaemonSet(namespace string) error { + return installNodeJsApplication( + namespace, + "daemonset", + exec.Command("kubectl", + "rollout", + "status", + "daemonset", + "dash0-operator-nodejs-20-express-test-daemonset", + "--namespace", + namespace, + "--timeout", + "30s", + ), + ) +} - return RunAndIgnoreOutput( +func InstallNodeJsDeployment(namespace string) error { + return installNodeJsApplication( + namespace, + "deployment", exec.Command( "kubectl", "wait", @@ -313,27 +323,56 @@ func InstallNodeJsDeployment(namespace string) error { namespace, "--timeout", "30s", - )) + ), + ) +} + +func installNodeJsApplication(namespace string, kind string, waitCommand *exec.Cmd) error { + imageName := "dash0-operator-nodejs-20-express-test-app" + err := RunMultipleFromStrings([][]string{ + {"docker", "build", "test-resources/node.js/express", "-t", imageName}, + {"kubectl", + "apply", + "--namespace", + namespace, + "-f", + fmt.Sprintf("test-resources/node.js/express/%s.yaml", kind), + }, + }) + if err != nil { + return err + } + + return RunAndIgnoreOutput(waitCommand) +} + +func UninstallNodeJsDaemonSet(namespace string) error { + return uninstallNodeJsApplication(namespace, "daemonset") } func UninstallNodeJsDeployment(namespace string) error { + return uninstallNodeJsApplication(namespace, "deployment") +} + +func uninstallNodeJsApplication(namespace string, kind string) error { return RunAndIgnoreOutput( exec.Command( "kubectl", "delete", "--namespace", namespace, + "--ignore-not-found", "-f", - "test-resources/node.js/express/deploy.yaml", + fmt.Sprintf("test-resources/node.js/express/%s.yaml", kind), )) } -func SendRequestsAndVerifySpansHaveBeenProduced(namespace string) { +func 
SendRequestsAndVerifySpansHaveBeenProduced(namespace string, kind string) { timestampLowerBound := time.Now() By("verify that the resource has been instrumented and is sending telemetry", func() { Eventually(func(g Gomega) { - verifyLabels(g, namespace) + verifyLabels(g, namespace, kind) response, err := Run(exec.Command("curl", "http://localhost:1207/ohai"), false) g.ExpectWithOffset(1, err).NotTo(HaveOccurred()) g.ExpectWithOffset(1, string(response)).To(ContainSubstring( @@ -365,23 +404,23 @@ func SendRequestsAndVerifySpansHaveBeenProduced(namespace string) { }) } -func verifyLabels(g Gomega, namespace string) { - instrumented := readLabel(g, namespace, "dash0.instrumented") +func verifyLabels(g Gomega, namespace string, kind string) { + instrumented := readLabel(g, namespace, kind, "dash0.instrumented") g.ExpectWithOffset(1, instrumented).To(Equal("true")) - operatorVersion := readLabel(g, namespace, "dash0.operator.version") + operatorVersion := readLabel(g, namespace, kind, "dash0.operator.version") g.ExpectWithOffset(1, operatorVersion).To(MatchRegexp("\\d+\\.\\d+\\.\\d+")) - initContainerImageVersion := readLabel(g, namespace, "dash0.initcontainer.image.version") + initContainerImageVersion := readLabel(g, namespace, kind, "dash0.initcontainer.image.version") g.ExpectWithOffset(1, initContainerImageVersion).To(MatchRegexp("\\d+\\.\\d+\\.\\d+")) } -func readLabel(g Gomega, namespace string, labelKey string) string { +func readLabel(g Gomega, namespace string, kind string, labelKey string) string { labelValue, err := Run(exec.Command( "kubectl", "get", - "deployment", + kind, "--namespace", namespace, - "dash0-operator-nodejs-20-express-test-deployment", + fmt.Sprintf("dash0-operator-nodejs-20-express-test-%s", kind), "-o", fmt.Sprintf("jsonpath={.metadata.labels['%s']}", strings.ReplaceAll(labelKey, ".", "\\.")), ), false) diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index c0b4af13..9be03d9f 100644 --- a/test/e2e/e2e_test.go +++ 
b/test/e2e/e2e_test.go @@ -18,7 +18,7 @@ const ( operatorNamespace = "dash0-operator-system" operatorImage = "dash0-operator-controller:latest" - applicationUnderTestNamespace = "application-under-test-namespace" + applicationUnderTestNamespace = "e2e-application-under-test-namespace" managerYaml = "config/manager/manager.yaml" managerYamlBackup = managerYaml + ".backup" @@ -92,7 +92,8 @@ var _ = Describe("Dash0 Kubernetes Operator", Ordered, func() { }) AfterAll(func() { - By("uninstalling the Node.js deployment") + By("uninstalling the test applications") + ExpectWithOffset(1, UninstallNodeJsDaemonSet(applicationUnderTestNamespace)).To(Succeed()) ExpectWithOffset(1, UninstallNodeJsDeployment(applicationUnderTestNamespace)).To(Succeed()) if managerYamlNeedsRevert { @@ -124,7 +125,7 @@ var _ = Describe("Dash0 Kubernetes Operator", Ordered, func() { fmt.Fprint(GinkgoWriter, string(output)) }) - Context("the Dash0 operator's webhook", func() { + Context("the webhook", func() { BeforeAll(func() { DeployOperator(operatorNamespace, operatorImage) @@ -136,27 +137,43 @@ var _ = Describe("Dash0 Kubernetes Operator", Ordered, func() { UndeployOperator(operatorNamespace) }) + It("should modify new daemon sets", func() { + By("installing the Node.js daemon set") + Expect(InstallNodeJsDaemonSet(applicationUnderTestNamespace)).To(Succeed()) + By("verifying that the Node.js daemonset has been instrumented by the webhook") + SendRequestsAndVerifySpansHaveBeenProduced(applicationUnderTestNamespace, "daemonset") + }) + It("should modify new deployments", func() { By("installing the Node.js deployment") Expect(InstallNodeJsDeployment(applicationUnderTestNamespace)).To(Succeed()) By("verifying that the Node.js deployment has been instrumented by the webhook") - SendRequestsAndVerifySpansHaveBeenProduced(applicationUnderTestNamespace) + SendRequestsAndVerifySpansHaveBeenProduced(applicationUnderTestNamespace, "deployment") }) }) - Context("the Dash0 operator controller", func() { + 
Context("the controller", func() { AfterAll(func() { UndeployDash0Resource(applicationUnderTestNamespace) UndeployOperator(operatorNamespace) }) + It("should modify new daemon sets", func() { + By("installing the Node.js daemon set") + Expect(InstallNodeJsDaemonSet(applicationUnderTestNamespace)).To(Succeed()) + DeployOperator(operatorNamespace, operatorImage) + DeployDash0Resource(applicationUnderTestNamespace) + By("verifying that the Node.js daemon set has been instrumented by the controller") + SendRequestsAndVerifySpansHaveBeenProduced(applicationUnderTestNamespace, "daemonset") + }) + It("should modify existing deployments", func() { By("installing the Node.js deployment") Expect(InstallNodeJsDeployment(applicationUnderTestNamespace)).To(Succeed()) DeployOperator(operatorNamespace, operatorImage) DeployDash0Resource(applicationUnderTestNamespace) By("verifying that the Node.js deployment has been instrumented by the controller") - SendRequestsAndVerifySpansHaveBeenProduced(applicationUnderTestNamespace) + SendRequestsAndVerifySpansHaveBeenProduced(applicationUnderTestNamespace, "deployment") }) }) }) diff --git a/test/util/helpers.go b/test/util/helpers.go index fadc7978..9ee64072 100644 --- a/test/util/helpers.go +++ b/test/util/helpers.go @@ -7,9 +7,10 @@ import ( "context" "fmt" + . 
"github.com/onsi/gomega" + "github.com/dash0hq/dash0-operator/api/v1alpha1" - "github.com/onsi/gomega" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" ) @@ -20,15 +21,62 @@ func VerifySuccessEvent( resourceName string, eventSource string, ) { - allEvents, err := clientset.CoreV1().Events(namespace).List(ctx, v1.ListOptions{}) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(allEvents.Items).To(gomega.HaveLen(1)) - gomega.Expect(allEvents.Items).To( - gomega.ContainElement( + verifyEvent( + ctx, + clientset, + namespace, + resourceName, + v1alpha1.ReasonSuccessfulInstrumentation, + fmt.Sprintf("Dash0 instrumentation by %s has been successful.", eventSource), + ) +} + +func VerifyFailureEvent( + ctx context.Context, + clientset *kubernetes.Clientset, + namespace string, + resourceName string, + eventSource string, + message string, +) { + verifyEvent( + ctx, + clientset, + namespace, + resourceName, + v1alpha1.ReasonFailedInstrumentation, + message, + ) +} + +func verifyEvent( + ctx context.Context, + clientset *kubernetes.Clientset, + namespace string, + resourceName string, + reason v1alpha1.Reason, + message string, +) { + allEvents, err := clientset.CoreV1().Events(namespace).List(ctx, metav1.ListOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(allEvents.Items).To(HaveLen(1)) + Expect(allEvents.Items).To( + ContainElement( MatchEvent( namespace, resourceName, - v1alpha1.ReasonSuccessfulInstrumentation, - fmt.Sprintf("Dash0 instrumentation by %s has been successful.", eventSource), + reason, + message, ))) } + +func DeleteAllEvents( + ctx context.Context, + clientset *kubernetes.Clientset, + namespace string, +) { + err := clientset.CoreV1().Events(namespace).DeleteCollection(ctx, metav1.DeleteOptions{ + GracePeriodSeconds: new(int64), // delete immediately + }, metav1.ListOptions{}) + Expect(err).NotTo(HaveOccurred()) +} diff --git a/test/util/resources.go 
b/test/util/resources.go index 810d0515..6bbcfed3 100644 --- a/test/util/resources.go +++ b/test/util/resources.go @@ -7,8 +7,8 @@ import ( "context" . "github.com/onsi/gomega" - appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -17,12 +17,15 @@ import ( ) const ( - TestNamespaceName = "test-namespace" - DeploymentName = "deployment" - DeploymentNameExisting = "existing-deployment" + TestNamespaceName = "test-namespace" + CronJobName = "cronjob" + DaemonSetName = "daemonset" + DeploymentName = "deployment" + JobName = "job" + StatefulSetName = "statefulset" ) -func EnsureResourceExists( +func EnsureDash0CustomResourceExists( ctx context.Context, k8sClient client.Client, qualifiedName types.NamespacedName, @@ -59,7 +62,7 @@ func EnsureTestNamespaceExists( k8sClient client.Client, name string, ) *corev1.Namespace { - object := EnsureResourceExists( + object := EnsureDash0CustomResourceExists( ctx, k8sClient, types.NamespacedName{Name: name}, @@ -69,20 +72,53 @@ func EnsureTestNamespaceExists( return object.(*corev1.Namespace) } +func BasicCronJob(namespace string, name string) *batchv1.CronJob { + resource := &batchv1.CronJob{} + resource.Namespace = namespace + resource.Name = name + resource.Spec = batchv1.CronJobSpec{} + resource.Spec.Schedule = "*/1 * * * *" + resource.Spec.JobTemplate.Spec.Template = basicPodSpecTemplate() + resource.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy = corev1.RestartPolicyOnFailure + return resource +} + +func CreateBasicCronJob( + ctx context.Context, + k8sClient client.Client, + namespace string, + name string, +) *batchv1.CronJob { + return create(ctx, k8sClient, BasicCronJob(namespace, name)).(*batchv1.CronJob) +} + +func BasicDaemonSet(namespace string, name string) *appsv1.DaemonSet { + resource := &appsv1.DaemonSet{} + resource.Namespace = namespace + resource.Name = name + resource.Spec = 
appsv1.DaemonSetSpec{} + resource.Spec.Template = basicPodSpecTemplate() + resource.Spec.Selector = createSelector() + return resource +} + +func CreateBasicDaemonSet( + ctx context.Context, + k8sClient client.Client, + namespace string, + name string, +) *appsv1.DaemonSet { + return create(ctx, k8sClient, BasicDaemonSet(namespace, name)).(*appsv1.DaemonSet) +} + func BasicDeployment(namespace string, name string) *appsv1.Deployment { - deployment := &appsv1.Deployment{} - deployment.Namespace = namespace - deployment.Name = name - deployment.Spec = appsv1.DeploymentSpec{} - deployment.Spec.Template = corev1.PodTemplateSpec{} - deployment.Spec.Template.Labels = map[string]string{"app": "test"} - deployment.Spec.Template.Spec.Containers = []corev1.Container{{ - Name: "test-container-0", - Image: "ubuntu", - }} - deployment.Spec.Selector = &metav1.LabelSelector{} - deployment.Spec.Selector.MatchLabels = map[string]string{"app": "test"} - return deployment + resource := &appsv1.Deployment{} + resource.Namespace = namespace + resource.Name = name + resource.Spec = appsv1.DeploymentSpec{} + resource.Spec.Template = basicPodSpecTemplate() + resource.Spec.Selector = createSelector() + return resource } func CreateBasicDeployment( @@ -91,14 +127,72 @@ func CreateBasicDeployment( namespace string, name string, ) *appsv1.Deployment { - deployment := BasicDeployment(namespace, name) - Expect(k8sClient.Create(ctx, deployment)).Should(Succeed()) - return deployment + return create(ctx, k8sClient, BasicDeployment(namespace, name)).(*appsv1.Deployment) +} + +func BasicJob(namespace string, name string) *batchv1.Job { + resource := &batchv1.Job{} + resource.Namespace = namespace + resource.Name = name + resource.Spec = batchv1.JobSpec{} + resource.Spec.TTLSecondsAfterFinished = new(int32) + resource.Spec.Template = basicPodSpecTemplate() + resource.Spec.Template.Spec.RestartPolicy = corev1.RestartPolicyOnFailure + return resource +} + +func CreateBasicJob( + ctx context.Context, + 
k8sClient client.Client, + namespace string, + name string, +) *batchv1.Job { + return create(ctx, k8sClient, BasicJob(namespace, name)).(*batchv1.Job) +} + +func BasicStatefulSet(namespace string, name string) *appsv1.StatefulSet { + resource := &appsv1.StatefulSet{} + resource.Namespace = namespace + resource.Name = name + resource.Spec = appsv1.StatefulSetSpec{} + resource.Spec.Template = basicPodSpecTemplate() + resource.Spec.Selector = createSelector() + return resource +} + +func CreateBasicStatefulSet( + ctx context.Context, + k8sClient client.Client, + namespace string, + name string, +) *appsv1.StatefulSet { + return create(ctx, k8sClient, BasicStatefulSet(namespace, name)).(*appsv1.StatefulSet) +} + +func basicPodSpecTemplate() corev1.PodTemplateSpec { + podSpecTemplate := corev1.PodTemplateSpec{} + podSpecTemplate.Labels = map[string]string{"app": "test"} + podSpecTemplate.Spec.Containers = []corev1.Container{{ + Name: "test-container-0", + Image: "ubuntu", + }} + return podSpecTemplate +} + +func createSelector() *metav1.LabelSelector { + selector := &metav1.LabelSelector{} + selector.MatchLabels = map[string]string{"app": "test"} + return selector +} + +func create(ctx context.Context, k8sClient client.Client, resource client.Object) client.Object { + Expect(k8sClient.Create(ctx, resource)).Should(Succeed()) + return resource } func DeploymentWithMoreBellsAndWhistles(namespace string, name string) *appsv1.Deployment { - deployment := BasicDeployment(namespace, name) - podSpec := &deployment.Spec.Template.Spec + resource := BasicDeployment(namespace, name) + podSpec := &resource.Spec.Template.Spec podSpec.Volumes = []corev1.Volume{ { Name: "test-volume-0", @@ -172,7 +266,7 @@ func DeploymentWithMoreBellsAndWhistles(namespace string, name string) *appsv1.D }, } - return deployment + return resource } func DeploymentWithExistingDash0Artifacts(namespace string, name string) *appsv1.Deployment { @@ -294,17 +388,77 @@ func 
DeploymentWithExistingDash0Artifacts(namespace string, name string) *appsv1 return deployment } +func GetCronJob( + ctx context.Context, + k8sClient client.Client, + namespace string, + name string, +) *batchv1.CronJob { + resource := &batchv1.CronJob{} + namespacedName := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + ExpectWithOffset(1, k8sClient.Get(ctx, namespacedName, resource)).Should(Succeed()) + return resource +} + +func GetDaemonSet( + ctx context.Context, + k8sClient client.Client, + namespace string, + name string, +) *appsv1.DaemonSet { + resource := &appsv1.DaemonSet{} + namespacedName := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + ExpectWithOffset(1, k8sClient.Get(ctx, namespacedName, resource)).Should(Succeed()) + return resource +} + func GetDeployment( ctx context.Context, k8sClient client.Client, namespace string, - deploymentName string, + name string, ) *appsv1.Deployment { - deployment := &appsv1.Deployment{} + resource := &appsv1.Deployment{} namespacedName := types.NamespacedName{ Namespace: namespace, - Name: deploymentName, + Name: name, } - ExpectWithOffset(1, k8sClient.Get(ctx, namespacedName, deployment)).Should(Succeed()) - return deployment + ExpectWithOffset(1, k8sClient.Get(ctx, namespacedName, resource)).Should(Succeed()) + return resource +} + +func GetJob( + ctx context.Context, + k8sClient client.Client, + namespace string, + name string, +) *batchv1.Job { + resource := &batchv1.Job{} + namespacedName := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + ExpectWithOffset(1, k8sClient.Get(ctx, namespacedName, resource)).Should(Succeed()) + return resource +} + +func GetStatefulSet( + ctx context.Context, + k8sClient client.Client, + namespace string, + name string, +) *appsv1.StatefulSet { + resource := &appsv1.StatefulSet{} + namespacedName := types.NamespacedName{ + Namespace: namespace, + Name: name, + } + ExpectWithOffset(1, k8sClient.Get(ctx, namespacedName, 
resource)).Should(Succeed()) + return resource } diff --git a/test/util/verification.go b/test/util/verification.go index 1d43f0ac..739ba641 100644 --- a/test/util/verification.go +++ b/test/util/verification.go @@ -7,7 +7,10 @@ import ( "fmt" . "github.com/onsi/gomega" + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -22,7 +25,7 @@ type ContainerExpectations struct { Dash0CollectorBaseUrlEnvVarExpectedValue string } -type DeploymentExpectations struct { +type PodSpecExpectations struct { Volumes int Dash0VolumeIdx int InitContainers int @@ -30,9 +33,61 @@ type DeploymentExpectations struct { Containers []ContainerExpectations } -func VerifyModifiedDeployment(deployment *appsv1.Deployment, expectations DeploymentExpectations) { - podSpec := deployment.Spec.Template.Spec +var ( + BasicPodSpecExpectations = PodSpecExpectations{ + Volumes: 1, + Dash0VolumeIdx: 0, + InitContainers: 1, + Dash0InitContainerIdx: 0, + Containers: []ContainerExpectations{{ + VolumeMounts: 1, + Dash0VolumeMountIdx: 0, + EnvVars: 2, + NodeOptionsEnvVarIdx: 0, + Dash0CollectorBaseUrlEnvVarIdx: 1, + Dash0CollectorBaseUrlEnvVarExpectedValue:// + "http://dash0-opentelemetry-collector-daemonset.test-namespace.svc.cluster.local:4318", + }}, + } +) + +func VerifyModifiedCronJob(resource *batchv1.CronJob, expectations PodSpecExpectations) { + verifyModifiedPodSpec(resource.Spec.JobTemplate.Spec.Template.Spec, expectations) + verifyLabelsAfterSuccessfulModification(resource.Spec.JobTemplate.Spec.Template.ObjectMeta) + verifyLabelsAfterSuccessfulModification(resource.ObjectMeta) +} + +func VerifyModifiedDaemonSet(resource *appsv1.DaemonSet, expectations PodSpecExpectations) { + verifyModifiedPodSpec(resource.Spec.Template.Spec, expectations) + verifyLabelsAfterSuccessfulModification(resource.Spec.Template.ObjectMeta) + verifyLabelsAfterSuccessfulModification(resource.ObjectMeta) +} + +func 
VerifyModifiedDeployment(resource *appsv1.Deployment, expectations PodSpecExpectations) { + verifyModifiedPodSpec(resource.Spec.Template.Spec, expectations) + verifyLabelsAfterSuccessfulModification(resource.Spec.Template.ObjectMeta) + verifyLabelsAfterSuccessfulModification(resource.ObjectMeta) +} + +func VerifyModifiedJob(resource *batchv1.Job, expectations PodSpecExpectations) { + verifyModifiedPodSpec(resource.Spec.Template.Spec, expectations) + verifyLabelsAfterSuccessfulModification(resource.Spec.Template.ObjectMeta) + verifyLabelsAfterSuccessfulModification(resource.ObjectMeta) +} + +func VerifyUnmodifiedJob(resource *batchv1.Job) { + verifyUnmodifiedPodSpec(resource.Spec.Template.Spec) + verifyNoDash0Labels(resource.Spec.Template.ObjectMeta) + verifyLabelsAfterFailureToModify(resource.ObjectMeta) +} +func VerifyModifiedStatefulSet(resource *appsv1.StatefulSet, expectations PodSpecExpectations) { + verifyModifiedPodSpec(resource.Spec.Template.Spec, expectations) + verifyLabelsAfterSuccessfulModification(resource.Spec.Template.ObjectMeta) + verifyLabelsAfterSuccessfulModification(resource.ObjectMeta) +} + +func verifyModifiedPodSpec(podSpec corev1.PodSpec, expectations PodSpecExpectations) { Expect(podSpec.Volumes).To(HaveLen(expectations.Volumes)) for i, volume := range podSpec.Volumes { if i == expectations.Dash0VolumeIdx { @@ -95,13 +150,33 @@ func VerifyModifiedDeployment(deployment *appsv1.Deployment, expectations Deploy } } } +} - verifyLabels(deployment.Spec.Template.ObjectMeta) - verifyLabels(deployment.ObjectMeta) +func verifyUnmodifiedPodSpec(podSpec corev1.PodSpec) { + Expect(podSpec.Volumes).To(BeEmpty()) + Expect(podSpec.InitContainers).To(BeEmpty()) + Expect(podSpec.Containers).To(HaveLen(1)) + for i, container := range podSpec.Containers { + Expect(container.Name).To(Equal(fmt.Sprintf("test-container-%d", i))) + Expect(container.VolumeMounts).To(BeEmpty()) + Expect(container.Env).To(BeEmpty()) + } } -func verifyLabels(meta metav1.ObjectMeta) { 
+func verifyLabelsAfterSuccessfulModification(meta metav1.ObjectMeta) { Expect(meta.Labels["dash0.instrumented"]).To(Equal("true")) Expect(meta.Labels["dash0.operator.version"]).To(Equal("1.2.3")) Expect(meta.Labels["dash0.initcontainer.image.version"]).To(Equal("4.5.6")) } + +func verifyLabelsAfterFailureToModify(meta metav1.ObjectMeta) { + Expect(meta.Labels["dash0.instrumented"]).To(Equal("false")) + Expect(meta.Labels["dash0.operator.version"]).To(Equal("1.2.3")) + Expect(meta.Labels["dash0.initcontainer.image.version"]).To(Equal("4.5.6")) +} + +func verifyNoDash0Labels(meta metav1.ObjectMeta) { + Expect(meta.Labels["dash0.instrumented"]).To(Equal("")) + Expect(meta.Labels["dash0.operator.version"]).To(Equal("")) + Expect(meta.Labels["dash0.initcontainer.image.version"]).To(Equal("")) +}