From b137f2ca2b7c121dffff00d0f3253f33f158b4e6 Mon Sep 17 00:00:00 2001
From: Nick Ripley
Date: Thu, 30 May 2024 10:52:21 -0400
Subject: [PATCH] profiler: remove TestProfilerPassthrough and BenchmarkDoRequest (#2702)

TestProfilerPassthrough does not verify anything useful. It modifies
several private variables/fields to change the behavior of the profiler,
and makes assertions about the modified behavior. If we change the
internal implementation of the profiler, this test could break without
identifying an actual bug. Tests should, where possible, only make
assertions about the observable behavior (what the profiler sends to the
agent) based on the public API (what a customer will actually use).
TestAllUploaded does what this test is meant to do, but without
perturbing the implementation.

BenchmarkDoRequest also modifies implementation details and, more
importantly, measures in an unrealistic way an operation that is almost
entirely I/O-bound. The real overhead of the profiler comes from
elsewhere: delta profile computation, which we already measure, and the
overhead of the profilers themselves, which are better studied on real
workloads.
---
 profiler/profiler_test.go | 35 -----------------------------------
 profiler/upload_test.go   | 31 -------------------------------
 2 files changed, 66 deletions(-)

diff --git a/profiler/profiler_test.go b/profiler/profiler_test.go
index e9d56444c2..5e94eac82e 100644
--- a/profiler/profiler_test.go
+++ b/profiler/profiler_test.go
@@ -236,41 +236,6 @@ func TestSetProfileFraction(t *testing.T) {
 	})
 }
 
-func TestProfilerPassthrough(t *testing.T) {
-	if testing.Short() {
-		return
-	}
-	beforeExecutionTraceEnabledDefault := executionTraceEnabledDefault
-	executionTraceEnabledDefault = false
-	defer func() { executionTraceEnabledDefault = beforeExecutionTraceEnabledDefault }()
-
-	out := make(chan batch)
-	p, err := newProfiler()
-	require.NoError(t, err)
-	p.cfg.period = 200 * time.Millisecond
-	p.cfg.cpuDuration = 1 * time.Millisecond
-	p.uploadFunc = func(bat batch) error {
-		out <- bat
-		return nil
-	}
-	p.run()
-	defer p.stop()
-	var bat batch
-	select {
-	case bat = <-out:
-		// TODO (knusbaum) this timeout is long because we were seeing timeouts at 500ms.
-		// it would be nice to have a time-independent way to test this
-	case <-time.After(1000 * time.Millisecond):
-		t.Fatal("time expired")
-	}
-
-	assert := assert.New(t)
-	// should contain cpu.pprof, delta-heap.pprof
-	assert.Equal(2, len(bat.profiles))
-	assert.NotEmpty(bat.profiles[0].data)
-	assert.NotEmpty(bat.profiles[1].data)
-}
-
 func unstartedProfiler(opts ...Option) (*profiler, error) {
 	p, err := newProfiler(opts...)
 	if err != nil {
diff --git a/profiler/upload_test.go b/profiler/upload_test.go
index acaa471011..1e56eb0e4a 100644
--- a/profiler/upload_test.go
+++ b/profiler/upload_test.go
@@ -7,7 +7,6 @@ package profiler
 
 import (
 	"fmt"
-	"io"
 	"net"
 	"net/http"
 	"net/http/httptest"
@@ -187,36 +186,6 @@ func TestEntityIDHeader(t *testing.T) {
 	assert.Equal(t, entityID, profile.headers.Get("Datadog-Entity-Id"))
 }
 
-func BenchmarkDoRequest(b *testing.B) {
-	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
-		_, err := io.ReadAll(req.Body)
-		if err != nil {
-			b.Fatal(err)
-		}
-		req.Body.Close()
-		w.WriteHeader(200)
-	}))
-	defer srv.Close()
-	prof := profile{
-		name: "heap",
-		data: []byte("my-heap-profile"),
-	}
-	bat := batch{
-		start:    time.Now().Add(-10 * time.Second),
-		end:      time.Now(),
-		host:     "my-host",
-		profiles: []*profile{&prof},
-	}
-	p, err := unstartedProfiler()
-	require.NoError(b, err)
-	b.ReportAllocs()
-	b.ResetTimer()
-
-	for i := 0; i < b.N; i++ {
-		p.doRequest(bat)
-	}
-}
-
 func TestGitMetadata(t *testing.T) {
 	maininternal.ResetGitMetadataTags()
 	defer maininternal.ResetGitMetadataTags()
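
For reference, the behavior-level style the commit message recommends looks
roughly like the sketch below: drive the profiler purely through its public
API (profiler.Start / profiler.Stop), point it at an httptest server standing
in for the agent, and assert only on the uploads that server receives. The
test name, the fake-agent handler, and the loose multipart assertion are
illustrative assumptions, not the actual TestAllUploaded from the repository.

// Sketch of a behavior-level profiler test; assumes the public
// gopkg.in/DataDog/dd-trace-go.v1/profiler API. Illustrative only.
package profiler_test

import (
	"mime/multipart"
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"
	"time"

	"gopkg.in/DataDog/dd-trace-go.v1/profiler"
)

func TestProfilesReachAgent(t *testing.T) {
	uploads := make(chan *multipart.Form, 1)
	// Fake agent: parse each upload and hand the first multipart form
	// to the test goroutine for inspection.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if err := r.ParseMultipartForm(64 << 20); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		select {
		case uploads <- r.MultipartForm:
		default: // one captured upload is enough; drop the rest
		}
	}))
	defer srv.Close()

	// Configure the profiler exclusively through exported options.
	err := profiler.Start(
		profiler.WithAgentAddr(strings.TrimPrefix(srv.URL, "http://")),
		profiler.WithPeriod(10*time.Millisecond),
		profiler.CPUDuration(10*time.Millisecond),
		profiler.WithProfileTypes(profiler.CPUProfile, profiler.HeapProfile),
	)
	if err != nil {
		t.Fatal(err)
	}
	defer profiler.Stop()

	select {
	case form := <-uploads:
		// Assert only on observable behavior: the upload carries at least
		// one attached profile. The exact multipart field names are an
		// intake detail, so this sketch deliberately keeps the check loose.
		if len(form.File) == 0 {
			t.Fatal("upload contained no profile attachments")
		}
	case <-time.After(5 * time.Second):
		t.Fatal("timed out waiting for a profile upload")
	}
}

Because the fake agent observes only HTTP traffic, a test in this style keeps
passing across internal refactors (renamed fields, a different upload queue,
and so on) and fails only when the externally visible upload behavior changes.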