From 2b58a15383af9783e28a4a5d5e5fa7fecbf3823f Mon Sep 17 00:00:00 2001 From: Christos Markou Date: Thu, 7 Nov 2024 16:00:08 +0200 Subject: [PATCH 01/24] [pkg/stanza] Ensure time parsing happens before entry is sent downwards (#36213) #### Description This issue was caught at https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/35758. This PR ensures that time parsing happens before the entry is sent to the next operator in the pipeline. #### Link to tracking issue Fixes ~ #### Testing Added #### Documentation ~ Signed-off-by: ChrsMark --- .chloggen/fix_container_time_parsing.yaml | 27 ++++++++ .../operator/parser/container/parser.go | 33 +++++----- .../operator/parser/container/parser_test.go | 63 +++++++++++++++++++ 3 files changed, 104 insertions(+), 19 deletions(-) create mode 100644 .chloggen/fix_container_time_parsing.yaml diff --git a/.chloggen/fix_container_time_parsing.yaml b/.chloggen/fix_container_time_parsing.yaml new file mode 100644 index 000000000000..6c108aed5561 --- /dev/null +++ b/.chloggen/fix_container_time_parsing.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: bug_fix + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: pkg/stanza + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Ensure that time parsing happens before entry is sent to downstream operators + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [36213] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [user] diff --git a/pkg/stanza/operator/parser/container/parser.go b/pkg/stanza/operator/parser/container/parser.go index 5c33005435f4..b53c025869df 100644 --- a/pkg/stanza/operator/parser/container/parser.go +++ b/pkg/stanza/operator/parser/container/parser.go @@ -63,12 +63,11 @@ type Parser struct { asyncConsumerStarted bool criConsumerStartOnce sync.Once criConsumers *sync.WaitGroup + timeLayout string } // Process will parse an entry of Container logs func (p *Parser) Process(ctx context.Context, entry *entry.Entry) (err error) { - var timeLayout string - format := p.format if format == "" { format, err = p.detectFormat(entry) @@ -79,15 +78,11 @@ func (p *Parser) Process(ctx context.Context, entry *entry.Entry) (err error) { switch format { case dockerFormat: - err = p.ParserOperator.ProcessWithCallback(ctx, entry, p.parseDocker, p.handleAttributeMappings) + p.timeLayout = goTimeLayout + err = p.ParserOperator.ProcessWithCallback(ctx, entry, p.parseDocker, p.handleTimeAndAttributeMappings) if err != nil { return fmt.Errorf("failed to process the docker log: %w", err) } - timeLayout = goTimeLayout - err = parseTime(entry, timeLayout) - if err != nil { - return fmt.Errorf("failed to parse time: %w", err) - } case containerdFormat, crioFormat: p.criConsumerStartOnce.Do(func() { err = p.criLogEmitter.Start(nil) @@ -119,22 +114,17 @@ func (p *Parser) Process(ctx context.Context, entry *entry.Entry) (err error) { if err != nil { return fmt.Errorf("failed to parse containerd log: %w", err) } - timeLayout = goTimeLayout + p.timeLayout = goTimeLayout } else { // parse the message err = p.ParserOperator.ParseWith(ctx, entry, p.parseCRIO) if err != nil { return fmt.Errorf("failed to parse crio log: %w", err) } - timeLayout = crioTimeLayout + p.timeLayout = crioTimeLayout } - err = parseTime(entry, timeLayout) - if err != nil { - return fmt.Errorf("failed to parse time: %w", err) - } - - err = p.handleAttributeMappings(entry) + err = p.handleTimeAndAttributeMappings(entry) if err != nil { return fmt.Errorf("failed to handle attribute mappings: %w", err) } @@ -251,9 +241,14 @@ func (p *Parser) parseDocker(value any) (any, error) { return parsedValue, nil } -// handleAttributeMappings handles fields' mappings and k8s meta extraction -func (p *Parser) handleAttributeMappings(e *entry.Entry) error { - err := p.handleMoveAttributes(e) +// handleTimeAndAttributeMappings handles fields' mappings and k8s meta extraction +func (p *Parser) handleTimeAndAttributeMappings(e *entry.Entry) error { + err := parseTime(e, p.timeLayout) + if err != nil { + return fmt.Errorf("failed to parse time: %w", err) + } + + err = p.handleMoveAttributes(e) if err != nil { return err } diff --git a/pkg/stanza/operator/parser/container/parser_test.go b/pkg/stanza/operator/parser/container/parser_test.go index 9c684e74d31a..93e769c23f4b 100644 --- a/pkg/stanza/operator/parser/container/parser_test.go +++ b/pkg/stanza/operator/parser/container/parser_test.go @@ -408,6 +408,69 @@ func TestRecombineProcess(t *testing.T) { } } +func TestProcessWithDockerTime(t *testing.T) { + cases := []struct { + name string + op func() (operator.Operator, error) + input *entry.Entry + expectedOutput *entry.Entry + }{ + { + "docker", + func() (operator.Operator, error) { + cfg := NewConfigWithID("test_id") + cfg.AddMetadataFromFilePath = true + set := componenttest.NewNopTelemetrySettings() + return cfg.Build(set) + }, + &entry.Entry{ + Body: `{"log":"INFO: log line 
here","stream":"stdout","time":"2029-03-30T08:31:20.545192187Z"}`, + Attributes: map[string]any{ + "log.file.path": "/var/log/pods/some_kube-scheduler-kind-control-plane_49cc7c1fd3702c40b2686ea7486091d3/kube-scheduler44/1.log", + }, + }, + &entry.Entry{ + Attributes: map[string]any{ + "log.iostream": "stdout", + "log.file.path": "/var/log/pods/some_kube-scheduler-kind-control-plane_49cc7c1fd3702c40b2686ea7486091d3/kube-scheduler44/1.log", + }, + Body: "INFO: log line here", + Resource: map[string]any{ + "k8s.pod.name": "kube-scheduler-kind-control-plane", + "k8s.pod.uid": "49cc7c1fd3702c40b2686ea7486091d3", + "k8s.container.name": "kube-scheduler44", + "k8s.container.restart_count": "1", + "k8s.namespace.name": "some", + }, + Timestamp: time.Date(2029, time.March, 30, 8, 31, 20, 545192187, time.UTC), + }, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + ctx := context.Background() + op, err := tc.op() + require.NoError(t, err) + defer func() { require.NoError(t, op.Stop()) }() + r := op.(*Parser) + + fake := testutil.NewFakeOutput(t) + r.OutputOperators = ([]operator.Operator{fake}) + + require.NoError(t, r.Process(ctx, tc.input)) + + fake.ExpectEntry(t, tc.expectedOutput) + + select { + case e := <-fake.Received: + require.FailNow(t, "Received unexpected entry: ", e) + default: + } + }) + } +} + func TestCRIRecombineProcessWithFailedDownstreamOperator(t *testing.T) { cases := []struct { name string From c840e69bbbd185f0fc7f167c0fc1794fc7d10e57 Mon Sep 17 00:00:00 2001 From: Florian Bacher Date: Thu, 7 Nov 2024 15:48:04 +0100 Subject: [PATCH 02/24] [pkg/stanza] make log emitter and entry conversion in adapter synchronous (#35669) #### Description This PR changes the `LogEmitter` to accept a synchronous consumer callback function for processing a batch of log entries as an alternative to sending log entry batches to a channel. The components that use the `LogEmitter` (adapter and parser) have been adapted accordingly. Also, in the case of the adapter, the log entries are converted directly, rather than sending them over a channel to the converter and receiving the converted results over a different channel. #### Link to tracking issue Fixes #35453 #### Testing I did some initial performance tests using the `TestLogLargeFiles` load test to see how this change affects the performance. Below are the results: **Before the change (i.e. with async log entry batch processing)** ``` === RUN TestLogLargeFiles/filelog-largefiles-2Gb-lifetime 2024/10/08 09:02:53 | Sent:17,769,795 logs (179,507/sec) | Received:17,755,188 items (179,346/sec) === RUN TestLogLargeFiles/filelog-largefiles-6GB-lifetime 2024/10/08 09:06:29 | Sent:42,857,755 logs (216,465/sec) | Received:42,851,987 items (216,424/sec) Test |Result|Duration|CPU Avg%|CPU Max%|RAM Avg MiB|RAM Max MiB|Sent Items|Received Items| ---------------------------------------------|------|-------:|-------:|-------:|----------:|----------:|---------:|-------------:| LogLargeFiles/filelog-largefiles-2Gb-lifetime|PASS | 100s| 73.1| 78.4| 106| 118| 18249451| 18249451| LogLargeFiles/filelog-largefiles-6GB-lifetime|PASS | 200s| 87.5| 98.1| 110| 116| 44358460| 44358460| ``` **After the change (i.e. 
with sync log entry batch processing)** ``` === RUN TestLogLargeFiles/filelog-largefiles-2Gb-lifetime 2024/10/08 10:09:51 Agent RAM (RES): 139 MiB, CPU:71.7% | Sent:17,802,561 logs (179,836/sec) | Received:17,788,273 items (179,680/sec) === RUN TestLogLargeFiles/filelog-largefiles-6GB-lifetime 2024/10/08 10:05:15 Agent RAM (RES): 140 MiB, CPU:95.6% | Sent:42,912,030 logs (216,744/sec) | Received:42,904,306 items (216,689/sec) Test |Result|Duration|CPU Avg%|CPU Max%|RAM Avg MiB|RAM Max MiB|Sent Items|Received Items| ---------------------------------------------|------|-------:|-------:|-------:|----------:|----------:|---------:|-------------:| LogLargeFiles/filelog-largefiles-2Gb-lifetime|PASS | 100s| 74.8| 78.9| 127| 139| 17984687| 17984687| LogLargeFiles/filelog-largefiles-6GB-lifetime|PASS | 200s| 89.3| 100.9| 134| 140| 43376210| 43376210| ``` Those results seem to indicate comparable throughput, but with an increased resource consumption, especially in terms of memory. I also did a test comparing the performance between the synchronous and asynchronous log emitter using the same methodology as in #35454. The results were the following, and indicate an increase in the time it takes for reading the generated log file (see #35454 for details on how the file is generated and the test execution): - Async Log Emitter: ~8s - Sync Log Emitter: ~12s
output-async.log === Step 3: Thu Oct 10 08:54:23 CEST 2024 otelcol_receiver_accepted_log_records{receiver="filelog",service_instance_id="3744d4cb-5080-427c-8c16-a96cb40a57d4",service_name="otelcontribcol",service_version="0.111.0-dev",transport=""} 2.209674e+06 === Step 4: Thu Oct 10 08:54:25 CEST 2024 otelcol_receiver_accepted_log_records{receiver="filelog",service_instance_id="3744d4cb-5080-427c-8c16-a96cb40a57d4",service_name="otelcontribcol",service_version="0.111.0-dev",transport=""} 5.428103e+06 === Step 5: Thu Oct 10 08:54:26 CEST 2024 otelcol_receiver_accepted_log_records{receiver="filelog",service_instance_id="3744d4cb-5080-427c-8c16-a96cb40a57d4",service_name="otelcontribcol",service_version="0.111.0-dev",transport=""} 7.337017e+06 === Step 6: Thu Oct 10 08:54:27 CEST 2024 otelcol_receiver_accepted_log_records{receiver="filelog",service_instance_id="3744d4cb-5080-427c-8c16-a96cb40a57d4",service_name="otelcontribcol",service_version="0.111.0-dev",transport=""} 9.258843e+06 === Step 7: Thu Oct 10 08:54:29 CEST 2024 otelcol_receiver_accepted_log_records{receiver="filelog",service_instance_id="3744d4cb-5080-427c-8c16-a96cb40a57d4",service_name="otelcontribcol",service_version="0.111.0-dev",transport=""} 1.3082428e+07 === Step 8: Thu Oct 10 08:54:31 CEST 2024 otelcol_receiver_accepted_log_records{receiver="filelog",service_instance_id="3744d4cb-5080-427c-8c16-a96cb40a57d4",service_name="otelcontribcol",service_version="0.111.0-dev",transport=""} 1.6519068e+07
output-sync.log === Step 2: Thu Oct 10 08:51:27 CEST 2024 otelcol_receiver_accepted_log_records{receiver="filelog",service_instance_id="dcf5371b-19eb-47b3-a820-756c1832b448",service_name="otelcontribcol",service_version="0.111.0-dev",transport=""} 1.580891e+06 === Step 3: Thu Oct 10 08:51:28 CEST 2024 otelcol_receiver_accepted_log_records{receiver="filelog",service_instance_id="dcf5371b-19eb-47b3-a820-756c1832b448",service_name="otelcontribcol",service_version="0.111.0-dev",transport=""} 3.01034e+06 === Step 4: Thu Oct 10 08:51:29 CEST 2024 otelcol_receiver_accepted_log_records{receiver="filelog",service_instance_id="dcf5371b-19eb-47b3-a820-756c1832b448",service_name="otelcontribcol",service_version="0.111.0-dev",transport=""} 4.434627e+06 === Step 5: Thu Oct 10 08:51:31 CEST 2024 otelcol_receiver_accepted_log_records{receiver="filelog",service_instance_id="dcf5371b-19eb-47b3-a820-756c1832b448",service_name="otelcontribcol",service_version="0.111.0-dev",transport=""} 7.416612e+06 === Step 6: Thu Oct 10 08:51:34 CEST 2024 otelcol_receiver_accepted_log_records{receiver="filelog",service_instance_id="dcf5371b-19eb-47b3-a820-756c1832b448",service_name="otelcontribcol",service_version="0.111.0-dev",transport=""} 1.0496072e+07 === Step 7: Thu Oct 10 08:51:36 CEST 2024 otelcol_receiver_accepted_log_records{receiver="filelog",service_instance_id="dcf5371b-19eb-47b3-a820-756c1832b448",service_name="otelcontribcol",service_version="0.111.0-dev",transport=""} 1.3523882e+07 === Step 8: Thu Oct 10 08:51:37 CEST 2024 otelcol_receiver_accepted_log_records{receiver="filelog",service_instance_id="dcf5371b-19eb-47b3-a820-756c1832b448",service_name="otelcontribcol",service_version="0.111.0-dev",transport=""} 1.4929707e+07 === Step 9: Thu Oct 10 08:51:39 CEST 2024 otelcol_receiver_accepted_log_records{receiver="filelog",service_instance_id="dcf5371b-19eb-47b3-a820-756c1832b448",service_name="otelcontribcol",service_version="0.111.0-dev",transport=""} 1.6519105e+07
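For illustration only (not part of this patch), below is a minimal sketch of how a component wires the emitter after this change: the consumer callback is passed directly to `helper.NewLogEmitter`, which converts each batch with `adapter.ConvertEntries` and hands it off synchronously, replacing the old `OutChannel()`-based goroutine hand-off. The function name `newEmitterSketch` and its parameters are placeholders, not code from this PR.

```go
package example

import (
	"context"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/consumer"
	"go.uber.org/zap"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter"
	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry"
	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper"
)

// newEmitterSketch is a hypothetical helper showing the post-change wiring:
// the consumer callback is given to NewLogEmitter up front instead of
// draining emitter.OutChannel() in a separate goroutine.
func newEmitterSketch(set component.TelemetrySettings, next consumer.Logs) *helper.LogEmitter {
	consume := func(ctx context.Context, entries []*entry.Entry) {
		// Each batch is converted and delivered synchronously by the
		// emitter, rather than being sent over a channel to a converter
		// and read back from another channel.
		pLogs := adapter.ConvertEntries(entries)
		if err := next.ConsumeLogs(ctx, pLogs); err != nil {
			set.Logger.Error("ConsumeLogs() failed", zap.Error(err))
		}
	}
	return helper.NewLogEmitter(set, consume)
}
```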
--------- Signed-off-by: Florian Bacher Co-authored-by: Andrzej Stencel --- .chloggen/stanza-sync-log-emitter.yaml | 27 ++++ pkg/stanza/adapter/converter.go | 54 +++---- pkg/stanza/adapter/factory.go | 44 +++--- pkg/stanza/adapter/integration_test.go | 23 +-- pkg/stanza/adapter/receiver.go | 98 +++---------- pkg/stanza/adapter/receiver_test.go | 133 ++++++++++-------- pkg/stanza/operator/helper/emitter.go | 34 +---- pkg/stanza/operator/helper/emitter_test.go | 94 ++++++++----- .../operator/parser/container/config.go | 22 +-- .../operator/parser/container/parser.go | 27 ++-- processor/logstransformprocessor/processor.go | 95 +------------ testbed/tests/log_test.go | 9 ++ 12 files changed, 283 insertions(+), 377 deletions(-) create mode 100644 .chloggen/stanza-sync-log-emitter.yaml diff --git a/.chloggen/stanza-sync-log-emitter.yaml b/.chloggen/stanza-sync-log-emitter.yaml new file mode 100644 index 000000000000..b8bc8cfbf954 --- /dev/null +++ b/.chloggen/stanza-sync-log-emitter.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: bug_fix + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: pkg/stanza + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Synchronous handling of entries passed from the log emitter to the receiver adapter + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [35453] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [] diff --git a/pkg/stanza/adapter/converter.go b/pkg/stanza/adapter/converter.go index 2f8c3540cf42..3ab508745bc3 100644 --- a/pkg/stanza/adapter/converter.go +++ b/pkg/stanza/adapter/converter.go @@ -155,44 +155,46 @@ func (c *Converter) workerLoop() { defer c.wg.Done() for entries := range c.workerChan { + // Send plogs directly to flushChan + c.flushChan <- ConvertEntries(entries) + } +} - resourceHashToIdx := make(map[uint64]int) - scopeIdxByResource := make(map[uint64]map[string]int) +func ConvertEntries(entries []*entry.Entry) plog.Logs { + resourceHashToIdx := make(map[uint64]int) + scopeIdxByResource := make(map[uint64]map[string]int) - pLogs := plog.NewLogs() - var sl plog.ScopeLogs + pLogs := plog.NewLogs() + var sl plog.ScopeLogs - for _, e := range entries { - resourceID := HashResource(e.Resource) - var rl plog.ResourceLogs + for _, e := range entries { + resourceID := HashResource(e.Resource) + var rl plog.ResourceLogs - resourceIdx, ok := resourceHashToIdx[resourceID] - if !ok { - resourceHashToIdx[resourceID] = pLogs.ResourceLogs().Len() + resourceIdx, ok := resourceHashToIdx[resourceID] + if !ok { + resourceHashToIdx[resourceID] = pLogs.ResourceLogs().Len() - rl = pLogs.ResourceLogs().AppendEmpty() - upsertToMap(e.Resource, rl.Resource().Attributes()) + rl = pLogs.ResourceLogs().AppendEmpty() + upsertToMap(e.Resource, rl.Resource().Attributes()) - scopeIdxByResource[resourceID] = map[string]int{e.ScopeName: 0} + scopeIdxByResource[resourceID] = map[string]int{e.ScopeName: 0} + sl = rl.ScopeLogs().AppendEmpty() + sl.Scope().SetName(e.ScopeName) + } else { + rl = pLogs.ResourceLogs().At(resourceIdx) + scopeIdxInResource, ok := scopeIdxByResource[resourceID][e.ScopeName] + if !ok { + scopeIdxByResource[resourceID][e.ScopeName] = rl.ScopeLogs().Len() sl = rl.ScopeLogs().AppendEmpty() sl.Scope().SetName(e.ScopeName) } else { - rl = pLogs.ResourceLogs().At(resourceIdx) - scopeIdxInResource, ok := scopeIdxByResource[resourceID][e.ScopeName] - if !ok { - scopeIdxByResource[resourceID][e.ScopeName] = rl.ScopeLogs().Len() - sl = rl.ScopeLogs().AppendEmpty() - sl.Scope().SetName(e.ScopeName) - } else { - sl = pLogs.ResourceLogs().At(resourceIdx).ScopeLogs().At(scopeIdxInResource) - } + sl = pLogs.ResourceLogs().At(resourceIdx).ScopeLogs().At(scopeIdxInResource) } - convertInto(e, sl.LogRecords().AppendEmpty()) } - - // Send plogs directly to flushChan - c.flushChan <- pLogs + convertInto(e, sl.LogRecords().AppendEmpty()) } + return pLogs } func (c *Converter) flushLoop() { diff --git a/pkg/stanza/adapter/factory.go b/pkg/stanza/adapter/factory.go index 2f42a1480bb6..e4b8c83ecac2 100644 --- a/pkg/stanza/adapter/factory.go +++ b/pkg/stanza/adapter/factory.go @@ -46,6 +46,21 @@ func createLogsReceiver(logReceiverType LogReceiverType) rcvr.CreateLogsFunc { operators := append([]operator.Config{inputCfg}, baseCfg.Operators...) 
+ obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{ + ReceiverID: params.ID, + ReceiverCreateSettings: params, + }) + if err != nil { + return nil, err + } + rcv := &receiver{ + set: params.TelemetrySettings, + id: params.ID, + consumer: consumerretry.NewLogs(baseCfg.RetryOnFailure, params.Logger, nextConsumer), + obsrecv: obsrecv, + storageID: baseCfg.StorageID, + } + var emitterOpts []helper.EmitterOption if baseCfg.maxBatchSize > 0 { emitterOpts = append(emitterOpts, helper.WithMaxBatchSize(baseCfg.maxBatchSize)) @@ -53,7 +68,8 @@ func createLogsReceiver(logReceiverType LogReceiverType) rcvr.CreateLogsFunc { if baseCfg.flushInterval > 0 { emitterOpts = append(emitterOpts, helper.WithFlushInterval(baseCfg.flushInterval)) } - emitter := helper.NewLogEmitter(params.TelemetrySettings, emitterOpts...) + + emitter := helper.NewLogEmitter(params.TelemetrySettings, rcv.consumeEntries, emitterOpts...) pipe, err := pipeline.Config{ Operators: operators, DefaultOutput: emitter, @@ -62,27 +78,9 @@ func createLogsReceiver(logReceiverType LogReceiverType) rcvr.CreateLogsFunc { return nil, err } - var converterOpts []converterOption - if baseCfg.numWorkers > 0 { - converterOpts = append(converterOpts, withWorkerCount(baseCfg.numWorkers)) - } - converter := NewConverter(params.TelemetrySettings, converterOpts...) - obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{ - ReceiverID: params.ID, - ReceiverCreateSettings: params, - }) - if err != nil { - return nil, err - } - return &receiver{ - set: params.TelemetrySettings, - id: params.ID, - pipe: pipe, - emitter: emitter, - consumer: consumerretry.NewLogs(baseCfg.RetryOnFailure, params.Logger, nextConsumer), - converter: converter, - obsrecv: obsrecv, - storageID: baseCfg.StorageID, - }, nil + rcv.emitter = emitter + rcv.pipe = pipe + + return rcv, nil } } diff --git a/pkg/stanza/adapter/integration_test.go b/pkg/stanza/adapter/integration_test.go index a088a917c808..c75eeedd56d9 100644 --- a/pkg/stanza/adapter/integration_test.go +++ b/pkg/stanza/adapter/integration_test.go @@ -27,7 +27,7 @@ import ( func createNoopReceiver(nextConsumer consumer.Logs) (*receiver, error) { set := componenttest.NewNopTelemetrySettings() set.Logger = zap.NewNop() - emitter := helper.NewLogEmitter(set) + pipe, err := pipeline.Config{ Operators: []operator.Config{ { @@ -48,15 +48,18 @@ func createNoopReceiver(nextConsumer consumer.Logs) (*receiver, error) { return nil, err } - return &receiver{ - set: set, - id: component.MustNewID("testReceiver"), - pipe: pipe, - emitter: emitter, - consumer: nextConsumer, - converter: NewConverter(componenttest.NewNopTelemetrySettings()), - obsrecv: obsrecv, - }, nil + rcv := &receiver{ + set: set, + id: component.MustNewID("testReceiver"), + pipe: pipe, + consumer: nextConsumer, + obsrecv: obsrecv, + } + + emitter := helper.NewLogEmitter(set, rcv.consumeEntries) + + rcv.emitter = emitter + return rcv, nil } // BenchmarkEmitterToConsumer serves as a benchmark for entries going from the emitter to consumer, diff --git a/pkg/stanza/adapter/receiver.go b/pkg/stanza/adapter/receiver.go index 61124e3bf3c7..5b7760992181 100644 --- a/pkg/stanza/adapter/receiver.go +++ b/pkg/stanza/adapter/receiver.go @@ -6,7 +6,6 @@ package adapter // import "github.com/open-telemetry/opentelemetry-collector-con import ( "context" "fmt" - "sync" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" @@ -16,22 +15,19 @@ import ( "go.uber.org/multierr" "go.uber.org/zap" + 
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/pipeline" ) type receiver struct { - set component.TelemetrySettings - id component.ID - emitWg sync.WaitGroup - consumeWg sync.WaitGroup - cancel context.CancelFunc - - pipe pipeline.Pipeline - emitter *helper.LogEmitter - consumer consumer.Logs - converter *Converter - obsrecv *receiverhelper.ObsReport + set component.TelemetrySettings + id component.ID + + pipe pipeline.Pipeline + emitter *helper.LogEmitter + consumer consumer.Logs + obsrecv *receiverhelper.ObsReport storageID *component.ID storageClient storage.Client @@ -42,8 +38,6 @@ var _ rcvr.Logs = (*receiver)(nil) // Start tells the receiver to start func (r *receiver) Start(ctx context.Context, host component.Host) error { - rctx, cancel := context.WithCancel(ctx) - r.cancel = cancel r.set.Logger.Info("Starting stanza receiver") if err := r.setStorageClient(ctx, host); err != nil { @@ -54,86 +48,26 @@ func (r *receiver) Start(ctx context.Context, host component.Host) error { return fmt.Errorf("start stanza: %w", err) } - r.converter.Start() - - // Below we're starting 2 loops: - // * one which reads all the logs produced by the emitter and then forwards - // them to converter - // ... - r.emitWg.Add(1) - go r.emitterLoop() - - // ... - // * second one which reads all the logs produced by the converter - // (aggregated by Resource) and then calls consumer to consume them. - r.consumeWg.Add(1) - go r.consumerLoop(rctx) - - // Those 2 loops are started in separate goroutines because batching in - // the emitter loop can cause a flush, caused by either reaching the max - // flush size or by the configurable ticker which would in turn cause - // a set of log entries to be available for reading in converter's out - // channel. In order to prevent backpressure, reading from the converter - // channel and batching are done in those 2 goroutines. - return nil } -// emitterLoop reads the log entries produced by the emitter and batches them -// in converter. -func (r *receiver) emitterLoop() { - defer r.emitWg.Done() - - // Don't create done channel on every iteration. - // emitter.OutChannel is closed on ctx.Done(), no need to handle ctx here - // instead we should drain and process the channel to let emitter cancel properly - for e := range r.emitter.OutChannel() { - if err := r.converter.Batch(e); err != nil { - r.set.Logger.Error("Could not add entry to batch", zap.Error(err)) - } - } +func (r *receiver) consumeEntries(ctx context.Context, entries []*entry.Entry) { + obsrecvCtx := r.obsrecv.StartLogsOp(ctx) + pLogs := ConvertEntries(entries) + logRecordCount := pLogs.LogRecordCount() - r.set.Logger.Debug("Emitter loop stopped") -} - -// consumerLoop reads converter log entries and calls the consumer to consumer them. -func (r *receiver) consumerLoop(ctx context.Context) { - defer r.consumeWg.Done() - - // Don't create done channel on every iteration. - // converter.OutChannel is closed on Shutdown before context is cancelled. 
- // Drain the channel and process events before exiting - for pLogs := range r.converter.OutChannel() { - obsrecvCtx := r.obsrecv.StartLogsOp(ctx) - logRecordCount := pLogs.LogRecordCount() - - cErr := r.consumer.ConsumeLogs(ctx, pLogs) - if cErr != nil { - r.set.Logger.Error("ConsumeLogs() failed", zap.Error(cErr)) - } - r.obsrecv.EndLogsOp(obsrecvCtx, "stanza", logRecordCount, cErr) + cErr := r.consumer.ConsumeLogs(ctx, pLogs) + if cErr != nil { + r.set.Logger.Error("ConsumeLogs() failed", zap.Error(cErr)) } - - r.set.Logger.Debug("Consumer loop stopped") + r.obsrecv.EndLogsOp(obsrecvCtx, "stanza", logRecordCount, cErr) } // Shutdown is invoked during service shutdown func (r *receiver) Shutdown(ctx context.Context) error { - if r.cancel == nil { - return nil - } - r.set.Logger.Info("Stopping stanza receiver") pipelineErr := r.pipe.Stop() - // wait for emitter to finish batching and let consumers catch up - r.emitWg.Wait() - - r.converter.Stop() - r.cancel() - // wait for consumers to catch up - r.consumeWg.Wait() - if r.storageClient != nil { clientErr := r.storageClient.Close(ctx) return multierr.Combine(pipelineErr, clientErr) diff --git a/pkg/stanza/adapter/receiver_test.go b/pkg/stanza/adapter/receiver_test.go index a5349a479866..c46d0c5a376f 100644 --- a/pkg/stanza/adapter/receiver_test.go +++ b/pkg/stanza/adapter/receiver_test.go @@ -16,6 +16,7 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/confmap/confmaptest" "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumertest" "go.opentelemetry.io/collector/pdata/plog" @@ -48,16 +49,12 @@ func TestStart(t *testing.T) { require.NoError(t, err, "receiver start failed") stanzaReceiver := logsReceiver.(*receiver) - logChan := stanzaReceiver.emitter.OutChannelForWrite() - logChan <- []*entry.Entry{entry.New()} + + stanzaReceiver.consumeEntries(context.Background(), []*entry.Entry{entry.New()}) // Eventually because of asynchronuous nature of the receiver. - require.Eventually(t, - func() bool { - return mockConsumer.LogRecordCount() == 1 - }, - 10*time.Second, 5*time.Millisecond, "one log entry expected", - ) + require.Equal(t, 1, mockConsumer.LogRecordCount()) + require.NoError(t, logsReceiver.Shutdown(context.Background())) } @@ -87,8 +84,8 @@ func TestHandleConsume(t *testing.T) { require.NoError(t, err, "receiver start failed") stanzaReceiver := logsReceiver.(*receiver) - logChan := stanzaReceiver.emitter.OutChannelForWrite() - logChan <- []*entry.Entry{entry.New()} + + stanzaReceiver.consumeEntries(context.Background(), []*entry.Entry{entry.New()}) // Eventually because of asynchronuous nature of the receiver. 
require.Eventually(t, @@ -113,8 +110,8 @@ func TestHandleConsumeRetry(t *testing.T) { require.NoError(t, logsReceiver.Start(context.Background(), componenttest.NewNopHost())) stanzaReceiver := logsReceiver.(*receiver) - logChan := stanzaReceiver.emitter.OutChannelForWrite() - logChan <- []*entry.Entry{entry.New()} + + stanzaReceiver.consumeEntries(context.Background(), []*entry.Entry{entry.New()}) require.Eventually(t, func() bool { @@ -212,26 +209,12 @@ func benchmarkReceiver(b *testing.B, logsPerIteration int) { Builder: inputBuilder, } - set := componenttest.NewNopTelemetrySettings() - emitter := helper.NewLogEmitter(set) - defer func() { - require.NoError(b, emitter.Stop()) - }() - - pipe, err := pipeline.Config{ - Operators: []operator.Config{inputCfg}, - DefaultOutput: emitter, - }.Build(set) - require.NoError(b, err) - storageClient := storagetest.NewInMemoryClient( component.KindReceiver, component.MustNewID("foolog"), "test", ) - converter := NewConverter(set) - obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{ReceiverCreateSettings: receivertest.NewNopSettings()}) require.NoError(b, err) @@ -241,15 +224,27 @@ func benchmarkReceiver(b *testing.B, logsPerIteration int) { receivedLogs: atomic.Uint32{}, } rcv := &receiver{ - set: set, - pipe: pipe, - emitter: emitter, consumer: mockConsumer, - converter: converter, obsrecv: obsrecv, storageClient: storageClient, } + set := componenttest.NewNopTelemetrySettings() + emitter := helper.NewLogEmitter(set, rcv.consumeEntries) + defer func() { + require.NoError(b, emitter.Stop()) + }() + + pipe, err := pipeline.Config{ + Operators: []operator.Config{inputCfg}, + DefaultOutput: emitter, + }.Build(set) + require.NoError(b, err) + + rcv.pipe = pipe + rcv.set = set + rcv.emitter = emitter + b.ResetTimer() require.NoError(b, rcv.Start(context.Background(), nil)) @@ -264,20 +259,54 @@ func benchmarkReceiver(b *testing.B, logsPerIteration int) { } func BenchmarkReadLine(b *testing.B) { + receivedAllLogs := make(chan struct{}) filePath := filepath.Join(b.TempDir(), "bench.log") pipelineYaml := fmt.Sprintf(` -- type: file_input +pipeline: + type: file_input include: - %s start_at: beginning`, filePath) - var operatorCfgs []operator.Config - require.NoError(b, yaml.Unmarshal([]byte(pipelineYaml), &operatorCfgs)) + confmapFilePath := filepath.Join(b.TempDir(), "conf.yaml") + require.NoError(b, os.WriteFile(confmapFilePath, []byte(pipelineYaml), 0600)) + + testConfMaps, err := confmaptest.LoadConf(confmapFilePath) + require.NoError(b, err) + + conf, err := testConfMaps.Sub("pipeline") + require.NoError(b, err) + require.NotNil(b, conf) + + operatorCfg := operator.Config{} + require.NoError(b, conf.Unmarshal(&operatorCfg)) + + operatorCfgs := []operator.Config{operatorCfg} + + storageClient := storagetest.NewInMemoryClient( + component.KindReceiver, + component.MustNewID("foolog"), + "test", + ) + + obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{ReceiverCreateSettings: receivertest.NewNopSettings()}) + require.NoError(b, err) + + mockConsumer := &testConsumer{ + receivedAllLogs: receivedAllLogs, + expectedLogs: uint32(b.N), + receivedLogs: atomic.Uint32{}, + } + rcv := &receiver{ + consumer: mockConsumer, + obsrecv: obsrecv, + storageClient: storageClient, + } set := componenttest.NewNopTelemetrySettings() - emitter := helper.NewLogEmitter(set) + emitter := helper.NewLogEmitter(set, rcv.consumeEntries) defer func() { require.NoError(b, emitter.Stop()) }() @@ -288,6 +317,10 @@ func 
BenchmarkReadLine(b *testing.B) { }.Build(set) require.NoError(b, err) + rcv.pipe = pipe + rcv.set = set + rcv.emitter = emitter + // Populate the file that will be consumed file, err := os.OpenFile(filePath, os.O_RDWR|os.O_CREATE, 0666) require.NoError(b, err) @@ -296,22 +329,13 @@ func BenchmarkReadLine(b *testing.B) { require.NoError(b, err) } - storageClient := storagetest.NewInMemoryClient( - component.KindReceiver, - component.MustNewID("foolog"), - "test", - ) - // Run the actual benchmark b.ResetTimer() - require.NoError(b, pipe.Start(storageClient)) - logChan := emitter.OutChannel() - for i := 0; i < b.N; i++ { - entries := <-logChan - for _, e := range entries { - convert(e) - } - } + require.NoError(b, rcv.Start(context.Background(), nil)) + + <-receivedAllLogs + + require.NoError(b, rcv.Shutdown(context.Background())) } func BenchmarkParseAndMap(b *testing.B) { @@ -344,7 +368,11 @@ func BenchmarkParseAndMap(b *testing.B) { require.NoError(b, yaml.Unmarshal([]byte(pipelineYaml), &operatorCfgs)) set := componenttest.NewNopTelemetrySettings() - emitter := helper.NewLogEmitter(set) + emitter := helper.NewLogEmitter(set, func(_ context.Context, entries []*entry.Entry) { + for _, e := range entries { + convert(e) + } + }) defer func() { require.NoError(b, emitter.Stop()) }() @@ -372,13 +400,6 @@ func BenchmarkParseAndMap(b *testing.B) { // Run the actual benchmark b.ResetTimer() require.NoError(b, pipe.Start(storageClient)) - logChan := emitter.OutChannel() - for i := 0; i < b.N; i++ { - entries := <-logChan - for _, e := range entries { - convert(e) - } - } } const testInputOperatorTypeStr = "test_input" diff --git a/pkg/stanza/operator/helper/emitter.go b/pkg/stanza/operator/helper/emitter.go index 51f1aa772863..aa91b85c92be 100644 --- a/pkg/stanza/operator/helper/emitter.go +++ b/pkg/stanza/operator/helper/emitter.go @@ -14,10 +14,9 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator" ) -// LogEmitter is a stanza operator that emits log entries to a channel +// LogEmitter is a stanza operator that emits log entries to the consumer callback function `consumerFunc` type LogEmitter struct { OutputOperator - logChan chan []*entry.Entry closeChan chan struct{} stopOnce sync.Once batchMux sync.Mutex @@ -25,6 +24,7 @@ type LogEmitter struct { wg sync.WaitGroup maxBatchSize uint flushInterval time.Duration + consumerFunc func(context.Context, []*entry.Entry) } var ( @@ -61,15 +61,15 @@ func (o flushIntervalOption) apply(e *LogEmitter) { } // NewLogEmitter creates a new receiver output -func NewLogEmitter(set component.TelemetrySettings, opts ...EmitterOption) *LogEmitter { +func NewLogEmitter(set component.TelemetrySettings, consumerFunc func(context.Context, []*entry.Entry), opts ...EmitterOption) *LogEmitter { op, _ := NewOutputConfig("log_emitter", "log_emitter").Build(set) e := &LogEmitter{ OutputOperator: op, - logChan: make(chan []*entry.Entry), closeChan: make(chan struct{}), maxBatchSize: defaultMaxBatchSize, batch: make([]*entry.Entry, 0, defaultMaxBatchSize), flushInterval: defaultFlushInterval, + consumerFunc: consumerFunc, } for _, opt := range opts { opt.apply(e) @@ -89,27 +89,15 @@ func (e *LogEmitter) Stop() error { e.stopOnce.Do(func() { close(e.closeChan) e.wg.Wait() - - close(e.logChan) }) return nil } -// OutChannel returns the channel on which entries will be sent to. -func (e *LogEmitter) OutChannel() <-chan []*entry.Entry { - return e.logChan -} - -// OutChannelForWrite returns the channel on which entries can be sent to. 
-func (e *LogEmitter) OutChannelForWrite() chan []*entry.Entry { - return e.logChan -} - // Process will emit an entry to the output channel func (e *LogEmitter) Process(ctx context.Context, ent *entry.Entry) error { if oldBatch := e.appendEntry(ent); len(oldBatch) > 0 { - e.flush(ctx, oldBatch) + e.consumerFunc(ctx, oldBatch) } return nil @@ -142,26 +130,18 @@ func (e *LogEmitter) flusher() { select { case <-ticker.C: if oldBatch := e.makeNewBatch(); len(oldBatch) > 0 { - e.flush(context.Background(), oldBatch) + e.consumerFunc(context.Background(), oldBatch) } case <-e.closeChan: // flush currently batched entries if oldBatch := e.makeNewBatch(); len(oldBatch) > 0 { - e.flush(context.Background(), oldBatch) + e.consumerFunc(context.Background(), oldBatch) } return } } } -// flush flushes the provided batch to the log channel. -func (e *LogEmitter) flush(ctx context.Context, batch []*entry.Entry) { - select { - case e.logChan <- batch: - case <-ctx.Done(): - } -} - // makeNewBatch replaces the current batch on the log emitter with a new batch, returning the old one func (e *LogEmitter) makeNewBatch() []*entry.Entry { e.batchMux.Lock() diff --git a/pkg/stanza/operator/helper/emitter_test.go b/pkg/stanza/operator/helper/emitter_test.go index f17e7f503b2d..927734dccf56 100644 --- a/pkg/stanza/operator/helper/emitter_test.go +++ b/pkg/stanza/operator/helper/emitter_test.go @@ -6,6 +6,7 @@ package helper import ( "context" "fmt" + "sync" "testing" "time" @@ -17,7 +18,16 @@ import ( ) func TestLogEmitter(t *testing.T) { - emitter := NewLogEmitter(componenttest.NewNopTelemetrySettings()) + rwMtx := &sync.RWMutex{} + var receivedEntries []*entry.Entry + emitter := NewLogEmitter( + componenttest.NewNopTelemetrySettings(), + func(_ context.Context, entries []*entry.Entry) { + rwMtx.Lock() + defer rwMtx.Unlock() + receivedEntries = entries + }, + ) require.NoError(t, emitter.Start(nil)) @@ -27,16 +37,14 @@ func TestLogEmitter(t *testing.T) { in := entry.New() - go func() { - assert.NoError(t, emitter.Process(context.Background(), in)) - }() + assert.NoError(t, emitter.Process(context.Background(), in)) - select { - case out := <-emitter.logChan: - require.Equal(t, in, out[0]) - case <-time.After(time.Second): - require.FailNow(t, "Timed out waiting for output") - } + require.Eventually(t, func() bool { + rwMtx.RLock() + defer rwMtx.RUnlock() + return receivedEntries != nil + }, time.Second, 10*time.Millisecond) + require.Equal(t, in, receivedEntries[0]) } func TestLogEmitterEmitsOnMaxBatchSize(t *testing.T) { @@ -44,7 +52,16 @@ func TestLogEmitterEmitsOnMaxBatchSize(t *testing.T) { maxBatchSize = 100 timeout = time.Second ) - emitter := NewLogEmitter(componenttest.NewNopTelemetrySettings()) + rwMtx := &sync.RWMutex{} + var receivedEntries []*entry.Entry + emitter := NewLogEmitter( + componenttest.NewNopTelemetrySettings(), + func(_ context.Context, entries []*entry.Entry) { + rwMtx.Lock() + defer rwMtx.Unlock() + receivedEntries = entries + }, + ) require.NoError(t, emitter.Start(nil)) defer func() { @@ -53,21 +70,17 @@ func TestLogEmitterEmitsOnMaxBatchSize(t *testing.T) { entries := complexEntries(maxBatchSize) - go func() { - ctx := context.Background() - for _, e := range entries { - assert.NoError(t, emitter.Process(ctx, e)) - } - }() - - timeoutChan := time.After(timeout) - - select { - case recv := <-emitter.logChan: - require.Len(t, recv, maxBatchSize, "Length of received entries was not the same as max batch size!") - case <-timeoutChan: - require.FailNow(t, "Failed to receive log entries 
before timeout") + ctx := context.Background() + for _, e := range entries { + assert.NoError(t, emitter.Process(ctx, e)) } + + require.Eventually(t, func() bool { + rwMtx.RLock() + defer rwMtx.RUnlock() + return receivedEntries != nil + }, timeout, 10*time.Millisecond) + require.Len(t, receivedEntries, maxBatchSize) } func TestLogEmitterEmitsOnFlushInterval(t *testing.T) { @@ -75,7 +88,17 @@ func TestLogEmitterEmitsOnFlushInterval(t *testing.T) { flushInterval = 100 * time.Millisecond timeout = time.Second ) - emitter := NewLogEmitter(componenttest.NewNopTelemetrySettings()) + rwMtx := &sync.RWMutex{} + var receivedEntries []*entry.Entry + emitter := NewLogEmitter( + componenttest.NewNopTelemetrySettings(), + func(_ context.Context, entries []*entry.Entry) { + rwMtx.Lock() + defer rwMtx.Unlock() + receivedEntries = entries + }, + ) + emitter.flushInterval = flushInterval require.NoError(t, emitter.Start(nil)) defer func() { @@ -84,19 +107,16 @@ func TestLogEmitterEmitsOnFlushInterval(t *testing.T) { entry := complexEntry() - go func() { - ctx := context.Background() - assert.NoError(t, emitter.Process(ctx, entry)) - }() + ctx := context.Background() + assert.NoError(t, emitter.Process(ctx, entry)) - timeoutChan := time.After(timeout) + require.Eventually(t, func() bool { + rwMtx.RLock() + defer rwMtx.RUnlock() + return receivedEntries != nil + }, timeout, 10*time.Millisecond) - select { - case recv := <-emitter.logChan: - require.Len(t, recv, 1, "Should have received one entry, got %d instead", len(recv)) - case <-timeoutChan: - require.FailNow(t, "Failed to receive log entry before timeout") - } + require.Len(t, receivedEntries, 1) } func complexEntries(count int) []*entry.Entry { diff --git a/pkg/stanza/operator/parser/container/config.go b/pkg/stanza/operator/parser/container/config.go index b707883713f7..81e6f2339be9 100644 --- a/pkg/stanza/operator/parser/container/config.go +++ b/pkg/stanza/operator/parser/container/config.go @@ -67,14 +67,6 @@ func (c Config) Build(set component.TelemetrySettings) (operator.Operator, error return nil, err } - cLogEmitter := helper.NewLogEmitter(set) - recombineParser, err := createRecombine(set, c, cLogEmitter) - if err != nil { - return nil, fmt.Errorf("failed to create internal recombine config: %w", err) - } - - wg := sync.WaitGroup{} - if c.Format != "" { switch c.Format { case dockerFormat, crioFormat, containerdFormat: @@ -95,14 +87,24 @@ func (c Config) Build(set component.TelemetrySettings) (operator.Operator, error ) } + wg := sync.WaitGroup{} + p := &Parser{ ParserOperator: parserOperator, - recombineParser: recombineParser, format: c.Format, addMetadataFromFilepath: c.AddMetadataFromFilePath, - criLogEmitter: cLogEmitter, criConsumers: &wg, } + + cLogEmitter := helper.NewLogEmitter(set, p.consumeEntries) + p.criLogEmitter = cLogEmitter + recombineParser, err := createRecombine(set, c, cLogEmitter) + if err != nil { + return nil, fmt.Errorf("failed to create internal recombine config: %w", err) + } + + p.recombineParser = recombineParser + return p, nil } diff --git a/pkg/stanza/operator/parser/container/parser.go b/pkg/stanza/operator/parser/container/parser.go index b53c025869df..ae27fa778318 100644 --- a/pkg/stanza/operator/parser/container/parser.go +++ b/pkg/stanza/operator/parser/container/parser.go @@ -95,7 +95,6 @@ func (p *Parser) Process(ctx context.Context, entry *entry.Entry) (err error) { p.Logger().Error("unable to start the internal recombine operator", zap.Error(err)) return } - go p.criConsumer(ctx) p.asyncConsumerStarted 
= true }) @@ -141,22 +140,6 @@ func (p *Parser) Process(ctx context.Context, entry *entry.Entry) (err error) { return nil } -// criConsumer receives log entries from the criLogEmitter and -// writes them to the output of the main parser -func (p *Parser) criConsumer(ctx context.Context) { - entriesChan := p.criLogEmitter.OutChannel() - p.criConsumers.Add(1) - defer p.criConsumers.Done() - for entries := range entriesChan { - for _, e := range entries { - err := p.Write(ctx, e) - if err != nil { - p.Logger().Error("failed to write entry", zap.Error(err)) - } - } - } -} - // Stop ensures that the internal recombineParser, the internal criLogEmitter and // the crioConsumer are stopped in the proper order without being affected by // any possible race conditions @@ -165,7 +148,6 @@ func (p *Parser) Stop() error { // nothing is started return return nil } - var stopErrs []error err := p.recombineParser.Stop() if err != nil { @@ -305,6 +287,15 @@ func (p *Parser) extractk8sMetaFromFilePath(e *entry.Entry) error { return nil } +func (p *Parser) consumeEntries(ctx context.Context, entries []*entry.Entry) { + for _, e := range entries { + err := p.Write(ctx, e) + if err != nil { + p.Logger().Error("failed to write entry", zap.Error(err)) + } + } +} + func moveField(e *entry.Entry, originalKey, mappedKey string) error { val, exist := entry.NewAttributeField(originalKey).Delete(e) if !exist { diff --git a/processor/logstransformprocessor/processor.go b/processor/logstransformprocessor/processor.go index 270ea0439488..09f3a16430c9 100644 --- a/processor/logstransformprocessor/processor.go +++ b/processor/logstransformprocessor/processor.go @@ -17,6 +17,7 @@ import ( "go.uber.org/zap" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/pipeline" @@ -31,7 +32,6 @@ type logsTransformProcessor struct { pipe *pipeline.DirectedPipeline firstOperator operator.Operator emitter *helper.LogEmitter - converter *adapter.Converter fromConverter *adapter.FromPdataConverter shutdownFns []component.ShutdownFunc } @@ -45,7 +45,7 @@ func newProcessor(config *Config, nextConsumer consumer.Logs, set component.Tele baseCfg := p.config.BaseConfig - p.emitter = helper.NewLogEmitter(p.set) + p.emitter = helper.NewLogEmitter(p.set, p.consumeStanzaLogEntries) pipe, err := pipeline.Config{ Operators: baseCfg.Operators, DefaultOutput: p.emitter, @@ -79,8 +79,6 @@ func (ltp *logsTransformProcessor) Shutdown(ctx context.Context) error { } func (ltp *logsTransformProcessor) Start(ctx context.Context, _ component.Host) error { - // create all objects before starting them, since the loops (consumerLoop, converterLoop) depend on these converters not being nil. 
- ltp.converter = adapter.NewConverter(ltp.set) wkrCount := int(math.Max(1, float64(runtime.NumCPU()))) ltp.fromConverter = adapter.NewFromPdataConverter(ltp.set, wkrCount) @@ -90,15 +88,10 @@ func (ltp *logsTransformProcessor) Start(ctx context.Context, _ component.Host) // fromConverter: converts logs to stanza format -> // converterLoop: forwards converted logs to the stanza pipeline -> // pipeline: performs user configured operations on the logs -> - // emitterLoop: forwards output stanza logs for conversion to OTLP -> - // converter: converts stanza logs to OTLP -> - // consumerLoop: sends the converted OTLP logs to the next consumer + // transformProcessor: receives []*entry.Entries, converts them to plog.Logs and sends the converted OTLP logs to the next consumer // // We should start these components in reverse order of the data flow, then stop them in order of the data flow, // in order to allow for pipeline draining. - ltp.startConsumerLoop(ctx) - ltp.startConverter() - ltp.startEmitterLoop(ctx) err := ltp.startPipeline() if err != nil { return err @@ -151,41 +144,6 @@ func (ltp *logsTransformProcessor) startPipeline() error { return nil } -// startEmitterLoop starts the loop which reads all the logs modified by the pipeline and then forwards -// them to converter -func (ltp *logsTransformProcessor) startEmitterLoop(ctx context.Context) { - wg := &sync.WaitGroup{} - wg.Add(1) - go ltp.emitterLoop(ctx, wg) - - ltp.shutdownFns = append(ltp.shutdownFns, func(_ context.Context) error { - wg.Wait() - return nil - }) -} - -func (ltp *logsTransformProcessor) startConverter() { - ltp.converter.Start() - - ltp.shutdownFns = append(ltp.shutdownFns, func(_ context.Context) error { - ltp.converter.Stop() - return nil - }) -} - -// startConsumerLoop starts the loop which reads all the logs produced by the converter -// (aggregated by Resource) and then places them on the next consumer -func (ltp *logsTransformProcessor) startConsumerLoop(ctx context.Context) { - wg := &sync.WaitGroup{} - wg.Add(1) - go ltp.consumerLoop(ctx, wg) - - ltp.shutdownFns = append(ltp.shutdownFns, func(_ context.Context) error { - wg.Wait() - return nil - }) -} - func (ltp *logsTransformProcessor) ConsumeLogs(_ context.Context, ld plog.Logs) error { // Add the logs to the chain return ltp.fromConverter.Batch(ld) @@ -219,48 +177,9 @@ func (ltp *logsTransformProcessor) converterLoop(ctx context.Context, wg *sync.W } } -// emitterLoop reads the log entries produced by the emitter and batches them -// in converter. -func (ltp *logsTransformProcessor) emitterLoop(ctx context.Context, wg *sync.WaitGroup) { - defer wg.Done() - - for { - select { - case <-ctx.Done(): - ltp.set.Logger.Debug("emitter loop stopped") - return - case e, ok := <-ltp.emitter.OutChannel(): - if !ok { - ltp.set.Logger.Debug("emitter channel got closed") - return - } - - if err := ltp.converter.Batch(e); err != nil { - ltp.set.Logger.Error("processor encountered an issue with the converter", zap.Error(err)) - } - } - } -} - -// consumerLoop reads converter log entries and calls the consumer to consumer them. 
-func (ltp *logsTransformProcessor) consumerLoop(ctx context.Context, wg *sync.WaitGroup) { - defer wg.Done() - - for { - select { - case <-ctx.Done(): - ltp.set.Logger.Debug("consumer loop stopped") - return - - case pLogs, ok := <-ltp.converter.OutChannel(): - if !ok { - ltp.set.Logger.Debug("converter channel got closed") - return - } - - if err := ltp.consumer.ConsumeLogs(ctx, pLogs); err != nil { - ltp.set.Logger.Error("processor encountered an issue with next consumer", zap.Error(err)) - } - } +func (ltp *logsTransformProcessor) consumeStanzaLogEntries(ctx context.Context, entries []*entry.Entry) { + pLogs := adapter.ConvertEntries(entries) + if err := ltp.consumer.ConsumeLogs(ctx, pLogs); err != nil { + ltp.set.Logger.Error("processor encountered an issue with next consumer", zap.Error(err)) } } diff --git a/testbed/tests/log_test.go b/testbed/tests/log_test.go index 288b3c117783..8b44f83f670a 100644 --- a/testbed/tests/log_test.go +++ b/testbed/tests/log_test.go @@ -250,6 +250,7 @@ func TestLogLargeFiles(t *testing.T) { sender testbed.DataSender receiver testbed.DataReceiver loadOptions testbed.LoadOptions + resourceSpec testbed.ResourceSpec sleepSeconds int }{ { @@ -266,6 +267,10 @@ func TestLogLargeFiles(t *testing.T) { ItemsPerBatch: 1, Parallel: 100, }, + resourceSpec: testbed.ResourceSpec{ + ExpectedMaxCPU: 80, + ExpectedMaxRAM: 150, + }, sleepSeconds: 100, }, { @@ -282,6 +287,10 @@ func TestLogLargeFiles(t *testing.T) { ItemsPerBatch: 10, Parallel: 10, }, + resourceSpec: testbed.ResourceSpec{ + ExpectedMaxCPU: 100, + ExpectedMaxRAM: 150, + }, sleepSeconds: 200, }, } From 9ae4b5ba998fe041540721f357ac5213abc3b776 Mon Sep 17 00:00:00 2001 From: odubajDT <93584209+odubajDT@users.noreply.github.com> Date: Thu, 7 Nov 2024 17:21:29 +0100 Subject: [PATCH 03/24] [processor/resourcedetection] add support for Profiles signal type (#36107) #### Description - added support for Profiles signal in processor - added tests - adapted README - performed `go mod tidy` on components which depend on the processor #### Link to tracking issue Fixes #35980 --------- Signed-off-by: odubajDT --- .../resourcedetectionprocessor-profiles.yaml | 27 +++++++++ connector/datadogconnector/go.sum | 2 + exporter/datadogexporter/go.mod | 1 + exporter/datadogexporter/go.sum | 2 + .../datadogexporter/integrationtest/go.sum | 2 + .../resourcedetectionprocessor/README.md | 4 +- .../resourcedetectionprocessor/factory.go | 33 +++++++++-- .../factory_test.go | 9 +++ processor/resourcedetectionprocessor/go.mod | 7 ++- processor/resourcedetectionprocessor/go.sum | 2 + .../internal/metadata/generated_status.go | 7 ++- .../resourcedetectionprocessor/metadata.yaml | 1 + .../resourcedetection_processor.go | 13 +++++ .../resourcedetection_processor_test.go | 55 +++++++++++++++++++ 14 files changed, 154 insertions(+), 11 deletions(-) create mode 100644 .chloggen/resourcedetectionprocessor-profiles.yaml diff --git a/.chloggen/resourcedetectionprocessor-profiles.yaml b/.chloggen/resourcedetectionprocessor-profiles.yaml new file mode 100644 index 000000000000..11a69f5bab0d --- /dev/null +++ b/.chloggen/resourcedetectionprocessor-profiles.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: resourcedetectionprocessor + +# A brief description of the change. 
Surround your text with quotes ("") if it needs to start with a backtick (`). +note: "Introduce support for Profiles signal type." + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [35980] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/connector/datadogconnector/go.sum b/connector/datadogconnector/go.sum index ac9bdf4b497e..d51d935661a2 100644 --- a/connector/datadogconnector/go.sum +++ b/connector/datadogconnector/go.sum @@ -999,6 +999,8 @@ go.opentelemetry.io/collector/processor/batchprocessor v0.113.0 h1:LPNbVILg+cKTF go.opentelemetry.io/collector/processor/batchprocessor v0.113.0/go.mod h1:tCg+B/1idJS5inxod+nRPXFdVi89Bsnl6RvzIOO9k5I= go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.113.0 h1:3/5z0Pe/yduwF0DSpytW2+mwDA5JaIL/w6vfNYy5KzQ= go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.113.0/go.mod h1:h3wIlqMtJGIDKttjMJBo6J4dHU/Mi6+bKSxvRVUpsXs= +go.opentelemetry.io/collector/processor/processorhelper/processorhelperprofiles v0.113.0 h1:bZ1i5l6/4nj7PsLqeHw7Opw5vdrpUsDvuH6a6kx+2yg= +go.opentelemetry.io/collector/processor/processorhelper/processorhelperprofiles v0.113.0/go.mod h1:Uxv+5NNIJJCuz52DPFa9INjrpZSfidoTkv849tNp1qI= go.opentelemetry.io/collector/processor/processorprofiles v0.113.0 h1:cczN6whdrCWww3T0FBV3U7lsVKQmkWDX05M+9lANHgk= go.opentelemetry.io/collector/processor/processorprofiles v0.113.0/go.mod h1:4Dmx5qsvujgJ+MC+KqWI7UDVM2liXa3sH/9XnGiL9aE= go.opentelemetry.io/collector/processor/processortest v0.113.0 h1:jGoDJ+tDCzuDcAWZeshQtnK/DQAvMKd4wZAIDgAM5aA= diff --git a/exporter/datadogexporter/go.mod b/exporter/datadogexporter/go.mod index b635050a4fd8..677a31246d27 100644 --- a/exporter/datadogexporter/go.mod +++ b/exporter/datadogexporter/go.mod @@ -352,6 +352,7 @@ require ( go.opentelemetry.io/collector/pdata/testdata v0.113.0 // indirect go.opentelemetry.io/collector/pipeline v0.113.0 // indirect go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.113.0 // indirect + go.opentelemetry.io/collector/processor/processorhelper/processorhelperprofiles v0.113.0 // indirect go.opentelemetry.io/collector/processor/processorprofiles v0.113.0 // indirect go.opentelemetry.io/collector/processor/processortest v0.113.0 // indirect go.opentelemetry.io/collector/receiver/receiverprofiles v0.113.0 // indirect diff --git a/exporter/datadogexporter/go.sum b/exporter/datadogexporter/go.sum index 0809d95c9f9d..c29d1e892c8a 100644 --- a/exporter/datadogexporter/go.sum +++ b/exporter/datadogexporter/go.sum @@ -1127,6 +1127,8 @@ go.opentelemetry.io/collector/processor/batchprocessor v0.113.0 h1:LPNbVILg+cKTF go.opentelemetry.io/collector/processor/batchprocessor v0.113.0/go.mod h1:tCg+B/1idJS5inxod+nRPXFdVi89Bsnl6RvzIOO9k5I= go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.113.0 h1:3/5z0Pe/yduwF0DSpytW2+mwDA5JaIL/w6vfNYy5KzQ= 
go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.113.0/go.mod h1:h3wIlqMtJGIDKttjMJBo6J4dHU/Mi6+bKSxvRVUpsXs= +go.opentelemetry.io/collector/processor/processorhelper/processorhelperprofiles v0.113.0 h1:bZ1i5l6/4nj7PsLqeHw7Opw5vdrpUsDvuH6a6kx+2yg= +go.opentelemetry.io/collector/processor/processorhelper/processorhelperprofiles v0.113.0/go.mod h1:Uxv+5NNIJJCuz52DPFa9INjrpZSfidoTkv849tNp1qI= go.opentelemetry.io/collector/processor/processorprofiles v0.113.0 h1:cczN6whdrCWww3T0FBV3U7lsVKQmkWDX05M+9lANHgk= go.opentelemetry.io/collector/processor/processorprofiles v0.113.0/go.mod h1:4Dmx5qsvujgJ+MC+KqWI7UDVM2liXa3sH/9XnGiL9aE= go.opentelemetry.io/collector/processor/processortest v0.113.0 h1:jGoDJ+tDCzuDcAWZeshQtnK/DQAvMKd4wZAIDgAM5aA= diff --git a/exporter/datadogexporter/integrationtest/go.sum b/exporter/datadogexporter/integrationtest/go.sum index 16e1341b2ec1..07abc40f75fe 100644 --- a/exporter/datadogexporter/integrationtest/go.sum +++ b/exporter/datadogexporter/integrationtest/go.sum @@ -1111,6 +1111,8 @@ go.opentelemetry.io/collector/processor/batchprocessor v0.113.0 h1:LPNbVILg+cKTF go.opentelemetry.io/collector/processor/batchprocessor v0.113.0/go.mod h1:tCg+B/1idJS5inxod+nRPXFdVi89Bsnl6RvzIOO9k5I= go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.113.0 h1:3/5z0Pe/yduwF0DSpytW2+mwDA5JaIL/w6vfNYy5KzQ= go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.113.0/go.mod h1:h3wIlqMtJGIDKttjMJBo6J4dHU/Mi6+bKSxvRVUpsXs= +go.opentelemetry.io/collector/processor/processorhelper/processorhelperprofiles v0.113.0 h1:bZ1i5l6/4nj7PsLqeHw7Opw5vdrpUsDvuH6a6kx+2yg= +go.opentelemetry.io/collector/processor/processorhelper/processorhelperprofiles v0.113.0/go.mod h1:Uxv+5NNIJJCuz52DPFa9INjrpZSfidoTkv849tNp1qI= go.opentelemetry.io/collector/processor/processorprofiles v0.113.0 h1:cczN6whdrCWww3T0FBV3U7lsVKQmkWDX05M+9lANHgk= go.opentelemetry.io/collector/processor/processorprofiles v0.113.0/go.mod h1:4Dmx5qsvujgJ+MC+KqWI7UDVM2liXa3sH/9XnGiL9aE= go.opentelemetry.io/collector/processor/processortest v0.113.0 h1:jGoDJ+tDCzuDcAWZeshQtnK/DQAvMKd4wZAIDgAM5aA= diff --git a/processor/resourcedetectionprocessor/README.md b/processor/resourcedetectionprocessor/README.md index 30e91a3d6227..cf16a5abc9e8 100644 --- a/processor/resourcedetectionprocessor/README.md +++ b/processor/resourcedetectionprocessor/README.md @@ -3,11 +3,13 @@ | Status | | | ------------- |-----------| -| Stability | [beta]: traces, metrics, logs | +| Stability | [development]: profiles | +| | [beta]: traces, metrics, logs | | Distributions | [contrib], [k8s] | | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aopen%20label%3Aprocessor%2Fresourcedetection%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aopen+is%3Aissue+label%3Aprocessor%2Fresourcedetection) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector-contrib?query=is%3Aissue%20is%3Aclosed%20label%3Aprocessor%2Fresourcedetection%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aclosed+is%3Aissue+label%3Aprocessor%2Fresourcedetection) | | [Code Owners](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/CONTRIBUTING.md#becoming-a-code-owner) | [@Aneurysm9](https://www.github.com/Aneurysm9), 
[@dashpole](https://www.github.com/dashpole) | +[development]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#development [beta]: https://github.com/open-telemetry/opentelemetry-collector/blob/main/docs/component-stability.md#beta [contrib]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-contrib [k8s]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-k8s diff --git a/processor/resourcedetectionprocessor/factory.go b/processor/resourcedetectionprocessor/factory.go index f1ae4b11ac35..05ff7934f81b 100644 --- a/processor/resourcedetectionprocessor/factory.go +++ b/processor/resourcedetectionprocessor/factory.go @@ -12,8 +12,11 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/confighttp" "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumerprofiles" "go.opentelemetry.io/collector/processor" "go.opentelemetry.io/collector/processor/processorhelper" + "go.opentelemetry.io/collector/processor/processorhelper/processorhelperprofiles" + "go.opentelemetry.io/collector/processor/processorprofiles" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/aws/ec2" @@ -70,12 +73,13 @@ func NewFactory() processor.Factory { providers: map[component.ID]*internal.ResourceProvider{}, } - return processor.NewFactory( + return processorprofiles.NewFactory( metadata.Type, createDefaultConfig, - processor.WithTraces(f.createTracesProcessor, metadata.TracesStability), - processor.WithMetrics(f.createMetricsProcessor, metadata.MetricsStability), - processor.WithLogs(f.createLogsProcessor, metadata.LogsStability)) + processorprofiles.WithTraces(f.createTracesProcessor, metadata.TracesStability), + processorprofiles.WithMetrics(f.createMetricsProcessor, metadata.MetricsStability), + processorprofiles.WithLogs(f.createLogsProcessor, metadata.LogsStability), + processorprofiles.WithProfiles(f.createProfilesProcessor, metadata.ProfilesStability)) } // Type gets the type of the Option config created by this factory. 
@@ -164,6 +168,27 @@ func (f *factory) createLogsProcessor( processorhelper.WithStart(rdp.Start)) } +func (f *factory) createProfilesProcessor( + ctx context.Context, + set processor.Settings, + cfg component.Config, + nextConsumer consumerprofiles.Profiles, +) (processorprofiles.Profiles, error) { + rdp, err := f.getResourceDetectionProcessor(set, cfg) + if err != nil { + return nil, err + } + + return processorhelperprofiles.NewProfiles( + ctx, + set, + cfg, + nextConsumer, + rdp.processProfiles, + processorhelperprofiles.WithCapabilities(consumerCapabilities), + processorhelperprofiles.WithStart(rdp.Start)) +} + func (f *factory) getResourceDetectionProcessor( params processor.Settings, cfg component.Config, diff --git a/processor/resourcedetectionprocessor/factory_test.go b/processor/resourcedetectionprocessor/factory_test.go index 7256f074aa82..1d2fdc1db014 100644 --- a/processor/resourcedetectionprocessor/factory_test.go +++ b/processor/resourcedetectionprocessor/factory_test.go @@ -13,6 +13,7 @@ import ( "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/confmap/confmaptest" "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/processor/processorprofiles" "go.opentelemetry.io/collector/processor/processortest" ) @@ -37,6 +38,10 @@ func TestCreateProcessor(t *testing.T) { lp, err := factory.CreateLogs(context.Background(), processortest.NewNopSettings(), cfg, consumertest.NewNop()) assert.NoError(t, err) assert.NotNil(t, lp) + + pp, err := factory.(processorprofiles.Factory).CreateProfiles(context.Background(), processortest.NewNopSettings(), cfg, consumertest.NewNop()) + assert.NoError(t, err) + assert.NotNil(t, pp) } func TestCreateConfigProcessors(t *testing.T) { @@ -82,4 +87,8 @@ func TestInvalidConfig(t *testing.T) { lp, err := factory.CreateLogs(context.Background(), processortest.NewNopSettings(), cfg, consumertest.NewNop()) assert.Error(t, err) assert.Nil(t, lp) + + pp, err := factory.(processorprofiles.Factory).CreateProfiles(context.Background(), processortest.NewNopSettings(), cfg, consumertest.NewNop()) + assert.Error(t, err) + assert.Nil(t, pp) } diff --git a/processor/resourcedetectionprocessor/go.mod b/processor/resourcedetectionprocessor/go.mod index d7eff97cd5ae..01a5250cf62f 100644 --- a/processor/resourcedetectionprocessor/go.mod +++ b/processor/resourcedetectionprocessor/go.mod @@ -20,10 +20,14 @@ require ( go.opentelemetry.io/collector/config/configtls v1.19.0 go.opentelemetry.io/collector/confmap v1.19.0 go.opentelemetry.io/collector/consumer v0.113.0 + go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0 go.opentelemetry.io/collector/consumer/consumertest v0.113.0 go.opentelemetry.io/collector/featuregate v1.19.0 go.opentelemetry.io/collector/pdata v1.19.0 + go.opentelemetry.io/collector/pdata/pprofile v0.113.0 go.opentelemetry.io/collector/processor v0.113.0 + go.opentelemetry.io/collector/processor/processorhelper/processorhelperprofiles v0.113.0 + go.opentelemetry.io/collector/processor/processorprofiles v0.113.0 go.opentelemetry.io/collector/processor/processortest v0.113.0 go.opentelemetry.io/collector/semconv v0.113.0 go.uber.org/goleak v1.3.0 @@ -114,13 +118,10 @@ require ( go.opentelemetry.io/collector/config/configcompression v1.19.0 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.113.0 // indirect go.opentelemetry.io/collector/config/internal v0.113.0 // indirect - go.opentelemetry.io/collector/consumer/consumerprofiles v0.113.0 // indirect 
go.opentelemetry.io/collector/extension v0.113.0 // indirect go.opentelemetry.io/collector/extension/auth v0.113.0 // indirect - go.opentelemetry.io/collector/pdata/pprofile v0.113.0 // indirect go.opentelemetry.io/collector/pdata/testdata v0.113.0 // indirect go.opentelemetry.io/collector/pipeline v0.113.0 // indirect - go.opentelemetry.io/collector/processor/processorprofiles v0.113.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect go.opentelemetry.io/otel v1.31.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.30.0 // indirect diff --git a/processor/resourcedetectionprocessor/go.sum b/processor/resourcedetectionprocessor/go.sum index 24c848ce37b5..f639f07ef9fc 100644 --- a/processor/resourcedetectionprocessor/go.sum +++ b/processor/resourcedetectionprocessor/go.sum @@ -502,6 +502,8 @@ go.opentelemetry.io/collector/pipeline v0.113.0 h1:vSRzRe3717jV0btCNPhVkhg2lu0uF go.opentelemetry.io/collector/pipeline v0.113.0/go.mod h1:4vOvjVsoYTHVGTbfFwqfnQOSV2K3RKUHofh3jNRc2Mg= go.opentelemetry.io/collector/processor v0.113.0 h1:BQI6MsKtiCG9HT/nmiRXTKP6SZFrjFKVfM6pTQfbc0k= go.opentelemetry.io/collector/processor v0.113.0/go.mod h1:oX91zMI8ZkoaYSUfUYflHiMiiBJPKtODNBUCrETLLd8= +go.opentelemetry.io/collector/processor/processorhelper/processorhelperprofiles v0.113.0 h1:bZ1i5l6/4nj7PsLqeHw7Opw5vdrpUsDvuH6a6kx+2yg= +go.opentelemetry.io/collector/processor/processorhelper/processorhelperprofiles v0.113.0/go.mod h1:Uxv+5NNIJJCuz52DPFa9INjrpZSfidoTkv849tNp1qI= go.opentelemetry.io/collector/processor/processorprofiles v0.113.0 h1:cczN6whdrCWww3T0FBV3U7lsVKQmkWDX05M+9lANHgk= go.opentelemetry.io/collector/processor/processorprofiles v0.113.0/go.mod h1:4Dmx5qsvujgJ+MC+KqWI7UDVM2liXa3sH/9XnGiL9aE= go.opentelemetry.io/collector/processor/processortest v0.113.0 h1:jGoDJ+tDCzuDcAWZeshQtnK/DQAvMKd4wZAIDgAM5aA= diff --git a/processor/resourcedetectionprocessor/internal/metadata/generated_status.go b/processor/resourcedetectionprocessor/internal/metadata/generated_status.go index ecca969e7db9..ee0a49313187 100644 --- a/processor/resourcedetectionprocessor/internal/metadata/generated_status.go +++ b/processor/resourcedetectionprocessor/internal/metadata/generated_status.go @@ -12,7 +12,8 @@ var ( ) const ( - TracesStability = component.StabilityLevelBeta - MetricsStability = component.StabilityLevelBeta - LogsStability = component.StabilityLevelBeta + ProfilesStability = component.StabilityLevelDevelopment + TracesStability = component.StabilityLevelBeta + MetricsStability = component.StabilityLevelBeta + LogsStability = component.StabilityLevelBeta ) diff --git a/processor/resourcedetectionprocessor/metadata.yaml b/processor/resourcedetectionprocessor/metadata.yaml index fee99b032daf..9e98d1f5cbd7 100644 --- a/processor/resourcedetectionprocessor/metadata.yaml +++ b/processor/resourcedetectionprocessor/metadata.yaml @@ -4,6 +4,7 @@ status: class: processor stability: beta: [traces, metrics, logs] + development: [profiles] distributions: [contrib, k8s] codeowners: active: [Aneurysm9, dashpole] diff --git a/processor/resourcedetectionprocessor/resourcedetection_processor.go b/processor/resourcedetectionprocessor/resourcedetection_processor.go index 44f3331e6473..40d2939ad354 100644 --- a/processor/resourcedetectionprocessor/resourcedetection_processor.go +++ b/processor/resourcedetectionprocessor/resourcedetection_processor.go @@ -11,6 +11,7 @@ import ( "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" 
"go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/pprofile" "go.opentelemetry.io/collector/pdata/ptrace" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal" @@ -69,3 +70,15 @@ func (rdp *resourceDetectionProcessor) processLogs(_ context.Context, ld plog.Lo } return ld, nil } + +// processProfiles implements the ProcessProfilesFunc type. +func (rdp *resourceDetectionProcessor) processProfiles(_ context.Context, ld pprofile.Profiles) (pprofile.Profiles, error) { + rl := ld.ResourceProfiles() + for i := 0; i < rl.Len(); i++ { + rss := rl.At(i) + rss.SetSchemaUrl(internal.MergeSchemaURL(rss.SchemaUrl(), rdp.schemaURL)) + res := rss.Resource() + internal.MergeResource(res, rdp.resource, rdp.override) + } + return ld, nil +} diff --git a/processor/resourcedetectionprocessor/resourcedetection_processor_test.go b/processor/resourcedetectionprocessor/resourcedetection_processor_test.go index 78f63a150a0d..a61c8a4efaaf 100644 --- a/processor/resourcedetectionprocessor/resourcedetection_processor_test.go +++ b/processor/resourcedetectionprocessor/resourcedetection_processor_test.go @@ -19,8 +19,10 @@ import ( "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/pprofile" "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/processor" + "go.opentelemetry.io/collector/processor/processorprofiles" "go.opentelemetry.io/collector/processor/processortest" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal" @@ -259,6 +261,37 @@ func TestResourceProcessor(t *testing.T) { got = tln.AllLogs()[0].ResourceLogs().At(0).Resource().Attributes().AsRaw() assert.Equal(t, tt.expectedResource, got) + + // Test profiles consumer + tpn := new(consumertest.ProfilesSink) + rpp, err := factory.createProfilesProcessor(context.Background(), processortest.NewNopSettings(), cfg, tpn) + + if tt.expectedNewError != "" { + assert.EqualError(t, err, tt.expectedNewError) + return + } + + require.NoError(t, err) + assert.True(t, rpp.Capabilities().MutatesData) + + err = rpp.Start(context.Background(), componenttest.NewNopHost()) + + if tt.detectedError != nil { + require.NoError(t, err) + return + } + + require.NoError(t, err) + defer func() { assert.NoError(t, rpp.Shutdown(context.Background())) }() + + pd := pprofile.NewProfiles() + require.NoError(t, pd.ResourceProfiles().AppendEmpty().Resource().Attributes().FromRaw(tt.sourceResource)) + + err = rpp.ConsumeProfiles(context.Background(), pd) + require.NoError(t, err) + got = tpn.AllProfiles()[0].ResourceProfiles().At(0).Resource().Attributes().AsRaw() + + assert.Equal(t, tt.expectedResource, got) }) } } @@ -328,3 +361,25 @@ func BenchmarkConsumeLogsAll(b *testing.B) { cfg := &Config{Override: true, Detectors: []string{env.TypeStr, gcp.TypeStr}} benchmarkConsumeLogs(b, cfg) } + +func benchmarkConsumeProfiles(b *testing.B, cfg *Config) { + factory := NewFactory() + sink := new(consumertest.ProfilesSink) + processor, _ := factory.(processorprofiles.Factory).CreateProfiles(context.Background(), processortest.NewNopSettings(), cfg, sink) + + b.ResetTimer() + for n := 0; n < b.N; n++ { + // TODO use testbed.PerfTestDataProvider here once that includes resources + assert.NoError(b, processor.ConsumeProfiles(context.Background(), pprofile.NewProfiles())) + } +} + +func BenchmarkConsumeProfilesDefault(b 
*testing.B) { + cfg := NewFactory().CreateDefaultConfig() + benchmarkConsumeProfiles(b, cfg.(*Config)) +} + +func BenchmarkConsumeProfilesAll(b *testing.B) { + cfg := &Config{Override: true, Detectors: []string{env.TypeStr, gcp.TypeStr}} + benchmarkConsumeProfiles(b, cfg) +} From cd2970a6fe7c1743923be798c22f6926226d8cb5 Mon Sep 17 00:00:00 2001 From: VihasMakwana <121151420+VihasMakwana@users.noreply.github.com> Date: Thu, 7 Nov 2024 22:01:06 +0530 Subject: [PATCH 04/24] [chore][testbed] - Further testbed enhancements (#35209) PR to add memory limiter test cases. This tests scenarios where collector is understress and we verify that we receive all the data, without drops/duplication. --- testbed/testbed/receivers.go | 9 +++- testbed/tests/log_test.go | 36 +++++++++++++++ testbed/tests/scenarios.go | 89 ++++++++++++++++++++++++++++++++++++ 3 files changed, 133 insertions(+), 1 deletion(-) diff --git a/testbed/testbed/receivers.go b/testbed/testbed/receivers.go index 3c7b161e2547..6db874822f94 100644 --- a/testbed/testbed/receivers.go +++ b/testbed/testbed/receivers.go @@ -55,6 +55,7 @@ type BaseOTLPDataReceiver struct { compression string retry string sendingQueue string + timeout string } func (bor *BaseOTLPDataReceiver) Start(tc consumer.Traces, mc consumer.Metrics, lc consumer.Logs) error { @@ -98,6 +99,11 @@ func (bor *BaseOTLPDataReceiver) WithQueue(sendingQueue string) *BaseOTLPDataRec return bor } +func (bor *BaseOTLPDataReceiver) WithTimeout(timeout string) *BaseOTLPDataReceiver { + bor.timeout = timeout + return bor +} + func (bor *BaseOTLPDataReceiver) Stop() error { // we reuse the receiver across signals. Shutting down the log receiver shuts down the metrics and traces receiver. return bor.logReceiver.Shutdown(context.Background()) @@ -118,8 +124,9 @@ func (bor *BaseOTLPDataReceiver) GenConfigYAMLStr() string { endpoint: "%s" %s %s + %s tls: - insecure: true`, bor.exporterType, addr, bor.retry, bor.sendingQueue) + insecure: true`, bor.exporterType, addr, bor.retry, bor.sendingQueue, bor.timeout) comp := "none" if bor.compression != "" { comp = bor.compression diff --git a/testbed/tests/log_test.go b/testbed/tests/log_test.go index 8b44f83f670a..727fde8141ea 100644 --- a/testbed/tests/log_test.go +++ b/testbed/tests/log_test.go @@ -369,3 +369,39 @@ func TestLargeFileOnce(t *testing.T) { tc.StopAgent() tc.ValidateData() } + +func TestMemoryLimiterHit(t *testing.T) { + otlpreceiver := testbed.NewOTLPDataReceiver(testutil.GetAvailablePort(t)) + otlpreceiver.WithRetry(` + retry_on_failure: + enabled: true + max_interval: 5s +`) + otlpreceiver.WithQueue(` + sending_queue: + enabled: true + queue_size: 100000 + num_consumers: 20 +`) + otlpreceiver.WithTimeout(` + timeout: 0s +`) + processors := map[string]string{ + "memory_limiter": ` + memory_limiter: + check_interval: 1s + limit_mib: 300 + spike_limit_mib: 150 +`, + } + ScenarioMemoryLimiterHit( + t, + testbed.NewOTLPLogsDataSender(testbed.DefaultHost, testutil.GetAvailablePort(t)), + otlpreceiver, + testbed.LoadOptions{ + DataItemsPerSecond: 100000, + ItemsPerBatch: 1000, + Parallel: 1, + }, + performanceResultsSummary, 100, processors) +} diff --git a/testbed/tests/scenarios.go b/testbed/tests/scenarios.go index 85973f89b29a..8a3d5f694662 100644 --- a/testbed/tests/scenarios.go +++ b/testbed/tests/scenarios.go @@ -552,6 +552,95 @@ func ScenarioLong( tc.ValidateData() } +func ScenarioMemoryLimiterHit( + t *testing.T, + sender testbed.DataSender, + receiver testbed.DataReceiver, + loadOptions testbed.LoadOptions, + resultsSummary 
testbed.TestResultsSummary, + sleepTime int, + processors map[string]string, +) { + resultDir, err := filepath.Abs(path.Join("results", t.Name())) + require.NoError(t, err) + + agentProc := testbed.NewChildProcessCollector(testbed.WithEnvVar("GOMAXPROCS", "2")) + + configStr := createConfigYaml(t, sender, receiver, resultDir, processors, nil) + fmt.Println(configStr) + configCleanup, err := agentProc.PrepareConfig(configStr) + require.NoError(t, err) + defer configCleanup() + dataProvider := testbed.NewPerfTestDataProvider(loadOptions) + dataChannel := make(chan bool) + tc := testbed.NewTestCase( + t, + dataProvider, + sender, + receiver, + agentProc, + &testbed.CorrectnessLogTestValidator{}, + resultsSummary, + testbed.WithDecisionFunc(func() error { return testbed.GenerateNonPernamentErrorUntil(dataChannel) }), + ) + t.Cleanup(tc.Stop) + tc.MockBackend.EnableRecording() + + tc.StartBackend() + tc.StartAgent() + + tc.StartLoad(loadOptions) + + tc.WaitFor(func() bool { return tc.LoadGenerator.DataItemsSent() > 0 }, "load generator started") + + var timer *time.Timer + + // check for "Memory usage is above hard limit" + tc.WaitForN(func() bool { + logFound := tc.AgentLogsContains("Memory usage is above soft limit. Refusing data.") + if !logFound { + dataChannel <- true + return false + } + // Log found. But keep the collector under stress for 10 more seconds so it starts refusing data + if timer == nil { + timer = time.NewTimer(10 * time.Second) + } + select { + case <-timer.C: + default: + return false + } + close(dataChannel) + return logFound + }, time.Second*time.Duration(sleepTime), "memory limit not hit") + + // check if data started to be received successfully + tc.WaitForN(func() bool { + return tc.MockBackend.DataItemsReceived() > 0 + }, time.Second*time.Duration(sleepTime), "data started to be successfully received") + + // stop sending any more data + tc.StopLoad() + + tc.WaitForN(func() bool { return tc.LoadGenerator.DataItemsSent() == tc.MockBackend.DataItemsReceived() }, time.Second*time.Duration(sleepTime), "all logs received") + + tc.WaitForN(func() bool { + // get IDs from logs to retry + logsToRetry := getLogsID(tc.MockBackend.LogsToRetry) + + // get IDs from logs received successfully + successfulLogs := getLogsID(tc.MockBackend.ReceivedLogs) + + // check if all the logs to retry were actually retried + logsWereRetried := allElementsExistInSlice(logsToRetry, successfulLogs) + return logsWereRetried + }, time.Second*time.Duration(sleepTime), "all logs were retried successfully") + + tc.StopAgent() + tc.ValidateData() +} + func constructLoadOptions(test TestCase) testbed.LoadOptions { options := testbed.LoadOptions{DataItemsPerSecond: 1000, ItemsPerBatch: 10} options.Attributes = make(map[string]string) From 3458d516819f31e87a271dc7855529519917dc9c Mon Sep 17 00:00:00 2001 From: Christos Markou Date: Thu, 7 Nov 2024 19:47:03 +0200 Subject: [PATCH 05/24] [receiver/k8scluster] Fix e2e tests by limiting jobs/pods of cronjob to 1 (#36235) #### Description Fixes what was described at https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/35727#issuecomment-2459485280. After https://github.com/open-telemetry/opentelemetry-collector-contrib/pull/36114 the `k8scluster` receiver's e2e tests started showing some flakiness ([example](https://github.com/open-telemetry/opentelemetry-collector-contrib/actions/runs/11701936581/job/32589392145)). 
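For context, the usual way to keep a CronJob down to a single active Job at any time is via the standard `batch/v1` fields `concurrencyPolicy` and the job history limits. The sketch below is only illustrative and is not the actual `testobjects/cronjob.yaml` change (that is in the diff further down): the schedule, command, and history limits here are assumptions, while the name and `alpine` image simply mirror the test objects referenced in `expected.yaml`.

```yaml
apiVersion: batch/v1
kind: CronJob
metadata:
  name: test-k8scluster-receiver-cronjob
spec:
  schedule: "*/1 * * * *"
  concurrencyPolicy: Forbid      # do not start a new Job while the previous one is still running
  successfulJobsHistoryLimit: 1  # keep at most one finished Job object around
  failedJobsHistoryLimit: 1
  jobTemplate:
    spec:
      template:
        spec:
          restartPolicy: Never
          containers:
            - name: alpine
              image: alpine:latest
              command: ["sh", "-c", "echo 'running'; sleep 120"]
```

With `concurrencyPolicy: Forbid`, a new run is skipped while the previous Job is still active, so at most one Job (and therefore one pod) from the CronJob exists at a time.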
With this change we ensure that only 1 active job/pod of the cronjob will be present for the whole lifetime of the test to avoid hitting timing/scheduling related flakiness. @bacherfl could you also take a look here? #### Link to tracking issue Fixes #### Testing #### Documentation Signed-off-by: ChrsMark --- .../testdata/e2e/expected.yaml | 649 +++++++----------- .../testdata/e2e/testobjects/cronjob.yaml | 4 +- 2 files changed, 262 insertions(+), 391 deletions(-) diff --git a/receiver/k8sclusterreceiver/testdata/e2e/expected.yaml b/receiver/k8sclusterreceiver/testdata/e2e/expected.yaml index 9f7aed414369..5b78ab5f0f95 100644 --- a/receiver/k8sclusterreceiver/testdata/e2e/expected.yaml +++ b/receiver/k8sclusterreceiver/testdata/e2e/expected.yaml @@ -6,7 +6,7 @@ resourceMetrics: stringValue: default - key: k8s.namespace.uid value: - stringValue: feb94a85-d29f-4693-a6d7-ca5206a5141e + stringValue: 7f2b26ac-b0f5-4929-a959-5cd2c6daa767 schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: @@ -19,7 +19,7 @@ resourceMetrics: name: k8s.namespace.phase scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: k8s.namespace.name @@ -27,7 +27,7 @@ resourceMetrics: stringValue: kube-node-lease - key: k8s.namespace.uid value: - stringValue: ff852fe4-f42e-48d7-883d-3df03ab5741c + stringValue: 23a9792c-bccb-4307-bc28-eb7b37a7c271 schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: @@ -40,7 +40,7 @@ resourceMetrics: name: k8s.namespace.phase scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: k8s.namespace.name @@ -48,7 +48,7 @@ resourceMetrics: stringValue: kube-public - key: k8s.namespace.uid value: - stringValue: 66be991c-1e7d-4a14-af98-4f421bee9ec4 + stringValue: 38490c86-545d-47ca-8d32-b613b7ca546a schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: @@ -61,7 +61,7 @@ resourceMetrics: name: k8s.namespace.phase scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: k8s.namespace.name @@ -69,7 +69,7 @@ resourceMetrics: stringValue: kube-system - key: k8s.namespace.uid value: - stringValue: 1fdcff4f-01e0-459a-baaa-463b5f52eaa2 + stringValue: 850c440b-d959-4c07-a7da-abd8fd1b0c1b schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: @@ -82,7 +82,7 @@ resourceMetrics: name: k8s.namespace.phase scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: k8s.namespace.name @@ -90,7 +90,7 @@ resourceMetrics: stringValue: local-path-storage - key: k8s.namespace.uid value: - stringValue: cf6c8796-7d4b-4e61-ae41-9c90207c7c06 + stringValue: c475b194-71ef-4911-aff3-f4848f777417 schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: @@ -103,7 +103,7 @@ resourceMetrics: name: k8s.namespace.phase scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: k8s.node.name @@ -111,7 +111,7 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.node.uid value: - stringValue: 080365b3-8b82-48dc-9885-d88364004eb3 + stringValue: 
9b78ed70-9ff6-4cf4-a94c-5df055362770 schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: @@ -123,7 +123,7 @@ resourceMetrics: name: k8s.node.condition_ready scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: k8s.cronjob.name @@ -131,7 +131,7 @@ resourceMetrics: stringValue: test-k8scluster-receiver-cronjob - key: k8s.cronjob.uid value: - stringValue: 6a3c3e99-5db1-481f-9d5d-782ae9de9f58 + stringValue: 342bf5aa-ded4-4177-82fa-01002b66b971 - key: k8s.namespace.name value: stringValue: default @@ -141,14 +141,14 @@ resourceMetrics: - description: The number of actively running jobs for a cronjob gauge: dataPoints: - - asInt: "2" + - asInt: "1" startTimeUnixNano: "1000000" timeUnixNano: "2000000" name: k8s.cronjob.active_jobs unit: '{job}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: k8s.daemonset.name @@ -156,7 +156,7 @@ resourceMetrics: stringValue: kindnet - key: k8s.daemonset.uid value: - stringValue: 4b389825-8fb0-4c66-a774-c9dfcba9d813 + stringValue: bcb148ce-6b1b-48b8-ade7-c54f95949a35 - key: k8s.namespace.name value: stringValue: kube-system @@ -197,7 +197,7 @@ resourceMetrics: unit: '{node}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: k8s.daemonset.name @@ -205,7 +205,7 @@ resourceMetrics: stringValue: kube-proxy - key: k8s.daemonset.uid value: - stringValue: b88aca8b-5776-4f6a-b1f4-d430f972e7fc + stringValue: 88bb9c34-24d1-45cd-8fce-39d5b216c158 - key: k8s.namespace.name value: stringValue: kube-system @@ -246,7 +246,7 @@ resourceMetrics: unit: '{node}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: k8s.deployment.name @@ -254,7 +254,7 @@ resourceMetrics: stringValue: coredns - key: k8s.deployment.uid value: - stringValue: 40f70689-1d8b-4eaf-b1b9-c7f1604ad616 + stringValue: 9d878e59-fc7e-4959-97d8-7434a08b5711 - key: k8s.namespace.name value: stringValue: kube-system @@ -279,7 +279,7 @@ resourceMetrics: unit: '{pod}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: k8s.deployment.name @@ -287,7 +287,7 @@ resourceMetrics: stringValue: local-path-provisioner - key: k8s.deployment.uid value: - stringValue: c97a7ce6-7bc2-475b-ad74-ccbd1c464e17 + stringValue: b542b645-1939-4eb7-bb0b-a3cfa3eceba2 - key: k8s.namespace.name value: stringValue: local-path-storage @@ -312,15 +312,15 @@ resourceMetrics: unit: '{pod}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: k8s.deployment.name value: - stringValue: otelcol-786b94f3 + stringValue: otelcol-aa664a32 - key: k8s.deployment.uid value: - stringValue: 6433ed08-d04b-458d-b3db-f526238a1e65 + stringValue: a787af64-c9cc-4532-8514-b769cce0f651 - key: k8s.namespace.name value: stringValue: default @@ -345,7 +345,7 @@ resourceMetrics: unit: '{pod}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: 
attributes: - key: k8s.hpa.name @@ -353,7 +353,7 @@ resourceMetrics: stringValue: test-k8scluster-receiver-hpa - key: k8s.hpa.uid value: - stringValue: 963572dc-4663-4fb2-930a-e143320a03c3 + stringValue: 0859b2dc-76e0-4ecb-b8c9-91bc2e05d9f4 - key: k8s.namespace.name value: stringValue: default @@ -394,72 +394,15 @@ resourceMetrics: unit: '{pod}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest - - resource: - attributes: - - key: k8s.job.name - value: - stringValue: test-k8scluster-receiver-cronjob-28839770 - - key: k8s.job.uid - value: - stringValue: a38da134-af71-4bc1-a585-c9e0342f9aab - - key: k8s.namespace.name - value: - stringValue: default - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: The number of actively running pods for a job - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.job.active_pods - unit: '{pod}' - - description: The desired number of successfully finished pods the job should be run with - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.job.desired_successful_pods - unit: '{pod}' - - description: The number of pods which reached phase Failed for a job - gauge: - dataPoints: - - asInt: "0" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.job.failed_pods - unit: '{pod}' - - description: The max desired number of pods the job should run at any given time - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.job.max_parallel_pods - unit: '{pod}' - - description: The number of pods which reached phase Succeeded for a job - gauge: - dataPoints: - - asInt: "0" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.job.successful_pods - unit: '{pod}' - scope: - name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: k8s.job.name value: - stringValue: test-k8scluster-receiver-cronjob-28839771 + stringValue: test-k8scluster-receiver-cronjob-28848323 - key: k8s.job.uid value: - stringValue: 37a9e0cc-5315-4e89-bb2b-5221849ff483 + stringValue: fc819ca2-19ea-4c90-a46f-0b4f468e088c - key: k8s.namespace.name value: stringValue: default @@ -508,7 +451,7 @@ resourceMetrics: unit: '{pod}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: k8s.job.name @@ -516,7 +459,7 @@ resourceMetrics: stringValue: test-k8scluster-receiver-job - key: k8s.job.uid value: - stringValue: b7ecbf9e-8e1a-4d70-beda-aab183645382 + stringValue: f9d8177f-2b94-4db6-a170-7c82fb37fc60 - key: k8s.namespace.name value: stringValue: default @@ -565,7 +508,7 @@ resourceMetrics: unit: '{pod}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: k8s.namespace.name @@ -573,10 +516,10 @@ resourceMetrics: stringValue: default - key: k8s.replicaset.name value: - stringValue: otelcol-786b94f3-67cf69944f + stringValue: otelcol-aa664a32-694bd5645b - key: k8s.replicaset.uid value: - stringValue: d532dd9c-0490-4f85-be78-fd21d8a1b56f + stringValue: 4dce2ca5-5236-40e2-a934-6c92782da3d7 schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: @@ -598,7 +541,7 
@@ resourceMetrics: unit: '{pod}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: k8s.namespace.name @@ -609,7 +552,7 @@ resourceMetrics: stringValue: test-k8scluster-receiver-statefulset - key: k8s.statefulset.uid value: - stringValue: 5ceb9f10-fc64-4d70-b6f8-228b4a0cfd3c + stringValue: 59d53c2e-cce4-403b-bcc0-b83fc1bc36b9 schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: @@ -647,7 +590,7 @@ resourceMetrics: unit: '{pod}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: k8s.namespace.name @@ -655,10 +598,10 @@ resourceMetrics: stringValue: kube-system - key: k8s.replicaset.name value: - stringValue: coredns-7db6d8ff4d + stringValue: coredns-76f75df574 - key: k8s.replicaset.uid value: - stringValue: 2c8fee82-58d4-46c4-ae5e-81afcc5f9948 + stringValue: d4a8fe9f-b4f6-4af4-8f00-9b87b00113d8 schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: @@ -680,7 +623,7 @@ resourceMetrics: unit: '{pod}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: k8s.namespace.name @@ -688,10 +631,10 @@ resourceMetrics: stringValue: local-path-storage - key: k8s.replicaset.name value: - stringValue: local-path-provisioner-988d74bc + stringValue: local-path-provisioner-7577fdbbfb - key: k8s.replicaset.uid value: - stringValue: e58f8ba2-8df8-425e-8a2a-c07cf351bbd8 + stringValue: 45dc7e2d-aff2-4ae8-b6d5-1b48dabbf262 schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: @@ -713,7 +656,7 @@ resourceMetrics: unit: '{pod}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: k8s.namespace.name @@ -724,10 +667,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: otelcol-786b94f3-67cf69944f-6zv25 + stringValue: otelcol-aa664a32-694bd5645b-9fmzd - key: k8s.pod.uid value: - stringValue: 1fb8be2b-ae32-41c2-a172-e6cb9beb7c37 + stringValue: 6913e713-f407-4186-aa31-946410566b81 schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: @@ -740,7 +683,7 @@ resourceMetrics: name: k8s.pod.phase scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: k8s.namespace.name @@ -751,10 +694,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: test-k8scluster-receiver-cronjob-28839770-9pp7g + stringValue: test-k8scluster-receiver-cronjob-28848323-mcsr8 - key: k8s.pod.uid value: - stringValue: e388cfa8-06c3-47b6-a7a6-113d7cdda849 + stringValue: 0010e2e8-8797-4bfb-8ef5-ac9c87cee06c schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: @@ -767,7 +710,7 @@ resourceMetrics: name: k8s.pod.phase scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: k8s.namespace.name @@ -778,10 +721,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: test-k8scluster-receiver-cronjob-28839771-llccr + stringValue: 
test-k8scluster-receiver-job-4whkq - key: k8s.pod.uid value: - stringValue: 0c2351b3-842c-4632-95c2-e7b061128a98 + stringValue: b93fa4ec-ab09-4815-9c20-bc4860febfcc schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: @@ -794,34 +737,7 @@ resourceMetrics: name: k8s.pod.phase scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest - - resource: - attributes: - - key: k8s.namespace.name - value: - stringValue: default - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: test-k8scluster-receiver-job-bzjrh - - key: k8s.pod.uid - value: - stringValue: 7e8bdace-4bce-4750-bd8c-d7359bb3e56b - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Current phase of the pod (1 - Pending, 2 - Running, 3 - Succeeded, 4 - Failed, 5 - Unknown) - gauge: - dataPoints: - - asInt: "2" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.pod.phase - scope: - name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: k8s.namespace.name @@ -835,7 +751,7 @@ resourceMetrics: stringValue: test-k8scluster-receiver-statefulset-0 - key: k8s.pod.uid value: - stringValue: f1ea5486-77b7-41c6-a3be-d03650011801 + stringValue: b2dd866e-3a93-465b-a6be-ee129972b9e1 schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: @@ -848,7 +764,7 @@ resourceMetrics: name: k8s.pod.phase scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: k8s.namespace.name @@ -859,10 +775,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: coredns-7db6d8ff4d-5kh78 + stringValue: coredns-76f75df574-mmjtf - key: k8s.pod.uid value: - stringValue: 2c5b60e0-a01e-4312-8818-d85f94ab841e + stringValue: ed088c60-0fe4-4b70-9eba-55ea389dc573 schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: @@ -875,7 +791,7 @@ resourceMetrics: name: k8s.pod.phase scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: k8s.namespace.name @@ -886,10 +802,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: coredns-7db6d8ff4d-p89tc + stringValue: coredns-76f75df574-pfdx5 - key: k8s.pod.uid value: - stringValue: f3494708-493a-4f0f-965c-dcedfdca253f + stringValue: ed6fbad4-9bc1-4142-9f4d-e772b6c9cfbd schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: @@ -902,7 +818,7 @@ resourceMetrics: name: k8s.pod.phase scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: k8s.namespace.name @@ -916,7 +832,7 @@ resourceMetrics: stringValue: etcd-kind-control-plane - key: k8s.pod.uid value: - stringValue: 40e8f13b-bec6-4dae-98d9-fd86939dfc4c + stringValue: 89fb113f-1bd3-4623-b46c-933da87e2719 schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: @@ -929,7 +845,7 @@ resourceMetrics: name: k8s.pod.phase scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: 
k8s.namespace.name @@ -940,10 +856,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kindnet-qwzhw + stringValue: kindnet-gj7ss - key: k8s.pod.uid value: - stringValue: 955e1f8c-2fe3-4a1d-85e6-31ff7410dc00 + stringValue: 3bc0ca83-314f-4653-9179-43351ce6ec3d schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: @@ -956,7 +872,7 @@ resourceMetrics: name: k8s.pod.phase scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: k8s.namespace.name @@ -970,7 +886,7 @@ resourceMetrics: stringValue: kube-apiserver-kind-control-plane - key: k8s.pod.uid value: - stringValue: d2032a9e-8c7c-4d9c-bbcb-526bd1a7b4f7 + stringValue: f66aabd7-0b50-4839-9a1f-fc35eb59d040 schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: @@ -983,7 +899,7 @@ resourceMetrics: name: k8s.pod.phase scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: k8s.namespace.name @@ -997,7 +913,7 @@ resourceMetrics: stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: e3e6d44a-5bc6-4687-85f1-37eb42c42c05 + stringValue: c74f189c-1292-4ad0-9cb3-ec00d9aea238 schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: @@ -1010,7 +926,7 @@ resourceMetrics: name: k8s.pod.phase scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: k8s.namespace.name @@ -1021,10 +937,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-proxy-kktz6 + stringValue: kube-proxy-vf6lk - key: k8s.pod.uid value: - stringValue: c347e316-1bab-4b4d-bc37-4f526fca19a4 + stringValue: 3ab3dafa-1d0a-450c-b04e-908f8c634a7a schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: @@ -1037,7 +953,7 @@ resourceMetrics: name: k8s.pod.phase scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: k8s.namespace.name @@ -1051,7 +967,7 @@ resourceMetrics: stringValue: kube-scheduler-kind-control-plane - key: k8s.pod.uid value: - stringValue: 991bbf5d-d6b9-4e33-8954-2a5f3505ff2d + stringValue: de119c6f-e99f-4a50-b5cd-ba1264b496fb schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: @@ -1064,7 +980,7 @@ resourceMetrics: name: k8s.pod.phase scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: k8s.namespace.name @@ -1075,10 +991,10 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: local-path-provisioner-988d74bc-c2wx7 + stringValue: local-path-provisioner-7577fdbbfb-q4vsw - key: k8s.pod.uid value: - stringValue: 1169e7ae-031e-4535-bb94-aee23b0b7df3 + stringValue: ff867316-a95b-4024-9da6-ad397f94750a schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: @@ -1091,36 +1007,44 @@ resourceMetrics: name: k8s.pod.phase scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: container.id value: - stringValue: 
10c9bec31ac94fc58e65ce5ed809455727eee9daae8ea80668990e848a7e7da0 + stringValue: 24a762e91f4c1f45583fb6a59963d3f7c8f4f05040b1a5ebd90c8f062c389b2f - key: container.image.name value: - stringValue: docker.io/library/alpine + stringValue: registry.k8s.io/kube-scheduler-amd64 - key: container.image.tag value: - stringValue: latest + stringValue: v1.29.2 - key: k8s.container.name value: - stringValue: alpine + stringValue: kube-scheduler - key: k8s.namespace.name value: - stringValue: default + stringValue: kube-system - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: test-k8scluster-receiver-cronjob-28839771-llccr + stringValue: kube-scheduler-kind-control-plane - key: k8s.pod.uid value: - stringValue: 0c2351b3-842c-4632-95c2-e7b061128a98 + stringValue: de119c6f-e99f-4a50-b5cd-ba1264b496fb schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: + - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details + gauge: + dataPoints: + - asDouble: 0.1 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.cpu_request + unit: '{cpu}' - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) gauge: dataPoints: @@ -1138,21 +1062,21 @@ resourceMetrics: unit: '{restart}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: container.id value: - stringValue: 1f493fa217d539d5b74ffc4579e887f904f630d320105a2b83a987105342ae80 + stringValue: 45e131a79cddf6a61e8283d9ec6f55305e2e222858853e7e799b40e349763dca - key: container.image.name value: - stringValue: registry.k8s.io/kube-proxy-arm64 + stringValue: registry.k8s.io/coredns/coredns - key: container.image.tag value: - stringValue: v1.30.0 + stringValue: v1.11.1 - key: k8s.container.name value: - stringValue: kube-proxy + stringValue: coredns - key: k8s.namespace.name value: stringValue: kube-system @@ -1161,13 +1085,37 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-proxy-kktz6 + stringValue: coredns-76f75df574-pfdx5 - key: k8s.pod.uid value: - stringValue: c347e316-1bab-4b4d-bc37-4f526fca19a4 + stringValue: ed6fbad4-9bc1-4142-9f4d-e772b6c9cfbd schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: + - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details + gauge: + dataPoints: + - asDouble: 0.1 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.cpu_request + unit: '{cpu}' + - description: Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details + gauge: + dataPoints: + - asInt: "178257920" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.memory_limit + unit: By + - description: Resource requested for the container. 
See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details + gauge: + dataPoints: + - asInt: "73400320" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.memory_request + unit: By - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) gauge: dataPoints: @@ -1185,12 +1133,12 @@ resourceMetrics: unit: '{restart}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: container.id value: - stringValue: 2cb1cb272a301a00f50020c3e4751bfa9a281496a6dc35f02a5546451e894e93 + stringValue: 5d94185c2a933a3b7f6be03668a145d76d4d881429edb95836f1ba4c8ca266bb - key: container.image.name value: stringValue: docker.io/library/nginx @@ -1211,7 +1159,7 @@ resourceMetrics: stringValue: test-k8scluster-receiver-statefulset-0 - key: k8s.pod.uid value: - stringValue: f1ea5486-77b7-41c6-a3be-d03650011801 + stringValue: b2dd866e-3a93-465b-a6be-ee129972b9e1 schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: @@ -1232,36 +1180,44 @@ resourceMetrics: unit: '{restart}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: container.id value: - stringValue: 567cd0ad83d68987dfb4dbffd056732b25bd2fc89e912605c16a5d1a4cd2b54c + stringValue: 5e156d44eca670f77bdbfd440118ef874e369814bff85f01c39f960dcec1254a - key: container.image.name value: - stringValue: docker.io/library/alpine + stringValue: registry.k8s.io/kube-controller-manager-amd64 - key: container.image.tag value: - stringValue: latest + stringValue: v1.29.2 - key: k8s.container.name value: - stringValue: alpine + stringValue: kube-controller-manager - key: k8s.namespace.name value: - stringValue: default + stringValue: kube-system - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: test-k8scluster-receiver-job-bzjrh + stringValue: kube-controller-manager-kind-control-plane - key: k8s.pod.uid value: - stringValue: 7e8bdace-4bce-4750-bd8c-d7359bb3e56b + stringValue: c74f189c-1292-4ad0-9cb3-ec00d9aea238 schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: + - description: Resource requested for the container. 
See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details + gauge: + dataPoints: + - asDouble: 0.2 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.cpu_request + unit: '{cpu}' - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) gauge: dataPoints: @@ -1279,21 +1235,21 @@ resourceMetrics: unit: '{restart}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: container.id value: - stringValue: 6af7be5c276ef225d046ad0de442ee450c39122a12991f9da82c9629f949967b + stringValue: 7e0eab8dbcf238dcb63b71b6d3889171dfe5c6febf66e5d6a8a793c53af4a2f9 - key: container.image.name value: - stringValue: registry.k8s.io/coredns/coredns + stringValue: docker.io/kindest/kindnetd - key: container.image.tag value: - stringValue: v1.11.1 + stringValue: v20240202-8f1494ea - key: k8s.container.name value: - stringValue: coredns + stringValue: kindnet-cni - key: k8s.namespace.name value: stringValue: kube-system @@ -1302,13 +1258,21 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: coredns-7db6d8ff4d-5kh78 + stringValue: kindnet-gj7ss - key: k8s.pod.uid value: - stringValue: 2c5b60e0-a01e-4312-8818-d85f94ab841e + stringValue: 3bc0ca83-314f-4653-9179-43351ce6ec3d schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: + - description: Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details + gauge: + dataPoints: + - asDouble: 0.1 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.cpu_limit + unit: '{cpu}' - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details gauge: dataPoints: @@ -1320,7 +1284,7 @@ resourceMetrics: - description: Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details gauge: dataPoints: - - asInt: "178257920" + - asInt: "52428800" startTimeUnixNano: "1000000" timeUnixNano: "2000000" name: k8s.container.memory_limit @@ -1328,7 +1292,7 @@ resourceMetrics: - description: Resource requested for the container. 
See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details gauge: dataPoints: - - asInt: "73400320" + - asInt: "52428800" startTimeUnixNano: "1000000" timeUnixNano: "2000000" name: k8s.container.memory_request @@ -1350,44 +1314,36 @@ resourceMetrics: unit: '{restart}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: container.id value: - stringValue: 7349de0618283fb11a957febc6689a0fbbfd9b52af1106bb3608bc4278a27ecf + stringValue: 95dd9be2d5d8a17bdde0d4ee90910dee65589de65eb7428b85068e66f0467501 - key: container.image.name value: - stringValue: registry.k8s.io/kube-scheduler-arm64 + stringValue: docker.io/library/alpine - key: container.image.tag value: - stringValue: v1.30.0 + stringValue: latest - key: k8s.container.name value: - stringValue: kube-scheduler + stringValue: alpine - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-scheduler-kind-control-plane + stringValue: test-k8scluster-receiver-cronjob-28848323-mcsr8 - key: k8s.pod.uid value: - stringValue: 991bbf5d-d6b9-4e33-8954-2a5f3505ff2d + stringValue: 0010e2e8-8797-4bfb-8ef5-ac9c87cee06c schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: - - asDouble: 0.1 - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.cpu_request - unit: '{cpu}' - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) gauge: dataPoints: @@ -1405,21 +1361,21 @@ resourceMetrics: unit: '{restart}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: container.id value: - stringValue: 9c70b20960c36ddb400607a354058cd7525ec491251379c5aa84c359c5d518d7 + stringValue: cf67cab7bc6b163d1d43edd2ed222ae5f76e123eb1ff0a1b770757feeebd66cf - key: container.image.name value: - stringValue: registry.k8s.io/etcd + stringValue: registry.k8s.io/kube-apiserver-amd64 - key: container.image.tag value: - stringValue: 3.5.12-0 + stringValue: v1.29.2 - key: k8s.container.name value: - stringValue: etcd + stringValue: kube-apiserver - key: k8s.namespace.name value: stringValue: kube-system @@ -1428,29 +1384,21 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: etcd-kind-control-plane + stringValue: kube-apiserver-kind-control-plane - key: k8s.pod.uid value: - stringValue: 40e8f13b-bec6-4dae-98d9-fd86939dfc4c + stringValue: f66aabd7-0b50-4839-9a1f-fc35eb59d040 schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details gauge: dataPoints: - - asDouble: 0.1 + - asDouble: 0.25 startTimeUnixNano: "1000000" timeUnixNano: "2000000" name: k8s.container.cpu_request unit: '{cpu}' - - description: Resource requested for the container. 
See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: - - asInt: "104857600" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.memory_request - unit: By - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) gauge: dataPoints: @@ -1468,60 +1416,36 @@ resourceMetrics: unit: '{restart}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: container.id value: - stringValue: 9c9e2d8cc660d21018432215b93bd4b9f26fbb0b0dfe71dca8c7089997cce23e + stringValue: d291e1a11a62f9340ff410465c38db9e57a3646c5b9b3b479e968d1dea5c38de - key: container.image.name value: - stringValue: registry.k8s.io/coredns/coredns + stringValue: docker.io/library/alpine - key: container.image.tag value: - stringValue: v1.11.1 + stringValue: latest - key: k8s.container.name value: - stringValue: coredns + stringValue: alpine - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: coredns-7db6d8ff4d-p89tc + stringValue: test-k8scluster-receiver-job-4whkq - key: k8s.pod.uid value: - stringValue: f3494708-493a-4f0f-965c-dcedfdca253f + stringValue: b93fa4ec-ab09-4815-9c20-bc4860febfcc schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: - - asDouble: 0.1 - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.cpu_request - unit: '{cpu}' - - description: Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: - - asInt: "178257920" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.memory_limit - unit: By - - description: Resource requested for the container. 
See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: - - asInt: "73400320" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.memory_request - unit: By - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) gauge: dataPoints: @@ -1539,36 +1463,68 @@ resourceMetrics: unit: '{restart}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: container.id value: - stringValue: acef2130e48fde6137e919c9eebc876435ff8a6a22031754fc1dde00cb6dae92 + stringValue: d54bdc2a7810420f7a2b46c187f5d44d3d4ff1f714f1aa47062d5bb6a46821f8 - key: container.image.name value: - stringValue: docker.io/kindest/local-path-provisioner + stringValue: docker.io/library/otelcontribcol - key: container.image.tag value: - stringValue: v20240202-8f1494ea + stringValue: latest - key: k8s.container.name value: - stringValue: local-path-provisioner + stringValue: opentelemetry-collector - key: k8s.namespace.name value: - stringValue: local-path-storage + stringValue: default - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: local-path-provisioner-988d74bc-c2wx7 + stringValue: otelcol-aa664a32-694bd5645b-9fmzd - key: k8s.pod.uid value: - stringValue: 1169e7ae-031e-4535-bb94-aee23b0b7df3 + stringValue: 6913e713-f407-4186-aa31-946410566b81 schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: + - description: Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details + gauge: + dataPoints: + - asDouble: 0.128 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.cpu_limit + unit: '{cpu}' + - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details + gauge: + dataPoints: + - asDouble: 0.128 + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.cpu_request + unit: '{cpu}' + - description: Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details + gauge: + dataPoints: + - asInt: "268435456" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.memory_limit + unit: By + - description: Resource requested for the container. 
See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details + gauge: + dataPoints: + - asInt: "268435456" + startTimeUnixNano: "1000000" + timeUnixNano: "2000000" + name: k8s.container.memory_request + unit: By - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) gauge: dataPoints: @@ -1586,44 +1542,36 @@ resourceMetrics: unit: '{restart}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: container.id value: - stringValue: bd25536854ec1e582f0bb3ac0f79ce761ae97317d9ba1f7b256f3e833bcba862 + stringValue: deecc7697ef991519718ffbc8f59e496f8ffc1c4120455f115f9cadf149bad95 - key: container.image.name value: - stringValue: registry.k8s.io/kube-apiserver-arm64 + stringValue: docker.io/kindest/local-path-provisioner - key: container.image.tag value: - stringValue: v1.30.0 + stringValue: v20240202-8f1494ea - key: k8s.container.name value: - stringValue: kube-apiserver + stringValue: local-path-provisioner - key: k8s.namespace.name value: - stringValue: kube-system + stringValue: local-path-storage - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-apiserver-kind-control-plane + stringValue: local-path-provisioner-7577fdbbfb-q4vsw - key: k8s.pod.uid value: - stringValue: d2032a9e-8c7c-4d9c-bbcb-526bd1a7b4f7 + stringValue: ff867316-a95b-4024-9da6-ad397f94750a schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: - - asDouble: 0.25 - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.cpu_request - unit: '{cpu}' - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) gauge: dataPoints: @@ -1641,21 +1589,21 @@ resourceMetrics: unit: '{restart}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: container.id value: - stringValue: cc67e9bcb82cbeed83bc8dec9cf2b0c7915d921e793efb0d21da5225dfeb907d + stringValue: dfabe34b0764f7189eb24204d390caefc32f56d0e713048532e637db0ce0184e - key: container.image.name value: - stringValue: registry.k8s.io/kube-controller-manager-arm64 + stringValue: registry.k8s.io/kube-proxy-amd64 - key: container.image.tag value: - stringValue: v1.30.0 + stringValue: v1.29.2 - key: k8s.container.name value: - stringValue: kube-controller-manager + stringValue: kube-proxy - key: k8s.namespace.name value: stringValue: kube-system @@ -1664,21 +1612,13 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kube-controller-manager-kind-control-plane + stringValue: kube-proxy-vf6lk - key: k8s.pod.uid value: - stringValue: e3e6d44a-5bc6-4687-85f1-37eb42c42c05 + stringValue: 3ab3dafa-1d0a-450c-b04e-908f8c634a7a schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - - description: Resource requested for the container. 
See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: - - asDouble: 0.2 - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.cpu_request - unit: '{cpu}' - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) gauge: dataPoints: @@ -1696,48 +1636,40 @@ resourceMetrics: unit: '{restart}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: container.id value: - stringValue: e14e6f08e774618b74202d19334266e4c65c1feb0b26ef7e8b7807644754f730 + stringValue: ed83ae29a5becf08c8cca7e1bf12f11f89e7ee9ac4ac1e50a38f18d68791aff5 - key: container.image.name value: - stringValue: docker.io/library/otelcontribcol + stringValue: registry.k8s.io/coredns/coredns - key: container.image.tag value: - stringValue: latest + stringValue: v1.11.1 - key: k8s.container.name value: - stringValue: opentelemetry-collector + stringValue: coredns - key: k8s.namespace.name value: - stringValue: default + stringValue: kube-system - key: k8s.node.name value: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: otelcol-786b94f3-67cf69944f-6zv25 + stringValue: coredns-76f75df574-mmjtf - key: k8s.pod.uid value: - stringValue: 1fb8be2b-ae32-41c2-a172-e6cb9beb7c37 + stringValue: ed088c60-0fe4-4b70-9eba-55ea389dc573 schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - - description: Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: - - asDouble: 0.128 - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.cpu_limit - unit: '{cpu}' - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details gauge: dataPoints: - - asDouble: 0.128 + - asDouble: 0.1 startTimeUnixNano: "1000000" timeUnixNano: "2000000" name: k8s.container.cpu_request @@ -1745,7 +1677,7 @@ resourceMetrics: - description: Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details gauge: dataPoints: - - asInt: "268435456" + - asInt: "178257920" startTimeUnixNano: "1000000" timeUnixNano: "2000000" name: k8s.container.memory_limit @@ -1753,7 +1685,7 @@ resourceMetrics: - description: Resource requested for the container. 
See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details gauge: dataPoints: - - asInt: "268435456" + - asInt: "73400320" startTimeUnixNano: "1000000" timeUnixNano: "2000000" name: k8s.container.memory_request @@ -1775,21 +1707,21 @@ resourceMetrics: unit: '{restart}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest + version: 0.112.0-dev - resource: attributes: - key: container.id value: - stringValue: ed3ab86077c3de40d6d9125bf4f25dbf1734c58c9c3a864e5ccc1ce3bcfc1d30 + stringValue: f3f77fad1afc9498b4a341a1fb241821a91fc33f1401bd4e62920e929659be7e - key: container.image.name value: - stringValue: docker.io/kindest/kindnetd + stringValue: registry.k8s.io/etcd - key: container.image.tag value: - stringValue: v20240202-8f1494ea + stringValue: 3.5.10-0 - key: k8s.container.name value: - stringValue: kindnet-cni + stringValue: etcd - key: k8s.namespace.name value: stringValue: kube-system @@ -1798,21 +1730,13 @@ resourceMetrics: stringValue: kind-control-plane - key: k8s.pod.name value: - stringValue: kindnet-qwzhw + stringValue: etcd-kind-control-plane - key: k8s.pod.uid value: - stringValue: 955e1f8c-2fe3-4a1d-85e6-31ff7410dc00 + stringValue: 89fb113f-1bd3-4623-b46c-933da87e2719 schemaUrl: https://opentelemetry.io/schemas/1.18.0 scopeMetrics: - metrics: - - description: Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: - - asDouble: 0.1 - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.cpu_limit - unit: '{cpu}' - description: Resource requested for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details gauge: dataPoints: @@ -1821,18 +1745,10 @@ resourceMetrics: timeUnixNano: "2000000" name: k8s.container.cpu_request unit: '{cpu}' - - description: Maximum resource limit set for the container. See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details - gauge: - dataPoints: - - asInt: "52428800" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.memory_limit - unit: By - description: Resource requested for the container. 
See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#resourcerequirements-v1-core for details gauge: dataPoints: - - asInt: "52428800" + - asInt: "104857600" startTimeUnixNano: "1000000" timeUnixNano: "2000000" name: k8s.container.memory_request @@ -1854,51 +1770,4 @@ resourceMetrics: unit: '{restart}' scope: name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver - version: latest - - resource: - attributes: - - key: container.id - value: - stringValue: f01b9f5343f9ba34db396889c75d6128dace385b8f0c7aed2d39866ddd0df826 - - key: container.image.name - value: - stringValue: docker.io/library/alpine - - key: container.image.tag - value: - stringValue: latest - - key: k8s.container.name - value: - stringValue: alpine - - key: k8s.namespace.name - value: - stringValue: default - - key: k8s.node.name - value: - stringValue: kind-control-plane - - key: k8s.pod.name - value: - stringValue: test-k8scluster-receiver-cronjob-28839770-9pp7g - - key: k8s.pod.uid - value: - stringValue: e388cfa8-06c3-47b6-a7a6-113d7cdda849 - schemaUrl: https://opentelemetry.io/schemas/1.18.0 - scopeMetrics: - - metrics: - - description: Whether a container has passed its readiness probe (0 for no, 1 for yes) - gauge: - dataPoints: - - asInt: "1" - startTimeUnixNano: "1000000" - timeUnixNano: "2000000" - name: k8s.container.ready - - description: How many times the container has restarted in the recent past. This value is pulled directly from the K8s API and the value can go indefinitely high and be reset to 0 at any time depending on how your kubelet is configured to prune dead containers. It is best to not depend too much on the exact value but rather look at it as either == 0, in which case you can conclude there were no restarts in the recent past, or > 0, in which case you can conclude there were restarts in the recent past, and not try and analyze the value beyond that. 
-              gauge:
-                dataPoints:
-                  - asInt: "0"
-                    startTimeUnixNano: "1000000"
-                    timeUnixNano: "2000000"
-              name: k8s.container.restarts
-              unit: '{restart}'
-      scope:
-        name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver
-        version: latest
+        version: 0.112.0-dev
diff --git a/receiver/k8sclusterreceiver/testdata/e2e/testobjects/cronjob.yaml b/receiver/k8sclusterreceiver/testdata/e2e/testobjects/cronjob.yaml
index 706bc90f26df..b78f59a4fe07 100644
--- a/receiver/k8sclusterreceiver/testdata/e2e/testobjects/cronjob.yaml
+++ b/receiver/k8sclusterreceiver/testdata/e2e/testobjects/cronjob.yaml
@@ -5,6 +5,8 @@ metadata:
   namespace: default
 spec:
   schedule: "*/1 * * * *"
+  # ensure that only one job/pod is active for the lifetime of the test
+  concurrencyPolicy: Forbid
   jobTemplate:
     spec:
       template:
@@ -15,5 +17,5 @@ spec:
         args:
           - /bin/sh
           - -c
-          - "echo Running; sleep 120"
+          - "echo Running; sleep 600"
         restartPolicy: OnFailure

From 4b1ca008869480ccd6f6eefba8e05e0a02bef8bb Mon Sep 17 00:00:00 2001
From: Yang Song
Date: Thu, 7 Nov 2024 15:47:02 -0500
Subject: [PATCH 06/24] Revert "[chore][testbed] - Further testbed enhancements" (#36266)

Reverts open-telemetry/opentelemetry-collector-contrib#35209

Breaks mainline:
https://github.com/open-telemetry/opentelemetry-collector-contrib/actions/runs/11727234196/job/32667863492

---
 testbed/testbed/receivers.go |  9 +---
 testbed/tests/log_test.go    | 36 ---------------
 testbed/tests/scenarios.go   | 89 ------------------------------------
 3 files changed, 1 insertion(+), 133 deletions(-)

diff --git a/testbed/testbed/receivers.go b/testbed/testbed/receivers.go
index 6db874822f94..3c7b161e2547 100644
--- a/testbed/testbed/receivers.go
+++ b/testbed/testbed/receivers.go
@@ -55,7 +55,6 @@ type BaseOTLPDataReceiver struct {
 	compression  string
 	retry        string
 	sendingQueue string
-	timeout      string
 }
 
 func (bor *BaseOTLPDataReceiver) Start(tc consumer.Traces, mc consumer.Metrics, lc consumer.Logs) error {
@@ -99,11 +98,6 @@ func (bor *BaseOTLPDataReceiver) WithQueue(sendingQueue string) *BaseOTLPDataRec
 	return bor
 }
 
-func (bor *BaseOTLPDataReceiver) WithTimeout(timeout string) *BaseOTLPDataReceiver {
-	bor.timeout = timeout
-	return bor
-}
-
 func (bor *BaseOTLPDataReceiver) Stop() error {
 	// we reuse the receiver across signals. Shutting down the log receiver shuts down the metrics and traces receiver.
 	return bor.logReceiver.Shutdown(context.Background())
@@ -124,9 +118,8 @@ func (bor *BaseOTLPDataReceiver) GenConfigYAMLStr() string {
     endpoint: "%s"
     %s
     %s
-    %s
     tls:
-      insecure: true`, bor.exporterType, addr, bor.retry, bor.sendingQueue, bor.timeout)
+      insecure: true`, bor.exporterType, addr, bor.retry, bor.sendingQueue)
 	comp := "none"
 	if bor.compression != "" {
 		comp = bor.compression
diff --git a/testbed/tests/log_test.go b/testbed/tests/log_test.go
index 727fde8141ea..8b44f83f670a 100644
--- a/testbed/tests/log_test.go
+++ b/testbed/tests/log_test.go
@@ -369,39 +369,3 @@ func TestLargeFileOnce(t *testing.T) {
 	tc.StopAgent()
 	tc.ValidateData()
 }
-
-func TestMemoryLimiterHit(t *testing.T) {
-	otlpreceiver := testbed.NewOTLPDataReceiver(testutil.GetAvailablePort(t))
-	otlpreceiver.WithRetry(`
-    retry_on_failure:
-      enabled: true
-      max_interval: 5s
-`)
-	otlpreceiver.WithQueue(`
-    sending_queue:
-      enabled: true
-      queue_size: 100000
-      num_consumers: 20
-`)
-	otlpreceiver.WithTimeout(`
-    timeout: 0s
-`)
-	processors := map[string]string{
-		"memory_limiter": `
-  memory_limiter:
-    check_interval: 1s
-    limit_mib: 300
-    spike_limit_mib: 150
-`,
-	}
-	ScenarioMemoryLimiterHit(
-		t,
-		testbed.NewOTLPLogsDataSender(testbed.DefaultHost, testutil.GetAvailablePort(t)),
-		otlpreceiver,
-		testbed.LoadOptions{
-			DataItemsPerSecond: 100000,
-			ItemsPerBatch:      1000,
-			Parallel:           1,
-		},
-		performanceResultsSummary, 100, processors)
-}
diff --git a/testbed/tests/scenarios.go b/testbed/tests/scenarios.go
index 8a3d5f694662..85973f89b29a 100644
--- a/testbed/tests/scenarios.go
+++ b/testbed/tests/scenarios.go
@@ -552,95 +552,6 @@ func ScenarioLong(
 	tc.ValidateData()
 }
 
-func ScenarioMemoryLimiterHit(
-	t *testing.T,
-	sender testbed.DataSender,
-	receiver testbed.DataReceiver,
-	loadOptions testbed.LoadOptions,
-	resultsSummary testbed.TestResultsSummary,
-	sleepTime int,
-	processors map[string]string,
-) {
-	resultDir, err := filepath.Abs(path.Join("results", t.Name()))
-	require.NoError(t, err)
-
-	agentProc := testbed.NewChildProcessCollector(testbed.WithEnvVar("GOMAXPROCS", "2"))
-
-	configStr := createConfigYaml(t, sender, receiver, resultDir, processors, nil)
-	fmt.Println(configStr)
-	configCleanup, err := agentProc.PrepareConfig(configStr)
-	require.NoError(t, err)
-	defer configCleanup()
-	dataProvider := testbed.NewPerfTestDataProvider(loadOptions)
-	dataChannel := make(chan bool)
-	tc := testbed.NewTestCase(
-		t,
-		dataProvider,
-		sender,
-		receiver,
-		agentProc,
-		&testbed.CorrectnessLogTestValidator{},
-		resultsSummary,
-		testbed.WithDecisionFunc(func() error { return testbed.GenerateNonPernamentErrorUntil(dataChannel) }),
-	)
-	t.Cleanup(tc.Stop)
-	tc.MockBackend.EnableRecording()
-
-	tc.StartBackend()
-	tc.StartAgent()
-
-	tc.StartLoad(loadOptions)
-
-	tc.WaitFor(func() bool { return tc.LoadGenerator.DataItemsSent() > 0 }, "load generator started")
-
-	var timer *time.Timer
-
-	// check for "Memory usage is above hard limit"
-	tc.WaitForN(func() bool {
-		logFound := tc.AgentLogsContains("Memory usage is above soft limit. Refusing data.")
-		if !logFound {
-			dataChannel <- true
-			return false
-		}
-		// Log found. But keep the collector under stress for 10 more seconds so it starts refusing data
-		if timer == nil {
-			timer = time.NewTimer(10 * time.Second)
-		}
-		select {
-		case <-timer.C:
-		default:
-			return false
-		}
-		close(dataChannel)
-		return logFound
-	}, time.Second*time.Duration(sleepTime), "memory limit not hit")
-
-	// check if data started to be received successfully
-	tc.WaitForN(func() bool {
-		return tc.MockBackend.DataItemsReceived() > 0
-	}, time.Second*time.Duration(sleepTime), "data started to be successfully received")
-
-	// stop sending any more data
-	tc.StopLoad()
-
-	tc.WaitForN(func() bool { return tc.LoadGenerator.DataItemsSent() == tc.MockBackend.DataItemsReceived() }, time.Second*time.Duration(sleepTime), "all logs received")
-
-	tc.WaitForN(func() bool {
-		// get IDs from logs to retry
-		logsToRetry := getLogsID(tc.MockBackend.LogsToRetry)
-
-		// get IDs from logs received successfully
-		successfulLogs := getLogsID(tc.MockBackend.ReceivedLogs)
-
-		// check if all the logs to retry were actually retried
-		logsWereRetried := allElementsExistInSlice(logsToRetry, successfulLogs)
-		return logsWereRetried
-	}, time.Second*time.Duration(sleepTime), "all logs were retried successfully")
-
-	tc.StopAgent()
-	tc.ValidateData()
-}
-
 func constructLoadOptions(test TestCase) testbed.LoadOptions {
 	options := testbed.LoadOptions{DataItemsPerSecond: 1000, ItemsPerBatch: 10}
 	options.Attributes = make(map[string]string)

From acab5bb10eab704c475d805d7ad35ce74b497b24 Mon Sep 17 00:00:00 2001
From: Antoine Toulme
Date: Thu, 7 Nov 2024 16:27:44 -0800
Subject: [PATCH 07/24] [chore] fix codeowners (#36271)

---
 .github/CODEOWNERS | 560 ++++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 273 insertions(+), 287 deletions(-)

diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index f0aeb83d3281..4d2d3a841de6 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -15,300 +15,286 @@
 * @open-telemetry/collector-contrib-approvers
-cmd/githubgen/ @open-telemetry/collector-contrib-approvers @atoulme
-cmd/opampsupervisor/ @open-telemetry/collector-contrib-approvers @evan-bradley @atoulme @tigrannajaryan
-cmd/otelcontribcol/ @open-telemetry/collector-contrib-approvers
-cmd/oteltestbedcol/ @open-telemetry/collector-contrib-approvers
-cmd/telemetrygen/ @open-telemetry/collector-contrib-approvers @mx-psi @codeboten
-confmap/provider/aesprovider/ @open-telemetry/collector-contrib-approvers @djaglowski @shazlehu
-confmap/provider/s3provider/ @open-telemetry/collector-contrib-approvers @Aneurysm9
-confmap/provider/secretsmanagerprovider/ @open-telemetry/collector-contrib-approvers @driverpt @atoulme
-connector/countconnector/ @open-telemetry/collector-contrib-approvers @djaglowski @jpkrohling
-connector/datadogconnector/ @open-telemetry/collector-contrib-approvers @mx-psi @dineshg13 @ankitpatel96
-connector/exceptionsconnector/
@open-telemetry/collector-contrib-approvers @jpkrohling @marctc -connector/failoverconnector/ @open-telemetry/collector-contrib-approvers @akats7 @djaglowski @fatsheep9146 -connector/grafanacloudconnector/ @open-telemetry/collector-contrib-approvers @jpkrohling @rlankfo @jcreixell -connector/otlpjsonconnector/ @open-telemetry/collector-contrib-approvers @djaglowski @ChrsMark -connector/roundrobinconnector/ @open-telemetry/collector-contrib-approvers @bogdandrutu -connector/routingconnector/ @open-telemetry/collector-contrib-approvers @jpkrohling @mwear -connector/servicegraphconnector/ @open-telemetry/collector-contrib-approvers @jpkrohling @mapno @JaredTan95 -connector/spanmetricsconnector/ @open-telemetry/collector-contrib-approvers @portertech @Frapschen -connector/sumconnector/ @open-telemetry/collector-contrib-approvers @greatestusername @shalper2 @crobert-1 +connector/countconnector/ @open-telemetry/collector-contrib-approvers @djaglowski @jpkrohling +connector/datadogconnector/ @open-telemetry/collector-contrib-approvers @mx-psi @dineshg13 @ankitpatel96 +connector/exceptionsconnector/ @open-telemetry/collector-contrib-approvers @jpkrohling @marctc +connector/failoverconnector/ @open-telemetry/collector-contrib-approvers @akats7 @djaglowski @fatsheep9146 +connector/grafanacloudconnector/ @open-telemetry/collector-contrib-approvers @jpkrohling @rlankfo @jcreixell +connector/otlpjsonconnector/ @open-telemetry/collector-contrib-approvers @djaglowski @ChrsMark +connector/roundrobinconnector/ @open-telemetry/collector-contrib-approvers @bogdandrutu +connector/routingconnector/ @open-telemetry/collector-contrib-approvers @jpkrohling @mwear +connector/servicegraphconnector/ @open-telemetry/collector-contrib-approvers @jpkrohling @mapno @JaredTan95 +connector/spanmetricsconnector/ @open-telemetry/collector-contrib-approvers @portertech @Frapschen +connector/sumconnector/ @open-telemetry/collector-contrib-approvers @greatestusername @shalper2 @crobert-1 -examples/demo/ @open-telemetry/collector-contrib-approvers @open-telemetry/collector-approvers +examples/demo/ @open-telemetry/collector-contrib-approvers @open-telemetry/collector-approvers -exporter/alertmanagerexporter/ @open-telemetry/collector-contrib-approvers @jpkrohling @sokoide @mcube8 -exporter/alibabacloudlogserviceexporter/ @open-telemetry/collector-contrib-approvers @shabicheng @kongluoxing @qiansheng91 -exporter/awscloudwatchlogsexporter/ @open-telemetry/collector-contrib-approvers @boostchicken @bryan-aguilar @rapphil -exporter/awsemfexporter/ @open-telemetry/collector-contrib-approvers @Aneurysm9 @shaochengwang @mxiamxia @bryan-aguilar -exporter/awskinesisexporter/ @open-telemetry/collector-contrib-approvers @Aneurysm9 @MovieStoreGuy -exporter/awss3exporter/ @open-telemetry/collector-contrib-approvers @atoulme @pdelewski -exporter/awsxrayexporter/ @open-telemetry/collector-contrib-approvers @wangzlei @srprash -exporter/azuredataexplorerexporter/ @open-telemetry/collector-contrib-approvers @asaharn @ag-ramachandran -exporter/azuremonitorexporter/ @open-telemetry/collector-contrib-approvers @pcwiese -exporter/carbonexporter/ @open-telemetry/collector-contrib-approvers @aboguszewski-sumo -exporter/cassandraexporter/ @open-telemetry/collector-contrib-approvers @atoulme @emreyalvac -exporter/clickhouseexporter/ @open-telemetry/collector-contrib-approvers @hanjm @dmitryax @Frapschen @SpencerTorres -exporter/coralogixexporter/ @open-telemetry/collector-contrib-approvers @povilasv @matej-g -exporter/datadogexporter/ 
@open-telemetry/collector-contrib-approvers @mx-psi @dineshg13 @liustanley @songy23 @mackjmr @ankitpatel96 -exporter/datasetexporter/ @open-telemetry/collector-contrib-approvers @atoulme @martin-majlis-s1 @zdaratom-s1 @tomaz-s1 -exporter/dorisexporter/ @open-telemetry/collector-contrib-approvers @atoulme @joker-star-l -exporter/elasticsearchexporter/ @open-telemetry/collector-contrib-approvers @JaredTan95 @carsonip @lahsivjar -exporter/fileexporter/ @open-telemetry/collector-contrib-approvers @atingchen -exporter/googlecloudexporter/ @open-telemetry/collector-contrib-approvers @aabmass @dashpole @jsuereth @punya @psx95 -exporter/googlecloudpubsubexporter/ @open-telemetry/collector-contrib-approvers @alexvanboxel -exporter/googlemanagedprometheusexporter/ @open-telemetry/collector-contrib-approvers @aabmass @dashpole @jsuereth @punya @psx95 -exporter/honeycombmarkerexporter/ @open-telemetry/collector-contrib-approvers @TylerHelmuth @fchikwekwe -exporter/influxdbexporter/ @open-telemetry/collector-contrib-approvers @jacobmarble -exporter/kafkaexporter/ @open-telemetry/collector-contrib-approvers @pavolloffay @MovieStoreGuy -exporter/kineticaexporter/ @open-telemetry/collector-contrib-approvers @am-kinetica @TylerHelmuth -exporter/loadbalancingexporter/ @open-telemetry/collector-contrib-approvers @jpkrohling -exporter/logicmonitorexporter/ @open-telemetry/collector-contrib-approvers @bogdandrutu @khyatigandhi6 @avadhut123pisal -exporter/logzioexporter/ @open-telemetry/collector-contrib-approvers @yotamloe -exporter/lokiexporter/ @open-telemetry/collector-contrib-approvers @gramidt @jpkrohling @mar4uk -exporter/mezmoexporter/ @open-telemetry/collector-contrib-approvers @dashpole @billmeyer @gjanco -exporter/opencensusexporter/ @open-telemetry/collector-contrib-approvers @open-telemetry/collector-approvers -exporter/otelarrowexporter/ @open-telemetry/collector-contrib-approvers @jmacd @moh-osman3 @lquerel -exporter/prometheusexporter/ @open-telemetry/collector-contrib-approvers @Aneurysm9 @dashpole -exporter/prometheusremotewriteexporter/ @open-telemetry/collector-contrib-approvers @Aneurysm9 @rapphil @dashpole -exporter/pulsarexporter/ @open-telemetry/collector-contrib-approvers @dmitryax @dao-jun -exporter/rabbitmqexporter/ @open-telemetry/collector-contrib-approvers @swar8080 @atoulme -exporter/sapmexporter/ @open-telemetry/collector-contrib-approvers @dmitryax @atoulme -exporter/sentryexporter/ @open-telemetry/collector-contrib-approvers @AbhiPrasad -exporter/signalfxexporter/ @open-telemetry/collector-contrib-approvers @dmitryax @crobert-1 -exporter/splunkhecexporter/ @open-telemetry/collector-contrib-approvers @atoulme @dmitryax -exporter/sumologicexporter/ @open-telemetry/collector-contrib-approvers @rnishtala-sumo @chan-tim-sumo -exporter/syslogexporter/ @open-telemetry/collector-contrib-approvers @kasia-kujawa @rnishtala-sumo @andrzej-stencel -exporter/tencentcloudlogserviceexporter/ @open-telemetry/collector-contrib-approvers @wgliang @yiyang5055 -exporter/zipkinexporter/ @open-telemetry/collector-contrib-approvers @MovieStoreGuy @andrzej-stencel @crobert-1 +exporter/alertmanagerexporter/ @open-telemetry/collector-contrib-approvers @jpkrohling @sokoide @mcube8 +exporter/alibabacloudlogserviceexporter/ @open-telemetry/collector-contrib-approvers @shabicheng @kongluoxing @qiansheng91 +exporter/awscloudwatchlogsexporter/ @open-telemetry/collector-contrib-approvers @boostchicken @bryan-aguilar @rapphil +exporter/awsemfexporter/ @open-telemetry/collector-contrib-approvers @Aneurysm9 
@shaochengwang @mxiamxia @bryan-aguilar +exporter/awskinesisexporter/ @open-telemetry/collector-contrib-approvers @Aneurysm9 @MovieStoreGuy +exporter/awss3exporter/ @open-telemetry/collector-contrib-approvers @atoulme @pdelewski +exporter/awsxrayexporter/ @open-telemetry/collector-contrib-approvers @wangzlei @srprash +exporter/azuredataexplorerexporter/ @open-telemetry/collector-contrib-approvers @asaharn @ag-ramachandran +exporter/azuremonitorexporter/ @open-telemetry/collector-contrib-approvers @pcwiese +exporter/carbonexporter/ @open-telemetry/collector-contrib-approvers @aboguszewski-sumo +exporter/cassandraexporter/ @open-telemetry/collector-contrib-approvers @atoulme @emreyalvac +exporter/clickhouseexporter/ @open-telemetry/collector-contrib-approvers @hanjm @dmitryax @Frapschen @SpencerTorres +exporter/coralogixexporter/ @open-telemetry/collector-contrib-approvers @povilasv @matej-g +exporter/datadogexporter/ @open-telemetry/collector-contrib-approvers @mx-psi @dineshg13 @liustanley @songy23 @mackjmr @ankitpatel96 +exporter/datasetexporter/ @open-telemetry/collector-contrib-approvers @atoulme @martin-majlis-s1 @zdaratom-s1 @tomaz-s1 +exporter/dorisexporter/ @open-telemetry/collector-contrib-approvers @atoulme @joker-star-l +exporter/elasticsearchexporter/ @open-telemetry/collector-contrib-approvers @JaredTan95 @carsonip @lahsivjar +exporter/fileexporter/ @open-telemetry/collector-contrib-approvers @atingchen +exporter/googlecloudexporter/ @open-telemetry/collector-contrib-approvers @aabmass @dashpole @jsuereth @punya @psx95 +exporter/googlecloudpubsubexporter/ @open-telemetry/collector-contrib-approvers @alexvanboxel +exporter/googlemanagedprometheusexporter/ @open-telemetry/collector-contrib-approvers @aabmass @dashpole @jsuereth @punya @psx95 +exporter/honeycombmarkerexporter/ @open-telemetry/collector-contrib-approvers @TylerHelmuth @fchikwekwe +exporter/influxdbexporter/ @open-telemetry/collector-contrib-approvers @jacobmarble +exporter/kafkaexporter/ @open-telemetry/collector-contrib-approvers @pavolloffay @MovieStoreGuy +exporter/kineticaexporter/ @open-telemetry/collector-contrib-approvers @am-kinetica @TylerHelmuth +exporter/loadbalancingexporter/ @open-telemetry/collector-contrib-approvers @jpkrohling +exporter/logicmonitorexporter/ @open-telemetry/collector-contrib-approvers @bogdandrutu @khyatigandhi6 @avadhut123pisal +exporter/logzioexporter/ @open-telemetry/collector-contrib-approvers @yotamloe +exporter/lokiexporter/ @open-telemetry/collector-contrib-approvers @gramidt @jpkrohling @mar4uk +exporter/mezmoexporter/ @open-telemetry/collector-contrib-approvers @dashpole @billmeyer @gjanco +exporter/opencensusexporter/ @open-telemetry/collector-contrib-approvers @open-telemetry/collector-approvers +exporter/otelarrowexporter/ @open-telemetry/collector-contrib-approvers @jmacd @moh-osman3 @lquerel +exporter/prometheusexporter/ @open-telemetry/collector-contrib-approvers @Aneurysm9 @dashpole +exporter/prometheusremotewriteexporter/ @open-telemetry/collector-contrib-approvers @Aneurysm9 @rapphil @dashpole +exporter/pulsarexporter/ @open-telemetry/collector-contrib-approvers @dmitryax @dao-jun +exporter/rabbitmqexporter/ @open-telemetry/collector-contrib-approvers @swar8080 @atoulme +exporter/sapmexporter/ @open-telemetry/collector-contrib-approvers @dmitryax @atoulme +exporter/sentryexporter/ @open-telemetry/collector-contrib-approvers @AbhiPrasad +exporter/signalfxexporter/ @open-telemetry/collector-contrib-approvers @dmitryax @crobert-1 +exporter/splunkhecexporter/ 
@open-telemetry/collector-contrib-approvers @atoulme @dmitryax +exporter/sumologicexporter/ @open-telemetry/collector-contrib-approvers @rnishtala-sumo @chan-tim-sumo +exporter/syslogexporter/ @open-telemetry/collector-contrib-approvers @kasia-kujawa @rnishtala-sumo @andrzej-stencel +exporter/tencentcloudlogserviceexporter/ @open-telemetry/collector-contrib-approvers @wgliang @yiyang5055 +exporter/zipkinexporter/ @open-telemetry/collector-contrib-approvers @MovieStoreGuy @andrzej-stencel @crobert-1 -extension/ackextension/ @open-telemetry/collector-contrib-approvers @zpzhuSplunk @splunkericl -extension/asapauthextension/ @open-telemetry/collector-contrib-approvers @jamesmoessis @MovieStoreGuy -extension/awsproxy/ @open-telemetry/collector-contrib-approvers @Aneurysm9 @mxiamxia -extension/basicauthextension/ @open-telemetry/collector-contrib-approvers @jpkrohling @frzifus -extension/bearertokenauthextension/ @open-telemetry/collector-contrib-approvers @jpkrohling @frzifus -extension/encoding/ @open-telemetry/collector-contrib-approvers @atoulme @dao-jun @dmitryax @MovieStoreGuy @VihasMakwana -extension/encoding/avrologencodingextension/ @open-telemetry/collector-contrib-approvers @thmshmm -extension/encoding/jaegerencodingextension/ @open-telemetry/collector-contrib-approvers @MovieStoreGuy @atoulme -extension/encoding/jsonlogencodingextension/ @open-telemetry/collector-contrib-approvers @VihasMakwana @atoulme -extension/encoding/otlpencodingextension/ @open-telemetry/collector-contrib-approvers @dao-jun @VihasMakwana -extension/encoding/textencodingextension/ @open-telemetry/collector-contrib-approvers @MovieStoreGuy @atoulme -extension/encoding/zipkinencodingextension/ @open-telemetry/collector-contrib-approvers @MovieStoreGuy @dao-jun -extension/googleclientauthextension/ @open-telemetry/collector-contrib-approvers @dashpole @aabmass @jsuereth @punya @psx95 -extension/headerssetterextension/ @open-telemetry/collector-contrib-approvers @jpkrohling -extension/healthcheckextension/ @open-telemetry/collector-contrib-approvers @jpkrohling -extension/healthcheckv2extension/ @open-telemetry/collector-contrib-approvers @jpkrohling @mwear -extension/httpforwarderextension/ @open-telemetry/collector-contrib-approvers @atoulme -extension/jaegerremotesampling/ @open-telemetry/collector-contrib-approvers @yurishkuro @frzifus -extension/oauth2clientauthextension/ @open-telemetry/collector-contrib-approvers @pavankrish123 @jpkrohling -extension/observer/ @open-telemetry/collector-contrib-approvers @dmitryax -extension/observer/cfgardenobserver/ @open-telemetry/collector-contrib-approvers @crobert-1 @cemdk @m1rp @jriguera -extension/observer/dockerobserver/ @open-telemetry/collector-contrib-approvers @MovieStoreGuy -extension/observer/ecsobserver/ @open-telemetry/collector-contrib-approvers @dmitryax -extension/observer/hostobserver/ @open-telemetry/collector-contrib-approvers @MovieStoreGuy -extension/observer/k8sobserver/ @open-telemetry/collector-contrib-approvers @dmitryax @ChrsMark -extension/oidcauthextension/ @open-telemetry/collector-contrib-approvers @jpkrohling -extension/opampcustommessages/ @open-telemetry/collector-contrib-approvers @evan-bradley -extension/opampextension/ @open-telemetry/collector-contrib-approvers @portertech @evan-bradley @tigrannajaryan -extension/pprofextension/ @open-telemetry/collector-contrib-approvers @MovieStoreGuy -extension/remotetapextension/ @open-telemetry/collector-contrib-approvers @atoulme -extension/sigv4authextension/ 
@open-telemetry/collector-contrib-approvers @Aneurysm9 @erichsueh3 -extension/solarwindsapmsettingsextension/ @open-telemetry/collector-contrib-approvers @jerrytfleung @cheempz -extension/storage/ @open-telemetry/collector-contrib-approvers @dmitryax @atoulme @djaglowski -extension/storage/dbstorage/ @open-telemetry/collector-contrib-approvers @dmitryax @atoulme -extension/storage/filestorage/ @open-telemetry/collector-contrib-approvers @djaglowski -extension/storage/redisstorageextension/ @open-telemetry/collector-contrib-approvers @atoulme -extension/sumologicextension/ @open-telemetry/collector-contrib-approvers @rnishtala-sumo @chan-tim-sumo +extension/ackextension/ @open-telemetry/collector-contrib-approvers @zpzhuSplunk @splunkericl +extension/asapauthextension/ @open-telemetry/collector-contrib-approvers @jamesmoessis @MovieStoreGuy +extension/awsproxy/ @open-telemetry/collector-contrib-approvers @Aneurysm9 @mxiamxia +extension/basicauthextension/ @open-telemetry/collector-contrib-approvers @jpkrohling @frzifus +extension/bearertokenauthextension/ @open-telemetry/collector-contrib-approvers @jpkrohling @frzifus +extension/encoding/ @open-telemetry/collector-contrib-approvers @atoulme @dao-jun @dmitryax @MovieStoreGuy @VihasMakwana +extension/encoding/avrologencodingextension/ @open-telemetry/collector-contrib-approvers @thmshmm +extension/encoding/jaegerencodingextension/ @open-telemetry/collector-contrib-approvers @MovieStoreGuy @atoulme +extension/encoding/jsonlogencodingextension/ @open-telemetry/collector-contrib-approvers @VihasMakwana @atoulme +extension/encoding/otlpencodingextension/ @open-telemetry/collector-contrib-approvers @dao-jun @VihasMakwana +extension/encoding/textencodingextension/ @open-telemetry/collector-contrib-approvers @MovieStoreGuy @atoulme +extension/encoding/zipkinencodingextension/ @open-telemetry/collector-contrib-approvers @MovieStoreGuy @dao-jun +extension/googleclientauthextension/ @open-telemetry/collector-contrib-approvers @dashpole @aabmass @jsuereth @punya @psx95 +extension/headerssetterextension/ @open-telemetry/collector-contrib-approvers @jpkrohling +extension/healthcheckextension/ @open-telemetry/collector-contrib-approvers @jpkrohling +extension/healthcheckv2extension/ @open-telemetry/collector-contrib-approvers @jpkrohling @mwear +extension/httpforwarderextension/ @open-telemetry/collector-contrib-approvers @atoulme +extension/jaegerremotesampling/ @open-telemetry/collector-contrib-approvers @yurishkuro @frzifus +extension/oauth2clientauthextension/ @open-telemetry/collector-contrib-approvers @pavankrish123 @jpkrohling +extension/observer/ @open-telemetry/collector-contrib-approvers @dmitryax +extension/observer/cfgardenobserver/ @open-telemetry/collector-contrib-approvers @crobert-1 @cemdk @m1rp @jriguera +extension/observer/dockerobserver/ @open-telemetry/collector-contrib-approvers @MovieStoreGuy +extension/observer/ecsobserver/ @open-telemetry/collector-contrib-approvers @dmitryax +extension/observer/hostobserver/ @open-telemetry/collector-contrib-approvers @MovieStoreGuy +extension/observer/k8sobserver/ @open-telemetry/collector-contrib-approvers @dmitryax @ChrsMark +extension/oidcauthextension/ @open-telemetry/collector-contrib-approvers @jpkrohling +extension/opampcustommessages/ @open-telemetry/collector-contrib-approvers @evan-bradley +extension/opampextension/ @open-telemetry/collector-contrib-approvers @portertech @evan-bradley @tigrannajaryan +extension/pprofextension/ @open-telemetry/collector-contrib-approvers @MovieStoreGuy 
+extension/remotetapextension/ @open-telemetry/collector-contrib-approvers @atoulme +extension/sigv4authextension/ @open-telemetry/collector-contrib-approvers @Aneurysm9 @erichsueh3 +extension/solarwindsapmsettingsextension/ @open-telemetry/collector-contrib-approvers @jerrytfleung @cheempz +extension/storage/ @open-telemetry/collector-contrib-approvers @dmitryax @atoulme @djaglowski +extension/storage/dbstorage/ @open-telemetry/collector-contrib-approvers @dmitryax @atoulme +extension/storage/filestorage/ @open-telemetry/collector-contrib-approvers @djaglowski +extension/storage/redisstorageextension/ @open-telemetry/collector-contrib-approvers @atoulme +extension/sumologicextension/ @open-telemetry/collector-contrib-approvers @rnishtala-sumo @chan-tim-sumo -internal/aws/ @open-telemetry/collector-contrib-approvers @Aneurysm9 @mxiamxia -internal/collectd/ @open-telemetry/collector-contrib-approvers @atoulme -internal/coreinternal/ @open-telemetry/collector-contrib-approvers @open-telemetry/collector-approvers -internal/docker/ @open-telemetry/collector-contrib-approvers @jamesmoessis @MovieStoreGuy -internal/exp/metrics/ @open-telemetry/collector-contrib-approvers @sh0rez @RichieSams -internal/filter/ @open-telemetry/collector-contrib-approvers @open-telemetry/collector-approvers -internal/grpcutil/ @open-telemetry/collector-contrib-approvers @jmacd @moh-osman3 @lquerel -internal/k8sconfig/ @open-telemetry/collector-contrib-approvers @dmitryax -internal/k8stest/ @open-telemetry/collector-contrib-approvers @crobert-1 -internal/kafka/ @open-telemetry/collector-contrib-approvers @pavolloffay @MovieStoreGuy -internal/kubelet/ @open-telemetry/collector-contrib-approvers @dmitryax -internal/metadataproviders/ @open-telemetry/collector-contrib-approvers @Aneurysm9 @dashpole -internal/otelarrow/ @open-telemetry/collector-contrib-approvers @jmacd @moh-osman3 -internal/pdatautil/ @open-telemetry/collector-contrib-approvers @djaglowski -internal/rabbitmq/ @open-telemetry/collector-contrib-approvers @swar8080 @atoulme -internal/sharedcomponent/ @open-telemetry/collector-contrib-approvers @open-telemetry/collector-approvers -internal/splunk/ @open-telemetry/collector-contrib-approvers @dmitryax -internal/sqlquery/ @open-telemetry/collector-contrib-approvers @crobert-1 @dmitryax -internal/tools/ @open-telemetry/collector-contrib-approvers +internal/aws/ @open-telemetry/collector-contrib-approvers @Aneurysm9 @mxiamxia +internal/collectd/ @open-telemetry/collector-contrib-approvers @atoulme +internal/coreinternal/ @open-telemetry/collector-contrib-approvers @open-telemetry/collector-approvers +internal/docker/ @open-telemetry/collector-contrib-approvers @jamesmoessis @MovieStoreGuy +internal/exp/metrics/ @open-telemetry/collector-contrib-approvers @sh0rez @RichieSams +internal/filter/ @open-telemetry/collector-contrib-approvers @open-telemetry/collector-approvers +internal/grpcutil/ @open-telemetry/collector-contrib-approvers @jmacd @moh-osman3 @lquerel +internal/k8sconfig/ @open-telemetry/collector-contrib-approvers @dmitryax +internal/k8stest/ @open-telemetry/collector-contrib-approvers @crobert-1 +internal/kafka/ @open-telemetry/collector-contrib-approvers @pavolloffay @MovieStoreGuy +internal/kubelet/ @open-telemetry/collector-contrib-approvers @dmitryax +internal/metadataproviders/ @open-telemetry/collector-contrib-approvers @Aneurysm9 @dashpole +internal/otelarrow/ @open-telemetry/collector-contrib-approvers @jmacd @moh-osman3 +internal/pdatautil/ @open-telemetry/collector-contrib-approvers 
@djaglowski +internal/rabbitmq/ @open-telemetry/collector-contrib-approvers @swar8080 @atoulme +internal/sharedcomponent/ @open-telemetry/collector-contrib-approvers @open-telemetry/collector-approvers +internal/splunk/ @open-telemetry/collector-contrib-approvers @dmitryax +internal/sqlquery/ @open-telemetry/collector-contrib-approvers @crobert-1 @dmitryax +internal/tools/ @open-telemetry/collector-contrib-approvers -pkg/batchperresourceattr/ @open-telemetry/collector-contrib-approvers @atoulme @dmitryax -pkg/batchpersignal/ @open-telemetry/collector-contrib-approvers @jpkrohling -pkg/datadog/ @open-telemetry/collector-contrib-approvers @mx-psi @dineshg13 @liustanley @songy23 @mackjmr @ankitpatel96 -pkg/experimentalmetricmetadata/ @open-telemetry/collector-contrib-approvers @dmitryax -pkg/golden/ @open-telemetry/collector-contrib-approvers @djaglowski @atoulme -pkg/kafka/topic/ @open-telemetry/collector-contrib-approvers @pavolloffay @MovieStoreGuy -pkg/ottl/ @open-telemetry/collector-contrib-approvers @TylerHelmuth @kentquirk @bogdandrutu @evan-bradley -pkg/pdatatest/ @open-telemetry/collector-contrib-approvers @djaglowski @fatsheep9146 -pkg/pdatautil/ @open-telemetry/collector-contrib-approvers @dmitryax -pkg/resourcetotelemetry/ @open-telemetry/collector-contrib-approvers @mx-psi -pkg/sampling/ @open-telemetry/collector-contrib-approvers @kentquirk @jmacd -pkg/stanza/ @open-telemetry/collector-contrib-approvers @djaglowski -pkg/stanza/fileconsumer/ @open-telemetry/collector-contrib-approvers @djaglowski -pkg/status/ @open-telemetry/collector-contrib-approvers @jpkrohling @mwear -pkg/translator/azure/ @open-telemetry/collector-contrib-approvers @open-telemetry/collector-approvers @atoulme @cparkins -pkg/translator/azurelogs/ @open-telemetry/collector-contrib-approvers @atoulme @cparkins @MikeGoldsmith -pkg/translator/jaeger/ @open-telemetry/collector-contrib-approvers @open-telemetry/collector-approvers @frzifus -pkg/translator/loki/ @open-telemetry/collector-contrib-approvers @gouthamve @jpkrohling @mar4uk -pkg/translator/opencensus/ @open-telemetry/collector-contrib-approvers @open-telemetry/collector-approvers -pkg/translator/prometheus/ @open-telemetry/collector-contrib-approvers @dashpole @bertysentry -pkg/translator/prometheusremotewrite/ @open-telemetry/collector-contrib-approvers @Aneurysm9 @dashpole -pkg/translator/signalfx/ @open-telemetry/collector-contrib-approvers @dmitryax -pkg/translator/skywalking/ @open-telemetry/collector-contrib-approvers @JaredTan95 -pkg/translator/zipkin/ @open-telemetry/collector-contrib-approvers @MovieStoreGuy @andrzej-stencel @crobert-1 -pkg/winperfcounters/ @open-telemetry/collector-contrib-approvers @dashpole @Mrod1598 @alxbl @pjanotti +pkg/batchperresourceattr/ @open-telemetry/collector-contrib-approvers @atoulme @dmitryax +pkg/batchpersignal/ @open-telemetry/collector-contrib-approvers @jpkrohling +pkg/datadog/ @open-telemetry/collector-contrib-approvers @mx-psi @dineshg13 @liustanley @songy23 @mackjmr @ankitpatel96 +pkg/experimentalmetricmetadata/ @open-telemetry/collector-contrib-approvers @dmitryax +pkg/golden/ @open-telemetry/collector-contrib-approvers @djaglowski @atoulme +pkg/kafka/topic/ @open-telemetry/collector-contrib-approvers @pavolloffay @MovieStoreGuy +pkg/ottl/ @open-telemetry/collector-contrib-approvers @TylerHelmuth @kentquirk @bogdandrutu @evan-bradley +pkg/pdatatest/ @open-telemetry/collector-contrib-approvers @djaglowski @fatsheep9146 +pkg/pdatautil/ @open-telemetry/collector-contrib-approvers @dmitryax 
+pkg/resourcetotelemetry/ @open-telemetry/collector-contrib-approvers @mx-psi +pkg/sampling/ @open-telemetry/collector-contrib-approvers @kentquirk @jmacd +pkg/stanza/ @open-telemetry/collector-contrib-approvers @djaglowski +pkg/stanza/fileconsumer/ @open-telemetry/collector-contrib-approvers @djaglowski +pkg/status/ @open-telemetry/collector-contrib-approvers @jpkrohling @mwear +pkg/translator/azure/ @open-telemetry/collector-contrib-approvers @open-telemetry/collector-approvers @atoulme @cparkins +pkg/translator/azurelogs/ @open-telemetry/collector-contrib-approvers @atoulme @cparkins @MikeGoldsmith +pkg/translator/jaeger/ @open-telemetry/collector-contrib-approvers @open-telemetry/collector-approvers @frzifus +pkg/translator/loki/ @open-telemetry/collector-contrib-approvers @gouthamve @jpkrohling @mar4uk +pkg/translator/opencensus/ @open-telemetry/collector-contrib-approvers @open-telemetry/collector-approvers +pkg/translator/prometheus/ @open-telemetry/collector-contrib-approvers @dashpole @bertysentry +pkg/translator/prometheusremotewrite/ @open-telemetry/collector-contrib-approvers @Aneurysm9 @dashpole +pkg/translator/signalfx/ @open-telemetry/collector-contrib-approvers @dmitryax +pkg/translator/skywalking/ @open-telemetry/collector-contrib-approvers @JaredTan95 +pkg/translator/zipkin/ @open-telemetry/collector-contrib-approvers @MovieStoreGuy @andrzej-stencel @crobert-1 +pkg/winperfcounters/ @open-telemetry/collector-contrib-approvers @dashpole @Mrod1598 @alxbl @pjanotti -processor/attributesprocessor/ @open-telemetry/collector-contrib-approvers @boostchicken -processor/coralogixprocessor/ @open-telemetry/collector-contrib-approvers @crobert-1 @galrose -processor/cumulativetodeltaprocessor/ @open-telemetry/collector-contrib-approvers @TylerHelmuth -processor/deltatocumulativeprocessor/ @open-telemetry/collector-contrib-approvers @sh0rez @RichieSams @jpkrohling -processor/deltatorateprocessor/ @open-telemetry/collector-contrib-approvers @Aneurysm9 -processor/filterprocessor/ @open-telemetry/collector-contrib-approvers @TylerHelmuth @boostchicken -processor/geoipprocessor/ @open-telemetry/collector-contrib-approvers @andrzej-stencel @michalpristas @rogercoll -processor/groupbyattrsprocessor/ @open-telemetry/collector-contrib-approvers @rnishtala-sumo -processor/groupbytraceprocessor/ @open-telemetry/collector-contrib-approvers @jpkrohling -processor/intervalprocessor/ @open-telemetry/collector-contrib-approvers @RichieSams @sh0rez @djaglowski -processor/k8sattributesprocessor/ @open-telemetry/collector-contrib-approvers @dmitryax @fatsheep9146 @TylerHelmuth -processor/logdedupprocessor/ @open-telemetry/collector-contrib-approvers @MikeGoldsmith @djaglowski -processor/logstransformprocessor/ @open-telemetry/collector-contrib-approvers @djaglowski @dehaansa -processor/metricsgenerationprocessor/ @open-telemetry/collector-contrib-approvers @Aneurysm9 -processor/metricstransformprocessor/ @open-telemetry/collector-contrib-approvers @dmitryax -processor/probabilisticsamplerprocessor/ @open-telemetry/collector-contrib-approvers @jpkrohling @jmacd -processor/redactionprocessor/ @open-telemetry/collector-contrib-approvers @dmitryax @mx-psi @TylerHelmuth -processor/remotetapprocessor/ @open-telemetry/collector-contrib-approvers @atoulme @jaronoff97 -processor/resourcedetectionprocessor/ @open-telemetry/collector-contrib-approvers @Aneurysm9 @dashpole -processor/resourcedetectionprocessor/internal/aws/ec2/ @open-telemetry/collector-contrib-approvers 
-processor/resourcedetectionprocessor/internal/aws/ecs/ @open-telemetry/collector-contrib-approvers -processor/resourcedetectionprocessor/internal/aws/eks/ @open-telemetry/collector-contrib-approvers -processor/resourcedetectionprocessor/internal/aws/elasticbeanstalk/ @open-telemetry/collector-contrib-approvers -processor/resourcedetectionprocessor/internal/aws/lambda/ @open-telemetry/collector-contrib-approvers -processor/resourcedetectionprocessor/internal/azure/ @open-telemetry/collector-contrib-approvers @mx-psi -processor/resourcedetectionprocessor/internal/azure/aks/ @open-telemetry/collector-contrib-approvers -processor/resourcedetectionprocessor/internal/consul/ @open-telemetry/collector-contrib-approvers -processor/resourcedetectionprocessor/internal/docker/ @open-telemetry/collector-contrib-approvers -processor/resourcedetectionprocessor/internal/gcp/ @open-telemetry/collector-contrib-approvers -processor/resourcedetectionprocessor/internal/heroku/ @open-telemetry/collector-contrib-approvers @atoulme -processor/resourcedetectionprocessor/internal/k8snode/ @open-telemetry/collector-contrib-approvers -processor/resourcedetectionprocessor/internal/openshift/ @open-telemetry/collector-contrib-approvers @frzifus -processor/resourcedetectionprocessor/internal/system/ @open-telemetry/collector-contrib-approvers -processor/resourceprocessor/ @open-telemetry/collector-contrib-approvers @dmitryax -processor/routingprocessor/ @open-telemetry/collector-contrib-approvers @jpkrohling -processor/schemaprocessor/ @open-telemetry/collector-contrib-approvers @MovieStoreGuy -processor/spanprocessor/ @open-telemetry/collector-contrib-approvers @boostchicken -processor/sumologicprocessor/ @open-telemetry/collector-contrib-approvers @rnishtala-sumo @chan-tim-sumo -processor/tailsamplingprocessor/ @open-telemetry/collector-contrib-approvers @jpkrohling -processor/transformprocessor/ @open-telemetry/collector-contrib-approvers @TylerHelmuth @kentquirk @bogdandrutu @evan-bradley +processor/attributesprocessor/ @open-telemetry/collector-contrib-approvers @boostchicken +processor/coralogixprocessor/ @open-telemetry/collector-contrib-approvers @crobert-1 @galrose +processor/cumulativetodeltaprocessor/ @open-telemetry/collector-contrib-approvers @TylerHelmuth +processor/deltatocumulativeprocessor/ @open-telemetry/collector-contrib-approvers @sh0rez @RichieSams @jpkrohling +processor/deltatorateprocessor/ @open-telemetry/collector-contrib-approvers @Aneurysm9 +processor/filterprocessor/ @open-telemetry/collector-contrib-approvers @TylerHelmuth @boostchicken +processor/geoipprocessor/ @open-telemetry/collector-contrib-approvers @andrzej-stencel @michalpristas @rogercoll +processor/groupbyattrsprocessor/ @open-telemetry/collector-contrib-approvers @rnishtala-sumo +processor/groupbytraceprocessor/ @open-telemetry/collector-contrib-approvers @jpkrohling +processor/intervalprocessor/ @open-telemetry/collector-contrib-approvers @RichieSams @sh0rez @djaglowski +processor/k8sattributesprocessor/ @open-telemetry/collector-contrib-approvers @dmitryax @fatsheep9146 @TylerHelmuth +processor/logdedupprocessor/ @open-telemetry/collector-contrib-approvers @MikeGoldsmith @djaglowski +processor/logstransformprocessor/ @open-telemetry/collector-contrib-approvers @djaglowski @dehaansa +processor/metricsgenerationprocessor/ @open-telemetry/collector-contrib-approvers @Aneurysm9 +processor/metricstransformprocessor/ @open-telemetry/collector-contrib-approvers @dmitryax +processor/probabilisticsamplerprocessor/ 
@open-telemetry/collector-contrib-approvers @jpkrohling @jmacd +processor/redactionprocessor/ @open-telemetry/collector-contrib-approvers @dmitryax @mx-psi @TylerHelmuth +processor/remotetapprocessor/ @open-telemetry/collector-contrib-approvers @atoulme @jaronoff97 +processor/resourcedetectionprocessor/ @open-telemetry/collector-contrib-approvers @Aneurysm9 @dashpole +processor/resourceprocessor/ @open-telemetry/collector-contrib-approvers @dmitryax +processor/routingprocessor/ @open-telemetry/collector-contrib-approvers @jpkrohling +processor/schemaprocessor/ @open-telemetry/collector-contrib-approvers @MovieStoreGuy +processor/spanprocessor/ @open-telemetry/collector-contrib-approvers @boostchicken +processor/sumologicprocessor/ @open-telemetry/collector-contrib-approvers @rnishtala-sumo @chan-tim-sumo +processor/tailsamplingprocessor/ @open-telemetry/collector-contrib-approvers @jpkrohling +processor/transformprocessor/ @open-telemetry/collector-contrib-approvers @TylerHelmuth @kentquirk @bogdandrutu @evan-bradley -receiver/activedirectorydsreceiver/ @open-telemetry/collector-contrib-approvers @pjanotti -receiver/aerospikereceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @antonblock -receiver/apachereceiver/ @open-telemetry/collector-contrib-approvers @djaglowski -receiver/apachesparkreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @Caleb-Hurshman @mrsillydog -receiver/awscloudwatchmetricsreceiver/ @open-telemetry/collector-contrib-approvers @jpkrohling -receiver/awscloudwatchreceiver/ @open-telemetry/collector-contrib-approvers @schmikei -receiver/awscontainerinsightreceiver/ @open-telemetry/collector-contrib-approvers @Aneurysm9 @pxaws -receiver/awsecscontainermetricsreceiver/ @open-telemetry/collector-contrib-approvers @Aneurysm9 -receiver/awsfirehosereceiver/ @open-telemetry/collector-contrib-approvers @Aneurysm9 -receiver/awss3receiver/ @open-telemetry/collector-contrib-approvers @atoulme @adcharre -receiver/awsxrayreceiver/ @open-telemetry/collector-contrib-approvers @wangzlei @srprash -receiver/azureblobreceiver/ @open-telemetry/collector-contrib-approvers @eedorenko @mx-psi -receiver/azureeventhubreceiver/ @open-telemetry/collector-contrib-approvers @atoulme @cparkins -receiver/azuremonitorreceiver/ @open-telemetry/collector-contrib-approvers @nslaughter @codeboten -receiver/bigipreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @StefanKurek -receiver/carbonreceiver/ @open-telemetry/collector-contrib-approvers @aboguszewski-sumo -receiver/chronyreceiver/ @open-telemetry/collector-contrib-approvers @MovieStoreGuy @jamesmoessis -receiver/cloudflarereceiver/ @open-telemetry/collector-contrib-approvers @dehaansa @djaglowski -receiver/cloudfoundryreceiver/ @open-telemetry/collector-contrib-approvers @crobert-1 -receiver/collectdreceiver/ @open-telemetry/collector-contrib-approvers @atoulme -receiver/couchdbreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski -receiver/datadogreceiver/ @open-telemetry/collector-contrib-approvers @boostchicken @gouthamve @jpkrohling @MovieStoreGuy -receiver/dockerstatsreceiver/ @open-telemetry/collector-contrib-approvers @jamesmoessis -receiver/elasticsearchreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski -receiver/expvarreceiver/ @open-telemetry/collector-contrib-approvers @jamesmoessis @MovieStoreGuy -receiver/filelogreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski -receiver/filestatsreceiver/ @open-telemetry/collector-contrib-approvers @atoulme 
-receiver/flinkmetricsreceiver/ @open-telemetry/collector-contrib-approvers @JonathanWamsley @djaglowski -receiver/fluentforwardreceiver/ @open-telemetry/collector-contrib-approvers @dmitryax -receiver/githubreceiver/ @open-telemetry/collector-contrib-approvers @adrielp @andrzej-stencel @crobert-1 @TylerHelmuth -receiver/googlecloudmonitoringreceiver/ @open-telemetry/collector-contrib-approvers @dashpole @TylerHelmuth @abhishek-at-cloudwerx -receiver/googlecloudpubsubreceiver/ @open-telemetry/collector-contrib-approvers @alexvanboxel -receiver/googlecloudspannerreceiver/ @open-telemetry/collector-contrib-approvers @dashpole @dsimil @KiranmayiB @harishbohara11 -receiver/haproxyreceiver/ @open-telemetry/collector-contrib-approvers @atoulme @MovieStoreGuy -receiver/hostmetricsreceiver/ @open-telemetry/collector-contrib-approvers @dmitryax @braydonk -receiver/httpcheckreceiver/ @open-telemetry/collector-contrib-approvers @codeboten -receiver/iisreceiver/ @open-telemetry/collector-contrib-approvers @Mrod1598 @pjanotti -receiver/influxdbreceiver/ @open-telemetry/collector-contrib-approvers @jacobmarble -receiver/jaegerreceiver/ @open-telemetry/collector-contrib-approvers @yurishkuro -receiver/journaldreceiver/ @open-telemetry/collector-contrib-approvers @sumo-drosiek @djaglowski -receiver/k8sclusterreceiver/ @open-telemetry/collector-contrib-approvers @dmitryax @TylerHelmuth @povilasv -receiver/k8seventsreceiver/ @open-telemetry/collector-contrib-approvers @dmitryax @TylerHelmuth -receiver/k8sobjectsreceiver/ @open-telemetry/collector-contrib-approvers @dmitryax @hvaghani221 @TylerHelmuth -receiver/kafkametricsreceiver/ @open-telemetry/collector-contrib-approvers @dmitryax -receiver/kafkareceiver/ @open-telemetry/collector-contrib-approvers @pavolloffay @MovieStoreGuy -receiver/kubeletstatsreceiver/ @open-telemetry/collector-contrib-approvers @dmitryax @TylerHelmuth @ChrsMark -receiver/lokireceiver/ @open-telemetry/collector-contrib-approvers @mar4uk @jpkrohling -receiver/memcachedreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski -receiver/mongodbatlasreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @schmikei -receiver/mongodbreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @schmikei -receiver/mysqlreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski -receiver/namedpipereceiver/ @open-telemetry/collector-contrib-approvers @sinkingpoint @djaglowski -receiver/nginxreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski -receiver/nsxtreceiver/ @open-telemetry/collector-contrib-approvers @dashpole @schmikei -receiver/ntpreceiver/ @open-telemetry/collector-contrib-approvers @atoulme -receiver/opencensusreceiver/ @open-telemetry/collector-contrib-approvers @open-telemetry/collector-approvers -receiver/oracledbreceiver/ @open-telemetry/collector-contrib-approvers @dmitryax @crobert-1 @atoulme -receiver/osqueryreceiver/ @open-telemetry/collector-contrib-approvers @codeboten @nslaughter @smithclay -receiver/otelarrowreceiver/ @open-telemetry/collector-contrib-approvers @jmacd @moh-osman3 -receiver/otlpjsonfilereceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @atoulme -receiver/podmanreceiver/ @open-telemetry/collector-contrib-approvers @rogercoll -receiver/postgresqlreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski -receiver/prometheusreceiver/ @open-telemetry/collector-contrib-approvers @Aneurysm9 @dashpole -receiver/prometheusremotewritereceiver/ @open-telemetry/collector-contrib-approvers @dashpole 
@ArthurSens -receiver/pulsarreceiver/ @open-telemetry/collector-contrib-approvers @dmitryax @dao-jun -receiver/purefareceiver/ @open-telemetry/collector-contrib-approvers @jpkrohling @dgoscn @chrroberts-pure -receiver/purefbreceiver/ @open-telemetry/collector-contrib-approvers @jpkrohling @dgoscn @chrroberts-pure -receiver/rabbitmqreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @cpheps -receiver/receivercreator/ @open-telemetry/collector-contrib-approvers @dmitryax -receiver/redisreceiver/ @open-telemetry/collector-contrib-approvers @dmitryax @hughesjj -receiver/riakreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @armstrmi -receiver/saphanareceiver/ @open-telemetry/collector-contrib-approvers @dehaansa -receiver/sapmreceiver/ @open-telemetry/collector-contrib-approvers @atoulme -receiver/signalfxreceiver/ @open-telemetry/collector-contrib-approvers @dmitryax -receiver/simpleprometheusreceiver/ @open-telemetry/collector-contrib-approvers @fatsheep9146 -receiver/skywalkingreceiver/ @open-telemetry/collector-contrib-approvers @JaredTan95 -receiver/snmpreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @StefanKurek @tamir-michaeli -receiver/snowflakereceiver/ @open-telemetry/collector-contrib-approvers @dmitryax @shalper2 -receiver/solacereceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @mcardy -receiver/splunkenterprisereceiver/ @open-telemetry/collector-contrib-approvers @shalper2 @MovieStoreGuy @greatestusername -receiver/splunkhecreceiver/ @open-telemetry/collector-contrib-approvers @atoulme -receiver/sqlqueryreceiver/ @open-telemetry/collector-contrib-approvers @dmitryax @crobert-1 -receiver/sqlserverreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @StefanKurek -receiver/sshcheckreceiver/ @open-telemetry/collector-contrib-approvers @nslaughter @codeboten -receiver/statsdreceiver/ @open-telemetry/collector-contrib-approvers @jmacd @dmitryax -receiver/syslogreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @andrzej-stencel -receiver/systemdreceiver/ @open-telemetry/collector-contrib-approvers @Hemansh31 @atoulme -receiver/tcplogreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski -receiver/tlscheckreceiver/ @open-telemetry/collector-contrib-approvers @atoulme @michael-burt -receiver/udplogreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski -receiver/vcenterreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @schmikei @StefanKurek -receiver/wavefrontreceiver/ @open-telemetry/collector-contrib-approvers @samiura -receiver/webhookeventreceiver/ @open-telemetry/collector-contrib-approvers @atoulme @shalper2 -receiver/windowseventlogreceiver/ @open-telemetry/collector-contrib-approvers @armstrmi @pjanotti -receiver/windowsperfcountersreceiver/ @open-telemetry/collector-contrib-approvers @dashpole @alxbl @pjanotti -receiver/zipkinreceiver/ @open-telemetry/collector-contrib-approvers @MovieStoreGuy @andrzej-stencel @crobert-1 -receiver/zookeeperreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski +receiver/activedirectorydsreceiver/ @open-telemetry/collector-contrib-approvers @pjanotti +receiver/aerospikereceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @antonblock +receiver/apachereceiver/ @open-telemetry/collector-contrib-approvers @djaglowski +receiver/apachesparkreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @Caleb-Hurshman @mrsillydog +receiver/awscloudwatchmetricsreceiver/ 
@open-telemetry/collector-contrib-approvers @jpkrohling +receiver/awscloudwatchreceiver/ @open-telemetry/collector-contrib-approvers @schmikei +receiver/awscontainerinsightreceiver/ @open-telemetry/collector-contrib-approvers @Aneurysm9 @pxaws +receiver/awsecscontainermetricsreceiver/ @open-telemetry/collector-contrib-approvers @Aneurysm9 +receiver/awsfirehosereceiver/ @open-telemetry/collector-contrib-approvers @Aneurysm9 +receiver/awss3receiver/ @open-telemetry/collector-contrib-approvers @atoulme @adcharre +receiver/awsxrayreceiver/ @open-telemetry/collector-contrib-approvers @wangzlei @srprash +receiver/azureblobreceiver/ @open-telemetry/collector-contrib-approvers @eedorenko @mx-psi +receiver/azureeventhubreceiver/ @open-telemetry/collector-contrib-approvers @atoulme @cparkins +receiver/azuremonitorreceiver/ @open-telemetry/collector-contrib-approvers @nslaughter @codeboten +receiver/bigipreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @StefanKurek +receiver/carbonreceiver/ @open-telemetry/collector-contrib-approvers @aboguszewski-sumo +receiver/chronyreceiver/ @open-telemetry/collector-contrib-approvers @MovieStoreGuy @jamesmoessis +receiver/cloudflarereceiver/ @open-telemetry/collector-contrib-approvers @dehaansa @djaglowski +receiver/cloudfoundryreceiver/ @open-telemetry/collector-contrib-approvers @crobert-1 +receiver/collectdreceiver/ @open-telemetry/collector-contrib-approvers @atoulme +receiver/couchdbreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski +receiver/datadogreceiver/ @open-telemetry/collector-contrib-approvers @boostchicken @gouthamve @jpkrohling @MovieStoreGuy +receiver/dockerstatsreceiver/ @open-telemetry/collector-contrib-approvers @jamesmoessis +receiver/elasticsearchreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski +receiver/expvarreceiver/ @open-telemetry/collector-contrib-approvers @jamesmoessis @MovieStoreGuy +receiver/filelogreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski +receiver/filestatsreceiver/ @open-telemetry/collector-contrib-approvers @atoulme +receiver/flinkmetricsreceiver/ @open-telemetry/collector-contrib-approvers @JonathanWamsley @djaglowski +receiver/fluentforwardreceiver/ @open-telemetry/collector-contrib-approvers @dmitryax +receiver/githubreceiver/ @open-telemetry/collector-contrib-approvers @adrielp @andrzej-stencel @crobert-1 @TylerHelmuth +receiver/googlecloudmonitoringreceiver/ @open-telemetry/collector-contrib-approvers @dashpole @TylerHelmuth @abhishek-at-cloudwerx +receiver/googlecloudpubsubreceiver/ @open-telemetry/collector-contrib-approvers @alexvanboxel +receiver/googlecloudspannerreceiver/ @open-telemetry/collector-contrib-approvers @dashpole @dsimil @KiranmayiB @harishbohara11 +receiver/haproxyreceiver/ @open-telemetry/collector-contrib-approvers @atoulme @MovieStoreGuy +receiver/hostmetricsreceiver/ @open-telemetry/collector-contrib-approvers @dmitryax @braydonk +receiver/httpcheckreceiver/ @open-telemetry/collector-contrib-approvers @codeboten +receiver/iisreceiver/ @open-telemetry/collector-contrib-approvers @Mrod1598 @pjanotti +receiver/influxdbreceiver/ @open-telemetry/collector-contrib-approvers @jacobmarble +receiver/jaegerreceiver/ @open-telemetry/collector-contrib-approvers @yurishkuro +receiver/journaldreceiver/ @open-telemetry/collector-contrib-approvers @sumo-drosiek @djaglowski +receiver/k8sclusterreceiver/ @open-telemetry/collector-contrib-approvers @dmitryax @TylerHelmuth @povilasv +receiver/k8seventsreceiver/ 
@open-telemetry/collector-contrib-approvers @dmitryax @TylerHelmuth +receiver/k8sobjectsreceiver/ @open-telemetry/collector-contrib-approvers @dmitryax @hvaghani221 @TylerHelmuth +receiver/kafkametricsreceiver/ @open-telemetry/collector-contrib-approvers @dmitryax +receiver/kafkareceiver/ @open-telemetry/collector-contrib-approvers @pavolloffay @MovieStoreGuy +receiver/kubeletstatsreceiver/ @open-telemetry/collector-contrib-approvers @dmitryax @TylerHelmuth @ChrsMark +receiver/lokireceiver/ @open-telemetry/collector-contrib-approvers @mar4uk @jpkrohling +receiver/memcachedreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski +receiver/mongodbatlasreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @schmikei +receiver/mongodbreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @schmikei +receiver/mysqlreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski +receiver/namedpipereceiver/ @open-telemetry/collector-contrib-approvers @sinkingpoint @djaglowski +receiver/nginxreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski +receiver/nsxtreceiver/ @open-telemetry/collector-contrib-approvers @dashpole @schmikei +receiver/ntpreceiver/ @open-telemetry/collector-contrib-approvers @atoulme +receiver/opencensusreceiver/ @open-telemetry/collector-contrib-approvers @open-telemetry/collector-approvers +receiver/oracledbreceiver/ @open-telemetry/collector-contrib-approvers @dmitryax @crobert-1 @atoulme +receiver/osqueryreceiver/ @open-telemetry/collector-contrib-approvers @codeboten @nslaughter @smithclay +receiver/otelarrowreceiver/ @open-telemetry/collector-contrib-approvers @jmacd @moh-osman3 +receiver/otlpjsonfilereceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @atoulme +receiver/podmanreceiver/ @open-telemetry/collector-contrib-approvers @rogercoll +receiver/postgresqlreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski +receiver/prometheusreceiver/ @open-telemetry/collector-contrib-approvers @Aneurysm9 @dashpole +receiver/prometheusremotewritereceiver/ @open-telemetry/collector-contrib-approvers @dashpole @ArthurSens +receiver/pulsarreceiver/ @open-telemetry/collector-contrib-approvers @dmitryax @dao-jun +receiver/purefareceiver/ @open-telemetry/collector-contrib-approvers @jpkrohling @dgoscn @chrroberts-pure +receiver/purefbreceiver/ @open-telemetry/collector-contrib-approvers @jpkrohling @dgoscn @chrroberts-pure +receiver/rabbitmqreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @cpheps +receiver/receivercreator/ @open-telemetry/collector-contrib-approvers @dmitryax +receiver/redisreceiver/ @open-telemetry/collector-contrib-approvers @dmitryax @hughesjj +receiver/riakreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @armstrmi +receiver/saphanareceiver/ @open-telemetry/collector-contrib-approvers @dehaansa +receiver/sapmreceiver/ @open-telemetry/collector-contrib-approvers @atoulme +receiver/signalfxreceiver/ @open-telemetry/collector-contrib-approvers @dmitryax +receiver/simpleprometheusreceiver/ @open-telemetry/collector-contrib-approvers @fatsheep9146 +receiver/skywalkingreceiver/ @open-telemetry/collector-contrib-approvers @JaredTan95 +receiver/snmpreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @StefanKurek @tamir-michaeli +receiver/snowflakereceiver/ @open-telemetry/collector-contrib-approvers @dmitryax @shalper2 +receiver/solacereceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @mcardy +receiver/splunkenterprisereceiver/ 
@open-telemetry/collector-contrib-approvers @shalper2 @MovieStoreGuy @greatestusername +receiver/splunkhecreceiver/ @open-telemetry/collector-contrib-approvers @atoulme +receiver/sqlqueryreceiver/ @open-telemetry/collector-contrib-approvers @dmitryax @crobert-1 +receiver/sqlserverreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @StefanKurek +receiver/sshcheckreceiver/ @open-telemetry/collector-contrib-approvers @nslaughter @codeboten +receiver/statsdreceiver/ @open-telemetry/collector-contrib-approvers @jmacd @dmitryax +receiver/syslogreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @andrzej-stencel +receiver/systemdreceiver/ @open-telemetry/collector-contrib-approvers @Hemansh31 @atoulme +receiver/tcplogreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski +receiver/tlscheckreceiver/ @open-telemetry/collector-contrib-approvers @atoulme @michael-burt +receiver/udplogreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski +receiver/vcenterreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski @schmikei @StefanKurek +receiver/wavefrontreceiver/ @open-telemetry/collector-contrib-approvers @samiura +receiver/webhookeventreceiver/ @open-telemetry/collector-contrib-approvers @atoulme @shalper2 +receiver/windowseventlogreceiver/ @open-telemetry/collector-contrib-approvers @armstrmi @pjanotti +receiver/windowsperfcountersreceiver/ @open-telemetry/collector-contrib-approvers @dashpole @alxbl @pjanotti +receiver/zipkinreceiver/ @open-telemetry/collector-contrib-approvers @MovieStoreGuy @andrzej-stencel @crobert-1 +receiver/zookeeperreceiver/ @open-telemetry/collector-contrib-approvers @djaglowski -testbed/ @open-telemetry/collector-contrib-approvers @open-telemetry/collector-approvers -testbed/mockdatasenders/mockdatadogagentexporter/ @open-telemetry/collector-contrib-approvers @boostchicken +testbed/ @open-telemetry/collector-contrib-approvers @open-telemetry/collector-approvers +testbed/mockdatasenders/mockdatadogagentexporter/ @open-telemetry/collector-contrib-approvers @boostchicken ##################################################### # @@ -322,6 +308,6 @@ reports/distributions/k8s.yaml @open-telemetry/collector-contrib-approvers ## UNMAINTAINED components -exporter/opensearchexporter/ @open-telemetry/collector-contrib-approvers -extension/observer/ecstaskobserver/ @open-telemetry/collector-contrib-approvers -receiver/jmxreceiver/ @open-telemetry/collector-contrib-approvers +exporter/opensearchexporter/ @open-telemetry/collector-contrib-approvers +extension/observer/ecstaskobserver/ @open-telemetry/collector-contrib-approvers +receiver/jmxreceiver/ @open-telemetry/collector-contrib-approvers From f20eaeef57db1ecf5b6b78238577b77661890c33 Mon Sep 17 00:00:00 2001 From: Pablo Baeyens Date: Fri, 8 Nov 2024 10:00:28 +0100 Subject: [PATCH 08/24] [chore] Promote @VihasMakwana to triager (#36257) #### Description Promote @VihasMakwana to triager. 
- PRs reviewed: https://github.com/open-telemetry/opentelemetry-collector-contrib/pulls?q=is%3Apr+reviewed-by%3AVihasMakwana+ - PRs authored: https://github.com/open-telemetry/opentelemetry-collector-contrib/pulls?q=is%3Apr+author%3AVihasMakwana+ - Issues created: https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aissue+author%3AVihasMakwana+ - Issues commented: https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aissue+commenter%3AVihasMakwana+ - Commits: https://github.com/open-telemetry/opentelemetry-collector-contrib/commits?author=VihasMakwana&since=2023-05-31&until=now Fixes #36025 cc @open-telemetry/collector-triagers --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index ab7b48fc23b1..1059c7d5c7cc 100644 --- a/README.md +++ b/README.md @@ -73,6 +73,7 @@ Triagers ([@open-telemetry/collector-contrib-triagers](https://github.com/orgs/o - [Jared Tan](https://github.com/JaredTan95), DaoCloud - [Murphy Chen](https://github.com/Frapschen), DaoCloud - [Paulo Janotti](https://github.com/pjanotti), Splunk +- [Vihas Makwana](https://github.com/VihasMakwana), Elastic - Actively seeking contributors to triage issues Emeritus Triagers: From c40f00839fec4f15e7f96ff09e1a1ab99910cd94 Mon Sep 17 00:00:00 2001 From: Pablo Baeyens Date: Fri, 8 Nov 2024 10:00:39 +0100 Subject: [PATCH 09/24] [chore] Promote @bacherfl as triager (#36256) #### Description Promote @bacherfl to triager. - PRs reviewed: https://github.com/open-telemetry/opentelemetry-collector-contrib/pulls?q=is%3Apr+reviewed-by%3Abacherfl+ - PRs authored: https://github.com/open-telemetry/opentelemetry-collector-contrib/pulls?q=is%3Apr+author%3Abacherfl+ - Issues created: https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aissue+author%3Abacherfl+ - Issues commented: https://github.com/open-telemetry/opentelemetry-collector-contrib/issues?q=is%3Aissue+commenter%3Abacherfl+ - Commits: https://github.com/open-telemetry/opentelemetry-collector-contrib/commits?author=bacherfl&since=2023-05-31&until=now cc @open-telemetry/collector-triagers --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 1059c7d5c7cc..ae1f4e7cf0d0 100644 --- a/README.md +++ b/README.md @@ -70,6 +70,7 @@ See [CONTRIBUTING.md](CONTRIBUTING.md). Triagers ([@open-telemetry/collector-contrib-triagers](https://github.com/orgs/open-telemetry/teams/collector-contrib-triagers)) - [Benedikt Bongartz](https://github.com/frzifus), Red Hat +- [Florian Bacher](https://github.com/bacherfl), Dynatrace - [Jared Tan](https://github.com/JaredTan95), DaoCloud - [Murphy Chen](https://github.com/Frapschen), DaoCloud - [Paulo Janotti](https://github.com/pjanotti), Splunk From 1c2f3222bde9396f8d6decf8b4e7a437cd816d89 Mon Sep 17 00:00:00 2001 From: Roger Coll Date: Fri, 8 Nov 2024 11:32:33 +0100 Subject: [PATCH 10/24] fix root group container permissions (#36170) #### Description Sets a specific GID for the build container's image. 
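As a rough way to see what this changes at runtime, here is a small standalone Go program (not part of this PR; purely illustrative) that prints the numeric UID and GID of the process it runs as. Inside the patched image it should report a non-root group such as 10001 instead of 0, matching the `ps` output in the Testing section below.

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// UID and primary GID of the current process. With the
	// USER ${USER_UID}:${USER_GID} directive both are expected to be
	// non-root (for example 10001:10001); before this change the
	// primary group defaulted to root (0).
	fmt.Printf("uid=%d gid=%d\n", os.Getuid(), os.Getgid())

	// Supplementary groups, where the platform exposes them.
	if groups, err := os.Getgroups(); err == nil {
		fmt.Printf("groups=%v\n", groups)
	}
}
```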
#### Link to tracking issue https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/35179 #### Testing (Manual) ``` $ make docker-otelcontribcol // create a sample config.yaml file $ docker run -v .:/etc/otel/ otelcontribcol $ ps -o user,group,pid,comm -ax | rg otelcontribcol 10001 10001 1903287 otelcontribcol ``` Without the changes: ``` $ ps -o user,group,pid,comm -ax | rg otelcontribcol root root 1940536 otelcontribcol ``` #### Documentation --- .../fix_group_container_permissions.yaml | 27 +++++++++++++++++++ cmd/otelcontribcol/Dockerfile | 3 ++- cmd/telemetrygen/Dockerfile | 3 ++- 3 files changed, 31 insertions(+), 2 deletions(-) create mode 100644 .chloggen/fix_group_container_permissions.yaml diff --git a/.chloggen/fix_group_container_permissions.yaml b/.chloggen/fix_group_container_permissions.yaml new file mode 100644 index 000000000000..965b8af17649 --- /dev/null +++ b/.chloggen/fix_group_container_permissions.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: 'enhancement' + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: container + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Set non root group permissions for container image + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [35179] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [] diff --git a/cmd/otelcontribcol/Dockerfile b/cmd/otelcontribcol/Dockerfile index bb8ef9835707..3c2cd1d372a8 100644 --- a/cmd/otelcontribcol/Dockerfile +++ b/cmd/otelcontribcol/Dockerfile @@ -4,7 +4,8 @@ RUN apk --update add ca-certificates FROM scratch ARG USER_UID=10001 -USER ${USER_UID} +ARG USER_GID=10001 +USER ${USER_UID}:${USER_GID} COPY --from=prep /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt COPY otelcontribcol / diff --git a/cmd/telemetrygen/Dockerfile b/cmd/telemetrygen/Dockerfile index ba1d577e41b4..9704d8f5da63 100644 --- a/cmd/telemetrygen/Dockerfile +++ b/cmd/telemetrygen/Dockerfile @@ -4,7 +4,8 @@ RUN apk --update add ca-certificates FROM scratch ARG USER_UID=10001 -USER ${USER_UID} +ARG USER_GID=10001 +USER ${USER_UID}:${USER_GID} ARG TARGETOS ARG TARGETARCH From 65cd1f9fd9d57324829d63227da9b0340a19954c Mon Sep 17 00:00:00 2001 From: Paulo Janotti Date: Fri, 8 Nov 2024 04:27:22 -0800 Subject: [PATCH 11/24] [chore] Skip dbstorage extension tests on Windows GH runners (#36267) --- extension/storage/dbstorage/extension_test.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/extension/storage/dbstorage/extension_test.go b/extension/storage/dbstorage/extension_test.go index 5b38f466e65f..b888c88321b3 100644 --- a/extension/storage/dbstorage/extension_test.go +++ b/extension/storage/dbstorage/extension_test.go @@ -6,6 +6,8 @@ package dbstorage import ( "context" "fmt" + "os" + "runtime" "sync" "testing" @@ -22,10 +24,18 @@ import ( ) func TestExtensionIntegrityWithSqlite(t *testing.T) { + if runtime.GOOS == "windows" && os.Getenv("GITHUB_ACTIONS") == "true" { + t.Skip("Skipping test on Windows GH runners: test requires Docker to be running Linux containers") + } + testExtensionIntegrity(t, newSqliteTestExtension(t)) } func TestExtensionIntegrityWithPostgres(t *testing.T) { + if runtime.GOOS == "windows" && os.Getenv("GITHUB_ACTIONS") == "true" { + t.Skip("Skipping test on Windows GH runners: test requires Docker to be running Linux containers") + } + testExtensionIntegrity(t, newPostgresTestExtension(t)) } From 8ec618036ab6f2e4f2f871f76a76061d9252e77b Mon Sep 17 00:00:00 2001 From: Carson Ip Date: Fri, 8 Nov 2024 14:00:27 +0000 Subject: [PATCH 12/24] [exporter/elasticsearch] Translate `k8s.*.name` resource attributes in ECS mode (#36233) #### Description In ECS mode: Translate `k8s.job.name`, `k8s.cronjob.name`, `k8s.statefulset.name`, `k8s.replicaset.name`, `k8s.daemonset.name`, `k8s.container.name` to `kubernetes.*.name`. Translate `k8s.cluster.name` to `orchestrator.cluster.name`. #### Link to tracking issue #### Testing #### Documentation --- ...earchexporter_ecs-translate-k8s-names.yaml | 27 +++++++++ exporter/elasticsearchexporter/README.md | 57 +++++++++++-------- exporter/elasticsearchexporter/model.go | 7 +++ exporter/elasticsearchexporter/model_test.go | 16 +++++- 4 files changed, 81 insertions(+), 26 deletions(-) create mode 100644 .chloggen/elasticsearchexporter_ecs-translate-k8s-names.yaml diff --git a/.chloggen/elasticsearchexporter_ecs-translate-k8s-names.yaml b/.chloggen/elasticsearchexporter_ecs-translate-k8s-names.yaml new file mode 100644 index 000000000000..155112f41da7 --- /dev/null +++ b/.chloggen/elasticsearchexporter_ecs-translate-k8s-names.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. 
+ +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: elasticsearchexporter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Translate `k8s.*.name` resource attributes in ECS mode + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [36233] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: Translate `k8s.job.name`, `k8s.cronjob.name`, `k8s.statefulset.name`, `k8s.replicaset.name`, `k8s.daemonset.name`, `k8s.container.name` to `kubernetes.*.name`. Translate `k8s.cluster.name` to `orchestrator.cluster.name`. + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] diff --git a/exporter/elasticsearchexporter/README.md b/exporter/elasticsearchexporter/README.md index 0ac2af2ba445..53a093109557 100644 --- a/exporter/elasticsearchexporter/README.md +++ b/exporter/elasticsearchexporter/README.md @@ -268,31 +268,38 @@ If the target ECS field name is specified as an empty string (""), the converter When "Preserved" is true, the attribute will be preserved in the payload and duplicated as mapped to its ECS equivalent. 
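The full mapping table follows right after this note. As a hedged illustration of how such a rename map with a "preserve" flag can be applied to a set of resource attributes, here is a minimal standalone Go sketch; the `mapping` type, the `translate` helper, and the handful of entries are invented for the example and are not the exporter's actual implementation.

```go
package main

import "fmt"

// mapping says which ECS key a source attribute is renamed to, and whether the
// original key is also kept ("preserved") alongside the ECS key.
type mapping struct {
	ecsKey   string
	preserve bool
}

// conversions is an illustrative subset of the table below, not the full list.
var conversions = map[string]mapping{
	"host.name":          {ecsKey: "host.hostname", preserve: true},
	"k8s.cluster.name":   {ecsKey: "orchestrator.cluster.name", preserve: false},
	"k8s.container.name": {ecsKey: "kubernetes.container.name", preserve: false},
	"telemetry.sdk.name": {ecsKey: "", preserve: false}, // empty target: this sketch drops the attribute
}

func translate(attrs map[string]any) map[string]any {
	out := make(map[string]any, len(attrs))
	for k, v := range attrs {
		m, ok := conversions[k]
		if !ok {
			out[k] = v // no rule: pass the attribute through unchanged
			continue
		}
		if m.ecsKey != "" {
			out[m.ecsKey] = v
		}
		if m.preserve {
			out[k] = v
		}
	}
	return out
}

func main() {
	in := map[string]any{
		"host.name":          "node-1",
		"k8s.cluster.name":   "prod-cluster",
		"service.version":    "1.2.3",
		"telemetry.sdk.name": "opentelemetry",
	}
	// host.name is duplicated as host.hostname and preserved; the k8s and
	// telemetry keys are renamed or dropped according to the map above.
	fmt.Println(translate(in))
}
```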
-| Semantic Convention Name | ECS Name | Preserve | -|--------------------------|----------|----------| -| cloud.platform | cloud.service.name | false | -| container.image.tags | container.image.tag | false | -| deployment.environment | service.environment | false | -| host.arch | host.architecture | false | -| host.name | host.hostname | true | -| k8s.deployment.name | kubernetes.deployment.name | false | -| k8s.namespace.name | kubernetes.namespace | false | -| k8s.node.name | kubernetes.node.name | false | -| k8s.pod.name | kubernetes.pod.name | false | -| k8s.pod.uid | kubernetes.pod.uid | false | -| os.description | host.os.full | false | -| os.name | host.os.name | false | -| os.type | host.os.platform | false | -| os.version | host.os.version | false | -| process.executable.path | process.executable | false | -| process.runtime.name | service.runtime.name | false | -| process.runtime.version | service.runtime.version | false | -| service.instance.id | service.node.name | false | -| telemetry.distro.name | "" | false | -| telemetry.distro.version | "" | false | -| telemetry.sdk.language | "" | false | -| telemetry.sdk.name | "" | false | -| telemetry.sdk.version | "" | false | +| Semantic Convention Name | ECS Name | Preserve | +|--------------------------|-----------------------------|----------| +| cloud.platform | cloud.service.name | false | +| container.image.tags | container.image.tag | false | +| deployment.environment | service.environment | false | +| host.arch | host.architecture | false | +| host.name | host.hostname | true | +| k8s.cluster.name | orchestrator.cluster.name | false | +| k8s.container.name | kubernetes.container.name | false | +| k8s.cronjob.name | kubernetes.cronjob.name | false | +| k8s.daemonset.name | kubernetes.daemonset.name | false | +| k8s.deployment.name | kubernetes.deployment.name | false | +| k8s.job.name | kubernetes.job.name | false | +| k8s.namespace.name | kubernetes.namespace | false | +| k8s.node.name | kubernetes.node.name | false | +| k8s.pod.name | kubernetes.pod.name | false | +| k8s.pod.uid | kubernetes.pod.uid | false | +| k8s.replicaset.name | kubernetes.replicaset.name | false | +| k8s.statefulset.name | kubernetes.statefulset.name | false | +| os.description | host.os.full | false | +| os.name | host.os.name | false | +| os.type | host.os.platform | false | +| os.version | host.os.version | false | +| process.executable.path | process.executable | false | +| process.runtime.name | service.runtime.name | false | +| process.runtime.version | service.runtime.version | false | +| service.instance.id | service.node.name | false | +| telemetry.distro.name | "" | false | +| telemetry.distro.version | "" | false | +| telemetry.sdk.language | "" | false | +| telemetry.sdk.name | "" | false | +| telemetry.sdk.version | "" | false | ### Compound Mapping diff --git a/exporter/elasticsearchexporter/model.go b/exporter/elasticsearchexporter/model.go index 8c71df950752..299cb3902347 100644 --- a/exporter/elasticsearchexporter/model.go +++ b/exporter/elasticsearchexporter/model.go @@ -57,6 +57,13 @@ var resourceAttrsConversionMap = map[string]string{ semconv.AttributeK8SNodeName: "kubernetes.node.name", semconv.AttributeK8SPodName: "kubernetes.pod.name", semconv.AttributeK8SPodUID: "kubernetes.pod.uid", + semconv.AttributeK8SJobName: "kubernetes.job.name", + semconv.AttributeK8SCronJobName: "kubernetes.cronjob.name", + semconv.AttributeK8SStatefulSetName: "kubernetes.statefulset.name", + semconv.AttributeK8SReplicaSetName: 
"kubernetes.replicaset.name", + semconv.AttributeK8SDaemonSetName: "kubernetes.daemonset.name", + semconv.AttributeK8SContainerName: "kubernetes.container.name", + semconv.AttributeK8SClusterName: "orchestrator.cluster.name", } // resourceAttrsToPreserve contains conventions that should be preserved in ECS mode. diff --git a/exporter/elasticsearchexporter/model_test.go b/exporter/elasticsearchexporter/model_test.go index 136039cf28ea..d6b1aec8387a 100644 --- a/exporter/elasticsearchexporter/model_test.go +++ b/exporter/elasticsearchexporter/model_test.go @@ -381,6 +381,13 @@ func TestEncodeLogECSMode(t *testing.T) { "k8s.pod.name": "opentelemetry-pod-autoconf", "k8s.pod.uid": "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff", "k8s.deployment.name": "coredns", + semconv.AttributeK8SJobName: "job.name", + semconv.AttributeK8SCronJobName: "cronjob.name", + semconv.AttributeK8SStatefulSetName: "statefulset.name", + semconv.AttributeK8SReplicaSetName: "replicaset.name", + semconv.AttributeK8SDaemonSetName: "daemonset.name", + semconv.AttributeK8SContainerName: "container.name", + semconv.AttributeK8SClusterName: "cluster.name", }) require.NoError(t, err) @@ -444,7 +451,14 @@ func TestEncodeLogECSMode(t *testing.T) { "kubernetes.node.name": "node-1", "kubernetes.pod.name": "opentelemetry-pod-autoconf", "kubernetes.pod.uid": "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff", - "kubernetes.deployment.name": "coredns" + "kubernetes.deployment.name": "coredns", + "kubernetes.job.name": "job.name", + "kubernetes.cronjob.name": "cronjob.name", + "kubernetes.statefulset.name": "statefulset.name", + "kubernetes.replicaset.name": "replicaset.name", + "kubernetes.daemonset.name": "daemonset.name", + "kubernetes.container.name": "container.name", + "orchestrator.cluster.name": "cluster.name" }`, buf.String()) } From f0c17362fedf1e4abd0877607ab4763d27e0abc8 Mon Sep 17 00:00:00 2001 From: Carson Ip Date: Fri, 8 Nov 2024 14:02:50 +0000 Subject: [PATCH 13/24] [chore][exporter/elasticsearch] Fix flaky recorder assertions (#36255) #### Description Incorrect rec.WaitItems call causes test to be flaky. Refactor to avoid future occurrences. 
#### Link to tracking issue Fixes #35924 #### Testing #### Documentation --- .../elasticsearchexporter/exporter_test.go | 40 +++++-------------- exporter/elasticsearchexporter/utils_test.go | 10 ++++- 2 files changed, 19 insertions(+), 31 deletions(-) diff --git a/exporter/elasticsearchexporter/exporter_test.go b/exporter/elasticsearchexporter/exporter_test.go index 74933eaf40c3..ab158dfc414b 100644 --- a/exporter/elasticsearchexporter/exporter_test.go +++ b/exporter/elasticsearchexporter/exporter_test.go @@ -505,7 +505,6 @@ func TestExporterLogs(t *testing.T) { ) tc.body.CopyTo(logs.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Body()) mustSendLogs(t, exporter, logs) - rec.WaitItems(1) expected := []itemRequest{ { @@ -514,7 +513,7 @@ func TestExporterLogs(t *testing.T) { }, } - assertItemsEqual(t, expected, rec.Items(), false) + assertRecordedItems(t, expected, rec, false) } }) @@ -889,8 +888,6 @@ func TestExporterMetrics(t *testing.T) { mustSendMetrics(t, exporter, metrics) - rec.WaitItems(8) - expected := []itemRequest{ { Action: []byte(`{"create":{"_index":"metrics-generic-bar"}}`), @@ -926,7 +923,7 @@ func TestExporterMetrics(t *testing.T) { }, } - assertItemsEqual(t, expected, rec.Items(), false) + assertRecordedItems(t, expected, rec, false) }) t.Run("publish histogram", func(t *testing.T) { @@ -959,8 +956,6 @@ func TestExporterMetrics(t *testing.T) { mustSendMetrics(t, exporter, metrics) - rec.WaitItems(2) - expected := []itemRequest{ { Action: []byte(`{"create":{"_index":"metrics-generic-default"}}`), @@ -972,7 +967,7 @@ func TestExporterMetrics(t *testing.T) { }, } - assertItemsEqual(t, expected, rec.Items(), false) + assertRecordedItems(t, expected, rec, false) }) t.Run("publish exponential histogram", func(t *testing.T) { @@ -1005,8 +1000,6 @@ func TestExporterMetrics(t *testing.T) { mustSendMetrics(t, exporter, metrics) - rec.WaitItems(1) - expected := []itemRequest{ { Action: []byte(`{"create":{"_index":"metrics-generic-default"}}`), @@ -1014,7 +1007,7 @@ func TestExporterMetrics(t *testing.T) { }, } - assertItemsEqual(t, expected, rec.Items(), false) + assertRecordedItems(t, expected, rec, false) }) t.Run("publish histogram cumulative temporality", func(t *testing.T) { @@ -1113,8 +1106,6 @@ func TestExporterMetrics(t *testing.T) { err := exporter.ConsumeMetrics(context.Background(), metrics) assert.NoError(t, err) - rec.WaitItems(2) - expected := []itemRequest{ { Action: []byte(`{"create":{"_index":"metrics-generic-default"}}`), @@ -1126,7 +1117,7 @@ func TestExporterMetrics(t *testing.T) { }, } - assertItemsEqual(t, expected, rec.Items(), false) + assertRecordedItems(t, expected, rec, false) }) t.Run("otel mode", func(t *testing.T) { @@ -1176,8 +1167,6 @@ func TestExporterMetrics(t *testing.T) { mustSendMetrics(t, exporter, metrics) - rec.WaitItems(2) - expected := []itemRequest{ { Action: []byte(`{"create":{"_index":"metrics-generic.otel-default","dynamic_templates":{"metrics.metric.foo":"histogram"}}}`), @@ -1197,7 +1186,7 @@ func TestExporterMetrics(t *testing.T) { }, } - assertItemsEqual(t, expected, rec.Items(), false) + assertRecordedItems(t, expected, rec, false) }) t.Run("otel mode attribute array value", func(t *testing.T) { @@ -1259,7 +1248,6 @@ func TestExporterMetrics(t *testing.T) { mustSendMetrics(t, exporter, metrics) - rec.WaitItems(1) expected := []itemRequest{ { Action: []byte(`{"create":{"_index":"metrics-generic.otel-default","dynamic_templates":{"metrics.sum":"gauge_long","metrics.summary":"summary"}}}`), @@ -1267,7 +1255,7 @@ func 
TestExporterMetrics(t *testing.T) { }, } - assertItemsEqual(t, expected, rec.Items(), false) + assertRecordedItems(t, expected, rec, false) }) t.Run("otel mode aggregate_metric_double hint", func(t *testing.T) { @@ -1310,7 +1298,6 @@ func TestExporterMetrics(t *testing.T) { mustSendMetrics(t, exporter, metrics) - rec.WaitItems(1) expected := []itemRequest{ { Action: []byte(`{"create":{"_index":"metrics-generic.otel-default","dynamic_templates":{"metrics.histogram.summary":"summary"}}}`), @@ -1322,7 +1309,7 @@ func TestExporterMetrics(t *testing.T) { }, } - assertItemsEqual(t, expected, rec.Items(), false) + assertRecordedItems(t, expected, rec, false) }) t.Run("otel mode metric name conflict", func(t *testing.T) { @@ -1354,7 +1341,6 @@ func TestExporterMetrics(t *testing.T) { mustSendMetrics(t, exporter, metrics) - rec.WaitItems(1) expected := []itemRequest{ { Action: []byte(`{"create":{"_index":"metrics-generic.otel-default","dynamic_templates":{"metrics.foo.bar":"gauge_long","metrics.foo":"gauge_long","metrics.foo.bar.baz":"gauge_long"}}}`), @@ -1362,7 +1348,7 @@ func TestExporterMetrics(t *testing.T) { }, } - assertItemsEqual(t, expected, rec.Items(), false) + assertRecordedItems(t, expected, rec, false) }) t.Run("otel mode attribute key prefix conflict", func(t *testing.T) { @@ -1422,8 +1408,6 @@ func TestExporterMetrics(t *testing.T) { mustSendMetrics(t, exporter, metrics) - rec.WaitItems(2) - expected := []itemRequest{ { Action: []byte(`{"create":{"_index":"metrics-generic-default"}}`), @@ -1435,7 +1419,7 @@ func TestExporterMetrics(t *testing.T) { }, } - assertItemsEqual(t, expected, rec.Items(), false) + assertRecordedItems(t, expected, rec, false) }) } @@ -1644,8 +1628,6 @@ func TestExporterTraces(t *testing.T) { mustSendTraces(t, exporter, traces) - rec.WaitItems(2) - expected := []itemRequest{ { Action: []byte(`{"create":{"_index":"traces-generic.otel-default"}}`), @@ -1657,7 +1639,7 @@ func TestExporterTraces(t *testing.T) { }, } - assertItemsEqual(t, expected, rec.Items(), false) + assertRecordedItems(t, expected, rec, false) }) t.Run("otel mode attribute array value", func(t *testing.T) { diff --git a/exporter/elasticsearchexporter/utils_test.go b/exporter/elasticsearchexporter/utils_test.go index 94c475219ffb..fc320b36f073 100644 --- a/exporter/elasticsearchexporter/utils_test.go +++ b/exporter/elasticsearchexporter/utils_test.go @@ -18,6 +18,7 @@ import ( "github.com/klauspost/compress/gzip" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" @@ -37,7 +38,12 @@ func itemRequestsSortFunc(a, b itemRequest) int { return comp } -func assertItemsEqual(t *testing.T, expected, actual []itemRequest, assertOrder bool) { // nolint:unparam +func assertRecordedItems(t *testing.T, expected []itemRequest, recorder *bulkRecorder, assertOrder bool) { // nolint:unparam + recorder.WaitItems(len(expected)) + assertItemRequests(t, expected, recorder.Items(), assertOrder) +} + +func assertItemRequests(t *testing.T, expected, actual []itemRequest, assertOrder bool) { // nolint:unparam expectedItems := expected actualItems := actual if !assertOrder { @@ -50,7 +56,7 @@ func assertItemsEqual(t *testing.T, expected, actual []itemRequest, assertOrder slices.SortFunc(actualItems, itemRequestsSortFunc) } - assert.Equal(t, len(expectedItems), len(actualItems), "want %d items, got %d", len(expectedItems), len(actualItems)) + require.Equal(t, 
len(expectedItems), len(actualItems), "want %d items, got %d", len(expectedItems), len(actualItems)) for i, want := range expectedItems { got := actualItems[i] assert.JSONEq(t, string(want.Action), string(got.Action), "item %d action", i) From d1dcee9cc88ce17dcbd1b97ef4ee08fc633b2641 Mon Sep 17 00:00:00 2001 From: Andrzej Stencel Date: Fri, 8 Nov 2024 15:44:22 +0100 Subject: [PATCH 14/24] [chore][pkg/stanza] refactor: introduce emit.Token struct (#36260) This makes the code clearer by encapsulating the token's body and attributes in a single structure. It should make future change clearer when the emit callback will be changed to accept a collection of tokens as opposed to a single token. The Sink type could use some refactoring as well, but I'm not doing it here to keep the changes to the minimum for clarity and ease of code review. --- .chloggen/refactor-callback-token.yaml | 27 +++++++++++++++++++ pkg/stanza/fileconsumer/benchmark_test.go | 5 ++-- pkg/stanza/fileconsumer/emit/emit.go | 14 +++++++++- .../fileconsumer/internal/emittest/nop.go | 4 ++- .../internal/emittest/nop_test.go | 4 ++- .../fileconsumer/internal/emittest/sink.go | 8 +++--- .../internal/emittest/sink_test.go | 4 ++- .../fileconsumer/internal/reader/reader.go | 2 +- pkg/stanza/operator/input/file/input.go | 9 ++++--- receiver/otlpjsonfilereceiver/file.go | 17 ++++++------ 10 files changed, 71 insertions(+), 23 deletions(-) create mode 100644 .chloggen/refactor-callback-token.yaml diff --git a/.chloggen/refactor-callback-token.yaml b/.chloggen/refactor-callback-token.yaml new file mode 100644 index 000000000000..a7d36715a111 --- /dev/null +++ b/.chloggen/refactor-callback-token.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: breaking + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: pkg/stanza + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Changed signature of `emit.Callback` function in `pkg/stanza/fileconsumer/emit` package by introducing `emit.Token` struct that encapsulates the token's body and attributes. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [36260] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [api] diff --git a/pkg/stanza/fileconsumer/benchmark_test.go b/pkg/stanza/fileconsumer/benchmark_test.go index c470fa50e17c..20ecfc980f15 100644 --- a/pkg/stanza/fileconsumer/benchmark_test.go +++ b/pkg/stanza/fileconsumer/benchmark_test.go @@ -16,6 +16,7 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component/componenttest" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/emit" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/filetest" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/fingerprint" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/testutil" @@ -187,8 +188,8 @@ func BenchmarkFileInput(b *testing.B) { cfg.PollInterval = time.Microsecond doneChan := make(chan bool, len(files)) - callback := func(_ context.Context, token []byte, _ map[string]any) error { - if len(token) == 0 { + callback := func(_ context.Context, token emit.Token) error { + if len(token.Body) == 0 { doneChan <- true } return nil diff --git a/pkg/stanza/fileconsumer/emit/emit.go b/pkg/stanza/fileconsumer/emit/emit.go index 65859444229e..6c27a3c72a4a 100644 --- a/pkg/stanza/fileconsumer/emit/emit.go +++ b/pkg/stanza/fileconsumer/emit/emit.go @@ -7,4 +7,16 @@ import ( "context" ) -type Callback func(ctx context.Context, token []byte, attrs map[string]any) error +type Callback func(ctx context.Context, token Token) error + +type Token struct { + Body []byte + Attributes map[string]any +} + +func NewToken(body []byte, attrs map[string]any) Token { + return Token{ + Body: body, + Attributes: attrs, + } +} diff --git a/pkg/stanza/fileconsumer/internal/emittest/nop.go b/pkg/stanza/fileconsumer/internal/emittest/nop.go index 09c91b7949db..80e5c3e1a618 100644 --- a/pkg/stanza/fileconsumer/internal/emittest/nop.go +++ b/pkg/stanza/fileconsumer/internal/emittest/nop.go @@ -5,8 +5,10 @@ package emittest // import "github.com/open-telemetry/opentelemetry-collector-co import ( "context" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/emit" ) -func Nop(_ context.Context, _ []byte, _ map[string]any) error { +func Nop(_ context.Context, _ emit.Token) error { return nil } diff --git a/pkg/stanza/fileconsumer/internal/emittest/nop_test.go b/pkg/stanza/fileconsumer/internal/emittest/nop_test.go index 470327925fd4..40cef35312dc 100644 --- a/pkg/stanza/fileconsumer/internal/emittest/nop_test.go +++ b/pkg/stanza/fileconsumer/internal/emittest/nop_test.go @@ -8,8 +8,10 @@ import ( "testing" "github.com/stretchr/testify/require" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/emit" ) func TestNop(t *testing.T) { - require.NoError(t, Nop(context.Background(), nil, nil)) + require.NoError(t, Nop(context.Background(), emit.Token{})) } diff --git a/pkg/stanza/fileconsumer/internal/emittest/sink.go b/pkg/stanza/fileconsumer/internal/emittest/sink.go index 44d265c9af5d..6c7d9954b3fc 100644 --- a/pkg/stanza/fileconsumer/internal/emittest/sink.go +++ b/pkg/stanza/fileconsumer/internal/emittest/sink.go @@ -57,13 +57,13 @@ func NewSink(opts ...SinkOpt) *Sink { return &Sink{ emitChan: emitChan, timeout: cfg.timeout, - Callback: func(ctx context.Context, token []byte, attrs map[string]any) error { - copied := make([]byte, len(token)) - copy(copied, token) + Callback: func(ctx context.Context, token emit.Token) error { + copied := make([]byte, len(token.Body)) + 
copy(copied, token.Body) select { case <-ctx.Done(): return ctx.Err() - case emitChan <- &Call{copied, attrs}: + case emitChan <- &Call{copied, token.Attributes}: } return nil }, diff --git a/pkg/stanza/fileconsumer/internal/emittest/sink_test.go b/pkg/stanza/fileconsumer/internal/emittest/sink_test.go index 2cea011e7518..7d340b4a79fd 100644 --- a/pkg/stanza/fileconsumer/internal/emittest/sink_test.go +++ b/pkg/stanza/fileconsumer/internal/emittest/sink_test.go @@ -11,6 +11,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/emit" ) func TestNextToken(t *testing.T) { @@ -202,7 +204,7 @@ func sinkTest(t *testing.T, opts ...SinkOpt) (*Sink, []*Call) { } go func() { for _, c := range testCalls { - assert.NoError(t, s.Callback(context.Background(), c.Token, c.Attrs)) + assert.NoError(t, s.Callback(context.Background(), emit.NewToken(c.Token, c.Attrs))) } }() return s, testCalls diff --git a/pkg/stanza/fileconsumer/internal/reader/reader.go b/pkg/stanza/fileconsumer/internal/reader/reader.go index 207e5b745dbd..3a591574fbc6 100644 --- a/pkg/stanza/fileconsumer/internal/reader/reader.go +++ b/pkg/stanza/fileconsumer/internal/reader/reader.go @@ -209,7 +209,7 @@ func (r *Reader) readContents(ctx context.Context) { r.FileAttributes[attrs.LogFileRecordNumber] = r.RecordNum } - err = r.emitFunc(ctx, token, r.FileAttributes) + err = r.emitFunc(ctx, emit.NewToken(token, r.FileAttributes)) if err != nil { r.set.Logger.Error("failed to process token", zap.Error(err)) } diff --git a/pkg/stanza/operator/input/file/input.go b/pkg/stanza/operator/input/file/input.go index fd845499aeae..39f721ad57b3 100644 --- a/pkg/stanza/operator/input/file/input.go +++ b/pkg/stanza/operator/input/file/input.go @@ -11,6 +11,7 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/entry" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/emit" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/operator/helper" ) @@ -36,17 +37,17 @@ func (i *Input) Stop() error { return i.fileConsumer.Stop() } -func (i *Input) emit(ctx context.Context, token []byte, attrs map[string]any) error { - if len(token) == 0 { +func (i *Input) emit(ctx context.Context, token emit.Token) error { + if len(token.Body) == 0 { return nil } - ent, err := i.NewEntry(i.toBody(token)) + ent, err := i.NewEntry(i.toBody(token.Body)) if err != nil { return fmt.Errorf("create entry: %w", err) } - for k, v := range attrs { + for k, v := range token.Attributes { if err := ent.Set(entry.NewAttributeField(k), v); err != nil { i.Logger().Error("set attribute", zap.Error(err)) } diff --git a/receiver/otlpjsonfilereceiver/file.go b/receiver/otlpjsonfilereceiver/file.go index aad1ddef6561..c5a049dd271a 100644 --- a/receiver/otlpjsonfilereceiver/file.go +++ b/receiver/otlpjsonfilereceiver/file.go @@ -19,6 +19,7 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/adapter" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/emit" "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/otlpjsonfilereceiver/internal/metadata" ) @@ -82,10 +83,10 @@ func 
createLogsReceiver(_ context.Context, settings receiver.Settings, configura if cfg.ReplayFile { opts = append(opts, fileconsumer.WithNoTracking()) } - input, err := cfg.Config.Build(settings.TelemetrySettings, func(ctx context.Context, token []byte, _ map[string]any) error { + input, err := cfg.Config.Build(settings.TelemetrySettings, func(ctx context.Context, token emit.Token) error { ctx = obsrecv.StartLogsOp(ctx) var l plog.Logs - l, err = logsUnmarshaler.UnmarshalLogs(token) + l, err = logsUnmarshaler.UnmarshalLogs(token.Body) if err != nil { obsrecv.EndLogsOp(ctx, metadata.Type.String(), 0, err) } else { @@ -119,10 +120,10 @@ func createMetricsReceiver(_ context.Context, settings receiver.Settings, config if cfg.ReplayFile { opts = append(opts, fileconsumer.WithNoTracking()) } - input, err := cfg.Config.Build(settings.TelemetrySettings, func(ctx context.Context, token []byte, _ map[string]any) error { + input, err := cfg.Config.Build(settings.TelemetrySettings, func(ctx context.Context, token emit.Token) error { ctx = obsrecv.StartMetricsOp(ctx) var m pmetric.Metrics - m, err = metricsUnmarshaler.UnmarshalMetrics(token) + m, err = metricsUnmarshaler.UnmarshalMetrics(token.Body) if err != nil { obsrecv.EndMetricsOp(ctx, metadata.Type.String(), 0, err) } else { @@ -155,10 +156,10 @@ func createTracesReceiver(_ context.Context, settings receiver.Settings, configu if cfg.ReplayFile { opts = append(opts, fileconsumer.WithNoTracking()) } - input, err := cfg.Config.Build(settings.TelemetrySettings, func(ctx context.Context, token []byte, _ map[string]any) error { + input, err := cfg.Config.Build(settings.TelemetrySettings, func(ctx context.Context, token emit.Token) error { ctx = obsrecv.StartTracesOp(ctx) var t ptrace.Traces - t, err = tracesUnmarshaler.UnmarshalTraces(token) + t, err = tracesUnmarshaler.UnmarshalTraces(token.Body) if err != nil { obsrecv.EndTracesOp(ctx, metadata.Type.String(), 0, err) } else { @@ -183,8 +184,8 @@ func createProfilesReceiver(_ context.Context, settings receiver.Settings, confi if cfg.ReplayFile { opts = append(opts, fileconsumer.WithNoTracking()) } - input, err := cfg.Config.Build(settings.TelemetrySettings, func(ctx context.Context, token []byte, _ map[string]any) error { - p, _ := profilesUnmarshaler.UnmarshalProfiles(token) + input, err := cfg.Config.Build(settings.TelemetrySettings, func(ctx context.Context, token emit.Token) error { + p, _ := profilesUnmarshaler.UnmarshalProfiles(token.Body) if p.ResourceProfiles().Len() != 0 { _ = profiles.ConsumeProfiles(ctx, p) } From 2a3fbd0c1aea4a41c7527674c49000d8b200d7be Mon Sep 17 00:00:00 2001 From: Khushi Jain Date: Fri, 8 Nov 2024 20:17:57 +0530 Subject: [PATCH 15/24] [connector/otlpjson]: Do not emit empty batches (#35827) #### Description The connector now does not emit empty batches for invalid otlp payload and throws an error instead. 
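For readers skimming the diff, the detection is regex based: the connector looks at the first top-level key of the OTLP/JSON body to decide whether it holds logs, metrics, or traces. The standalone Go sketch below mirrors that idea with the same patterns this PR adds to the factory; the `classify` helper and its string results are illustrative only, not the connector's API.

```go
package main

import (
	"fmt"
	"regexp"
)

// Classifiers in the spirit of the connector's approach: match on the first
// top-level key of the OTLP/JSON document.
var (
	logRe    = regexp.MustCompile(`^\{\s*"resourceLogs"\s*:\s*\[`)
	metricRe = regexp.MustCompile(`^\{\s*"resourceMetrics"\s*:\s*\[`)
	traceRe  = regexp.MustCompile(`^\{\s*"resourceSpans"\s*:\s*\[`)
)

func classify(body string) string {
	switch {
	case logRe.MatchString(body):
		return "logs"
	case metricRe.MatchString(body):
		return "metrics"
	case traceRe.MatchString(body):
		return "traces"
	default:
		// Anything else is reported as invalid rather than turned into an empty batch.
		return "invalid"
	}
}

func main() {
	fmt.Println(classify(`{"resourceLogs":[]}`))  // logs
	fmt.Println(classify(`{"resourceSpans":[]}`)) // traces
	fmt.Println(classify(`{"foo":"bar"}`))        // invalid
}
```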
Approach discussed here https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/35738#issuecomment-2438627919 #### Link to tracking issue Fixes #35738 and #35739 #### Testing Manual Testing #### Documentation --------- Co-authored-by: Daniel Jaglowski --- .../otlpjson-connector-invalid-otlp.yaml | 27 +++++++++++ connector/otlpjsonconnector/connector_test.go | 46 +++++++++++++++++++ connector/otlpjsonconnector/factory.go | 5 ++ connector/otlpjsonconnector/logs.go | 29 ++++++++---- connector/otlpjsonconnector/metrics.go | 28 +++++++---- connector/otlpjsonconnector/traces.go | 27 +++++++---- 6 files changed, 138 insertions(+), 24 deletions(-) create mode 100644 .chloggen/otlpjson-connector-invalid-otlp.yaml diff --git a/.chloggen/otlpjson-connector-invalid-otlp.yaml b/.chloggen/otlpjson-connector-invalid-otlp.yaml new file mode 100644 index 000000000000..0e5628a3f03f --- /dev/null +++ b/.chloggen/otlpjson-connector-invalid-otlp.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: connector/otlpjson + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Throw error on invalid otlp payload. + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [35738, 35739] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [] diff --git a/connector/otlpjsonconnector/connector_test.go b/connector/otlpjsonconnector/connector_test.go index deb5ae281ec0..cd433e75ccb3 100644 --- a/connector/otlpjsonconnector/connector_test.go +++ b/connector/otlpjsonconnector/connector_test.go @@ -178,3 +178,49 @@ func TestLogsToTraces(t *testing.T) { }) } } + +// This benchmark looks at how performance is affected when all three connectors are consuming logs (at the same time) +func BenchmarkConsumeLogs(b *testing.B) { + inputlogs := "input-log.yaml" + inputTraces := "input-trace.yaml" + inputMetrics := "input-metric.yaml" + + factory := NewFactory() + // initialize log -> log connector + logsink := &consumertest.LogsSink{} + logscon, _ := factory.CreateLogsToLogs(context.Background(), + connectortest.NewNopSettings(), createDefaultConfig(), logsink) + + require.NoError(b, logscon.Start(context.Background(), componenttest.NewNopHost())) + defer func() { + assert.NoError(b, logscon.Shutdown(context.Background())) + }() + + // initialize log -> traces connector + tracesink := &consumertest.TracesSink{} + traceconn, _ := factory.CreateLogsToTraces(context.Background(), + connectortest.NewNopSettings(), createDefaultConfig(), tracesink) + require.NoError(b, traceconn.Start(context.Background(), componenttest.NewNopHost())) + defer func() { + assert.NoError(b, traceconn.Shutdown(context.Background())) + }() + + // initialize log -> metric connector + metricsink := &consumertest.MetricsSink{} + metricconn, _ := factory.CreateLogsToMetrics(context.Background(), + connectortest.NewNopSettings(), createDefaultConfig(), metricsink) + require.NoError(b, metricconn.Start(context.Background(), componenttest.NewNopHost())) + defer func() { + assert.NoError(b, metricconn.Shutdown(context.Background())) + }() + + testLogs, _ := golden.ReadLogs(filepath.Join("testdata", "logsToLogs", inputlogs)) + testTraces, _ := golden.ReadLogs(filepath.Join("testdata", "logsToTraces", inputTraces)) + testMetrics, _ := golden.ReadLogs(filepath.Join("testdata", "logsToMetrics", inputMetrics)) + + for i := 0; i < b.N; i++ { + assert.NoError(b, logscon.ConsumeLogs(context.Background(), testLogs)) + assert.NoError(b, traceconn.ConsumeLogs(context.Background(), testTraces)) + assert.NoError(b, metricconn.ConsumeLogs(context.Background(), testMetrics)) + } +} diff --git a/connector/otlpjsonconnector/factory.go b/connector/otlpjsonconnector/factory.go index 1f4dca456bac..386b765b0e34 100644 --- a/connector/otlpjsonconnector/factory.go +++ b/connector/otlpjsonconnector/factory.go @@ -5,6 +5,7 @@ package otlpjsonconnector // import "github.com/open-telemetry/opentelemetry-col import ( "context" + "regexp" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/connector" @@ -13,6 +14,10 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/connector/otlpjsonconnector/internal/metadata" ) +var logRegex = regexp.MustCompile(`^\{\s*"resourceLogs"\s*:\s*\[`) +var metricRegex = regexp.MustCompile(`^\{\s*"resourceMetrics"\s*:\s*\[`) +var traceRegex = regexp.MustCompile(`^\{\s*"resourceSpans"\s*:\s*\[`) + // NewFactory returns a ConnectorFactory. 
func NewFactory() connector.Factory { return connector.NewFactory( diff --git a/connector/otlpjsonconnector/logs.go b/connector/otlpjsonconnector/logs.go index 621bc1c16a49..0cb524956855 100644 --- a/connector/otlpjsonconnector/logs.go +++ b/connector/otlpjsonconnector/logs.go @@ -50,16 +50,29 @@ func (c *connectorLogs) ConsumeLogs(ctx context.Context, pl plog.Logs) error { for k := 0; k < logRecord.LogRecords().Len(); k++ { lRecord := logRecord.LogRecords().At(k) token := lRecord.Body() - var l plog.Logs - l, err := logsUnmarshaler.UnmarshalLogs([]byte(token.AsString())) - if err != nil { - c.logger.Error("could not extract logs from otlp json", zap.Error(err)) + + // Check if the "resourceLogs" key exists in the JSON data + value := token.AsString() + switch { + case logRegex.MatchString(value): + var l plog.Logs + l, err := logsUnmarshaler.UnmarshalLogs([]byte(value)) + if err != nil { + c.logger.Error("could not extract logs from otlp json", zap.Error(err)) + continue + } + err = c.logsConsumer.ConsumeLogs(ctx, l) + if err != nil { + c.logger.Error("could not consume logs from otlp json", zap.Error(err)) + } + case metricRegex.MatchString(value), traceRegex.MatchString(value): + // If it's a metric or trace payload, simply continue continue + default: + // If no regex matches, log the invalid payload + c.logger.Error("Invalid otlp payload") } - err = c.logsConsumer.ConsumeLogs(ctx, l) - if err != nil { - c.logger.Error("could not consume logs from otlp json", zap.Error(err)) - } + } } } diff --git a/connector/otlpjsonconnector/metrics.go b/connector/otlpjsonconnector/metrics.go index 3954e214512c..fd8fa9ff6c23 100644 --- a/connector/otlpjsonconnector/metrics.go +++ b/connector/otlpjsonconnector/metrics.go @@ -51,16 +51,28 @@ func (c *connectorMetrics) ConsumeLogs(ctx context.Context, pl plog.Logs) error for k := 0; k < logRecord.LogRecords().Len(); k++ { lRecord := logRecord.LogRecords().At(k) token := lRecord.Body() - var m pmetric.Metrics - m, err := metricsUnmarshaler.UnmarshalMetrics([]byte(token.AsString())) - if err != nil { - c.logger.Error("could extract metrics from otlp json", zap.Error(err)) + + value := token.AsString() + switch { + case metricRegex.MatchString(value): + var m pmetric.Metrics + m, err := metricsUnmarshaler.UnmarshalMetrics([]byte(value)) + if err != nil { + c.logger.Error("could not extract metrics from otlp json", zap.Error(err)) + continue + } + err = c.metricsConsumer.ConsumeMetrics(ctx, m) + if err != nil { + c.logger.Error("could not consume metrics from otlp json", zap.Error(err)) + } + case logRegex.MatchString(value), traceRegex.MatchString(value): + // If it's a log or trace payload, simply continue continue + default: + // If no regex matches, log the invalid payload + c.logger.Error("Invalid otlp payload") } - err = c.metricsConsumer.ConsumeMetrics(ctx, m) - if err != nil { - c.logger.Error("could not consume metrics from otlp json", zap.Error(err)) - } + } } } diff --git a/connector/otlpjsonconnector/traces.go b/connector/otlpjsonconnector/traces.go index 6210095f0912..2b1e4b1f7bc9 100644 --- a/connector/otlpjsonconnector/traces.go +++ b/connector/otlpjsonconnector/traces.go @@ -51,15 +51,26 @@ func (c *connectorTraces) ConsumeLogs(ctx context.Context, pl plog.Logs) error { for k := 0; k < logRecord.LogRecords().Len(); k++ { lRecord := logRecord.LogRecords().At(k) token := lRecord.Body() - var t ptrace.Traces - t, err := tracesUnmarshaler.UnmarshalTraces([]byte(token.AsString())) - if err != nil { - c.logger.Error("could extract traces from otlp 
json", zap.Error(err)) + + value := token.AsString() + switch { + case traceRegex.MatchString(value): + var t ptrace.Traces + t, err := tracesUnmarshaler.UnmarshalTraces([]byte(value)) + if err != nil { + c.logger.Error("could not extract traces from otlp json", zap.Error(err)) + continue + } + err = c.tracesConsumer.ConsumeTraces(ctx, t) + if err != nil { + c.logger.Error("could not consume traces from otlp json", zap.Error(err)) + } + case metricRegex.MatchString(value), logRegex.MatchString(value): + // If it's a metric or log payload, continue to the next iteration continue - } - err = c.tracesConsumer.ConsumeTraces(ctx, t) - if err != nil { - c.logger.Error("could not consume traces from otlp json", zap.Error(err)) + default: + // If no regex matches, log the invalid payload + c.logger.Error("Invalid otlp payload") } } } From d0f4d09549758c41e698f0ec39fa9d0e213ea848 Mon Sep 17 00:00:00 2001 From: Yang Song Date: Fri, 8 Nov 2024 10:35:46 -0500 Subject: [PATCH 16/24] [exporter/datadog] Stop prefixing 3 HTTP metrics (#36265) #### Description Stop prefixing `http_server_duration`, `http_server_request_size` and `http_server_response_size` with `otelcol`. Those metrics used to have the `otelcol` prefix if they are from collector internal metrics before v0.106, then the prefix was removed. We put back the prefix in v0.108 but the same metric can be from SDKs as well. Stop prefixing them be consistent with https://opentelemetry.io/docs/collector/internal-telemetry/#lists-of-internal-metrics --- .chloggen/datadog-metric-prefix.yaml | 27 +++++++++++++++++++ connector/datadogconnector/go.mod | 10 +++---- connector/datadogconnector/go.sum | 24 ++++++++--------- exporter/datadogexporter/go.mod | 10 +++---- exporter/datadogexporter/go.sum | 24 ++++++++--------- .../datadogexporter/integrationtest/go.mod | 10 +++---- .../datadogexporter/integrationtest/go.sum | 24 ++++++++--------- 7 files changed, 78 insertions(+), 51 deletions(-) create mode 100644 .chloggen/datadog-metric-prefix.yaml diff --git a/.chloggen/datadog-metric-prefix.yaml b/.chloggen/datadog-metric-prefix.yaml new file mode 100644 index 000000000000..f6cc82366433 --- /dev/null +++ b/.chloggen/datadog-metric-prefix.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: breaking + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: datadogexporter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: "Stop prefixing `http_server_duration`, `http_server_request_size` and `http_server_response_size` with `otelcol`" + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [36265] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: "These metrics can be from SDKs rather than collector. Stop prefixing them to be consistent with https://opentelemetry.io/docs/collector/internal-telemetry/#lists-of-internal-metrics" + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. 
+# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/connector/datadogconnector/go.mod b/connector/datadogconnector/go.mod index 2cba3b245241..79187a55ea64 100644 --- a/connector/datadogconnector/go.mod +++ b/connector/datadogconnector/go.mod @@ -8,8 +8,8 @@ require ( github.com/DataDog/datadog-agent/pkg/proto v0.58.2 github.com/DataDog/datadog-agent/pkg/trace v0.59.0-devel.0.20240911192058-0c2181220f85 github.com/DataDog/datadog-go/v5 v5.5.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.21.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.21.0 github.com/google/go-cmp v0.6.0 github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.113.0 github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.113.0 @@ -101,9 +101,9 @@ require ( github.com/DataDog/go-sqllexer v0.0.14 // indirect github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect github.com/DataDog/gohai v0.0.0-20230524154621-4316413895ee // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0 // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0 // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.21.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.21.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.21.0 // indirect github.com/DataDog/sketches-go v1.4.6 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/DataDog/zstd v1.5.5 // indirect diff --git a/connector/datadogconnector/go.sum b/connector/datadogconnector/go.sum index d51d935661a2..742244e7c577 100644 --- a/connector/datadogconnector/go.sum +++ b/connector/datadogconnector/go.sum @@ -212,18 +212,18 @@ github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4ti github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= github.com/DataDog/gohai v0.0.0-20230524154621-4316413895ee h1:tXibLZk3G6HncIFJKaNItsdzcrk4YqILNDZlXPTNt4k= github.com/DataDog/gohai v0.0.0-20230524154621-4316413895ee/go.mod h1:nTot/Iy0kW16bXgXr6blEc8gFeAS7vTqYlhAxh+dbc0= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0 h1:jdsuH8u4rxfvy3ZHoSLk5NAZrQMNZqyJwhM15FpEswE= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0/go.mod h1:KI5I5JhJNOQWeE4vs+qk+BY/9PVSDwNmSjrCUrmuZKw= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0 h1:e4XT2+v4vgZBCbp5JUbe0Z+PRegh+nsLMp4X+esht9E= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0/go.mod h1:66XlN7QpQKqIvw8e2UbCXV5X8wGnEw851nT9BjJ75dY= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 h1:fKv05WFWHCXQmUTehW1eEZvXJP65Qv00W4V01B1EqSA= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0 h1:JLpKc1QpkaUXEFgN68/Q9XgF0XgbVl/IXd8S1KUcEV4= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0/go.mod 
h1:VJtgUHCz38obs58oEjNjEre6IaHmR+s7o4DvX74knq4= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0 h1:b60rxWT/EwcSA4l/zXfqTZp3udXJ1fKtz7+Qwat8OjQ= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0/go.mod h1:6jM34grB+zhMrzWgM0V8B6vyIJ/75oAfjcx/mJWv6cE= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 h1:0OFAPO964qsj6BzKs/hbAkpO/IIHp7vN1klKrohzULA= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0/go.mod h1:IDaKpBfDtw8eWBLtXR14HB5dsXDxS4VRUR0OL5rlRT8= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.21.0 h1:U+p1i7+upWb4qOIOOvjS/92iMUGlSzEC1tRxVo0Lg8Y= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.21.0/go.mod h1:dOjp1lg4jwYyIbpnqW+DoOV8qD+70C+lgpINFvUqasQ= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.21.0 h1:VS4NTqwczwezMVvI6A7xYR3ugPmMUJ4FcdFrsdnZI2I= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.21.0/go.mod h1:66XlN7QpQKqIvw8e2UbCXV5X8wGnEw851nT9BjJ75dY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.21.0 h1:hgbTFS6SkqbzOiWSfP58dZ/Jpjlmv6dpD4+V4LDHm2Q= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.21.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.21.0 h1:XD9Kd+baO66+tfbdanOFSMGEfwWfnrn/IxG/Dc5bv5I= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.21.0/go.mod h1:9ByLz9jISc176DzjIdaRfRKwaitqF8ie6RTvfP8Aufo= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.21.0 h1:8nW8jfcCIWzxWrpI31C0QYoOjTaUGp6USCwiRbP5Fp4= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.21.0/go.mod h1:wuatEozcLYinJ0WYf0MlVTFtTzEmf+qyJet0H9foVAs= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.21.0 h1:/Dp1WBvekdusS9Tw9pLE7RG04eluNktQ29arLS4SpGM= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.21.0/go.mod h1:asNuwNy1O2HbadkcZVuqmFGonfEzXS/SBvOo8V1MJvQ= github.com/DataDog/sketches-go v1.4.6 h1:acd5fb+QdUzGrosfNLwrIhqyrbMORpvBy7mE+vHlT3I= github.com/DataDog/sketches-go v1.4.6/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg= github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= diff --git a/exporter/datadogexporter/go.mod b/exporter/datadogexporter/go.mod index 677a31246d27..1a3da388a9c6 100644 --- a/exporter/datadogexporter/go.mod +++ b/exporter/datadogexporter/go.mod @@ -31,11 +31,11 @@ require ( github.com/DataDog/datadog-api-client-go/v2 v2.31.0 github.com/DataDog/datadog-go/v5 v5.5.0 github.com/DataDog/gohai v0.0.0-20230524154621-4316413895ee - github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.21.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.21.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.21.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.21.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.21.0 github.com/DataDog/sketches-go v1.4.6 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 github.com/aws/aws-sdk-go v1.55.5 diff --git a/exporter/datadogexporter/go.sum 
b/exporter/datadogexporter/go.sum index c29d1e892c8a..6f4b8097c6ca 100644 --- a/exporter/datadogexporter/go.sum +++ b/exporter/datadogexporter/go.sum @@ -220,18 +220,18 @@ github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4ti github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= github.com/DataDog/gohai v0.0.0-20230524154621-4316413895ee h1:tXibLZk3G6HncIFJKaNItsdzcrk4YqILNDZlXPTNt4k= github.com/DataDog/gohai v0.0.0-20230524154621-4316413895ee/go.mod h1:nTot/Iy0kW16bXgXr6blEc8gFeAS7vTqYlhAxh+dbc0= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0 h1:jdsuH8u4rxfvy3ZHoSLk5NAZrQMNZqyJwhM15FpEswE= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0/go.mod h1:KI5I5JhJNOQWeE4vs+qk+BY/9PVSDwNmSjrCUrmuZKw= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0 h1:e4XT2+v4vgZBCbp5JUbe0Z+PRegh+nsLMp4X+esht9E= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0/go.mod h1:66XlN7QpQKqIvw8e2UbCXV5X8wGnEw851nT9BjJ75dY= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 h1:fKv05WFWHCXQmUTehW1eEZvXJP65Qv00W4V01B1EqSA= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0 h1:JLpKc1QpkaUXEFgN68/Q9XgF0XgbVl/IXd8S1KUcEV4= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0/go.mod h1:VJtgUHCz38obs58oEjNjEre6IaHmR+s7o4DvX74knq4= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0 h1:b60rxWT/EwcSA4l/zXfqTZp3udXJ1fKtz7+Qwat8OjQ= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0/go.mod h1:6jM34grB+zhMrzWgM0V8B6vyIJ/75oAfjcx/mJWv6cE= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 h1:0OFAPO964qsj6BzKs/hbAkpO/IIHp7vN1klKrohzULA= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0/go.mod h1:IDaKpBfDtw8eWBLtXR14HB5dsXDxS4VRUR0OL5rlRT8= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.21.0 h1:U+p1i7+upWb4qOIOOvjS/92iMUGlSzEC1tRxVo0Lg8Y= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.21.0/go.mod h1:dOjp1lg4jwYyIbpnqW+DoOV8qD+70C+lgpINFvUqasQ= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.21.0 h1:VS4NTqwczwezMVvI6A7xYR3ugPmMUJ4FcdFrsdnZI2I= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.21.0/go.mod h1:66XlN7QpQKqIvw8e2UbCXV5X8wGnEw851nT9BjJ75dY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.21.0 h1:hgbTFS6SkqbzOiWSfP58dZ/Jpjlmv6dpD4+V4LDHm2Q= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.21.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.21.0 h1:XD9Kd+baO66+tfbdanOFSMGEfwWfnrn/IxG/Dc5bv5I= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.21.0/go.mod h1:9ByLz9jISc176DzjIdaRfRKwaitqF8ie6RTvfP8Aufo= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.21.0 h1:8nW8jfcCIWzxWrpI31C0QYoOjTaUGp6USCwiRbP5Fp4= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.21.0/go.mod h1:wuatEozcLYinJ0WYf0MlVTFtTzEmf+qyJet0H9foVAs= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.21.0 h1:/Dp1WBvekdusS9Tw9pLE7RG04eluNktQ29arLS4SpGM= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.21.0/go.mod h1:asNuwNy1O2HbadkcZVuqmFGonfEzXS/SBvOo8V1MJvQ= github.com/DataDog/sketches-go v1.4.6 
h1:acd5fb+QdUzGrosfNLwrIhqyrbMORpvBy7mE+vHlT3I= github.com/DataDog/sketches-go v1.4.6/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg= github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= diff --git a/exporter/datadogexporter/integrationtest/go.mod b/exporter/datadogexporter/integrationtest/go.mod index 5ae6133d9fd3..fd8397393084 100644 --- a/exporter/datadogexporter/integrationtest/go.mod +++ b/exporter/datadogexporter/integrationtest/go.mod @@ -109,11 +109,11 @@ require ( github.com/DataDog/go-sqllexer v0.0.14 // indirect github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect github.com/DataDog/gohai v0.0.0-20230524154621-4316413895ee // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0 // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0 // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0 // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.21.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.21.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.21.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.21.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.21.0 // indirect github.com/DataDog/sketches-go v1.4.6 // indirect github.com/DataDog/viper v1.13.5 // indirect github.com/DataDog/zstd v1.5.5 // indirect diff --git a/exporter/datadogexporter/integrationtest/go.sum b/exporter/datadogexporter/integrationtest/go.sum index 07abc40f75fe..9f42409e8d04 100644 --- a/exporter/datadogexporter/integrationtest/go.sum +++ b/exporter/datadogexporter/integrationtest/go.sum @@ -218,18 +218,18 @@ github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4ti github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= github.com/DataDog/gohai v0.0.0-20230524154621-4316413895ee h1:tXibLZk3G6HncIFJKaNItsdzcrk4YqILNDZlXPTNt4k= github.com/DataDog/gohai v0.0.0-20230524154621-4316413895ee/go.mod h1:nTot/Iy0kW16bXgXr6blEc8gFeAS7vTqYlhAxh+dbc0= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0 h1:jdsuH8u4rxfvy3ZHoSLk5NAZrQMNZqyJwhM15FpEswE= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.20.0/go.mod h1:KI5I5JhJNOQWeE4vs+qk+BY/9PVSDwNmSjrCUrmuZKw= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0 h1:e4XT2+v4vgZBCbp5JUbe0Z+PRegh+nsLMp4X+esht9E= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.20.0/go.mod h1:66XlN7QpQKqIvw8e2UbCXV5X8wGnEw851nT9BjJ75dY= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 h1:fKv05WFWHCXQmUTehW1eEZvXJP65Qv00W4V01B1EqSA= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0 h1:JLpKc1QpkaUXEFgN68/Q9XgF0XgbVl/IXd8S1KUcEV4= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.20.0/go.mod h1:VJtgUHCz38obs58oEjNjEre6IaHmR+s7o4DvX74knq4= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0 h1:b60rxWT/EwcSA4l/zXfqTZp3udXJ1fKtz7+Qwat8OjQ= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.20.0/go.mod 
h1:6jM34grB+zhMrzWgM0V8B6vyIJ/75oAfjcx/mJWv6cE= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0 h1:0OFAPO964qsj6BzKs/hbAkpO/IIHp7vN1klKrohzULA= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.20.0/go.mod h1:IDaKpBfDtw8eWBLtXR14HB5dsXDxS4VRUR0OL5rlRT8= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.21.0 h1:U+p1i7+upWb4qOIOOvjS/92iMUGlSzEC1tRxVo0Lg8Y= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.21.0/go.mod h1:dOjp1lg4jwYyIbpnqW+DoOV8qD+70C+lgpINFvUqasQ= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.21.0 h1:VS4NTqwczwezMVvI6A7xYR3ugPmMUJ4FcdFrsdnZI2I= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.21.0/go.mod h1:66XlN7QpQKqIvw8e2UbCXV5X8wGnEw851nT9BjJ75dY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.21.0 h1:hgbTFS6SkqbzOiWSfP58dZ/Jpjlmv6dpD4+V4LDHm2Q= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.21.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.21.0 h1:XD9Kd+baO66+tfbdanOFSMGEfwWfnrn/IxG/Dc5bv5I= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.21.0/go.mod h1:9ByLz9jISc176DzjIdaRfRKwaitqF8ie6RTvfP8Aufo= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.21.0 h1:8nW8jfcCIWzxWrpI31C0QYoOjTaUGp6USCwiRbP5Fp4= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.21.0/go.mod h1:wuatEozcLYinJ0WYf0MlVTFtTzEmf+qyJet0H9foVAs= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.21.0 h1:/Dp1WBvekdusS9Tw9pLE7RG04eluNktQ29arLS4SpGM= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.21.0/go.mod h1:asNuwNy1O2HbadkcZVuqmFGonfEzXS/SBvOo8V1MJvQ= github.com/DataDog/sketches-go v1.4.6 h1:acd5fb+QdUzGrosfNLwrIhqyrbMORpvBy7mE+vHlT3I= github.com/DataDog/sketches-go v1.4.6/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg= github.com/DataDog/viper v1.13.5 h1:SZMcyMknYQN2jRY/40A16gUXexlNJOI8sDs1cWZnI64= From 9c382d94db7b469dddd8bcada2ea95b386800bcb Mon Sep 17 00:00:00 2001 From: Paulo Janotti Date: Fri, 8 Nov 2024 08:40:31 -0800 Subject: [PATCH 17/24] Input operator stop must not crash even if start was not called (#36262) --- pkg/stanza/operator/input/windows/input.go | 16 +++++++++++----- pkg/stanza/operator/input/windows/input_test.go | 6 ++++++ 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/pkg/stanza/operator/input/windows/input.go b/pkg/stanza/operator/input/windows/input.go index 76d79bbfa12e..5ad0db5c4fb9 100644 --- a/pkg/stanza/operator/input/windows/input.go +++ b/pkg/stanza/operator/input/windows/input.go @@ -154,22 +154,28 @@ func (i *Input) Start(persister operator.Persister) error { // Stop will stop reading events from a subscription. func (i *Input) Stop() error { - i.cancel() + // Warning: all calls made below must be safe to be done even if Start() was not called or failed. 
+ + if i.cancel != nil { + i.cancel() + } + i.wg.Wait() + + var errs error + if err := i.subscription.Close(); err != nil { - return fmt.Errorf("failed to close subscription: %w", err) + errs = errors.Join(errs, fmt.Errorf("failed to close subscription: %w", err)) } if err := i.bookmark.Close(); err != nil { - return fmt.Errorf("failed to close bookmark: %w", err) + errs = errors.Join(errs, fmt.Errorf("failed to close bookmark: %w", err)) } if err := i.publisherCache.evictAll(); err != nil { - return fmt.Errorf("failed to close publishers: %w", err) + errs = errors.Join(errs, fmt.Errorf("failed to close publishers: %w", err)) } - return i.stopRemoteSession() + return errors.Join(errs, i.stopRemoteSession()) } // readOnInterval will read events with respect to the polling interval until it reaches the end of the channel. diff --git a/pkg/stanza/operator/input/windows/input_test.go b/pkg/stanza/operator/input/windows/input_test.go index 6d530f89bca1..462dad9a499a 100644 --- a/pkg/stanza/operator/input/windows/input_test.go +++ b/pkg/stanza/operator/input/windows/input_test.go @@ -24,6 +24,12 @@ func newTestInput() *Input { }) } +// TestInputCreate_Stop ensures the input correctly shuts down even if it was never started. +func TestInputCreate_Stop(t *testing.T) { + input := newTestInput() + assert.NoError(t, input.Stop()) +} + // TestInputStart_LocalSubscriptionError ensures the input correctly handles local subscription errors. func TestInputStart_LocalSubscriptionError(t *testing.T) { persister := testutil.NewMockPersister("") From 234b5a744a2677a9a88affdaec2cd7c6c9e2bb63 Mon Sep 17 00:00:00 2001 From: Paulo Janotti Date: Fri, 8 Nov 2024 13:13:49 -0800 Subject: [PATCH 18/24] [pkg/stanza] Pass buffer by reference so buffer changes are not lost (#36252) The root of the crash reported in #36179 was that the `Buffer` struct was being passed by value in recursive calls: the recursive call allocated the needed amount, but after it returned, the caller attempted to read a buffer that was larger than the one it had allocated, because the reallocation was lost. The crash would be hit whenever an XML document was larger than the default buffer size (16KiB). The code is a bit hard to test because its full usage actually happens in the receiver, where the buffer and its components are not visible. I'll look to add a test in a follow-up. cc @djaglowski Skipping changelog since #36179 covers it. --------- Co-authored-by: Daniel Jaglowski --- pkg/stanza/operator/input/windows/bookmark.go | 2 +- pkg/stanza/operator/input/windows/buffer.go | 4 ++-- pkg/stanza/operator/input/windows/buffer_test.go | 11 ----------- pkg/stanza/operator/input/windows/event.go | 6 +++--- pkg/stanza/operator/input/windows/input.go | 2 +- 5 files changed, 7 insertions(+), 18 deletions(-) diff --git a/pkg/stanza/operator/input/windows/bookmark.go b/pkg/stanza/operator/input/windows/bookmark.go index 83ee23f665b9..23a7e8d2f939 100644 --- a/pkg/stanza/operator/input/windows/bookmark.go +++ b/pkg/stanza/operator/input/windows/bookmark.go @@ -54,7 +54,7 @@ func (b *Bookmark) Update(event Event) error { } // Render will render the bookmark as xml. 
-func (b *Bookmark) Render(buffer Buffer) (string, error) { +func (b *Bookmark) Render(buffer *Buffer) (string, error) { if b.handle == 0 { return "", fmt.Errorf("bookmark handle is not open") } diff --git a/pkg/stanza/operator/input/windows/buffer.go b/pkg/stanza/operator/input/windows/buffer.go index 3d083bb76693..3f939b38f42a 100644 --- a/pkg/stanza/operator/input/windows/buffer.go +++ b/pkg/stanza/operator/input/windows/buffer.go @@ -75,8 +75,8 @@ func (b *Buffer) FirstByte() *byte { } // NewBuffer creates a new buffer with the default buffer size -func NewBuffer() Buffer { - return Buffer{ +func NewBuffer() *Buffer { + return &Buffer{ buffer: make([]byte, defaultBufferSize), } } diff --git a/pkg/stanza/operator/input/windows/buffer_test.go b/pkg/stanza/operator/input/windows/buffer_test.go index 78f654742495..f8376706ec78 100644 --- a/pkg/stanza/operator/input/windows/buffer_test.go +++ b/pkg/stanza/operator/input/windows/buffer_test.go @@ -21,17 +21,6 @@ func TestBufferReadBytes(t *testing.T) { require.Equal(t, utf8, bytes) } -func TestBufferReadBytesOverflow(t *testing.T) { - buffer := NewBuffer() - utf8 := []byte("test") - utf16, _ := unicode.UTF16(unicode.LittleEndian, unicode.UseBOM).NewEncoder().Bytes(utf8) - copy(buffer.buffer, utf16) - offset := uint32(len(utf16)) - bytes, err := buffer.ReadBytes(offset * 2) - require.NoError(t, err) - require.Equal(t, utf8, bytes) -} - func TestBufferReadWideBytes(t *testing.T) { buffer := NewBuffer() utf8 := []byte("test") diff --git a/pkg/stanza/operator/input/windows/event.go b/pkg/stanza/operator/input/windows/event.go index c55703648d25..9fc2bc28b071 100644 --- a/pkg/stanza/operator/input/windows/event.go +++ b/pkg/stanza/operator/input/windows/event.go @@ -27,7 +27,7 @@ type Event struct { } // GetPublisherName will get the publisher name of the event. -func (e *Event) GetPublisherName(buffer Buffer) (string, error) { +func (e *Event) GetPublisherName(buffer *Buffer) (string, error) { if e.handle == 0 { return "", fmt.Errorf("event handle does not exist") } @@ -77,7 +77,7 @@ func NewEvent(handle uintptr) Event { } // RenderSimple will render the event as EventXML without formatted info. -func (e *Event) RenderSimple(buffer Buffer) (*EventXML, error) { +func (e *Event) RenderSimple(buffer *Buffer) (*EventXML, error) { if e.handle == 0 { return nil, fmt.Errorf("event handle does not exist") } @@ -100,7 +100,7 @@ func (e *Event) RenderSimple(buffer Buffer) (*EventXML, error) { } // RenderDeep will render the event as EventXML with all available formatted info. 
-func (e *Event) RenderDeep(buffer Buffer, publisher Publisher) (*EventXML, error) { +func (e *Event) RenderDeep(buffer *Buffer, publisher Publisher) (*EventXML, error) { if e.handle == 0 { return nil, fmt.Errorf("event handle does not exist") } diff --git a/pkg/stanza/operator/input/windows/input.go b/pkg/stanza/operator/input/windows/input.go index 5ad0db5c4fb9..a7cace3e7363 100644 --- a/pkg/stanza/operator/input/windows/input.go +++ b/pkg/stanza/operator/input/windows/input.go @@ -24,7 +24,7 @@ import ( type Input struct { helper.InputOperator bookmark Bookmark - buffer Buffer + buffer *Buffer channel string maxReads int startAt string From 68f39ff0e623fcebe8a2ac9129edb63c2c5e4317 Mon Sep 17 00:00:00 2001 From: Argannor <4489279+Argannor@users.noreply.github.com> Date: Fri, 8 Nov 2024 22:15:02 +0100 Subject: [PATCH 19/24] [fix][prometheusexporter] Race condition between start and shutdown (#36164) #### Description Adjusted the Start and Shutdown sequence of the prometheusexporter to prevent a race condition causing the `close tcp 127.0.0.1:8999: use of closed network connection` error as observed in #36139. The behaviour was changed in the following ways: - If an error occurs during the creation of the server, close the listener immediately, leaving shutdown a noop - Since `srv.Shutdown` will close all open listeners, the explicit call to `ln.Close` in the shutdown routine was removed #### Link to tracking issue Fixes #36139 #### Testing Unit testing: I temporarily increased the number of iterations of `TestPrometheusExporter` to 2000. The error no longer occurred. However, another error sporadically occurred: ``` === RUN TestPrometheusExporter prometheus_test.go:103: Error Trace: C:/development/code/opentelemetry-collector-contrib/exporter/prometheusexporter/prometheus_test.go:84 Error: Received unexpected error: listen tcp 127.0.0.1:8999: bind: Only one usage of each socket address (protocol/network address/port) is normally permitted. Test: TestPrometheusExporter --- FAIL: TestPrometheusExporter (1.16s) ``` I assume that this is because the OS (in my case Windows) won't always close the underlying sockets immediately, blocking the address for some time afterwards. I'm not sure there is anything we can do about that. --------- Signed-off-by: Argannor --- ..._prometheusexporter-shutdown-server-2.yaml | 27 +++++++++++++++++++ exporter/prometheusexporter/prometheus.go | 14 ++++------ 2 files changed, 32 insertions(+), 9 deletions(-) create mode 100644 .chloggen/fix_prometheusexporter-shutdown-server-2.yaml diff --git a/.chloggen/fix_prometheusexporter-shutdown-server-2.yaml b/.chloggen/fix_prometheusexporter-shutdown-server-2.yaml new file mode 100644 index 000000000000..7b01af232225 --- /dev/null +++ b/.chloggen/fix_prometheusexporter-shutdown-server-2.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: bug_fix + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: prometheusexporter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: "Fixes a race condition between the exporter start and shutdown functions." + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [36139] + +# (Optional) One or more lines of additional information to render under the primary note. 
+# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [user] \ No newline at end of file diff --git a/exporter/prometheusexporter/prometheus.go b/exporter/prometheusexporter/prometheus.go index 438fb3acdd07..8de4fa367e88 100644 --- a/exporter/prometheusexporter/prometheus.go +++ b/exporter/prometheusexporter/prometheus.go @@ -66,16 +66,12 @@ func (pe *prometheusExporter) Start(ctx context.Context, host component.Host) er mux := http.NewServeMux() mux.Handle("/metrics", pe.handler) srv, err := pe.config.ToServer(ctx, host, pe.settings, mux) - pe.shutdownFunc = func(ctx context.Context) error { - errLn := ln.Close() - if srv == nil { - return errLn - } - errSrv := srv.Shutdown(ctx) - return errors.Join(errLn, errSrv) - } if err != nil { - return err + lnerr := ln.Close() + return errors.Join(err, lnerr) + } + pe.shutdownFunc = func(ctx context.Context) error { + return srv.Shutdown(ctx) } go func() { _ = srv.Serve(ln) From 5618c7cc43bfc3614ab30e5eb0b75cad50be2bb2 Mon Sep 17 00:00:00 2001 From: Paulo Janotti Date: Fri, 8 Nov 2024 13:15:27 -0800 Subject: [PATCH 20/24] [test] Fix opampsupervisor test on Windows (#36282) Fix #36278 The issue is that the temporary path used in the test is passed as text into a YAML file, so the Windows dir separator ends up as an escape char in the YAML. 
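For illustration, a minimal standalone sketch (not part of the patch; the path value and the quoting of the template are assumptions) of why the raw path breaks the generated config and why doubling the backslashes avoids it:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical value like the one t.TempDir() returns on Windows.
	tempDir := `C:\Users\runner\AppData\Local\Temp\TestSupervisor123`

	// Dropped verbatim between double quotes in a YAML template, every `\`
	// starts a YAML escape sequence (`\U`, `\r`, ...), so the config either
	// fails to parse or yields a mangled path.
	fmt.Printf("storage_dir: \"%s\"\n", tempDir)

	// Doubling the backslashes first keeps them literal after the YAML
	// parser processes the double-quoted scalar, which is what the test
	// change below does with strings.ReplaceAll.
	escaped := strings.ReplaceAll(tempDir, `\`, `\\`)
	fmt.Printf("storage_dir: \"%s\"\n", escaped)
}
```

The actual fix is just the `strings.ReplaceAll` call on the `storage_dir` value in `getSupervisorConfig`, shown in the diff below.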
--- cmd/opampsupervisor/e2e_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/opampsupervisor/e2e_test.go b/cmd/opampsupervisor/e2e_test.go index c0e080faa1e2..8e106b2b65f6 100644 --- a/cmd/opampsupervisor/e2e_test.go +++ b/cmd/opampsupervisor/e2e_test.go @@ -188,7 +188,7 @@ func getSupervisorConfig(t *testing.T, configType string, extraConfigData map[st "goos": runtime.GOOS, "goarch": runtime.GOARCH, "extension": extension, - "storage_dir": t.TempDir(), + "storage_dir": strings.ReplaceAll(t.TempDir(), "\\", "\\\\"), } for key, val := range extraConfigData { From 2612314ed7652a1379381e5b9068352a4af18fa5 Mon Sep 17 00:00:00 2001 From: Daniel Jaglowski Date: Fri, 8 Nov 2024 22:09:11 -0500 Subject: [PATCH 21/24] [connector/routing] Support metric routing (#36236) --- .chloggen/routing-by-metrics.yaml | 27 +++ connector/routingconnector/README.md | 4 +- connector/routingconnector/config.go | 2 +- connector/routingconnector/config_test.go | 16 ++ .../internal/pmetricutil/metrics.go | 43 ++++ .../internal/pmetricutil/metrics_test.go | 144 ++++++++++++ .../internal/pmetricutiltest/metrics.go | 56 +++++ .../internal/pmetricutiltest/metrics_test.go | 50 +++++ connector/routingconnector/logs_test.go | 32 ++- connector/routingconnector/metrics.go | 10 + connector/routingconnector/metrics_test.go | 205 +++++++++++++++++- connector/routingconnector/router.go | 24 +- 12 files changed, 588 insertions(+), 25 deletions(-) create mode 100644 .chloggen/routing-by-metrics.yaml diff --git a/.chloggen/routing-by-metrics.yaml b/.chloggen/routing-by-metrics.yaml new file mode 100644 index 000000000000..070210e19a55 --- /dev/null +++ b/.chloggen/routing-by-metrics.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: routingconnector + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add ability to route by metric context + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [36236] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/connector/routingconnector/README.md b/connector/routingconnector/README.md index 6a45f230fb20..02ad40317832 100644 --- a/connector/routingconnector/README.md +++ b/connector/routingconnector/README.md @@ -33,7 +33,7 @@ If you are not already familiar with connectors, you may find it helpful to firs The following settings are available: - `table (required)`: the routing table for this connector. -- `table.context (optional, default: resource)`: the [OTTL Context] in which the statement will be evaluated. 
Currently, only `resource`, `log`, and `request` are supported. +- `table.context (optional, default: resource)`: the [OTTL Context] in which the statement will be evaluated. Currently, only `resource`, `metric`, `log`, and `request` are supported. - `table.statement`: the routing condition provided as the [OTTL] statement. Required if `table.condition` is not provided. May not be used for `request` context. - `table.condition`: the routing condition provided as the [OTTL] condition. Required if `table.statement` is not provided. Required for `request` context. - `table.pipelines (required)`: the list of pipelines to use when the routing condition is met. @@ -43,7 +43,7 @@ The following settings are available: ### Limitations -- The `match_once` setting is only supported when using the `resource` context. If any routes use `log` or `request` context, `match_once` must be set to `true`. +- The `match_once` setting is only supported when using the `resource` context. If any routes use `metric`, `log` or `request` context, `match_once` must be set to `true`. - The `request` context requires use of the `condition` setting, and relies on a very limited grammar. Conditions must be in the form of `request["key"] == "value"` or `request["key"] != "value"`. (In the future, this grammar may be expanded to support more complex conditions.) ### Supported [OTTL] functions diff --git a/connector/routingconnector/config.go b/connector/routingconnector/config.go index f526ec460ab9..fb2f838474c7 100644 --- a/connector/routingconnector/config.go +++ b/connector/routingconnector/config.go @@ -77,7 +77,7 @@ func (c *Config) Validate() error { return err } fallthrough - case "log": // ok + case "metric", "log": // ok if !c.MatchOnce { return fmt.Errorf(`%q context is not supported with "match_once: false"`, item.Context) } diff --git a/connector/routingconnector/config_test.go b/connector/routingconnector/config_test.go index 0cd0456ec8af..b79eb4ee1bf3 100644 --- a/connector/routingconnector/config_test.go +++ b/connector/routingconnector/config_test.go @@ -218,6 +218,22 @@ func TestValidateConfig(t *testing.T) { }, error: "invalid context: invalid", }, + { + name: "metric context with match_once false", + config: &Config{ + MatchOnce: false, + Table: []RoutingTableItem{ + { + Context: "metric", + Statement: `route() where attributes["attr"] == "acme"`, + Pipelines: []pipeline.ID{ + pipeline.NewIDWithName(pipeline.SignalTraces, "otlp"), + }, + }, + }, + }, + error: `"metric" context is not supported with "match_once: false"`, + }, { name: "log context with match_once false", config: &Config{ diff --git a/connector/routingconnector/internal/pmetricutil/metrics.go b/connector/routingconnector/internal/pmetricutil/metrics.go index 1ca6d23b1ad7..58199dc02fe8 100644 --- a/connector/routingconnector/internal/pmetricutil/metrics.go +++ b/connector/routingconnector/internal/pmetricutil/metrics.go @@ -16,3 +16,46 @@ func MoveResourcesIf(from, to pmetric.Metrics, f func(pmetric.ResourceMetrics) b return true }) } + +// MoveMetricsWithContextIf calls f sequentially for each Metric present in the first pmetric.Metrics. +// If f returns true, the element is removed from the first pmetric.Metrics and added to the second pmetric.Metrics. +// Notably, the Resource and Scope associated with the Metric are created in the second pmetric.Metrics only once. +// Resources or Scopes are removed from the original if they become empty. All ordering is preserved. 
+func MoveMetricsWithContextIf(from, to pmetric.Metrics, f func(pmetric.ResourceMetrics, pmetric.ScopeMetrics, pmetric.Metric) bool) { + rms := from.ResourceMetrics() + for i := 0; i < rms.Len(); i++ { + rm := rms.At(i) + sms := rm.ScopeMetrics() + var rmCopy *pmetric.ResourceMetrics + for j := 0; j < sms.Len(); j++ { + sm := sms.At(j) + ms := sm.Metrics() + var smCopy *pmetric.ScopeMetrics + ms.RemoveIf(func(m pmetric.Metric) bool { + if !f(rm, sm, m) { + return false + } + if rmCopy == nil { + rmc := to.ResourceMetrics().AppendEmpty() + rmCopy = &rmc + rm.Resource().CopyTo(rmCopy.Resource()) + rmCopy.SetSchemaUrl(rm.SchemaUrl()) + } + if smCopy == nil { + smc := rmCopy.ScopeMetrics().AppendEmpty() + smCopy = &smc + sm.Scope().CopyTo(smCopy.Scope()) + smCopy.SetSchemaUrl(sm.SchemaUrl()) + } + m.CopyTo(smCopy.Metrics().AppendEmpty()) + return true + }) + } + sms.RemoveIf(func(sm pmetric.ScopeMetrics) bool { + return sm.Metrics().Len() == 0 + }) + } + rms.RemoveIf(func(rm pmetric.ResourceMetrics) bool { + return rm.ScopeMetrics().Len() == 0 + }) +} diff --git a/connector/routingconnector/internal/pmetricutil/metrics_test.go b/connector/routingconnector/internal/pmetricutil/metrics_test.go index 5b3d751c6826..8c23b4232246 100644 --- a/connector/routingconnector/internal/pmetricutil/metrics_test.go +++ b/connector/routingconnector/internal/pmetricutil/metrics_test.go @@ -80,3 +80,147 @@ func TestMoveResourcesIf(t *testing.T) { }) } } + +func TestMoveMetricsWithContextIf(t *testing.T) { + testCases := []struct { + name string + moveIf func(pmetric.ResourceMetrics, pmetric.ScopeMetrics, pmetric.Metric) bool + from pmetric.Metrics + to pmetric.Metrics + expectFrom pmetric.Metrics + expectTo pmetric.Metrics + }{ + { + name: "move_none", + moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, _ pmetric.Metric) bool { + return false + }, + from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + to: pmetric.NewMetrics(), + expectFrom: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectTo: pmetric.NewMetrics(), + }, + { + name: "move_all", + moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, _ pmetric.Metric) bool { + return true + }, + from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + to: pmetric.NewMetrics(), + expectFrom: pmetric.NewMetrics(), + expectTo: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + }, + { + name: "move_all_from_one_resource", + moveIf: func(rl pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, _ pmetric.Metric) bool { + rname, ok := rl.Resource().Attributes().Get("resourceName") + return ok && rname.AsString() == "resourceB" + }, + from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + to: pmetric.NewMetrics(), + expectFrom: pmetricutiltest.NewMetrics("A", "CD", "EF", "GH"), + expectTo: pmetricutiltest.NewMetrics("B", "CD", "EF", "GH"), + }, + { + name: "move_all_from_one_scope", + moveIf: func(rl pmetric.ResourceMetrics, sl pmetric.ScopeMetrics, _ pmetric.Metric) bool { + rname, ok := rl.Resource().Attributes().Get("resourceName") + return ok && rname.AsString() == "resourceB" && sl.Scope().Name() == "scopeC" + }, + from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + to: pmetric.NewMetrics(), + expectFrom: pmetricutiltest.NewMetricsFromOpts( + pmetricutiltest.WithResource('A', + pmetricutiltest.WithScope('C', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH")), + pmetricutiltest.WithScope('D', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH")), + ), + 
pmetricutiltest.WithResource('B', + pmetricutiltest.WithScope('D', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH")), + ), + ), + expectTo: pmetricutiltest.NewMetrics("B", "C", "EF", "GH"), + }, + { + name: "move_all_from_one_scope_in_each_resource", + moveIf: func(_ pmetric.ResourceMetrics, sl pmetric.ScopeMetrics, _ pmetric.Metric) bool { + return sl.Scope().Name() == "scopeD" + }, + from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + to: pmetric.NewMetrics(), + expectFrom: pmetricutiltest.NewMetrics("AB", "C", "EF", "GH"), + expectTo: pmetricutiltest.NewMetrics("AB", "D", "EF", "GH"), + }, + { + name: "move_one", + moveIf: func(rl pmetric.ResourceMetrics, sl pmetric.ScopeMetrics, m pmetric.Metric) bool { + rname, ok := rl.Resource().Attributes().Get("resourceName") + return ok && rname.AsString() == "resourceA" && sl.Scope().Name() == "scopeD" && m.Name() == "metricF" + }, + from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + to: pmetric.NewMetrics(), + expectFrom: pmetricutiltest.NewMetricsFromOpts( + pmetricutiltest.WithResource('A', + pmetricutiltest.WithScope('C', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH")), + pmetricutiltest.WithScope('D', pmetricutiltest.WithMetric('E', "GH")), + ), + pmetricutiltest.WithResource('B', + pmetricutiltest.WithScope('C', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH")), + pmetricutiltest.WithScope('D', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH")), + ), + ), + expectTo: pmetricutiltest.NewMetrics("A", "D", "F", "GH"), + }, + { + name: "move_one_from_each_scope", + moveIf: func(_ pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, m pmetric.Metric) bool { + return m.Name() == "metricE" + }, + from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + to: pmetric.NewMetrics(), + expectFrom: pmetricutiltest.NewMetrics("AB", "CD", "F", "GH"), + expectTo: pmetricutiltest.NewMetrics("AB", "CD", "E", "GH"), + }, + { + name: "move_one_from_each_scope_in_one_resource", + moveIf: func(rl pmetric.ResourceMetrics, _ pmetric.ScopeMetrics, m pmetric.Metric) bool { + rname, ok := rl.Resource().Attributes().Get("resourceName") + return ok && rname.AsString() == "resourceB" && m.Name() == "metricE" + }, + from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + to: pmetric.NewMetrics(), + expectFrom: pmetricutiltest.NewMetricsFromOpts( + pmetricutiltest.WithResource('A', + pmetricutiltest.WithScope('C', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH")), + pmetricutiltest.WithScope('D', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH")), + ), + pmetricutiltest.WithResource('B', + pmetricutiltest.WithScope('C', pmetricutiltest.WithMetric('F', "GH")), + pmetricutiltest.WithScope('D', pmetricutiltest.WithMetric('F', "GH")), + ), + ), + expectTo: pmetricutiltest.NewMetrics("B", "CD", "E", "GH"), + }, + { + name: "move_some_to_preexisting", + moveIf: func(_ pmetric.ResourceMetrics, sl pmetric.ScopeMetrics, _ pmetric.Metric) bool { + return sl.Scope().Name() == "scopeD" + }, + from: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + to: pmetricutiltest.NewMetrics("1", "2", "3", "4"), + expectFrom: pmetricutiltest.NewMetrics("AB", "C", "EF", "GH"), + expectTo: pmetricutiltest.NewMetricsFromOpts( + pmetricutiltest.WithResource('1', pmetricutiltest.WithScope('2', pmetricutiltest.WithMetric('3', "4"))), + pmetricutiltest.WithResource('A', pmetricutiltest.WithScope('D', 
pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH"))), + pmetricutiltest.WithResource('B', pmetricutiltest.WithScope('D', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH"))), + ), + }, + } + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + pmetricutil.MoveMetricsWithContextIf(tt.from, tt.to, tt.moveIf) + assert.NoError(t, pmetrictest.CompareMetrics(tt.expectFrom, tt.from), "from not modified as expected") + assert.NoError(t, pmetrictest.CompareMetrics(tt.expectTo, tt.to), "to not as expected") + }) + } +} diff --git a/connector/routingconnector/internal/pmetricutiltest/metrics.go b/connector/routingconnector/internal/pmetricutiltest/metrics.go index ce8b2cb06d5e..a908e1638e63 100644 --- a/connector/routingconnector/internal/pmetricutiltest/metrics.go +++ b/connector/routingconnector/internal/pmetricutiltest/metrics.go @@ -43,3 +43,59 @@ func NewMetrics(resourceIDs, scopeIDs, metricIDs, dataPointIDs string) pmetric.M } return md } + +type Resource struct { + id byte + scopes []Scope +} + +type Scope struct { + id byte + metrics []Metric +} + +type Metric struct { + id byte + dataPoints string +} + +func WithResource(id byte, scopes ...Scope) Resource { + r := Resource{id: id} + r.scopes = append(r.scopes, scopes...) + return r +} + +func WithScope(id byte, metrics ...Metric) Scope { + s := Scope{id: id} + s.metrics = append(s.metrics, metrics...) + return s +} + +func WithMetric(id byte, dataPoints string) Metric { + return Metric{id: id, dataPoints: dataPoints} +} + +// NewMetricsFromOpts creates a pmetric.Metrics with the specified resources, scopes, metrics, +// and data points. The general idea is the same as NewMetrics, but this function allows for +// more flexibility in creating non-uniform structures. 
+func NewMetricsFromOpts(resources ...Resource) pmetric.Metrics { + md := pmetric.NewMetrics() + for _, resource := range resources { + r := md.ResourceMetrics().AppendEmpty() + r.Resource().Attributes().PutStr("resourceName", "resource"+string(resource.id)) + for _, scope := range resource.scopes { + s := r.ScopeMetrics().AppendEmpty() + s.Scope().SetName("scope" + string(scope.id)) + for _, metric := range scope.metrics { + m := s.Metrics().AppendEmpty() + m.SetName("metric" + string(metric.id)) + dps := m.SetEmptyGauge().DataPoints() + for i := 0; i < len(metric.dataPoints); i++ { + dp := dps.AppendEmpty() + dp.Attributes().PutStr("dpName", "dp"+string(metric.dataPoints[i])) + } + } + } + } + return md +} diff --git a/connector/routingconnector/internal/pmetricutiltest/metrics_test.go b/connector/routingconnector/internal/pmetricutiltest/metrics_test.go index 069a27f8282c..3be7405a1e14 100644 --- a/connector/routingconnector/internal/pmetricutiltest/metrics_test.go +++ b/connector/routingconnector/internal/pmetricutiltest/metrics_test.go @@ -18,6 +18,7 @@ func TestNewMetrics(t *testing.T) { t.Run("empty", func(t *testing.T) { expected := pmetric.NewMetrics() assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetrics("", "", "", ""))) + assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetricsFromOpts())) }) t.Run("simple", func(t *testing.T) { @@ -34,7 +35,15 @@ func TestNewMetrics(t *testing.T) { dp.Attributes().PutStr("dpName", "dpD") // resourceA.scopeB.metricC.dpD return md }() + fromOpts := pmetricutiltest.NewMetricsFromOpts( + pmetricutiltest.WithResource('A', + pmetricutiltest.WithScope('B', + pmetricutiltest.WithMetric('C', "D"), + ), + ), + ) assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetrics("A", "B", "C", "D"))) + assert.NoError(t, pmetrictest.CompareMetrics(expected, fromOpts)) }) t.Run("two_resources", func(t *testing.T) { @@ -60,7 +69,20 @@ func TestNewMetrics(t *testing.T) { dp.Attributes().PutStr("dpName", "dpE") // resourceB.scopeC.metricD.dpE return md }() + fromOpts := pmetricutiltest.NewMetricsFromOpts( + pmetricutiltest.WithResource('A', + pmetricutiltest.WithScope('C', + pmetricutiltest.WithMetric('D', "E"), + ), + ), + pmetricutiltest.WithResource('B', + pmetricutiltest.WithScope('C', + pmetricutiltest.WithMetric('D', "E"), + ), + ), + ) assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetrics("AB", "C", "D", "E"))) + assert.NoError(t, pmetrictest.CompareMetrics(expected, fromOpts)) }) t.Run("two_scopes", func(t *testing.T) { @@ -84,7 +106,18 @@ func TestNewMetrics(t *testing.T) { dp.Attributes().PutStr("dpName", "dpE") // resourceA.scopeC.metricD.dpE return md }() + fromOpts := pmetricutiltest.NewMetricsFromOpts( + pmetricutiltest.WithResource('A', + pmetricutiltest.WithScope('B', + pmetricutiltest.WithMetric('D', "E"), + ), + pmetricutiltest.WithScope('C', + pmetricutiltest.WithMetric('D', "E"), + ), + ), + ) assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetrics("A", "BC", "D", "E"))) + assert.NoError(t, pmetrictest.CompareMetrics(expected, fromOpts)) }) t.Run("two_metrics", func(t *testing.T) { @@ -106,7 +139,16 @@ func TestNewMetrics(t *testing.T) { dp.Attributes().PutStr("dpName", "dpE") // resourceA.scopeB.metricD.dpE return md }() + fromOpts := pmetricutiltest.NewMetricsFromOpts( + pmetricutiltest.WithResource('A', + pmetricutiltest.WithScope('B', + pmetricutiltest.WithMetric('C', "E"), + pmetricutiltest.WithMetric('D', "E"), + 
), + ), + ) assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetrics("A", "B", "CD", "E"))) + assert.NoError(t, pmetrictest.CompareMetrics(expected, fromOpts)) }) t.Run("two_datapoints", func(t *testing.T) { @@ -125,6 +167,14 @@ func TestNewMetrics(t *testing.T) { dp.Attributes().PutStr("dpName", "dpE") // resourceA.scopeB.metricC.dpE return md }() + fromOpts := pmetricutiltest.NewMetricsFromOpts( + pmetricutiltest.WithResource('A', + pmetricutiltest.WithScope('B', + pmetricutiltest.WithMetric('C', "DE"), + ), + ), + ) assert.NoError(t, pmetrictest.CompareMetrics(expected, pmetricutiltest.NewMetrics("A", "B", "C", "DE"))) + assert.NoError(t, pmetrictest.CompareMetrics(expected, fromOpts)) }) } diff --git a/connector/routingconnector/logs_test.go b/connector/routingconnector/logs_test.go index 24747154c213..c0198fe16523 100644 --- a/connector/routingconnector/logs_test.go +++ b/connector/routingconnector/logs_test.go @@ -475,22 +475,20 @@ func TestLogsConnectorDetailed(t *testing.T) { isAcme := `request["X-Tenant"] == "acme"` - isAnyResource := `attributes["resourceName"] != nil` isResourceA := `attributes["resourceName"] == "resourceA"` isResourceB := `attributes["resourceName"] == "resourceB"` isResourceX := `attributes["resourceName"] == "resourceX"` isResourceY := `attributes["resourceName"] == "resourceY"` - isScopeC := `instrumentation_scope.name == "scopeC"` - isScopeD := `instrumentation_scope.name == "scopeD"` - - isAnyLog := `body != nil` isLogE := `body == "logE"` isLogF := `body == "logF"` isLogX := `body == "logX"` isLogY := `body == "logY"` - and, or := " and ", " or " + isScopeCFromLowerContext := `instrumentation_scope.name == "scopeC"` + isScopeDFromLowerContext := `instrumentation_scope.name == "scopeD"` + + isResourceBFromLowerContext := `resource.attributes["resourceName"] == "resourceB"` testCases := []struct { name string @@ -594,7 +592,7 @@ func TestLogsConnectorDetailed(t *testing.T) { { name: "resource/all_match_first_only", cfg: testConfig( - withRoute("resource", isAnyResource, idSink0), + withRoute("resource", "true", idSink0), withRoute("resource", isResourceY, idSink1), withDefault(idSinkD), ), @@ -607,7 +605,7 @@ func TestLogsConnectorDetailed(t *testing.T) { name: "resource/all_match_last_only", cfg: testConfig( withRoute("resource", isResourceX, idSink0), - withRoute("resource", isAnyResource, idSink1), + withRoute("resource", "true", idSink1), withDefault(idSinkD), ), input: plogutiltest.NewLogs("AB", "CD", "EF"), @@ -618,8 +616,8 @@ func TestLogsConnectorDetailed(t *testing.T) { { name: "resource/all_match_only_once", cfg: testConfig( - withRoute("resource", isAnyResource, idSink0), - withRoute("resource", isResourceA+or+isResourceB, idSink1), + withRoute("resource", "true", idSink0), + withRoute("resource", isResourceA+" or "+isResourceB, idSink1), withDefault(idSinkD), ), input: plogutiltest.NewLogs("AB", "CD", "EF"), @@ -688,7 +686,7 @@ func TestLogsConnectorDetailed(t *testing.T) { { name: "log/all_match_first_only", cfg: testConfig( - withRoute("log", isAnyLog, idSink0), + withRoute("log", "true", idSink0), withRoute("log", isLogY, idSink1), withDefault(idSinkD), ), @@ -701,7 +699,7 @@ func TestLogsConnectorDetailed(t *testing.T) { name: "log/all_match_last_only", cfg: testConfig( withRoute("log", isLogX, idSink0), - withRoute("log", isAnyLog, idSink1), + withRoute("log", "true", idSink1), withDefault(idSinkD), ), input: plogutiltest.NewLogs("AB", "CD", "EF"), @@ -712,8 +710,8 @@ func TestLogsConnectorDetailed(t *testing.T) { 
{ name: "log/all_match_only_once", cfg: testConfig( - withRoute("log", isAnyLog, idSink0), - withRoute("log", isLogE+or+isLogF, idSink1), + withRoute("log", "true", idSink0), + withRoute("log", isLogE+" or "+isLogF, idSink1), withDefault(idSinkD), ), input: plogutiltest.NewLogs("AB", "CD", "EF"), @@ -782,7 +780,7 @@ func TestLogsConnectorDetailed(t *testing.T) { { name: "log/with_resource_condition", cfg: testConfig( - withRoute("log", "resource."+isResourceB+and+isAnyLog, idSink0), + withRoute("log", isResourceBFromLowerContext, idSink0), withRoute("log", isLogY, idSink1), withDefault(idSinkD), ), @@ -794,7 +792,7 @@ func TestLogsConnectorDetailed(t *testing.T) { { name: "log/with_scope_condition", cfg: testConfig( - withRoute("log", isScopeC+and+isAnyLog, idSink0), + withRoute("log", isScopeCFromLowerContext, idSink0), withRoute("log", isLogY, idSink1), withDefault(idSinkD), ), @@ -806,7 +804,7 @@ func TestLogsConnectorDetailed(t *testing.T) { { name: "log/with_resource_and_scope_conditions", cfg: testConfig( - withRoute("log", "resource."+isResourceB+and+isScopeD+and+isAnyLog, idSink0), + withRoute("log", isResourceBFromLowerContext+" and "+isScopeDFromLowerContext, idSink0), withRoute("log", isLogY, idSink1), withDefault(idSinkD), ), diff --git a/connector/routingconnector/metrics.go b/connector/routingconnector/metrics.go index 8f25c586bf71..874d8c2d9887 100644 --- a/connector/routingconnector/metrics.go +++ b/connector/routingconnector/metrics.go @@ -15,6 +15,7 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/connector/routingconnector/internal/pmetricutil" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource" ) @@ -88,6 +89,15 @@ func (c *metricsConnector) switchMetrics(ctx context.Context, md pmetric.Metrics return isMatch }, ) + case "metric": + pmetricutil.MoveMetricsWithContextIf(md, matchedMetrics, + func(rm pmetric.ResourceMetrics, sm pmetric.ScopeMetrics, m pmetric.Metric) bool { + mtx := ottlmetric.NewTransformContext(m, sm.Metrics(), sm.Scope(), rm.Resource(), sm, rm) + _, isMatch, err := route.metricStatement.Execute(ctx, mtx) + errs = errors.Join(errs, err) + return isMatch + }, + ) } if errs != nil { if c.config.ErrorMode == ottl.PropagateError { diff --git a/connector/routingconnector/metrics_test.go b/connector/routingconnector/metrics_test.go index 0fba4eabc748..f87a15ff613c 100644 --- a/connector/routingconnector/metrics_test.go +++ b/connector/routingconnector/metrics_test.go @@ -505,12 +505,21 @@ func TestMetricsConnectorDetailed(t *testing.T) { isAcme := `request["X-Tenant"] == "acme"` - isAnyResource := `attributes["resourceName"] != nil` isResourceA := `attributes["resourceName"] == "resourceA"` isResourceB := `attributes["resourceName"] == "resourceB"` isResourceX := `attributes["resourceName"] == "resourceX"` isResourceY := `attributes["resourceName"] == "resourceY"` + isMetricE := `name == "metricE"` + isMetricF := `name == "metricF"` + isMetricX := `name == "metricX"` + isMetricY := `name == "metricY"` + + isScopeCFromLowerContext := `instrumentation_scope.name == "scopeC"` + isScopeDFromLowerContext := `instrumentation_scope.name == "scopeD"` + + isResourceBFromLowerContext := `resource.attributes["resourceName"] == "resourceB"` + testCases := []struct { name string cfg *Config @@ -613,7 +622,7 @@ func TestMetricsConnectorDetailed(t 
*testing.T) { { name: "resource/all_match_first_only", cfg: testConfig( - withRoute("resource", isAnyResource, idSink0), + withRoute("resource", "true", idSink0), withRoute("resource", isResourceY, idSink1), withDefault(idSinkD), ), @@ -626,7 +635,7 @@ func TestMetricsConnectorDetailed(t *testing.T) { name: "resource/all_match_last_only", cfg: testConfig( withRoute("resource", isResourceX, idSink0), - withRoute("resource", isAnyResource, idSink1), + withRoute("resource", "true", idSink1), withDefault(idSinkD), ), input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), @@ -637,7 +646,7 @@ func TestMetricsConnectorDetailed(t *testing.T) { { name: "resource/all_match_only_once", cfg: testConfig( - withRoute("resource", isAnyResource, idSink0), + withRoute("resource", "true", idSink0), withRoute("resource", isResourceA+" or "+isResourceB, idSink1), withDefault(idSinkD), ), @@ -704,6 +713,168 @@ func TestMetricsConnectorDetailed(t *testing.T) { expectSink1: pmetric.Metrics{}, expectSinkD: pmetric.Metrics{}, }, + { + name: "metric/all_match_first_only", + cfg: testConfig( + withRoute("metric", "true", idSink0), + withRoute("metric", isMetricY, idSink1), + withDefault(idSinkD), + ), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink1: pmetric.Metrics{}, + expectSinkD: pmetric.Metrics{}, + }, + { + name: "metric/all_match_last_only", + cfg: testConfig( + withRoute("metric", isMetricX, idSink0), + withRoute("metric", "true", idSink1), + withDefault(idSinkD), + ), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetric.Metrics{}, + expectSink1: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSinkD: pmetric.Metrics{}, + }, + { + name: "metric/all_match_only_once", + cfg: testConfig( + withRoute("metric", "true", idSink0), + withRoute("metric", isMetricE+" or "+isMetricF, idSink1), + withDefault(idSinkD), + ), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink1: pmetric.Metrics{}, + expectSinkD: pmetric.Metrics{}, + }, + { + name: "metric/each_matches_one", + cfg: testConfig( + withRoute("metric", isMetricE, idSink0), + withRoute("metric", isMetricF, idSink1), + withDefault(idSinkD), + ), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetricutiltest.NewMetrics("AB", "CD", "E", "GH"), + expectSink1: pmetricutiltest.NewMetrics("AB", "CD", "F", "GH"), + expectSinkD: pmetric.Metrics{}, + }, + { + name: "metric/some_match_with_default", + cfg: testConfig( + withRoute("metric", isMetricX, idSink0), + withRoute("metric", isMetricF, idSink1), + withDefault(idSinkD), + ), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetric.Metrics{}, + expectSink1: pmetricutiltest.NewMetrics("AB", "CD", "F", "GH"), + expectSinkD: pmetricutiltest.NewMetrics("AB", "CD", "E", "GH"), + }, + { + name: "metric/some_match_without_default", + cfg: testConfig( + withRoute("metric", isMetricX, idSink0), + withRoute("metric", isMetricF, idSink1), + ), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetric.Metrics{}, + expectSink1: pmetricutiltest.NewMetrics("AB", "CD", "F", "GH"), + expectSinkD: pmetric.Metrics{}, + }, + { + name: "metric/match_none_with_default", + cfg: testConfig( + withRoute("metric", isMetricX, idSink0), + withRoute("metric", isMetricY, idSink1), + withDefault(idSinkD), + ), + input: 
pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetric.Metrics{}, + expectSink1: pmetric.Metrics{}, + expectSinkD: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + }, + { + name: "metric/match_none_without_default", + cfg: testConfig( + withRoute("metric", isMetricX, idSink0), + withRoute("metric", isMetricY, idSink1), + ), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetric.Metrics{}, + expectSink1: pmetric.Metrics{}, + expectSinkD: pmetric.Metrics{}, + }, + { + name: "metric/with_resource_condition", + cfg: testConfig( + withRoute("metric", isResourceBFromLowerContext, idSink0), + withRoute("metric", isMetricY, idSink1), + withDefault(idSinkD), + ), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetricutiltest.NewMetrics("B", "CD", "EF", "GH"), + expectSink1: pmetric.Metrics{}, + expectSinkD: pmetricutiltest.NewMetrics("A", "CD", "EF", "GH"), + }, + { + name: "metric/with_scope_condition", + cfg: testConfig( + withRoute("metric", isScopeCFromLowerContext, idSink0), + withRoute("metric", isMetricY, idSink1), + withDefault(idSinkD), + ), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetricutiltest.NewMetrics("AB", "C", "EF", "GH"), + expectSink1: pmetric.Metrics{}, + expectSinkD: pmetricutiltest.NewMetrics("AB", "D", "EF", "GH"), + }, + { + name: "metric/with_resource_and_scope_conditions", + cfg: testConfig( + withRoute("metric", isResourceBFromLowerContext+" and "+isScopeDFromLowerContext, idSink0), + withRoute("metric", isMetricY, idSink1), + withDefault(idSinkD), + ), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetricutiltest.NewMetrics("B", "D", "EF", "GH"), + expectSink1: pmetric.Metrics{}, + expectSinkD: pmetricutiltest.NewMetricsFromOpts( + pmetricutiltest.WithResource('A', + pmetricutiltest.WithScope('C', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH")), + pmetricutiltest.WithScope('D', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH")), + ), + pmetricutiltest.WithResource('B', + pmetricutiltest.WithScope('C', pmetricutiltest.WithMetric('E', "GH"), pmetricutiltest.WithMetric('F', "GH")), + ), + ), + }, + { + name: "mixed/match_resource_then_metrics", + cfg: testConfig( + withRoute("resource", isResourceA, idSink0), + withRoute("metric", isMetricE, idSink1), + withDefault(idSinkD), + ), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetricutiltest.NewMetrics("A", "CD", "EF", "GH"), + expectSink1: pmetricutiltest.NewMetrics("B", "CD", "E", "GH"), + expectSinkD: pmetricutiltest.NewMetrics("B", "CD", "F", "GH"), + }, + { + name: "mixed/match_metrics_then_resource", + cfg: testConfig( + withRoute("metric", isMetricE, idSink0), + withRoute("resource", isResourceB, idSink1), + withDefault(idSinkD), + ), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetricutiltest.NewMetrics("AB", "CD", "E", "GH"), + expectSink1: pmetricutiltest.NewMetrics("B", "CD", "F", "GH"), + expectSinkD: pmetricutiltest.NewMetrics("A", "CD", "F", "GH"), + }, { name: "mixed/match_resource_then_grpc_request", cfg: testConfig( @@ -717,6 +888,19 @@ func TestMetricsConnectorDetailed(t *testing.T) { expectSink1: pmetricutiltest.NewMetrics("B", "CD", "EF", "GH"), expectSinkD: pmetric.Metrics{}, }, + { + name: "mixed/match_metrics_then_grpc_request", + cfg: testConfig( + withRoute("metric", isMetricF, idSink0), + withRoute("request", isAcme, idSink1), + 
withDefault(idSinkD), + ), + ctx: withGRPCMetadata(context.Background(), map[string]string{"X-Tenant": "acme"}), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetricutiltest.NewMetrics("AB", "CD", "F", "GH"), + expectSink1: pmetricutiltest.NewMetrics("AB", "CD", "E", "GH"), + expectSinkD: pmetric.Metrics{}, + }, { name: "mixed/match_resource_then_http_request", cfg: testConfig( @@ -730,6 +914,19 @@ func TestMetricsConnectorDetailed(t *testing.T) { expectSink1: pmetricutiltest.NewMetrics("B", "CD", "EF", "GH"), expectSinkD: pmetric.Metrics{}, }, + { + name: "mixed/match_metrics_then_http_request", + cfg: testConfig( + withRoute("metric", isMetricF, idSink0), + withRoute("request", isAcme, idSink1), + withDefault(idSinkD), + ), + ctx: withHTTPMetadata(context.Background(), map[string][]string{"X-Tenant": {"acme"}}), + input: pmetricutiltest.NewMetrics("AB", "CD", "EF", "GH"), + expectSink0: pmetricutiltest.NewMetrics("AB", "CD", "F", "GH"), + expectSink1: pmetricutiltest.NewMetrics("AB", "CD", "E", "GH"), + expectSinkD: pmetric.Metrics{}, + }, } for _, tt := range testCases { diff --git a/connector/routingconnector/router.go b/connector/routingconnector/router.go index 9114695bab67..01dd13143261 100644 --- a/connector/routingconnector/router.go +++ b/connector/routingconnector/router.go @@ -15,6 +15,7 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/connector/routingconnector/internal/common" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource" ) @@ -31,6 +32,7 @@ type consumerProvider[C any] func(...pipeline.ID) (C, error) type router[C any] struct { logger *zap.Logger resourceParser ottl.Parser[ottlresource.TransformContext] + metricParser ottl.Parser[ottlmetric.TransformContext] logParser ottl.Parser[ottllog.TransformContext] table []RoutingTableItem @@ -72,15 +74,18 @@ type routingItem[C any] struct { statementContext string requestCondition *requestCondition resourceStatement *ottl.Statement[ottlresource.TransformContext] + metricStatement *ottl.Statement[ottlmetric.TransformContext] logStatement *ottl.Statement[ottllog.TransformContext] } func (r *router[C]) buildParsers(table []RoutingTableItem, settings component.TelemetrySettings) error { - var buildResource, buildLog bool + var buildResource, buildMetric, buildLog bool for _, item := range table { switch item.Context { case "", "resource": buildResource = true + case "metric": + buildMetric = true case "log": buildLog = true } @@ -98,6 +103,17 @@ func (r *router[C]) buildParsers(table []RoutingTableItem, settings component.Te errs = errors.Join(errs, err) } } + if buildMetric { + parser, err := ottlmetric.NewParser( + common.Functions[ottlmetric.TransformContext](), + settings, + ) + if err == nil { + r.metricParser = parser + } else { + errs = errors.Join(errs, err) + } + } if buildLog { parser, err := ottllog.NewParser( common.Functions[ottllog.TransformContext](), @@ -174,6 +190,12 @@ func (r *router[C]) registerRouteConsumers() (err error) { return err } route.resourceStatement = statement + case "metric": + statement, err := r.metricParser.ParseStatement(item.Statement) + if err != nil { + return err + } + route.metricStatement = statement case "log": statement, err := 
r.logParser.ParseStatement(item.Statement) if err != nil { From b0e89cd525dbfb30278468d0aa344299d9b84bd4 Mon Sep 17 00:00:00 2001 From: tbeemster Date: Mon, 11 Nov 2024 00:52:30 +0100 Subject: [PATCH 22/24] =?UTF-8?q?[connector/spanmetrics]=20CHANGELOG=20is?= =?UTF-8?q?=20fixed=20to=20reflect=20the=20correct=20fea=E2=80=A6=20(#3585?= =?UTF-8?q?2)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit #### Description The wrong feature gate was exposed in the CHANGELOG, this has been corrected to the correct feature gate 'connector.spanmetrics.legacyMetricNames' #### Link to tracking issue Fixes #35766 #### Documentation CHANGELOG.md is updated to have the correct feature gate name. --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 225fe8e15ace..8e9d1664c999 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -368,7 +368,7 @@ arrow.waiter_limit -> admission.waiter_limit https://github.com/open-telemetry/opentelemetry-collector/pull/6250 - `spanmetricsconnector`: Improve consistency between metrics generated by spanmetricsconnector. Added traces.span.metrics as default namespace (#33227, #32818) - Default namespace for the generated metrics is traces.span.metrics now. | The deprecated metrics are: calls, duration and events. | The feature flag connector.spanmetrics.legacyLatencyMetricNames was added to revert the behavior. + Default namespace for the generated metrics is traces.span.metrics now. | The deprecated metrics are: calls, duration and events. | The feature flag connector.spanmetrics.legacyMetricNames was added to revert the behavior. - `servicegraphconnector`: Fix histogram metrics miss unit (#34511) All metrics will remove the suffix `_seconds`. It will not introduce breaking change if users use | `prometheusexporter` or `prometheusremotewriteexporter` to exporter metrics in pipeline. | In some cases, like using `clickhouseexporter`(save data in native OTLP format), it will be a breaking change. | Users can use `transformprocessor` to add back this suffix. - `gitproviderreceiver`: The Git Provider Receiver has been renamed to GitHub Receiver. (#34731) From 4b322db0d8c87ae1e01d347a46aca74266306c6b Mon Sep 17 00:00:00 2001 From: Narcis Gemene <7252787+narcis96@users.noreply.github.com> Date: Sun, 10 Nov 2024 23:52:45 +0000 Subject: [PATCH 23/24] awsxrayreceiver: migrate to newer semconv version (#35600) Description: The version of semconv is upgraded from v1.6.1 to v1.18.0 This is a trivial upgrade. The semconv attributes' value have been compared using [go-otel-semconv-comparator](https://github.com/narcis96/go-otel-semconv-comparator). All attributes used by this component have the same value in both versions. 
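For illustration only (not part of this patch), a minimal sketch of the kind of spot check the comparator tool amounts to. It assumes an attribute key such as `AttributeServiceName` that is generated in both semconv packages; since the keys used by this receiver carry the same string value in v1.6.1 and v1.18.0, the import path can be swapped without behavioral change:

```go
package main

import (
	"fmt"

	newconv "go.opentelemetry.io/collector/semconv/v1.18.0"
	oldconv "go.opentelemetry.io/collector/semconv/v1.6.1"
)

func main() {
	// Both schema versions should expose the same string value for the
	// attribute keys this receiver relies on.
	fmt.Println(oldconv.AttributeServiceName == newconv.AttributeServiceName) // expected: true
}
```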
Link to tracking Issue: https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/22095 Testing: Tests passed --- receiver/awsxrayreceiver/internal/translator/aws.go | 2 +- receiver/awsxrayreceiver/internal/translator/aws_test.go | 2 +- receiver/awsxrayreceiver/internal/translator/cause.go | 2 +- receiver/awsxrayreceiver/internal/translator/http.go | 2 +- receiver/awsxrayreceiver/internal/translator/sdk.go | 2 +- receiver/awsxrayreceiver/internal/translator/sql.go | 2 +- receiver/awsxrayreceiver/internal/translator/translator.go | 2 +- receiver/awsxrayreceiver/internal/translator/translator_test.go | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/receiver/awsxrayreceiver/internal/translator/aws.go b/receiver/awsxrayreceiver/internal/translator/aws.go index c7b65d765d6a..d77cb5185d22 100644 --- a/receiver/awsxrayreceiver/internal/translator/aws.go +++ b/receiver/awsxrayreceiver/internal/translator/aws.go @@ -7,7 +7,7 @@ import ( "strconv" "go.opentelemetry.io/collector/pdata/pcommon" - conventions "go.opentelemetry.io/collector/semconv/v1.6.1" + conventions "go.opentelemetry.io/collector/semconv/v1.18.0" awsxray "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray" ) diff --git a/receiver/awsxrayreceiver/internal/translator/aws_test.go b/receiver/awsxrayreceiver/internal/translator/aws_test.go index f72b83012329..593b24738610 100644 --- a/receiver/awsxrayreceiver/internal/translator/aws_test.go +++ b/receiver/awsxrayreceiver/internal/translator/aws_test.go @@ -9,7 +9,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/pdata/pcommon" - conventions "go.opentelemetry.io/collector/semconv/v1.6.1" + conventions "go.opentelemetry.io/collector/semconv/v1.18.0" awsxray "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray" ) diff --git a/receiver/awsxrayreceiver/internal/translator/cause.go b/receiver/awsxrayreceiver/internal/translator/cause.go index de8154452181..99eed7d80afe 100644 --- a/receiver/awsxrayreceiver/internal/translator/cause.go +++ b/receiver/awsxrayreceiver/internal/translator/cause.go @@ -8,7 +8,7 @@ import ( "strings" "go.opentelemetry.io/collector/pdata/ptrace" - conventions "go.opentelemetry.io/collector/semconv/v1.6.1" + conventions "go.opentelemetry.io/collector/semconv/v1.18.0" awsxray "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray" ) diff --git a/receiver/awsxrayreceiver/internal/translator/http.go b/receiver/awsxrayreceiver/internal/translator/http.go index bac348352e62..79a66b394cef 100644 --- a/receiver/awsxrayreceiver/internal/translator/http.go +++ b/receiver/awsxrayreceiver/internal/translator/http.go @@ -5,7 +5,7 @@ package translator // import "github.com/open-telemetry/opentelemetry-collector- import ( "go.opentelemetry.io/collector/pdata/ptrace" - conventions "go.opentelemetry.io/collector/semconv/v1.6.1" + conventions "go.opentelemetry.io/collector/semconv/v1.18.0" awsxray "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal/tracetranslator" diff --git a/receiver/awsxrayreceiver/internal/translator/sdk.go b/receiver/awsxrayreceiver/internal/translator/sdk.go index 97cdc53d10f2..8d55e3dab457 100644 --- a/receiver/awsxrayreceiver/internal/translator/sdk.go +++ b/receiver/awsxrayreceiver/internal/translator/sdk.go @@ -7,7 +7,7 @@ import ( "strings" 
"go.opentelemetry.io/collector/pdata/pcommon" - conventions "go.opentelemetry.io/collector/semconv/v1.6.1" + conventions "go.opentelemetry.io/collector/semconv/v1.18.0" awsxray "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray" ) diff --git a/receiver/awsxrayreceiver/internal/translator/sql.go b/receiver/awsxrayreceiver/internal/translator/sql.go index 7659fd7e45a8..6a88511d6fdd 100644 --- a/receiver/awsxrayreceiver/internal/translator/sql.go +++ b/receiver/awsxrayreceiver/internal/translator/sql.go @@ -8,7 +8,7 @@ import ( "regexp" "go.opentelemetry.io/collector/pdata/pcommon" - conventions "go.opentelemetry.io/collector/semconv/v1.6.1" + conventions "go.opentelemetry.io/collector/semconv/v1.18.0" awsxray "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray" ) diff --git a/receiver/awsxrayreceiver/internal/translator/translator.go b/receiver/awsxrayreceiver/internal/translator/translator.go index e3a05d40d560..331195a5fe8c 100644 --- a/receiver/awsxrayreceiver/internal/translator/translator.go +++ b/receiver/awsxrayreceiver/internal/translator/translator.go @@ -10,7 +10,7 @@ import ( "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/ptrace" - conventions "go.opentelemetry.io/collector/semconv/v1.6.1" + conventions "go.opentelemetry.io/collector/semconv/v1.18.0" awsxray "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray/telemetry" diff --git a/receiver/awsxrayreceiver/internal/translator/translator_test.go b/receiver/awsxrayreceiver/internal/translator/translator_test.go index c0f32a852a79..68231cd57d8e 100644 --- a/receiver/awsxrayreceiver/internal/translator/translator_test.go +++ b/receiver/awsxrayreceiver/internal/translator/translator_test.go @@ -16,7 +16,7 @@ import ( "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/ptrace" - conventions "go.opentelemetry.io/collector/semconv/v1.6.1" + conventions "go.opentelemetry.io/collector/semconv/v1.18.0" awsxray "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/xray/telemetry" From 3e42ac889bdf82259e2e9015324f610b912a7a48 Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Mon, 11 Nov 2024 08:15:08 +0100 Subject: [PATCH 24/24] [chore]: enable whitespace linter for receivers (#36159) #### Description [whitespace](https://golangci-lint.run/usage/linters/#whitespace) is a linter that checks for unnecessary newlines at the start and end of functions. 
Signed-off-by: Matthieu MOREL --- .../internal/awsecscontainermetrics/accumulator.go | 1 - receiver/k8sobjectsreceiver/config.go | 2 -- receiver/k8sobjectsreceiver/mock_dynamic_client_test.go | 1 - receiver/k8sobjectsreceiver/receiver.go | 2 -- receiver/k8sobjectsreceiver/unstructured_to_logdata_test.go | 3 --- receiver/kafkametricsreceiver/broker_scraper.go | 1 - receiver/kafkareceiver/header_extraction_test.go | 3 --- receiver/kubeletstatsreceiver/config_test.go | 1 - receiver/kubeletstatsreceiver/internal/kubelet/metadata.go | 1 - .../kubeletstatsreceiver/internal/kubelet/metadata_test.go | 1 - receiver/kubeletstatsreceiver/scraper_test.go | 1 - receiver/lokireceiver/factory.go | 1 - receiver/lokireceiver/loki_test.go | 1 - receiver/mongodbatlasreceiver/alerts.go | 1 - receiver/mongodbatlasreceiver/alerts_test.go | 1 - receiver/mongodbatlasreceiver/events.go | 1 - receiver/mongodbatlasreceiver/factory.go | 1 - .../mongodbatlasreceiver/internal/mongodb_atlas_client.go | 2 -- receiver/mongodbatlasreceiver/logs_test.go | 1 - receiver/mongodbatlasreceiver/receiver.go | 1 - receiver/mongodbreceiver/client_test.go | 2 -- receiver/mysqlreceiver/scraper_test.go | 2 -- receiver/opencensusreceiver/internal/octrace/opencensus.go | 1 - receiver/opencensusreceiver/opencensus.go | 1 - receiver/opencensusreceiver/opencensus_test.go | 3 --- receiver/oracledbreceiver/config.go | 1 - receiver/oracledbreceiver/scraper_test.go | 2 -- receiver/otelarrowreceiver/config_test.go | 1 - receiver/otelarrowreceiver/internal/arrow/arrow.go | 4 ---- receiver/otelarrowreceiver/internal/arrow/arrow_test.go | 1 - receiver/podmanreceiver/libpod_client_test.go | 2 -- receiver/podmanreceiver/podman.go | 2 -- receiver/prometheusreceiver/config.go | 1 - receiver/prometheusreceiver/internal/transaction_test.go | 2 -- receiver/prometheusreceiver/metrics_receiver_helper_test.go | 1 - receiver/prometheusreceiver/metrics_receiver_labels_test.go | 1 - .../prometheusreceiver/metrics_receiver_open_metrics_test.go | 3 --- .../prometheusreceiver/metrics_reciever_metric_rename_test.go | 3 --- receiver/prometheusreceiver/targetallocator/config.go | 1 - receiver/pulsarreceiver/factory.go | 1 - receiver/rabbitmqreceiver/config_test.go | 1 - receiver/rabbitmqreceiver/scraper.go | 1 - receiver/rabbitmqreceiver/scraper_test.go | 1 - receiver/receivercreator/observerhandler_test.go | 1 - receiver/riakreceiver/config_test.go | 1 - receiver/saphanareceiver/config_test.go | 1 - receiver/signalfxreceiver/receiver.go | 1 - receiver/skywalkingreceiver/factory.go | 2 -- receiver/skywalkingreceiver/factory_test.go | 1 - .../internal/metrics/metric_report_service.go | 1 - .../internal/metrics/skywalkingproto_to_metrics.go | 2 -- receiver/skywalkingreceiver/skywalking_receiver_test.go | 2 -- receiver/snmpreceiver/config.go | 2 -- receiver/snmpreceiver/scraper_test.go | 2 -- receiver/solacereceiver/messaging_service.go | 1 - receiver/solacereceiver/receiver.go | 2 -- receiver/solacereceiver/unmarshaller_move.go | 1 - receiver/splunkenterprisereceiver/scraper.go | 2 -- receiver/splunkhecreceiver/receiver.go | 1 - receiver/splunkhecreceiver/receiver_test.go | 2 -- receiver/splunkhecreceiver/splunk_to_logdata.go | 1 - receiver/splunkhecreceiver/splunk_to_logdata_test.go | 1 - receiver/splunkhecreceiver/splunkhec_to_metricdata.go | 1 - receiver/splunkhecreceiver/splunkhec_to_metricdata_test.go | 1 - receiver/sqlqueryreceiver/logs_receiver.go | 2 -- receiver/sqlserverreceiver/factory.go | 1 - receiver/sqlserverreceiver/queries_test.go | 1 - 
receiver/sqlserverreceiver/scraper.go | 1 - receiver/sqlserverreceiver/scraper_test.go | 1 - receiver/sshcheckreceiver/factory.go | 1 - receiver/sshcheckreceiver/scraper_test.go | 1 - receiver/statsdreceiver/config.go | 1 - .../internal/protocol/metric_translator_test.go | 2 -- .../statsdreceiver/internal/protocol/statsd_parser_test.go | 3 --- receiver/statsdreceiver/receiver.go | 1 - receiver/tlscheckreceiver/config_test.go | 1 - receiver/vcenterreceiver/config_test.go | 1 - receiver/vcenterreceiver/metrics.go | 3 --- receiver/webhookeventreceiver/req_to_log.go | 1 - receiver/zipkinreceiver/trace_receiver.go | 1 - receiver/zookeeperreceiver/metrics.go | 1 - 81 files changed, 117 deletions(-) diff --git a/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/accumulator.go b/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/accumulator.go index a8da66e22996..898f77757d1e 100644 --- a/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/accumulator.go +++ b/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/accumulator.go @@ -38,7 +38,6 @@ func (acc *metricDataAccumulator) getMetricsData(containerStatsMap map[string]*C acc.accumulate(convertToOTLPMetrics(containerPrefix, containerMetrics, containerResource, timestamp)) aggregateTaskMetrics(&taskMetrics, containerMetrics) } else if containerMetadata.FinishedAt != "" && containerMetadata.StartedAt != "" { - duration, err := calculateDuration(containerMetadata.StartedAt, containerMetadata.FinishedAt) if err != nil { diff --git a/receiver/k8sobjectsreceiver/config.go b/receiver/k8sobjectsreceiver/config.go index d341edc555d3..62c303748e35 100644 --- a/receiver/k8sobjectsreceiver/config.go +++ b/receiver/k8sobjectsreceiver/config.go @@ -57,7 +57,6 @@ type Config struct { } func (c *Config) Validate() error { - validObjects, err := c.getValidObjects() if err != nil { return err @@ -149,7 +148,6 @@ func (c *Config) getValidObjects() (map[string][]*schema.GroupVersionResource, e Resource: resource.Name, }) } - } return validObjects, nil } diff --git a/receiver/k8sobjectsreceiver/mock_dynamic_client_test.go b/receiver/k8sobjectsreceiver/mock_dynamic_client_test.go index 267559d0aedc..a075baa8a297 100644 --- a/receiver/k8sobjectsreceiver/mock_dynamic_client_test.go +++ b/receiver/k8sobjectsreceiver/mock_dynamic_client_test.go @@ -30,7 +30,6 @@ func newMockDynamicClient() mockDynamicClient { return mockDynamicClient{ client: fakeClient, } - } func (c mockDynamicClient) getMockDynamicClient() (dynamic.Interface, error) { diff --git a/receiver/k8sobjectsreceiver/receiver.go b/receiver/k8sobjectsreceiver/receiver.go index f10754c4f628..d69df14a19e6 100644 --- a/receiver/k8sobjectsreceiver/receiver.go +++ b/receiver/k8sobjectsreceiver/receiver.go @@ -154,9 +154,7 @@ func (kr *k8sobjectsreceiver) startPull(ctx context.Context, config *K8sObjectsC case <-stopperChan: return } - } - } func (kr *k8sobjectsreceiver) startWatch(ctx context.Context, config *K8sObjectsConfig, resource dynamic.ResourceInterface) { diff --git a/receiver/k8sobjectsreceiver/unstructured_to_logdata_test.go b/receiver/k8sobjectsreceiver/unstructured_to_logdata_test.go index 8315452a260b..da71680ba5b1 100644 --- a/receiver/k8sobjectsreceiver/unstructured_to_logdata_test.go +++ b/receiver/k8sobjectsreceiver/unstructured_to_logdata_test.go @@ -89,7 +89,6 @@ func TestUnstructuredListToLogData(t *testing.T) { assert.False(t, ok) assert.Equal(t, 1, rl.ScopeLogs().Len()) assert.Equal(t, 3, logRecords.Len()) - }) 
t.Run("Test event.name in watch events", func(t *testing.T) { @@ -129,7 +128,6 @@ func TestUnstructuredListToLogData(t *testing.T) { eventName, ok := attrs.Get("event.name") require.True(t, ok) assert.EqualValues(t, "generic-name", eventName.AsRaw()) - }) t.Run("Test event observed timestamp is present", func(t *testing.T) { @@ -168,5 +166,4 @@ func TestUnstructuredListToLogData(t *testing.T) { assert.Positive(t, logRecords.At(0).ObservedTimestamp().AsTime().Unix()) assert.Equal(t, logRecords.At(0).ObservedTimestamp().AsTime().Unix(), observedAt.Unix()) }) - } diff --git a/receiver/kafkametricsreceiver/broker_scraper.go b/receiver/kafkametricsreceiver/broker_scraper.go index 6aed30726d2d..07aea6821634 100644 --- a/receiver/kafkametricsreceiver/broker_scraper.go +++ b/receiver/kafkametricsreceiver/broker_scraper.go @@ -47,7 +47,6 @@ func (s *brokerScraper) shutdown(context.Context) error { } func (s *brokerScraper) scrape(context.Context) (pmetric.Metrics, error) { - var scrapeErrors = scrapererror.ScrapeErrors{} if s.client == nil { diff --git a/receiver/kafkareceiver/header_extraction_test.go b/receiver/kafkareceiver/header_extraction_test.go index c2dacfff103f..76f72b8595ce 100644 --- a/receiver/kafkareceiver/header_extraction_test.go +++ b/receiver/kafkareceiver/header_extraction_test.go @@ -85,7 +85,6 @@ func TestHeaderExtractionTraces(t *testing.T) { } cancelFunc() wg.Wait() - } func TestHeaderExtractionLogs(t *testing.T) { @@ -147,7 +146,6 @@ func TestHeaderExtractionLogs(t *testing.T) { } cancelFunc() wg.Wait() - } func TestHeaderExtractionMetrics(t *testing.T) { @@ -210,7 +208,6 @@ func TestHeaderExtractionMetrics(t *testing.T) { } cancelFunc() wg.Wait() - } func validateHeader(t *testing.T, rs pcommon.Resource, headerKey string, headerValue string) { diff --git a/receiver/kubeletstatsreceiver/config_test.go b/receiver/kubeletstatsreceiver/config_test.go index f3baf8a2fb8f..03290d362cd5 100644 --- a/receiver/kubeletstatsreceiver/config_test.go +++ b/receiver/kubeletstatsreceiver/config_test.go @@ -303,7 +303,6 @@ func TestLoadConfig(t *testing.T) { assert.NoError(t, err) assert.Equal(t, tt.expected, cfg) } - }) } } diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go b/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go index 954a01f03cd3..55f539044a70 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go @@ -207,7 +207,6 @@ func (m *Metadata) getContainerID(podUID string, containerName string) (string, return stripContainerID(containerStatus.ContainerID), nil } } - } } diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go b/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go index 7ba5e4eabd44..fa35e320c2d0 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/metadata_test.go @@ -390,7 +390,6 @@ func TestSetExtraLabelsForVolumeTypes(t *testing.T) { // Test happy paths for volume type metadata. 
func TestCpuAndMemoryGetters(t *testing.T) { - tests := []struct { name string metadata Metadata diff --git a/receiver/kubeletstatsreceiver/scraper_test.go b/receiver/kubeletstatsreceiver/scraper_test.go index 649766adff75..e089046240e6 100644 --- a/receiver/kubeletstatsreceiver/scraper_test.go +++ b/receiver/kubeletstatsreceiver/scraper_test.go @@ -303,7 +303,6 @@ func TestScraperWithMetadata(t *testing.T) { pmetrictest.IgnoreMetricDataPointsOrder(), pmetrictest.IgnoreTimestamp(), pmetrictest.IgnoreMetricsOrder())) - }) } } diff --git a/receiver/lokireceiver/factory.go b/receiver/lokireceiver/factory.go index 08e0c9aa0539..8faa7d0d128c 100644 --- a/receiver/lokireceiver/factory.go +++ b/receiver/lokireceiver/factory.go @@ -56,7 +56,6 @@ func createLogsReceiver( cfg component.Config, consumer consumer.Logs, ) (receiver.Logs, error) { - rCfg := cfg.(*Config) return newLokiReceiver(rCfg, consumer, settings) } diff --git a/receiver/lokireceiver/loki_test.go b/receiver/lokireceiver/loki_test.go index 00087b11d26c..85e0af28499a 100644 --- a/receiver/lokireceiver/loki_test.go +++ b/receiver/lokireceiver/loki_test.go @@ -365,7 +365,6 @@ func TestSendingPushRequestToGRPCEndpoint(t *testing.T) { } func TestExpectedStatus(t *testing.T) { - testcases := []struct { name string err error diff --git a/receiver/mongodbatlasreceiver/alerts.go b/receiver/mongodbatlasreceiver/alerts.go index 982970214537..2be154637407 100644 --- a/receiver/mongodbatlasreceiver/alerts.go +++ b/receiver/mongodbatlasreceiver/alerts.go @@ -498,7 +498,6 @@ func payloadToLogs(now time.Time, payload []byte) (plog.Logs, error) { attrs.PutStr("net.peer.name", host) attrs.PutInt("net.peer.port", port) - } return logs, nil diff --git a/receiver/mongodbatlasreceiver/alerts_test.go b/receiver/mongodbatlasreceiver/alerts_test.go index bb65b257d603..77e176e71a0a 100644 --- a/receiver/mongodbatlasreceiver/alerts_test.go +++ b/receiver/mongodbatlasreceiver/alerts_test.go @@ -243,7 +243,6 @@ func TestVerifyHMACSignature(t *testing.T) { } else { require.NoError(t, err) } - }) } } diff --git a/receiver/mongodbatlasreceiver/events.go b/receiver/mongodbatlasreceiver/events.go index 2dd9787ab02a..7b9ed58557ce 100644 --- a/receiver/mongodbatlasreceiver/events.go +++ b/receiver/mongodbatlasreceiver/events.go @@ -239,7 +239,6 @@ func (er *eventsReceiver) transformOrgEvents(now pcommon.Timestamp, events []*mo func (er *eventsReceiver) transformEvents(now pcommon.Timestamp, events []*mongodbatlas.Event, resourceLogs *plog.ResourceLogs) { for _, event := range events { - logRecord := resourceLogs.ScopeLogs().AppendEmpty().LogRecords().AppendEmpty() bodyBytes, err := json.Marshal(event) if err != nil { diff --git a/receiver/mongodbatlasreceiver/factory.go b/receiver/mongodbatlasreceiver/factory.go index 899d8b519ba6..0f094a53e283 100644 --- a/receiver/mongodbatlasreceiver/factory.go +++ b/receiver/mongodbatlasreceiver/factory.go @@ -31,7 +31,6 @@ func NewFactory() receiver.Factory { createDefaultConfig, receiver.WithMetrics(createMetricsReceiver, metadata.MetricsStability), receiver.WithLogs(createCombinedLogReceiver, metadata.LogsStability)) - } func createMetricsReceiver( diff --git a/receiver/mongodbatlasreceiver/internal/mongodb_atlas_client.go b/receiver/mongodbatlasreceiver/internal/mongodb_atlas_client.go index 8ba8f2acbb5a..3f1e1bbcab2d 100644 --- a/receiver/mongodbatlasreceiver/internal/mongodb_atlas_client.go +++ b/receiver/mongodbatlasreceiver/internal/mongodb_atlas_client.go @@ -217,7 +217,6 @@ func (s *MongoDBAtlasClient) 
GetOrganization(ctx context.Context, orgID string) return nil, fmt.Errorf("error retrieving project page: %w", err) } return org, nil - } // Projects returns a list of projects accessible within the provided organization @@ -719,7 +718,6 @@ type GetAccessLogsOptions struct { // GetAccessLogs returns the access logs specified for the cluster requested func (s *MongoDBAtlasClient) GetAccessLogs(ctx context.Context, groupID string, clusterName string, opts *GetAccessLogsOptions) (ret []*mongodbatlas.AccessLogs, err error) { - options := mongodbatlas.AccessLogOptions{ // Earliest Timestamp in epoch milliseconds from when Atlas should access log results Start: fmt.Sprintf("%d", opts.MinDate.UTC().UnixMilli()), diff --git a/receiver/mongodbatlasreceiver/logs_test.go b/receiver/mongodbatlasreceiver/logs_test.go index 7a7c721715e9..03cc74f40132 100644 --- a/receiver/mongodbatlasreceiver/logs_test.go +++ b/receiver/mongodbatlasreceiver/logs_test.go @@ -41,7 +41,6 @@ func TestFilterClusters(t *testing.T) { ic, err := filterClusters(clusters, includeProject) require.NoError(t, err) require.Equal(t, []mongodbatlas.Cluster{{Name: "cluster1", ID: "1"}, {Name: "cluster3", ID: "3"}}, ic) - } func TestDefaultLoggingConfig(t *testing.T) { diff --git a/receiver/mongodbatlasreceiver/receiver.go b/receiver/mongodbatlasreceiver/receiver.go index bd97b1edad15..67ffe30f503d 100644 --- a/receiver/mongodbatlasreceiver/receiver.go +++ b/receiver/mongodbatlasreceiver/receiver.go @@ -211,7 +211,6 @@ func (s *mongodbatlasreceiver) getNodeClusterNameMap( // Remove the port from the node n, _, _ := strings.Cut(node, ":") clusterMap[n] = cluster.Name - } providerMap[cluster.Name] = providerValues{ diff --git a/receiver/mongodbreceiver/client_test.go b/receiver/mongodbreceiver/client_test.go index f6ed99ebfcc2..b4657ef87fc3 100644 --- a/receiver/mongodbreceiver/client_test.go +++ b/receiver/mongodbreceiver/client_test.go @@ -92,7 +92,6 @@ func TestListDatabaseNames(t *testing.T) { require.NoError(t, err) require.Equal(t, "admin", dbNames[0]) }) - } type commandString = string @@ -232,7 +231,6 @@ func TestGetVersionFailures(t *testing.T) { require.ErrorContains(t, err, tc.partialError) }) } - } func loadDBStats() (bson.D, error) { diff --git a/receiver/mysqlreceiver/scraper_test.go b/receiver/mysqlreceiver/scraper_test.go index 8753752e948a..f2a1ef0ce5ba 100644 --- a/receiver/mysqlreceiver/scraper_test.go +++ b/receiver/mysqlreceiver/scraper_test.go @@ -123,7 +123,6 @@ func TestScrape(t *testing.T) { // and the other failure comes from a row that fails to parse as a number require.Equal(t, 5, partialError.Failed, "Expected partial error count to be 5") }) - } var _ client = (*mockClient)(nil) @@ -194,7 +193,6 @@ func (c *mockClient) getTableStats() ([]TableStats, error) { stats = append(stats, s) } return stats, nil - } func (c *mockClient) getTableIoWaitsStats() ([]TableIoWaitsStats, error) { diff --git a/receiver/opencensusreceiver/internal/octrace/opencensus.go b/receiver/opencensusreceiver/internal/octrace/opencensus.go index 68190af59528..9977d9fa6700 100644 --- a/receiver/opencensusreceiver/internal/octrace/opencensus.go +++ b/receiver/opencensusreceiver/internal/octrace/opencensus.go @@ -33,7 +33,6 @@ type Receiver struct { // New creates a new opencensus.Receiver reference. 
func New(nextConsumer consumer.Traces, set receiver.Settings) (*Receiver, error) { - obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{ ReceiverID: set.ID, Transport: receiverTransport, diff --git a/receiver/opencensusreceiver/opencensus.go b/receiver/opencensusreceiver/opencensus.go index 3ea3c2f4a2bd..ee2de0d033af 100644 --- a/receiver/opencensusreceiver/opencensus.go +++ b/receiver/opencensusreceiver/opencensus.go @@ -189,7 +189,6 @@ func (ocr *ocReceiver) Start(ctx context.Context, host component.Host) error { // Shutdown is a method to turn off receiving. func (ocr *ocReceiver) Shutdown(context.Context) error { - if ocr.cancel != nil { ocr.cancel() } diff --git a/receiver/opencensusreceiver/opencensus_test.go b/receiver/opencensusreceiver/opencensus_test.go index 80002b99aaed..668df1bd9438 100644 --- a/receiver/opencensusreceiver/opencensus_test.go +++ b/receiver/opencensusreceiver/opencensus_test.go @@ -303,7 +303,6 @@ func TestStartWithoutConsumersShouldFail(t *testing.T) { } func TestStartListenerClosed(t *testing.T) { - addr := testutil.GetAvailableLocalAddress(t) // Set the buffer count to 1 to make it flush the test span immediately. @@ -503,7 +502,6 @@ func TestOCReceiverTrace_HandleNextConsumerResponse(t *testing.T) { t *testing.T, cc *grpc.ClientConn, msg *agenttracepb.ExportTraceServiceRequest) error { - acc := agenttracepb.NewTraceServiceClient(cc) stream, err := acc.Export(context.Background()) require.NoError(t, err) @@ -661,7 +659,6 @@ func TestOCReceiverMetrics_HandleNextConsumerResponse(t *testing.T) { t *testing.T, cc *grpc.ClientConn, msg *agentmetricspb.ExportMetricsServiceRequest) error { - acc := agentmetricspb.NewMetricsServiceClient(cc) stream, err := acc.Export(context.Background()) require.NoError(t, err) diff --git a/receiver/oracledbreceiver/config.go b/receiver/oracledbreceiver/config.go index c14cd60beac8..70c21c2e9264 100644 --- a/receiver/oracledbreceiver/config.go +++ b/receiver/oracledbreceiver/config.go @@ -41,7 +41,6 @@ func (c Config) Validate() error { // If DataSource is defined it takes precedence over the rest of the connection options. if c.DataSource == "" { - if c.Endpoint == "" { allErrs = multierr.Append(allErrs, errEmptyEndpoint) } diff --git a/receiver/oracledbreceiver/scraper_test.go b/receiver/oracledbreceiver/scraper_test.go index 228023c390a7..608cf1facd47 100644 --- a/receiver/oracledbreceiver/scraper_test.go +++ b/receiver/oracledbreceiver/scraper_test.go @@ -40,7 +40,6 @@ var queryResponses = map[string][]metricRow{ } func TestScraper_Scrape(t *testing.T) { - tests := []struct { name string dbclientFn func(db *sql.DB, s string, logger *zap.Logger) dbClient @@ -165,5 +164,4 @@ func TestScraper_Scrape(t *testing.T) { assert.Equal(t, int64(78944), found.Sum().DataPoints().At(0).IntValue()) }) } - } diff --git a/receiver/otelarrowreceiver/config_test.go b/receiver/otelarrowreceiver/config_test.go index 60edaf00cf61..29a1e8a08ad5 100644 --- a/receiver/otelarrowreceiver/config_test.go +++ b/receiver/otelarrowreceiver/config_test.go @@ -85,7 +85,6 @@ func TestUnmarshalConfig(t *testing.T) { WaiterLimit: 100, }, }, cfg) - } // Tests that a deprecated config validation sets RequestLimitMiB and WaiterLimit in the correct config block. 
diff --git a/receiver/otelarrowreceiver/internal/arrow/arrow.go b/receiver/otelarrowreceiver/internal/arrow/arrow.go index 13fdcd2395f8..50653b2e9a3a 100644 --- a/receiver/otelarrowreceiver/internal/arrow/arrow.go +++ b/receiver/otelarrowreceiver/internal/arrow/arrow.go @@ -545,7 +545,6 @@ func (id *inFlightData) anyDone(ctx context.Context) { // tracks everything that needs to be used by instrumention when the // batch finishes. func (r *receiverStream) recvOne(streamCtx context.Context, serverStream anyStreamServer, hrcv *headerReceiver, pendingCh chan<- batchResp, method string, ac arrowRecord.ConsumerAPI) (retErr error) { - // Receive a batch corresponding with one ptrace.Traces, pmetric.Metrics, // or plog.Logs item. req, recvErr := serverStream.Recv() @@ -565,12 +564,10 @@ func (r *receiverStream) recvOne(streamCtx context.Context, serverStream anyStre if recvErr != nil { if errors.Is(recvErr, io.EOF) { return recvErr - } else if errors.Is(recvErr, context.Canceled) { // This is a special case to avoid introducing a span error // for a canceled operation. return io.EOF - } else if status, ok := status.FromError(recvErr); ok && status.Code() == codes.Canceled { // This is a special case to avoid introducing a span error // for a canceled operation. @@ -773,7 +770,6 @@ func (r *receiverStream) srvSendLoop(ctx context.Context, serverStream anyStream // slice of pdata objects of the corresponding data type as `any`. // along with the number of items and true uncompressed size. func (r *Receiver) consumeBatch(arrowConsumer arrowRecord.ConsumerAPI, records *arrowpb.BatchArrowRecords) (retData any, numItems int, uncompSize int64, retErr error) { - payloads := records.GetArrowPayloads() if len(payloads) == 0 { return nil, 0, 0, nil diff --git a/receiver/otelarrowreceiver/internal/arrow/arrow_test.go b/receiver/otelarrowreceiver/internal/arrow/arrow_test.go index dcbe0f8546d3..ec35cb2ef992 100644 --- a/receiver/otelarrowreceiver/internal/arrow/arrow_test.go +++ b/receiver/otelarrowreceiver/internal/arrow/arrow_test.go @@ -1275,7 +1275,6 @@ func testReceiverAuthHeaders(t *testing.T, includeMeta bool, dataAuth bool) { batch = copyBatch(batch) if len(md) != 0 { - hpb.Reset() for key, vals := range md { for _, val := range vals { diff --git a/receiver/podmanreceiver/libpod_client_test.go b/receiver/podmanreceiver/libpod_client_test.go index 65d511c5aa85..80d50ffd265f 100644 --- a/receiver/podmanreceiver/libpod_client_test.go +++ b/receiver/podmanreceiver/libpod_client_test.go @@ -238,7 +238,6 @@ func TestEvents(t *testing.T) { loop: for { - select { case err := <-errs: if err != nil && !errors.Is(err, io.EOF) { @@ -252,5 +251,4 @@ loop: } assert.Equal(t, expectedEvents, actualEvents) - } diff --git a/receiver/podmanreceiver/podman.go b/receiver/podmanreceiver/podman.go index 6aa8d0fd8c6f..277720f87a6c 100644 --- a/receiver/podmanreceiver/podman.go +++ b/receiver/podmanreceiver/podman.go @@ -98,7 +98,6 @@ EVENT_LOOP: for { eventCh, errCh := pc.events(ctx, filters) for { - select { case <-ctx.Done(): return @@ -132,7 +131,6 @@ EVENT_LOOP: } } } - } } } diff --git a/receiver/prometheusreceiver/config.go b/receiver/prometheusreceiver/config.go index 672f0e437b2c..556dd4244518 100644 --- a/receiver/prometheusreceiver/config.go +++ b/receiver/prometheusreceiver/config.go @@ -144,7 +144,6 @@ func validateHTTPClientConfig(cfg *commonconfig.HTTPClientConfig) error { return err } return nil - } func checkFile(fn string) error { diff --git a/receiver/prometheusreceiver/internal/transaction_test.go 
b/receiver/prometheusreceiver/internal/transaction_test.go index f15b06e402cb..b47b8ce83b96 100644 --- a/receiver/prometheusreceiver/internal/transaction_test.go +++ b/receiver/prometheusreceiver/internal/transaction_test.go @@ -1742,7 +1742,6 @@ func TestMetricBuilderSummary(t *testing.T) { }) } } - } func TestMetricBuilderNativeHistogram(t *testing.T) { @@ -2003,5 +2002,4 @@ func assertEquivalentMetrics(t *testing.T, want, got pmetric.Metrics) { assert.EqualValues(t, wmap, gmap) } } - } diff --git a/receiver/prometheusreceiver/metrics_receiver_helper_test.go b/receiver/prometheusreceiver/metrics_receiver_helper_test.go index 0755cf0c7e9a..cea8d429af86 100644 --- a/receiver/prometheusreceiver/metrics_receiver_helper_test.go +++ b/receiver/prometheusreceiver/metrics_receiver_helper_test.go @@ -185,7 +185,6 @@ func waitForScrapeResults(t *testing.T, targets []*testData, cms *consumertest.M // only count target pages that are not 404, matching mock ServerHTTP func response logic want++ } - } if len(scrapes) < want { // If we don't have enough scrapes yet lets return false and wait for another tick diff --git a/receiver/prometheusreceiver/metrics_receiver_labels_test.go b/receiver/prometheusreceiver/metrics_receiver_labels_test.go index f7eb6289c1cd..b120df3d3341 100644 --- a/receiver/prometheusreceiver/metrics_receiver_labels_test.go +++ b/receiver/prometheusreceiver/metrics_receiver_labels_test.go @@ -722,7 +722,6 @@ func verifyRelabelJobInstance(t *testing.T, td *testData, rms []pmetric.Resource }, }, })(t, rms[0]) - } const targetResourceAttsInTargetInfo = ` diff --git a/receiver/prometheusreceiver/metrics_receiver_open_metrics_test.go b/receiver/prometheusreceiver/metrics_receiver_open_metrics_test.go index ae22d4389aee..1eec3fbe16c6 100644 --- a/receiver/prometheusreceiver/metrics_receiver_open_metrics_test.go +++ b/receiver/prometheusreceiver/metrics_receiver_open_metrics_test.go @@ -92,7 +92,6 @@ func verifyFailTarget(t *testing.T, td *testData, mds []pmetric.ResourceMetrics) // Test open metrics negative test cases func TestOpenMetricsFail(t *testing.T) { - targetsMap := getOpenMetricsFailTestData() var targets []*testData for k, v := range targetsMap { @@ -127,7 +126,6 @@ func verifyInvalidTarget(t *testing.T, td *testData, mds []pmetric.ResourceMetri } func TestOpenMetricsInvalid(t *testing.T) { - targetsMap := getOpenMetricsInvalidTestData() var targets []*testData for k, v := range targetsMap { @@ -229,7 +227,6 @@ func TestInfoStatesetMetrics(t *testing.T) { } testComponent(t, targets, nil) - } func verifyInfoStatesetMetrics(t *testing.T, td *testData, resourceMetrics []pmetric.ResourceMetrics) { diff --git a/receiver/prometheusreceiver/metrics_reciever_metric_rename_test.go b/receiver/prometheusreceiver/metrics_reciever_metric_rename_test.go index ecf7ab8b7ba1..adc0cadcb310 100644 --- a/receiver/prometheusreceiver/metrics_reciever_metric_rename_test.go +++ b/receiver/prometheusreceiver/metrics_reciever_metric_rename_test.go @@ -102,7 +102,6 @@ func TestMetricRenamingKeepAction(t *testing.T) { } } }) - } func verifyRenameMetric(t *testing.T, td *testData, resourceMetrics []pmetric.ResourceMetrics) { @@ -266,7 +265,6 @@ func TestLabelRenaming(t *testing.T) { } } }) - } func verifyRenameLabel(t *testing.T, td *testData, resourceMetrics []pmetric.ResourceMetrics) { @@ -373,7 +371,6 @@ func TestLabelRenamingKeepAction(t *testing.T) { } } }) - } func verifyRenameLabelKeepAction(t *testing.T, td *testData, resourceMetrics []pmetric.ResourceMetrics) { diff --git 
a/receiver/prometheusreceiver/targetallocator/config.go b/receiver/prometheusreceiver/targetallocator/config.go index 07fc1d579a83..5cd9d719574b 100644 --- a/receiver/prometheusreceiver/targetallocator/config.go +++ b/receiver/prometheusreceiver/targetallocator/config.go @@ -87,7 +87,6 @@ func validateHTTPClientConfig(cfg *commonconfig.HTTPClientConfig) error { return err } return nil - } func checkFile(fn string) error { diff --git a/receiver/pulsarreceiver/factory.go b/receiver/pulsarreceiver/factory.go index 8814484af3c2..aa15ed799f58 100644 --- a/receiver/pulsarreceiver/factory.go +++ b/receiver/pulsarreceiver/factory.go @@ -55,7 +55,6 @@ func withLogsUnmarshalers(logsUnmarshalers ...LogsUnmarshaler) FactoryOption { // NewFactory creates Pulsar receiver factory. func NewFactory(options ...FactoryOption) receiver.Factory { - f := &pulsarReceiverFactory{ tracesUnmarshalers: defaultTracesUnmarshalers(), metricsUnmarshalers: defaultMetricsUnmarshalers(), diff --git a/receiver/rabbitmqreceiver/config_test.go b/receiver/rabbitmqreceiver/config_test.go index f95175d341b2..dfcdfefe5cda 100644 --- a/receiver/rabbitmqreceiver/config_test.go +++ b/receiver/rabbitmqreceiver/config_test.go @@ -92,7 +92,6 @@ func TestValidate(t *testing.T) { } else { require.NoError(t, actualErr) } - }) } } diff --git a/receiver/rabbitmqreceiver/scraper.go b/receiver/rabbitmqreceiver/scraper.go index 6b488e9e463e..fd0ad5d5f529 100644 --- a/receiver/rabbitmqreceiver/scraper.go +++ b/receiver/rabbitmqreceiver/scraper.go @@ -78,7 +78,6 @@ func (r *rabbitmqScraper) scrape(ctx context.Context) (pmetric.Metrics, error) { // Collect metrics for each queue for _, queue := range queues { - r.collectQueue(queue, now) } diff --git a/receiver/rabbitmqreceiver/scraper_test.go b/receiver/rabbitmqreceiver/scraper_test.go index 7c97bb737ca0..112941b136d9 100644 --- a/receiver/rabbitmqreceiver/scraper_test.go +++ b/receiver/rabbitmqreceiver/scraper_test.go @@ -101,7 +101,6 @@ func TestScaperScrape(t *testing.T) { return &mockClient }, expectedMetricGen: func(*testing.T) pmetric.Metrics { - return pmetric.NewMetrics() }, expectedErr: errors.New("some api error"), diff --git a/receiver/receivercreator/observerhandler_test.go b/receiver/receivercreator/observerhandler_test.go index 3968402f58e9..14cd5e7a7c97 100644 --- a/receiver/receivercreator/observerhandler_test.go +++ b/receiver/receivercreator/observerhandler_test.go @@ -321,7 +321,6 @@ func TestOnAddForTraces(t *testing.T) { t.Fatalf("unexpected startedComponent: %T", v) } require.Equal(t, test.expectedReceiverConfig, actualConfig) - }) } } diff --git a/receiver/riakreceiver/config_test.go b/receiver/riakreceiver/config_test.go index a503e4d05e86..97eaf508d23c 100644 --- a/receiver/riakreceiver/config_test.go +++ b/receiver/riakreceiver/config_test.go @@ -93,7 +93,6 @@ func TestValidate(t *testing.T) { } else { require.NoError(t, actualErr) } - }) } } diff --git a/receiver/saphanareceiver/config_test.go b/receiver/saphanareceiver/config_test.go index 0a53bcf7b428..104a6659b7d3 100644 --- a/receiver/saphanareceiver/config_test.go +++ b/receiver/saphanareceiver/config_test.go @@ -93,5 +93,4 @@ func TestLoadConfig(t *testing.T) { if diff := cmp.Diff(expected, cfg, cmpopts.IgnoreUnexported(metadata.MetricConfig{}), cmpopts.IgnoreUnexported(metadata.ResourceAttributeConfig{})); diff != "" { t.Errorf("Config mismatch (-expected +actual):\n%s", diff) } - } diff --git a/receiver/signalfxreceiver/receiver.go b/receiver/signalfxreceiver/receiver.go index ecadab4bd51e..587072e65e44 100644 
--- a/receiver/signalfxreceiver/receiver.go
+++ b/receiver/signalfxreceiver/receiver.go
@@ -123,7 +123,6 @@ func (r *sfxReceiver) RegisterLogsConsumer(lc consumer.Logs) {
 // By convention the consumer of the received data is set when the receiver
 // instance is created.
 func (r *sfxReceiver) Start(ctx context.Context, host component.Host) error {
-
 	if r.server != nil {
 		return nil
 	}
diff --git a/receiver/skywalkingreceiver/factory.go b/receiver/skywalkingreceiver/factory.go
index c29e9f7abf68..458bed44ad23 100644
--- a/receiver/skywalkingreceiver/factory.go
+++ b/receiver/skywalkingreceiver/factory.go
@@ -66,7 +66,6 @@ func createTracesReceiver(
 	cfg component.Config,
 	nextConsumer consumer.Traces,
 ) (receiver.Traces, error) {
-
 	// Convert settings in the source c to configuration struct
 	// that Skywalking receiver understands.
 	rCfg := cfg.(*Config)
@@ -94,7 +93,6 @@ func createMetricsReceiver(
 	cfg component.Config,
 	nextConsumer consumer.Metrics,
 ) (receiver.Metrics, error) {
-
 	// Convert settings in the source c to configuration struct
 	// that Skywalking receiver understands.
 	rCfg := cfg.(*Config)
diff --git a/receiver/skywalkingreceiver/factory_test.go b/receiver/skywalkingreceiver/factory_test.go
index 6fd8d870d358..05d8a2a2fde1 100644
--- a/receiver/skywalkingreceiver/factory_test.go
+++ b/receiver/skywalkingreceiver/factory_test.go
@@ -57,7 +57,6 @@ func TestCreateReceiver(t *testing.T) {
 	mReceiver, err := factory.CreateMetrics(context.Background(), set, cfg, metricSink)
 	assert.NoError(t, err, "metric receiver creation failed")
 	assert.NotNil(t, mReceiver, "metric receiver creation failed")
-
 }
 
 func TestCreateReceiverGeneralConfig(t *testing.T) {
diff --git a/receiver/skywalkingreceiver/internal/metrics/metric_report_service.go b/receiver/skywalkingreceiver/internal/metrics/metric_report_service.go
index e65afbe3e742..db9e34ada43d 100644
--- a/receiver/skywalkingreceiver/internal/metrics/metric_report_service.go
+++ b/receiver/skywalkingreceiver/internal/metrics/metric_report_service.go
@@ -65,5 +65,4 @@ func consumeMetrics(ctx context.Context, collection *agent.JVMMetricCollection,
 	}
 	pmd := SwMetricsToMetrics(collection)
 	return nextConsumer.ConsumeMetrics(ctx, pmd)
-
 }
diff --git a/receiver/skywalkingreceiver/internal/metrics/skywalkingproto_to_metrics.go b/receiver/skywalkingreceiver/internal/metrics/skywalkingproto_to_metrics.go
index 3d784b5c2b31..733865ebdb2c 100644
--- a/receiver/skywalkingreceiver/internal/metrics/skywalkingproto_to_metrics.go
+++ b/receiver/skywalkingreceiver/internal/metrics/skywalkingproto_to_metrics.go
@@ -96,7 +96,6 @@ func memoryPoolMetricToMetrics(timestamp int64, memoryPools []*agent.MemoryPool,
 		fillNumberDataPointIntValue(timestamp, memoryPool.Used, dpsMp[MemoryPoolUsedName].AppendEmpty(), attrs)
 		fillNumberDataPointIntValue(timestamp, memoryPool.Committed, dpsMp[MemoryPoolCommittedName].AppendEmpty(), attrs)
 	}
-
 }
 
 func buildMemoryPoolAttrs(pool *agent.MemoryPool) pcommon.Map {
@@ -110,7 +109,6 @@ func buildMemoryPoolAttrs(pool *agent.MemoryPool) pcommon.Map {
 	case agent.PoolType_NEWGEN_USAGE, agent.PoolType_OLDGEN_USAGE, agent.PoolType_SURVIVOR_USAGE:
 		memoryType = "heap"
 	default:
-
 	}
 	attrs.PutStr("jvm.memory.type", memoryType)
 	return attrs
diff --git a/receiver/skywalkingreceiver/skywalking_receiver_test.go b/receiver/skywalkingreceiver/skywalking_receiver_test.go
index 28d336cc9cd4..02218321a97d 100644
--- a/receiver/skywalkingreceiver/skywalking_receiver_test.go
+++ b/receiver/skywalkingreceiver/skywalking_receiver_test.go
@@ -83,7 +83,6 @@ func TestStartAndShutdown(t *testing.T) {
 	require.NoError(t, err)
 	require.NoError(t, sr.Start(context.Background(), componenttest.NewNopHost()))
 	t.Cleanup(func() { require.NoError(t, sr.Shutdown(context.Background())) })
-
 }
 
 func TestGRPCReception(t *testing.T) {
@@ -151,7 +150,6 @@ func TestHttpReception(t *testing.T) {
 	// verify
 	assert.NoError(t, err, "send skywalking segment successful.")
 	assert.NotNil(t, response)
-
 }
 
 func mockGrpcTraceSegment(sequence int) *agent.SegmentObject {
diff --git a/receiver/snmpreceiver/config.go b/receiver/snmpreceiver/config.go
index 2e6200ec1ae8..08ae73a29034 100644
--- a/receiver/snmpreceiver/config.go
+++ b/receiver/snmpreceiver/config.go
@@ -500,7 +500,6 @@ func validateScalarOID(metricName string, scalarOID ScalarOID, cfg *Config) erro
 			combinedErr = errors.Join(combinedErr, fmt.Errorf(errMsgScalarMetricHasIndexedResourceAttribute, metricName, name))
 			continue
 		}
-
 	}
 
 	if len(scalarOID.Attributes) == 0 {
@@ -593,7 +592,6 @@ func validateResourceAttributeConfigs(cfg *Config) error {
 
 	// Make sure each Resource Attribute has exactly one of OID or ScalarOID or IndexedValuePrefix, and check that scalar and column OIDs end in the right digit
 	for attrName, attrCfg := range resourceAttributes {
-
 		hasOID := attrCfg.OID != ""
 		hasScalarOID := attrCfg.ScalarOID != ""
 		hasIVP := attrCfg.IndexedValuePrefix != ""
diff --git a/receiver/snmpreceiver/scraper_test.go b/receiver/snmpreceiver/scraper_test.go
index b5c3100c4388..1076011ae9a7 100644
--- a/receiver/snmpreceiver/scraper_test.go
+++ b/receiver/snmpreceiver/scraper_test.go
@@ -42,7 +42,6 @@ func (_m *MockClient) Close() error {
 
 // Connect provides a mock function with given fields:
 func (_m *MockClient) Connect() error {
-
 	ret := _m.Called()
 	var r0 error
 	if rf, ok := ret.Get(0).(func() error); ok {
@@ -101,7 +100,6 @@ func TestStart(t *testing.T) {
 		{
 			desc: "Valid Config",
 			testFunc: func(t *testing.T) {
-
 				scraper := &snmpScraper{
 					cfg: createDefaultConfig().(*Config),
 					settings: receivertest.NewNopSettings(),
diff --git a/receiver/solacereceiver/messaging_service.go b/receiver/solacereceiver/messaging_service.go
index b139f7d782b2..d1903e3e5031 100644
--- a/receiver/solacereceiver/messaging_service.go
+++ b/receiver/solacereceiver/messaging_service.go
@@ -69,7 +69,6 @@ func newAMQPMessagingServiceFactory(cfg *Config, logger *zap.Logger) (messagingS
 			logger: logger,
 		}
 	}, nil
-
 }
 
 type amqpConnectConfig struct {
diff --git a/receiver/solacereceiver/receiver.go b/receiver/solacereceiver/receiver.go
index a50f138e703c..4398008982c4 100644
--- a/receiver/solacereceiver/receiver.go
+++ b/receiver/solacereceiver/receiver.go
@@ -69,7 +69,6 @@ type solaceTracesReceiver struct {
 
 // newTracesReceiver creates a new solaceTraceReceiver as a receiver.Traces
 func newTracesReceiver(config *Config, set receiver.Settings, nextConsumer consumer.Traces) (receiver.Traces, error) {
-
 	factory, err := newAMQPMessagingServiceFactory(config, set.Logger)
 	if err != nil {
 		set.Logger.Warn("Error validating messaging service configuration", zap.Any("error", err))
@@ -222,7 +221,6 @@ func (s *solaceTracesReceiver) receiveMessages(ctx context.Context, service mess
 			return err
 		}
 	}
-
 }
 
 // receiveMessage is the heart of the receiver's control flow. It will receive messages, unmarshal the message and forward the trace.
diff --git a/receiver/solacereceiver/unmarshaller_move.go b/receiver/solacereceiver/unmarshaller_move.go
index 5027c27f26ed..aa220c277b1c 100644
--- a/receiver/solacereceiver/unmarshaller_move.go
+++ b/receiver/solacereceiver/unmarshaller_move.go
@@ -70,7 +70,6 @@ func (u *brokerTraceMoveUnmarshallerV1) mapResourceSpanAttributes(spanData *move
 }
 
 func (u *brokerTraceMoveUnmarshallerV1) mapMoveSpanTracingInfo(spanData *move_v1.SpanData, span ptrace.Span) {
-
 	// hard coded to internal span
 	// SPAN_KIND_CONSUMER == 1
 	span.SetKind(ptrace.SpanKindInternal)
diff --git a/receiver/splunkenterprisereceiver/scraper.go b/receiver/splunkenterprisereceiver/scraper.go
index 4e78e709908a..54af1eae90da 100644
--- a/receiver/splunkenterprisereceiver/scraper.go
+++ b/receiver/splunkenterprisereceiver/scraper.go
@@ -426,7 +426,6 @@ func (s *splunkScraper) scrapeIndexerPipelineQueues(ctx context.Context, now pco
 			errs <- errMaxSearchWaitTimeExceeded
 			return
 		}
-
 	}
 	// Record the results
 	var host string
@@ -1665,7 +1664,6 @@ func (s *splunkScraper) scrapeSearchArtifacts(ctx context.Context, now pcommon.T
 	}
 
 	for _, f := range da.Entries {
-
 		if s.conf.MetricsBuilderConfig.Metrics.SplunkServerSearchartifactsAdhoc.Enabled {
 			adhocCount, err := strconv.ParseInt(f.Content.AdhocCount, 10, 64)
 			if err != nil {
diff --git a/receiver/splunkhecreceiver/receiver.go b/receiver/splunkhecreceiver/receiver.go
index 26043b1dfc7e..6d095ef61101 100644
--- a/receiver/splunkhecreceiver/receiver.go
+++ b/receiver/splunkhecreceiver/receiver.go
@@ -455,7 +455,6 @@ func (r *splunkReceiver) handleReq(resp http.ResponseWriter, req *http.Request)
 			}
 			events = append(events, &msg)
 		}
-
 	}
 	resourceCustomizer := r.createResourceCustomizer(req)
 	if r.logsConsumer != nil && len(events) > 0 {
diff --git a/receiver/splunkhecreceiver/receiver_test.go b/receiver/splunkhecreceiver/receiver_test.go
index 85a1662ca9bf..e09e2abfaead 100644
--- a/receiver/splunkhecreceiver/receiver_test.go
+++ b/receiver/splunkhecreceiver/receiver_test.go
@@ -628,7 +628,6 @@ func Test_splunkhecReceiver_AccessTokenPassthrough(t *testing.T) {
 			case <-time.After(5 * time.Second):
 				assert.Fail(t, "Timeout")
 			}
-
 		})
 	}
 }
@@ -1786,7 +1785,6 @@ func Test_splunkhecreceiver_handle_nested_fields(t *testing.T) {
 				assert.Equal(t, http.StatusBadRequest, w.Code)
 				assert.JSONEq(t, fmt.Sprintf(responseErrHandlingIndexedFields, 0), w.Body.String())
 			}
-
 		})
 	}
 }
diff --git a/receiver/splunkhecreceiver/splunk_to_logdata.go b/receiver/splunkhecreceiver/splunk_to_logdata.go
index bb5b07b897f7..7b76755ae8b7 100644
--- a/receiver/splunkhecreceiver/splunk_to_logdata.go
+++ b/receiver/splunkhecreceiver/splunk_to_logdata.go
@@ -140,7 +140,6 @@ func convertToValue(logger *zap.Logger, src any, dest pcommon.Value) error {
 	default:
 		logger.Debug("Unsupported value conversion", zap.Any("value", src))
 		return errCannotConvertValue
-
 	}
 	return nil
 }
diff --git a/receiver/splunkhecreceiver/splunk_to_logdata_test.go b/receiver/splunkhecreceiver/splunk_to_logdata_test.go
index f99d05b35d3d..15e8bc95df80 100644
--- a/receiver/splunkhecreceiver/splunk_to_logdata_test.go
+++ b/receiver/splunkhecreceiver/splunk_to_logdata_test.go
@@ -29,7 +29,6 @@ var defaultTestingHecConfig = &Config{
 }
 
 func Test_SplunkHecToLogData(t *testing.T) {
-
 	time := 0.123
 	nanoseconds := 123000000
 
diff --git a/receiver/splunkhecreceiver/splunkhec_to_metricdata.go b/receiver/splunkhecreceiver/splunkhec_to_metricdata.go
index d148576c0fac..568bc35cafbc 100644
--- a/receiver/splunkhecreceiver/splunkhec_to_metricdata.go
+++ b/receiver/splunkhecreceiver/splunkhec_to_metricdata.go
@@ -127,7 +127,6 @@ func buildAttributes(dimensions map[string]any) pcommon.Map {
 	attributes := pcommon.NewMap()
 	attributes.EnsureCapacity(len(dimensions))
 	for key, val := range dimensions {
-
 		if strings.HasPrefix(key, "metric_name") || key == "_value" {
 			continue
 		}
diff --git a/receiver/splunkhecreceiver/splunkhec_to_metricdata_test.go b/receiver/splunkhecreceiver/splunkhec_to_metricdata_test.go
index a218bbd83a88..9801b2350abd 100644
--- a/receiver/splunkhecreceiver/splunkhec_to_metricdata_test.go
+++ b/receiver/splunkhecreceiver/splunkhec_to_metricdata_test.go
@@ -62,7 +62,6 @@ func Test_splunkV2ToMetricsData(t *testing.T) {
 				pt.Fields["metric_name"] = "single"
 				pt.Fields["_value"] = int64Ptr(13)
 				return pt
-
 			}(),
 			wantMetricsData: buildDefaultMetricsData(nanos),
 			hecConfig: defaultTestingHecConfig,
diff --git a/receiver/sqlqueryreceiver/logs_receiver.go b/receiver/sqlqueryreceiver/logs_receiver.go
index f6d68978487c..06ae82fc3fbc 100644
--- a/receiver/sqlqueryreceiver/logs_receiver.go
+++ b/receiver/sqlqueryreceiver/logs_receiver.go
@@ -48,7 +48,6 @@ func newLogsReceiver(
 	createClient sqlquery.ClientProviderFunc,
 	nextConsumer consumer.Logs,
 ) (*logsReceiver, error) {
-
 	obsr, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{
 		ReceiverID: settings.ID,
 		ReceiverCreateSettings: settings,
@@ -268,7 +267,6 @@ func (queryReceiver *logsQueryReceiver) retrieveTrackingValue(ctx context.Contex
 	}
 
 	return string(storedTrackingValueBytes)
-
 }
 
 func (queryReceiver *logsQueryReceiver) collect(ctx context.Context) (plog.Logs, error) {
diff --git a/receiver/sqlserverreceiver/factory.go b/receiver/sqlserverreceiver/factory.go
index 5b374edb232f..00c235170b71 100644
--- a/receiver/sqlserverreceiver/factory.go
+++ b/receiver/sqlserverreceiver/factory.go
@@ -52,7 +52,6 @@ func setupQueries(cfg *Config) []string {
 		cfg.MetricsBuilderConfig.Metrics.SqlserverBatchSQLRecompilationRate.Enabled ||
 		cfg.MetricsBuilderConfig.Metrics.SqlserverBatchSQLCompilationRate.Enabled ||
 		cfg.MetricsBuilderConfig.Metrics.SqlserverUserConnectionCount.Enabled {
-
 		queries = append(queries, getSQLServerPerformanceCounterQuery(cfg.InstanceName))
 	}
 
diff --git a/receiver/sqlserverreceiver/queries_test.go b/receiver/sqlserverreceiver/queries_test.go
index 65e3cb050094..f45bf5742fdd 100644
--- a/receiver/sqlserverreceiver/queries_test.go
+++ b/receiver/sqlserverreceiver/queries_test.go
@@ -68,5 +68,4 @@ func TestQueryContents(t *testing.T) {
 			require.Equal(t, expected, actual)
 		})
 	}
-
 }
diff --git a/receiver/sqlserverreceiver/scraper.go b/receiver/sqlserverreceiver/scraper.go
index 487ba5176c63..a373719cb0ed 100644
--- a/receiver/sqlserverreceiver/scraper.go
+++ b/receiver/sqlserverreceiver/scraper.go
@@ -51,7 +51,6 @@ func newSQLServerScraper(id component.ID,
 	dbProviderFunc sqlquery.DbProviderFunc,
 	clientProviderFunc sqlquery.ClientProviderFunc,
 	mb *metadata.MetricsBuilder) *sqlServerScraperHelper {
-
 	return &sqlServerScraperHelper{
 		id: id,
 		sqlQuery: query,
diff --git a/receiver/sqlserverreceiver/scraper_test.go b/receiver/sqlserverreceiver/scraper_test.go
index fb52efe620d4..dd1e4d654809 100644
--- a/receiver/sqlserverreceiver/scraper_test.go
+++ b/receiver/sqlserverreceiver/scraper_test.go
@@ -165,7 +165,6 @@ func readFile(fname string) ([]sqlquery.StringMap, error) {
 	}
 
 	return metrics, nil
-
 }
 
 func (mc mockClient) QueryRows(context.Context, ...any) ([]sqlquery.StringMap, error) {
diff --git a/receiver/sshcheckreceiver/factory.go b/receiver/sshcheckreceiver/factory.go
index 003fb710a045..15ae2fbb0496 100644
--- a/receiver/sshcheckreceiver/factory.go
+++ b/receiver/sshcheckreceiver/factory.go
@@ -38,7 +38,6 @@ func createDefaultConfig() component.Config {
 }
 
 func createMetricsReceiver(_ context.Context, params receiver.Settings, rConf component.Config, consumer consumer.Metrics) (receiver.Metrics, error) {
-
 	cfg, ok := rConf.(*Config)
 	if !ok {
 		return nil, errConfigNotSSHCheck
diff --git a/receiver/sshcheckreceiver/scraper_test.go b/receiver/sshcheckreceiver/scraper_test.go
index 592a4a854350..899d89bce5a9 100644
--- a/receiver/sshcheckreceiver/scraper_test.go
+++ b/receiver/sshcheckreceiver/scraper_test.go
@@ -314,7 +314,6 @@ func TestCancellation(t *testing.T) {
 	_, err := scrpr.scrape(ctx)
 	require.Error(t, err, "should have returned error on canceled context")
 	require.EqualValues(t, err.Error(), ctx.Err().Error(), "scrape should return context's error")
-
 }
 
 // issue # 18193
diff --git a/receiver/statsdreceiver/config.go b/receiver/statsdreceiver/config.go
index 2cb36e8837f7..92a6cc653b14 100644
--- a/receiver/statsdreceiver/config.go
+++ b/receiver/statsdreceiver/config.go
@@ -34,7 +34,6 @@ func (c *Config) Validate() error {
 
 	var TimerHistogramMappingMissingObjectName bool
 	for _, eachMap := range c.TimerHistogramMapping {
-
 		if eachMap.StatsdType == "" {
 			TimerHistogramMappingMissingObjectName = true
 			break
diff --git a/receiver/statsdreceiver/internal/protocol/metric_translator_test.go b/receiver/statsdreceiver/internal/protocol/metric_translator_test.go
index a4609d502643..95f18b16f01f 100644
--- a/receiver/statsdreceiver/internal/protocol/metric_translator_test.go
+++ b/receiver/statsdreceiver/internal/protocol/metric_translator_test.go
@@ -63,7 +63,6 @@ func TestSetTimestampsForCounterMetric(t *testing.T) {
 		metric.Metrics().At(0).Sum().DataPoints().At(0).Timestamp(),
 		expectedMetrics.Metrics().At(0).Sum().DataPoints().At(0).Timestamp(),
 	)
-
 }
 
 func TestBuildGaugeMetric(t *testing.T) {
@@ -272,5 +271,4 @@ func TestBuildHistogramMetric(t *testing.T) {
 	require.Equal(t, "myvalue", val.Str())
 	val, _ = datapoint.Attributes().Get("mykey2")
 	require.Equal(t, "myvalue2", val.Str())
-
 }
diff --git a/receiver/statsdreceiver/internal/protocol/statsd_parser_test.go b/receiver/statsdreceiver/internal/protocol/statsd_parser_test.go
index c908e6991aa4..bfdab7167128 100644
--- a/receiver/statsdreceiver/internal/protocol/statsd_parser_test.go
+++ b/receiver/statsdreceiver/internal/protocol/statsd_parser_test.go
@@ -1636,7 +1636,6 @@ func TestStatsDParser_Mappings(t *testing.T) {
 }
 
 func TestStatsDParser_ScopeIsIncluded(t *testing.T) {
-
 	const devVersion = "dev-0.0.1"
 
 	p := &StatsDParser{
@@ -1671,7 +1670,6 @@ func TestStatsDParser_ScopeIsIncluded(t *testing.T) {
 		assert.Equal(t, receiverName, scope.Name())
 		assert.Equal(t, devVersion, scope.Version())
 	}
-
 }
 
 func TestTimeNowFunc(t *testing.T) {
@@ -1963,5 +1961,4 @@ func TestStatsDParser_IPOnlyAggregation(t *testing.T) {
 		Metrics().At(0).Sum().DataPoints().At(0).IntValue()
 
 	assert.Equal(t, int64(4), value)
-
 }
diff --git a/receiver/statsdreceiver/receiver.go b/receiver/statsdreceiver/receiver.go
index d22c5a5be10b..81e11a67b1e5 100644
--- a/receiver/statsdreceiver/receiver.go
+++ b/receiver/statsdreceiver/receiver.go
@@ -46,7 +46,6 @@ func newReceiver(
 	config Config,
 	nextConsumer consumer.Metrics,
 ) (receiver.Metrics, error) {
-
 	if config.NetAddr.Endpoint == "" {
 		config.NetAddr.Endpoint = "localhost:8125"
 	}
diff --git a/receiver/tlscheckreceiver/config_test.go b/receiver/tlscheckreceiver/config_test.go
index 54e1748352c9..2aa2c47cd4bc 100644
--- a/receiver/tlscheckreceiver/config_test.go
+++ b/receiver/tlscheckreceiver/config_test.go
@@ -89,7 +89,6 @@ func TestValidate(t *testing.T) {
 			} else {
 				require.NoError(t, actualErr)
 			}
-
 		})
 	}
 }
diff --git a/receiver/vcenterreceiver/config_test.go b/receiver/vcenterreceiver/config_test.go
index e4ce36c3e617..1d714f4f2348 100644
--- a/receiver/vcenterreceiver/config_test.go
+++ b/receiver/vcenterreceiver/config_test.go
@@ -110,5 +110,4 @@ func TestLoadConfig(t *testing.T) {
 	if diff := cmp.Diff(expected, cfg, cmpopts.IgnoreUnexported(metadata.MetricConfig{}), cmpopts.IgnoreUnexported(metadata.ResourceAttributeConfig{})); diff != "" {
 		t.Errorf("Config mismatch (-expected +actual):\n%s", diff)
 	}
-
 }
diff --git a/receiver/vcenterreceiver/metrics.go b/receiver/vcenterreceiver/metrics.go
index 863d1d3ee8e8..ab3bd4927940 100644
--- a/receiver/vcenterreceiver/metrics.go
+++ b/receiver/vcenterreceiver/metrics.go
@@ -73,7 +73,6 @@ func (v *vcenterMetricScraper) recordDatacenterStats(
 	v.mb.RecordVcenterDatacenterDiskSpaceDataPoint(ts, dcStat.DiskFree, metadata.AttributeDiskStateAvailable)
 	v.mb.RecordVcenterDatacenterCPULimitDataPoint(ts, dcStat.CPULimit)
 	v.mb.RecordVcenterDatacenterMemoryLimitDataPoint(ts, dcStat.MemoryLimit)
-
 }
 
 func getEntityStatusAttribute(status types.ManagedEntityStatus) (metadata.AttributeEntityStatus, bool) {
@@ -200,7 +199,6 @@ func (v *vcenterMetricScraper) recordResourcePoolStats(
 
 	v.mb.RecordVcenterResourcePoolCPUSharesDataPoint(ts, int64(s.Config.CpuAllocation.Shares.Shares))
 	v.mb.RecordVcenterResourcePoolMemorySharesDataPoint(ts, int64(s.Config.MemoryAllocation.Shares.Shares))
-
 }
 
 // recordClusterStats records stat metrics for a vSphere Host
@@ -312,7 +310,6 @@ func (v *vcenterMetricScraper) recordVMStats(
 
 	cpuReadiness := vm.Summary.QuickStats.OverallCpuReadiness
 	v.mb.RecordVcenterVMCPUReadinessDataPoint(ts, int64(cpuReadiness))
-
 }
 
 var hostPerfMetricList = []string{
diff --git a/receiver/webhookeventreceiver/req_to_log.go b/receiver/webhookeventreceiver/req_to_log.go
index ea29a57e3e32..5a8ceb13ddeb 100644
--- a/receiver/webhookeventreceiver/req_to_log.go
+++ b/receiver/webhookeventreceiver/req_to_log.go
@@ -58,5 +58,4 @@ func appendMetadata(resourceLog plog.ResourceLogs, query url.Values) {
 			resourceLog.Resource().Attributes().PutStr(k, query.Get(k))
 		}
 	}
-
 }
diff --git a/receiver/zipkinreceiver/trace_receiver.go b/receiver/zipkinreceiver/trace_receiver.go
index 0918f254a2b9..23c77a186d4b 100644
--- a/receiver/zipkinreceiver/trace_receiver.go
+++ b/receiver/zipkinreceiver/trace_receiver.go
@@ -254,7 +254,6 @@ func (zr *zipkinReceiver) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		w.WriteHeader(http.StatusInternalServerError)
 		_, _ = w.Write(errNextConsumerRespBody)
 	}
-
 }
 
 func transportType(r *http.Request, asZipkinv1 bool) string {
diff --git a/receiver/zookeeperreceiver/metrics.go b/receiver/zookeeperreceiver/metrics.go
index 80a8d49a12f1..86ae2eeb0049 100644
--- a/receiver/zookeeperreceiver/metrics.go
+++ b/receiver/zookeeperreceiver/metrics.go
@@ -111,7 +111,6 @@ func (m *metricCreator) generateComputedMetrics(logger *zap.Logger, ts pcommon.T
 	if err := m.computeNotSyncedFollowersMetric(ts); err != nil {
 		logger.Debug("metric computation failed", zap.Error(err))
 	}
-
 }
 
 func (m *metricCreator) computeNotSyncedFollowersMetric(ts pcommon.Timestamp) error {