From 474eb711880110f491355c64eb50d87c69829c74 Mon Sep 17 00:00:00 2001 From: Bogdan Drutu Date: Mon, 11 Nov 2024 11:30:34 -0800 Subject: [PATCH 1/7] [chore] Remove usage of internal expr where possible (#36295) Signed-off-by: Bogdan Drutu --- connector/countconnector/factory.go | 3 +-- connector/sumconnector/factory.go | 3 +-- exporter/honeycombmarkerexporter/logs_exporter.go | 3 +-- internal/filter/filterottl/filter.go | 15 +++++++-------- processor/logdedupprocessor/processor.go | 4 ++-- .../internal/sampling/ottl.go | 5 ++--- .../internal/common/processor.go | 2 +- 7 files changed, 15 insertions(+), 20 deletions(-) diff --git a/connector/countconnector/factory.go b/connector/countconnector/factory.go index e9a44d3111a4..ce4d35524fe5 100644 --- a/connector/countconnector/factory.go +++ b/connector/countconnector/factory.go @@ -13,7 +13,6 @@ import ( "go.opentelemetry.io/collector/consumer" "github.com/open-telemetry/opentelemetry-collector-contrib/connector/countconnector/internal/metadata" - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint" @@ -156,7 +155,7 @@ func createLogsToMetrics( } type metricDef[K any] struct { - condition expr.BoolExpr[K] + condition *ottl.ConditionSequence[K] desc string attrs []AttributeConfig } diff --git a/connector/sumconnector/factory.go b/connector/sumconnector/factory.go index 122484c9a843..7ebf2b263a4d 100644 --- a/connector/sumconnector/factory.go +++ b/connector/sumconnector/factory.go @@ -13,7 +13,6 @@ import ( "go.opentelemetry.io/collector/consumer" "github.com/open-telemetry/opentelemetry-collector-contrib/connector/sumconnector/internal/metadata" - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr" 
"github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint" @@ -161,7 +160,7 @@ func createLogsToMetrics( } type metricDef[K any] struct { - condition expr.BoolExpr[K] + condition *ottl.ConditionSequence[K] desc string attrs []AttributeConfig sourceAttr string diff --git a/exporter/honeycombmarkerexporter/logs_exporter.go b/exporter/honeycombmarkerexporter/logs_exporter.go index 31aa476e0508..0e9bf4f7bc0b 100644 --- a/exporter/honeycombmarkerexporter/logs_exporter.go +++ b/exporter/honeycombmarkerexporter/logs_exporter.go @@ -19,7 +19,6 @@ import ( "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/pdata/plog" - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog" @@ -34,7 +33,7 @@ const ( type marker struct { Marker - logBoolExpr expr.BoolExpr[ottllog.TransformContext] + logBoolExpr *ottl.ConditionSequence[ottllog.TransformContext] } type honeycombLogsExporter struct { diff --git a/internal/filter/filterottl/filter.go b/internal/filter/filterottl/filter.go index e4dad6ee9359..705b2acf5a4e 100644 --- a/internal/filter/filterottl/filter.go +++ b/internal/filter/filterottl/filter.go @@ -6,7 +6,6 @@ package filterottl // import "github.com/open-telemetry/opentelemetry-collector- import ( "go.opentelemetry.io/collector/component" - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottldatapoint" 
"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog" @@ -20,7 +19,7 @@ import ( // NewBoolExprForSpan creates a BoolExpr[ottlspan.TransformContext] that will return true if any of the given OTTL conditions evaluate to true. // The passed in functions should use the ottlspan.TransformContext. // If a function named `match` is not present in the function map it will be added automatically so that parsing works as expected -func NewBoolExprForSpan(conditions []string, functions map[string]ottl.Factory[ottlspan.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottlspan.TransformContext], error) { +func NewBoolExprForSpan(conditions []string, functions map[string]ottl.Factory[ottlspan.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (*ottl.ConditionSequence[ottlspan.TransformContext], error) { parser, err := ottlspan.NewParser(functions, set) if err != nil { return nil, err @@ -36,7 +35,7 @@ func NewBoolExprForSpan(conditions []string, functions map[string]ottl.Factory[o // NewBoolExprForSpanEvent creates a BoolExpr[ottlspanevent.TransformContext] that will return true if any of the given OTTL conditions evaluate to true. // The passed in functions should use the ottlspanevent.TransformContext. 
// If a function named `match` is not present in the function map it will be added automatically so that parsing works as expected -func NewBoolExprForSpanEvent(conditions []string, functions map[string]ottl.Factory[ottlspanevent.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottlspanevent.TransformContext], error) { +func NewBoolExprForSpanEvent(conditions []string, functions map[string]ottl.Factory[ottlspanevent.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (*ottl.ConditionSequence[ottlspanevent.TransformContext], error) { parser, err := ottlspanevent.NewParser(functions, set) if err != nil { return nil, err @@ -52,7 +51,7 @@ func NewBoolExprForSpanEvent(conditions []string, functions map[string]ottl.Fact // NewBoolExprForMetric creates a BoolExpr[ottlmetric.TransformContext] that will return true if any of the given OTTL conditions evaluate to true. // The passed in functions should use the ottlmetric.TransformContext. // If a function named `match` is not present in the function map it will be added automatically so that parsing works as expected -func NewBoolExprForMetric(conditions []string, functions map[string]ottl.Factory[ottlmetric.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottlmetric.TransformContext], error) { +func NewBoolExprForMetric(conditions []string, functions map[string]ottl.Factory[ottlmetric.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (*ottl.ConditionSequence[ottlmetric.TransformContext], error) { parser, err := ottlmetric.NewParser(functions, set) if err != nil { return nil, err @@ -68,7 +67,7 @@ func NewBoolExprForMetric(conditions []string, functions map[string]ottl.Factory // NewBoolExprForDataPoint creates a BoolExpr[ottldatapoint.TransformContext] that will return true if any of the given OTTL conditions evaluate to true. 
// The passed in functions should use the ottldatapoint.TransformContext. // If a function named `match` is not present in the function map it will be added automatically so that parsing works as expected -func NewBoolExprForDataPoint(conditions []string, functions map[string]ottl.Factory[ottldatapoint.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottldatapoint.TransformContext], error) { +func NewBoolExprForDataPoint(conditions []string, functions map[string]ottl.Factory[ottldatapoint.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (*ottl.ConditionSequence[ottldatapoint.TransformContext], error) { parser, err := ottldatapoint.NewParser(functions, set) if err != nil { return nil, err @@ -84,7 +83,7 @@ func NewBoolExprForDataPoint(conditions []string, functions map[string]ottl.Fact // NewBoolExprForLog creates a BoolExpr[ottllog.TransformContext] that will return true if any of the given OTTL conditions evaluate to true. // The passed in functions should use the ottllog.TransformContext. // If a function named `match` is not present in the function map it will be added automatically so that parsing works as expected -func NewBoolExprForLog(conditions []string, functions map[string]ottl.Factory[ottllog.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottllog.TransformContext], error) { +func NewBoolExprForLog(conditions []string, functions map[string]ottl.Factory[ottllog.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (*ottl.ConditionSequence[ottllog.TransformContext], error) { parser, err := ottllog.NewParser(functions, set) if err != nil { return nil, err @@ -100,7 +99,7 @@ func NewBoolExprForLog(conditions []string, functions map[string]ottl.Factory[ot // NewBoolExprForResource creates a BoolExpr[ottlresource.TransformContext] that will return true if any of the given OTTL conditions evaluate to true. 
// The passed in functions should use the ottlresource.TransformContext. // If a function named `match` is not present in the function map it will be added automatically so that parsing works as expected -func NewBoolExprForResource(conditions []string, functions map[string]ottl.Factory[ottlresource.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottlresource.TransformContext], error) { +func NewBoolExprForResource(conditions []string, functions map[string]ottl.Factory[ottlresource.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (*ottl.ConditionSequence[ottlresource.TransformContext], error) { parser, err := ottlresource.NewParser(functions, set) if err != nil { return nil, err @@ -116,7 +115,7 @@ func NewBoolExprForResource(conditions []string, functions map[string]ottl.Facto // NewBoolExprForScope creates a BoolExpr[ottlscope.TransformContext] that will return true if any of the given OTTL conditions evaluate to true. // The passed in functions should use the ottlresource.TransformContext. 
// If a function named `match` is not present in the function map it will be added automatically so that parsing works as expected -func NewBoolExprForScope(conditions []string, functions map[string]ottl.Factory[ottlscope.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (expr.BoolExpr[ottlscope.TransformContext], error) { +func NewBoolExprForScope(conditions []string, functions map[string]ottl.Factory[ottlscope.TransformContext], errorMode ottl.ErrorMode, set component.TelemetrySettings) (*ottl.ConditionSequence[ottlscope.TransformContext], error) { parser, err := ottlscope.NewParser(functions, set) if err != nil { return nil, err diff --git a/processor/logdedupprocessor/processor.go b/processor/logdedupprocessor/processor.go index 804c312bff0d..efd5095a2eaf 100644 --- a/processor/logdedupprocessor/processor.go +++ b/processor/logdedupprocessor/processor.go @@ -16,7 +16,7 @@ import ( "go.opentelemetry.io/collector/processor" "go.uber.org/zap" - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/logdedupprocessor/internal/metadata" ) @@ -24,7 +24,7 @@ import ( // logDedupProcessor is a logDedupProcessor that counts duplicate instances of logs. 
type logDedupProcessor struct { emitInterval time.Duration - conditions expr.BoolExpr[ottllog.TransformContext] + conditions *ottl.ConditionSequence[ottllog.TransformContext] aggregator *logAggregator remover *fieldRemover nextConsumer consumer.Logs diff --git a/processor/tailsamplingprocessor/internal/sampling/ottl.go b/processor/tailsamplingprocessor/internal/sampling/ottl.go index 4e50358b002e..7d5f520ece50 100644 --- a/processor/tailsamplingprocessor/internal/sampling/ottl.go +++ b/processor/tailsamplingprocessor/internal/sampling/ottl.go @@ -11,7 +11,6 @@ import ( "go.opentelemetry.io/collector/pdata/pcommon" "go.uber.org/zap" - "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/expr" "github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter/filterottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan" @@ -19,8 +18,8 @@ import ( ) type ottlConditionFilter struct { - sampleSpanExpr expr.BoolExpr[ottlspan.TransformContext] - sampleSpanEventExpr expr.BoolExpr[ottlspanevent.TransformContext] + sampleSpanExpr *ottl.ConditionSequence[ottlspan.TransformContext] + sampleSpanEventExpr *ottl.ConditionSequence[ottlspanevent.TransformContext] errorMode ottl.ErrorMode logger *zap.Logger } diff --git a/processor/transformprocessor/internal/common/processor.go b/processor/transformprocessor/internal/common/processor.go index 137cac8ffeac..dee7d24e7ba9 100644 --- a/processor/transformprocessor/internal/common/processor.go +++ b/processor/transformprocessor/internal/common/processor.go @@ -212,7 +212,7 @@ func (pc parserCollection) parseCommonContextStatements(contextStatement Context } func parseGlobalExpr[K any]( - boolExprFunc func([]string, map[string]ottl.Factory[K], ottl.ErrorMode, component.TelemetrySettings) (expr.BoolExpr[K], error), + boolExprFunc func([]string, map[string]ottl.Factory[K], ottl.ErrorMode, 
component.TelemetrySettings) (*ottl.ConditionSequence[K], error), conditions []string, pc parserCollection, standardFuncs map[string]ottl.Factory[K]) (expr.BoolExpr[K], error) { From 64bcbb3ec6b3c2e2818cfa37168848c40dd6b362 Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Mon, 11 Nov 2024 20:32:28 +0100 Subject: [PATCH 2/7] [chore]: enable whitespace linter for testbed (#36289) #### Description [whitespace](https://golangci-lint.run/usage/linters/#whitespace) is a linter that checks for unnecessary newlines at the start and end of functions. Signed-off-by: Matthieu MOREL --- testbed/correctnesstests/connectors/correctness_test.go | 2 -- testbed/correctnesstests/utils.go | 1 - testbed/datasenders/jaeger.go | 1 - testbed/datasenders/syslog.go | 1 - testbed/datasenders/tcpudp.go | 1 - testbed/testbed/child_process_collector.go | 2 -- testbed/testbed/data_providers.go | 1 - testbed/testbed/mock_backend.go | 1 - testbed/testbed/test_case.go | 1 - testbed/testbed/validator.go | 2 -- testbed/tests/e2e_test.go | 1 - testbed/tests/log_test.go | 1 - testbed/tests/metric_test.go | 1 - testbed/tests/resource_processor_test.go | 1 - testbed/tests/scenarios.go | 2 -- testbed/tests/trace_test.go | 2 -- 16 files changed, 21 deletions(-) diff --git a/testbed/correctnesstests/connectors/correctness_test.go b/testbed/correctnesstests/connectors/correctness_test.go index 40a41d63c3c5..903e329e5a4b 100644 --- a/testbed/correctnesstests/connectors/correctness_test.go +++ b/testbed/correctnesstests/connectors/correctness_test.go @@ -43,7 +43,6 @@ func TestGoldenData(t *testing.T) { t.Run(sampleTest.TestName, func(t *testing.T) { testWithGoldenDataset(t, sampleTest.DataSender, sampleTest.DataReceiver, sampleTest.ResourceSpec, sampleTest.DataConnector, processors) }) - } func testWithGoldenDataset( @@ -96,5 +95,4 @@ func testWithGoldenDataset( 3*time.Second, "all data items received") tc.StopAgent() - } diff --git a/testbed/correctnesstests/utils.go b/testbed/correctnesstests/utils.go 
index 38dbf813d00d..8ffd41ad0330 100644 --- a/testbed/correctnesstests/utils.go +++ b/testbed/correctnesstests/utils.go @@ -33,7 +33,6 @@ func CreateConfigYaml( connector testbed.DataConnector, processors []ProcessorNameAndConfigBody, ) string { - // Prepare extra processor config section and comma-separated list of extra processor // names to use in corresponding "processors" settings. processorsSections := "" diff --git a/testbed/datasenders/jaeger.go b/testbed/datasenders/jaeger.go index 52adcebee9ae..eb1cb488238a 100644 --- a/testbed/datasenders/jaeger.go +++ b/testbed/datasenders/jaeger.go @@ -145,7 +145,6 @@ func (s *protoGRPCSender) pushTraces( ctx context.Context, td ptrace.Traces, ) error { - batches := jaeger.ProtoFromTraces(td) if s.metadata.Len() > 0 { diff --git a/testbed/datasenders/syslog.go b/testbed/datasenders/syslog.go index 24cec672a6c7..04f6c1140b33 100644 --- a/testbed/datasenders/syslog.go +++ b/testbed/datasenders/syslog.go @@ -122,7 +122,6 @@ func (f *SyslogWriter) SendCheck() error { if err != nil { return nil } - } return nil } diff --git a/testbed/datasenders/tcpudp.go b/testbed/datasenders/tcpudp.go index 7ed2af93a50f..cdf973142b41 100644 --- a/testbed/datasenders/tcpudp.go +++ b/testbed/datasenders/tcpudp.go @@ -115,7 +115,6 @@ func (f *TCPUDPWriter) SendCheck() error { if err != nil { return nil } - } return nil } diff --git a/testbed/testbed/child_process_collector.go b/testbed/testbed/child_process_collector.go index 61114c5b95b5..6661f35595dd 100644 --- a/testbed/testbed/child_process_collector.go +++ b/testbed/testbed/child_process_collector.go @@ -176,7 +176,6 @@ func expandExeFileName(exeName string) string { // the process to. // cmdArgs is the command line arguments to pass to the process. 
func (cp *childProcessCollector) Start(params StartParams) error { - cp.name = params.Name cp.doneSignal = make(chan struct{}) cp.resourceSpec = params.resourceSpec @@ -249,7 +248,6 @@ func (cp *childProcessCollector) Stop() (stopped bool, err error) { return false, nil } cp.stopOnce.Do(func() { - if !cp.isStarted { // Process wasn't started, nothing to stop. return diff --git a/testbed/testbed/data_providers.go b/testbed/testbed/data_providers.go index ea0393e53dde..a5808a2aa81a 100644 --- a/testbed/testbed/data_providers.go +++ b/testbed/testbed/data_providers.go @@ -60,7 +60,6 @@ func (dp *perfTestDataProvider) GenerateTraces() (ptrace.Traces, bool) { traceID := dp.traceIDSequence.Add(1) for i := 0; i < dp.options.ItemsPerBatch; i++ { - startTime := time.Now().Add(time.Duration(i+int(traceID)*1000) * time.Second) endTime := startTime.Add(time.Millisecond) diff --git a/testbed/testbed/mock_backend.go b/testbed/testbed/mock_backend.go index 982e1c63cdca..9bca45545bc1 100644 --- a/testbed/testbed/mock_backend.go +++ b/testbed/testbed/mock_backend.go @@ -218,7 +218,6 @@ func (tc *MockTraceConsumer) ConsumeTraces(_ context.Context, td ptrace.Traces) // Ignore the seqnums for now. We will use them later. 
_ = spanSeqnum _ = traceSeqnum - } } } diff --git a/testbed/testbed/test_case.go b/testbed/testbed/test_case.go index 749527cf0c0b..aaca07c86d0a 100644 --- a/testbed/testbed/test_case.go +++ b/testbed/testbed/test_case.go @@ -345,5 +345,4 @@ func (tc *TestCase) AgentLogsContains(text string) bool { res, _ := grep.Output() return string(res) != "" - } diff --git a/testbed/testbed/validator.go b/testbed/testbed/validator.go index 6fed95ee857b..671d3503408f 100644 --- a/testbed/testbed/validator.go +++ b/testbed/testbed/validator.go @@ -48,7 +48,6 @@ func (v *LogPresentValidator) Validate(tc *TestCase) { } func (v *LogPresentValidator) RecordResults(tc *TestCase) { - var result string if tc.t.Failed() { result = "FAIL" @@ -418,7 +417,6 @@ func (v *CorrectnessTestValidator) diffSpanLinks(sentSpan ptrace.Span, recdSpan } v.assertionFailures = append(v.assertionFailures, af) } - } } if sentSpan.DroppedLinksCount() != recdSpan.DroppedLinksCount() { diff --git a/testbed/tests/e2e_test.go b/testbed/tests/e2e_test.go index 6d1b6800dcc3..4173a78978df 100644 --- a/testbed/tests/e2e_test.go +++ b/testbed/tests/e2e_test.go @@ -17,7 +17,6 @@ import ( ) func TestIdleMode(t *testing.T) { - options := testbed.LoadOptions{DataItemsPerSecond: 10_000, ItemsPerBatch: 10} dataProvider := testbed.NewPerfTestDataProvider(options) diff --git a/testbed/tests/log_test.go b/testbed/tests/log_test.go index 8b44f83f670a..41cb83ed0797 100644 --- a/testbed/tests/log_test.go +++ b/testbed/tests/log_test.go @@ -241,7 +241,6 @@ func TestLogOtlpSendingQueue(t *testing.T) { nil, nil) }) - } func TestLogLargeFiles(t *testing.T) { diff --git a/testbed/tests/metric_test.go b/testbed/tests/metric_test.go index 03dd0ccc26ab..6eb8b7fd9829 100644 --- a/testbed/tests/metric_test.go +++ b/testbed/tests/metric_test.go @@ -91,7 +91,6 @@ func TestMetric10kDPS(t *testing.T) { ) }) } - } func TestMetricsFromFile(t *testing.T) { diff --git a/testbed/tests/resource_processor_test.go 
b/testbed/tests/resource_processor_test.go index ebe91c449a2d..0e720f7ff076 100644 --- a/testbed/tests/resource_processor_test.go +++ b/testbed/tests/resource_processor_test.go @@ -49,7 +49,6 @@ type resourceProcessorTestCase struct { } func getResourceProcessorTestCases() []resourceProcessorTestCase { - tests := []resourceProcessorTestCase{ { name: "update_and_rename_existing_attributes", diff --git a/testbed/tests/scenarios.go b/testbed/tests/scenarios.go index 85973f89b29a..2b1d7c185a45 100644 --- a/testbed/tests/scenarios.go +++ b/testbed/tests/scenarios.go @@ -44,7 +44,6 @@ func createConfigYaml( processors []ProcessorNameAndConfigBody, extensions map[string]string, ) string { - // Create a config. Note that our DataSender is used to generate a config for Collector's // receiver and our DataReceiver is used to generate a config for Collector's exporter. // This is because our DataSender sends to Collector's receiver and our DataReceiver @@ -271,7 +270,6 @@ func Scenario1kSPSWithAttrs(t *testing.T, args []string, tests []TestCase, proce test := tests[i] t.Run(fmt.Sprintf("%d*%dbytes", test.attrCount, test.attrSizeByte), func(t *testing.T) { - options := constructLoadOptions(test) agentProc := testbed.NewChildProcessCollector(testbed.WithEnvVar("GOMAXPROCS", "2")) diff --git a/testbed/tests/trace_test.go b/testbed/tests/trace_test.go index 4cb95241e7a9..b0a84e209649 100644 --- a/testbed/tests/trace_test.go +++ b/testbed/tests/trace_test.go @@ -180,7 +180,6 @@ func TestTrace10kSPSJaegerGRPC(t *testing.T) { } func TestTraceNoBackend10kSPS(t *testing.T) { - limitProcessors := []ProcessorNameAndConfigBody{ { Name: "memory_limiter", @@ -277,7 +276,6 @@ func verifySingleSpan( spanName string, verifyReceived func(span ptrace.Span), ) { - // Clear previously received traces. 
tc.MockBackend.ClearReceivedItems() startCounter := tc.MockBackend.DataItemsReceived() From dc8197f909220365bd5bbc1887802854ae718353 Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Tue, 12 Nov 2024 00:03:19 +0100 Subject: [PATCH 3/7] [chore]: enable whitespace linter for pkg and internal (#36298) #### Description [whitespace](https://golangci-lint.run/usage/linters/#whitespace) is a linter that checks for unnecessary newlines at the start and end of functions. Signed-off-by: Matthieu MOREL --- internal/aws/awsutil/conn.go | 1 - internal/aws/containerinsight/utils_test.go | 2 - internal/aws/cwlogs/cwlog_client.go | 1 - internal/aws/cwlogs/pusher.go | 3 -- internal/aws/cwlogs/pusher_test.go | 1 - internal/aws/k8s/k8sclient/obj_store.go | 1 - internal/aws/metrics/metric_calculator.go | 1 - internal/aws/xray/tracesegment_test.go | 1 - internal/aws/xray/xray_client_test.go | 1 - .../coreinternal/aggregateutil/aggregate.go | 1 - .../attraction/attraction_test.go | 6 --- internal/docker/docker_test.go | 1 - internal/filter/filterexpr/matcher_test.go | 1 - internal/filter/filterlog/filterlog.go | 1 - .../filter/filtermatcher/attributematcher.go | 2 - internal/filter/filterspan/filterspan_test.go | 1 - internal/k8stest/client.go | 1 - internal/k8stest/k8s_collector.go | 1 - internal/kafka/authentication.go | 1 - .../kafka/awsmsk/iam_scram_client_test.go | 1 - internal/kafka/scram_client.go | 1 - internal/kubelet/client.go | 2 - internal/metadataproviders/system/metadata.go | 1 - internal/otelarrow/compression/zstd/zstd.go | 1 - internal/otelarrow/test/e2e_test.go | 3 -- pkg/batchpersignal/batchpersignal_test.go | 1 - pkg/golden/sort_metrics_test.go | 2 - pkg/ottl/boolean_value.go | 1 - pkg/ottl/contexts/internal/metric_test.go | 1 - .../contexts/ottldatapoint/datapoint_test.go | 1 - pkg/ottl/contexts/ottllog/log_test.go | 1 - pkg/ottl/contexts/ottlmetric/metrics_test.go | 1 - .../contexts/ottlspanevent/span_events.go | 1 - pkg/ottl/e2e/e2e_test.go | 3 -- 
pkg/ottl/expression.go | 1 - pkg/ottl/math_test.go | 2 - pkg/ottl/ottlfuncs/func_base64decode.go | 1 - pkg/ottl/ottlfuncs/func_decode_test.go | 1 - .../func_extract_grok_patterns_test.go | 1 - pkg/ottl/ottlfuncs/func_flatten_test.go | 1 - pkg/ottl/ottlfuncs/func_fnv.go | 1 - .../ottlfuncs/func_keep_matching_keys_test.go | 1 - pkg/ottl/ottlfuncs/func_md5.go | 1 - pkg/ottl/ottlfuncs/func_merge_maps_test.go | 1 - pkg/ottl/ottlfuncs/func_parse_json_test.go | 2 - .../ottlfuncs/func_replace_pattern_test.go | 2 - pkg/ottl/ottlfuncs/func_sha1.go | 1 - pkg/ottl/ottlfuncs/func_sha256.go | 1 - pkg/ottl/ottlfuncs/func_sha512.go | 1 - pkg/ottl/ottlfuncs/func_sort_test.go | 1 - .../ottlfuncs/func_to_key_value_string.go | 1 - pkg/pdatatest/pmetrictest/options.go | 3 -- .../resource_to_telemetry_test.go | 2 - pkg/stanza/adapter/benchmark_test.go | 1 - pkg/stanza/adapter/frompdataconverter_test.go | 1 - pkg/stanza/adapter/receiver_test.go | 1 - pkg/stanza/adapter/storage.go | 1 - pkg/stanza/fileconsumer/attrs/attrs_test.go | 1 - pkg/stanza/fileconsumer/file_test.go | 2 - .../internal/checkpoint/checkpoint_test.go | 1 - .../internal/fileset/fileset_test.go | 1 - .../fileconsumer/internal/reader/factory.go | 1 - pkg/stanza/operator/input/windows/xml_test.go | 1 - .../operator/parser/container/parser.go | 1 - .../operator/parser/container/parser_test.go | 1 - .../operator/parser/jsonarray/config_test.go | 1 - pkg/stanza/operator/parser/syslog/parser.go | 2 - .../transformer/recombine/transformer_test.go | 2 - .../transformer/retain/config_test.go | 1 - pkg/status/aggregator_test.go | 1 - .../azure/resourcelogs_to_logs_test.go | 1 - .../azurelogs/resourcelogs_to_logs_test.go | 1 - .../jaeger/jaegerproto_to_traces_test.go | 2 - .../jaeger/jaegerthrift_to_traces_test.go | 1 - .../jaeger/traces_to_jaegerproto.go | 2 - .../jaeger/traces_to_jaegerproto_test.go | 2 - pkg/translator/loki/convert.go | 1 - pkg/translator/loki/encode.go | 1 - pkg/translator/loki/encode_test.go | 2 - 
pkg/translator/opencensus/traces_to_oc.go | 1 - pkg/translator/prometheus/normalize_label.go | 1 - .../prometheus/normalize_label_test.go | 2 - pkg/translator/prometheus/normalize_name.go | 2 - .../prometheus/normalize_name_test.go | 41 ------------------- pkg/translator/prometheus/testutils_test.go | 2 - .../prometheus/unit_to_ucum_test.go | 1 - .../prometheusremotewrite/helper.go | 1 - .../number_data_points_v2_test.go | 2 - .../otlp_to_openmetrics_metadata_test.go | 1 - pkg/translator/zipkin/zipkinv1/json.go | 1 - .../zipkin/zipkinv2/from_translator.go | 2 - 91 files changed, 164 deletions(-) diff --git a/internal/aws/awsutil/conn.go b/internal/aws/awsutil/conn.go index 4933a1bf9ccd..820b03c75c38 100644 --- a/internal/aws/awsutil/conn.go +++ b/internal/aws/awsutil/conn.go @@ -137,7 +137,6 @@ func GetAWSConfigSession(logger *zap.Logger, cn ConnAttr, cfg *AWSSessionSetting logger.Debug("Fetch region from ec2 metadata", zap.String("region", awsRegion)) } } - } if awsRegion == "" { diff --git a/internal/aws/containerinsight/utils_test.go b/internal/aws/containerinsight/utils_test.go index 87458895ac70..e1fe4df48c2c 100644 --- a/internal/aws/containerinsight/utils_test.go +++ b/internal/aws/containerinsight/utils_test.go @@ -133,7 +133,6 @@ func convertToFloat64(value any) float64 { func checkMetricsAreExpected(t *testing.T, md pmetric.Metrics, fields map[string]any, tags map[string]string, expectedUnits map[string]string) { - rms := md.ResourceMetrics() assert.Equal(t, 1, rms.Len()) @@ -265,7 +264,6 @@ func TestConvertToOTLPMetricsForClusterMetrics(t *testing.T) { } md = ConvertToOTLPMetrics(fields, tags, zap.NewNop()) checkMetricsAreExpected(t, md, fields, tags, expectedUnits) - } func TestConvertToOTLPMetricsForContainerMetrics(t *testing.T) { diff --git a/internal/aws/cwlogs/cwlog_client.go b/internal/aws/cwlogs/cwlog_client.go index 1cbf21ab1080..106aca07390a 100644 --- a/internal/aws/cwlogs/cwlog_client.go +++ b/internal/aws/cwlogs/cwlog_client.go @@ -122,7 
+122,6 @@ func (client *Client) PutLogEvents(input *cloudwatchlogs.PutLogEventsInput, retr client.logger.Error("cwlog_client: Error occurs in PutLogEvents", zap.Error(awsErr)) return err } - } //TODO: Should have metrics to provide visibility of these failures diff --git a/internal/aws/cwlogs/pusher.go b/internal/aws/cwlogs/pusher.go index cee16a6941bb..93c68222e943 100644 --- a/internal/aws/cwlogs/pusher.go +++ b/internal/aws/cwlogs/pusher.go @@ -195,7 +195,6 @@ type logPusher struct { // NewPusher creates a logPusher instance func NewPusher(streamKey StreamKey, retryCnt int, svcStructuredLog Client, logger *zap.Logger) Pusher { - pusher := newLogPusher(streamKey, svcStructuredLog, logger) pusher.retryCnt = defaultRetryCount @@ -250,7 +249,6 @@ func (p *logPusher) ForceFlush() error { } func (p *logPusher) pushEventBatch(req any) error { - // http://docs.aws.amazon.com/goto/SdkForGoV1/logs-2014-03-28/PutLogEvents // The log events in the batch must be in chronological ordered by their // timestamp (the time the event occurred, expressed as the number of milliseconds @@ -296,7 +294,6 @@ func (p *logPusher) addLogEvent(logEvent *Event) *eventBatch { } func (p *logPusher) renewEventBatch() *eventBatch { - var prevBatch *eventBatch if len(p.logEventBatch.putLogEventsInput.LogEvents) > 0 { prevBatch = p.logEventBatch diff --git a/internal/aws/cwlogs/pusher_test.go b/internal/aws/cwlogs/pusher_test.go index 11e445fdd126..5ba30bba0349 100644 --- a/internal/aws/cwlogs/pusher_test.go +++ b/internal/aws/cwlogs/pusher_test.go @@ -172,7 +172,6 @@ func TestPusher_addLogEventBatch(t *testing.T) { p.logEventBatch.byteTotal = 1 assert.Nil(t, p.addLogEvent(nil)) assert.Len(t, p.logEventBatch.putLogEventsInput.LogEvents, 1) - } func TestAddLogEventWithValidation(t *testing.T) { diff --git a/internal/aws/k8s/k8sclient/obj_store.go b/internal/aws/k8s/k8sclient/obj_store.go index 8f51bd5bf185..288e6eb26f84 100644 --- a/internal/aws/k8s/k8sclient/obj_store.go +++ 
b/internal/aws/k8s/k8sclient/obj_store.go @@ -80,7 +80,6 @@ func (s *ObjStore) Update(obj any) error { // Delete implements the Delete method of the store interface. // Delete deletes an existing entry in the ObjStore. func (s *ObjStore) Delete(obj any) error { - o, err := meta.Accessor(obj) if err != nil { return err diff --git a/internal/aws/metrics/metric_calculator.go b/internal/aws/metrics/metric_calculator.go index 5d4118e42cbd..16435ef7f754 100644 --- a/internal/aws/metrics/metric_calculator.go +++ b/internal/aws/metrics/metric_calculator.go @@ -157,7 +157,6 @@ func (m *MapWithExpiry) Shutdown() error { return errors.New("shutdown called on an already closed channel") default: close(m.doneChan) - } return nil } diff --git a/internal/aws/xray/tracesegment_test.go b/internal/aws/xray/tracesegment_test.go index 33add5dffca8..a25b538eae80 100644 --- a/internal/aws/xray/tracesegment_test.go +++ b/internal/aws/xray/tracesegment_test.go @@ -598,7 +598,6 @@ func TestTraceBodyUnMarshalling(t *testing.T) { ExceptionID: String("abcdefghijklmnop"), }, }, actualSeg, testCase+": unmarshalled segment is different from the expected") - }, }, { diff --git a/internal/aws/xray/xray_client_test.go b/internal/aws/xray/xray_client_test.go index 9fba5394434a..a0b5a7c648f7 100644 --- a/internal/aws/xray/xray_client_test.go +++ b/internal/aws/xray/xray_client_test.go @@ -39,5 +39,4 @@ func TestUserAgent(t *testing.T) { assert.Contains(t, req.HTTPRequest.UserAgent(), "xray-otel-exporter/") assert.Contains(t, req.HTTPRequest.UserAgent(), "exec-env/") assert.Contains(t, req.HTTPRequest.UserAgent(), "OS/") - } diff --git a/internal/coreinternal/aggregateutil/aggregate.go b/internal/coreinternal/aggregateutil/aggregate.go index c7bc4613b716..0cd986a33037 100644 --- a/internal/coreinternal/aggregateutil/aggregate.go +++ b/internal/coreinternal/aggregateutil/aggregate.go @@ -188,7 +188,6 @@ func mergeNumberDataPoints(dpsMap map[string]pmetric.NumberDataPointSlice, agg A 
dp.SetDoubleValue((medianNumbers[mNumber-1] + medianNumbers[mNumber]) / 2) } } - } case pmetric.NumberDataPointValueTypeInt: medianNumbers := []int64{dp.IntValue()} diff --git a/internal/coreinternal/attraction/attraction_test.go b/internal/coreinternal/attraction/attraction_test.go index f1df1674d14b..a107462efed2 100644 --- a/internal/coreinternal/attraction/attraction_test.go +++ b/internal/coreinternal/attraction/attraction_test.go @@ -86,7 +86,6 @@ func TestAttributes_InsertValue(t *testing.T) { } func TestAttributes_InsertFromAttribute(t *testing.T) { - testCases := []testCase{ // Ensure no attribute is inserted because because attributes do not exist. { @@ -144,7 +143,6 @@ func TestAttributes_InsertFromAttribute(t *testing.T) { } func TestAttributes_UpdateValue(t *testing.T) { - testCases := []testCase{ // Ensure no changes to the span as there is no attributes map. { @@ -190,7 +188,6 @@ func TestAttributes_UpdateValue(t *testing.T) { } func TestAttributes_UpdateFromAttribute(t *testing.T) { - testCases := []testCase{ // Ensure no changes to the span as there is no attributes map. { @@ -418,7 +415,6 @@ func TestAttributes_Extract(t *testing.T) { } func TestAttributes_UpsertFromAttribute(t *testing.T) { - testCases := []testCase{ // Ensure `new_user_key` is not set for spans with no attributes. 
{ @@ -926,7 +922,6 @@ func TestValidConfiguration(t *testing.T) { {Key: "five", FromAttribute: "two", Action: UPSERT}, {Key: "two", Regex: compiledRegex, AttrNames: []string{"", "documentId"}, Action: EXTRACT}, }, ap.actions) - } func hash(b []byte) string { @@ -950,7 +945,6 @@ func (a mockInfoAuth) GetAttributeNames() []string { } func TestFromContext(t *testing.T) { - mdCtx := client.NewContext(context.TODO(), client.Info{ Metadata: client.NewMetadata(map[string][]string{ "source_single_val": {"single_val"}, diff --git a/internal/docker/docker_test.go b/internal/docker/docker_test.go index 2d7cae2b69c1..19cddeeed000 100644 --- a/internal/docker/docker_test.go +++ b/internal/docker/docker_test.go @@ -132,7 +132,6 @@ func TestFetchingTimeouts(t *testing.T) { t, time.Now().UnixNano(), shouldHaveTaken, "Client timeouts don't appear to have been exercised.", ) - } func TestToStatsJSONErrorHandling(t *testing.T) { diff --git a/internal/filter/filterexpr/matcher_test.go b/internal/filter/filterexpr/matcher_test.go index 6bfbe22e0ae3..ca26ec7e81b1 100644 --- a/internal/filter/filterexpr/matcher_test.go +++ b/internal/filter/filterexpr/matcher_test.go @@ -111,7 +111,6 @@ func testMetricNameMatch(t *testing.T, dataType pmetric.MetricType) { matched, err = matcher.MatchMetric(m) assert.NoError(t, err) assert.True(t, matched) - } func TestMatchIntGaugeDataPointByMetricAndSecondPointLabelValue(t *testing.T) { diff --git a/internal/filter/filterlog/filterlog.go b/internal/filter/filterlog/filterlog.go index b394397c193f..41d324d86db8 100644 --- a/internal/filter/filterlog/filterlog.go +++ b/internal/filter/filterlog/filterlog.go @@ -28,7 +28,6 @@ var useOTTLBridge = featuregate.GlobalRegistry().MustRegister( // The logic determining if a log should be processed is based on include and exclude settings. // Include properties are checked before exclude settings are checked. 
func NewSkipExpr(mp *filterconfig.MatchConfig) (expr.BoolExpr[ottllog.TransformContext], error) { - if useOTTLBridge.IsEnabled() { return filterottl.NewLogSkipExprBridge(mp) } diff --git a/internal/filter/filtermatcher/attributematcher.go b/internal/filter/filtermatcher/attributematcher.go index a4261a0676cd..0a1d2c6d18c3 100644 --- a/internal/filter/filtermatcher/attributematcher.go +++ b/internal/filter/filtermatcher/attributematcher.go @@ -37,7 +37,6 @@ func NewAttributesMatcher(config filterset.Config, attributes []filterconfig.Att // Convert attribute values from mp representation to in-memory representation. var rawAttributes []AttributeMatcher for _, attribute := range attributes { - if attribute.Key == "" { return nil, errors.New("can't have empty key in the list of attributes") } @@ -73,7 +72,6 @@ func NewAttributesMatcher(config filterset.Config, attributes []filterconfig.Att } default: return nil, filterset.NewUnrecognizedMatchTypeError(config.MatchType) - } } diff --git a/internal/filter/filterspan/filterspan_test.go b/internal/filter/filterspan/filterspan_test.go index 7dd815b58f12..b06171543c1a 100644 --- a/internal/filter/filterspan/filterspan_test.go +++ b/internal/filter/filterspan/filterspan_test.go @@ -304,7 +304,6 @@ func TestServiceNameForResource(t *testing.T) { resource := td.ResourceSpans().At(0).Resource() name = serviceNameForResource(resource) require.Equal(t, "", name) - } func Test_NewSkipExpr_With_Bridge(t *testing.T) { diff --git a/internal/k8stest/client.go b/internal/k8stest/client.go index 8481b37d62b9..787cd5f7a81a 100644 --- a/internal/k8stest/client.go +++ b/internal/k8stest/client.go @@ -21,7 +21,6 @@ type K8sClient struct { } func NewK8sClient(kubeconfigPath string) (*K8sClient, error) { - if kubeconfigPath == "" { return nil, errors.New("Please provide file path to load kubeconfig") } diff --git a/internal/k8stest/k8s_collector.go b/internal/k8stest/k8s_collector.go index 87252ee687e6..b74257845344 100644 --- 
a/internal/k8stest/k8s_collector.go +++ b/internal/k8stest/k8s_collector.go @@ -103,7 +103,6 @@ func WaitForCollectorToStart(t *testing.T, client *K8sClient, podNamespace strin return true } return false - }, time.Duration(podTimeoutMinutes)*time.Minute, 2*time.Second, "collector pods were not ready within %d minutes", podTimeoutMinutes) } diff --git a/internal/kafka/authentication.go b/internal/kafka/authentication.go index d6e48b4bc95c..491f02985a65 100644 --- a/internal/kafka/authentication.go +++ b/internal/kafka/authentication.go @@ -93,7 +93,6 @@ func configurePlaintext(config PlainTextConfig, saramaConfig *sarama.Config) { } func configureSASL(config SASLConfig, saramaConfig *sarama.Config) error { - if config.Username == "" { return fmt.Errorf("username have to be provided") } diff --git a/internal/kafka/awsmsk/iam_scram_client_test.go b/internal/kafka/awsmsk/iam_scram_client_test.go index afe355caeded..d62946eb252d 100644 --- a/internal/kafka/awsmsk/iam_scram_client_test.go +++ b/internal/kafka/awsmsk/iam_scram_client_test.go @@ -112,5 +112,4 @@ func TestValidatingServerResponse(t *testing.T) { _, err := new(IAMSASLClient).Step("") assert.ErrorIs(t, err, ErrInvalidStateReached, "Must be an invalid step when not set up correctly") - } diff --git a/internal/kafka/scram_client.go b/internal/kafka/scram_client.go index 0c3f83d8baaf..269dcfbd7713 100644 --- a/internal/kafka/scram_client.go +++ b/internal/kafka/scram_client.go @@ -34,7 +34,6 @@ func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) { // completes is also an error. func (x *XDGSCRAMClient) Step(challenge string) (response string, err error) { return x.ClientConversation.Step(challenge) - } // Done returns true if the conversation is completed or has errored. 
diff --git a/internal/kubelet/client.go b/internal/kubelet/client.go index cf16117a9e16..eda1b97c2d25 100644 --- a/internal/kubelet/client.go +++ b/internal/kubelet/client.go @@ -101,7 +101,6 @@ func (p *kubeConfigClientProvider) BuildClient() (Client, error) { tok: nil, logger: p.logger, }, nil - } type readOnlyClientProvider struct { @@ -121,7 +120,6 @@ func (p *readOnlyClientProvider) BuildClient() (Client, error) { tok: nil, logger: p.logger, }, nil - } type tlsClientProvider struct { diff --git a/internal/metadataproviders/system/metadata.go b/internal/metadataproviders/system/metadata.go index 6a394ab4a494..0118fdeae071 100644 --- a/internal/metadataproviders/system/metadata.go +++ b/internal/metadataproviders/system/metadata.go @@ -199,7 +199,6 @@ func (p systemMetadataProvider) HostIPs() (ips []net.IP, err error) { ips = append(ips, ip) } - } return ips, err } diff --git a/internal/otelarrow/compression/zstd/zstd.go b/internal/otelarrow/compression/zstd/zstd.go index a77bf4139faa..e2a046bab8fa 100644 --- a/internal/otelarrow/compression/zstd/zstd.go +++ b/internal/otelarrow/compression/zstd/zstd.go @@ -218,7 +218,6 @@ func SetDecoderConfig(cfg DecoderConfig) error { updateOne(&staticInstances.byLevel[level].dec) } return nil - } func (cfg EncoderConfig) options() (opts []zstdlib.EOption) { diff --git a/internal/otelarrow/test/e2e_test.go b/internal/otelarrow/test/e2e_test.go index 3b2e8e288a73..f6b3f3fc4578 100644 --- a/internal/otelarrow/test/e2e_test.go +++ b/internal/otelarrow/test/e2e_test.go @@ -179,7 +179,6 @@ func basicTestConfig(t *testing.T, tp testParams, cfgF CfgFunc) (*testConsumer, require.NoError(t, err) return testCon, exporter, receiver - } func testIntegrationTraces(ctx context.Context, t *testing.T, tp testParams, cfgf CfgFunc, mkgen MkGen, errf ConsumerErrFunc, endf EndFunc) { @@ -294,7 +293,6 @@ func bulkyGenFunc() MkGen { return tracesGen.Generate(1000, time.Minute) } } - } func standardEnding(t *testing.T, params testParams, testCon 
*testConsumer, expect [][]ptrace.Traces) (rops, eops map[string]int) { @@ -601,7 +599,6 @@ func TestIntegrationSelfTracing(t *testing.T) { var params = testParams{ threadCount: 10, requestWhileTrue: func(test *testConsumer) bool { - cnt := 0 for _, span := range test.expSpans.GetSpans() { if span.Name == "opentelemetry.proto.experimental.arrow.v1.ArrowTracesService/ArrowTraces" { diff --git a/pkg/batchpersignal/batchpersignal_test.go b/pkg/batchpersignal/batchpersignal_test.go index e6459f64d5ea..8263c8ed39a8 100644 --- a/pkg/batchpersignal/batchpersignal_test.go +++ b/pkg/batchpersignal/batchpersignal_test.go @@ -65,7 +65,6 @@ func TestSplitDifferentTracesIntoDifferentBatches(t *testing.T) { assert.Equal(t, library.Name(), secondOutILS.Scope().Name()) assert.Equal(t, secondSpan.Name(), secondOutILS.Spans().At(0).Name()) assert.Equal(t, ils.SchemaUrl(), secondOutILS.SchemaUrl()) - } func TestSplitTracesWithNilTraceID(t *testing.T) { diff --git a/pkg/golden/sort_metrics_test.go b/pkg/golden/sort_metrics_test.go index 1595670ecfc9..e507ae41f39c 100644 --- a/pkg/golden/sort_metrics_test.go +++ b/pkg/golden/sort_metrics_test.go @@ -42,7 +42,6 @@ func TestSortAttributes(t *testing.T) { t.Errorf("Incorrect key at index %d. 
Expected: %s, Actual: %s", i, key, actualKeys[i]) } } - } func TestSortMetricsResourceAndScope(t *testing.T) { @@ -56,5 +55,4 @@ func TestSortMetricsResourceAndScope(t *testing.T) { after, err := ReadMetrics(afterPath) require.NoError(t, err) require.Equal(t, before, after) - } diff --git a/pkg/ottl/boolean_value.go b/pkg/ottl/boolean_value.go index 9bd2df9171b7..fd3378d343c7 100644 --- a/pkg/ottl/boolean_value.go +++ b/pkg/ottl/boolean_value.go @@ -94,7 +94,6 @@ func (p *Parser[K]) newComparisonEvaluator(comparison *comparison) (BoolExpr[K], } return p.compare(a, b, comparison.Op), nil }}, nil - } func (p *Parser[K]) newBoolExpr(expr *booleanExpression) (BoolExpr[K], error) { diff --git a/pkg/ottl/contexts/internal/metric_test.go b/pkg/ottl/contexts/internal/metric_test.go index 3165f818ff8d..b259b70b52f9 100644 --- a/pkg/ottl/contexts/internal/metric_test.go +++ b/pkg/ottl/contexts/internal/metric_test.go @@ -14,7 +14,6 @@ import ( ) func Test_MetricPathGetSetter(t *testing.T) { - refMetric := createMetricTelemetry() newMetric := pmetric.NewMetric() diff --git a/pkg/ottl/contexts/ottldatapoint/datapoint_test.go b/pkg/ottl/contexts/ottldatapoint/datapoint_test.go index 73e7d697b03a..1da5f4309e0a 100644 --- a/pkg/ottl/contexts/ottldatapoint/datapoint_test.go +++ b/pkg/ottl/contexts/ottldatapoint/datapoint_test.go @@ -496,7 +496,6 @@ func Test_newPathGetSetter_NumberDataPoint(t *testing.T) { tt.modified(exNumberDataPoint) assert.Equal(t, exNumberDataPoint, numberDataPoint) - }) } } diff --git a/pkg/ottl/contexts/ottllog/log_test.go b/pkg/ottl/contexts/ottllog/log_test.go index 5beda9fe137f..a4efc98091be 100644 --- a/pkg/ottl/contexts/ottllog/log_test.go +++ b/pkg/ottl/contexts/ottllog/log_test.go @@ -181,7 +181,6 @@ func Test_newPathGetSetter(t *testing.T) { fmt.Println(log.Body().Slice().At(0).AsString()) newBodySlice.CopyTo(log.Body().Slice()) fmt.Println(log.Body().Slice().At(0).AsString()) - }, bodyType: "slice", }, diff --git 
a/pkg/ottl/contexts/ottlmetric/metrics_test.go b/pkg/ottl/contexts/ottlmetric/metrics_test.go index d81458f76e1f..f83ead9e3a4b 100644 --- a/pkg/ottl/contexts/ottlmetric/metrics_test.go +++ b/pkg/ottl/contexts/ottlmetric/metrics_test.go @@ -17,7 +17,6 @@ import ( ) func Test_newPathGetSetter(t *testing.T) { - refMetric := createMetricTelemetry() newCache := pcommon.NewMap() diff --git a/pkg/ottl/contexts/ottlspanevent/span_events.go b/pkg/ottl/contexts/ottlspanevent/span_events.go index 8a4de90f15e9..78a72c2660d6 100644 --- a/pkg/ottl/contexts/ottlspanevent/span_events.go +++ b/pkg/ottl/contexts/ottlspanevent/span_events.go @@ -179,7 +179,6 @@ func (pep *pathExpressionParser) parsePath(path ottl.Path[TransformContext]) (ot default: return nil, internal.FormatDefaultErrorMessage(path.Name(), path.String(), "Span Event", internal.SpanEventRef) } - } func accessCache() ottl.StandardGetSetter[TransformContext] { return ottl.StandardGetSetter[TransformContext]{ diff --git a/pkg/ottl/e2e/e2e_test.go b/pkg/ottl/e2e/e2e_test.go index 791550a5a062..88723b9b6a41 100644 --- a/pkg/ottl/e2e/e2e_test.go +++ b/pkg/ottl/e2e/e2e_test.go @@ -285,7 +285,6 @@ func Test_e2e_editors(t *testing.T) { sv, _ := v.Map().Get("slice") s := sv.Slice() s.AppendEmpty().SetStr("sample_value") - }, }, { @@ -295,7 +294,6 @@ func Test_e2e_editors(t *testing.T) { s := v.Map().PutEmptySlice("flags") s.AppendEmpty().SetStr("pass") s.AppendEmpty().SetStr("sample_value") - }, }, { @@ -971,7 +969,6 @@ func Test_e2e_converters(t *testing.T) { m := tCtx.GetLogRecord().Attributes().PutEmptyMap("test") m.PutInt("foo", 2) m.PutInt("bar", 5) - }, }, } diff --git a/pkg/ottl/expression.go b/pkg/ottl/expression.go index ea26f0b456ca..74405e99b97f 100644 --- a/pkg/ottl/expression.go +++ b/pkg/ottl/expression.go @@ -172,7 +172,6 @@ func (m *mapGetter[K]) Get(ctx context.Context, tCtx K) (any, error) { default: evaluated[k] = t } - } result := pcommon.NewMap() if err := result.FromRaw(evaluated); err != nil { diff 
--git a/pkg/ottl/math_test.go b/pkg/ottl/math_test.go index 5f3d7281dc7c..f83f9af90d40 100644 --- a/pkg/ottl/math_test.go +++ b/pkg/ottl/math_test.go @@ -576,7 +576,6 @@ func Test_evaluateMathExpression_error(t *testing.T) { assert.Error(t, err) assert.ErrorContains(t, err, tt.errorMsg) } - } else { parsed, err := mathParser.ParseString("", tt.input) assert.NoError(t, err) @@ -588,7 +587,6 @@ func Test_evaluateMathExpression_error(t *testing.T) { assert.Nil(t, result) assert.Error(t, err) } - }) } } diff --git a/pkg/ottl/ottlfuncs/func_base64decode.go b/pkg/ottl/ottlfuncs/func_base64decode.go index 42e401d71505..f626adc3b557 100644 --- a/pkg/ottl/ottlfuncs/func_base64decode.go +++ b/pkg/ottl/ottlfuncs/func_base64decode.go @@ -30,7 +30,6 @@ func createBase64DecodeFunction[K any](_ ottl.FunctionContext, oArgs ottl.Argume } func Base64Decode[K any](target ottl.StringGetter[K]) (ottl.ExprFunc[K], error) { - return func(ctx context.Context, tCtx K) (any, error) { val, err := target.Get(ctx, tCtx) if err != nil { diff --git a/pkg/ottl/ottlfuncs/func_decode_test.go b/pkg/ottl/ottlfuncs/func_decode_test.go index e4ef6bea27fe..5f5ab8944a6f 100644 --- a/pkg/ottl/ottlfuncs/func_decode_test.go +++ b/pkg/ottl/ottlfuncs/func_decode_test.go @@ -14,7 +14,6 @@ import ( ) func TestDecode(t *testing.T) { - testByteSlice := pcommon.NewByteSlice() testByteSlice.FromRaw([]byte("test string")) testByteSliceB64 := pcommon.NewByteSlice() diff --git a/pkg/ottl/ottlfuncs/func_extract_grok_patterns_test.go b/pkg/ottl/ottlfuncs/func_extract_grok_patterns_test.go index a3266038f2ca..371934946aac 100644 --- a/pkg/ottl/ottlfuncs/func_extract_grok_patterns_test.go +++ b/pkg/ottl/ottlfuncs/func_extract_grok_patterns_test.go @@ -15,7 +15,6 @@ import ( ) func Test_extractGrokPatterns_patterns(t *testing.T) { - tests := []struct { name string targetString string diff --git a/pkg/ottl/ottlfuncs/func_flatten_test.go b/pkg/ottl/ottlfuncs/func_flatten_test.go index 09dfc9648a16..bb1b0cae0131 100644 --- 
a/pkg/ottl/ottlfuncs/func_flatten_test.go +++ b/pkg/ottl/ottlfuncs/func_flatten_test.go @@ -147,7 +147,6 @@ func Test_flatten(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - m := pcommon.NewMap() err := m.FromRaw(tt.target) assert.NoError(t, err) diff --git a/pkg/ottl/ottlfuncs/func_fnv.go b/pkg/ottl/ottlfuncs/func_fnv.go index 5df53a4737e1..c9a29ca5739b 100644 --- a/pkg/ottl/ottlfuncs/func_fnv.go +++ b/pkg/ottl/ottlfuncs/func_fnv.go @@ -30,7 +30,6 @@ func createFnvFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ott } func FNVHashString[K any](target ottl.StringGetter[K]) (ottl.ExprFunc[K], error) { - return func(ctx context.Context, tCtx K) (any, error) { val, err := target.Get(ctx, tCtx) if err != nil { diff --git a/pkg/ottl/ottlfuncs/func_keep_matching_keys_test.go b/pkg/ottl/ottlfuncs/func_keep_matching_keys_test.go index ff34d18cf006..c77e70c2fb22 100644 --- a/pkg/ottl/ottlfuncs/func_keep_matching_keys_test.go +++ b/pkg/ottl/ottlfuncs/func_keep_matching_keys_test.go @@ -14,7 +14,6 @@ import ( ) func Test_keepMatchingKeys(t *testing.T) { - in := pcommon.NewMap() in.PutStr("foo", "bar") in.PutStr("foo1", "bar") diff --git a/pkg/ottl/ottlfuncs/func_md5.go b/pkg/ottl/ottlfuncs/func_md5.go index 4fce3da16956..280ac5c8bcb8 100644 --- a/pkg/ottl/ottlfuncs/func_md5.go +++ b/pkg/ottl/ottlfuncs/func_md5.go @@ -31,7 +31,6 @@ func createMD5Function[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ott } func MD5HashString[K any](target ottl.StringGetter[K]) (ottl.ExprFunc[K], error) { - return func(ctx context.Context, tCtx K) (any, error) { val, err := target.Get(ctx, tCtx) if err != nil { diff --git a/pkg/ottl/ottlfuncs/func_merge_maps_test.go b/pkg/ottl/ottlfuncs/func_merge_maps_test.go index afae5d6e2d46..7a00c43bb891 100644 --- a/pkg/ottl/ottlfuncs/func_merge_maps_test.go +++ b/pkg/ottl/ottlfuncs/func_merge_maps_test.go @@ -14,7 +14,6 @@ import ( ) func Test_MergeMaps(t *testing.T) { - input := pcommon.NewMap() 
input.PutStr("attr1", "value1") diff --git a/pkg/ottl/ottlfuncs/func_parse_json_test.go b/pkg/ottl/ottlfuncs/func_parse_json_test.go index b409fd646874..4ef3f8fb49c2 100644 --- a/pkg/ottl/ottlfuncs/func_parse_json_test.go +++ b/pkg/ottl/ottlfuncs/func_parse_json_test.go @@ -109,7 +109,6 @@ func Test_ParseJSON(t *testing.T) { }, }, wantSlice: func(expectedSlice pcommon.Slice) { - expectedSlice.AppendEmpty().SetEmptyMap().PutStr("test", "value") expectedSlice.AppendEmpty().SetEmptyMap().PutStr("test", "value") }, @@ -186,7 +185,6 @@ func Test_ParseJSON(t *testing.T) { tt.wantSlice(expected) assert.Equal(t, expected, resultSlice) } - }) } } diff --git a/pkg/ottl/ottlfuncs/func_replace_pattern_test.go b/pkg/ottl/ottlfuncs/func_replace_pattern_test.go index 1ccfcf712081..2e2e0c59e785 100644 --- a/pkg/ottl/ottlfuncs/func_replace_pattern_test.go +++ b/pkg/ottl/ottlfuncs/func_replace_pattern_test.go @@ -35,7 +35,6 @@ func createTestFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ot } func hashString[K any](target ottl.StringGetter[K]) ottl.ExprFunc[K] { - return func(ctx context.Context, tCtx K) (any, error) { val, err := target.Get(ctx, tCtx) if err != nil { @@ -245,7 +244,6 @@ func Test_replacePattern(t *testing.T) { tt.want(expected) assert.Equal(t, expected, scenarioValue) - }) } } diff --git a/pkg/ottl/ottlfuncs/func_sha1.go b/pkg/ottl/ottlfuncs/func_sha1.go index 1e4c22a50e41..dd82b243e23e 100644 --- a/pkg/ottl/ottlfuncs/func_sha1.go +++ b/pkg/ottl/ottlfuncs/func_sha1.go @@ -31,7 +31,6 @@ func createSHA1Function[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ot } func SHA1HashString[K any](target ottl.StringGetter[K]) (ottl.ExprFunc[K], error) { - return func(ctx context.Context, tCtx K) (any, error) { val, err := target.Get(ctx, tCtx) if err != nil { diff --git a/pkg/ottl/ottlfuncs/func_sha256.go b/pkg/ottl/ottlfuncs/func_sha256.go index b2201c71b901..5876b66011b6 100644 --- a/pkg/ottl/ottlfuncs/func_sha256.go +++ 
b/pkg/ottl/ottlfuncs/func_sha256.go @@ -31,7 +31,6 @@ func createSHA256Function[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) ( } func SHA256HashString[K any](target ottl.StringGetter[K]) (ottl.ExprFunc[K], error) { - return func(ctx context.Context, tCtx K) (any, error) { val, err := target.Get(ctx, tCtx) if err != nil { diff --git a/pkg/ottl/ottlfuncs/func_sha512.go b/pkg/ottl/ottlfuncs/func_sha512.go index 370d27767534..c9fadf86bf61 100644 --- a/pkg/ottl/ottlfuncs/func_sha512.go +++ b/pkg/ottl/ottlfuncs/func_sha512.go @@ -31,7 +31,6 @@ func createSHA512Function[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) ( } func SHA512HashString[K any](target ottl.StringGetter[K]) (ottl.ExprFunc[K], error) { - return func(ctx context.Context, tCtx K) (any, error) { val, err := target.Get(ctx, tCtx) if err != nil { diff --git a/pkg/ottl/ottlfuncs/func_sort_test.go b/pkg/ottl/ottlfuncs/func_sort_test.go index 48dede0a2fa9..b6f038c946f8 100644 --- a/pkg/ottl/ottlfuncs/func_sort_test.go +++ b/pkg/ottl/ottlfuncs/func_sort_test.go @@ -14,7 +14,6 @@ import ( ) func Test_Sort(t *testing.T) { - pMap := pcommon.NewValueMap().SetEmptyMap() pMap.PutStr("k", "v") emptySlice := pcommon.NewValueSlice().SetEmptySlice() diff --git a/pkg/ottl/ottlfuncs/func_to_key_value_string.go b/pkg/ottl/ottlfuncs/func_to_key_value_string.go index ece12a88e1bc..366899115ef3 100644 --- a/pkg/ottl/ottlfuncs/func_to_key_value_string.go +++ b/pkg/ottl/ottlfuncs/func_to_key_value_string.go @@ -73,7 +73,6 @@ func toKeyValueString[K any](target ottl.PMapGetter[K], d ottl.Optional[string], // convertMapToKV converts a pcommon.Map to a key value string func convertMapToKV(target pcommon.Map, delimiter string, pairDelimiter string, sortOutput bool) string { - var kvStrings []string if sortOutput { var keyValues []struct { diff --git a/pkg/pdatatest/pmetrictest/options.go b/pkg/pdatatest/pmetrictest/options.go index 2c050536ec25..883e8fa2b3b1 100644 --- a/pkg/pdatatest/pmetrictest/options.go +++ 
b/pkg/pdatatest/pmetrictest/options.go @@ -63,7 +63,6 @@ func maskMetricSliceValues(metrics pmetric.MetricSlice, metricNames ...string) { default: panic(fmt.Sprintf("data type not supported: %s", metrics.At(i).Type())) } - } } } @@ -516,7 +515,6 @@ func matchMetricSliceAttributeValues(metrics pmetric.MetricSlice, attributeName return false }) } - } } } @@ -630,7 +628,6 @@ func maskSubsequentDataPoints(metrics pmetric.Metrics, metricNames []string) { return n > 1 }) } - } } } diff --git a/pkg/resourcetotelemetry/resource_to_telemetry_test.go b/pkg/resourcetotelemetry/resource_to_telemetry_test.go index 17396b512fec..6587d71ad86a 100644 --- a/pkg/resourcetotelemetry/resource_to_telemetry_test.go +++ b/pkg/resourcetotelemetry/resource_to_telemetry_test.go @@ -52,7 +52,6 @@ func TestConvertResourceToAttributesAllDataTypesEmptyDataPoint(t *testing.T) { assert.Equal(t, 1, md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(4).Histogram().DataPoints().At(0).Attributes().Len()) assert.Equal(t, 1, md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(5).Summary().DataPoints().At(0).Attributes().Len()) assert.Equal(t, 1, md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(6).ExponentialHistogram().DataPoints().At(0).Attributes().Len()) - } func BenchmarkJoinAttributes(b *testing.B) { @@ -111,7 +110,6 @@ func BenchmarkJoinAttributes(b *testing.B) { } }) } - } func initMetricAttributes(capacity int, idx int) pcommon.Map { diff --git a/pkg/stanza/adapter/benchmark_test.go b/pkg/stanza/adapter/benchmark_test.go index a29179d1cbc1..b8641adf2d19 100644 --- a/pkg/stanza/adapter/benchmark_test.go +++ b/pkg/stanza/adapter/benchmark_test.go @@ -86,7 +86,6 @@ const ( ) func BenchmarkEndToEnd(b *testing.B) { - // These values may have meaningful performance implications, so benchmarks // should cover a variety of values in order to highlight impacts. 
var ( diff --git a/pkg/stanza/adapter/frompdataconverter_test.go b/pkg/stanza/adapter/frompdataconverter_test.go index 4ddcc25e533d..4e46a6ea5bca 100644 --- a/pkg/stanza/adapter/frompdataconverter_test.go +++ b/pkg/stanza/adapter/frompdataconverter_test.go @@ -126,7 +126,6 @@ func BenchmarkFromPdataConverter(b *testing.B) { for _, wc := range workerCounts { b.Run(fmt.Sprintf("worker_count=%d", wc), func(b *testing.B) { for i := 0; i < b.N; i++ { - converter := NewFromPdataConverter(componenttest.NewNopTelemetrySettings(), wc) converter.Start() defer converter.Stop() diff --git a/pkg/stanza/adapter/receiver_test.go b/pkg/stanza/adapter/receiver_test.go index c46d0c5a376f..e41df3d9a9fc 100644 --- a/pkg/stanza/adapter/receiver_test.go +++ b/pkg/stanza/adapter/receiver_test.go @@ -466,7 +466,6 @@ func (t *testInputOperator) Start(_ operator.Persister) error { return } } - }() return nil } diff --git a/pkg/stanza/adapter/storage.go b/pkg/stanza/adapter/storage.go index 56b933a13e13..86d0b7d596e4 100644 --- a/pkg/stanza/adapter/storage.go +++ b/pkg/stanza/adapter/storage.go @@ -27,7 +27,6 @@ func GetStorageClient(ctx context.Context, host component.Host, storageID *compo } return storageExtension.GetClient(ctx, component.KindReceiver, componentID, "") - } func (r *receiver) setStorageClient(ctx context.Context, host component.Host) error { diff --git a/pkg/stanza/fileconsumer/attrs/attrs_test.go b/pkg/stanza/fileconsumer/attrs/attrs_test.go index b5a79cfeb364..097eb6e4bd09 100644 --- a/pkg/stanza/fileconsumer/attrs/attrs_test.go +++ b/pkg/stanza/fileconsumer/attrs/attrs_test.go @@ -18,7 +18,6 @@ func TestResolver(t *testing.T) { t.Parallel() for i := 0; i < 64; i++ { - // Create a 6 bit string where each bit represents the value of a config option bitString := fmt.Sprintf("%06b", i) diff --git a/pkg/stanza/fileconsumer/file_test.go b/pkg/stanza/fileconsumer/file_test.go index 50cad57e821a..e4faf03a8fef 100644 --- a/pkg/stanza/fileconsumer/file_test.go +++ 
b/pkg/stanza/fileconsumer/file_test.go @@ -141,7 +141,6 @@ func TestReadUsingNopEncoding(t *testing.T) { for _, tc := range tcs { t.Run(tc.testName, func(t *testing.T) { - tempDir := t.TempDir() cfg := NewConfig().includeDir(tempDir) cfg.StartAt = "beginning" @@ -225,7 +224,6 @@ func TestNopEncodingDifferentLogSizes(t *testing.T) { for _, tc := range tcs { t.Run(tc.testName, func(t *testing.T) { - tempDir := t.TempDir() cfg := NewConfig().includeDir(tempDir) cfg.StartAt = "beginning" diff --git a/pkg/stanza/fileconsumer/internal/checkpoint/checkpoint_test.go b/pkg/stanza/fileconsumer/internal/checkpoint/checkpoint_test.go index fea2ca2bc5a7..d10894187700 100644 --- a/pkg/stanza/fileconsumer/internal/checkpoint/checkpoint_test.go +++ b/pkg/stanza/fileconsumer/internal/checkpoint/checkpoint_test.go @@ -141,7 +141,6 @@ func TestMigrateHeaderAttributes(t *testing.T) { }, }, }, reloaded) - } func saveDeprecated(t *testing.T, persister operator.Persister, dep *deprecatedMetadata) { diff --git a/pkg/stanza/fileconsumer/internal/fileset/fileset_test.go b/pkg/stanza/fileconsumer/internal/fileset/fileset_test.go index 0c1de5c8a8e6..c4763e86ea13 100644 --- a/pkg/stanza/fileconsumer/internal/fileset/fileset_test.go +++ b/pkg/stanza/fileconsumer/internal/fileset/fileset_test.go @@ -55,7 +55,6 @@ func match[T Matchable](ele T, expect bool) func(t *testing.T, fileset *Fileset[ require.Nil(t, r) require.Equal(t, pr, fileset.Len()) } - } } diff --git a/pkg/stanza/fileconsumer/internal/reader/factory.go b/pkg/stanza/fileconsumer/internal/reader/factory.go index 4731e6bc3ce7..f314e4aacae8 100644 --- a/pkg/stanza/fileconsumer/internal/reader/factory.go +++ b/pkg/stanza/fileconsumer/internal/reader/factory.go @@ -64,7 +64,6 @@ func (f *Factory) NewReader(file *os.File, fp *fingerprint.Fingerprint) (*Reader } func (f *Factory) NewReaderFromMetadata(file *os.File, m *Metadata) (r *Reader, err error) { - r = &Reader{ Metadata: m, set: f.TelemetrySettings, diff --git 
a/pkg/stanza/operator/input/windows/xml_test.go b/pkg/stanza/operator/input/windows/xml_test.go index 41991daac2cd..ac8d6e2f9835 100644 --- a/pkg/stanza/operator/input/windows/xml_test.go +++ b/pkg/stanza/operator/input/windows/xml_test.go @@ -422,7 +422,6 @@ func TestParseEventData(t *testing.T) { func TestInvalidUnmarshal(t *testing.T) { _, err := unmarshalEventXML([]byte("Test \n Invalid \t Unmarshal")) require.Error(t, err) - } func TestUnmarshalWithEventData(t *testing.T) { data, err := os.ReadFile(filepath.Join("testdata", "xmlSample.xml")) diff --git a/pkg/stanza/operator/parser/container/parser.go b/pkg/stanza/operator/parser/container/parser.go index ae27fa778318..1db63db1be4a 100644 --- a/pkg/stanza/operator/parser/container/parser.go +++ b/pkg/stanza/operator/parser/container/parser.go @@ -282,7 +282,6 @@ func (p *Parser) extractk8sMetaFromFilePath(e *entry.Entry) error { if err := newField.Set(e, parsedValues[originalKey]); err != nil { return fmt.Errorf("failed to set %v as metadata at %v", originalKey, attributeKey) } - } return nil } diff --git a/pkg/stanza/operator/parser/container/parser_test.go b/pkg/stanza/operator/parser/container/parser_test.go index 93e769c23f4b..bd3798dc2870 100644 --- a/pkg/stanza/operator/parser/container/parser_test.go +++ b/pkg/stanza/operator/parser/container/parser_test.go @@ -633,7 +633,6 @@ func TestCRIRecombineProcessWithFailedDownstreamOperator(t *testing.T) { } func TestProcessWithTimeRemovalFlagDisabled(t *testing.T) { - require.NoError(t, featuregate.GlobalRegistry().Set(removeOriginalTimeField.ID(), false)) t.Cleanup(func() { require.NoError(t, featuregate.GlobalRegistry().Set(removeOriginalTimeField.ID(), true)) diff --git a/pkg/stanza/operator/parser/jsonarray/config_test.go b/pkg/stanza/operator/parser/jsonarray/config_test.go index 73b8bd8c7bda..15700110b08f 100644 --- a/pkg/stanza/operator/parser/jsonarray/config_test.go +++ b/pkg/stanza/operator/parser/jsonarray/config_test.go @@ -96,5 +96,4 @@ func 
TestBuildWithFeatureGate(t *testing.T) { } }) } - } diff --git a/pkg/stanza/operator/parser/syslog/parser.go b/pkg/stanza/operator/parser/syslog/parser.go index 84b52d3ac3da..b51423edf684 100644 --- a/pkg/stanza/operator/parser/syslog/parser.go +++ b/pkg/stanza/operator/parser/syslog/parser.go @@ -38,10 +38,8 @@ type Parser struct { // Process will parse an entry field as syslog. func (p *Parser) Process(ctx context.Context, entry *entry.Entry) error { - // if pri header is missing and this is an expected behavior then facility and severity values should be skipped. if !p.enableOctetCounting && p.allowSkipPriHeader { - bytes, err := toBytes(entry.Body) if err != nil { return err diff --git a/pkg/stanza/operator/transformer/recombine/transformer_test.go b/pkg/stanza/operator/transformer/recombine/transformer_test.go index 0d4e53930018..8d9a8ce99a2d 100644 --- a/pkg/stanza/operator/transformer/recombine/transformer_test.go +++ b/pkg/stanza/operator/transformer/recombine/transformer_test.go @@ -815,7 +815,6 @@ func BenchmarkRecombineLimitTrigger(b *testing.B) { require.NoError(b, recombine.Process(ctx, next)) recombine.flushAllSources(ctx) } - } func TestTimeout(t *testing.T) { @@ -900,7 +899,6 @@ func TestTimeoutWhenAggregationKeepHappen(t *testing.T) { return case <-ticker.C: assert.NoError(t, recombine.Process(ctx, next)) - } } }() diff --git a/pkg/stanza/operator/transformer/retain/config_test.go b/pkg/stanza/operator/transformer/retain/config_test.go index 64483c088fd9..168ab9342bf7 100644 --- a/pkg/stanza/operator/transformer/retain/config_test.go +++ b/pkg/stanza/operator/transformer/retain/config_test.go @@ -92,5 +92,4 @@ func TestUnmarshal(t *testing.T) { }, }, }.Run(t) - } diff --git a/pkg/status/aggregator_test.go b/pkg/status/aggregator_test.go index 255c3c1f3dff..9dd8d7950dbf 100644 --- a/pkg/status/aggregator_test.go +++ b/pkg/status/aggregator_test.go @@ -127,7 +127,6 @@ func TestAggregateStatusVerbose(t *testing.T) { 
st.ComponentStatusMap[tracesKey].ComponentStatusMap[toComponentKey(traces.ExporterID)], ) }) - } func TestAggregateStatusPriorityRecoverable(t *testing.T) { diff --git a/pkg/translator/azure/resourcelogs_to_logs_test.go b/pkg/translator/azure/resourcelogs_to_logs_test.go index d9aeda5bcf75..60e178e63f3f 100644 --- a/pkg/translator/azure/resourcelogs_to_logs_test.go +++ b/pkg/translator/azure/resourcelogs_to_logs_test.go @@ -361,7 +361,6 @@ func TestExtractRawAttributes(t *testing.T) { assert.Equal(t, tt.expected, extractRawAttributes(tt.log)) }) } - } func TestUnmarshalLogs(t *testing.T) { diff --git a/pkg/translator/azurelogs/resourcelogs_to_logs_test.go b/pkg/translator/azurelogs/resourcelogs_to_logs_test.go index 70ca49070129..4a43feafdad0 100644 --- a/pkg/translator/azurelogs/resourcelogs_to_logs_test.go +++ b/pkg/translator/azurelogs/resourcelogs_to_logs_test.go @@ -451,7 +451,6 @@ func TestExtractRawAttributes(t *testing.T) { assert.Equal(t, tt.expected, extractRawAttributes(tt.log)) }) } - } func TestUnmarshalLogs(t *testing.T) { diff --git a/pkg/translator/jaeger/jaegerproto_to_traces_test.go b/pkg/translator/jaeger/jaegerproto_to_traces_test.go index ae0ea0b8911e..ff3dc300953b 100644 --- a/pkg/translator/jaeger/jaegerproto_to_traces_test.go +++ b/pkg/translator/jaeger/jaegerproto_to_traces_test.go @@ -178,7 +178,6 @@ func TestJTagsToInternalAttributes(t *testing.T) { } func TestProtoToTraces(t *testing.T) { - tests := []struct { name string jb []*model.Batch @@ -379,7 +378,6 @@ func TestProtoBatchToInternalTracesWithTwoLibraries(t *testing.T) { } func TestSetInternalSpanStatus(t *testing.T) { - emptyStatus := ptrace.NewStatus() okStatus := ptrace.NewStatus() diff --git a/pkg/translator/jaeger/jaegerthrift_to_traces_test.go b/pkg/translator/jaeger/jaegerthrift_to_traces_test.go index e3f2bb1cdf78..b9fc4b7292fb 100644 --- a/pkg/translator/jaeger/jaegerthrift_to_traces_test.go +++ b/pkg/translator/jaeger/jaegerthrift_to_traces_test.go @@ -65,7 +65,6 @@ func 
TestJThriftTagsToInternalAttributes(t *testing.T) { } func TestThriftBatchToInternalTraces(t *testing.T) { - tests := []struct { name string jb *jaeger.Batch diff --git a/pkg/translator/jaeger/traces_to_jaegerproto.go b/pkg/translator/jaeger/traces_to_jaegerproto.go index db79dac374b4..2bfff448dcba 100644 --- a/pkg/translator/jaeger/traces_to_jaegerproto.go +++ b/pkg/translator/jaeger/traces_to_jaegerproto.go @@ -90,7 +90,6 @@ func resourceToJaegerProtoProcess(resource pcommon.Resource) *model.Process { tags := make([]model.KeyValue, 0, attrsCount) process.Tags = appendTagsFromResourceAttributes(tags, attrs) return process - } func appendTagsFromResourceAttributes(dest []model.KeyValue, attrs pcommon.Map) []model.KeyValue { @@ -355,7 +354,6 @@ func getErrorTagFromStatusCode(statusCode ptrace.StatusCode) (model.KeyValue, bo }, true } return model.KeyValue{}, false - } func getTagFromStatusMsg(statusMsg string) (model.KeyValue, bool) { diff --git a/pkg/translator/jaeger/traces_to_jaegerproto_test.go b/pkg/translator/jaeger/traces_to_jaegerproto_test.go index 672d97c6f320..785e1f8a96e5 100644 --- a/pkg/translator/jaeger/traces_to_jaegerproto_test.go +++ b/pkg/translator/jaeger/traces_to_jaegerproto_test.go @@ -164,7 +164,6 @@ func TestGetTagFromSpanKind(t *testing.T) { } func TestAttributesToJaegerProtoTags(t *testing.T) { - attributes := pcommon.NewMap() attributes.PutBool("bool-val", true) attributes.PutInt("int-val", 123) @@ -215,7 +214,6 @@ func TestAttributesToJaegerProtoTags(t *testing.T) { } func TestInternalTracesToJaegerProto(t *testing.T) { - tests := []struct { name string td ptrace.Traces diff --git a/pkg/translator/loki/convert.go b/pkg/translator/loki/convert.go index 87eeec5a3f7e..474d5084e4ce 100644 --- a/pkg/translator/loki/convert.go +++ b/pkg/translator/loki/convert.go @@ -187,7 +187,6 @@ func convertLogToLokiEntry(lr plog.LogRecord, res pcommon.Resource, format strin default: return nil, fmt.Errorf("invalid format %s. 
Expected one of: %s, %s, %s", format, formatJSON, formatLogfmt, formatRaw) } - } func timestampFromLogRecord(lr plog.LogRecord) time.Time { diff --git a/pkg/translator/loki/encode.go b/pkg/translator/loki/encode.go index 2174b71f6fce..90721954a437 100644 --- a/pkg/translator/loki/encode.go +++ b/pkg/translator/loki/encode.go @@ -179,7 +179,6 @@ func valueToKeyvals(key string, value pcommon.Value) []any { prefix = key + "_" } value.Map().Range(func(k string, v pcommon.Value) bool { - keyvals = append(keyvals, valueToKeyvals(prefix+k, v)...) return true }) diff --git a/pkg/translator/loki/encode_test.go b/pkg/translator/loki/encode_test.go index 6b07e6b6c13d..56947d1f7215 100644 --- a/pkg/translator/loki/encode_test.go +++ b/pkg/translator/loki/encode_test.go @@ -13,7 +13,6 @@ import ( ) func exampleLog() (plog.LogRecord, pcommon.Resource, pcommon.InstrumentationScope) { - buffer := plog.NewLogRecord() buffer.Body().SetStr("Example log") buffer.SetSeverityText("error") @@ -66,7 +65,6 @@ func TestEncodeJsonWithInstrumentationScopeAttributes(t *testing.T) { } func TestSerializeComplexBody(t *testing.T) { - arrayval := pcommon.NewValueSlice() arrayval.Slice().AppendEmpty().SetStr("a") arrayval.Slice().AppendEmpty().SetStr("b") diff --git a/pkg/translator/opencensus/traces_to_oc.go b/pkg/translator/opencensus/traces_to_oc.go index 0f698ca5bf6b..0cea312203c2 100644 --- a/pkg/translator/opencensus/traces_to_oc.go +++ b/pkg/translator/opencensus/traces_to_oc.go @@ -159,7 +159,6 @@ func spanKindToOCAttribute(kind ptrace.SpanKind) *octrace.AttributeValue { case ptrace.SpanKindServer: // explicitly handled as SpanKind case ptrace.SpanKindClient: // explicitly handled as SpanKind default: - } if string(ocKind) == "" { diff --git a/pkg/translator/prometheus/normalize_label.go b/pkg/translator/prometheus/normalize_label.go index af0960e86237..b44ba869fe69 100644 --- a/pkg/translator/prometheus/normalize_label.go +++ b/pkg/translator/prometheus/normalize_label.go @@ -25,7 +25,6 @@ 
var dropSanitizationGate = featuregate.GlobalRegistry().MustRegister( // // Exception is made for double-underscores which are allowed func NormalizeLabel(label string) string { - // Trivial case if len(label) == 0 { return label diff --git a/pkg/translator/prometheus/normalize_label_test.go b/pkg/translator/prometheus/normalize_label_test.go index f00183909735..e2a440d582ff 100644 --- a/pkg/translator/prometheus/normalize_label_test.go +++ b/pkg/translator/prometheus/normalize_label_test.go @@ -12,7 +12,6 @@ import ( ) func TestSanitize(t *testing.T) { - defer testutil.SetFeatureGateForTest(t, dropSanitizationGate, false)() require.Equal(t, "", NormalizeLabel(""), "") @@ -24,7 +23,6 @@ func TestSanitize(t *testing.T) { } func TestSanitizeDropSanitization(t *testing.T) { - defer testutil.SetFeatureGateForTest(t, dropSanitizationGate, true)() require.Equal(t, "", NormalizeLabel("")) diff --git a/pkg/translator/prometheus/normalize_name.go b/pkg/translator/prometheus/normalize_name.go index 72fc04cea220..1297bc0b831b 100644 --- a/pkg/translator/prometheus/normalize_name.go +++ b/pkg/translator/prometheus/normalize_name.go @@ -106,7 +106,6 @@ func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffix // Build a normalized name for the specified metric func normalizeName(metric pmetric.Metric, namespace string) string { - // Split metric name in "tokens" (remove all non-alphanumeric) nameTokens := strings.FieldsFunc( metric.Name(), @@ -138,7 +137,6 @@ func normalizeName(metric pmetric.Metric, namespace string) string { } } } - } // Append _total for Counters diff --git a/pkg/translator/prometheus/normalize_name_test.go b/pkg/translator/prometheus/normalize_name_test.go index 6470bfe3d571..8a56d8956f7a 100644 --- a/pkg/translator/prometheus/normalize_name_test.go +++ b/pkg/translator/prometheus/normalize_name_test.go @@ -14,97 +14,70 @@ import ( ) func TestByte(t *testing.T) { - require.Equal(t, "system_filesystem_usage_bytes", 
normalizeName(createGauge("system.filesystem.usage", "By"), "")) - } func TestByteCounter(t *testing.T) { - require.Equal(t, "system_io_bytes_total", normalizeName(createCounter("system.io", "By"), "")) require.Equal(t, "network_transmitted_bytes_total", normalizeName(createCounter("network_transmitted_bytes_total", "By"), "")) - } func TestWhiteSpaces(t *testing.T) { - require.Equal(t, "system_filesystem_usage_bytes", normalizeName(createGauge("\t system.filesystem.usage ", " By\t"), "")) - } func TestNonStandardUnit(t *testing.T) { - require.Equal(t, "system_network_dropped", normalizeName(createGauge("system.network.dropped", "{packets}"), "")) - } func TestNonStandardUnitCounter(t *testing.T) { - require.Equal(t, "system_network_dropped_total", normalizeName(createCounter("system.network.dropped", "{packets}"), "")) - } func TestBrokenUnit(t *testing.T) { - require.Equal(t, "system_network_dropped_packets", normalizeName(createGauge("system.network.dropped", "packets"), "")) require.Equal(t, "system_network_packets_dropped", normalizeName(createGauge("system.network.packets.dropped", "packets"), "")) require.Equal(t, "system_network_packets", normalizeName(createGauge("system.network.packets", "packets"), "")) - } func TestBrokenUnitCounter(t *testing.T) { - require.Equal(t, "system_network_dropped_packets_total", normalizeName(createCounter("system.network.dropped", "packets"), "")) require.Equal(t, "system_network_packets_dropped_total", normalizeName(createCounter("system.network.packets.dropped", "packets"), "")) require.Equal(t, "system_network_packets_total", normalizeName(createCounter("system.network.packets", "packets"), "")) - } func TestRatio(t *testing.T) { - require.Equal(t, "hw_gpu_memory_utilization_ratio", normalizeName(createGauge("hw.gpu.memory.utilization", "1"), "")) require.Equal(t, "hw_fan_speed_ratio", normalizeName(createGauge("hw.fan.speed_ratio", "1"), "")) require.Equal(t, "objects_total", normalizeName(createCounter("objects", "1"), 
"")) - } func TestHertz(t *testing.T) { - require.Equal(t, "hw_cpu_speed_limit_hertz", normalizeName(createGauge("hw.cpu.speed_limit", "Hz"), "")) - } func TestPer(t *testing.T) { - require.Equal(t, "broken_metric_speed_km_per_hour", normalizeName(createGauge("broken.metric.speed", "km/h"), "")) require.Equal(t, "astro_light_speed_limit_meters_per_second", normalizeName(createGauge("astro.light.speed_limit", "m/s"), "")) - } func TestPercent(t *testing.T) { - require.Equal(t, "broken_metric_success_ratio_percent", normalizeName(createGauge("broken.metric.success_ratio", "%"), "")) require.Equal(t, "broken_metric_success_percent", normalizeName(createGauge("broken.metric.success_percent", "%"), "")) - } func TestEmpty(t *testing.T) { - require.Equal(t, "test_metric_no_unit", normalizeName(createGauge("test.metric.no_unit", ""), "")) require.Equal(t, "test_metric_spaces", normalizeName(createGauge("test.metric.spaces", " \t "), "")) - } func TestUnsupportedRunes(t *testing.T) { - require.Equal(t, "unsupported_metric_temperature_F", normalizeName(createGauge("unsupported.metric.temperature", "°F"), "")) require.Equal(t, "unsupported_metric_weird", normalizeName(createGauge("unsupported.metric.weird", "+=.:,!* & #"), "")) require.Equal(t, "unsupported_metric_redundant_test_per_C", normalizeName(createGauge("unsupported.metric.redundant", "__test $/°C"), "")) - } func TestOtelReceivers(t *testing.T) { - require.Equal(t, "active_directory_ds_replication_network_io_bytes_total", normalizeName(createCounter("active_directory.ds.replication.network.io", "By"), "")) require.Equal(t, "active_directory_ds_replication_sync_object_pending_total", normalizeName(createCounter("active_directory.ds.replication.sync.object.pending", "{objects}"), "")) require.Equal(t, "active_directory_ds_replication_object_rate_per_second", normalizeName(createGauge("active_directory.ds.replication.object.rate", "{objects}/s"), "")) @@ -127,7 +100,6 @@ func TestOtelReceivers(t *testing.T) { 
require.Equal(t, "nginx_connections_accepted", normalizeName(createGauge("nginx.connections_accepted", "connections"), "")) require.Equal(t, "nsxt_node_memory_usage_kilobytes", normalizeName(createGauge("nsxt.node.memory.usage", "KBy"), "")) require.Equal(t, "redis_latest_fork_microseconds", normalizeName(createGauge("redis.latest_fork", "us"), "")) - } func TestTrimPromSuffixes(t *testing.T) { @@ -158,7 +130,6 @@ func TestTrimPromSuffixes(t *testing.T) { assert.Equal(t, "memcached_operation_hit_ratio_percent", TrimPromSuffixes("memcached_operation_hit_ratio_percent", pmetric.MetricTypeGauge, "%")) assert.Equal(t, "active_directory_ds_replication_object_rate_per_second", TrimPromSuffixes("active_directory_ds_replication_object_rate_per_second", pmetric.MetricTypeGauge, "{objects}/s")) assert.Equal(t, "system_disk_operation_time_seconds", TrimPromSuffixes("system_disk_operation_time_seconds_total", pmetric.MetricTypeSum, "s")) - } func TestNamespace(t *testing.T) { @@ -176,23 +147,18 @@ func TestCleanUpString(t *testing.T) { } func TestUnitMapGetOrDefault(t *testing.T) { - require.Equal(t, "", unitMapGetOrDefault("")) require.Equal(t, "seconds", unitMapGetOrDefault("s")) require.Equal(t, "invalid", unitMapGetOrDefault("invalid")) - } func TestPerUnitMapGetOrDefault(t *testing.T) { - require.Equal(t, "", perUnitMapGetOrDefault("")) require.Equal(t, "second", perUnitMapGetOrDefault("s")) require.Equal(t, "invalid", perUnitMapGetOrDefault("invalid")) - } func TestRemoveItem(t *testing.T) { - require.Equal(t, []string{}, removeItem([]string{}, "test")) require.Equal(t, []string{}, removeItem([]string{}, "")) require.Equal(t, []string{"a", "b", "c"}, removeItem([]string{"a", "b", "c"}, "d")) @@ -200,11 +166,9 @@ func TestRemoveItem(t *testing.T) { require.Equal(t, []string{"a", "b"}, removeItem([]string{"a", "b", "c"}, "c")) require.Equal(t, []string{"a", "c"}, removeItem([]string{"a", "b", "c"}, "b")) require.Equal(t, []string{"b", "c"}, removeItem([]string{"a", "b", 
"c"}, "a")) - } func TestBuildCompliantNameWithNormalize(t *testing.T) { - defer testutil.SetFeatureGateForTest(t, normalizeNameGate, true)() addUnitAndTypeSuffixes := true require.Equal(t, "system_io_bytes_total", BuildCompliantName(createCounter("system.io", "By"), "", addUnitAndTypeSuffixes)) @@ -213,11 +177,9 @@ func TestBuildCompliantNameWithNormalize(t *testing.T) { require.Equal(t, "envoy_rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", addUnitAndTypeSuffixes)) require.Equal(t, "foo_bar", BuildCompliantName(createGauge(":foo::bar", ""), "", addUnitAndTypeSuffixes)) require.Equal(t, "foo_bar_total", BuildCompliantName(createCounter(":foo::bar", ""), "", addUnitAndTypeSuffixes)) - } func TestBuildCompliantNameWithSuffixesFeatureGateDisabled(t *testing.T) { - defer testutil.SetFeatureGateForTest(t, normalizeNameGate, false)() addUnitAndTypeSuffixes := true require.Equal(t, "system_io", BuildCompliantName(createCounter("system.io", "By"), "", addUnitAndTypeSuffixes)) @@ -227,11 +189,9 @@ func TestBuildCompliantNameWithSuffixesFeatureGateDisabled(t *testing.T) { require.Equal(t, "envoy__rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", addUnitAndTypeSuffixes)) require.Equal(t, ":foo::bar", BuildCompliantName(createGauge(":foo::bar", ""), "", addUnitAndTypeSuffixes)) require.Equal(t, ":foo::bar", BuildCompliantName(createCounter(":foo::bar", ""), "", addUnitAndTypeSuffixes)) - } func TestBuildCompliantNameWithoutSuffixes(t *testing.T) { - defer testutil.SetFeatureGateForTest(t, normalizeNameGate, false)() addUnitAndTypeSuffixes := false require.Equal(t, "system_io", BuildCompliantName(createCounter("system.io", "By"), "", addUnitAndTypeSuffixes)) @@ -241,5 +201,4 @@ func TestBuildCompliantNameWithoutSuffixes(t *testing.T) { require.Equal(t, "envoy__rule_engine_zlib_buf_error", BuildCompliantName(createGauge("envoy__rule_engine_zlib_buf_error", ""), "", 
addUnitAndTypeSuffixes)) require.Equal(t, ":foo::bar", BuildCompliantName(createGauge(":foo::bar", ""), "", addUnitAndTypeSuffixes)) require.Equal(t, ":foo::bar", BuildCompliantName(createCounter(":foo::bar", ""), "", addUnitAndTypeSuffixes)) - } diff --git a/pkg/translator/prometheus/testutils_test.go b/pkg/translator/prometheus/testutils_test.go index e50ac8456019..1923326e5097 100644 --- a/pkg/translator/prometheus/testutils_test.go +++ b/pkg/translator/prometheus/testutils_test.go @@ -10,11 +10,9 @@ import ( var ilm pmetric.ScopeMetrics func init() { - metrics := pmetric.NewMetrics() resourceMetrics := metrics.ResourceMetrics().AppendEmpty() ilm = resourceMetrics.ScopeMetrics().AppendEmpty() - } // Returns a new Metric of type "Gauge" with specified name and unit diff --git a/pkg/translator/prometheus/unit_to_ucum_test.go b/pkg/translator/prometheus/unit_to_ucum_test.go index 518a2983a2fa..081decbe1aa3 100644 --- a/pkg/translator/prometheus/unit_to_ucum_test.go +++ b/pkg/translator/prometheus/unit_to_ucum_test.go @@ -57,5 +57,4 @@ func TestUnitWordToUCUM(t *testing.T) { assert.Equal(t, tc.expected, got) }) } - } diff --git a/pkg/translator/prometheusremotewrite/helper.go b/pkg/translator/prometheusremotewrite/helper.go index cb03a7c32959..4d38dfb5a9b7 100644 --- a/pkg/translator/prometheusremotewrite/helper.go +++ b/pkg/translator/prometheusremotewrite/helper.go @@ -221,7 +221,6 @@ func (c *prometheusConverter) addHistogramDataPoints(dataPoints pmetric.Histogra sumlabels := createLabels(baseName+sumStr, baseLabels) c.addSample(sum, sumlabels) - } // treat count as a sample in an individual TimeSeries diff --git a/pkg/translator/prometheusremotewrite/number_data_points_v2_test.go b/pkg/translator/prometheusremotewrite/number_data_points_v2_test.go index 5b7c2cf377d9..552952ffb5bb 100644 --- a/pkg/translator/prometheusremotewrite/number_data_points_v2_test.go +++ b/pkg/translator/prometheusremotewrite/number_data_points_v2_test.go @@ -122,7 +122,6 @@ func 
TestPrometheusConverterV2_addGaugeNumberDataPoints(t *testing.T) { diff := cmp.Diff(w, converter.unique, cmpopts.EquateNaNs()) assert.Empty(t, diff) - }) } } @@ -172,5 +171,4 @@ func TestPrometheusConverterV2_addGaugeNumberDataPointsDuplicate(t *testing.T) { converter.addGaugeNumberDataPoints(metric2.Gauge().DataPoints(), pcommon.NewResource(), settings, metric2.Name()) assert.Equal(t, want(), converter.unique) - } diff --git a/pkg/translator/prometheusremotewrite/otlp_to_openmetrics_metadata_test.go b/pkg/translator/prometheusremotewrite/otlp_to_openmetrics_metadata_test.go index 2127ffad06ab..de90eaab73da 100644 --- a/pkg/translator/prometheusremotewrite/otlp_to_openmetrics_metadata_test.go +++ b/pkg/translator/prometheusremotewrite/otlp_to_openmetrics_metadata_test.go @@ -226,7 +226,6 @@ func TestOtelMetricsToMetadata(t *testing.T) { assert.Equal(t, tt.want[i].MetricFamilyName, metaData[i].MetricFamilyName) assert.Equal(t, tt.want[i].Help, metaData[i].Help) } - }) } } diff --git a/pkg/translator/zipkin/zipkinv1/json.go b/pkg/translator/zipkin/zipkinv1/json.go index ee0725509919..b45e7b188786 100644 --- a/pkg/translator/zipkin/zipkinv1/json.go +++ b/pkg/translator/zipkin/zipkinv1/json.go @@ -167,7 +167,6 @@ func jsonBinAnnotationsToSpanAttributes(span ptrace.Span, binAnnotations []*bina sMapper := &statusMapper{} var localComponent string for _, binAnnotation := range binAnnotations { - if binAnnotation.Endpoint != nil && binAnnotation.Endpoint.ServiceName != "" { fallbackServiceName = binAnnotation.Endpoint.ServiceName } diff --git a/pkg/translator/zipkin/zipkinv2/from_translator.go b/pkg/translator/zipkin/zipkinv2/from_translator.go index 5358aee8c381..a55da57cc157 100644 --- a/pkg/translator/zipkin/zipkinv2/from_translator.go +++ b/pkg/translator/zipkin/zipkinv2/from_translator.go @@ -98,7 +98,6 @@ func spanToZipkinSpan( localServiceName string, zTags map[string]string, ) (*zipkinmodel.SpanModel, error) { - tags := aggregateSpanTags(span, zTags) zs := 
&zipkinmodel.SpanModel{} @@ -318,7 +317,6 @@ func zipkinEndpointFromTags( remoteEndpoint bool, redundantKeys map[string]bool, ) (endpoint *zipkinmodel.Endpoint) { - serviceName := localServiceName if peerSvc, ok := zTags[conventions.AttributePeerService]; ok && remoteEndpoint { serviceName = peerSvc From c92c21e508c15973a287016b4d857ac5def673de Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Tue, 12 Nov 2024 00:24:53 +0100 Subject: [PATCH 4/7] [chore]: enable perfsprint linter for receivers (#36197) #### Description [perfsprint](https://golangci-lint.run/usage/linters/#perfsprint) checks that fmt.Sprintf can be replaced with a faster alternative. Signed-off-by: Matthieu MOREL --- receiver/aerospikereceiver/client.go | 3 +- receiver/apachesparkreceiver/scraper.go | 72 +++++++++---------- .../internal/cadvisor/extractors/extractor.go | 4 +- .../internal/ecsInfo/cgroup.go | 3 +- .../internal/k8sapiserver/k8sapiserver.go | 8 +-- .../internal/stores/podstore.go | 4 +- .../stats_provider_test.go | 4 +- .../receiver_test.go | 3 +- receiver/awsfirehosereceiver/config_test.go | 3 +- .../nop_logs_unmarshaler_test.go | 4 +- .../nop_metrics_unmarshaler_test.go | 4 +- receiver/awsfirehosereceiver/receiver.go | 3 +- receiver/awsfirehosereceiver/receiver_test.go | 3 +- receiver/awss3receiver/notifications_test.go | 8 +-- receiver/awss3receiver/s3reader_test.go | 2 +- .../internal/translator/translator_test.go | 8 +-- receiver/chronyreceiver/config_test.go | 3 +- .../internal/chrony/client_test.go | 2 +- .../internal/chrony/util_test.go | 3 +- receiver/cloudflarereceiver/logs.go | 4 +- receiver/couchdbreceiver/metrics.go | 10 +-- .../internal/translator/tags.go | 3 +- .../translator/traces_translator_test.go | 8 +-- receiver/datadogreceiver/receiver.go | 4 +- receiver/dockerstatsreceiver/receiver.go | 2 +- receiver/elasticsearchreceiver/client.go | 4 +- receiver/expvarreceiver/config.go | 3 +- receiver/expvarreceiver/scraper.go | 3 +- receiver/filelogreceiver/filelog_test.go | 
5 +- receiver/flinkmetricsreceiver/client.go | 3 +- .../googlecloudmonitoringreceiver/receiver.go | 2 +- .../internal/handler.go | 2 +- .../internal/metadata/metricsdatapoint.go | 8 +-- .../metadata/metricsdatapoint_test.go | 6 +- .../perfcounter_scraper_errors.go | 3 +- receiver/jaegerreceiver/config.go | 9 +-- receiver/jaegerreceiver/trace_receiver.go | 2 +- receiver/jmxreceiver/config.go | 2 +- .../internal/subprocess/subprocess.go | 3 +- receiver/jmxreceiver/receiver.go | 4 +- .../internal/metadata/metadata.go | 5 +- .../k8sclusterreceiver/internal/node/nodes.go | 4 +- receiver/k8sclusterreceiver/receiver.go | 3 +- receiver/k8sobjectsreceiver/config.go | 3 +- receiver/k8sobjectsreceiver/receiver.go | 3 +- .../broker_scraper_test.go | 6 +- .../consumer_scraper_test.go | 8 +-- .../kafkametricsreceiver/receiver_test.go | 6 +- .../scraper_test_helper.go | 22 +++--- .../topic_scraper_test.go | 6 +- receiver/kafkareceiver/factory.go | 4 +- receiver/kafkareceiver/header_extraction.go | 4 +- receiver/kafkareceiver/kafka_receiver.go | 3 +- .../internal/mongodb_atlas_client.go | 11 +-- receiver/mongodbreceiver/config.go | 2 +- .../otelarrowreceiver/internal/arrow/arrow.go | 9 ++- .../internal/arrow/arrow_test.go | 11 +-- receiver/otelarrowreceiver/otelarrow_test.go | 8 +-- receiver/podmanreceiver/libpod_client.go | 2 +- receiver/podmanreceiver/libpod_client_test.go | 9 ++- receiver/podmanreceiver/podman_test.go | 5 +- receiver/postgresqlreceiver/client.go | 2 +- receiver/postgresqlreceiver/scraper_test.go | 7 +- .../prometheusreceiver/metrics_receiver.go | 2 +- ...trics_receiver_scrape_config_files_test.go | 5 +- .../targetallocator/config.go | 3 +- .../targetallocator/manager.go | 2 +- receiver/receivercreator/consumer.go | 7 +- receiver/receivercreator/receiver.go | 3 +- receiver/sapmreceiver/factory.go | 3 +- receiver/signalfxreceiver/factory.go | 3 +- receiver/skywalkingreceiver/config.go | 5 +- receiver/skywalkingreceiver/factory.go | 3 +- 
receiver/snmpreceiver/client_test.go | 12 ++-- .../solacereceiver/messaging_service_test.go | 10 +-- receiver/solacereceiver/receiver.go | 3 +- receiver/solacereceiver/receiver_test.go | 9 ++- .../unmarshaller_egress_test.go | 4 +- receiver/splunkhecreceiver/receiver_test.go | 6 +- receiver/sqlserverreceiver/config.go | 4 +- receiver/sqlserverreceiver/scraper_test.go | 4 +- .../internal/configssh/configssh.go | 4 +- .../internal/configssh/configssh_test.go | 6 +- receiver/sshcheckreceiver/scraper_test.go | 5 +- receiver/statsdreceiver/config.go | 9 +-- .../internal/protocol/statsd_parser.go | 2 +- receiver/vcenterreceiver/processors.go | 4 +- receiver/vcenterreceiver/scraper.go | 2 +- .../receiver_others.go | 4 +- .../windowsperfcountersreceiver/config.go | 7 +- .../config_test.go | 2 +- receiver/zookeeperreceiver/scraper.go | 2 +- receiver/zookeeperreceiver/scraper_test.go | 3 +- 93 files changed, 258 insertions(+), 257 deletions(-) diff --git a/receiver/aerospikereceiver/client.go b/receiver/aerospikereceiver/client.go index 1e6fc0cd90a0..0221a92de061 100644 --- a/receiver/aerospikereceiver/client.go +++ b/receiver/aerospikereceiver/client.go @@ -5,7 +5,6 @@ package aerospikereceiver // import "github.com/open-telemetry/opentelemetry-col import ( "crypto/tls" - "fmt" "strings" "sync" "time" @@ -266,7 +265,7 @@ func allNamespaceInfo(n cluster.Node, policy *as.InfoPolicy) (metricsMap, error) commands := make([]string, len(names)) for i, name := range names { - commands[i] = fmt.Sprintf("namespace/%s", name) + commands[i] = "namespace/" + name } res, err = n.RequestInfo(policy, commands...) 
diff --git a/receiver/apachesparkreceiver/scraper.go b/receiver/apachesparkreceiver/scraper.go index 2fb431e6c246..a33dd27754d2 100644 --- a/receiver/apachesparkreceiver/scraper.go +++ b/receiver/apachesparkreceiver/scraper.go @@ -126,122 +126,122 @@ func (s *sparkScraper) scrape(_ context.Context) (pmetric.Metrics, error) { } func (s *sparkScraper) recordCluster(clusterStats *models.ClusterProperties, now pcommon.Timestamp, appID string, appName string) { - if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.BlockManager.disk.diskSpaceUsed_MB", appID)]; ok { + if stat, ok := clusterStats.Gauges[appID+".driver.BlockManager.disk.diskSpaceUsed_MB"]; ok { s.mb.RecordSparkDriverBlockManagerDiskUsageDataPoint(now, int64(stat.Value)) } - if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.BlockManager.memory.offHeapMemUsed_MB", appID)]; ok { + if stat, ok := clusterStats.Gauges[appID+".driver.BlockManager.memory.offHeapMemUsed_MB"]; ok { s.mb.RecordSparkDriverBlockManagerMemoryUsageDataPoint(now, int64(stat.Value), metadata.AttributeLocationOffHeap, metadata.AttributeStateUsed) } - if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.BlockManager.memory.onHeapMemUsed_MB", appID)]; ok { + if stat, ok := clusterStats.Gauges[appID+".driver.BlockManager.memory.onHeapMemUsed_MB"]; ok { s.mb.RecordSparkDriverBlockManagerMemoryUsageDataPoint(now, int64(stat.Value), metadata.AttributeLocationOnHeap, metadata.AttributeStateUsed) } - if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.BlockManager.memory.remainingOffHeapMem_MB", appID)]; ok { + if stat, ok := clusterStats.Gauges[appID+".driver.BlockManager.memory.remainingOffHeapMem_MB"]; ok { s.mb.RecordSparkDriverBlockManagerMemoryUsageDataPoint(now, int64(stat.Value), metadata.AttributeLocationOffHeap, metadata.AttributeStateFree) } - if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.BlockManager.memory.remainingOnHeapMem_MB", appID)]; ok { + if stat, ok := 
clusterStats.Gauges[appID+".driver.BlockManager.memory.remainingOnHeapMem_MB"]; ok { s.mb.RecordSparkDriverBlockManagerMemoryUsageDataPoint(now, int64(stat.Value), metadata.AttributeLocationOnHeap, metadata.AttributeStateFree) } - if stat, ok := clusterStats.Counters[fmt.Sprintf("%s.driver.HiveExternalCatalog.fileCacheHits", appID)]; ok { + if stat, ok := clusterStats.Counters[appID+".driver.HiveExternalCatalog.fileCacheHits"]; ok { s.mb.RecordSparkDriverHiveExternalCatalogFileCacheHitsDataPoint(now, stat.Count) } - if stat, ok := clusterStats.Counters[fmt.Sprintf("%s.driver.HiveExternalCatalog.filesDiscovered", appID)]; ok { + if stat, ok := clusterStats.Counters[appID+".driver.HiveExternalCatalog.filesDiscovered"]; ok { s.mb.RecordSparkDriverHiveExternalCatalogFilesDiscoveredDataPoint(now, stat.Count) } - if stat, ok := clusterStats.Counters[fmt.Sprintf("%s.driver.HiveExternalCatalog.hiveClientCalls", appID)]; ok { + if stat, ok := clusterStats.Counters[appID+".driver.HiveExternalCatalog.hiveClientCalls"]; ok { s.mb.RecordSparkDriverHiveExternalCatalogHiveClientCallsDataPoint(now, stat.Count) } - if stat, ok := clusterStats.Counters[fmt.Sprintf("%s.driver.HiveExternalCatalog.parallelListingJobCount", appID)]; ok { + if stat, ok := clusterStats.Counters[appID+".driver.HiveExternalCatalog.parallelListingJobCount"]; ok { s.mb.RecordSparkDriverHiveExternalCatalogParallelListingJobsDataPoint(now, stat.Count) } - if stat, ok := clusterStats.Counters[fmt.Sprintf("%s.driver.HiveExternalCatalog.partitionsFetched", appID)]; ok { + if stat, ok := clusterStats.Counters[appID+".driver.HiveExternalCatalog.partitionsFetched"]; ok { s.mb.RecordSparkDriverHiveExternalCatalogPartitionsFetchedDataPoint(now, stat.Count) } - if stat, ok := clusterStats.Histograms[fmt.Sprintf("%s.driver.CodeGenerator.compilationTime", appID)]; ok { + if stat, ok := clusterStats.Histograms[appID+".driver.CodeGenerator.compilationTime"]; ok { 
s.mb.RecordSparkDriverCodeGeneratorCompilationCountDataPoint(now, stat.Count) s.mb.RecordSparkDriverCodeGeneratorCompilationAverageTimeDataPoint(now, stat.Mean) } - if stat, ok := clusterStats.Histograms[fmt.Sprintf("%s.driver.CodeGenerator.generatedClassSize", appID)]; ok { + if stat, ok := clusterStats.Histograms[appID+".driver.CodeGenerator.generatedClassSize"]; ok { s.mb.RecordSparkDriverCodeGeneratorGeneratedClassCountDataPoint(now, stat.Count) s.mb.RecordSparkDriverCodeGeneratorGeneratedClassAverageSizeDataPoint(now, stat.Mean) } - if stat, ok := clusterStats.Histograms[fmt.Sprintf("%s.driver.CodeGenerator.generatedMethodSize", appID)]; ok { + if stat, ok := clusterStats.Histograms[appID+".driver.CodeGenerator.generatedMethodSize"]; ok { s.mb.RecordSparkDriverCodeGeneratorGeneratedMethodCountDataPoint(now, stat.Count) s.mb.RecordSparkDriverCodeGeneratorGeneratedMethodAverageSizeDataPoint(now, stat.Mean) } - if stat, ok := clusterStats.Histograms[fmt.Sprintf("%s.driver.CodeGenerator.sourceCodeSize", appID)]; ok { + if stat, ok := clusterStats.Histograms[appID+".driver.CodeGenerator.sourceCodeSize"]; ok { s.mb.RecordSparkDriverCodeGeneratorSourceCodeOperationsDataPoint(now, stat.Count) s.mb.RecordSparkDriverCodeGeneratorSourceCodeAverageSizeDataPoint(now, stat.Mean) } - if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.DAGScheduler.job.activeJobs", appID)]; ok { + if stat, ok := clusterStats.Gauges[appID+".driver.DAGScheduler.job.activeJobs"]; ok { s.mb.RecordSparkDriverDagSchedulerJobActiveDataPoint(now, int64(stat.Value)) } - if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.DAGScheduler.job.allJobs", appID)]; ok { + if stat, ok := clusterStats.Gauges[appID+".driver.DAGScheduler.job.allJobs"]; ok { s.mb.RecordSparkDriverDagSchedulerJobCountDataPoint(now, int64(stat.Value)) } - if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.DAGScheduler.stage.failedStages", appID)]; ok { + if stat, ok := 
clusterStats.Gauges[appID+".driver.DAGScheduler.stage.failedStages"]; ok { s.mb.RecordSparkDriverDagSchedulerStageFailedDataPoint(now, int64(stat.Value)) } - if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.DAGScheduler.stage.runningStages", appID)]; ok { + if stat, ok := clusterStats.Gauges[appID+".driver.DAGScheduler.stage.runningStages"]; ok { s.mb.RecordSparkDriverDagSchedulerStageCountDataPoint(now, int64(stat.Value), metadata.AttributeSchedulerStatusRunning) } - if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.DAGScheduler.stage.waitingStages", appID)]; ok { + if stat, ok := clusterStats.Gauges[appID+".driver.DAGScheduler.stage.waitingStages"]; ok { s.mb.RecordSparkDriverDagSchedulerStageCountDataPoint(now, int64(stat.Value), metadata.AttributeSchedulerStatusWaiting) } - if stat, ok := clusterStats.Counters[fmt.Sprintf("%s.driver.LiveListenerBus.numEventsPosted", appID)]; ok { + if stat, ok := clusterStats.Counters[appID+".driver.LiveListenerBus.numEventsPosted"]; ok { s.mb.RecordSparkDriverLiveListenerBusPostedDataPoint(now, stat.Count) } - if stat, ok := clusterStats.Timers[fmt.Sprintf("%s.driver.LiveListenerBus.queue.appStatus.listenerProcessingTime", appID)]; ok { + if stat, ok := clusterStats.Timers[appID+".driver.LiveListenerBus.queue.appStatus.listenerProcessingTime"]; ok { s.mb.RecordSparkDriverLiveListenerBusProcessingTimeAverageDataPoint(now, stat.Mean) } - if stat, ok := clusterStats.Counters[fmt.Sprintf("%s.driver.LiveListenerBus.queue.appStatus.numDroppedEvents", appID)]; ok { + if stat, ok := clusterStats.Counters[appID+".driver.LiveListenerBus.queue.appStatus.numDroppedEvents"]; ok { s.mb.RecordSparkDriverLiveListenerBusDroppedDataPoint(now, stat.Count) } - if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.LiveListenerBus.queue.appStatus.size", appID)]; ok { + if stat, ok := clusterStats.Gauges[appID+".driver.LiveListenerBus.queue.appStatus.size"]; ok { s.mb.RecordSparkDriverLiveListenerBusQueueSizeDataPoint(now, 
int64(stat.Value)) } - if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.JVMCPU.jvmCpuTime", appID)]; ok { + if stat, ok := clusterStats.Gauges[appID+".driver.JVMCPU.jvmCpuTime"]; ok { s.mb.RecordSparkDriverJvmCPUTimeDataPoint(now, int64(stat.Value)) } - if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.ExecutorMetrics.JVMOffHeapMemory", appID)]; ok { + if stat, ok := clusterStats.Gauges[appID+".driver.ExecutorMetrics.JVMOffHeapMemory"]; ok { s.mb.RecordSparkDriverExecutorMemoryJvmDataPoint(now, int64(stat.Value), metadata.AttributeLocationOffHeap) } - if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.ExecutorMetrics.JVMHeapMemory", appID)]; ok { + if stat, ok := clusterStats.Gauges[appID+".driver.ExecutorMetrics.JVMHeapMemory"]; ok { s.mb.RecordSparkDriverExecutorMemoryJvmDataPoint(now, int64(stat.Value), metadata.AttributeLocationOnHeap) } - if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.ExecutorMetrics.OffHeapExecutionMemory", appID)]; ok { + if stat, ok := clusterStats.Gauges[appID+".driver.ExecutorMetrics.OffHeapExecutionMemory"]; ok { s.mb.RecordSparkDriverExecutorMemoryExecutionDataPoint(now, int64(stat.Value), metadata.AttributeLocationOffHeap) } - if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.ExecutorMetrics.OnHeapExecutionMemory", appID)]; ok { + if stat, ok := clusterStats.Gauges[appID+".driver.ExecutorMetrics.OnHeapExecutionMemory"]; ok { s.mb.RecordSparkDriverExecutorMemoryExecutionDataPoint(now, int64(stat.Value), metadata.AttributeLocationOnHeap) } - if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.ExecutorMetrics.OffHeapStorageMemory", appID)]; ok { + if stat, ok := clusterStats.Gauges[appID+".driver.ExecutorMetrics.OffHeapStorageMemory"]; ok { s.mb.RecordSparkDriverExecutorMemoryStorageDataPoint(now, int64(stat.Value), metadata.AttributeLocationOffHeap) } - if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.ExecutorMetrics.OnHeapStorageMemory", appID)]; ok { + if stat, ok := 
clusterStats.Gauges[appID+".driver.ExecutorMetrics.OnHeapStorageMemory"]; ok { s.mb.RecordSparkDriverExecutorMemoryStorageDataPoint(now, int64(stat.Value), metadata.AttributeLocationOnHeap) } - if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.ExecutorMetrics.DirectPoolMemory", appID)]; ok { + if stat, ok := clusterStats.Gauges[appID+".driver.ExecutorMetrics.DirectPoolMemory"]; ok { s.mb.RecordSparkDriverExecutorMemoryPoolDataPoint(now, int64(stat.Value), metadata.AttributePoolMemoryTypeDirect) } - if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.ExecutorMetrics.MappedPoolMemory", appID)]; ok { + if stat, ok := clusterStats.Gauges[appID+".driver.ExecutorMetrics.MappedPoolMemory"]; ok { s.mb.RecordSparkDriverExecutorMemoryPoolDataPoint(now, int64(stat.Value), metadata.AttributePoolMemoryTypeMapped) } - if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.ExecutorMetrics.MinorGCCount", appID)]; ok { + if stat, ok := clusterStats.Gauges[appID+".driver.ExecutorMetrics.MinorGCCount"]; ok { s.mb.RecordSparkDriverExecutorGcOperationsDataPoint(now, int64(stat.Value), metadata.AttributeGcTypeMinor) } - if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.ExecutorMetrics.MajorGCCount", appID)]; ok { + if stat, ok := clusterStats.Gauges[appID+".driver.ExecutorMetrics.MajorGCCount"]; ok { s.mb.RecordSparkDriverExecutorGcOperationsDataPoint(now, int64(stat.Value), metadata.AttributeGcTypeMajor) } - if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.ExecutorMetrics.MinorGCTime", appID)]; ok { + if stat, ok := clusterStats.Gauges[appID+".driver.ExecutorMetrics.MinorGCTime"]; ok { s.mb.RecordSparkDriverExecutorGcTimeDataPoint(now, int64(stat.Value), metadata.AttributeGcTypeMinor) } - if stat, ok := clusterStats.Gauges[fmt.Sprintf("%s.driver.ExecutorMetrics.MajorGCTime", appID)]; ok { + if stat, ok := clusterStats.Gauges[appID+".driver.ExecutorMetrics.MajorGCTime"]; ok { s.mb.RecordSparkDriverExecutorGcTimeDataPoint(now, int64(stat.Value), 
metadata.AttributeGcTypeMajor) } diff --git a/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/extractor.go b/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/extractor.go index 398ad4805a59..18bb1a91e435 100644 --- a/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/extractor.go +++ b/receiver/awscontainerinsightreceiver/internal/cadvisor/extractors/extractor.go @@ -164,10 +164,10 @@ func getMetricKey(metric *CAdvisorMetric) string { switch metricType { case ci.TypeInstance: // merge cpu, memory, net metric for type Instance - metricKey = fmt.Sprintf("metricType:%s", ci.TypeInstance) + metricKey = "metricType:" + ci.TypeInstance case ci.TypeNode: // merge cpu, memory, net metric for type Node - metricKey = fmt.Sprintf("metricType:%s", ci.TypeNode) + metricKey = "metricType:" + ci.TypeNode case ci.TypePod: // merge cpu, memory, net metric for type Pod metricKey = fmt.Sprintf("metricType:%s,podId:%s", ci.TypePod, metric.GetTags()[ci.PodIDKey]) diff --git a/receiver/awscontainerinsightreceiver/internal/ecsInfo/cgroup.go b/receiver/awscontainerinsightreceiver/internal/ecsInfo/cgroup.go index cce6b10419f6..b99339bfda59 100644 --- a/receiver/awscontainerinsightreceiver/internal/ecsInfo/cgroup.go +++ b/receiver/awscontainerinsightreceiver/internal/ecsInfo/cgroup.go @@ -6,6 +6,7 @@ package ecsinfo // import "github.com/open-telemetry/opentelemetry-collector-con import ( "bufio" "context" + "errors" "fmt" "log" "math" @@ -227,7 +228,7 @@ func getCGroupMountPoint(mountConfigPath string) (string, error) { return filepath.Dir(fields[4]), nil } } - return "", fmt.Errorf("mount point not existed") + return "", errors.New("mount point not existed") } func getCGroupPathForTask(cgroupMount, controller, taskID, clusterName string) (string, error) { diff --git a/receiver/awscontainerinsightreceiver/internal/k8sapiserver/k8sapiserver.go b/receiver/awscontainerinsightreceiver/internal/k8sapiserver/k8sapiserver.go index 
c64912388264..4477a27bba13 100644 --- a/receiver/awscontainerinsightreceiver/internal/k8sapiserver/k8sapiserver.go +++ b/receiver/awscontainerinsightreceiver/internal/k8sapiserver/k8sapiserver.go @@ -260,7 +260,7 @@ func (k *K8sAPIServer) startLeaderElection(ctx context.Context, lock resourceloc RetryPeriod: 5 * time.Second, Callbacks: leaderelection.LeaderCallbacks{ OnStartedLeading: func(ctx context.Context) { - k.logger.Info(fmt.Sprintf("k8sapiserver OnStartedLeading: %s", k.nodeName)) + k.logger.Info("k8sapiserver OnStartedLeading: " + k.nodeName) // we're notified when we start k.mu.Lock() k.leading = true @@ -292,7 +292,7 @@ func (k *K8sAPIServer) startLeaderElection(ctx context.Context, lock resourceloc } }, OnStoppedLeading: func() { - k.logger.Info(fmt.Sprintf("k8sapiserver OnStoppedLeading: %s", k.nodeName)) + k.logger.Info("k8sapiserver OnStoppedLeading: " + k.nodeName) // we can do cleanup here, or after the RunOrDie method returns k.mu.Lock() defer k.mu.Unlock() @@ -302,14 +302,14 @@ func (k *K8sAPIServer) startLeaderElection(ctx context.Context, lock resourceloc k.k8sClient.ShutdownPodClient() }, OnNewLeader: func(identity string) { - k.logger.Info(fmt.Sprintf("k8sapiserver Switch New Leader: %s", identity)) + k.logger.Info("k8sapiserver Switch New Leader: " + identity) }, }, }) select { case <-ctx.Done(): // when leader election ends, the channel ctx.Done() will be closed - k.logger.Info(fmt.Sprintf("k8sapiserver shutdown Leader Election: %s", k.nodeName)) + k.logger.Info("k8sapiserver shutdown Leader Election: " + k.nodeName) return default: } diff --git a/receiver/awscontainerinsightreceiver/internal/stores/podstore.go b/receiver/awscontainerinsightreceiver/internal/stores/podstore.go index bf464069a349..e6027d2dd54a 100644 --- a/receiver/awscontainerinsightreceiver/internal/stores/podstore.go +++ b/receiver/awscontainerinsightreceiver/internal/stores/podstore.go @@ -217,7 +217,7 @@ func (p *PodStore) Decorate(ctx context.Context, metric CIMetric, 
kubernetesBlob p.addPodOwnersAndPodName(metric, &entry.pod, kubernetesBlob) addLabels(&entry.pod, kubernetesBlob) } else { - p.logger.Warn(fmt.Sprintf("no pod information is found in podstore for pod %s", podKey)) + p.logger.Warn("no pod information is found in podstore for pod " + podKey) return false } } @@ -262,7 +262,7 @@ func (p *PodStore) refreshInternal(now time.Time, podList []corev1.Pod) { pod := podList[i] podKey := createPodKeyFromMetaData(&pod) if podKey == "" { - p.logger.Warn(fmt.Sprintf("podKey is unavailable, refresh pod store for pod %s", pod.Name)) + p.logger.Warn("podKey is unavailable, refresh pod store for pod " + pod.Name) continue } if pod.Status.Phase != corev1.PodSucceeded && pod.Status.Phase != corev1.PodFailed { diff --git a/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/stats_provider_test.go b/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/stats_provider_test.go index 3684dd9eccb2..e5ce59c85942 100644 --- a/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/stats_provider_test.go +++ b/receiver/awsecscontainermetricsreceiver/internal/awsecscontainermetrics/stats_provider_test.go @@ -4,7 +4,7 @@ package awsecscontainermetrics import ( - "fmt" + "errors" "os" "testing" @@ -28,7 +28,7 @@ func (f testRestClient) GetResponse(path string) ([]byte, error) { } if f.fail { - return []byte{}, fmt.Errorf("failed") + return []byte{}, errors.New("failed") } if f.invalidJSON { return []byte("wrong-json-body"), nil diff --git a/receiver/awsecscontainermetricsreceiver/receiver_test.go b/receiver/awsecscontainermetricsreceiver/receiver_test.go index ecfcf88cd218..4ee3f7752126 100644 --- a/receiver/awsecscontainermetricsreceiver/receiver_test.go +++ b/receiver/awsecscontainermetricsreceiver/receiver_test.go @@ -6,7 +6,6 @@ package awsecscontainermetricsreceiver import ( "context" "errors" - "fmt" "os" "testing" @@ -96,7 +95,7 @@ type invalidFakeClient struct { } func (f 
invalidFakeClient) GetResponse(_ string) ([]byte, error) { - return nil, fmt.Errorf("intentional error") + return nil, errors.New("intentional error") } func TestCollectDataFromEndpointWithEndpointError(t *testing.T) { diff --git a/receiver/awsfirehosereceiver/config_test.go b/receiver/awsfirehosereceiver/config_test.go index 9f5f89850dc4..5d9429ddc62b 100644 --- a/receiver/awsfirehosereceiver/config_test.go +++ b/receiver/awsfirehosereceiver/config_test.go @@ -4,7 +4,6 @@ package awsfirehosereceiver import ( - "fmt" "path/filepath" "testing" @@ -23,7 +22,7 @@ func TestLoadConfig(t *testing.T) { "cwmetrics", "cwlogs", "otlp_v1", "invalid", } { t.Run(configType, func(t *testing.T) { - fileName := fmt.Sprintf("%s_config.yaml", configType) + fileName := configType + "_config.yaml" cm, err := confmaptest.LoadConf(filepath.Join("testdata", fileName)) require.NoError(t, err) diff --git a/receiver/awsfirehosereceiver/internal/unmarshaler/unmarshalertest/nop_logs_unmarshaler_test.go b/receiver/awsfirehosereceiver/internal/unmarshaler/unmarshalertest/nop_logs_unmarshaler_test.go index 69be0c74f224..ce90c351cfbd 100644 --- a/receiver/awsfirehosereceiver/internal/unmarshaler/unmarshalertest/nop_logs_unmarshaler_test.go +++ b/receiver/awsfirehosereceiver/internal/unmarshaler/unmarshalertest/nop_logs_unmarshaler_test.go @@ -4,7 +4,7 @@ package unmarshalertest import ( - "fmt" + "errors" "testing" "github.com/stretchr/testify/require" @@ -31,7 +31,7 @@ func TestNewWithLogs(t *testing.T) { } func TestNewErrLogs(t *testing.T) { - wantErr := fmt.Errorf("test error") + wantErr := errors.New("test error") unmarshaler := NewErrLogs(wantErr) got, err := unmarshaler.Unmarshal(nil) require.Error(t, err) diff --git a/receiver/awsfirehosereceiver/internal/unmarshaler/unmarshalertest/nop_metrics_unmarshaler_test.go b/receiver/awsfirehosereceiver/internal/unmarshaler/unmarshalertest/nop_metrics_unmarshaler_test.go index f48aa1ebd51a..572c39bc475c 100644 --- 
a/receiver/awsfirehosereceiver/internal/unmarshaler/unmarshalertest/nop_metrics_unmarshaler_test.go +++ b/receiver/awsfirehosereceiver/internal/unmarshaler/unmarshalertest/nop_metrics_unmarshaler_test.go @@ -4,7 +4,7 @@ package unmarshalertest import ( - "fmt" + "errors" "testing" "github.com/stretchr/testify/require" @@ -31,7 +31,7 @@ func TestNewWithMetrics(t *testing.T) { } func TestNewErrMetrics(t *testing.T) { - wantErr := fmt.Errorf("test error") + wantErr := errors.New("test error") unmarshaler := NewErrMetrics(wantErr) got, err := unmarshaler.Unmarshal(nil) require.Error(t, err) diff --git a/receiver/awsfirehosereceiver/receiver.go b/receiver/awsfirehosereceiver/receiver.go index 4d78eb2778d2..79cc3e42751c 100644 --- a/receiver/awsfirehosereceiver/receiver.go +++ b/receiver/awsfirehosereceiver/receiver.go @@ -12,6 +12,7 @@ import ( "io" "net" "net/http" + "strconv" "sync" "time" @@ -282,7 +283,7 @@ func (fmr *firehoseReceiver) sendResponse(w http.ResponseWriter, requestID strin } payload, _ := json.Marshal(body) w.Header().Set(headerContentType, "application/json") - w.Header().Set(headerContentLength, fmt.Sprintf("%d", len(payload))) + w.Header().Set(headerContentLength, strconv.Itoa(len(payload))) w.WriteHeader(statusCode) if _, err = w.Write(payload); err != nil { fmr.settings.Logger.Error("Failed to send response", zap.Error(err)) diff --git a/receiver/awsfirehosereceiver/receiver_test.go b/receiver/awsfirehosereceiver/receiver_test.go index e0ece7054d72..2a70846462f5 100644 --- a/receiver/awsfirehosereceiver/receiver_test.go +++ b/receiver/awsfirehosereceiver/receiver_test.go @@ -13,6 +13,7 @@ import ( "net" "net/http" "net/http/httptest" + "strconv" "testing" "time" @@ -190,7 +191,7 @@ func TestFirehoseRequest(t *testing.T) { request := httptest.NewRequest(http.MethodPost, "/", requestBody) request.Header.Set(headerContentType, "application/json") - request.Header.Set(headerContentLength, fmt.Sprintf("%d", requestBody.Len())) + 
request.Header.Set(headerContentLength, strconv.Itoa(requestBody.Len())) request.Header.Set(headerFirehoseRequestID, testFirehoseRequestID) request.Header.Set(headerFirehoseAccessKey, testFirehoseAccessKey) if testCase.headers != nil { diff --git a/receiver/awss3receiver/notifications_test.go b/receiver/awss3receiver/notifications_test.go index 8d3693a85936..981358776a14 100644 --- a/receiver/awss3receiver/notifications_test.go +++ b/receiver/awss3receiver/notifications_test.go @@ -5,7 +5,7 @@ package awss3receiver // import "github.com/open-telemetry/opentelemetry-collect import ( "context" - "fmt" + "errors" "testing" "time" @@ -65,7 +65,7 @@ func (h hostWithCustomCapabilityRegistry) GetExtensions() map[component.ID]compo func (m *mockCustomCapabilityRegistry) Register(_ string, _ ...opampcustommessages.CustomCapabilityRegisterOption) (handler opampcustommessages.CustomCapabilityHandler, err error) { if m.shouldFailRegister { - return nil, fmt.Errorf("register failed") + return nil, errors.New("register failed") } if m.shouldRegisterReturnNilHandler { return nil, nil @@ -80,13 +80,13 @@ func (m *mockCustomCapabilityRegistry) Message() <-chan *protobufs.CustomMessage func (m *mockCustomCapabilityRegistry) SendMessage(messageType string, message []byte) (messageSendingChannel chan struct{}, err error) { m.sendMessageCalls++ if m.unregisterCalled { - return nil, fmt.Errorf("unregister called") + return nil, errors.New("unregister called") } if m.shouldReturnPending != nil && m.shouldReturnPending() { return m.pendingChannel, types.ErrCustomMessagePending } if m.shouldFailSend { - return nil, fmt.Errorf("send failed") + return nil, errors.New("send failed") } m.sentMessages = append(m.sentMessages, customMessage{messageType: messageType, message: message}) return nil, nil diff --git a/receiver/awss3receiver/s3reader_test.go b/receiver/awss3receiver/s3reader_test.go index 7dfe9ba4c92f..dbbe4c23b397 100644 --- a/receiver/awss3receiver/s3reader_test.go +++ 
b/receiver/awss3receiver/s3reader_test.go @@ -126,7 +126,7 @@ func (m *mockListObjectsV2Pager) NextPage(_ context.Context, _ ...func(*s3.Optio } if m.PageNum >= len(m.Pages) { - return nil, fmt.Errorf("no more pages") + return nil, errors.New("no more pages") } output = m.Pages[m.PageNum] m.PageNum++ diff --git a/receiver/awsxrayreceiver/internal/translator/translator_test.go b/receiver/awsxrayreceiver/internal/translator/translator_test.go index 68231cd57d8e..658bece18c0f 100644 --- a/receiver/awsxrayreceiver/internal/translator/translator_test.go +++ b/receiver/awsxrayreceiver/internal/translator/translator_test.go @@ -677,8 +677,7 @@ func TestTranslation(t *testing.T) { actualSeg *awsxray.Segment, _ ptrace.ResourceSpans, _ ptrace.Traces, err error) { assert.EqualError(t, err, - fmt.Sprintf("unexpected namespace: %s", - *actualSeg.Subsegments[0].Subsegments[0].Namespace), + "unexpected namespace: "+*actualSeg.Subsegments[0].Subsegments[0].Namespace, testCase+": translation should've failed") }, }, @@ -845,10 +844,7 @@ func TestTranslation(t *testing.T) { actualSeg *awsxray.Segment, _ ptrace.ResourceSpans, _ ptrace.Traces, err error) { assert.EqualError(t, err, - fmt.Sprintf( - "failed to parse out the database name in the \"sql.url\" field, rawUrl: %s", - *actualSeg.SQL.URL, - ), + "failed to parse out the database name in the \"sql.url\" field, rawUrl: "+*actualSeg.SQL.URL, testCase+": translation should've failed") }, }, diff --git a/receiver/chronyreceiver/config_test.go b/receiver/chronyreceiver/config_test.go index 3459cb5019f1..d2fda85fc916 100644 --- a/receiver/chronyreceiver/config_test.go +++ b/receiver/chronyreceiver/config_test.go @@ -4,7 +4,6 @@ package chronyreceiver import ( - "fmt" "os" "path/filepath" "testing" @@ -90,7 +89,7 @@ func TestValidate(t *testing.T) { { scenario: "Valid unix path", conf: Config{ - Endpoint: fmt.Sprintf("unix://%s", t.TempDir()), + Endpoint: "unix://" + t.TempDir(), ControllerConfig: scraperhelper.ControllerConfig{ 
CollectionInterval: time.Minute, InitialDelay: time.Second, diff --git a/receiver/chronyreceiver/internal/chrony/client_test.go b/receiver/chronyreceiver/internal/chrony/client_test.go index dd6733b07ce8..0343866d4e11 100644 --- a/receiver/chronyreceiver/internal/chrony/client_test.go +++ b/receiver/chronyreceiver/internal/chrony/client_test.go @@ -211,7 +211,7 @@ func TestGettingTrackingData(t *testing.T) { t.Run(tc.scenario, func(t *testing.T) { t.Parallel() - client, err := New(fmt.Sprintf("unix://%s", t.TempDir()), tc.timeout, func(c *client) { + client, err := New("unix://"+t.TempDir(), tc.timeout, func(c *client) { c.dialer = func(context.Context, string, string) (net.Conn, error) { if tc.dialTime > tc.timeout { return nil, os.ErrDeadlineExceeded diff --git a/receiver/chronyreceiver/internal/chrony/util_test.go b/receiver/chronyreceiver/internal/chrony/util_test.go index 7f9641defec0..cc142bb06bb6 100644 --- a/receiver/chronyreceiver/internal/chrony/util_test.go +++ b/receiver/chronyreceiver/internal/chrony/util_test.go @@ -4,7 +4,6 @@ package chrony import ( - "fmt" "os" "testing" @@ -46,7 +45,7 @@ func TestSplitNetworkEndpoint(t *testing.T) { }, { scenario: "A valid UNIX network", - in: fmt.Sprintf("unix://%s", path), + in: "unix://" + path, network: "unixgram", endpoint: path, err: nil, diff --git a/receiver/cloudflarereceiver/logs.go b/receiver/cloudflarereceiver/logs.go index 956b38118ca4..36d72d63f4b8 100644 --- a/receiver/cloudflarereceiver/logs.go +++ b/receiver/cloudflarereceiver/logs.go @@ -241,12 +241,12 @@ func (l *logsReceiver) processLogs(now pcommon.Timestamp, logs []map[string]any) if stringV, ok := v.(string); ok { ts, err := time.Parse(time.RFC3339, stringV) if err != nil { - l.logger.Warn(fmt.Sprintf("unable to parse %s", l.cfg.TimestampField), zap.Error(err), zap.String("value", stringV)) + l.logger.Warn("unable to parse "+l.cfg.TimestampField, zap.Error(err), zap.String("value", stringV)) } else { 
logRecord.SetTimestamp(pcommon.NewTimestampFromTime(ts)) } } else { - l.logger.Warn(fmt.Sprintf("unable to parse %s", l.cfg.TimestampField), zap.Any("value", v)) + l.logger.Warn("unable to parse "+l.cfg.TimestampField, zap.Any("value", v)) } } diff --git a/receiver/couchdbreceiver/metrics.go b/receiver/couchdbreceiver/metrics.go index ff759f542e28..1081772f5f03 100644 --- a/receiver/couchdbreceiver/metrics.go +++ b/receiver/couchdbreceiver/metrics.go @@ -4,7 +4,7 @@ package couchdbreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/couchdbreceiver" import ( - "fmt" + "errors" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/receiver/scrapererror" @@ -156,12 +156,12 @@ func getValueFromBody(keys []string, body map[string]any) (any, error) { for _, key := range keys { currentBody, ok := currentValue.(map[string]any) if !ok { - return nil, fmt.Errorf("could not find key in body") + return nil, errors.New("could not find key in body") } currentValue, ok = currentBody[key] if !ok { - return nil, fmt.Errorf("could not find key in body") + return nil, errors.New("could not find key in body") } } return currentValue, nil @@ -174,12 +174,12 @@ func (c *couchdbScraper) parseInt(value any) (int64, error) { case float64: return int64(i), nil } - return 0, fmt.Errorf("could not parse value as int") + return 0, errors.New("could not parse value as int") } func (c *couchdbScraper) parseFloat(value any) (float64, error) { if f, ok := value.(float64); ok { return f, nil } - return 0, fmt.Errorf("could not parse value as float") + return 0, errors.New("could not parse value as float") } diff --git a/receiver/datadogreceiver/internal/translator/tags.go b/receiver/datadogreceiver/internal/translator/tags.go index e2cfc3da4f80..3e50f612bcd9 100644 --- a/receiver/datadogreceiver/internal/translator/tags.go +++ b/receiver/datadogreceiver/internal/translator/tags.go @@ -4,7 +4,6 @@ package translator // import 
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/datadogreceiver/internal/translator" import ( - "fmt" "strings" "sync" @@ -70,7 +69,7 @@ func translateDatadogTagToKeyValuePair(tag string) (key string, value string) { // to only support key:value pairs. // The following is a workaround to map unnamed inputTags to key:value pairs and its subject to future // changes if OTel supports unnamed inputTags in the future or if there is a better way to do this. - key = fmt.Sprintf("unnamed_%s", tag) + key = "unnamed_" + tag val = tag } return key, val diff --git a/receiver/datadogreceiver/internal/translator/traces_translator_test.go b/receiver/datadogreceiver/internal/translator/traces_translator_test.go index c0d43fae9600..e3bba6831829 100644 --- a/receiver/datadogreceiver/internal/translator/traces_translator_test.go +++ b/receiver/datadogreceiver/internal/translator/traces_translator_test.go @@ -178,11 +178,11 @@ func agentPayloadFromTraces(traces *pb.Traces) (agentPayload pb.AgentPayload) { var tracerPayloads []*pb.TracerPayload for i := 0; i < numberOfTraces; i++ { payload := &pb.TracerPayload{ - LanguageName: fmt.Sprintf("%d", i), - LanguageVersion: fmt.Sprintf("%d", i), - ContainerID: fmt.Sprintf("%d", i), + LanguageName: strconv.Itoa(i), + LanguageVersion: strconv.Itoa(i), + ContainerID: strconv.Itoa(i), Chunks: traceChunksFromTraces(*traces), - TracerVersion: fmt.Sprintf("%d", i), + TracerVersion: strconv.Itoa(i), } tracerPayloads = append(tracerPayloads, payload) } diff --git a/receiver/datadogreceiver/receiver.go b/receiver/datadogreceiver/receiver.go index 17b2814ce007..23ba993e3578 100644 --- a/receiver/datadogreceiver/receiver.go +++ b/receiver/datadogreceiver/receiver.go @@ -423,7 +423,7 @@ func (ddr *datadogReceiver) handleIntake(w http.ResponseWriter, req *http.Reques ddr.tReceiver.EndMetricsOp(obsCtx, "datadog", *metricsCount, err) }(&metricsCount) - err = fmt.Errorf("intake endpoint not implemented") + err = errors.New("intake endpoint 
not implemented") http.Error(w, err.Error(), http.StatusMethodNotAllowed) ddr.params.Logger.Warn("metrics consumer errored out", zap.Error(err)) } @@ -437,7 +437,7 @@ func (ddr *datadogReceiver) handleDistributionPoints(w http.ResponseWriter, req ddr.tReceiver.EndMetricsOp(obsCtx, "datadog", *metricsCount, err) }(&metricsCount) - err = fmt.Errorf("distribution points endpoint not implemented") + err = errors.New("distribution points endpoint not implemented") http.Error(w, err.Error(), http.StatusMethodNotAllowed) ddr.params.Logger.Warn("metrics consumer errored out", zap.Error(err)) } diff --git a/receiver/dockerstatsreceiver/receiver.go b/receiver/dockerstatsreceiver/receiver.go index 1b82c04611bc..7c5bf463cad7 100644 --- a/receiver/dockerstatsreceiver/receiver.go +++ b/receiver/dockerstatsreceiver/receiver.go @@ -270,7 +270,7 @@ func (r *metricsReceiver) recordCPUMetrics(now pcommon.Timestamp, cpuStats *ctyp r.mb.RecordContainerCPULogicalCountDataPoint(now, int64(cpuStats.OnlineCPUs)) for coreNum, v := range cpuStats.CPUUsage.PercpuUsage { - r.mb.RecordContainerCPUUsagePercpuDataPoint(now, int64(v), fmt.Sprintf("cpu%s", strconv.Itoa(coreNum))) + r.mb.RecordContainerCPUUsagePercpuDataPoint(now, int64(v), "cpu"+strconv.Itoa(coreNum)) } } diff --git a/receiver/elasticsearchreceiver/client.go b/receiver/elasticsearchreceiver/client.go index d70067cb0b86..b5312a9a17c8 100644 --- a/receiver/elasticsearchreceiver/client.go +++ b/receiver/elasticsearchreceiver/client.go @@ -63,7 +63,7 @@ func newElasticsearchClient(ctx context.Context, settings component.TelemetrySet if c.Username != "" && c.Password != "" { userPass := fmt.Sprintf("%s:%s", c.Username, string(c.Password)) authb64 := base64.StdEncoding.EncodeToString([]byte(userPass)) - authHeader = fmt.Sprintf("Basic %s", authb64) + authHeader = "Basic " + authb64 } esClient := defaultElasticsearchClient{ @@ -204,7 +204,7 @@ func (c defaultElasticsearchClient) ClusterStats(ctx context.Context, nodes []st nodesSpec = 
"_all" } - clusterStatsPath := fmt.Sprintf("_cluster/stats/nodes/%s", nodesSpec) + clusterStatsPath := "_cluster/stats/nodes/" + nodesSpec body, err := c.doRequest(ctx, clusterStatsPath) if err != nil { diff --git a/receiver/expvarreceiver/config.go b/receiver/expvarreceiver/config.go index 0718a5aa49f1..fcd51334ea70 100644 --- a/receiver/expvarreceiver/config.go +++ b/receiver/expvarreceiver/config.go @@ -4,6 +4,7 @@ package expvarreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/expvarreceiver" import ( + "errors" "fmt" "net/url" @@ -31,7 +32,7 @@ func (c *Config) Validate() error { return fmt.Errorf("scheme must be 'http' or 'https', but was '%s'", u.Scheme) } if u.Host == "" { - return fmt.Errorf("host not found in HTTP endpoint") + return errors.New("host not found in HTTP endpoint") } return nil } diff --git a/receiver/expvarreceiver/scraper.go b/receiver/expvarreceiver/scraper.go index 44a20dd075ca..93c7270cd7f0 100644 --- a/receiver/expvarreceiver/scraper.go +++ b/receiver/expvarreceiver/scraper.go @@ -6,6 +6,7 @@ package expvarreceiver // import "github.com/open-telemetry/opentelemetry-collec import ( "context" "encoding/json" + "errors" "fmt" "io" "net/http" @@ -70,7 +71,7 @@ func (e *expVarScraper) scrape(ctx context.Context) (pmetric.Metrics, error) { } memStats := result.MemStats if memStats == nil { - return emptyMetrics, fmt.Errorf("unmarshalled memstats data is nil") + return emptyMetrics, errors.New("unmarshalled memstats data is nil") } now := pcommon.NewTimestampFromTime(time.Now()) diff --git a/receiver/filelogreceiver/filelog_test.go b/receiver/filelogreceiver/filelog_test.go index 80931bbe87ee..d4968df3158d 100644 --- a/receiver/filelogreceiver/filelog_test.go +++ b/receiver/filelogreceiver/filelog_test.go @@ -11,6 +11,7 @@ import ( "os" "path/filepath" "runtime" + "strconv" "sync" "sync/atomic" "testing" @@ -320,7 +321,7 @@ func rotationTestConfig(tempDir string) *FileLogConfig { }, InputConfig: func() 
file.Config { c := file.NewConfig() - c.Include = []string{fmt.Sprintf("%s/*", tempDir)} + c.Include = []string{tempDir + "/*"} c.StartAt = "beginning" c.PollInterval = 10 * time.Millisecond c.IncludeFileName = false @@ -383,7 +384,7 @@ func (g *fileLogGenerator) Stop() { } func (g *fileLogGenerator) Generate() []receivertest.UniqueIDAttrVal { - id := receivertest.UniqueIDAttrVal(fmt.Sprintf("%d", atomic.AddInt64(&g.sequenceNum, 1))) + id := receivertest.UniqueIDAttrVal(strconv.FormatInt(atomic.AddInt64(&g.sequenceNum, 1), 10)) logLine := fmt.Sprintf(`{"ts": "%s", "log": "log-%s", "%s": "%s"}`, time.Now().Format(time.RFC3339), id, receivertest.UniqueIDAttrName, id) _, err := g.tmpFile.WriteString(logLine + "\n") diff --git a/receiver/flinkmetricsreceiver/client.go b/receiver/flinkmetricsreceiver/client.go index 8b02cf66ca35..a2d8b51659ce 100644 --- a/receiver/flinkmetricsreceiver/client.go +++ b/receiver/flinkmetricsreceiver/client.go @@ -10,6 +10,7 @@ import ( "io" "net/http" "os" + "strconv" "strings" "go.opentelemetry.io/collector/component" @@ -313,7 +314,7 @@ func (c *flinkClient) getSubtasksMetricsByIDs(ctx context.Context, jobsResponse TaskmanagerID: getTaskmanagerID(subtask.TaskmanagerID), JobName: jobsWithIDResponse.Name, TaskName: vertex.Name, - SubtaskIndex: fmt.Sprintf("%v", subtask.Subtask), + SubtaskIndex: strconv.Itoa(subtask.Subtask), Metrics: *subtaskMetrics, }) } diff --git a/receiver/googlecloudmonitoringreceiver/receiver.go b/receiver/googlecloudmonitoringreceiver/receiver.go index 4b0d6b256e20..f852e596de65 100644 --- a/receiver/googlecloudmonitoringreceiver/receiver.go +++ b/receiver/googlecloudmonitoringreceiver/receiver.go @@ -160,7 +160,7 @@ func (mr *monitoringReceiver) initializeClient(ctx context.Context) error { return fmt.Errorf("failed to find default credentials: %w", err) } if creds == nil || creds.JSON == nil { - return fmt.Errorf("no valid credentials found") + return errors.New("no valid credentials found") } // Attempt to create 
the monitoring client diff --git a/receiver/googlecloudpubsubreceiver/internal/handler.go b/receiver/googlecloudpubsubreceiver/internal/handler.go index fbb466fed8cb..ea879c857d39 100644 --- a/receiver/googlecloudpubsubreceiver/internal/handler.go +++ b/receiver/googlecloudpubsubreceiver/internal/handler.go @@ -213,7 +213,7 @@ func (handler *StreamHandler) responseStream(ctx context.Context, cancel context time.Sleep(time.Second * 60) activeStreaming = false default: - handler.logger.Warn(fmt.Sprintf("response stream breaking on gRPC s %s", s.Message()), + handler.logger.Warn("response stream breaking on gRPC s "+s.Message(), zap.String("s", s.Message()), zap.Error(err)) activeStreaming = false diff --git a/receiver/googlecloudspannerreceiver/internal/metadata/metricsdatapoint.go b/receiver/googlecloudspannerreceiver/internal/metadata/metricsdatapoint.go index 26f76aec92df..591309082a49 100644 --- a/receiver/googlecloudspannerreceiver/internal/metadata/metricsdatapoint.go +++ b/receiver/googlecloudspannerreceiver/internal/metadata/metricsdatapoint.go @@ -4,8 +4,8 @@ package metadata // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/googlecloudspannerreceiver/internal/metadata" import ( - "fmt" "hash/fnv" + "strconv" "strings" "time" "unicode/utf8" @@ -126,9 +126,9 @@ func parseAndHashRowrangestartkey(key string) string { hashFunction.Reset() hashFunction.Write([]byte(subKey)) if cnt < len(keySlice)-1 { - builderHashedKey.WriteString(fmt.Sprint(hashFunction.Sum32()) + ",") + builderHashedKey.WriteString(strconv.FormatUint(uint64(hashFunction.Sum32()), 10) + ",") } else { - builderHashedKey.WriteString(fmt.Sprint(hashFunction.Sum32())) + builderHashedKey.WriteString(strconv.FormatUint(uint64(hashFunction.Sum32()), 10)) } } if plusPresent { @@ -182,5 +182,5 @@ func (mdp *MetricsDataPoint) hash() (string, error) { return "", err } - return fmt.Sprintf("%x", hashedData), nil + return strconv.FormatUint(hashedData, 16), nil } diff --git 
a/receiver/googlecloudspannerreceiver/internal/metadata/metricsdatapoint_test.go b/receiver/googlecloudspannerreceiver/internal/metadata/metricsdatapoint_test.go index b837b0f6e04a..6d277ff68998 100644 --- a/receiver/googlecloudspannerreceiver/internal/metadata/metricsdatapoint_test.go +++ b/receiver/googlecloudspannerreceiver/internal/metadata/metricsdatapoint_test.go @@ -4,8 +4,8 @@ package metadata import ( - "fmt" "hash/fnv" + "strconv" "testing" "time" @@ -119,10 +119,10 @@ func TestMetricsDataPoint_HideLockStatsRowrangestartkeyPII(t *testing.T) { hashFunction := fnv.New32a() hashFunction.Reset() hashFunction.Write([]byte("23")) - hashOf23 := fmt.Sprint(hashFunction.Sum32()) + hashOf23 := strconv.FormatUint(uint64(hashFunction.Sum32()), 10) hashFunction.Reset() hashFunction.Write([]byte("hello")) - hashOfHello := fmt.Sprint(hashFunction.Sum32()) + hashOfHello := strconv.FormatUint(uint64(hashFunction.Sum32()), 10) metricsDataPoint.HideLockStatsRowrangestartkeyPII() diff --git a/receiver/hostmetricsreceiver/internal/perfcounters/perfcounter_scraper_errors.go b/receiver/hostmetricsreceiver/internal/perfcounters/perfcounter_scraper_errors.go index a38c41be96ef..b2f472896657 100644 --- a/receiver/hostmetricsreceiver/internal/perfcounters/perfcounter_scraper_errors.go +++ b/receiver/hostmetricsreceiver/internal/perfcounters/perfcounter_scraper_errors.go @@ -4,7 +4,6 @@ package perfcounters // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/perfcounters" import ( - "fmt" "strings" ) @@ -13,7 +12,7 @@ type PerfCounterInitError struct { } func (p *PerfCounterInitError) Error() string { - return fmt.Sprintf("failed to init counters: %s", strings.Join(p.FailedObjects, "; ")) + return "failed to init counters: " + strings.Join(p.FailedObjects, "; ") } func (p *PerfCounterInitError) AddFailure(object string) { diff --git a/receiver/jaegerreceiver/config.go b/receiver/jaegerreceiver/config.go index 
4ed9dda665ca..be6d3bb0cf36 100644 --- a/receiver/jaegerreceiver/config.go +++ b/receiver/jaegerreceiver/config.go @@ -4,6 +4,7 @@ package jaegerreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver" import ( + "errors" "fmt" "net" "strconv" @@ -81,7 +82,7 @@ func (cfg *Config) Validate() error { cfg.ThriftHTTP == nil && cfg.ThriftBinary == nil && cfg.ThriftCompact == nil { - return fmt.Errorf("must specify at least one protocol when using the Jaeger receiver") + return errors.New("must specify at least one protocol when using the Jaeger receiver") } if cfg.GRPC != nil { @@ -110,7 +111,7 @@ func (cfg *Config) Validate() error { if cfg.RemoteSampling != nil { if disableJaegerReceiverRemoteSampling.IsEnabled() { - return fmt.Errorf("remote sampling config detected in the Jaeger receiver; use the `jaegerremotesampling` extension instead") + return errors.New("remote sampling config detected in the Jaeger receiver; use the `jaegerremotesampling` extension instead") } } @@ -120,7 +121,7 @@ func (cfg *Config) Validate() error { // Unmarshal a config.Parser into the config struct. 
func (cfg *Config) Unmarshal(componentParser *confmap.Conf) error { if componentParser == nil || len(componentParser.AllKeys()) == 0 { - return fmt.Errorf("empty config for Jaeger receiver") + return errors.New("empty config for Jaeger receiver") } // UnmarshalExact will not set struct properties to nil even if no key is provided, @@ -163,7 +164,7 @@ func checkPortFromEndpoint(endpoint string) error { return fmt.Errorf("endpoint port is not a number: %w", err) } if port < 1 || port > 65535 { - return fmt.Errorf("port number must be between 1 and 65535") + return errors.New("port number must be between 1 and 65535") } return nil } diff --git a/receiver/jaegerreceiver/trace_receiver.go b/receiver/jaegerreceiver/trace_receiver.go index 1eb1159639d3..e68101c13c4c 100644 --- a/receiver/jaegerreceiver/trace_receiver.go +++ b/receiver/jaegerreceiver/trace_receiver.go @@ -173,7 +173,7 @@ var _ agent.Agent = (*agentHandler)(nil) var _ api_v2.CollectorServiceServer = (*jReceiver)(nil) var _ configmanager.ClientConfigManager = (*notImplementedConfigManager)(nil) -var errNotImplemented = fmt.Errorf("not implemented") +var errNotImplemented = errors.New("not implemented") type notImplementedConfigManager struct{} diff --git a/receiver/jmxreceiver/config.go b/receiver/jmxreceiver/config.go index b31534dae3bf..e38712125208 100644 --- a/receiver/jmxreceiver/config.go +++ b/receiver/jmxreceiver/config.go @@ -101,7 +101,7 @@ func (c *Config) parseProperties(logger *zap.Logger) []string { logLevel = getZapLoggerLevelEquivalent(logger) } - parsed = append(parsed, fmt.Sprintf("-Dorg.slf4j.simpleLogger.defaultLogLevel=%s", logLevel)) + parsed = append(parsed, "-Dorg.slf4j.simpleLogger.defaultLogLevel="+logLevel) // Sorted for testing and reproducibility sort.Strings(parsed) return parsed diff --git a/receiver/jmxreceiver/internal/subprocess/subprocess.go b/receiver/jmxreceiver/internal/subprocess/subprocess.go index e1a8dfc48a04..e50656eecfb3 100644 --- 
a/receiver/jmxreceiver/internal/subprocess/subprocess.go +++ b/receiver/jmxreceiver/internal/subprocess/subprocess.go @@ -6,6 +6,7 @@ package subprocess // import "github.com/open-telemetry/opentelemetry-collector- import ( "bufio" "context" + "errors" "fmt" "io" "os" @@ -123,7 +124,7 @@ func (subprocess *Subprocess) Start(ctx context.Context) error { // Shutdown is invoked during service shutdown. func (subprocess *Subprocess) Shutdown(ctx context.Context) error { if subprocess.cancel == nil { - return fmt.Errorf("no subprocess.cancel(). Has it been started properly?") + return errors.New("no subprocess.cancel(). Has it been started properly?") } timeout := defaultShutdownTimeout diff --git a/receiver/jmxreceiver/receiver.go b/receiver/jmxreceiver/receiver.go index 61274cc4254a..df62ffea1f27 100644 --- a/receiver/jmxreceiver/receiver.go +++ b/receiver/jmxreceiver/receiver.go @@ -155,7 +155,7 @@ func (jmx *jmxMetricReceiver) buildOTLPReceiver() (receiver.Metrics, error) { } defer listener.Close() addr := listener.Addr().(*net.TCPAddr) - port = fmt.Sprintf("%d", addr.Port) + port = strconv.Itoa(addr.Port) endpoint = fmt.Sprintf("%s:%s", host, port) jmx.config.OTLPExporterConfig.Endpoint = endpoint } @@ -194,7 +194,7 @@ func (jmx *jmxMetricReceiver) buildJMXMetricGathererConfig() (string, error) { endpoint := jmx.config.OTLPExporterConfig.Endpoint if !strings.HasPrefix(endpoint, "http") { - endpoint = fmt.Sprintf("http://%s", endpoint) + endpoint = "http://" + endpoint } config["otel.metrics.exporter"] = "otlp" diff --git a/receiver/k8sclusterreceiver/internal/metadata/metadata.go b/receiver/k8sclusterreceiver/internal/metadata/metadata.go index a630239cb754..5ea50a953f7b 100644 --- a/receiver/k8sclusterreceiver/internal/metadata/metadata.go +++ b/receiver/k8sclusterreceiver/internal/metadata/metadata.go @@ -54,8 +54,7 @@ func GetGenericMetadata(om *v1.ObjectMeta, resourceType string) *KubernetesMetad metadata[constants.K8sKeyWorkLoadKind] = resourceType 
metadata[constants.K8sKeyWorkLoadName] = om.Name - metadata[fmt.Sprintf("%s.creation_timestamp", - rType)] = om.GetCreationTimestamp().Format(time.RFC3339) + metadata[rType+".creation_timestamp"] = om.GetCreationTimestamp().Format(time.RFC3339) for _, or := range om.OwnerReferences { kind := strings.ToLower(or.Kind) @@ -80,7 +79,7 @@ func GetOTelNameFromKind(kind string) string { } func getOTelEntityTypeFromKind(kind string) string { - return fmt.Sprintf("k8s.%s", kind) + return "k8s." + kind } // mergeKubernetesMetadataMaps merges maps of string (resource id) to diff --git a/receiver/k8sclusterreceiver/internal/node/nodes.go b/receiver/k8sclusterreceiver/internal/node/nodes.go index f762b7e14ae3..9426718981d9 100644 --- a/receiver/k8sclusterreceiver/internal/node/nodes.go +++ b/receiver/k8sclusterreceiver/internal/node/nodes.go @@ -172,7 +172,7 @@ func getContainerRuntimeInfo(rawInfo string) (runtime string, version string) { return "", "" } func getNodeConditionMetric(nodeConditionTypeValue string) string { - return fmt.Sprintf("k8s.node.condition_%s", strcase.ToSnake(nodeConditionTypeValue)) + return "k8s.node.condition_" + strcase.ToSnake(nodeConditionTypeValue) } func getNodeAllocatableUnit(res corev1.ResourceName) string { @@ -198,5 +198,5 @@ func setNodeAllocatableValue(dp pmetric.NumberDataPoint, res corev1.ResourceName } func getNodeAllocatableMetric(nodeAllocatableTypeValue string) string { - return fmt.Sprintf("k8s.node.allocatable_%s", strcase.ToSnake(nodeAllocatableTypeValue)) + return "k8s.node.allocatable_" + strcase.ToSnake(nodeAllocatableTypeValue) } diff --git a/receiver/k8sclusterreceiver/receiver.go b/receiver/k8sclusterreceiver/receiver.go index ae9c5f75ca8b..10e90b16fc5f 100644 --- a/receiver/k8sclusterreceiver/receiver.go +++ b/receiver/k8sclusterreceiver/receiver.go @@ -6,7 +6,6 @@ package k8sclusterreceiver // import "github.com/open-telemetry/opentelemetry-co import ( "context" "errors" - "fmt" "time" 
"go.opentelemetry.io/collector/component" @@ -52,7 +51,7 @@ func (kr *kubernetesReceiver) Start(ctx context.Context, host component.Host) er ge, ok := host.(getExporters) if !ok { - return fmt.Errorf("unable to get exporters") + return errors.New("unable to get exporters") } exporters := ge.GetExporters() diff --git a/receiver/k8sobjectsreceiver/config.go b/receiver/k8sobjectsreceiver/config.go index 62c303748e35..fc4d9bc487e3 100644 --- a/receiver/k8sobjectsreceiver/config.go +++ b/receiver/k8sobjectsreceiver/config.go @@ -4,6 +4,7 @@ package k8sobjectsreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sobjectsreceiver" import ( + "errors" "fmt" "strings" "time" @@ -90,7 +91,7 @@ func (c *Config) Validate() error { } if object.Mode == PullMode && len(object.ExcludeWatchType) != 0 { - return fmt.Errorf("the Exclude config can only be used with watch mode") + return errors.New("the Exclude config can only be used with watch mode") } object.gvr = gvr diff --git a/receiver/k8sobjectsreceiver/receiver.go b/receiver/k8sobjectsreceiver/receiver.go index d69df14a19e6..c1e82babd5dc 100644 --- a/receiver/k8sobjectsreceiver/receiver.go +++ b/receiver/k8sobjectsreceiver/receiver.go @@ -5,6 +5,7 @@ package k8sobjectsreceiver // import "github.com/open-telemetry/opentelemetry-co import ( "context" + "errors" "fmt" "net/http" "sync" @@ -252,7 +253,7 @@ func getResourceVersion(ctx context.Context, config *K8sObjectsConfig, resource return "", fmt.Errorf("could not perform initial list for watch on %v, %w", config.gvr.String(), err) } if objects == nil { - return "", fmt.Errorf("nil objects returned, this is an error in the k8sobjectsreceiver") + return "", errors.New("nil objects returned, this is an error in the k8sobjectsreceiver") } resourceVersion = objects.GetResourceVersion() diff --git a/receiver/kafkametricsreceiver/broker_scraper_test.go b/receiver/kafkametricsreceiver/broker_scraper_test.go index 9616ffc5f7ce..a9d3a981d4ff 100644 
--- a/receiver/kafkametricsreceiver/broker_scraper_test.go +++ b/receiver/kafkametricsreceiver/broker_scraper_test.go @@ -5,7 +5,7 @@ package kafkametricsreceiver import ( "context" - "fmt" + "errors" "testing" "github.com/IBM/sarama" @@ -66,7 +66,7 @@ func TestBrokerScraperStart(t *testing.T) { func TestBrokerScraper_scrape_handles_client_error(t *testing.T) { newSaramaClient = func([]string, *sarama.Config) (sarama.Client, error) { - return nil, fmt.Errorf("new client failed") + return nil, errors.New("new client failed") } sc := sarama.NewConfig() bs, err := createBrokerScraper(context.Background(), Config{}, sc, receivertest.NewNopSettings()) @@ -78,7 +78,7 @@ func TestBrokerScraper_scrape_handles_client_error(t *testing.T) { func TestBrokerScraper_shutdown_handles_nil_client(t *testing.T) { newSaramaClient = func([]string, *sarama.Config) (sarama.Client, error) { - return nil, fmt.Errorf("new client failed") + return nil, errors.New("new client failed") } sc := sarama.NewConfig() bs, err := createBrokerScraper(context.Background(), Config{}, sc, receivertest.NewNopSettings()) diff --git a/receiver/kafkametricsreceiver/consumer_scraper_test.go b/receiver/kafkametricsreceiver/consumer_scraper_test.go index 3ae3f01183e5..360fd5afc006 100644 --- a/receiver/kafkametricsreceiver/consumer_scraper_test.go +++ b/receiver/kafkametricsreceiver/consumer_scraper_test.go @@ -5,7 +5,7 @@ package kafkametricsreceiver import ( "context" - "fmt" + "errors" "regexp" "testing" @@ -53,7 +53,7 @@ func TestConsumerScraper_createConsumerScraper(t *testing.T) { func TestConsumerScraper_scrape_handles_client_error(t *testing.T) { newSaramaClient = func([]string, *sarama.Config) (sarama.Client, error) { - return nil, fmt.Errorf("new client failed") + return nil, errors.New("new client failed") } sc := sarama.NewConfig() cs, err := createConsumerScraper(context.Background(), Config{}, sc, receivertest.NewNopSettings()) @@ -65,7 +65,7 @@ func 
TestConsumerScraper_scrape_handles_client_error(t *testing.T) { func TestConsumerScraper_scrape_handles_nil_client(t *testing.T) { newSaramaClient = func([]string, *sarama.Config) (sarama.Client, error) { - return nil, fmt.Errorf("new client failed") + return nil, errors.New("new client failed") } sc := sarama.NewConfig() cs, err := createConsumerScraper(context.Background(), Config{}, sc, receivertest.NewNopSettings()) @@ -83,7 +83,7 @@ func TestConsumerScraper_scrape_handles_clusterAdmin_error(t *testing.T) { return client, nil } newClusterAdmin = func([]string, *sarama.Config) (sarama.ClusterAdmin, error) { - return nil, fmt.Errorf("new cluster admin failed") + return nil, errors.New("new cluster admin failed") } sc := sarama.NewConfig() cs, err := createConsumerScraper(context.Background(), Config{}, sc, receivertest.NewNopSettings()) diff --git a/receiver/kafkametricsreceiver/receiver_test.go b/receiver/kafkametricsreceiver/receiver_test.go index 455cc797ae04..5e37ccf39c83 100644 --- a/receiver/kafkametricsreceiver/receiver_test.go +++ b/receiver/kafkametricsreceiver/receiver_test.go @@ -5,7 +5,7 @@ package kafkametricsreceiver import ( "context" - "fmt" + "errors" "testing" "github.com/IBM/sarama" @@ -37,7 +37,7 @@ func TestNewReceiver_invalid_scraper_error(t *testing.T) { allScrapers["brokers"] = mockScraper r, err := newMetricsReceiver(context.Background(), *c, receivertest.NewNopSettings(), nil) assert.Nil(t, r) - expectedError := fmt.Errorf("no scraper found for key: cpu") + expectedError := errors.New("no scraper found for key: cpu") if assert.Error(t, err) { assert.Equal(t, expectedError, err) } @@ -75,7 +75,7 @@ func TestNewReceiver_handles_scraper_error(t *testing.T) { c := createDefaultConfig().(*Config) c.Scrapers = []string{"brokers"} mockScraper := func(context.Context, Config, *sarama.Config, receiver.Settings) (scraperhelper.Scraper, error) { - return nil, fmt.Errorf("fail") + return nil, errors.New("fail") } allScrapers["brokers"] = mockScraper 
r, err := newMetricsReceiver(context.Background(), *c, receivertest.NewNopSettings(), consumertest.NewNop()) diff --git a/receiver/kafkametricsreceiver/scraper_test_helper.go b/receiver/kafkametricsreceiver/scraper_test_helper.go index b3867ea4b17a..4c7cabf4ae6d 100644 --- a/receiver/kafkametricsreceiver/scraper_test_helper.go +++ b/receiver/kafkametricsreceiver/scraper_test_helper.go @@ -4,7 +4,7 @@ package kafkametricsreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkametricsreceiver" import ( - "fmt" + "errors" "strconv" "github.com/IBM/sarama" @@ -74,35 +74,35 @@ func (s *mockSaramaClient) Topics() ([]string, error) { if s.topics != nil { return s.topics, nil } - return nil, fmt.Errorf("mock topic error") + return nil, errors.New("mock topic error") } func (s *mockSaramaClient) Partitions(string) ([]int32, error) { if s.partitions != nil { return s.partitions, nil } - return nil, fmt.Errorf("mock partition error") + return nil, errors.New("mock partition error") } func (s *mockSaramaClient) GetOffset(string, int32, int64) (int64, error) { if s.offset != -1 { return s.offset, nil } - return s.offset, fmt.Errorf("mock offset error") + return s.offset, errors.New("mock offset error") } func (s *mockSaramaClient) Replicas(string, int32) ([]int32, error) { if s.replicas != nil { return s.replicas, nil } - return nil, fmt.Errorf("mock replicas error") + return nil, errors.New("mock replicas error") } func (s *mockSaramaClient) InSyncReplicas(string, int32) ([]int32, error) { if s.inSyncReplicas != nil { return s.inSyncReplicas, nil } - return nil, fmt.Errorf("mock in sync replicas error") + return nil, errors.New("mock in sync replicas error") } func newMockClient() *mockSaramaClient { @@ -134,28 +134,28 @@ type mockClusterAdmin struct { func (s *mockClusterAdmin) ListTopics() (map[string]sarama.TopicDetail, error) { if s.topics == nil { - return nil, fmt.Errorf("error getting topics") + return nil, errors.New("error getting 
topics") } return s.topics, nil } func (s *mockClusterAdmin) ListConsumerGroups() (map[string]string, error) { if s.consumerGroups == nil { - return nil, fmt.Errorf("error getting consumer groups") + return nil, errors.New("error getting consumer groups") } return s.consumerGroups, nil } func (s *mockClusterAdmin) DescribeConsumerGroups([]string) ([]*sarama.GroupDescription, error) { if s.consumerGroupDescriptions == nil { - return nil, fmt.Errorf("error describing consumer groups") + return nil, errors.New("error describing consumer groups") } return s.consumerGroupDescriptions, nil } func (s *mockClusterAdmin) ListConsumerGroupOffsets(string, map[string][]int32) (*sarama.OffsetFetchResponse, error) { if s.consumerGroupOffsets == nil { - return nil, fmt.Errorf("mock consumer group offset error") + return nil, errors.New("mock consumer group offset error") } return s.consumerGroupOffsets, nil } @@ -166,7 +166,7 @@ func (s *mockClusterAdmin) DescribeConfig(cr sarama.ConfigResource) ([]sarama.Co return s.brokerConfigs, nil } if s.topics[topicName].ConfigEntries == nil { - return nil, fmt.Errorf("no config entries found for topic") + return nil, errors.New("no config entries found for topic") } configEntry := make([]sarama.ConfigEntry, 1) for name, entry := range s.topics[topicName].ConfigEntries { diff --git a/receiver/kafkametricsreceiver/topic_scraper_test.go b/receiver/kafkametricsreceiver/topic_scraper_test.go index a32127a57205..62920485251e 100644 --- a/receiver/kafkametricsreceiver/topic_scraper_test.go +++ b/receiver/kafkametricsreceiver/topic_scraper_test.go @@ -5,7 +5,7 @@ package kafkametricsreceiver import ( "context" - "fmt" + "errors" "regexp" "testing" @@ -56,7 +56,7 @@ func TestTopicScraper_createsScraper(t *testing.T) { func TestTopicScraper_ScrapeHandlesError(t *testing.T) { newSaramaClient = func([]string, *sarama.Config) (sarama.Client, error) { - return nil, fmt.Errorf("no scraper here") + return nil, errors.New("no scraper here") } sc := 
sarama.NewConfig() ms, err := createTopicsScraper(context.Background(), Config{}, sc, receivertest.NewNopSettings()) @@ -68,7 +68,7 @@ func TestTopicScraper_ScrapeHandlesError(t *testing.T) { func TestTopicScraper_ShutdownHandlesNilClient(t *testing.T) { newSaramaClient = func([]string, *sarama.Config) (sarama.Client, error) { - return nil, fmt.Errorf("no scraper here") + return nil, errors.New("no scraper here") } sc := sarama.NewConfig() ms, err := createTopicsScraper(context.Background(), Config{}, sc, receivertest.NewNopSettings()) diff --git a/receiver/kafkareceiver/factory.go b/receiver/kafkareceiver/factory.go index a7749d45809a..be413ec507c5 100644 --- a/receiver/kafkareceiver/factory.go +++ b/receiver/kafkareceiver/factory.go @@ -5,7 +5,7 @@ package kafkareceiver // import "github.com/open-telemetry/opentelemetry-collect import ( "context" - "fmt" + "errors" "strings" "time" @@ -49,7 +49,7 @@ const ( defaultMaxFetchSize = int32(0) ) -var errUnrecognizedEncoding = fmt.Errorf("unrecognized encoding") +var errUnrecognizedEncoding = errors.New("unrecognized encoding") // FactoryOption applies changes to kafkaExporterFactory. type FactoryOption func(factory *kafkaReceiverFactory) diff --git a/receiver/kafkareceiver/header_extraction.go b/receiver/kafkareceiver/header_extraction.go index 265c84fb33db..efae723c2011 100644 --- a/receiver/kafkareceiver/header_extraction.go +++ b/receiver/kafkareceiver/header_extraction.go @@ -4,8 +4,6 @@ package kafkareceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kafkareceiver" import ( - "fmt" - "github.com/IBM/sarama" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" @@ -14,7 +12,7 @@ import ( ) func getAttribute(key string) string { - return fmt.Sprintf("kafka.header.%s", key) + return "kafka.header." 
+ key } type HeaderExtractor interface { diff --git a/receiver/kafkareceiver/kafka_receiver.go b/receiver/kafkareceiver/kafka_receiver.go index 48ea87559a56..497d761c8060 100644 --- a/receiver/kafkareceiver/kafka_receiver.go +++ b/receiver/kafkareceiver/kafka_receiver.go @@ -5,6 +5,7 @@ package kafkareceiver // import "github.com/open-telemetry/opentelemetry-collect import ( "context" + "errors" "fmt" "strconv" "sync" @@ -32,7 +33,7 @@ const ( attrPartition = "partition" ) -var errInvalidInitialOffset = fmt.Errorf("invalid initial offset") +var errInvalidInitialOffset = errors.New("invalid initial offset") // kafkaTracesConsumer uses sarama to consume and handle messages from kafka. type kafkaTracesConsumer struct { diff --git a/receiver/mongodbatlasreceiver/internal/mongodb_atlas_client.go b/receiver/mongodbatlasreceiver/internal/mongodb_atlas_client.go index 3f1e1bbcab2d..b3ccb9a3b737 100644 --- a/receiver/mongodbatlasreceiver/internal/mongodb_atlas_client.go +++ b/receiver/mongodbatlasreceiver/internal/mongodb_atlas_client.go @@ -6,6 +6,7 @@ package internal // import "github.com/open-telemetry/opentelemetry-collector-co import ( "bytes" "context" + "errors" "fmt" "net/http" "strconv" @@ -70,7 +71,7 @@ func (rt *clientRoundTripper) Shutdown() error { func (rt *clientRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { if rt.isStopped() { - return nil, fmt.Errorf("request cancelled due to shutdown") + return nil, errors.New("request cancelled due to shutdown") } resp, err := rt.originalTransport.RoundTrip(r) @@ -100,9 +101,9 @@ func (rt *clientRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) zap.Duration("delay", delay)) select { case <-r.Context().Done(): - return resp, fmt.Errorf("request was cancelled or timed out") + return resp, errors.New("request was cancelled or timed out") case <-rt.shutdownChan: - return resp, fmt.Errorf("request is cancelled due to server shutdown") + return resp, errors.New("request is cancelled due to 
server shutdown") case <-time.After(delay): } @@ -720,9 +721,9 @@ type GetAccessLogsOptions struct { func (s *MongoDBAtlasClient) GetAccessLogs(ctx context.Context, groupID string, clusterName string, opts *GetAccessLogsOptions) (ret []*mongodbatlas.AccessLogs, err error) { options := mongodbatlas.AccessLogOptions{ // Earliest Timestamp in epoch milliseconds from when Atlas should access log results - Start: fmt.Sprintf("%d", opts.MinDate.UTC().UnixMilli()), + Start: strconv.FormatInt(opts.MinDate.UTC().UnixMilli(), 10), // Latest Timestamp in epoch milliseconds from when Atlas should access log results - End: fmt.Sprintf("%d", opts.MaxDate.UTC().UnixMilli()), + End: strconv.FormatInt(opts.MaxDate.UTC().UnixMilli(), 10), // If true, only return successful access attempts; if false, only return failed access attempts // If nil, return both successful and failed access attempts AuthResult: opts.AuthResult, diff --git a/receiver/mongodbreceiver/config.go b/receiver/mongodbreceiver/config.go index 62cace947979..17539c93d980 100644 --- a/receiver/mongodbreceiver/config.go +++ b/receiver/mongodbreceiver/config.go @@ -61,7 +61,7 @@ func (c *Config) Validate() error { func (c *Config) ClientOptions() *options.ClientOptions { clientOptions := options.Client() - connString := fmt.Sprintf("mongodb://%s", strings.Join(c.hostlist(), ",")) + connString := "mongodb://" + strings.Join(c.hostlist(), ",") clientOptions.ApplyURI(connString) if c.Timeout > 0 { diff --git a/receiver/otelarrowreceiver/internal/arrow/arrow.go b/receiver/otelarrowreceiver/internal/arrow/arrow.go index 50653b2e9a3a..21d3a421bcf7 100644 --- a/receiver/otelarrowreceiver/internal/arrow/arrow.go +++ b/receiver/otelarrowreceiver/internal/arrow/arrow.go @@ -6,7 +6,6 @@ package arrow // import "github.com/open-telemetry/opentelemetry-collector-contr import ( "context" "errors" - "fmt" "io" "runtime" "strings" @@ -50,10 +49,10 @@ const ( ) var ( - ErrNoMetricsConsumer = fmt.Errorf("no metrics consumer") - 
ErrNoLogsConsumer = fmt.Errorf("no logs consumer") - ErrNoTracesConsumer = fmt.Errorf("no traces consumer") - ErrUnrecognizedPayload = consumererror.NewPermanent(fmt.Errorf("unrecognized OTel-Arrow payload")) + ErrNoMetricsConsumer = errors.New("no metrics consumer") + ErrNoLogsConsumer = errors.New("no logs consumer") + ErrNoTracesConsumer = errors.New("no traces consumer") + ErrUnrecognizedPayload = consumererror.NewPermanent(errors.New("unrecognized OTel-Arrow payload")) ) type Consumers interface { diff --git a/receiver/otelarrowreceiver/internal/arrow/arrow_test.go b/receiver/otelarrowreceiver/internal/arrow/arrow_test.go index ec35cb2ef992..ea011caa82d7 100644 --- a/receiver/otelarrowreceiver/internal/arrow/arrow_test.go +++ b/receiver/otelarrowreceiver/internal/arrow/arrow_test.go @@ -7,6 +7,7 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "io" "strings" @@ -340,9 +341,9 @@ func (ctc *commonTestCase) newErrorConsumer() arrowRecord.ConsumerAPI { mock := arrowRecordMock.NewMockConsumerAPI(ctc.ctrl) mock.EXPECT().Close().Times(1).Return(nil) - mock.EXPECT().TracesFrom(gomock.Any()).AnyTimes().Return(nil, fmt.Errorf("test invalid error")) - mock.EXPECT().MetricsFrom(gomock.Any()).AnyTimes().Return(nil, fmt.Errorf("test invalid error")) - mock.EXPECT().LogsFrom(gomock.Any()).AnyTimes().Return(nil, fmt.Errorf("test invalid error")) + mock.EXPECT().TracesFrom(gomock.Any()).AnyTimes().Return(nil, errors.New("test invalid error")) + mock.EXPECT().MetricsFrom(gomock.Any()).AnyTimes().Return(nil, errors.New("test invalid error")) + mock.EXPECT().LogsFrom(gomock.Any()).AnyTimes().Return(nil, errors.New("test invalid error")) return mock } @@ -549,7 +550,7 @@ func TestReceiverRecvError(t *testing.T) { ctc.start(ctc.newRealConsumer, defaultBQ()) - ctc.putBatch(nil, fmt.Errorf("test recv error")) + ctc.putBatch(nil, errors.New("test recv error")) err := ctc.wait() require.ErrorContains(t, err, "test recv error") @@ -1259,7 +1260,7 @@ func 
testReceiverAuthHeaders(t *testing.T, includeMeta bool, dataAuth bool) { Metadata: client.NewMetadata(newmd), }), nil } - return ctx, fmt.Errorf("not authorized") + return ctx, errors.New("not authorized") }) go func() { diff --git a/receiver/otelarrowreceiver/otelarrow_test.go b/receiver/otelarrowreceiver/otelarrow_test.go index 0cb563007358..02b966711c4c 100644 --- a/receiver/otelarrowreceiver/otelarrow_test.go +++ b/receiver/otelarrowreceiver/otelarrow_test.go @@ -575,7 +575,7 @@ func TestGRPCArrowReceiver(t *testing.T) { headerBuf.Reset() err := hpd.WriteField(hpack.HeaderField{ Name: "seq", - Value: fmt.Sprint(i), + Value: strconv.Itoa(i), }) require.NoError(t, err) err = hpd.WriteField(hpack.HeaderField{ @@ -584,7 +584,7 @@ func TestGRPCArrowReceiver(t *testing.T) { }) require.NoError(t, err) expectMDs = append(expectMDs, metadata.MD{ - "seq": []string{fmt.Sprint(i)}, + "seq": []string{strconv.Itoa(i)}, "test": []string{"value"}, }) @@ -668,7 +668,7 @@ func TestGRPCArrowReceiverAuth(t *testing.T) { map[component.ID]component.Component{ authID: newTestAuthExtension(t, func(ctx context.Context, _ map[string][]string) (context.Context, error) { if ctx.Value(inStreamCtx{}) != nil { - return ctx, fmt.Errorf(errorString) + return ctx, errors.New(errorString) } return context.WithValue(ctx, inStreamCtx{}, t), nil }), @@ -760,7 +760,7 @@ func TestConcurrentArrowReceiver(t *testing.T) { headerBuf.Reset() err := hpd.WriteField(hpack.HeaderField{ Name: "seq", - Value: fmt.Sprint(i), + Value: strconv.Itoa(i), }) assert.NoError(t, err) diff --git a/receiver/podmanreceiver/libpod_client.go b/receiver/podmanreceiver/libpod_client.go index 0e65155f2042..cb084fc67b71 100644 --- a/receiver/podmanreceiver/libpod_client.go +++ b/receiver/podmanreceiver/libpod_client.go @@ -18,7 +18,7 @@ import ( ) var ( - errNoStatsFound = fmt.Errorf("No stats found") + errNoStatsFound = errors.New("No stats found") ) type libpodClient struct { diff --git 
a/receiver/podmanreceiver/libpod_client_test.go b/receiver/podmanreceiver/libpod_client_test.go index 80d50ffd265f..eb23a85825b7 100644 --- a/receiver/podmanreceiver/libpod_client_test.go +++ b/receiver/podmanreceiver/libpod_client_test.go @@ -8,7 +8,6 @@ package podmanreceiver import ( "context" "errors" - "fmt" "io" "net" "net/http" @@ -60,7 +59,7 @@ func TestStats(t *testing.T) { defer srv.Close() config := &Config{ - Endpoint: fmt.Sprintf("unix://%s", addr), + Endpoint: "unix://" + addr, } cli, err := newLibpodClient(zap.NewNop(), config) @@ -116,7 +115,7 @@ func TestStatsError(t *testing.T) { defer srv.Close() config := &Config{ - Endpoint: fmt.Sprintf("unix://%s", addr), + Endpoint: "unix://" + addr, } cli, err := newLibpodClient(zap.NewNop(), config) @@ -150,7 +149,7 @@ func TestList(t *testing.T) { defer srv.Close() config := &Config{ - Endpoint: fmt.Sprintf("unix://%s", addr), + Endpoint: "unix://" + addr, } cli, err := newLibpodClient(zap.NewNop(), config) @@ -221,7 +220,7 @@ func TestEvents(t *testing.T) { defer srv.Close() config := &Config{ - Endpoint: fmt.Sprintf("unix://%s", addr), + Endpoint: "unix://" + addr, } cli, err := newLibpodClient(zap.NewNop(), config) diff --git a/receiver/podmanreceiver/podman_test.go b/receiver/podmanreceiver/podman_test.go index 3cb648587ef8..5225b940a9cc 100644 --- a/receiver/podmanreceiver/podman_test.go +++ b/receiver/podmanreceiver/podman_test.go @@ -7,7 +7,6 @@ package podmanreceiver import ( "context" - "fmt" "net/http" "net/http/httptest" "net/url" @@ -69,7 +68,7 @@ func TestWatchingTimeouts(t *testing.T) { defer os.Remove(addr) config := &Config{ - Endpoint: fmt.Sprintf("unix://%s", addr), + Endpoint: "unix://" + addr, ControllerConfig: scraperhelper.ControllerConfig{ Timeout: 50 * time.Millisecond, }, @@ -121,7 +120,7 @@ func TestEventLoopHandlesError(t *testing.T) { observed, logs := observer.New(zapcore.WarnLevel) config := &Config{ - Endpoint: fmt.Sprintf("unix://%s", addr), + Endpoint: "unix://" + addr, 
ControllerConfig: scraperhelper.ControllerConfig{ Timeout: 50 * time.Millisecond, }, diff --git a/receiver/postgresqlreceiver/client.go b/receiver/postgresqlreceiver/client.go index fde63bd0ba56..056167eea49c 100644 --- a/receiver/postgresqlreceiver/client.go +++ b/receiver/postgresqlreceiver/client.go @@ -116,7 +116,7 @@ func (c postgreSQLConfig) ConnectionString() (string, error) { if c.address.Transport == confignet.TransportTypeUnix { // lib/pg expects a unix socket host to start with a "/" and appends the appropriate .s.PGSQL.port internally - host = fmt.Sprintf("/%s", host) + host = "/" + host } return fmt.Sprintf("port=%s host=%s user=%s password=%s dbname=%s %s", port, host, c.username, c.password, database, sslConnectionString(c.tls)), nil diff --git a/receiver/postgresqlreceiver/scraper_test.go b/receiver/postgresqlreceiver/scraper_test.go index e660e892d869..b1c967b32aab 100644 --- a/receiver/postgresqlreceiver/scraper_test.go +++ b/receiver/postgresqlreceiver/scraper_test.go @@ -5,6 +5,7 @@ package postgresqlreceiver import ( "context" + "errors" "fmt" "path/filepath" "testing" @@ -453,7 +454,7 @@ func (m *mockClient) initMocks(database string, schema string, databases []strin lockType: "relation", locks: 5600, }, - }, fmt.Errorf("some error")) + }, errors.New("some error")) m.On("getReplicationStats", mock.Anything).Return([]replicationStats{ { clientAddr: "unix", @@ -542,8 +543,8 @@ func (m *mockClient) initMocks(database string, schema string, databases []strin m.On("getDatabaseTableMetrics", mock.Anything, database).Return(tableMetrics, nil) m.On("getBlocksReadByTable", mock.Anything, database).Return(blocksMetrics, nil) - index1 := fmt.Sprintf("%s_test1_pkey", database) - index2 := fmt.Sprintf("%s_test2_pkey", database) + index1 := database + "_test1_pkey" + index2 := database + "_test2_pkey" indexStats := map[indexIdentifer]indexStat{ indexKey(database, schema, table1, index1): { database: database, diff --git 
a/receiver/prometheusreceiver/metrics_receiver.go b/receiver/prometheusreceiver/metrics_receiver.go index 90613149b14b..239a6172c466 100644 --- a/receiver/prometheusreceiver/metrics_receiver.go +++ b/receiver/prometheusreceiver/metrics_receiver.go @@ -111,7 +111,7 @@ func (r *pReceiver) initPrometheusComponents(ctx context.Context, logger log.Log if r.discoveryManager == nil { // NewManager can sometimes return nil if it encountered an error, but // the error message is logged separately. - return fmt.Errorf("failed to create discovery manager") + return errors.New("failed to create discovery manager") } go func() { diff --git a/receiver/prometheusreceiver/metrics_receiver_scrape_config_files_test.go b/receiver/prometheusreceiver/metrics_receiver_scrape_config_files_test.go index 67fa4e4105b0..94329dde22f3 100644 --- a/receiver/prometheusreceiver/metrics_receiver_scrape_config_files_test.go +++ b/receiver/prometheusreceiver/metrics_receiver_scrape_config_files_test.go @@ -4,7 +4,6 @@ package prometheusreceiver import ( - "fmt" "os" "testing" @@ -42,8 +41,8 @@ func TestScrapeConfigFiles(t *testing.T) { marshalledScrapeConfigs, err := yaml.Marshal(cfg.PrometheusConfig.ScrapeConfigs) require.NoError(t, err) tmpDir := t.TempDir() - cfgFileName := fmt.Sprintf("%s/test-scrape-config.yaml", tmpDir) - scrapeConfigFileContent := fmt.Sprintf("scrape_configs:\n%s", string(marshalledScrapeConfigs)) + cfgFileName := tmpDir + "/test-scrape-config.yaml" + scrapeConfigFileContent := "scrape_configs:\n" + string(marshalledScrapeConfigs) err = os.WriteFile(cfgFileName, []byte(scrapeConfigFileContent), 0400) require.NoError(t, err) cfg.PrometheusConfig.ScrapeConfigs = []*config.ScrapeConfig{} diff --git a/receiver/prometheusreceiver/targetallocator/config.go b/receiver/prometheusreceiver/targetallocator/config.go index 5cd9d719574b..f0a1199f6490 100644 --- a/receiver/prometheusreceiver/targetallocator/config.go +++ b/receiver/prometheusreceiver/targetallocator/config.go @@ -5,6 +5,7 
@@ package targetallocator // import "github.com/open-telemetry/opentelemetry-colle import ( "encoding/base64" + "errors" "fmt" "net/url" "os" @@ -37,7 +38,7 @@ func (cfg *Config) Validate() error { } // ensure valid collectorID without variables if cfg.CollectorID == "" || strings.Contains(cfg.CollectorID, "${") { - return fmt.Errorf("CollectorID is not a valid ID") + return errors.New("CollectorID is not a valid ID") } return nil diff --git a/receiver/prometheusreceiver/targetallocator/manager.go b/receiver/prometheusreceiver/targetallocator/manager.go index 06b3f2802d6d..cd431a3ed72d 100644 --- a/receiver/prometheusreceiver/targetallocator/manager.go +++ b/receiver/prometheusreceiver/targetallocator/manager.go @@ -182,7 +182,7 @@ func (m *Manager) applyCfg() error { } func getScrapeConfigsResponse(httpClient *http.Client, baseURL string) (map[string]*promconfig.ScrapeConfig, error) { - scrapeConfigsURL := fmt.Sprintf("%s/scrape_configs", baseURL) + scrapeConfigsURL := baseURL + "/scrape_configs" _, err := url.Parse(scrapeConfigsURL) // check if valid if err != nil { return nil, err diff --git a/receiver/receivercreator/consumer.go b/receiver/receivercreator/consumer.go index 35fdf7efe8bb..923ba41c171a 100644 --- a/receiver/receivercreator/consumer.go +++ b/receiver/receivercreator/consumer.go @@ -5,6 +5,7 @@ package receivercreator // import "github.com/open-telemetry/opentelemetry-colle import ( "context" + "errors" "fmt" "go.opentelemetry.io/collector/consumer" @@ -80,7 +81,7 @@ func (*enhancingConsumer) Capabilities() consumer.Capabilities { func (ec *enhancingConsumer) ConsumeLogs(ctx context.Context, ld plog.Logs) error { if ec.logs == nil { - return fmt.Errorf("no log consumer available") + return errors.New("no log consumer available") } rl := ld.ResourceLogs() for i := 0; i < rl.Len(); i++ { @@ -92,7 +93,7 @@ func (ec *enhancingConsumer) ConsumeLogs(ctx context.Context, ld plog.Logs) erro func (ec *enhancingConsumer) ConsumeMetrics(ctx context.Context, 
md pmetric.Metrics) error { if ec.metrics == nil { - return fmt.Errorf("no metric consumer available") + return errors.New("no metric consumer available") } rm := md.ResourceMetrics() for i := 0; i < rm.Len(); i++ { @@ -104,7 +105,7 @@ func (ec *enhancingConsumer) ConsumeMetrics(ctx context.Context, md pmetric.Metr func (ec *enhancingConsumer) ConsumeTraces(ctx context.Context, td ptrace.Traces) error { if ec.traces == nil { - return fmt.Errorf("no trace consumer available") + return errors.New("no trace consumer available") } rs := td.ResourceSpans() for i := 0; i < rs.Len(); i++ { diff --git a/receiver/receivercreator/receiver.go b/receiver/receivercreator/receiver.go index c1aab76c9014..5715f40456c0 100644 --- a/receiver/receivercreator/receiver.go +++ b/receiver/receivercreator/receiver.go @@ -5,6 +5,7 @@ package receivercreator // import "github.com/open-telemetry/opentelemetry-colle import ( "context" + "errors" "fmt" "go.opentelemetry.io/collector/component" @@ -44,7 +45,7 @@ type host interface { func (rc *receiverCreator) Start(_ context.Context, h component.Host) error { rcHost, ok := h.(host) if !ok { - return fmt.Errorf("the receivercreator is not compatible with the provided component.host") + return errors.New("the receivercreator is not compatible with the provided component.host") } rc.observerHandler = &observerHandler{ diff --git a/receiver/sapmreceiver/factory.go b/receiver/sapmreceiver/factory.go index 8d2ded01073a..dabaa4a6871d 100644 --- a/receiver/sapmreceiver/factory.go +++ b/receiver/sapmreceiver/factory.go @@ -7,6 +7,7 @@ package sapmreceiver // import "github.com/open-telemetry/opentelemetry-collecto import ( "context" + "errors" "fmt" "net" "strconv" @@ -54,7 +55,7 @@ func extractPortFromEndpoint(endpoint string) (int, error) { return 0, fmt.Errorf("endpoint port is not a number: %w", err) } if port < 1 || port > 65535 { - return 0, fmt.Errorf("port number must be between 1 and 65535") + return 0, errors.New("port number must be between 
1 and 65535") } return int(port), nil } diff --git a/receiver/signalfxreceiver/factory.go b/receiver/signalfxreceiver/factory.go index 87f71817f67d..a8ac397e3dd0 100644 --- a/receiver/signalfxreceiver/factory.go +++ b/receiver/signalfxreceiver/factory.go @@ -5,6 +5,7 @@ package signalfxreceiver // import "github.com/open-telemetry/opentelemetry-coll import ( "context" + "errors" "fmt" "net" "strconv" @@ -56,7 +57,7 @@ func extractPortFromEndpoint(endpoint string) (int, error) { return 0, fmt.Errorf("endpoint port is not a number: %w", err) } if port < 1 || port > 65535 { - return 0, fmt.Errorf("port number must be between 1 and 65535") + return 0, errors.New("port number must be between 1 and 65535") } return int(port), nil } diff --git a/receiver/skywalkingreceiver/config.go b/receiver/skywalkingreceiver/config.go index 02afbd1a8a43..ae94bbd19f70 100644 --- a/receiver/skywalkingreceiver/config.go +++ b/receiver/skywalkingreceiver/config.go @@ -4,6 +4,7 @@ package skywalkingreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/skywalkingreceiver" import ( + "errors" "fmt" "go.opentelemetry.io/collector/component" @@ -34,7 +35,7 @@ var _ confmap.Unmarshaler = (*Config)(nil) // Validate checks the receiver configuration is valid func (cfg *Config) Validate() error { if cfg.GRPC == nil && cfg.HTTP == nil { - return fmt.Errorf("must specify at least one protocol when using the Skywalking receiver") + return errors.New("must specify at least one protocol when using the Skywalking receiver") } if cfg.GRPC != nil { @@ -56,7 +57,7 @@ func (cfg *Config) Validate() error { // Unmarshal a config.Parser into the config struct. 
func (cfg *Config) Unmarshal(componentParser *confmap.Conf) error { if componentParser == nil || len(componentParser.AllKeys()) == 0 { - return fmt.Errorf("empty config for Skywalking receiver") + return errors.New("empty config for Skywalking receiver") } // UnmarshalExact will not set struct properties to nil even if no key is provided, diff --git a/receiver/skywalkingreceiver/factory.go b/receiver/skywalkingreceiver/factory.go index 458bed44ad23..933e97ca06bd 100644 --- a/receiver/skywalkingreceiver/factory.go +++ b/receiver/skywalkingreceiver/factory.go @@ -7,6 +7,7 @@ package skywalkingreceiver // import "github.com/open-telemetry/opentelemetry-co import ( "context" + "errors" "fmt" "net" "strconv" @@ -146,7 +147,7 @@ func extractPortFromEndpoint(endpoint string) (int, error) { return 0, fmt.Errorf("endpoint port is not a number: %w", err) } if port < 1 || port > 65535 { - return 0, fmt.Errorf("port number must be between 1 and 65535") + return 0, errors.New("port number must be between 1 and 65535") } return int(port), nil } diff --git a/receiver/snmpreceiver/client_test.go b/receiver/snmpreceiver/client_test.go index 5bc38d09ad29..a4b3e881a13b 100644 --- a/receiver/snmpreceiver/client_test.go +++ b/receiver/snmpreceiver/client_test.go @@ -472,7 +472,7 @@ func TestGetScalarData(t *testing.T) { var scraperErrors scrapererror.ScrapeErrors oidSlice := []string{"1"} returnedSNMPData := client.GetScalarData(oidSlice, &scraperErrors) - expectedErr := fmt.Errorf("problem with getting scalar data: data for OID '1' not a supported type") + expectedErr := errors.New("problem with getting scalar data: data for OID '1' not a supported type") require.EqualError(t, scraperErrors.Combine(), expectedErr.Error()) require.Nil(t, returnedSNMPData) }, @@ -495,7 +495,7 @@ func TestGetScalarData(t *testing.T) { var scraperErrors scrapererror.ScrapeErrors oidSlice := []string{"1"} returnedSNMPData := client.GetScalarData(oidSlice, &scraperErrors) - expectedErr := 
fmt.Errorf("problem with getting scalar data: data for OID '1' not a supported type") + expectedErr := errors.New("problem with getting scalar data: data for OID '1' not a supported type") require.EqualError(t, scraperErrors.Combine(), expectedErr.Error()) require.Nil(t, returnedSNMPData) }, @@ -518,7 +518,7 @@ func TestGetScalarData(t *testing.T) { var scraperErrors scrapererror.ScrapeErrors oidSlice := []string{"1"} returnedSNMPData := client.GetScalarData(oidSlice, &scraperErrors) - expectedErr := fmt.Errorf("problem with getting scalar data: data for OID '1' not a supported type") + expectedErr := errors.New("problem with getting scalar data: data for OID '1' not a supported type") require.EqualError(t, scraperErrors.Combine(), expectedErr.Error()) require.Nil(t, returnedSNMPData) }, @@ -833,7 +833,7 @@ func TestGetIndexedData(t *testing.T) { } var scraperErrors scrapererror.ScrapeErrors returnedSNMPData := client.GetIndexedData([]string{"1"}, &scraperErrors) - expectedErr := fmt.Errorf("problem with getting indexed data: data for OID '1.1' not a supported type") + expectedErr := errors.New("problem with getting indexed data: data for OID '1.1' not a supported type") require.EqualError(t, scraperErrors.Combine(), expectedErr.Error()) require.Nil(t, returnedSNMPData) }, @@ -855,7 +855,7 @@ func TestGetIndexedData(t *testing.T) { } var scraperErrors scrapererror.ScrapeErrors returnedSNMPData := client.GetIndexedData([]string{"1"}, &scraperErrors) - expectedErr := fmt.Errorf("problem with getting indexed data: data for OID '1.1' not a supported type") + expectedErr := errors.New("problem with getting indexed data: data for OID '1.1' not a supported type") require.EqualError(t, scraperErrors.Combine(), expectedErr.Error()) require.Nil(t, returnedSNMPData) }, @@ -877,7 +877,7 @@ func TestGetIndexedData(t *testing.T) { } var scraperErrors scrapererror.ScrapeErrors returnedSNMPData := client.GetIndexedData([]string{"1"}, &scraperErrors) - expectedErr := 
fmt.Errorf("problem with getting indexed data: data for OID '1.1' not a supported type") + expectedErr := errors.New("problem with getting indexed data: data for OID '1.1' not a supported type") require.EqualError(t, scraperErrors.Combine(), expectedErr.Error()) require.Nil(t, returnedSNMPData) }, diff --git a/receiver/solacereceiver/messaging_service_test.go b/receiver/solacereceiver/messaging_service_test.go index 1179b1311647..60d7538378fe 100644 --- a/receiver/solacereceiver/messaging_service_test.go +++ b/receiver/solacereceiver/messaging_service_test.go @@ -7,7 +7,7 @@ import ( "bytes" "context" "crypto/tls" - "fmt" + "errors" "net" "reflect" "runtime" @@ -182,7 +182,7 @@ func TestNewAMQPMessagingServiceFactory(t *testing.T) { func TestAMQPDialFailure(t *testing.T) { const expectedAddr = "some-host:1234" - var expectedErr = fmt.Errorf("some error") + var expectedErr = errors.New("some error") dialFunc = func(_ context.Context, addr string, _ *amqp.ConnOptions) (*amqp.Conn, error) { defer func() { dialFunc = amqp.Dial }() // reset dialFunc assert.Equal(t, expectedAddr, addr) @@ -207,7 +207,7 @@ func TestAMQPDialFailure(t *testing.T) { func TestAMQPDialConfigOptionsWithoutTLS(t *testing.T) { // try creating a service without a tls config calling dial expecting no tls config passed const expectedAddr = "some-host:1234" - var expectedErr = fmt.Errorf("some error") + var expectedErr = errors.New("some error") expectedAuthConnOption := amqp.SASLTypeAnonymous() dialFunc = func(_ context.Context, addr string, opts *amqp.ConnOptions) (*amqp.Conn, error) { defer func() { dialFunc = amqp.Dial }() // reset dialFunc @@ -235,7 +235,7 @@ func TestAMQPDialConfigOptionsWithoutTLS(t *testing.T) { func TestAMQPDialConfigOptionsWithTLS(t *testing.T) { // try creating a service with a tls config calling dial const expectedAddr = "some-host:1234" - var expectedErr = fmt.Errorf("some error") + var expectedErr = errors.New("some error") expectedAuthConnOption := 
amqp.SASLTypeAnonymous() expectedTLSConnOption := &tls.Config{ InsecureSkipVerify: false, @@ -302,7 +302,7 @@ func TestAMQPNewClientDialAndCloseConnFailure(t *testing.T) { closed := false conn.setCloseHandler(func() error { closed = true - return fmt.Errorf("some error") + return errors.New("some error") }) service.close(context.Background()) // expect conn.Close to have been called diff --git a/receiver/solacereceiver/receiver.go b/receiver/solacereceiver/receiver.go index 4398008982c4..893c677bd69e 100644 --- a/receiver/solacereceiver/receiver.go +++ b/receiver/solacereceiver/receiver.go @@ -6,7 +6,6 @@ package solacereceiver // import "github.com/open-telemetry/opentelemetry-collec import ( "context" "errors" - "fmt" "sync" "sync/atomic" "time" @@ -283,7 +282,7 @@ flowControlLoop: case <-ctx.Done(): s.settings.Logger.Info("Context was cancelled while attempting redelivery, exiting") disposition = nil // do not make any network requests, we are shutting down - return fmt.Errorf("delayed retry interrupted by shutdown request") + return errors.New("delayed retry interrupted by shutdown request") } } else { // error is permanent, we want to accept the message and increment the number of dropped messages s.settings.Logger.Warn("Encountered permanent error while forwarding traces to next receiver, will swallow trace", zap.Error(forwardErr)) diff --git a/receiver/solacereceiver/receiver_test.go b/receiver/solacereceiver/receiver_test.go index 5b993b2797e9..d6ce7913ce2a 100644 --- a/receiver/solacereceiver/receiver_test.go +++ b/receiver/solacereceiver/receiver_test.go @@ -6,7 +6,6 @@ package solacereceiver // import "github.com/open-telemetry/opentelemetry-collec import ( "context" "errors" - "fmt" "runtime" "sync" "sync/atomic" @@ -660,7 +659,7 @@ func TestReceiverUnmarshalVersionFailureExpectingDisable(t *testing.T) { } func TestReceiverFlowControlDelayedRetry(t *testing.T) { - someError := consumererror.NewPermanent(fmt.Errorf("some error")) + someError := 
consumererror.NewPermanent(errors.New("some error")) testCases := []struct { name string nextConsumer consumer.Traces @@ -773,7 +772,7 @@ func TestReceiverFlowControlDelayedRetry(t *testing.T) { // we want to return an error at first, then set the next consumer to a noop consumer receiver.nextConsumer, err = consumer.NewTraces(func(context.Context, ptrace.Traces) error { receiver.nextConsumer = tc.nextConsumer - return fmt.Errorf("Some temporary error") + return errors.New("Some temporary error") }) require.NoError(t, err) @@ -953,7 +952,7 @@ func TestReceiverFlowControlDelayedRetryInterrupt(t *testing.T) { return nil }) require.NoError(t, err) - return fmt.Errorf("Some temporary error") + return errors.New("Some temporary error") }) require.NoError(t, err) @@ -1050,7 +1049,7 @@ func TestReceiverFlowControlDelayedRetryMultipleRetries(t *testing.T) { }) } require.NoError(t, err) - return fmt.Errorf("Some temporary error") + return errors.New("Some temporary error") }) require.NoError(t, err) diff --git a/receiver/solacereceiver/unmarshaller_egress_test.go b/receiver/solacereceiver/unmarshaller_egress_test.go index 3bde7f1b4d53..ecb40572b957 100644 --- a/receiver/solacereceiver/unmarshaller_egress_test.go +++ b/receiver/solacereceiver/unmarshaller_egress_test.go @@ -3,7 +3,7 @@ package solacereceiver import ( - "fmt" + "strconv" "testing" "github.com/Azure/go-amqp" @@ -285,7 +285,7 @@ func TestEgressUnmarshallerEgressSpan(t *testing.T) { } var i = 1 for _, dataRef := range validEgressSpans { - name := "valid span " + fmt.Sprint(i) + name := "valid span " + strconv.Itoa(i) i++ want := dataRef.out spanData := dataRef.in diff --git a/receiver/splunkhecreceiver/receiver_test.go b/receiver/splunkhecreceiver/receiver_test.go index e09e2abfaead..39553a0a7bcb 100644 --- a/receiver/splunkhecreceiver/receiver_test.go +++ b/receiver/splunkhecreceiver/receiver_test.go @@ -450,7 +450,7 @@ func Test_splunkhecReceiver_TLS(t *testing.T) { body, err := 
json.Marshal(buildSplunkHecMsg(sec, 0)) require.NoErrorf(t, err, "failed to marshal Splunk message: %v", err) - url := fmt.Sprintf("https://%s", addr) + url := "https://" + addr req, err := http.NewRequestWithContext(context.Background(), http.MethodPost, url, bytes.NewReader(body)) require.NoErrorf(t, err, "should have no errors with new request: %v", err) @@ -1833,7 +1833,7 @@ func Test_splunkhecReceiver_rawReqHasmetadataInResource(t *testing.T) { for _, k := range []string{config.HecToOtelAttrs.Index, config.HecToOtelAttrs.SourceType, config.HecToOtelAttrs.Source, config.HecToOtelAttrs.Host} { v, ok := resource.Get(k) if !ok { - assert.Fail(t, fmt.Sprintf("does not contain query param: %s", k)) + assert.Fail(t, "does not contain query param: "+k) } assert.Equal(t, "bar", v.AsString()) } @@ -1858,7 +1858,7 @@ func Test_splunkhecReceiver_rawReqHasmetadataInResource(t *testing.T) { for _, k := range [2]string{config.HecToOtelAttrs.Index, config.HecToOtelAttrs.Source} { v, ok := resource.Get(k) if !ok { - assert.Fail(t, fmt.Sprintf("does not contain query param: %s", k)) + assert.Fail(t, "does not contain query param: "+k) } assert.Equal(t, "bar", v.AsString()) } diff --git a/receiver/sqlserverreceiver/config.go b/receiver/sqlserverreceiver/config.go index bdd79a5f935d..d44dc4a4a64b 100644 --- a/receiver/sqlserverreceiver/config.go +++ b/receiver/sqlserverreceiver/config.go @@ -4,7 +4,7 @@ package sqlserverreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sqlserverreceiver" import ( - "fmt" + "errors" "go.opentelemetry.io/collector/config/configopaque" "go.opentelemetry.io/collector/receiver/scraperhelper" @@ -35,7 +35,7 @@ func (cfg *Config) Validate() error { if !directDBConnectionEnabled(cfg) { if cfg.Server != "" || cfg.Username != "" || string(cfg.Password) != "" { - return fmt.Errorf("Found one or more of the following configuration options set: [server, port, username, password]. 
" + + return errors.New("Found one or more of the following configuration options set: [server, port, username, password]. " + "All of these options must be configured to directly connect to a SQL Server instance.") } } diff --git a/receiver/sqlserverreceiver/scraper_test.go b/receiver/sqlserverreceiver/scraper_test.go index dd1e4d654809..91ed9de2e1eb 100644 --- a/receiver/sqlserverreceiver/scraper_test.go +++ b/receiver/sqlserverreceiver/scraper_test.go @@ -6,7 +6,7 @@ package sqlserverreceiver import ( "context" "encoding/json" - "fmt" + "errors" "os" "path/filepath" "testing" @@ -179,7 +179,7 @@ func (mc mockClient) QueryRows(context.Context, ...any) ([]sqlquery.StringMap, e case getSQLServerPropertiesQuery(mc.instanceName): queryResults, err = readFile("propertyQueryData.txt") default: - return nil, fmt.Errorf("No valid query found") + return nil, errors.New("No valid query found") } if err != nil { diff --git a/receiver/sshcheckreceiver/internal/configssh/configssh.go b/receiver/sshcheckreceiver/internal/configssh/configssh.go index 8d5712cc1d21..cf13f1636059 100644 --- a/receiver/sshcheckreceiver/internal/configssh/configssh.go +++ b/receiver/sshcheckreceiver/internal/configssh/configssh.go @@ -55,7 +55,7 @@ func (c *Client) Dial(endpoint string) (err error) { func (c *Client) SFTPClient() (*SFTPClient, error) { if c.Client == nil || c.Client.Conn == nil { - return nil, fmt.Errorf("SSH client not initialized") + return nil, errors.New("SSH client not initialized") } client, err := sftp.NewClient(c.Client) if err != nil { @@ -135,7 +135,7 @@ func defaultKnownHostsPath() (string, error) { if err != nil { return "", err } - path := fmt.Sprintf("%s/.ssh/known_hosts", home) + path := home + "/.ssh/known_hosts" if _, err := os.Stat(path); err != nil { return "", errMissingKnownHosts } diff --git a/receiver/sshcheckreceiver/internal/configssh/configssh_test.go b/receiver/sshcheckreceiver/internal/configssh/configssh_test.go index 2b384653c4f5..77d418ca48e2 100644 
--- a/receiver/sshcheckreceiver/internal/configssh/configssh_test.go +++ b/receiver/sshcheckreceiver/internal/configssh/configssh_test.go @@ -4,7 +4,7 @@ package configssh // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sshcheckreceiver/internal/configssh" import ( - "fmt" + "errors" "testing" "time" @@ -164,7 +164,7 @@ func Test_Client_Dial(t *testing.T) { KeyFile: keyfile, }, dial: func(_, _ string, _ *ssh.ClientConfig) (*ssh.Client, error) { - return nil, fmt.Errorf("dial") + return nil, errors.New("dial") }, shouldError: true, }, @@ -237,7 +237,7 @@ func Test_Client_ToSFTPClient(t *testing.T) { KeyFile: keyfile, }, dial: func(_, _ string, _ *ssh.ClientConfig) (*ssh.Client, error) { - return nil, fmt.Errorf("dial") + return nil, errors.New("dial") }, shouldError: true, }, diff --git a/receiver/sshcheckreceiver/scraper_test.go b/receiver/sshcheckreceiver/scraper_test.go index 899d89bce5a9..074370b34445 100644 --- a/receiver/sshcheckreceiver/scraper_test.go +++ b/receiver/sshcheckreceiver/scraper_test.go @@ -6,7 +6,6 @@ package sshcheckreceiver // import "github.com/open-telemetry/opentelemetry-coll import ( "context" "errors" - "fmt" "io" "net" "os" @@ -49,7 +48,7 @@ func (s *sshServer) runSSHServer(t *testing.T) string { if c.User() == "otelu" && string(pass) == "otelp" { return nil, nil } - return nil, fmt.Errorf("wrong username or password") + return nil, errors.New("wrong username or password") }, } @@ -93,7 +92,7 @@ func (s *sshServer) shutdown() { func handleChannels(chans <-chan ssh.NewChannel) { for newChannel := range chans { if t := newChannel.ChannelType(); t != "session" { - if err := newChannel.Reject(ssh.UnknownChannelType, fmt.Sprintf("unknown channel type: %s", t)); err != nil { + if err := newChannel.Reject(ssh.UnknownChannelType, "unknown channel type: "+t); err != nil { return } continue diff --git a/receiver/statsdreceiver/config.go b/receiver/statsdreceiver/config.go index 92a6cc653b14..150c4aecd6ea 100644 --- 
a/receiver/statsdreceiver/config.go +++ b/receiver/statsdreceiver/config.go @@ -4,6 +4,7 @@ package statsdreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/statsdreceiver" import ( + "errors" "fmt" "time" @@ -29,7 +30,7 @@ func (c *Config) Validate() error { var errs error if c.AggregationInterval <= 0 { - errs = multierr.Append(errs, fmt.Errorf("aggregation_interval must be a positive duration")) + errs = multierr.Append(errs, errors.New("aggregation_interval must be a positive duration")) } var TimerHistogramMappingMissingObjectName bool @@ -70,7 +71,7 @@ func (c *Config) Validate() error { // Non-histogram observer w/ histogram config var empty protocol.HistogramConfig if eachMap.Histogram != empty { - errs = multierr.Append(errs, fmt.Errorf("histogram configuration requires observer_type: histogram")) + errs = multierr.Append(errs, errors.New("histogram configuration requires observer_type: histogram")) } } if len(eachMap.Summary.Percentiles) != 0 { @@ -80,13 +81,13 @@ func (c *Config) Validate() error { } } if eachMap.ObserverType != protocol.SummaryObserver { - errs = multierr.Append(errs, fmt.Errorf("summary configuration requires observer_type: summary")) + errs = multierr.Append(errs, errors.New("summary configuration requires observer_type: summary")) } } } if TimerHistogramMappingMissingObjectName { - errs = multierr.Append(errs, fmt.Errorf("must specify object id for all TimerHistogramMappings")) + errs = multierr.Append(errs, errors.New("must specify object id for all TimerHistogramMappings")) } return errs diff --git a/receiver/statsdreceiver/internal/protocol/statsd_parser.go b/receiver/statsdreceiver/internal/protocol/statsd_parser.go index 8a2ad5237c77..1efb176e66ee 100644 --- a/receiver/statsdreceiver/internal/protocol/statsd_parser.go +++ b/receiver/statsdreceiver/internal/protocol/statsd_parser.go @@ -459,7 +459,7 @@ func parseMessageToMetric(line string, enableMetricType bool, enableSimpleTags b // As per 
DogStatD protocol v1.3: // https://docs.datadoghq.com/developers/dogstatsd/datagram_shell/?tab=metrics#dogstatsd-protocol-v13 if inType != CounterType && inType != GaugeType { - return result, fmt.Errorf("only GAUGE and COUNT metrics support a timestamp") + return result, errors.New("only GAUGE and COUNT metrics support a timestamp") } timestampStr := strings.TrimPrefix(part, "T") diff --git a/receiver/vcenterreceiver/processors.go b/receiver/vcenterreceiver/processors.go index f3f06fea5f46..b743d4b2cbf1 100644 --- a/receiver/vcenterreceiver/processors.go +++ b/receiver/vcenterreceiver/processors.go @@ -200,7 +200,7 @@ func (v *vcenterMetricScraper) buildHostMetrics( } if hs.Config == nil || hs.Config.VsanHostConfig == nil || hs.Config.VsanHostConfig.ClusterInfo == nil { - v.logger.Info(fmt.Sprintf("couldn't determine UUID necessary for vSAN metrics for host %s", hs.Name)) + v.logger.Info("couldn't determine UUID necessary for vSAN metrics for host " + hs.Name) v.mb.EmitForResource(metadata.WithResource(rb.Emit())) return vmRefToComputeRef, nil } @@ -383,7 +383,7 @@ func (v *vcenterMetricScraper) buildClusterMetrics( v.recordClusterStats(ts, cr, vmGroupInfo) vSANConfig := cr.ConfigurationEx.(*types.ClusterConfigInfoEx).VsanConfigInfo if vSANConfig == nil || vSANConfig.Enabled == nil || !*vSANConfig.Enabled || vSANConfig.DefaultConfig == nil { - v.logger.Info(fmt.Sprintf("couldn't determine UUID necessary for vSAN metrics for cluster %s", cr.Name)) + v.logger.Info("couldn't determine UUID necessary for vSAN metrics for cluster " + cr.Name) v.mb.EmitForResource(metadata.WithResource(rb.Emit())) return err } diff --git a/receiver/vcenterreceiver/scraper.go b/receiver/vcenterreceiver/scraper.go index e4b4bae52f37..9c1f395f2c99 100644 --- a/receiver/vcenterreceiver/scraper.go +++ b/receiver/vcenterreceiver/scraper.go @@ -92,7 +92,7 @@ func (v *vcenterMetricScraper) Start(ctx context.Context, _ component.Host) erro connectErr := v.client.EnsureConnection(ctx) // don't 
fail to start if we cannot establish connection, just log an error if connectErr != nil { - v.logger.Error(fmt.Sprintf("unable to establish a connection to the vSphere SDK %s", connectErr.Error())) + v.logger.Error("unable to establish a connection to the vSphere SDK " + connectErr.Error()) } return nil } diff --git a/receiver/windowseventlogreceiver/receiver_others.go b/receiver/windowseventlogreceiver/receiver_others.go index a14da170e37a..57b3aaec3f49 100644 --- a/receiver/windowseventlogreceiver/receiver_others.go +++ b/receiver/windowseventlogreceiver/receiver_others.go @@ -7,7 +7,7 @@ package windowseventlogreceiver // import "github.com/open-telemetry/opentelemet import ( "context" - "fmt" + "errors" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/consumer" @@ -30,5 +30,5 @@ func createLogsReceiver( _ component.Config, _ consumer.Logs, ) (receiver.Logs, error) { - return nil, fmt.Errorf("windows eventlog receiver is only supported on Windows") + return nil, errors.New("windows eventlog receiver is only supported on Windows") } diff --git a/receiver/windowsperfcountersreceiver/config.go b/receiver/windowsperfcountersreceiver/config.go index a6b865fe7cd5..399d17dede8d 100644 --- a/receiver/windowsperfcountersreceiver/config.go +++ b/receiver/windowsperfcountersreceiver/config.go @@ -4,6 +4,7 @@ package windowsperfcountersreceiver // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/windowsperfcountersreceiver" import ( + "errors" "fmt" "go.opentelemetry.io/collector/receiver/scraperhelper" @@ -57,11 +58,11 @@ func (c *Config) Validate() error { var errs error if c.CollectionInterval <= 0 { - errs = multierr.Append(errs, fmt.Errorf("collection_interval must be a positive duration")) + errs = multierr.Append(errs, errors.New("collection_interval must be a positive duration")) } if len(c.PerfCounters) == 0 { - errs = multierr.Append(errs, fmt.Errorf("must specify at least one perf counter")) + errs = 
multierr.Append(errs, errors.New("must specify at least one perf counter")) } for name, metric := range c.MetricMetaData { @@ -116,7 +117,7 @@ func (c *Config) Validate() error { } if perfCounterMissingObjectName { - errs = multierr.Append(errs, fmt.Errorf("must specify object name for all perf counters")) + errs = multierr.Append(errs, errors.New("must specify object name for all perf counters")) } return errs diff --git a/receiver/windowsperfcountersreceiver/config_test.go b/receiver/windowsperfcountersreceiver/config_test.go index db22ea8dc0a8..22d89d091ad7 100644 --- a/receiver/windowsperfcountersreceiver/config_test.go +++ b/receiver/windowsperfcountersreceiver/config_test.go @@ -181,7 +181,7 @@ func TestLoadConfig(t *testing.T) { }, { id: component.NewIDWithName(metadata.Type, "negative-collection-interval"), - expectedErr: fmt.Sprintf("collection_interval must be a positive duration; %s", negativeCollectionIntervalErr), + expectedErr: "collection_interval must be a positive duration; " + negativeCollectionIntervalErr, }, { id: component.NewIDWithName(metadata.Type, "noperfcounters"), diff --git a/receiver/zookeeperreceiver/scraper.go b/receiver/zookeeperreceiver/scraper.go index 44bbf5ae1b51..760072514900 100644 --- a/receiver/zookeeperreceiver/scraper.go +++ b/receiver/zookeeperreceiver/scraper.go @@ -164,7 +164,7 @@ func (z *zookeeperMetricsScraper) processMntr(response []string) { int64Val, err := strconv.ParseInt(metricValue, 10, 64) if err != nil { z.logger.Debug( - fmt.Sprintf("non-integer value from %s", mntrCommand), + "non-integer value from "+mntrCommand, zap.String("value", metricValue), ) continue diff --git a/receiver/zookeeperreceiver/scraper_test.go b/receiver/zookeeperreceiver/scraper_test.go index 4e643fdd5de3..cee0c9cacfea 100644 --- a/receiver/zookeeperreceiver/scraper_test.go +++ b/receiver/zookeeperreceiver/scraper_test.go @@ -7,7 +7,6 @@ import ( "bufio" "context" "errors" - "fmt" "net" "os" "path/filepath" @@ -328,7 +327,7 @@ func 
TestZookeeperMetricsScraperScrape(t *testing.T) { return } - expectedFile := filepath.Join("testdata", "scraper", fmt.Sprintf("%s.yaml", tt.expectedMetricsFilename)) + expectedFile := filepath.Join("testdata", "scraper", tt.expectedMetricsFilename+".yaml") expectedMetrics, err := golden.ReadMetrics(expectedFile) require.NoError(t, err) From 3394d44374909d8ed6d6304b60f71d4678657f42 Mon Sep 17 00:00:00 2001 From: Antoine Toulme Date: Mon, 11 Nov 2024 15:26:59 -0800 Subject: [PATCH 5/7] [chore] initialize slice with enough capacity based on log records (#36155) --- extension/encoding/jsonlogencodingextension/extension.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/extension/encoding/jsonlogencodingextension/extension.go b/extension/encoding/jsonlogencodingextension/extension.go index d272c5873d0c..266c6d91aa60 100644 --- a/extension/encoding/jsonlogencodingextension/extension.go +++ b/extension/encoding/jsonlogencodingextension/extension.go @@ -73,7 +73,7 @@ func (e *jsonLogExtension) Shutdown(_ context.Context) error { } func (e *jsonLogExtension) logProcessor(ld plog.Logs) ([]byte, error) { - logs := make([]logBody, ld.ResourceLogs().Len()-1) + logs := make([]logBody, 0, ld.LogRecordCount()) rls := ld.ResourceLogs() for i := 0; i < rls.Len(); i++ { From 88e9445a04e2efdc9b6e88ca4813860bdc88b53a Mon Sep 17 00:00:00 2001 From: James Geisler Date: Mon, 11 Nov 2024 17:29:28 -0600 Subject: [PATCH 6/7] [receiver/datadogreceiver] add json handling for the api/v2/series endpoint (#36218) #### Description Adding json handling for the `api/v2/series` endpoint. The datadog api client libraries use json messages, however only protobuf messages are currently supported in the `api/v2/series` endpoint, so requests fail with `proto: illegal wireType 6`. If `Content-Type: application/json` is set, then we handle the json message. Otherwise, we handle the protobuf message.
#### Link to tracking issue Fixes https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/36079 #### Testing Added test with a json metric payload that now passes. Additionally, I also tested these changes in my own image and confirmed that the datadog api client libraries can now successfully ship metrics to the `api/v2/series` endpoint. I also confirmed with the following curl: ``` curl -X POST \ -H "Content-Type: application/json" \ -H "DD-API-KEY: your_api_key_here" \ -d '{ "series": [ { "resources": [ { "name": "dummyhost", "type": "host" } ], "tags": ["env:test"], "metric": "test.metric", "points": [ { "timestamp": 1730829575, "value": 1.0 } ], "type": 3 } ] }' \ https://datadog-receiver/api/v2/series {"errors":[]} ``` --------- Co-authored-by: Sean Marciniak <30928402+MovieStoreGuy@users.noreply.github.com> --- .../36079-add-datadog-json-handling.yaml | 27 +++++++ .../internal/translator/series.go | 17 +++- receiver/datadogreceiver/receiver_test.go | 79 +++++++++++++++++++ 3 files changed, 119 insertions(+), 4 deletions(-) create mode 100644 .chloggen/36079-add-datadog-json-handling.yaml diff --git a/.chloggen/36079-add-datadog-json-handling.yaml b/.chloggen/36079-add-datadog-json-handling.yaml new file mode 100644 index 000000000000..2d55b8e4056e --- /dev/null +++ b/.chloggen/36079-add-datadog-json-handling.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: 'enhancement' + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: 'datadogreceiver' + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: "Add json handling for the `api/v2/series` endpoint in the datadogreceiver" + +# Mandatory: One or more tracking issues related to the change. 
You can use the PR number here if no issue exists. +issues: [36079] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/receiver/datadogreceiver/internal/translator/series.go b/receiver/datadogreceiver/internal/translator/series.go index 9588839ebdb8..f6fac8b75210 100644 --- a/receiver/datadogreceiver/internal/translator/series.go +++ b/receiver/datadogreceiver/internal/translator/series.go @@ -4,6 +4,7 @@ package translator // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/datadogreceiver/internal/translator" import ( + "encoding/json" "io" "net/http" "strings" @@ -27,7 +28,6 @@ type SeriesList struct { Series []datadogV1.Series `json:"series"` } -// TODO: add handling for JSON format in additional to protobuf? 
func (mt *MetricsTranslator) HandleSeriesV2Payload(req *http.Request) (mp []*gogen.MetricPayload_MetricSeries, err error) { buf := GetBuffer() defer PutBuffer(buf) @@ -35,11 +35,20 @@ func (mt *MetricsTranslator) HandleSeriesV2Payload(req *http.Request) (mp []*gog return mp, err } + contentType := req.Header.Get("Content-Type") + pl := new(gogen.MetricPayload) - if err := pl.Unmarshal(buf.Bytes()); err != nil { - return mp, err - } + // handle json messages if set, otherwise handle protobuf + if contentType == "application/json" { + if err := json.Unmarshal(buf.Bytes(), &pl); err != nil { + return mp, err + } + } else { + if err := pl.Unmarshal(buf.Bytes()); err != nil { + return mp, err + } + } return pl.GetSeries(), nil } diff --git a/receiver/datadogreceiver/receiver_test.go b/receiver/datadogreceiver/receiver_test.go index 7283c8ba2f77..c9fc5c9b00f9 100644 --- a/receiver/datadogreceiver/receiver_test.go +++ b/receiver/datadogreceiver/receiver_test.go @@ -459,6 +459,85 @@ func TestDatadogMetricsV2_EndToEnd(t *testing.T) { assert.Equal(t, pcommon.Timestamp(1636629071*1_000_000_000), metric.Sum().DataPoints().At(1).StartTimestamp()) } +func TestDatadogMetricsV2_EndToEndJSON(t *testing.T) { + cfg := createDefaultConfig().(*Config) + cfg.Endpoint = "localhost:0" // Using a randomly assigned address + sink := new(consumertest.MetricsSink) + + dd, err := newDataDogReceiver( + cfg, + receivertest.NewNopSettings(), + ) + require.NoError(t, err, "Must not error when creating receiver") + dd.(*datadogReceiver).nextMetricsConsumer = sink + + require.NoError(t, dd.Start(context.Background(), componenttest.NewNopHost())) + defer func() { + require.NoError(t, dd.Shutdown(context.Background())) + }() + + metricsPayloadV2 := []byte(`{ + "series": [ + { + "metric": "system.load.1", + "type": 1, + "points": [ + { + "timestamp": 1636629071, + "value": 1.5 + }, + { + "timestamp": 1636629081, + "value": 2.0 + } + ], + "resources": [ + { + "name": "dummyhost", + "type": "host" + } + 
] + } + ] + }`) + + req, err := http.NewRequest( + http.MethodPost, + fmt.Sprintf("http://%s/api/v2/series", dd.(*datadogReceiver).address), + io.NopCloser(bytes.NewReader(metricsPayloadV2)), + ) + + req.Header.Set("Content-Type", "application/json") + + require.NoError(t, err, "Must not error when creating request") + + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err, "Must not error performing request") + + body, err := io.ReadAll(resp.Body) + require.NoError(t, multierr.Combine(err, resp.Body.Close()), "Must not error when reading body") + require.JSONEq(t, `{"errors": []}`, string(body), "Expected JSON response to be `{\"errors\": []}`, got %s", string(body)) + require.Equal(t, http.StatusAccepted, resp.StatusCode) + + mds := sink.AllMetrics() + require.Len(t, mds, 1) + got := mds[0] + require.Equal(t, 1, got.ResourceMetrics().Len()) + metrics := got.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics() + assert.Equal(t, 1, metrics.Len()) + metric := metrics.At(0) + assert.Equal(t, pmetric.MetricTypeSum, metric.Type()) + assert.Equal(t, "system.load.1", metric.Name()) + assert.Equal(t, pmetric.AggregationTemporalityDelta, metric.Sum().AggregationTemporality()) + assert.False(t, metric.Sum().IsMonotonic()) + assert.Equal(t, pcommon.Timestamp(1636629071*1_000_000_000), metric.Sum().DataPoints().At(0).Timestamp()) + assert.Equal(t, 1.5, metric.Sum().DataPoints().At(0).DoubleValue()) + assert.Equal(t, pcommon.Timestamp(0), metric.Sum().DataPoints().At(0).StartTimestamp()) + assert.Equal(t, pcommon.Timestamp(1636629081*1_000_000_000), metric.Sum().DataPoints().At(1).Timestamp()) + assert.Equal(t, 2.0, metric.Sum().DataPoints().At(1).DoubleValue()) + assert.Equal(t, pcommon.Timestamp(1636629071*1_000_000_000), metric.Sum().DataPoints().At(1).StartTimestamp()) +} + func TestDatadogSketches_EndToEnd(t *testing.T) { cfg := createDefaultConfig().(*Config) cfg.Endpoint = "localhost:0" // Using a randomly assigned address From 
4e7f7d2fdde0b5fd612b0a094450e7d07b27cf25 Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Tue, 12 Nov 2024 03:34:37 +0100 Subject: [PATCH 7/7] [chore]: enable whitespace linter for processor and receiver (#36297) #### Description [whitespace](https://golangci-lint.run/usage/linters/#whitespace) is a linter that checks for unnecessary newlines at the start and end of functions. Signed-off-by: Matthieu MOREL --- .../attributesprocessor/attributes_metric.go | 1 - processor/attributesprocessor/factory.go | 1 - .../internal/data/expo_test.go | 1 - .../internal/delta/delta_test.go | 1 - .../processor_test.go | 1 - .../deltatorateprocessor/processor_test.go | 1 - processor/filterprocessor/config_test.go | 5 ---- processor/filterprocessor/logs_test.go | 3 --- processor/filterprocessor/metrics_test.go | 1 - processor/filterprocessor/traces_test.go | 1 - processor/geoipprocessor/factory.go | 1 - .../groupbyattrsprocessor/attribute_groups.go | 1 - processor/groupbyattrsprocessor/factory.go | 3 --- processor/groupbyattrsprocessor/processor.go | 7 ------ .../groupbyattrsprocessor/processor_test.go | 1 - processor/groupbytraceprocessor/factory.go | 2 -- .../internal/kube/client.go | 1 - .../internal/kube/client_test.go | 3 --- .../internal/kube/informer.go | 2 -- .../k8sattributesprocessor/options_test.go | 1 - .../k8sattributesprocessor/processor_test.go | 4 ---- processor/logstransformprocessor/processor.go | 1 - .../logstransformprocessor/processor_test.go | 1 - .../factory_test.go | 1 - .../processor_test.go | 2 -- .../metricstransformprocessor/factory.go | 1 - .../metrics_transform_processor_otlp.go | 2 -- .../logsprocessor.go | 1 - .../logsprocessor_test.go | 2 -- .../tracesprocessor_test.go | 5 ---- .../remotetapprocessor/processor_test.go | 1 - .../internal/heroku/heroku_test.go | 1 - .../internal/system/system_test.go | 2 -- processor/routingprocessor/extract.go | 1 - processor/routingprocessor/metrics_test.go | 1 - processor/routingprocessor/traces_test.go | 1 - 
.../internal/migrate/attributes_test.go | 1 - processor/spanprocessor/factory.go | 1 - processor/spanprocessor/span_test.go | 5 ---- .../sumologicprocessor/processor_test.go | 1 - .../translate_attributes_processor_test.go | 1 - .../internal/sampling/and.go | 2 -- .../internal/sampling/and_test.go | 4 ---- .../sampling/boolean_tag_filter_test.go | 2 -- .../internal/sampling/composite.go | 1 - .../internal/sampling/composite_test.go | 7 ------ .../sampling/numeric_tag_filter_test.go | 2 -- .../sampling/string_tag_filter_test.go | 1 - .../sampling/trace_state_filter_test.go | 1 - .../tailsamplingprocessor/processor_test.go | 1 - .../internal/common/processor.go | 1 - .../internal/logs/processor_test.go | 2 -- ...nvert_exponential_hist_to_explicit_hist.go | 2 -- ..._exponential_hist_to_explicit_hist_test.go | 24 ------------------- .../internal/metrics/functions_test.go | 1 - receiver/splunkenterprisereceiver/scraper.go | 1 - 56 files changed, 126 deletions(-) diff --git a/processor/attributesprocessor/attributes_metric.go b/processor/attributesprocessor/attributes_metric.go index b09de40d4340..3ae4afa77319 100644 --- a/processor/attributesprocessor/attributes_metric.go +++ b/processor/attributesprocessor/attributes_metric.go @@ -62,7 +62,6 @@ func (a *metricAttributesProcessor) processMetrics(ctx context.Context, md pmetr // Attributes are provided for each log and trace, but not at the metric level // Need to process attributes for every data point within a metric. func (a *metricAttributesProcessor) processMetricAttributes(ctx context.Context, m pmetric.Metric) { - // This is a lot of repeated code, but since there is no single parent superclass // between metric data types, we can't use polymorphism. 
//exhaustive:enforce diff --git a/processor/attributesprocessor/factory.go b/processor/attributesprocessor/factory.go index 9a7a73dc75e7..5844db4744d2 100644 --- a/processor/attributesprocessor/factory.go +++ b/processor/attributesprocessor/factory.go @@ -92,7 +92,6 @@ func createMetricsProcessor( cfg component.Config, nextConsumer consumer.Metrics, ) (processor.Metrics, error) { - oCfg := cfg.(*Config) attrProc, err := attraction.NewAttrProc(&oCfg.Settings) if err != nil { diff --git a/processor/deltatocumulativeprocessor/internal/data/expo_test.go b/processor/deltatocumulativeprocessor/internal/data/expo_test.go index f544932a4530..bbc88dc9c5eb 100644 --- a/processor/deltatocumulativeprocessor/internal/data/expo_test.go +++ b/processor/deltatocumulativeprocessor/internal/data/expo_test.go @@ -117,7 +117,6 @@ func TestExpoAdd(t *testing.T) { } t.Run(cs.name, run(cs.dp, cs.in)) } - } func rawbs(data []uint64, offset int32) expo.Buckets { diff --git a/processor/deltatocumulativeprocessor/internal/delta/delta_test.go b/processor/deltatocumulativeprocessor/internal/delta/delta_test.go index 4b0be3be724d..961428ed3c4a 100644 --- a/processor/deltatocumulativeprocessor/internal/delta/delta_test.go +++ b/processor/deltatocumulativeprocessor/internal/delta/delta_test.go @@ -222,7 +222,6 @@ func TestErrs(t *testing.T) { require.Equal(t, r1.IntValue(), r2.IntValue()) }) } - } func time(ts int) pcommon.Timestamp { diff --git a/processor/deltatocumulativeprocessor/processor_test.go b/processor/deltatocumulativeprocessor/processor_test.go index 12d4452e621f..506cd4a7a511 100644 --- a/processor/deltatocumulativeprocessor/processor_test.go +++ b/processor/deltatocumulativeprocessor/processor_test.go @@ -72,7 +72,6 @@ func TestProcessor(t *testing.T) { } } }) - } } diff --git a/processor/deltatorateprocessor/processor_test.go b/processor/deltatorateprocessor/processor_test.go index facc997d0c65..3b859d490e0c 100644 --- a/processor/deltatorateprocessor/processor_test.go +++ 
b/processor/deltatorateprocessor/processor_test.go @@ -172,7 +172,6 @@ func TestCumulativeToDeltaProcessor(t *testing.T) { require.Equal(t, eDataPoints.At(j).DoubleValue(), aDataPoints.At(j).DoubleValue()) } } - } require.NoError(t, mgp.Shutdown(ctx)) diff --git a/processor/filterprocessor/config_test.go b/processor/filterprocessor/config_test.go index eda1d7cec90e..d009cdd739a0 100644 --- a/processor/filterprocessor/config_test.go +++ b/processor/filterprocessor/config_test.go @@ -97,7 +97,6 @@ func TestLoadingConfigStrict(t *testing.T) { // TestLoadingConfigStrictLogs tests loading testdata/config_logs_strict.yaml func TestLoadingConfigStrictLogs(t *testing.T) { - testDataLogPropertiesInclude := &LogMatchProperties{ LogMatchType: strictType, ResourceAttributes: []filterconfig.Attribute{ @@ -180,7 +179,6 @@ func TestLoadingConfigStrictLogs(t *testing.T) { // TestLoadingConfigSeverityLogsStrict tests loading testdata/config_logs_severity_strict.yaml func TestLoadingConfigSeverityLogsStrict(t *testing.T) { - testDataLogPropertiesInclude := &LogMatchProperties{ LogMatchType: strictType, SeverityTexts: []string{"INFO"}, @@ -305,7 +303,6 @@ func TestLoadingConfigSeverityLogsRegexp(t *testing.T) { // TestLoadingConfigBodyLogsStrict tests loading testdata/config_logs_body_strict.yaml func TestLoadingConfigBodyLogsStrict(t *testing.T) { - testDataLogPropertiesInclude := &LogMatchProperties{ LogMatchType: strictType, LogBodies: []string{"This is an important event"}, @@ -368,7 +365,6 @@ func TestLoadingConfigBodyLogsStrict(t *testing.T) { // TestLoadingConfigBodyLogsStrict tests loading testdata/config_logs_body_regexp.yaml func TestLoadingConfigBodyLogsRegexp(t *testing.T) { - testDataLogPropertiesInclude := &LogMatchProperties{ LogMatchType: regexpType, LogBodies: []string{"^IMPORTANT:"}, @@ -832,7 +828,6 @@ func TestLogSeverity_severityValidate(t *testing.T) { } func TestLoadingConfigOTTL(t *testing.T) { - cm, err := confmaptest.LoadConf(filepath.Join("testdata", 
"config_ottl.yaml")) require.NoError(t, err) diff --git a/processor/filterprocessor/logs_test.go b/processor/filterprocessor/logs_test.go index ebe1d037c32b..0c4783e21bfd 100644 --- a/processor/filterprocessor/logs_test.go +++ b/processor/filterprocessor/logs_test.go @@ -794,7 +794,6 @@ func TestFilterLogProcessorTelemetry(t *testing.T) { } tel.assertMetrics(t, want) - } func constructLogs() plog.Logs { @@ -825,7 +824,6 @@ func fillLogOne(log plog.LogRecord) { log.Attributes().PutStr("http.path", "/health") log.Attributes().PutStr("http.url", "http://localhost/health") log.Attributes().PutStr("flags", "A|B|C") - } func fillLogTwo(log plog.LogRecord) { @@ -836,5 +834,4 @@ func fillLogTwo(log plog.LogRecord) { log.Attributes().PutStr("http.path", "/health") log.Attributes().PutStr("http.url", "http://localhost/health") log.Attributes().PutStr("flags", "C|D") - } diff --git a/processor/filterprocessor/metrics_test.go b/processor/filterprocessor/metrics_test.go index 6056d98deca8..6ba0e029f314 100644 --- a/processor/filterprocessor/metrics_test.go +++ b/processor/filterprocessor/metrics_test.go @@ -943,7 +943,6 @@ func TestFilterMetricProcessorWithOTTL(t *testing.T) { if tt.filterEverything { assert.Equal(t, processorhelper.ErrSkipProcessingData, err) } else { - exTd := constructMetrics() tt.want(exTd) assert.Equal(t, exTd, got) diff --git a/processor/filterprocessor/traces_test.go b/processor/filterprocessor/traces_test.go index 39624b97a369..b23cf4b3c007 100644 --- a/processor/filterprocessor/traces_test.go +++ b/processor/filterprocessor/traces_test.go @@ -273,7 +273,6 @@ func TestFilterTraceProcessorWithOTTL(t *testing.T) { if tt.filterEverything { assert.Equal(t, processorhelper.ErrSkipProcessingData, err) } else { - exTd := constructTraces() tt.want(exTd) assert.Equal(t, exTd, got) diff --git a/processor/geoipprocessor/factory.go b/processor/geoipprocessor/factory.go index dca0a85cd712..fc2f40e0b0b2 100644 --- a/processor/geoipprocessor/factory.go +++ 
b/processor/geoipprocessor/factory.go @@ -77,7 +77,6 @@ func createGeoIPProviders( } providers = append(providers, provider) - } return providers, nil diff --git a/processor/groupbyattrsprocessor/attribute_groups.go b/processor/groupbyattrsprocessor/attribute_groups.go index e106d79c112f..fef49f112033 100644 --- a/processor/groupbyattrsprocessor/attribute_groups.go +++ b/processor/groupbyattrsprocessor/attribute_groups.go @@ -64,7 +64,6 @@ func (mg *metricsGroup) findOrCreateResourceMetrics(originResource pcommon.Resou referenceResource.MoveTo(rm.Resource()) mg.resourceHashes = append(mg.resourceHashes, referenceResourceHash) return rm - } type logsGroup struct { diff --git a/processor/groupbyattrsprocessor/factory.go b/processor/groupbyattrsprocessor/factory.go index 319c3b00fd4d..666e15e87ce5 100644 --- a/processor/groupbyattrsprocessor/factory.go +++ b/processor/groupbyattrsprocessor/factory.go @@ -65,7 +65,6 @@ func createTracesProcessor( set processor.Settings, cfg component.Config, nextConsumer consumer.Traces) (processor.Traces, error) { - oCfg := cfg.(*Config) gap, err := createGroupByAttrsProcessor(set, oCfg.GroupByKeys) if err != nil { @@ -87,7 +86,6 @@ func createLogsProcessor( set processor.Settings, cfg component.Config, nextConsumer consumer.Logs) (processor.Logs, error) { - oCfg := cfg.(*Config) gap, err := createGroupByAttrsProcessor(set, oCfg.GroupByKeys) if err != nil { @@ -109,7 +107,6 @@ func createMetricsProcessor( set processor.Settings, cfg component.Config, nextConsumer consumer.Metrics) (processor.Metrics, error) { - oCfg := cfg.(*Config) gap, err := createGroupByAttrsProcessor(set, oCfg.GroupByKeys) if err != nil { diff --git a/processor/groupbyattrsprocessor/processor.go b/processor/groupbyattrsprocessor/processor.go index d91193145264..ac1b14224267 100644 --- a/processor/groupbyattrsprocessor/processor.go +++ b/processor/groupbyattrsprocessor/processor.go @@ -90,7 +90,6 @@ func (gap *groupByAttrsProcessor) processLogs(ctx 
context.Context, ld plog.Logs) log.CopyTo(lr) } } - } // Copy the grouped data into output @@ -114,7 +113,6 @@ func (gap *groupByAttrsProcessor) processMetrics(ctx context.Context, md pmetric //exhaustive:enforce switch metric.Type() { - case pmetric.MetricTypeGauge: for pointIndex := 0; pointIndex < metric.Gauge().DataPoints().Len(); pointIndex++ { dataPoint := metric.Gauge().DataPoints().At(pointIndex) @@ -174,7 +172,6 @@ func deleteAttributes(attrsForRemoval, targetAttrs pcommon.Map) { // - whether any attribute matched (true) or none (false) // - the extracted AttributeMap of matching keys and their corresponding values func (gap *groupByAttrsProcessor) extractGroupingAttributes(attrMap pcommon.Map) (bool, pcommon.Map) { - groupingAttributes := pcommon.NewMap() foundMatch := false @@ -191,7 +188,6 @@ func (gap *groupByAttrsProcessor) extractGroupingAttributes(attrMap pcommon.Map) // Searches for metric with same name in the specified InstrumentationLibrary and returns it. If nothing is found, create it. 
func getMetricInInstrumentationLibrary(ilm pmetric.ScopeMetrics, searchedMetric pmetric.Metric) pmetric.Metric { - // Loop through all metrics and try to find the one that matches with the one we search for // (name and type) for i := 0; i < ilm.Metrics().Len(); i++ { @@ -211,7 +207,6 @@ func getMetricInInstrumentationLibrary(ilm pmetric.ScopeMetrics, searchedMetric // Move other special type specific values //exhaustive:enforce switch searchedMetric.Type() { - case pmetric.MetricTypeHistogram: metric.SetEmptyHistogram().SetAggregationTemporality(searchedMetric.Histogram().AggregationTemporality()) @@ -243,7 +238,6 @@ func (gap *groupByAttrsProcessor) getGroupedMetricsFromAttributes( metric pmetric.Metric, attributes pcommon.Map, ) pmetric.Metric { - toBeGrouped, requiredAttributes := gap.extractGroupingAttributes(attributes) if toBeGrouped { gap.telemetryBuilder.ProcessorGroupbyattrsNumGroupedMetrics.Add(ctx, 1) @@ -262,5 +256,4 @@ func (gap *groupByAttrsProcessor) getGroupedMetricsFromAttributes( // Return the metric in this resource return getMetricInInstrumentationLibrary(groupedInstrumentationLibrary, metric) - } diff --git a/processor/groupbyattrsprocessor/processor_test.go b/processor/groupbyattrsprocessor/processor_test.go index 9f9fb1e09c87..ea32b35d4291 100644 --- a/processor/groupbyattrsprocessor/processor_test.go +++ b/processor/groupbyattrsprocessor/processor_test.go @@ -845,7 +845,6 @@ func someExponentialHistogramMetrics(attrs pcommon.Map, instrumentationLibraryCo } func TestMetricAdvancedGrouping(t *testing.T) { - // Input: // // Resource {host.name="localhost"} diff --git a/processor/groupbytraceprocessor/factory.go b/processor/groupbytraceprocessor/factory.go index 06bf13a90437..61ee23aa7a70 100644 --- a/processor/groupbytraceprocessor/factory.go +++ b/processor/groupbytraceprocessor/factory.go @@ -30,7 +30,6 @@ var ( // NewFactory returns a new factory for the Filter processor. 
func NewFactory() processor.Factory { - return processor.NewFactory( metadata.Type, createDefaultConfig, @@ -56,7 +55,6 @@ func createTracesProcessor( params processor.Settings, cfg component.Config, nextConsumer consumer.Traces) (processor.Traces, error) { - oCfg := cfg.(*Config) var st storage diff --git a/processor/k8sattributesprocessor/internal/kube/client.go b/processor/k8sattributesprocessor/internal/kube/client.go index a43049f09ebf..9624fb250b22 100644 --- a/processor/k8sattributesprocessor/internal/kube/client.go +++ b/processor/k8sattributesprocessor/internal/kube/client.go @@ -538,7 +538,6 @@ func (c *WatchClient) extractPodAttributes(pod *api_v1.Pod) map[string]string { // This function removes all data from the Pod except what is required by extraction rules and pod association func removeUnnecessaryPodData(pod *api_v1.Pod, rules ExtractionRules) *api_v1.Pod { - // name, namespace, uid, start time and ip are needed for identifying Pods // there's room to optimize this further, it's kept this way for simplicity transformedPod := api_v1.Pod{ diff --git a/processor/k8sattributesprocessor/internal/kube/client_test.go b/processor/k8sattributesprocessor/internal/kube/client_test.go index 97b0cdc06b16..c904a6291f79 100644 --- a/processor/k8sattributesprocessor/internal/kube/client_test.go +++ b/processor/k8sattributesprocessor/internal/kube/client_test.go @@ -88,7 +88,6 @@ func podAddAndUpdateTest(t *testing.T, c *WatchClient, handler func(obj any)) { assert.Equal(t, "2.2.2.2", got.Address) assert.Equal(t, "podC", got.Name) assert.Equal(t, "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee", got.PodUID) - } func namespaceAddAndUpdateTest(t *testing.T, c *WatchClient, handler func(obj any)) { @@ -290,7 +289,6 @@ func TestReplicaSetHandler(t *testing.T) { Obj: replicaset, }) assert.Empty(t, c.ReplicaSets) - } func TestPodHostNetwork(t *testing.T) { @@ -1373,7 +1371,6 @@ func TestFilters(t *testing.T) { assert.Equal(t, tc.fields, inf.fieldSelector.String()) }) } - } func 
TestPodIgnorePatterns(t *testing.T) { diff --git a/processor/k8sattributesprocessor/internal/kube/informer.go b/processor/k8sattributesprocessor/internal/kube/informer.go index ddb10c24f060..85459afe53bb 100644 --- a/processor/k8sattributesprocessor/internal/kube/informer.go +++ b/processor/k8sattributesprocessor/internal/kube/informer.go @@ -70,7 +70,6 @@ func informerListFuncWithSelectors(client kubernetes.Interface, namespace string opts.FieldSelector = fs.String() return client.CoreV1().Pods(namespace).List(context.Background(), opts) } - } func informerWatchFuncWithSelectors(client kubernetes.Interface, namespace string, ls labels.Selector, fs fields.Selector) cache.WatchFunc { @@ -120,7 +119,6 @@ func namespaceInformerListFunc(client kubernetes.Interface) cache.ListFunc { return func(opts metav1.ListOptions) (runtime.Object, error) { return client.CoreV1().Namespaces().List(context.Background(), opts) } - } func namespaceInformerWatchFunc(client kubernetes.Interface) cache.WatchFunc { diff --git a/processor/k8sattributesprocessor/options_test.go b/processor/k8sattributesprocessor/options_test.go index a87874597c7c..9afe3cdee1dc 100644 --- a/processor/k8sattributesprocessor/options_test.go +++ b/processor/k8sattributesprocessor/options_test.go @@ -490,7 +490,6 @@ func TestWithFilterLabels(t *testing.T) { } func TestWithFilterFields(t *testing.T) { - tests := []struct { name string args []FieldFilterConfig diff --git a/processor/k8sattributesprocessor/processor_test.go b/processor/k8sattributesprocessor/processor_test.go index 4f9e8701b348..f7493cc38cdc 100644 --- a/processor/k8sattributesprocessor/processor_test.go +++ b/processor/k8sattributesprocessor/processor_test.go @@ -321,7 +321,6 @@ func (strAddr) Network() string { } func TestIPDetectionFromContext(t *testing.T) { - addresses := []net.Addr{ &net.IPAddr{ IP: net.IPv4(1, 1, 1, 1), @@ -357,7 +356,6 @@ func TestIPDetectionFromContext(t *testing.T) { assertResourceHasStringAttribute(t, r, "k8s.pod.ip", 
"1.1.1.1") }) } - } func TestNilBatch(t *testing.T) { @@ -1352,7 +1350,6 @@ func TestMetricsProcessorHostname(t *testing.T) { } }) } - } func TestMetricsProcessorHostnameWithPodAssociation(t *testing.T) { @@ -1435,7 +1432,6 @@ func TestMetricsProcessorHostnameWithPodAssociation(t *testing.T) { } }) } - } func TestPassthroughStart(t *testing.T) { diff --git a/processor/logstransformprocessor/processor.go b/processor/logstransformprocessor/processor.go index 09f3a16430c9..72d36fec1307 100644 --- a/processor/logstransformprocessor/processor.go +++ b/processor/logstransformprocessor/processor.go @@ -79,7 +79,6 @@ func (ltp *logsTransformProcessor) Shutdown(ctx context.Context) error { } func (ltp *logsTransformProcessor) Start(ctx context.Context, _ component.Host) error { - wkrCount := int(math.Max(1, float64(runtime.NumCPU()))) ltp.fromConverter = adapter.NewFromPdataConverter(ltp.set, wkrCount) diff --git a/processor/logstransformprocessor/processor_test.go b/processor/logstransformprocessor/processor_test.go index 0ccc71cec10b..fed71c028099 100644 --- a/processor/logstransformprocessor/processor_test.go +++ b/processor/logstransformprocessor/processor_test.go @@ -205,7 +205,6 @@ type laggyOperator struct { } func (t *laggyOperator) Process(ctx context.Context, e *entry.Entry) error { - // Wait for a large amount of time every 100 logs if t.logsCount%100 == 0 { time.Sleep(100 * time.Millisecond) diff --git a/processor/metricsgenerationprocessor/factory_test.go b/processor/metricsgenerationprocessor/factory_test.go index 1d8fe694115c..121cd5c9563c 100644 --- a/processor/metricsgenerationprocessor/factory_test.go +++ b/processor/metricsgenerationprocessor/factory_test.go @@ -40,7 +40,6 @@ func TestCreateProcessors(t *testing.T) { for k := range cm.ToStringMap() { // Check if all processor variations that are defined in test config can be actually created t.Run(k, func(t *testing.T) { - factory := NewFactory() cfg := factory.CreateDefaultConfig() diff --git 
a/processor/metricsgenerationprocessor/processor_test.go b/processor/metricsgenerationprocessor/processor_test.go index e6ebcf3e0e5a..68cffadca240 100644 --- a/processor/metricsgenerationprocessor/processor_test.go +++ b/processor/metricsgenerationprocessor/processor_test.go @@ -325,10 +325,8 @@ func TestMetricsGenerationProcessor(t *testing.T) { case pmetric.NumberDataPointValueTypeInt: require.Equal(t, eDataPoints.At(j).IntValue(), aDataPoints.At(j).IntValue()) } - } } - } require.NoError(t, mgp.Shutdown(ctx)) diff --git a/processor/metricstransformprocessor/factory.go b/processor/metricstransformprocessor/factory.go index 142164880b19..6a87e8feae10 100644 --- a/processor/metricstransformprocessor/factory.go +++ b/processor/metricstransformprocessor/factory.go @@ -127,7 +127,6 @@ func validateConfiguration(config *Config) error { func buildHelperConfig(config *Config, version string) ([]internalTransform, error) { helperDataTransforms := make([]internalTransform, len(config.Transforms)) for i, t := range config.Transforms { - if t.MetricIncludeFilter.MatchType == "" { t.MetricIncludeFilter.MatchType = strictMatchType } diff --git a/processor/metricstransformprocessor/metrics_transform_processor_otlp.go b/processor/metricstransformprocessor/metrics_transform_processor_otlp.go index 1d1abac32830..398cd67cd96e 100644 --- a/processor/metricstransformprocessor/metrics_transform_processor_otlp.go +++ b/processor/metricstransformprocessor/metrics_transform_processor_otlp.go @@ -368,7 +368,6 @@ func canBeCombined(metrics []pmetric.Metric) error { "metrics cannot be combined as they have different aggregation temporalities: %v (%v) and %v (%v)", firstMetric.Name(), firstMetric.Histogram().AggregationTemporality(), metric.Name(), metric.Histogram().AggregationTemporality()) - } case pmetric.MetricTypeExponentialHistogram: if firstMetric.ExponentialHistogram().AggregationTemporality() != metric.ExponentialHistogram().AggregationTemporality() { @@ -376,7 +375,6 @@ func 
canBeCombined(metrics []pmetric.Metric) error { "metrics cannot be combined as they have different aggregation temporalities: %v (%v) and %v (%v)", firstMetric.Name(), firstMetric.ExponentialHistogram().AggregationTemporality(), metric.Name(), metric.ExponentialHistogram().AggregationTemporality()) - } } } diff --git a/processor/probabilisticsamplerprocessor/logsprocessor.go b/processor/probabilisticsamplerprocessor/logsprocessor.go index 9f1122bc6784..fd4fa6b3ff53 100644 --- a/processor/probabilisticsamplerprocessor/logsprocessor.go +++ b/processor/probabilisticsamplerprocessor/logsprocessor.go @@ -268,7 +268,6 @@ func (lsp *logsProcessor) logRecordToPriorityThreshold(logRec plog.LogRecord) sa // The record has supplied a valid alternative sampling probability return th } - } } return sampling.NeverSampleThreshold diff --git a/processor/probabilisticsamplerprocessor/logsprocessor_test.go b/processor/probabilisticsamplerprocessor/logsprocessor_test.go index f018fb49ea94..9da4b6412f95 100644 --- a/processor/probabilisticsamplerprocessor/logsprocessor_test.go +++ b/processor/probabilisticsamplerprocessor/logsprocessor_test.go @@ -385,7 +385,6 @@ func TestLogsSamplingState(t *testing.T) { } for _, tt := range tests { t.Run(fmt.Sprint(tt.name), func(t *testing.T) { - sink := new(consumertest.LogsSink) cfg := &Config{} if tt.cfg != nil { @@ -473,7 +472,6 @@ func TestLogsMissingRandomness(t *testing.T) { {100, traceIDAttributeSource, false, true}, } { t.Run(fmt.Sprint(tt.pct, "_", tt.source, "_", tt.failClosed, "_", mode), func(t *testing.T) { - ctx := context.Background() logs := plog.NewLogs() record := logs.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords().AppendEmpty() diff --git a/processor/probabilisticsamplerprocessor/tracesprocessor_test.go b/processor/probabilisticsamplerprocessor/tracesprocessor_test.go index 25883fa0fc01..74cdd1f5a1a3 100644 --- a/processor/probabilisticsamplerprocessor/tracesprocessor_test.go +++ 
b/processor/probabilisticsamplerprocessor/tracesprocessor_test.go @@ -222,7 +222,6 @@ func Test_tracesamplerprocessor_SamplingPercentageRange_MultipleResourceSpans(t assert.Equal(t, tt.resourceSpanPerTrace*tt.numTracesPerBatch, sink.SpanCount()) sink.Reset() } - }) } } @@ -246,7 +245,6 @@ func Test_tracessamplerprocessor_MissingRandomness(t *testing.T) { {100, false, true}, } { t.Run(fmt.Sprint(tt.pct, "_", tt.failClosed), func(t *testing.T) { - ctx := context.Background() traces := ptrace.NewTraces() span := traces.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty() @@ -388,7 +386,6 @@ func Test_tracesamplerprocessor_SpanSamplingPriority(t *testing.T) { for _, mode := range AllModes { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - sink := new(consumertest.TracesSink) cfg := &Config{} @@ -846,7 +843,6 @@ func Test_tracesamplerprocessor_TraceState(t *testing.T) { for _, tt := range tests { for _, mode := range []SamplerMode{Equalizing, Proportional} { t.Run(fmt.Sprint(mode, "_", tt.name), func(t *testing.T) { - sink := new(consumertest.TracesSink) cfg := &Config{} if tt.cfg != nil { @@ -1013,7 +1009,6 @@ func Test_tracesamplerprocessor_TraceStateErrors(t *testing.T) { expectMessage := "" if tt.sf != nil { expectMessage = tt.sf(mode) - } tsp, err := newTracesProcessor(context.Background(), set, cfg, sink) diff --git a/processor/remotetapprocessor/processor_test.go b/processor/remotetapprocessor/processor_test.go index 9908cd379fa6..50c59ad34a13 100644 --- a/processor/remotetapprocessor/processor_test.go +++ b/processor/remotetapprocessor/processor_test.go @@ -60,7 +60,6 @@ func TestConsumeMetrics(t *testing.T) { processor.cs.closeAndRemove(idx) wg.Wait() assert.Equal(t, c.limit, receiveNum) - }) } } diff --git a/processor/resourcedetectionprocessor/internal/heroku/heroku_test.go b/processor/resourcedetectionprocessor/internal/heroku/heroku_test.go index cabb11994391..02acdaa79467 100644 --- 
a/processor/resourcedetectionprocessor/internal/heroku/heroku_test.go +++ b/processor/resourcedetectionprocessor/internal/heroku/heroku_test.go @@ -81,7 +81,6 @@ func TestDetectTruePartialMissingDynoId(t *testing.T) { } func TestDetectFalse(t *testing.T) { - detector, err := NewDetector(processortest.NewNopSettings(), CreateDefaultConfig()) require.NoError(t, err) res, schemaURL, err := detector.Detect(context.Background()) diff --git a/processor/resourcedetectionprocessor/internal/system/system_test.go b/processor/resourcedetectionprocessor/internal/system/system_test.go index 1626dbf2b4b8..aa5123a041c8 100644 --- a/processor/resourcedetectionprocessor/internal/system/system_test.go +++ b/processor/resourcedetectionprocessor/internal/system/system_test.go @@ -178,7 +178,6 @@ func TestDetectFQDNAvailable(t *testing.T) { } assert.Equal(t, expected, res.Attributes().AsRaw()) - } func TestFallbackHostname(t *testing.T) { @@ -368,7 +367,6 @@ func TestDetectCPUInfo(t *testing.T) { } assert.Equal(t, expected, res.Attributes().AsRaw()) - } func newTestDetector(mock *mockMetadata, hostnameSources []string, resCfg metadata.ResourceAttributesConfig) *Detector { diff --git a/processor/routingprocessor/extract.go b/processor/routingprocessor/extract.go index b96a07d48a42..a0007b4ec934 100644 --- a/processor/routingprocessor/extract.go +++ b/processor/routingprocessor/extract.go @@ -53,7 +53,6 @@ func (e extractor) extractFromContext(ctx context.Context) string { } func (e extractor) extractFromGRPCContext(ctx context.Context) ([]string, bool) { - md, ok := metadata.FromIncomingContext(ctx) if !ok { diff --git a/processor/routingprocessor/metrics_test.go b/processor/routingprocessor/metrics_test.go index 430a8efbecc5..b1bf73f89c7f 100644 --- a/processor/routingprocessor/metrics_test.go +++ b/processor/routingprocessor/metrics_test.go @@ -187,7 +187,6 @@ func TestMetrics_RoutingWorks_Context(t *testing.T) { "metric should not be routed to non default exporter", ) }) - } func 
TestMetrics_RoutingWorks_ResourceAttribute(t *testing.T) { diff --git a/processor/routingprocessor/traces_test.go b/processor/routingprocessor/traces_test.go index 30a8093b4dcb..538af2d79373 100644 --- a/processor/routingprocessor/traces_test.go +++ b/processor/routingprocessor/traces_test.go @@ -497,7 +497,6 @@ func TestTracesAttributeWithOTTLDoesNotCauseCrash(t *testing.T) { // verify assert.Len(t, defaultExp.AllTraces(), 1) assert.Empty(t, firstExp.AllTraces()) - } func TestTraceProcessorCapabilities(t *testing.T) { diff --git a/processor/schemaprocessor/internal/migrate/attributes_test.go b/processor/schemaprocessor/internal/migrate/attributes_test.go index f3cc5d79dc6e..3d1e708f3b92 100644 --- a/processor/schemaprocessor/internal/migrate/attributes_test.go +++ b/processor/schemaprocessor/internal/migrate/attributes_test.go @@ -268,7 +268,6 @@ func TestNewAttributeChangeSetSliceApplyRollback(t *testing.T) { ), attr: testHelperBuildMap(func(m pcommon.Map) { m.PutStr("application.service.version", "v0.0.1") - }), expect: testHelperBuildMap(func(m pcommon.Map) { m.PutStr("service_version", "v0.0.1") diff --git a/processor/spanprocessor/factory.go b/processor/spanprocessor/factory.go index 4bec805ffeae..8e4906d20726 100644 --- a/processor/spanprocessor/factory.go +++ b/processor/spanprocessor/factory.go @@ -53,7 +53,6 @@ func createTracesProcessor( cfg component.Config, nextConsumer consumer.Traces, ) (processor.Traces, error) { - // 'from_attributes' or 'to_attributes' under 'name' has to be set for the span // processor to be valid. If not set and not enforced, the processor would do no work. 
oCfg := cfg.(*Config) diff --git a/processor/spanprocessor/span_test.go b/processor/spanprocessor/span_test.go index 460e1c430d13..fff402f055cd 100644 --- a/processor/spanprocessor/span_test.go +++ b/processor/spanprocessor/span_test.go @@ -294,7 +294,6 @@ func TestSpanProcessor_MissingKeys(t *testing.T) { // TestSpanProcessor_Separator ensures naming a span with a single key and separator will only contain the value from // the single key. func TestSpanProcessor_Separator(t *testing.T) { - factory := NewFactory() cfg := factory.CreateDefaultConfig() oCfg := cfg.(*Config) @@ -323,7 +322,6 @@ func TestSpanProcessor_Separator(t *testing.T) { // TestSpanProcessor_NoSeparatorMultipleKeys tests naming a span using multiple keys and no separator. func TestSpanProcessor_NoSeparatorMultipleKeys(t *testing.T) { - factory := NewFactory() cfg := factory.CreateDefaultConfig() oCfg := cfg.(*Config) @@ -353,7 +351,6 @@ func TestSpanProcessor_NoSeparatorMultipleKeys(t *testing.T) { // TestSpanProcessor_SeparatorMultipleKeys tests naming a span with multiple keys and a separator. func TestSpanProcessor_SeparatorMultipleKeys(t *testing.T) { - factory := NewFactory() cfg := factory.CreateDefaultConfig() oCfg := cfg.(*Config) @@ -388,7 +385,6 @@ func TestSpanProcessor_SeparatorMultipleKeys(t *testing.T) { // TestSpanProcessor_NilName tests naming a span when the input span had no name. 
func TestSpanProcessor_NilName(t *testing.T) { - factory := NewFactory() cfg := factory.CreateDefaultConfig() oCfg := cfg.(*Config) @@ -417,7 +413,6 @@ func TestSpanProcessor_NilName(t *testing.T) { // TestSpanProcessor_ToAttributes func TestSpanProcessor_ToAttributes(t *testing.T) { - testCases := []struct { rules []string breakAfterMatch bool diff --git a/processor/sumologicprocessor/processor_test.go b/processor/sumologicprocessor/processor_test.go index 0e0759a2a21b..0f4d28716f49 100644 --- a/processor/sumologicprocessor/processor_test.go +++ b/processor/sumologicprocessor/processor_test.go @@ -1318,7 +1318,6 @@ func TestLogFieldsConversionLogs(t *testing.T) { attribute4, found := outputLogs.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().Get("traceid") assert.True(t, found) assert.Equal(t, "01010101010101010101010101010101", attribute4.Str()) - }, }, } diff --git a/processor/sumologicprocessor/translate_attributes_processor_test.go b/processor/sumologicprocessor/translate_attributes_processor_test.go index c9dd1443e2cc..e7535e9dac00 100644 --- a/processor/sumologicprocessor/translate_attributes_processor_test.go +++ b/processor/sumologicprocessor/translate_attributes_processor_test.go @@ -110,7 +110,6 @@ func assertAttribute(t *testing.T, metadata pcommon.Map, attributeName string, e } else { assert.True(t, exists) assert.Equal(t, expectedValue, value.Str()) - } } diff --git a/processor/tailsamplingprocessor/internal/sampling/and.go b/processor/tailsamplingprocessor/internal/sampling/and.go index b5779fb82c9d..408fedfbd240 100644 --- a/processor/tailsamplingprocessor/internal/sampling/and.go +++ b/processor/tailsamplingprocessor/internal/sampling/and.go @@ -20,7 +20,6 @@ func NewAnd( logger *zap.Logger, subpolicies []PolicyEvaluator, ) PolicyEvaluator { - return &And{ subpolicies: subpolicies, logger: logger, @@ -39,7 +38,6 @@ func (c *And) Evaluate(ctx context.Context, traceID pcommon.TraceID, trace *Trac if decision == NotSampled || 
decision == InvertNotSampled { return decision, nil } - } return Sampled, nil } diff --git a/processor/tailsamplingprocessor/internal/sampling/and_test.go b/processor/tailsamplingprocessor/internal/sampling/and_test.go index 0094768f7590..29a771971665 100644 --- a/processor/tailsamplingprocessor/internal/sampling/and_test.go +++ b/processor/tailsamplingprocessor/internal/sampling/and_test.go @@ -36,7 +36,6 @@ func TestAndEvaluatorNotSampled(t *testing.T) { decision, err := and.Evaluate(context.Background(), traceID, trace) require.NoError(t, err, "Failed to evaluate and policy: %v", err) assert.Equal(t, NotSampled, decision) - } func TestAndEvaluatorSampled(t *testing.T) { @@ -62,7 +61,6 @@ func TestAndEvaluatorSampled(t *testing.T) { decision, err := and.Evaluate(context.Background(), traceID, trace) require.NoError(t, err, "Failed to evaluate and policy: %v", err) assert.Equal(t, Sampled, decision) - } func TestAndEvaluatorStringInvertSampled(t *testing.T) { @@ -88,7 +86,6 @@ func TestAndEvaluatorStringInvertSampled(t *testing.T) { decision, err := and.Evaluate(context.Background(), traceID, trace) require.NoError(t, err, "Failed to evaluate and policy: %v", err) assert.Equal(t, Sampled, decision) - } func TestAndEvaluatorStringInvertNotSampled(t *testing.T) { @@ -114,5 +111,4 @@ func TestAndEvaluatorStringInvertNotSampled(t *testing.T) { decision, err := and.Evaluate(context.Background(), traceID, trace) require.NoError(t, err, "Failed to evaluate and policy: %v", err) assert.Equal(t, InvertNotSampled, decision) - } diff --git a/processor/tailsamplingprocessor/internal/sampling/boolean_tag_filter_test.go b/processor/tailsamplingprocessor/internal/sampling/boolean_tag_filter_test.go index 2ff998bb3e86..f3ca91c53879 100644 --- a/processor/tailsamplingprocessor/internal/sampling/boolean_tag_filter_test.go +++ b/processor/tailsamplingprocessor/internal/sampling/boolean_tag_filter_test.go @@ -15,7 +15,6 @@ import ( ) func TestBooleanTagFilter(t *testing.T) { - var 
empty = map[string]any{} filter := NewBooleanAttributeFilter(componenttest.NewNopTelemetrySettings(), "example", true, false) @@ -55,7 +54,6 @@ func TestBooleanTagFilter(t *testing.T) { } func TestBooleanTagFilterInverted(t *testing.T) { - var empty = map[string]any{} filter := NewBooleanAttributeFilter(componenttest.NewNopTelemetrySettings(), "example", true, true) diff --git a/processor/tailsamplingprocessor/internal/sampling/composite.go b/processor/tailsamplingprocessor/internal/sampling/composite.go index b221229d7534..0c98c5a1f195 100644 --- a/processor/tailsamplingprocessor/internal/sampling/composite.go +++ b/processor/tailsamplingprocessor/internal/sampling/composite.go @@ -53,7 +53,6 @@ func NewComposite( subPolicyParams []SubPolicyEvalParams, timeProvider TimeProvider, ) PolicyEvaluator { - var subpolicies []*subpolicy for i := 0; i < len(subPolicyParams); i++ { diff --git a/processor/tailsamplingprocessor/internal/sampling/composite_test.go b/processor/tailsamplingprocessor/internal/sampling/composite_test.go index 66a7d1606c34..67a977f90335 100644 --- a/processor/tailsamplingprocessor/internal/sampling/composite_test.go +++ b/processor/tailsamplingprocessor/internal/sampling/composite_test.go @@ -57,7 +57,6 @@ func newTraceWithKV(traceID pcommon.TraceID, key string, val int64) *TraceData { } func TestCompositeEvaluatorNotSampled(t *testing.T) { - // Create 2 policies which do not match any trace n1 := NewNumericAttributeFilter(componenttest.NewNopTelemetrySettings(), "tag", 0, 100, false) n2 := NewNumericAttributeFilter(componenttest.NewNopTelemetrySettings(), "tag", 200, 300, false) @@ -75,7 +74,6 @@ func TestCompositeEvaluatorNotSampled(t *testing.T) { } func TestCompositeEvaluatorSampled(t *testing.T) { - // Create 2 subpolicies. First results in 100% NotSampled, the second in 100% Sampled. 
n1 := NewNumericAttributeFilter(componenttest.NewNopTelemetrySettings(), "tag", 0, 100, false) n2 := NewAlwaysSample(componenttest.NewNopTelemetrySettings()) @@ -92,7 +90,6 @@ func TestCompositeEvaluatorSampled(t *testing.T) { } func TestCompositeEvaluator_OverflowAlwaysSampled(t *testing.T) { - timeProvider := &FakeTimeProvider{second: 0} // Create 2 subpolicies. First results in 100% NotSampled, the second in 100% Sampled. @@ -128,7 +125,6 @@ func TestCompositeEvaluator_OverflowAlwaysSampled(t *testing.T) { } func TestCompositeEvaluatorSampled_AlwaysSampled(t *testing.T) { - // Create 2 subpolicies. First results in 100% NotSampled, the second in 100% Sampled. n1 := NewNumericAttributeFilter(componenttest.NewNopTelemetrySettings(), "tag", 0, 100, false) n2 := NewAlwaysSample(componenttest.NewNopTelemetrySettings()) @@ -147,7 +143,6 @@ func TestCompositeEvaluatorSampled_AlwaysSampled(t *testing.T) { } func TestCompositeEvaluatorInverseSampled_AlwaysSampled(t *testing.T) { - // The first policy does not match, the second matches through invert n1 := NewStringAttributeFilter(componenttest.NewNopTelemetrySettings(), "tag", []string{"foo"}, false, 0, false) n2 := NewStringAttributeFilter(componenttest.NewNopTelemetrySettings(), "tag", []string{"foo"}, false, 0, true) @@ -166,7 +161,6 @@ func TestCompositeEvaluatorInverseSampled_AlwaysSampled(t *testing.T) { } func TestCompositeEvaluatorThrottling(t *testing.T) { - // Create only one subpolicy, with 100% Sampled policy. 
n1 := NewAlwaysSample(componenttest.NewNopTelemetrySettings()) timeProvider := &FakeTimeProvider{second: 0} @@ -207,7 +201,6 @@ func TestCompositeEvaluatorThrottling(t *testing.T) { } func TestCompositeEvaluator2SubpolicyThrottling(t *testing.T) { - n1 := NewNumericAttributeFilter(componenttest.NewNopTelemetrySettings(), "tag", 0, 100, false) n2 := NewAlwaysSample(componenttest.NewNopTelemetrySettings()) timeProvider := &FakeTimeProvider{second: 0} diff --git a/processor/tailsamplingprocessor/internal/sampling/numeric_tag_filter_test.go b/processor/tailsamplingprocessor/internal/sampling/numeric_tag_filter_test.go index a7d058c2156e..7b8db1265c2e 100644 --- a/processor/tailsamplingprocessor/internal/sampling/numeric_tag_filter_test.go +++ b/processor/tailsamplingprocessor/internal/sampling/numeric_tag_filter_test.go @@ -16,7 +16,6 @@ import ( ) func TestNumericTagFilter(t *testing.T) { - var empty = map[string]any{} filter := NewNumericAttributeFilter(componenttest.NewNopTelemetrySettings(), "example", math.MinInt32, math.MaxInt32, false) @@ -86,7 +85,6 @@ func TestNumericTagFilter(t *testing.T) { } func TestNumericTagFilterInverted(t *testing.T) { - var empty = map[string]any{} filter := NewNumericAttributeFilter(componenttest.NewNopTelemetrySettings(), "example", math.MinInt32, math.MaxInt32, true) diff --git a/processor/tailsamplingprocessor/internal/sampling/string_tag_filter_test.go b/processor/tailsamplingprocessor/internal/sampling/string_tag_filter_test.go index 65bed0193a09..e9ee3da86773 100644 --- a/processor/tailsamplingprocessor/internal/sampling/string_tag_filter_test.go +++ b/processor/tailsamplingprocessor/internal/sampling/string_tag_filter_test.go @@ -23,7 +23,6 @@ type TestStringAttributeCfg struct { } func TestStringTagFilter(t *testing.T) { - cases := []struct { Desc string Trace *TraceData diff --git a/processor/tailsamplingprocessor/internal/sampling/trace_state_filter_test.go 
b/processor/tailsamplingprocessor/internal/sampling/trace_state_filter_test.go index be826307c5a0..c4481a9c5e76 100644 --- a/processor/tailsamplingprocessor/internal/sampling/trace_state_filter_test.go +++ b/processor/tailsamplingprocessor/internal/sampling/trace_state_filter_test.go @@ -20,7 +20,6 @@ type TestTraceStateCfg struct { } func TestTraceStateFilter(t *testing.T) { - cases := []struct { Desc string Trace *TraceData diff --git a/processor/tailsamplingprocessor/processor_test.go b/processor/tailsamplingprocessor/processor_test.go index 1f1096e88975..05d3825b0f87 100644 --- a/processor/tailsamplingprocessor/processor_test.go +++ b/processor/tailsamplingprocessor/processor_test.go @@ -472,7 +472,6 @@ func TestSubSecondDecisionTime(t *testing.T) { require.Eventually(t, func() bool { return len(msp.AllTraces()) == 1 }, time.Second, 10*time.Millisecond) - } func TestPolicyLoggerAddsPolicyName(t *testing.T) { diff --git a/processor/transformprocessor/internal/common/processor.go b/processor/transformprocessor/internal/common/processor.go index dee7d24e7ba9..07164658de1b 100644 --- a/processor/transformprocessor/internal/common/processor.go +++ b/processor/transformprocessor/internal/common/processor.go @@ -216,7 +216,6 @@ func parseGlobalExpr[K any]( conditions []string, pc parserCollection, standardFuncs map[string]ottl.Factory[K]) (expr.BoolExpr[K], error) { - if len(conditions) > 0 { return boolExprFunc(conditions, standardFuncs, pc.errorMode, pc.settings) } diff --git a/processor/transformprocessor/internal/logs/processor_test.go b/processor/transformprocessor/internal/logs/processor_test.go index e6c3e117647e..d3e06f65ac01 100644 --- a/processor/transformprocessor/internal/logs/processor_test.go +++ b/processor/transformprocessor/internal/logs/processor_test.go @@ -536,7 +536,6 @@ func fillLogOne(log plog.LogRecord) { log.Attributes().PutStr("http.url", "http://localhost/health") log.Attributes().PutStr("flags", "A|B|C") 
log.Attributes().PutStr("total.string", "123456789") - } func fillLogTwo(log plog.LogRecord) { @@ -548,5 +547,4 @@ func fillLogTwo(log plog.LogRecord) { log.Attributes().PutStr("http.url", "http://localhost/health") log.Attributes().PutStr("flags", "C|D") log.Attributes().PutStr("total.string", "345678") - } diff --git a/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist.go b/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist.go index 4e8958706841..b918377af67c 100644 --- a/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist.go +++ b/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist.go @@ -46,7 +46,6 @@ func createconvertExponentialHistToExplicitHistFunction(_ ottl.FunctionContext, if _, ok := distributionFnMap[args.DistributionFn]; !ok { return nil, fmt.Errorf("invalid conversion function: %s, must be one of [upper, midpoint, random, uniform]", args.DistributionFn) - } return convertExponentialHistToExplicitHist(args.DistributionFn, args.ExplicitBounds) @@ -54,7 +53,6 @@ func createconvertExponentialHistToExplicitHistFunction(_ ottl.FunctionContext, // convertExponentialHistToExplicitHist converts an exponential histogram to a bucketed histogram func convertExponentialHistToExplicitHist(distributionFn string, explicitBounds []float64) (ottl.ExprFunc[ottlmetric.TransformContext], error) { - if len(explicitBounds) == 0 { return nil, fmt.Errorf("explicit bounds cannot be empty: %v", explicitBounds) } diff --git a/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist_test.go b/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist_test.go index f61969b17c18..b2aae4691260 100644 --- a/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist_test.go +++ 
b/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist_test.go @@ -91,7 +91,6 @@ func TestUpper_convert_exponential_hist_to_explicit_hist(t *testing.T) { arg: []float64{1.0, 2.0, 3.0, 4.0, 5.0}, distribution: "upper", want: func(metric pmetric.Metric) { - metric.SetName("response_time") dp := metric.SetEmptyHistogram().DataPoints().AppendEmpty() metric.Histogram().SetAggregationTemporality(1) @@ -109,7 +108,6 @@ func TestUpper_convert_exponential_hist_to_explicit_hist(t *testing.T) { // set explictbounds dp.ExplicitBounds().Append(1.0, 2.0, 3.0, 4.0, 5.0) - }, }, { @@ -120,7 +118,6 @@ func TestUpper_convert_exponential_hist_to_explicit_hist(t *testing.T) { arg: []float64{1000.0, 2000.0, 3000.0, 4000.0, 5000.0}, distribution: "upper", want: func(metric pmetric.Metric) { - metric.SetName("response_time") dp := metric.SetEmptyHistogram().DataPoints().AppendEmpty() metric.Histogram().SetAggregationTemporality(1) @@ -138,7 +135,6 @@ func TestUpper_convert_exponential_hist_to_explicit_hist(t *testing.T) { // set explictbounds dp.ExplicitBounds().Append(1000.0, 2000.0, 3000.0, 4000.0, 5000.0) - }, }, { @@ -148,7 +144,6 @@ func TestUpper_convert_exponential_hist_to_explicit_hist(t *testing.T) { arg: []float64{160.0, 170.0, 180.0, 190.0, 200.0}, distribution: "upper", want: func(metric pmetric.Metric) { - metric.SetName("response_time") dp := metric.SetEmptyHistogram().DataPoints().AppendEmpty() metric.Histogram().SetAggregationTemporality(1) @@ -166,7 +161,6 @@ func TestUpper_convert_exponential_hist_to_explicit_hist(t *testing.T) { // set explictbounds dp.ExplicitBounds().Append(160.0, 170.0, 180.0, 190.0, 200.0) - }, }, { @@ -175,7 +169,6 @@ func TestUpper_convert_exponential_hist_to_explicit_hist(t *testing.T) { arg: []float64{160.0, 170.0, 180.0, 190.0, 200.0}, distribution: "upper", want: func(metric pmetric.Metric) { - metric.SetName("response_time") dp := metric.SetEmptyHistogram().DataPoints().AppendEmpty() 
metric.Histogram().SetAggregationTemporality(1) @@ -193,7 +186,6 @@ func TestUpper_convert_exponential_hist_to_explicit_hist(t *testing.T) { // set explictbounds dp.ExplicitBounds().Append(160.0, 170.0, 180.0, 190.0, 200.0) - }, }, { @@ -328,7 +320,6 @@ func TestMidpoint_convert_exponential_hist_to_explicit_hist(t *testing.T) { // set explictbounds dp.ExplicitBounds().Append(1.0, 2.0, 3.0, 4.0, 5.0) - }, }, { @@ -339,7 +330,6 @@ func TestMidpoint_convert_exponential_hist_to_explicit_hist(t *testing.T) { arg: []float64{1000.0, 2000.0, 3000.0, 4000.0, 5000.0}, distribution: "midpoint", want: func(metric pmetric.Metric) { - metric.SetName("test-metric") dp := metric.SetEmptyHistogram().DataPoints().AppendEmpty() metric.Histogram().SetAggregationTemporality(1) @@ -357,7 +347,6 @@ func TestMidpoint_convert_exponential_hist_to_explicit_hist(t *testing.T) { // set explictbounds dp.ExplicitBounds().Append(1000.0, 2000.0, 3000.0, 4000.0, 5000.0) - }, }, { @@ -367,7 +356,6 @@ func TestMidpoint_convert_exponential_hist_to_explicit_hist(t *testing.T) { arg: []float64{10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0}, distribution: "midpoint", want: func(metric pmetric.Metric) { - metric.SetName("test-metric") dp := metric.SetEmptyHistogram().DataPoints().AppendEmpty() metric.Histogram().SetAggregationTemporality(1) @@ -385,7 +373,6 @@ func TestMidpoint_convert_exponential_hist_to_explicit_hist(t *testing.T) { // set explictbounds dp.ExplicitBounds().Append(10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0) - }, }, { @@ -399,7 +386,6 @@ func TestMidpoint_convert_exponential_hist_to_explicit_hist(t *testing.T) { arg: []float64{0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0}, distribution: "midpoint", want: func(metric pmetric.Metric) { - metric.SetName("test-metric") dp := metric.SetEmptyHistogram().DataPoints().AppendEmpty() metric.Histogram().SetAggregationTemporality(1) @@ -417,7 +403,6 @@ func 
TestMidpoint_convert_exponential_hist_to_explicit_hist(t *testing.T) { // set explictbounds dp.ExplicitBounds().Append(0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0) - }, }, { @@ -519,7 +504,6 @@ func TestUniforn_convert_exponential_hist_to_explicit_hist(t *testing.T) { // set explictbounds dp.ExplicitBounds().Append(1.0, 2.0, 3.0, 4.0, 5.0) - }, }, { @@ -530,7 +514,6 @@ func TestUniforn_convert_exponential_hist_to_explicit_hist(t *testing.T) { arg: []float64{1000.0, 2000.0, 3000.0, 4000.0, 5000.0}, distribution: "uniform", want: func(metric pmetric.Metric) { - metric.SetName("test-metric") dp := metric.SetEmptyHistogram().DataPoints().AppendEmpty() metric.Histogram().SetAggregationTemporality(1) @@ -548,7 +531,6 @@ func TestUniforn_convert_exponential_hist_to_explicit_hist(t *testing.T) { // set explictbounds dp.ExplicitBounds().Append(1000.0, 2000.0, 3000.0, 4000.0, 5000.0) - }, }, { @@ -558,7 +540,6 @@ func TestUniforn_convert_exponential_hist_to_explicit_hist(t *testing.T) { arg: []float64{10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0}, distribution: "uniform", want: func(metric pmetric.Metric) { - metric.SetName("test-metric") dp := metric.SetEmptyHistogram().DataPoints().AppendEmpty() metric.Histogram().SetAggregationTemporality(1) @@ -576,7 +557,6 @@ func TestUniforn_convert_exponential_hist_to_explicit_hist(t *testing.T) { // set explictbounds dp.ExplicitBounds().Append(10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0) - }, }, } @@ -654,7 +634,6 @@ func TestRandom_convert_exponential_hist_to_explicit_hist(t *testing.T) { // set explictbounds dp.ExplicitBounds().Append(1.0, 2.0, 3.0, 4.0, 5.0) - }, }, { @@ -665,7 +644,6 @@ func TestRandom_convert_exponential_hist_to_explicit_hist(t *testing.T) { arg: []float64{1000.0, 2000.0, 3000.0, 4000.0, 5000.0}, distribution: "random", want: func(metric pmetric.Metric) { - metric.SetName("test-metric") dp := metric.SetEmptyHistogram().DataPoints().AppendEmpty() 
metric.Histogram().SetAggregationTemporality(1) @@ -683,7 +661,6 @@ func TestRandom_convert_exponential_hist_to_explicit_hist(t *testing.T) { // set explictbounds dp.ExplicitBounds().Append(1000.0, 2000.0, 3000.0, 4000.0, 5000.0) - }, }, { @@ -693,7 +670,6 @@ func TestRandom_convert_exponential_hist_to_explicit_hist(t *testing.T) { arg: []float64{10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0}, distribution: "random", want: func(metric pmetric.Metric) { - metric.SetName("test-metric") dp := metric.SetEmptyHistogram().DataPoints().AppendEmpty() metric.Histogram().SetAggregationTemporality(1) diff --git a/processor/transformprocessor/internal/metrics/functions_test.go b/processor/transformprocessor/internal/metrics/functions_test.go index 62def6453fe2..cf3ed6adccc0 100644 --- a/processor/transformprocessor/internal/metrics/functions_test.go +++ b/processor/transformprocessor/internal/metrics/functions_test.go @@ -46,7 +46,6 @@ func Test_DataPointFunctions(t *testing.T) { }, ) } - } func Test_MetricFunctions(t *testing.T) { diff --git a/receiver/splunkenterprisereceiver/scraper.go b/receiver/splunkenterprisereceiver/scraper.go index 54af1eae90da..61bf663b6b46 100644 --- a/receiver/splunkenterprisereceiver/scraper.go +++ b/receiver/splunkenterprisereceiver/scraper.go @@ -1732,6 +1732,5 @@ func (s *splunkScraper) scrapeSearchArtifacts(ctx context.Context, now pcommon.T } s.mb.RecordSplunkServerSearchartifactsJobCacheCountDataPoint(now, cacheTotalEntries, s.conf.SHEndpoint.Endpoint) } - } }