diff --git a/.chloggen/routing-by-traces.yaml b/.chloggen/routing-by-traces.yaml new file mode 100644 index 000000000000..3414a5e8064a --- /dev/null +++ b/.chloggen/routing-by-traces.yaml @@ -0,0 +1,27 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: 'enhancement' + +# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver) +component: routingconnector + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Add ability to route by span context + +# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists. +issues: [36276] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# If your change doesn't affect end users or the exported elements of any package, +# you should instead start your pull request title with [chore] or use the "Skip Changelog" label. +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. +# Default: '[user]' +change_logs: [] diff --git a/connector/routingconnector/README.md b/connector/routingconnector/README.md index 02ad40317832..d7a3a7731419 100644 --- a/connector/routingconnector/README.md +++ b/connector/routingconnector/README.md @@ -33,7 +33,7 @@ If you are not already familiar with connectors, you may find it helpful to firs The following settings are available: - `table (required)`: the routing table for this connector. 
-- `table.context (optional, default: resource)`: the [OTTL Context] in which the statement will be evaluated. Currently, only `resource`, `metric`, `log`, and `request` are supported. +- `table.context (optional, default: resource)`: the [OTTL Context] in which the statement will be evaluated. Currently, only `resource`, `span`, `metric`, `log`, and `request` are supported. - `table.statement`: the routing condition provided as the [OTTL] statement. Required if `table.condition` is not provided. May not be used for `request` context. - `table.condition`: the routing condition provided as the [OTTL] condition. Required if `table.statement` is not provided. Required for `request` context. - `table.pipelines (required)`: the list of pipelines to use when the routing condition is met. @@ -43,7 +43,7 @@ The following settings are available: ### Limitations -- The `match_once` setting is only supported when using the `resource` context. If any routes use `metric`, `log` or `request` context, `match_once` must be set to `true`. +- The `match_once` setting is only supported when using the `resource` context. If any routes use `span`, `metric`, `log` or `request` context, `match_once` must be set to `true`. - The `request` context requires use of the `condition` setting, and relies on a very limited grammar. Conditions must be in the form of `request["key"] == "value"` or `request["key"] != "value"`. (In the future, this grammar may be expanded to support more complex conditions.) ### Supported [OTTL] functions @@ -287,7 +287,6 @@ service: ## Differences between the Routing Connector and Routing Processor -- Routing on context values is only supported for logs at this time. - The connector routes to pipelines, not exporters as the processor does. 
[Connectors README]:https://github.com/open-telemetry/opentelemetry-collector/blob/main/connector/README.md diff --git a/connector/routingconnector/config.go b/connector/routingconnector/config.go index fb2f838474c7..33a0c702bca9 100644 --- a/connector/routingconnector/config.go +++ b/connector/routingconnector/config.go @@ -77,7 +77,7 @@ func (c *Config) Validate() error { return err } fallthrough - case "metric", "log": // ok + case "span", "metric", "log": // ok if !c.MatchOnce { return fmt.Errorf(`%q context is not supported with "match_once: false"`, item.Context) } diff --git a/connector/routingconnector/config_test.go b/connector/routingconnector/config_test.go index b79eb4ee1bf3..4a0ef0d0d5a4 100644 --- a/connector/routingconnector/config_test.go +++ b/connector/routingconnector/config_test.go @@ -218,6 +218,22 @@ func TestValidateConfig(t *testing.T) { }, error: "invalid context: invalid", }, + { + name: "span context with match_once false", + config: &Config{ + MatchOnce: false, + Table: []RoutingTableItem{ + { + Context: "span", + Statement: `route() where attributes["attr"] == "acme"`, + Pipelines: []pipeline.ID{ + pipeline.NewIDWithName(pipeline.SignalTraces, "otlp"), + }, + }, + }, + }, + error: `"span" context is not supported with "match_once: false"`, + }, { name: "metric context with match_once false", config: &Config{ diff --git a/connector/routingconnector/internal/ptraceutil/traces.go b/connector/routingconnector/internal/ptraceutil/traces.go index 4f925fb98fcb..e47bb7529dce 100644 --- a/connector/routingconnector/internal/ptraceutil/traces.go +++ b/connector/routingconnector/internal/ptraceutil/traces.go @@ -8,11 +8,54 @@ import "go.opentelemetry.io/collector/pdata/ptrace" // MoveResourcesIf calls f sequentially for each ResourceSpans present in the first ptrace.Traces. // If f returns true, the element is removed from the first ptrace.Traces and added to the second ptrace.Traces. 
func MoveResourcesIf(from, to ptrace.Traces, f func(ptrace.ResourceSpans) bool) { - from.ResourceSpans().RemoveIf(func(rs ptrace.ResourceSpans) bool { - if !f(rs) { + from.ResourceSpans().RemoveIf(func(resourceSpans ptrace.ResourceSpans) bool { + if !f(resourceSpans) { return false } - rs.CopyTo(to.ResourceSpans().AppendEmpty()) + resourceSpans.CopyTo(to.ResourceSpans().AppendEmpty()) return true }) } + +// MoveSpansWithContextIf calls f sequentially for each Span present in the first ptrace.Traces. +// If f returns true, the element is removed from the first ptrace.Traces and added to the second ptrace.Traces. +// Notably, the Resource and Scope associated with the Span are created in the second ptrace.Traces only once. +// Resources or Scopes are removed from the original if they become empty. All ordering is preserved. +func MoveSpansWithContextIf(from, to ptrace.Traces, f func(ptrace.ResourceSpans, ptrace.ScopeSpans, ptrace.Span) bool) { + resourceSpansSlice := from.ResourceSpans() + for i := 0; i < resourceSpansSlice.Len(); i++ { + resourceSpans := resourceSpansSlice.At(i) + scopeSpanSlice := resourceSpans.ScopeSpans() + var resourceSpansCopy *ptrace.ResourceSpans + for j := 0; j < scopeSpanSlice.Len(); j++ { + scopeSpans := scopeSpanSlice.At(j) + spanSlice := scopeSpans.Spans() + var scopeSpansCopy *ptrace.ScopeSpans + spanSlice.RemoveIf(func(span ptrace.Span) bool { + if !f(resourceSpans, scopeSpans, span) { + return false + } + if resourceSpansCopy == nil { + rmc := to.ResourceSpans().AppendEmpty() + resourceSpansCopy = &rmc + resourceSpans.Resource().CopyTo(resourceSpansCopy.Resource()) + resourceSpansCopy.SetSchemaUrl(resourceSpans.SchemaUrl()) + } + if scopeSpansCopy == nil { + smc := resourceSpansCopy.ScopeSpans().AppendEmpty() + scopeSpansCopy = &smc + scopeSpans.Scope().CopyTo(scopeSpansCopy.Scope()) + scopeSpansCopy.SetSchemaUrl(scopeSpans.SchemaUrl()) + } + span.CopyTo(scopeSpansCopy.Spans().AppendEmpty()) + return true + }) + } + 
scopeSpanSlice.RemoveIf(func(sm ptrace.ScopeSpans) bool { + return sm.Spans().Len() == 0 + }) + } + resourceSpansSlice.RemoveIf(func(resourceSpans ptrace.ResourceSpans) bool { + return resourceSpans.ScopeSpans().Len() == 0 + }) +} diff --git a/connector/routingconnector/internal/ptraceutil/traces_test.go b/connector/routingconnector/internal/ptraceutil/traces_test.go index 7a47633e2e8e..40d05c5bec8e 100644 --- a/connector/routingconnector/internal/ptraceutil/traces_test.go +++ b/connector/routingconnector/internal/ptraceutil/traces_test.go @@ -80,3 +80,147 @@ func TestMoveResourcesIf(t *testing.T) { }) } } + +func TestMoveSpansWithContextIf(t *testing.T) { + testCases := []struct { + name string + moveIf func(ptrace.ResourceSpans, ptrace.ScopeSpans, ptrace.Span) bool + from ptrace.Traces + to ptrace.Traces + expectFrom ptrace.Traces + expectTo ptrace.Traces + }{ + { + name: "move_none", + moveIf: func(_ ptrace.ResourceSpans, _ ptrace.ScopeSpans, _ ptrace.Span) bool { + return false + }, + from: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), + to: ptrace.NewTraces(), + expectFrom: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), + expectTo: ptrace.NewTraces(), + }, + { + name: "move_all", + moveIf: func(_ ptrace.ResourceSpans, _ ptrace.ScopeSpans, _ ptrace.Span) bool { + return true + }, + from: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), + to: ptrace.NewTraces(), + expectFrom: ptrace.NewTraces(), + expectTo: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), + }, + { + name: "move_all_from_one_resource", + moveIf: func(rl ptrace.ResourceSpans, _ ptrace.ScopeSpans, _ ptrace.Span) bool { + rname, ok := rl.Resource().Attributes().Get("resourceName") + return ok && rname.AsString() == "resourceB" + }, + from: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), + to: ptrace.NewTraces(), + expectFrom: ptraceutiltest.NewTraces("A", "CD", "EF", "GH"), + expectTo: ptraceutiltest.NewTraces("B", "CD", "EF", "GH"), + }, + { + name: "move_all_from_one_scope", + moveIf: 
func(rl ptrace.ResourceSpans, sl ptrace.ScopeSpans, _ ptrace.Span) bool { + rname, ok := rl.Resource().Attributes().Get("resourceName") + return ok && rname.AsString() == "resourceB" && sl.Scope().Name() == "scopeC" + }, + from: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), + to: ptrace.NewTraces(), + expectFrom: ptraceutiltest.NewTracesFromOpts( + ptraceutiltest.WithResource('A', + ptraceutiltest.WithScope('C', ptraceutiltest.WithSpan('E', "GH"), ptraceutiltest.WithSpan('F', "GH")), + ptraceutiltest.WithScope('D', ptraceutiltest.WithSpan('E', "GH"), ptraceutiltest.WithSpan('F', "GH")), + ), + ptraceutiltest.WithResource('B', + ptraceutiltest.WithScope('D', ptraceutiltest.WithSpan('E', "GH"), ptraceutiltest.WithSpan('F', "GH")), + ), + ), + expectTo: ptraceutiltest.NewTraces("B", "C", "EF", "GH"), + }, + { + name: "move_all_from_one_scope_in_each_resource", + moveIf: func(_ ptrace.ResourceSpans, sl ptrace.ScopeSpans, _ ptrace.Span) bool { + return sl.Scope().Name() == "scopeD" + }, + from: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), + to: ptrace.NewTraces(), + expectFrom: ptraceutiltest.NewTraces("AB", "C", "EF", "GH"), + expectTo: ptraceutiltest.NewTraces("AB", "D", "EF", "GH"), + }, + { + name: "move_one", + moveIf: func(rl ptrace.ResourceSpans, sl ptrace.ScopeSpans, m ptrace.Span) bool { + rname, ok := rl.Resource().Attributes().Get("resourceName") + return ok && rname.AsString() == "resourceA" && sl.Scope().Name() == "scopeD" && m.Name() == "spanF" + }, + from: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), + to: ptrace.NewTraces(), + expectFrom: ptraceutiltest.NewTracesFromOpts( + ptraceutiltest.WithResource('A', + ptraceutiltest.WithScope('C', ptraceutiltest.WithSpan('E', "GH"), ptraceutiltest.WithSpan('F', "GH")), + ptraceutiltest.WithScope('D', ptraceutiltest.WithSpan('E', "GH")), + ), + ptraceutiltest.WithResource('B', + ptraceutiltest.WithScope('C', ptraceutiltest.WithSpan('E', "GH"), ptraceutiltest.WithSpan('F', "GH")), + 
ptraceutiltest.WithScope('D', ptraceutiltest.WithSpan('E', "GH"), ptraceutiltest.WithSpan('F', "GH")), + ), + ), + expectTo: ptraceutiltest.NewTraces("A", "D", "F", "GH"), + }, + { + name: "move_one_from_each_scope", + moveIf: func(_ ptrace.ResourceSpans, _ ptrace.ScopeSpans, m ptrace.Span) bool { + return m.Name() == "spanE" + }, + from: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), + to: ptrace.NewTraces(), + expectFrom: ptraceutiltest.NewTraces("AB", "CD", "F", "GH"), + expectTo: ptraceutiltest.NewTraces("AB", "CD", "E", "GH"), + }, + { + name: "move_one_from_each_scope_in_one_resource", + moveIf: func(rl ptrace.ResourceSpans, _ ptrace.ScopeSpans, m ptrace.Span) bool { + rname, ok := rl.Resource().Attributes().Get("resourceName") + return ok && rname.AsString() == "resourceB" && m.Name() == "spanE" + }, + from: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), + to: ptrace.NewTraces(), + expectFrom: ptraceutiltest.NewTracesFromOpts( + ptraceutiltest.WithResource('A', + ptraceutiltest.WithScope('C', ptraceutiltest.WithSpan('E', "GH"), ptraceutiltest.WithSpan('F', "GH")), + ptraceutiltest.WithScope('D', ptraceutiltest.WithSpan('E', "GH"), ptraceutiltest.WithSpan('F', "GH")), + ), + ptraceutiltest.WithResource('B', + ptraceutiltest.WithScope('C', ptraceutiltest.WithSpan('F', "GH")), + ptraceutiltest.WithScope('D', ptraceutiltest.WithSpan('F', "GH")), + ), + ), + expectTo: ptraceutiltest.NewTraces("B", "CD", "E", "GH"), + }, + { + name: "move_some_to_preexisting", + moveIf: func(_ ptrace.ResourceSpans, sl ptrace.ScopeSpans, _ ptrace.Span) bool { + return sl.Scope().Name() == "scopeD" + }, + from: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), + to: ptraceutiltest.NewTraces("1", "2", "3", "4"), + expectFrom: ptraceutiltest.NewTraces("AB", "C", "EF", "GH"), + expectTo: ptraceutiltest.NewTracesFromOpts( + ptraceutiltest.WithResource('1', ptraceutiltest.WithScope('2', ptraceutiltest.WithSpan('3', "4"))), + ptraceutiltest.WithResource('A', 
ptraceutiltest.WithScope('D', ptraceutiltest.WithSpan('E', "GH"), ptraceutiltest.WithSpan('F', "GH"))), + ptraceutiltest.WithResource('B', ptraceutiltest.WithScope('D', ptraceutiltest.WithSpan('E', "GH"), ptraceutiltest.WithSpan('F', "GH"))), + ), + }, + } + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + ptraceutil.MoveSpansWithContextIf(tt.from, tt.to, tt.moveIf) + assert.NoError(t, ptracetest.CompareTraces(tt.expectFrom, tt.from), "from not modified as expected") + assert.NoError(t, ptracetest.CompareTraces(tt.expectTo, tt.to), "to not as expected") + }) + } +} diff --git a/connector/routingconnector/internal/ptraceutiltest/traces.go b/connector/routingconnector/internal/ptraceutiltest/traces.go index 863eeb888b58..4317a113e34b 100644 --- a/connector/routingconnector/internal/ptraceutiltest/traces.go +++ b/connector/routingconnector/internal/ptraceutiltest/traces.go @@ -42,3 +42,57 @@ func NewTraces(resourceIDs, scopeIDs, spanIDs, spanEventIDs string) ptrace.Trace } return td } + +type Resource struct { + id byte + scopes []Scope +} + +type Scope struct { + id byte + spans []Span +} + +type Span struct { + id byte + spanEvents string +} + +func WithResource(id byte, scopes ...Scope) Resource { + r := Resource{id: id} + r.scopes = append(r.scopes, scopes...) + return r +} + +func WithScope(id byte, spans ...Span) Scope { + s := Scope{id: id} + s.spans = append(s.spans, spans...) + return s +} + +func WithSpan(id byte, spanEvents string) Span { + return Span{id: id, spanEvents: spanEvents} +} + +// NewTracesFromOpts creates a ptrace.Traces with the specified resources, scopes, spans, +// and span events. The general idea is the same as NewTraces, but this function allows for +// more flexibility in creating non-uniform structures. 
+func NewTracesFromOpts(resources ...Resource) ptrace.Traces { + td := ptrace.NewTraces() + for _, resource := range resources { + r := td.ResourceSpans().AppendEmpty() + r.Resource().Attributes().PutStr("resourceName", "resource"+string(resource.id)) + for _, scope := range resource.scopes { + ss := r.ScopeSpans().AppendEmpty() + ss.Scope().SetName("scope" + string(scope.id)) + for _, span := range scope.spans { + s := ss.Spans().AppendEmpty() + s.SetName("span" + string(span.id)) + for i := 0; i < len(span.spanEvents); i++ { + s.Events().AppendEmpty().Attributes().PutStr("spanEventName", "spanEvent"+string(span.spanEvents[i])) + } + } + } + } + return td +} diff --git a/connector/routingconnector/internal/ptraceutiltest/traces_test.go b/connector/routingconnector/internal/ptraceutiltest/traces_test.go index 0de27955b630..41d553444d38 100644 --- a/connector/routingconnector/internal/ptraceutiltest/traces_test.go +++ b/connector/routingconnector/internal/ptraceutiltest/traces_test.go @@ -18,6 +18,7 @@ func TestNewTraces(t *testing.T) { t.Run("empty", func(t *testing.T) { expected := ptrace.NewTraces() assert.NoError(t, ptracetest.CompareTraces(expected, ptraceutiltest.NewTraces("", "", "", ""))) + assert.NoError(t, ptracetest.CompareTraces(expected, ptraceutiltest.NewTracesFromOpts())) }) t.Run("simple", func(t *testing.T) { @@ -33,7 +34,15 @@ func TestNewTraces(t *testing.T) { se.Attributes().PutStr("spanEventName", "spanEventD") // resourceA.scopeB.spanC.spanEventD return td }() + fromOpts := ptraceutiltest.NewTracesFromOpts( + ptraceutiltest.WithResource('A', + ptraceutiltest.WithScope('B', + ptraceutiltest.WithSpan('C', "D"), + ), + ), + ) assert.NoError(t, ptracetest.CompareTraces(expected, ptraceutiltest.NewTraces("A", "B", "C", "D"))) + assert.NoError(t, ptracetest.CompareTraces(expected, fromOpts)) }) t.Run("two_resources", func(t *testing.T) { @@ -57,7 +66,20 @@ func TestNewTraces(t *testing.T) { se.Attributes().PutStr("spanEventName", "spanEventE") // 
resourceB.scopeC.spanD.spanEventE return td }() + fromOpts := ptraceutiltest.NewTracesFromOpts( + ptraceutiltest.WithResource('A', + ptraceutiltest.WithScope('C', + ptraceutiltest.WithSpan('D', "E"), + ), + ), + ptraceutiltest.WithResource('B', + ptraceutiltest.WithScope('C', + ptraceutiltest.WithSpan('D', "E"), + ), + ), + ) assert.NoError(t, ptracetest.CompareTraces(expected, ptraceutiltest.NewTraces("AB", "C", "D", "E"))) + assert.NoError(t, ptracetest.CompareTraces(expected, fromOpts)) }) t.Run("two_scopes", func(t *testing.T) { @@ -79,7 +101,18 @@ func TestNewTraces(t *testing.T) { se.Attributes().PutStr("spanEventName", "spanEventE") // resourceA.scopeC.spanD.spanEventE return td }() + fromOpts := ptraceutiltest.NewTracesFromOpts( + ptraceutiltest.WithResource('A', + ptraceutiltest.WithScope('B', + ptraceutiltest.WithSpan('D', "E"), + ), + ptraceutiltest.WithScope('C', + ptraceutiltest.WithSpan('D', "E"), + ), + ), + ) assert.NoError(t, ptracetest.CompareTraces(expected, ptraceutiltest.NewTraces("A", "BC", "D", "E"))) + assert.NoError(t, ptracetest.CompareTraces(expected, fromOpts)) }) t.Run("two_spans", func(t *testing.T) { @@ -99,7 +132,16 @@ func TestNewTraces(t *testing.T) { se.Attributes().PutStr("spanEventName", "spanEventE") // resourceA.scopeB.spanD.spanEventE return td }() + fromOpts := ptraceutiltest.NewTracesFromOpts( + ptraceutiltest.WithResource('A', + ptraceutiltest.WithScope('B', + ptraceutiltest.WithSpan('C', "E"), + ptraceutiltest.WithSpan('D', "E"), + ), + ), + ) assert.NoError(t, ptracetest.CompareTraces(expected, ptraceutiltest.NewTraces("A", "B", "CD", "E"))) + assert.NoError(t, ptracetest.CompareTraces(expected, fromOpts)) }) t.Run("two_spanevents", func(t *testing.T) { @@ -117,6 +159,14 @@ func TestNewTraces(t *testing.T) { se.Attributes().PutStr("spanEventName", "spanEventE") // resourceA.scopeB.spanC.spanEventE return td }() + fromOpts := ptraceutiltest.NewTracesFromOpts( + ptraceutiltest.WithResource('A', + 
ptraceutiltest.WithScope('B', + ptraceutiltest.WithSpan('C', "DE"), + ), + ), + ) assert.NoError(t, ptracetest.CompareTraces(expected, ptraceutiltest.NewTraces("A", "B", "C", "DE"))) + assert.NoError(t, ptracetest.CompareTraces(expected, fromOpts)) }) } diff --git a/connector/routingconnector/router.go b/connector/routingconnector/router.go index 01dd13143261..98f05bc92287 100644 --- a/connector/routingconnector/router.go +++ b/connector/routingconnector/router.go @@ -17,6 +17,7 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottllog" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlmetric" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan" ) var errPipelineNotFound = errors.New("pipeline not found") @@ -32,6 +33,7 @@ type consumerProvider[C any] func(...pipeline.ID) (C, error) type router[C any] struct { logger *zap.Logger resourceParser ottl.Parser[ottlresource.TransformContext] + spanParser ottl.Parser[ottlspan.TransformContext] metricParser ottl.Parser[ottlmetric.TransformContext] logParser ottl.Parser[ottllog.TransformContext] @@ -74,16 +76,19 @@ type routingItem[C any] struct { statementContext string requestCondition *requestCondition resourceStatement *ottl.Statement[ottlresource.TransformContext] + spanStatement *ottl.Statement[ottlspan.TransformContext] metricStatement *ottl.Statement[ottlmetric.TransformContext] logStatement *ottl.Statement[ottllog.TransformContext] } func (r *router[C]) buildParsers(table []RoutingTableItem, settings component.TelemetrySettings) error { - var buildResource, buildMetric, buildLog bool + var buildResource, buildSpan, buildMetric, buildLog bool for _, item := range table { switch item.Context { case "", "resource": buildResource = true + case "span": + buildSpan = true case "metric": buildMetric = true case "log": @@ -103,6 
+108,17 @@ func (r *router[C]) buildParsers(table []RoutingTableItem, settings component.Te errs = errors.Join(errs, err) } } + if buildSpan { + parser, err := ottlspan.NewParser( + common.Functions[ottlspan.TransformContext](), + settings, + ) + if err == nil { + r.spanParser = parser + } else { + errs = errors.Join(errs, err) + } + } if buildMetric { parser, err := ottlmetric.NewParser( common.Functions[ottlmetric.TransformContext](), @@ -110,8 +126,8 @@ func (r *router[C]) buildParsers(table []RoutingTableItem, settings component.Te ) if err == nil { r.metricParser = parser } else { errs = errors.Join(errs, err) } } if buildLog { @@ -190,6 +204,12 @@ func (r *router[C]) registerRouteConsumers() (err error) { return err } route.resourceStatement = statement + case "span": + statement, err := r.spanParser.ParseStatement(item.Statement) + if err != nil { + return err + } + route.spanStatement = statement case "metric": statement, err := r.metricParser.ParseStatement(item.Statement) if err != nil { diff --git a/connector/routingconnector/traces.go b/connector/routingconnector/traces.go index a82ee85a9973..7df2effd74f0 100644 --- a/connector/routingconnector/traces.go +++ b/connector/routingconnector/traces.go @@ -16,6 +16,7 @@ import ( "github.com/open-telemetry/opentelemetry-collector-contrib/connector/routingconnector/internal/ptraceutil" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl" "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlresource" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/contexts/ottlspan" ) type tracesConnector struct { @@ -88,6 +89,15 @@ func (c *tracesConnector) switchTraces(ctx context.Context, td ptrace.Traces) er return isMatch }, ) + case "span": + ptraceutil.MoveSpansWithContextIf(td, matchedSpans, + func(rs ptrace.ResourceSpans, ss ptrace.ScopeSpans, s ptrace.Span) bool { + mtx := ottlspan.NewTransformContext(s, ss.Scope(), rs.Resource(), ss, rs) + _, 
isMatch, err := route.spanStatement.Execute(ctx, mtx) + errs = errors.Join(errs, err) + return isMatch + }, + ) } if errs != nil { if c.config.ErrorMode == ottl.PropagateError { diff --git a/connector/routingconnector/traces_test.go b/connector/routingconnector/traces_test.go index 291e8fd230af..78f7e46f414a 100644 --- a/connector/routingconnector/traces_test.go +++ b/connector/routingconnector/traces_test.go @@ -429,12 +429,21 @@ func TestTracesConnectorDetailed(t *testing.T) { isAcme := `request["X-Tenant"] == "acme"` - isAnyResource := `attributes["resourceName"] != nil` isResourceA := `attributes["resourceName"] == "resourceA"` isResourceB := `attributes["resourceName"] == "resourceB"` isResourceX := `attributes["resourceName"] == "resourceX"` isResourceY := `attributes["resourceName"] == "resourceY"` + isSpanE := `name == "spanE"` + isSpanF := `name == "spanF"` + isSpanX := `name == "spanX"` + isSpanY := `name == "spanY"` + + isScopeCFromLowerContext := `instrumentation_scope.name == "scopeC"` + isScopeDFromLowerContext := `instrumentation_scope.name == "scopeD"` + + isResourceBFromLowerContext := `resource.attributes["resourceName"] == "resourceB"` + testCases := []struct { name string cfg *Config @@ -537,7 +546,7 @@ func TestTracesConnectorDetailed(t *testing.T) { { name: "resource/all_match_first_only", cfg: testConfig( - withRoute("resource", isAnyResource, idSink0), + withRoute("resource", "true", idSink0), withRoute("resource", isResourceY, idSink1), withDefault(idSinkD), ), @@ -550,7 +559,7 @@ func TestTracesConnectorDetailed(t *testing.T) { name: "resource/all_match_last_only", cfg: testConfig( withRoute("resource", isResourceX, idSink0), - withRoute("resource", isAnyResource, idSink1), + withRoute("resource", "true", idSink1), withDefault(idSinkD), ), input: ptraceutiltest.NewTraces("AB", "CD", "EF", "FG"), @@ -561,7 +570,7 @@ func TestTracesConnectorDetailed(t *testing.T) { { name: "resource/all_match_only_once", cfg: testConfig( - 
withRoute("resource", isAnyResource, idSink0), + withRoute("resource", "true", idSink0), withRoute("resource", isResourceA+" or "+isResourceB, idSink1), withDefault(idSinkD), ), @@ -628,6 +637,169 @@ func TestTracesConnectorDetailed(t *testing.T) { expectSink1: ptrace.Traces{}, expectSinkD: ptrace.Traces{}, }, + { + name: "span/all_match_first_only", + cfg: testConfig( + withRoute("span", "true", idSink0), + withRoute("span", isSpanY, idSink1), + withDefault(idSinkD), + ), + input: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), + expectSink0: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), + expectSink1: ptrace.Traces{}, + expectSinkD: ptrace.Traces{}, + }, + { + name: "span/all_match_last_only", + cfg: testConfig( + withRoute("span", isSpanX, idSink0), + withRoute("span", "true", idSink1), + withDefault(idSinkD), + ), + input: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), + expectSink0: ptrace.Traces{}, + expectSink1: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), + expectSinkD: ptrace.Traces{}, + }, + { + name: "span/all_match_only_once", + cfg: testConfig( + withRoute("span", "true", idSink0), + withRoute("span", isSpanE+" or "+isSpanF, idSink1), + withDefault(idSinkD), + ), + input: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), + expectSink0: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), + expectSink1: ptrace.Traces{}, + expectSinkD: ptrace.Traces{}, + }, + { + name: "span/each_matches_one", + cfg: testConfig( + withRoute("span", isSpanE, idSink0), + withRoute("span", isSpanF, idSink1), + withDefault(idSinkD), + ), + input: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), + expectSink0: ptraceutiltest.NewTraces("AB", "CD", "E", "GH"), + expectSink1: ptraceutiltest.NewTraces("AB", "CD", "F", "GH"), + expectSinkD: ptrace.Traces{}, + }, + { + name: "span/some_match_with_default", + cfg: testConfig( + withRoute("span", isSpanX, idSink0), + withRoute("span", isSpanF, idSink1), + withDefault(idSinkD), + ), + input: ptraceutiltest.NewTraces("AB", 
"CD", "EF", "GH"), + expectSink0: ptrace.Traces{}, + expectSink1: ptraceutiltest.NewTraces("AB", "CD", "F", "GH"), + expectSinkD: ptraceutiltest.NewTraces("AB", "CD", "E", "GH"), + }, + { + name: "span/some_match_without_default", + cfg: testConfig( + withRoute("span", isSpanX, idSink0), + withRoute("span", isSpanF, idSink1), + ), + input: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), + expectSink0: ptrace.Traces{}, + expectSink1: ptraceutiltest.NewTraces("AB", "CD", "F", "GH"), + expectSinkD: ptrace.Traces{}, + }, + { + name: "span/match_none_with_default", + cfg: testConfig( + withRoute("span", isSpanX, idSink0), + withRoute("span", isSpanY, idSink1), + withDefault(idSinkD), + ), + input: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), + expectSink0: ptrace.Traces{}, + expectSink1: ptrace.Traces{}, + expectSinkD: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), + }, + { + name: "span/match_none_without_default", + cfg: testConfig( + withRoute("span", isSpanX, idSink0), + withRoute("span", isSpanY, idSink1), + ), + input: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), + expectSink0: ptrace.Traces{}, + expectSink1: ptrace.Traces{}, + expectSinkD: ptrace.Traces{}, + }, + { + name: "span/with_resource_condition", + cfg: testConfig( + withRoute("span", isResourceBFromLowerContext, idSink0), + withRoute("span", isSpanY, idSink1), + withDefault(idSinkD), + ), + input: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), + expectSink0: ptraceutiltest.NewTraces("B", "CD", "EF", "GH"), + expectSink1: ptrace.Traces{}, + expectSinkD: ptraceutiltest.NewTraces("A", "CD", "EF", "GH"), + }, + { + name: "span/with_scope_condition", + cfg: testConfig( + withRoute("span", isScopeCFromLowerContext, idSink0), + withRoute("span", isSpanY, idSink1), + withDefault(idSinkD), + ), + input: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), + expectSink0: ptraceutiltest.NewTraces("AB", "C", "EF", "GH"), + expectSink1: ptrace.Traces{}, + expectSinkD: ptraceutiltest.NewTraces("AB", 
"D", "EF", "GH"), + }, + { + name: "span/with_resource_and_scope_conditions", + cfg: testConfig( + withRoute("span", isResourceBFromLowerContext+" and "+isScopeDFromLowerContext, idSink0), + withRoute("span", isSpanY, idSink1), + withDefault(idSinkD), + ), + input: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), + expectSink0: ptraceutiltest.NewTraces("B", "D", "EF", "GH"), + expectSink1: ptrace.Traces{}, + expectSinkD: ptraceutiltest.NewTracesFromOpts( + ptraceutiltest.WithResource('A', + ptraceutiltest.WithScope('C', ptraceutiltest.WithSpan('E', "GH"), ptraceutiltest.WithSpan('F', "GH")), + ptraceutiltest.WithScope('D', ptraceutiltest.WithSpan('E', "GH"), ptraceutiltest.WithSpan('F', "GH")), + ), + ptraceutiltest.WithResource('B', + ptraceutiltest.WithScope('C', ptraceutiltest.WithSpan('E', "GH"), ptraceutiltest.WithSpan('F', "GH")), + ), + ), + }, + { + name: "mixed/match_resource_then_span", + cfg: testConfig( + withRoute("resource", isResourceA, idSink0), + withRoute("span", isSpanE, idSink1), + withDefault(idSinkD), + ), + input: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), + expectSink0: ptraceutiltest.NewTraces("A", "CD", "EF", "GH"), + expectSink1: ptraceutiltest.NewTraces("B", "CD", "E", "GH"), + expectSinkD: ptraceutiltest.NewTraces("B", "CD", "F", "GH"), + }, + { + name: "mixed/match_span_then_resource", + cfg: testConfig( + withRoute("span", isSpanE, idSink0), + withRoute("resource", isResourceB, idSink1), + withDefault(idSinkD), + ), + input: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), + expectSink0: ptraceutiltest.NewTraces("AB", "CD", "E", "GH"), + expectSink1: ptraceutiltest.NewTraces("B", "CD", "F", "GH"), + expectSinkD: ptraceutiltest.NewTraces("A", "CD", "F", "GH"), + }, + { name: "mixed/match_resource_then_grpc_request", cfg: testConfig( @@ -641,6 +813,19 @@ func TestTracesConnectorDetailed(t *testing.T) { expectSink1: ptraceutiltest.NewTraces("B", "CD", "EF", "GH"), expectSinkD: ptrace.Traces{}, }, + { + name: 
"mixed/match_span_then_grpc_request", + cfg: testConfig( + withRoute("span", isSpanF, idSink0), + withRoute("request", isAcme, idSink1), + withDefault(idSinkD), + ), + ctx: withGRPCMetadata(context.Background(), map[string]string{"X-Tenant": "acme"}), + input: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), + expectSink0: ptraceutiltest.NewTraces("AB", "CD", "F", "GH"), + expectSink1: ptraceutiltest.NewTraces("AB", "CD", "E", "GH"), + expectSinkD: ptrace.Traces{}, + }, { name: "mixed/match_resource_then_http_request", cfg: testConfig( @@ -654,6 +839,19 @@ expectSink1: ptraceutiltest.NewTraces("B", "CD", "EF", "GH"), expectSinkD: ptrace.Traces{}, }, + { + name: "mixed/match_span_then_http_request", + cfg: testConfig( + withRoute("span", isSpanF, idSink0), + withRoute("request", isAcme, idSink1), + withDefault(idSinkD), + ), + ctx: withHTTPMetadata(context.Background(), map[string][]string{"X-Tenant": {"acme"}}), + input: ptraceutiltest.NewTraces("AB", "CD", "EF", "GH"), + expectSink0: ptraceutiltest.NewTraces("AB", "CD", "F", "GH"), + expectSink1: ptraceutiltest.NewTraces("AB", "CD", "E", "GH"), + expectSinkD: ptrace.Traces{}, + }, } for _, tt := range testCases { diff --git a/processor/attributesprocessor/attributes_metric.go b/processor/attributesprocessor/attributes_metric.go index b09de40d4340..3ae4afa77319 100644 --- a/processor/attributesprocessor/attributes_metric.go +++ b/processor/attributesprocessor/attributes_metric.go @@ -62,7 +62,6 @@ func (a *metricAttributesProcessor) processMetrics(ctx context.Context, md pmetr // Attributes are provided for each log and trace, but not at the metric level // Need to process attributes for every data point within a metric. func (a *metricAttributesProcessor) processMetricAttributes(ctx context.Context, m pmetric.Metric) { - // This is a lot of repeated code, but since there is no single parent superclass // between metric data types, we can't use polymorphism. 
//exhaustive:enforce diff --git a/processor/attributesprocessor/factory.go b/processor/attributesprocessor/factory.go index 9a7a73dc75e7..5844db4744d2 100644 --- a/processor/attributesprocessor/factory.go +++ b/processor/attributesprocessor/factory.go @@ -92,7 +92,6 @@ func createMetricsProcessor( cfg component.Config, nextConsumer consumer.Metrics, ) (processor.Metrics, error) { - oCfg := cfg.(*Config) attrProc, err := attraction.NewAttrProc(&oCfg.Settings) if err != nil { diff --git a/processor/deltatocumulativeprocessor/internal/data/expo_test.go b/processor/deltatocumulativeprocessor/internal/data/expo_test.go index f544932a4530..bbc88dc9c5eb 100644 --- a/processor/deltatocumulativeprocessor/internal/data/expo_test.go +++ b/processor/deltatocumulativeprocessor/internal/data/expo_test.go @@ -117,7 +117,6 @@ func TestExpoAdd(t *testing.T) { } t.Run(cs.name, run(cs.dp, cs.in)) } - } func rawbs(data []uint64, offset int32) expo.Buckets { diff --git a/processor/deltatocumulativeprocessor/internal/delta/delta_test.go b/processor/deltatocumulativeprocessor/internal/delta/delta_test.go index 4b0be3be724d..961428ed3c4a 100644 --- a/processor/deltatocumulativeprocessor/internal/delta/delta_test.go +++ b/processor/deltatocumulativeprocessor/internal/delta/delta_test.go @@ -222,7 +222,6 @@ func TestErrs(t *testing.T) { require.Equal(t, r1.IntValue(), r2.IntValue()) }) } - } func time(ts int) pcommon.Timestamp { diff --git a/processor/deltatocumulativeprocessor/processor_test.go b/processor/deltatocumulativeprocessor/processor_test.go index 12d4452e621f..506cd4a7a511 100644 --- a/processor/deltatocumulativeprocessor/processor_test.go +++ b/processor/deltatocumulativeprocessor/processor_test.go @@ -72,7 +72,6 @@ func TestProcessor(t *testing.T) { } } }) - } } diff --git a/processor/deltatorateprocessor/processor_test.go b/processor/deltatorateprocessor/processor_test.go index facc997d0c65..3b859d490e0c 100644 --- a/processor/deltatorateprocessor/processor_test.go +++ 
b/processor/deltatorateprocessor/processor_test.go @@ -172,7 +172,6 @@ func TestCumulativeToDeltaProcessor(t *testing.T) { require.Equal(t, eDataPoints.At(j).DoubleValue(), aDataPoints.At(j).DoubleValue()) } } - } require.NoError(t, mgp.Shutdown(ctx)) diff --git a/processor/filterprocessor/config_test.go b/processor/filterprocessor/config_test.go index eda1d7cec90e..d009cdd739a0 100644 --- a/processor/filterprocessor/config_test.go +++ b/processor/filterprocessor/config_test.go @@ -97,7 +97,6 @@ func TestLoadingConfigStrict(t *testing.T) { // TestLoadingConfigStrictLogs tests loading testdata/config_logs_strict.yaml func TestLoadingConfigStrictLogs(t *testing.T) { - testDataLogPropertiesInclude := &LogMatchProperties{ LogMatchType: strictType, ResourceAttributes: []filterconfig.Attribute{ @@ -180,7 +179,6 @@ func TestLoadingConfigStrictLogs(t *testing.T) { // TestLoadingConfigSeverityLogsStrict tests loading testdata/config_logs_severity_strict.yaml func TestLoadingConfigSeverityLogsStrict(t *testing.T) { - testDataLogPropertiesInclude := &LogMatchProperties{ LogMatchType: strictType, SeverityTexts: []string{"INFO"}, @@ -305,7 +303,6 @@ func TestLoadingConfigSeverityLogsRegexp(t *testing.T) { // TestLoadingConfigBodyLogsStrict tests loading testdata/config_logs_body_strict.yaml func TestLoadingConfigBodyLogsStrict(t *testing.T) { - testDataLogPropertiesInclude := &LogMatchProperties{ LogMatchType: strictType, LogBodies: []string{"This is an important event"}, @@ -368,7 +365,6 @@ func TestLoadingConfigBodyLogsStrict(t *testing.T) { // TestLoadingConfigBodyLogsStrict tests loading testdata/config_logs_body_regexp.yaml func TestLoadingConfigBodyLogsRegexp(t *testing.T) { - testDataLogPropertiesInclude := &LogMatchProperties{ LogMatchType: regexpType, LogBodies: []string{"^IMPORTANT:"}, @@ -832,7 +828,6 @@ func TestLogSeverity_severityValidate(t *testing.T) { } func TestLoadingConfigOTTL(t *testing.T) { - cm, err := confmaptest.LoadConf(filepath.Join("testdata", 
"config_ottl.yaml")) require.NoError(t, err) diff --git a/processor/filterprocessor/logs_test.go b/processor/filterprocessor/logs_test.go index ebe1d037c32b..0c4783e21bfd 100644 --- a/processor/filterprocessor/logs_test.go +++ b/processor/filterprocessor/logs_test.go @@ -794,7 +794,6 @@ func TestFilterLogProcessorTelemetry(t *testing.T) { } tel.assertMetrics(t, want) - } func constructLogs() plog.Logs { @@ -825,7 +824,6 @@ func fillLogOne(log plog.LogRecord) { log.Attributes().PutStr("http.path", "/health") log.Attributes().PutStr("http.url", "http://localhost/health") log.Attributes().PutStr("flags", "A|B|C") - } func fillLogTwo(log plog.LogRecord) { @@ -836,5 +834,4 @@ func fillLogTwo(log plog.LogRecord) { log.Attributes().PutStr("http.path", "/health") log.Attributes().PutStr("http.url", "http://localhost/health") log.Attributes().PutStr("flags", "C|D") - } diff --git a/processor/filterprocessor/metrics_test.go b/processor/filterprocessor/metrics_test.go index 6056d98deca8..6ba0e029f314 100644 --- a/processor/filterprocessor/metrics_test.go +++ b/processor/filterprocessor/metrics_test.go @@ -943,7 +943,6 @@ func TestFilterMetricProcessorWithOTTL(t *testing.T) { if tt.filterEverything { assert.Equal(t, processorhelper.ErrSkipProcessingData, err) } else { - exTd := constructMetrics() tt.want(exTd) assert.Equal(t, exTd, got) diff --git a/processor/filterprocessor/traces_test.go b/processor/filterprocessor/traces_test.go index 39624b97a369..b23cf4b3c007 100644 --- a/processor/filterprocessor/traces_test.go +++ b/processor/filterprocessor/traces_test.go @@ -273,7 +273,6 @@ func TestFilterTraceProcessorWithOTTL(t *testing.T) { if tt.filterEverything { assert.Equal(t, processorhelper.ErrSkipProcessingData, err) } else { - exTd := constructTraces() tt.want(exTd) assert.Equal(t, exTd, got) diff --git a/processor/geoipprocessor/factory.go b/processor/geoipprocessor/factory.go index dca0a85cd712..fc2f40e0b0b2 100644 --- a/processor/geoipprocessor/factory.go +++ 
b/processor/geoipprocessor/factory.go @@ -77,7 +77,6 @@ func createGeoIPProviders( } providers = append(providers, provider) - } return providers, nil diff --git a/processor/groupbyattrsprocessor/attribute_groups.go b/processor/groupbyattrsprocessor/attribute_groups.go index e106d79c112f..fef49f112033 100644 --- a/processor/groupbyattrsprocessor/attribute_groups.go +++ b/processor/groupbyattrsprocessor/attribute_groups.go @@ -64,7 +64,6 @@ func (mg *metricsGroup) findOrCreateResourceMetrics(originResource pcommon.Resou referenceResource.MoveTo(rm.Resource()) mg.resourceHashes = append(mg.resourceHashes, referenceResourceHash) return rm - } type logsGroup struct { diff --git a/processor/groupbyattrsprocessor/factory.go b/processor/groupbyattrsprocessor/factory.go index 319c3b00fd4d..666e15e87ce5 100644 --- a/processor/groupbyattrsprocessor/factory.go +++ b/processor/groupbyattrsprocessor/factory.go @@ -65,7 +65,6 @@ func createTracesProcessor( set processor.Settings, cfg component.Config, nextConsumer consumer.Traces) (processor.Traces, error) { - oCfg := cfg.(*Config) gap, err := createGroupByAttrsProcessor(set, oCfg.GroupByKeys) if err != nil { @@ -87,7 +86,6 @@ func createLogsProcessor( set processor.Settings, cfg component.Config, nextConsumer consumer.Logs) (processor.Logs, error) { - oCfg := cfg.(*Config) gap, err := createGroupByAttrsProcessor(set, oCfg.GroupByKeys) if err != nil { @@ -109,7 +107,6 @@ func createMetricsProcessor( set processor.Settings, cfg component.Config, nextConsumer consumer.Metrics) (processor.Metrics, error) { - oCfg := cfg.(*Config) gap, err := createGroupByAttrsProcessor(set, oCfg.GroupByKeys) if err != nil { diff --git a/processor/groupbyattrsprocessor/processor.go b/processor/groupbyattrsprocessor/processor.go index d91193145264..ac1b14224267 100644 --- a/processor/groupbyattrsprocessor/processor.go +++ b/processor/groupbyattrsprocessor/processor.go @@ -90,7 +90,6 @@ func (gap *groupByAttrsProcessor) processLogs(ctx 
context.Context, ld plog.Logs) log.CopyTo(lr) } } - } // Copy the grouped data into output @@ -114,7 +113,6 @@ func (gap *groupByAttrsProcessor) processMetrics(ctx context.Context, md pmetric //exhaustive:enforce switch metric.Type() { - case pmetric.MetricTypeGauge: for pointIndex := 0; pointIndex < metric.Gauge().DataPoints().Len(); pointIndex++ { dataPoint := metric.Gauge().DataPoints().At(pointIndex) @@ -174,7 +172,6 @@ func deleteAttributes(attrsForRemoval, targetAttrs pcommon.Map) { // - whether any attribute matched (true) or none (false) // - the extracted AttributeMap of matching keys and their corresponding values func (gap *groupByAttrsProcessor) extractGroupingAttributes(attrMap pcommon.Map) (bool, pcommon.Map) { - groupingAttributes := pcommon.NewMap() foundMatch := false @@ -191,7 +188,6 @@ func (gap *groupByAttrsProcessor) extractGroupingAttributes(attrMap pcommon.Map) // Searches for metric with same name in the specified InstrumentationLibrary and returns it. If nothing is found, create it. 
func getMetricInInstrumentationLibrary(ilm pmetric.ScopeMetrics, searchedMetric pmetric.Metric) pmetric.Metric { - // Loop through all metrics and try to find the one that matches with the one we search for // (name and type) for i := 0; i < ilm.Metrics().Len(); i++ { @@ -211,7 +207,6 @@ func getMetricInInstrumentationLibrary(ilm pmetric.ScopeMetrics, searchedMetric // Move other special type specific values //exhaustive:enforce switch searchedMetric.Type() { - case pmetric.MetricTypeHistogram: metric.SetEmptyHistogram().SetAggregationTemporality(searchedMetric.Histogram().AggregationTemporality()) @@ -243,7 +238,6 @@ func (gap *groupByAttrsProcessor) getGroupedMetricsFromAttributes( metric pmetric.Metric, attributes pcommon.Map, ) pmetric.Metric { - toBeGrouped, requiredAttributes := gap.extractGroupingAttributes(attributes) if toBeGrouped { gap.telemetryBuilder.ProcessorGroupbyattrsNumGroupedMetrics.Add(ctx, 1) @@ -262,5 +256,4 @@ func (gap *groupByAttrsProcessor) getGroupedMetricsFromAttributes( // Return the metric in this resource return getMetricInInstrumentationLibrary(groupedInstrumentationLibrary, metric) - } diff --git a/processor/groupbyattrsprocessor/processor_test.go b/processor/groupbyattrsprocessor/processor_test.go index 9f9fb1e09c87..ea32b35d4291 100644 --- a/processor/groupbyattrsprocessor/processor_test.go +++ b/processor/groupbyattrsprocessor/processor_test.go @@ -845,7 +845,6 @@ func someExponentialHistogramMetrics(attrs pcommon.Map, instrumentationLibraryCo } func TestMetricAdvancedGrouping(t *testing.T) { - // Input: // // Resource {host.name="localhost"} diff --git a/processor/groupbytraceprocessor/factory.go b/processor/groupbytraceprocessor/factory.go index 06bf13a90437..61ee23aa7a70 100644 --- a/processor/groupbytraceprocessor/factory.go +++ b/processor/groupbytraceprocessor/factory.go @@ -30,7 +30,6 @@ var ( // NewFactory returns a new factory for the Filter processor. 
func NewFactory() processor.Factory { - return processor.NewFactory( metadata.Type, createDefaultConfig, @@ -56,7 +55,6 @@ func createTracesProcessor( params processor.Settings, cfg component.Config, nextConsumer consumer.Traces) (processor.Traces, error) { - oCfg := cfg.(*Config) var st storage diff --git a/processor/k8sattributesprocessor/internal/kube/client.go b/processor/k8sattributesprocessor/internal/kube/client.go index a43049f09ebf..9624fb250b22 100644 --- a/processor/k8sattributesprocessor/internal/kube/client.go +++ b/processor/k8sattributesprocessor/internal/kube/client.go @@ -538,7 +538,6 @@ func (c *WatchClient) extractPodAttributes(pod *api_v1.Pod) map[string]string { // This function removes all data from the Pod except what is required by extraction rules and pod association func removeUnnecessaryPodData(pod *api_v1.Pod, rules ExtractionRules) *api_v1.Pod { - // name, namespace, uid, start time and ip are needed for identifying Pods // there's room to optimize this further, it's kept this way for simplicity transformedPod := api_v1.Pod{ diff --git a/processor/k8sattributesprocessor/internal/kube/client_test.go b/processor/k8sattributesprocessor/internal/kube/client_test.go index 97b0cdc06b16..c904a6291f79 100644 --- a/processor/k8sattributesprocessor/internal/kube/client_test.go +++ b/processor/k8sattributesprocessor/internal/kube/client_test.go @@ -88,7 +88,6 @@ func podAddAndUpdateTest(t *testing.T, c *WatchClient, handler func(obj any)) { assert.Equal(t, "2.2.2.2", got.Address) assert.Equal(t, "podC", got.Name) assert.Equal(t, "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee", got.PodUID) - } func namespaceAddAndUpdateTest(t *testing.T, c *WatchClient, handler func(obj any)) { @@ -290,7 +289,6 @@ func TestReplicaSetHandler(t *testing.T) { Obj: replicaset, }) assert.Empty(t, c.ReplicaSets) - } func TestPodHostNetwork(t *testing.T) { @@ -1373,7 +1371,6 @@ func TestFilters(t *testing.T) { assert.Equal(t, tc.fields, inf.fieldSelector.String()) }) } - } func 
TestPodIgnorePatterns(t *testing.T) { diff --git a/processor/k8sattributesprocessor/internal/kube/informer.go b/processor/k8sattributesprocessor/internal/kube/informer.go index ddb10c24f060..85459afe53bb 100644 --- a/processor/k8sattributesprocessor/internal/kube/informer.go +++ b/processor/k8sattributesprocessor/internal/kube/informer.go @@ -70,7 +70,6 @@ func informerListFuncWithSelectors(client kubernetes.Interface, namespace string opts.FieldSelector = fs.String() return client.CoreV1().Pods(namespace).List(context.Background(), opts) } - } func informerWatchFuncWithSelectors(client kubernetes.Interface, namespace string, ls labels.Selector, fs fields.Selector) cache.WatchFunc { @@ -120,7 +119,6 @@ func namespaceInformerListFunc(client kubernetes.Interface) cache.ListFunc { return func(opts metav1.ListOptions) (runtime.Object, error) { return client.CoreV1().Namespaces().List(context.Background(), opts) } - } func namespaceInformerWatchFunc(client kubernetes.Interface) cache.WatchFunc { diff --git a/processor/k8sattributesprocessor/options_test.go b/processor/k8sattributesprocessor/options_test.go index a87874597c7c..9afe3cdee1dc 100644 --- a/processor/k8sattributesprocessor/options_test.go +++ b/processor/k8sattributesprocessor/options_test.go @@ -490,7 +490,6 @@ func TestWithFilterLabels(t *testing.T) { } func TestWithFilterFields(t *testing.T) { - tests := []struct { name string args []FieldFilterConfig diff --git a/processor/k8sattributesprocessor/processor_test.go b/processor/k8sattributesprocessor/processor_test.go index 4f9e8701b348..f7493cc38cdc 100644 --- a/processor/k8sattributesprocessor/processor_test.go +++ b/processor/k8sattributesprocessor/processor_test.go @@ -321,7 +321,6 @@ func (strAddr) Network() string { } func TestIPDetectionFromContext(t *testing.T) { - addresses := []net.Addr{ &net.IPAddr{ IP: net.IPv4(1, 1, 1, 1), @@ -357,7 +356,6 @@ func TestIPDetectionFromContext(t *testing.T) { assertResourceHasStringAttribute(t, r, "k8s.pod.ip", 
"1.1.1.1") }) } - } func TestNilBatch(t *testing.T) { @@ -1352,7 +1350,6 @@ func TestMetricsProcessorHostname(t *testing.T) { } }) } - } func TestMetricsProcessorHostnameWithPodAssociation(t *testing.T) { @@ -1435,7 +1432,6 @@ func TestMetricsProcessorHostnameWithPodAssociation(t *testing.T) { } }) } - } func TestPassthroughStart(t *testing.T) { diff --git a/processor/logstransformprocessor/processor.go b/processor/logstransformprocessor/processor.go index 09f3a16430c9..72d36fec1307 100644 --- a/processor/logstransformprocessor/processor.go +++ b/processor/logstransformprocessor/processor.go @@ -79,7 +79,6 @@ func (ltp *logsTransformProcessor) Shutdown(ctx context.Context) error { } func (ltp *logsTransformProcessor) Start(ctx context.Context, _ component.Host) error { - wkrCount := int(math.Max(1, float64(runtime.NumCPU()))) ltp.fromConverter = adapter.NewFromPdataConverter(ltp.set, wkrCount) diff --git a/processor/logstransformprocessor/processor_test.go b/processor/logstransformprocessor/processor_test.go index 0ccc71cec10b..fed71c028099 100644 --- a/processor/logstransformprocessor/processor_test.go +++ b/processor/logstransformprocessor/processor_test.go @@ -205,7 +205,6 @@ type laggyOperator struct { } func (t *laggyOperator) Process(ctx context.Context, e *entry.Entry) error { - // Wait for a large amount of time every 100 logs if t.logsCount%100 == 0 { time.Sleep(100 * time.Millisecond) diff --git a/processor/metricsgenerationprocessor/factory_test.go b/processor/metricsgenerationprocessor/factory_test.go index 1d8fe694115c..121cd5c9563c 100644 --- a/processor/metricsgenerationprocessor/factory_test.go +++ b/processor/metricsgenerationprocessor/factory_test.go @@ -40,7 +40,6 @@ func TestCreateProcessors(t *testing.T) { for k := range cm.ToStringMap() { // Check if all processor variations that are defined in test config can be actually created t.Run(k, func(t *testing.T) { - factory := NewFactory() cfg := factory.CreateDefaultConfig() diff --git 
a/processor/metricsgenerationprocessor/processor_test.go b/processor/metricsgenerationprocessor/processor_test.go index e6ebcf3e0e5a..68cffadca240 100644 --- a/processor/metricsgenerationprocessor/processor_test.go +++ b/processor/metricsgenerationprocessor/processor_test.go @@ -325,10 +325,8 @@ func TestMetricsGenerationProcessor(t *testing.T) { case pmetric.NumberDataPointValueTypeInt: require.Equal(t, eDataPoints.At(j).IntValue(), aDataPoints.At(j).IntValue()) } - } } - } require.NoError(t, mgp.Shutdown(ctx)) diff --git a/processor/metricstransformprocessor/factory.go b/processor/metricstransformprocessor/factory.go index 142164880b19..6a87e8feae10 100644 --- a/processor/metricstransformprocessor/factory.go +++ b/processor/metricstransformprocessor/factory.go @@ -127,7 +127,6 @@ func validateConfiguration(config *Config) error { func buildHelperConfig(config *Config, version string) ([]internalTransform, error) { helperDataTransforms := make([]internalTransform, len(config.Transforms)) for i, t := range config.Transforms { - if t.MetricIncludeFilter.MatchType == "" { t.MetricIncludeFilter.MatchType = strictMatchType } diff --git a/processor/metricstransformprocessor/metrics_transform_processor_otlp.go b/processor/metricstransformprocessor/metrics_transform_processor_otlp.go index 1d1abac32830..398cd67cd96e 100644 --- a/processor/metricstransformprocessor/metrics_transform_processor_otlp.go +++ b/processor/metricstransformprocessor/metrics_transform_processor_otlp.go @@ -368,7 +368,6 @@ func canBeCombined(metrics []pmetric.Metric) error { "metrics cannot be combined as they have different aggregation temporalities: %v (%v) and %v (%v)", firstMetric.Name(), firstMetric.Histogram().AggregationTemporality(), metric.Name(), metric.Histogram().AggregationTemporality()) - } case pmetric.MetricTypeExponentialHistogram: if firstMetric.ExponentialHistogram().AggregationTemporality() != metric.ExponentialHistogram().AggregationTemporality() { @@ -376,7 +375,6 @@ func 
canBeCombined(metrics []pmetric.Metric) error { "metrics cannot be combined as they have different aggregation temporalities: %v (%v) and %v (%v)", firstMetric.Name(), firstMetric.ExponentialHistogram().AggregationTemporality(), metric.Name(), metric.ExponentialHistogram().AggregationTemporality()) - } } } diff --git a/processor/probabilisticsamplerprocessor/logsprocessor.go b/processor/probabilisticsamplerprocessor/logsprocessor.go index 9f1122bc6784..fd4fa6b3ff53 100644 --- a/processor/probabilisticsamplerprocessor/logsprocessor.go +++ b/processor/probabilisticsamplerprocessor/logsprocessor.go @@ -268,7 +268,6 @@ func (lsp *logsProcessor) logRecordToPriorityThreshold(logRec plog.LogRecord) sa // The record has supplied a valid alternative sampling probability return th } - } } return sampling.NeverSampleThreshold diff --git a/processor/probabilisticsamplerprocessor/logsprocessor_test.go b/processor/probabilisticsamplerprocessor/logsprocessor_test.go index f018fb49ea94..9da4b6412f95 100644 --- a/processor/probabilisticsamplerprocessor/logsprocessor_test.go +++ b/processor/probabilisticsamplerprocessor/logsprocessor_test.go @@ -385,7 +385,6 @@ func TestLogsSamplingState(t *testing.T) { } for _, tt := range tests { t.Run(fmt.Sprint(tt.name), func(t *testing.T) { - sink := new(consumertest.LogsSink) cfg := &Config{} if tt.cfg != nil { @@ -473,7 +472,6 @@ func TestLogsMissingRandomness(t *testing.T) { {100, traceIDAttributeSource, false, true}, } { t.Run(fmt.Sprint(tt.pct, "_", tt.source, "_", tt.failClosed, "_", mode), func(t *testing.T) { - ctx := context.Background() logs := plog.NewLogs() record := logs.ResourceLogs().AppendEmpty().ScopeLogs().AppendEmpty().LogRecords().AppendEmpty() diff --git a/processor/probabilisticsamplerprocessor/tracesprocessor_test.go b/processor/probabilisticsamplerprocessor/tracesprocessor_test.go index 25883fa0fc01..74cdd1f5a1a3 100644 --- a/processor/probabilisticsamplerprocessor/tracesprocessor_test.go +++ 
b/processor/probabilisticsamplerprocessor/tracesprocessor_test.go @@ -222,7 +222,6 @@ func Test_tracesamplerprocessor_SamplingPercentageRange_MultipleResourceSpans(t assert.Equal(t, tt.resourceSpanPerTrace*tt.numTracesPerBatch, sink.SpanCount()) sink.Reset() } - }) } } @@ -246,7 +245,6 @@ func Test_tracessamplerprocessor_MissingRandomness(t *testing.T) { {100, false, true}, } { t.Run(fmt.Sprint(tt.pct, "_", tt.failClosed), func(t *testing.T) { - ctx := context.Background() traces := ptrace.NewTraces() span := traces.ResourceSpans().AppendEmpty().ScopeSpans().AppendEmpty().Spans().AppendEmpty() @@ -388,7 +386,6 @@ func Test_tracesamplerprocessor_SpanSamplingPriority(t *testing.T) { for _, mode := range AllModes { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - sink := new(consumertest.TracesSink) cfg := &Config{} @@ -846,7 +843,6 @@ func Test_tracesamplerprocessor_TraceState(t *testing.T) { for _, tt := range tests { for _, mode := range []SamplerMode{Equalizing, Proportional} { t.Run(fmt.Sprint(mode, "_", tt.name), func(t *testing.T) { - sink := new(consumertest.TracesSink) cfg := &Config{} if tt.cfg != nil { @@ -1013,7 +1009,6 @@ func Test_tracesamplerprocessor_TraceStateErrors(t *testing.T) { expectMessage := "" if tt.sf != nil { expectMessage = tt.sf(mode) - } tsp, err := newTracesProcessor(context.Background(), set, cfg, sink) diff --git a/processor/remotetapprocessor/processor_test.go b/processor/remotetapprocessor/processor_test.go index 9908cd379fa6..50c59ad34a13 100644 --- a/processor/remotetapprocessor/processor_test.go +++ b/processor/remotetapprocessor/processor_test.go @@ -60,7 +60,6 @@ func TestConsumeMetrics(t *testing.T) { processor.cs.closeAndRemove(idx) wg.Wait() assert.Equal(t, c.limit, receiveNum) - }) } } diff --git a/processor/resourcedetectionprocessor/internal/heroku/heroku_test.go b/processor/resourcedetectionprocessor/internal/heroku/heroku_test.go index cabb11994391..02acdaa79467 100644 --- 
a/processor/resourcedetectionprocessor/internal/heroku/heroku_test.go +++ b/processor/resourcedetectionprocessor/internal/heroku/heroku_test.go @@ -81,7 +81,6 @@ func TestDetectTruePartialMissingDynoId(t *testing.T) { } func TestDetectFalse(t *testing.T) { - detector, err := NewDetector(processortest.NewNopSettings(), CreateDefaultConfig()) require.NoError(t, err) res, schemaURL, err := detector.Detect(context.Background()) diff --git a/processor/resourcedetectionprocessor/internal/system/system_test.go b/processor/resourcedetectionprocessor/internal/system/system_test.go index 1626dbf2b4b8..aa5123a041c8 100644 --- a/processor/resourcedetectionprocessor/internal/system/system_test.go +++ b/processor/resourcedetectionprocessor/internal/system/system_test.go @@ -178,7 +178,6 @@ func TestDetectFQDNAvailable(t *testing.T) { } assert.Equal(t, expected, res.Attributes().AsRaw()) - } func TestFallbackHostname(t *testing.T) { @@ -368,7 +367,6 @@ func TestDetectCPUInfo(t *testing.T) { } assert.Equal(t, expected, res.Attributes().AsRaw()) - } func newTestDetector(mock *mockMetadata, hostnameSources []string, resCfg metadata.ResourceAttributesConfig) *Detector { diff --git a/processor/routingprocessor/extract.go b/processor/routingprocessor/extract.go index b96a07d48a42..a0007b4ec934 100644 --- a/processor/routingprocessor/extract.go +++ b/processor/routingprocessor/extract.go @@ -53,7 +53,6 @@ func (e extractor) extractFromContext(ctx context.Context) string { } func (e extractor) extractFromGRPCContext(ctx context.Context) ([]string, bool) { - md, ok := metadata.FromIncomingContext(ctx) if !ok { diff --git a/processor/routingprocessor/metrics_test.go b/processor/routingprocessor/metrics_test.go index 430a8efbecc5..b1bf73f89c7f 100644 --- a/processor/routingprocessor/metrics_test.go +++ b/processor/routingprocessor/metrics_test.go @@ -187,7 +187,6 @@ func TestMetrics_RoutingWorks_Context(t *testing.T) { "metric should not be routed to non default exporter", ) }) - } func 
TestMetrics_RoutingWorks_ResourceAttribute(t *testing.T) { diff --git a/processor/routingprocessor/traces_test.go b/processor/routingprocessor/traces_test.go index 30a8093b4dcb..538af2d79373 100644 --- a/processor/routingprocessor/traces_test.go +++ b/processor/routingprocessor/traces_test.go @@ -497,7 +497,6 @@ func TestTracesAttributeWithOTTLDoesNotCauseCrash(t *testing.T) { // verify assert.Len(t, defaultExp.AllTraces(), 1) assert.Empty(t, firstExp.AllTraces()) - } func TestTraceProcessorCapabilities(t *testing.T) { diff --git a/processor/schemaprocessor/internal/migrate/attributes_test.go b/processor/schemaprocessor/internal/migrate/attributes_test.go index f3cc5d79dc6e..3d1e708f3b92 100644 --- a/processor/schemaprocessor/internal/migrate/attributes_test.go +++ b/processor/schemaprocessor/internal/migrate/attributes_test.go @@ -268,7 +268,6 @@ func TestNewAttributeChangeSetSliceApplyRollback(t *testing.T) { ), attr: testHelperBuildMap(func(m pcommon.Map) { m.PutStr("application.service.version", "v0.0.1") - }), expect: testHelperBuildMap(func(m pcommon.Map) { m.PutStr("service_version", "v0.0.1") diff --git a/processor/spanprocessor/factory.go b/processor/spanprocessor/factory.go index 4bec805ffeae..8e4906d20726 100644 --- a/processor/spanprocessor/factory.go +++ b/processor/spanprocessor/factory.go @@ -53,7 +53,6 @@ func createTracesProcessor( cfg component.Config, nextConsumer consumer.Traces, ) (processor.Traces, error) { - // 'from_attributes' or 'to_attributes' under 'name' has to be set for the span // processor to be valid. If not set and not enforced, the processor would do no work. 
oCfg := cfg.(*Config) diff --git a/processor/spanprocessor/span_test.go b/processor/spanprocessor/span_test.go index 460e1c430d13..fff402f055cd 100644 --- a/processor/spanprocessor/span_test.go +++ b/processor/spanprocessor/span_test.go @@ -294,7 +294,6 @@ func TestSpanProcessor_MissingKeys(t *testing.T) { // TestSpanProcessor_Separator ensures naming a span with a single key and separator will only contain the value from // the single key. func TestSpanProcessor_Separator(t *testing.T) { - factory := NewFactory() cfg := factory.CreateDefaultConfig() oCfg := cfg.(*Config) @@ -323,7 +322,6 @@ func TestSpanProcessor_Separator(t *testing.T) { // TestSpanProcessor_NoSeparatorMultipleKeys tests naming a span using multiple keys and no separator. func TestSpanProcessor_NoSeparatorMultipleKeys(t *testing.T) { - factory := NewFactory() cfg := factory.CreateDefaultConfig() oCfg := cfg.(*Config) @@ -353,7 +351,6 @@ func TestSpanProcessor_NoSeparatorMultipleKeys(t *testing.T) { // TestSpanProcessor_SeparatorMultipleKeys tests naming a span with multiple keys and a separator. func TestSpanProcessor_SeparatorMultipleKeys(t *testing.T) { - factory := NewFactory() cfg := factory.CreateDefaultConfig() oCfg := cfg.(*Config) @@ -388,7 +385,6 @@ func TestSpanProcessor_SeparatorMultipleKeys(t *testing.T) { // TestSpanProcessor_NilName tests naming a span when the input span had no name. 
func TestSpanProcessor_NilName(t *testing.T) { - factory := NewFactory() cfg := factory.CreateDefaultConfig() oCfg := cfg.(*Config) @@ -417,7 +413,6 @@ func TestSpanProcessor_NilName(t *testing.T) { // TestSpanProcessor_ToAttributes func TestSpanProcessor_ToAttributes(t *testing.T) { - testCases := []struct { rules []string breakAfterMatch bool diff --git a/processor/sumologicprocessor/processor_test.go b/processor/sumologicprocessor/processor_test.go index 0e0759a2a21b..0f4d28716f49 100644 --- a/processor/sumologicprocessor/processor_test.go +++ b/processor/sumologicprocessor/processor_test.go @@ -1318,7 +1318,6 @@ func TestLogFieldsConversionLogs(t *testing.T) { attribute4, found := outputLogs.ResourceLogs().At(0).ScopeLogs().At(0).LogRecords().At(0).Attributes().Get("traceid") assert.True(t, found) assert.Equal(t, "01010101010101010101010101010101", attribute4.Str()) - }, }, } diff --git a/processor/sumologicprocessor/translate_attributes_processor_test.go b/processor/sumologicprocessor/translate_attributes_processor_test.go index c9dd1443e2cc..e7535e9dac00 100644 --- a/processor/sumologicprocessor/translate_attributes_processor_test.go +++ b/processor/sumologicprocessor/translate_attributes_processor_test.go @@ -110,7 +110,6 @@ func assertAttribute(t *testing.T, metadata pcommon.Map, attributeName string, e } else { assert.True(t, exists) assert.Equal(t, expectedValue, value.Str()) - } } diff --git a/processor/tailsamplingprocessor/internal/sampling/and.go b/processor/tailsamplingprocessor/internal/sampling/and.go index b5779fb82c9d..408fedfbd240 100644 --- a/processor/tailsamplingprocessor/internal/sampling/and.go +++ b/processor/tailsamplingprocessor/internal/sampling/and.go @@ -20,7 +20,6 @@ func NewAnd( logger *zap.Logger, subpolicies []PolicyEvaluator, ) PolicyEvaluator { - return &And{ subpolicies: subpolicies, logger: logger, @@ -39,7 +38,6 @@ func (c *And) Evaluate(ctx context.Context, traceID pcommon.TraceID, trace *Trac if decision == NotSampled || 
decision == InvertNotSampled { return decision, nil } - } return Sampled, nil } diff --git a/processor/tailsamplingprocessor/internal/sampling/and_test.go b/processor/tailsamplingprocessor/internal/sampling/and_test.go index 0094768f7590..29a771971665 100644 --- a/processor/tailsamplingprocessor/internal/sampling/and_test.go +++ b/processor/tailsamplingprocessor/internal/sampling/and_test.go @@ -36,7 +36,6 @@ func TestAndEvaluatorNotSampled(t *testing.T) { decision, err := and.Evaluate(context.Background(), traceID, trace) require.NoError(t, err, "Failed to evaluate and policy: %v", err) assert.Equal(t, NotSampled, decision) - } func TestAndEvaluatorSampled(t *testing.T) { @@ -62,7 +61,6 @@ func TestAndEvaluatorSampled(t *testing.T) { decision, err := and.Evaluate(context.Background(), traceID, trace) require.NoError(t, err, "Failed to evaluate and policy: %v", err) assert.Equal(t, Sampled, decision) - } func TestAndEvaluatorStringInvertSampled(t *testing.T) { @@ -88,7 +86,6 @@ func TestAndEvaluatorStringInvertSampled(t *testing.T) { decision, err := and.Evaluate(context.Background(), traceID, trace) require.NoError(t, err, "Failed to evaluate and policy: %v", err) assert.Equal(t, Sampled, decision) - } func TestAndEvaluatorStringInvertNotSampled(t *testing.T) { @@ -114,5 +111,4 @@ func TestAndEvaluatorStringInvertNotSampled(t *testing.T) { decision, err := and.Evaluate(context.Background(), traceID, trace) require.NoError(t, err, "Failed to evaluate and policy: %v", err) assert.Equal(t, InvertNotSampled, decision) - } diff --git a/processor/tailsamplingprocessor/internal/sampling/boolean_tag_filter_test.go b/processor/tailsamplingprocessor/internal/sampling/boolean_tag_filter_test.go index 2ff998bb3e86..f3ca91c53879 100644 --- a/processor/tailsamplingprocessor/internal/sampling/boolean_tag_filter_test.go +++ b/processor/tailsamplingprocessor/internal/sampling/boolean_tag_filter_test.go @@ -15,7 +15,6 @@ import ( ) func TestBooleanTagFilter(t *testing.T) { - var 
empty = map[string]any{} filter := NewBooleanAttributeFilter(componenttest.NewNopTelemetrySettings(), "example", true, false) @@ -55,7 +54,6 @@ func TestBooleanTagFilter(t *testing.T) { } func TestBooleanTagFilterInverted(t *testing.T) { - var empty = map[string]any{} filter := NewBooleanAttributeFilter(componenttest.NewNopTelemetrySettings(), "example", true, true) diff --git a/processor/tailsamplingprocessor/internal/sampling/composite.go b/processor/tailsamplingprocessor/internal/sampling/composite.go index b221229d7534..0c98c5a1f195 100644 --- a/processor/tailsamplingprocessor/internal/sampling/composite.go +++ b/processor/tailsamplingprocessor/internal/sampling/composite.go @@ -53,7 +53,6 @@ func NewComposite( subPolicyParams []SubPolicyEvalParams, timeProvider TimeProvider, ) PolicyEvaluator { - var subpolicies []*subpolicy for i := 0; i < len(subPolicyParams); i++ { diff --git a/processor/tailsamplingprocessor/internal/sampling/composite_test.go b/processor/tailsamplingprocessor/internal/sampling/composite_test.go index 66a7d1606c34..67a977f90335 100644 --- a/processor/tailsamplingprocessor/internal/sampling/composite_test.go +++ b/processor/tailsamplingprocessor/internal/sampling/composite_test.go @@ -57,7 +57,6 @@ func newTraceWithKV(traceID pcommon.TraceID, key string, val int64) *TraceData { } func TestCompositeEvaluatorNotSampled(t *testing.T) { - // Create 2 policies which do not match any trace n1 := NewNumericAttributeFilter(componenttest.NewNopTelemetrySettings(), "tag", 0, 100, false) n2 := NewNumericAttributeFilter(componenttest.NewNopTelemetrySettings(), "tag", 200, 300, false) @@ -75,7 +74,6 @@ func TestCompositeEvaluatorNotSampled(t *testing.T) { } func TestCompositeEvaluatorSampled(t *testing.T) { - // Create 2 subpolicies. First results in 100% NotSampled, the second in 100% Sampled. 
n1 := NewNumericAttributeFilter(componenttest.NewNopTelemetrySettings(), "tag", 0, 100, false) n2 := NewAlwaysSample(componenttest.NewNopTelemetrySettings()) @@ -92,7 +90,6 @@ func TestCompositeEvaluatorSampled(t *testing.T) { } func TestCompositeEvaluator_OverflowAlwaysSampled(t *testing.T) { - timeProvider := &FakeTimeProvider{second: 0} // Create 2 subpolicies. First results in 100% NotSampled, the second in 100% Sampled. @@ -128,7 +125,6 @@ func TestCompositeEvaluator_OverflowAlwaysSampled(t *testing.T) { } func TestCompositeEvaluatorSampled_AlwaysSampled(t *testing.T) { - // Create 2 subpolicies. First results in 100% NotSampled, the second in 100% Sampled. n1 := NewNumericAttributeFilter(componenttest.NewNopTelemetrySettings(), "tag", 0, 100, false) n2 := NewAlwaysSample(componenttest.NewNopTelemetrySettings()) @@ -147,7 +143,6 @@ func TestCompositeEvaluatorSampled_AlwaysSampled(t *testing.T) { } func TestCompositeEvaluatorInverseSampled_AlwaysSampled(t *testing.T) { - // The first policy does not match, the second matches through invert n1 := NewStringAttributeFilter(componenttest.NewNopTelemetrySettings(), "tag", []string{"foo"}, false, 0, false) n2 := NewStringAttributeFilter(componenttest.NewNopTelemetrySettings(), "tag", []string{"foo"}, false, 0, true) @@ -166,7 +161,6 @@ func TestCompositeEvaluatorInverseSampled_AlwaysSampled(t *testing.T) { } func TestCompositeEvaluatorThrottling(t *testing.T) { - // Create only one subpolicy, with 100% Sampled policy. 
n1 := NewAlwaysSample(componenttest.NewNopTelemetrySettings()) timeProvider := &FakeTimeProvider{second: 0} @@ -207,7 +201,6 @@ func TestCompositeEvaluatorThrottling(t *testing.T) { } func TestCompositeEvaluator2SubpolicyThrottling(t *testing.T) { - n1 := NewNumericAttributeFilter(componenttest.NewNopTelemetrySettings(), "tag", 0, 100, false) n2 := NewAlwaysSample(componenttest.NewNopTelemetrySettings()) timeProvider := &FakeTimeProvider{second: 0} diff --git a/processor/tailsamplingprocessor/internal/sampling/numeric_tag_filter_test.go b/processor/tailsamplingprocessor/internal/sampling/numeric_tag_filter_test.go index a7d058c2156e..7b8db1265c2e 100644 --- a/processor/tailsamplingprocessor/internal/sampling/numeric_tag_filter_test.go +++ b/processor/tailsamplingprocessor/internal/sampling/numeric_tag_filter_test.go @@ -16,7 +16,6 @@ import ( ) func TestNumericTagFilter(t *testing.T) { - var empty = map[string]any{} filter := NewNumericAttributeFilter(componenttest.NewNopTelemetrySettings(), "example", math.MinInt32, math.MaxInt32, false) @@ -86,7 +85,6 @@ func TestNumericTagFilter(t *testing.T) { } func TestNumericTagFilterInverted(t *testing.T) { - var empty = map[string]any{} filter := NewNumericAttributeFilter(componenttest.NewNopTelemetrySettings(), "example", math.MinInt32, math.MaxInt32, true) diff --git a/processor/tailsamplingprocessor/internal/sampling/string_tag_filter_test.go b/processor/tailsamplingprocessor/internal/sampling/string_tag_filter_test.go index 65bed0193a09..e9ee3da86773 100644 --- a/processor/tailsamplingprocessor/internal/sampling/string_tag_filter_test.go +++ b/processor/tailsamplingprocessor/internal/sampling/string_tag_filter_test.go @@ -23,7 +23,6 @@ type TestStringAttributeCfg struct { } func TestStringTagFilter(t *testing.T) { - cases := []struct { Desc string Trace *TraceData diff --git a/processor/tailsamplingprocessor/internal/sampling/trace_state_filter_test.go 
b/processor/tailsamplingprocessor/internal/sampling/trace_state_filter_test.go index be826307c5a0..c4481a9c5e76 100644 --- a/processor/tailsamplingprocessor/internal/sampling/trace_state_filter_test.go +++ b/processor/tailsamplingprocessor/internal/sampling/trace_state_filter_test.go @@ -20,7 +20,6 @@ type TestTraceStateCfg struct { } func TestTraceStateFilter(t *testing.T) { - cases := []struct { Desc string Trace *TraceData diff --git a/processor/tailsamplingprocessor/processor_test.go b/processor/tailsamplingprocessor/processor_test.go index 1f1096e88975..05d3825b0f87 100644 --- a/processor/tailsamplingprocessor/processor_test.go +++ b/processor/tailsamplingprocessor/processor_test.go @@ -472,7 +472,6 @@ func TestSubSecondDecisionTime(t *testing.T) { require.Eventually(t, func() bool { return len(msp.AllTraces()) == 1 }, time.Second, 10*time.Millisecond) - } func TestPolicyLoggerAddsPolicyName(t *testing.T) { diff --git a/processor/transformprocessor/internal/common/processor.go b/processor/transformprocessor/internal/common/processor.go index dee7d24e7ba9..07164658de1b 100644 --- a/processor/transformprocessor/internal/common/processor.go +++ b/processor/transformprocessor/internal/common/processor.go @@ -216,7 +216,6 @@ func parseGlobalExpr[K any]( conditions []string, pc parserCollection, standardFuncs map[string]ottl.Factory[K]) (expr.BoolExpr[K], error) { - if len(conditions) > 0 { return boolExprFunc(conditions, standardFuncs, pc.errorMode, pc.settings) } diff --git a/processor/transformprocessor/internal/logs/processor_test.go b/processor/transformprocessor/internal/logs/processor_test.go index e6c3e117647e..d3e06f65ac01 100644 --- a/processor/transformprocessor/internal/logs/processor_test.go +++ b/processor/transformprocessor/internal/logs/processor_test.go @@ -536,7 +536,6 @@ func fillLogOne(log plog.LogRecord) { log.Attributes().PutStr("http.url", "http://localhost/health") log.Attributes().PutStr("flags", "A|B|C") 
log.Attributes().PutStr("total.string", "123456789") - } func fillLogTwo(log plog.LogRecord) { @@ -548,5 +547,4 @@ func fillLogTwo(log plog.LogRecord) { log.Attributes().PutStr("http.url", "http://localhost/health") log.Attributes().PutStr("flags", "C|D") log.Attributes().PutStr("total.string", "345678") - } diff --git a/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist.go b/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist.go index 4e8958706841..b918377af67c 100644 --- a/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist.go +++ b/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist.go @@ -46,7 +46,6 @@ func createconvertExponentialHistToExplicitHistFunction(_ ottl.FunctionContext, if _, ok := distributionFnMap[args.DistributionFn]; !ok { return nil, fmt.Errorf("invalid conversion function: %s, must be one of [upper, midpoint, random, uniform]", args.DistributionFn) - } return convertExponentialHistToExplicitHist(args.DistributionFn, args.ExplicitBounds) @@ -54,7 +53,6 @@ func createconvertExponentialHistToExplicitHistFunction(_ ottl.FunctionContext, // convertExponentialHistToExplicitHist converts an exponential histogram to a bucketed histogram func convertExponentialHistToExplicitHist(distributionFn string, explicitBounds []float64) (ottl.ExprFunc[ottlmetric.TransformContext], error) { - if len(explicitBounds) == 0 { return nil, fmt.Errorf("explicit bounds cannot be empty: %v", explicitBounds) } diff --git a/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist_test.go b/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist_test.go index f61969b17c18..b2aae4691260 100644 --- a/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist_test.go +++ 
b/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist_test.go @@ -91,7 +91,6 @@ func TestUpper_convert_exponential_hist_to_explicit_hist(t *testing.T) { arg: []float64{1.0, 2.0, 3.0, 4.0, 5.0}, distribution: "upper", want: func(metric pmetric.Metric) { - metric.SetName("response_time") dp := metric.SetEmptyHistogram().DataPoints().AppendEmpty() metric.Histogram().SetAggregationTemporality(1) @@ -109,7 +108,6 @@ func TestUpper_convert_exponential_hist_to_explicit_hist(t *testing.T) { // set explictbounds dp.ExplicitBounds().Append(1.0, 2.0, 3.0, 4.0, 5.0) - }, }, { @@ -120,7 +118,6 @@ func TestUpper_convert_exponential_hist_to_explicit_hist(t *testing.T) { arg: []float64{1000.0, 2000.0, 3000.0, 4000.0, 5000.0}, distribution: "upper", want: func(metric pmetric.Metric) { - metric.SetName("response_time") dp := metric.SetEmptyHistogram().DataPoints().AppendEmpty() metric.Histogram().SetAggregationTemporality(1) @@ -138,7 +135,6 @@ func TestUpper_convert_exponential_hist_to_explicit_hist(t *testing.T) { // set explictbounds dp.ExplicitBounds().Append(1000.0, 2000.0, 3000.0, 4000.0, 5000.0) - }, }, { @@ -148,7 +144,6 @@ func TestUpper_convert_exponential_hist_to_explicit_hist(t *testing.T) { arg: []float64{160.0, 170.0, 180.0, 190.0, 200.0}, distribution: "upper", want: func(metric pmetric.Metric) { - metric.SetName("response_time") dp := metric.SetEmptyHistogram().DataPoints().AppendEmpty() metric.Histogram().SetAggregationTemporality(1) @@ -166,7 +161,6 @@ func TestUpper_convert_exponential_hist_to_explicit_hist(t *testing.T) { // set explictbounds dp.ExplicitBounds().Append(160.0, 170.0, 180.0, 190.0, 200.0) - }, }, { @@ -175,7 +169,6 @@ func TestUpper_convert_exponential_hist_to_explicit_hist(t *testing.T) { arg: []float64{160.0, 170.0, 180.0, 190.0, 200.0}, distribution: "upper", want: func(metric pmetric.Metric) { - metric.SetName("response_time") dp := metric.SetEmptyHistogram().DataPoints().AppendEmpty() 
metric.Histogram().SetAggregationTemporality(1) @@ -193,7 +186,6 @@ func TestUpper_convert_exponential_hist_to_explicit_hist(t *testing.T) { // set explictbounds dp.ExplicitBounds().Append(160.0, 170.0, 180.0, 190.0, 200.0) - }, }, { @@ -328,7 +320,6 @@ func TestMidpoint_convert_exponential_hist_to_explicit_hist(t *testing.T) { // set explictbounds dp.ExplicitBounds().Append(1.0, 2.0, 3.0, 4.0, 5.0) - }, }, { @@ -339,7 +330,6 @@ func TestMidpoint_convert_exponential_hist_to_explicit_hist(t *testing.T) { arg: []float64{1000.0, 2000.0, 3000.0, 4000.0, 5000.0}, distribution: "midpoint", want: func(metric pmetric.Metric) { - metric.SetName("test-metric") dp := metric.SetEmptyHistogram().DataPoints().AppendEmpty() metric.Histogram().SetAggregationTemporality(1) @@ -357,7 +347,6 @@ func TestMidpoint_convert_exponential_hist_to_explicit_hist(t *testing.T) { // set explictbounds dp.ExplicitBounds().Append(1000.0, 2000.0, 3000.0, 4000.0, 5000.0) - }, }, { @@ -367,7 +356,6 @@ func TestMidpoint_convert_exponential_hist_to_explicit_hist(t *testing.T) { arg: []float64{10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0}, distribution: "midpoint", want: func(metric pmetric.Metric) { - metric.SetName("test-metric") dp := metric.SetEmptyHistogram().DataPoints().AppendEmpty() metric.Histogram().SetAggregationTemporality(1) @@ -385,7 +373,6 @@ func TestMidpoint_convert_exponential_hist_to_explicit_hist(t *testing.T) { // set explictbounds dp.ExplicitBounds().Append(10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0) - }, }, { @@ -399,7 +386,6 @@ func TestMidpoint_convert_exponential_hist_to_explicit_hist(t *testing.T) { arg: []float64{0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0}, distribution: "midpoint", want: func(metric pmetric.Metric) { - metric.SetName("test-metric") dp := metric.SetEmptyHistogram().DataPoints().AppendEmpty() metric.Histogram().SetAggregationTemporality(1) @@ -417,7 +403,6 @@ func 
TestMidpoint_convert_exponential_hist_to_explicit_hist(t *testing.T) { // set explictbounds dp.ExplicitBounds().Append(0, 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0) - }, }, { @@ -519,7 +504,6 @@ func TestUniforn_convert_exponential_hist_to_explicit_hist(t *testing.T) { // set explictbounds dp.ExplicitBounds().Append(1.0, 2.0, 3.0, 4.0, 5.0) - }, }, { @@ -530,7 +514,6 @@ func TestUniforn_convert_exponential_hist_to_explicit_hist(t *testing.T) { arg: []float64{1000.0, 2000.0, 3000.0, 4000.0, 5000.0}, distribution: "uniform", want: func(metric pmetric.Metric) { - metric.SetName("test-metric") dp := metric.SetEmptyHistogram().DataPoints().AppendEmpty() metric.Histogram().SetAggregationTemporality(1) @@ -548,7 +531,6 @@ func TestUniforn_convert_exponential_hist_to_explicit_hist(t *testing.T) { // set explictbounds dp.ExplicitBounds().Append(1000.0, 2000.0, 3000.0, 4000.0, 5000.0) - }, }, { @@ -558,7 +540,6 @@ func TestUniforn_convert_exponential_hist_to_explicit_hist(t *testing.T) { arg: []float64{10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0}, distribution: "uniform", want: func(metric pmetric.Metric) { - metric.SetName("test-metric") dp := metric.SetEmptyHistogram().DataPoints().AppendEmpty() metric.Histogram().SetAggregationTemporality(1) @@ -576,7 +557,6 @@ func TestUniforn_convert_exponential_hist_to_explicit_hist(t *testing.T) { // set explictbounds dp.ExplicitBounds().Append(10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0) - }, }, } @@ -654,7 +634,6 @@ func TestRandom_convert_exponential_hist_to_explicit_hist(t *testing.T) { // set explictbounds dp.ExplicitBounds().Append(1.0, 2.0, 3.0, 4.0, 5.0) - }, }, { @@ -665,7 +644,6 @@ func TestRandom_convert_exponential_hist_to_explicit_hist(t *testing.T) { arg: []float64{1000.0, 2000.0, 3000.0, 4000.0, 5000.0}, distribution: "random", want: func(metric pmetric.Metric) { - metric.SetName("test-metric") dp := metric.SetEmptyHistogram().DataPoints().AppendEmpty() 
metric.Histogram().SetAggregationTemporality(1) @@ -683,7 +661,6 @@ func TestRandom_convert_exponential_hist_to_explicit_hist(t *testing.T) { // set explictbounds dp.ExplicitBounds().Append(1000.0, 2000.0, 3000.0, 4000.0, 5000.0) - }, }, { @@ -693,7 +670,6 @@ func TestRandom_convert_exponential_hist_to_explicit_hist(t *testing.T) { arg: []float64{10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0}, distribution: "random", want: func(metric pmetric.Metric) { - metric.SetName("test-metric") dp := metric.SetEmptyHistogram().DataPoints().AppendEmpty() metric.Histogram().SetAggregationTemporality(1) diff --git a/processor/transformprocessor/internal/metrics/functions_test.go b/processor/transformprocessor/internal/metrics/functions_test.go index 62def6453fe2..cf3ed6adccc0 100644 --- a/processor/transformprocessor/internal/metrics/functions_test.go +++ b/processor/transformprocessor/internal/metrics/functions_test.go @@ -46,7 +46,6 @@ func Test_DataPointFunctions(t *testing.T) { }, ) } - } func Test_MetricFunctions(t *testing.T) { diff --git a/receiver/splunkenterprisereceiver/scraper.go b/receiver/splunkenterprisereceiver/scraper.go index 54af1eae90da..61bf663b6b46 100644 --- a/receiver/splunkenterprisereceiver/scraper.go +++ b/receiver/splunkenterprisereceiver/scraper.go @@ -1732,6 +1732,5 @@ func (s *splunkScraper) scrapeSearchArtifacts(ctx context.Context, now pcommon.T } s.mb.RecordSplunkServerSearchartifactsJobCacheCountDataPoint(now, cacheTotalEntries, s.conf.SHEndpoint.Endpoint) } - } }