[chore]: enable whitespace linter for processor and receiver #36297

Merged: 1 commit, merged on Nov 12, 2024
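For context, this change is the mechanical cleanup required by golangci-lint's `whitespace` linter, which reports unnecessary leading and trailing blank lines inside blocks; every hunk below deletes exactly one such line. A minimal sketch of the pattern, with hypothetical function names (the `.golangci.yml` change that actually enables the linter is not shown in this excerpt):

```go
package main

import "fmt"

// flaggedByWhitespace is shaped like the code this PR cleans up: the
// function body opens with a blank line, which the whitespace linter
// reports (a blank line just before a closing brace is flagged too).
func flaggedByWhitespace() {

	fmt.Println("the blank line above this statement is unnecessary")
}

// cleaned is the corrected form; every hunk below makes the same
// one-line deletion.
func cleaned() {
	fmt.Println("no leading or trailing blank lines inside the block")
}

func main() {
	flaggedByWhitespace()
	cleaned()
}
```

Only processor files appear in this excerpt; the receiver changes referenced in the title are presumably in the portion of the diff not shown here.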
1 change: 0 additions & 1 deletion processor/attributesprocessor/attributes_metric.go
@@ -62,7 +62,6 @@ func (a *metricAttributesProcessor) processMetrics(ctx context.Context, md pmetr
// Attributes are provided for each log and trace, but not at the metric level
// Need to process attributes for every data point within a metric.
func (a *metricAttributesProcessor) processMetricAttributes(ctx context.Context, m pmetric.Metric) {

// This is a lot of repeated code, but since there is no single parent superclass
// between metric data types, we can't use polymorphism.
//exhaustive:enforce
1 change: 0 additions & 1 deletion processor/attributesprocessor/factory.go
@@ -92,7 +92,6 @@ func createMetricsProcessor(
cfg component.Config,
nextConsumer consumer.Metrics,
) (processor.Metrics, error) {

oCfg := cfg.(*Config)
attrProc, err := attraction.NewAttrProc(&oCfg.Settings)
if err != nil {
@@ -117,7 +117,6 @@ func TestExpoAdd(t *testing.T) {
}
t.Run(cs.name, run(cs.dp, cs.in))
}

}

func rawbs(data []uint64, offset int32) expo.Buckets {
@@ -222,7 +222,6 @@ func TestErrs(t *testing.T) {
require.Equal(t, r1.IntValue(), r2.IntValue())
})
}

}

func time(ts int) pcommon.Timestamp {
1 change: 0 additions & 1 deletion processor/deltatocumulativeprocessor/processor_test.go
@@ -72,7 +72,6 @@ func TestProcessor(t *testing.T) {
}
}
})

}
}

1 change: 0 additions & 1 deletion processor/deltatorateprocessor/processor_test.go
@@ -172,7 +172,6 @@ func TestCumulativeToDeltaProcessor(t *testing.T) {
require.Equal(t, eDataPoints.At(j).DoubleValue(), aDataPoints.At(j).DoubleValue())
}
}

}

require.NoError(t, mgp.Shutdown(ctx))
5 changes: 0 additions & 5 deletions processor/filterprocessor/config_test.go
@@ -97,7 +97,6 @@ func TestLoadingConfigStrict(t *testing.T) {

// TestLoadingConfigStrictLogs tests loading testdata/config_logs_strict.yaml
func TestLoadingConfigStrictLogs(t *testing.T) {

testDataLogPropertiesInclude := &LogMatchProperties{
LogMatchType: strictType,
ResourceAttributes: []filterconfig.Attribute{
@@ -180,7 +179,6 @@ func TestLoadingConfigStrictLogs(t *testing.T) {

// TestLoadingConfigSeverityLogsStrict tests loading testdata/config_logs_severity_strict.yaml
func TestLoadingConfigSeverityLogsStrict(t *testing.T) {

testDataLogPropertiesInclude := &LogMatchProperties{
LogMatchType: strictType,
SeverityTexts: []string{"INFO"},
@@ -305,7 +303,6 @@ func TestLoadingConfigSeverityLogsRegexp(t *testing.T) {

// TestLoadingConfigBodyLogsStrict tests loading testdata/config_logs_body_strict.yaml
func TestLoadingConfigBodyLogsStrict(t *testing.T) {

testDataLogPropertiesInclude := &LogMatchProperties{
LogMatchType: strictType,
LogBodies: []string{"This is an important event"},
@@ -368,7 +365,6 @@ func TestLoadingConfigBodyLogsStrict(t *testing.T) {

// TestLoadingConfigBodyLogsStrict tests loading testdata/config_logs_body_regexp.yaml
func TestLoadingConfigBodyLogsRegexp(t *testing.T) {

testDataLogPropertiesInclude := &LogMatchProperties{
LogMatchType: regexpType,
LogBodies: []string{"^IMPORTANT:"},
@@ -832,7 +828,6 @@ func TestLogSeverity_severityValidate(t *testing.T) {
}

func TestLoadingConfigOTTL(t *testing.T) {

cm, err := confmaptest.LoadConf(filepath.Join("testdata", "config_ottl.yaml"))
require.NoError(t, err)

3 changes: 0 additions & 3 deletions processor/filterprocessor/logs_test.go
@@ -794,7 +794,6 @@ func TestFilterLogProcessorTelemetry(t *testing.T) {
}

tel.assertMetrics(t, want)

}

func constructLogs() plog.Logs {
@@ -825,7 +824,6 @@ func fillLogOne(log plog.LogRecord) {
log.Attributes().PutStr("http.path", "/health")
log.Attributes().PutStr("http.url", "http://localhost/health")
log.Attributes().PutStr("flags", "A|B|C")

}

func fillLogTwo(log plog.LogRecord) {
@@ -836,5 +834,4 @@ func fillLogTwo(log plog.LogRecord) {
log.Attributes().PutStr("http.path", "/health")
log.Attributes().PutStr("http.url", "http://localhost/health")
log.Attributes().PutStr("flags", "C|D")

}
1 change: 0 additions & 1 deletion processor/filterprocessor/metrics_test.go
@@ -943,7 +943,6 @@ func TestFilterMetricProcessorWithOTTL(t *testing.T) {
if tt.filterEverything {
assert.Equal(t, processorhelper.ErrSkipProcessingData, err)
} else {

exTd := constructMetrics()
tt.want(exTd)
assert.Equal(t, exTd, got)
1 change: 0 additions & 1 deletion processor/filterprocessor/traces_test.go
@@ -273,7 +273,6 @@ func TestFilterTraceProcessorWithOTTL(t *testing.T) {
if tt.filterEverything {
assert.Equal(t, processorhelper.ErrSkipProcessingData, err)
} else {

exTd := constructTraces()
tt.want(exTd)
assert.Equal(t, exTd, got)
1 change: 0 additions & 1 deletion processor/geoipprocessor/factory.go
@@ -77,7 +77,6 @@ func createGeoIPProviders(
}

providers = append(providers, provider)

}

return providers, nil
1 change: 0 additions & 1 deletion processor/groupbyattrsprocessor/attribute_groups.go
@@ -64,7 +64,6 @@ func (mg *metricsGroup) findOrCreateResourceMetrics(originResource pcommon.Resou
referenceResource.MoveTo(rm.Resource())
mg.resourceHashes = append(mg.resourceHashes, referenceResourceHash)
return rm

}

type logsGroup struct {
3 changes: 0 additions & 3 deletions processor/groupbyattrsprocessor/factory.go
@@ -65,7 +65,6 @@ func createTracesProcessor(
set processor.Settings,
cfg component.Config,
nextConsumer consumer.Traces) (processor.Traces, error) {

oCfg := cfg.(*Config)
gap, err := createGroupByAttrsProcessor(set, oCfg.GroupByKeys)
if err != nil {
@@ -87,7 +86,6 @@ func createLogsProcessor(
set processor.Settings,
cfg component.Config,
nextConsumer consumer.Logs) (processor.Logs, error) {

oCfg := cfg.(*Config)
gap, err := createGroupByAttrsProcessor(set, oCfg.GroupByKeys)
if err != nil {
@@ -109,7 +107,6 @@ func createMetricsProcessor(
set processor.Settings,
cfg component.Config,
nextConsumer consumer.Metrics) (processor.Metrics, error) {

oCfg := cfg.(*Config)
gap, err := createGroupByAttrsProcessor(set, oCfg.GroupByKeys)
if err != nil {
7 changes: 0 additions & 7 deletions processor/groupbyattrsprocessor/processor.go
@@ -90,7 +90,6 @@ func (gap *groupByAttrsProcessor) processLogs(ctx context.Context, ld plog.Logs)
log.CopyTo(lr)
}
}

}

// Copy the grouped data into output
@@ -114,7 +113,6 @@ func (gap *groupByAttrsProcessor) processMetrics(ctx context.Context, md pmetric

//exhaustive:enforce
switch metric.Type() {

case pmetric.MetricTypeGauge:
for pointIndex := 0; pointIndex < metric.Gauge().DataPoints().Len(); pointIndex++ {
dataPoint := metric.Gauge().DataPoints().At(pointIndex)
@@ -174,7 +172,6 @@ func deleteAttributes(attrsForRemoval, targetAttrs pcommon.Map) {
// - whether any attribute matched (true) or none (false)
// - the extracted AttributeMap of matching keys and their corresponding values
func (gap *groupByAttrsProcessor) extractGroupingAttributes(attrMap pcommon.Map) (bool, pcommon.Map) {

groupingAttributes := pcommon.NewMap()
foundMatch := false

@@ -191,7 +188,6 @@ func (gap *groupByAttrsProcessor) extractGroupingAttributes(attrMap pcommon.Map)

// Searches for metric with same name in the specified InstrumentationLibrary and returns it. If nothing is found, create it.
func getMetricInInstrumentationLibrary(ilm pmetric.ScopeMetrics, searchedMetric pmetric.Metric) pmetric.Metric {

// Loop through all metrics and try to find the one that matches with the one we search for
// (name and type)
for i := 0; i < ilm.Metrics().Len(); i++ {
@@ -211,7 +207,6 @@ func getMetricInInstrumentationLibrary(ilm pmetric.ScopeMetrics, searchedMetric
// Move other special type specific values
//exhaustive:enforce
switch searchedMetric.Type() {

case pmetric.MetricTypeHistogram:
metric.SetEmptyHistogram().SetAggregationTemporality(searchedMetric.Histogram().AggregationTemporality())

@@ -243,7 +238,6 @@ func (gap *groupByAttrsProcessor) getGroupedMetricsFromAttributes(
metric pmetric.Metric,
attributes pcommon.Map,
) pmetric.Metric {

toBeGrouped, requiredAttributes := gap.extractGroupingAttributes(attributes)
if toBeGrouped {
gap.telemetryBuilder.ProcessorGroupbyattrsNumGroupedMetrics.Add(ctx, 1)
@@ -262,5 +256,4 @@ func (gap *groupByAttrsProcessor) getGroupedMetricsFromAttributes(

// Return the metric in this resource
return getMetricInInstrumentationLibrary(groupedInstrumentationLibrary, metric)

}
1 change: 0 additions & 1 deletion processor/groupbyattrsprocessor/processor_test.go
@@ -845,7 +845,6 @@ func someExponentialHistogramMetrics(attrs pcommon.Map, instrumentationLibraryCo
}

func TestMetricAdvancedGrouping(t *testing.T) {

// Input:
//
// Resource {host.name="localhost"}
2 changes: 0 additions & 2 deletions processor/groupbytraceprocessor/factory.go
@@ -30,7 +30,6 @@ var (

// NewFactory returns a new factory for the Filter processor.
func NewFactory() processor.Factory {

return processor.NewFactory(
metadata.Type,
createDefaultConfig,
@@ -56,7 +55,6 @@ func createTracesProcessor(
params processor.Settings,
cfg component.Config,
nextConsumer consumer.Traces) (processor.Traces, error) {

oCfg := cfg.(*Config)

var st storage
1 change: 0 additions & 1 deletion processor/k8sattributesprocessor/internal/kube/client.go
@@ -538,7 +538,6 @@ func (c *WatchClient) extractPodAttributes(pod *api_v1.Pod) map[string]string {

// This function removes all data from the Pod except what is required by extraction rules and pod association
func removeUnnecessaryPodData(pod *api_v1.Pod, rules ExtractionRules) *api_v1.Pod {

// name, namespace, uid, start time and ip are needed for identifying Pods
// there's room to optimize this further, it's kept this way for simplicity
transformedPod := api_v1.Pod{
@@ -88,7 +88,6 @@ func podAddAndUpdateTest(t *testing.T, c *WatchClient, handler func(obj any)) {
assert.Equal(t, "2.2.2.2", got.Address)
assert.Equal(t, "podC", got.Name)
assert.Equal(t, "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee", got.PodUID)

}

func namespaceAddAndUpdateTest(t *testing.T, c *WatchClient, handler func(obj any)) {
@@ -290,7 +289,6 @@ func TestReplicaSetHandler(t *testing.T) {
Obj: replicaset,
})
assert.Empty(t, c.ReplicaSets)

}

func TestPodHostNetwork(t *testing.T) {
@@ -1373,7 +1371,6 @@ func TestFilters(t *testing.T) {
assert.Equal(t, tc.fields, inf.fieldSelector.String())
})
}

}

func TestPodIgnorePatterns(t *testing.T) {
2 changes: 0 additions & 2 deletions processor/k8sattributesprocessor/internal/kube/informer.go
@@ -70,7 +70,6 @@ func informerListFuncWithSelectors(client kubernetes.Interface, namespace string
opts.FieldSelector = fs.String()
return client.CoreV1().Pods(namespace).List(context.Background(), opts)
}

}

func informerWatchFuncWithSelectors(client kubernetes.Interface, namespace string, ls labels.Selector, fs fields.Selector) cache.WatchFunc {
@@ -120,7 +119,6 @@ func namespaceInformerListFunc(client kubernetes.Interface) cache.ListFunc {
return func(opts metav1.ListOptions) (runtime.Object, error) {
return client.CoreV1().Namespaces().List(context.Background(), opts)
}

}

func namespaceInformerWatchFunc(client kubernetes.Interface) cache.WatchFunc {
1 change: 0 additions & 1 deletion processor/k8sattributesprocessor/options_test.go
@@ -490,7 +490,6 @@ func TestWithFilterLabels(t *testing.T) {
}

func TestWithFilterFields(t *testing.T) {

tests := []struct {
name string
args []FieldFilterConfig
4 changes: 0 additions & 4 deletions processor/k8sattributesprocessor/processor_test.go
@@ -321,7 +321,6 @@ func (strAddr) Network() string {
}

func TestIPDetectionFromContext(t *testing.T) {

addresses := []net.Addr{
&net.IPAddr{
IP: net.IPv4(1, 1, 1, 1),
@@ -357,7 +356,6 @@ func TestIPDetectionFromContext(t *testing.T) {
assertResourceHasStringAttribute(t, r, "k8s.pod.ip", "1.1.1.1")
})
}

}

func TestNilBatch(t *testing.T) {
@@ -1352,7 +1350,6 @@ func TestMetricsProcessorHostname(t *testing.T) {
}
})
}

}

func TestMetricsProcessorHostnameWithPodAssociation(t *testing.T) {
@@ -1435,7 +1432,6 @@ func TestMetricsProcessorHostnameWithPodAssociation(t *testing.T) {
}
})
}

}

func TestPassthroughStart(t *testing.T) {
1 change: 0 additions & 1 deletion processor/logstransformprocessor/processor.go
@@ -79,7 +79,6 @@ func (ltp *logsTransformProcessor) Shutdown(ctx context.Context) error {
}

func (ltp *logsTransformProcessor) Start(ctx context.Context, _ component.Host) error {

wkrCount := int(math.Max(1, float64(runtime.NumCPU())))
ltp.fromConverter = adapter.NewFromPdataConverter(ltp.set, wkrCount)

1 change: 0 additions & 1 deletion processor/logstransformprocessor/processor_test.go
@@ -205,7 +205,6 @@ type laggyOperator struct {
}

func (t *laggyOperator) Process(ctx context.Context, e *entry.Entry) error {

// Wait for a large amount of time every 100 logs
if t.logsCount%100 == 0 {
time.Sleep(100 * time.Millisecond)
1 change: 0 additions & 1 deletion processor/metricsgenerationprocessor/factory_test.go
@@ -40,7 +40,6 @@ func TestCreateProcessors(t *testing.T) {
for k := range cm.ToStringMap() {
// Check if all processor variations that are defined in test config can be actually created
t.Run(k, func(t *testing.T) {

factory := NewFactory()
cfg := factory.CreateDefaultConfig()

2 changes: 0 additions & 2 deletions processor/metricsgenerationprocessor/processor_test.go
@@ -325,10 +325,8 @@ func TestMetricsGenerationProcessor(t *testing.T) {
case pmetric.NumberDataPointValueTypeInt:
require.Equal(t, eDataPoints.At(j).IntValue(), aDataPoints.At(j).IntValue())
}

}
}

}

require.NoError(t, mgp.Shutdown(ctx))
1 change: 0 additions & 1 deletion processor/metricstransformprocessor/factory.go
@@ -127,7 +127,6 @@ func validateConfiguration(config *Config) error {
func buildHelperConfig(config *Config, version string) ([]internalTransform, error) {
helperDataTransforms := make([]internalTransform, len(config.Transforms))
for i, t := range config.Transforms {

if t.MetricIncludeFilter.MatchType == "" {
t.MetricIncludeFilter.MatchType = strictMatchType
}
@@ -368,15 +368,13 @@ func canBeCombined(metrics []pmetric.Metric) error {
"metrics cannot be combined as they have different aggregation temporalities: %v (%v) and %v (%v)",
firstMetric.Name(), firstMetric.Histogram().AggregationTemporality(), metric.Name(),
metric.Histogram().AggregationTemporality())

}
case pmetric.MetricTypeExponentialHistogram:
if firstMetric.ExponentialHistogram().AggregationTemporality() != metric.ExponentialHistogram().AggregationTemporality() {
return fmt.Errorf(
"metrics cannot be combined as they have different aggregation temporalities: %v (%v) and %v (%v)",
firstMetric.Name(), firstMetric.ExponentialHistogram().AggregationTemporality(), metric.Name(),
metric.ExponentialHistogram().AggregationTemporality())

}
}
}
1 change: 0 additions & 1 deletion processor/probabilisticsamplerprocessor/logsprocessor.go
@@ -268,7 +268,6 @@ func (lsp *logsProcessor) logRecordToPriorityThreshold(logRec plog.LogRecord) sa
// The record has supplied a valid alternative sampling probability
return th
}

}
}
return sampling.NeverSampleThreshold