From 36350ff68ae34c3a760693f6f280ea8c2d2e7f2a Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Thu, 9 Jan 2025 20:16:37 +0100 Subject: [PATCH] chore: enable early-return from revive Signed-off-by: Matthieu MOREL --- .golangci.yml | 5 ++ confmap/provider/aesprovider/provider_test.go | 5 +- .../datadogexporter/metrics_exporter_test.go | 20 ++++---- exporter/datasetexporter/datasetexporter.go | 5 +- exporter/honeycombmarkerexporter/config.go | 31 ++++++------ exporter/prometheusexporter/accumulator.go | 7 ++- .../internal/correlation/correlation.go | 5 +- exporter/sumologicexporter/exporter.go | 6 +-- internal/aws/metrics/metric_calculator.go | 5 +- internal/sqlquery/scraper.go | 5 +- pkg/stanza/entry/entry.go | 6 +-- .../internal/stores/podstore.go | 17 ++++--- receiver/awss3receiver/receiver.go | 6 +-- .../azureresourcemetrics_unmarshaler.go | 5 +- receiver/cloudfoundryreceiver/stream.go | 5 +- .../internal/kubelet/metadata.go | 12 ++--- .../internal/transaction.go | 6 +-- receiver/saphanareceiver/queries.go | 9 ++-- receiver/solacereceiver/receiver.go | 48 +++++++++---------- receiver/splunkhecreceiver/receiver.go | 20 ++++---- receiver/sqlserverreceiver/scraper.go | 15 +++--- .../windowsperfcounters_scraper_test.go | 5 +- receiver/zookeeperreceiver/scraper.go | 5 +- testbed/testbed/child_process_collector.go | 8 ++-- 24 files changed, 122 insertions(+), 139 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 27206273d148..53d3ba663ade 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -89,6 +89,9 @@ linters-settings: - name: context-keys-type # Importing with `.` makes the programs much harder to understand - name: dot-imports + - name: early-return + arguments: + - "preserveScope" # Empty blocks make code less readable and could be a symptom of a bug or unfinished refactoring. - name: empty-block # for better readability, variables of type `error` must be named with the prefix `err`. @@ -111,6 +114,8 @@ linters-settings: - name: redefines-builtin-id # redundant else-blocks that can be eliminated from the code. - name: superfluous-else + arguments: + - "preserveScope" # prevent confusing name for variables when using `time` package - name: time-naming # warns when an exported function or method returns a value of an un-exported type. 
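For readers unfamiliar with the rule being enabled above, here is a minimal, self-contained sketch of the if/else shape that revive's early-return rule flags and the guard-clause form it suggests; as I understand the revive docs, the "preserveScope" argument additionally limits suggestions to rewrites that do not widen a variable's scope. This example is not taken from the patch: the cfg map, the applyDefaults helper, and both function names are made up for illustration.

package main

import (
	"errors"
	"fmt"
)

// applyDefaults is a made-up helper used only for illustration.
func applyDefaults(cfg map[string]string) {
	if _, ok := cfg["endpoint"]; !ok {
		cfg["endpoint"] = "localhost:4317"
	}
}

// configureNested has the shape the early-return rule flags: the else
// branch ends in a return, so the happy path is needlessly indented.
func configureNested(cfg map[string]string) error {
	if cfg != nil {
		applyDefaults(cfg)
	} else {
		return errors.New("missing config")
	}
	return nil
}

// configureEarlyReturn is the suggested rewrite: invert the condition,
// return early on the error path, and keep the happy path unindented.
func configureEarlyReturn(cfg map[string]string) error {
	if cfg == nil {
		return errors.New("missing config")
	}
	applyDefaults(cfg)
	return nil
}

func main() {
	cfg := map[string]string{}
	fmt.Println(configureNested(cfg), configureEarlyReturn(cfg), cfg["endpoint"])
}

The hunks that follow are all instances of this transformation applied by hand across the repository.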
diff --git a/confmap/provider/aesprovider/provider_test.go b/confmap/provider/aesprovider/provider_test.go index cc8e62489246..d81aee471564 100644 --- a/confmap/provider/aesprovider/provider_test.go +++ b/confmap/provider/aesprovider/provider_test.go @@ -104,13 +104,12 @@ func TestAESCredentialProvider(t *testing.T) { p := NewFactory().Create(confmap.ProviderSettings{}) retrieved, err := p.Retrieve(context.Background(), tt.configValue, nil) - if tt.expectedError == "" { - require.NoError(t, err) - } else { + if tt.expectedError != "" { require.Error(t, err) require.Equal(t, tt.expectedError, err.Error()) return } + require.NoError(t, err) require.NotNil(t, retrieved) stringValue, err := retrieved.AsString() require.NoError(t, err) diff --git a/exporter/datadogexporter/metrics_exporter_test.go b/exporter/datadogexporter/metrics_exporter_test.go index 95935fd053a4..fbd4c1f0fbd3 100644 --- a/exporter/datadogexporter/metrics_exporter_test.go +++ b/exporter/datadogexporter/metrics_exporter_test.go @@ -371,20 +371,18 @@ func Test_metricsExporter_PushMetricsData(t *testing.T) { reporter, nil, ) - if tt.expectedErr == nil { - assert.NoError(t, err, "unexpected error") - } else { + if tt.expectedErr != nil { assert.Equal(t, tt.expectedErr, err, "expected error doesn't match") return } + assert.NoError(t, err, "unexpected error") exp.getPushTime = func() uint64 { return 0 } err = exp.PushMetricsData(context.Background(), tt.metrics) - if tt.expectedErr == nil { - assert.NoError(t, err, "unexpected error") - } else { + if tt.expectedErr != nil { assert.Equal(t, tt.expectedErr, err, "expected error doesn't match") return } + assert.NoError(t, err, "unexpected error") if len(tt.expectedSeries) == 0 { assert.Nil(t, seriesRecorder.ByteBody) } else { @@ -815,20 +813,18 @@ func Test_metricsExporter_PushMetricsData_Zorkian(t *testing.T) { reporter, nil, ) - if tt.expectedErr == nil { - assert.NoError(t, err, "unexpected error") - } else { + if tt.expectedErr != nil { assert.Equal(t, tt.expectedErr, err, "expected error doesn't match") return } + assert.NoError(t, err, "unexpected error") exp.getPushTime = func() uint64 { return 0 } err = exp.PushMetricsData(context.Background(), tt.metrics) - if tt.expectedErr == nil { - assert.NoError(t, err, "unexpected error") - } else { + if tt.expectedErr != nil { assert.Equal(t, tt.expectedErr, err, "expected error doesn't match") return } + assert.NoError(t, err, "unexpected error") if len(tt.expectedSeries) == 0 { assert.Nil(t, seriesRecorder.ByteBody) } else { diff --git a/exporter/datasetexporter/datasetexporter.go b/exporter/datasetexporter/datasetexporter.go index 8f50db3a2a20..d4230329d641 100644 --- a/exporter/datasetexporter/datasetexporter.go +++ b/exporter/datasetexporter/datasetexporter.go @@ -109,12 +109,11 @@ func updateWithPrefixedValues(target map[string]any, prefix string, separator st // now the last value wins // Should the first value win? 
_, found := target[prefix] - if found && len(suffix) > 0 { - prefix += suffix - } else { + if !found || len(suffix) == 0 { target[prefix] = source break } + prefix += suffix } } diff --git a/exporter/honeycombmarkerexporter/config.go b/exporter/honeycombmarkerexporter/config.go index f7e4b9facab0..05f3cafb1b56 100644 --- a/exporter/honeycombmarkerexporter/config.go +++ b/exporter/honeycombmarkerexporter/config.go @@ -64,24 +64,23 @@ func (cfg *Config) Validate() error { return fmt.Errorf("invalid API Key") } - if len(cfg.Markers) != 0 { - for _, m := range cfg.Markers { - if m.Type == "" { - return fmt.Errorf("marker must have a type %v", m) - } - - if len(m.Rules.LogConditions) == 0 { - return fmt.Errorf("marker must have rules %v", m) - } - - _, err := filterottl.NewBoolExprForLog(m.Rules.LogConditions, filterottl.StandardLogFuncs(), ottl.PropagateError, component.TelemetrySettings{Logger: zap.NewNop()}) - if err != nil { - return err - } - } - } else { + if len(cfg.Markers) == 0 { return fmt.Errorf("no markers supplied") } + for _, m := range cfg.Markers { + if m.Type == "" { + return fmt.Errorf("marker must have a type %v", m) + } + + if len(m.Rules.LogConditions) == 0 { + return fmt.Errorf("marker must have rules %v", m) + } + + _, err := filterottl.NewBoolExprForLog(m.Rules.LogConditions, filterottl.StandardLogFuncs(), ottl.PropagateError, component.TelemetrySettings{Logger: zap.NewNop()}) + if err != nil { + return err + } + } return nil } diff --git a/exporter/prometheusexporter/accumulator.go b/exporter/prometheusexporter/accumulator.go index 3a6559118fe5..449d7de4d6cf 100644 --- a/exporter/prometheusexporter/accumulator.go +++ b/exporter/prometheusexporter/accumulator.go @@ -261,15 +261,14 @@ func (a *lastValueAccumulator) accumulateHistogram(metric pmetric.Metric, il pco zap.String("pp_timestamp", pp.Timestamp().String()), zap.String("ip_timestamp", ip.Timestamp().String()), ).Warn("Misaligned starting timestamps") - if ip.StartTimestamp().AsTime().After(pp.Timestamp().AsTime()) { - a.logger.Debug("treating it like reset") - ip.CopyTo(m.Histogram().DataPoints().AppendEmpty()) - } else { + if !ip.StartTimestamp().AsTime().After(pp.Timestamp().AsTime()) { a.logger.With( zap.String("metric_name", metric.Name()), ).Warn("Dropped misaligned histogram datapoint") continue } + a.logger.Debug("treating it like reset") + ip.CopyTo(m.Histogram().DataPoints().AppendEmpty()) } else { a.logger.Debug("Accumulate another histogram datapoint") accumulateHistogramValues(pp, ip, m.Histogram().DataPoints().AppendEmpty()) diff --git a/exporter/signalfxexporter/internal/correlation/correlation.go b/exporter/signalfxexporter/internal/correlation/correlation.go index 5b3ac9c66fde..590d1735b901 100644 --- a/exporter/signalfxexporter/internal/correlation/correlation.go +++ b/exporter/signalfxexporter/internal/correlation/correlation.go @@ -90,12 +90,11 @@ func (cor *Tracker) ProcessTraces(ctx context.Context, traces ptrace.Traces) err res := traces.ResourceSpans().At(0).Resource() hostID, ok := splunk.ResourceToHostID(res) - if ok { - cor.log.Info("Detected host resource ID for correlation", zap.Any("hostID", hostID)) - } else { + if !ok { cor.log.Warn("Unable to determine host resource ID for correlation syncing") return } + cor.log.Info("Detected host resource ID for correlation", zap.Any("hostID", hostID)) hostDimension := string(hostID.Key) diff --git a/exporter/sumologicexporter/exporter.go b/exporter/sumologicexporter/exporter.go index 9659b5f240f1..14060843d923 100644 --- 
a/exporter/sumologicexporter/exporter.go +++ b/exporter/sumologicexporter/exporter.go @@ -390,12 +390,12 @@ func (se *sumologicexporter) handleUnauthorizedErrors(ctx context.Context, errs for _, err := range errs { if errors.Is(err, errUnauthorized) { se.logger.Warn("Received unauthorized status code, triggering reconfiguration") - if errC := se.configure(ctx); errC != nil { - se.logger.Error("Error configuring the exporter with new credentials", zap.Error(err)) - } else { + errC := se.configure(ctx) + if errC == nil { // It's enough to successfully reconfigure the exporter just once. return } + se.logger.Error("Error configuring the exporter with new credentials", zap.Error(err)) } } } diff --git a/internal/aws/metrics/metric_calculator.go b/internal/aws/metrics/metric_calculator.go index 16435ef7f754..979bb91a6c50 100644 --- a/internal/aws/metrics/metric_calculator.go +++ b/internal/aws/metrics/metric_calculator.go @@ -26,11 +26,10 @@ func NewFloat64DeltaCalculator() MetricCalculator { func calculateDelta(prev *MetricValue, val any, _ time.Time) (any, bool) { var deltaValue float64 - if prev != nil { - deltaValue = val.(float64) - prev.RawValue.(float64) - } else { + if prev == nil { return deltaValue, false } + deltaValue = val.(float64) - prev.RawValue.(float64) return deltaValue, true } diff --git a/internal/sqlquery/scraper.go b/internal/sqlquery/scraper.go index 680a6164fa0a..0de61c0559e9 100644 --- a/internal/sqlquery/scraper.go +++ b/internal/sqlquery/scraper.go @@ -74,11 +74,10 @@ func (s *Scraper) ScrapeMetrics(ctx context.Context) (pmetric.Metrics, error) { out := pmetric.NewMetrics() rows, err := s.Client.QueryRows(ctx) if err != nil { - if errors.Is(err, ErrNullValueWarning) { - s.Logger.Warn("problems encountered getting metric rows", zap.Error(err)) - } else { + if !errors.Is(err, ErrNullValueWarning) { return out, fmt.Errorf("Scraper: %w", err) } + s.Logger.Warn("problems encountered getting metric rows", zap.Error(err)) } ts := pcommon.NewTimestampFromTime(time.Now()) rms := out.ResourceMetrics() diff --git a/pkg/stanza/entry/entry.go b/pkg/stanza/entry/entry.go index 882f22695f41..caf8876644e2 100644 --- a/pkg/stanza/entry/entry.go +++ b/pkg/stanza/entry/entry.go @@ -136,11 +136,11 @@ func (entry *Entry) readToStringMap(field FieldInterface, dest *map[string]strin case map[string]any: newDest := make(map[string]string) for k, v := range m { - if vStr, ok := v.(string); ok { - newDest[k] = vStr - } else { + vStr, ok := v.(string) + if !ok { return fmt.Errorf("can not cast map members '%s' of type '%s' to string", k, v) } + newDest[k] = vStr } *dest = newDest case map[any]any: diff --git a/receiver/awscontainerinsightreceiver/internal/stores/podstore.go b/receiver/awscontainerinsightreceiver/internal/stores/podstore.go index 16350c21fe04..71396090b20e 100644 --- a/receiver/awscontainerinsightreceiver/internal/stores/podstore.go +++ b/receiver/awscontainerinsightreceiver/internal/stores/podstore.go @@ -206,18 +206,17 @@ func (p *PodStore) Decorate(ctx context.Context, metric CIMetric, kubernetesBlob } // If the entry is not a placeholder, decorate the pod - if entry.pod.Name != "" { - p.decorateCPU(metric, &entry.pod) - p.decorateMem(metric, &entry.pod) - p.addStatus(metric, &entry.pod) - addContainerCount(metric, &entry.pod) - addContainerID(&entry.pod, metric, kubernetesBlob, p.logger) - p.addPodOwnersAndPodName(metric, &entry.pod, kubernetesBlob) - addLabels(&entry.pod, kubernetesBlob) - } else { + if entry.pod.Name == "" { p.logger.Warn("no pod information is found in 
podstore for pod " + podKey) return false } + p.decorateCPU(metric, &entry.pod) + p.decorateMem(metric, &entry.pod) + p.addStatus(metric, &entry.pod) + addContainerCount(metric, &entry.pod) + addContainerID(&entry.pod, metric, kubernetesBlob, p.logger) + p.addPodOwnersAndPodName(metric, &entry.pod, kubernetesBlob) + addLabels(&entry.pod, kubernetesBlob) } return true } diff --git a/receiver/awss3receiver/receiver.go b/receiver/awss3receiver/receiver.go index 89e67c4f525c..b4df749767b7 100644 --- a/receiver/awss3receiver/receiver.go +++ b/receiver/awss3receiver/receiver.go @@ -251,11 +251,11 @@ func newEncodingExtensions(encodingsConfig []Encoding, host component.Host) (enc encodings := make(encodingExtensions, 0) extensions := host.GetExtensions() for _, configItem := range encodingsConfig { - if e, ok := extensions[configItem.Extension]; ok { - encodings = append(encodings, encodingExtension{extension: e, suffix: configItem.Suffix}) - } else { + e, ok := extensions[configItem.Extension] + if !ok { return nil, fmt.Errorf("extension %q not found", configItem.Extension) } + encodings = append(encodings, encodingExtension{extension: e, suffix: configItem.Suffix}) } return encodings, nil } diff --git a/receiver/azureeventhubreceiver/azureresourcemetrics_unmarshaler.go b/receiver/azureeventhubreceiver/azureresourcemetrics_unmarshaler.go index cce62e907fbf..145482a0222c 100644 --- a/receiver/azureeventhubreceiver/azureresourcemetrics_unmarshaler.go +++ b/receiver/azureeventhubreceiver/azureresourcemetrics_unmarshaler.go @@ -97,12 +97,11 @@ func (r azureResourceMetricsUnmarshaler) UnmarshalMetrics(event *eventhub.Event) } var startTimestamp pcommon.Timestamp - if azureMetric.TimeGrain == "PT1M" { - startTimestamp = pcommon.NewTimestampFromTime(nanos.AsTime().Add(-time.Minute)) - } else { + if azureMetric.TimeGrain != "PT1M" { r.logger.Warn("Unhandled Time Grain", zap.String("timegrain", azureMetric.TimeGrain)) continue } + startTimestamp = pcommon.NewTimestampFromTime(nanos.AsTime().Add(-time.Minute)) metricTotal := metrics.AppendEmpty() metricTotal.SetName(strings.ToLower(fmt.Sprintf("%s_%s", strings.ReplaceAll(azureMetric.MetricName, " ", "_"), "Total"))) diff --git a/receiver/cloudfoundryreceiver/stream.go b/receiver/cloudfoundryreceiver/stream.go index c0b2e7b088ff..af007455690b 100644 --- a/receiver/cloudfoundryreceiver/stream.go +++ b/receiver/cloudfoundryreceiver/stream.go @@ -89,12 +89,11 @@ type authorizationProvider struct { func (ap *authorizationProvider) Do(request *http.Request) (*http.Response, error) { token, err := ap.authTokenProvider.ProvideToken() - if err == nil { - request.Header.Set("Authorization", token) - } else { + if err != nil { ap.logger.Error("fetching authentication token", zap.Error(err)) return nil, errors.New("obtaining authentication token for the request") } + request.Header.Set("Authorization", token) return ap.client.Do(request) } diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go b/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go index a2ffdb452dc4..5824b46a2b48 100644 --- a/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go +++ b/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go @@ -34,14 +34,14 @@ var supportedLabels = map[MetadataLabel]bool{ func ValidateMetadataLabelsConfig(labels []MetadataLabel) error { labelsFound := map[MetadataLabel]bool{} for _, label := range labels { - if _, supported := supportedLabels[label]; supported { - if _, duplicate := labelsFound[label]; duplicate { - return 
fmt.Errorf("duplicate metadata label: %q", label) - } - labelsFound[label] = true - } else { + _, supported := supportedLabels[label] + if !supported { return fmt.Errorf("label %q is not supported", label) } + if _, duplicate := labelsFound[label]; duplicate { + return fmt.Errorf("duplicate metadata label: %q", label) + } + labelsFound[label] = true } return nil } diff --git a/receiver/prometheusreceiver/internal/transaction.go b/receiver/prometheusreceiver/internal/transaction.go index 00728449629d..fdcbea49d07f 100644 --- a/receiver/prometheusreceiver/internal/transaction.go +++ b/receiver/prometheusreceiver/internal/transaction.go @@ -198,13 +198,13 @@ func (t *transaction) getOrCreateMetricFamily(key resourceKey, scope scopeID, mn if _, ok := t.mc.GetMetadata(mn); !ok { fn = normalizeMetricName(mn) } - if mf, ok := t.families[key][scope][fn]; ok && mf.includesMetric(mn) { - curMf = mf - } else { + mf, ok := t.families[key][scope][fn] + if !ok || !mf.includesMetric(mn) { curMf = newMetricFamily(mn, t.mc, t.logger) t.families[key][scope][curMf.name] = curMf return curMf, false } + curMf = mf } return curMf, true } diff --git a/receiver/saphanareceiver/queries.go b/receiver/saphanareceiver/queries.go index ec565dcea8f7..c87a718920a6 100644 --- a/receiver/saphanareceiver/queries.go +++ b/receiver/saphanareceiver/queries.go @@ -37,13 +37,12 @@ func (q *queryStat) collectStat(s *sapHanaScraper, m *monitoringQuery, now pcomm return fmt.Errorf("unable to parse metric for key %s: %w", q.key, err) } - if q.addMetricFunction != nil { - if err = q.addMetricFunction(mb, now, val, row); err != nil { - return fmt.Errorf("failed to record metric for key %s: %w", q.key, err) - } - } else { + if q.addMetricFunction == nil { return errors.New("incorrectly configured query, addMetricFunction must be provided") } + if err = q.addMetricFunction(mb, now, val, row); err != nil { + return fmt.Errorf("failed to record metric for key %s: %w", q.key, err) + } } return nil } diff --git a/receiver/solacereceiver/receiver.go b/receiver/solacereceiver/receiver.go index 893c677bd69e..a6a6daf203cc 100644 --- a/receiver/solacereceiver/receiver.go +++ b/receiver/solacereceiver/receiver.go @@ -265,35 +265,33 @@ flowControlLoop: } forwardErr := s.nextConsumer.ConsumeTraces(ctx, traces) - if forwardErr != nil { - if !consumererror.IsPermanent(forwardErr) { - s.settings.Logger.Info("Encountered temporary error while forwarding traces to next receiver, will allow redelivery", zap.Error(forwardErr)) - // handle flow control metrics - if flowControlCount == 0 { - s.telemetryBuilder.SolacereceiverReceiverFlowControlStatus.Record(ctx, int64(flowControlStateControlled), metric.WithAttributeSet(s.metricAttrs)) - } - flowControlCount++ - s.telemetryBuilder.SolacereceiverReceiverFlowControlRecentRetries.Record(ctx, flowControlCount, metric.WithAttributeSet(s.metricAttrs)) - // Backpressure scenario. 
For now, we are only delayed retry, eventually we may need to handle this - delayTimer := time.NewTimer(s.config.Flow.DelayedRetry.Delay) - select { - case <-delayTimer.C: - continue flowControlLoop - case <-ctx.Done(): - s.settings.Logger.Info("Context was cancelled while attempting redelivery, exiting") - disposition = nil // do not make any network requests, we are shutting down - return errors.New("delayed retry interrupted by shutdown request") - } - } else { // error is permanent, we want to accept the message and increment the number of dropped messages - s.settings.Logger.Warn("Encountered permanent error while forwarding traces to next receiver, will swallow trace", zap.Error(forwardErr)) - s.telemetryBuilder.SolacereceiverDroppedSpanMessages.Add(ctx, 1, metric.WithAttributeSet(s.metricAttrs)) - break flowControlLoop - } - } else { + if forwardErr == nil { // no forward error s.telemetryBuilder.SolacereceiverReportedSpans.Add(ctx, int64(spanCount), metric.WithAttributeSet(s.metricAttrs)) break flowControlLoop } + if consumererror.IsPermanent(forwardErr) { // error is permanent, we want to accept the message and increment the number of dropped messages + s.settings.Logger.Warn("Encountered permanent error while forwarding traces to next receiver, will swallow trace", zap.Error(forwardErr)) + s.telemetryBuilder.SolacereceiverDroppedSpanMessages.Add(ctx, 1, metric.WithAttributeSet(s.metricAttrs)) + break flowControlLoop + } + s.settings.Logger.Info("Encountered temporary error while forwarding traces to next receiver, will allow redelivery", zap.Error(forwardErr)) + // handle flow control metrics + if flowControlCount == 0 { + s.telemetryBuilder.SolacereceiverReceiverFlowControlStatus.Record(ctx, int64(flowControlStateControlled), metric.WithAttributeSet(s.metricAttrs)) + } + flowControlCount++ + s.telemetryBuilder.SolacereceiverReceiverFlowControlRecentRetries.Record(ctx, flowControlCount, metric.WithAttributeSet(s.metricAttrs)) + // Backpressure scenario. 
For now, we are only delayed retry, eventually we may need to handle this + delayTimer := time.NewTimer(s.config.Flow.DelayedRetry.Delay) + select { + case <-delayTimer.C: + continue flowControlLoop + case <-ctx.Done(): + s.settings.Logger.Info("Context was cancelled while attempting redelivery, exiting") + disposition = nil // do not make any network requests, we are shutting down + return errors.New("delayed retry interrupted by shutdown request") + } } // Make sure to clear the stats no matter what, unless we were interrupted in which case we should preserve the last state if flowControlCount != 0 { diff --git a/receiver/splunkhecreceiver/receiver.go b/receiver/splunkhecreceiver/receiver.go index 2bf5458cc7f7..4fa9552466d5 100644 --- a/receiver/splunkhecreceiver/receiver.go +++ b/receiver/splunkhecreceiver/receiver.go @@ -147,12 +147,12 @@ func (r *splunkReceiver) Start(ctx context.Context, host component.Host) error { mx := mux.NewRouter() // set up the ack API handler if the ack extension is present if r.config.Ack.Extension != nil { - if ext, found := host.GetExtensions()[*r.config.Ack.Extension]; found { - r.ackExt = ext.(ackextension.AckExtension) - mx.NewRoute().Path(r.config.Ack.Path).HandlerFunc(r.handleAck) - } else { + ext, found := host.GetExtensions()[*r.config.Ack.Extension] + if !found { return fmt.Errorf("specified ack extension with id %q could not be found", *r.config.Ack.Extension) } + r.ackExt = ext.(ackextension.AckExtension) + mx.NewRoute().Path(r.config.Ack.Path).HandlerFunc(r.handleAck) } mx.NewRoute().Path(r.config.HealthPath).HandlerFunc(r.handleHealthReq) @@ -227,15 +227,15 @@ func (r *splunkReceiver) handleAck(resp http.ResponseWriter, req *http.Request) var channelID string var extracted bool - if channelID, extracted = r.extractChannel(req); extracted { - if channelErr := r.validateChannelHeader(channelID); channelErr != nil { - r.failRequest(resp, http.StatusBadRequest, []byte(channelErr.Error()), channelErr) - return - } - } else { + channelID, extracted = r.extractChannel(req) + if !extracted { r.failRequest(resp, http.StatusBadRequest, requiredDataChannelHeader, nil) return } + if channelErr := r.validateChannelHeader(channelID); channelErr != nil { + r.failRequest(resp, http.StatusBadRequest, []byte(channelErr.Error()), channelErr) + return + } dec := json.NewDecoder(req.Body) var ackRequest splunk.AckRequest diff --git a/receiver/sqlserverreceiver/scraper.go b/receiver/sqlserverreceiver/scraper.go index 9b43f4b5d37e..bd3e50fa4db3 100644 --- a/receiver/sqlserverreceiver/scraper.go +++ b/receiver/sqlserverreceiver/scraper.go @@ -123,11 +123,10 @@ func (s *sqlServerScraperHelper) recordDatabaseIOMetrics(ctx context.Context) er rows, err := s.client.QueryRows(ctx) if err != nil { - if errors.Is(err, sqlquery.ErrNullValueWarning) { - s.logger.Warn("problems encountered getting metric rows", zap.Error(err)) - } else { + if !errors.Is(err, sqlquery.ErrNullValueWarning) { return fmt.Errorf("sqlServerScraperHelper: %w", err) } + s.logger.Warn("problems encountered getting metric rows", zap.Error(err)) } var errs []error @@ -186,11 +185,10 @@ func (s *sqlServerScraperHelper) recordDatabasePerfCounterMetrics(ctx context.Co rows, err := s.client.QueryRows(ctx) if err != nil { - if errors.Is(err, sqlquery.ErrNullValueWarning) { - s.logger.Warn("problems encountered getting metric rows", zap.Error(err)) - } else { + if !errors.Is(err, sqlquery.ErrNullValueWarning) { return fmt.Errorf("sqlServerScraperHelper: %w", err) } + s.logger.Warn("problems encountered getting 
metric rows", zap.Error(err)) } var errs []error @@ -274,11 +272,10 @@ func (s *sqlServerScraperHelper) recordDatabaseStatusMetrics(ctx context.Context rows, err := s.client.QueryRows(ctx) if err != nil { - if errors.Is(err, sqlquery.ErrNullValueWarning) { - s.logger.Warn("problems encountered getting metric rows", zap.Error(err)) - } else { + if !errors.Is(err, sqlquery.ErrNullValueWarning) { return fmt.Errorf("sqlServerScraperHelper failed getting metric rows: %w", err) } + s.logger.Warn("problems encountered getting metric rows", zap.Error(err)) } var errs []error diff --git a/receiver/windowsperfcountersreceiver/windowsperfcounters_scraper_test.go b/receiver/windowsperfcountersreceiver/windowsperfcounters_scraper_test.go index 9d7239d269d1..d137ff891e68 100644 --- a/receiver/windowsperfcountersreceiver/windowsperfcounters_scraper_test.go +++ b/receiver/windowsperfcountersreceiver/windowsperfcounters_scraper_test.go @@ -214,9 +214,7 @@ func Test_WindowsPerfCounterScraper(t *testing.T) { scraper := newScraper(cfg, settings) err := scraper.start(context.Background(), componenttest.NewNopHost()) - if test.startErr == "" { - require.Equal(t, 0, obs.Len()) - } else { + if test.startErr != "" { require.Equal(t, 1, obs.Len()) log := obs.All()[0] assert.Equal(t, zapcore.WarnLevel, log.Level) @@ -225,6 +223,7 @@ func Test_WindowsPerfCounterScraper(t *testing.T) { assert.EqualError(t, log.Context[0].Interface.(error), test.startErr) return } + require.Equal(t, 0, obs.Len()) require.NoError(t, err) actualMetrics, err := scraper.scrape(context.Background()) diff --git a/receiver/zookeeperreceiver/scraper.go b/receiver/zookeeperreceiver/scraper.go index 17062a3784a8..d26c639cbab7 100644 --- a/receiver/zookeeperreceiver/scraper.go +++ b/receiver/zookeeperreceiver/scraper.go @@ -184,14 +184,13 @@ func (z *zookeeperMetricsScraper) processRuok(response []string) { metricValue := int64(0) if len(response) > 0 { - if response[0] == "imok" { - metricValue = int64(1) - } else { + if response[0] != "imok" { z.logger.Error("invalid response from ruok", zap.String("command", ruokCommand), ) return } + metricValue = int64(1) } recordDataPoints := creator.recordDataPointsFunc(metricKey) diff --git a/testbed/testbed/child_process_collector.go b/testbed/testbed/child_process_collector.go index 6661f35595dd..59246f04cc53 100644 --- a/testbed/testbed/child_process_collector.go +++ b/testbed/testbed/child_process_collector.go @@ -334,12 +334,12 @@ func (cp *childProcessCollector) WatchResourceConsumption() error { for start := time.Now(); time.Since(start) < time.Minute; { cp.fetchRAMUsage() cp.fetchCPUUsage() - if err := cp.checkAllowedResourceUsage(); err != nil { - log.Printf("Allowed usage of resources is too high before test starts wait for one second : %v", err) - time.Sleep(time.Second) - } else { + err := cp.checkAllowedResourceUsage() + if err == nil { break } + log.Printf("Allowed usage of resources is too high before test starts wait for one second : %v", err) + time.Sleep(time.Second) } remainingFailures := cp.resourceSpec.MaxConsecutiveFailures