diff --git a/processor/attributesprocessor/factory_test.go b/processor/attributesprocessor/factory_test.go
index c210ffd08a1d..72a26027ba52 100644
--- a/processor/attributesprocessor/factory_test.go
+++ b/processor/attributesprocessor/factory_test.go
@@ -91,7 +91,7 @@ func TestFactory_CreateMetrics(t *testing.T) {
 		{Key: "fake_key", Action: attraction.UPSERT},
 	}
 
-	// Upsert should fail on non-existent key
+	// Upsert should fail on nonexistent key
 	mp, err = factory.CreateMetrics(context.Background(), processortest.NewNopSettings(), cfg, consumertest.NewNop())
 	require.Nil(t, mp)
 	require.Error(t, err)
diff --git a/processor/attributesprocessor/testdata/config.yaml b/processor/attributesprocessor/testdata/config.yaml
index ec78f3b09c96..8f0f7dbb09a8 100644
--- a/processor/attributesprocessor/testdata/config.yaml
+++ b/processor/attributesprocessor/testdata/config.yaml
@@ -33,7 +33,7 @@ attributes/regex_insert:
     #   http_path: path
     #   http_query_params=queryParam1=value1,queryParam2=value2
     # http.url value does NOT change.
-    # Note: Similar to the Span Procesor, if a target key already exists,
+    # Note: Similar to the Span Processor, if a target key already exists,
     # it will be updated.
     - key: "http.url"
      pattern: ^(?P<http_protocol>.*):\/\/(?P<http_domain>.*)\/(?P<http_path>.*)(\?|\&)(?P<http_query_params>.*)
diff --git a/processor/cumulativetodeltaprocessor/internal/tracking/tracker_test.go b/processor/cumulativetodeltaprocessor/internal/tracking/tracker_test.go
index 8ebcdb188f23..3412efc5849b 100644
--- a/processor/cumulativetodeltaprocessor/internal/tracking/tracker_test.go
+++ b/processor/cumulativetodeltaprocessor/internal/tracking/tracker_test.go
@@ -40,7 +40,7 @@ func TestMetricTracker_Convert(t *testing.T) {
 
 	future := time.Now().Add(1 * time.Hour)
 	keepSubsequentTest := subTest{
-		name: "keep subsequet value",
+		name: "keep subsequent value",
 		value: ValuePoint{
 			ObservedTimestamp: pcommon.NewTimestampFromTime(future.Add(time.Minute)),
 			FloatValue:        225,
diff --git a/processor/deltatocumulativeprocessor/internal/data/expo/scale.go b/processor/deltatocumulativeprocessor/internal/data/expo/scale.go
index c19830071eaf..9c3ee88652c8 100644
--- a/processor/deltatocumulativeprocessor/internal/data/expo/scale.go
+++ b/processor/deltatocumulativeprocessor/internal/data/expo/scale.go
@@ -47,7 +47,7 @@ func Downscale(bs Buckets, from, to Scale) {
 	case from < to:
 		// because even distribution within the buckets cannot be assumed, it is
 		// not possible to correctly upscale (split) buckets.
-		// any attempt to do so would yield erronous data.
+		// any attempt to do so would yield erroneous data.
 		panic(fmt.Sprintf("cannot upscale without introducing error (%d -> %d)", from, to))
 	}
 
@@ -107,7 +107,7 @@ func Collapse(bs Buckets) {
 	// zero the excess area. its not needed to represent the observation
 	// anymore, but kept for two reasons:
 	//  1. future observations may need it, no need to re-alloc then if kept
-	//  2. [pcommon.Uint64Slice] can not, in fact, be sliced, so getting rid
+	//  2. [pcommon.Uint64Slice] cannot, in fact, be sliced, so getting rid
 	//     of it would alloc ¯\_(ツ)_/¯
 	for i := size; i < counts.Len(); i++ {
 		counts.SetAt(i, 0)
diff --git a/processor/filterprocessor/metrics.go b/processor/filterprocessor/metrics.go
index 63beb811e2d6..655f4ba0b3f7 100644
--- a/processor/filterprocessor/metrics.go
+++ b/processor/filterprocessor/metrics.go
@@ -157,7 +157,7 @@ func (fmp *filterMetricProcessor) processMetrics(ctx context.Context, md pmetric
 		errors = multierr.Append(errors, fmp.handleHistogramDataPoints(ctx, metric.Histogram().DataPoints(), metric, smetrics.Metrics(), scope, resource))
 		return metric.Histogram().DataPoints().Len() == 0
 	case pmetric.MetricTypeExponentialHistogram:
-		errors = multierr.Append(errors, fmp.handleExponetialHistogramDataPoints(ctx, metric.ExponentialHistogram().DataPoints(), metric, smetrics.Metrics(), scope, resource))
+		errors = multierr.Append(errors, fmp.handleExponentialHistogramDataPoints(ctx, metric.ExponentialHistogram().DataPoints(), metric, smetrics.Metrics(), scope, resource))
 		return metric.ExponentialHistogram().DataPoints().Len() == 0
 	case pmetric.MetricTypeSummary:
 		errors = multierr.Append(errors, fmp.handleSummaryDataPoints(ctx, metric.Summary().DataPoints(), metric, smetrics.Metrics(), scope, resource))
@@ -283,7 +283,7 @@ func (fmp *filterMetricProcessor) handleHistogramDataPoints(ctx context.Context,
 	return errors
 }
 
-func (fmp *filterMetricProcessor) handleExponetialHistogramDataPoints(ctx context.Context, dps pmetric.ExponentialHistogramDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource) error {
+func (fmp *filterMetricProcessor) handleExponentialHistogramDataPoints(ctx context.Context, dps pmetric.ExponentialHistogramDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource) error {
 	var errors error
 	dps.RemoveIf(func(datapoint pmetric.ExponentialHistogramDataPoint) bool {
 		skip, err := fmp.skipDataPointExpr.Eval(ctx, ottldatapoint.NewTransformContext(datapoint, metric, metrics, is, resource, pmetric.NewScopeMetrics(), pmetric.NewResourceMetrics()))
diff --git a/processor/geoipprocessor/internal/provider/maxmindprovider/provider.go b/processor/geoipprocessor/internal/provider/maxmindprovider/provider.go
index f56b02fe4de0..9fb128b69bcc 100644
--- a/processor/geoipprocessor/internal/provider/maxmindprovider/provider.go
+++ b/processor/geoipprocessor/internal/provider/maxmindprovider/provider.go
@@ -58,7 +58,7 @@ func (g *maxMindProvider) Location(_ context.Context, ipAddress net.IP) (attribu
 	}
 }
 
-// cityAttributes returns a list of key-values containing geographical metadata associated to the provided IP. The key names are populated using the internal geo IP conventions package. If the an invalid or nil IP is provided, an error is returned.
+// cityAttributes returns a list of key-values containing geographical metadata associated with the provided IP. The key names are populated using the internal geo IP conventions package. If an invalid or nil IP is provided, an error is returned.
 func (g *maxMindProvider) cityAttributes(ipAddress net.IP) (*[]attribute.KeyValue, error) {
 	attributes := make([]attribute.KeyValue, 0, 11)
diff --git a/processor/geoipprocessor/internal/provider/maxmindprovider/testdata/generate_db.go b/processor/geoipprocessor/internal/provider/maxmindprovider/testdata/generate_db.go
index 335713f2deec..10fc2f5f3779 100644
--- a/processor/geoipprocessor/internal/provider/maxmindprovider/testdata/generate_db.go
+++ b/processor/geoipprocessor/internal/provider/maxmindprovider/testdata/generate_db.go
@@ -10,7 +10,7 @@ import (
 	"github.com/maxmind/MaxMind-DB/pkg/writer"
 )
 
-// GenerateLocalDB generates *.mmdb databases files given a source directory data. It uses a the writer functionality provided by MaxMind-Db/pkg/writer
+// GenerateLocalDB generates *.mmdb database files given a source directory data. It uses the writer functionality provided by MaxMind-Db/pkg/writer
 func GenerateLocalDB(t *testing.T, sourceData string) string {
 	tmpDir, err := os.MkdirTemp("", "")
 	if err != nil {
diff --git a/processor/groupbytraceprocessor/README.md b/processor/groupbytraceprocessor/README.md
index c4fc5e14ba3a..9aa43e96df53 100644
--- a/processor/groupbytraceprocessor/README.md
+++ b/processor/groupbytraceprocessor/README.md
@@ -68,7 +68,7 @@ The following metrics are recorded by this processor:
 
 A healthy system would have the same value for the metric `otelcol_processor_groupbytrace_spans_released` and for three events under `otelcol_processor_groupbytrace_event_latency_bucket`: `onTraceExpired`, `onTraceRemoved` and `onTraceReleased`.
 
-The metric `otelcol_processor_groupbytrace_event_latency_bucket` is a bucket and shows how long each event took to be processed in miliseconds. In most cases, it should take less than 5ms for an event to be processed, but it might be the case where an event could take 10ms. Higher latencies are possible, but it should never really reach the last item, representing 1s. Events taking more than 1s are killed automatically, and if you have multiple items in this bucket, it might indicate a bug in the software.
+The metric `otelcol_processor_groupbytrace_event_latency_bucket` is a bucket and shows how long each event took to be processed in milliseconds. In most cases, it should take less than 5ms for an event to be processed, but it might be the case where an event could take 10ms. Higher latencies are possible, but it should never really reach the last item, representing 1s. Events taking more than 1s are killed automatically, and if you have multiple items in this bucket, it might indicate a bug in the software.
 
 Most metrics are updated when the events occur, except for the following ones, which are updated periodically:
 * `otelcol_processor_groupbytrace_num_events_in_queue`
diff --git a/processor/intervalprocessor/README.md b/processor/intervalprocessor/README.md
index 10debe739740..8659cd76629b 100644
--- a/processor/intervalprocessor/README.md
+++ b/processor/intervalprocessor/README.md
@@ -51,29 +51,29 @@ intervalprocessor:
 
 The following sum metrics come into the processor to be handled
 
-| Timestamp | Metric Name  | Aggregation Temporarility | Attributes        | Value |
-| --------- | ------------ | ------------------------- | ----------------- | ----: |
-| 0         | test_metric  | Cumulative                | labelA: foo       |   4.0 |
-| 2         | test_metric  | Cumulative                | labelA: bar       |   3.1 |
-| 4         | other_metric | Delta                     | fruitType: orange |  77.4 |
-| 6         | test_metric  | Cumulative                | labelA: foo       |   8.2 |
-| 8         | test_metric  | Cumulative                | labelA: foo       |  12.8 |
-| 10        | test_metric  | Cumulative                | labelA: bar       |   6.4 |
+| Timestamp | Metric Name  | Aggregation Temporality | Attributes        | Value |
+| --------- | ------------ | ----------------------- | ----------------- | ----: |
+| 0         | test_metric  | Cumulative              | labelA: foo       |   4.0 |
+| 2         | test_metric  | Cumulative              | labelA: bar       |   3.1 |
+| 4         | other_metric | Delta                    | fruitType: orange |  77.4 |
+| 6         | test_metric  | Cumulative              | labelA: foo       |   8.2 |
+| 8         | test_metric  | Cumulative              | labelA: foo       |  12.8 |
+| 10        | test_metric  | Cumulative              | labelA: bar       |   6.4 |
 
 The processor would immediately pass the following metrics to the next processor in the chain
 
-| Timestamp | Metric Name  | Aggregation Temporarility | Attributes        | Value |
-| --------- | ------------ | ------------------------- | ----------------- | ----: |
-| 4         | other_metric | Delta                     | fruitType: orange |  77.4 |
+| Timestamp | Metric Name  | Aggregation Temporality | Attributes        | Value |
+| --------- | ------------ | ----------------------- | ----------------- | ----: |
+| 4         | other_metric | Delta                    | fruitType: orange |  77.4 |
 
 Because it's a Delta metric.
 
 At the next `interval` (15s by default), the processor would pass the following metrics to the next processor in the chain
 
-| Timestamp | Metric Name | Aggregation Temporarility | Attributes  | Value |
-| --------- | ----------- | ------------------------- | ----------- | ----: |
-| 8         | test_metric | Cumulative                | labelA: foo |  12.8 |
-| 10        | test_metric | Cumulative                | labelA: bar |   6.4 |
+| Timestamp | Metric Name | Aggregation Temporality | Attributes  | Value |
+| --------- | ----------- | ----------------------- | ----------- | ----: |
+| 8         | test_metric | Cumulative              | labelA: foo |  12.8 |
+| 10        | test_metric | Cumulative              | labelA: bar |   6.4 |
 
 > [!IMPORTANT]
 > After exporting, any internal state is cleared. So if no new metrics come in, the next interval will export nothing.
diff --git a/processor/k8sattributesprocessor/README.md b/processor/k8sattributesprocessor/README.md
index ca38d8599ff9..425bef183f4a 100644
--- a/processor/k8sattributesprocessor/README.md
+++ b/processor/k8sattributesprocessor/README.md
@@ -341,7 +341,7 @@ k8sattributes:
   filter:
     namespace: <WORKLOAD_NAMESPACE>
 ```
-With the namespace filter set, the processor will only look up pods and replicasets in the selected namespace. Note that with just a role binding, the processor can not query metadata such as labels and annotations from k8s `nodes` and `namespaces` which are cluster-scoped objects. This also means that the processor can not set the value for `k8s.cluster.uid` attribute if enabled, since the `k8s.cluster.uid` attribute is set to the uid of the namespace `kube-system` which is not queryable with namespaced rbac.
+With the namespace filter set, the processor will only look up pods and replicasets in the selected namespace. Note that with just a role binding, the processor cannot query metadata such as labels and annotations from k8s `nodes` and `namespaces` which are cluster-scoped objects. This also means that the processor cannot set the value for the `k8s.cluster.uid` attribute if enabled, since the `k8s.cluster.uid` attribute is set to the uid of the namespace `kube-system` which is not queryable with namespaced rbac.
 
 Example `Role` and `RoleBinding` to create in the namespace being watched.
 
 ```yaml
@@ -389,7 +389,7 @@ When running as an agent, the processor detects IP addresses of pods sending spa
 and uses this information to extract metadata from pods. When running as an agent, it is
 important to apply a discovery filter so that the processor only discovers pods from the same host that it is running on.
 Not using such a filter can result in unnecessary resource usage especially on very large clusters. Once the filter is applied,
-each processor will only query the k8s API for pods running on it's own node.
+each processor will only query the k8s API for pods running on its own node.
 
 Node filter can be applied by setting the `filter.node` config option to the name of a k8s node. While this works
 as expected, it cannot be used to automatically filter pods by the same node that the processor is running on in
@@ -498,7 +498,7 @@ The following config with the feature gate set will lead to validation error:
 
 #### Migration
 
-Deprecation of the `extract.annotations.regex` and `extract.labels.regex` fields means that it is recommended to use the `ExtractPatterns` function from the transform processor instead. To convert your current configuration please check the `ExtractPatterns` function [documentation](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/ottlfuncs#extractpatterns). You should use the `pattern` parameter of `ExtractPatterns` instead of using the the `extract.annotations.regex` and `extract.labels.regex` fields.
+Deprecation of the `extract.annotations.regex` and `extract.labels.regex` fields means that it is recommended to use the `ExtractPatterns` function from the transform processor instead. To convert your current configuration please check the `ExtractPatterns` function [documentation](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/pkg/ottl/ottlfuncs#extractpatterns). You should use the `pattern` parameter of `ExtractPatterns` instead of using the `extract.annotations.regex` and `extract.labels.regex` fields.
 
 ##### Example
diff --git a/processor/k8sattributesprocessor/internal/kube/client.go b/processor/k8sattributesprocessor/internal/kube/client.go
index a589d5a1c1f5..3e38cafeaaef 100644
--- a/processor/k8sattributesprocessor/internal/kube/client.go
+++ b/processor/k8sattributesprocessor/internal/kube/client.go
@@ -273,7 +273,7 @@ func (c *WatchClient) Start() error {
 	return nil
 }
 
-// Stop signals the the k8s watcher/informer to stop watching for new events.
+// Stop signals the k8s watcher/informer to stop watching for new events.
 func (c *WatchClient) Stop() {
 	close(c.stopCh)
 }
diff --git a/processor/k8sattributesprocessor/internal/kube/client_test.go b/processor/k8sattributesprocessor/internal/kube/client_test.go
index de701f6fd673..5d28ac5b9508 100644
--- a/processor/k8sattributesprocessor/internal/kube/client_test.go
+++ b/processor/k8sattributesprocessor/internal/kube/client_test.go
@@ -428,7 +428,7 @@ func TestPodDelete(t *testing.T) {
 	// delete empty IP pod
 	c.handlePodDelete(&api_v1.Pod{})
 
-	// delete non-existent IP
+	// delete nonexistent IP
 	c.deleteQueue = c.deleteQueue[:0]
 	pod := &api_v1.Pod{}
 	pod.Status.PodIP = "9.9.9.9"
@@ -494,14 +494,14 @@ func TestNamespaceDelete(t *testing.T) {
 	// delete empty namespace
 	c.handleNamespaceDelete(&api_v1.Namespace{})
 
-	// delete non-existent namespace
+	// delete nonexistent namespace
 	namespace := &api_v1.Namespace{}
 	namespace.Name = "namespaceC"
 	c.handleNamespaceDelete(namespace)
 	assert.Len(t, c.Namespaces, 2)
 	got := c.Namespaces["namespaceA"]
 	assert.Equal(t, "namespaceA", got.Name)
-	// delete non-existent namespace when DeletedFinalStateUnknown
+	// delete nonexistent namespace when DeletedFinalStateUnknown
 	c.handleNamespaceDelete(cache.DeletedFinalStateUnknown{Obj: namespace})
 	assert.Len(t, c.Namespaces, 2)
 	got = c.Namespaces["namespaceA"]
@@ -529,14 +529,14 @@ func TestNodeDelete(t *testing.T) {
 	// delete empty node
 	c.handleNodeDelete(&api_v1.Node{})
 
-	// delete non-existent node
+	// delete nonexistent node
 	node := &api_v1.Node{}
 	node.Name = "nodeC"
 	c.handleNodeDelete(node)
 	assert.Len(t, c.Nodes, 2)
 	got := c.Nodes["nodeA"]
 	assert.Equal(t, "nodeA", got.Name)
-	// delete non-existent namespace when DeletedFinalStateUnknown
+	// delete nonexistent namespace when DeletedFinalStateUnknown
 	c.handleNodeDelete(cache.DeletedFinalStateUnknown{Obj: node})
 	assert.Len(t, c.Nodes, 2)
 	got = c.Nodes["nodeA"]
diff --git a/processor/k8sattributesprocessor/processor_test.go b/processor/k8sattributesprocessor/processor_test.go
index ee51cc82d9f0..6e6458ea4c82 100644
--- a/processor/k8sattributesprocessor/processor_test.go
+++ b/processor/k8sattributesprocessor/processor_test.go
@@ -1296,7 +1296,7 @@ func TestProcessorAddContainerAttributes(t *testing.T) {
 	}
 }
 
-func TestProcessorPicksUpPassthoughPodIp(t *testing.T) {
+func TestProcessorPicksUpPassthroughPodIp(t *testing.T) {
 	m := newMultiTest(
 		t,
 		NewFactory().CreateDefaultConfig(),
diff --git a/processor/k8sattributesprocessor/testdata/config.yaml b/processor/k8sattributesprocessor/testdata/config.yaml
index 1078ab73b7c5..9ac386ad4763 100644
--- a/processor/k8sattributesprocessor/testdata/config.yaml
+++ b/processor/k8sattributesprocessor/testdata/config.yaml
@@ -89,7 +89,7 @@ k8sattributes/4:
   auth_type: "kubeConfig"
   extract:
     metadata:
-      # the following metadata field has been depracated
+      # the following metadata field has been deprecated
      - k8s.cluster.name
 
 k8sattributes/too_many_sources:
diff --git a/processor/logdedupprocessor/config_test.go b/processor/logdedupprocessor/config_test.go
index a4e8fb120cd5..23a2b484f542 100644
--- a/processor/logdedupprocessor/config_test.go
+++ b/processor/logdedupprocessor/config_test.go
@@ -75,7 +75,7 @@ func TestValidateConfig(t *testing.T) {
 			expectedErr: errors.New("an excludefield must start with"),
 		},
 		{
-			desc: "invalid duplice exclude field",
+			desc: "invalid duplicate exclude field",
 			cfg: &Config{
 				LogCountAttribute: defaultLogCountAttribute,
 				Interval:          defaultInterval,
diff --git a/processor/logdedupprocessor/field_remover.go b/processor/logdedupprocessor/field_remover.go
index bd82a7715214..dd261a14c6f1 100644
--- a/processor/logdedupprocessor/field_remover.go
+++ b/processor/logdedupprocessor/field_remover.go
@@ -15,7 +15,7 @@ const (
 	// fieldDelimiter is the delimiter used to split a field key into its parts.
 	fieldDelimiter = "."
 
-	// fieldEscapeKeyReplacement is the string used to temporarily replace escaped delimters while splitting a field key.
+	// fieldEscapeKeyReplacement is the string used to temporarily replace escaped delimiters while splitting a field key.
 	fieldEscapeKeyReplacement = "{TEMP_REPLACE}"
 )
diff --git a/processor/logdedupprocessor/field_remover_test.go b/processor/logdedupprocessor/field_remover_test.go
index 173bee6b2e9e..9972acd3ee9d 100644
--- a/processor/logdedupprocessor/field_remover_test.go
+++ b/processor/logdedupprocessor/field_remover_test.go
@@ -73,7 +73,7 @@ func TestRemoveFields(t *testing.T) {
 	nestedAttrMap := logRecord.Attributes().PutEmptyMap("nested")
 	nestedAttrMap.PutInt("int", 2)
 
-	// Expected attribut map
+	// Expected attribute map
 	expectedAttrsMap := pcommon.NewMap()
 	expectedAttrsMap.PutStr("str", "attr str")
 	expectedAttrHash := pdatautil.MapHash(expectedAttrsMap)
diff --git a/processor/metricsgenerationprocessor/config.go b/processor/metricsgenerationprocessor/config.go
index d9be98b559f0..54c857c5604b 100644
--- a/processor/metricsgenerationprocessor/config.go
+++ b/processor/metricsgenerationprocessor/config.go
@@ -61,7 +61,7 @@ type GenerationType string
 
 const (
-	// Generates a new metric applying an arithmatic operation with two operands
+	// Generates a new metric applying an arithmetic operation with two operands
 	calculate GenerationType = "calculate"
 
 	// Generates a new metric scaling the value of s given metric with a provided constant
diff --git a/processor/metricstransformprocessor/README.md b/processor/metricstransformprocessor/README.md
index 34b3d25b59e5..c4b87e17307a 100644
--- a/processor/metricstransformprocessor/README.md
+++ b/processor/metricstransformprocessor/README.md
@@ -311,7 +311,7 @@ operations:
 ```yaml
 # Group metrics from one single ResourceMetrics and report them as multiple ResourceMetrics.
 #
-# ex: Consider pod and container metrics collected from Kubernetes. Both the metrics are recorded under under one ResourceMetric
+# ex: Consider pod and container metrics collected from Kubernetes. Both the metrics are recorded under one ResourceMetric
 # applying this transformation will result in two separate ResourceMetric packets with corresponding resource labels in the resource headers
 #
 # instead of regular $ use double dollar $$. Because $ is treated as a special character.
@@ -320,11 +320,11 @@ operations:
 - include: ^k8s\.pod\.(.*)$$
   match_type: regexp
   action: group
-  group_resource_labels: {"resouce.type": "k8s.pod", "source": "kubelet"}
+  group_resource_labels: {"resource.type": "k8s.pod", "source": "kubelet"}
 - include: ^container\.(.*)$$
   match_type: regexp
   action: group
-  group_resource_labels: {"resouce.type": "container", "source": "kubelet"}
+  group_resource_labels: {"resource.type": "container", "source": "kubelet"}
 ```
 
 ### Metric Transform Processor vs. [Attributes Processor for Metrics](../attributesprocessor)
diff --git a/processor/metricstransformprocessor/config.go b/processor/metricstransformprocessor/config.go
index ca08ec49c47c..d86493b97a28 100644
--- a/processor/metricstransformprocessor/config.go
+++ b/processor/metricstransformprocessor/config.go
@@ -18,7 +18,7 @@ const (
 	// newNameFieldName is the mapstructure field name for NewName field
 	newNameFieldName = "new_name"
 
-	// groupResourceLabelsFieldName is the mapstructure field name for GroupResouceLabels field
+	// groupResourceLabelsFieldName is the mapstructure field name for GroupResourceLabels field
 	groupResourceLabelsFieldName = "group_resource_labels"
 
 	// aggregationTypeFieldName is the mapstructure field name for aggregationType field
@@ -69,7 +69,7 @@ type transform struct {
 	// REQUIRED only if Action is INSERT.
 	NewName string `mapstructure:"new_name"`
 
-	// GroupResourceLabels specifes resource labels that will be appended to this group's new ResourceMetrics message
+	// GroupResourceLabels specifies resource labels that will be appended to this group's new ResourceMetrics message
 	// REQUIRED only if Action is GROUP
 	GroupResourceLabels map[string]string `mapstructure:"group_resource_labels"`
 
@@ -152,7 +152,7 @@ const (
 	// Combine combines multiple metrics into a single metric.
 	Combine ConfigAction = "combine"
 
-	// Group groups mutiple metrics matching the predicate into multiple ResourceMetrics messages
+	// Group groups multiple metrics matching the predicate into multiple ResourceMetrics messages
 	Group ConfigAction = "group"
 )
 
@@ -168,7 +168,7 @@ func (ca ConfigAction) isValid() bool {
 	return false
 }
 
-// operationAction is the enum to capture the thress types of actions to perform for an operation.
+// operationAction is the enum to capture the types of actions to perform for an operation.
 type operationAction string
 
 const (
diff --git a/processor/metricstransformprocessor/metrics_transform_processor_otlp.go b/processor/metricstransformprocessor/metrics_transform_processor_otlp.go
index 398cd67cd96e..cf6ee289bb86 100644
--- a/processor/metricstransformprocessor/metrics_transform_processor_otlp.go
+++ b/processor/metricstransformprocessor/metrics_transform_processor_otlp.go
@@ -91,9 +91,9 @@ func (f internalFilterRegexp) submatches(metric pmetric.Metric) []int {
 	return f.include.FindStringSubmatchIndex(metric.Name())
 }
 
-func (f internalFilterRegexp) expand(metricTempate, metricName string) string {
+func (f internalFilterRegexp) expand(metricTemplate, metricName string) string {
 	if submatches := f.include.FindStringSubmatchIndex(metricName); submatches != nil {
-		return string(f.include.ExpandString([]byte{}, metricTempate, metricName, submatches))
+		return string(f.include.ExpandString([]byte{}, metricTemplate, metricName, submatches))
 	}
 	return ""
 }
@@ -442,7 +442,7 @@ func combine(transform internalTransform, metrics pmetric.MetricSlice) pmetric.M
 // groupMetrics groups all the provided timeseries that will be aggregated together based on all the label values.
 // Returns a map of grouped timeseries and the corresponding selected labels
-// canBeCombined must be callled before.
+// canBeCombined must be called before.
 func groupMetrics(metrics pmetric.MetricSlice, aggType aggregateutil.AggregationType, to pmetric.Metric) {
 	ag := aggregateutil.AggGroups{}
 	for i := 0; i < metrics.Len(); i++ {
diff --git a/processor/probabilisticsamplerprocessor/README.md b/processor/probabilisticsamplerprocessor/README.md
index e0059d9050e3..e4409897b0dd 100644
--- a/processor/probabilisticsamplerprocessor/README.md
+++ b/processor/probabilisticsamplerprocessor/README.md
@@ -60,7 +60,7 @@ instead of using the parent-based approach (e.g., using the
 `TraceIDRatioBased` sampler for a non-root span), incompleteness may
 result, and when spans and log records are independently sampled in
 a processor, as by this component, the same potential for completeness
-arises. The consistency guarantee helps minimimize this issue.
+arises. The consistency guarantee helps minimize this issue.
 
 Consistent probability samplers can be safely used with a mixture of
 probabilities and preserve sub-trace completeness, provided that child
@@ -158,7 +158,7 @@ implies collecting log records from an expected value of 10 pods.
 OpenTelemetry specifies a consistent sampling mechanism using 56 bits
 of randomness, which may be obtained from the Trace ID according to
 the W3C Trace Context Level 2 specification. Randomness can also be
-explicly encoding in the OpenTelemetry `tracestate` field, where it is
+explicitly encoded in the OpenTelemetry `tracestate` field, where it is
 known as the R-value.
 
 This mode is named because it reduces the number of items transmitted
@@ -183,7 +183,7 @@ for every 4 items input.
 
 ### Equalizing
 
-This mode uses the same randomness mechanism as the propotional
+This mode uses the same randomness mechanism as the proportional
 sampling mode, in this case considering how much each item was
 already sampled by preceding samplers. This mode can be used to lower
 sampling probability to a minimum value across a whole pipeline,
@@ -241,7 +241,7 @@ tracestate: ot=th:0;rv:9b8233f7e3a151
 This component, using either proportional or equalizing modes, could
 apply 50% sampling the Span. This span with randomness value
 `9b8233f7e3a151` is consistently sampled at 50% because the threshold,
-when zero padded (i.e., `80000000000000`), is less than the randomess
+when zero padded (i.e., `80000000000000`), is less than the randomness
 value. The resulting span will have the following tracestate:
 
 ```
diff --git a/processor/probabilisticsamplerprocessor/logsprocessor.go b/processor/probabilisticsamplerprocessor/logsprocessor.go
index fd4fa6b3ff53..970c5321f7b7 100644
--- a/processor/probabilisticsamplerprocessor/logsprocessor.go
+++ b/processor/probabilisticsamplerprocessor/logsprocessor.go
@@ -166,7 +166,7 @@ func (th *hashingSampler) randomnessFromLogRecord(logRec plog.LogRecord) (random
 }
 
 // randomnessFromLogRecord (hashingSampler) uses OTEP 235 semantic
-// conventions basing its deicsion only on the TraceID.
+// conventions basing its decision only on the TraceID.
 func (ctc *consistentTracestateCommon) randomnessFromLogRecord(logRec plog.LogRecord) (randomnessNamer, samplingCarrier, error) {
 	lrc, err := newLogRecordCarrier(logRec)
 	rnd := newMissingRandomnessMethod()
diff --git a/processor/probabilisticsamplerprocessor/logsprocessor_test.go b/processor/probabilisticsamplerprocessor/logsprocessor_test.go
index 7f675d80a09e..510ff038c92e 100644
--- a/processor/probabilisticsamplerprocessor/logsprocessor_test.go
+++ b/processor/probabilisticsamplerprocessor/logsprocessor_test.go
@@ -354,7 +354,7 @@ func TestLogsSamplingState(t *testing.T) {
 			tid: mustParseTID("fefefefefefefefefefefefefefefefe"),
 			attrs: map[string]any{
 				"sampling.threshold": "c", // Corresponds with 25%
-				"prio":               37,  // Lower than 50, higher than 25
+				"prio":               37,  // Lower than 50, greater than 25
 			},
 			sampled:  true,
 			adjCount: 4,
diff --git a/processor/probabilisticsamplerprocessor/sampler_mode.go b/processor/probabilisticsamplerprocessor/sampler_mode.go
index 3fe15612280c..47b74e520b6d 100644
--- a/processor/probabilisticsamplerprocessor/sampler_mode.go
+++ b/processor/probabilisticsamplerprocessor/sampler_mode.go
@@ -63,7 +63,7 @@ const (
 
 	// Proportional uses OpenTelemetry consistent probability
 	// sampling information (OTEP 235), multiplies incoming
-	// sampling probaiblities.
+	// sampling probabilities.
 	Proportional SamplerMode = "proportional"
 
 	// defaultHashSeed is applied when the mode is unset.
diff --git a/processor/resourcedetectionprocessor/README.md b/processor/resourcedetectionprocessor/README.md
index cf16a5abc9e8..6186156d57ab 100644
--- a/processor/resourcedetectionprocessor/README.md
+++ b/processor/resourcedetectionprocessor/README.md
@@ -410,7 +410,7 @@ If accurate parsing cannot be performed, the infrastructure resource group value
 
 ### Consul
 
-Queries a [consul agent](https://www.consul.io/docs/agent) and reads its' [configuration endpoint](https://www.consul.io/api-docs/agent#read-configuration) to retrieve related resource attributes:
+Queries a [consul agent](https://www.consul.io/docs/agent) and reads its [configuration endpoint](https://www.consul.io/api-docs/agent#read-configuration) to retrieve related resource attributes:
 
 The list of the populated resource attributes can be found at [Consul Detector Resource Attributes](./internal/consul/documentation.md).
 
@@ -481,11 +481,11 @@ and add this to your workload:
       fieldPath: spec.nodeName
 ```
 
-### Openshift
+### OpenShift
 
 Queries the OpenShift and Kubernetes API to retrieve related resource attributes.
 
-The list of the populated resource attributes can be found at [Openshift Detector Resource Attributes](./internal/openshift/documentation.md).
+The list of the populated resource attributes can be found at [OpenShift Detector Resource Attributes](./internal/openshift/documentation.md).
 
 The following permissions are required:
 ```yaml
@@ -588,5 +588,5 @@ Note that if multiple detectors are inserting the same attribute name, the first
 * ecs
 * ec2
 
-The full list of settings exposed for this extension are documented [here](./config.go)
-with detailed sample configurations [here](./testdata/config.yaml).
+The full list of settings exposed for this extension is documented in [config.go](./config.go)
+with detailed sample configurations in [testdata/config.yaml](./testdata/config.yaml).
diff --git a/processor/resourcedetectionprocessor/config.go b/processor/resourcedetectionprocessor/config.go
index 78fb07a423ba..ba34649e2c72 100644
--- a/processor/resourcedetectionprocessor/config.go
+++ b/processor/resourcedetectionprocessor/config.go
@@ -80,7 +80,7 @@ type DetectorConfig struct {
 	// SystemConfig contains user-specified configurations for the System detector
 	SystemConfig system.Config `mapstructure:"system"`
 
-	// OpenShift contains user-specified configurations for the Openshift detector
+	// OpenShift contains user-specified configurations for the OpenShift detector
 	OpenShiftConfig openshift.Config `mapstructure:"openshift"`
 
 	// K8SNode contains user-specified configurations for the K8SNode detector
diff --git a/processor/resourcedetectionprocessor/internal/heroku/documentation.md b/processor/resourcedetectionprocessor/internal/heroku/documentation.md
index 674a47c4bade..ac927afa7c53 100644
--- a/processor/resourcedetectionprocessor/internal/heroku/documentation.md
+++ b/processor/resourcedetectionprocessor/internal/heroku/documentation.md
@@ -15,4 +15,4 @@
 | heroku.release.creation_timestamp | The heroku.release.creation_timestamp | Any Str | true |
 | service.instance.id | The service.instance.id | Any Str | true |
 | service.name | Heroku app name recorded as service.name. | Any Str | true |
-| service.version | Heroku relese version set as service.version. | Any Str | true |
+| service.version | Heroku release version set as service.version. | Any Str | true |
diff --git a/processor/resourcedetectionprocessor/internal/heroku/metadata.yaml b/processor/resourcedetectionprocessor/internal/heroku/metadata.yaml
index 275932d67fb7..3deaa872d0fa 100644
--- a/processor/resourcedetectionprocessor/internal/heroku/metadata.yaml
+++ b/processor/resourcedetectionprocessor/internal/heroku/metadata.yaml
@@ -32,6 +32,6 @@ resource_attributes:
     enabled: true
     type: string
   service.version:
-    description: Heroku relese version set as service.version.
+    description: Heroku release version set as service.version.
     enabled: true
     type: string
diff --git a/processor/resourcedetectionprocessor/internal/openshift/config.go b/processor/resourcedetectionprocessor/internal/openshift/config.go
index 408bcc760984..bed20f02f493 100644
--- a/processor/resourcedetectionprocessor/internal/openshift/config.go
+++ b/processor/resourcedetectionprocessor/internal/openshift/config.go
@@ -47,7 +47,7 @@ type Config struct {
 	Token string `mapstructure:"token"`
 
 	// TLSSettings contains TLS configurations that are specific to client
-	// connection used to communicate with the Openshift API.
+	// connection used to communicate with the OpenShift API.
 	TLSSettings configtls.ClientConfig `mapstructure:"tls"`
 
 	ResourceAttributes metadata.ResourceAttributesConfig `mapstructure:"resource_attributes"`
diff --git a/processor/resourcedetectionprocessor/internal/resourcedetection_test.go b/processor/resourcedetectionprocessor/internal/resourcedetection_test.go
index 4fcba6437995..f07702adac8b 100644
--- a/processor/resourcedetectionprocessor/internal/resourcedetection_test.go
+++ b/processor/resourcedetectionprocessor/internal/resourcedetection_test.go
@@ -119,7 +119,7 @@ func TestDetectResource_InvalidDetectorType(t *testing.T) {
 	require.EqualError(t, err, fmt.Sprintf("invalid detector key: %v", mockDetectorKey))
 }
 
-func TestDetectResource_DetectoryFactoryError(t *testing.T) {
+func TestDetectResource_DetectorFactoryError(t *testing.T) {
 	mockDetectorKey := DetectorType("mock")
 	p := NewProviderFactory(map[DetectorType]DetectorFactory{
 		mockDetectorKey: func(processor.Settings, DetectorConfig) (Detector, error) {
diff --git a/processor/routingprocessor/README.md b/processor/routingprocessor/README.md
index 5ae2c563d5c7..3a4dbc65b6a7 100644
--- a/processor/routingprocessor/README.md
+++ b/processor/routingprocessor/README.md
@@ -175,7 +175,7 @@ It is also possible to mix both the conventional routing configuration and the r
 - [delete_key](../../pkg/ottl/ottlfuncs/README.md#delete_key)
 - [delete_matching_keys](../../pkg/ottl/ottlfuncs/README.md#delete_matching_keys)
 
-The full list of settings exposed for this processor are documented [here](./config.go) with detailed sample configuration files:
+The full list of settings exposed for this processor is documented in [config.go](./config.go) with detailed sample configuration files:
 
 - [logs](./testdata/config_logs.yaml)
 - [metrics](./testdata/config_metrics.yaml)
diff --git a/processor/routingprocessor/extract_test.go b/processor/routingprocessor/extract_test.go
index b5a3136ab23b..243f2fa9b7b7 100644
--- a/processor/routingprocessor/extract_test.go
+++ b/processor/routingprocessor/extract_test.go
@@ -68,7 +68,7 @@ func TestExtractorForTraces_FromContext(t *testing.T) {
 			expectedValue: "acme",
 		},
 		{
-			name: "value from existing HTTP attribute: case insensitive",
+			name: "value from existing HTTP attribute: case-insensitive",
 			ctxFunc: func() context.Context {
 				return client.NewContext(context.Background(),
 					client.Info{Metadata: client.NewMetadata(map[string][]string{
diff --git a/processor/schemaprocessor/README.md b/processor/schemaprocessor/README.md
index 93af6889ffed..08e322121d71 100644
--- a/processor/schemaprocessor/README.md
+++ b/processor/schemaprocessor/README.md
@@ -26,7 +26,7 @@ the translations needed for signals that match the schema URL.
 
 ## Schema Formats
 
-A schema URl is made up in two parts, _Schema Family_ and _Schema Version_, the schema URL is broken down like so:
+A [schema URL](https://opentelemetry.io/docs/reference/specification/schemas/overview/#schema-url) is made up of two parts, _Schema Family_ and _Schema Version_; the schema URL is broken down like so:
 
 ```text
 |                       Schema URL                        |
@@ -35,7 +35,6 @@ A schema URL is made up of two parts, _Schema Family_ and _Schema Version_
 ```
 
 The final path in the schema URL _MUST_ be the schema version and the preceding portion of the URL is the _Schema Family_.
-To read about schema formats, please read more [here](https://opentelemetry.io/docs/reference/specification/schemas/overview/#schema-url)
 
 ## Targets Schemas
diff --git a/processor/schemaprocessor/internal/fixture/parallel.go b/processor/schemaprocessor/internal/fixture/parallel.go
index efbcff92de40..c7c0eb5233de 100644
--- a/processor/schemaprocessor/internal/fixture/parallel.go
+++ b/processor/schemaprocessor/internal/fixture/parallel.go
@@ -14,7 +14,7 @@ import (
 )
 
 // ParallelRaceCompute starts `count` number of go routines that calls the provided function `fn`
-// at the same to allow the race detector greater oppotunity to capture known race conditions.
+// at the same time to allow the race detector greater opportunity to capture known race conditions.
 // This method blocks until each count number of fn has completed, any returned errors is considered
 // a failing test method.
 // If the race detector is not enabled, the function then skips with an notice.
diff --git a/processor/schemaprocessor/internal/migrate/conditional_test.go b/processor/schemaprocessor/internal/migrate/conditional_test.go
index de6e5cad298c..8c2c6a10d3d6 100644
--- a/processor/schemaprocessor/internal/migrate/conditional_test.go
+++ b/processor/schemaprocessor/internal/migrate/conditional_test.go
@@ -39,7 +39,7 @@ func TestConditionalAttributeSetApply(t *testing.T) {
 				},
 				"application start",
 			),
-			check: "datatbase operation",
+			check: "database operation",
 			attr: testHelperBuildMap(func(m pcommon.Map) {
 				m.PutStr("service.version", "v0.0.0")
 			}),
@@ -48,13 +48,13 @@ func TestConditionalAttributeSetApply(t *testing.T) {
 			}),
 		},
 		{
-			name: "No condition set, applys to all",
+			name: "No condition set, applies to all",
 			cond: NewConditionalAttributeSet[string](
 				map[string]string{
 					"service.version": "application.version",
 				},
 			),
-			check: "datatbase operation",
+			check: "database operation",
 			attr: testHelperBuildMap(func(m pcommon.Map) {
 				m.PutStr("service.version", "v0.0.0")
 			}),
@@ -118,7 +118,7 @@ func TestConditionalAttributeSetRollback(t *testing.T) {
 				},
 				"application start",
 			),
-			check: "datatbase operation",
+			check: "database operation",
 			attr: testHelperBuildMap(func(m pcommon.Map) {
 				m.PutStr("service.version", "v0.0.0")
 			}),
@@ -127,13 +127,13 @@ func TestConditionalAttributeSetRollback(t *testing.T) {
 			}),
 		},
 		{
-			name: "No condition set, applys to all",
+			name: "No condition set, applies to all",
 			cond: NewConditionalAttributeSet[string](
 				map[string]string{
 					"service.version": "application.version",
 				},
 			),
-			check: "datatbase operation",
+			check: "database operation",
 			attr: testHelperBuildMap(func(m pcommon.Map) {
 				m.PutStr("application.version", "v0.0.0")
 			}),
diff --git a/processor/schemaprocessor/internal/migrate/multi_conditional_test.go b/processor/schemaprocessor/internal/migrate/multi_conditional_test.go
index 98c2c204b43a..af1013e432f4 100644
--- a/processor/schemaprocessor/internal/migrate/multi_conditional_test.go
+++ b/processor/schemaprocessor/internal/migrate/multi_conditional_test.go
@@ -40,7 +40,7 @@ func TestMultiConditionalAttributeSetApply(t *testing.T) {
 			},
 				map[string][]string{"span.name": {"application start"}},
 			),
-			inCondData: map[string]string{"span.name": "datatbase operation"},
+			inCondData: map[string]string{"span.name": "database operation"},
 			inAttr: testHelperBuildMap(func(m pcommon.Map) {
 				m.PutStr("service.version", "v0.0.0")
 			}),
@@ -49,14 +49,14 @@ func TestMultiConditionalAttributeSetApply(t *testing.T) {
 			}),
 		},
 		{
-			name: "No condition set, applys to all",
+			name: "No condition set, applies to all",
 			cond: NewMultiConditionalAttributeSet[string](
 				map[string]string{
 					"service.version": "application.version",
 				},
 				map[string][]string{},
 			),
-			inCondData: map[string]string{"span.name": "datatbase operation"},
+			inCondData: map[string]string{"span.name": "database operation"},
 			inAttr: testHelperBuildMap(func(m pcommon.Map) {
 				m.PutStr("service.version", "v0.0.0")
 			}),
diff --git a/processor/sumologicprocessor/README.md b/processor/sumologicprocessor/README.md
index d49a71d42bf9..4f46afec810a 100644
--- a/processor/sumologicprocessor/README.md
+++ b/processor/sumologicprocessor/README.md
@@ -101,7 +101,7 @@ processors:
 ### Adding `cloud.namespace` resource attribute
 
 Some of the apps in Sumo Logic require the `cloud.namespace` attribute to be set
-to better understand the data coming from AWS EC2, AWS ECS and AWS Elactic Beanstalk.
+to better understand the data coming from AWS EC2, AWS ECS and AWS Elastic Beanstalk.
 This attribute is similar to the standard OpenTelemetry attribute [`cloud.provider`][opentelemetry_cloud_provider_attribute].
 In the future, the Sumo Logic apps might switch to the standard `cloud.provider` attribute.
 Before this happens, the following mapping defines the relationship between `cloud.provider` and `cloud.namespace` values:
diff --git a/processor/sumologicprocessor/config.go b/processor/sumologicprocessor/config.go
index ec38c01da927..4c7c3ee55608 100644
--- a/processor/sumologicprocessor/config.go
+++ b/processor/sumologicprocessor/config.go
@@ -26,7 +26,7 @@ const (
 	defaultAddCloudNamespace           = true
 	defaultTranslateAttributes         = true
 	defaultTranslateTelegrafAttributes = true
-	defaultTranlateDockerMetrics       = false
+	defaultTranslateDockerMetrics      = false
 
 	// Nesting processor default config
 	defaultNestingEnabled = false
@@ -72,7 +72,7 @@ func createDefaultConfig() component.Config {
 			SpanIDAttribute:  &logFieldAttribute{defaultAddSpanIDAttribute, SpanIDAttributeName},
 			TraceIDAttribute: &logFieldAttribute{defaultAddTraceIDAttribute, TraceIDAttributeName},
 		},
-		TranslateDockerMetrics: defaultTranlateDockerMetrics,
+		TranslateDockerMetrics: defaultTranslateDockerMetrics,
 	}
 }
diff --git a/processor/sumologicprocessor/processor_test.go b/processor/sumologicprocessor/processor_test.go
index 6f9c11a726c0..02f31879a84f 100644
--- a/processor/sumologicprocessor/processor_test.go
+++ b/processor/sumologicprocessor/processor_test.go
@@ -86,7 +86,7 @@ func TestAddCloudNamespaceForLogs(t *testing.T) {
 			},
 		},
 		{
-			name:              "does not add cloud.namespce attribute when disabled",
+			name:              "does not add cloud.namespace attribute when disabled",
 			addCloudNamespace: false,
 			createLogs: func() plog.Logs {
 				inputLogs := plog.NewLogs()
@@ -212,7 +212,7 @@ func TestAddCloudNamespaceForMetrics(t *testing.T) {
 			},
 		},
 		{
-			name:              "does not add cloud.namespce attribute when disabled",
+			name:              "does not add cloud.namespace attribute when disabled",
 			addCloudNamespace: false,
 			createMetrics: func() pmetric.Metrics {
 				inputMetrics := pmetric.NewMetrics()
@@ -338,7 +338,7 @@ func TestAddCloudNamespaceForTraces(t *testing.T) {
 			},
 		},
 		{
-			name:              "does not add cloud.namespce attribute when disabled",
+			name:              "does not add cloud.namespace attribute when disabled",
 			addCloudNamespace: false,
 			createTraces: func() ptrace.Traces {
 				inputTraces := ptrace.NewTraces()
diff --git a/processor/sumologicprocessor/translate_docker_metrics_processor.go b/processor/sumologicprocessor/translate_docker_metrics_processor.go
index 9e103bd8bcaa..2e8ee41ee29d 100644
--- a/processor/sumologicprocessor/translate_docker_metrics_processor.go
+++ b/processor/sumologicprocessor/translate_docker_metrics_processor.go
@@ -68,7 +68,7 @@ var dockerMetricsTranslations = map[string]string{
 	"container.blockio.sectors_recursive": "sectors_recursive",
 }
 
-var dockerReasourceAttributeTranslations = map[string]string{
+var dockerResourceAttributeTranslations = map[string]string{
 	"container.id":         "container.FullID",
 	"container.image.name": "container.ImageName",
 	"container.name":       "container.Name",
@@ -132,7 +132,7 @@ func translateDockerResourceAttributes(attributes pcommon.Map) {
 	result.EnsureCapacity(attributes.Len())
 
 	attributes.Range(func(otKey string, value pcommon.Value) bool {
-		if sumoKey, ok := dockerReasourceAttributeTranslations[otKey]; ok {
+		if sumoKey, ok := dockerResourceAttributeTranslations[otKey]; ok {
 			// Only insert if it doesn't exist yet to prevent overwriting.
 			// We have to do it this way since the final return value is not
 			// ready yet to rely on .Insert() not overwriting.
diff --git a/processor/sumologicprocessor/translate_docker_metrics_processor_test.go b/processor/sumologicprocessor/translate_docker_metrics_processor_test.go
index a6dd12f78ae6..5f3c871bdc87 100644
--- a/processor/sumologicprocessor/translate_docker_metrics_processor_test.go
+++ b/processor/sumologicprocessor/translate_docker_metrics_processor_test.go
@@ -77,7 +77,7 @@ func TestTranslateDockerMetric_NamesAreTranslatedCorrectly(t *testing.T) {
 	}
 }
 
-func TestTranslateDockerMetric_ResourceAttrbutesAreTranslatedCorrectly(t *testing.T) {
+func TestTranslateDockerMetric_ResourceAttributesAreTranslatedCorrectly(t *testing.T) {
 	testcases := []struct {
 		nameIn  string
 		nameOut string
diff --git a/processor/tailsamplingprocessor/README.md b/processor/tailsamplingprocessor/README.md
index 0c6c9978f588..a6df70b6566a 100644
--- a/processor/tailsamplingprocessor/README.md
+++ b/processor/tailsamplingprocessor/README.md
@@ -49,7 +49,7 @@ The following configuration options can also be modified:
 - `decision_cache`: Options for configuring caches for sampling decisions. You may want to vary the size of these caches
   depending on how many "keep" vs "drop" decisions you expect from your policies. For example, you may allocate a
   larger `non_sampled_cache_size` if you expect most traces to be dropped.
-  Additionally, if using, configure this as much higher than `num_traces` so decisions for trace IDs are kept
+  Additionally, if using, configure this as much greater than `num_traces` so decisions for trace IDs are kept
   longer than the span data for the trace.
   - `sampled_cache_size` (default = 0): Configures amount of trace IDs to be kept in an LRU cache,
     persisting the "keep" decisions for traces that may have already been released from memory.
@@ -469,7 +469,7 @@ A circular buffer is used to ensure the number of traces in-memory doesn't excee
 otelcol_processor_tail_sampling_sampling_trace_dropped_too_early
 ```
 
-**Pre-emptively Preventing Dropped Traces**
+**Preemptively Preventing Dropped Traces**
 
 A trace is dropped without sampling if it's removed from the circular buffer before `decision_wait`.
diff --git a/processor/tailsamplingprocessor/config.go b/processor/tailsamplingprocessor/config.go
index 1b18c039fb00..9bda384d64b2 100644
--- a/processor/tailsamplingprocessor/config.go
+++ b/processor/tailsamplingprocessor/config.go
@@ -190,7 +190,7 @@ type StringAttributeCfg struct {
 
 // RateLimitingCfg holds the configurable settings to create a rate limiting
 // sampling policy evaluator.
 type RateLimitingCfg struct {
-	// SpansPerSecond sets the limit on the maximum nuber of spans that can be processed each second.
+	// SpansPerSecond sets the limit on the maximum number of spans that can be processed each second.
 	SpansPerSecond int64 `mapstructure:"spans_per_second"`
 }
 
@@ -227,12 +227,12 @@ type OTTLConditionCfg struct {
 type DecisionCacheConfig struct {
 	// SampledCacheSize specifies the size of the cache that holds the sampled trace IDs.
 	// This value will be the maximum amount of trace IDs that the cache can hold before overwriting previous IDs.
-	// For effective use, this value should be at least an order of magnitude higher than Config.NumTraces.
+	// For effective use, this value should be at least an order of magnitude greater than Config.NumTraces.
 	// If left as default 0, a no-op DecisionCache will be used.
 	SampledCacheSize int `mapstructure:"sampled_cache_size"`
 	// NonSampledCacheSize specifies the size of the cache that holds the non-sampled trace IDs.
 	// This value will be the maximum amount of trace IDs that the cache can hold before overwriting previous IDs.
-	// For effective use, this value should be at least an order of magnitude higher than Config.NumTraces.
+	// For effective use, this value should be at least an order of magnitude greater than Config.NumTraces.
 	// If left as default 0, a no-op DecisionCache will be used.
 	NonSampledCacheSize int `mapstructure:"non_sampled_cache_size"`
 }
diff --git a/processor/tailsamplingprocessor/internal/sampling/composite_test.go b/processor/tailsamplingprocessor/internal/sampling/composite_test.go
index c323fe849946..c813bdeddabf 100644
--- a/processor/tailsamplingprocessor/internal/sampling/composite_test.go
+++ b/processor/tailsamplingprocessor/internal/sampling/composite_test.go
@@ -119,7 +119,7 @@ func TestCompositeEvaluator_OverflowAlwaysSampled(t *testing.T) {
 	decision, err = c.Evaluate(context.Background(), traceID, trace)
 	require.NoError(t, err, "Failed to evaluate composite policy: %v", err)
 
-	// The first policy fails as the tag value is higher than the range set where as the second policy is AlwaysSample, so the decision should be Sampled.
+	// The first policy fails as the tag value is greater than the range set whereas the second policy is AlwaysSample, so the decision should be Sampled.
 	expected = Sampled
 	assert.Equal(t, expected, decision)
 }
diff --git a/processor/tailsamplingprocessor/internal/sampling/latency.go b/processor/tailsamplingprocessor/internal/sampling/latency.go
index be87f47165c9..2b24ba3a2496 100644
--- a/processor/tailsamplingprocessor/internal/sampling/latency.go
+++ b/processor/tailsamplingprocessor/internal/sampling/latency.go
@@ -20,7 +20,7 @@ type latency struct {
 
 var _ PolicyEvaluator = (*latency)(nil)
 
-// NewLatency creates a policy evaluator sampling traces with a duration higher than a configured threshold
+// NewLatency creates a policy evaluator sampling traces with a duration greater than a configured threshold
 func NewLatency(settings component.TelemetrySettings, thresholdMs int64, upperThresholdMs int64) PolicyEvaluator {
 	return &latency{
 		logger: settings.Logger,
diff --git a/processor/tailsamplingprocessor/internal/sampling/string_tag_filter.go b/processor/tailsamplingprocessor/internal/sampling/string_tag_filter.go
index dd22d04eaa24..02f965af8bd5 100644
--- a/processor/tailsamplingprocessor/internal/sampling/string_tag_filter.go
+++ b/processor/tailsamplingprocessor/internal/sampling/string_tag_filter.go
@@ -92,7 +92,7 @@ func NewStringAttributeFilter(settings component.TelemetrySettings, key string,
 // The SamplingDecision is made by comparing the attribute values with the matching values,
 // which might be static strings or regular expressions.
 func (saf *stringAttributeFilter) Evaluate(_ context.Context, _ pcommon.TraceID, trace *TraceData) (Decision, error) {
-	saf.logger.Debug("Evaluting spans in string-tag filter")
+	saf.logger.Debug("Evaluating spans in string-tag filter")
 	trace.Lock()
 	defer trace.Unlock()
 	batches := trace.ReceivedBatches
@@ -111,8 +111,8 @@ func (saf *stringAttributeFilter) Evaluate(_ context.Context, _ pcommon.TraceID,
 			},
 			func(span ptrace.Span) bool {
 				if v, ok := span.Attributes().Get(saf.key); ok {
-					truncableStr := v.Str()
-					if len(truncableStr) > 0 {
+					truncatableStr := v.Str()
+					if len(truncatableStr) > 0 {
 						if ok := saf.matcher(v.Str()); ok {
 							return false
 						}
@@ -135,8 +135,8 @@ func (saf *stringAttributeFilter) Evaluate(_ context.Context, _ pcommon.TraceID,
 			},
 			func(span ptrace.Span) bool {
 				if v, ok := span.Attributes().Get(saf.key); ok {
-					truncableStr := v.Str()
-					if len(truncableStr) > 0 {
+					truncatableStr := v.Str()
+					if len(truncatableStr) > 0 {
 						if ok := saf.matcher(v.Str()); ok {
 							return true
 						}
diff --git a/processor/transformprocessor/README.md b/processor/transformprocessor/README.md
index c207dbdf63b8..4651ab6be710 100644
--- a/processor/transformprocessor/README.md
+++ b/processor/transformprocessor/README.md
@@ -264,7 +264,7 @@ The `extract_count_metric` function creates a new Sum metric from a Histogram, E
 
 `is_monotonic` is a boolean representing the monotonicity of the new metric.
 
-The name for the new metric will be `<original metric name>_count`. The fields that are copied are: `timestamp`, `starttimestamp`, `attibutes`, `description`, and `aggregation_temporality`. As metrics of type Summary don't have an `aggregation_temporality` field, this field will be set to `AGGREGATION_TEMPORALITY_CUMULATIVE` for those metrics.
+The name for the new metric will be `<original metric name>_count`. The fields that are copied are: `timestamp`, `starttimestamp`, `attributes`, `description`, and `aggregation_temporality`. As metrics of type Summary don't have an `aggregation_temporality` field, this field will be set to `AGGREGATION_TEMPORALITY_CUMULATIVE` for those metrics.
 
 The new metric that is created will be passed to all subsequent statements in the metrics statements list.
 
@@ -288,7 +288,7 @@ The `extract_sum_metric` function creates a new Sum metric from a Histogram, Exp
 
 `is_monotonic` is a boolean representing the monotonicity of the new metric.
 
-The name for the new metric will be `<original metric name>_sum`. The fields that are copied are: `timestamp`, `starttimestamp`, `attibutes`, `description`, and `aggregation_temporality`. As metrics of type Summary don't have an `aggregation_temporality` field, this field will be set to `AGGREGATION_TEMPORALITY_CUMULATIVE` for those metrics.
+The name for the new metric will be `<original metric name>_sum`. The fields that are copied are: `timestamp`, `starttimestamp`, `attributes`, `description`, and `aggregation_temporality`. As metrics of type Summary don't have an `aggregation_temporality` field, this field will be set to `AGGREGATION_TEMPORALITY_CUMULATIVE` for those metrics.
 
 The new metric that is created will be passed to all subsequent statements in the metrics statements list.
 
@@ -309,7 +309,7 @@ The `convert_summary_count_val_to_sum` function creates a new Sum metric from a
 `aggregation_temporality` is a string (`"cumulative"` or `"delta"`) representing the desired aggregation temporality of the new metric.
 `is_monotonic` is a boolean representing the monotonicity of the new metric.
 
-The name for the new metric will be `<summary metric name>_count`. The fields that are copied are: `timestamp`, `starttimestamp`, `attibutes`, and `description`. The new metric that is created will be passed to all functions in the metrics statements list. Function conditions will apply.
+The name for the new metric will be `<summary metric name>_count`. The fields that are copied are: `timestamp`, `starttimestamp`, `attributes`, and `description`. The new metric that is created will be passed to all functions in the metrics statements list. Function conditions will apply.
 
 **NOTE:** This function may cause a metric to break semantics for [Sum metrics](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#sums). Use at your own risk.
 
@@ -328,7 +328,7 @@ The `convert_summary_sum_val_to_sum` function creates a new Sum metric from a Su
 
 `aggregation_temporality` is a string (`"cumulative"` or `"delta"`) representing the desired aggregation temporality of the new metric.
 `is_monotonic` is a boolean representing the monotonicity of the new metric.
 
-The name for the new metric will be `<summary metric name>_sum`. The fields that are copied are: `timestamp`, `starttimestamp`, `attibutes`, and `description`. The new metric that is created will be passed to all functions in the metrics statements list. Function conditions will apply.
+The name for the new metric will be `<summary metric name>_sum`. The fields that are copied are: `timestamp`, `starttimestamp`, `attributes`, and `description`. The new metric that is created will be passed to all functions in the metrics statements list. Function conditions will apply.
 
 **NOTE:** This function may cause a metric to break semantics for [Sum metrics](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#sums). Use at your own risk.
diff --git a/processor/transformprocessor/internal/common/metrics.go b/processor/transformprocessor/internal/common/metrics.go
index 3ae07920ca2c..f4cec79cd15e 100644
--- a/processor/transformprocessor/internal/common/metrics.go
+++ b/processor/transformprocessor/internal/common/metrics.go
@@ -88,7 +88,7 @@ func (d dataPointStatements) ConsumeMetrics(ctx context.Context, md pmetric.Metr
 			case pmetric.MetricTypeHistogram:
 				err = d.handleHistogramDataPoints(ctx, metric.Histogram().DataPoints(), metrics.At(k), metrics, smetrics.Scope(), rmetrics.Resource(), smetrics, rmetrics)
 			case pmetric.MetricTypeExponentialHistogram:
-				err = d.handleExponetialHistogramDataPoints(ctx, metric.ExponentialHistogram().DataPoints(), metrics.At(k), metrics, smetrics.Scope(), rmetrics.Resource(), smetrics, rmetrics)
+				err = d.handleExponentialHistogramDataPoints(ctx, metric.ExponentialHistogram().DataPoints(), metrics.At(k), metrics, smetrics.Scope(), rmetrics.Resource(), smetrics, rmetrics)
 			case pmetric.MetricTypeSummary:
 				err = d.handleSummaryDataPoints(ctx, metric.Summary().DataPoints(), metrics.At(k), metrics, smetrics.Scope(), rmetrics.Resource(), smetrics, rmetrics)
 			}
@@ -135,7 +135,7 @@ func (d dataPointStatements) handleHistogramDataPoints(ctx context.Context, dps
 	return nil
 }
 
-func (d dataPointStatements) handleExponetialHistogramDataPoints(ctx context.Context, dps pmetric.ExponentialHistogramDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource, scopeMetrics pmetric.ScopeMetrics, resourceMetrics pmetric.ResourceMetrics) error {
+func (d dataPointStatements) handleExponentialHistogramDataPoints(ctx context.Context, dps pmetric.ExponentialHistogramDataPointSlice, metric pmetric.Metric, metrics pmetric.MetricSlice, is pcommon.InstrumentationScope, resource pcommon.Resource, scopeMetrics pmetric.ScopeMetrics, resourceMetrics pmetric.ResourceMetrics) error {
 	for i := 0; i < dps.Len(); i++ {
 		tCtx := ottldatapoint.NewTransformContext(dps.At(i), metric, metrics, is, resource, scopeMetrics, resourceMetrics)
 		condition, err := d.BoolExpr.Eval(ctx, tCtx)
diff --git a/processor/transformprocessor/internal/logs/processor_test.go b/processor/transformprocessor/internal/logs/processor_test.go
index d3e06f65ac01..448328138c21 100644
--- a/processor/transformprocessor/internal/logs/processor_test.go
+++ b/processor/transformprocessor/internal/logs/processor_test.go
@@ -366,13 +366,13 @@ func Test_ProcessLogs_LogContext(t *testing.T) {
 
 func Test_ProcessLogs_MixContext(t *testing.T) {
 	tests := []struct {
-		name             string
-		contextStatments []common.ContextStatements
-		want             func(td plog.Logs)
+		name              string
+		contextStatements []common.ContextStatements
+		want              func(td plog.Logs)
 	}{
 		{
 			name: "set resource and then use",
-			contextStatments: []common.ContextStatements{
+			contextStatements: []common.ContextStatements{
 				{
 					Context: "resource",
 					Statements: []string{
@@ -394,7 +394,7 @@ func Test_ProcessLogs_MixContext(t *testing.T) {
 		},
 		{
 			name: "set scope and then use",
-			contextStatments: []common.ContextStatements{
+			contextStatements: []common.ContextStatements{
 				{
 					Context: "scope",
 					Statements: []string{
@@ -416,7 +416,7 @@ func Test_ProcessLogs_MixContext(t *testing.T) {
 		},
 		{
 			name: "order matters",
-			contextStatments: []common.ContextStatements{
+			contextStatements: []common.ContextStatements{
 				{
 					Context: "log",
 					Statements: []string{
@@ -436,7 +436,7 @@ func Test_ProcessLogs_MixContext(t *testing.T) {
 		},
 		{
 			name: "reuse context",
-			contextStatments: []common.ContextStatements{
+			contextStatements: []common.ContextStatements{
 				{
 					Context: "scope",
 					Statements: []string{
@@ -467,7 +467,7 @@ func Test_ProcessLogs_MixContext(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			td := constructLogs()
-			processor, err := NewProcessor(tt.contextStatments, ottl.IgnoreError, false, componenttest.NewNopTelemetrySettings())
+			processor, err := NewProcessor(tt.contextStatements, ottl.IgnoreError, false, componenttest.NewNopTelemetrySettings())
 			assert.NoError(t, err)
 
 			_, err = processor.ProcessLogs(context.Background(), td)
diff --git a/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist.go b/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist.go
index f23136ac5d66..167d5293461d 100644
--- a/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist.go
+++ b/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist.go
@@ -199,7 +199,7 @@ var midpointAlgorithm distAlgorithm = func(count uint64,
 	(*bucketCountsDst)[len(boundaries)-1] += count // Overflow bucket
 }
 
-// uniformAlgorithm distributes counts from a given set of bucket sounrces into a set of linear boundaries using uniform distribution
+// uniformAlgorithm distributes counts from a given set of bucket sources into a set of linear boundaries using uniform distribution
 var uniformAlgorithm distAlgorithm = func(count uint64,
 	upper, lower float64, boundaries []float64,
 	bucketCountsDst *[]uint64,
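The `uniformAlgorithm` above is one of the selectable strategies for redistributing exponential buckets into explicit bounds. A sketch of selecting it from configuration, assuming the converter is exposed as `convert_exponential_histogram_to_histogram`; the metric name and explicit bounds are illustrative values.

```yaml
processors:
  transform:
    metric_statements:
      - context: metric
        statements:
          # Spread each exponential bucket's count uniformly across the
          # overlapping explicit boundaries.
          - convert_exponential_histogram_to_histogram("uniform", [0.05, 0.1, 0.25, 0.5, 1.0]) where name == "response_latency"
```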
diff --git a/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist_test.go b/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist_test.go
index aee2cdf07fea..4aede94561f5 100644
--- a/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist_test.go
+++ b/processor/transformprocessor/internal/metrics/func_convert_exponential_hist_to_explicit_hist_test.go
@@ -191,7 +191,7 @@ func TestUpper_convert_exponential_hist_to_explicit_hist(t *testing.T) {
 			// 0 scale exponential histogram will result in an extremely large upper bound
 			// resulting in all the counts being in buckets much larger than the explicit bounds
 			// thus all counts will be in the overflow bucket
-			name: "0 scale expontential histogram given using upper distribute",
+			name: "0 scale exponential histogram given using upper distribute",
 			input: func() pmetric.Metric {
 				m := pmetric.NewMetric()
 				defaultTestMetric().CopyTo(m)
@@ -221,7 +221,7 @@ func TestUpper_convert_exponential_hist_to_explicit_hist(t *testing.T) {
 			},
 		},
 		{
-			name: "empty expontential histogram given using upper distribute",
+			name: "empty exponential histogram given using upper distribute",
 			input: func() pmetric.Metric {
 				m := pmetric.NewMetric()
 				m.SetName("empty")
@@ -236,7 +236,7 @@ func TestUpper_convert_exponential_hist_to_explicit_hist(t *testing.T) {
 			},
 		},
 		{
-			name:         "non-expontential histogram",
+			name:         "non-exponential histogram",
 			arg:          []float64{0},
 			distribution: "upper",
 			input:        nonExponentialHist,
@@ -403,7 +403,7 @@ func TestMidpoint_convert_exponential_hist_to_explicit_hist(t *testing.T) {
 			},
 		},
 		{
-			name: "empty expontential histogram given",
+			name: "empty exponential histogram given",
 			input: func() pmetric.Metric {
 				m := pmetric.NewMetric()
 				m.SetName("empty")
@@ -418,7 +418,7 @@ func TestMidpoint_convert_exponential_hist_to_explicit_hist(t *testing.T) {
 			},
 		},
 		{
-			name:         "non-expontential histogram given using upper distribute",
+			name:         "non-exponential histogram given using upper distribute",
 			arg:          []float64{0},
 			distribution: "midpoint",
 			input:        nonExponentialHist,
@@ -448,7 +448,7 @@ func TestMidpoint_convert_exponential_hist_to_explicit_hist(t *testing.T) {
 	}
 }
 
-func TestUniforn_convert_exponential_hist_to_explicit_hist(t *testing.T) {
+func TestUniform_convert_exponential_hist_to_explicit_hist(t *testing.T) {
 	ts := pcommon.NewTimestampFromTime(time.Now())
 	defaultTestMetric := func() pmetric.Metric {
 		m := pmetric.NewMetric()
diff --git a/processor/transformprocessor/internal/metrics/processor_test.go b/processor/transformprocessor/internal/metrics/processor_test.go
index 6087fcd70d74..128a9d00ced0 100644
--- a/processor/transformprocessor/internal/metrics/processor_test.go
+++ b/processor/transformprocessor/internal/metrics/processor_test.go
@@ -726,13 +726,13 @@ func Test_ProcessMetrics_DataPointContext(t *testing.T) {
 
 func Test_ProcessMetrics_MixContext(t *testing.T) {
 	tests := []struct {
-		name             string
-		contextStatments []common.ContextStatements
-		want             func(td pmetric.Metrics)
+		name              string
+		contextStatements []common.ContextStatements
+		want              func(td pmetric.Metrics)
 	}{
 		{
 			name: "set resource and then use",
-			contextStatments: []common.ContextStatements{
+			contextStatements: []common.ContextStatements{
 				{
 					Context: "resource",
 					Statements: []string{
@@ -761,7 +761,7 @@ func Test_ProcessMetrics_MixContext(t *testing.T) {
 		},
 		{
 			name: "set scope and then use",
-			contextStatments: []common.ContextStatements{
+			contextStatements: []common.ContextStatements{
 				{
 					Context: "scope",
 					Statements: []string{
@@ -790,7 +790,7 @@ func Test_ProcessMetrics_MixContext(t *testing.T) {
 		},
 		{
 			name: "order matters",
-			contextStatments: []common.ContextStatements{
+			contextStatements: []common.ContextStatements{
 				{
 					Context: "datapoint",
 					Statements: []string{
@@ -810,7 +810,7 @@ func Test_ProcessMetrics_MixContext(t *testing.T) {
 		},
 		{
 			name: "reuse context ",
-			contextStatments: []common.ContextStatements{
+			contextStatements: []common.ContextStatements{
 				{
 					Context: "scope",
 					Statements: []string{
@@ -848,7 +848,7 @@ func Test_ProcessMetrics_MixContext(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			td := constructMetrics()
-			processor, err := NewProcessor(tt.contextStatments, ottl.IgnoreError, componenttest.NewNopTelemetrySettings())
+			processor, err := NewProcessor(tt.contextStatements, ottl.IgnoreError, componenttest.NewNopTelemetrySettings())
 			assert.NoError(t, err)
 
 			_, err = processor.ProcessMetrics(context.Background(), td)
diff --git a/processor/transformprocessor/internal/traces/processor_test.go b/processor/transformprocessor/internal/traces/processor_test.go
index e6928ba9fa38..0da86dfeb262 100644
--- a/processor/transformprocessor/internal/traces/processor_test.go
+++ b/processor/transformprocessor/internal/traces/processor_test.go
@@ -443,13 +443,13 @@ func Test_ProcessTraces_SpanEventContext(t *testing.T) {
 
 func Test_ProcessTraces_MixContext(t *testing.T) {
 	tests := []struct {
-		name             string
-		contextStatments []common.ContextStatements
-		want             func(td ptrace.Traces)
+		name              string
+		contextStatements []common.ContextStatements
+		want              func(td ptrace.Traces)
 	}{
 		{
 			name: "set resource and then use",
-			contextStatments: []common.ContextStatements{
+			contextStatements: []common.ContextStatements{
 				{
 					Context: "resource",
 					Statements: []string{
@@ -471,7 +471,7 @@ func Test_ProcessTraces_MixContext(t *testing.T) {
 		},
 		{
 			name: "set scope and then use",
-			contextStatments: []common.ContextStatements{
+			contextStatements: []common.ContextStatements{
 				{
 					Context: "scope",
 					Statements: []string{
@@ -493,7 +493,7 @@ func Test_ProcessTraces_MixContext(t *testing.T) {
 		},
 		{
 			name: "order matters",
-			contextStatments: []common.ContextStatements{
+			contextStatements: []common.ContextStatements{
 				{
 					Context: "span",
 					Statements: []string{
@@ -513,7 +513,7 @@ func Test_ProcessTraces_MixContext(t *testing.T) {
 		},
 		{
 			name: "reuse context",
-			contextStatments: []common.ContextStatements{
+			contextStatements: []common.ContextStatements{
 				{
 					Context: "scope",
 					Statements: []string{
@@ -544,7 +544,7 @@ func Test_ProcessTraces_MixContext(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			td := constructTraces()
-			processor, err := NewProcessor(tt.contextStatments, ottl.IgnoreError, componenttest.NewNopTelemetrySettings())
+			processor, err := NewProcessor(tt.contextStatements, ottl.IgnoreError, componenttest.NewNopTelemetrySettings())
 			assert.NoError(t, err)
 
 			_, err = processor.ProcessTraces(context.Background(), td)
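The MixContext tests renamed throughout this patch all assert the same property: context statement groups run in list order, so a value set in a higher-level context is visible to later, lower-level contexts. A hedged configuration equivalent of the "set resource and then use" case; the attribute name is illustrative.

```yaml
processors:
  transform:
    trace_statements:
      # Applied first: stamp the resource.
      - context: resource
        statements:
          - set(attributes["test"], "pass")
      # Applied second: spans read back what the resource statement set.
      - context: span
        statements:
          - set(attributes["test"], resource.attributes["test"])
```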