diff --git a/.chloggen/elasticsearchexporter_drop-non-delta-histogram-exponential-histogram.yaml b/.chloggen/elasticsearchexporter_drop-non-delta-histogram-exponential-histogram.yaml
new file mode 100644
index 000000000000..ae93d7a2c3f9
--- /dev/null
+++ b/.chloggen/elasticsearchexporter_drop-non-delta-histogram-exponential-histogram.yaml
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: breaking
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: elasticsearchexporter
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Drop histogram and exponential histogram metrics with cumulative aggregation temporality
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [35442]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext: Cumulative temporality histograms and exponential histograms are not supported by Elasticsearch. Use the cumulativetodelta processor to convert metrics from cumulative temporality to delta temporality.
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: [user]
diff --git a/.chloggen/elasticsearchexporter_receiver-based-routing.yaml b/.chloggen/elasticsearchexporter_receiver-based-routing.yaml
new file mode 100644
index 000000000000..85101276c2b1
--- /dev/null
+++ b/.chloggen/elasticsearchexporter_receiver-based-routing.yaml
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: breaking
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: elasticsearchexporter
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Implement receiver-based routing under *_dynamic_index config
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [34246]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: [user]
diff --git a/.chloggen/hostmetricsreceiver-mute-all-errors.yaml b/.chloggen/hostmetricsreceiver-mute-all-errors.yaml
new file mode 100644
index 000000000000..585928d258af
--- /dev/null
+++ b/.chloggen/hostmetricsreceiver-mute-all-errors.yaml
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: enhancement
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: hostmetricsreceiver
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Add ability to mute all errors (mainly due to access rights) coming from the process scraper of the hostmetricsreceiver
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [20435]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: []
diff --git a/.chloggen/kubeletstats_featuregate_metrics.yaml b/.chloggen/kubeletstats_featuregate_metrics.yaml
new file mode 100644
index 000000000000..9c1fed91a6b1
--- /dev/null
+++ b/.chloggen/kubeletstats_featuregate_metrics.yaml
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: enhancement
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: kubeletstats
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Introduce feature gate for deprecation of container.cpu.utilization, k8s.pod.cpu.utilization and k8s.node.cpu.utilization metrics
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [35139]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: [user]
diff --git a/.chloggen/metricsgeneration_relax_type_req.yaml b/.chloggen/metricsgeneration_relax_type_req.yaml
new file mode 100644
index 000000000000..cd8ba247c435
--- /dev/null
+++ b/.chloggen/metricsgeneration_relax_type_req.yaml
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: bug_fix
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: metricsgenerationprocessor
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Allow metric calculations to be done on sum metrics
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [35428]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: []
diff --git a/.chloggen/ottl-add-element-xml.yaml b/.chloggen/ottl-add-element-xml.yaml
new file mode 100644
index 000000000000..f74f7c3fd3c1
--- /dev/null
+++ b/.chloggen/ottl-add-element-xml.yaml
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: 'enhancement'
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: pkg/ottl
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Add InsertXML Converter
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [35436]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: []
diff --git a/.chloggen/remove_exclusion_rule.yaml b/.chloggen/remove_exclusion_rule.yaml
new file mode 100644
index 000000000000..e70d4e3ef527
--- /dev/null
+++ b/.chloggen/remove_exclusion_rule.yaml
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: breaking
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: signalfxexporter
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Do not exclude the metric `container.memory.working_set`
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [35475]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: []
diff --git a/.chloggen/sqlqueryreceiver-fix-ts-tracking-column.yaml b/.chloggen/sqlqueryreceiver-fix-ts-tracking-column.yaml
new file mode 100644
index 000000000000..73d0823dfe83
--- /dev/null
+++ b/.chloggen/sqlqueryreceiver-fix-ts-tracking-column.yaml
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: bug_fix
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: sqlqueryreceiver
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Fix reprocessing of logs when tracking_column type is timestamp
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [35194]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: []
diff --git a/.chloggen/sqlserver_computer_rattr.yaml b/.chloggen/sqlserver_computer_rattr.yaml
new file mode 100644
index 000000000000..8bf4ab24527a
--- /dev/null
+++ b/.chloggen/sqlserver_computer_rattr.yaml
@@ -0,0 +1,27 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: enhancement
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: sqlserverreceiver
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Add computer name resource attribute to relevant metrics
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [35040]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# If your change doesn't affect end users or the exported elements of any package,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: []
diff --git a/connector/countconnector/config_test.go b/connector/countconnector/config_test.go
index 02f9a1a1104c..d3f37776f846 100644
--- a/connector/countconnector/config_test.go
+++ b/connector/countconnector/config_test.go
@@ -515,8 +515,7 @@ func TestConfigErrors(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
err := tc.input.Validate()
- assert.Error(t, err)
- assert.Contains(t, err.Error(), tc.expect)
+ assert.ErrorContains(t, err, tc.expect)
})
}
}
diff --git a/connector/sumconnector/config_test.go b/connector/sumconnector/config_test.go
index 17cc98ed5926..79462d8f3099 100644
--- a/connector/sumconnector/config_test.go
+++ b/connector/sumconnector/config_test.go
@@ -574,8 +574,7 @@ func TestConfigErrors(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
err := tc.input.Validate()
- assert.Error(t, err)
- assert.Contains(t, err.Error(), tc.expect)
+ assert.ErrorContains(t, err, tc.expect)
})
}
}
diff --git a/exporter/clickhouseexporter/exporter_logs_test.go b/exporter/clickhouseexporter/exporter_logs_test.go
index a83b0952faff..7388e68f243c 100644
--- a/exporter/clickhouseexporter/exporter_logs_test.go
+++ b/exporter/clickhouseexporter/exporter_logs_test.go
@@ -40,8 +40,7 @@ func TestLogsExporter_New(t *testing.T) {
failWithMsg := func(msg string) validate {
return func(t *testing.T, _ *logsExporter, err error) {
- require.Error(t, err)
- require.Contains(t, err.Error(), msg)
+ require.ErrorContains(t, err, msg)
}
}
diff --git a/exporter/elasticsearchexporter/README.md b/exporter/elasticsearchexporter/README.md
index 083aa2826182..31d2cae89c6f 100644
--- a/exporter/elasticsearchexporter/README.md
+++ b/exporter/elasticsearchexporter/README.md
@@ -121,7 +121,7 @@ This can be customised through the following settings:
- `logs_dynamic_index` (optional): uses resource, scope, or log record attributes to dynamically construct index name.
- `enabled`(default=false): Enable/Disable dynamic index for log records. If `data_stream.dataset` or `data_stream.namespace` exist in attributes (precedence: log record attribute > scope attribute > resource attribute), they will be used to dynamically construct index name in the form `logs-${data_stream.dataset}-${data_stream.namespace}`. Otherwise, if
- `elasticsearch.index.prefix` or `elasticsearch.index.suffix` exist in attributes (precedence: resource attribute > scope attribute > log record attribute), they will be used to dynamically construct index name in the form `${elasticsearch.index.prefix}${logs_index}${elasticsearch.index.suffix}`. Otherwise, the index name falls back to `logs-generic-default`, and `logs_index` config will be ignored. Except for prefix/suffix attribute presence, the resulting docs will contain the corresponding `data_stream.*` fields.
+  `elasticsearch.index.prefix` or `elasticsearch.index.suffix` exist in attributes (precedence: resource attribute > scope attribute > log record attribute), they will be used to dynamically construct index name in the form `${elasticsearch.index.prefix}${logs_index}${elasticsearch.index.suffix}`. Otherwise, if the scope name matches regex `/receiver/(\w*receiver)`, `data_stream.dataset` will be set to capture group #1. Otherwise, the index name falls back to `logs-generic-default`, and `logs_index` config will be ignored. Except for prefix/suffix attribute presence, the resulting docs will contain the corresponding `data_stream.*` fields.
- `metrics_index` (optional): The [index] or [data stream] name to publish metrics to. The default value is `metrics-generic-default`.
⚠️ Note that metrics support is currently in development.
@@ -129,13 +129,13 @@ This can be customised through the following settings:
- `metrics_dynamic_index` (optional): uses resource, scope or data point attributes to dynamically construct index name.
⚠️ Note that metrics support is currently in development.
- `enabled`(default=true): Enable/disable dynamic index for metrics. If `data_stream.dataset` or `data_stream.namespace` exist in attributes (precedence: data point attribute > scope attribute > resource attribute), they will be used to dynamically construct index name in the form `metrics-${data_stream.dataset}-${data_stream.namespace}`. Otherwise, if
- `elasticsearch.index.prefix` or `elasticsearch.index.suffix` exist in attributes (precedence: resource attribute > scope attribute > data point attribute), they will be used to dynamically construct index name in the form `${elasticsearch.index.prefix}${metrics_index}${elasticsearch.index.suffix}`. Otherwise, the index name falls back to `metrics-generic-default`, and `metrics_index` config will be ignored. Except for prefix/suffix attribute presence, the resulting docs will contain the corresponding `data_stream.*` fields.
+  `elasticsearch.index.prefix` or `elasticsearch.index.suffix` exist in attributes (precedence: resource attribute > scope attribute > data point attribute), they will be used to dynamically construct index name in the form `${elasticsearch.index.prefix}${metrics_index}${elasticsearch.index.suffix}`. Otherwise, if the scope name matches regex `/receiver/(\w*receiver)`, `data_stream.dataset` will be set to capture group #1. Otherwise, the index name falls back to `metrics-generic-default`, and `metrics_index` config will be ignored. Except for prefix/suffix attribute presence, the resulting docs will contain the corresponding `data_stream.*` fields.
- `traces_index`: The [index] or [data stream] name to publish traces to. The default value is `traces-generic-default`.
- `traces_dynamic_index` (optional): uses resource, scope, or span attributes to dynamically construct index name.
- `enabled`(default=false): Enable/Disable dynamic index for trace spans. If `data_stream.dataset` or `data_stream.namespace` exist in attributes (precedence: span attribute > scope attribute > resource attribute), they will be used to dynamically construct index name in the form `traces-${data_stream.dataset}-${data_stream.namespace}`. Otherwise, if
- `elasticsearch.index.prefix` or `elasticsearch.index.suffix` exist in attributes (precedence: resource attribute > scope attribute > span attribute), they will be used to dynamically construct index name in the form `${elasticsearch.index.prefix}${traces_index}${elasticsearch.index.suffix}`. Otherwise, the index name falls back to `traces-generic-default`, and `traces_index` config will be ignored. Except for prefix/suffix attribute presence, the resulting docs will contain the corresponding `data_stream.*` fields. There is an exception for span events under OTel mapping mode (`mapping::mode: otel`), where span event attributes instead of span attributes are considered, and `data_stream.type` is always `logs` instead of `traces` such that documents are routed to `logs-${data_stream.dataset}-${data_stream.namespace}`.
+  `elasticsearch.index.prefix` or `elasticsearch.index.suffix` exist in attributes (precedence: resource attribute > scope attribute > span attribute), they will be used to dynamically construct index name in the form `${elasticsearch.index.prefix}${traces_index}${elasticsearch.index.suffix}`. Otherwise, if the scope name matches regex `/receiver/(\w*receiver)`, `data_stream.dataset` will be set to capture group #1. Otherwise, the index name falls back to `traces-generic-default`, and `traces_index` config will be ignored. Except for prefix/suffix attribute presence, the resulting docs will contain the corresponding `data_stream.*` fields. There is an exception for span events under OTel mapping mode (`mapping::mode: otel`), where span event attributes instead of span attributes are considered, and `data_stream.type` is always `logs` instead of `traces` such that documents are routed to `logs-${data_stream.dataset}-${data_stream.namespace}`.
- `logstash_format` (optional): Logstash format compatibility. Logs, metrics and traces can be written into an index in Logstash format.
- `enabled`(default=false): Enable/disable Logstash format compatibility. When `logstash_format.enabled` is `true`, the index name is composed using `(logs|metrics|traces)_index` or `(logs|metrics|traces)_dynamic_index` as prefix and the date as suffix,
@@ -235,8 +235,8 @@ The metric types supported are:
- Gauge
- Sum
-- Histogram
-- Exponential histogram
+- Histogram (Delta temporality only)
+- Exponential histogram (Delta temporality only)
- Summary
[confighttp]: https://github.com/open-telemetry/opentelemetry-collector/tree/main/config/confighttp/README.md#http-configuration-settings
diff --git a/exporter/elasticsearchexporter/data_stream_router.go b/exporter/elasticsearchexporter/data_stream_router.go
index 851bb92d9756..df9b17c6cc6e 100644
--- a/exporter/elasticsearchexporter/data_stream_router.go
+++ b/exporter/elasticsearchexporter/data_stream_router.go
@@ -5,18 +5,20 @@ package elasticsearchexporter // import "github.com/open-telemetry/opentelemetry
import (
"fmt"
+ "regexp"
"go.opentelemetry.io/collector/pdata/pcommon"
- "go.opentelemetry.io/collector/pdata/plog"
- "go.opentelemetry.io/collector/pdata/ptrace"
)
+var receiverRegex = regexp.MustCompile(`/receiver/(\w*receiver)`)
+
func routeWithDefaults(defaultDSType string) func(
pcommon.Map,
pcommon.Map,
pcommon.Map,
string,
bool,
+ string,
) string {
return func(
recordAttr pcommon.Map,
@@ -24,11 +26,13 @@ func routeWithDefaults(defaultDSType string) func(
resourceAttr pcommon.Map,
fIndex string,
otel bool,
+ scopeName string,
) string {
// Order:
// 1. read data_stream.* from attributes
// 2. read elasticsearch.index.* from attributes
- // 3. use default hardcoded data_stream.*
+ // 3. receiver-based routing
+ // 4. use default hardcoded data_stream.*
dataset, datasetExists := getFromAttributes(dataStreamDataset, defaultDataStreamDataset, recordAttr, scopeAttr, resourceAttr)
namespace, namespaceExists := getFromAttributes(dataStreamNamespace, defaultDataStreamNamespace, recordAttr, scopeAttr, resourceAttr)
dataStreamMode := datasetExists || namespaceExists
@@ -40,8 +44,17 @@ func routeWithDefaults(defaultDSType string) func(
}
}
+ // Receiver-based routing
+ // For example, hostmetricsreceiver (or hostmetricsreceiver.otel in the OTel output mode)
+ // for the scope name
+ // github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/cpuscraper
+ if submatch := receiverRegex.FindStringSubmatch(scopeName); len(submatch) > 0 {
+ receiverName := submatch[1]
+ dataset = receiverName
+ }
+
// The naming convention for datastream is expected to be "logs-[dataset].otel-[namespace]".
- // This is in order to match the soon to be built-in logs-*.otel-* index template.
+ // This is in order to match the built-in logs-*.otel-* index template.
if otel {
dataset += ".otel"
}
@@ -53,55 +66,20 @@ func routeWithDefaults(defaultDSType string) func(
}
}
-// routeLogRecord returns the name of the index to send the log record to according to data stream routing attributes and prefix/suffix attributes.
-// This function may mutate record attributes.
-func routeLogRecord(
- record plog.LogRecord,
- scope pcommon.InstrumentationScope,
- resource pcommon.Resource,
- fIndex string,
- otel bool,
-) string {
- route := routeWithDefaults(defaultDataStreamTypeLogs)
- return route(record.Attributes(), scope.Attributes(), resource.Attributes(), fIndex, otel)
-}
+var (
+ // routeLogRecord returns the name of the index to send the log record to according to data stream routing related attributes.
+ // This function may mutate record attributes.
+ routeLogRecord = routeWithDefaults(defaultDataStreamTypeLogs)
-// routeDataPoint returns the name of the index to send the data point to according to data stream routing attributes.
-// This function may mutate record attributes.
-func routeDataPoint(
- dataPoint dataPoint,
- scope pcommon.InstrumentationScope,
- resource pcommon.Resource,
- fIndex string,
- otel bool,
-) string {
- route := routeWithDefaults(defaultDataStreamTypeMetrics)
- return route(dataPoint.Attributes(), scope.Attributes(), resource.Attributes(), fIndex, otel)
-}
+ // routeDataPoint returns the name of the index to send the data point to according to data stream routing related attributes.
+ // This function may mutate record attributes.
+ routeDataPoint = routeWithDefaults(defaultDataStreamTypeMetrics)
-// routeSpan returns the name of the index to send the span to according to data stream routing attributes.
-// This function may mutate record attributes.
-func routeSpan(
- span ptrace.Span,
- scope pcommon.InstrumentationScope,
- resource pcommon.Resource,
- fIndex string,
- otel bool,
-) string {
- route := routeWithDefaults(defaultDataStreamTypeTraces)
- return route(span.Attributes(), scope.Attributes(), resource.Attributes(), fIndex, otel)
-}
+ // routeSpan returns the name of the index to send the span to according to data stream routing related attributes.
+ // This function may mutate record attributes.
+ routeSpan = routeWithDefaults(defaultDataStreamTypeTraces)
-// routeSpanEvent returns the name of the index to send the span event to according to data stream routing attributes.
-// This function may mutate record attributes.
-func routeSpanEvent(
- spanEvent ptrace.SpanEvent,
- scope pcommon.InstrumentationScope,
- resource pcommon.Resource,
- fIndex string,
- otel bool,
-) string {
- // span events are sent to logs-*, not traces-*
- route := routeWithDefaults(defaultDataStreamTypeLogs)
- return route(spanEvent.Attributes(), scope.Attributes(), resource.Attributes(), fIndex, otel)
-}
+ // routeSpanEvent returns the name of the index to send the span event to according to data stream routing related attributes.
+ // This function may mutate record attributes.
+ routeSpanEvent = routeWithDefaults(defaultDataStreamTypeLogs)
+)
diff --git a/exporter/elasticsearchexporter/data_stream_router_test.go b/exporter/elasticsearchexporter/data_stream_router_test.go
index 0d64a6b2184a..81450da4d7a1 100644
--- a/exporter/elasticsearchexporter/data_stream_router_test.go
+++ b/exporter/elasticsearchexporter/data_stream_router_test.go
@@ -8,70 +8,90 @@ import (
"testing"
"github.com/stretchr/testify/assert"
- "go.opentelemetry.io/collector/pdata/plog"
- "go.opentelemetry.io/collector/pdata/pmetric"
- "go.opentelemetry.io/collector/pdata/ptrace"
+ "go.opentelemetry.io/collector/pdata/pcommon"
)
-type routeTestInfo struct {
- name string
- otel bool
- want string
+type routeTestCase struct {
+ name string
+ otel bool
+ scopeName string
+ want string
}
-func createRouteTests(dsType string) []routeTestInfo {
- renderWantRoute := func(dsType string, otel bool) string {
+func createRouteTests(dsType string) []routeTestCase {
+ renderWantRoute := func(dsType, dsDataset string, otel bool) string {
if otel {
- return fmt.Sprintf("%s-%s.otel-%s", dsType, defaultDataStreamDataset, defaultDataStreamNamespace)
+ return fmt.Sprintf("%s-%s.otel-%s", dsType, dsDataset, defaultDataStreamNamespace)
}
- return fmt.Sprintf("%s-%s-%s", dsType, defaultDataStreamDataset, defaultDataStreamNamespace)
+ return fmt.Sprintf("%s-%s-%s", dsType, dsDataset, defaultDataStreamNamespace)
}
- return []routeTestInfo{
+ return []routeTestCase{
{
name: "default",
otel: false,
- want: renderWantRoute(dsType, false),
+ want: renderWantRoute(dsType, defaultDataStreamDataset, false),
},
{
name: "otel",
otel: true,
- want: renderWantRoute(dsType, true),
+ want: renderWantRoute(dsType, defaultDataStreamDataset, true),
+ },
+ {
+ name: "default with receiver scope name",
+ otel: false,
+ scopeName: "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/cpuscraper",
+ want: renderWantRoute(dsType, "hostmetricsreceiver", false),
+ },
+ {
+ name: "otel with receiver scope name",
+ otel: true,
+ scopeName: "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/cpuscraper",
+ want: renderWantRoute(dsType, "hostmetricsreceiver", true),
+ },
+ {
+ name: "default with non-receiver scope name",
+ otel: false,
+ scopeName: "some_other_scope_name",
+ want: renderWantRoute(dsType, defaultDataStreamDataset, false),
+ },
+ {
+ name: "otel with non-receiver scope name",
+ otel: true,
+ scopeName: "some_other_scope_name",
+ want: renderWantRoute(dsType, defaultDataStreamDataset, true),
},
}
}
func TestRouteLogRecord(t *testing.T) {
-
tests := createRouteTests(defaultDataStreamTypeLogs)
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
- ds := routeLogRecord(plog.NewLogRecord(), plog.NewScopeLogs().Scope(), plog.NewResourceLogs().Resource(), "", tc.otel)
+ ds := routeLogRecord(pcommon.NewMap(), pcommon.NewMap(), pcommon.NewMap(), "", tc.otel, tc.scopeName)
assert.Equal(t, tc.want, ds)
})
}
}
func TestRouteDataPoint(t *testing.T) {
-
tests := createRouteTests(defaultDataStreamTypeMetrics)
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
- ds := routeDataPoint(numberDataPoint{pmetric.NewNumberDataPoint()}, plog.NewScopeLogs().Scope(), plog.NewResourceLogs().Resource(), "", tc.otel)
+ ds := routeDataPoint(pcommon.NewMap(), pcommon.NewMap(), pcommon.NewMap(), "", tc.otel, tc.scopeName)
assert.Equal(t, tc.want, ds)
})
}
}
func TestRouteSpan(t *testing.T) {
-
tests := createRouteTests(defaultDataStreamTypeTraces)
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
- ds := routeSpan(ptrace.NewSpan(), plog.NewScopeLogs().Scope(), plog.NewResourceLogs().Resource(), "", tc.otel)
+ ds := routeSpan(pcommon.NewMap(), pcommon.NewMap(), pcommon.NewMap(), "", tc.otel, tc.scopeName)
assert.Equal(t, tc.want, ds)
})
}
diff --git a/exporter/elasticsearchexporter/exporter.go b/exporter/elasticsearchexporter/exporter.go
index 3c4725cac266..e8990fc1a606 100644
--- a/exporter/elasticsearchexporter/exporter.go
+++ b/exporter/elasticsearchexporter/exporter.go
@@ -156,7 +156,7 @@ func (e *elasticsearchExporter) pushLogRecord(
) error {
fIndex := e.index
if e.dynamicIndex {
- fIndex = routeLogRecord(record, scope, resource, fIndex, e.otel)
+ fIndex = routeLogRecord(record.Attributes(), scope.Attributes(), resource.Attributes(), fIndex, e.otel, scope.Name())
}
if e.logstashFormat.Enabled {
@@ -238,6 +238,10 @@ func (e *elasticsearchExporter) pushMetricsData(
}
}
case pmetric.MetricTypeExponentialHistogram:
+ if metric.ExponentialHistogram().AggregationTemporality() == pmetric.AggregationTemporalityCumulative {
+ errs = append(errs, fmt.Errorf("dropping cumulative temporality exponential histogram %q", metric.Name()))
+ continue
+ }
dps := metric.ExponentialHistogram().DataPoints()
for l := 0; l < dps.Len(); l++ {
dp := dps.At(l)
@@ -247,6 +251,10 @@ func (e *elasticsearchExporter) pushMetricsData(
}
}
case pmetric.MetricTypeHistogram:
+ if metric.Histogram().AggregationTemporality() == pmetric.AggregationTemporalityCumulative {
+ errs = append(errs, fmt.Errorf("dropping cumulative temporality histogram %q", metric.Name()))
+ continue
+ }
dps := metric.Histogram().DataPoints()
for l := 0; l < dps.Len(); l++ {
dp := dps.At(l)
@@ -305,7 +313,7 @@ func (e *elasticsearchExporter) getMetricDataPointIndex(
) (string, error) {
fIndex := e.index
if e.dynamicIndex {
- fIndex = routeDataPoint(dataPoint, scope, resource, fIndex, e.otel)
+ fIndex = routeDataPoint(dataPoint.Attributes(), scope.Attributes(), resource.Attributes(), fIndex, e.otel, scope.Name())
}
if e.logstashFormat.Enabled {
@@ -379,7 +387,7 @@ func (e *elasticsearchExporter) pushTraceRecord(
) error {
fIndex := e.index
if e.dynamicIndex {
- fIndex = routeSpan(span, scope, resource, fIndex, e.otel)
+ fIndex = routeSpan(span.Attributes(), scope.Attributes(), resource.Attributes(), fIndex, e.otel, span.Name())
}
if e.logstashFormat.Enabled {
@@ -409,7 +417,7 @@ func (e *elasticsearchExporter) pushSpanEvent(
) error {
fIndex := e.index
if e.dynamicIndex {
- fIndex = routeSpanEvent(spanEvent, scope, resource, fIndex, e.otel)
+ fIndex = routeSpanEvent(spanEvent.Attributes(), scope.Attributes(), resource.Attributes(), fIndex, e.otel, scope.Name())
}
if e.logstashFormat.Enabled {
diff --git a/exporter/elasticsearchexporter/exporter_test.go b/exporter/elasticsearchexporter/exporter_test.go
index 3c11272f408f..bb78458eada3 100644
--- a/exporter/elasticsearchexporter/exporter_test.go
+++ b/exporter/elasticsearchexporter/exporter_test.go
@@ -730,7 +730,9 @@ func TestExporterMetrics(t *testing.T) {
metricSlice := scopeA.Metrics()
fooMetric := metricSlice.AppendEmpty()
fooMetric.SetName("metric.foo")
- fooDps := fooMetric.SetEmptyHistogram().DataPoints()
+ fooHistogram := fooMetric.SetEmptyHistogram()
+ fooHistogram.SetAggregationTemporality(pmetric.AggregationTemporalityDelta)
+ fooDps := fooHistogram.DataPoints()
fooDp := fooDps.AppendEmpty()
fooDp.ExplicitBounds().FromRaw([]float64{1.0, 2.0, 3.0})
fooDp.BucketCounts().FromRaw([]uint64{1, 2, 3, 4})
@@ -774,7 +776,9 @@ func TestExporterMetrics(t *testing.T) {
metricSlice := scopeA.Metrics()
fooMetric := metricSlice.AppendEmpty()
fooMetric.SetName("metric.foo")
- fooDps := fooMetric.SetEmptyExponentialHistogram().DataPoints()
+ fooHistogram := fooMetric.SetEmptyExponentialHistogram()
+ fooHistogram.SetAggregationTemporality(pmetric.AggregationTemporalityDelta)
+ fooDps := fooHistogram.DataPoints()
fooDp := fooDps.AppendEmpty()
fooDp.SetZeroCount(2)
fooDp.Positive().SetOffset(1)
@@ -797,6 +801,64 @@ func TestExporterMetrics(t *testing.T) {
assertItemsEqual(t, expected, rec.Items(), false)
})
+ t.Run("publish histogram cumulative temporality", func(t *testing.T) {
+ server := newESTestServer(t, func(_ []itemRequest) ([]itemResponse, error) {
+ require.Fail(t, "unexpected request")
+ return nil, nil
+ })
+
+ exporter := newTestMetricsExporter(t, server.URL, func(cfg *Config) {
+ cfg.Mapping.Mode = "ecs"
+ })
+
+ metrics := pmetric.NewMetrics()
+ resourceMetrics := metrics.ResourceMetrics().AppendEmpty()
+ scopeA := resourceMetrics.ScopeMetrics().AppendEmpty()
+ metricSlice := scopeA.Metrics()
+ fooMetric := metricSlice.AppendEmpty()
+ fooMetric.SetName("metric.foo")
+ fooHistogram := fooMetric.SetEmptyHistogram()
+ fooHistogram.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
+ fooDps := fooHistogram.DataPoints()
+ fooDp := fooDps.AppendEmpty()
+ fooDp.ExplicitBounds().FromRaw([]float64{1.0, 2.0, 3.0})
+ fooDp.BucketCounts().FromRaw([]uint64{1, 2, 3, 4})
+
+ err := exporter.ConsumeMetrics(context.Background(), metrics)
+ assert.ErrorContains(t, err, "dropping cumulative temporality histogram \"metric.foo\"")
+ })
+
+ t.Run("publish exponential histogram cumulative temporality", func(t *testing.T) {
+ server := newESTestServer(t, func(_ []itemRequest) ([]itemResponse, error) {
+ require.Fail(t, "unexpected request")
+ return nil, nil
+ })
+
+ exporter := newTestMetricsExporter(t, server.URL, func(cfg *Config) {
+ cfg.Mapping.Mode = "ecs"
+ })
+
+ metrics := pmetric.NewMetrics()
+ resourceMetrics := metrics.ResourceMetrics().AppendEmpty()
+ scopeA := resourceMetrics.ScopeMetrics().AppendEmpty()
+ metricSlice := scopeA.Metrics()
+ fooMetric := metricSlice.AppendEmpty()
+ fooMetric.SetName("metric.foo")
+ fooHistogram := fooMetric.SetEmptyExponentialHistogram()
+ fooHistogram.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
+ fooDps := fooHistogram.DataPoints()
+ fooDp := fooDps.AppendEmpty()
+ fooDp.SetZeroCount(2)
+ fooDp.Positive().SetOffset(1)
+ fooDp.Positive().BucketCounts().FromRaw([]uint64{0, 1, 1, 0})
+
+ fooDp.Negative().SetOffset(1)
+ fooDp.Negative().BucketCounts().FromRaw([]uint64{1, 0, 0, 1})
+
+ err := exporter.ConsumeMetrics(context.Background(), metrics)
+ assert.ErrorContains(t, err, "dropping cumulative temporality exponential histogram \"metric.foo\"")
+ })
+
t.Run("publish only valid data points", func(t *testing.T) {
rec := newBulkRecorder()
server := newESTestServer(t, func(docs []itemRequest) ([]itemResponse, error) {
@@ -814,7 +876,9 @@ func TestExporterMetrics(t *testing.T) {
metricSlice := scopeA.Metrics()
fooMetric := metricSlice.AppendEmpty()
fooMetric.SetName("metric.foo")
- fooDps := fooMetric.SetEmptyHistogram().DataPoints()
+ fooHistogram := fooMetric.SetEmptyHistogram()
+ fooHistogram.SetAggregationTemporality(pmetric.AggregationTemporalityDelta)
+ fooDps := fooHistogram.DataPoints()
fooDp := fooDps.AppendEmpty()
fooDp.ExplicitBounds().FromRaw([]float64{1.0, 2.0, 3.0})
fooDp.BucketCounts().FromRaw([]uint64{})
@@ -867,7 +931,9 @@ func TestExporterMetrics(t *testing.T) {
metricSlice := scopeA.Metrics()
fooMetric := metricSlice.AppendEmpty()
fooMetric.SetName("metric.foo")
- fooDps := fooMetric.SetEmptyHistogram().DataPoints()
+ fooHistogram := fooMetric.SetEmptyHistogram()
+ fooHistogram.SetAggregationTemporality(pmetric.AggregationTemporalityDelta)
+ fooDps := fooHistogram.DataPoints()
fooDp := fooDps.AppendEmpty()
fooDp.ExplicitBounds().FromRaw([]float64{1.0, 2.0, 3.0})
fooDp.BucketCounts().FromRaw([]uint64{1, 2, 3, 4})
@@ -911,7 +977,7 @@ func TestExporterMetrics(t *testing.T) {
Document: []byte(`{"@timestamp":"1970-01-01T01:00:00.000000000Z","data_stream":{"dataset":"generic.otel","namespace":"default","type":"metrics"},"metrics":{"metric.sum":1.5},"resource":{"dropped_attributes_count":0},"scope":{"dropped_attributes_count":0},"start_timestamp":"1970-01-01T02:00:00.000000000Z"}`),
},
{
- Action: []byte(`{"create":{"_index":"metrics-generic.otel-default","dynamic_templates":{"metrics.metric.summary":"summary_metrics"}}}`),
+ Action: []byte(`{"create":{"_index":"metrics-generic.otel-default","dynamic_templates":{"metrics.metric.summary":"summary"}}}`),
Document: []byte(`{"@timestamp":"1970-01-01T03:00:00.000000000Z","data_stream":{"dataset":"generic.otel","namespace":"default","type":"metrics"},"metrics":{"metric.summary":{"sum":1.5,"value_count":1}},"resource":{"dropped_attributes_count":0},"scope":{"dropped_attributes_count":0},"start_timestamp":"1970-01-01T03:00:00.000000000Z"}`),
},
}
@@ -981,7 +1047,7 @@ func TestExporterMetrics(t *testing.T) {
rec.WaitItems(2)
expected := []itemRequest{
{
- Action: []byte(`{"create":{"_index":"metrics-generic.otel-default","dynamic_templates":{"metrics.summary":"summary_metrics"}}}`),
+ Action: []byte(`{"create":{"_index":"metrics-generic.otel-default","dynamic_templates":{"metrics.summary":"summary"}}}`),
Document: []byte(`{"@timestamp":"1970-01-01T00:00:00.000000000Z","_doc_count":10,"attributes":{"_doc_count":true},"data_stream":{"dataset":"generic.otel","namespace":"default","type":"metrics"},"metrics":{"summary":{"sum":1.0,"value_count":10}},"resource":{"dropped_attributes_count":0},"scope":{"dropped_attributes_count":0}}`),
},
{
diff --git a/exporter/elasticsearchexporter/model.go b/exporter/elasticsearchexporter/model.go
index fa0296d3d7a3..4ad88c6d5483 100644
--- a/exporter/elasticsearchexporter/model.go
+++ b/exporter/elasticsearchexporter/model.go
@@ -345,7 +345,7 @@ func (dp summaryDataPoint) Value() (pcommon.Value, error) {
}
func (dp summaryDataPoint) DynamicTemplate(_ pmetric.Metric) string {
- return "summary_metrics"
+ return "summary"
}
func (dp summaryDataPoint) DocCount() uint64 {
diff --git a/exporter/elasticsearchexporter/model_test.go b/exporter/elasticsearchexporter/model_test.go
index e0e719586b61..5ce8a04115e8 100644
--- a/exporter/elasticsearchexporter/model_test.go
+++ b/exporter/elasticsearchexporter/model_test.go
@@ -1055,7 +1055,7 @@ func TestEncodeLogOtelMode(t *testing.T) {
record, scope, resource := createTestOTelLogRecord(t, tc.rec)
// This sets the data_stream values default or derived from the record/scope/resources
- routeLogRecord(record, scope, resource, "", true)
+ routeLogRecord(record.Attributes(), scope.Attributes(), resource.Attributes(), "", true, scope.Name())
b, err := m.encodeLog(resource, tc.rec.Resource.SchemaURL, record, scope, tc.rec.Scope.SchemaURL)
require.NoError(t, err)
diff --git a/exporter/kafkaexporter/kafka_exporter_test.go b/exporter/kafkaexporter/kafka_exporter_test.go
index 670318887703..c229b2829890 100644
--- a/exporter/kafkaexporter/kafka_exporter_test.go
+++ b/exporter/kafkaexporter/kafka_exporter_test.go
@@ -131,18 +131,15 @@ func TestNewExporter_err_auth_type(t *testing.T) {
texp := newTracesExporter(c, exportertest.NewNopSettings())
require.NotNil(t, texp)
err := texp.start(context.Background(), componenttest.NewNopHost())
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "failed to load TLS config")
+ assert.ErrorContains(t, err, "failed to load TLS config")
mexp := newMetricsExporter(c, exportertest.NewNopSettings())
require.NotNil(t, mexp)
err = mexp.start(context.Background(), componenttest.NewNopHost())
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "failed to load TLS config")
+ assert.ErrorContains(t, err, "failed to load TLS config")
lexp := newLogsExporter(c, exportertest.NewNopSettings())
require.NotNil(t, lexp)
err = lexp.start(context.Background(), componenttest.NewNopHost())
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "failed to load TLS config")
+ assert.ErrorContains(t, err, "failed to load TLS config")
}
@@ -157,7 +154,7 @@ func TestNewExporter_err_compression(t *testing.T) {
require.NotNil(t, texp)
err := texp.start(context.Background(), componenttest.NewNopHost())
assert.Error(t, err)
- assert.Contains(t, err.Error(), "producer.compression should be one of 'none', 'gzip', 'snappy', 'lz4', or 'zstd'. configured value idk")
+ assert.ErrorContains(t, err, "producer.compression should be one of 'none', 'gzip', 'snappy', 'lz4', or 'zstd'. configured value idk")
}
func TestTracesExporter_encoding_extension(t *testing.T) {
@@ -249,8 +246,7 @@ func TestTracesPusher_marshal_error(t *testing.T) {
}
td := testdata.GenerateTraces(2)
err := p.tracesPusher(context.Background(), td)
- require.Error(t, err)
- assert.Contains(t, err.Error(), expErr.Error())
+ assert.ErrorContains(t, err, expErr.Error())
}
func TestMetricsDataPusher(t *testing.T) {
@@ -331,8 +327,7 @@ func TestMetricsDataPusher_marshal_error(t *testing.T) {
}
md := testdata.GenerateMetrics(2)
err := p.metricsDataPusher(context.Background(), md)
- require.Error(t, err)
- assert.Contains(t, err.Error(), expErr.Error())
+ assert.ErrorContains(t, err, expErr.Error())
}
func TestLogsDataPusher(t *testing.T) {
@@ -413,8 +408,7 @@ func TestLogsDataPusher_marshal_error(t *testing.T) {
}
ld := testdata.GenerateLogs(1)
err := p.logsDataPusher(context.Background(), ld)
- require.Error(t, err)
- assert.Contains(t, err.Error(), expErr.Error())
+ assert.ErrorContains(t, err, expErr.Error())
}
type tracesErrorMarshaler struct {
diff --git a/exporter/lokiexporter/config_test.go b/exporter/lokiexporter/config_test.go
index 0e5dbc510122..caf6ee5a120e 100644
--- a/exporter/lokiexporter/config_test.go
+++ b/exporter/lokiexporter/config_test.go
@@ -122,8 +122,7 @@ func TestConfigValidate(t *testing.T) {
t.Run(tc.desc, func(t *testing.T) {
err := tc.cfg.Validate()
if tc.err != nil {
- require.Error(t, err)
- assert.Contains(t, err.Error(), tc.err.Error())
+ assert.ErrorContains(t, err, tc.err.Error())
} else {
require.NoError(t, err)
}
diff --git a/exporter/otelarrowexporter/internal/arrow/stream_test.go b/exporter/otelarrowexporter/internal/arrow/stream_test.go
index 9b39d4d9c644..5b38bd3071fc 100644
--- a/exporter/otelarrowexporter/internal/arrow/stream_test.go
+++ b/exporter/otelarrowexporter/internal/arrow/stream_test.go
@@ -247,12 +247,10 @@ func TestStreamStatusUnavailableInvalid(t *testing.T) {
}()
// sender should get "test unavailable" once, success second time.
err := tc.mustSendAndWait()
- require.Error(t, err)
- require.Contains(t, err.Error(), "test unavailable")
+ require.ErrorContains(t, err, "test unavailable")
err = tc.mustSendAndWait()
- require.Error(t, err)
- require.Contains(t, err.Error(), "test invalid")
+ require.ErrorContains(t, err, "test invalid")
err = tc.mustSendAndWait()
require.NoError(t, err)
@@ -282,8 +280,7 @@ func TestStreamStatusUnrecognized(t *testing.T) {
channel.recv <- statusUnrecognizedFor(batch.BatchId)
}()
err := tc.mustSendAndWait()
- require.Error(t, err)
- require.Contains(t, err.Error(), "test unrecognized")
+ require.ErrorContains(t, err, "test unrecognized")
// Note: do not cancel the context, the stream should be
// shutting down due to the error.
diff --git a/exporter/otelarrowexporter/metadata_test.go b/exporter/otelarrowexporter/metadata_test.go
index ce18b5f1dee2..e450f284fbc6 100644
--- a/exporter/otelarrowexporter/metadata_test.go
+++ b/exporter/otelarrowexporter/metadata_test.go
@@ -121,9 +121,8 @@ func TestDuplicateMetadataKeys(t *testing.T) {
cfg := createDefaultConfig().(*Config)
cfg.MetadataKeys = []string{"myTOKEN", "mytoken"}
err := cfg.Validate()
- require.Error(t, err)
- require.Contains(t, err.Error(), "duplicate")
- require.Contains(t, err.Error(), "mytoken")
+ require.ErrorContains(t, err, "duplicate")
+ require.ErrorContains(t, err, "mytoken")
}
func TestMetadataExporterCardinalityLimit(t *testing.T) {
@@ -196,7 +195,7 @@ func TestMetadataExporterCardinalityLimit(t *testing.T) {
err = exp.ConsumeTraces(ctx, td)
require.Error(t, err)
assert.True(t, consumererror.IsPermanent(err))
- assert.Contains(t, err.Error(), "too many")
+ assert.ErrorContains(t, err, "too many")
assert.Eventually(t, func() bool {
return rcv.requestCount.Load() == int32(cardLimit)
diff --git a/exporter/otelarrowexporter/otelarrow_test.go b/exporter/otelarrowexporter/otelarrow_test.go
index 4c73c153f342..4ae4c6b7d9b8 100644
--- a/exporter/otelarrowexporter/otelarrow_test.go
+++ b/exporter/otelarrowexporter/otelarrow_test.go
@@ -1133,8 +1133,7 @@ func TestSendArrowFailedTraces(t *testing.T) {
// Send two trace items.
td := testdata.GenerateTraces(2)
err = exp.ConsumeTraces(context.Background(), td)
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "test failed")
+ assert.ErrorContains(t, err, "test failed")
// Wait until it is received.
assert.Eventually(t, func() bool {
diff --git a/exporter/signalfxexporter/exporter_test.go b/exporter/signalfxexporter/exporter_test.go
index a2cb4b1e0a70..07dc89f29d9a 100644
--- a/exporter/signalfxexporter/exporter_test.go
+++ b/exporter/signalfxexporter/exporter_test.go
@@ -218,7 +218,7 @@ func TestConsumeMetrics(t *testing.T) {
assert.Error(t, err)
assert.True(t, consumererror.IsPermanent(err))
assert.True(t, strings.HasPrefix(err.Error(), tt.expectedErrorMsg))
- assert.Contains(t, err.Error(), "response content")
+ assert.ErrorContains(t, err, "response content")
return
}
@@ -1843,7 +1843,7 @@ func TestConsumeMixedMetrics(t *testing.T) {
assert.Error(t, err)
assert.True(t, consumererror.IsPermanent(err))
assert.True(t, strings.HasPrefix(err.Error(), tt.expectedErrorMsg))
- assert.Contains(t, err.Error(), "response content")
+ assert.ErrorContains(t, err, "response content")
return
}
diff --git a/exporter/signalfxexporter/factory_test.go b/exporter/signalfxexporter/factory_test.go
index e762f179ee4f..eac89f009426 100644
--- a/exporter/signalfxexporter/factory_test.go
+++ b/exporter/signalfxexporter/factory_test.go
@@ -600,7 +600,7 @@ func TestDefaultExcludes_not_translated(t *testing.T) {
require.NoError(t, err)
md := getMetrics(metrics)
- require.Equal(t, 69, md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().Len())
+ require.Equal(t, 68, md.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().Len())
dps := converter.MetricsToSignalFxV2(md)
require.Empty(t, dps)
}
diff --git a/exporter/signalfxexporter/internal/correlation/correlation_test.go b/exporter/signalfxexporter/internal/correlation/correlation_test.go
index dba4f06a0f34..da3e922d47fd 100644
--- a/exporter/signalfxexporter/internal/correlation/correlation_test.go
+++ b/exporter/signalfxexporter/internal/correlation/correlation_test.go
@@ -81,7 +81,7 @@ func TestTrackerStart(t *testing.T) {
if tt.wantErr {
require.Error(t, err)
if tt.errMsg != "" {
- require.Contains(t, err.Error(), tt.errMsg)
+ require.ErrorContains(t, err, tt.errMsg)
}
} else {
require.NoError(t, err)
diff --git a/exporter/signalfxexporter/internal/translation/default_metrics.go b/exporter/signalfxexporter/internal/translation/default_metrics.go
index d0d5fc71156b..14e94d853108 100644
--- a/exporter/signalfxexporter/internal/translation/default_metrics.go
+++ b/exporter/signalfxexporter/internal/translation/default_metrics.go
@@ -121,7 +121,7 @@ exclude_metrics:
- /^(?i:(container)|(k8s\.node)|(k8s\.pod))\.memory\.page_faults$/
- /^(?i:(container)|(k8s\.node)|(k8s\.pod))\.memory\.rss$/
- /^(?i:(k8s\.node)|(k8s\.pod))\.memory\.usage$/
- - /^(?i:(container)|(k8s\.node)|(k8s\.pod))\.memory\.working_set$/
+ - /^(?i:(k8s\.node)|(k8s\.pod))\.memory\.working_set$/
# matches (k8s.node|k8s.pod).filesystem...
- /^k8s\.(?i:(node)|(pod))\.filesystem\.available$/
diff --git a/exporter/signalfxexporter/testdata/json/non_default_metrics_otel_convention.json b/exporter/signalfxexporter/testdata/json/non_default_metrics_otel_convention.json
index e9ea9839db75..fc996acca3df 100644
--- a/exporter/signalfxexporter/testdata/json/non_default_metrics_otel_convention.json
+++ b/exporter/signalfxexporter/testdata/json/non_default_metrics_otel_convention.json
@@ -131,9 +131,6 @@
{
"container.memory.rss": null
},
- {
- "container.memory.working_set": null
- },
{
"k8s.pod.memory.available": null
},
diff --git a/exporter/splunkhecexporter/client_test.go b/exporter/splunkhecexporter/client_test.go
index 7dacd2f297bd..1812e0b97bd6 100644
--- a/exporter/splunkhecexporter/client_test.go
+++ b/exporter/splunkhecexporter/client_test.go
@@ -1608,9 +1608,9 @@ func Test_pushLogData_ShouldAddResponseTo400Error(t *testing.T) {
// Sending logs using the client.
err := splunkClient.pushLogData(context.Background(), logs)
require.True(t, consumererror.IsPermanent(err), "Expecting permanent error")
- require.Contains(t, err.Error(), "HTTP/0.0 400")
+ require.ErrorContains(t, err, "HTTP/0.0 400")
// The returned error should contain the response body responseBody.
- assert.Contains(t, err.Error(), responseBody)
+ assert.ErrorContains(t, err, responseBody)
// An HTTP client that returns some other status code other than 400 and response body responseBody.
httpClient, _ = newTestClient(500, responseBody)
@@ -1618,7 +1618,7 @@ func Test_pushLogData_ShouldAddResponseTo400Error(t *testing.T) {
// Sending logs using the client.
err = splunkClient.pushLogData(context.Background(), logs)
require.False(t, consumererror.IsPermanent(err), "Expecting non-permanent error")
- require.Contains(t, err.Error(), "HTTP 500")
+ require.ErrorContains(t, err, "HTTP 500")
// The returned error should not contain the response body responseBody.
assert.NotContains(t, err.Error(), responseBody)
}
@@ -1953,7 +1953,7 @@ func Test_pushLogData_Small_MaxContentLength(t *testing.T) {
require.Error(t, err)
assert.True(t, consumererror.IsPermanent(err))
- assert.Contains(t, err.Error(), "dropped log event")
+ assert.ErrorContains(t, err, "dropped log event")
}
}
diff --git a/extension/oauth2clientauthextension/extension_test.go b/extension/oauth2clientauthextension/extension_test.go
index c22561fb5c0e..efc49b09452a 100644
--- a/extension/oauth2clientauthextension/extension_test.go
+++ b/extension/oauth2clientauthextension/extension_test.go
@@ -82,8 +82,7 @@ func TestOAuthClientSettings(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
rc, err := newClientAuthenticator(test.settings, zap.NewNop())
if test.shouldError {
- assert.Error(t, err)
- assert.Contains(t, err.Error(), test.expectedError)
+ assert.ErrorContains(t, err, test.expectedError)
return
}
assert.NoError(t, err)
@@ -296,7 +295,7 @@ func TestFailContactingOAuth(t *testing.T) {
_, err = credential.GetRequestMetadata(context.Background())
assert.ErrorIs(t, err, errFailedToGetSecurityToken)
- assert.Contains(t, err.Error(), serverURL.String())
+ assert.ErrorContains(t, err, serverURL.String())
transport := http.DefaultTransport.(*http.Transport).Clone()
baseRoundTripper := (http.RoundTripper)(transport)
@@ -311,5 +310,5 @@ func TestFailContactingOAuth(t *testing.T) {
require.NoError(t, err)
_, err = client.Do(req)
assert.ErrorIs(t, err, errFailedToGetSecurityToken)
- assert.Contains(t, err.Error(), serverURL.String())
+ assert.ErrorContains(t, err, serverURL.String())
}
diff --git a/extension/observer/dockerobserver/config_test.go b/extension/observer/dockerobserver/config_test.go
index 98c806cef97b..a00c82925634 100644
--- a/extension/observer/dockerobserver/config_test.go
+++ b/extension/observer/dockerobserver/config_test.go
@@ -99,8 +99,7 @@ func TestApiVersionCustomError(t *testing.T) {
factory := NewFactory()
cfg := factory.CreateDefaultConfig()
err := sub.Unmarshal(cfg)
- require.Error(t, err)
- assert.Contains(t, err.Error(),
+ assert.ErrorContains(t, err,
`Hint: You may want to wrap the 'api_version' value in quotes (api_version: "1.40")`,
)
diff --git a/extension/observer/ecsobserver/task_test.go b/extension/observer/ecsobserver/task_test.go
index 2daea1bdb2a6..2085912b8655 100644
--- a/extension/observer/ecsobserver/task_test.go
+++ b/extension/observer/ecsobserver/task_test.go
@@ -90,7 +90,7 @@ func TestTask_PrivateIP(t *testing.T) {
assert.Equal(t, mode, errPINF.NetworkMode)
// doing contains on error message is not good, but this line increase test coverage from 93% to 98%
// not sure how the average coverage is calculated ...
- assert.Contains(t, err.Error(), mode)
+ assert.ErrorContains(t, err, mode)
}
})
}
@@ -185,7 +185,7 @@ func TestTask_MappedPort(t *testing.T) {
errMPNF := &errMappedPortNotFound{}
require.ErrorAs(t, err, &errMPNF)
assert.Equal(t, mode, errMPNF.NetworkMode)
- assert.Contains(t, err.Error(), mode) // for coverage
+ assert.ErrorContains(t, err, mode) // for coverage
}
})
}
diff --git a/internal/aws/proxy/conn_test.go b/internal/aws/proxy/conn_test.go
index 77b3ddc8d0c8..97584a897205 100644
--- a/internal/aws/proxy/conn_test.go
+++ b/internal/aws/proxy/conn_test.go
@@ -255,8 +255,7 @@ func TestLoadEnvConfigCreds(t *testing.T) {
assert.Equal(t, cases.Val, value, "Expect the credentials value to match")
_, err = newAWSSession("ROLEARN", "TEST", zap.NewNop())
- assert.Error(t, err, "expected error")
- assert.Contains(t, err.Error(), "unable to handle AWS error", "expected error message")
+ assert.ErrorContains(t, err, "unable to handle AWS error", "expected error message")
}
func TestGetProxyUrlProxyAddressNotValid(t *testing.T) {
@@ -339,8 +338,7 @@ func TestProxyServerTransportInvalidProxyAddr(t *testing.T) {
_, err := proxyServerTransport(&Config{
ProxyAddress: "invalid\n",
})
- assert.Error(t, err, "expected error")
- assert.Contains(t, err.Error(), "invalid control character in URL")
+ assert.ErrorContains(t, err, "invalid control character in URL")
}
func TestProxyServerTransportHappyCase(t *testing.T) {
diff --git a/internal/aws/proxy/server_test.go b/internal/aws/proxy/server_test.go
index 664684bb33f5..d65902f1ba31 100644
--- a/internal/aws/proxy/server_test.go
+++ b/internal/aws/proxy/server_test.go
@@ -207,7 +207,7 @@ func TestCantGetServiceEndpoint(t *testing.T) {
_, err := NewServer(cfg, logger)
assert.Error(t, err, "NewServer should fail")
- assert.Contains(t, err.Error(), "invalid region")
+ assert.ErrorContains(t, err, "invalid region")
}
func TestAWSEndpointInvalid(t *testing.T) {
@@ -222,7 +222,7 @@ func TestAWSEndpointInvalid(t *testing.T) {
_, err := NewServer(cfg, logger)
assert.Error(t, err, "NewServer should fail")
- assert.Contains(t, err.Error(), "unable to parse AWS service endpoint")
+ assert.ErrorContains(t, err, "unable to parse AWS service endpoint")
}
func TestCanCreateTransport(t *testing.T) {
@@ -237,7 +237,7 @@ func TestCanCreateTransport(t *testing.T) {
_, err := NewServer(cfg, logger)
assert.Error(t, err, "NewServer should fail")
- assert.Contains(t, err.Error(), "failed to parse proxy URL")
+ assert.ErrorContains(t, err, "failed to parse proxy URL")
}
func TestGetServiceEndpointInvalidAWSConfig(t *testing.T) {
diff --git a/internal/coreinternal/consumerretry/logs_test.go b/internal/coreinternal/consumerretry/logs_test.go
index c4e6321b638e..7193797b76e7 100644
--- a/internal/coreinternal/consumerretry/logs_test.go
+++ b/internal/coreinternal/consumerretry/logs_test.go
@@ -88,8 +88,7 @@ func TestConsumeLogs_ContextDeadline(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Millisecond)
defer cancel()
err := consumer.ConsumeLogs(ctx, testdata.GenerateLogsTwoLogRecordsSameResource())
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "context is cancelled or timed out retry later")
+ assert.ErrorContains(t, err, "context is cancelled or timed out retry later")
}
func TestConsumeLogs_PartialRetry(t *testing.T) {
diff --git a/internal/coreinternal/timeutils/parser_test.go b/internal/coreinternal/timeutils/parser_test.go
index f83133c18589..80d572ef2c70 100644
--- a/internal/coreinternal/timeutils/parser_test.go
+++ b/internal/coreinternal/timeutils/parser_test.go
@@ -13,8 +13,7 @@ import (
func TestParseGoTimeBadLocation(t *testing.T) {
_, err := ParseGotime(time.RFC822, "02 Jan 06 15:04 BST", time.UTC)
- require.Error(t, err)
- require.Contains(t, err.Error(), "failed to load location BST")
+ require.ErrorContains(t, err, "failed to load location BST")
}
func Test_setTimestampYear(t *testing.T) {
diff --git a/internal/docker/docker_test.go b/internal/docker/docker_test.go
index 0166812fb1ab..5589bdcef49d 100644
--- a/internal/docker/docker_test.go
+++ b/internal/docker/docker_test.go
@@ -62,8 +62,7 @@ func TestWatchingTimeouts(t *testing.T) {
shouldHaveTaken := time.Now().Add(100 * time.Millisecond).UnixNano()
err = cli.LoadContainerList(context.Background())
- require.Error(t, err)
- assert.Contains(t, err.Error(), expectedError)
+ assert.ErrorContains(t, err, expectedError)
observed, logs := observer.New(zapcore.WarnLevel)
cli, err = NewDockerClient(config, zap.New(observed))
assert.NotNil(t, cli)
@@ -120,9 +119,8 @@ func TestFetchingTimeouts(t *testing.T) {
)
assert.Nil(t, statsJSON)
- require.Error(t, err)
- assert.Contains(t, err.Error(), expectedError)
+ assert.ErrorContains(t, err, expectedError)
assert.Len(t, logs.All(), 1)
for _, l := range logs.All() {
diff --git a/internal/kafka/authentication_test.go b/internal/kafka/authentication_test.go
index 6571817d5495..1f797e3019b7 100644
--- a/internal/kafka/authentication_test.go
+++ b/internal/kafka/authentication_test.go
@@ -155,8 +155,7 @@ func TestAuthentication(t *testing.T) {
config := &sarama.Config{}
err := ConfigureAuthentication(test.auth, config)
if test.err != "" {
- require.Error(t, err)
- assert.Contains(t, err.Error(), test.err)
+ assert.ErrorContains(t, err, test.err)
} else {
// equalizes SCRAMClientGeneratorFunc to do assertion with the same reference.
config.Net.SASL.SCRAMClientGeneratorFunc = test.saramaConfig.Net.SASL.SCRAMClientGeneratorFunc
diff --git a/internal/sqlquery/row_scanner.go b/internal/sqlquery/row_scanner.go
index bfda8aab270e..5366f808160c 100644
--- a/internal/sqlquery/row_scanner.go
+++ b/internal/sqlquery/row_scanner.go
@@ -32,7 +32,7 @@ func newRowScanner(colTypes []colType) *rowScanner {
}
format := "%v"
if t, isTime := v.(time.Time); isTime {
- return t.Format(time.RFC3339), nil
+ return t.Format(time.RFC3339Nano), nil
}
if reflect.TypeOf(v).Kind() == reflect.Slice {
// The Postgres driver returns a []uint8 (ascii string) for decimal and numeric types,
diff --git a/pkg/ottl/e2e/e2e_test.go b/pkg/ottl/e2e/e2e_test.go
index b908319382a4..d1294b22bd96 100644
--- a/pkg/ottl/e2e/e2e_test.go
+++ b/pkg/ottl/e2e/e2e_test.go
@@ -420,6 +420,12 @@ func Test_e2e_converters(t *testing.T) {
tCtx.GetLogRecord().Attributes().PutDouble("test", 1.5)
},
},
+ {
+ statement: `set(attributes["test"], InsertXML("<a/>", "/a", "<b/>"))`,
+ want: func(tCtx ottllog.TransformContext) {
+ tCtx.GetLogRecord().Attributes().PutStr("test", `<a><b></b></a>`)
+ },
+ },
{
statement: `set(attributes["test"], Int(1.0))`,
want: func(tCtx ottllog.TransformContext) {
diff --git a/pkg/ottl/ottlfuncs/README.md b/pkg/ottl/ottlfuncs/README.md
index a4f0281c4d2c..ab36043ada24 100644
--- a/pkg/ottl/ottlfuncs/README.md
+++ b/pkg/ottl/ottlfuncs/README.md
@@ -414,6 +414,8 @@ Available Converters:
- [Concat](#concat)
- [ConvertCase](#convertcase)
- [Day](#day)
+- [Double](#double)
+- [Duration](#duration)
- [ExtractPatterns](#extractpatterns)
- [ExtractGrokPatterns](#extractgrokpatterns)
- [FNV](#fnv)
@@ -422,8 +424,7 @@ Available Converters:
- [Hex](#hex)
- [Hour](#hour)
- [Hours](#hours)
-- [Double](#double)
-- [Duration](#duration)
+- [InsertXML](#insertxml)
- [Int](#int)
- [IsBool](#isbool)
- [IsDouble](#isdouble)
@@ -829,6 +830,35 @@ Examples:
- `Hours(Duration("1h"))`
+### InsertXML
+
+`InsertXML(target, xpath, value)`
+
+The `InsertXML` Converter returns an edited version of an XML string with child elements added to selected elements.
+
+`target` is a Getter that returns a string. This string should be in XML format and represents the document which will
+be modified. If `target` is nil, not a string, or not valid XML, `InsertXML` will return an error.
+
+`xpath` is a string that specifies an [XPath](https://www.w3.org/TR/1999/REC-xpath-19991116/) expression that
+selects one or more elements.
+
+`value` is a Getter that returns a string. This string should be in XML format and represents the document which will
+be inserted into `target`. If `value` is nil, not a string, or not valid XML, `InsertXML` will return an error.
+
+Examples:
+
+Add an element "foo" to the root of the document
+
+- `InsertXML(body, "/", "<foo/>")`
+
+Add an element "bar" to any element called "foo"
+
+- `InsertXML(body, "//foo", "<bar/>")`
+
+Fetch and insert an xml document into another
+
+- `InsertXML(body, "/subdoc", attributes["subdoc"])`
+
### Int
`Int(value)`
diff --git a/pkg/ottl/ottlfuncs/func_insert_xml.go b/pkg/ottl/ottlfuncs/func_insert_xml.go
new file mode 100644
index 000000000000..778b16938a07
--- /dev/null
+++ b/pkg/ottl/ottlfuncs/func_insert_xml.go
@@ -0,0 +1,75 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/antchfx/xmlquery"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+)
+
+type InsertXMLArguments[K any] struct {
+ Target ottl.StringGetter[K]
+ XPath string
+ SubDocument ottl.StringGetter[K]
+}
+
+func NewInsertXMLFactory[K any]() ottl.Factory[K] {
+ return ottl.NewFactory("InsertXML", &InsertXMLArguments[K]{}, createInsertXMLFunction[K])
+}
+
+func createInsertXMLFunction[K any](_ ottl.FunctionContext, oArgs ottl.Arguments) (ottl.ExprFunc[K], error) {
+ args, ok := oArgs.(*InsertXMLArguments[K])
+
+ if !ok {
+ return nil, errors.New("InsertXML args must be of type *InsertXMLArguments[K]")
+ }
+
+ if err := validateXPath(args.XPath); err != nil {
+ return nil, err
+ }
+
+ return insertXML(args.Target, args.XPath, args.SubDocument), nil
+}
+
+// insertXML returns a XML formatted string that is a result of inserting another XML document into
+// the content of each selected target element.
+func insertXML[K any](target ottl.StringGetter[K], xPath string, subGetter ottl.StringGetter[K]) ottl.ExprFunc[K] {
+ return func(ctx context.Context, tCtx K) (any, error) {
+ var doc *xmlquery.Node
+ if targetVal, err := target.Get(ctx, tCtx); err != nil {
+ return nil, err
+ } else if doc, err = parseNodesXML(targetVal); err != nil {
+ return nil, err
+ }
+
+ var subDoc *xmlquery.Node
+ if subDocVal, err := subGetter.Get(ctx, tCtx); err != nil {
+ return nil, err
+ } else if subDoc, err = parseNodesXML(subDocVal); err != nil {
+ return nil, err
+ }
+
+ nodes, errs := xmlquery.QueryAll(doc, xPath)
+ for _, n := range nodes {
+ switch n.Type {
+ case xmlquery.ElementNode, xmlquery.DocumentNode:
+ var nextSibling *xmlquery.Node
+ for c := subDoc.FirstChild; c != nil; c = nextSibling {
+ // AddChild updates c.NextSibling but not subDoc.FirstChild
+ // so we need to get the handle to it prior to the update.
+ nextSibling = c.NextSibling
+ xmlquery.AddChild(n, c)
+ }
+ default:
+ errs = errors.Join(errs, fmt.Errorf("InsertXML XPath selected non-element: %q", n.Data))
+ }
+ }
+ return doc.OutputXML(false), errs
+ }
+}
diff --git a/pkg/ottl/ottlfuncs/func_insert_xml_test.go b/pkg/ottl/ottlfuncs/func_insert_xml_test.go
new file mode 100644
index 000000000000..32750d4c8feb
--- /dev/null
+++ b/pkg/ottl/ottlfuncs/func_insert_xml_test.go
@@ -0,0 +1,185 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package ottlfuncs // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl/ottlfuncs"
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl"
+)
+
+func Test_InsertXML(t *testing.T) {
+ tests := []struct {
+ name string
+ document string
+ xPath string
+ subdoc string
+ want string
+ expectErr string
+ }{
+ {
+ name: "add single element",
+ document: ``,
+ xPath: "/a",
+ subdoc: ``,
+ want: ``,
+ },
+ {
+ name: "add single element to multiple matches",
+ document: ``,
+ xPath: "/a",
+ subdoc: ``,
+ want: ``,
+ },
+ {
+ name: "add single element at multiple levels",
+ document: ``,
+ xPath: "//a",
+ subdoc: ``,
+ want: ``,
+ },
+ {
+ name: "add multiple elements at root",
+ document: ``,
+ xPath: "/",
+ subdoc: ``,
+ want: ``,
+ },
+ {
+ name: "add multiple elements to other element",
+ document: ``,
+ xPath: "/a",
+ subdoc: ``,
+ want: ``,
+ },
+ {
+ name: "add multiple elements to multiple elements",
+ document: ``,
+ xPath: "/a",
+ subdoc: ``,
+ want: ``,
+ },
+ {
+ name: "add multiple elements at multiple levels",
+ document: ``,
+ xPath: "//a",
+ subdoc: ``,
+ want: ``,
+ },
+ {
+ name: "add rich doc",
+ document: ``,
+ xPath: "/a",
+ subdoc: `text1`,
+ want: `text1`,
+ },
+ {
+ name: "add root element to empty document",
+ document: ``,
+ xPath: "/",
+ subdoc: ``,
+ want: ``,
+ },
+ {
+ name: "add root element to non-empty document",
+ document: ``,
+ xPath: "/",
+ subdoc: ``,
+ want: ``,
+ },
+ {
+ name: "err on attribute",
+ document: ``,
+ xPath: "/a/@foo",
+ subdoc: "",
+ want: ``,
+ expectErr: `InsertXML XPath selected non-element: "foo"`,
+ },
+ {
+ name: "err on text content",
+ document: `foo`,
+ xPath: "/a/text()",
+ subdoc: "",
+ want: `foo`,
+ expectErr: `InsertXML XPath selected non-element: "foo"`,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ f := NewInsertXMLFactory[any]()
+ exprFunc, err := f.CreateFunction(
+ ottl.FunctionContext{},
+ &InsertXMLArguments[any]{
+ Target: ottl.StandardStringGetter[any]{
+ Getter: func(_ context.Context, _ any) (any, error) {
+ return tt.document, nil
+ },
+ },
+ XPath: tt.xPath,
+ SubDocument: ottl.StandardStringGetter[any]{
+ Getter: func(_ context.Context, _ any) (any, error) {
+ return tt.subdoc, nil
+ },
+ },
+ })
+ assert.NoError(t, err)
+
+ result, err := exprFunc(context.Background(), nil)
+ if tt.expectErr == "" {
+ assert.NoError(t, err)
+ } else {
+ assert.EqualError(t, err, tt.expectErr)
+ }
+ assert.Equal(t, tt.want, result)
+ })
+ }
+}
+
+func TestCreateInsertXMLFunc(t *testing.T) {
+ factory := NewInsertXMLFactory[any]()
+ fCtx := ottl.FunctionContext{}
+
+ // Invalid arg type
+ exprFunc, err := factory.CreateFunction(fCtx, nil)
+ assert.Error(t, err)
+ assert.Nil(t, exprFunc)
+
+ // Invalid XPath should error on function creation
+ exprFunc, err = factory.CreateFunction(
+ fCtx, &InsertXMLArguments[any]{
+ XPath: "!",
+ })
+ assert.Error(t, err)
+ assert.Nil(t, exprFunc)
+
+ // Invalid XML target should error on function execution
+ exprFunc, err = factory.CreateFunction(
+ fCtx, &InsertXMLArguments[any]{
+ Target: invalidXMLGetter(),
+ XPath: "/",
+ })
+ assert.NoError(t, err)
+ assert.NotNil(t, exprFunc)
+ _, err = exprFunc(context.Background(), nil)
+ assert.Error(t, err)
+
+ // Invalid XML subdoc should error on function execution
+ exprFunc, err = factory.CreateFunction(
+ fCtx, &InsertXMLArguments[any]{
+ Target: ottl.StandardStringGetter[any]{
+ Getter: func(_ context.Context, _ any) (any, error) {
+ return "", nil
+ },
+ },
+ XPath: "/",
+ SubDocument: invalidXMLGetter(),
+ })
+ assert.NoError(t, err)
+ assert.NotNil(t, exprFunc)
+ _, err = exprFunc(context.Background(), nil)
+ assert.Error(t, err)
+}
diff --git a/pkg/ottl/ottlfuncs/func_replace_all_patterns_test.go b/pkg/ottl/ottlfuncs/func_replace_all_patterns_test.go
index 534bd80f2c07..2d2be602ca00 100644
--- a/pkg/ottl/ottlfuncs/func_replace_all_patterns_test.go
+++ b/pkg/ottl/ottlfuncs/func_replace_all_patterns_test.go
@@ -627,5 +627,5 @@ func Test_replaceAllPatterns_invalid_model(t *testing.T) {
invalidMode := "invalid"
exprFunc, err := replaceAllPatterns[any](target, invalidMode, "regex", replacement, function, replacementFormat)
assert.Nil(t, exprFunc)
- assert.Contains(t, err.Error(), "invalid mode")
+ assert.ErrorContains(t, err, "invalid mode")
}
diff --git a/pkg/ottl/ottlfuncs/functions.go b/pkg/ottl/ottlfuncs/functions.go
index fc61975c6a09..99bcd1ad3b8f 100644
--- a/pkg/ottl/ottlfuncs/functions.go
+++ b/pkg/ottl/ottlfuncs/functions.go
@@ -49,6 +49,7 @@ func converters[K any]() []ottl.Factory[K] {
NewGetXMLFactory[K](),
NewHourFactory[K](),
NewHoursFactory[K](),
+ NewInsertXMLFactory[K](),
NewIntFactory[K](),
NewIsBoolFactory[K](),
NewIsDoubleFactory[K](),
diff --git a/pkg/sampling/oteltracestate_test.go b/pkg/sampling/oteltracestate_test.go
index 6763e6d9e177..ba96f054b70b 100644
--- a/pkg/sampling/oteltracestate_test.go
+++ b/pkg/sampling/oteltracestate_test.go
@@ -87,7 +87,7 @@ func TestOpenTelemetryTraceStateRValuePValue(t *testing.T) {
require.Equal(t, "", otts.RValue())
// The error is oblivious to the old r-value, but that's ok.
- require.Contains(t, err.Error(), "14 hex digits")
+ require.ErrorContains(t, err, "14 hex digits")
require.Equal(t, []KV{{"p", "2"}}, otts.ExtraValues())
diff --git a/pkg/stanza/entry/attribute_field_test.go b/pkg/stanza/entry/attribute_field_test.go
index ade5c61bd999..ad55479d4279 100644
--- a/pkg/stanza/entry/attribute_field_test.go
+++ b/pkg/stanza/entry/attribute_field_test.go
@@ -460,13 +460,11 @@ func TestAttributeFieldUnmarshalFailure(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
var fy AttributeField
err := yaml.UnmarshalStrict(tc.invalid, &fy)
- require.Error(t, err)
- require.Contains(t, err.Error(), tc.expectedErr)
+ require.ErrorContains(t, err, tc.expectedErr)
var fj AttributeField
err = json.Unmarshal(tc.invalid, &fj)
- require.Error(t, err)
- require.Contains(t, err.Error(), tc.expectedErr)
+ require.ErrorContains(t, err, tc.expectedErr)
})
}
}
diff --git a/pkg/stanza/entry/body_field_test.go b/pkg/stanza/entry/body_field_test.go
index 66100bb33adf..48238eda3a25 100644
--- a/pkg/stanza/entry/body_field_test.go
+++ b/pkg/stanza/entry/body_field_test.go
@@ -386,13 +386,11 @@ func TestBodyFieldUnmarshalFailure(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
var fy BodyField
err := yaml.UnmarshalStrict(tc.invalid, &fy)
- require.Error(t, err)
- require.Contains(t, err.Error(), tc.expectedErr)
+ require.ErrorContains(t, err, tc.expectedErr)
var fj BodyField
err = json.Unmarshal(tc.invalid, &fj)
- require.Error(t, err)
- require.Contains(t, err.Error(), tc.expectedErr)
+ require.ErrorContains(t, err, tc.expectedErr)
})
}
}
diff --git a/pkg/stanza/entry/entry_test.go b/pkg/stanza/entry/entry_test.go
index 1486ad1ace75..8d944156d947 100644
--- a/pkg/stanza/entry/entry_test.go
+++ b/pkg/stanza/entry/entry_test.go
@@ -275,8 +275,7 @@ func TestReadToInterfaceMapWithMissingField(t *testing.T) {
field := NewAttributeField("label")
dest := map[string]any{}
err := entry.readToInterfaceMap(field, &dest)
- require.Error(t, err)
- require.Contains(t, err.Error(), "can not be read as a map[string]any")
+ require.ErrorContains(t, err, "can not be read as a map[string]any")
}
func TestReadToStringMapWithMissingField(t *testing.T) {
@@ -284,8 +283,7 @@ func TestReadToStringMapWithMissingField(t *testing.T) {
field := NewAttributeField("label")
dest := map[string]string{}
err := entry.readToStringMap(field, &dest)
- require.Error(t, err)
- require.Contains(t, err.Error(), "can not be read as a map[string]string")
+ require.ErrorContains(t, err, "can not be read as a map[string]string")
}
func TestReadToInterfaceMissingField(t *testing.T) {
@@ -293,8 +291,7 @@ func TestReadToInterfaceMissingField(t *testing.T) {
field := NewAttributeField("label")
var dest any
err := entry.readToInterface(field, &dest)
- require.Error(t, err)
- require.Contains(t, err.Error(), "can not be read as a any")
+ require.ErrorContains(t, err, "can not be read as a any")
}
func TestDefaultTimestamps(t *testing.T) {
diff --git a/pkg/stanza/entry/field_test.go b/pkg/stanza/entry/field_test.go
index f07c4c3b45de..617b812790dc 100644
--- a/pkg/stanza/entry/field_test.go
+++ b/pkg/stanza/entry/field_test.go
@@ -143,13 +143,11 @@ func TestFieldUnmarshalJSON(t *testing.T) {
switch {
case tc.expectedErrRootable != "":
- require.Error(t, err)
- require.Contains(t, err.Error(), tc.expectedErr)
+ require.ErrorContains(t, err, tc.expectedErr)
require.Error(t, errRootable)
require.Contains(t, errRootable.Error(), tc.expectedErrRootable)
case tc.expectedErr != "":
- require.Error(t, err)
- require.Contains(t, err.Error(), tc.expectedErr)
+ require.ErrorContains(t, err, tc.expectedErr)
require.NoError(t, errRootable)
require.Equal(t, tc.expected, rootableField.Field)
default:
@@ -233,8 +231,7 @@ func TestFieldUnmarshalYAMLFailure(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
var f Field
err := yaml.UnmarshalStrict(tc.input, &f)
- require.Error(t, err)
- require.Contains(t, err.Error(), tc.expected)
+ require.ErrorContains(t, err, tc.expected)
})
}
}
@@ -284,8 +281,7 @@ func TestFromJSONDot(t *testing.T) {
func TestFieldFromStringInvalidSplit(t *testing.T) {
_, err := NewField("resource[test]")
- require.Error(t, err)
- require.Contains(t, err.Error(), "splitting field")
+ require.ErrorContains(t, err, "splitting field")
}
func TestFieldFromStringWithResource(t *testing.T) {
diff --git a/pkg/stanza/entry/resource_field_test.go b/pkg/stanza/entry/resource_field_test.go
index 6dbe6c499699..75a4f95c2a00 100644
--- a/pkg/stanza/entry/resource_field_test.go
+++ b/pkg/stanza/entry/resource_field_test.go
@@ -460,13 +460,11 @@ func TestResourceFieldUnmarshalFailure(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
var fy ResourceField
err := yaml.UnmarshalStrict(tc.invalid, &fy)
- require.Error(t, err)
- require.Contains(t, err.Error(), tc.expectedErr)
+ require.ErrorContains(t, err, tc.expectedErr)
var fj ResourceField
err = json.Unmarshal(tc.invalid, &fj)
- require.Error(t, err)
- require.Contains(t, err.Error(), tc.expectedErr)
+ require.ErrorContains(t, err, tc.expectedErr)
})
}
}
diff --git a/pkg/stanza/operator/config_test.go b/pkg/stanza/operator/config_test.go
index 14375683a424..8d3899634f74 100644
--- a/pkg/stanza/operator/config_test.go
+++ b/pkg/stanza/operator/config_test.go
@@ -43,24 +43,21 @@ func TestUnmarshalJSONErrors(t *testing.T) {
raw := `{}}`
cfg := &Config{}
err := cfg.UnmarshalJSON([]byte(raw))
- require.Error(t, err)
- require.Contains(t, err.Error(), "invalid")
+ require.ErrorContains(t, err, "invalid")
})
t.Run("MissingType", func(t *testing.T) {
raw := `{"id":"stdout"}`
var cfg Config
err := json.Unmarshal([]byte(raw), &cfg)
- require.Error(t, err)
- require.Contains(t, err.Error(), "missing required field")
+ require.ErrorContains(t, err, "missing required field")
})
t.Run("UnknownType", func(t *testing.T) {
raw := `{"id":"stdout","type":"nonexist"}`
var cfg Config
err := json.Unmarshal([]byte(raw), &cfg)
- require.Error(t, err)
- require.Contains(t, err.Error(), "unsupported type")
+ require.ErrorContains(t, err, "unsupported type")
})
t.Run("TypeSpecificUnmarshal", func(t *testing.T) {
@@ -68,8 +65,7 @@ func TestUnmarshalJSONErrors(t *testing.T) {
Register("operator", func() Builder { return &FakeBuilder{} })
var cfg Config
err := json.Unmarshal([]byte(raw), &cfg)
- require.Error(t, err)
- require.Contains(t, err.Error(), "cannot unmarshal string into")
+ require.ErrorContains(t, err, "cannot unmarshal string into")
})
}
@@ -87,32 +83,28 @@ func TestUnmarshalYAMLErrors(t *testing.T) {
raw := `-- - \n||\\`
var cfg Config
err := yaml.Unmarshal([]byte(raw), &cfg)
- require.Error(t, err)
- require.Contains(t, err.Error(), "failed ")
+ require.ErrorContains(t, err, "failed ")
})
t.Run("MissingType", func(t *testing.T) {
raw := "id: operator\n"
var cfg Config
err := yaml.Unmarshal([]byte(raw), &cfg)
- require.Error(t, err)
- require.Contains(t, err.Error(), "missing required field")
+ require.ErrorContains(t, err, "missing required field")
})
t.Run("NonStringType", func(t *testing.T) {
raw := "id: operator\ntype: 123"
var cfg Config
err := yaml.Unmarshal([]byte(raw), &cfg)
- require.Error(t, err)
- require.Contains(t, err.Error(), "non-string type")
+ require.ErrorContains(t, err, "non-string type")
})
t.Run("UnknownType", func(t *testing.T) {
raw := "id: operator\ntype: unknown\n"
var cfg Config
err := yaml.Unmarshal([]byte(raw), &cfg)
- require.Error(t, err)
- require.Contains(t, err.Error(), "unsupported type")
+ require.ErrorContains(t, err, "unsupported type")
})
t.Run("TypeSpecificUnmarshal", func(t *testing.T) {
@@ -120,7 +112,6 @@ func TestUnmarshalYAMLErrors(t *testing.T) {
Register("operator", func() Builder { return &FakeBuilder{} })
var cfg Config
err := yaml.Unmarshal([]byte(raw), &cfg)
- require.Error(t, err)
- require.Contains(t, err.Error(), "cannot unmarshal !!str")
+ require.ErrorContains(t, err, "cannot unmarshal !!str")
})
}
diff --git a/pkg/stanza/operator/helper/input_test.go b/pkg/stanza/operator/helper/input_test.go
index 8d3e16a704c7..cfa58c304936 100644
--- a/pkg/stanza/operator/helper/input_test.go
+++ b/pkg/stanza/operator/helper/input_test.go
@@ -23,8 +23,7 @@ func TestInputConfigMissingBase(t *testing.T) {
set := componenttest.NewNopTelemetrySettings()
_, err := config.Build(set)
- require.Error(t, err)
- require.Contains(t, err.Error(), "missing required `type` field.")
+ require.ErrorContains(t, err, "missing required `type` field.")
}
func TestInputConfigMissingOutput(t *testing.T) {
diff --git a/pkg/stanza/operator/helper/operator_test.go b/pkg/stanza/operator/helper/operator_test.go
index 99f7f6b8eb58..41577e1cfdc6 100644
--- a/pkg/stanza/operator/helper/operator_test.go
+++ b/pkg/stanza/operator/helper/operator_test.go
@@ -46,8 +46,7 @@ func TestBasicConfigBuildWithoutType(t *testing.T) {
set := componenttest.NewNopTelemetrySettings()
_, err := config.Build(set)
- require.Error(t, err)
- require.Contains(t, err.Error(), "missing required `type` field.")
+ require.ErrorContains(t, err, "missing required `type` field.")
}
func TestBasicConfigBuildMissingLogger(t *testing.T) {
@@ -59,8 +58,7 @@ func TestBasicConfigBuildMissingLogger(t *testing.T) {
set := componenttest.NewNopTelemetrySettings()
set.Logger = nil
_, err := config.Build(set)
- require.Error(t, err)
- require.Contains(t, err.Error(), "operator build context is missing a logger.")
+ require.ErrorContains(t, err, "operator build context is missing a logger.")
}
func TestBasicConfigBuildValid(t *testing.T) {
diff --git a/pkg/stanza/operator/helper/output_test.go b/pkg/stanza/operator/helper/output_test.go
index 2dc62b929fb2..b55e33abd4ca 100644
--- a/pkg/stanza/operator/helper/output_test.go
+++ b/pkg/stanza/operator/helper/output_test.go
@@ -17,8 +17,7 @@ func TestOutputConfigMissingBase(t *testing.T) {
config := OutputConfig{}
set := componenttest.NewNopTelemetrySettings()
_, err := config.Build(set)
- require.Error(t, err)
- require.Contains(t, err.Error(), "missing required `type` field.")
+ require.ErrorContains(t, err, "missing required `type` field.")
}
func TestOutputConfigBuildValid(t *testing.T) {
@@ -84,6 +83,5 @@ func TestOutputOperatorSetOutputs(t *testing.T) {
}
err := output.SetOutputs([]operator.Operator{})
- require.Error(t, err)
- require.Contains(t, err.Error(), "Operator can not output")
+ require.ErrorContains(t, err, "Operator can not output")
}
diff --git a/pkg/stanza/operator/helper/parser_test.go b/pkg/stanza/operator/helper/parser_test.go
index e7d635377d1e..36a5fbb37f91 100644
--- a/pkg/stanza/operator/helper/parser_test.go
+++ b/pkg/stanza/operator/helper/parser_test.go
@@ -24,8 +24,7 @@ func TestParserConfigMissingBase(t *testing.T) {
config := ParserConfig{}
set := componenttest.NewNopTelemetrySettings()
_, err := config.Build(set)
- require.Error(t, err)
- require.Contains(t, err.Error(), "missing required `type` field.")
+ require.ErrorContains(t, err, "missing required `type` field.")
}
func TestParserConfigInvalidTimeParser(t *testing.T) {
@@ -39,8 +38,7 @@ func TestParserConfigInvalidTimeParser(t *testing.T) {
set := componenttest.NewNopTelemetrySettings()
_, err := cfg.Build(set)
- require.Error(t, err)
- require.Contains(t, err.Error(), "missing required configuration parameter `layout`")
+ require.ErrorContains(t, err, "missing required configuration parameter `layout`")
}
func TestParserConfigBodyCollision(t *testing.T) {
@@ -52,8 +50,7 @@ func TestParserConfigBodyCollision(t *testing.T) {
set := componenttest.NewNopTelemetrySettings()
_, err := cfg.Build(set)
- require.Error(t, err)
- require.Contains(t, err.Error(), "`parse_to: body` not allowed when `body` is configured")
+ require.ErrorContains(t, err, "`parse_to: body` not allowed when `body` is configured")
}
func TestParserConfigBuildValid(t *testing.T) {
@@ -123,8 +120,7 @@ func TestParserMissingField(t *testing.T) {
ctx := context.Background()
testEntry := entry.New()
err := parser.ProcessWith(ctx, testEntry, parse)
- require.Error(t, err)
- require.Contains(t, err.Error(), "Entry is missing the expected parse_from field.")
+ require.ErrorContains(t, err, "Entry is missing the expected parse_from field.")
}
func TestParserInvalidParseDrop(t *testing.T) {
@@ -142,8 +138,7 @@ func TestParserInvalidParseDrop(t *testing.T) {
ctx := context.Background()
testEntry := entry.New()
err := parser.ProcessWith(ctx, testEntry, parse)
- require.Error(t, err)
- require.Contains(t, err.Error(), "parse failure")
+ require.ErrorContains(t, err, "parse failure")
fakeOut.ExpectNoEntry(t, 100*time.Millisecond)
}
@@ -162,8 +157,7 @@ func TestParserInvalidParseSend(t *testing.T) {
ctx := context.Background()
testEntry := entry.New()
err := parser.ProcessWith(ctx, testEntry, parse)
- require.Error(t, err)
- require.Contains(t, err.Error(), "parse failure")
+ require.ErrorContains(t, err, "parse failure")
fakeOut.ExpectEntry(t, testEntry)
fakeOut.ExpectNoEntry(t, 100*time.Millisecond)
}
@@ -190,8 +184,7 @@ func TestParserInvalidTimeParseDrop(t *testing.T) {
ctx := context.Background()
testEntry := entry.New()
err := parser.ProcessWith(ctx, testEntry, parse)
- require.Error(t, err)
- require.Contains(t, err.Error(), "time parser: log entry does not have the expected parse_from field")
+ require.ErrorContains(t, err, "time parser: log entry does not have the expected parse_from field")
fakeOut.ExpectNoEntry(t, 100*time.Millisecond)
}
@@ -217,8 +210,7 @@ func TestParserInvalidTimeParseSend(t *testing.T) {
ctx := context.Background()
testEntry := entry.New()
err := parser.ProcessWith(ctx, testEntry, parse)
- require.Error(t, err)
- require.Contains(t, err.Error(), "time parser: log entry does not have the expected parse_from field")
+ require.ErrorContains(t, err, "time parser: log entry does not have the expected parse_from field")
fakeOut.ExpectEntry(t, testEntry)
fakeOut.ExpectNoEntry(t, 100*time.Millisecond)
}
@@ -241,8 +233,7 @@ func TestParserInvalidSeverityParseDrop(t *testing.T) {
ctx := context.Background()
testEntry := entry.New()
err := parser.ProcessWith(ctx, testEntry, parse)
- require.Error(t, err)
- require.Contains(t, err.Error(), "severity parser: log entry does not have the expected parse_from field")
+ require.ErrorContains(t, err, "severity parser: log entry does not have the expected parse_from field")
fakeOut.ExpectNoEntry(t, 100*time.Millisecond)
}
@@ -284,8 +275,7 @@ func TestParserInvalidTimeValidSeverityParse(t *testing.T) {
require.NoError(t, err)
err = parser.ProcessWith(ctx, testEntry, parse)
- require.Error(t, err)
- require.Contains(t, err.Error(), "time parser: log entry does not have the expected parse_from field")
+ require.ErrorContains(t, err, "time parser: log entry does not have the expected parse_from field")
// But, this should have been set anyways
require.Equal(t, entry.Info, testEntry.Severity)
@@ -339,8 +329,7 @@ func TestParserValidTimeInvalidSeverityParse(t *testing.T) {
require.NoError(t, err)
err = parser.ProcessWith(ctx, testEntry, parse)
- require.Error(t, err)
- require.Contains(t, err.Error(), "severity parser: log entry does not have the expected parse_from field")
+ require.ErrorContains(t, err, "severity parser: log entry does not have the expected parse_from field")
require.Equal(t, expected, testEntry.Timestamp)
}
diff --git a/pkg/stanza/operator/helper/time_test.go b/pkg/stanza/operator/helper/time_test.go
index 51b4dec7979a..0d06b456e1a9 100644
--- a/pkg/stanza/operator/helper/time_test.go
+++ b/pkg/stanza/operator/helper/time_test.go
@@ -571,8 +571,7 @@ func TestSetInvalidLocation(t *testing.T) {
tp := NewTimeParser()
tp.Location = "not_a_location"
err := tp.setLocation()
- require.Error(t, err)
- require.Contains(t, err.Error(), "failed to load location "+"not_a_location")
+ require.ErrorContains(t, err, "failed to load location "+"not_a_location")
}
func TestUnmarshal(t *testing.T) {
diff --git a/pkg/stanza/operator/helper/transformer_test.go b/pkg/stanza/operator/helper/transformer_test.go
index 9e1ed76da5cd..9bacf0fd6e7f 100644
--- a/pkg/stanza/operator/helper/transformer_test.go
+++ b/pkg/stanza/operator/helper/transformer_test.go
@@ -26,8 +26,7 @@ func TestTransformerConfigMissingBase(t *testing.T) {
cfg.OutputIDs = []string{"test-output"}
set := componenttest.NewNopTelemetrySettings()
_, err := cfg.Build(set)
- require.Error(t, err)
- require.Contains(t, err.Error(), "missing required `type` field.")
+ require.ErrorContains(t, err, "missing required `type` field.")
}
func TestTransformerConfigMissingOutput(t *testing.T) {
@@ -58,8 +57,7 @@ func TestTransformerOnErrorInvalid(t *testing.T) {
cfg.OnError = "invalid"
set := componenttest.NewNopTelemetrySettings()
_, err := cfg.Build(set)
- require.Error(t, err)
- require.Contains(t, err.Error(), "operator config has an invalid `on_error` field.")
+ require.ErrorContains(t, err, "operator config has an invalid `on_error` field.")
}
func TestTransformerOperatorCanProcess(t *testing.T) {
diff --git a/pkg/stanza/operator/helper/writer_test.go b/pkg/stanza/operator/helper/writer_test.go
index 5505984920b5..385c536cf080 100644
--- a/pkg/stanza/operator/helper/writer_test.go
+++ b/pkg/stanza/operator/helper/writer_test.go
@@ -119,8 +119,7 @@ func TestWriterSetOutputsMissing(t *testing.T) {
}
err := writer.SetOutputs([]operator.Operator{output1})
- require.Error(t, err)
- require.Contains(t, err.Error(), "does not exist")
+ require.ErrorContains(t, err, "does not exist")
}
func TestWriterSetOutputsInvalid(t *testing.T) {
@@ -132,8 +131,7 @@ func TestWriterSetOutputsInvalid(t *testing.T) {
}
err := writer.SetOutputs([]operator.Operator{output1})
- require.Error(t, err)
- require.Contains(t, err.Error(), "can not process entries")
+ require.ErrorContains(t, err, "can not process entries")
}
func TestWriterSetOutputsValid(t *testing.T) {
diff --git a/pkg/stanza/operator/input/windows/bookmark_test.go b/pkg/stanza/operator/input/windows/bookmark_test.go
index 988d6a3bc995..28a318243dab 100644
--- a/pkg/stanza/operator/input/windows/bookmark_test.go
+++ b/pkg/stanza/operator/input/windows/bookmark_test.go
@@ -14,16 +14,14 @@ import (
func TestBookmarkOpenPreexisting(t *testing.T) {
bookmark := Bookmark{handle: 5}
err := bookmark.Open("")
- require.Error(t, err)
- require.Contains(t, err.Error(), "bookmark handle is already open")
+ require.ErrorContains(t, err, "bookmark handle is already open")
}
func TestBookmarkOpenInvalidUTF8(t *testing.T) {
bookmark := NewBookmark()
invalidUTF8 := "\u0000"
err := bookmark.Open(invalidUTF8)
- require.Error(t, err)
- require.Contains(t, err.Error(), "failed to convert bookmark xml to utf16")
+ require.ErrorContains(t, err, "failed to convert bookmark xml to utf16")
}
func TestBookmarkOpenSyscallFailure(t *testing.T) {
@@ -31,8 +29,7 @@ func TestBookmarkOpenSyscallFailure(t *testing.T) {
xml := "<\\bookmark>"
createBookmarkProc = SimpleMockProc(0, 0, ErrorNotSupported)
err := bookmark.Open(xml)
- require.Error(t, err)
- require.Contains(t, err.Error(), "failed to create bookmark handle from xml")
+ require.ErrorContains(t, err, "failed to create bookmark handle from xml")
}
func TestBookmarkOpenSuccess(t *testing.T) {
@@ -49,8 +46,7 @@ func TestBookmarkUpdateFailureOnCreateSyscall(t *testing.T) {
bookmark := NewBookmark()
createBookmarkProc = SimpleMockProc(0, 0, ErrorNotSupported)
err := bookmark.Update(event)
- require.Error(t, err)
- require.Contains(t, err.Error(), "syscall to `EvtCreateBookmark` failed")
+ require.ErrorContains(t, err, "syscall to `EvtCreateBookmark` failed")
}
func TestBookmarkUpdateFailureOnUpdateSyscall(t *testing.T) {
@@ -59,8 +55,7 @@ func TestBookmarkUpdateFailureOnUpdateSyscall(t *testing.T) {
createBookmarkProc = SimpleMockProc(1, 0, ErrorSuccess)
updateBookmarkProc = SimpleMockProc(0, 0, ErrorNotSupported)
err := bookmark.Update(event)
- require.Error(t, err)
- require.Contains(t, err.Error(), "syscall to `EvtUpdateBookmark` failed")
+ require.ErrorContains(t, err, "syscall to `EvtUpdateBookmark` failed")
}
func TestBookmarkUpdateSuccess(t *testing.T) {
@@ -83,8 +78,7 @@ func TestBookmarkCloseSyscallFailure(t *testing.T) {
bookmark := Bookmark{handle: 5}
closeProc = SimpleMockProc(0, 0, ErrorNotSupported)
err := bookmark.Close()
- require.Error(t, err)
- require.Contains(t, err.Error(), "failed to close bookmark handle")
+ require.ErrorContains(t, err, "failed to close bookmark handle")
}
func TestBookmarkCloseSuccess(t *testing.T) {
@@ -99,8 +93,7 @@ func TestBookmarkRenderWhenClosed(t *testing.T) {
bookmark := NewBookmark()
buffer := NewBuffer()
_, err := bookmark.Render(buffer)
- require.Error(t, err)
- require.Contains(t, err.Error(), "bookmark handle is not open")
+ require.ErrorContains(t, err, "bookmark handle is not open")
}
func TestBookmarkRenderInvalidSyscall(t *testing.T) {
@@ -108,6 +101,5 @@ func TestBookmarkRenderInvalidSyscall(t *testing.T) {
buffer := NewBuffer()
renderProc = SimpleMockProc(0, 0, ErrorNotSupported)
_, err := bookmark.Render(buffer)
- require.Error(t, err)
- require.Contains(t, err.Error(), "syscall to 'EvtRender' failed")
+ require.ErrorContains(t, err, "syscall to 'EvtRender' failed")
}
diff --git a/pkg/stanza/operator/input/windows/event_test.go b/pkg/stanza/operator/input/windows/event_test.go
index 0acc6240c62c..4650cdf736ce 100644
--- a/pkg/stanza/operator/input/windows/event_test.go
+++ b/pkg/stanza/operator/input/windows/event_test.go
@@ -21,8 +21,7 @@ func TestEventCloseSyscallFailure(t *testing.T) {
event := NewEvent(5)
closeProc = SimpleMockProc(0, 0, ErrorNotSupported)
err := event.Close()
- require.Error(t, err)
- require.Contains(t, err.Error(), "failed to close event handle")
+ require.ErrorContains(t, err, "failed to close event handle")
}
func TestEventCloseSuccess(t *testing.T) {
diff --git a/pkg/stanza/operator/input/windows/input_test.go b/pkg/stanza/operator/input/windows/input_test.go
index 4597a924d74f..6d530f89bca1 100644
--- a/pkg/stanza/operator/input/windows/input_test.go
+++ b/pkg/stanza/operator/input/windows/input_test.go
@@ -34,8 +34,7 @@ func TestInputStart_LocalSubscriptionError(t *testing.T) {
input.pollInterval = 1 * time.Second
err := input.Start(persister)
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "The specified channel could not be found")
+ assert.ErrorContains(t, err, "The specified channel could not be found")
}
// TestInputStart_RemoteSubscriptionError ensures the input correctly handles remote subscription errors.
@@ -52,8 +51,7 @@ func TestInputStart_RemoteSubscriptionError(t *testing.T) {
}
err := input.Start(persister)
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "The specified channel could not be found")
+ assert.ErrorContains(t, err, "The specified channel could not be found")
}
// TestInputStart_RemoteSessionError ensures the input correctly handles remote session errors.
@@ -72,8 +70,7 @@ func TestInputStart_RemoteSessionError(t *testing.T) {
}
err := input.Start(persister)
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "failed to start remote session for server remote-server: remote session error")
+ assert.ErrorContains(t, err, "failed to start remote session for server remote-server: remote session error")
}
// TestInputStart_RemoteAccessDeniedError ensures the input correctly handles remote access denied errors.
@@ -97,9 +94,8 @@ func TestInputStart_RemoteAccessDeniedError(t *testing.T) {
}
err := input.Start(persister)
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "failed to open subscription for remote server")
- assert.Contains(t, err.Error(), "Access is denied")
+ assert.ErrorContains(t, err, "failed to open subscription for remote server")
+ assert.ErrorContains(t, err, "Access is denied")
}
// TestInputStart_BadChannelName ensures the input correctly handles bad channel names.
@@ -123,7 +119,6 @@ func TestInputStart_BadChannelName(t *testing.T) {
}
err := input.Start(persister)
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "failed to open subscription for remote server")
- assert.Contains(t, err.Error(), "The specified channel could not be found")
+ assert.ErrorContains(t, err, "failed to open subscription for remote server")
+ assert.ErrorContains(t, err, "The specified channel could not be found")
}
diff --git a/pkg/stanza/operator/input/windows/publisher_test.go b/pkg/stanza/operator/input/windows/publisher_test.go
index 34da0b15c07d..75ca47798d20 100644
--- a/pkg/stanza/operator/input/windows/publisher_test.go
+++ b/pkg/stanza/operator/input/windows/publisher_test.go
@@ -14,8 +14,7 @@ import (
func TestPublisherOpenPreexisting(t *testing.T) {
publisher := Publisher{handle: 5}
err := publisher.Open("provider_name_does_not_matter_for_this_test")
- require.Error(t, err)
- require.Contains(t, err.Error(), "publisher handle is already open")
+ require.ErrorContains(t, err, "publisher handle is already open")
require.True(t, publisher.Valid())
}
@@ -23,8 +22,7 @@ func TestPublisherOpenInvalidUTF8(t *testing.T) {
publisher := NewPublisher()
invalidUTF8 := "\u0000"
err := publisher.Open(invalidUTF8)
- require.Error(t, err)
- require.Contains(t, err.Error(), "failed to convert the provider name \"\\x00\" to utf16: invalid argument")
+ require.ErrorContains(t, err, "failed to convert the provider name \"\\x00\" to utf16: invalid argument")
require.False(t, publisher.Valid())
}
@@ -33,8 +31,7 @@ func TestPublisherOpenSyscallFailure(t *testing.T) {
provider := "provider"
defer mockWithDeferredRestore(&openPublisherMetadataProc, SimpleMockProc(0, 0, ErrorNotSupported))()
err := publisher.Open(provider)
- require.Error(t, err)
- require.Contains(t, err.Error(), "failed to open the metadata for the \"provider\" provider: The request is not supported.")
+ require.ErrorContains(t, err, "failed to open the metadata for the \"provider\" provider: The request is not supported.")
require.False(t, publisher.Valid())
}
@@ -59,8 +56,7 @@ func TestPublisherCloseSyscallFailure(t *testing.T) {
publisher := Publisher{handle: 5}
defer mockWithDeferredRestore(&closeProc, SimpleMockProc(0, 0, ErrorNotSupported))()
err := publisher.Close()
- require.Error(t, err)
- require.Contains(t, err.Error(), "failed to close publisher")
+ require.ErrorContains(t, err, "failed to close publisher")
require.True(t, publisher.Valid())
}
diff --git a/pkg/stanza/operator/output/drop/output_test.go b/pkg/stanza/operator/output/drop/output_test.go
index 44e48499d84e..520dcfee6e4b 100644
--- a/pkg/stanza/operator/output/drop/output_test.go
+++ b/pkg/stanza/operator/output/drop/output_test.go
@@ -26,8 +26,7 @@ func TestBuildIvalid(t *testing.T) {
set := componenttest.NewNopTelemetrySettings()
set.Logger = nil
_, err := cfg.Build(set)
- require.Error(t, err)
- require.Contains(t, err.Error(), "build context is missing a logger")
+ require.ErrorContains(t, err, "build context is missing a logger")
}
func TestProcess(t *testing.T) {
diff --git a/pkg/stanza/operator/parser/container/parser_test.go b/pkg/stanza/operator/parser/container/parser_test.go
index 7e58e616b479..44c430ce4cfc 100644
--- a/pkg/stanza/operator/parser/container/parser_test.go
+++ b/pkg/stanza/operator/parser/container/parser_test.go
@@ -39,8 +39,7 @@ func TestConfigBuildFailure(t *testing.T) {
config.OnError = "invalid_on_error"
set := componenttest.NewNopTelemetrySettings()
_, err := config.Build(set)
- require.Error(t, err)
- require.Contains(t, err.Error(), "invalid `on_error` field")
+ require.ErrorContains(t, err, "invalid `on_error` field")
}
func TestConfigBuildFormatError(t *testing.T) {
@@ -48,29 +47,25 @@ func TestConfigBuildFormatError(t *testing.T) {
config.Format = "invalid_runtime"
set := componenttest.NewNopTelemetrySettings()
_, err := config.Build(set)
- require.Error(t, err)
- require.Contains(t, err.Error(), "invalid `format` field")
+ require.ErrorContains(t, err, "invalid `format` field")
}
func TestDockerParserInvalidType(t *testing.T) {
parser := newTestParser(t)
_, err := parser.parseDocker([]int{})
- require.Error(t, err)
- require.Contains(t, err.Error(), "type '[]int' cannot be parsed as docker container logs")
+ require.ErrorContains(t, err, "type '[]int' cannot be parsed as docker container logs")
}
func TestCrioParserInvalidType(t *testing.T) {
parser := newTestParser(t)
_, err := parser.parseCRIO([]int{})
- require.Error(t, err)
- require.Contains(t, err.Error(), "type '[]int' cannot be parsed as cri-o container logs")
+ require.ErrorContains(t, err, "type '[]int' cannot be parsed as cri-o container logs")
}
func TestContainerdParserInvalidType(t *testing.T) {
parser := newTestParser(t)
_, err := parser.parseContainerd([]int{})
- require.Error(t, err)
- require.Contains(t, err.Error(), "type '[]int' cannot be parsed as containerd logs")
+ require.ErrorContains(t, err, "type '[]int' cannot be parsed as containerd logs")
}
func TestFormatDetectionFailure(t *testing.T) {
@@ -79,8 +74,7 @@ func TestFormatDetectionFailure(t *testing.T) {
Body: `invalid container format`,
}
_, err := parser.detectFormat(e)
- require.Error(t, err)
- require.Contains(t, err.Error(), "entry cannot be parsed as container logs")
+ require.ErrorContains(t, err, "entry cannot be parsed as container logs")
}
func TestInternalRecombineCfg(t *testing.T) {
diff --git a/pkg/stanza/operator/parser/csv/parser_test.go b/pkg/stanza/operator/parser/csv/parser_test.go
index 27fd4a7a94ea..daab359ed2e1 100644
--- a/pkg/stanza/operator/parser/csv/parser_test.go
+++ b/pkg/stanza/operator/parser/csv/parser_test.go
@@ -41,8 +41,7 @@ func TestParserBuildFailure(t *testing.T) {
cfg.OnError = "invalid_on_error"
set := componenttest.NewNopTelemetrySettings()
_, err := cfg.Build(set)
- require.Error(t, err)
- require.Contains(t, err.Error(), "invalid `on_error` field")
+ require.ErrorContains(t, err, "invalid `on_error` field")
}
func TestParserBuildFailureLazyIgnoreQuotes(t *testing.T) {
@@ -62,8 +61,7 @@ func TestParserBuildFailureInvalidDelimiter(t *testing.T) {
cfg.FieldDelimiter = ";;"
set := componenttest.NewNopTelemetrySettings()
_, err := cfg.Build(set)
- require.Error(t, err)
- require.Contains(t, err.Error(), "invalid 'delimiter': ';;'")
+ require.ErrorContains(t, err, "invalid 'delimiter': ';;'")
}
func TestParserBuildFailureBadHeaderConfig(t *testing.T) {
@@ -72,36 +70,31 @@ func TestParserBuildFailureBadHeaderConfig(t *testing.T) {
cfg.HeaderAttribute = "testheader"
set := componenttest.NewNopTelemetrySettings()
_, err := cfg.Build(set)
- require.Error(t, err)
- require.Contains(t, err.Error(), "only one header parameter can be set: 'header' or 'header_attribute'")
+ require.ErrorContains(t, err, "only one header parameter can be set: 'header' or 'header_attribute'")
}
func TestParserByteFailure(t *testing.T) {
parser := newTestParser(t)
_, err := parser.parse([]byte("invalid"))
- require.Error(t, err)
- require.Contains(t, err.Error(), "wrong number of fields: expected 3, found 1")
+ require.ErrorContains(t, err, "wrong number of fields: expected 3, found 1")
}
func TestParserStringFailure(t *testing.T) {
parser := newTestParser(t)
_, err := parser.parse("invalid")
- require.Error(t, err)
- require.Contains(t, err.Error(), "wrong number of fields: expected 3, found 1")
+ require.ErrorContains(t, err, "wrong number of fields: expected 3, found 1")
}
func TestParserInvalidType(t *testing.T) {
parser := newTestParser(t)
_, err := parser.parse([]int{})
- require.Error(t, err)
- require.Contains(t, err.Error(), "type '[]int' cannot be parsed as csv")
+ require.ErrorContains(t, err, "type '[]int' cannot be parsed as csv")
}
func TestParserInvalidTypeIgnoreQuotes(t *testing.T) {
parser := newTestParserIgnoreQuotes(t)
_, err := parser.parse([]int{})
- require.Error(t, err)
- require.Contains(t, err.Error(), "type '[]int' cannot be parsed as csv")
+ require.ErrorContains(t, err, "type '[]int' cannot be parsed as csv")
}
func TestParserCSV(t *testing.T) {
@@ -1112,8 +1105,7 @@ func TestBuildParserCSV(t *testing.T) {
c.Header = "name"
set := componenttest.NewNopTelemetrySettings()
_, err := c.Build(set)
- require.Error(t, err)
- require.Contains(t, err.Error(), "missing field delimiter in header")
+ require.ErrorContains(t, err, "missing field delimiter in header")
})
t.Run("InvalidHeaderFieldWrongDelimiter", func(t *testing.T) {
@@ -1130,7 +1122,6 @@ func TestBuildParserCSV(t *testing.T) {
c.FieldDelimiter = ":"
set := componenttest.NewNopTelemetrySettings()
_, err := c.Build(set)
- require.Error(t, err)
- require.Contains(t, err.Error(), "missing field delimiter in header")
+ require.ErrorContains(t, err, "missing field delimiter in header")
})
}
diff --git a/pkg/stanza/operator/parser/json/parser_test.go b/pkg/stanza/operator/parser/json/parser_test.go
index f9efe32a01c2..4563b4ded816 100644
--- a/pkg/stanza/operator/parser/json/parser_test.go
+++ b/pkg/stanza/operator/parser/json/parser_test.go
@@ -40,29 +40,25 @@ func TestConfigBuildFailure(t *testing.T) {
config.OnError = "invalid_on_error"
set := componenttest.NewNopTelemetrySettings()
_, err := config.Build(set)
- require.Error(t, err)
- require.Contains(t, err.Error(), "invalid `on_error` field")
+ require.ErrorContains(t, err, "invalid `on_error` field")
}
func TestParserStringFailure(t *testing.T) {
parser := newTestParser(t)
_, err := parser.parse("invalid")
- require.Error(t, err)
- require.Contains(t, err.Error(), "expected { character for map value")
+ require.ErrorContains(t, err, "expected { character for map value")
}
func TestParserByteFailure(t *testing.T) {
parser := newTestParser(t)
_, err := parser.parse([]byte("invalid"))
- require.Error(t, err)
- require.Contains(t, err.Error(), "type []uint8 cannot be parsed as JSON")
+ require.ErrorContains(t, err, "type []uint8 cannot be parsed as JSON")
}
func TestParserInvalidType(t *testing.T) {
parser := newTestParser(t)
_, err := parser.parse([]int{})
- require.Error(t, err)
- require.Contains(t, err.Error(), "type []int cannot be parsed as JSON")
+ require.ErrorContains(t, err, "type []int cannot be parsed as JSON")
}
func TestJSONImplementations(t *testing.T) {
diff --git a/pkg/stanza/operator/parser/jsonarray/config_test.go b/pkg/stanza/operator/parser/jsonarray/config_test.go
index 442e67d68a86..73b8bd8c7bda 100644
--- a/pkg/stanza/operator/parser/jsonarray/config_test.go
+++ b/pkg/stanza/operator/parser/jsonarray/config_test.go
@@ -92,7 +92,7 @@ func TestBuildWithFeatureGate(t *testing.T) {
set := componenttest.NewNopTelemetrySettings()
_, err := buildFunc().Build(set)
if err != nil {
- require.Contains(t, err.Error(), c.onErr)
+ require.ErrorContains(t, err, c.onErr)
}
})
}
diff --git a/pkg/stanza/operator/parser/jsonarray/parser_test.go b/pkg/stanza/operator/parser/jsonarray/parser_test.go
index a07131e0a75e..f20a2f8be43d 100644
--- a/pkg/stanza/operator/parser/jsonarray/parser_test.go
+++ b/pkg/stanza/operator/parser/jsonarray/parser_test.go
@@ -33,15 +33,13 @@ func TestParserBuildFailure(t *testing.T) {
cfg.OnError = "invalid_on_error"
set := componenttest.NewNopTelemetrySettings()
_, err := cfg.Build(set)
- require.Error(t, err)
- require.Contains(t, err.Error(), "invalid `on_error` field")
+ require.ErrorContains(t, err, "invalid `on_error` field")
}
func TestParserInvalidType(t *testing.T) {
parser := newTestParser(t)
_, err := parser.parse([]int{})
- require.Error(t, err)
- require.Contains(t, err.Error(), "type '[]int' cannot be parsed as json array")
+ require.ErrorContains(t, err, "type '[]int' cannot be parsed as json array")
}
func TestParserByteFailureHeadersMismatch(t *testing.T) {
@@ -54,8 +52,7 @@ func TestParserByteFailureHeadersMismatch(t *testing.T) {
require.NoError(t, err)
parser := op.(*Parser)
_, err = parser.parse("[\"stanza\",\"INFO\",\"started agent\", 42, true]")
- require.Error(t, err)
- require.Contains(t, err.Error(), "wrong number of fields: expected 3, found 5")
+ require.ErrorContains(t, err, "wrong number of fields: expected 3, found 5")
}
func TestParserJarray(t *testing.T) {
diff --git a/pkg/stanza/operator/parser/keyvalue/parser_test.go b/pkg/stanza/operator/parser/keyvalue/parser_test.go
index 4df63935dd47..640790620c9b 100644
--- a/pkg/stanza/operator/parser/keyvalue/parser_test.go
+++ b/pkg/stanza/operator/parser/keyvalue/parser_test.go
@@ -44,8 +44,7 @@ func TestConfigBuildFailure(t *testing.T) {
config.OnError = "invalid_on_error"
set := componenttest.NewNopTelemetrySettings()
_, err := config.Build(set)
- require.Error(t, err)
- require.Contains(t, err.Error(), "invalid `on_error` field")
+ require.ErrorContains(t, err, "invalid `on_error` field")
}
func TestBuild(t *testing.T) {
@@ -151,22 +150,19 @@ func TestBuild(t *testing.T) {
func TestParserStringFailure(t *testing.T) {
parser := newTestParser(t)
_, err := parser.parse("invalid")
- require.Error(t, err)
- require.Contains(t, err.Error(), fmt.Sprintf("cannot split %q into 2 items, got 1 item(s)", "invalid"))
+ require.ErrorContains(t, err, fmt.Sprintf("cannot split %q into 2 items, got 1 item(s)", "invalid"))
}
func TestParserInvalidType(t *testing.T) {
parser := newTestParser(t)
_, err := parser.parse([]int{})
- require.Error(t, err)
- require.Contains(t, err.Error(), "type []int cannot be parsed as key value pairs")
+ require.ErrorContains(t, err, "type []int cannot be parsed as key value pairs")
}
func TestParserEmptyInput(t *testing.T) {
parser := newTestParser(t)
_, err := parser.parse("")
- require.Error(t, err)
- require.Contains(t, err.Error(), "parse from field body is empty")
+ require.ErrorContains(t, err, "parse from field body is empty")
}
func TestKVImplementations(t *testing.T) {
diff --git a/pkg/stanza/operator/parser/regex/parser_test.go b/pkg/stanza/operator/parser/regex/parser_test.go
index 8a44342e69ec..56e119bbec6e 100644
--- a/pkg/stanza/operator/parser/regex/parser_test.go
+++ b/pkg/stanza/operator/parser/regex/parser_test.go
@@ -36,29 +36,25 @@ func TestParserBuildFailure(t *testing.T) {
cfg.OnError = "invalid_on_error"
set := componenttest.NewNopTelemetrySettings()
_, err := cfg.Build(set)
- require.Error(t, err)
- require.Contains(t, err.Error(), "invalid `on_error` field")
+ require.ErrorContains(t, err, "invalid `on_error` field")
}
func TestParserByteFailure(t *testing.T) {
parser := newTestParser(t, "^(?Ptest)", 0)
_, err := parser.parse([]byte("invalid"))
- require.Error(t, err)
- require.Contains(t, err.Error(), "type '[]uint8' cannot be parsed as regex")
+ require.ErrorContains(t, err, "type '[]uint8' cannot be parsed as regex")
}
func TestParserStringFailure(t *testing.T) {
parser := newTestParser(t, "^(?Ptest)", 0)
_, err := parser.parse("invalid")
- require.Error(t, err)
- require.Contains(t, err.Error(), "regex pattern does not match")
+ require.ErrorContains(t, err, "regex pattern does not match")
}
func TestParserInvalidType(t *testing.T) {
parser := newTestParser(t, "^(?Ptest)", 0)
_, err := parser.parse([]int{})
- require.Error(t, err)
- require.Contains(t, err.Error(), "type '[]int' cannot be parsed as regex")
+ require.ErrorContains(t, err, "type '[]int' cannot be parsed as regex")
}
func TestParserCache(t *testing.T) {
@@ -67,8 +63,7 @@ func TestParserCache(t *testing.T) {
require.NoError(t, parser.Stop())
}()
_, err := parser.parse([]int{})
- require.Error(t, err)
- require.Contains(t, err.Error(), "type '[]int' cannot be parsed as regex")
+ require.ErrorContains(t, err, "type '[]int' cannot be parsed as regex")
require.NotNil(t, parser.cache, "expected cache to be configured")
require.Equal(t, uint16(200), parser.cache.maxSize())
}
@@ -197,8 +192,7 @@ func TestBuildParserRegex(t *testing.T) {
c.Regex = ".*"
set := componenttest.NewNopTelemetrySettings()
_, err := c.Build(set)
- require.Error(t, err)
- require.Contains(t, err.Error(), "no named capture groups")
+ require.ErrorContains(t, err, "no named capture groups")
})
t.Run("NoNamedGroups", func(t *testing.T) {
@@ -206,8 +200,7 @@ func TestBuildParserRegex(t *testing.T) {
c.Regex = "(.*)"
set := componenttest.NewNopTelemetrySettings()
_, err := c.Build(set)
- require.Error(t, err)
- require.Contains(t, err.Error(), "no named capture groups")
+ require.ErrorContains(t, err, "no named capture groups")
})
}
diff --git a/pkg/stanza/operator/parser/syslog/config_test.go b/pkg/stanza/operator/parser/syslog/config_test.go
index 3d29fc57885d..82ceac100365 100644
--- a/pkg/stanza/operator/parser/syslog/config_test.go
+++ b/pkg/stanza/operator/parser/syslog/config_test.go
@@ -128,8 +128,7 @@ func TestUnmarshal(t *testing.T) {
func TestParserMissingProtocol(t *testing.T) {
set := componenttest.NewNopTelemetrySettings()
_, err := NewConfig().Build(set)
- require.Error(t, err)
- require.Contains(t, err.Error(), "missing field 'protocol'")
+ require.ErrorContains(t, err, "missing field 'protocol'")
}
func TestRFC6587ConfigOptions(t *testing.T) {
@@ -232,6 +231,5 @@ func TestParserInvalidLocation(t *testing.T) {
set := componenttest.NewNopTelemetrySettings()
_, err := config.Build(set)
- require.Error(t, err)
- require.Contains(t, err.Error(), "failed to load location "+config.Location)
+ require.ErrorContains(t, err, "failed to load location "+config.Location)
}
diff --git a/pkg/stanza/operator/parser/syslog/parser_test.go b/pkg/stanza/operator/parser/syslog/parser_test.go
index f22b04a48ab7..cbff227b19e6 100644
--- a/pkg/stanza/operator/parser/syslog/parser_test.go
+++ b/pkg/stanza/operator/parser/syslog/parser_test.go
@@ -70,8 +70,7 @@ func TestSyslogParseRFC5424_SDNameTooLong(t *testing.T) {
newEntry := entry.New()
newEntry.Body = body
err = op.Process(context.Background(), newEntry)
- require.Error(t, err)
- require.Contains(t, err.Error(), "expecting a structured data element id (from 1 to max 32 US-ASCII characters")
+ require.ErrorContains(t, err, "expecting a structured data element id (from 1 to max 32 US-ASCII characters")
select {
case e := <-fake.Received:
@@ -100,8 +99,7 @@ func TestSyslogParseRFC5424_Octet_Counting_MessageTooLong(t *testing.T) {
newEntry := entry.New()
newEntry.Body = body
err = op.Process(context.Background(), newEntry)
- require.Error(t, err)
- require.Contains(t, err.Error(), "message too long to parse. was size 215, max length 214")
+ require.ErrorContains(t, err, "message too long to parse. was size 215, max length 214")
select {
case e := <-fake.Received:
diff --git a/pkg/stanza/operator/parser/uri/parser_test.go b/pkg/stanza/operator/parser/uri/parser_test.go
index f6c0290b3f2a..d0c856cae1e1 100644
--- a/pkg/stanza/operator/parser/uri/parser_test.go
+++ b/pkg/stanza/operator/parser/uri/parser_test.go
@@ -33,29 +33,25 @@ func TestParserBuildFailure(t *testing.T) {
cfg.OnError = "invalid_on_error"
set := componenttest.NewNopTelemetrySettings()
_, err := cfg.Build(set)
- require.Error(t, err)
- require.Contains(t, err.Error(), "invalid `on_error` field")
+ require.ErrorContains(t, err, "invalid `on_error` field")
}
func TestParserByteFailure(t *testing.T) {
parser := newTestParser(t)
_, err := parser.parse([]byte("invalid"))
- require.Error(t, err)
- require.Contains(t, err.Error(), "type '[]uint8' cannot be parsed as URI")
+ require.ErrorContains(t, err, "type '[]uint8' cannot be parsed as URI")
}
func TestParserStringFailure(t *testing.T) {
parser := newTestParser(t)
_, err := parser.parse("invalid")
- require.Error(t, err)
- require.Contains(t, err.Error(), "parse \"invalid\": invalid URI for request")
+ require.ErrorContains(t, err, "parse \"invalid\": invalid URI for request")
}
func TestParserInvalidType(t *testing.T) {
parser := newTestParser(t)
_, err := parser.parse([]int{})
- require.Error(t, err)
- require.Contains(t, err.Error(), "type '[]int' cannot be parsed as URI")
+ require.ErrorContains(t, err, "type '[]int' cannot be parsed as URI")
}
func TestProcess(t *testing.T) {
diff --git a/pkg/stanza/operator/transformer/noop/config_test.go b/pkg/stanza/operator/transformer/noop/config_test.go
index 92255e04e707..18fa1dd64d89 100644
--- a/pkg/stanza/operator/transformer/noop/config_test.go
+++ b/pkg/stanza/operator/transformer/noop/config_test.go
@@ -23,6 +23,5 @@ func TestBuildInvalid(t *testing.T) {
set := componenttest.NewNopTelemetrySettings()
set.Logger = nil
_, err := cfg.Build(set)
- require.Error(t, err)
- require.Contains(t, err.Error(), "build context is missing a logger")
+ require.ErrorContains(t, err, "build context is missing a logger")
}
diff --git a/pkg/stanza/pipeline/directed_test.go b/pkg/stanza/pipeline/directed_test.go
index fc84f634403b..079ac9253565 100644
--- a/pkg/stanza/pipeline/directed_test.go
+++ b/pkg/stanza/pipeline/directed_test.go
@@ -101,8 +101,7 @@ func TestPipeline(t *testing.T) {
operator2.On("Outputs").Return(nil)
_, err := NewDirectedPipeline([]operator.Operator{operator1, operator2})
- require.Error(t, err)
- require.Contains(t, err.Error(), "already exists")
+ require.ErrorContains(t, err, "already exists")
})
t.Run("OutputNotExist", func(t *testing.T) {
@@ -115,8 +114,7 @@ func TestPipeline(t *testing.T) {
operator2.On("Outputs").Return([]operator.Operator{operator1})
_, err := NewDirectedPipeline([]operator.Operator{operator2})
- require.Error(t, err)
- require.Contains(t, err.Error(), "does not exist")
+ require.ErrorContains(t, err, "does not exist")
})
t.Run("OutputNotProcessor", func(t *testing.T) {
@@ -132,8 +130,7 @@ func TestPipeline(t *testing.T) {
operator2.On("Outputs").Return([]operator.Operator{operator1})
_, err := NewDirectedPipeline([]operator.Operator{operator1, operator2})
- require.Error(t, err)
- require.Contains(t, err.Error(), "can not process")
+ require.ErrorContains(t, err, "can not process")
})
t.Run("DuplicateEdges", func(t *testing.T) {
@@ -155,8 +152,7 @@ func TestPipeline(t *testing.T) {
graph.SetEdge(edge)
err := connectNode(graph, node2)
- require.Error(t, err)
- require.Contains(t, err.Error(), "connection already exists")
+ require.ErrorContains(t, err, "connection already exists")
})
t.Run("Cyclical", func(t *testing.T) {
@@ -171,8 +167,7 @@ func TestPipeline(t *testing.T) {
mockOperator3.On("SetOutputs", mock.Anything).Return(nil)
_, err := NewDirectedPipeline([]operator.Operator{mockOperator1, mockOperator2, mockOperator3})
- require.Error(t, err)
- require.Contains(t, err.Error(), "circular dependency")
+ require.ErrorContains(t, err, "circular dependency")
})
}
@@ -205,8 +200,7 @@ func TestPipelineStartOrder(t *testing.T) {
require.NoError(t, err)
err = pipeline.Start(mockPersister)
- require.Error(t, err)
- require.Contains(t, err.Error(), "operator 1 failed to start")
+ require.ErrorContains(t, err, "operator 1 failed to start")
require.True(t, mock2Started)
require.True(t, mock3Started)
}
diff --git a/processor/geoipprocessor/config_test.go b/processor/geoipprocessor/config_test.go
index 8020bf32b1d8..746b210685fc 100644
--- a/processor/geoipprocessor/config_test.go
+++ b/processor/geoipprocessor/config_test.go
@@ -96,7 +96,7 @@ func TestLoadConfig_InvalidProviderKey(t *testing.T) {
factories.Processors[metadata.Type] = factory
_, err = otelcoltest.LoadConfigAndValidate(filepath.Join("testdata", "config-invalidProviderKey.yaml"), factories)
- require.Contains(t, err.Error(), "error reading configuration for \"geoip\": invalid provider key: invalidProviderKey")
+ require.ErrorContains(t, err, "error reading configuration for \"geoip\": invalid provider key: invalidProviderKey")
}
func TestLoadConfig_ValidProviderKey(t *testing.T) {
@@ -152,5 +152,5 @@ func TestLoadConfig_ProviderValidateError(t *testing.T) {
factories.Processors[metadata.Type] = factory
_, err = otelcoltest.LoadConfigAndValidate(filepath.Join("testdata", "config-mockProvider.yaml"), factories)
- require.Contains(t, err.Error(), "error validating provider mock")
+ require.ErrorContains(t, err, "error validating provider mock")
}
diff --git a/processor/geoipprocessor/internal/provider/maxmindprovider/factory_test.go b/processor/geoipprocessor/internal/provider/maxmindprovider/factory_test.go
index a70bd4bc4267..fcc6fa48d18c 100644
--- a/processor/geoipprocessor/internal/provider/maxmindprovider/factory_test.go
+++ b/processor/geoipprocessor/internal/provider/maxmindprovider/factory_test.go
@@ -25,6 +25,6 @@ func TestCreateProvider(t *testing.T) {
provider, err := factory.CreateGeoIPProvider(context.Background(), processortest.NewNopSettings(), cfg)
- assert.Contains(t, err.Error(), "could not open geoip database")
+ assert.ErrorContains(t, err, "could not open geoip database")
assert.Nil(t, provider)
}
diff --git a/processor/metricsgenerationprocessor/README.md b/processor/metricsgenerationprocessor/README.md
index 3d81bcad46f7..42498a5ca312 100644
--- a/processor/metricsgenerationprocessor/README.md
+++ b/processor/metricsgenerationprocessor/README.md
@@ -16,12 +16,14 @@
## Description
-The metrics generation processor (`experimental_metricsgenerationprocessor`) can be used to create new metrics using existing metrics following a given rule. Currently it supports following two approaches for creating a new metric.
+The metrics generation processor (`experimental_metricsgenerationprocessor`) can be used to create new metrics using existing metrics following a given rule. This processor currently supports the following two approaches for creating a new metric.
-1. It can create a new metric from two existing metrics by applying one of the following arithmetic operations: add, subtract, multiply, divide and percent. One use case is to calculate the `pod.memory.utilization` metric like the following equation-
+1. It can create a new metric from two existing metrics by applying one of the following arithmetic operations: add, subtract, multiply, divide, or percent. One use case is to calculate the `pod.memory.utilization` metric using the following equation:
`pod.memory.utilization` = (`pod.memory.usage.bytes` / `node.memory.limit`)
1. It can create a new metric by scaling the value of an existing metric with a given constant number. One use case is to convert `pod.memory.usage` metric values from Megabytes to Bytes (multiply the existing metric's value by 1,048,576)
+Note: The created metric's type is inherited from the metric configured as `metric1`.
+
## Configuration
Configuration is specified through a list of generation rules. Generation rules find the metrics which
@@ -43,10 +45,10 @@ processors:
# type describes how the new metric will be generated. It can be one of `calculate` or `scale`. calculate generates a metric applying the given operation on two operand metrics. scale operates only on operand1 metric to generate the new metric.
type: {calculate, scale}
- # This is a required field.
+ # This is a required field. This must be a gauge or sum metric.
metric1:
- # This field is required only if the type is "calculate".
+ # This field is required only if the type is "calculate". When required, this must be a gauge or sum metric.
metric2:
# Operation specifies which arithmetic operation to apply. It must be one of the five supported operations.
diff --git a/processor/metricsgenerationprocessor/go.mod b/processor/metricsgenerationprocessor/go.mod
index 5e7e10e33aa9..d1c2e84f8d5f 100644
--- a/processor/metricsgenerationprocessor/go.mod
+++ b/processor/metricsgenerationprocessor/go.mod
@@ -3,6 +3,8 @@ module github.com/open-telemetry/opentelemetry-collector-contrib/processor/metri
go 1.22.0
require (
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.110.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.110.0
github.com/stretchr/testify v1.9.0
go.opentelemetry.io/collector/component v0.110.0
go.opentelemetry.io/collector/confmap v1.16.0
@@ -15,6 +17,7 @@ require (
)
require (
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
@@ -29,6 +32,7 @@ require (
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.110.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rogpeppe/go-internal v1.12.0 // indirect
go.opentelemetry.io/collector/component/componentstatus v0.110.0 // indirect
@@ -59,3 +63,9 @@ retract (
v0.76.1
v0.65.0
)
+
+replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden => ../../pkg/golden
+
+replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest => ../../pkg/pdatatest
+
+replace github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil => ../../pkg/pdatautil
diff --git a/processor/metricsgenerationprocessor/go.sum b/processor/metricsgenerationprocessor/go.sum
index e195251f7c4d..12d9a3eacfd4 100644
--- a/processor/metricsgenerationprocessor/go.sum
+++ b/processor/metricsgenerationprocessor/go.sum
@@ -1,3 +1,5 @@
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
diff --git a/processor/metricsgenerationprocessor/processor_test.go b/processor/metricsgenerationprocessor/processor_test.go
index 49cd25058abe..ef0e62ebfd84 100644
--- a/processor/metricsgenerationprocessor/processor_test.go
+++ b/processor/metricsgenerationprocessor/processor_test.go
@@ -5,15 +5,21 @@ package metricsgenerationprocessor
import (
"context"
+ "fmt"
+ "path/filepath"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "go.opentelemetry.io/collector/confmap/confmaptest"
"go.opentelemetry.io/collector/consumer/consumertest"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/pmetric"
"go.opentelemetry.io/collector/processor/processortest"
+
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden"
+ "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest"
)
type testMetric struct {
@@ -384,3 +390,107 @@ func getOutputForIntGaugeTest() pmetric.Metrics {
return intGaugeOutputMetrics
}
+
+func TestSumCalculateNewMetric(t *testing.T) {
+ next := new(consumertest.MetricsSink)
+ cfg := &Config{
+ Rules: []Rule{
+ {
+ Name: "system.filesystem.capacity",
+ Unit: "bytes",
+ Type: "calculate",
+ Metric1: "system.filesystem.usage",
+ Metric2: "system.filesystem.utilization",
+ Operation: "divide",
+ },
+ },
+ }
+ factory := NewFactory()
+ mgp, err := factory.CreateMetricsProcessor(
+ context.Background(),
+ processortest.NewNopSettings(),
+ cfg,
+ next,
+ )
+ assert.NotNil(t, mgp)
+ assert.NoError(t, err)
+
+ assert.True(t, mgp.Capabilities().MutatesData)
+ require.NoError(t, mgp.Start(context.Background(), nil))
+
+ inputMetrics, err := golden.ReadMetrics(filepath.Join("testdata", "filesystem_metrics_input.yaml"))
+ assert.NoError(t, err)
+
+ err = mgp.ConsumeMetrics(context.Background(), inputMetrics)
+ assert.NoError(t, err)
+
+ got := next.AllMetrics()
+ // golden.WriteMetrics(t, filepath.Join(".", "testdata", "filesystem_metrics_expected.yaml"), got[0])
+ expected, err := golden.ReadMetrics(filepath.Join("testdata", "filesystem_metrics_expected.yaml"))
+ assert.NoError(t, err)
+ assert.Len(t, got, 1)
+ err = pmetrictest.CompareMetrics(expected, got[0],
+ pmetrictest.IgnoreMetricDataPointsOrder(),
+ pmetrictest.IgnoreStartTimestamp(),
+ pmetrictest.IgnoreTimestamp())
+ assert.NoError(t, err)
+}
+
+func TestResultingMetricTypes(t *testing.T) {
+ testCaseNames := []string{
+ "add_sum_sum",
+ "add_gauge_gauge",
+ "add_gauge_sum",
+ "add_sum_gauge",
+ "multiply_gauge_sum",
+ "multiply_sum_gauge",
+ "divide_gauge_sum",
+ "divide_sum_gauge",
+ "subtract_gauge_sum",
+ "subtract_sum_gauge",
+ "percent_sum_gauge",
+ "percent_gauge_sum",
+ }
+
+ cm, err := confmaptest.LoadConf(filepath.Join("testdata", "metric_types", "gauge_sum_metrics_config.yaml"))
+ assert.NoError(t, err)
+
+ for _, testCase := range testCaseNames {
+ next := new(consumertest.MetricsSink)
+ factory := NewFactory()
+ cfg := factory.CreateDefaultConfig()
+
+ sub, err := cm.Sub(fmt.Sprintf("%s/%s", "experimental_metricsgeneration", testCase))
+ require.NoError(t, err)
+ require.NoError(t, sub.Unmarshal(cfg))
+
+ mgp, err := factory.CreateMetricsProcessor(
+ context.Background(),
+ processortest.NewNopSettings(),
+ cfg,
+ next,
+ )
+ assert.NotNil(t, mgp)
+ assert.NoError(t, err)
+
+ assert.True(t, mgp.Capabilities().MutatesData)
+ require.NoError(t, mgp.Start(context.Background(), nil))
+
+ inputMetrics, err := golden.ReadMetrics(filepath.Join("testdata", "metric_types", "gauge_sum_metrics_input.yaml"))
+ assert.NoError(t, err)
+
+ err = mgp.ConsumeMetrics(context.Background(), inputMetrics)
+ assert.NoError(t, err)
+
+ got := next.AllMetrics()
+ // golden.WriteMetrics(t, filepath.Join("testdata", "metric_types", fmt.Sprintf("%s_%s", testCase, "expected.yaml")), got[0])
+ expected, err := golden.ReadMetrics(filepath.Join("testdata", "metric_types", fmt.Sprintf("%s_%s", testCase, "expected.yaml")))
+ assert.NoError(t, err)
+ assert.Len(t, got, 1)
+ err = pmetrictest.CompareMetrics(expected, got[0],
+ pmetrictest.IgnoreMetricDataPointsOrder(),
+ pmetrictest.IgnoreStartTimestamp(),
+ pmetrictest.IgnoreTimestamp())
+ assert.NoError(t, err)
+ }
+}
diff --git a/processor/metricsgenerationprocessor/testdata/filesystem_metrics_expected.yaml b/processor/metricsgenerationprocessor/testdata/filesystem_metrics_expected.yaml
new file mode 100644
index 000000000000..b2ac2ec86563
--- /dev/null
+++ b/processor/metricsgenerationprocessor/testdata/filesystem_metrics_expected.yaml
@@ -0,0 +1,1205 @@
+resourceMetrics:
+ - resource: {}
+ schemaUrl: https://opentelemetry.io/schemas/1.9.0
+ scopeMetrics:
+ - metrics:
+ - description: FileSystem inodes used.
+ name: system.filesystem.inodes.usage
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "2183953600"
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s1
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Data
+ - key: state
+ value:
+ stringValue: free
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "4770142"
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s1
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Data
+ - key: state
+ value:
+ stringValue: used
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "2183953600"
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s2
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Preboot
+ - key: state
+ value:
+ stringValue: free
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "1813"
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s2
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Preboot
+ - key: state
+ value:
+ stringValue: used
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "2183953600"
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s4s1
+ - key: mode
+ value:
+ stringValue: ro
+ - key: mountpoint
+ value:
+ stringValue: /
+ - key: state
+ value:
+ stringValue: free
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "404475"
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s4s1
+ - key: mode
+ value:
+ stringValue: ro
+ - key: mountpoint
+ value:
+ stringValue: /
+ - key: state
+ value:
+ stringValue: used
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "2183953600"
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s5
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Update
+ - key: state
+ value:
+ stringValue: free
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "24"
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s5
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Update
+ - key: state
+ value:
+ stringValue: used
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "2183953600"
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s6
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/VM
+ - key: state
+ value:
+ stringValue: free
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "4"
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s6
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/VM
+ - key: state
+ value:
+ stringValue: used
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: device
+ value:
+ stringValue: devfs
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /dev
+ - key: state
+ value:
+ stringValue: free
+ - key: type
+ value:
+ stringValue: devfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "666"
+ attributes:
+ - key: device
+ value:
+ stringValue: devfs
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /dev
+ - key: state
+ value:
+ stringValue: used
+ - key: type
+ value:
+ stringValue: devfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: device
+ value:
+ stringValue: map auto_home
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Data/home
+ - key: state
+ value:
+ stringValue: free
+ - key: type
+ value:
+ stringValue: autofs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: device
+ value:
+ stringValue: map auto_home
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Data/home
+ - key: state
+ value:
+ stringValue: used
+ - key: type
+ value:
+ stringValue: autofs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ unit: '{inodes}'
+ - description: Filesystem bytes used.
+ name: system.filesystem.usage
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "223636848640"
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s1
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Data
+ - key: state
+ value:
+ stringValue: free
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s1
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Data
+ - key: state
+ value:
+ stringValue: reserved
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "276326326272"
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s1
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Data
+ - key: state
+ value:
+ stringValue: used
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "223636848640"
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s2
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Preboot
+ - key: state
+ value:
+ stringValue: free
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s2
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Preboot
+ - key: state
+ value:
+ stringValue: reserved
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "276326326272"
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s2
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Preboot
+ - key: state
+ value:
+ stringValue: used
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "223636848640"
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s4s1
+ - key: mode
+ value:
+ stringValue: ro
+ - key: mountpoint
+ value:
+ stringValue: /
+ - key: state
+ value:
+ stringValue: free
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s4s1
+ - key: mode
+ value:
+ stringValue: ro
+ - key: mountpoint
+ value:
+ stringValue: /
+ - key: state
+ value:
+ stringValue: reserved
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "276326326272"
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s4s1
+ - key: mode
+ value:
+ stringValue: ro
+ - key: mountpoint
+ value:
+ stringValue: /
+ - key: state
+ value:
+ stringValue: used
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "223636848640"
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s5
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Update
+ - key: state
+ value:
+ stringValue: free
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s5
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Update
+ - key: state
+ value:
+ stringValue: reserved
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "276326326272"
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s5
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Update
+ - key: state
+ value:
+ stringValue: used
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "223636848640"
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s6
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/VM
+ - key: state
+ value:
+ stringValue: free
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s6
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/VM
+ - key: state
+ value:
+ stringValue: reserved
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "276326326272"
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s6
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/VM
+ - key: state
+ value:
+ stringValue: used
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: device
+ value:
+ stringValue: devfs
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /dev
+ - key: state
+ value:
+ stringValue: free
+ - key: type
+ value:
+ stringValue: devfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: device
+ value:
+ stringValue: devfs
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /dev
+ - key: state
+ value:
+ stringValue: reserved
+ - key: type
+ value:
+ stringValue: devfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "197120"
+ attributes:
+ - key: device
+ value:
+ stringValue: devfs
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /dev
+ - key: state
+ value:
+ stringValue: used
+ - key: type
+ value:
+ stringValue: devfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: device
+ value:
+ stringValue: map auto_home
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Data/home
+ - key: state
+ value:
+ stringValue: free
+ - key: type
+ value:
+ stringValue: autofs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: device
+ value:
+ stringValue: map auto_home
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Data/home
+ - key: state
+ value:
+ stringValue: reserved
+ - key: type
+ value:
+ stringValue: autofs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asInt: "0"
+ attributes:
+ - key: device
+ value:
+ stringValue: map auto_home
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Data/home
+ - key: state
+ value:
+ stringValue: used
+ - key: type
+ value:
+ stringValue: autofs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ unit: By
+ - description: Fraction of filesystem bytes used.
+ gauge:
+ dataPoints:
+ - asDouble: 0.5526933585071281
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s1
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Data
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asDouble: 0.5526933585071281
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s2
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Preboot
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asDouble: 0.5526933585071281
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s4s1
+ - key: mode
+ value:
+ stringValue: ro
+ - key: mountpoint
+ value:
+ stringValue: /
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asDouble: 0.5526933585071281
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s5
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Update
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asDouble: 0.5526933585071281
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s6
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/VM
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asDouble: 1
+ attributes:
+ - key: device
+ value:
+ stringValue: devfs
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /dev
+ - key: type
+ value:
+ stringValue: devfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asDouble: 0
+ attributes:
+ - key: device
+ value:
+ stringValue: map auto_home
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Data/home
+ - key: type
+ value:
+ stringValue: autofs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ name: system.filesystem.utilization
+ unit: "1"
+ - name: system.filesystem.capacity
+ sum:
+ dataPoints:
+ - asDouble: 4.046309679639759e+11
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s1
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Data
+ - key: state
+ value:
+ stringValue: free
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asDouble: 0
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s1
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Data
+ - key: state
+ value:
+ stringValue: reserved
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asDouble: 4.99963174912e+11
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s1
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Data
+ - key: state
+ value:
+ stringValue: used
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asDouble: 4.046309679639759e+11
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s2
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Preboot
+ - key: state
+ value:
+ stringValue: free
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asDouble: 0
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s2
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Preboot
+ - key: state
+ value:
+ stringValue: reserved
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asDouble: 4.99963174912e+11
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s2
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Preboot
+ - key: state
+ value:
+ stringValue: used
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asDouble: 4.046309679639759e+11
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s4s1
+ - key: mode
+ value:
+ stringValue: ro
+ - key: mountpoint
+ value:
+ stringValue: /
+ - key: state
+ value:
+ stringValue: free
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asDouble: 0
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s4s1
+ - key: mode
+ value:
+ stringValue: ro
+ - key: mountpoint
+ value:
+ stringValue: /
+ - key: state
+ value:
+ stringValue: reserved
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asDouble: 4.99963174912e+11
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s4s1
+ - key: mode
+ value:
+ stringValue: ro
+ - key: mountpoint
+ value:
+ stringValue: /
+ - key: state
+ value:
+ stringValue: used
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asDouble: 4.046309679639759e+11
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s5
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Update
+ - key: state
+ value:
+ stringValue: free
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asDouble: 0
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s5
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Update
+ - key: state
+ value:
+ stringValue: reserved
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asDouble: 4.99963174912e+11
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s5
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Update
+ - key: state
+ value:
+ stringValue: used
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asDouble: 4.046309679639759e+11
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s6
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/VM
+ - key: state
+ value:
+ stringValue: free
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asDouble: 0
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s6
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/VM
+ - key: state
+ value:
+ stringValue: reserved
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asDouble: 4.99963174912e+11
+ attributes:
+ - key: device
+ value:
+ stringValue: /dev/disk1s6
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/VM
+ - key: state
+ value:
+ stringValue: used
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asDouble: 0
+ attributes:
+ - key: device
+ value:
+ stringValue: devfs
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /dev
+ - key: state
+ value:
+ stringValue: free
+ - key: type
+ value:
+ stringValue: devfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asDouble: 0
+ attributes:
+ - key: device
+ value:
+ stringValue: devfs
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /dev
+ - key: state
+ value:
+ stringValue: reserved
+ - key: type
+ value:
+ stringValue: devfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asDouble: 356653.46247770725
+ attributes:
+ - key: device
+ value:
+ stringValue: devfs
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /dev
+ - key: state
+ value:
+ stringValue: used
+ - key: type
+ value:
+ stringValue: devfs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asDouble: 0
+ attributes:
+ - key: device
+ value:
+ stringValue: map auto_home
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Data/home
+ - key: state
+ value:
+ stringValue: free
+ - key: type
+ value:
+ stringValue: autofs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asDouble: 0
+ attributes:
+ - key: device
+ value:
+ stringValue: map auto_home
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Data/home
+ - key: state
+ value:
+ stringValue: reserved
+ - key: type
+ value:
+ stringValue: autofs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ - asDouble: 0
+ attributes:
+ - key: device
+ value:
+ stringValue: map auto_home
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: /System/Volumes/Data/home
+ - key: state
+ value:
+ stringValue: used
+ - key: type
+ value:
+ stringValue: autofs
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ unit: bytes
+ scope:
+ name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper
+ version: 0.110.0-dev
diff --git a/processor/metricsgenerationprocessor/testdata/filesystem_metrics_input.yaml b/processor/metricsgenerationprocessor/testdata/filesystem_metrics_input.yaml
new file mode 100644
index 000000000000..6e4fee3c6c5c
--- /dev/null
+++ b/processor/metricsgenerationprocessor/testdata/filesystem_metrics_input.yaml
@@ -0,0 +1,802 @@
+resourceMetrics:
+ - resource: {}
+ scopeMetrics:
+ - scope:
+ name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper
+ version: 0.110.0-dev
+ metrics:
+ - name: system.filesystem.inodes.usage
+ description: FileSystem inodes used.
+ unit: "{inodes}"
+ sum:
+ dataPoints:
+ - attributes:
+ - key: device
+ value:
+ stringValue: "/dev/disk1s4s1"
+ - key: mode
+ value:
+ stringValue: ro
+ - key: mountpoint
+ value:
+ stringValue: "/"
+ - key: type
+ value:
+ stringValue: apfs
+ - key: state
+ value:
+ stringValue: used
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '404475'
+ - attributes:
+ - key: device
+ value:
+ stringValue: "/dev/disk1s4s1"
+ - key: mode
+ value:
+ stringValue: ro
+ - key: mountpoint
+ value:
+ stringValue: "/"
+ - key: type
+ value:
+ stringValue: apfs
+ - key: state
+ value:
+ stringValue: free
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '2183953600'
+ - attributes:
+ - key: device
+ value:
+ stringValue: devfs
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/dev"
+ - key: type
+ value:
+ stringValue: devfs
+ - key: state
+ value:
+ stringValue: used
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '666'
+ - attributes:
+ - key: device
+ value:
+ stringValue: devfs
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/dev"
+ - key: type
+ value:
+ stringValue: devfs
+ - key: state
+ value:
+ stringValue: free
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '0'
+ - attributes:
+ - key: device
+ value:
+ stringValue: "/dev/disk1s2"
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/System/Volumes/Preboot"
+ - key: type
+ value:
+ stringValue: apfs
+ - key: state
+ value:
+ stringValue: used
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '1813'
+ - attributes:
+ - key: device
+ value:
+ stringValue: "/dev/disk1s2"
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/System/Volumes/Preboot"
+ - key: type
+ value:
+ stringValue: apfs
+ - key: state
+ value:
+ stringValue: free
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '2183953600'
+ - attributes:
+ - key: device
+ value:
+ stringValue: "/dev/disk1s6"
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/System/Volumes/VM"
+ - key: type
+ value:
+ stringValue: apfs
+ - key: state
+ value:
+ stringValue: used
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '4'
+ - attributes:
+ - key: device
+ value:
+ stringValue: "/dev/disk1s6"
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/System/Volumes/VM"
+ - key: type
+ value:
+ stringValue: apfs
+ - key: state
+ value:
+ stringValue: free
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '2183953600'
+ - attributes:
+ - key: device
+ value:
+ stringValue: "/dev/disk1s5"
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/System/Volumes/Update"
+ - key: type
+ value:
+ stringValue: apfs
+ - key: state
+ value:
+ stringValue: used
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '24'
+ - attributes:
+ - key: device
+ value:
+ stringValue: "/dev/disk1s5"
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/System/Volumes/Update"
+ - key: type
+ value:
+ stringValue: apfs
+ - key: state
+ value:
+ stringValue: free
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '2183953600'
+ - attributes:
+ - key: device
+ value:
+ stringValue: "/dev/disk1s1"
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/System/Volumes/Data"
+ - key: type
+ value:
+ stringValue: apfs
+ - key: state
+ value:
+ stringValue: used
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '4770142'
+ - attributes:
+ - key: device
+ value:
+ stringValue: "/dev/disk1s1"
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/System/Volumes/Data"
+ - key: type
+ value:
+ stringValue: apfs
+ - key: state
+ value:
+ stringValue: free
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '2183953600'
+ - attributes:
+ - key: device
+ value:
+ stringValue: map auto_home
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/System/Volumes/Data/home"
+ - key: type
+ value:
+ stringValue: autofs
+ - key: state
+ value:
+ stringValue: used
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '0'
+ - attributes:
+ - key: device
+ value:
+ stringValue: map auto_home
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/System/Volumes/Data/home"
+ - key: type
+ value:
+ stringValue: autofs
+ - key: state
+ value:
+ stringValue: free
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '0'
+ aggregationTemporality: 2
+ - name: system.filesystem.usage
+ description: Filesystem bytes used.
+ unit: By
+ sum:
+ dataPoints:
+ - attributes:
+ - key: device
+ value:
+ stringValue: "/dev/disk1s4s1"
+ - key: mode
+ value:
+ stringValue: ro
+ - key: mountpoint
+ value:
+ stringValue: "/"
+ - key: type
+ value:
+ stringValue: apfs
+ - key: state
+ value:
+ stringValue: used
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '276326326272'
+ - attributes:
+ - key: device
+ value:
+ stringValue: "/dev/disk1s4s1"
+ - key: mode
+ value:
+ stringValue: ro
+ - key: mountpoint
+ value:
+ stringValue: "/"
+ - key: type
+ value:
+ stringValue: apfs
+ - key: state
+ value:
+ stringValue: free
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '223636848640'
+ - attributes:
+ - key: device
+ value:
+ stringValue: "/dev/disk1s4s1"
+ - key: mode
+ value:
+ stringValue: ro
+ - key: mountpoint
+ value:
+ stringValue: "/"
+ - key: type
+ value:
+ stringValue: apfs
+ - key: state
+ value:
+ stringValue: reserved
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '0'
+ - attributes:
+ - key: device
+ value:
+ stringValue: devfs
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/dev"
+ - key: type
+ value:
+ stringValue: devfs
+ - key: state
+ value:
+ stringValue: used
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '197120'
+ - attributes:
+ - key: device
+ value:
+ stringValue: devfs
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/dev"
+ - key: type
+ value:
+ stringValue: devfs
+ - key: state
+ value:
+ stringValue: free
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '0'
+ - attributes:
+ - key: device
+ value:
+ stringValue: devfs
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/dev"
+ - key: type
+ value:
+ stringValue: devfs
+ - key: state
+ value:
+ stringValue: reserved
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '0'
+ - attributes:
+ - key: device
+ value:
+ stringValue: "/dev/disk1s2"
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/System/Volumes/Preboot"
+ - key: type
+ value:
+ stringValue: apfs
+ - key: state
+ value:
+ stringValue: used
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '276326326272'
+ - attributes:
+ - key: device
+ value:
+ stringValue: "/dev/disk1s2"
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/System/Volumes/Preboot"
+ - key: type
+ value:
+ stringValue: apfs
+ - key: state
+ value:
+ stringValue: free
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '223636848640'
+ - attributes:
+ - key: device
+ value:
+ stringValue: "/dev/disk1s2"
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/System/Volumes/Preboot"
+ - key: type
+ value:
+ stringValue: apfs
+ - key: state
+ value:
+ stringValue: reserved
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '0'
+ - attributes:
+ - key: device
+ value:
+ stringValue: "/dev/disk1s6"
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/System/Volumes/VM"
+ - key: type
+ value:
+ stringValue: apfs
+ - key: state
+ value:
+ stringValue: used
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '276326326272'
+ - attributes:
+ - key: device
+ value:
+ stringValue: "/dev/disk1s6"
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/System/Volumes/VM"
+ - key: type
+ value:
+ stringValue: apfs
+ - key: state
+ value:
+ stringValue: free
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '223636848640'
+ - attributes:
+ - key: device
+ value:
+ stringValue: "/dev/disk1s6"
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/System/Volumes/VM"
+ - key: type
+ value:
+ stringValue: apfs
+ - key: state
+ value:
+ stringValue: reserved
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '0'
+ - attributes:
+ - key: device
+ value:
+ stringValue: "/dev/disk1s5"
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/System/Volumes/Update"
+ - key: type
+ value:
+ stringValue: apfs
+ - key: state
+ value:
+ stringValue: used
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '276326326272'
+ - attributes:
+ - key: device
+ value:
+ stringValue: "/dev/disk1s5"
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/System/Volumes/Update"
+ - key: type
+ value:
+ stringValue: apfs
+ - key: state
+ value:
+ stringValue: free
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '223636848640'
+ - attributes:
+ - key: device
+ value:
+ stringValue: "/dev/disk1s5"
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/System/Volumes/Update"
+ - key: type
+ value:
+ stringValue: apfs
+ - key: state
+ value:
+ stringValue: reserved
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '0'
+ - attributes:
+ - key: device
+ value:
+ stringValue: "/dev/disk1s1"
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/System/Volumes/Data"
+ - key: type
+ value:
+ stringValue: apfs
+ - key: state
+ value:
+ stringValue: used
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '276326326272'
+ - attributes:
+ - key: device
+ value:
+ stringValue: "/dev/disk1s1"
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/System/Volumes/Data"
+ - key: type
+ value:
+ stringValue: apfs
+ - key: state
+ value:
+ stringValue: free
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '223636848640'
+ - attributes:
+ - key: device
+ value:
+ stringValue: "/dev/disk1s1"
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/System/Volumes/Data"
+ - key: type
+ value:
+ stringValue: apfs
+ - key: state
+ value:
+ stringValue: reserved
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '0'
+ - attributes:
+ - key: device
+ value:
+ stringValue: map auto_home
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/System/Volumes/Data/home"
+ - key: type
+ value:
+ stringValue: autofs
+ - key: state
+ value:
+ stringValue: used
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '0'
+ - attributes:
+ - key: device
+ value:
+ stringValue: map auto_home
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/System/Volumes/Data/home"
+ - key: type
+ value:
+ stringValue: autofs
+ - key: state
+ value:
+ stringValue: free
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '0'
+ - attributes:
+ - key: device
+ value:
+ stringValue: map auto_home
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/System/Volumes/Data/home"
+ - key: type
+ value:
+ stringValue: autofs
+ - key: state
+ value:
+ stringValue: reserved
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asInt: '0'
+ aggregationTemporality: 2
+ - name: system.filesystem.utilization
+ description: Fraction of filesystem bytes used.
+ unit: '1'
+ gauge:
+ dataPoints:
+ - attributes:
+ - key: device
+ value:
+ stringValue: "/dev/disk1s4s1"
+ - key: mode
+ value:
+ stringValue: ro
+ - key: mountpoint
+ value:
+ stringValue: "/"
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asDouble: 0.5526933585071281
+ - attributes:
+ - key: device
+ value:
+ stringValue: devfs
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/dev"
+ - key: type
+ value:
+ stringValue: devfs
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asDouble: 1
+ - attributes:
+ - key: device
+ value:
+ stringValue: "/dev/disk1s2"
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/System/Volumes/Preboot"
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asDouble: 0.5526933585071281
+ - attributes:
+ - key: device
+ value:
+ stringValue: "/dev/disk1s6"
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/System/Volumes/VM"
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asDouble: 0.5526933585071281
+ - attributes:
+ - key: device
+ value:
+ stringValue: "/dev/disk1s5"
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/System/Volumes/Update"
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asDouble: 0.5526933585071281
+ - attributes:
+ - key: device
+ value:
+ stringValue: "/dev/disk1s1"
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/System/Volumes/Data"
+ - key: type
+ value:
+ stringValue: apfs
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asDouble: 0.5526933585071281
+ - attributes:
+ - key: device
+ value:
+ stringValue: map auto_home
+ - key: mode
+ value:
+ stringValue: rw
+ - key: mountpoint
+ value:
+ stringValue: "/System/Volumes/Data/home"
+ - key: type
+ value:
+ stringValue: autofs
+ startTimeUnixNano: '1726497870000000000'
+ timeUnixNano: '1727303734559741000'
+ asDouble: 0
+ schemaUrl: https://opentelemetry.io/schemas/1.9.0
diff --git a/processor/metricsgenerationprocessor/testdata/metric_types/add_gauge_gauge_expected.yaml b/processor/metricsgenerationprocessor/testdata/metric_types/add_gauge_gauge_expected.yaml
new file mode 100644
index 000000000000..9bfcbb8c0c9f
--- /dev/null
+++ b/processor/metricsgenerationprocessor/testdata/metric_types/add_gauge_gauge_expected.yaml
@@ -0,0 +1,32 @@
+resourceMetrics:
+ - resource: {}
+ schemaUrl: https://opentelemetry.io/schemas/1.9.0
+ scopeMetrics:
+ - metrics:
+ - description: foo
+ name: sum
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "1000"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ unit: By
+ - description: bar
+ gauge:
+ dataPoints:
+ - asDouble: 50
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ name: gauge
+ unit: "1"
+ - gauge:
+ dataPoints:
+ - asDouble: 100
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ name: new_metric
+ unit: percent
+ scope:
+ name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper
+ version: latest
diff --git a/processor/metricsgenerationprocessor/testdata/metric_types/add_gauge_sum_expected.yaml b/processor/metricsgenerationprocessor/testdata/metric_types/add_gauge_sum_expected.yaml
new file mode 100644
index 000000000000..ee7c710d0414
--- /dev/null
+++ b/processor/metricsgenerationprocessor/testdata/metric_types/add_gauge_sum_expected.yaml
@@ -0,0 +1,32 @@
+resourceMetrics:
+ - resource: {}
+ schemaUrl: https://opentelemetry.io/schemas/1.9.0
+ scopeMetrics:
+ - metrics:
+ - description: foo
+ name: sum
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "1000"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ unit: By
+ - description: bar
+ gauge:
+ dataPoints:
+ - asDouble: 50
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ name: gauge
+ unit: "1"
+ - gauge:
+ dataPoints:
+ - asDouble: 1050
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ name: new_metric
+ unit: percent
+ scope:
+ name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper
+ version: latest
diff --git a/processor/metricsgenerationprocessor/testdata/metric_types/add_sum_gauge_expected.yaml b/processor/metricsgenerationprocessor/testdata/metric_types/add_sum_gauge_expected.yaml
new file mode 100644
index 000000000000..1769886855bb
--- /dev/null
+++ b/processor/metricsgenerationprocessor/testdata/metric_types/add_sum_gauge_expected.yaml
@@ -0,0 +1,32 @@
+resourceMetrics:
+ - resource: {}
+ schemaUrl: https://opentelemetry.io/schemas/1.9.0
+ scopeMetrics:
+ - metrics:
+ - description: foo
+ name: sum
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "1000"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ unit: By
+ - description: bar
+ gauge:
+ dataPoints:
+ - asDouble: 50
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ name: gauge
+ unit: "1"
+ - name: new_metric
+ sum:
+ dataPoints:
+ - asDouble: 1050
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ unit: percent
+ scope:
+ name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper
+ version: latest
diff --git a/processor/metricsgenerationprocessor/testdata/metric_types/add_sum_sum_expected.yaml b/processor/metricsgenerationprocessor/testdata/metric_types/add_sum_sum_expected.yaml
new file mode 100644
index 000000000000..a198c0c8a435
--- /dev/null
+++ b/processor/metricsgenerationprocessor/testdata/metric_types/add_sum_sum_expected.yaml
@@ -0,0 +1,32 @@
+resourceMetrics:
+ - resource: {}
+ schemaUrl: https://opentelemetry.io/schemas/1.9.0
+ scopeMetrics:
+ - metrics:
+ - description: foo
+ name: sum
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "1000"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ unit: By
+ - description: bar
+ gauge:
+ dataPoints:
+ - asDouble: 50
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ name: gauge
+ unit: "1"
+ - name: new_metric
+ sum:
+ dataPoints:
+ - asDouble: 2000
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ unit: percent
+ scope:
+ name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper
+ version: latest
diff --git a/processor/metricsgenerationprocessor/testdata/metric_types/divide_gauge_sum_expected.yaml b/processor/metricsgenerationprocessor/testdata/metric_types/divide_gauge_sum_expected.yaml
new file mode 100644
index 000000000000..dc7a2f34c64c
--- /dev/null
+++ b/processor/metricsgenerationprocessor/testdata/metric_types/divide_gauge_sum_expected.yaml
@@ -0,0 +1,32 @@
+resourceMetrics:
+ - resource: {}
+ schemaUrl: https://opentelemetry.io/schemas/1.9.0
+ scopeMetrics:
+ - metrics:
+ - description: foo
+ name: sum
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "1000"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ unit: By
+ - description: bar
+ gauge:
+ dataPoints:
+ - asDouble: 50
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ name: gauge
+ unit: "1"
+ - gauge:
+ dataPoints:
+ - asDouble: 0.05
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ name: new_metric
+ unit: percent
+ scope:
+ name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper
+ version: latest
diff --git a/processor/metricsgenerationprocessor/testdata/metric_types/divide_sum_gauge_expected.yaml b/processor/metricsgenerationprocessor/testdata/metric_types/divide_sum_gauge_expected.yaml
new file mode 100644
index 000000000000..1aea52d63388
--- /dev/null
+++ b/processor/metricsgenerationprocessor/testdata/metric_types/divide_sum_gauge_expected.yaml
@@ -0,0 +1,32 @@
+resourceMetrics:
+ - resource: {}
+ schemaUrl: https://opentelemetry.io/schemas/1.9.0
+ scopeMetrics:
+ - metrics:
+ - description: foo
+ name: sum
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "1000"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ unit: By
+ - description: bar
+ gauge:
+ dataPoints:
+ - asDouble: 50
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ name: gauge
+ unit: "1"
+ - name: new_metric
+ sum:
+ dataPoints:
+ - asDouble: 20
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ unit: percent
+ scope:
+ name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper
+ version: latest
diff --git a/processor/metricsgenerationprocessor/testdata/metric_types/gauge_sum_metrics_config.yaml b/processor/metricsgenerationprocessor/testdata/metric_types/gauge_sum_metrics_config.yaml
new file mode 100644
index 000000000000..9af299c78caa
--- /dev/null
+++ b/processor/metricsgenerationprocessor/testdata/metric_types/gauge_sum_metrics_config.yaml
@@ -0,0 +1,96 @@
+experimental_metricsgeneration/add_sum_sum:
+ rules:
+ - name: new_metric
+ unit: percent
+ type: calculate
+ metric1: sum
+ metric2: sum
+ operation: add
+experimental_metricsgeneration/add_gauge_gauge:
+ rules:
+ - name: new_metric
+ unit: percent
+ type: calculate
+ metric1: gauge
+ metric2: gauge
+ operation: add
+experimental_metricsgeneration/add_gauge_sum:
+ rules:
+ - name: new_metric
+ unit: percent
+ type: calculate
+ metric1: gauge
+ metric2: sum
+ operation: add
+experimental_metricsgeneration/add_sum_gauge:
+ rules:
+ - name: new_metric
+ unit: percent
+ type: calculate
+ metric1: sum
+ metric2: gauge
+ operation: add
+experimental_metricsgeneration/multiply_gauge_sum:
+ rules:
+ - name: new_metric
+ unit: percent
+ type: calculate
+ metric1: gauge
+ metric2: sum
+ operation: multiply
+experimental_metricsgeneration/multiply_sum_gauge:
+ rules:
+ - name: new_metric
+ unit: percent
+ type: calculate
+ metric1: sum
+ metric2: gauge
+ operation: multiply
+experimental_metricsgeneration/divide_gauge_sum:
+ rules:
+ - name: new_metric
+ unit: percent
+ type: calculate
+ metric1: gauge
+ metric2: sum
+ operation: divide
+experimental_metricsgeneration/divide_sum_gauge:
+ rules:
+ - name: new_metric
+ unit: percent
+ type: calculate
+ metric1: sum
+ metric2: gauge
+ operation: divide
+experimental_metricsgeneration/subtract_gauge_sum:
+ rules:
+ - name: new_metric
+ unit: percent
+ type: calculate
+ metric1: gauge
+ metric2: sum
+ operation: subtract
+experimental_metricsgeneration/subtract_sum_gauge:
+ rules:
+ - name: new_metric
+ unit: percent
+ type: calculate
+ metric1: sum
+ metric2: gauge
+ operation: subtract
+experimental_metricsgeneration/percent_gauge_sum:
+ rules:
+ - name: new_metric
+ unit: percent
+ type: calculate
+ metric1: gauge
+ metric2: sum
+ operation: percent
+experimental_metricsgeneration/percent_sum_gauge:
+ rules:
+ - name: new_metric
+ unit: percent
+ type: calculate
+ metric1: sum
+ metric2: gauge
+ operation: percent
\ No newline at end of file
diff --git a/processor/metricsgenerationprocessor/testdata/metric_types/gauge_sum_metrics_input.yaml b/processor/metricsgenerationprocessor/testdata/metric_types/gauge_sum_metrics_input.yaml
new file mode 100644
index 000000000000..cc63a8a46ead
--- /dev/null
+++ b/processor/metricsgenerationprocessor/testdata/metric_types/gauge_sum_metrics_input.yaml
@@ -0,0 +1,25 @@
+resourceMetrics:
+ - resource: {}
+ schemaUrl: https://opentelemetry.io/schemas/1.9.0
+ scopeMetrics:
+ - metrics:
+ - description: foo
+ name: sum
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "1000"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ unit: By
+ - description: bar
+ gauge:
+ dataPoints:
+ - asDouble: "50"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ name: gauge
+ unit: "1"
+ scope:
+ name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper
+ version: latest
diff --git a/processor/metricsgenerationprocessor/testdata/metric_types/multiply_gauge_sum_expected.yaml b/processor/metricsgenerationprocessor/testdata/metric_types/multiply_gauge_sum_expected.yaml
new file mode 100644
index 000000000000..dc7a2f34c64c
--- /dev/null
+++ b/processor/metricsgenerationprocessor/testdata/metric_types/multiply_gauge_sum_expected.yaml
@@ -0,0 +1,32 @@
+resourceMetrics:
+ - resource: {}
+ schemaUrl: https://opentelemetry.io/schemas/1.9.0
+ scopeMetrics:
+ - metrics:
+ - description: foo
+ name: sum
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "1000"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ unit: By
+ - description: bar
+ gauge:
+ dataPoints:
+ - asDouble: 50
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ name: gauge
+ unit: "1"
+ - gauge:
+ dataPoints:
+ - asDouble: 50000
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ name: new_metric
+ unit: percent
+ scope:
+ name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper
+ version: latest
diff --git a/processor/metricsgenerationprocessor/testdata/metric_types/multiply_sum_gauge_expected.yaml b/processor/metricsgenerationprocessor/testdata/metric_types/multiply_sum_gauge_expected.yaml
new file mode 100644
index 000000000000..1e6a0afba6c0
--- /dev/null
+++ b/processor/metricsgenerationprocessor/testdata/metric_types/multiply_sum_gauge_expected.yaml
@@ -0,0 +1,32 @@
+resourceMetrics:
+ - resource: {}
+ schemaUrl: https://opentelemetry.io/schemas/1.9.0
+ scopeMetrics:
+ - metrics:
+ - description: foo
+ name: sum
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "1000"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ unit: By
+ - description: bar
+ gauge:
+ dataPoints:
+ - asDouble: 50
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ name: gauge
+ unit: "1"
+ - name: new_metric
+ sum:
+ dataPoints:
+ - asDouble: 50000
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ unit: percent
+ scope:
+ name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper
+ version: latest
diff --git a/processor/metricsgenerationprocessor/testdata/metric_types/percent_gauge_sum_expected.yaml b/processor/metricsgenerationprocessor/testdata/metric_types/percent_gauge_sum_expected.yaml
new file mode 100644
index 000000000000..345f5a96576b
--- /dev/null
+++ b/processor/metricsgenerationprocessor/testdata/metric_types/percent_gauge_sum_expected.yaml
@@ -0,0 +1,32 @@
+resourceMetrics:
+ - resource: {}
+ schemaUrl: https://opentelemetry.io/schemas/1.9.0
+ scopeMetrics:
+ - metrics:
+ - description: foo
+ name: sum
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "1000"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ unit: By
+ - description: bar
+ gauge:
+ dataPoints:
+ - asDouble: 50
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ name: gauge
+ unit: "1"
+ - gauge:
+ dataPoints:
+ - asDouble: 5
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ name: new_metric
+ unit: percent
+ scope:
+ name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper
+ version: latest
diff --git a/processor/metricsgenerationprocessor/testdata/metric_types/percent_sum_gauge_expected.yaml b/processor/metricsgenerationprocessor/testdata/metric_types/percent_sum_gauge_expected.yaml
new file mode 100644
index 000000000000..a198c0c8a435
--- /dev/null
+++ b/processor/metricsgenerationprocessor/testdata/metric_types/percent_sum_gauge_expected.yaml
@@ -0,0 +1,32 @@
+resourceMetrics:
+ - resource: {}
+ schemaUrl: https://opentelemetry.io/schemas/1.9.0
+ scopeMetrics:
+ - metrics:
+ - description: foo
+ name: sum
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "1000"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ unit: By
+ - description: bar
+ gauge:
+ dataPoints:
+ - asDouble: 50
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ name: gauge
+ unit: "1"
+ - name: new_metric
+ sum:
+ dataPoints:
+ - asDouble: 2000
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ unit: percent
+ scope:
+ name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper
+ version: latest
diff --git a/processor/metricsgenerationprocessor/testdata/metric_types/subtract_gauge_sum_expected.yaml b/processor/metricsgenerationprocessor/testdata/metric_types/subtract_gauge_sum_expected.yaml
new file mode 100644
index 000000000000..d2ab7b52109f
--- /dev/null
+++ b/processor/metricsgenerationprocessor/testdata/metric_types/subtract_gauge_sum_expected.yaml
@@ -0,0 +1,32 @@
+resourceMetrics:
+ - resource: {}
+ schemaUrl: https://opentelemetry.io/schemas/1.9.0
+ scopeMetrics:
+ - metrics:
+ - description: foo
+ name: sum
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "1000"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ unit: By
+ - description: bar
+ gauge:
+ dataPoints:
+ - asDouble: 50
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ name: gauge
+ unit: "1"
+ - gauge:
+ dataPoints:
+ - asDouble: -950
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ name: new_metric
+ unit: percent
+ scope:
+ name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper
+ version: latest
diff --git a/processor/metricsgenerationprocessor/testdata/metric_types/subtract_sum_gauge_expected.yaml b/processor/metricsgenerationprocessor/testdata/metric_types/subtract_sum_gauge_expected.yaml
new file mode 100644
index 000000000000..190498b748f5
--- /dev/null
+++ b/processor/metricsgenerationprocessor/testdata/metric_types/subtract_sum_gauge_expected.yaml
@@ -0,0 +1,32 @@
+resourceMetrics:
+ - resource: {}
+ schemaUrl: https://opentelemetry.io/schemas/1.9.0
+ scopeMetrics:
+ - metrics:
+ - description: foo
+ name: sum
+ sum:
+ aggregationTemporality: 2
+ dataPoints:
+ - asInt: "1000"
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ unit: By
+ - description: bar
+ gauge:
+ dataPoints:
+ - asDouble: 50
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ name: gauge
+ unit: "1"
+ - name: new_metric
+ sum:
+ dataPoints:
+ - asDouble: 950
+ startTimeUnixNano: "1000000"
+ timeUnixNano: "2000000"
+ unit: percent
+ scope:
+ name: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper
+ version: latest
diff --git a/processor/metricsgenerationprocessor/utils.go b/processor/metricsgenerationprocessor/utils.go
index af613fdb0992..beb001a383b1 100644
--- a/processor/metricsgenerationprocessor/utils.go
+++ b/processor/metricsgenerationprocessor/utils.go
@@ -4,6 +4,8 @@
package metricsgenerationprocessor // import "github.com/open-telemetry/opentelemetry-collector-contrib/processor/metricsgenerationprocessor"
import (
+ "fmt"
+
"go.opentelemetry.io/collector/pdata/pmetric"
"go.uber.org/zap"
)
@@ -25,24 +27,31 @@ func getNameToMetricMap(rm pmetric.ResourceMetrics) map[string]pmetric.Metric {
// getMetricValue returns the value of the first data point from the given metric.
func getMetricValue(metric pmetric.Metric) float64 {
- if metric.Type() == pmetric.MetricTypeGauge {
- dataPoints := metric.Gauge().DataPoints()
- if dataPoints.Len() > 0 {
- switch dataPoints.At(0).ValueType() {
- case pmetric.NumberDataPointValueTypeDouble:
- return dataPoints.At(0).DoubleValue()
- case pmetric.NumberDataPointValueTypeInt:
- return float64(dataPoints.At(0).IntValue())
- }
- }
+ var dataPoints pmetric.NumberDataPointSlice
+
+ switch metricType := metric.Type(); metricType {
+ case pmetric.MetricTypeGauge:
+ dataPoints = metric.Gauge().DataPoints()
+ case pmetric.MetricTypeSum:
+ dataPoints = metric.Sum().DataPoints()
+ default:
return 0
}
+
+ if dataPoints.Len() > 0 {
+ switch dataPoints.At(0).ValueType() {
+ case pmetric.NumberDataPointValueTypeDouble:
+ return dataPoints.At(0).DoubleValue()
+ case pmetric.NumberDataPointValueTypeInt:
+ return float64(dataPoints.At(0).IntValue())
+ }
+ }
+
return 0
}
// generateMetrics creates a new metric based on the given rule and add it to the Resource Metric.
-// The value for newly calculated metrics is always a floting point number and the dataType is set
-// as MetricTypeDoubleGauge.
+// The value for newly calculated metrics is always a floating point number.
func generateMetrics(rm pmetric.ResourceMetrics, operand2 float64, rule internalRule, logger *zap.Logger) {
ilms := rm.ScopeMetrics()
for i := 0; i < ilms.Len(); i++ {
@@ -52,15 +61,27 @@ func generateMetrics(rm pmetric.ResourceMetrics, operand2 float64, rule internal
metric := metricSlice.At(j)
if metric.Name() == rule.metric1 {
newMetric := appendMetric(ilm, rule.name, rule.unit)
- newMetric.SetEmptyGauge()
- addDoubleGaugeDataPoints(metric, newMetric, operand2, rule.operation, logger)
+ addDoubleDataPoints(metric, newMetric, operand2, rule.operation, logger)
}
}
}
}
-func addDoubleGaugeDataPoints(from pmetric.Metric, to pmetric.Metric, operand2 float64, operation string, logger *zap.Logger) {
- dataPoints := from.Gauge().DataPoints()
+func addDoubleDataPoints(from pmetric.Metric, to pmetric.Metric, operand2 float64, operation string, logger *zap.Logger) {
+ var dataPoints pmetric.NumberDataPointSlice
+
+ switch metricType := from.Type(); metricType {
+ case pmetric.MetricTypeGauge:
+ to.SetEmptyGauge()
+ dataPoints = from.Gauge().DataPoints()
+ case pmetric.MetricTypeSum:
+ to.SetEmptySum()
+ dataPoints = from.Sum().DataPoints()
+ default:
+ logger.Debug(fmt.Sprintf("Calculations are only supported on gauge or sum metric types. Given metric '%s' is of type '%s'", from.Name(), metricType.String()))
+ return
+ }
+
for i := 0; i < dataPoints.Len(); i++ {
fromDataPoint := dataPoints.At(i)
var operand1 float64
@@ -71,7 +92,14 @@ func addDoubleGaugeDataPoints(from pmetric.Metric, to pmetric.Metric, operand2 f
operand1 = float64(fromDataPoint.IntValue())
}
- neweDoubleDataPoint := to.Gauge().DataPoints().AppendEmpty()
+ var neweDoubleDataPoint pmetric.NumberDataPoint
+ switch to.Type() {
+ case pmetric.MetricTypeGauge:
+ neweDoubleDataPoint = to.Gauge().DataPoints().AppendEmpty()
+ case pmetric.MetricTypeSum:
+ neweDoubleDataPoint = to.Sum().DataPoints().AppendEmpty()
+ }
+
fromDataPoint.CopyTo(neweDoubleDataPoint)
value := calculateValue(operand1, operand2, operation, logger, to.Name())
neweDoubleDataPoint.SetDoubleValue(value)
diff --git a/receiver/activedirectorydsreceiver/scraper_test.go b/receiver/activedirectorydsreceiver/scraper_test.go
index aca02e6aa5a3..041ab3abd4ef 100644
--- a/receiver/activedirectorydsreceiver/scraper_test.go
+++ b/receiver/activedirectorydsreceiver/scraper_test.go
@@ -73,8 +73,8 @@ func TestScrape(t *testing.T) {
scrapeData, err := scraper.scrape(context.Background())
require.Error(t, err)
require.True(t, scrapererror.IsPartialScrapeError(err))
- require.Contains(t, err.Error(), fullSyncObjectsRemainingErr.Error())
- require.Contains(t, err.Error(), draInboundValuesDNErr.Error())
+ require.ErrorContains(t, err, fullSyncObjectsRemainingErr.Error())
+ require.ErrorContains(t, err, draInboundValuesDNErr.Error())
expectedMetrics, err := golden.ReadMetrics(partialScrapePath)
require.NoError(t, err)
@@ -106,9 +106,8 @@ func TestScrape(t *testing.T) {
}
err = scraper.shutdown(context.Background())
- require.Error(t, err)
- require.Contains(t, err.Error(), fullSyncObjectsRemainingErr.Error())
- require.Contains(t, err.Error(), draInboundValuesDNErr.Error())
+ require.ErrorContains(t, err, fullSyncObjectsRemainingErr.Error())
+ require.ErrorContains(t, err, draInboundValuesDNErr.Error())
})
t.Run("Double shutdown does not error", func(t *testing.T) {
diff --git a/receiver/apachesparkreceiver/client_test.go b/receiver/apachesparkreceiver/client_test.go
index cd0a1f1665d7..956bccc8f4d0 100644
--- a/receiver/apachesparkreceiver/client_test.go
+++ b/receiver/apachesparkreceiver/client_test.go
@@ -55,7 +55,7 @@ func TestNewApacheSparkClient(t *testing.T) {
ac, err := newApacheSparkClient(context.Background(), tc.cfg, tc.host, tc.settings)
if tc.expectError != nil {
require.Nil(t, ac)
- require.Contains(t, err.Error(), tc.expectError.Error())
+ require.ErrorContains(t, err, tc.expectError.Error())
} else {
require.NoError(t, err)
diff --git a/receiver/awsxrayreceiver/internal/tracesegment/util_test.go b/receiver/awsxrayreceiver/internal/tracesegment/util_test.go
index e59ee4d7ab2e..913645ced4ee 100644
--- a/receiver/awsxrayreceiver/internal/tracesegment/util_test.go
+++ b/receiver/awsxrayreceiver/internal/tracesegment/util_test.go
@@ -52,7 +52,7 @@ func TestSplitHeaderBodyNonJsonHeader(t *testing.T) {
var errRecv *recvErr.ErrRecoverable
assert.ErrorAs(t, err, &errRecv, "should return recoverable error")
- assert.Contains(t, err.Error(), "invalid character 'o'")
+ assert.ErrorContains(t, err, "invalid character 'o'")
}
func TestSplitHeaderBodyEmptyBody(t *testing.T) {
@@ -76,7 +76,7 @@ func TestSplitHeaderBodyInvalidJsonHeader(t *testing.T) {
var errRecv *recvErr.ErrRecoverable
assert.ErrorAs(t, err, &errRecv, "should return recoverable error")
- assert.Contains(t, err.Error(),
+ assert.ErrorContains(t, err,
fmt.Sprintf("invalid header %+v", Header{
Format: "json",
Version: 20,
diff --git a/receiver/bigipreceiver/client_test.go b/receiver/bigipreceiver/client_test.go
index 9d5c12e5b0fd..7b8ec60db4d0 100644
--- a/receiver/bigipreceiver/client_test.go
+++ b/receiver/bigipreceiver/client_test.go
@@ -83,7 +83,7 @@ func TestNewClient(t *testing.T) {
ac, err := newClient(context.Background(), tc.cfg, tc.host, tc.settings, tc.logger)
if tc.expectError != nil {
require.Nil(t, ac)
- require.Contains(t, err.Error(), tc.expectError.Error())
+ require.ErrorContains(t, err, tc.expectError.Error())
} else {
require.NoError(t, err)
@@ -135,7 +135,7 @@ func TestGetNewToken(t *testing.T) {
tc := createTestClient(t, ts.URL)
err := tc.GetNewToken(context.Background())
- require.Contains(t, err.Error(), "failed to decode response payload")
+ require.ErrorContains(t, err, "failed to decode response payload")
hasToken := tc.HasToken()
require.False(t, hasToken)
},
@@ -215,7 +215,7 @@ func TestGetVirtualServers(t *testing.T) {
pools, err := tc.GetPools(context.Background())
require.Nil(t, pools)
- require.Contains(t, err.Error(), "failed to decode response payload")
+ require.ErrorContains(t, err, "failed to decode response payload")
},
},
{
@@ -413,7 +413,7 @@ func TestGetPools(t *testing.T) {
pools, err := tc.GetPools(context.Background())
require.Nil(t, pools)
- require.Contains(t, err.Error(), "failed to decode response payload")
+ require.ErrorContains(t, err, "failed to decode response payload")
},
},
{
@@ -666,7 +666,7 @@ func TestGetNodes(t *testing.T) {
nodes, err := tc.GetNodes(context.Background())
require.Nil(t, nodes)
- require.Contains(t, err.Error(), "failed to decode response payload")
+ require.ErrorContains(t, err, "failed to decode response payload")
},
},
{
diff --git a/receiver/cloudflarereceiver/logs_test.go b/receiver/cloudflarereceiver/logs_test.go
index 696a6b5f645b..7a60461d3247 100644
--- a/receiver/cloudflarereceiver/logs_test.go
+++ b/receiver/cloudflarereceiver/logs_test.go
@@ -132,7 +132,7 @@ func TestPayloadToLogRecord(t *testing.T) {
if tc.expectedErr != "" {
require.Error(t, err)
require.Nil(t, logs)
- require.Contains(t, err.Error(), tc.expectedErr)
+ require.ErrorContains(t, err, tc.expectedErr)
} else {
require.NoError(t, err)
require.NotNil(t, logs)
diff --git a/receiver/couchdbreceiver/client_test.go b/receiver/couchdbreceiver/client_test.go
index 3306e5a14501..4164f6721267 100644
--- a/receiver/couchdbreceiver/client_test.go
+++ b/receiver/couchdbreceiver/client_test.go
@@ -48,8 +48,7 @@ func TestNewCouchDBClient(t *testing.T) {
componenttest.NewNopHost(),
componenttest.NewNopTelemetrySettings())
- require.Error(t, err)
- require.Contains(t, err.Error(), "failed to create HTTP Client: ")
+ require.ErrorContains(t, err, "failed to create HTTP Client: ")
require.Nil(t, couchdbClient)
})
t.Run("no error", func(t *testing.T) {
@@ -87,27 +86,24 @@ func TestGet(t *testing.T) {
couchdbClient := defaultClient(t, url)
result, err := couchdbClient.Get(url)
- require.Error(t, err)
require.Nil(t, result)
- require.Contains(t, err.Error(), "invalid port ")
+ require.ErrorContains(t, err, "invalid port ")
})
t.Run("invalid endpoint", func(t *testing.T) {
url := ts.URL + "/invalid_endpoint"
couchdbClient := defaultClient(t, url)
result, err := couchdbClient.Get(url)
- require.Error(t, err)
require.Nil(t, result)
- require.Contains(t, err.Error(), "404 Not Found")
+ require.ErrorContains(t, err, "404 Not Found")
})
t.Run("invalid body", func(t *testing.T) {
url := ts.URL + "/invalid_body"
couchdbClient := defaultClient(t, url)
result, err := couchdbClient.Get(url)
- require.Error(t, err)
require.Nil(t, result)
- require.Contains(t, err.Error(), "failed to read response body ")
+ require.ErrorContains(t, err, "failed to read response body ")
})
t.Run("401 Unauthorized", func(t *testing.T) {
url := ts.URL + "/_node/_local/_stats/couchdb"
@@ -127,8 +123,7 @@ func TestGet(t *testing.T) {
result, err := couchdbClient.Get(url)
require.Nil(t, result)
- require.Error(t, err)
- require.Contains(t, err.Error(), "401 Unauthorized")
+ require.ErrorContains(t, err, "401 Unauthorized")
})
t.Run("no error", func(t *testing.T) {
url := ts.URL + "/_node/_local/_stats/couchdb"
diff --git a/receiver/dockerstatsreceiver/config_test.go b/receiver/dockerstatsreceiver/config_test.go
index c9008dcb61bc..1fd7602e06df 100644
--- a/receiver/dockerstatsreceiver/config_test.go
+++ b/receiver/dockerstatsreceiver/config_test.go
@@ -127,8 +127,7 @@ func TestApiVersionCustomError(t *testing.T) {
factory := NewFactory()
cfg := factory.CreateDefaultConfig()
err := sub.Unmarshal(cfg)
- require.Error(t, err)
- assert.Contains(t, err.Error(),
+ assert.ErrorContains(t, err,
`Hint: You may want to wrap the 'api_version' value in quotes (api_version: "1.40")`,
)
diff --git a/receiver/dockerstatsreceiver/receiver_test.go b/receiver/dockerstatsreceiver/receiver_test.go
index 099267467c5c..c610668aace9 100644
--- a/receiver/dockerstatsreceiver/receiver_test.go
+++ b/receiver/dockerstatsreceiver/receiver_test.go
@@ -150,13 +150,11 @@ func TestErrorsInStart(t *testing.T) {
cfg.Endpoint = "..not/a/valid/endpoint"
err := recv.start(context.Background(), componenttest.NewNopHost())
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "unable to parse docker host")
+ assert.ErrorContains(t, err, "unable to parse docker host")
cfg.Endpoint = unreachable
err = recv.start(context.Background(), componenttest.NewNopHost())
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "context deadline exceeded")
+ assert.ErrorContains(t, err, "context deadline exceeded")
}
func TestScrapeV2(t *testing.T) {
diff --git a/receiver/elasticsearchreceiver/client_test.go b/receiver/elasticsearchreceiver/client_test.go
index c9e5a2d49125..fcf64eebeac6 100644
--- a/receiver/elasticsearchreceiver/client_test.go
+++ b/receiver/elasticsearchreceiver/client_test.go
@@ -370,8 +370,7 @@ func TestDoRequest404(t *testing.T) {
require.NoError(t, err)
_, err = client.doRequest(context.Background(), "invalid_path")
- require.Error(t, err)
- require.Contains(t, err.Error(), "404")
+ require.ErrorContains(t, err, "404")
}
func TestIndexStatsNoPassword(t *testing.T) {
diff --git a/receiver/elasticsearchreceiver/config_test.go b/receiver/elasticsearchreceiver/config_test.go
index 1cc8c213c28a..885e540ddb1d 100644
--- a/receiver/elasticsearchreceiver/config_test.go
+++ b/receiver/elasticsearchreceiver/config_test.go
@@ -129,8 +129,7 @@ func TestValidateEndpoint(t *testing.T) {
case testCase.expectedErr != nil:
require.ErrorIs(t, err, testCase.expectedErr)
case testCase.expectedErrStr != "":
- require.Error(t, err)
- require.Contains(t, err.Error(), testCase.expectedErrStr)
+ require.ErrorContains(t, err, testCase.expectedErrStr)
default:
require.NoError(t, err)
}
diff --git a/receiver/elasticsearchreceiver/scraper_test.go b/receiver/elasticsearchreceiver/scraper_test.go
index c34601544200..38d3f0b74f82 100644
--- a/receiver/elasticsearchreceiver/scraper_test.go
+++ b/receiver/elasticsearchreceiver/scraper_test.go
@@ -257,7 +257,7 @@ func TestScrapingError(t *testing.T) {
_, err = sc.scrape(context.Background())
require.True(t, scrapererror.IsPartialScrapeError(err))
- require.Equal(t, err.Error(), err404.Error())
+ require.EqualError(t, err, err404.Error())
},
},
@@ -284,7 +284,7 @@ func TestScrapingError(t *testing.T) {
_, err = sc.scrape(context.Background())
require.True(t, scrapererror.IsPartialScrapeError(err))
- require.Equal(t, err.Error(), err404.Error())
+ require.EqualError(t, err, err404.Error())
},
},
@@ -311,8 +311,8 @@ func TestScrapingError(t *testing.T) {
sc.client = &mockClient
m, err := sc.scrape(context.Background())
- require.Contains(t, err.Error(), err404.Error())
- require.Contains(t, err.Error(), err500.Error())
+ require.ErrorContains(t, err, err404.Error())
+ require.ErrorContains(t, err, err500.Error())
require.Equal(t, 0, m.DataPointCount())
},
@@ -340,7 +340,7 @@ func TestScrapingError(t *testing.T) {
_, err = sc.scrape(context.Background())
require.True(t, scrapererror.IsPartialScrapeError(err))
- require.Contains(t, err.Error(), err404.Error())
+ require.ErrorContains(t, err, err404.Error())
},
},
{
@@ -366,8 +366,8 @@ func TestScrapingError(t *testing.T) {
sc.client = &mockClient
m, err := sc.scrape(context.Background())
- require.Contains(t, err.Error(), err404.Error())
- require.Contains(t, err.Error(), err500.Error())
+ require.ErrorContains(t, err, err404.Error())
+ require.ErrorContains(t, err, err500.Error())
require.Equal(t, 0, m.DataPointCount())
},
@@ -396,7 +396,7 @@ func TestScrapingError(t *testing.T) {
_, err = sc.scrape(context.Background())
require.True(t, scrapererror.IsPartialScrapeError(err))
- require.Contains(t, err.Error(), errUnknownClusterStatus.Error())
+ require.ErrorContains(t, err, errUnknownClusterStatus.Error())
},
},
}
diff --git a/receiver/flinkmetricsreceiver/client_test.go b/receiver/flinkmetricsreceiver/client_test.go
index 01a07ca4a6b4..2403e97c36c5 100644
--- a/receiver/flinkmetricsreceiver/client_test.go
+++ b/receiver/flinkmetricsreceiver/client_test.go
@@ -93,7 +93,7 @@ func TestNewClient(t *testing.T) {
ac, err := newClient(context.Background(), tc.cfg, tc.host, tc.settings, tc.logger)
if tc.expectError != nil {
require.Nil(t, ac)
- require.Contains(t, err.Error(), tc.expectError.Error())
+ require.ErrorContains(t, err, tc.expectError.Error())
} else {
require.NoError(t, err)
@@ -151,7 +151,7 @@ func TestGetJobmanagerMetrics(t *testing.T) {
metrics, err := tc.GetJobmanagerMetrics(context.Background())
require.Nil(t, metrics)
- require.Contains(t, err.Error(), "failed to unmarshal response body")
+ require.ErrorContains(t, err, "failed to unmarshal response body")
},
},
{
@@ -220,7 +220,7 @@ func TestGetTaskmanagersMetrics(t *testing.T) {
metrics, err := tc.GetTaskmanagersMetrics(context.Background())
require.Nil(t, metrics)
- require.Contains(t, err.Error(), "failed to unmarshal response body:")
+ require.ErrorContains(t, err, "failed to unmarshal response body:")
},
},
{
@@ -243,7 +243,7 @@ func TestGetTaskmanagersMetrics(t *testing.T) {
metrics, err := tc.GetTaskmanagersMetrics(context.Background())
require.Nil(t, metrics)
- require.Contains(t, err.Error(), "failed to unmarshal response body:")
+ require.ErrorContains(t, err, "failed to unmarshal response body:")
},
},
{
@@ -321,7 +321,7 @@ func TestGetJobsMetrics(t *testing.T) {
metrics, err := tc.GetJobsMetrics(context.Background())
require.Nil(t, metrics)
- require.Contains(t, err.Error(), "failed to unmarshal response body")
+ require.ErrorContains(t, err, "failed to unmarshal response body")
},
},
{
@@ -343,7 +343,7 @@ func TestGetJobsMetrics(t *testing.T) {
metrics, err := tc.GetJobsMetrics(context.Background())
require.Nil(t, metrics)
- require.Contains(t, err.Error(), "failed to unmarshal response body")
+ require.ErrorContains(t, err, "failed to unmarshal response body")
},
},
{
@@ -423,7 +423,7 @@ func TestGetSubtasksMetrics(t *testing.T) {
metrics, err := tc.GetSubtasksMetrics(context.Background())
require.Nil(t, metrics)
- require.Contains(t, err.Error(), "failed to unmarshal response body")
+ require.ErrorContains(t, err, "failed to unmarshal response body")
},
},
{
@@ -445,7 +445,7 @@ func TestGetSubtasksMetrics(t *testing.T) {
metrics, err := tc.GetSubtasksMetrics(context.Background())
require.Nil(t, metrics)
- require.Contains(t, err.Error(), "failed to unmarshal response body")
+ require.ErrorContains(t, err, "failed to unmarshal response body")
},
},
{
@@ -473,7 +473,7 @@ func TestGetSubtasksMetrics(t *testing.T) {
metrics, err := tc.GetSubtasksMetrics(context.Background())
require.Nil(t, metrics)
- require.Contains(t, err.Error(), "failed to unmarshal response body")
+ require.ErrorContains(t, err, "failed to unmarshal response body")
},
},
{
@@ -507,7 +507,7 @@ func TestGetSubtasksMetrics(t *testing.T) {
metrics, err := tc.GetSubtasksMetrics(context.Background())
require.Nil(t, metrics)
- require.Contains(t, err.Error(), "failed to unmarshal response body")
+ require.ErrorContains(t, err, "failed to unmarshal response body")
},
},
{
diff --git a/receiver/fluentforwardreceiver/conversion_test.go b/receiver/fluentforwardreceiver/conversion_test.go
index 2ffb6cdb1419..b566ad955641 100644
--- a/receiver/fluentforwardreceiver/conversion_test.go
+++ b/receiver/fluentforwardreceiver/conversion_test.go
@@ -188,8 +188,7 @@ func TestPackedForwardEventConversionWithErrors(t *testing.T) {
var event PackedForwardEventLogRecords
err := event.DecodeMsg(reader)
- require.Error(t, err)
- require.Contains(t, err.Error(), "gzip")
+ require.ErrorContains(t, err, "gzip")
fmt.Println(err.Error())
})
}
diff --git a/receiver/githubreceiver/config_test.go b/receiver/githubreceiver/config_test.go
index 3a6c26542c62..bd3d8ba21000 100644
--- a/receiver/githubreceiver/config_test.go
+++ b/receiver/githubreceiver/config_test.go
@@ -67,7 +67,7 @@ func TestLoadInvalidConfig_NoScrapers(t *testing.T) {
// nolint:staticcheck
_, err = otelcoltest.LoadConfigAndValidate(filepath.Join("testdata", "config-noscrapers.yaml"), factories)
- require.Contains(t, err.Error(), "must specify at least one scraper")
+ require.ErrorContains(t, err, "must specify at least one scraper")
}
func TestLoadInvalidConfig_InvalidScraperKey(t *testing.T) {
@@ -80,7 +80,7 @@ func TestLoadInvalidConfig_InvalidScraperKey(t *testing.T) {
// nolint:staticcheck
_, err = otelcoltest.LoadConfigAndValidate(filepath.Join("testdata", "config-invalidscraperkey.yaml"), factories)
- require.Contains(t, err.Error(), "error reading configuration for \"github\": invalid scraper key: \"invalidscraperkey\"")
+ require.ErrorContains(t, err, "error reading configuration for \"github\": invalid scraper key: \"invalidscraperkey\"")
}
func TestConfig_Unmarshal(t *testing.T) {
diff --git a/receiver/hostmetricsreceiver/README.md b/receiver/hostmetricsreceiver/README.md
index 1098fbe6f4ca..d4ab9b7e6a94 100644
--- a/receiver/hostmetricsreceiver/README.md
+++ b/receiver/hostmetricsreceiver/README.md
@@ -114,6 +114,7 @@ process:
:
names: [ , ... ]
match_type:
+ mute_process_all_errors:
mute_process_name_error:
mute_process_exe_error:
mute_process_io_error:
@@ -123,12 +124,12 @@ process:
```
The following settings are optional:
-
-- `mute_process_name_error` (default: false): mute the error encountered when trying to read a process name the collector does not have permission to read
-- `mute_process_io_error` (default: false): mute the error encountered when trying to read IO metrics of a process the collector does not have permission to read
-- `mute_process_cgroup_error` (default: false): mute the error encountered when trying to read the cgroup of a process the collector does not have permission to read
-- `mute_process_exe_error` (default: false): mute the error encountered when trying to read the executable path of a process the collector does not have permission to read (Linux only)
-- `mute_process_user_error` (default: false): mute the error encountered when trying to read a uid which doesn't exist on the system, eg. is owned by a user that only exists in a container.
+- `mute_process_all_errors` (default: false): mute all the errors encountered when trying to read metrics of a process. When this flag is enabled, there is no need to activate any other error suppression flags.
+- `mute_process_name_error` (default: false): mute the error encountered when trying to read a process name the collector does not have permission to read. This flag is ignored when `mute_process_all_errors` is set to true as all errors are muted.
+- `mute_process_io_error` (default: false): mute the error encountered when trying to read IO metrics of a process the collector does not have permission to read. This flag is ignored when `mute_process_all_errors` is set to true as all errors are muted.
+- `mute_process_cgroup_error` (default: false): mute the error encountered when trying to read the cgroup of a process the collector does not have permission to read. This flag is ignored when `mute_process_all_errors` is set to true as all errors are muted.
+- `mute_process_exe_error` (default: false): mute the error encountered when trying to read the executable path of a process the collector does not have permission to read (Linux only). This flag is ignored when `mute_process_all_errors` is set to true as all errors are muted.
+- `mute_process_user_error` (default: false): mute the error encountered when trying to read a uid which doesn't exist on the system, e.g. a uid owned by a user that only exists in a container. This flag is ignored when `mute_process_all_errors` is set to true as all errors are muted.
## Advanced Configuration
diff --git a/receiver/hostmetricsreceiver/config_test.go b/receiver/hostmetricsreceiver/config_test.go
index 61559ac81f90..fcb722db6817 100644
--- a/receiver/hostmetricsreceiver/config_test.go
+++ b/receiver/hostmetricsreceiver/config_test.go
@@ -134,7 +134,7 @@ func TestLoadInvalidConfig_NoScrapers(t *testing.T) {
// nolint:staticcheck
_, err = otelcoltest.LoadConfigAndValidate(filepath.Join("testdata", "config-noscrapers.yaml"), factories)
- require.Contains(t, err.Error(), "must specify at least one scraper when using hostmetrics receiver")
+ require.ErrorContains(t, err, "must specify at least one scraper when using hostmetrics receiver")
}
func TestLoadInvalidConfig_InvalidScraperKey(t *testing.T) {
@@ -147,5 +147,5 @@ func TestLoadInvalidConfig_InvalidScraperKey(t *testing.T) {
// nolint:staticcheck
_, err = otelcoltest.LoadConfigAndValidate(filepath.Join("testdata", "config-invalidscraperkey.yaml"), factories)
- require.Contains(t, err.Error(), "error reading configuration for \"hostmetrics\": invalid scraper key: invalidscraperkey")
+ require.ErrorContains(t, err, "error reading configuration for \"hostmetrics\": invalid scraper key: invalidscraperkey")
}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_test.go
index 726ec916b081..d438357c58e0 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_test.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/filesystemscraper/filesystem_scraper_test.go
@@ -383,7 +383,7 @@ func TestScrape(t *testing.T) {
md, err := scraper.scrape(context.Background())
if test.expectedErr != "" {
- assert.Contains(t, err.Error(), test.expectedErr)
+ assert.ErrorContains(t, err, test.expectedErr)
isPartial := scrapererror.IsPartialScrapeError(err)
assert.True(t, isPartial)
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/config.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/config.go
index 76003bf72ce0..60c3ea0b8535 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/config.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/config.go
@@ -22,29 +22,38 @@ type Config struct {
Include MatchConfig `mapstructure:"include"`
Exclude MatchConfig `mapstructure:"exclude"`
+ // MuteProcessAllErrors is a flag that will mute all the errors encountered when trying to read metrics of a process.
+ // When this flag is enabled, there is no need to activate any other error suppression flags.
+ MuteProcessAllErrors bool `mapstructure:"mute_process_all_errors,omitempty"`
+
// MuteProcessNameError is a flag that will mute the error encountered when trying to read a process name the
// collector does not have permission to read.
// See https://github.com/open-telemetry/opentelemetry-collector/issues/3004 for more information.
+ // This flag is ignored when MuteProcessAllErrors is set to true as all errors are muted.
MuteProcessNameError bool `mapstructure:"mute_process_name_error,omitempty"`
// MuteProcessIOError is a flag that will mute the error encountered when trying to read IO metrics of a process
// the collector does not have permission to read.
+ // This flag is ignored when MuteProcessAllErrors is set to true as all errors are muted.
MuteProcessIOError bool `mapstructure:"mute_process_io_error,omitempty"`
// MuteProcessCgroupError is a flag that will mute the error encountered when trying to read the cgroup of a process
// the collector does not have permission to read.
+ // This flag is ignored when MuteProcessAllErrors is set to true as all errors are muted.
MuteProcessCgroupError bool `mapstructure:"mute_process_cgroup_error,omitempty"`
// MuteProcessExeError is a flag that will mute the error encountered when trying to read the executable path of a process
- // the collector does not have permission to read (Linux)
+ // the collector does not have permission to read (Linux).
+ // This flag is ignored when MuteProcessAllErrors is set to true as all errors are muted.
MuteProcessExeError bool `mapstructure:"mute_process_exe_error,omitempty"`
// MuteProcessUserError is a flag that will mute the error encountered when trying to read uid which
- // doesn't exist on the system, eg. is owned by user existing in container only
+ // doesn't exist on the system, eg. is owned by user existing in container only.
+ // This flag is ignored when MuteProcessAllErrors is set to true as all errors are muted.
MuteProcessUserError bool `mapstructure:"mute_process_user_error,omitempty"`
// ScrapeProcessDelay is used to indicate the minimum amount of time a process must be running
- // before metrics are scraped for it. The default value is 0 seconds (0s)
+ // before metrics are scraped for it. The default value is 0 seconds (0s).
ScrapeProcessDelay time.Duration `mapstructure:"scrape_process_delay"`
}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go
index 3b9a4bbee701..0a918a112ba1 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper.go
@@ -187,6 +187,10 @@ func (s *scraper) scrape(ctx context.Context) (pmetric.Metrics, error) {
}
}
+ if s.config.MuteProcessAllErrors {
+ return s.mb.Emit(), nil
+ }
+
return s.mb.Emit(), errs.Combine()
}
diff --git a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_test.go b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_test.go
index b1a232e5d7cd..4d983b8a664e 100644
--- a/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_test.go
+++ b/receiver/hostmetricsreceiver/internal/scraper/processscraper/process_scraper_test.go
@@ -1003,6 +1003,7 @@ func TestScrapeMetrics_MuteErrorFlags(t *testing.T) {
muteProcessExeError bool
muteProcessIOError bool
muteProcessUserError bool
+ muteProcessAllErrors bool
skipProcessNameError bool
omitConfigField bool
expectedError string
@@ -1093,6 +1094,30 @@ func TestScrapeMetrics_MuteErrorFlags(t *testing.T) {
return 4
}(),
},
+ {
+ name: "All Process Errors Muted",
+ muteProcessNameError: false,
+ muteProcessExeError: false,
+ muteProcessIOError: false,
+ muteProcessUserError: false,
+ muteProcessAllErrors: true,
+ expectedCount: 0,
+ },
+ {
+ name: "Process User Error Enabled and All Process Errors Muted",
+ muteProcessUserError: false,
+ skipProcessNameError: true,
+ muteProcessExeError: true,
+ muteProcessNameError: true,
+ muteProcessAllErrors: true,
+ expectedCount: func() int {
+ if runtime.GOOS == "darwin" {
+ // disk.io is not collected on darwin
+ return 3
+ }
+ return 4
+ }(),
+ },
}
for _, test := range testCases {
@@ -1106,6 +1131,7 @@ func TestScrapeMetrics_MuteErrorFlags(t *testing.T) {
config.MuteProcessExeError = test.muteProcessExeError
config.MuteProcessIOError = test.muteProcessIOError
config.MuteProcessUserError = test.muteProcessUserError
+ config.MuteProcessAllErrors = test.muteProcessAllErrors
}
scraper, err := newProcessScraper(receivertest.NewNopSettings(), config)
require.NoError(t, err, "Failed to create process scraper: %v", err)
@@ -1135,7 +1161,7 @@ func TestScrapeMetrics_MuteErrorFlags(t *testing.T) {
assert.Equal(t, test.expectedCount, md.MetricCount())
- if config.MuteProcessNameError && config.MuteProcessExeError && config.MuteProcessUserError {
+ if (config.MuteProcessNameError && config.MuteProcessExeError && config.MuteProcessUserError) || config.MuteProcessAllErrors {
assert.NoError(t, err)
} else {
assert.EqualError(t, err, test.expectedError)
diff --git a/receiver/jmxreceiver/integration_test.go b/receiver/jmxreceiver/integration_test.go
index 67d2f95b37b3..79a9d14894b6 100644
--- a/receiver/jmxreceiver/integration_test.go
+++ b/receiver/jmxreceiver/integration_test.go
@@ -157,5 +157,5 @@ func TestJMXReceiverInvalidOTLPEndpointIntegration(t *testing.T) {
}()
err := receiver.Start(context.Background(), componenttest.NewNopHost())
- require.Contains(t, err.Error(), "listen tcp: lookup :")
+ require.ErrorContains(t, err, "listen tcp: lookup :")
}
diff --git a/receiver/jmxreceiver/receiver_test.go b/receiver/jmxreceiver/receiver_test.go
index ad0e970031ab..acf322ce65d4 100644
--- a/receiver/jmxreceiver/receiver_test.go
+++ b/receiver/jmxreceiver/receiver_test.go
@@ -181,8 +181,7 @@ func TestBuildOTLPReceiverInvalidEndpoints(t *testing.T) {
params := receivertest.NewNopSettings()
jmxReceiver := newJMXMetricReceiver(params, test.config, consumertest.NewNop())
otlpReceiver, err := jmxReceiver.buildOTLPReceiver()
- require.Error(t, err)
- require.Contains(t, err.Error(), test.expectedErr)
+ require.ErrorContains(t, err, test.expectedErr)
require.Nil(t, otlpReceiver)
})
}
diff --git a/receiver/kafkametricsreceiver/receiver_test.go b/receiver/kafkametricsreceiver/receiver_test.go
index efbc7527f71c..455cc797ae04 100644
--- a/receiver/kafkametricsreceiver/receiver_test.go
+++ b/receiver/kafkametricsreceiver/receiver_test.go
@@ -53,8 +53,7 @@ func TestNewReceiver_invalid_auth_error(t *testing.T) {
},
}
r, err := newMetricsReceiver(context.Background(), *c, receivertest.NewNopSettings(), nil)
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "failed to load TLS config")
+ assert.ErrorContains(t, err, "failed to load TLS config")
assert.Nil(t, r)
}
diff --git a/receiver/kafkareceiver/kafka_receiver_test.go b/receiver/kafkareceiver/kafka_receiver_test.go
index 2bc0f4ea57d8..9313ff805f1d 100644
--- a/receiver/kafkareceiver/kafka_receiver_test.go
+++ b/receiver/kafkareceiver/kafka_receiver_test.go
@@ -76,7 +76,7 @@ func TestNewTracesReceiver_err_auth_type(t *testing.T) {
require.NoError(t, err)
require.NotNil(t, r)
err = r.Start(context.Background(), componenttest.NewNopHost())
- assert.Contains(t, err.Error(), "failed to load TLS config")
+ assert.ErrorContains(t, err, "failed to load TLS config")
}
func TestNewTracesReceiver_initial_offset_err(t *testing.T) {
@@ -428,8 +428,7 @@ func TestNewMetricsExporter_err_auth_type(t *testing.T) {
require.NoError(t, err)
require.NotNil(t, r)
err = r.Start(context.Background(), componenttest.NewNopHost())
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "failed to load TLS config")
+ assert.ErrorContains(t, err, "failed to load TLS config")
}
func TestNewMetricsReceiver_initial_offset_err(t *testing.T) {
@@ -768,8 +767,7 @@ func TestNewLogsExporter_err_auth_type(t *testing.T) {
require.NoError(t, err)
require.NotNil(t, r)
err = r.Start(context.Background(), componenttest.NewNopHost())
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "failed to load TLS config")
+ assert.ErrorContains(t, err, "failed to load TLS config")
}
func TestNewLogsReceiver_initial_offset_err(t *testing.T) {
diff --git a/receiver/kubeletstatsreceiver/README.md b/receiver/kubeletstatsreceiver/README.md
index a250ba5dff37..65596786d9a0 100644
--- a/receiver/kubeletstatsreceiver/README.md
+++ b/receiver/kubeletstatsreceiver/README.md
@@ -285,3 +285,23 @@ rules:
resources: ["nodes/proxy"]
verbs: ["get"]
```
+
+### Warning about metrics' deprecation
+
+The following metrics will be renamed in a future version:
+- `k8s.node.cpu.utilization` (renamed to `k8s.node.cpu.usage`)
+- `k8s.pod.cpu.utilization` (renamed to `k8s.pod.cpu.usage`)
+- `container.cpu.utilization` (renamed to `container.cpu.usage`)
+
+The above metrics show usage counted in CPUs and it's not a percentage of used resources.
+These metrics were previously incorrectly named using the utilization term.
+
+#### `receiver.kubeletstats.enableCPUUsageMetrics` feature gate
+
+- alpha: when enabled it makes the `.cpu.usage` metrics enabled by default, disabling the `.cpu.utilization` metrics
+- beta: `.cpu.usage` metrics are enabled by default and any configuration enabling the deprecated `.cpu.utilization` metrics will fail. Explicitly disabling the feature gate provides the old (deprecated) behavior.
+- stable: `.cpu.usage` metrics are enabled by default and the deprecated metrics are completely removed.
+- removed three releases after stable.
+
+More information about the deprecation plan and
+the background reasoning can be found at https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/27885.
\ No newline at end of file
diff --git a/receiver/kubeletstatsreceiver/factory.go b/receiver/kubeletstatsreceiver/factory.go
index d5ed491f5a65..bc831b11a760 100644
--- a/receiver/kubeletstatsreceiver/factory.go
+++ b/receiver/kubeletstatsreceiver/factory.go
@@ -9,6 +9,7 @@ import (
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/consumer"
+ "go.opentelemetry.io/collector/featuregate"
"go.opentelemetry.io/collector/receiver"
"go.opentelemetry.io/collector/receiver/scraperhelper"
"go.uber.org/zap"
@@ -20,7 +21,16 @@ import (
)
const (
- metricGroupsConfig = "metric_groups"
+ metricGroupsConfig = "metric_groups"
+ enableCPUUsageMetricsFeatureFlag = "receiver.kubeletstats.enableCPUUsageMetrics"
+)
+
+var enableCPUUsageMetrics = featuregate.GlobalRegistry().MustRegister(
+ enableCPUUsageMetricsFeatureFlag,
+ featuregate.StageAlpha,
+	featuregate.WithRegisterDescription("When enabled the container.cpu.utilization, k8s.pod.cpu.utilization and k8s.node.cpu.utilization metrics will be replaced by the container.cpu.usage, k8s.pod.cpu.usage and k8s.node.cpu.usage metrics."),
+ featuregate.WithRegisterFromVersion("v0.110.0"),
+ featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/27885"),
)
var defaultMetricGroups = []kubelet.MetricGroup{
@@ -68,6 +78,31 @@ func createMetricsReceiver(
return nil, err
}
+ if enableCPUUsageMetrics.IsEnabled() {
+ if cfg.MetricsBuilderConfig.Metrics.ContainerCPUUtilization.Enabled {
+ cfg.MetricsBuilderConfig.Metrics.ContainerCPUUtilization.Enabled = false
+ cfg.MetricsBuilderConfig.Metrics.ContainerCPUUsage.Enabled = true
+ }
+ if cfg.MetricsBuilderConfig.Metrics.K8sPodCPUUtilization.Enabled {
+ cfg.MetricsBuilderConfig.Metrics.K8sPodCPUUtilization.Enabled = false
+ cfg.MetricsBuilderConfig.Metrics.K8sPodCPUUsage.Enabled = true
+ }
+ if cfg.MetricsBuilderConfig.Metrics.K8sNodeCPUUtilization.Enabled {
+ cfg.MetricsBuilderConfig.Metrics.K8sNodeCPUUtilization.Enabled = false
+ cfg.MetricsBuilderConfig.Metrics.K8sNodeCPUUsage.Enabled = true
+ }
+ } else {
+ if cfg.MetricsBuilderConfig.Metrics.ContainerCPUUtilization.Enabled {
+ set.Logger.Warn("The default metric container.cpu.utilization is being replaced by the container.cpu.usage metric. Switch now by enabling the receiver.kubeletstats.enableCPUUsageMetrics feature gate.")
+ }
+ if cfg.MetricsBuilderConfig.Metrics.K8sPodCPUUtilization.Enabled {
+ set.Logger.Warn("The default metric k8s.pod.cpu.utilization is being replaced by the k8s.pod.cpu.usage metric. Switch now by enabling the receiver.kubeletstats.enableCPUUsageMetrics feature gate.")
+ }
+ if cfg.MetricsBuilderConfig.Metrics.K8sNodeCPUUtilization.Enabled {
+ set.Logger.Warn("The default metric k8s.node.cpu.utilization is being replaced by the k8s.node.cpu.usage metric. Switch now by enabling the receiver.kubeletstats.enableCPUUsageMetrics feature gate.")
+ }
+ }
+
scrp, err := newKubletScraper(rest, set, rOptions, cfg.MetricsBuilderConfig, cfg.NodeName)
if err != nil {
return nil, err
diff --git a/receiver/kubeletstatsreceiver/go.mod b/receiver/kubeletstatsreceiver/go.mod
index 03b1346bda82..874dc66b9ac8 100644
--- a/receiver/kubeletstatsreceiver/go.mod
+++ b/receiver/kubeletstatsreceiver/go.mod
@@ -17,6 +17,7 @@ require (
go.opentelemetry.io/collector/confmap v1.16.0
go.opentelemetry.io/collector/consumer v0.110.0
go.opentelemetry.io/collector/consumer/consumertest v0.110.0
+ go.opentelemetry.io/collector/featuregate v1.16.0
go.opentelemetry.io/collector/filter v0.110.0
go.opentelemetry.io/collector/pdata v1.16.0
go.opentelemetry.io/collector/pipeline v0.110.0
@@ -56,6 +57,7 @@ require (
github.com/golang/snappy v0.0.4 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/gofuzz v1.2.0 // indirect
+ github.com/hashicorp/go-version v1.7.0 // indirect
github.com/imdario/mergo v0.3.11 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
diff --git a/receiver/kubeletstatsreceiver/go.sum b/receiver/kubeletstatsreceiver/go.sum
index afc2c26a24e7..aca29ff33744 100644
--- a/receiver/kubeletstatsreceiver/go.sum
+++ b/receiver/kubeletstatsreceiver/go.sum
@@ -173,6 +173,8 @@ github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
+github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
+github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
@@ -337,6 +339,8 @@ go.opentelemetry.io/collector/extension v0.110.0 h1:AYFk57W25f7xOo3I6pV0rWNWVtOL
go.opentelemetry.io/collector/extension v0.110.0/go.mod h1:zD/pw9o83SFyn/DCbBdBcH0eUPyGtYgpMSAOqotFYRc=
go.opentelemetry.io/collector/extension/auth v0.110.0 h1:9SHC2sF/KR99LciHABDXRIsXLiujzIjTJpWHO0V8Bqg=
go.opentelemetry.io/collector/extension/auth v0.110.0/go.mod h1:NjpHds6mjeT8Zn2KJVxZtV9c59AoIr6RlBha1RpmScQ=
+go.opentelemetry.io/collector/featuregate v1.16.0 h1:mNA/ga+5FErnbJ/47KsGsF+CWqgQSOxTIseo6WuMcsc=
+go.opentelemetry.io/collector/featuregate v1.16.0/go.mod h1:47xrISO71vJ83LSMm8+yIDsUbKktUp48Ovt7RR6VbRs=
go.opentelemetry.io/collector/filter v0.110.0 h1:yepvSd82x8gQwFaEb49AS2wOWdJxWVC+RCY5J+JMxgY=
go.opentelemetry.io/collector/filter v0.110.0/go.mod h1:cYdCpDj9WtHZkLtPCMsOWH9QFU686j8K93n2jO+mdjw=
go.opentelemetry.io/collector/internal/globalsignal v0.110.0 h1:S6bfFEiek8vJeXAbciWS7W8UR6ZrVJB3ftNyFTMHQaY=
diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go
index 7fc1c918ac0c..32883ff709d9 100644
--- a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go
+++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics.go
@@ -3102,10 +3102,10 @@ func WithStartTime(startTime pcommon.Timestamp) MetricBuilderOption {
func NewMetricsBuilder(mbc MetricsBuilderConfig, settings receiver.Settings, options ...MetricBuilderOption) *MetricsBuilder {
if mbc.Metrics.ContainerCPUUtilization.Enabled {
- settings.Logger.Warn("[WARNING] `container.cpu.utilization` should not be enabled: WARNING: This metric will be disabled in a future release. Use metric container.cpu.usage instead.")
+ settings.Logger.Warn("[WARNING] `container.cpu.utilization` should not be enabled: This metric will be disabled in a future release. Use metric container.cpu.usage instead.")
}
if mbc.Metrics.K8sNodeCPUUtilization.Enabled {
- settings.Logger.Warn("[WARNING] `k8s.node.cpu.utilization` should not be enabled: WARNING: This metric will be disabled in a future release. Use metric k8s.node.cpu.usage instead.")
+ settings.Logger.Warn("[WARNING] `k8s.node.cpu.utilization` should not be enabled: This metric will be disabled in a future release. Use metric k8s.node.cpu.usage instead.")
}
if mbc.Metrics.K8sPodCPUUtilization.Enabled {
settings.Logger.Warn("[WARNING] `k8s.pod.cpu.utilization` should not be enabled: This metric will be disabled in a future release. Use metric k8s.pod.cpu.usage instead.")
diff --git a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go
index 1f4325186b76..6a0606a51e85 100644
--- a/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go
+++ b/receiver/kubeletstatsreceiver/internal/metadata/generated_metrics_test.go
@@ -63,11 +63,11 @@ func TestMetricsBuilder(t *testing.T) {
expectedWarnings := 0
if tt.metricsSet == testDataSetDefault || tt.metricsSet == testDataSetAll {
- assert.Equal(t, "[WARNING] `container.cpu.utilization` should not be enabled: WARNING: This metric will be disabled in a future release. Use metric container.cpu.usage instead.", observedLogs.All()[expectedWarnings].Message)
+ assert.Equal(t, "[WARNING] `container.cpu.utilization` should not be enabled: This metric will be disabled in a future release. Use metric container.cpu.usage instead.", observedLogs.All()[expectedWarnings].Message)
expectedWarnings++
}
if tt.metricsSet == testDataSetDefault || tt.metricsSet == testDataSetAll {
- assert.Equal(t, "[WARNING] `k8s.node.cpu.utilization` should not be enabled: WARNING: This metric will be disabled in a future release. Use metric k8s.node.cpu.usage instead.", observedLogs.All()[expectedWarnings].Message)
+ assert.Equal(t, "[WARNING] `k8s.node.cpu.utilization` should not be enabled: This metric will be disabled in a future release. Use metric k8s.node.cpu.usage instead.", observedLogs.All()[expectedWarnings].Message)
expectedWarnings++
}
if tt.metricsSet == testDataSetDefault || tt.metricsSet == testDataSetAll {
diff --git a/receiver/kubeletstatsreceiver/metadata.yaml b/receiver/kubeletstatsreceiver/metadata.yaml
index 67b7c362221a..5f093e30ba7c 100644
--- a/receiver/kubeletstatsreceiver/metadata.yaml
+++ b/receiver/kubeletstatsreceiver/metadata.yaml
@@ -92,7 +92,7 @@ metrics:
enabled: true
description: "Node CPU utilization"
warnings:
- if_enabled: "WARNING: This metric will be disabled in a future release. Use metric k8s.node.cpu.usage instead."
+ if_enabled: "This metric will be disabled in a future release. Use metric k8s.node.cpu.usage instead."
unit: "1"
gauge:
value_type: double
@@ -364,7 +364,7 @@ metrics:
enabled: true
description: "Container CPU utilization"
warnings:
- if_enabled: "WARNING: This metric will be disabled in a future release. Use metric container.cpu.usage instead."
+ if_enabled: "This metric will be disabled in a future release. Use metric container.cpu.usage instead."
unit: "1"
gauge:
value_type: double
diff --git a/receiver/mongodbatlasreceiver/alerts_test.go b/receiver/mongodbatlasreceiver/alerts_test.go
index 95736392f955..df70651517cb 100644
--- a/receiver/mongodbatlasreceiver/alerts_test.go
+++ b/receiver/mongodbatlasreceiver/alerts_test.go
@@ -163,9 +163,8 @@ func TestPayloadToLogRecord(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
logs, err := payloadToLogs(now, []byte(tc.payload))
if tc.expectedErr != "" {
- require.Error(t, err)
require.Nil(t, logs)
- require.Contains(t, err.Error(), tc.expectedErr)
+ require.ErrorContains(t, err, tc.expectedErr)
} else {
require.NoError(t, err)
require.NotNil(t, logs)
@@ -240,8 +239,7 @@ func TestVerifyHMACSignature(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
err := verifyHMACSignature(tc.secret, tc.payload, tc.signatureHeader)
if tc.expectedErr != "" {
- require.Error(t, err)
- require.Contains(t, err.Error(), tc.expectedErr)
+ require.ErrorContains(t, err, tc.expectedErr)
} else {
require.NoError(t, err)
}
diff --git a/receiver/mongodbatlasreceiver/config_test.go b/receiver/mongodbatlasreceiver/config_test.go
index e9ff7777cf5b..a2932e213306 100644
--- a/receiver/mongodbatlasreceiver/config_test.go
+++ b/receiver/mongodbatlasreceiver/config_test.go
@@ -340,8 +340,7 @@ func TestValidate(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
err := tc.input.Validate()
if tc.expectedErr != "" {
- require.Error(t, err)
- require.Contains(t, err.Error(), tc.expectedErr)
+ require.ErrorContains(t, err, tc.expectedErr)
} else {
require.NoError(t, err)
}
diff --git a/receiver/mongodbreceiver/client_test.go b/receiver/mongodbreceiver/client_test.go
index 5f4fdc24cd7d..f6ed99ebfcc2 100644
--- a/receiver/mongodbreceiver/client_test.go
+++ b/receiver/mongodbreceiver/client_test.go
@@ -229,8 +229,7 @@ func TestGetVersionFailures(t *testing.T) {
}
_, err := client.GetVersion(context.TODO())
- require.Error(t, err)
- require.Contains(t, err.Error(), tc.partialError)
+ require.ErrorContains(t, err, tc.partialError)
})
}
diff --git a/receiver/mongodbreceiver/config_test.go b/receiver/mongodbreceiver/config_test.go
index 011283d5c476..b5c3bced8297 100644
--- a/receiver/mongodbreceiver/config_test.go
+++ b/receiver/mongodbreceiver/config_test.go
@@ -94,7 +94,7 @@ func TestValidate(t *testing.T) {
if tc.expected == nil {
require.NoError(t, err)
} else {
- require.Contains(t, err.Error(), tc.expected.Error())
+ require.ErrorContains(t, err, tc.expected.Error())
}
})
}
diff --git a/receiver/otelarrowreceiver/internal/arrow/arrow_test.go b/receiver/otelarrowreceiver/internal/arrow/arrow_test.go
index 11621acbba5c..9a11185ee24a 100644
--- a/receiver/otelarrowreceiver/internal/arrow/arrow_test.go
+++ b/receiver/otelarrowreceiver/internal/arrow/arrow_test.go
@@ -591,8 +591,7 @@ func TestReceiverRecvError(t *testing.T) {
ctc.putBatch(nil, fmt.Errorf("test recv error"))
err := ctc.wait()
- require.Error(t, err)
- require.Contains(t, err.Error(), "test recv error")
+ require.ErrorContains(t, err, "test recv error")
}
func TestReceiverSendError(t *testing.T) {
diff --git a/receiver/podmanreceiver/podman_test.go b/receiver/podmanreceiver/podman_test.go
index c7523936778a..3cb648587ef8 100644
--- a/receiver/podmanreceiver/podman_test.go
+++ b/receiver/podmanreceiver/podman_test.go
@@ -92,8 +92,7 @@ func TestWatchingTimeouts(t *testing.T) {
defer fetchCancel()
container, err := cli.fetchContainerStats(ctx, container{})
- require.Error(t, err)
- assert.Contains(t, err.Error(), expectedError)
+ assert.ErrorContains(t, err, expectedError)
assert.Empty(t, container)
assert.GreaterOrEqual(
diff --git a/receiver/prometheusreceiver/internal/transaction_test.go b/receiver/prometheusreceiver/internal/transaction_test.go
index 6b72d14d2422..f15b06e402cb 100644
--- a/receiver/prometheusreceiver/internal/transaction_test.go
+++ b/receiver/prometheusreceiver/internal/transaction_test.go
@@ -305,8 +305,7 @@ func testTransactionAppendDuplicateLabels(t *testing.T, enableNativeHistograms b
)
_, err := tr.Append(0, dupLabels, 1917, 1.0)
- require.Error(t, err)
- assert.Contains(t, err.Error(), `invalid sample: non-unique label names: "a"`)
+ assert.ErrorContains(t, err, `invalid sample: non-unique label names: "a"`)
}
func TestTransactionAppendHistogramNoLe(t *testing.T) {
@@ -539,8 +538,7 @@ func testAppendExemplarWithDuplicateLabels(t *testing.T, enableNativeHistograms
"a", "c",
)
_, err := tr.AppendExemplar(0, labels, exemplar.Exemplar{Value: 0})
- require.Error(t, err)
- assert.Contains(t, err.Error(), `invalid sample: non-unique label names: "a"`)
+ assert.ErrorContains(t, err, `invalid sample: non-unique label names: "a"`)
}
func TestAppendExemplarWithoutAddingMetric(t *testing.T) {
diff --git a/receiver/rabbitmqreceiver/client_test.go b/receiver/rabbitmqreceiver/client_test.go
index e6be65b156d6..8952e8792297 100644
--- a/receiver/rabbitmqreceiver/client_test.go
+++ b/receiver/rabbitmqreceiver/client_test.go
@@ -74,7 +74,7 @@ func TestNewClient(t *testing.T) {
ac, err := newClient(context.Background(), tc.cfg, tc.host, tc.settings, tc.logger)
if tc.expectError != nil {
require.Nil(t, ac)
- require.Contains(t, err.Error(), tc.expectError.Error())
+ require.ErrorContains(t, err, tc.expectError.Error())
} else {
require.NoError(t, err)
@@ -126,7 +126,7 @@ func TestGetQueuesDetails(t *testing.T) {
clusters, err := tc.GetQueues(context.Background())
require.Nil(t, clusters)
- require.Contains(t, err.Error(), "failed to decode response payload")
+ require.ErrorContains(t, err, "failed to decode response payload")
},
},
{
diff --git a/receiver/receivercreator/config_test.go b/receiver/receivercreator/config_test.go
index c58700c35cfc..2e491053edbe 100644
--- a/receiver/receivercreator/config_test.go
+++ b/receiver/receivercreator/config_test.go
@@ -147,7 +147,7 @@ func TestInvalidResourceAttributeEndpointType(t *testing.T) {
// https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33594
// nolint:staticcheck
cfg, err := otelcoltest.LoadConfigAndValidate(filepath.Join("testdata", "invalid-resource-attributes.yaml"), factories)
- require.Contains(t, err.Error(), "error reading configuration for \"receiver_creator\": resource attributes for unsupported endpoint type \"not.a.real.type\"")
+ require.ErrorContains(t, err, "error reading configuration for \"receiver_creator\": resource attributes for unsupported endpoint type \"not.a.real.type\"")
require.Nil(t, cfg)
}
@@ -162,7 +162,7 @@ func TestInvalidReceiverResourceAttributeValueType(t *testing.T) {
// https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/33594
// nolint:staticcheck
cfg, err := otelcoltest.LoadConfigAndValidate(filepath.Join("testdata", "invalid-receiver-resource-attributes.yaml"), factories)
- require.Contains(t, err.Error(), "error reading configuration for \"receiver_creator\": unsupported `resource_attributes` \"one\" value in examplereceiver/1")
+ require.ErrorContains(t, err, "error reading configuration for \"receiver_creator\": unsupported `resource_attributes` \"one\" value in examplereceiver/1")
require.Nil(t, cfg)
}
diff --git a/receiver/redisreceiver/redis_scraper_test.go b/receiver/redisreceiver/redis_scraper_test.go
index 4429502190b6..616f975768ef 100644
--- a/receiver/redisreceiver/redis_scraper_test.go
+++ b/receiver/redisreceiver/redis_scraper_test.go
@@ -39,8 +39,7 @@ func TestRedisRunnable(t *testing.T) {
func TestNewReceiver_invalid_endpoint(t *testing.T) {
c := createDefaultConfig().(*Config)
_, err := createMetricsReceiver(context.Background(), receivertest.NewNopSettings(), c, nil)
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "invalid endpoint")
+ assert.ErrorContains(t, err, "invalid endpoint")
}
func TestNewReceiver_invalid_auth_error(t *testing.T) {
@@ -51,7 +50,6 @@ func TestNewReceiver_invalid_auth_error(t *testing.T) {
},
}
r, err := createMetricsReceiver(context.Background(), receivertest.NewNopSettings(), c, nil)
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "failed to load TLS config")
+ assert.ErrorContains(t, err, "failed to load TLS config")
assert.Nil(t, r)
}
diff --git a/receiver/riakreceiver/client_test.go b/receiver/riakreceiver/client_test.go
index 9ebe35826978..6730fc71cccc 100644
--- a/receiver/riakreceiver/client_test.go
+++ b/receiver/riakreceiver/client_test.go
@@ -64,7 +64,7 @@ func TestNewClient(t *testing.T) {
ac, err := newClient(context.Background(), tc.cfg, componenttest.NewNopHost(), componenttest.NewNopTelemetrySettings(), zap.NewNop())
if tc.expectError != nil {
require.Nil(t, ac)
- require.Contains(t, err.Error(), tc.expectError.Error())
+ require.ErrorContains(t, err, tc.expectError.Error())
} else {
require.NoError(t, err)
diff --git a/receiver/saphanareceiver/client_test.go b/receiver/saphanareceiver/client_test.go
index 802650a93e82..045a7011f884 100644
--- a/receiver/saphanareceiver/client_test.go
+++ b/receiver/saphanareceiver/client_test.go
@@ -211,7 +211,7 @@ func TestNullOutput(t *testing.T) {
results, err := client.collectDataFromQuery(context.TODO(), query)
// Error expected for second row, but data is also returned
- require.Contains(t, err.Error(), "database row NULL value for required metric label id")
+ require.ErrorContains(t, err, "database row NULL value for required metric label id")
require.Equal(t, []map[string]string{
{
"id": "my_id",
diff --git a/receiver/snmpreceiver/client_test.go b/receiver/snmpreceiver/client_test.go
index b4eb5ffc0eab..8cfb26ad1f09 100644
--- a/receiver/snmpreceiver/client_test.go
+++ b/receiver/snmpreceiver/client_test.go
@@ -67,7 +67,7 @@ func TestNewClient(t *testing.T) {
ac, err := newClient(tc.cfg, tc.logger)
if tc.expectError != nil {
require.Nil(t, ac)
- require.Contains(t, err.Error(), tc.expectError.Error())
+ require.ErrorContains(t, err, tc.expectError.Error())
} else {
require.NoError(t, err)
diff --git a/receiver/snowflakereceiver/config_test.go b/receiver/snowflakereceiver/config_test.go
index 6c4421afa0bc..77ccd5ab6480 100644
--- a/receiver/snowflakereceiver/config_test.go
+++ b/receiver/snowflakereceiver/config_test.go
@@ -97,8 +97,7 @@ func TestValidateConfig(t *testing.T) {
t.Parallel()
err := test.conf.Validate()
- require.Error(t, err)
- require.Contains(t, err.Error(), test.expect.Error())
+ require.ErrorContains(t, err, test.expect.Error())
})
}
}
diff --git a/receiver/solacereceiver/unmarshaller_test.go b/receiver/solacereceiver/unmarshaller_test.go
index 72fb828ed5b0..157ec31729f3 100644
--- a/receiver/solacereceiver/unmarshaller_test.go
+++ b/receiver/solacereceiver/unmarshaller_test.go
@@ -327,8 +327,7 @@ func TestSolaceMessageUnmarshallerUnmarshal(t *testing.T) {
u := newTracesUnmarshaller(zap.NewNop(), telemetryBuilder, metricAttr)
traces, err := u.unmarshal(tt.message)
if tt.err != nil {
- require.Error(t, err)
- assert.Contains(t, err.Error(), tt.err.Error())
+ assert.ErrorContains(t, err, tt.err.Error())
} else {
assert.NoError(t, err)
}
diff --git a/receiver/sqlqueryreceiver/integration_test.go b/receiver/sqlqueryreceiver/integration_test.go
index 801625c19f20..7a8bc3ce2455 100644
--- a/receiver/sqlqueryreceiver/integration_test.go
+++ b/receiver/sqlqueryreceiver/integration_test.go
@@ -125,6 +125,53 @@ func TestPostgresIntegrationLogsTrackingWithoutStorage(t *testing.T) {
testAllSimpleLogs(t, consumer.AllLogs())
}
+func TestPostgresIntegrationLogsTrackingByTimestampColumnWithoutStorage(t *testing.T) {
+ // Start Postgres container.
+ externalPort := "15432"
+ dbContainer := startPostgresDbContainer(t, externalPort)
+ defer func() {
+ require.NoError(t, dbContainer.Terminate(context.Background()))
+ }()
+
+ // Start the SQL Query receiver.
+ receiverCreateSettings := receivertest.NewNopSettings()
+ receiver, config, consumer := createTestLogsReceiverForPostgres(t, externalPort, receiverCreateSettings)
+ config.CollectionInterval = 100 * time.Millisecond
+ config.Queries = []sqlquery.Query{
+ {
+ SQL: "select * from simple_logs where insert_time > $1 order by insert_time asc",
+ Logs: []sqlquery.LogsCfg{
+ {
+ BodyColumn: "body",
+ AttributeColumns: []string{"attribute"},
+ },
+ },
+ TrackingColumn: "insert_time",
+ TrackingStartValue: "2022-06-03 21:00:00+00",
+ },
+ }
+ host := componenttest.NewNopHost()
+ err := receiver.Start(context.Background(), host)
+ require.NoError(t, err)
+
+ // Verify that exactly 5 logs are received.
+ require.Eventuallyf(
+ t,
+ func() bool {
+ return consumer.LogRecordCount() > 0
+ },
+ 1*time.Minute,
+ 500*time.Millisecond,
+ "failed to receive more than 0 logs",
+ )
+ require.Equal(t, 5, consumer.LogRecordCount())
+ testAllSimpleLogs(t, consumer.AllLogs())
+
+ // Stop the SQL Query receiver.
+ err = receiver.Shutdown(context.Background())
+ require.NoError(t, err)
+}
+
func TestPostgresIntegrationLogsTrackingWithStorage(t *testing.T) {
// start Postgres container
externalPort := "15431"
diff --git a/receiver/sqlqueryreceiver/testdata/integration/mysql/init.sql b/receiver/sqlqueryreceiver/testdata/integration/mysql/init.sql
index 7c562c635294..e1ac6a2e21ad 100644
--- a/receiver/sqlqueryreceiver/testdata/integration/mysql/init.sql
+++ b/receiver/sqlqueryreceiver/testdata/integration/mysql/init.sql
@@ -29,7 +29,7 @@ create table simple_logs
insert into simple_logs (id, insert_time, body, attribute) values
(1, '2022-06-03 21:59:26', '- - - [03/Jun/2022:21:59:26 +0000] "GET /api/health HTTP/1.1" 200 6197 4 "-" "-" 445af8e6c428303f -', 'TLSv1.2'),
-(2, '2022-06-03 21:59:26', '- - - [03/Jun/2022:21:59:26 +0000] "GET /api/health HTTP/1.1" 200 6205 5 "-" "-" 3285f43cd4baa202 -', 'TLSv1'),
-(3, '2022-06-03 21:59:29', '- - - [03/Jun/2022:21:59:29 +0000] "GET /api/health HTTP/1.1" 200 6233 4 "-" "-" 579e8362d3185b61 -', 'TLSv1.2'),
+(2, '2022-06-03 21:59:26.692991', '- - - [03/Jun/2022:21:59:26 +0000] "GET /api/health HTTP/1.1" 200 6205 5 "-" "-" 3285f43cd4baa202 -', 'TLSv1'),
+(3, '2022-06-03 21:59:29.212212', '- - - [03/Jun/2022:21:59:29 +0000] "GET /api/health HTTP/1.1" 200 6233 4 "-" "-" 579e8362d3185b61 -', 'TLSv1.2'),
(4, '2022-06-03 21:59:31', '- - - [03/Jun/2022:21:59:31 +0000] "GET /api/health HTTP/1.1" 200 6207 5 "-" "-" 8c6ac61ae66e509f -', 'TLSv1'),
-(5, '2022-06-03 21:59:31', '- - - [03/Jun/2022:21:59:31 +0000] "GET /api/health HTTP/1.1" 200 6200 4 "-" "-" c163495861e873d8 -', 'TLSv1.2');
+(5, '2022-06-03 21:59:31.332121', '- - - [03/Jun/2022:21:59:31 +0000] "GET /api/health HTTP/1.1" 200 6200 4 "-" "-" c163495861e873d8 -', 'TLSv1.2');
diff --git a/receiver/sqlqueryreceiver/testdata/integration/oracle/init.sql b/receiver/sqlqueryreceiver/testdata/integration/oracle/init.sql
index 213f3beebac1..8597cfa846b6 100644
--- a/receiver/sqlqueryreceiver/testdata/integration/oracle/init.sql
+++ b/receiver/sqlqueryreceiver/testdata/integration/oracle/init.sql
@@ -35,10 +35,10 @@ grant select on simple_logs to otel;
insert into simple_logs (id, insert_time, body, attribute) values
(1, TIMESTAMP '2022-06-03 21:59:26 +00:00', '- - - [03/Jun/2022:21:59:26 +0000] "GET /api/health HTTP/1.1" 200 6197 4 "-" "-" 445af8e6c428303f -', 'TLSv1.2');
insert into simple_logs (id, insert_time, body, attribute) values
-(2, TIMESTAMP '2022-06-03 21:59:26 +00:00', '- - - [03/Jun/2022:21:59:26 +0000] "GET /api/health HTTP/1.1" 200 6205 5 "-" "-" 3285f43cd4baa202 -', 'TLSv1');
+(2, TIMESTAMP '2022-06-03 21:59:26.692991 +00:00', '- - - [03/Jun/2022:21:59:26 +0000] "GET /api/health HTTP/1.1" 200 6205 5 "-" "-" 3285f43cd4baa202 -', 'TLSv1');
insert into simple_logs (id, insert_time, body, attribute) values
-(3, TIMESTAMP '2022-06-03 21:59:29 +00:00', '- - - [03/Jun/2022:21:59:29 +0000] "GET /api/health HTTP/1.1" 200 6233 4 "-" "-" 579e8362d3185b61 -', 'TLSv1.2');
+(3, TIMESTAMP '2022-06-03 21:59:29.212212 +00:00', '- - - [03/Jun/2022:21:59:29 +0000] "GET /api/health HTTP/1.1" 200 6233 4 "-" "-" 579e8362d3185b61 -', 'TLSv1.2');
insert into simple_logs (id, insert_time, body, attribute) values
(4, TIMESTAMP '2022-06-03 21:59:31 +00:00', '- - - [03/Jun/2022:21:59:31 +0000] "GET /api/health HTTP/1.1" 200 6207 5 "-" "-" 8c6ac61ae66e509f -', 'TLSv1');
insert into simple_logs (id, insert_time, body, attribute) values
-(5, TIMESTAMP '2022-06-03 21:59:31 +00:00', '- - - [03/Jun/2022:21:59:31 +0000] "GET /api/health HTTP/1.1" 200 6200 4 "-" "-" c163495861e873d8 -', 'TLSv1.2');
+(5, TIMESTAMP '2022-06-03 21:59:31.332121 +00:00', '- - - [03/Jun/2022:21:59:31 +0000] "GET /api/health HTTP/1.1" 200 6200 4 "-" "-" c163495861e873d8 -', 'TLSv1.2');
diff --git a/receiver/sqlqueryreceiver/testdata/integration/postgresql/init.sql b/receiver/sqlqueryreceiver/testdata/integration/postgresql/init.sql
index 0379e43136a6..4ef4274072e5 100644
--- a/receiver/sqlqueryreceiver/testdata/integration/postgresql/init.sql
+++ b/receiver/sqlqueryreceiver/testdata/integration/postgresql/init.sql
@@ -32,8 +32,8 @@ grant select, insert on simple_logs to otel;
insert into simple_logs (id, insert_time, body, attribute) values
(1, '2022-06-03 21:59:26+00', '- - - [03/Jun/2022:21:59:26 +0000] "GET /api/health HTTP/1.1" 200 6197 4 "-" "-" 445af8e6c428303f -', 'TLSv1.2'),
-(2, '2022-06-03 21:59:26+00', '- - - [03/Jun/2022:21:59:26 +0000] "GET /api/health HTTP/1.1" 200 6205 5 "-" "-" 3285f43cd4baa202 -', 'TLSv1'),
-(3, '2022-06-03 21:59:29+00', '- - - [03/Jun/2022:21:59:29 +0000] "GET /api/health HTTP/1.1" 200 6233 4 "-" "-" 579e8362d3185b61 -', 'TLSv1.2'),
+(2, '2022-06-03 21:59:26.692991+00', '- - - [03/Jun/2022:21:59:26 +0000] "GET /api/health HTTP/1.1" 200 6205 5 "-" "-" 3285f43cd4baa202 -', 'TLSv1'),
+(3, '2022-06-03 21:59:29.212212+00', '- - - [03/Jun/2022:21:59:29 +0000] "GET /api/health HTTP/1.1" 200 6233 4 "-" "-" 579e8362d3185b61 -', 'TLSv1.2'),
(4, '2022-06-03 21:59:31+00', '- - - [03/Jun/2022:21:59:31 +0000] "GET /api/health HTTP/1.1" 200 6207 5 "-" "-" 8c6ac61ae66e509f -', 'TLSv1'),
-(5, '2022-06-03 21:59:31+00', '- - - [03/Jun/2022:21:59:31 +0000] "GET /api/health HTTP/1.1" 200 6200 4 "-" "-" c163495861e873d8 -', 'TLSv1.2');
+(5, '2022-06-03 21:59:31.332121+00', '- - - [03/Jun/2022:21:59:31 +0000] "GET /api/health HTTP/1.1" 200 6200 4 "-" "-" c163495861e873d8 -', 'TLSv1.2');
diff --git a/receiver/sqlserverreceiver/queries.go b/receiver/sqlserverreceiver/queries.go
index 70ed98ad439f..177bbfb2d347 100644
--- a/receiver/sqlserverreceiver/queries.go
+++ b/receiver/sqlserverreceiver/queries.go
@@ -211,6 +211,7 @@ INSERT INTO @PCounters SELECT * FROM PerfCounters;
SELECT
'sqlserver_performance' AS [measurement]
,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance]
+ ,HOST_NAME() AS [computer_name]
,pc.[object_name] AS [object]
,pc.[counter_name] AS [counter]
,CASE pc.[instance_name] WHEN '_Total' THEN 'Total' ELSE ISNULL(pc.[instance_name],'') END AS [instance]
@@ -288,6 +289,7 @@ EXEC [xp_instance_regread]
SELECT
''sqlserver_server_properties'' AS [measurement]
,REPLACE(@@SERVERNAME,''\'','':'') AS [sql_instance]
+ ,HOST_NAME() AS [computer_name]
,@@SERVICENAME AS [service_name]
,si.[cpu_count]
,(SELECT [total_physical_memory_kb] FROM sys.[dm_os_sys_memory]) AS [server_memory]
diff --git a/receiver/sqlserverreceiver/scraper.go b/receiver/sqlserverreceiver/scraper.go
index ef11b72e1060..487ba5176c63 100644
--- a/receiver/sqlserverreceiver/scraper.go
+++ b/receiver/sqlserverreceiver/scraper.go
@@ -21,7 +21,10 @@ import (
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/sqlserverreceiver/internal/metadata"
)
-const instanceNameKey = "sql_instance"
+const (
+ computerNameKey = "computer_name"
+ instanceNameKey = "sql_instance"
+)
type sqlServerScraperHelper struct {
id component.ID
@@ -106,7 +109,6 @@ func (s *sqlServerScraperHelper) Shutdown(_ context.Context) error {
}
func (s *sqlServerScraperHelper) recordDatabaseIOMetrics(ctx context.Context) error {
- const computerNameKey = "computer_name"
const databaseNameKey = "database_name"
const physicalFilenameKey = "physical_filename"
const logicalFilenameKey = "logical_filename"
@@ -195,6 +197,7 @@ func (s *sqlServerScraperHelper) recordDatabasePerfCounterMetrics(ctx context.Co
now := pcommon.NewTimestampFromTime(time.Now())
for i, row := range rows {
rb := s.mb.NewResourceBuilder()
+ rb.SetSqlserverComputerName(row[computerNameKey])
rb.SetSqlserverInstanceName(row[instanceNameKey])
switch row[counterKey] {
@@ -283,6 +286,7 @@ func (s *sqlServerScraperHelper) recordDatabaseStatusMetrics(ctx context.Context
now := pcommon.NewTimestampFromTime(time.Now())
for _, row := range rows {
rb := s.mb.NewResourceBuilder()
+ rb.SetSqlserverComputerName(row[computerNameKey])
rb.SetSqlserverInstanceName(row[instanceNameKey])
errs = append(errs, s.mb.RecordSqlserverDatabaseCountDataPoint(now, row[dbOnline], metadata.AttributeDatabaseStatusOnline))
diff --git a/receiver/sqlserverreceiver/testdata/perfCounterQueryData.txt b/receiver/sqlserverreceiver/testdata/perfCounterQueryData.txt
index 4b9ec5411d96..6057147b8695 100644
--- a/receiver/sqlserverreceiver/testdata/perfCounterQueryData.txt
+++ b/receiver/sqlserverreceiver/testdata/perfCounterQueryData.txt
@@ -6,6 +6,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Access Methods",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -15,6 +17,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Access Methods",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"14458"
},
{
@@ -24,6 +27,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Access Methods",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"1.619118e+06"
},
{
@@ -33,6 +37,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Access Methods",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"871"
},
{
@@ -42,6 +47,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Access Methods",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -51,6 +57,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Access Methods",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"816"
},
{
@@ -60,6 +67,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Access Methods",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"895"
},
{
@@ -69,6 +77,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Availability Replica",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -78,6 +87,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Availability Replica",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -87,6 +97,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Availability Replica",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -96,6 +107,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Availability Replica",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -105,6 +117,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Availability Replica",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -114,6 +127,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Availability Replica",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -123,6 +137,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Availability Replica",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -132,6 +147,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Availability Replica",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -141,6 +157,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Availability Replica",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -150,6 +167,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"1952"
},
{
@@ -159,6 +177,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"205"
},
{
@@ -168,6 +187,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"160"
},
{
@@ -177,6 +197,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"93"
},
{
@@ -186,6 +207,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"220"
},
{
@@ -195,6 +217,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"437"
},
{
@@ -204,6 +227,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"158"
},
{
@@ -213,6 +237,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"130"
},
{
@@ -222,6 +247,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"18"
},
{
@@ -231,6 +257,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"1"
},
{
@@ -240,6 +267,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -249,6 +277,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -258,6 +287,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -267,6 +297,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -276,6 +307,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -285,6 +317,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -294,6 +327,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -303,6 +337,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -312,6 +347,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -321,6 +357,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -330,6 +367,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -339,6 +377,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -348,6 +387,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -357,6 +397,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -366,6 +407,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"531"
},
{
@@ -375,6 +417,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"5576"
},
{
@@ -384,6 +427,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"16932"
},
{
@@ -393,6 +437,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"10879"
},
{
@@ -402,6 +447,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"13253"
},
{
@@ -411,6 +457,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"3032"
},
{
@@ -420,6 +467,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"613"
},
{
@@ -429,6 +477,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"457"
},
{
@@ -438,6 +487,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"205"
},
{
@@ -447,6 +497,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -456,6 +507,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"1884"
},
{
@@ -465,6 +517,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"197"
},
{
@@ -474,6 +527,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"175"
},
{
@@ -483,6 +537,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"95"
},
{
@@ -492,6 +547,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"204"
},
{
@@ -501,6 +557,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"316"
},
{
@@ -510,6 +567,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"150"
},
{
@@ -519,6 +577,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"135"
},
{
@@ -528,6 +587,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"189"
},
{
@@ -537,6 +597,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"5"
},
{
@@ -546,6 +607,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -555,6 +617,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -564,6 +627,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -573,6 +637,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -582,6 +647,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -591,6 +657,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -600,6 +667,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -609,6 +677,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -618,6 +687,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -627,6 +697,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -636,6 +707,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -645,6 +717,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -654,6 +727,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -663,6 +737,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -672,6 +747,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"2777"
},
{
@@ -681,6 +757,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"68267"
},
{
@@ -690,6 +767,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"19282"
},
{
@@ -699,6 +777,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"10344"
},
{
@@ -708,6 +787,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"8733"
},
{
@@ -717,6 +797,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"2882"
},
{
@@ -726,6 +807,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"636"
},
{
@@ -735,6 +817,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"501"
},
{
@@ -744,6 +827,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"197"
},
{
@@ -753,6 +837,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Batch Resp Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -762,6 +847,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Buffer Manager",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"248"
},
{
@@ -771,6 +857,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Buffer Manager",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"100"
},
{
@@ -780,6 +867,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Buffer Manager",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"504"
},
{
@@ -789,6 +877,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Buffer Manager",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"4844"
},
{
@@ -798,6 +887,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Buffer Manager",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -807,6 +897,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Buffer Manager",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -816,6 +907,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Buffer Manager",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"433208"
},
{
@@ -825,6 +917,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Buffer Manager",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"3.294704e+06"
},
{
@@ -834,6 +927,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Buffer Manager",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"4410"
},
{
@@ -843,6 +937,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Buffer Manager",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"1114"
},
{
@@ -852,6 +947,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Buffer Manager",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"379"
},
{
@@ -861,6 +957,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Buffer Node",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"4844"
},
{
@@ -870,6 +967,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Buffer Node",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"433208"
},
{
@@ -879,6 +977,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Database Replica",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -888,6 +987,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Database Replica",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -897,6 +997,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Database Replica",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -906,6 +1007,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Database Replica",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -915,6 +1017,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Database Replica",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -924,6 +1027,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Database Replica",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -933,6 +1037,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Database Replica",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -942,6 +1047,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Database Replica",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -951,6 +1057,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Database Replica",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -960,6 +1067,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Database Replica",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -969,6 +1077,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -978,6 +1087,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -987,6 +1097,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"155648"
},
{
@@ -996,6 +1107,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"1.1055104e+07"
},
{
@@ -1005,6 +1117,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1014,6 +1127,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"23752"
},
{
@@ -1023,6 +1137,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"5476"
},
{
@@ -1032,6 +1147,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"2736"
},
{
@@ -1041,6 +1157,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"775"
},
{
@@ -1050,6 +1167,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"16"
},
{
@@ -1059,6 +1177,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"23"
},
{
@@ -1068,6 +1187,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"16622"
},
{
@@ -1077,6 +1197,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"718"
},
{
@@ -1086,6 +1207,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"338"
},
{
@@ -1095,6 +1217,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"2372"
},
{
@@ -1104,6 +1227,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"67"
},
{
@@ -1113,6 +1237,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"6"
},
{
@@ -1122,6 +1247,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"412"
},
{
@@ -1131,6 +1257,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"2247"
},
{
@@ -1140,6 +1267,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"1378"
},
{
@@ -1149,6 +1277,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"2040"
},
{
@@ -1158,6 +1287,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1167,6 +1297,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"5.873664e+06"
},
{
@@ -1176,6 +1307,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"4800"
},
{
@@ -1185,6 +1317,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1194,6 +1327,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1203,6 +1337,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1212,6 +1347,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1221,6 +1357,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"8192"
},
{
@@ -1230,6 +1367,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"675840"
},
{
@@ -1239,6 +1377,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1248,6 +1387,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"8184"
},
{
@@ -1257,6 +1397,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"853"
},
{
@@ -1266,6 +1407,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"113"
},
{
@@ -1275,6 +1417,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"73"
},
{
@@ -1284,6 +1427,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1293,6 +1437,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"10"
},
{
@@ -1302,6 +1447,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"4395"
},
{
@@ -1311,6 +1457,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"33"
},
{
@@ -1320,6 +1467,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"34"
},
{
@@ -1329,6 +1477,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"1449"
},
{
@@ -1338,6 +1487,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"61"
},
{
@@ -1347,6 +1497,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"2"
},
{
@@ -1356,6 +1507,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"78"
},
{
@@ -1365,6 +1517,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"120"
},
{
@@ -1374,6 +1527,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"629"
},
{
@@ -1383,6 +1537,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"1016"
},
{
@@ -1392,6 +1547,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1401,6 +1557,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"692224"
},
{
@@ -1410,6 +1567,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"15680"
},
{
@@ -1419,6 +1577,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1428,6 +1587,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1437,6 +1597,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1446,6 +1607,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1455,6 +1617,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"4800"
},
{
@@ -1464,6 +1627,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"970752"
},
{
@@ -1473,6 +1637,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1482,6 +1647,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"1784"
},
{
@@ -1491,6 +1657,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"774"
},
{
@@ -1500,6 +1667,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"138"
},
{
@@ -1509,6 +1677,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"87"
},
{
@@ -1518,6 +1687,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"5"
},
{
@@ -1527,6 +1697,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"43"
},
{
@@ -1536,6 +1707,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"1452"
},
{
@@ -1545,6 +1717,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"36"
},
{
@@ -1554,6 +1727,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"33"
},
{
@@ -1563,6 +1737,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"4614"
},
{
@@ -1572,6 +1747,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"53"
},
{
@@ -1581,6 +1757,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"3"
},
{
@@ -1590,6 +1767,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"76"
},
{
@@ -1599,6 +1777,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"118"
},
{
@@ -1608,6 +1787,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"678"
},
{
@@ -1617,6 +1797,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"1272"
},
{
@@ -1626,6 +1807,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1635,6 +1817,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"708608"
},
{
@@ -1644,6 +1827,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"15680"
},
{
@@ -1653,6 +1837,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1662,6 +1847,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1671,6 +1857,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1680,6 +1867,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1689,6 +1877,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"40960"
},
{
@@ -1698,6 +1887,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1707,6 +1897,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1716,6 +1907,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"1272"
},
{
@@ -1725,6 +1917,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"648"
},
{
@@ -1734,6 +1927,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1743,6 +1937,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1752,6 +1947,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1761,6 +1957,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"50"
},
{
@@ -1770,6 +1967,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"5"
},
{
@@ -1779,6 +1977,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1788,6 +1987,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"244"
},
{
@@ -1797,6 +1997,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"2335"
},
{
@@ -1806,6 +2007,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"30"
},
{
@@ -1815,6 +2017,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1824,6 +2027,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"49"
},
{
@@ -1833,6 +2037,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1842,6 +2047,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"2468"
},
{
@@ -1851,6 +2057,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"8184"
},
{
@@ -1860,6 +2067,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1869,6 +2077,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"2.134016e+06"
},
{
@@ -1878,6 +2087,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"65536"
},
{
@@ -1887,6 +2097,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1896,6 +2107,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Databases",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1905,6 +2117,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Exec Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1914,6 +2127,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Exec Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1923,6 +2137,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Exec Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1932,6 +2147,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Exec Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1941,6 +2157,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Exec Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1950,6 +2167,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Exec Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1959,6 +2177,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Exec Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1968,6 +2187,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Exec Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -1977,6 +2197,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:General Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"1"
},
{
@@ -1986,6 +2207,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:General Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"3"
},
{
@@ -1995,6 +2217,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:General Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"333"
},
{
@@ -2004,6 +2227,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:General Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"330"
},
{
@@ -2013,6 +2237,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:General Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2022,6 +2247,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:General Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"62"
},
{
@@ -2031,6 +2257,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:General Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2040,6 +2267,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:General Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"3"
},
{
@@ -2049,6 +2277,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Latches",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"423"
},
{
@@ -2058,6 +2287,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Latches",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"1036"
},
{
@@ -2067,6 +2297,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Locks",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2076,6 +2307,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Locks",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"1"
},
{
@@ -2085,6 +2317,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Locks",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"17"
},
{
@@ -2094,6 +2327,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Locks",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2103,6 +2337,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Memory Broker Clerks",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"4844"
},
{
@@ -2112,6 +2347,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Memory Broker Clerks",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"16"
},
{
@@ -2121,6 +2357,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Memory Manager",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2130,6 +2367,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Memory Manager",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2139,6 +2377,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Memory Manager",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"4.588888e+06"
},
{
@@ -2148,6 +2387,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Memory Manager",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"376968"
},
{
@@ -2157,6 +2397,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Query Store",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2166,6 +2407,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Query Store",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2175,6 +2417,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Query Store",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2184,6 +2427,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Query Store",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2193,6 +2437,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Resource Pool Stats",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2202,6 +2447,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Resource Pool Stats",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2211,6 +2457,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Resource Pool Stats",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2220,6 +2467,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Resource Pool Stats",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"30960"
},
{
@@ -2229,6 +2477,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Resource Pool Stats",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2238,6 +2487,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Resource Pool Stats",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2247,6 +2497,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Resource Pool Stats",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2256,6 +2507,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Resource Pool Stats",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2265,6 +2517,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Resource Pool Stats",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2274,6 +2527,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Resource Pool Stats",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2283,6 +2537,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Resource Pool Stats",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2292,6 +2547,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Resource Pool Stats",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2301,6 +2557,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Resource Pool Stats",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2310,6 +2567,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Resource Pool Stats",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2319,6 +2577,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Resource Pool Stats",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"277608"
},
{
@@ -2328,6 +2587,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Resource Pool Stats",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2337,6 +2597,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Resource Pool Stats",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2346,6 +2607,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Resource Pool Stats",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2355,6 +2617,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:SQL Errors",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"1027"
},
{
@@ -2364,6 +2627,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:SQL Errors",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2373,6 +2637,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:SQL Errors",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"825"
},
{
@@ -2382,6 +2647,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:SQL Errors",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2391,6 +2657,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:SQL Errors",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"202"
},
{
@@ -2400,6 +2667,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:SQL Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"3375"
},
{
@@ -2409,6 +2677,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:SQL Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"413"
},
{
@@ -2418,6 +2687,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:SQL Statistics",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"63"
},
{
@@ -2427,6 +2697,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Transactions",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"61824"
},
{
@@ -2436,6 +2707,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Transactions",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2445,6 +2717,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:User Settable",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2454,6 +2727,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:User Settable",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2463,6 +2737,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:User Settable",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2472,6 +2747,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:User Settable",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2481,6 +2757,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:User Settable",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2490,6 +2767,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:User Settable",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2499,6 +2777,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:User Settable",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2508,6 +2787,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:User Settable",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2517,6 +2797,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:User Settable",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2526,6 +2807,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:User Settable",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2535,6 +2817,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Workload Group Stats",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2544,6 +2827,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Workload Group Stats",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2553,6 +2837,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Workload Group Stats",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2562,6 +2847,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Workload Group Stats",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2571,6 +2857,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Workload Group Stats",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2580,6 +2867,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Workload Group Stats",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2589,6 +2877,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Workload Group Stats",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
},
{
@@ -2598,6 +2887,7 @@
"measurement":"sqlserver_performance",
"object":"SQLServer:Workload Group Stats",
"sql_instance":"8cac97ac9b8f",
+ "computer_name":"abcde",
"value":"0"
}
]
diff --git a/receiver/sqlserverreceiver/testdata/perfCounterQueryWithInstanceName.txt b/receiver/sqlserverreceiver/testdata/perfCounterQueryWithInstanceName.txt
index 7e03e7319dd8..14915e9f0290 100644
--- a/receiver/sqlserverreceiver/testdata/perfCounterQueryWithInstanceName.txt
+++ b/receiver/sqlserverreceiver/testdata/perfCounterQueryWithInstanceName.txt
@@ -145,6 +145,7 @@ INSERT INTO @PCounters SELECT * FROM PerfCounters;
SELECT
'sqlserver_performance' AS [measurement]
,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance]
+ ,HOST_NAME() AS [computer_name]
,pc.[object_name] AS [object]
,pc.[counter_name] AS [counter]
,CASE pc.[instance_name] WHEN '_Total' THEN 'Total' ELSE ISNULL(pc.[instance_name],'') END AS [instance]
diff --git a/receiver/sqlserverreceiver/testdata/perfCounterQueryWithoutInstanceName.txt b/receiver/sqlserverreceiver/testdata/perfCounterQueryWithoutInstanceName.txt
index 0f8e4f7da9d5..912806a8458d 100644
--- a/receiver/sqlserverreceiver/testdata/perfCounterQueryWithoutInstanceName.txt
+++ b/receiver/sqlserverreceiver/testdata/perfCounterQueryWithoutInstanceName.txt
@@ -145,6 +145,7 @@ INSERT INTO @PCounters SELECT * FROM PerfCounters;
SELECT
'sqlserver_performance' AS [measurement]
,REPLACE(@@SERVERNAME,'\',':') AS [sql_instance]
+ ,HOST_NAME() AS [computer_name]
,pc.[object_name] AS [object]
,pc.[counter_name] AS [counter]
,CASE pc.[instance_name] WHEN '_Total' THEN 'Total' ELSE ISNULL(pc.[instance_name],'') END AS [instance]
diff --git a/receiver/sqlserverreceiver/testdata/propertyQueryData.txt b/receiver/sqlserverreceiver/testdata/propertyQueryData.txt
index 53d1712e25ac..2c543fb7b328 100644
--- a/receiver/sqlserverreceiver/testdata/propertyQueryData.txt
+++ b/receiver/sqlserverreceiver/testdata/propertyQueryData.txt
@@ -1 +1 @@
-[{"ForceEncryption":"0","Port":"1433","PortType":"Static","available_server_memory":"4517288","cpu_count":"16","db_offline":"0","db_online":"4","db_recovering":"0","db_recoveryPending":"0","db_restoring":"0","db_suspect":"0","engine_edition":"3","hardware_type":"HYPERVISOR","instance_type":"0","is_hadr_enabled":"0","measurement":"sqlserver_server_properties","server_memory":"6421504","service_name":"MSSQLSERVER","sku":"Developer Edition (64-bit)","sql_instance":"ad8fb2b53dce","sql_version":"16.0.4105.2","sql_version_desc":"Microsoft SQL Server 2022 (RTM-CU11) (KB5032679) ","uptime":"17393"}]
+[{"ForceEncryption":"0","Port":"1433","PortType":"Static","available_server_memory":"4517288","cpu_count":"16","db_offline":"0","db_online":"4","db_recovering":"0","db_recoveryPending":"0","db_restoring":"0","db_suspect":"0","engine_edition":"3","hardware_type":"HYPERVISOR","instance_type":"0","is_hadr_enabled":"0","measurement":"sqlserver_server_properties","server_memory":"6421504","service_name":"MSSQLSERVER","sku":"Developer Edition (64-bit)","sql_instance":"ad8fb2b53dce","computer_name":"abcde","sql_version":"16.0.4105.2","sql_version_desc":"Microsoft SQL Server 2022 (RTM-CU11) (KB5032679) ","uptime":"17393"}]
diff --git a/receiver/sqlserverreceiver/testdata/propertyQueryWithInstanceName.txt b/receiver/sqlserverreceiver/testdata/propertyQueryWithInstanceName.txt
index d03cb7018efd..1b23f72c515f 100644
--- a/receiver/sqlserverreceiver/testdata/propertyQueryWithInstanceName.txt
+++ b/receiver/sqlserverreceiver/testdata/propertyQueryWithInstanceName.txt
@@ -44,6 +44,7 @@ EXEC [xp_instance_regread]
SELECT
''sqlserver_server_properties'' AS [measurement]
,REPLACE(@@SERVERNAME,''\'','':'') AS [sql_instance]
+ ,HOST_NAME() AS [computer_name]
,@@SERVICENAME AS [service_name]
,si.[cpu_count]
,(SELECT [total_physical_memory_kb] FROM sys.[dm_os_sys_memory]) AS [server_memory]
diff --git a/receiver/sqlserverreceiver/testdata/propertyQueryWithoutInstanceName.txt b/receiver/sqlserverreceiver/testdata/propertyQueryWithoutInstanceName.txt
index d36f56d58625..d5a13bc61d2e 100644
--- a/receiver/sqlserverreceiver/testdata/propertyQueryWithoutInstanceName.txt
+++ b/receiver/sqlserverreceiver/testdata/propertyQueryWithoutInstanceName.txt
@@ -44,6 +44,7 @@ EXEC [xp_instance_regread]
SELECT
''sqlserver_server_properties'' AS [measurement]
,REPLACE(@@SERVERNAME,''\'','':'') AS [sql_instance]
+ ,HOST_NAME() AS [computer_name]
,@@SERVICENAME AS [service_name]
,si.[cpu_count]
,(SELECT [total_physical_memory_kb] FROM sys.[dm_os_sys_memory]) AS [server_memory]
diff --git a/receiver/statsdreceiver/config_test.go b/receiver/statsdreceiver/config_test.go
index 5ff7f4907f22..f1129b37bde2 100644
--- a/receiver/statsdreceiver/config_test.go
+++ b/receiver/statsdreceiver/config_test.go
@@ -213,8 +213,7 @@ func TestConfig_Validate_MaxSize(t *testing.T) {
},
}
err := cfg.Validate()
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "histogram max_size out of range")
+ assert.ErrorContains(t, err, "histogram max_size out of range")
}
}
func TestConfig_Validate_HistogramGoodConfig(t *testing.T) {
diff --git a/receiver/webhookeventreceiver/config_test.go b/receiver/webhookeventreceiver/config_test.go
index faa09f4d3220..3e119efb0d20 100644
--- a/receiver/webhookeventreceiver/config_test.go
+++ b/receiver/webhookeventreceiver/config_test.go
@@ -106,8 +106,7 @@ func TestValidateConfig(t *testing.T) {
for _, test := range tests {
t.Run(test.desc, func(t *testing.T) {
err := test.conf.Validate()
- require.Error(t, err)
- require.Contains(t, err.Error(), test.expect.Error())
+ require.ErrorContains(t, err, test.expect.Error())
})
}
}